[yt-svn] commit/yt: 8 new changesets

Bitbucket commits-noreply at bitbucket.org
Wed Nov 14 12:47:34 PST 2012


8 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/5e0edcf60e2b/
changeset:   5e0edcf60e2b
branch:      yt
user:        ngoldbaum
date:        2012-09-26 23:14:25
summary:     Merging.
affected #:  22 files

diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -133,6 +133,7 @@
         else:
             exec(operation)
 
+        if self.children is None: return
         for child in self.children:
             child.pass_down(operation)
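
For context: leaf Clump objects never have their children attribute
populated, so it stays None and the loop above would raise a TypeError.
A minimal sketch of the guarded recursion, using a hypothetical stand-in
class rather than the yt API:

    class Node(object):
        def __init__(self, children=None):
            self.children = children   # leaves keep None, not []
        def pass_down(self, operation):
            exec(operation)
            if self.children is None: return   # the guard added above
            for child in self.children:
                child.pass_down(operation)

    Node().pass_down("x = 1")   # completes instead of raising TypeError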
 


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/_skeleton/__init__.py
--- /dev/null
+++ b/yt/frontends/_skeleton/__init__.py
@@ -0,0 +1,25 @@
+"""
+API for yt.frontends._skeleton
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/_skeleton/api.py
--- /dev/null
+++ b/yt/frontends/_skeleton/api.py
@@ -0,0 +1,37 @@
+"""
+API for yt.frontends._skeleton
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .data_structures import \
+      SkeletonGrid, \
+      SkeletonHierarchy, \
+      SkeletonStaticOutput
+
+from .fields import \
+      SkeletonFieldInfo, \
+      add_skeleton_field
+
+from .io import \
+      IOHandlerSkeleton


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/_skeleton/data_structures.py
--- /dev/null
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -0,0 +1,157 @@
+"""
+Skeleton data structures
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import h5py
+import stat
+import numpy as np
+import weakref
+
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+    AMRGridPatch
+from yt.data_objects.hierarchy import \
+    AMRHierarchy
+from yt.data_objects.static_output import \
+    StaticOutput
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.io_handler import \
+    io_registry
+from yt.utilities.physical_constants import cm_per_mpc
+from .fields import SkeletonFieldInfo, add_skeleton_field, KnownSkeletonFields
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc, ValidateDataField, TranslationFunc
+
+class SkeletonGrid(AMRGridPatch):
+    _id_offset = 0
+    #__slots__ = ["_level_id", "stop_index"]
+    def __init__(self, id, hierarchy, level):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Parent = None
+        self.Children = []
+        self.Level = level
+
+    def __repr__(self):
+        return "SkeletonGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
+
+class SkeletonHierarchy(AMRHierarchy):
+
+    grid = SkeletonGrid
+    float_type = np.float64
+    
+    def __init__(self, pf, data_style='skeleton'):
+        self.data_style = data_style
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = self.parameter_file.parameter_filename
+        self.directory = os.path.dirname(self.hierarchy_filename)
+        AMRHierarchy.__init__(self, pf, data_style)
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        # This needs to set a self.field_list that contains all the available,
+        # on-disk fields.
+        pass
+    
+    def _count_grids(self):
+        # This needs to set self.num_grids
+        pass
+        
+    def _parse_hierarchy(self):
+        # This needs to fill the following arrays, where N is self.num_grids:
+        #   self.grid_left_edge         (N, 3) <= float64
+        #   self.grid_right_edge        (N, 3) <= float64
+        #   self.grid_dimensions        (N, 3) <= int
+        #   self.grid_particle_count    (N, 1) <= int
+        #   self.grid_levels            (N, 1) <= int
+        #   self.grids                  (N, 1) <= grid objects
+        #   
+        pass
+                        
+    def _populate_grid_objects(self):
+        # For each grid, this must call:
+        #   grid._prepare_grid()
+        #   grid._setup_dx()
+        # This must also set:
+        #   grid.Children <= list of child grids
+        #   grid.Parent   <= parent grid
+        # This is handled by the frontend because often the children must be
+        # identified.
+        pass
+
+class SkeletonStaticOutput(StaticOutput):
+    _hierarchy_class = SkeletonHierarchy
+    _fieldinfo_fallback = SkeletonFieldInfo
+    _fieldinfo_known = KnownSkeletonFields
+    _handle = None
+    
+    def __init__(self, filename, data_style='skeleton',
+                 storage_filename = None,
+                 conversion_override = None):
+
+        if conversion_override is None: conversion_override = {}
+        self._conversion_override = conversion_override
+
+        StaticOutput.__init__(self, filename, data_style)
+        self.storage_filename = storage_filename
+
+    def _set_units(self):
+        # This needs to set up the dictionaries that convert from code units to
+        # CGS.  The needed items are listed in the second entry:
+        #   self.time_units         <= sec_conversion
+        #   self.conversion_factors <= mpc_conversion
+        #   self.units              <= On-disk fields
+        pass
+
+    def _parse_parameter_file(self):
+        # This needs to set up the following items:
+        #
+        #   self.unique_identifier
+        #   self.parameters             <= full of code-specific items of use
+        #   self.domain_left_edge       <= array of float64
+        #   self.domain_right_edge      <= array of float64
+        #   self.dimensionality         <= int
+        #   self.domain_dimensions      <= array of int64
+        #   self.current_time           <= simulation time in code units
+        #
+        # We also set up cosmological information.  Set these to zero if
+        # non-cosmological.
+        #
+        #   self.cosmological_simulation    <= int, 0 or 1
+        #   self.current_redshift           <= float
+        #   self.omega_lambda               <= float
+        #   self.omega_matter               <= float
+        #   self.hubble_constant            <= float
+        pass
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        # This accepts a filename or a set of arguments and returns True or
+        # False depending on if the file is of the type requested.
+        return False
+
+
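
For orientation, a hypothetical _parse_hierarchy for a dataset holding a
single 64^3 root grid might fill those arrays as follows (illustrative
only; the names and dtypes follow the comments in the skeleton above):

    def _parse_hierarchy(self):
        # One root grid covering the unit cube (hypothetical layout).
        self.grid_left_edge = np.zeros((self.num_grids, 3), dtype='float64')
        self.grid_right_edge = np.ones((self.num_grids, 3), dtype='float64')
        self.grid_dimensions = 64 * np.ones((self.num_grids, 3), dtype='int32')
        self.grid_particle_count = np.zeros((self.num_grids, 1), dtype='int32')
        self.grid_levels = np.zeros((self.num_grids, 1), dtype='int32')
        self.grids = np.empty(self.num_grids, dtype='object')
        for i in range(self.num_grids):
            self.grids[i] = self.grid(i, self, self.grid_levels[i, 0])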




diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/_skeleton/fields.py
--- /dev/null
+++ b/yt/frontends/_skeleton/fields.py
@@ -0,0 +1,102 @@
+"""
+Skeleton-specific fields
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    NullFunc, \
+    TranslationFunc, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.data_objects.universal_fields
+from yt.utilities.physical_constants import \
+    kboltz
+
+# The first field container is where any fields that exist on disk go, along
+# with their conversion factors, display names, etc.
+
+KnownSkeletonFields = FieldInfoContainer()
+add_skeleton_field = KnownSkeletonFields.add_field
+
+SkeletonFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = SkeletonFieldInfo.add_field
+
+# Often, we want to translate between fields on disk and fields in yt.  This
+# construct shows how to do that.  Note that we use TranslationFunc.
+
+translation_dict = {"x-velocity": "velx",
+                    "y-velocity": "vely",
+                    "z-velocity": "velz",
+                    "Density": "dens",
+                    "Temperature": "temp",
+                    "Pressure" : "pres", 
+                    "Grav_Potential" : "gpot",
+                    "particle_position_x" : "particle_posx",
+                    "particle_position_y" : "particle_posy",
+                    "particle_position_z" : "particle_posz",
+                    "particle_velocity_x" : "particle_velx",
+                    "particle_velocity_y" : "particle_vely",
+                    "particle_velocity_z" : "particle_velz",
+                    "particle_index" : "particle_tag",
+                    "Electron_Fraction" : "elec",
+                    "HI_Fraction" : "h   ",
+                    "HD_Fraction" : "hd  ",
+                    "HeI_Fraction": "hel ",
+                    "HeII_Fraction": "hep ",
+                    "HeIII_Fraction": "hepp",
+                    "HM_Fraction": "hmin",
+                    "HII_Fraction": "hp  ",
+                    "H2I_Fraction": "htwo",
+                    "H2II_Fraction": "htwp",
+                    "DI_Fraction": "deut",
+                    "DII_Fraction": "dplu",
+                    "ParticleMass": "particle_mass",
+                    "Flame_Fraction": "flam"}
+
+for f,v in translation_dict.items():
+    if v not in KnownSkeletonFields:
+        pfield = v.startswith("particle")
+        add_skeleton_field(v, function=NullFunc, take_log=False,
+                  validators = [ValidateDataField(v)],
+                  particle_type = pfield)
+    if f.endswith("_Fraction") :
+        dname = "%s\/Fraction" % f.split("_")[0]
+    else :
+        dname = f                    
+    ff = KnownSkeletonFields[v]
+    pfield = f.startswith("particle")
+    add_field(f, TranslationFunc(v),
+              take_log=KnownSkeletonFields[v].take_log,
+              units = ff._units, display_name=dname,
+              particle_type = pfield)
+
+# Here's an example of adding a new field:
+
+add_skeleton_field("dens", function=NullFunc, take_log=True,
+                convert_function=_get_convert("dens"),
+                units=r"\rm{g}/\rm{cm}^3")


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/_skeleton/io.py
--- /dev/null
+++ b/yt/frontends/_skeleton/io.py
@@ -0,0 +1,44 @@
+"""
+Skeleton-specific IO functions
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+import h5py
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+
+class IOHandlerSkeleton(BaseIOHandler):
+    _particle_reader = False
+    _data_style = "skeleton"
+
+    def _read_data_set(self, grid, field):
+        # This must return the array, of size/shape grid.ActiveDimensions, that
+        # corresponds to 'field'.
+        pass
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        # If this is not implemented, the IO handler will just slice a
+        # _read_data_set item.
+        pass
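
As a point of reference, a minimal _read_data_set for an HDF5-backed
code might look like the sketch below; the file layout (one dataset per
field, indexed by grid id) is hypothetical and not part of the skeleton:

    def _read_data_set(self, grid, field):
        f = h5py.File(grid.filename, 'r')
        data = f['/%s' % field][grid.id - grid._id_offset, ...]
        f.close()
        return data.astype('float64')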




diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/_skeleton/setup.py
--- /dev/null
+++ b/yt/frontends/_skeleton/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('skeleton', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -101,18 +101,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -89,17 +89,8 @@
         return self.start_index
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if len(self.Parent) > 0:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        self.dds = self.hierarchy.dds_list[self.Level]
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
 class ChomboHierarchy(AMRHierarchy):
@@ -176,11 +167,13 @@
         # 'Chombo_global'
         levels = f.keys()[1:]
         grids = []
+        self.dds_list = []
         i = 0
         for lev in levels:
             level_number = int(re.match('level_(\d+)',lev).groups()[0])
             boxes = f[lev]['boxes'].value
             dx = f[lev].attrs['dx']
+            self.dds_list.append(dx * np.ones(3))
             for level_id, box in enumerate(boxes):
                 si = np.array([box['lo_%s' % ax] for ax in 'ijk'])
                 ei = np.array([box['hi_%s' % ax] for ax in 'ijk'])


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/maestro/data_structures.py
--- a/yt/frontends/maestro/data_structures.py
+++ b/yt/frontends/maestro/data_structures.py
@@ -102,17 +102,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/nyx/data_structures.py
--- a/yt/frontends/nyx/data_structures.py
+++ b/yt/frontends/nyx/data_structures.py
@@ -100,18 +100,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is. We don't assume that
-        # dx=dy=dz here.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE - LE) / self.ActiveDimensions)
-
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -99,17 +99,11 @@
             self.Parent = None
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if self.Parent is not None:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/testing.py
--- /dev/null
+++ b/yt/testing.py
@@ -0,0 +1,143 @@
+"""Provides utility and helper functions for testing in yt.
+
+Author: Anthony Scopatz <scopatz at gmail.com>
+Affiliation: The University of Chicago
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Anthony Scopatz.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+from yt.funcs import *
+from numpy.testing import assert_array_equal
+
+def amrspace(extent, levels=7, cells=8):
+    """Creates two numpy arrays representing the left and right bounds of 
+    an AMR grid as well as an array for the AMR level of each cell.
+
+    Parameters
+    ----------
+    extent : array-like
+        This is a sequence of length 2*ndims giving the bounds of each dimension.
+        For example, the 2D unit square would be given by [0.0, 1.0, 0.0, 1.0].
+        A 3D cylindrical grid may look like [0.0, 2.0, -1.0, 1.0, 0.0, 2*np.pi].
+    levels : int or sequence of ints, optional
+        This is the number of AMR refinement levels.  If given as a sequence (of
+        length ndims), each dimension will be refined down to the corresponding
+        level.  All values in the sequence must be the same or zero; a
+        zero-valued dimension indicates that it should not be refined.  Taking
+        the 3D cylindrical example above, if we don't want to refine theta but
+        want r and z refined down to level 5, we would set levels=(5, 5, 0).
+    cells : int, optional
+        This is the number of cells per refinement level.
+
+    Returns
+    -------
+    left : float ndarray, shape=(npoints, ndims)
+        The left AMR grid points.
+    right : float ndarray, shape=(npoints, ndims)
+        The right AMR grid points.
+    level : int ndarray, shape=(npoints,)
+        The AMR level for each point.
+
+    Examples
+    --------
+    >>> l, r, lvl = amrspace([0.0, 2.0, 1.0, 2.0, 0.0, 3.14], levels=(3,3,0), cells=2)
+    >>> print l
+    [[ 0.     1.     0.   ]
+     [ 0.25   1.     0.   ]
+     [ 0.     1.125  0.   ]
+     [ 0.25   1.125  0.   ]
+     [ 0.5    1.     0.   ]
+     [ 0.     1.25   0.   ]
+     [ 0.5    1.25   0.   ]
+     [ 1.     1.     0.   ]
+     [ 0.     1.5    0.   ]
+     [ 1.     1.5    0.   ]]
+
+    """
+    extent = np.asarray(extent, dtype='f8')
+    dextent = extent[1::2] - extent[::2]
+    ndims = len(dextent)
+
+    if isinstance(levels, int):
+        minlvl = maxlvl = levels
+        levels = np.array([levels]*ndims, dtype='int32')
+    else:
+        levels = np.asarray(levels, dtype='int32')
+        minlvl = levels.min()
+        maxlvl = levels.max()
+        if minlvl != maxlvl and (minlvl != 0 or set([minlvl, maxlvl]) != set(levels)):
+            raise ValueError("all levels must have the same value or zero.")
+    dims_zero = (levels == 0)
+    dims_nonzero = ~dims_zero
+    ndims_nonzero = dims_nonzero.sum()
+
+    npoints = (cells**ndims_nonzero - 1)*maxlvl + 1
+    left = np.empty((npoints, ndims), dtype='float64')
+    right = np.empty((npoints, ndims), dtype='float64')
+    level = np.empty(npoints, dtype='int32')
+
+    # fill zero dims
+    left[:,dims_zero] = extent[::2][dims_zero]
+    right[:,dims_zero] = extent[1::2][dims_zero]
+
+    # fill non-zero dims
+    dcell = 1.0 / cells
+    left_slice =  tuple([slice(extent[2*n], extent[2*n+1], extent[2*n+1]) if \
+        dims_zero[n] else slice(0.0,1.0,dcell) for n in range(ndims)])
+    right_slice = tuple([slice(extent[2*n+1], extent[2*n], -extent[2*n+1]) if \
+        dims_zero[n] else slice(dcell,1.0+dcell,dcell) for n in range(ndims)])
+    left_norm_grid = np.reshape(np.mgrid[left_slice].T.flat[ndims:], (-1, ndims))
+    lng_zero = left_norm_grid[:,dims_zero]
+    lng_nonzero = left_norm_grid[:,dims_nonzero]
+
+    right_norm_grid = np.reshape(np.mgrid[right_slice].T.flat[ndims:], (-1, ndims))
+    rng_zero = right_norm_grid[:,dims_zero]
+    rng_nonzero = right_norm_grid[:,dims_nonzero]
+
+    level[0] = maxlvl
+    left[0,:] = extent[::2]
+    right[0,dims_zero] = extent[1::2][dims_zero]
+    right[0,dims_nonzero] = (dcell**maxlvl)*dextent[dims_nonzero] + extent[::2][dims_nonzero]
+    for i, lvl in enumerate(range(maxlvl, 0, -1)):
+        start = (cells**ndims_nonzero - 1)*i + 1
+        stop = (cells**ndims_nonzero - 1)*(i+1) + 1
+        dsize = dcell**(lvl-1) * dextent[dims_nonzero]
+        level[start:stop] = lvl
+        left[start:stop,dims_zero] = lng_zero
+        left[start:stop,dims_nonzero] = lng_nonzero*dsize + extent[::2][dims_nonzero]
+        right[start:stop,dims_zero] = rng_zero
+        right[start:stop,dims_nonzero] = rng_nonzero*dsize + extent[::2][dims_nonzero]
+
+    return left, right, level
+
+def fake_random_pf(ndims, peak_value = 1.0, fields = ("Density",), negative = False):
+    from yt.frontends.stream.api import load_uniform_grid
+    if not iterable(ndims):
+        ndims = [ndims, ndims, ndims]
+    else:
+        assert(len(ndims) == 3)
+    if negative:
+        offset = 0.5
+    else:
+        offset = 0.0
+    data = dict((field, (np.random.random(ndims) - offset) * peak_value)
+                 for field in fields)
+    ug = load_uniform_grid(data, ndims, 1.0)
+    return ug
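
A typical use of fake_random_pf in a test (mirroring the flagging-method
test later in this changeset):

    from yt.testing import fake_random_pf
    pf = fake_random_pf(64)    # 64^3 unigrid of random Density values
    pf.h                       # force the hierarchy to instantiate
    grid = pf.h.grids[0]
    assert grid["Density"].shape == (64, 64, 64)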


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/utilities/flagging_methods.py
--- /dev/null
+++ b/yt/utilities/flagging_methods.py
@@ -0,0 +1,51 @@
+"""
+Utilities for flagging zones for refinement in a dataset
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np # For modern purposes
+
+flagging_method_registry = {}
+
+def flag_cells(grid, methods):
+    flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
+    for method in methods:
+        flagged |= method(grid)
+    return flagged
+
+class FlaggingMethod(object):
+    _skip_add = False
+    class __metaclass__(type):
+        def __init__(cls, name, b, d):
+            type.__init__(cls, name, b, d)
+            if hasattr(cls, "_type_name") and not cls._skip_add:
+                flagging_method_registry[cls._type_name] = cls
+
+class OverDensity(FlaggingMethod):
+    _type_name = "overdensity"
+    def __init__(self, over_density):
+        self.over_density = over_density
+
+    def __call__(self, pf, grid):
+        rho = grid["Density"] / (pf.refine_by**grid.Level)
+        return (rho > self.over_density)
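
Because the metaclass registers any subclass that defines _type_name, a
new criterion needs no explicit registration step.  A hypothetical
example (not part of this commit):

    class MinimumDensity(FlaggingMethod):
        _type_name = "minimum_density"
        def __init__(self, min_density):
            self.min_density = min_density
        def __call__(self, pf, grid):
            return grid["Density"] > self.min_density

    # afterwards this is automatically available:
    #   flagging_method_registry["minimum_density"](1.0e-28)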


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/utilities/lib/fortran_reader.pyx
--- a/yt/utilities/lib/fortran_reader.pyx
+++ b/yt/utilities/lib/fortran_reader.pyx
@@ -53,8 +53,8 @@
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def read_and_seek(char *filename, int offset1, int offset2,
-                  np.ndarray buffer, int bytes):
+def read_and_seek(char *filename, np.int64_t offset1,
+                  np.int64_t offset2, np.ndarray buffer, int bytes):
     cdef FILE *f = fopen(filename, "rb")
     cdef void *buf = <void *> buffer.data
     cdef char line[1024]


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -67,6 +67,8 @@
             t = np.cross(normal_vector, vecs).sum(axis=1)
             ax = t.argmax()
             east_vector = np.cross(vecs[ax,:], normal_vector).ravel()
+            # self.north_vector must remain None, otherwise rotations about a fixed axis will break.
+            # The north_vector calculated here will still be included in self.unit_vectors.
             north_vector = np.cross(normal_vector, east_vector).ravel()
         else:
             if self.steady_north:
@@ -82,7 +84,7 @@
         r"""Change the view direction based on any of the orientation parameters.
 
         This will recalculate all the necessary vectors and vector planes related
-        to a an orientable object.
+        to an orientable object.
 
         Parameters
         ----------


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/utilities/tests/test_flagging_methods.py
--- /dev/null
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -0,0 +1,12 @@
+from yt.testing import *
+from yt.utilities.flagging_methods import flagging_method_registry
+
+def setup():
+    global pf
+    pf = fake_random_pf(64)
+    pf.h
+
+def test_over_density():
+    od_flag = flagging_method_registry["overdensity"](0.75) 
+    criterion = (pf.h.grids[0]["Density"] > 0.75)
+    assert( np.all( od_flag(pf, pf.h.grids[0]) == criterion) )


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/utilities/tests/test_interpolators.py
--- /dev/null
+++ b/yt/utilities/tests/test_interpolators.py
@@ -0,0 +1,27 @@
+from yt.testing import *
+import yt.utilities.linear_interpolators as lin
+
+def setup():
+    pass
+
+def test_linear_interpolator_1d():
+    random_data = np.random.random(64)
+    fv = {'x': np.mgrid[0.0:1.0:64j]}
+    ufi = lin.UnilinearFieldInterpolator(random_data, (0.0, 1.0), "x", True)
+    assert_array_equal(ufi(fv), random_data)
+
+def test_linear_interpolator_2d():
+    random_data = np.random.random((64, 64))
+    fv = dict((ax, v) for ax, v in zip("xyz",
+               np.mgrid[0.0:1.0:64j, 0.0:1.0:64j]))
+    bfi = lin.BilinearFieldInterpolator(random_data,
+            (0.0, 1.0, 0.0, 1.0), "xy", True)
+    assert_array_equal(bfi(fv), random_data)
+
+def test_linear_interpolator_3d():
+    random_data = np.random.random((64, 64, 64))
+    fv = dict((ax, v) for ax, v in zip("xyz",
+               np.mgrid[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j]))
+    tfi = lin.TrilinearFieldInterpolator(random_data,
+            (0.0, 1.0, 0.0, 1.0, 0.0, 1.0), "xyz", True)
+    assert_array_equal(tfi(fv), random_data)


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -872,11 +872,15 @@
         return names
 
     def _send_zmq(self):
-        from IPython.zmq.pylab.backend_inline import \
-                    send_figure
+        try:
+            # pre-IPython v0.14        
+            from IPython.zmq.pylab.backend_inline import send_figure as display
+        except ImportError:
+            # IPython v0.14+ 
+            from IPython.core.display import display
         for k, v in sorted(self.plots.iteritems()):
             canvas = FigureCanvasAgg(v.figure)
-            send_figure(v.figure)
+            display(v.figure)
 
     def show(self):
         r"""This will send any existing plots to the IPython notebook.


diff -r 79743892acbc5d49bbc5067db8077e349a745e1f -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -195,7 +195,7 @@
         if not iterable(width):
             width = (width, width, width) # left/right, top/bottom, front/back 
         self.orienter = Orientation(normal_vector, north_vector=north_vector, steady_north=steady_north)
-        self.rotation_vector = self.orienter.north_vector
+        self.rotation_vector = self.orienter.unit_vectors[1]
         self._setup_box_properties(width, center, self.orienter.unit_vectors)
         if fields is None: fields = ["Density"]
         self.fields = fields
@@ -282,7 +282,7 @@
         if center is not None:
             self.center = center
         if north_vector is None:
-            north_vector = self.orienter.north_vector
+            north_vector = self.orienter.unit_vectors[1]
         if normal_vector is None:
             normal_vector = self.orienter.normal_vector
         self.orienter.switch_orientation(normal_vector = normal_vector,
@@ -587,7 +587,7 @@
         """
         rot_vector = self.orienter.normal_vector
         R = get_rotation_matrix(theta, rot_vector)
-        north_vector = self.orienter.north_vector
+        north_vector = self.orienter.unit_vectors[1]
         self.switch_view(north_vector=np.dot(R, north_vector))
 
     def rotation(self, theta, n_steps, rot_vector=None, clip_ratio = None):



https://bitbucket.org/yt_analysis/yt/changeset/4cb47cdf4520/
changeset:   4cb47cdf4520
branch:      yt
user:        ngoldbaum
date:        2012-09-27 21:03:32
summary:     Some bugfixes for OffAxisProjectionPlot
affected #:  2 files

diff -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f -r 4cb47cdf45206350ccf754d76d8a78e9b01eaeaf yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1,3 +1,4 @@
+
 """
 A plotting mechanism based on the idea of a "window" into the data.
 
@@ -184,15 +185,14 @@
         else:
             raise RuntimeError('center keyword \"%s\" not recognized'%center)
 
-    # Transforming to the cutting plane coordinate system
-    center = np.array(center)
-    center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
-    (normal,perp1,perp2) = ortho_find(normal)
-    mat = np.transpose(np.column_stack((perp1,perp2,normal)))
-    center = np.dot(mat,center)
-    width = width
+    if width.shape == (2,):
+        # Transforming to the cutting plane coordinate system
+        center = np.array(center)
+        center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
+        (normal,perp1,perp2) = ortho_find(normal)
+        mat = np.transpose(np.column_stack((perp1,perp2,normal)))
+        center = np.dot(mat,center)
     
-    if width.shape == (2,):
         bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
     else:
         bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2, -width[2]/2, width[2]/2]
@@ -270,12 +270,13 @@
                                                      bounds, self.buff_size, 
                                                      self.antialias, 
                                                      periodic=self._periodic)
-        elif 'Projection' in class_name or 'Slice' in class_name:
+        elif 'AMRQuadTreeProj' in class_name or 'Slice' in class_name:
             self._frb = FixedResolutionBuffer(self.data_source, 
                                               bounds, self.buff_size, 
                                               self.antialias, 
                                               periodic=self._periodic)
         else:
             raise RuntimeError("Failed to repixelize.")
         if old_fields is None:
             self._frb._get_data_source_fields()
@@ -367,7 +368,7 @@
             mylog.info("ylim = %f %f" %self.ylim)
         else:
             self.xlim = bounds[0:2]
-            self.ylim = bounds[2:]
+            self.ylim = bounds[2:4]
             
     @invalidate_data
     def set_width(self, width, unit = '1'):
@@ -1121,8 +1122,9 @@
             image = off_axis_projection(ds.pf, ds.center, ds.normal_vector,
                                         ds.width, ds.resolution, item,
                                         weight=ds.weight_field, volume=ds.volume,
-                                        no_ghost=ds.no_ghost, interpolated=ds.interpolated)
-            self.internal_dict[item] = image
+                                        no_ghost=ds.no_ghost, interpolated=ds.interpolated,
+                                        north_vector=ds.north_vector)
+            self.internal_dict[item] = image.T
         return image
     
     def _get_data_source_fields(self):


diff -r 5e0edcf60e2b0d1f073478f0d5ec89405310ee8f -r 4cb47cdf45206350ccf754d76d8a78e9b01eaeaf yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1666,8 +1666,8 @@
 data_object_registry["projection_camera"] = ProjectionCamera
 
 def off_axis_projection(pf, center, normal_vector, width, resolution,
-                        field, weight = None, 
-                        volume = None, no_ghost = False, interpolated = False):
+                        field, weight=None, volume=None, no_ghost=False, 
+                        interpolated=False, north_vector=None):
     r"""Project through a parameter file, off-axis, and return the image plane.
 
     This function will accept the necessary items to integrate through a volume
@@ -1707,10 +1707,14 @@
         accuracy/smoothness in resulting image.  The effects are
         less notable when the transfer function is smooth and
         broad. Default: True
-    interpolated : optional, default False
+    interpolated : bool, optional
         If True, the data is first interpolated to vertex-centered data, 
         then tri-linearly interpolated along the ray. Not suggested for 
         quantitative studies.
+    north_vector : array-like, optional
+        A vector defining the 'up' direction in the plot.  This
+        option sets the orientation of the slicing plane.  If not
+        set, an arbitrary grid-aligned north-vector is chosen.
 
     Returns
     -------
@@ -1726,8 +1730,9 @@
 
     """
     projcam = ProjectionCamera(center, normal_vector, width, resolution,
-            field, weight=weight, pf=pf, volume=volume,
-            no_ghost=no_ghost, interpolated=interpolated)
+                               field, weight=weight, pf=pf, volume=volume,
+                               no_ghost=no_ghost, interpolated=interpolated,
+                               north_vector=north_vector)
     image = projcam.snapshot()
     if weight is not None:
         pf.field_info.pop("temp_weightfield")
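
A short usage sketch for the extended signature (dataset path
hypothetical):

    import numpy as np
    from yt.mods import load
    from yt.visualization.image_writer import write_image
    from yt.visualization.volume_rendering.api import off_axis_projection

    pf = load("my_dataset")
    c = 0.5 * (pf.domain_left_edge + pf.domain_right_edge)
    L = [1.0, 0.5, 0.25]                       # line of sight
    image = off_axis_projection(pf, c, L, 1.0, 512, "Density",
                                north_vector=[0.0, 0.0, 1.0])
    write_image(np.log10(image), "offaxis_density.png")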



https://bitbucket.org/yt_analysis/yt/changeset/f77fd14bdaa7/
changeset:   f77fd14bdaa7
branch:      yt
user:        jzuhone
date:        2012-10-05 00:47:02
summary:     Merging
affected #:  1 file

diff -r 4cb47cdf45206350ccf754d76d8a78e9b01eaeaf -r f77fd14bdaa7190ce4b3677ced28eeb0749d668a yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -753,8 +753,8 @@
                 labels = [r'$\rm{'+axis_labels[axis_index][i]+
                         axes_unit_label + r'}$' for i in (0,1)]
             else:
-                labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
-                          r'$\rm{Image\/y'+axes_unit_label+'}$']
+                labels = [r'$\rm{x'+axes_unit_label+'}$',
+                          r'$\rm{y'+axes_unit_label+'}$']
 
             self.plots[f].axes.set_xlabel(labels[0])
             self.plots[f].axes.set_ylabel(labels[1])



https://bitbucket.org/yt_analysis/yt/changeset/d5228350c515/
changeset:   d5228350c515
branch:      yt
user:        jzuhone
date:        2012-10-06 20:54:46
summary:     Support for 1D and 2D Athena datasets.
affected #:  1 file

diff -r f77fd14bdaa7190ce4b3677ced28eeb0749d668a -r d5228350c51505e38a21defe051d28ca8a6e66d0 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -326,7 +326,12 @@
         refine_by = None
         if refine_by is None: refine_by = 2
         self.refine_by = refine_by
-        self.dimensionality = 3
+        dimensionality = 3
+        if grid['dimensions'][2] == 1 :
+            dimensionality = 2
+        if grid['dimensions'][1] == 1 :
+            dimensionality = 1
+        self.dimensionality = dimensionality
         self.current_time = grid["time"]
         self.unique_identifier = self._handle.__hash__()
         self.cosmological_simulation = False
@@ -334,7 +339,9 @@
         self.field_ordering = 'fortran'
         self.boundary_conditions = [1]*6
 
-        self.nvtk = int(np.product(self.domain_dimensions/(grid['dimensions']-1)))
+        ND = self.dimensionality
+        self.nvtk = int(np.product(self.domain_dimensions[:ND]/(grid['dimensions'][:ND]-1)))
 
         self.current_redshift = self.omega_lambda = self.omega_matter = \
             self.hubble_constant = self.cosmological_simulation = 0.0



https://bitbucket.org/yt_analysis/yt/changeset/ae18bc654a02/
changeset:   ae18bc654a02
branch:      yt
user:        jzuhone
date:        2012-10-06 21:39:59
summary:     Merging
affected #:  13 files

diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -400,7 +400,7 @@
 
 echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec  Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
 echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
-echo '1a754d560bfa433f0960ab3b5a62edb5f291be98ec48cf4e5941fa5b84139e200b87a52efbbd6fa4a76d6feeff12439eed3e7a84db4421940d1bbb576f7a684e  Python-2.7.2.tgz' > Python-2.7.2.tgz.sha512
+echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
 echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
@@ -429,7 +429,7 @@
 [ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
 [ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
 [ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
-get_ytproject Python-2.7.2.tgz
+get_ytproject Python-2.7.3.tgz
 get_ytproject numpy-1.6.1.tar.gz
 get_ytproject matplotlib-1.1.0.tar.gz
 get_ytproject mercurial-2.2.2.tar.gz
@@ -554,11 +554,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.2/done ]
+if [ ! -e Python-2.7.3/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  YT loves you."
-    [ ! -e Python-2.7.2 ] && tar xfz Python-2.7.2.tgz
-    cd Python-2.7.2
+    [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
+    cd Python-2.7.3
     ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -65,6 +65,9 @@
     quantity_info, \
     add_quantity
 
+from image_array import \
+    ImageArray
+
 from field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/data_objects/image_array.py
--- /dev/null
+++ b/yt/data_objects/image_array.py
@@ -0,0 +1,271 @@
+"""
+ImageArray Class
+
+Authors: Samuel Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+
+Homepage: http://yt-project.org/
+License:
+    Copyright (C) 2012 Samuel Skillman.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+  """
+
+import numpy as np
+import h5py as h5
+from yt.visualization.image_writer import write_bitmap, write_image
+
+class ImageArray(np.ndarray):
+    r"""A custom Numpy ndarray used for images.
+
+    This differs from ndarray in that you can optionally specify an
+    info dictionary which is used later in saving, and can be accessed with
+    ImageArray.info.
+
+    Parameters
+    ----------
+    input_array: array_like
+        A numpy ndarray, or list. 
+
+    Other Parameters
+    ----------------
+    info: dictionary
+        Contains information to be stored with image.
+
+    Returns
+    -------
+    obj: ImageArray object 
+
+    Raises
+    ------
+    None
+
+    See Also
+    --------
+    numpy.ndarray : Inherits
+
+    Notes
+    -----
+
+    References
+    ----------
+
+    Examples
+    --------
+    These are written in doctest format, and should illustrate how to
+    use the function.  Use the variables 'pf' for the parameter file, 'pc' for
+    a plot collection, 'c' for a center, and 'L' for a vector. 
+
+    >>> im = np.zeros([64,128,3])
+    >>> for i in xrange(im.shape[0]):
+    >>>     for k in xrange(im.shape[2]):
+    >>>         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+    >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+    >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+    >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+
+    >>> im_arr = ImageArray(im, info=myinfo)
+    >>> im_arr.save('test_ImageArray')
+
+    Numpy ndarray documentation appended:
+
+    """
+    def __new__(cls, input_array, info=None):
+        # Input array is an already formed ndarray instance
+        # We first cast to be our class type
+        obj = np.asarray(input_array).view(cls)
+        # add the new attribute to the created instance
+        if info is None:
+            info = {}
+        obj.info = info
+        # Finally, we must return the newly created object:
+        return obj
+
+    def __array_finalize__(self, obj):
+        # see InfoArray.__array_finalize__ for comments
+        if obj is None: return
+        self.info = getattr(obj, 'info', None)
+
+    def write_hdf5(self, filename):
+        r"""Writes ImageArray to hdf5 file.
+
+        Parameters
+        ----------
+        filename: string
+            The filename is used as given and will not be modified.
+       
+        Examples
+        -------- 
+        >>> im = np.zeros([64,128,3])
+        >>> for i in xrange(im.shape[0]):
+        >>>     for k in xrange(im.shape[2]):
+        >>>         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+
+        >>> im_arr = ImageArray(im, info=myinfo)
+        >>> im_arr.write_hdf5('test_ImageArray.h5')
+
+        """
+        array_name = self.info.get("name","image")
+
+        f = h5.File(filename)
+        if array_name in f.keys():
+            del f[array_name]
+        d = f.create_dataset(array_name, data=self)
+        for k, v in self.info.iteritems():
+            d.attrs.create(k, v)
+        f.close()
+
+    def write_png(self, filename, clip_ratio=None):
+        r"""Writes ImageArray to png file.
+
+        Parameters
+        ----------
+        filename: string
+            A '.png' extension will be appended if the filename lacks one.
+       
+        Examples
+        --------
+        
+        >>> im = np.zeros([64,128,3])
+        >>> for i in xrange(im.shape[0]):
+        >>>     for k in xrange(im.shape[2]):
+        >>>         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+
+        >>> im_arr = ImageArray(im, info=myinfo)
+        >>> im_arr.write_png('test_ImageArray.png')
+
+        """
+        if filename[-4:] != '.png': 
+            filename += '.png'
+
+        if clip_ratio is not None:
+            return write_bitmap(self.swapaxes(0, 1), filename,
+                                clip_ratio * self.std())
+        else:
+            return write_bitmap(self.swapaxes(0, 1), filename)
+
+    def write_image(self, filename, color_bounds=None, channel=None,  cmap_name="algae", func=lambda x: x):
+        r"""Writes a single channel of the ImageArray to a png file.
+
+        Parameters
+        ----------
+        filename: string
+            A '.png' extension will be appended if the filename lacks one.
+       
+        Other Parameters
+        ----------------
+        channel: int
+            Which channel to write out as an image. Defaults to 0
+        cmap_name: string
+            Name of the colormap to be used.
+        color_bounds : tuple of floats, optional
+            The min and max to scale between.  Outlying values will be clipped.
+        cmap_name : string, optional
+            An acceptable colormap.  See either yt.visualization.color_maps or
+            http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps .
+        func : function, optional
+            A function to transform the buffer before applying a colormap. 
+
+        Returns
+        -------
+        scaled_image : uint8 image that has been saved
+        
+        Examples
+        --------
+        
+        >>> im = np.zeros([64,128])
+        >>> for i in xrange(im.shape[0]):
+        >>>     im[i,:] = np.linspace(0.,0.3*i, im.shape[1])
+
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+
+        >>> im_arr = ImageArray(im, info=myinfo)
+        >>> im_arr.write_image('test_ImageArray.png')
+
+        """
+        if filename[-4:] != '.png': 
+            filename += '.png'
+
+        if channel is None:
+            return write_image(self.swapaxes(0,1), filename, 
+                               color_bounds=color_bounds, cmap_name=cmap_name, 
+                               func=func)
+        else:
+            return write_image(self.swapaxes(0,1)[:,:,channel], filename, 
+                               color_bounds=color_bounds, cmap_name=cmap_name, 
+                               func=func)
+
+    def save(self, filename, png=True, hdf5=True):
+        """
+        Saves ImageArray. 
+
+        Arguments:
+          filename: string
+            This should not contain the extension type (.png, .h5, ...)
+
+        Optional Arguments:
+          png: boolean, default True
+            Save to a png
+
+          hdf5: boolean, default True
+            Save to hdf5 file, including info dictionary as attributes.
+
+        """
+        if png:
+            if len(self.shape) > 2:
+                self.write_png("%s.png" % filename)
+            else:
+                self.write_image("%s.png" % filename)
+        if hdf5:
+            self.write_hdf5("%s.h5" % filename)
+
+    __doc__ += np.ndarray.__doc__
+
+if __name__ == "__main__":
+    im = np.zeros([64,128,3])
+    for i in xrange(im.shape[0]):
+        for k in xrange(im.shape[2]):
+            im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        'width':0.245, 'units':'cm', 'type':'rendering'}
+
+    im_arr = ImageArray(im, info=myinfo)
+    im_arr.save('test_3d_ImageArray')
+
+    im = np.zeros([64,128])
+    for i in xrange(im.shape[0]):
+        im[i,:] = np.linspace(0.,0.3*i, im.shape[1])
+
+    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        'width':0.245, 'units':'cm', 'type':'rendering'}
+
+    im_arr = ImageArray(im, info=myinfo)
+    im_arr.save('test_2d_ImageArray')
+


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -938,6 +938,25 @@
           validators=[ValidateParameter("cp_%s_vec" % ax)
                       for ax in 'xyz'], units=r"\rm{km}/\rm{s}")
 
+def _CuttingPlaneBx(field, data):
+    x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
+                           for ax in 'xyz']
+    b_vec = np.array([data["B%s" % ax] for ax in 'xyz'])
+    return np.dot(x_vec, b_vec)
+add_field("CuttingPlaneBx", 
+          function=_CuttingPlaneBx,
+          validators=[ValidateParameter("cp_%s_vec" % ax)
+                      for ax in 'xyz'], units=r"\rm{Gauss}")
+def _CuttingPlaneBy(field, data):
+    x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
+                           for ax in 'xyz']
+    b_vec = np.array([data["B%s" % ax] for ax in 'xyz'])
+    return np.dot(y_vec, b_vec)
+add_field("CuttingPlaneBy", 
+          function=_CuttingPlaneBy,
+          validators=[ValidateParameter("cp_%s_vec" % ax)
+                      for ax in 'xyz'], units=r"\rm{Gauss}")
+
 def _MeanMolecularWeight(field,data):
     return (data["Density"] / (mh *data["NumberDensity"]))
 add_field("MeanMolecularWeight",function=_MeanMolecularWeight,units=r"")
@@ -975,12 +994,37 @@
     units of Gauss. If you use MKS, make sure to write your own
     MagneticEnergy field to deal with non-unitary \mu_0.
     """
-    return (data["Bx"]**2 + data["By"]**2 + data["Bz"]**2)/2.
+    return (data["Bx"]**2 + data["By"]**2 + data["Bz"]**2)/(8*np.pi)
 add_field("MagneticEnergy",function=_MagneticEnergy,
-          units=r"",
-          validators = [ValidateDataField("Bx"),
-                        ValidateDataField("By"),
-                        ValidateDataField("Bz")])
+          units=r"\rm{ergs}\/\rm{cm}^{-3}",
+          display_name=r"\rm{Magnetic}\/\rm{Energy}")
+
+def _BMagnitude(field,data):
+    """This assumes that your front end has provided Bx, By, Bz in
+    units of Gauss. If you use MKS, make sure to write your own
+    BMagnitude field to deal with non-unitary \mu_0.
+    """
+    return np.sqrt((data["Bx"]**2 + data["By"]**2 + data["Bz"]**2))
+add_field("BMagnitude",
+          function=_BMagnitude,
+          display_name=r"|B|", units=r"\rm{Gauss}")
+
+def _PlasmaBeta(field,data):
+    """This assumes that your front end has provided Bx, By, Bz in
+    units of Gauss. If you use MKS, make sure to write your own
+    PlasmaBeta field to deal with non-unitary \mu_0.
+    """
+    return data['Pressure']/data['MagneticEnergy']
+add_field("PlasmaBeta",
+          function=_PlasmaBeta,
+          display_name=r"\rm{Plasma}\/\beta", units="")
+
+def _MagneticPressure(field,data):
+    return data['MagneticEnergy']
+add_field("MagneticPressure",
+          function=_MagneticPressure,
+          display_name=r"\rm{Magnetic}\/\rm{Energy}",
+          units="\rm{ergs}\/\rm{cm}^{-3}")
 
 def _VorticitySquared(field, data):
     mylog.debug("Generating vorticity on %s", data)
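
Since the docstrings tell MKS users to supply their own fields, one way
that could look (a sketch only: it assumes Bx/By/Bz carry Tesla, uses
the SI vacuum permeability, and uses add_field as exported from
yt.mods):

    import numpy as np
    from yt.mods import add_field

    mu_0 = 4.0e-7 * np.pi  # vacuum permeability in SI units

    def _MagneticEnergyMKS(field, data):
        # u_B = B^2 / (2 mu_0), giving J/m^3 when B is in Tesla
        return (data["Bx"]**2 + data["By"]**2 + data["Bz"]**2) / (2.0 * mu_0)

    add_field("MagneticEnergy", function=_MagneticEnergyMKS,
              units=r"\rm{J}\/\rm{m}^{-3}",
              display_name=r"\rm{Magnetic}\/\rm{Energy}")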


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -98,6 +98,21 @@
 add_field("Density",function=_Density, take_log=True,
           units=r'\rm{g}/\rm{cm^3}')
 
+def _Bx(field,data):
+    return data["X-magnfield"]
+add_field("Bx", function=_Bx, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_x")
+
+def _By(field,data):
+    return data["Y-magnfield"]
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_y")
+
+def _Bz(field,data):
+    return data["Z-magnfield"]
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_z")
+
 def _MagneticEnergy(field,data):
     return (data["X-magnfield"]**2 +
             data["Y-magnfield"]**2 +


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import numpy as np
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     NullFunc, \
@@ -97,7 +98,10 @@
     if fn1.endswith("_Fraction"):
         add_field(fn1.split("_")[0] + "_Density",
                   function=_get_density(fn1), take_log=True,
-                  display_name="%s\/Density" % fn1.split("_")[0])
+                  display_name="%s\/Density" % fn1.split("_")[0],
+                  units = r"\rm{g}/\rm{cm}^3",
+                  projected_units = r"\rm{g}/\rm{cm}^2",
+                  )
 
 def _get_convert(fname):
     def _conv(data):
@@ -106,7 +110,8 @@
 
 add_flash_field("dens", function=NullFunc, take_log=True,
                 convert_function=_get_convert("dens"),
-                units=r"\rm{g}/\rm{cm}^3")
+                units=r"\rm{g}/\rm{cm}^3",
+                projected_units = r"\rm{g}/\rm{cm}^2")
 add_flash_field("velx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("velx"),
                 units=r"\rm{cm}/\rm{s}")
@@ -203,6 +208,7 @@
     add_field(f, TranslationFunc(v),
               take_log=KnownFLASHFields[v].take_log,
               units = ff._units, display_name=dname,
+              projected_units = ff._projected_units,
               particle_type = pfield)
 
 def _convertParticleMassMsun(data):
@@ -254,3 +260,43 @@
 
 add_field("GasEnergy", function=_GasEnergy, 
           units=r"\rm{ergs}/\rm{g}")
+
+# See http://flash.uchicago.edu/pipermail/flash-users/2012-October/001180.html
+# along with the attachment to that e-mail for details
+def GetMagRescalingFactor(pf):
+    if pf['unitsystem'].lower() == "cgs":
+        factor = 1
+    elif pf['unitsystem'].lower() == "si":
+        factor = np.sqrt(4*np.pi/1e7)
+    elif pf['unitsystem'].lower() == "none":
+        factor = np.sqrt(4*np.pi)
+    else:
+        raise RuntimeError("Runtime parameter unitsystem with "
+                           "value %s is unrecognized" % pf['unitsystem'])
+    return factor
+
+def _Bx(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['magx']*factor
+add_field("Bx", function=_Bx, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_x")
+
+def _By(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['magy']*factor
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_y")
+
+def _Bz(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['magz']*factor
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_z")
+
+def _DivB(fields, data):
+    factor = GetMagRescalingFactor(data.pf)
+    return data['divb']*factor
+add_field("DivB", function=_DivB, take_log=False,
+          units=r"\rm{Gauss}\/\rm{cm}^{-1}")
+
+
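
The net effect for FLASH users is that magnetic fields come back in
Gauss regardless of the runtime unit system; a rough usage sketch
(dataset name hypothetical):

    from yt.mods import load

    pf = load("flash_plt_cnt_0000")  # hypothetical FLASH output
    dd = pf.h.all_data()
    bx = dd["Bx"]  # 'magx' rescaled to Gauss via GetMagRescalingFactor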


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -62,7 +62,7 @@
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \
-    ParticleTrajectoryCollection
+    ParticleTrajectoryCollection, ImageArray
 
 from yt.data_objects.derived_quantities import \
     add_quantity, quantity_info


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -590,7 +590,7 @@
         cdef np.float64_t *pointer = <np.float64_t *> star_colors.data
         for i in range(pos_x.shape[0]):
             kdtree_utils.kd_insert3(self.tree,
-                pos_x[i], pos_y[i], pos_z[i], pointer + i*3)
+                pos_x[i], pos_y[i], pos_z[i], <void *> (pointer + i*3))
 
     def __dealloc__(self):
         kdtree_utils.kd_free(self.tree)
@@ -616,7 +616,7 @@
     cdef np.float64_t slopes[6], dp[3], ds[3]
     cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
     cdef np.float64_t dvs[6], cell_left[3], local_dds[3], pos[3]
-    cdef int nstars
+    cdef int nstars, dti, i, j
     cdef np.float64_t *colors = NULL, gexp, gaussian, px, py, pz
     for i in range(3):
         dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
@@ -648,6 +648,7 @@
         dvs[i] = temp
     for dti in range(vri.n_samples): 
         # Now we add the contribution from stars
+        kdtree_utils.kd_res_rewind(ballq)
         for i in range(nstars):
             kdtree_utils.kd_res_item3(ballq, &px, &py, &pz)
             colors = <np.float64_t *> kdtree_utils.kd_res_item_data(ballq)
@@ -655,20 +656,22 @@
             gexp = (px - pos[0])*(px - pos[0]) \
                  + (py - pos[1])*(py - pos[1]) \
                  + (pz - pos[2])*(pz - pos[2])
-            gaussian = vri.star_coeff * expl(-gexp/vri.star_sigma_num)
-            for i in range(3): im.rgba[i] += gaussian*dt*colors[i]
+            gaussian = vri.star_coeff * exp(-gexp/vri.star_sigma_num)
+            for j in range(3): im.rgba[j] += gaussian*dt*colors[j]
         for i in range(3):
             pos[i] += local_dds[i]
         FIT_eval_transfer(dt, dvs, im.rgba, vri.n_fits, vri.fits,
                           vri.field_table_ids, vri.grey_opacity)
         for i in range(vc.n_fields):
             dvs[i] += slopes[i]
+    kdtree_utils.kd_res_free(ballq)
 
 cdef class VolumeRenderSampler(ImageSampler):
     cdef VolumeRenderAccumulator *vra
     cdef public object tf_obj
     cdef public object my_field_tables
     cdef kdtree_utils.kdtree **trees
+    cdef object tree_containers
     def __cinit__(self, 
                   np.ndarray vp_pos,
                   np.ndarray vp_dir,
@@ -709,6 +712,7 @@
             self.vra.field_table_ids[i] = tf_obj.field_table_ids[i]
         self.supp_data = <void *> self.vra
         cdef star_kdtree_container skdc
+        self.tree_containers = star_list
         if star_list is None:
             self.trees = NULL
         else:
@@ -719,10 +723,15 @@
                 self.trees[i] = skdc.tree
 
     cdef void setup(self, PartitionedGrid pg):
+        cdef star_kdtree_container star_tree
         if self.trees == NULL:
             self.sampler = volume_render_sampler
         else:
+            star_tree = self.tree_containers[pg.parent_grid_id]
             self.vra.star_list = self.trees[pg.parent_grid_id]
+            self.vra.star_sigma_num = 2.0*star_tree.sigma**2.0
+            self.vra.star_er = 2.326 * star_tree.sigma
+            self.vra.star_coeff = star_tree.coeff
             self.sampler = volume_render_stars_sampler
 
     def __dealloc__(self):


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -28,6 +28,7 @@
     x_dict, \
     y_dict, \
     axis_names
+from .volume_rendering.api import off_axis_projection
 import _MPL
 import numpy as np
 import weakref
@@ -384,3 +385,28 @@
                                self.bounds).transpose()
         self[item] = buff
         return buff
+
+
+class OffAxisProjectionFixedResolutionBuffer(FixedResolutionBuffer):
+    def __init__(self, data_source, bounds, buff_size, antialias = True,
+                 periodic = False):
+        self.data = {}
+        FixedResolutionBuffer.__init__(self, data_source, bounds, buff_size, antialias, periodic)
+
+    def __getitem__(self, item):
+        if item in self.data: return self.data[item]
+        mylog.info("Making a fixed resolutuion buffer of (%s) %d by %d" % \
+            (item, self.buff_size[0], self.buff_size[1]))
+        ds = self.data_source
+        width = (self.bounds[1] - self.bounds[0],
+                 self.bounds[3] - self.bounds[2],
+                 self.bounds[5] - self.bounds[4])
+        buff = off_axis_projection(ds.pf, ds.center, ds.normal_vector,
+                                   width, ds.resolution, item,
+                                   weight=ds.weight_field, volume=ds.volume,
+                                   no_ghost=ds.no_ghost, interpolated=ds.interpolated,
+                                   north_vector=ds.north_vector)
+        self[item] = buff.swapaxes(0,1)
+        return buff
+
+


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -116,7 +116,7 @@
     image = image.transpose().copy() # Have to make sure it's contiguous 
     au.write_png(image, fn)
 
-def write_bitmap(bitmap_array, filename, max_val = None, transpose=True):
+def write_bitmap(bitmap_array, filename, max_val = None, transpose=False):
     r"""Write out a bitmapped image directly to a PNG file.
 
     This accepts a three- or four-channel `bitmap_array`.  If the image is not
@@ -152,8 +152,7 @@
         alpha_channel = 255*np.ones((s1,s2,1), dtype='uint8')
         bitmap_array = np.concatenate([bitmap_array, alpha_channel], axis=-1)
     if transpose:
-        for channel in range(bitmap_array.shape[2]):
-            bitmap_array[:,:,channel] = bitmap_array[:,:,channel].T
+        bitmap_array = bitmap_array.swapaxes(0,1)
     if filename is not None:
         au.write_png(bitmap_array.copy(), filename)
     else:
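
Note the flipped default above: write_bitmap no longer transposes
unless asked, so callers that relied on the old orientation now need
transpose=True. A quick sketch (the array here is a dummy image):

    import numpy as np
    from yt.visualization.image_writer import write_bitmap

    bitmap = (255 * np.random.random((64, 128, 3))).astype('uint8')
    write_bitmap(bitmap, "frame.png")                    # new default
    write_bitmap(bitmap, "frame_t.png", transpose=True)  # old behavior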


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -146,7 +146,9 @@
     def __call__(self, plot):
         # Instantiation of these is cheap
         if plot._type_name == "CuttingPlane":
-            print "WARNING: Magnetic field on Cutting Plane Not implemented."
+            qcb = CuttingQuiverCallback("CuttingPlaneBx",
+                                        "CuttingPlaneBy",
+                                        self.factor)
         else:
             xv = "B%s" % (x_names[plot.data.axis])
             yv = "B%s" % (y_names[plot.data.axis])
@@ -432,6 +434,9 @@
             iy = np.maximum(np.minimum((yt).astype('int'), ny-1), 0)
             lines[i,0,:,:] = xt + dt * pixX[ix,iy] * scale
             lines[i,1,:,:] = yt + dt * pixY[ix,iy] * scale
+        # scale into data units
+        lines[:,0,:,:] = lines[:,0,:,:] * (xx1 - xx0) / nx + xx0
+        lines[:,1,:,:] = lines[:,1,:,:] * (yy1 - yy0) / ny + yy0
         for i in range(self.data_size[0]):
             for j in range(self.data_size[1]):
                 plot._axes.plot(lines[:,0,i,j], lines[:,1,i,j],
@@ -650,8 +655,8 @@
                                plot.data[self.field_y],
                                int(nx), int(ny),
                                (x0, x1, y0, y1),).transpose()
-        X = np.mgrid[0:plot.image._A.shape[0]-1:nx*1j]# + 0.5*factor
-        Y = np.mgrid[0:plot.image._A.shape[1]-1:ny*1j]# + 0.5*factor
+        X,Y = np.meshgrid(np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
         plot._axes.quiver(X,Y, pixX, pixY)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
@@ -723,9 +728,13 @@
         self.plot_args = plot_args
 
     def __call__(self, plot):
+        if len(self.pos) == 3:
+            pos = (self.pos[x_dict[plot.data.axis]],
+                   self.pos[y_dict[plot.data.axis]])
+        else: pos = self.pos
         from matplotlib.patches import Arrow
         # Now convert the pixels to code information
-        x, y = self.convert_to_plot(plot, self.pos)
+        x, y = self.convert_to_plot(plot, pos)
         dx, dy = self.convert_to_plot(plot, self.code_size, False)
         arrow = Arrow(x, y, dx, dy, **self.plot_args)
         plot._axes.add_patch(arrow)
@@ -745,12 +754,13 @@
         self.text_args = text_args
 
     def __call__(self, plot):
-
-
+        if len(self.pos) == 3:
+            pos = (self.pos[x_dict[plot.data.axis]],
+                   self.pos[y_dict[plot.data.axis]])
+        else: pos = self.pos
         width,height = plot.image._A.shape
-        x,y = self.convert_to_plot(plot, self.pos)
-        x,y = x/width,y/height
-
+        x,y = self.convert_to_plot(plot, pos)
+        
         plot._axes.text(x, y, self.text, **self.text_args)
 
 class MarkerAnnotateCallback(PlotCallback):


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1,4 +1,3 @@
-
 """
 A plotting mechanism based on the idea of a "window" into the data.
 
@@ -44,11 +43,11 @@
     write_image, apply_colormap
 from .fixed_resolution import \
     FixedResolutionBuffer, \
-    ObliqueFixedResolutionBuffer
+    ObliqueFixedResolutionBuffer, \
+    OffAxisProjectionFixedResolutionBuffer
 from .plot_modifications import get_smallest_appropriate_unit, \
     callback_registry
 from .tick_locators import LogLocator, LinearLocator
-from .volume_rendering.api import off_axis_projection
 from yt.utilities.delaunay.triangulate import Triangulation as triang
 from yt.config import ytcfg
 
@@ -108,7 +107,10 @@
         self.pf = frb.pf
         self.xlim = viewer.xlim
         self.ylim = viewer.ylim
-        self._type_name = ''
+        if 'Cutting' in self.data.__class__.__name__:
+            self._type_name = "CuttingPlane"
+        else:
+            self._type_name = ''
 
 class FieldTransform(object):
     def __init__(self, name, func, locator):
@@ -192,6 +194,7 @@
         (normal,perp1,perp2) = ortho_find(normal)
         mat = np.transpose(np.column_stack((perp1,perp2,normal)))
         center = np.dot(mat,center)
     
         bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
     else:
@@ -258,26 +261,14 @@
         old_fields = None
         if self._frb is not None:
             old_fields = self._frb.keys()
-        bounds = self.xlim+self.ylim
-        class_name = self.data_source.__class__.__name__
-        if 'OffAxisProjection' in class_name:
-            self._frb = OffAxisProjectionDummyFRB(self.data_source,
-                                                  bounds, self.buff_size,
-                                                  self.antialias,
-                                                  periodic=self._periodic)
-        elif 'Cutting' in class_name:
-            self._frb = ObliqueFixedResolutionBuffer(self.data_source, 
-                                                     bounds, self.buff_size, 
-                                                     self.antialias, 
-                                                     periodic=self._periodic)
-        elif 'AMRQuadTreeProj' in class_name or 'Slice' in class_name:
-            self._frb = FixedResolutionBuffer(self.data_source, 
-                                              bounds, self.buff_size, 
-                                              self.antialias, 
-                                              periodic=self._periodic)
+        if hasattr(self,'zlim'):
+            bounds = self.xlim+self.ylim+self.zlim
         else:
-            pdb.set_trace()
-            raise RuntimeError("Failed to repixelize.")
+            bounds = self.xlim+self.ylim
+        self._frb = self._frb_generator(self.data_source,
+                                        bounds, self.buff_size,
+                                        self.antialias,
+                                        periodic=self._periodic)
         if old_fields is None:
             self._frb._get_data_source_fields()
         else:
@@ -318,6 +309,7 @@
         nWx, nWy = Wx/factor, Wy/factor
         self.xlim = (centerx - nWx*0.5, centerx + nWx*0.5)
         self.ylim = (centery - nWy*0.5, centery + nWy*0.5)
+                    
 
     @invalidate_data
     def pan(self, deltas):
@@ -364,12 +356,16 @@
             dy = bounds[3] - bounds[2]
             self.xlim = (self.center[0] - dx/2., self.center[0] + dx/2.)
             self.ylim = (self.center[1] - dy/2., self.center[1] + dy/2.)
-            mylog.info("xlim = %f %f" %self.xlim)
-            mylog.info("ylim = %f %f" %self.ylim)
         else:
-            self.xlim = bounds[0:2]
-            self.ylim = bounds[2:4]
-            
+            self.xlim = tuple(bounds[0:2])
+            self.ylim = tuple(bounds[2:4])
+            if len(bounds) == 6:
+                self.zlim = tuple(bounds[4:6])
+        mylog.info("xlim = %f %f" %self.xlim)
+        mylog.info("ylim = %f %f" %self.ylim)
+        if hasattr(self,'zlim'):
+            mylog.info("zlim = %f %f" %self.zlim)
+
     @invalidate_data
     def set_width(self, width, unit = '1'):
         """set the width of the plot window
@@ -415,14 +411,20 @@
         width = (Wx,Wy)
         width = [w / self.pf[unit] for w in width]
 
-        centerx = (self.xlim[1] + self.xlim[0])/2 
-        centery = (self.ylim[1] + self.ylim[0])/2 
+        centerx = (self.xlim[1] + self.xlim[0])/2.
+        centery = (self.ylim[1] + self.ylim[0])/2. 
         
         self.xlim = (centerx - width[0]/2.,
                      centerx + width[0]/2.)
         self.ylim = (centery - width[1]/2.,
                      centery + width[1]/2.)
         
+        if hasattr(self,'zlim'):
+            centerz = (self.zlim[1] + self.zlim[0])/2.
+            mw = max(width)
+            self.zlim = (centerz - mw/2.,
+                         centerz + mw/2.)
+        
     @invalidate_data
     def set_center(self, new_center, unit = '1'):
         """Sets a new center for the plot window
@@ -753,8 +755,8 @@
                 labels = [r'$\rm{'+axis_labels[axis_index][i]+
                         axes_unit_label + r'}$' for i in (0,1)]
             else:
-                labels = [r'$\rm{x'+axes_unit_label+'}$',
-                          r'$\rm{y'+axes_unit_label+'}$']
+                labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
+                          r'$\rm{Image\/y'+axes_unit_label+'}$']
 
             self.plots[f].axes.set_xlabel(labels[0])
             self.plots[f].axes.set_ylabel(labels[1])
@@ -850,16 +852,9 @@
         if mpl_kwargs is None: mpl_kwargs = {}
         axis = axis_names[self.data_source.axis]
         weight = None
-        if 'Slice' in self.data_source.__class__.__name__:
-            type = 'Slice'
-        if 'Proj' in self.data_source.__class__.__name__:
-            if 'OffAxis' in self.data_source.__class__.__name__:
-                type = 'OffAxisProjection'
-            else:
-                type = 'Projection'
+        type = self._plot_type
+        if type in ['Projection','OffAxisProjection']:
             weight = self.data_source.weight_field
-        if 'Cutting' in self.data_source.__class__.__name__:
-            type = 'OffAxisSlice'
         names = []
         for k, v in self.plots.iteritems():
             if axis:
@@ -907,6 +902,9 @@
             raise YTNotInsideNotebook
 
 class SlicePlot(PWViewerMPL):
+    _plot_type = 'Slice'
+    _frb_generator = FixedResolutionBuffer
+
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
                  origin='center-window'):
         r"""Creates a slice plot from a parameter file
@@ -982,6 +980,9 @@
         self.set_axes_unit(axes_unit)
 
 class ProjectionPlot(PWViewerMPL):
+    _plot_type = 'Projection'
+    _frb_generator = FixedResolutionBuffer
+
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
                  weight_field=None, max_level=None, origin='center-window'):
         r"""Creates a projection plot from a parameter file
@@ -1061,6 +1062,9 @@
         self.set_axes_unit(axes_unit)
 
 class OffAxisSlicePlot(PWViewerMPL):
+    _plot_type = 'OffAxisSlice'
+    _frb_generator = ObliqueFixedResolutionBuffer
+
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
                  axes_unit=None, north_vector=None):
         r"""Creates an off axis slice plot from a parameter file
@@ -1108,32 +1112,10 @@
         PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)
         self.set_axes_unit(axes_unit)
 
-class OffAxisProjectionDummyFRB(FixedResolutionBuffer):
-    def __init__(self, data_source, bounds, buff_size, antialias = True,                                                         
-                 periodic = False):
-        self.internal_dict = {}
-        FixedResolutionBuffer.__init__(self, data_source, bounds, buff_size, antialias, periodic)
-
-    def __getitem__(self, item):
-        try:
-            image = self.internal_dict[item]
-        except KeyError:
-            ds = self.data_source
-            image = off_axis_projection(ds.pf, ds.center, ds.normal_vector,
-                                        ds.width, ds.resolution, item,
-                                        weight=ds.weight_field, volume=ds.volume,
-                                        no_ghost=ds.no_ghost, interpolated=ds.interpolated,
-                                        north_vector=ds.north_vector)
-            self.internal_dict[item] = image.T
-        return image
-    
-    def _get_data_source_fields(self):
-        for f in self.data_source.fields:
-            self[f] = None
-
 class OffAxisProjectionDummyDataSource(object):
     _type_name = 'proj'
     proj_style = 'integrate'
+    _key_fields = []
     def __init__(self, center, pf, normal_vector, width, fields, 
                  interpolated, resolution = (800,800), weight=None,  
                  volume=None, no_ghost=False, le=None, re=None, 
@@ -1154,6 +1136,9 @@
         self.north_vector = north_vector
 
 class OffAxisProjectionPlot(PWViewerMPL):
+    _plot_type = 'OffAxisProjection'
+    _frb_generator = OffAxisProjectionFixedResolutionBuffer
+
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
                  depth=(1,'unitary'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 
@@ -1204,7 +1189,6 @@
             set, an arbitrary grid-aligned north-vector is chosen.
 
         """
-        self.OffAxisProjection = True
         (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf,depth=depth)
         # Hard-coding the resolution for now
         fields = ensure_list(fields)[:]
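
The pattern behind these plot_window changes, in miniature: each plot
class now declares its buffer type and label as class attributes, and
the shared repixelization path simply instantiates self._frb_generator
instead of sniffing class names. A simplified sketch (abbreviated, not
the actual class bodies):

    from yt.visualization.fixed_resolution import FixedResolutionBuffer

    class PWViewerSketch(object):
        _plot_type = None
        _frb_generator = None  # each concrete plot class picks its buffer

        def _recreate_frb(self, bounds):
            # One code path serves slices, projections, and off-axis
            # variants alike.
            return self._frb_generator(self.data_source, bounds,
                                       self.buff_size, self.antialias,
                                       periodic=self._periodic)

    class SlicePlotSketch(PWViewerSketch):
        _plot_type = 'Slice'
        _frb_generator = FixedResolutionBuffer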


diff -r d5228350c51505e38a21defe051d28ca8a6e66d0 -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -37,6 +37,7 @@
     arr_ang2pix_nest, arr_fisheye_vectors
 from yt.utilities.math_utils import get_rotation_matrix
 from yt.utilities.orientation import Orientation
+from yt.data_objects.api import ImageArray
 from yt.visualization.image_writer import write_bitmap, write_image
 from yt.data_objects.data_containers import data_object_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -301,7 +302,11 @@
                 np.array(self.width), self.transfer_function, self.sub_samples)
         return args
 
+    star_trees = None
     def get_sampler(self, args):
+        kwargs = {}
+        if self.star_trees is not None:
+            kwargs = {'star_list': self.star_trees}
         if self.use_light:
             if self.light_dir is None:
                 self.set_default_light_dir()
@@ -312,9 +317,10 @@
             if self.light_rgba is None:
                 self.set_default_light_rgba()
             sampler = LightSourceRenderSampler(*args, light_dir=temp_dir,
-                    light_rgba=self.light_rgba)
+                    light_rgba=self.light_rgba, **kwargs)
         else:
-            sampler = self._sampler_object(*args)
+            sampler = self._sampler_object(*args, **kwargs)
         return sampler
 
     def finalize_image(self, image):
@@ -342,15 +348,21 @@
 
     def save_image(self, fn, clip_ratio, image):
         if self.comm.rank is 0 and fn is not None:
-            if clip_ratio is not None:
-                write_bitmap(image, fn, clip_ratio * image.std())
-            else:
-                write_bitmap(image, fn)
-
+            image.write_png(fn, clip_ratio=clip_ratio)
 
     def initialize_source(self):
         return self.volume.initialize_source()
 
+    def get_information(self):
+        info_dict = {'fields':self.fields,
+                     'type':self.__class__.__name__,
+                     'east_vector':self.orienter.unit_vectors[0],
+                     'north_vector':self.orienter.unit_vectors[1],
+                     'normal_vector':self.orienter.unit_vectors[2],
+                     'width':self.width,
+                     'dataset':self.pf.fullpath}
+        return info_dict
+
     def snapshot(self, fn = None, clip_ratio = None, double_check = False,
                  num_threads = 0):
         r"""Ray-cast the camera.
@@ -385,7 +397,9 @@
         args = self.get_sampler_args(image)
         sampler = self.get_sampler(args)
         self.initialize_source()
-        image = self._render(double_check, num_threads, image, sampler)
+        image = ImageArray(self._render(double_check, num_threads, 
+                                        image, sampler),
+                           info=self.get_information())
         self.save_image(fn, clip_ratio, image)
         return image
 
@@ -665,7 +679,7 @@
 class PerspectiveCamera(Camera):
     expand_factor = 1.0
     def __init__(self, *args, **kwargs):
-        expand_factor = kwargs.pop('expand_factor', 1.0)
+        self.expand_factor = kwargs.pop('expand_factor', 1.0)
         Camera.__init__(self, *args, **kwargs)
 
     def get_sampler_args(self, image):
@@ -704,6 +718,27 @@
                 self.transfer_function, self.sub_samples)
         return args
 
+    def _render(self, double_check, num_threads, image, sampler):
+        pbar = get_pbar("Ray casting", (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+        total_cells = 0
+        if double_check:
+            for brick in self.volume.bricks:
+                for data in brick.my_data:
+                    if np.any(np.isnan(data)):
+                        raise RuntimeError
+
+        view_pos = self.front_center
+        for brick in self.volume.traverse(view_pos, self.front_center, image):
+            sampler(brick, num_threads=num_threads)
+            total_cells += np.prod(brick.my_data[0].shape)
+            pbar.update(total_cells)
+
+        pbar.finish()
+        image = sampler.aimage
+        self.finalize_image(image)
+        return image
+
+
     def finalize_image(self, image):
         image.shape = self.resolution[0], self.resolution[0], 3
 
@@ -720,6 +755,9 @@
     ], dtype='float64')
 
 class HEALpixCamera(Camera):
+
+    _sampler_object = None 
+    
     def __init__(self, center, radius, nside,
                  transfer_function = None, fields = None,
                  sub_samples = 5, log_fields = None, volume = None,
@@ -733,6 +771,12 @@
         if transfer_function is None:
             transfer_function = ProjectionTransferFunction()
         self.transfer_function = transfer_function
+
+        if isinstance(self.transfer_function, ProjectionTransferFunction):
+            self._sampler_object = ProjectionSampler
+        else:
+            self._sampler_object = VolumeRenderSampler
+
         if fields is None: fields = ["Density"]
         self.fields = fields
         self.sub_samples = sub_samples
@@ -787,6 +831,15 @@
 
         return image
 
+    def get_information(self):
+        info_dict = {'fields':self.fields,
+                     'type':self.__class__.__name__,
+                     'center':self.center,
+                     'radius':self.radius,
+                     'dataset':self.pf.fullpath}
+        return info_dict
+
+
     def snapshot(self, fn = None, clip_ratio = None, double_check = False,
                  num_threads = 0, clim = None, label = None):
         r"""Ray-cast the camera.
@@ -814,7 +867,9 @@
         args = self.get_sampler_args(image)
         sampler = self.get_sampler(args)
         self.volume.initialize_source()
-        image = self._render(double_check, num_threads, image, sampler)
+        image = ImageArray(self._render(double_check, num_threads, 
+                                        image, sampler),
+                           info=self.get_information())
         self.save_image(fn, clim, image, label = label)
         return image
 
@@ -1250,8 +1305,9 @@
 
         if self.image is not None:
             del self.image
+        image = ImageArray(image,
+                           info=self.get_information())
         self.image = image
-       
         return image
 
     def save_image(self, fn, clip_ratio=None):
@@ -1656,7 +1712,9 @@
 
         self.initialize_source()
 
-        image = self._render(double_check, num_threads, image, sampler)
+        image = ImageArray(self._render(double_check, num_threads, 
+                                        image, sampler),
+                           info=self.get_information())
 
         self.save_image(fn, clip_ratio, image)
 
@@ -1666,8 +1724,9 @@
 data_object_registry["projection_camera"] = ProjectionCamera
 
 def off_axis_projection(pf, center, normal_vector, width, resolution,
-                        field, weight=None, volume=None, no_ghost=False, 
-                        interpolated=False, north_vector=None):
+                        field, weight = None, 
+                        volume = None, no_ghost = False, interpolated = False,
+                        north_vector = None):
     r"""Project through a parameter file, off-axis, and return the image plane.
 
     This function will accept the necessary items to integrate through a volume
@@ -1707,14 +1766,10 @@
         accuracy/smoothness in resulting image.  The effects are
         less notable when the transfer function is smooth and
         broad. Default: True
-    interpolated : bool, optional
+    interpolated : bool, optional (default False)
         If True, the data is first interpolated to vertex-centered data, 
         then tri-linearly interpolated along the ray. Not suggested for 
         quantitative studies.
-    north_vector : array-like, optional
-        A vector defining the 'up' direction in the plot.  This                                                                  
-        option sets the orientation of the slicing plane.  If not                                                                
-        set, an arbitrary grid-aligned north-vector is chosen.              
 
     Returns
     -------
@@ -1731,7 +1786,7 @@
     """
     projcam = ProjectionCamera(center, normal_vector, width, resolution,
                                field, weight=weight, pf=pf, volume=volume,
-                               no_ghost=no_ghost, interpolated=interpolated,
+                               no_ghost=no_ghost, interpolated=interpolated, 
                                north_vector=north_vector)
     image = projcam.snapshot()
     if weight is not None:
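
With these changes a snapshot is an ImageArray rather than a bare
ndarray, so the rendering metadata travels with the pixels. Roughly
(cam stands in for any Camera built the usual way):

    image = cam.snapshot('render.png', clip_ratio=8.0)
    image.info['east_vector']  # orientation recorded by get_information()
    image.save('render')       # writes render.png and render.h5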



https://bitbucket.org/yt_analysis/yt/changeset/923899f7509b/
changeset:   923899f7509b
branch:      yt
user:        jzuhone
date:        2012-10-10 20:53:13
summary:     Updates to the Athena frontend for compatibility with 2D datasets and magnetic fields.

The FLASH frontend was missing the proton mass constant for a field definition.
affected #:  3 files

diff -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 -r 923899f7509b566ba4193bfea7a5deb918e630fd yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -340,7 +340,6 @@
         self.boundary_conditions = [1]*6
 
         ND = self.dimensionality
-        print ND
         self.nvtk = int(np.product(self.domain_dimensions[:ND]/(grid['dimensions'][:ND]-1)))
 
         self.current_redshift = self.omega_lambda = self.omega_matter = \


diff -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 -r 923899f7509b566ba4193bfea7a5deb918e630fd yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -26,6 +26,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import numpy as np
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \
@@ -36,6 +37,8 @@
     ValidateGridType, \
     NullFunc, \
     TranslationFunc
+from yt.utilities.physical_constants import \
+    kboltz,mh
 import yt.data_objects.universal_fields
 
 log_translation_dict = {}
@@ -44,10 +47,7 @@
                     "Pressure": "pressure",
                     "x-velocity": "velocity_x",
                     "y-velocity": "velocity_y",
-                    "z-velocity": "velocity_z",
-                    "mag_field_x": "cell_centered_B_x ",
-                    "mag_field_y": "cell_centered_B_y ",
-                    "mag_field_z": "cell_centered_B_z "}
+                    "z-velocity": "velocity_z"}
 
 AthenaFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = AthenaFieldInfo.add_field
@@ -86,3 +86,30 @@
 for f,v in translation_dict.items():
     add_field(f, TranslationFunc(v), take_log=False)
 
+def _Temperature(fields, data):
+    if data.has_field_parameter("mu") :
+        mu = data.get_field_parameter("mu")
+    else:
+        mu = 0.6
+    return mu*mh*data["Pressure"]/data["Density"]/kboltz
+add_field("Temperature", function=_Temperature, take_log=False,
+          units=r"\rm{K}")
+
+def _Bx(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_x']*factor
+add_field("Bx", function=_Bx, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_x")
+
+def _By(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_y']*factor
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_y")
+
+def _Bz(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_z']*factor
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_z")
+
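
The new Temperature field defaults to mu = 0.6 but honors a "mu" field
parameter set on the data object, e.g. (center and radius made up):

    sp = pf.h.sphere([0.5, 0.5, 0.5], 0.1)
    sp.set_field_parameter("mu", 1.22)  # e.g. neutral primordial gas
    temp = sp["Temperature"]            # mu*mh*P/(rho*kboltz)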


diff -r ae18bc654a0282f8a9bf1c550d8d8091d0774222 -r 923899f7509b566ba4193bfea7a5deb918e630fd yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -36,7 +36,7 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
-    kboltz
+    kboltz, mh
 KnownFLASHFields = FieldInfoContainer()
 add_flash_field = KnownFLASHFields.add_field
 



https://bitbucket.org/yt_analysis/yt/changeset/5c39b3425b47/
changeset:   5c39b3425b47
branch:      yt
user:        jzuhone
date:        2012-11-09 22:21:01
summary:     Merging
affected #:  64 files

diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,3 @@
-include distribute_setup.py
+include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
-recursive-include yt *.pyx *.pxd *.hh *.h README* 
+recursive-include yt *.pyx *.pxd *.hh *.h README*


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -220,11 +220,24 @@
         echo "  * libncurses5-dev"
         echo "  * zip"
         echo "  * uuid-dev"
+        echo "  * libfreetype6-dev"
+        echo "  * tk-dev"
         echo
         echo "You can accomplish this by executing:"
         echo
-        echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev"
+        echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev libfreetype6-dev tk-dev"
         echo
+        echo
+        echo " Additionally, if you want to put yt's lib dir in your LD_LIBRARY_PATH"
+        echo " so you can use yt without the activate script, you might "
+        echo " want to consider turning off LIBZ and FREETYPE in this"
+        echo " install script by editing this file and setting"
+        echo
+        echo " INST_ZLIB=0"
+        echo " INST_FTYPE=0"
+        echo 
+        echo " to avoid conflicts with other command-line programs "
+        echo " (like eog and evince, for example)."
     fi
     if [ ! -z "${CFLAGS}" ]
     then


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 nose.cfg
--- /dev/null
+++ b/nose.cfg
@@ -0,0 +1,4 @@
+[nosetests]
+detailed-errors=1
+where=yt
+exclude=answer_testing


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,3 +1,9 @@
 [egg_info]
 #tag_build = .dev
 #tag_svn_revision = 1
+
+[nosetests]
+detailed-errors=1
+where=yt
+exclude=answer_testing
+with-xunit=1


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 setup.py
--- a/setup.py
+++ b/setup.py
@@ -154,7 +154,11 @@
             'amr adaptivemeshrefinement',
         entry_points={'console_scripts': [
                             'yt = yt.utilities.command_line:run_main',
-                       ]},
+                      ],
+                      'nose.plugins.0.10': [
+                            'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
+                      ]
+        },
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
         url="http://yt-project.org/",


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -31,7 +31,7 @@
 from yt.funcs import *
 from yt.utilities.performance_counters import yt_counters, time_function
 try:
-    from yt.utilities.kdtree import \
+    from yt.utilities.kdtree.api import \
         chainHOP_tags_dens, \
         create_tree, fKD, find_nn_nearest_neighbors, \
         free_tree, find_chunk_nearest_neighbors


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -606,6 +606,7 @@
 
         if newProfile:
             mylog.info("Writing halo %d" % halo['id'])
+            if os.path.exists(filename): os.remove(filename)
             if filename.endswith('.h5'):
                 profile.write_out_h5(filename)
             else:
@@ -717,7 +718,9 @@
             Default=True.
         njobs : int
             The number of jobs over which to split the projections.  Set
-            to -1 so that each halo is done by a single processor.
+            to -1 so that each halo is done by a single processor.  Halo 
+            projections do not currently work in parallel, so this must 
+            be set to -1.
             Default: -1.
         dynamic : bool
             If True, distribute halos using a task queue.  If False,
@@ -731,6 +734,12 @@
 
         """
 
+        # Halo projections cannot run in parallel because they are done by 
+        # giving a data source to the projection object.
+        if njobs > 0:
+            mylog.warn("Halo projections cannot use more than one processor per halo, setting njobs to -1.")
+            njobs = -1
+        
         # Get list of halos for projecting.
         if halo_list == 'filtered':
             halo_projection_list = self.filtered_halos


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -30,7 +30,7 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import ParallelAnalysisInterface, parallel_blocking_call, parallel_root_only
 
 try:
-    from yt.utilities.kdtree import *
+    from yt.utilities.kdtree.api import *
 except ImportError:
     mylog.debug("The Fortran kD-Tree did not import correctly.")
 


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -38,6 +38,7 @@
     inline = 'False',
     numthreads = '-1',
     __withinreason = 'False',
+    __withintesting = 'False',
     __parallel = 'False',
     __global_parallel_rank = '0',
     __global_parallel_size = '1',
@@ -53,6 +54,7 @@
     pasteboard_repo = '',
     reconstruct_hierarchy = 'False',
     test_storage_dir = '/does/not/exist',
+    test_data_dir = '/does/not/exist',
     enzo_db = '',
     hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -84,6 +84,7 @@
     ValidateSpatial, \
     ValidateGridType, \
     add_field, \
+    add_grad, \
     derived_field
 
 from particle_trajectories import \


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -5,7 +5,7 @@
 Affiliation: KIPAC/SLAC/Stanford
 Author: Britton Smith <Britton.Smith at colorado.edu>
 Affiliation: University of Colorado at Boulder
-Author: Geoffrey So <gsiisg at gmail.com> (AMREllipsoidBase)
+Author: Geoffrey So <gsiisg at gmail.com>
 Affiliation: UCSD Physics/CASS
 Homepage: http://yt-project.org/
 License:
@@ -71,7 +71,7 @@
 def force_array(item, shape):
     try:
         sh = item.shape
-        return item
+        return item.copy()
     except AttributeError:
         if item:
             return np.ones(shape, dtype='bool')
@@ -237,6 +237,7 @@
     def __set_default_field_parameters(self):
         self.set_field_parameter("center",np.zeros(3,dtype='float64'))
         self.set_field_parameter("bulk_velocity",np.zeros(3,dtype='float64'))
+        self.set_field_parameter("normal",np.array([0,0,1],dtype='float64'))
 
     def _set_center(self, center):
         if center is None:
@@ -708,7 +709,7 @@
     _type_name = "streamline"
     _con_args = ('positions')
     sort_by = 't'
-    def __init__(self, positions, fields=None, pf=None, **kwargs):
+    def __init__(self, positions, length = 1.0, fields=None, pf=None, **kwargs):
         """
         This is a streamline, which is a set of points defined as
         being parallel to some vector field.
@@ -724,6 +725,8 @@
         ----------
         positions : array-like
             List of streamline positions
+        length : float
+            The total length of the streamline; each segment's dt is
+            divided by this value.
         fields : list of strings, optional
             If you want the object to pre-retrieve a set of fields, supply them
             here.  This is not necessary.
@@ -748,7 +751,9 @@
         self.dts = np.empty_like(positions[:,0])
         self.dts[:-1] = np.sqrt(np.sum((self.positions[1:]-
                                         self.positions[:-1])**2,axis=1))
-        self.dts[-1] = self.dts[-1]
+        self.dts[-1] = self.dts[-2]
+        self.length = length
+        self.dts /= length
         self.ts = np.add.accumulate(self.dts)
         self._set_center(self.positions[0])
         self.set_field_parameter('center', self.positions[0])
@@ -767,31 +772,30 @@
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = np.logical_and(self._get_cut_mask(grid),
-                              grid.child_mask)
-        if field == 'dts': return self._dts[grid.id][mask]
-        if field == 't': return self._ts[grid.id][mask]
-        return grid[field][mask]
+        # No child masking here; it happens inside the mask cut
+        mask = self._get_cut_mask(grid) 
+        if field == 'dts': return self._dts[grid.id]
+        if field == 't': return self._ts[grid.id]
+        return grid[field].flat[mask]
         
     @cache_mask
     def _get_cut_mask(self, grid):
-        mask = np.zeros(grid.ActiveDimensions, dtype='int')
-        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         #pdb.set_trace()
         points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
                          np.all(self.positions <= grid.RightEdge, axis=1) 
         pids = np.where(points_in_grid)[0]
-        for i, pos in zip(pids, self.positions[points_in_grid]):
+        mask = np.zeros(points_in_grid.sum(), dtype='int')
+        dts = np.zeros(points_in_grid.sum(), dtype='float64')
+        ts = np.zeros(points_in_grid.sum(), dtype='float64')
+        for mi, (i, pos) in enumerate(zip(pids, self.positions[points_in_grid])):
             if not points_in_grid[i]: continue
             ci = ((pos - grid.LeftEdge)/grid.dds).astype('int')
+            if grid.child_mask[ci[0], ci[1], ci[2]] == 0: continue
             for j in range(3):
                 ci[j] = min(ci[j], grid.ActiveDimensions[j]-1)
-            if mask[ci[0], ci[1], ci[2]]:
-                continue
-            mask[ci[0], ci[1], ci[2]] = 1
-            dts[ci[0], ci[1], ci[2]] = self.dts[i]
-            ts[ci[0], ci[1], ci[2]] = self.ts[i]
+            mask[mi] = np.ravel_multi_index(ci, grid.ActiveDimensions)
+            dts[mi] = self.dts[i]
+            ts[mi] = self.ts[i]
         self._dts[grid.id] = dts
         self._ts[grid.id] = ts
         return mask
@@ -855,6 +859,22 @@
         for field in temp_data.keys():
             self[field] = temp_data[field]
 
+    def _get_pw(self, fields, center, width, origin, axes_unit, plot_type):
+        axis = self.axis
+        if fields is None:
+            if self.fields is None:
+                raise SyntaxError("The fields keyword argument must be set")
+        else:
+            self.fields = ensure_list(fields)
+        from yt.visualization.plot_window import \
+            GetBoundsAndCenter, PWViewerMPL
+        from yt.visualization.fixed_resolution import FixedResolutionBuffer
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, self.pf)
+        pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer, 
+                         plot_type=plot_type)
+        pw.set_axes_unit(axes_unit)
+        return pw
+
     def to_frb(self, width, resolution, center=None, height=None):
         r"""This function returns a FixedResolutionBuffer generated from this
         object.
@@ -916,26 +936,6 @@
         frb = FixedResolutionBuffer(self, bounds, resolution)
         return frb
 
-    def to_pw(self):
-        r"""Create a :class:`~yt.visualization.plot_window.PlotWindow` from this
-        object.
-
-        This is a bare-bones mechanism of creating a plot window from this
-        object, which can then be moved around, zoomed, and on and on.  All
-        behavior of the plot window is relegated to that routine.
-        """
-        axis = self.axis
-        center = self.get_field_parameter("center")
-        if center is None:
-            center = (self.pf.domain_right_edge
-                    + self.pf.domain_left_edge)/2.0
-        width = (1.0, 'unitary')
-        from yt.visualization.plot_window import \
-            PWViewerMPL, GetBoundsAndCenter
-        (bounds, center) = GetBoundsAndCenter(axis, center, width, self.pf)
-        pw = PWViewerMPL(self, bounds)
-        return pw
-
     def interpolate_discretize(self, LE, RE, field, side, log_spacing=True):
         """
         This returns a uniform grid of points between *LE* and *RE*,
@@ -1193,6 +1193,18 @@
     def hub_upload(self):
         self._mrep.upload()
 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+               origin='center-window'):
+        r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
+        object.
+
+        This is a bare-bones mechanism of creating a plot window from this
+        object, which can then be moved around, zoomed, and on and on.  All
+        behavior of the plot window is relegated to that routine.
+        """
+        pw = self._get_pw(fields, center, width, origin, axes_unit, 'Slice')
+        return pw
+
 class AMRCuttingPlaneBase(AMR2DData):
     _plane = None
     _top_node = "/CuttingPlanes"
@@ -1355,6 +1367,30 @@
         return "%s/c%s_L%s" % \
             (self._top_node, cen_name, L_name)
 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None):
+        r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
+        object.
+
+        This is a bare-bones mechanism of creating a plot window from this
+        object, which can then be moved around, zoomed, and on and on.  All
+        behavior of the plot window is relegated to that routine.
+        """
+        normal = self.normal
+        center = self.center
+        if fields is None:
+            if self.fields is None:
+                raise SyntaxError("The fields keyword argument must be set")
+        else:
+            self.fields = ensure_list(fields)
+        from yt.visualization.plot_window import \
+            GetOffAxisBoundsAndCenter, PWViewerMPL
+        from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
+        (bounds, center_rot) = GetOffAxisBoundsAndCenter(normal, center, width, self.pf)
+        pw = PWViewerMPL(self, bounds, origin='center-window', periodic=False, oblique=True,
+                         frb_generator=ObliqueFixedResolutionBuffer, plot_type='OffAxisSlice')
+        pw.set_axes_unit(axes_unit)
+        return pw
+
     def to_frb(self, width, resolution, height=None):
         r"""This function returns an ObliqueFixedResolutionBuffer generated
         from this object.
@@ -1762,6 +1798,18 @@
             convs[:] = 1.0
         return dls, convs
 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+               origin='center-window'):
+        r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
+        object.
+
+        This is a bare-bones mechanism of creating a plot window from this
+        object, which can then be moved around, zoomed, and on and on.  All
+        behavior of the plot window is relegated to that routine.
+        """
+        pw = self._get_pw(fields, center, width, origin, axes_unit, 'Projection')
+        return pw
+
     def get_data(self, fields = None):
         if fields is None: fields = ensure_list(self.fields)[:]
         else: fields = ensure_list(fields)
@@ -2254,6 +2302,18 @@
     def add_fields(self, fields, weight = "CellMassMsun"):
         pass
 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+               origin='center-window'):
+        r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
+        object.
+
+        This is a bare-bones mechanism of creating a plot window from this
+        object, which can then be moved around, zoomed, and on and on.  All
+        behavior of the plot window is relegated to that routine.
+        """
+        pw = self._get_pw(fields, center, width, origin, axes_unit, 'Projection')
+        return pw
+
     def _project_grid(self, grid, fields, zero_out):
         # We split this next bit into two sections to try to limit the IO load
         # on the system.  This way, we perserve grid state (@restore_grid_state
@@ -3445,10 +3505,7 @@
         for gi, g in enumerate(grids): self._grids[gi] = g
 
     def _is_fully_enclosed(self, grid):
-        r = np.abs(grid._corners - self.center)
-        r = np.minimum(r, np.abs(self.DW[None,:]-r))
-        corner_radius = np.sqrt((r**2.0).sum(axis=1))
-        return np.all(corner_radius <= self.radius)
+        return False
 
     @restore_grid_state # Pains me not to decorate with cache_mask here
     def _get_cut_mask(self, grid, field=None):
@@ -3474,17 +3531,45 @@
                  pf=None, **kwargs):
         """
         By providing a *center*,*A*,*B*,*C*,*e0*,*tilt* we
-        can define a ellipsoid of any proportion.  Only cells whose centers are
-        within the ellipsoid will be selected.
+        can define an ellipsoid of any proportion.  Only cells whose
+        centers are within the ellipsoid will be selected.
+
+        Parameters
+        ----------
+        center : array_like
+            The center of the ellipsoid.
+        A : float
+            The magnitude of the largest semi-major axis of the ellipsoid.
+        B : float
+            The magnitude of the medium semi-major axis of the ellipsoid.
+        C : float
+            The magnitude of the smallest semi-major axis of the ellipsoid.
+        e0 : array_like (automatically normalized)
+            The direction of the largest semi-major axis of the ellipsoid.
+        tilt : float
+            After rotating about the z-axis to align e0 with x in the x-y
+            plane, and then rotating about the y-axis to align e0 fully
+            with the x-axis, tilt is the remaining angle in radians to
+            rotate about the x-axis so that e1 aligns with the y-axis and
+            e2 with the z-axis.
+
+        Examples
+        --------
+        >>> pf = load("DD####/DD####")
+        >>> c = [0.5,0.5,0.5]
+        >>> ell = pf.h.ellipsoid(c, 0.1, 0.1, 0.1, np.array([0.1, 0.1, 0.1]), 0.2)
         """
+
         AMR3DData.__init__(self, np.array(center), fields, pf, **kwargs)
+        # make sure the magnitudes of semi-major axes are in order
+        if A < B or B < C:
+            raise YTEllipsoidOrdering(pf, A, B, C)
         # make sure the smallest side is not smaller than dx
         if C < self.hierarchy.get_smallest_dx():
             raise YTSphereTooSmall(pf, C, self.hierarchy.get_smallest_dx())
         self._A = A
         self._B = B
         self._C = C
-        self._e0 = e0
+        self._e0 = e0 = e0 / (e0**2.0).sum()**0.5
         self._tilt = tilt
         
         # find the t1 angle needed to rotate about z axis to align e0 to x
@@ -3602,7 +3687,7 @@
 class AMRCoveringGridBase(AMR3DData):
     _spatial = True
     _type_name = "covering_grid"
-    _con_args = ('level', 'left_edge', 'right_edge', 'ActiveDimensions')
+    _con_args = ('level', 'left_edge', 'ActiveDimensions')
     def __init__(self, level, left_edge, dims, fields = None,
                  pf = None, num_ghost_zones = 0, use_pbar = True, **kwargs):
         """A 3D region with all data extracted to a single, specified
@@ -3629,8 +3714,9 @@
                            fields=fields, pf=pf, **kwargs)
         self.left_edge = np.array(left_edge)
         self.level = level
-        self.dds = self.pf.h.select_grids(self.level)[0].dds.copy()
-        self.ActiveDimensions = np.array(dims,dtype='int32')
+        rdx = self.pf.domain_dimensions*self.pf.refine_by**level
+        self.dds = self.pf.domain_width/rdx.astype("float64")
+        self.ActiveDimensions = np.array(dims, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
         self._num_ghost_zones = num_ghost_zones
         self._use_pbar = use_pbar

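A minimal usage sketch for the ellipsoid constructor above, assuming a
parameter file has already been loaded (the dataset path is hypothetical):
the semi-axis magnitudes must now satisfy A >= B >= C or
YTEllipsoidOrdering is raised, and e0 no longer needs to be pre-normalized.

    import numpy as np
    from yt.mods import load

    pf = load("DD0010/DD0010")  # hypothetical dataset path
    c = [0.5, 0.5, 0.5]
    # A >= B >= C is enforced; e0 is rescaled to unit length internally
    ell = pf.h.ellipsoid(c, 0.2, 0.15, 0.1, np.array([1.0, 1.0, 0.0]), 0.2)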

diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -598,16 +598,16 @@
                     continue
             else:
                 nz_filter = None
-            mins.append(data[field][nz_filter].min())
-            maxs.append(data[field][nz_filter].max())
+            mins.append(np.nanmin(data[field][nz_filter]))
+            maxs.append(np.nanmax(data[field][nz_filter]))
         else:
             if this_filter.any():
                 if non_zero:
                     nz_filter = ((this_filter) &
                                  (data[field][this_filter] > 0.0))
                 else: nz_filter = this_filter
-                mins.append(data[field][nz_filter].min())
-                maxs.append(data[field][nz_filter].max())
+                mins.append(np.nanmin(data[field][nz_filter]))
+                maxs.append(np.nanmax(data[field][nz_filter]))
             else:
                 mins.append(1e90)
                 maxs.append(-1e90)

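The switch from ndarray.min()/max() to np.nanmin/np.nanmax matters because
derived fields such as RadialVelocity can contain NaNs (e.g. where the
radius goes to zero at the center), and the plain reductions propagate
them.  A tiny illustration of the difference:

    import numpy as np

    vals = np.array([1.0, np.nan, 3.0])
    # the plain reductions return nan as soon as one nan is present
    assert np.isnan(vals.min()) and np.isnan(vals.max())
    # the nan-aware reductions skip nans instead
    assert np.nanmin(vals) == 1.0 and np.nanmax(vals) == 3.0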

diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -58,6 +58,66 @@
                 return function
             return create_function
         self[name] = DerivedField(name, function, **kwargs)
+        
+    def add_grad(self, field, **kwargs):
+        """
+        Creates partial-derivative fields for a given field along x, y,
+        and z, plus the gradient magnitude.  The names of the gradient
+        fields are autogenerated.
+
+        """
+        sl = slice(2,None,None)
+        sr = slice(None,-2,None)
+        
+        def _gradx(f, data):
+            grad = data[field][sl,1:-1,1:-1] - data[field][sr,1:-1,1:-1]
+            grad /= 2.0*data["dx"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _grady(f, data):
+            grad = data[field][1:-1,sl,1:-1] - data[field][1:-1,sr,1:-1]
+            grad /= 2.0*data["dy"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _gradz(f, data):
+            grad = data[field][1:-1,1:-1,sl] - data[field][1:-1,1:-1,sr]
+            grad /= 2.0*data["dz"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+        
+        d_kwargs = kwargs.copy()
+        if "display_name" in kwargs: del d_kwargs["display_name"]
+        
+        for ax in "xyz":
+            if "display_name" in kwargs:
+                disp_name = r"%s\_%s" % (kwargs["display_name"], ax)
+            else:
+                disp_name = r"\partial %s/\partial %s" % (field, ax)
+            name = "Grad_%s_%s" % (field, ax)
+            self[name] = DerivedField(name, function=eval('_grad%s' % ax),
+                         take_log=False, validators=[ValidateSpatial(1,[field])],
+                         display_name = disp_name, **d_kwargs)
+        
+        def _grad(f, data) :
+            a = np.power(data["Grad_%s_x" % field],2)
+            b = np.power(data["Grad_%s_y" % field],2)
+            c = np.power(data["Grad_%s_z" % field],2)
+            norm = np.sqrt(a+b+c)
+            return norm
+
+        if "display_name" in kwargs:
+            disp_name = kwargs["display_name"]
+        else:
+            disp_name = r"\Vert\nabla %s\Vert" % (field)   
+        name = "Grad_%s" % field           
+        self[name] = DerivedField(name, function=_grad, take_log=False,
+                                  display_name = disp_name, **d_kwargs)
+        mylog.info("Added new fields: Grad_%s_x, Grad_%s_y, Grad_%s_z, Grad_%s" \
+                   % (field, field, field, field))
 
     def has_key(self, key):
         # This gets used a lot
@@ -96,6 +156,7 @@
 
 FieldInfo = FieldInfoContainer()
 add_field = FieldInfo.add_field
+add_grad = FieldInfo.add_grad
 
 def derived_field(**kwargs):
     def inner_decorator(function):
@@ -160,7 +221,8 @@
             # required attrs
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
-                pf.hubble_constant = pf.cosmological_simulation = 0.0
+                pf.cosmological_simulation = 0.0
+            pf.hubble_constant = 0.7
             pf.domain_left_edge = np.zeros(3, 'float64')
             pf.domain_right_edge = np.ones(3, 'float64')
             pf.dimensionality = 3

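The new add_grad hook mirrors add_field; a sketch of registering gradient
fields for Density (the resulting field names come straight from the
mylog.info call above):

    from yt.data_objects.field_info_container import add_grad

    # Registers Grad_Density_x, Grad_Density_y, Grad_Density_z, and the
    # magnitude field Grad_Density; each component is a centered difference
    # that requires one ghost zone of the source field.
    add_grad("Density")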

diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -382,17 +382,19 @@
         """
         Prints out (stdout) relevant information about the simulation
         """
-        header = "%3s\t%6s\t%14s" % ("level","# grids", "# cells")
+        header = "%3s\t%6s\t%14s\t%14s" % ("level","# grids", "# cells",
+                                           "# cells^3")
         print header
         print "%s" % (len(header.expandtabs())*"-")
         for level in xrange(MAXLEVEL):
             if (self.level_stats['numgrids'][level]) == 0:
                 break
-            print "% 3i\t% 6i\t% 14i" % \
+            print "% 3i\t% 6i\t% 14i\t% 14i" % \
                   (level, self.level_stats['numgrids'][level],
-                   self.level_stats['numcells'][level])
+                   self.level_stats['numcells'][level],
+                   self.level_stats['numcells'][level]**(1./3))
             dx = self.select_grids(level)[0].dds[0]
-        print "-" * 28
+        print "-" * 46
         print "   \t% 6i\t% 14i" % (self.level_stats['numgrids'].sum(), self.level_stats['numcells'].sum())
         print "\n"
         try:


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_boolean_regions.py
--- /dev/null
+++ b/yt/data_objects/tests/test_boolean_regions.py
@@ -0,0 +1,353 @@
+from yt.testing import *
+from yt.data_objects.api import add_field
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+    def _ID(field, data):
+        width = data.pf.domain_right_edge - data.pf.domain_left_edge
+        min_dx = 1.0/8192
+        delta = width / min_dx
+        x = data['x'] - min_dx / 2.
+        y = data['y'] - min_dx / 2.
+        z = data['z'] - min_dx / 2.
+        xi = x / min_dx
+        yi = y / min_dx
+        zi = z / min_dx
+        index = xi + delta[0] * (yi + delta[1] * zi)
+        index = index.astype('int64')
+        return index
+
+    add_field("ID", function=_ID)
+
+def test_boolean_spheres_no_overlap():
+    r"""Test to make sure that boolean objects (spheres, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping spheres. This also checks that the original spheres
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        sp1 = pf.h.sphere([0.25, 0.25, 0.25], 0.15)
+        sp2 = pf.h.sphere([0.75, 0.75, 0.75], 0.15)
+        # Store the original indices
+        i1 = sp1['ID']
+        i1.sort()
+        i2 = sp2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([sp1, "AND", sp2]) # empty
+        bo2 = pf.h.boolean([sp1, "NOT", sp2]) # only sp1
+        bo3 = pf.h.boolean([sp1, "OR", sp2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = sp1['ID']
+        new_i1.sort()
+        new_i2 = sp2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+ 
+def test_boolean_spheres_overlap():
+    r"""Test to make sure that boolean objects (spheres, overlap)
+    behave the way we expect.
+
+    Test overlapping spheres.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        sp1 = pf.h.sphere([0.45, 0.45, 0.45], 0.15)
+        sp2 = pf.h.sphere([0.55, 0.55, 0.55], 0.15)
+        # Get indices of both.
+        i1 = sp1['ID']
+        i2 = sp2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([sp1, "AND", sp2]) # overlap (a lens)
+        bo2 = pf.h.boolean([sp1, "NOT", sp2]) # sp1 - sp2 (sphere with bite)
+        bo3 = pf.h.boolean([sp1, "OR", sp2]) # combination (H2)
+        # Now make sure the indices also behave as we expect.
+        lens = np.intersect1d(i1, i2)
+        apple = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, lens
+        yield assert_array_equal, b2, apple
+        yield assert_array_equal, b3, both
+
+def test_boolean_regions_no_overlap():
+    r"""Test to make sure that boolean objects (regions, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping regions. This also checks that the original regions
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re1 = pf.h.region([0.25]*3, [0.2]*3, [0.3]*3)
+        re2 = pf.h.region([0.65]*3, [0.6]*3, [0.7]*3)
+        # Store the original indices
+        i1 = re1['ID']
+        i1.sort()
+        i2 = re2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([re1, "AND", re2]) # empty
+        bo2 = pf.h.boolean([re1, "NOT", re2]) # only re1
+        bo3 = pf.h.boolean([re1, "OR", re2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = re1['ID']
+        new_i1.sort()
+        new_i2 = re2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1 
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_regions_overlap():
+    r"""Test to make sure that boolean objects (regions, overlap)
+    behave the way we expect.
+
+    Test overlapping regions.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re1 = pf.h.region([0.55]*3, [0.5]*3, [0.6]*3)
+        re2 = pf.h.region([0.6]*3, [0.55]*3, [0.65]*3)
+        # Get indices of both.
+        i1 = re1['ID']
+        i2 = re2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([re1, "AND", re2]) # overlap (small cube)
+        bo2 = pf.h.boolean([re1, "NOT", re2]) # re1 - re2 (large cube with bite)
+        bo3 = pf.h.boolean([re1, "OR", re2]) # combination (merged large cubes)
+        # Now make sure the indices also behave as we expect.
+        cube = np.intersect1d(i1, i2)
+        bite_cube = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, cube
+        yield assert_array_equal, b2, bite_cube
+        yield assert_array_equal, b3, both
+
+def test_boolean_cylinders_no_overlap():
+    r"""Test to make sure that boolean objects (cylinders, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping cylinders. This also checks that the original cylinders
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        cyl1 = pf.h.disk([0.25]*3, [1, 0, 0], 0.1, 0.1)
+        cyl2 = pf.h.disk([0.75]*3, [1, 0, 0], 0.1, 0.1)
+        # Store the original indices
+        i1 = cyl1['ID']
+        i1.sort()
+        i2 = cyl2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([cyl1, "AND", cyl2]) # empty
+        bo2 = pf.h.boolean([cyl1, "NOT", cyl2]) # only cyl1
+        bo3 = pf.h.boolean([cyl1, "OR", cyl2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = cyl1['ID']
+        new_i1.sort()
+        new_i2 = cyl2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_cylinders_overlap():
+    r"""Test to make sure that boolean objects (cylinders, overlap)
+    behave the way we expect.
+
+    Test overlapping cylinders.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        cyl1 = pf.h.disk([0.45]*3, [1, 0, 0], 0.2, 0.2)
+        cyl2 = pf.h.disk([0.55]*3, [1, 0, 0], 0.2, 0.2)
+        # Get indices of both.
+        i1 = cyl1['ID']
+        i2 = cyl2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([cyl1, "AND", cyl2]) # overlap (vertically extended lens)
+        bo2 = pf.h.boolean([cyl1, "NOT", cyl2]) # cyl1 - cyl2 (disk minus a bite)
+        bo3 = pf.h.boolean([cyl1, "OR", cyl2]) # combination (merged disks)
+        # Now make sure the indices also behave as we expect.
+        vlens = np.intersect1d(i1, i2)
+        bite_disk = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, vlens
+        yield assert_array_equal, b2, bite_disk
+        yield assert_array_equal, b3, both
+
+def test_boolean_ellipsoids_no_overlap():
+    r"""Test to make sure that boolean objects (ellipsoids, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping ellipsoids. This also checks that the original
+    ellipsoids don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        ell1 = pf.h.ellipsoid([0.25]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        ell2 = pf.h.ellipsoid([0.75]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        # Store the original indices
+        i1 = ell1['ID']
+        i1.sort()
+        i2 = ell2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([ell1, "AND", ell2]) # empty
+        bo2 = pf.h.boolean([ell1, "NOT", ell2]) # only ell1
+        bo3 = pf.h.boolean([ell1, "OR", ell2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = ell1['ID']
+        new_i1.sort()
+        new_i2 = ell2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1 
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_ellipsoids_overlap():
+    r"""Test to make sure that boolean objects (ellipsoids, overlap)
+    behave the way we expect.
+
+    Test overlapping ellipsoids.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        ell1 = pf.h.ellipsoid([0.45]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        ell2 = pf.h.ellipsoid([0.55]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        # Get indices of both.
+        i1 = ell1['ID']
+        i2 = ell2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([ell1, "AND", ell2]) # overlap
+        bo2 = pf.h.boolean([ell1, "NOT", ell2]) # ell1 - ell2
+        bo3 = pf.h.boolean([ell1, "OR", ell2]) # combination
+        # Now make sure the indices also behave as we expect.
+        overlap = np.intersect1d(i1, i2)
+        diff = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, overlap
+        yield assert_array_equal, b2, diff
+        yield assert_array_equal, b3, both
+
+def test_boolean_mix_periodicity():
+    r"""Test that a hybrid boolean region behaves as we expect.
+
+    This also tests nested logic and that periodicity works.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re = pf.h.region([0.5]*3, [0.0]*3, [1]*3) # whole thing
+        sp = pf.h.sphere([0.95]*3, 0.3) # wraps around
+        cyl = pf.h.disk([0.05]*3, [1,1,1], 0.1, 0.4) # wraps around
+        # Get original indices
+        rei = re['ID']
+        spi = sp['ID']
+        cyli = cyl['ID']
+        # Make some booleans
+        # whole box minus spherical bites at corners
+        bo1 = pf.h.boolean([re, "NOT", sp])
+        # sphere plus cylinder
+        bo2 = pf.h.boolean([sp, "OR", cyl])
+        # a jumble, the region minus the sp+cyl
+        bo3 = pf.h.boolean([re, "NOT", "(", sp, "OR", cyl, ")"])
+        # Now make sure the indices also behave as we expect.
+        expect = np.setdiff1d(rei, spi)
+        ii = bo1['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+        #
+        expect = np.union1d(spi, cyli)
+        ii = bo2['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+        #
+        expect = np.union1d(spi, cyli)
+        expect = np.setdiff1d(rei, expect)
+        ii = bo3['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+

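As these tests exercise, pf.h.boolean takes a flat list that mixes data
containers with the operator strings "AND", "OR", and "NOT"; nesting is
spelled with literal "(" and ")" entries.  A compact sketch, assuming sp1,
sp2, and re are existing containers on the same hierarchy:

    lens   = pf.h.boolean([sp1, "AND", sp2])
    either = pf.h.boolean([sp1, "OR", sp2])
    carved = pf.h.boolean([re, "NOT", "(", sp1, "OR", sp2, ")"])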

diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_covering_grid.py
--- /dev/null
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -0,0 +1,27 @@
+from yt.testing import *
+from yt.data_objects.profiles import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_covering_grid():
+    # We decompose in different ways
+    for level in [0, 1, 2]:
+        for nprocs in [1, 2, 4, 8]:
+            pf = fake_random_pf(16, nprocs = nprocs)
+            dn = pf.refine_by**level 
+            cg = pf.h.covering_grid(level, [0.0, 0.0, 0.0],
+                    dn * pf.domain_dimensions)
+            yield assert_equal, cg["Ones"].max(), 1.0
+            yield assert_equal, cg["Ones"].min(), 1.0
+            yield assert_equal, cg["CellVolume"].sum(), pf.domain_width.prod()
+            for g in pf.h.grids:
+                di = g.get_global_startindex()
+                dd = g.ActiveDimensions
+                for i in range(dn):
+                    f = cg["Density"][dn*di[0]+i:dn*(di[0]+dd[0])+i:dn,
+                                      dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
+                                      dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
+                    yield assert_equal, f, g["Density"]

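The per-grid comparison in test_covering_grid relies on the resolution
relation made explicit in the constructor change earlier in this series: at
a given level the cell width is domain_width / (domain_dimensions *
refine_by**level).  A sketch of building a level-2 covering grid over the
full domain, following the test:

    from yt.testing import fake_random_pf

    pf = fake_random_pf(16)
    level = 2
    dims = pf.domain_dimensions * pf.refine_by**level
    # a uniform grid at level-2 resolution spanning the whole domain
    cg = pf.h.covering_grid(level, [0.0, 0.0, 0.0], dims)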

diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_derived_quantities.py
--- /dev/null
+++ b/yt/data_objects/tests/test_derived_quantities.py
@@ -0,0 +1,24 @@
+from yt.testing import *
+import numpy as np
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_extrema():
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(16, nprocs = nprocs, fields = ("Density",
+                "x-velocity", "y-velocity", "z-velocity"))
+        sp = pf.h.sphere("c", (0.25, '1'))
+        (mi, ma), = sp.quantities["Extrema"]("Density")
+        yield assert_equal, mi, np.nanmin(sp["Density"])
+        yield assert_equal, ma, np.nanmax(sp["Density"])
+        dd = pf.h.all_data()
+        (mi, ma), = dd.quantities["Extrema"]("Density")
+        yield assert_equal, mi, np.nanmin(dd["Density"])
+        yield assert_equal, ma, np.nanmax(dd["Density"])
+        sp = pf.h.sphere("max", (0.25, '1'))
+        yield assert_equal, np.any(np.isnan(sp["RadialVelocity"])), True
+        (mi, ma), = dd.quantities["Extrema"]("RadialVelocity")
+        yield assert_equal, mi, np.nanmin(dd["RadialVelocity"])
+        yield assert_equal, ma, np.nanmax(dd["RadialVelocity"])


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_ellipsoid.py
--- /dev/null
+++ b/yt/data_objects/tests/test_ellipsoid.py
@@ -0,0 +1,35 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","loglevel"] = "50"
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_ellipsoid():
+    # We decompose in different ways
+    cs = [np.array([0.5, 0.5, 0.5]),
+          np.array([0.1, 0.2, 0.3]),
+          np.array([0.8, 0.8, 0.8])]
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs)
+        min_dx = 2.0/pf.domain_dimensions
+        ABC = np.random.random((3, 12)) * 0.1
+        e0s = np.random.random((3, 12))
+        tilts = np.random.random(12)
+        ABC[:,0] = 0.1
+        for i in range(12):
+            for c in cs:
+                A, B, C = reversed(sorted(ABC[:,i]))
+                A = max(A, min_dx[0])
+                B = max(B, min_dx[1])
+                C = max(C, min_dx[2])
+                e0 = e0s[:,i]
+                tilt = tilts[i]
+                ell = pf.h.ellipsoid(c, A, B, C, e0, tilt)
+                yield assert_equal, np.all(ell["Radius"] <= A), True
+                p = np.array([ell[ax] for ax in 'xyz'])
+                v  = np.zeros_like(ell["Radius"])
+                v += (((p - c[:,None]) * ell._e0[:,None]).sum(axis=0) / ell._A)**2
+                v += (((p - c[:,None]) * ell._e1[:,None]).sum(axis=0) / ell._B)**2
+                v += (((p - c[:,None]) * ell._e2[:,None]).sum(axis=0) / ell._C)**2
+                yield assert_equal, np.all(np.sqrt(v) <= 1.0), True

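The containment check in test_ellipsoid is the standard ellipsoid
inequality evaluated in the ellipsoid's own frame: a point p lies inside
when ((p-c).e0/A)^2 + ((p-c).e1/B)^2 + ((p-c).e2/C)^2 <= 1.  A standalone
sketch of that predicate for a single point:

    import numpy as np

    def inside_ellipsoid(p, c, e0, e1, e2, A, B, C):
        # e0, e1, e2 are the (orthonormal) principal axes; A >= B >= C
        d = np.asarray(p) - np.asarray(c)
        v = ((np.dot(d, e0) / A)**2 + (np.dot(d, e1) / B)**2
             + (np.dot(d, e2) / C)**2)
        return v <= 1.0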

diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_extract_regions.py
--- /dev/null
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -0,0 +1,53 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_cut_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        r = dd.cut_region( [ "grid['Temperature'] > 0.5",
+                             "grid['Density'] < 0.75",
+                             "grid['x-velocity'] > 0.25" ])
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        t2 = (r["Temperature"] < 0.75)
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+
+def test_extract_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        r = dd.extract_region(t)
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        t2 = (r["Temperature"] < 0.75)
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+        t3 = (r["Temperature"] < 0.75)
+        r3 = r.extract_region( t3 )
+        yield assert_equal, np.sort(r3["Temperature"]), np.sort(r["Temperature"][t3])
+        yield assert_equal, np.all(r3["Temperature"] < 0.75), True


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_fields.py
--- /dev/null
+++ b/yt/data_objects/tests/test_fields.py
@@ -0,0 +1,91 @@
+from yt.testing import *
+import numpy as np
+from yt.data_objects.field_info_container import \
+    FieldInfo
+import yt.data_objects.universal_fields
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+    np.seterr(all = 'ignore')
+
+_sample_parameters = dict(
+    axis = 0,
+    center = np.array((0.0, 0.0, 0.0)),
+    bulk_velocity = np.array((0.0, 0.0, 0.0)),
+    normal = np.array((0.0, 0.0, 1.0)),
+    cp_x_vec = np.array((1.0, 0.0, 0.0)),
+    cp_y_vec = np.array((0.0, 1.0, 0.0)),
+    cp_z_vec = np.array((0.0, 0.0, 1.0)),
+)
+
+_base_fields = ["Density", "x-velocity", "y-velocity", "z-velocity"]
+
+def realistic_pf(fields, nprocs):
+    pf = fake_random_pf(16, fields = fields, nprocs = nprocs)
+    pf.parameters["HydroMethod"] = "streaming"
+    pf.parameters["Gamma"] = 5.0/3.0
+    pf.parameters["EOSType"] = 1.0
+    pf.parameters["EOSSoundSpeed"] = 1.0
+    pf.conversion_factors["Time"] = 1.0
+    pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
+    pf.current_redshift = 0.0001
+    pf.hubble_constant = 0.7
+    for unit in mpc_conversion:
+        pf.units[unit+'h'] = pf.units[unit]
+        pf.units[unit+'cm'] = pf.units[unit]
+        pf.units[unit+'hcm'] = pf.units[unit]
+    return pf
+
+class TestFieldAccess(object):
+    description = None
+
+    def __init__(self, field_name, nproc):
+        # Note this should be a field name
+        self.field_name = field_name
+        self.description = "Accessing_%s_%s" % (field_name, nproc)
+        self.nproc = nproc
+
+    def __call__(self):
+        field = FieldInfo[self.field_name]
+        deps = field.get_dependencies()
+        fields = deps.requested + _base_fields
+        skip_grids = False
+        needs_spatial = False
+        for v in field.validators:
+            f = getattr(v, "fields", None)
+            if f: fields += f
+            if getattr(v, "ghost_zones", 0) > 0:
+                skip_grids = True
+            if hasattr(v, "ghost_zones"):
+                needs_spatial = True
+        pf = realistic_pf(fields, self.nproc)
+        # This gives unequal sized grids as well as subgrids
+        dd1 = pf.h.all_data()
+        dd2 = pf.h.all_data()
+        dd1.field_parameters.update(_sample_parameters)
+        dd2.field_parameters.update(_sample_parameters)
+        v1 = dd1[self.field_name]
+        conv = field._convert_function(dd1) or 1.0
+        if not needs_spatial:
+            assert_equal(v1, conv*field._function(field, dd2))
+        if not skip_grids:
+            for g in pf.h.grids:
+                g.field_parameters.update(_sample_parameters)
+                conv = field._convert_function(g) or 1.0
+                v1 = g[self.field_name]
+                g.clear_data()
+                g.field_parameters.update(_sample_parameters)
+                assert_equal(v1, conv*field._function(field, g))
+
+def test_all_fields():
+    for field in FieldInfo:
+        if field.startswith("CuttingPlane"): continue
+        if field.startswith("particle"): continue
+        if field.startswith("CIC"): continue
+        if field.startswith("WeakLensingConvergence"): continue
+        if FieldInfo[field].particle_type: continue
+        for nproc in [1, 4, 8]:
+            yield TestFieldAccess(field, nproc)


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_ortho_rays.py
--- /dev/null
+++ b/yt/data_objects/tests/test_ortho_rays.py
@@ -0,0 +1,25 @@
+from yt.testing import *
+
+def test_ortho_ray():
+    pf = fake_random_pf(64, nprocs=8)
+    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+          pf.domain_dimensions
+
+    axes = ['x', 'y', 'z']
+    for ax, an in enumerate(axes):
+        ocoord = np.random.random(2)
+
+        my_oray = pf.h.ortho_ray(ax, ocoord)
+
+        my_axes = range(3)
+        del my_axes[ax]
+
+        # find the cells intersected by the ortho ray
+        my_all = pf.h.all_data()
+        my_cells = (np.abs(my_all[axes[my_axes[0]]] - ocoord[0]) <= 
+                    0.5 * dx[my_axes[0]]) & \
+                   (np.abs(my_all[axes[my_axes[1]]] - ocoord[1]) <= 
+                    0.5 * dx[my_axes[1]])
+
+        yield assert_equal, my_oray['Density'].sum(), \
+                            my_all['Density'][my_cells].sum()


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_profiles.py
--- /dev/null
+++ b/yt/data_objects/tests/test_profiles.py
@@ -0,0 +1,74 @@
+from yt.testing import *
+from yt.data_objects.profiles import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
+
+_fields = ("Density", "Temperature", "Dinosaurs", "Tribbles")
+
+def test_profiles():
+    pf = fake_random_pf(64, nprocs = 8, fields = _fields)
+    nv = pf.domain_dimensions.prod()
+    dd = pf.h.all_data()
+    (rmi, rma), (tmi, tma), (dmi, dma) = dd.quantities["Extrema"](
+        ["Density", "Temperature", "Dinosaurs"])
+    rt, tt, dt = dd.quantities["TotalQuantity"](
+        ["Density", "Temperature", "Dinosaurs"])
+    # First we look at the binned profiles with explicitly supplied extrema.
+    for nb in [8, 16, 32, 64]:
+        for lr in [True, False]:
+            # We log all the fields or don't log 'em all.  No need to do them
+            # individually.
+            for lf in [True, False]: 
+                # We have the min and the max, but to avoid cutting them off
+                # since we aren't doing end-collect, we cut a bit off the edges
+                for ec, e1, e2 in [(False, 0.9, 1.1), (True, 1.0, 1.0)]:
+                    p1d = BinnedProfile1D(dd, 
+                        nb, "Density", rmi*e1, rma*e2, lf,
+                        lr, end_collect=ec)
+                    p1d.add_fields(["Ones", "Temperature"], weight=None)
+                    yield assert_equal, p1d["Ones"].sum(), nv
+                    yield assert_rel_equal, tt, p1d["Temperature"].sum(), 7
+
+                    p2d = BinnedProfile2D(dd, 
+                        nb, "Density", rmi*e1, rma*e2, lf,
+                        nb, "Temperature", tmi*e1, tma*e2, lf,
+                        lr, end_collect=ec)
+                    p2d.add_fields(["Ones", "Temperature"], weight=None)
+                    yield assert_equal, p2d["Ones"].sum(), nv
+                    yield assert_rel_equal, tt, p2d["Temperature"].sum(), 7
+
+                    p3d = BinnedProfile3D(dd, 
+                        nb, "Density", rmi*e1, rma*e2, lf,
+                        nb, "Temperature", tmi*e1, tma*e2, lf,
+                        nb, "Dinosaurs", dmi*e1, dma*e2, lf,
+                        lr, end_collect=ec)
+                    p3d.add_fields(["Ones", "Temperature"], weight=None)
+                    yield assert_equal, p3d["Ones"].sum(), nv
+                    yield assert_rel_equal, tt, p3d["Temperature"].sum(), 7
+
+            p1d = BinnedProfile1D(dd, nb, "x", 0.0, 1.0, log_space=False)
+            p1d.add_fields("Ones", weight=None)
+            av = nv / nb
+            yield assert_equal, p1d["Ones"][:-1], np.ones(nb)*av
+            # We re-bin ones with a weight now
+            p1d.add_fields(["Ones"], weight="Temperature")
+            yield assert_equal, p1d["Ones"][:-1], np.ones(nb)
+
+            p2d = BinnedProfile2D(dd, nb, "x", 0.0, 1.0, False,
+                                      nb, "y", 0.0, 1.0, False)
+            p2d.add_fields("Ones", weight=None)
+            av = nv / nb**2
+            yield assert_equal, p2d["Ones"][:-1,:-1], np.ones((nb, nb))*av
+            # We re-bin ones with a weight now
+            p2d.add_fields(["Ones"], weight="Temperature")
+            yield assert_equal, p2d["Ones"][:-1,:-1], np.ones((nb, nb))
+
+            p3d = BinnedProfile3D(dd, nb, "x", 0.0, 1.0, False,
+                                      nb, "y", 0.0, 1.0, False,
+                                      nb, "z", 0.0, 1.0, False)
+            p3d.add_fields("Ones", weight=None)
+            av = nv / nb**3
+            yield assert_equal, p3d["Ones"][:-1,:-1,:-1], np.ones((nb, nb, nb))*av
+            # We re-bin ones with a weight now
+            p3d.add_fields(["Ones"], weight="Temperature")
+            yield assert_equal, p3d["Ones"][:-1,:-1,:-1], np.ones((nb,nb,nb))
+

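The positional signature exercised above is shared by the 1D, 2D, and 3D
variants: a data source, then one (n_bins, field, lower bound, upper bound,
take_log) group per dimension, then the lazy-reader flag and end_collect.
A minimal 1D sketch following the test:

    from yt.testing import fake_random_pf
    from yt.data_objects.profiles import BinnedProfile1D

    pf = fake_random_pf(64)
    dd = pf.h.all_data()
    # 16 linear bins in x over [0, 1]; bin an unweighted cell count
    prof = BinnedProfile1D(dd, 16, "x", 0.0, 1.0, log_space=False)
    prof.add_fields(["Ones"], weight=None)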

diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_projection.py
--- /dev/null
+++ b/yt/data_objects/tests/test_projection.py
@@ -0,0 +1,59 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_projection():
+    for nprocs in [8, 1]:
+        # We want to test both 1 proc and 8 procs, to make sure that
+        # parallelism isn't broken
+        pf = fake_random_pf(64, nprocs = nprocs)
+        dims = pf.domain_dimensions
+        xn, yn, zn = pf.domain_dimensions
+        xi, yi, zi = pf.domain_left_edge + 1.0/(pf.domain_dimensions * 2)
+        xf, yf, zf = pf.domain_right_edge - 1.0/(pf.domain_dimensions * 2)
+        dd = pf.h.all_data()
+        rho_tot = dd.quantities["TotalQuantity"]("Density")[0]
+        coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
+        uc = [np.unique(c) for c in coords]
+        # Some simple projection tests with single grids
+        for ax, an in enumerate("xyz"):
+            xax = x_dict[ax]
+            yax = y_dict[ax]
+            for wf in ["Density", None]:
+                proj = pf.h.proj(ax, ["Ones", "Density"], weight_field = wf)
+                yield assert_equal, proj["Ones"].sum(), proj["Ones"].size
+                yield assert_equal, proj["Ones"].min(), 1.0
+                yield assert_equal, proj["Ones"].max(), 1.0
+                yield assert_equal, np.unique(proj["px"]), uc[xax]
+                yield assert_equal, np.unique(proj["py"]), uc[yax]
+                yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
+                yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
+                frb = proj.to_frb((1.0,'unitary'), 64)
+                for proj_field in ['Ones', 'Density']:
+                    yield assert_equal, frb[proj_field].info['data_source'], \
+                            proj.__str__()
+                    yield assert_equal, frb[proj_field].info['axis'], \
+                            ax
+                    yield assert_equal, frb[proj_field].info['field'], \
+                            proj_field
+                    yield assert_equal, frb[proj_field].info['units'], \
+                            pf.field_info[proj_field].get_units()
+                    yield assert_equal, frb[proj_field].info['xlim'], \
+                            frb.bounds[:2]
+                    yield assert_equal, frb[proj_field].info['ylim'], \
+                            frb.bounds[2:]
+                    yield assert_equal, frb[proj_field].info['length_to_cm'], \
+                            pf['cm']
+                    yield assert_equal, frb[proj_field].info['center'], \
+                            proj.center
+                    yield assert_equal, frb[proj_field].info['weight_field'], \
+                            wf
+            # wf == None
+            yield assert_equal, wf, None
+            v1 = proj["Density"].sum()
+            v2 = (dd["Density"] * dd["d%s" % an]).sum()
+            yield assert_rel_equal, v1, v2, 10
+
+


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_rays.py
--- /dev/null
+++ b/yt/data_objects/tests/test_rays.py
@@ -0,0 +1,33 @@
+from yt.testing import *
+
+def test_ray():
+    for nproc in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=nproc)
+        dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+          pf.domain_dimensions
+
+        p1 = np.random.random(3)
+        p2 = np.random.random(3)
+
+        my_ray = pf.h.ray(p1, p2)
+        assert_rel_equal(my_ray['dts'].sum(), 1.0, 14)
+        ray_cells = my_ray['dts'] > 0
+
+        # find cells intersected by the ray
+        my_all = pf.h.all_data()
+        
+        dt = np.abs(dx / (p2 - p1))
+        tin  = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
+                               [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
+                               [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
+        tout = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
+                               [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
+                               [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
+        tin = tin.max(axis=0)
+        tout = tout.min(axis=0)
+        my_cells = (tin < tout) & (tin < 1) & (tout > 0)
+
+        yield assert_rel_equal, ray_cells.sum(), my_cells.sum(), 14
+        yield assert_rel_equal, my_ray['Density'][ray_cells].sum(), \
+                                my_all['Density'][my_cells].sum(), 14
+        yield assert_rel_equal, my_ray['dts'].sum(), 1.0, 14

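The reference calculation in test_ray is the classic slab method: per axis
the ray's entry and exit parameters through a cell are computed, and the
cell is hit when the latest entry precedes the earliest exit and the
interval overlaps [0, 1].  A scalar sketch for one cell, assuming the ray
direction has no zero component:

    import numpy as np

    def ray_hits_cell(p1, p2, cell_center, dx):
        d = p2 - p1
        t1 = (cell_center - 0.5 * dx - p1) / d  # parameters at near faces
        t2 = (cell_center + 0.5 * dx - p1) / d  # parameters at far faces
        tin = np.minimum(t1, t2).max()    # latest entry across the slabs
        tout = np.maximum(t1, t2).min()   # earliest exit across the slabs
        return (tin < tout) and (tin < 1.0) and (tout > 0.0)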

diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_slice.py
--- /dev/null
+++ b/yt/data_objects/tests/test_slice.py
@@ -0,0 +1,55 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_slice():
+    for nprocs in [8, 1]:
+        # We want to test both 1 proc and 8 procs, to make sure that
+        # parallelism isn't broken
+        pf = fake_random_pf(64, nprocs = nprocs)
+        dims = pf.domain_dimensions
+        xn, yn, zn = pf.domain_dimensions
+        xi, yi, zi = pf.domain_left_edge + 1.0/(pf.domain_dimensions * 2)
+        xf, yf, zf = pf.domain_right_edge - 1.0/(pf.domain_dimensions * 2)
+        coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
+        uc = [np.unique(c) for c in coords]
+        slc_pos = 0.5
+        # Some simple slice tests with single grids
+        for ax, an in enumerate("xyz"):
+            xax = x_dict[ax]
+            yax = y_dict[ax]
+            for wf in ["Density", None]:
+                slc = pf.h.slice(ax, slc_pos, ["Ones", "Density"])
+                yield assert_equal, slc["Ones"].sum(), slc["Ones"].size
+                yield assert_equal, slc["Ones"].min(), 1.0
+                yield assert_equal, slc["Ones"].max(), 1.0
+                yield assert_equal, np.unique(slc["px"]), uc[xax]
+                yield assert_equal, np.unique(slc["py"]), uc[yax]
+                yield assert_equal, np.unique(slc["pdx"]), 1.0/(dims[xax]*2.0)
+                yield assert_equal, np.unique(slc["pdy"]), 1.0/(dims[yax]*2.0)
+                frb = slc.to_frb((1.0,'unitary'), 64)
+                for slc_field in ['Ones', 'Density']:
+                    yield assert_equal, frb[slc_field].info['data_source'], \
+                            slc.__str__()
+                    yield assert_equal, frb[slc_field].info['axis'], \
+                            ax
+                    yield assert_equal, frb[slc_field].info['field'], \
+                            slc_field
+                    yield assert_equal, frb[slc_field].info['units'], \
+                            pf.field_info[slc_field].get_units()
+                    yield assert_equal, frb[slc_field].info['xlim'], \
+                            frb.bounds[:2]
+                    yield assert_equal, frb[slc_field].info['ylim'], \
+                            frb.bounds[2:]
+                    yield assert_equal, frb[slc_field].info['length_to_cm'], \
+                            pf['cm']
+                    yield assert_equal, frb[slc_field].info['center'], \
+                            slc.center
+                    yield assert_equal, frb[slc_field].info['coord'], \
+                            slc_pos
+            # wf == None
+            yield assert_equal, wf, None
+
+


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/tests/test_streamlines.py
--- /dev/null
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -0,0 +1,22 @@
+from yt.testing import *
+from yt.visualization.api import Streamlines
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+_fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
+
+def test_covering_grid():
+    # We decompose in different ways
+    cs = np.mgrid[0.47:0.53:2j,0.47:0.53:2j,0.47:0.53:2j]
+    cs = np.array([a.ravel() for a in cs]).T
+    length = (1.0/128) * 16 # 16 half-widths of a cell
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs, fields = _fields)
+        streams = Streamlines(pf, cs, length=length)
+        streams.integrate_through_volume()
+        for path in (streams.path(i) for i in range(8)):
+            yield assert_rel_equal, path['dts'].sum(), 1.0, 14
+            yield assert_equal, np.all(path['t'] <= (1.0 + 1e-10)), True
+            path["Density"]


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -258,8 +258,11 @@
 
         """
         if isinstance(filenames, types.StringTypes):
+            pattern = filenames
             filenames = glob.glob(filenames)
             filenames.sort()
+            if len(filenames) == 0:
+                raise YTNoFilenamesMatchPattern(pattern)
         obj = cls(filenames[:], parallel = parallel)
         return obj
 

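With this change, a glob pattern that matches nothing now fails loudly
instead of yielding an empty series.  A sketch (the pattern itself is
hypothetical):

    from yt.data_objects.time_series import TimeSeriesData

    # Raises YTNoFilenamesMatchPattern when the glob matches no files,
    # rather than silently constructing an empty time series.
    ts = TimeSeriesData.from_filenames("DD*/DD*.hierarchy")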

diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -32,7 +32,7 @@
 
 from yt.funcs import *
 
-from yt.utilities.lib import CICDeposit_3, obtain_rvec
+from yt.utilities.lib import CICDeposit_3, obtain_rvec, obtain_rv_vec
 from yt.utilities.cosmology import Cosmology
 from field_info_container import \
     add_field, \
@@ -54,7 +54,19 @@
      kboltz, \
      G, \
      rho_crit_now, \
-     speed_of_light_cgs
+     speed_of_light_cgs, \
+     km_per_cm
+
+from yt.utilities.math_utils import \
+    get_sph_r_component, \
+    get_sph_theta_component, \
+    get_sph_phi_component, \
+    get_cyl_r_component, \
+    get_cyl_z_component, \
+    get_cyl_theta_component, \
+    get_cyl_r, get_cyl_theta, \
+    get_cyl_z, get_sph_r, \
+    get_sph_theta, get_sph_phi
      
 # Note that, despite my newfound efforts to comply with PEP-8,
 # I violate it here in order to keep the name/func_name relationship
@@ -179,12 +191,8 @@
 
 def _VelocityMagnitude(field, data):
     """M{|v|}"""
-    bulk_velocity = data.get_field_parameter("bulk_velocity")
-    if bulk_velocity == None:
-        bulk_velocity = np.zeros(3)
-    return ( (data["x-velocity"]-bulk_velocity[0])**2.0 + \
-             (data["y-velocity"]-bulk_velocity[1])**2.0 + \
-             (data["z-velocity"]-bulk_velocity[2])**2.0 )**(1.0/2.0)
+    velocities = obtain_rv_vec(data)
+    return np.sqrt(np.sum(velocities**2,axis=0))
 add_field("VelocityMagnitude", function=_VelocityMagnitude,
           take_log=False, units=r"\rm{cm}/\rm{s}")
 
@@ -194,13 +202,6 @@
           function=_TangentialOverVelocityMagnitude,
           take_log=False)
 
-def _TangentialVelocity(field, data):
-    return np.sqrt(data["VelocityMagnitude"]**2.0
-                 - data["RadialVelocity"]**2.0)
-add_field("TangentialVelocity", 
-          function=_TangentialVelocity,
-          take_log=False, units=r"\rm{cm}/\rm{s}")
-
 def _Pressure(field, data):
     """M{(Gamma-1.0)*rho*E}"""
     return (data.pf["Gamma"] - 1.0) * \
@@ -223,14 +224,9 @@
 def _sph_r(field, data):
     center = data.get_field_parameter("center")
       
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The spherical coordinates radius is simply the magnitude of the
-    ## coords vector.
-
-    return np.sqrt(np.sum(coords**2,axis=-1))
+    return get_sph_r(coords)
 
 def _Convert_sph_r_CGS(data):
    return data.convert("cm")
@@ -245,20 +241,9 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The angle (theta) with respect to the normal (J), is the arccos
-    ## of the dot product of the normal with the normalized coords
-    ## vector.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    JdotCoords = np.sum(J*coords,axis=-1)
-    
-    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=-1)) )
+    return get_sph_theta(coords, normal)
 
 add_field("sph_theta", function=_sph_theta,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
@@ -269,54 +254,21 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
-    
-    ## We have freedom with respect to what axis (xprime) to define
-    ## the disk angle. Here I've chosen to use the axis that is
-    ## perpendicular to the normal and the y-axis. When normal ==
-    ## y-hat, then set xprime = z-hat. With this definition, when
-    ## normal == z-hat (as is typical), then xprime == x-hat.
-    ##
-    ## The angle is then given by the arctan of the ratio of the
-    ## yprime-component and the xprime-component of the coords vector.
+    coords = obtain_rvec(data)
 
-    xprime = np.cross([0.0,1.0,0.0],normal)
-    if np.sum(xprime) == 0: xprime = np.array([0.0, 0.0, 1.0])
-    yprime = np.cross(normal,xprime)
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    Jx = np.tile(xprime,tile_shape)
-    Jy = np.tile(yprime,tile_shape)
-    
-    Px = np.sum(Jx*coords,axis=-1)
-    Py = np.sum(Jy*coords,axis=-1)
-    
-    return np.arctan2(Py,Px)
+    return get_sph_phi(coords, normal)
 
 add_field("sph_phi", function=_sph_phi,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
 
-
-
 ### cylindrical coordinates: R (radius in the cylinder's plane)
 def _cyl_R(field, data):
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
       
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The cross product of the normal (J) with the coords vector
-    ## gives a vector of magnitude equal to the cylindrical radius.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    JcrossCoords = np.cross(J,coords)
-    return np.sqrt(np.sum(JcrossCoords**2,axis=-1))
+    return get_cyl_r(coords, normal)
 
 def _Convert_cyl_R_CGS(data):
    return data.convert("cm")
@@ -324,6 +276,9 @@
 add_field("cyl_R", function=_cyl_R,
          validators=[ValidateParameter("center"),ValidateParameter("normal")],
          convert_function = _Convert_cyl_R_CGS, units=r"\rm{cm}")
+add_field("cyl_RCode", function=_cyl_R,
+          validators=[ValidateParameter("center"),ValidateParameter("normal")],
+          units=r"Radius (code)")
 
 
 ### cylindrical coordinates: z (height above the cylinder's plane)
@@ -331,17 +286,9 @@
     center = data.get_field_parameter("center")
     normal = data.get_field_parameter("normal")
     
-    coords = np.array([data['x'] - center[0],
-                       data['y'] - center[1],
-                       data['z'] - center[2]]).transpose()
+    coords = obtain_rvec(data)
 
-    ## The dot product of the normal (J) with the coords vector gives
-    ## the cylindrical height.
-    
-    tile_shape = list(coords.shape)[:-1] + [1]
-    J = np.tile(normal,tile_shape)
-
-    return np.sum(J*coords,axis=-1)  
+    return get_cyl_z(coords, normal)
 
 def _Convert_cyl_z_CGS(data):
    return data.convert("cm")
@@ -352,14 +299,17 @@
 
 
 ### cylindrical coordinates: theta (angle in the cylinder's plane)
-### [This is identical to the spherical coordinate's 'phi' angle.]
 def _cyl_theta(field, data):
-    return data['sph_phi']
+    center = data.get_field_parameter("center")
+    normal = data.get_field_parameter("normal")
+    
+    coords = obtain_rvec(data)
+
+    return get_cyl_theta(coords, normal)
 
 add_field("cyl_theta", function=_cyl_theta,
          validators=[ValidateParameter("center"),ValidateParameter("normal")])
 
-
 ### The old field DiskAngle is the same as the spherical coordinates'
 ### 'theta' angle. I'm keeping DiskAngle for backwards compatibility.
 def _DiskAngle(field, data):
@@ -392,6 +342,54 @@
                       ValidateParameter("normal")],
           units=r"AU", display_field=False)
 
+def _cyl_RadialVelocity(field, data):
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)
+
+    theta = data['cyl_theta']
+
+    return get_cyl_r_component(velocities, theta, normal)
+
+def _cyl_RadialVelocityABS(field, data):
+    return np.abs(_cyl_RadialVelocity(field, data))
+def _Convert_cyl_RadialVelocityKMS(data):
+    return km_per_cm
+add_field("cyl_RadialVelocity", function=_cyl_RadialVelocity,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityABS", function=_cyl_RadialVelocityABS,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityKMS", function=_cyl_RadialVelocity,
+          convert_function=_Convert_cyl_RadialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_RadialVelocityKMSABS", function=_cyl_RadialVelocityABS,
+          convert_function=_Convert_cyl_RadialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+
+def _cyl_TangentialVelocity(field, data):
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)
+    theta = data['cyl_theta']
+
+    return get_cyl_theta_component(velocities, theta, normal)
+
+def _cyl_TangentialVelocityABS(field, data):
+    return np.abs(_cyl_TangentialVelocity(field, data))
+def _Convert_cyl_TangentialVelocityKMS(data):
+    return km_per_cm
+add_field("cyl_TangentialVelocity", function=_cyl_TangentialVelocity,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityABS", function=_cyl_TangentialVelocityABS,
+          units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityKMS", function=_cyl_TangentialVelocity,
+          convert_function=_Convert_cyl_TangentialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
+add_field("cyl_TangentialVelocityKMSABS", function=_cyl_TangentialVelocityABS,
+          convert_function=_Convert_cyl_TangentialVelocityKMS, units=r"\rm{km}/\rm{s}",
+          validators=[ValidateParameter("normal")])
 
 def _DynamicalTime(field, data):
     """
@@ -450,7 +448,7 @@
 
 # This is rho_total / rho_cr(z).
 def _Convert_Overdensity(data):
-    return 1 / (rho_crit_now * data.pf.hubble_constant**2 * 
+    return 1.0 / (rho_crit_now * data.pf.hubble_constant**2 * 
                 (1+data.pf.current_redshift)**3)
 add_field("Overdensity",function=_Matter_Density,
           convert_function=_Convert_Overdensity, units=r"")
@@ -470,8 +468,8 @@
     else:
         omega_baryon_now = 0.0441
     return data['Density'] / (omega_baryon_now * rho_crit_now * 
-                              (data.pf['CosmologyHubbleConstantNow']**2) * 
-                              ((1+data.pf['CosmologyCurrentRedshift'])**3))
+                              (data.pf.hubble_constant**2) * 
+                              ((1+data.pf.current_redshift)**3))
 add_field("Baryon_Overdensity", function=_Baryon_Overdensity, 
           units=r"")
 
@@ -640,13 +638,7 @@
           take_log=False, display_field=False)
 
 def obtain_velocities(data):
-    if data.has_field_parameter("bulk_velocity"):
-        bv = data.get_field_parameter("bulk_velocity")
-    else: bv = np.zeros(3, dtype='float64')
-    xv = data["x-velocity"] - bv[0]
-    yv = data["y-velocity"] - bv[1]
-    zv = data["z-velocity"] - bv[2]
-    return xv, yv, zv
+    return obtain_rv_vec(data)
 
 def _convertSpecificAngularMomentum(data):
     return data.convert("cm")
@@ -711,7 +703,7 @@
 #          convert_function=_convertSpecificAngularMomentum, vector_field=True,
 #          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter('center')])
 def _convertSpecificAngularMomentumKMSMPC(data):
-    return data.convert("mpc")/1e5
+    return km_per_cm*data.convert("mpc")
 #add_field("ParticleSpecificAngularMomentumKMSMPC",
 #          function=_ParticleSpecificAngularMomentum, particle_type=True,
 #          convert_function=_convertSpecificAngularMomentumKMSMPC, vector_field=True,
@@ -883,33 +875,32 @@
           display_name = "Radius (code)")
 
 def _RadialVelocity(field, data):
-    center = data.get_field_parameter("center")
-    bulk_velocity = data.get_field_parameter("bulk_velocity")
-    if bulk_velocity == None:
-        bulk_velocity = np.zeros(3)
-    new_field = ( (data['x']-center[0])*(data["x-velocity"]-bulk_velocity[0])
-                + (data['y']-center[1])*(data["y-velocity"]-bulk_velocity[1])
-                + (data['z']-center[2])*(data["z-velocity"]-bulk_velocity[2])
-                )/data["RadiusCode"]
-    if np.any(np.isnan(new_field)): # to fix center = point
-        new_field[np.isnan(new_field)] = 0.0
-    return new_field
+    normal = data.get_field_parameter("normal")
+    velocities = obtain_rv_vec(data)    
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_r_component(velocities, theta, phi, normal)
+
 def _RadialVelocityABS(field, data):
     return np.abs(_RadialVelocity(field, data))
 def _ConvertRadialVelocityKMS(data):
-    return 1e-5
+    return km_per_cm
 add_field("RadialVelocity", function=_RadialVelocity,
-          units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          units=r"\rm{cm}/\rm{s}")
 add_field("RadialVelocityABS", function=_RadialVelocityABS,
-          units=r"\rm{cm}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          units=r"\rm{cm}/\rm{s}")
 add_field("RadialVelocityKMS", function=_RadialVelocity,
-          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
 add_field("RadialVelocityKMSABS", function=_RadialVelocityABS,
-          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}",
-          validators=[ValidateParameter("center")])
+          convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
+
+def _TangentialVelocity(field, data):
+    return np.sqrt(data["VelocityMagnitude"]**2.0
+                 - data["RadialVelocity"]**2.0)
+add_field("TangentialVelocity", 
+          function=_TangentialVelocity,
+          take_log=False, units=r"\rm{cm}/\rm{s}")
 
 def _CuttingPlaneVelocityX(field, data):
     x_vec, y_vec, z_vec = [data.get_field_parameter("cp_%s_vec" % (ax))
@@ -1026,6 +1017,47 @@
           display_name=r"\rm{Magnetic}\/\rm{Energy}",
           units="\rm{ergs}\/\rm{cm}^{-3}")
 
+def _BPoloidal(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_theta_component(Bfields, theta, phi, normal)
+
+add_field("BPoloidal", function=_BPoloidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
+def _BToroidal(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    phi   = data['sph_phi']
+
+    return get_sph_phi_component(Bfields, phi, normal)
+
+add_field("BToroidal", function=_BToroidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
+def _BRadial(field,data):
+    normal = data.get_field_parameter("normal")
+
+    Bfields = np.array([data['Bx'], data['By'], data['Bz']])
+
+    theta = data['sph_theta']
+    phi   = data['sph_phi']
+
+    return get_sph_r_component(Bfields, theta, phi, normal)
+
+add_field("BRadial", function=_BPoloidal,
+          units=r"\rm{Gauss}",
+          validators=[ValidateParameter("normal")])
+
 def _VorticitySquared(field, data):
     mylog.debug("Generating vorticity on %s", data)
     # We need to set up stencils


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -478,7 +478,7 @@
         self.parameters['TopGridRank'] = 3
         self.parameters['DomainLeftEdge'] = np.zeros(self.parameters['TopGridRank'])
         self.parameters['DomainRightEdge'] = np.ones(self.parameters['TopGridRank'])
-        self.parameters['Refineby'] = 2 # technically not the enzo default
+        self.parameters['RefineBy'] = 2 # technically not the enzo default
         self.parameters['StopCycle'] = 100000
         self.parameters['dtDataDump'] = 0.
         self.parameters['CycleSkipDataDump'] = 0.


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/frontends/enzo/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -0,0 +1,51 @@
+"""
+Enzo frontend tests using moving7
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load
+from yt.frontends.enzo.api import EnzoStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
+           "particle_density")
+
+m7 = "DD0010/moving7_0010"
+@requires_pf(m7)
+def test_moving7():
+    pf = data_dir_load(m7)
+    yield assert_equal, str(pf), "moving7_0010"
+    for test in small_patch_amr(m7, _fields):
+        yield test
+
+g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
+@requires_pf(g30, big_data=True)
+def test_galaxy0030():
+    pf = data_dir_load(g30)
+    yield assert_equal, str(pf), "galaxy0030"
+    for test in big_patch_amr(g30, _fields):
+        yield test


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -99,8 +99,8 @@
         add_field(fn1.split("_")[0] + "_Density",
                   function=_get_density(fn1), take_log=True,
                   display_name="%s\/Density" % fn1.split("_")[0],
-                  units = r"\rm{g}/\rm{cm}^3",
-                  projected_units = r"\rm{g}/\rm{cm}^2",
+                  units = r"\rm{g}/\rm{cm}^{3}",
+                  projected_units = r"\rm{g}/\rm{cm}^{2}",
                   )
 
 def _get_convert(fname):
@@ -110,8 +110,8 @@
 
 add_flash_field("dens", function=NullFunc, take_log=True,
                 convert_function=_get_convert("dens"),
-                units=r"\rm{g}/\rm{cm}^3",
-                projected_units = r"\rm{g}/\rm{cm}^2"),
+                units=r"\rm{g}/\rm{cm}^{3}",
+                projected_units = r"\rm{g}/\rm{cm}^{2}"),
 add_flash_field("velx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("velx"),
                 units=r"\rm{cm}/\rm{s}")
@@ -159,10 +159,10 @@
                 units = r"\rm{K}")
 add_flash_field("pres", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pres"),
-                units=r"\rm{erg}\//\/\rm{cm}^{3}")
+                units=r"\rm{erg}/\rm{cm}^{3}")
 add_flash_field("pden", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pden"),
-                units=r"\rm{g}/\rm{cm}^3")
+                units=r"\rm{g}/\rm{cm}^{3}")
 add_flash_field("magx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("magx"),
                 units = r"\mathrm{Gau\ss}")
@@ -174,7 +174,7 @@
                 units = r"\mathrm{Gau\ss}")
 add_flash_field("magp", function=NullFunc, take_log=True,
                 convert_function=_get_convert("magp"),
-                units = r"\rm{erg}\//\/\rm{cm}^{3}")
+                units = r"\rm{erg}/\rm{cm}^{3}")
 add_flash_field("divb", function=NullFunc, take_log=False,
                 convert_function=_get_convert("divb"),
                 units = r"\mathrm{Gau\ss}\/\rm{cm}")
@@ -186,10 +186,10 @@
                 units=r"\rm{ratio\/of\/specific\/heats}")
 add_flash_field("gpot", function=NullFunc, take_log=False,
                 convert_function=_get_convert("gpot"),
-                units=r"\rm{ergs\//\/g}")
+                units=r"\rm{ergs}/\rm{g}")
 add_flash_field("gpol", function=NullFunc, take_log=False,
                 convert_function=_get_convert("gpol"),
-                units = r"\rm{ergs\//\/g}")
+                units = r"\rm{ergs}/\rm{g}")
 add_flash_field("flam", function=NullFunc, take_log=False,
                 convert_function=_get_convert("flam"))
 


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -29,7 +29,9 @@
       StreamHierarchy, \
       StreamStaticOutput, \
       StreamHandler, \
-      load_uniform_grid
+      load_uniform_grid, \
+      load_amr_grids, \
+      refine_amr
 
 from .fields import \
       KnownStreamFields, \


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -44,6 +44,8 @@
     decompose_array, get_psize
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
+from yt.utilities.flagging_methods import \
+    FlaggingGrid
 
 from .fields import \
     StreamFieldInfo, \
@@ -353,7 +355,8 @@
             psize = get_psize(np.array(data[key].shape), nprocs)
             grid_left_edges, grid_right_edges, temp[key] = \
                 decompose_array(data[key], psize, bbox)
-            grid_dimensions = np.array([grid.shape for grid in temp[key]])
+            grid_dimensions = np.array([grid.shape for grid in temp[key]],
+                                       dtype="int32")
         for gid in range(nprocs):
             new_data[gid] = {}
             for key in temp.keys():
@@ -364,7 +367,7 @@
         sfh.update({0:data})
         grid_left_edges = domain_left_edge
         grid_right_edges = domain_right_edge
-        grid_dimensions = domain_dimensions.reshape(nprocs,3)
+        grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
 
     handler = StreamHandler(
         grid_left_edges,
@@ -394,3 +397,168 @@
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf
+
+def load_amr_grids(grid_data, domain_dimensions, sim_unit_to_cm, bbox=None,
+                   sim_time=0.0, number_of_particles=0):
+    r"""Load a set of grids of data into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+    This should allow a sequence of grids of varying resolution to be loaded
+    directly into yt and analyzed like any other dataset.  This comes with
+    several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+        * Particles may be difficult to integrate.
+        * No consistency checks are performed on the hierarchy.
+
+    Parameters
+    ----------
+    grid_data : list of dicts
+        This is a list of dicts.  Each dict must have entries "left_edge",
+        "right_edge", "dimensions", "level", and then any remaining entries are
+        assumed to be fields.  This will be modified in place and cannot be
+        assumed to be static.
+    domain_dimensions : array_like
+        This is the domain dimensions of the grid
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of the computational domain in units of sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    number_of_particles : int, optional
+        If particle fields are included, set this to the number of particles
+
+    Examples
+    --------
+
+    >>> grid_data = [
+    ...     dict(left_edge = [0.0, 0.0, 0.0],
+    ...          right_edge = [1.0, 1.0, 1.],
+    ...          level = 0,
+    ...          dimensions = [32, 32, 32]),
+    ...     dict(left_edge = [0.25, 0.25, 0.25],
+    ...          right_edge = [0.75, 0.75, 0.75],
+    ...          level = 1,
+    ...          dimensions = [32, 32, 32])
+    ... ]
+    ... 
+    >>> for g in grid_data:
+    ...     g["Density"] = np.random.random(g["dimensions"]) * 2**g["level"]
+    ...
+    >>> pf = load_amr_grids(grid_data, [32, 32, 32], 1.0)
+    """
+
+    domain_dimensions = np.array(domain_dimensions)
+    ngrids = len(grid_data)
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros((ngrids, 1), dtype='int32')
+    grid_left_edges = np.zeros((ngrids, 3), dtype="float32")
+    grid_right_edges = np.zeros((ngrids, 3), dtype="float32")
+    grid_dimensions = np.zeros((ngrids, 3), dtype="int32")
+    sfh = StreamDictFieldHandler()
+    for i, g in enumerate(grid_data):
+        grid_left_edges[i,:] = g.pop("left_edge")
+        grid_right_edges[i,:] = g.pop("right_edge")
+        grid_dimensions[i,:] = g.pop("dimensions")
+        grid_levels[i,:] = g.pop("level")
+        sfh[i] = g
+
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        None, # parent_ids is none
+        number_of_particles*np.ones(ngrids, dtype='int64').reshape(ngrids,1),
+        np.zeros(ngrids).reshape((ngrids,1)),
+        sfh,
+    )
+
+    handler.name = "AMRGridData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamStaticOutput(handler)
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+    return spf
+
+def refine_amr(base_pf, refinement_criteria, fluid_operators, max_level,
+               callback = None):
+    r"""Given a base parameter file, repeatedly apply refinement criteria and
+    fluid operators until a maximum level is reached.
+
+    Parameters
+    ----------
+    base_pf : StaticOutput
+        This is any static output.  It can also be a stream static output, for
+        instance as returned by load_uniform_grid.
+    refinement_criteria : list of :class:`~yt.utilities.flagging_methods.FlaggingMethod`
+        These criteria will be applied in sequence to identify cells that need
+        to be refined.
+    fluid_operators : list of :class:`~yt.utilities.initial_conditions.FluidOperator`
+        These fluid operators will be applied in sequence to all resulting
+        grids.
+    max_level : int
+        The maximum level to which the data will be refined
+    callback : function, optional
+        A function that will be called at the beginning of each refinement
+        cycle, with the current parameter file.
+
+    Examples
+    --------
+    >>> domain_dims = (32, 32, 32)
+    >>> data = np.zeros(domain_dims) + 0.25
+    >>> fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})]
+    >>> rc = [fm.flagging_method_registry["overdensity"](8.0)]
+    >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
+    >>> pf = refine_amr(ug, rc, fo, 5)
+    """
+    last_gc = base_pf.h.num_grids
+    cur_gc = -1
+    pf = base_pf    
+    while pf.h.max_level < max_level and last_gc != cur_gc:
+        mylog.info("Refining another level.  Current max level: %s",
+                  pf.h.max_level)
+        last_gc = pf.h.grids.size
+        for m in fluid_operators: m.apply(pf)
+        if callback is not None: callback(pf)
+        grid_data = []
+        for g in pf.h.grids:
+            gd = dict( left_edge = g.LeftEdge,
+                       right_edge = g.RightEdge,
+                       level = g.Level,
+                       dimensions = g.ActiveDimensions )
+            for field in pf.h.field_list:
+                gd[field] = g[field]
+            grid_data.append(gd)
+            if g.Level < pf.h.max_level: continue
+            fg = FlaggingGrid(g, refinement_criteria)
+            nsg = fg.find_subgrids()
+            for sg in nsg:
+                LE = sg.left_index * g.dds
+                dims = sg.dimensions * pf.refine_by
+                grid = pf.h.smoothed_covering_grid(g.Level + 1, LE, dims)
+                gd = dict(left_edge = LE, right_edge = grid.right_edge,
+                          level = g.Level + 1, dimensions = dims)
+                for field in pf.h.field_list:
+                    gd[field] = grid[field]
+                grid_data.append(gd)
+        pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0)
+        cur_gc = pf.h.num_grids
+    return pf
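
For reference, a self-contained version of the refine_amr docstring example above, with the imports it assumes (both utility modules are added elsewhere in this changeset):

    import numpy as np
    import yt.utilities.initial_conditions as ic
    import yt.utilities.flagging_methods as fm
    from yt.frontends.stream.api import load_uniform_grid, refine_amr

    domain_dims = (32, 32, 32)
    data = np.zeros(domain_dims) + 0.25
    fo = [ic.CoredSphere(0.05, 0.3, [0.7, 0.4, 0.75],
                         {"Density": (0.25, 100.0)})]
    rc = [fm.flagging_method_registry["overdensity"](8.0)]
    ug = load_uniform_grid({"Density": data}, domain_dims, 1.0)
    pf = refine_amr(ug, rc, fo, 5)
    print pf.h.max_level  # at most 5, depending on what gets flagged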


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import __builtin__
 import time, types, signal, inspect, traceback, sys, pdb, os
 import contextlib
 import warnings, struct, subprocess
@@ -310,7 +311,8 @@
     maxval = max(maxval, 1)
     from yt.config import ytcfg
     if ytcfg.getboolean("yt", "suppressStreamLogging") or \
-       ytcfg.getboolean("yt", "ipython_notebook"):
+       "__IPYTHON__" in dir(__builtin__) or \
+       ytcfg.getboolean("yt", "__withintesting"):
         return DummyProgressBar()
     elif ytcfg.getboolean("yt", "__withinreason"):
         from yt.gui.reason.extdirect_repl import ExtProgressBar


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -58,7 +58,7 @@
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     data_object_registry, \
-    derived_field, add_field, FieldInfo, \
+    derived_field, add_field, add_grad, FieldInfo, \
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -24,7 +24,22 @@
 
 import numpy as np
 from yt.funcs import *
-from numpy.testing import assert_array_equal
+from numpy.testing import assert_array_equal, assert_almost_equal, \
+    assert_approx_equal, assert_array_almost_equal, assert_equal, \
+    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp,\
+    assert_allclose
+
+def assert_rel_equal(a1, a2, decimals):
+    # We have nan checks in here because occasionally we have fields that get
+    # weighted without non-zero weights.  I'm looking at you, particle fields!
+    if isinstance(a1, np.ndarray):
+        assert(a1.size == a2.size)
+        # Mask out NaNs
+        a1[np.isnan(a1)] = 1.0
+        a2[np.isnan(a2)] = 1.0
+    elif np.isnan(a1) and np.isnan(a2):
+        return True
+    return assert_almost_equal(a1/a2, 1.0, decimals)
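
A quick sketch of assert_rel_equal in action (the arrays are illustrative); note the NaN entries are masked in place to 1.0 before the ratio test:

    import numpy as np
    from yt.testing import assert_rel_equal

    a = np.array([1.0, np.nan, 3.0])
    b = np.array([1.0 + 1e-12, np.nan, 3.0])
    assert_rel_equal(a, b, 10)  # a/b must equal 1.0 to 10 decimal places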
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 
@@ -127,17 +142,23 @@
 
     return left, right, level
 
-def fake_random_pf(ndims, peak_value = 1.0, fields = ("Density",), negative = False):
+def fake_random_pf(ndims, peak_value = 1.0, fields = ("Density",),
+                   negative = False, nprocs = 1):
     from yt.frontends.stream.api import load_uniform_grid
     if not iterable(ndims):
         ndims = [ndims, ndims, ndims]
     else:
         assert(len(ndims) == 3)
-    if negative:
-        offset = 0.5
-    else:
-        offset = 0.0
+    if not iterable(negative):
+        negative = [negative for f in fields]
+    assert(len(fields) == len(negative))
+    offsets = []
+    for n in negative:
+        if n:
+            offsets.append(0.5)
+        else:
+            offsets.append(0.0)
     data = dict((field, (np.random.random(ndims) - offset) * peak_value)
-                 for field in fields)
-    ug = load_uniform_grid(data, ndims, 1.0)
+                 for field,offset in zip(fields,offsets))
+    ug = load_uniform_grid(data, ndims, 1.0, nprocs = nprocs)
     return ug
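
With this change, negative can be given per field and the fake dataset can be split across several grids; a hypothetical call:

    from yt.testing import fake_random_pf
    ug = fake_random_pf(16, fields=("Density", "x-velocity"),
                        negative=(False, True), nprocs=4)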


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/answer_testing/__init__.py
--- a/yt/utilities/answer_testing/__init__.py
+++ b/yt/utilities/answer_testing/__init__.py
@@ -22,10 +22,3 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
-import runner
-import output_tests
-from runner import RegressionTestRunner
-
-from output_tests import RegressionTest, SingleOutputTest, \
-    MultipleOutputTest, YTStaticOutputTest, create_test




diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/answer_testing/default_tests.py
--- a/yt/utilities/answer_testing/default_tests.py
+++ b/yt/utilities/answer_testing/default_tests.py
@@ -67,3 +67,4 @@
         for field in sorted(self.result):
             for p1, p2 in zip(self.result[field], old_result[field]):
                 self.compare_data_arrays(p1, p2, self.tolerance)
+


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/answer_testing/framework.py
--- /dev/null
+++ b/yt/utilities/answer_testing/framework.py
@@ -0,0 +1,511 @@
+"""
+Answer Testing using Nose as a starting point
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import logging
+import os
+import hashlib
+import contextlib
+import urllib2
+import cPickle
+
+from nose.plugins import Plugin
+from yt.testing import *
+from yt.config import ytcfg
+from yt.mods import *
+from yt.data_objects.static_output import StaticOutput
+
+from yt.utilities.logger import disable_stream_logging
+from yt.utilities.command_line import get_yt_version
+
+mylog = logging.getLogger('nose.plugins.answer-testing')
+run_big_data = False
+
+_latest = "gold001"
+_url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
+
+class AnswerTesting(Plugin):
+    name = "answer-testing"
+
+    def options(self, parser, env=os.environ):
+        super(AnswerTesting, self).options(parser, env=env)
+        parser.add_option("--answer-compare", dest="compare_name",
+            default=_latest, help="The name against which we will compare")
+        parser.add_option("--answer-big-data", dest="big_data",
+            default=False, help="Should we run against big data, too?",
+            action="store_true")
+        parser.add_option("--answer-name", dest="this_name",
+            default=None,
+            help="The name we'll call this set of tests")
+        parser.add_option("--answer-store", dest="store_results",
+            default=False, action="store_true")
+
+    def configure(self, options, conf):
+        super(AnswerTesting, self).configure(options, conf)
+        if not self.enabled:
+            return
+        disable_stream_logging()
+        try:
+            my_hash = get_yt_version()
+        except:
+            my_hash = "UNKNOWN%s" % (time.time())
+        if options.this_name is None: options.this_name = my_hash
+        from yt.config import ytcfg
+        ytcfg["yt","__withintesting"] = "True"
+        AnswerTestingTest.result_storage = \
+            self.result_storage = defaultdict(dict)
+        if options.compare_name == "SKIP":
+            options.compare_name = None
+        if options.compare_name is not None:
+            # Now we grab from our S3 store
+            if options.compare_name == "latest":
+                options.compare_name = _latest
+            AnswerTestingTest.reference_storage = \
+                AnswerTestOpener(options.compare_name)
+        self.answer_name = options.this_name
+        self.store_results = options.store_results
+        global run_big_data
+        run_big_data = options.big_data
+
+    def finalize(self, result):
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        if self.store_results is False: return
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in self.result_storage:
+            rs = cPickle.dumps(self.result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
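With the plugin registered under the name above, nose exposes a --with-answer-testing switch alongside the options defined in options(); illustrative invocations:

    $ nosetests --with-answer-testing --answer-compare=gold001 yt/frontends/enzo
    $ nosetests --with-answer-testing --answer-store --answer-name=mytest yt/frontends/enzo
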
+class AnswerTestOpener(object):
+    def __init__(self, reference_name):
+        self.reference_name = reference_name
+        self.cache = {}
+
+    def get(self, pf_name, default = None):
+        if pf_name in self.cache: return self.cache[pf_name]
+        url = _url_path % (self.reference_name, pf_name)
+        try:
+            resp = urllib2.urlopen(url)
+            # This is dangerous, but we have a controlled S3 environment
+            data = resp.read()
+            rv = cPickle.loads(data)
+        except urllib2.HTTPError as ex:
+            # A missing gold answer is fatal; warn with the URL first.
+            mylog.warning("Missing %s (%s)", url, ex)
+            raise YTNoOldAnswer(url)
+        self.cache[pf_name] = rv
+        return rv
+
+@contextlib.contextmanager
+def temp_cwd(cwd):
+    oldcwd = os.getcwd()
+    os.chdir(cwd)
+    yield
+    os.chdir(oldcwd)
+
+def can_run_pf(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    if isinstance(pf_fn, StaticOutput):
+        return AnswerTestingTest.result_storage is not None
+    with temp_cwd(path):
+        try:
+            load(pf_fn)
+        except:
+            return False
+    return AnswerTestingTest.result_storage is not None
+
+def data_dir_load(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    if isinstance(pf_fn, StaticOutput): return pf_fn
+    with temp_cwd(path):
+        pf = load(pf_fn)
+        pf.h
+        return pf
+
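Both helpers resolve dataset paths against the "test_data_dir" configuration value; a hypothetical entry in ~/.yt/config:

    [yt]
    test_data_dir = /data/yt_test_data
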
+def sim_dir_load(sim_fn, path = None, sim_type = "Enzo",
+                 find_outputs=False):
+    if path is None and not os.path.exists(sim_fn):
+        raise IOError
+    if os.path.exists(sim_fn) or not path:
+        path = "."
+    with temp_cwd(path):
+        return simulation(sim_fn, sim_type,
+                          find_outputs=find_outputs)
+
+class AnswerTestingTest(object):
+    reference_storage = None
+    prefix = ""
+    def __init__(self, pf_fn):
+        self.pf = data_dir_load(pf_fn)
+
+    def __call__(self):
+        nv = self.run()
+        if self.reference_storage is not None:
+            dd = self.reference_storage.get(self.storage_name)
+            if dd is None: raise YTNoOldAnswer()
+            ov = dd[self.description]
+            self.compare(nv, ov)
+        else:
+            ov = None
+        self.result_storage[self.storage_name][self.description] = nv
+
+    @property
+    def storage_name(self):
+        if self.prefix != "":
+            return "%s_%s" % (self.prefix, self.pf)
+        return str(self.pf)
+
+    def compare(self, new_result, old_result):
+        raise RuntimeError
+
+    def create_obj(self, pf, obj_type):
+        # obj_type should be tuple of
+        #  ( obj_name, ( args ) )
+        if obj_type is None:
+            return pf.h.all_data()
+        cls = getattr(pf.h, obj_type[0])
+        obj = cls(*obj_type[1])
+        return obj
+
+    @property
+    def sim_center(self):
+        """
+        This returns the center of the domain.
+        """
+        return 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
+
+    @property
+    def max_dens_location(self):
+        """
+        This is a helper function to return the location of the most dense
+        point.
+        """
+        return self.pf.h.find_max("Density")[1]
+
+    @property
+    def entire_simulation(self):
+        """
+        Return a data container that covers the entire domain.
+        """
+        return self.pf.h.all_data()
+
+    @property
+    def description(self):
+        obj_type = getattr(self, "obj_type", None)
+        if obj_type is None:
+            oname = "all"
+        else:
+            oname = "_".join((str(s) for s in obj_type))
+        args = [self._type_name, str(self.pf), oname]
+        args += [str(getattr(self, an)) for an in self._attrs]
+        return "_".join(args)
+        
+class FieldValuesTest(AnswerTestingTest):
+    _type_name = "FieldValues"
+    _attrs = ("field", )
+
+    def __init__(self, pf_fn, field, obj_type = None):
+        super(FieldValuesTest, self).__init__(pf_fn)
+        self.obj_type = obj_type
+        self.field = field
+
+    def run(self):
+        obj = self.create_obj(self.pf, self.obj_type)
+        avg = obj.quantities["WeightedAverageQuantity"](self.field,
+                             weight="Ones")
+        (mi, ma), = obj.quantities["Extrema"](self.field)
+        return np.array([avg, mi, ma])
+
+    def compare(self, new_result, old_result):
+        assert_equal(new_result, old_result)
+
+class ProjectionValuesTest(AnswerTestingTest):
+    _type_name = "ProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(ProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        return proj.field_data
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class PixelizedProjectionValuesTest(AnswerTestingTest):
+    _type_name = "PixelizedProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(PixelizedProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        frb = proj.to_frb((1.0, 'unitary'), 256)
+        frb[self.field]
+        frb[self.weight_field]
+        d = frb.data
+        d.update( dict( (("%s_sum" % f, proj[f].sum(dtype="float64"))
+                         for f in proj.field_data.keys()) ) )
+        return d
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_rel_equal(new_result[k], old_result[k], 10)
+
+class GridValuesTest(AnswerTestingTest):
+    _type_name = "GridValues"
+    _attrs = ("field",)
+
+    def __init__(self, pf_fn, field):
+        super(GridValuesTest, self).__init__(pf_fn)
+        self.field = field
+
+    def run(self):
+        hashes = {}
+        for g in self.pf.h.grids:
+            hashes[g.id] = hashlib.md5(g[self.field].tostring()).hexdigest()
+            g.clear_data()
+        return hashes
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class VerifySimulationSameTest(AnswerTestingTest):
+    _type_name = "VerifySimulationSame"
+    _attrs = ()
+
+    def __init__(self, simulation_obj):
+        self.pf = simulation_obj
+
+    def run(self):
+        result = [ds.current_time for ds in self.pf]
+        return result
+
+    def compare(self, new_result, old_result):
+        assert_equal(len(new_result), len(old_result))
+        for i in range(len(new_result)):
+            assert_equal(new_result[i], old_result[i])
+        
+class GridHierarchyTest(AnswerTestingTest):
+    _type_name = "GridHierarchy"
+    _attrs = ()
+
+    def run(self):
+        result = {}
+        result["grid_dimensions"] = self.pf.h.grid_dimensions
+        result["grid_left_edges"] = self.pf.h.grid_left_edge
+        result["grid_right_edges"] = self.pf.h.grid_right_edge
+        result["grid_levels"] = self.pf.h.grid_levels
+        result["grid_particle_count"] = self.pf.h.grid_particle_count
+        return result
+
+    def compare(self, new_result, old_result):
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class ParentageRelationshipsTest(AnswerTestingTest):
+    _type_name = "ParentageRelationships"
+    _attrs = ()
+    def run(self):
+        result = {}
+        result["parents"] = []
+        result["children"] = []
+        for g in self.pf.h.grids:
+            p = g.Parent
+            if p is None:
+                result["parents"].append(None)
+            elif hasattr(p, "id"):
+                result["parents"].append(p.id)
+            else:
+                result["parents"].append([pg.id for pg in p])
+            result["children"].append([c.id for c in g.Children])
+        return result
+
+    def compare(self, new_result, old_result):
+        for newp, oldp in zip(new_result["parents"], old_result["parents"]):
+            assert(newp == oldp)
+        for newc, oldc in zip(new_result["children"], old_result["children"]):
+            assert(newc == oldc)
+
+def requires_outputlog(path = ".", prefix = ""):
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        @wraps(func)
+        def fyielder(*args, **kwargs):
+            with temp_cwd(path):
+                for t in func(*args, **kwargs):
+                    if isinstance(t, AnswerTestingTest):
+                        t.prefix = prefix
+                    yield t
+        return fyielder
+    if os.path.exists("OutputLog"):
+        return ftrue
+    with temp_cwd(path):
+        if os.path.exists("OutputLog"):
+            return ftrue
+    return ffalse
+
+def requires_pf(pf_fn, big_data = False):
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        return func
+    if run_big_data == False and big_data == True:
+        return ffalse
+    elif not can_run_pf(pf_fn):
+        return ffalse
+    else:
+        return ftrue
+
+def small_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)
+
+def big_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+
+def standard_small_simulation(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [None]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)
+                    
+class ShockTubeTest(object):
+    def __init__(self, data_file, solution_file, fields, 
+                 left_edges, right_edges, rtol, atol):
+        self.solution_file = solution_file
+        self.data_file = data_file
+        self.fields = fields
+        self.left_edges = left_edges
+        self.right_edges = right_edges
+        self.rtol = rtol
+        self.atol = atol
+
+    def __call__(self):
+        # Read in the pf
+        pf = load(self.data_file)  
+        exact = self.get_analytical_solution() 
+
+        ad = pf.h.all_data()
+        position = ad['x']
+        for k in self.fields:
+            field = ad[k]
+            for xmin, xmax in zip(self.left_edges, self.right_edges):
+                mask = (position >= xmin)*(position <= xmax)
+                exact_field = np.interp(position[mask], exact['pos'], exact[k]) 
+                # yield test vs analytical solution 
+                yield assert_allclose, field[mask], exact_field, \
+                    self.rtol, self.atol
+
+    def get_analytical_solution(self):
+        # Reads in from file 
+        pos, dens, vel, pres, inte = \
+                np.loadtxt(self.solution_file, unpack=True)
+        exact = {}
+        exact['pos'] = pos
+        exact['Density'] = dens
+        exact['x-velocity'] = vel
+        exact['Pressure'] = pres
+        exact['ThermalEnergy'] = inte
+        return exact
+
+
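A sketch of how ShockTubeTest might be driven (the file names, fields, and tolerances are placeholders); each yielded tuple is a nose-style (callable, args...) comparison:

    test = ShockTubeTest("DD0001/data0001", "analytical_solution.txt",
                         ["Density", "x-velocity", "Pressure"],
                         left_edges=[0.0], right_edges=[1.0],
                         rtol=1e-5, atol=1e-8)
    for check in test():
        func, args = check[0], check[1:]
        func(*args)  # assert_allclose(actual, desired, rtol, atol)
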


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py
+++ b/yt/utilities/answer_testing/output_tests.py
@@ -29,14 +29,12 @@
 # We first create our dictionary of tests to run.  This starts out empty, and
 # as tests are imported it will be filled.
 if "TestRegistry" not in locals():
-    print "Initializing TestRegistry"
     class TestRegistry(dict):
         def __new__(cls, *p, **k):
             if not '_the_instance' in cls.__dict__:
                 cls._the_instance = dict.__new__(cls)
                 return cls._the_instance
 if "test_registry" not in locals():
-    print "Initializing test_registry"
     test_registry = TestRegistry()
 
 # The exceptions we raise, related to the character of the failure.


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1095,8 +1095,12 @@
                   )
         else:
             from IPython.config.loader import Config
+            import sys
             cfg = Config()
+            # prepend sys.path with current working directory
+            sys.path.insert(0,'')
             IPython.embed(config=cfg,user_ns=local_ns)
+            
 
 class YTMapserverCmd(YTCommand):
     args = ("proj", "field", "weight",


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -68,9 +68,12 @@
 def evaluate_domain_decomposition(n_d, pieces, ldom):
     """ Evaluate longest to shortest edge ratio
         BEWARE: lots of magic here """
-    ideal_bsize = 3.0 * (pieces * np.product(n_d) ** 2) ** (1.0 / 3.0)
-    bsize = int(np.sum(
-        ldom / np.array(n_d, dtype=np.float64) * np.product(n_d)))
+    eff_dim = (n_d > 1).sum()
+    ideal_bsize = eff_dim * (pieces * np.product(n_d) ** (eff_dim - 1)
+                             ) ** (1.0 / eff_dim)
+    mask = np.where(n_d > 1)
+    nd_arr = np.array(n_d, dtype=np.float64)[mask]
+    bsize = int(np.sum(ldom[mask] / nd_arr * np.product(nd_arr)))
     load_balance = float(np.product(n_d)) / \
         (float(pieces) * np.product((n_d - 1) / ldom + 1))
 
@@ -134,23 +137,15 @@
 
 
 def split_array(tab, psize):
-    """ Split array into px*py*pz subarrays using internal numpy routine. """
-    temp = [np.array_split(array, psize[1], axis=1)
-            for array in np.array_split(tab, psize[2], axis=2)]
-    temp = [item for sublist in temp for item in sublist]
-    temp = [np.array_split(array, psize[0], axis=0) for array in temp]
-    temp = [item for sublist in temp for item in sublist]
-    return temp
-
-
-if __name__ == "__main__":
-
-    NPROC = 12
-    ARRAY = np.zeros((128, 128, 129))
-    BBOX = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
-
-    PROCS = get_psize(np.array(ARRAY.shape), NPROC)
-    LE, RE, DATA = decompose_array(ARRAY, PROCS, BBOX)
-
-    for idx in range(NPROC):
-        print LE[idx, :], RE[idx, :], DATA[idx].shape
+    """ Split array into px*py*pz subarrays. """
+    n_d = np.array(tab.shape, dtype=np.int64)
+    slices = []
+    for i in range(psize[0]):
+        for j in range(psize[1]):
+            for k in range(psize[2]):
+                piece = np.array((i, j, k), dtype=np.int64)
+                lei = n_d * piece / psize
+                rei = n_d * (piece + np.ones(3, dtype=np.int64)) / psize
+                slices.append(np.s_[lei[0]:rei[0], lei[1]:
+                                    rei[1], lei[2]:rei[2]])
+    return [tab[slc] for slc in slices]
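
A quick sanity check for the rewritten split_array (the shapes are arbitrary):

    import numpy as np
    from yt.utilities.decompose import split_array

    tab = np.arange(4 * 6 * 8).reshape(4, 6, 8)
    pieces = split_array(tab, np.array([2, 3, 2]))
    assert len(pieces) == 2 * 3 * 2
    assert sum(p.size for p in pieces) == tab.size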


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -146,3 +146,37 @@
     def __str__(self):
         return "You must create an API key before uploading.  See " + \
                "https://data.yt-project.org/getting_started.html"
+
+class YTNoFilenamesMatchPattern(YTException):
+    def __init__(self, pattern):
+        self.pattern = pattern
+
+    def __str__(self):
+        return "No filenames were found to match the pattern: " + \
+               "'%s'" % (self.pattern)
+
+class YTNoOldAnswer(YTException):
+    def __init__(self, path):
+        self.path = path
+
+    def __str__(self):
+        return "There is no old answer available.\n" + \
+               str(self.path)
+
+class YTEllipsoidOrdering(YTException):
+    def __init__(self, pf, A, B, C):
+        YTException.__init__(self, pf)
+        self._A = A
+        self._B = B
+        self._C = C
+
+    def __str__(self):
+        return "Must have A>=B>=C"
+
+class EnzoTestOutputFileNonExistent(YTException):
+    def __init__(self, testname):
+        self.testname = testname
+
+    def __str__(self):
+        return "Enzo test output file (OutputLog) not generated for: " + \
+            "'%s'" % (self.testname) + ".\nTest did not complete."


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -24,15 +24,10 @@
 """
 
 import numpy as np # For modern purposes
+from yt.utilities.lib import grow_flagging_field
 
 flagging_method_registry = {}
 
-def flag_cells(grid, methods):
-    flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
-    for method in methods:
-        flagged |= method(grid)
-    return flagged
-
 class FlaggingMethod(object):
     _skip_add = False
     class __metaclass__(type):
@@ -46,6 +41,144 @@
     def __init__(self, over_density):
         self.over_density = over_density
 
-    def __call__(self, pf, grid):
-        rho = grid["Density"] / (pf.refine_by**grid.Level)
+    def __call__(self, grid):
+        rho = grid["Density"] / (grid.pf.refine_by**grid.Level)
         return (rho > self.over_density)
+
+class FlaggingGrid(object):
+    def __init__(self, grid, methods):
+        self.grid = grid
+        flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
+        for method in methods:
+            flagged |= method(self.grid)
+        self.flagged = grow_flagging_field(flagged)
+        self.subgrids = []
+        self.left_index = grid.get_global_startindex()
+        self.dimensions = grid.ActiveDimensions.copy()
+
+    def find_subgrids(self):
+        if not np.any(self.flagged): return []
+        psg = ProtoSubgrid(self.flagged, self.left_index, self.dimensions)
+        sgl = [psg]
+        index = 0
+        while index < len(sgl):
+            psg = sgl[index]
+            psg.shrink()
+            if psg.dimensions.prod() == 0:
+                # Drop degenerate proto-subgrids, advancing the index so the
+                # None entry is not revisited (and dereferenced) next pass.
+                sgl[index] = None
+                index += 1
+                continue
+            while not psg.acceptable:
+                new_psgs = []
+                for i, dim in enumerate(np.argsort(psg.dimensions)[::-1]):
+                    new_psgs = psg.find_by_zero_signature(dim)
+                    if len(new_psgs) > 1:
+                        break
+                if len(new_psgs) <= 1:
+                    new_psgs = psg.find_by_second_derivative()
+                psg = new_psgs[0]
+                sgl[index] = psg 
+                sgl.extend(new_psgs[1:])
+                psg.shrink()
+            index += 1
+        return [sg for sg in sgl if sg is not None]
+
+
+# Much or most of this is directly translated from Enzo
+class ProtoSubgrid(object):
+
+    def __init__(self, flagged_base, left_index, dimensions, offset = (0,0,0)):
+        self.left_index = left_index.copy()
+        self.dimensions = dimensions.copy()
+        self.flagged = flagged_base[offset[0]:offset[0]+dimensions[0],
+                                    offset[1]:offset[1]+dimensions[1],
+                                    offset[2]:offset[2]+dimensions[2]]
+        self.compute_signatures()
+
+    def compute_signatures(self):
+        self.sigs = []
+        for dim in range(3):
+            # Sum over the other two axes to get the 1D signature along
+            # "dim"; the axes renumber after the first sum, hence the
+            # boolean (True == 1) axis index for the second.
+            d1 = (dim + 1) % 3
+            d2 = (dim == 0)
+            self.sigs.append(self.flagged.sum(axis=d1).sum(axis=d2))
+
+    @property
+    def acceptable(self):
+        return float(self.flagged.sum()) / self.flagged.size > 0.2
+
+    def shrink(self):
+        new_ind = []
+        for dim in range(3):
+            sig = self.sigs[dim]
+            new_start = 0
+            while sig[new_start] == 0:
+                new_start += 1
+            new_end = sig.size 
+            while sig[new_end - 1] == 0:
+                new_end -= 1
+            self.dimensions[dim] = new_end - new_start
+            self.left_index[dim] += new_start
+            new_ind.append((new_start, new_end))
+        self.flagged = self.flagged[new_ind[0][0]:new_ind[0][1],
+                                    new_ind[1][0]:new_ind[1][1],
+                                    new_ind[2][0]:new_ind[2][1]]
+        self.compute_signatures()
+
+    def find_by_zero_signature(self, dim):
+        sig = self.sigs[dim]
+        grid_ends = np.zeros((sig.size, 2))
+        ng = 0
+        i = 0
+        while i < sig.size:
+            if sig[i] != 0:
+                grid_ends[ng, 0] = i
+                while i < sig.size and sig[i] != 0:
+                    i += 1
+                grid_ends[ng, 1] = i - 1
+                ng += 1
+            i += 1
+        new_grids = []
+        for si, ei in grid_ends[:ng,:]:
+            li = self.left_index.copy()
+            dims = self.dimensions.copy()
+            li[dim] += si
+            dims[dim] = ei - si
+            offset = [0,0,0]
+            offset[dim] = si
+            new_grids.append(ProtoSubgrid(self.flagged, li, dims, offset))
+        return new_grids
+
+    def find_by_second_derivative(self):
+        max_strength = 0
+        max_axis = -1
+        max_ind = -1
+        for dim in range(3):
+            sig = self.sigs[dim]
+            sd = sig[:-2] - 2.0*sig[1:-1] + sig[2:]
+            grid_ends = np.zeros((sig.size, 2))
+            ng = 0
+            center = int((self.flagged.shape[dim] - 1) / 2)
+            strength = zero_strength = 0
+            for i in range(1, sig.size-2):
+                # Note that sd is offset by one
+                if sd[i-1] * sd[i] < 0:
+                    strength = np.abs(sd[i-1] - sd[i])
+                    if strength > zero_strength or \
+                       (strength == zero_strength and np.abs(center - i) < np.abs(zero_cross - i)):
+                        zero_strength = strength
+                        zero_cross = i
+            if zero_strength > max_strength:
+                max_axis = dim
+                max_ind = zero_cross
+        # Split at the crossing recorded for the winning axis (max_ind);
+        # zero_cross still holds the value from the last axis examined.
+        dims = self.dimensions.copy()
+        li = self.left_index.copy()
+        dims[max_axis] = max_ind
+        psg1 = ProtoSubgrid(self.flagged, li, dims)
+        li[max_axis] += max_ind
+        dims[max_axis] = self.dimensions[max_axis] - max_ind
+        offset = np.zeros(3)
+        offset[max_axis] = max_ind
+        psg2 = ProtoSubgrid(self.flagged, li, dims, offset)
+        return [psg1, psg2]
+
+    def __str__(self):
+        return "LI: (%s) DIMS: (%s)" % (self.left_index, self.dimensions)


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/initial_conditions.py
--- /dev/null
+++ b/yt/utilities/initial_conditions.py
@@ -0,0 +1,80 @@
+"""
+Painting zones in a grid
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+class FluidOperator(object):
+    def apply(self, pf):
+        for g in pf.h.grids: self(g)
+
+class TopHatSphere(FluidOperator):
+    def __init__(self, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.sqrt(r, r)
+        ind = (r <= self.radius)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, val in self.fields.iteritems():
+            grid[field][ind] = val
+
+class CoredSphere(FluidOperator):
+    def __init__(self, core_radius, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        self.core_radius = core_radius
+
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        r2 = self.radius**2
+        cr2 = self.core_radius**2
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.maximum(r, cr2, r)
+        ind = (r <= r2)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, (outer_val, inner_val) in self.fields.iteritems():
+            val = ((r[ind] - cr2) / (r2 - cr2))**0.5 * (outer_val - inner_val)
+            grid[field][ind] = val + inner_val
+
+class RandomFluctuation(FluidOperator):
+    def __init__(self, fields):
+        self.fields = fields
+
+    def __call__(self, grid, sub_select = None):
+        if sub_select is None:
+            sub_select = Ellipsis
+        for field, mag in self.fields.iteritems():
+            vals = grid[field][sub_select]
+            rc = 1.0 + (np.random.random(vals.shape) - 0.5) * mag
+            grid[field][sub_select] *= rc
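
A sketch of the fluid operators applied to a stream dataset (the values are illustrative):

    import numpy as np
    from yt.frontends.stream.api import load_uniform_grid
    from yt.utilities.initial_conditions import TopHatSphere, RandomFluctuation

    data = {"Density": np.ones((32, 32, 32))}
    pf = load_uniform_grid(data, [32, 32, 32], 1.0)
    TopHatSphere(0.2, [0.5, 0.5, 0.5], {"Density": 10.0}).apply(pf)
    RandomFluctuation({"Density": 0.01}).apply(pf)  # +/- 0.5% perturbation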


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/kdtree/__init__.py
--- a/yt/utilities/kdtree/__init__.py
+++ b/yt/utilities/kdtree/__init__.py
@@ -1,1 +0,0 @@
-from fKDpy import *


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/kdtree/api.py
--- /dev/null
+++ b/yt/utilities/kdtree/api.py
@@ -0,0 +1,9 @@
+from fKDpy import \
+    chainHOP_tags_dens, \
+    create_tree, \
+    fKD, \
+    find_nn_nearest_neighbors, \
+    free_tree, \
+    find_chunk_nearest_neighbors
+
+

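With the star import removed from yt/utilities/kdtree/__init__.py, callers
are expected to pull the Fortran kD-tree names in explicitly, e.g.:

    from yt.utilities.kdtree.api import fKD, create_tree, \
        find_nn_nearest_neighbors, free_tree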

diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/kdtree/test.py
--- a/yt/utilities/kdtree/test.py
+++ /dev/null
@@ -1,58 +0,0 @@
-from Forthon import *
-from fKDpy import *
-import numpy,random
-
-n = 32768
-
-
-fKD.tags = fzeros((64),'i')
-fKD.dist = fzeros((64),'d')
-fKD.pos = fzeros((3,n),'d')
-fKD.nn = 64
-fKD.nparts = n
-fKD.sort = True
-fKD.rearrange = True
-fKD.qv = numpy.array([16./32, 16./32, 16./32])
-
-fp = open('parts.txt','r')
-xpos = []
-ypos = []
-zpos = []
-line = fp.readline()
-while line:
-    line = line.split()
-    xpos.append(float(line[0]))
-    ypos.append(float(line[1]))
-    zpos.append(float(line[2]))
-    line= fp.readline()
-
-fp.close()
-
-
-for k in range(32):
-    for j in range(32):
-        for i in range(32):
-            fKD.pos[0][i + j*32 + k*1024] = float(i)/32 + 1./64 + 0.0001*random.random()
-            fKD.pos[1][i + j*32 + k*1024] = float(j)/32 + 1./64 + 0.0001*random.random()
-            fKD.pos[2][i + j*32 + k*1024] = float(k)/32 + 1./64 + 0.0001*random.random()
-
-            
-
-#print fKD.pos[0][0],fKD.pos[1][0],fKD.pos[2][0]
-
-create_tree()
-
-
-find_nn_nearest_neighbors()
-
-#print 'next'
-
-#fKD.qv = numpy.array([0., 0., 0.])
-
-#find_nn_nearest_neighbors()
-
-
-#print (fKD.tags - 1)
-#print fKD.dist
-
-free_tree()


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -338,3 +338,47 @@
                     rg[2,i,j,k] = zg[i,j,k] - c[2]
         return rg
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def obtain_rv_vec(data):
+    # This is just to let the pointers exist and whatnot.  We can't cdef them
+    # inside conditionals.
+    cdef np.ndarray[np.float64_t, ndim=1] vxf
+    cdef np.ndarray[np.float64_t, ndim=1] vyf
+    cdef np.ndarray[np.float64_t, ndim=1] vzf
+    cdef np.ndarray[np.float64_t, ndim=2] rvf
+    cdef np.ndarray[np.float64_t, ndim=3] vxg
+    cdef np.ndarray[np.float64_t, ndim=3] vyg
+    cdef np.ndarray[np.float64_t, ndim=3] vzg
+    cdef np.ndarray[np.float64_t, ndim=4] rvg
+    cdef np.float64_t bv[3]
+    cdef int i, j, k
+    bulk_velocity = data.get_field_parameter("bulk_velocity")
+    if bulk_velocity is None:
+        bulk_velocity = np.zeros(3)
+    bv[0] = bulk_velocity[0]; bv[1] = bulk_velocity[1]; bv[2] = bulk_velocity[2]
+    if len(data['x-velocity'].shape) == 1:
+        # One dimensional data
+        vxf = data['x-velocity'].astype("float64")
+        vyf = data['y-velocity'].astype("float64")
+        vzf = data['z-velocity'].astype("float64")
+        rvf = np.empty((3, vxf.shape[0]), 'float64')
+        for i in range(vxf.shape[0]):
+            rvf[0, i] = vxf[i] - bv[0]
+            rvf[1, i] = vyf[i] - bv[1]
+            rvf[2, i] = vzf[i] - bv[2]
+        return rvf
+    else:
+        # Three dimensional data
+        vxg = data['x-velocity'].astype("float64")
+        vyg = data['y-velocity'].astype("float64")
+        vzg = data['z-velocity'].astype("float64")
+        rvg = np.empty((3, vxg.shape[0], vxg.shape[1], vxg.shape[2]), 'float64')
+        for i in range(vxg.shape[0]):
+            for j in range(vxg.shape[1]):
+                for k in range(vxg.shape[2]):
+                    rvg[0,i,j,k] = vxg[i,j,k] - bv[0]
+                    rvg[1,i,j,k] = vyg[i,j,k] - bv[1]
+                    rvg[2,i,j,k] = vzg[i,j,k] - bv[2]
+        return rvg

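A short sketch of how obtain_rv_vec is driven (assumes a loaded pf; the
bulk velocity value is illustrative): the returned velocities are relative
to the "bulk_velocity" field parameter, or raw if none is set.

    import numpy as np
    from yt.utilities.lib import obtain_rv_vec

    sp = pf.h.sphere("c", 0.1)
    sp.set_field_parameter("bulk_velocity", np.array([1e5, 0.0, 0.0]))
    rv = obtain_rv_vec(sp)  # shape (3, N); x-velocity has 1e5 subtracted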

diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -233,49 +233,6 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def obtain_rvec(data):
-    # This is just to let the pointers exist and whatnot.  We can't cdef them
-    # inside conditionals.
-    cdef np.ndarray[np.float64_t, ndim=1] xf
-    cdef np.ndarray[np.float64_t, ndim=1] yf
-    cdef np.ndarray[np.float64_t, ndim=1] zf
-    cdef np.ndarray[np.float64_t, ndim=2] rf
-    cdef np.ndarray[np.float64_t, ndim=3] xg
-    cdef np.ndarray[np.float64_t, ndim=3] yg
-    cdef np.ndarray[np.float64_t, ndim=3] zg
-    cdef np.ndarray[np.float64_t, ndim=4] rg
-    cdef np.float64_t c[3]
-    cdef int i, j, k
-    center = data.get_field_parameter("center")
-    c[0] = center[0]; c[1] = center[1]; c[2] = center[2]
-    if len(data['x'].shape) == 1:
-        # One dimensional data
-        xf = data['x']
-        yf = data['y']
-        zf = data['z']
-        rf = np.empty((3, xf.shape[0]), 'float64')
-        for i in range(xf.shape[0]):
-            rf[0, i] = xf[i] - c[0]
-            rf[1, i] = yf[i] - c[1]
-            rf[2, i] = zf[i] - c[2]
-        return rf
-    else:
-        # Three dimensional data
-        xg = data['x']
-        yg = data['y']
-        zg = data['z']
-        rg = np.empty((3, xg.shape[0], xg.shape[1], xg.shape[2]), 'float64')
-        for i in range(xg.shape[0]):
-            for j in range(xg.shape[1]):
-                for k in range(xg.shape[2]):
-                    rg[0,i,j,k] = xg[i,j,k] - c[0]
-                    rg[1,i,j,k] = yg[i,j,k] - c[1]
-                    rg[2,i,j,k] = zg[i,j,k] - c[2]
-        return rg
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
 def kdtree_get_choices(np.ndarray[np.float64_t, ndim=3] data,
                        np.ndarray[np.float64_t, ndim=1] l_corner,
                        np.ndarray[np.float64_t, ndim=1] r_corner):
@@ -334,3 +291,25 @@
     # Return out unique values
     return best_dim, split, less_ids.view("bool"), greater_ids.view("bool")
 
+
+def grow_flagging_field(oofield):
+    cdef np.ndarray[np.uint8_t, ndim=3] ofield = oofield.astype("uint8")
+    cdef np.ndarray[np.uint8_t, ndim=3] nfield
+    nfield = np.zeros_like(ofield)
+    cdef int i, j, k, ni, nj, nk
+    cdef int oi, oj, ok
+    for ni in range(ofield.shape[0]):
+        for nj in range(ofield.shape[1]):
+            for nk in range(ofield.shape[2]):
+                for oi in range(3):
+                    i = ni + (oi - 1)
+                    if i < 0 or i >= ofield.shape[0]: continue
+                    for oj in range(3):
+                        j = nj + (oj - 1)
+                        if j < 0 or j >= ofield.shape[1]: continue
+                        for ok in range(3):
+                            k = nk + (ok - 1)
+                            if k < 0 or k >= ofield.shape[2]: continue
+                            if ofield[i, j, k] == 1:
+                                nfield[ni, nj, nk] = 1
+    return nfield.astype("bool")

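The new grow_flagging_field helper dilates a boolean flagging field by one
zone in every direction (a 3x3x3 neighborhood grow). A quick sketch, with
the import location assumed:

    import numpy as np
    from yt.utilities.lib import grow_flagging_field

    flags = np.zeros((8, 8, 8), dtype="bool")
    flags[4, 4, 4] = True
    grown = grow_flagging_field(flags)
    assert grown.sum() == 27  # one flagged zone grows into a 3x3x3 block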

diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/lib/tests/test_geometry_utils.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_geometry_utils.py
@@ -0,0 +1,30 @@
+from yt.testing import *
+from yt.utilities.lib import obtain_rvec, obtain_rv_vec
+
+_fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
+
+def test_obtain_rvec():
+    pf = fake_random_pf(64, nprocs=8, fields=_fields, 
+           negative = [False, True, True, True])
+    
+    dd = pf.h.sphere((0.5,0.5,0.5), 0.2)
+
+    coords = obtain_rvec(dd)
+
+    r = np.sqrt(np.sum(coords*coords,axis=0))
+
+    assert_array_less(r.max(), 0.2)
+
+    assert_array_less(0.0, r.min())
+
+def test_obtain_rv_vec():
+    pf = fake_random_pf(64, nprocs=8, fields=_fields, 
+           negative = [False, True, True, True])
+
+    dd = pf.h.all_data()
+
+    vels = obtain_rv_vec(dd)
+
+    assert_array_equal(vels[0,:], dd['x-velocity'])
+    assert_array_equal(vels[1,:], dd['y-velocity'])
+    assert_array_equal(vels[2,:], dd['z-velocity'])


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -674,3 +674,191 @@
                   [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
     
     return R
+
+def get_ortho_basis(normal):
+    xprime = np.cross([0.0,1.0,0.0],normal)
+    # Degenerate when normal is parallel to y-hat; test the squared norm
+    # (a component sum can cancel to zero for a nonzero vector).
+    if np.sum(xprime*xprime) == 0.0: xprime = np.array([0.0, 0.0, 1.0])
+    yprime = np.cross(normal,xprime)
+    zprime = normal
+    return (xprime, yprime, zprime)
+
+def get_sph_r(coords):
+    # The spherical coordinates radius is simply the magnitude of the
+    # coordinate vector.
+
+    return np.sqrt(np.sum(coords**2, axis=0))
+
+def resize_vector(vector,vector_array):
+    if len(vector_array.shape) == 4:
+        res_vector = np.resize(vector,(3,1,1,1))
+    else:
+        res_vector = np.resize(vector,(3,1))
+    return res_vector
+
+def get_sph_theta(coords, normal):
+    # The angle (theta) with respect to the normal (J), is the arccos
+    # of the dot product of the normal with the normalized coordinate
+    # vector.
+    
+    res_normal = resize_vector(normal, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    
+    J = np.tile(res_normal,tile_shape)
+
+    JdotCoords = np.sum(J*coords,axis=0)
+    
+    return np.arccos( JdotCoords / np.sqrt(np.sum(coords**2,axis=0)) )
+
+def get_sph_phi(coords, normal):
+    # We have freedom with respect to what axis (xprime) to define
+    # the disk angle. Here I've chosen to use the axis that is
+    # perpendicular to the normal and the y-axis. When normal ==
+    # y-hat, then set xprime = z-hat. With this definition, when
+    # normal == z-hat (as is typical), then xprime == x-hat.
+    #
+    # The angle is then given by the arctan of the ratio of the
+    # yprime-component and the xprime-component of the coordinate 
+    # vector.
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, coords)
+    res_yprime = resize_vector(yprime, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    Px = np.sum(Jx*coords,axis=0)
+    Py = np.sum(Jy*coords,axis=0)
+    
+    return np.arctan2(Py,Px)
+
+def get_cyl_r(coords, normal):
+    # The cross product of the normal (J) with a coordinate vector
+    # gives a vector of magnitude equal to the cylindrical radius.
+
+    res_normal = resize_vector(normal, coords)
+
+    tile_shape = [1] + list(coords.shape)[1:]
+    J = np.tile(res_normal, tile_shape)
+    
+    JcrossCoords = np.cross(J, coords, axisa=0, axisb=0, axisc=0)
+    return np.sqrt(np.sum(JcrossCoords**2, axis=0))
+
+def get_cyl_z(coords, normal):
+    # The dot product of the normal (J) with the coordinate vector 
+    # gives the cylindrical height.
+
+    res_normal = resize_vector(normal, coords)
+    
+    tile_shape = [1] + list(coords.shape)[1:]
+    J = np.tile(res_normal, tile_shape)
+
+    return np.sum(J*coords, axis=0)  
+
+def get_cyl_theta(coords, normal):
+    # This is identical to the spherical phi component
+
+    return get_sph_phi(coords, normal)
+
+
+def get_cyl_r_component(vectors, theta, normal):
+    # The r of a vector is the vector dotted with rhat
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    rhat = Jx*np.cos(theta) + Jy*np.sin(theta)
+
+    return np.sum(vectors*rhat,axis=0)
+
+def get_cyl_theta_component(vectors, theta, normal):
+    # The theta component of a vector is the vector dotted with thetahat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    thetahat = -Jx*np.sin(theta) + Jy*np.cos(theta)
+
+    return np.sum(vectors*thetahat, axis=0)
+
+def get_cyl_z_component(vectors, normal):
+    # The z component of a vector is the vector dotted with zhat
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    zhat = np.tile(res_zprime, tile_shape)
+
+    return np.sum(vectors*zhat, axis=0)
+
+def get_sph_r_component(vectors, theta, phi, normal):
+    # The r component of a vector is the vector dotted with rhat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+    Jz = np.tile(res_zprime,tile_shape)
+
+    rhat = Jx*np.sin(theta)*np.cos(phi) + \
+           Jy*np.sin(theta)*np.sin(phi) + \
+           Jz*np.cos(theta)
+
+    return np.sum(vectors*rhat, axis=0)
+
+def get_sph_phi_component(vectors, phi, normal):
+    # The phi component of a vector is the vector dotted with phihat
+
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+
+    phihat = -Jx*np.sin(phi) + Jy*np.cos(phi)
+
+    return np.sum(vectors*phihat, axis=0)
+
+def get_sph_theta_component(vectors, theta, phi, normal):
+    # The theta component of a vector is the vector dotted with thetahat
+    
+    (xprime, yprime, zprime) = get_ortho_basis(normal)
+
+    res_xprime = resize_vector(xprime, vectors)
+    res_yprime = resize_vector(yprime, vectors)
+    res_zprime = resize_vector(zprime, vectors)
+
+    tile_shape = [1] + list(vectors.shape)[1:]
+    Jx = np.tile(res_xprime,tile_shape)
+    Jy = np.tile(res_yprime,tile_shape)
+    Jz = np.tile(res_zprime,tile_shape)
+    
+    thetahat = Jx*np.cos(theta)*np.cos(phi) + \
+               Jy*np.cos(theta)*np.sin(phi) - \
+               Jz*np.sin(theta)
+
+    return np.sum(vectors*thetahat, axis=0)

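A standalone sanity check of the new coordinate helpers: with the normal
along z, the spherical quantities reduce to the textbook definitions.

    import numpy as np
    from yt.utilities.math_utils import get_sph_r, get_sph_theta, get_sph_phi

    point = np.array([[1.0], [1.0], [0.0]])  # one point in the x-y plane
    print get_sph_r(point)                   # [ 1.41421356]  (sqrt(2))
    print get_sph_theta(point, [0, 0, 1])    # [ 1.57079633]  (pi/2)
    print get_sph_phi(point, [0, 0, 1])      # [ 0.78539816]  (pi/4)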

diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -581,7 +581,9 @@
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if ncols == 0:
-                    data = np.zeros(0, dtype=dtype) # This only works for
+                data = np.zeros(0, dtype=dtype) # This only works for
+            elif data is None:
+                data = np.zeros((ncols, 0), dtype=dtype)
             size = data.shape[-1]
             sizes = np.zeros(self.comm.size, dtype='int64')
             outsize = np.array(size, dtype='int64')
@@ -1058,3 +1060,49 @@
                 nextdim = (nextdim + 1) % 3
         return cuts
     
+class GroupOwnership(ParallelAnalysisInterface):
+    def __init__(self, items):
+        ParallelAnalysisInterface.__init__(self)
+        self.num_items = len(items)
+        self.items = items
+        assert(self.num_items >= self.comm.size)
+        self.owned = range(self.comm.size)
+        self.pointer = 0
+        if parallel_capable:
+            communication_system.push_with_ids([self.comm.rank])
+
+    def __del__(self):
+        if parallel_capable:
+            communication_system.pop()
+
+    def inc(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer >= self.num_items - self.comm.size: break
+            self.owned[self.pointer % self.comm.size] += self.comm.size
+            self.pointer += 1
+        if self.item is not old_item:
+            self.switch()
+            
+    def dec(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer == 0: break
+            self.owned[(self.pointer - 1) % self.comm.size] -= self.comm.size
+            self.pointer -= 1
+        if self.item is not old_item:
+            self.switch()
+
+    _last = None
+    @property
+    def item(self):
+        own = self.owned[self.comm.rank]
+        if self._last != own:
+            self._item = self.items[own]
+            self._last = own
+        return self._item
+
+    def switch(self):
+        pass

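GroupOwnership hands out items round-robin across MPI ranks, and inc()/dec()
slide every rank's ownership window through the list in lockstep. A hedged
sketch (assumes a parallel session, a list of parameter files pfs, and a
hypothetical per-rank work function):

    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        GroupOwnership

    own = GroupOwnership(pfs)  # requires len(pfs) >= comm.size
    process_one(own.item)      # hypothetical: work on the pf this rank owns
    own.inc()                  # every rank advances comm.size items
    process_one(own.item)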

diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/tests/test_coordinate_conversions.py
--- /dev/null
+++ b/yt/utilities/tests/test_coordinate_conversions.py
@@ -0,0 +1,125 @@
+from yt.testing import *
+from yt.utilities.math_utils import \
+    get_sph_r_component, \
+    get_sph_theta_component, \
+    get_sph_phi_component, \
+    get_cyl_r_component, \
+    get_cyl_z_component, \
+    get_cyl_theta_component, \
+    get_cyl_r, get_cyl_theta, \
+    get_cyl_z, get_sph_r, \
+    get_sph_theta, get_sph_phi
+
+# Randomly generated coordinates in the domain [[-1,1],[-1,1],[-1,1]]
+coords = np.array([[-0.41503037, -0.22102472, -0.55774212],
+                   [ 0.73828247, -0.17913899,  0.64076921],
+                   [ 0.08922066, -0.94254844, -0.61774511],
+                   [ 0.10173242, -0.95789145,  0.16294352],
+                   [ 0.73186508, -0.3109153 ,  0.75728738],
+                   [ 0.8757989 , -0.41475119, -0.57039201],
+                   [ 0.58040762,  0.81969082,  0.46759728],
+                   [-0.89983356, -0.9853683 , -0.38355343]]).T
+
+def test_spherical_coordinate_conversion():
+    normal = [0, 0, 1]
+    real_r =     [ 0.72950559,  0.99384957,  1.13047198,  0.97696269,  
+                   1.09807968,  1.12445067,  1.10788685,  1.38843954]
+    real_theta = [ 2.44113629,  0.87012028,  2.14891444,  1.4032274 ,  
+                   0.80979483,  2.10280198,  1.13507735,  1.85068416]
+    real_phi =   [-2.65224483, -0.23804243, -1.47641858, -1.46498842, 
+                  -0.40172325, -0.4422801 ,  0.95466734, -2.31085392]
+
+    calc_r = get_sph_r(coords)
+    calc_theta = get_sph_theta(coords, normal)
+    calc_phi = get_sph_phi(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_phi, real_phi)
+
+    normal = [1, 0, 0]
+    real_theta = [ 2.17598842,  0.73347681,  1.49179079,  1.46647589,  
+                   0.8412984 ,  0.67793705,  1.0193883 ,  2.27586987]
+    real_phi =   [-0.37729951, -2.86898397, -0.99063518, -1.73928995, 
+                   -2.75201227,-0.62870527,  2.08920872, -1.19959244]
+
+    calc_theta = get_sph_theta(coords, normal)
+    calc_phi = get_sph_phi(coords, normal)
+    
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_phi, real_phi)
+
+def test_cylindrical_coordinate_conversion():
+    normal = [0, 0, 1]
+    real_r =     [ 0.47021498,  0.75970506,  0.94676179,  0.96327853,  
+                   0.79516968,  0.96904193,  1.00437346,  1.3344104 ]    
+    real_theta = [-2.65224483, -0.23804243, -1.47641858, -1.46498842, 
+                  -0.40172325, -0.4422801 ,  0.95466734, -2.31085392]
+    real_z =     [-0.55774212,  0.64076921, -0.61774511,  0.16294352,
+                   0.75728738, -0.57039201,  0.46759728, -0.38355343]
+
+    calc_r = get_cyl_r(coords, normal)
+    calc_theta = get_cyl_theta(coords, normal)
+    calc_z = get_cyl_z(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_z, real_z)
+
+    normal = [1, 0, 0]
+    real_r =     [ 0.59994016,  0.66533898,  1.12694569,  0.97165149,
+                   0.81862843,  0.70524152,  0.94368441,  1.05738542]
+    real_theta = [-0.37729951, -2.86898397, -0.99063518, -1.73928995, 
+                  -2.75201227, -0.62870527,  2.08920872, -1.19959244]
+    real_z =     [-0.41503037,  0.73828247,  0.08922066,  0.10173242,
+                   0.73186508,  0.8757989 ,  0.58040762, -0.89983356]
+
+    calc_r = get_cyl_r(coords, normal)
+    calc_theta = get_cyl_theta(coords, normal)
+    calc_z = get_cyl_z(coords, normal)
+
+    assert_array_almost_equal(calc_r, real_r)
+    assert_array_almost_equal(calc_theta, real_theta)
+    assert_array_almost_equal(calc_z, real_z)
+
+def test_spherical_coordinate_projections():
+    normal = [0, 0, 1]
+    theta = get_sph_theta(coords, normal)
+    phi = get_sph_phi(coords, normal)
+    zero = np.tile(0,coords.shape[1])
+
+    # Purely radial field
+    vecs = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta, phi, normal))
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi, normal))
+
+    # Purely toroidal field
+    vecs = np.array([-np.sin(phi), np.cos(phi), zero])
+    assert_array_almost_equal(zero, get_sph_theta_component(vecs, theta, phi, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta, phi, normal))
+
+    # Purely poloidal field
+    vecs = np.array([np.cos(theta)*np.cos(phi), np.cos(theta)*np.sin(phi), -np.sin(theta)])
+    assert_array_almost_equal(zero, get_sph_phi_component(vecs, phi, normal))
+    assert_array_almost_equal(zero, get_sph_r_component(vecs, theta, phi, normal))
+
+def test_cylindrical_coordinate_projections():
+    normal = [0, 0, 1]
+    theta = get_cyl_theta(coords, normal)
+    z = get_cyl_z(coords, normal)
+    zero = np.tile(0, coords.shape[1])
+
+    # Purely radial field
+    vecs = np.array([np.cos(theta), np.sin(theta), zero])
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta, normal))
+    assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
+
+    # Purely toroidal field
+    vecs = np.array([-np.sin(theta), np.cos(theta), zero])
+    assert_array_almost_equal(zero, get_cyl_z_component(vecs, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta, normal))
+
+    # Purely z field
+    vecs = np.array([zero, zero, z])
+    assert_array_almost_equal(zero, get_cyl_theta_component(vecs, theta, normal))
+    assert_array_almost_equal(zero, get_cyl_r_component(vecs, theta, normal))


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/tests/test_decompose.py
--- /dev/null
+++ b/yt/utilities/tests/test_decompose.py
@@ -0,0 +1,96 @@
+"""
+Test suite for cartesian domain decomposition.
+
+Author: Kacper Kowalik <xarthisius.kk at gmail.com>
+Affiliation: CA UMK
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Kacper Kowalik. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import assert_array_equal, assert_almost_equal
+import numpy as np
+import yt.utilities.decompose as dec
+
+
+def setup():
+    pass
+
+
+def test_psize_2d():
+    procs = dec.get_psize(np.array([5, 1, 7]), 6)
+    assert_array_equal(procs, np.array([3, 1, 2]))
+    procs = dec.get_psize(np.array([1, 7, 5]), 6)
+    assert_array_equal(procs, np.array([1, 2, 3]))
+    procs = dec.get_psize(np.array([7, 5, 1]), 6)
+    assert_array_equal(procs, np.array([2, 3, 1]))
+
+
+def test_psize_3d():
+    procs = dec.get_psize(np.array([33, 35, 37]), 12)
+    assert_array_equal(procs, np.array([3, 2, 2]))
+
+
+def test_decomposition_2d():
+    array = np.ones((7, 5, 1))
+    bbox = np.array([[-0.7, 0.0], [1.5, 2.0], [0.0, 0.7]])
+    ledge, redge, data = dec.decompose_array(array, np.array([2, 3, 1]), bbox)
+
+    assert_array_equal(data[1].shape, np.array([3, 2, 1]))
+
+    gold_le = np.array([
+                       [-0.7, 1.5, 0.0], [-0.7, 1.6, 0.0],
+                       [-0.7, 1.8, 0.0], [-0.4, 1.5, 0.0],
+                       [-0.4, 1.6, 0.0], [-0.4, 1.8, 0.0]
+                       ])
+    assert_almost_equal(ledge, gold_le, 8)
+
+    gold_re = np.array(
+        [[-0.4, 1.6, 0.7], [-0.4, 1.8, 0.7],
+         [-0.4, 2.0, 0.7], [0.0, 1.6, 0.7],
+         [0.0, 1.8, 0.7], [0.0, 2.0, 0.7]]
+    )
+    assert_almost_equal(redge, gold_re, 8)
+
+
+def test_decomposition_3d():
+    array = np.ones((33, 35, 37))
+    bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
+
+    ledge, redge, data = dec.decompose_array(array, np.array([3, 2, 2]), bbox)
+    assert_array_equal(data[0].shape, np.array([11, 17, 18]))
+
+    gold_le = np.array(
+        [[0.00000, -1.50000, 1.00000], [0.00000, -1.50000, 1.72973],
+         [0.00000, -0.04286, 1.00000], [0.00000, -0.04286, 1.72973],
+         [0.33333, -1.50000, 1.00000], [0.33333, -1.50000, 1.72973],
+         [0.33333, -0.04286, 1.00000], [0.33333, -0.04286, 1.72973],
+         [0.66667, -1.50000, 1.00000], [0.66667, -1.50000, 1.72973],
+         [0.66667, -0.04286, 1.00000], [0.66667, -0.04286, 1.72973]]
+    )
+    assert_almost_equal(ledge, gold_le, 5)
+
+    gold_re = np.array(
+        [[0.33333, -0.04286, 1.72973], [0.33333, -0.04286, 2.50000],
+         [0.33333, 1.50000, 1.72973], [0.33333, 1.50000, 2.50000],
+         [0.66667, -0.04286, 1.72973], [0.66667, -0.04286, 2.50000],
+         [0.66667, 1.50000, 1.72973], [0.66667, 1.50000, 2.50000],
+         [1.00000, -0.04286, 1.72973], [1.00000, -0.04286, 2.50000],
+         [1.00000, 1.50000, 1.72973], [1.00000, 1.50000, 2.50000]]
+    )
+    assert_almost_equal(redge, gold_re, 5)


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/tests/test_flagging_methods.py
--- a/yt/utilities/tests/test_flagging_methods.py
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -9,4 +9,4 @@
 def test_over_density():
     od_flag = flagging_method_registry["overdensity"](0.75) 
     criterion = (pf.h.grids[0]["Density"] > 0.75)
-    assert( np.all( od_flag(pf, pf.h.grids[0]) == criterion) )
+    assert( np.all( od_flag(pf.h.grids[0]) == criterion) )


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/utilities/tests/test_kdtrees.py
--- /dev/null
+++ b/yt/utilities/tests/test_kdtrees.py
@@ -0,0 +1,93 @@
+"""
+Unit test the kD trees in yt.
+
+Author: Stephen Skory <s at skory.us>
+Affiliation: U of Colorado
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 Stephen Skory.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+
+try:
+    from yt.utilities.kdtree.api import *
+except ImportError:
+    mylog.debug("The Fortran kD-Tree did not import correctly.")
+
+from yt.utilities.spatial import cKDTree
+
+def setup():
+    pass
+
+def test_fortran_tree():
+    r"""This test makes sure that the fortran kdtree is finding the correct
+    nearest neighbors.
+    """
+    # Four points.
+    try:
+        fKD.pos = np.empty((3, 4), dtype='float64', order='F')
+    except NameError:
+        return
+    # Make four points by hand that, in particular, will allow us to test
+    # the periodicity of the kdtree.
+    points = np.array([0.01, 0.5, 0.98, 0.99])
+    fKD.pos[0, :] = points
+    fKD.pos[1, :] = points
+    fKD.pos[2, :] = points
+    fKD.qv = np.empty(3, dtype='float64')
+    fKD.dist = np.empty(4, dtype='float64')
+    fKD.tags = np.empty(4, dtype='int64')
+    fKD.nn = 4
+    fKD.sort = True
+    create_tree(0)
+    # Now we check to make sure that we find the correct nearest neighbors,
+    # which get stored in dist and tags.
+    fKD.qv[:] = 0.999
+    find_nn_nearest_neighbors()
+    # Fix fortran counting.
+    fKD.tags -= 1
+    # Clean up before the tests.
+    free_tree(0)
+    # What the answers should be.
+    dist = np.array([2.43e-04, 3.63e-04, 1.083e-03, 7.47003e-01])
+    tags = np.array([3, 0, 2, 1], dtype='int64')
+    assert_array_almost_equal(fKD.dist, dist)
+    assert_array_equal(fKD.tags, tags)
+
+def test_cython_tree():
+    r"""This test makes sure that the cython kdtree is finding the correct
+    nearest neighbors.
+    """
+    # Four points.
+    pos = np.empty((4, 3), dtype='float64')
+    # Make four points by hand that, in particular, will allow us to test
+    # the periodicity of the kdtree.
+    points = np.array([0.01, 0.5, 0.98, 0.99])
+    pos[:, 0] = points
+    pos[:, 1] = points
+    pos[:, 2] = points
+    kdtree = cKDTree(pos, leafsize = 2)
+    qv = np.array([0.999]*3)
+    res = kdtree.query(qv, 4, period=[1.,1.,1])
+    # What the answers should be.
+    dist = np.array([2.43e-04, 3.63e-04, 1.083e-03, 7.47003e-01])
+    tags = np.array([3, 0, 2, 1], dtype='int64')
+    assert_array_almost_equal(res[0], dist)
+    assert_array_equal(res[1], tags)
+


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -29,6 +29,7 @@
     y_dict, \
     axis_names
 from .volume_rendering.api import off_axis_projection
+from yt.data_objects.image_array import ImageArray
 import _MPL
 import numpy as np
 import weakref
@@ -133,8 +134,9 @@
                              self.bounds, int(self.antialias),
                              self._period, int(self.periodic),
                              ).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
     def __setitem__(self, item, val):
         self.data[item] = val
@@ -145,6 +147,28 @@
             if f not in exclude:
                 self[f]
 
+    def _get_info(self, item):
+        info = {}
+        info['data_source'] = str(self.data_source)
+        info['axis'] = self.data_source.axis
+        info['field'] = str(item)
+        info['units'] = self.data_source.pf.field_info[item].get_units()
+        info['xlim'] = self.bounds[:2]
+        info['ylim'] = self.bounds[2:]
+        info['length_to_cm'] = self.data_source.pf['cm']
+        info['projected_units'] = \
+                self.data_source.pf.field_info[item].get_projected_units()
+        info['center'] = self.data_source.center
+        try:
+            info['coord'] = self.data_source.coord
+        except AttributeError:
+            pass
+        try:
+            info['weight_field'] = self.data_source.weight_field
+        except AttributeError:
+            pass
+        return info
+
     def convert_to_pixel(self, coords):
         r"""This function converts coordinates in code-space to pixel-space.
 
@@ -383,8 +407,9 @@
                                self.data_source[item],
                                self.buff_size[0], self.buff_size[1],
                                self.bounds).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
 
 class OffAxisProjectionFixedResolutionBuffer(FixedResolutionBuffer):
@@ -406,7 +431,8 @@
                                    weight=ds.weight_field, volume=ds.volume,
                                    no_ghost=ds.no_ghost, interpolated=ds.interpolated,
                                    north_vector=ds.north_vector)
-        self[item] = buff.swapaxes(0,1)
-        return buff
+        ia = ImageArray(buff.swapaxes(0,1), info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
 

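With these changes a fixed resolution buffer hands back ImageArray objects
instead of bare ndarrays, so the image carries its provenance with it. A
sketch (field name and bounds illustrative):

    from yt.visualization.fixed_resolution import FixedResolutionBuffer

    slc = pf.h.slice(0, 0.5, fields=["Density"])
    frb = FixedResolutionBuffer(slc, (0.0, 1.0, 0.0, 1.0), (512, 512))
    im = frb["Density"]  # an ImageArray, not a plain ndarray
    print im.info["axis"], im.info["units"]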

diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -379,7 +379,7 @@
                          take_log=True)
     """
     import matplotlib
-    from ._mpl_imports import *
+    from ._mpl_imports import FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
 
     # If this is rendered as log, then apply now.
     if take_log:
@@ -420,21 +420,22 @@
     else:
         dpi = None
 
-    if filename[-4:] == '.png':
-        suffix = ''
-    else:
+    suffix = os.path.splitext(filename)[1]
+
+    if suffix == '':
         suffix = '.png'
         filename = "%s%s" % (filename, suffix)
-    mylog.info("Saving plot %s", fn)
+    mylog.info("Saving plot %s", filename)
     if suffix == ".png":
         canvas = FigureCanvasAgg(fig)
     elif suffix == ".pdf":
         canvas = FigureCanvasPdf(fig)
     elif suffix in (".eps", ".ps"):
-        canvas = FigureCanvasPS
+        canvas = FigureCanvasPS(fig)
     else:
         mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
         canvas = FigureCanvasAgg(fig)
+
     canvas.print_figure(filename)
     return filename
 


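The suffix handling fix means the filename extension now selects the
matplotlib canvas, with ".png" appended when no extension is given. A
sketch, assuming this hunk lands in write_projection (as the surrounding
docstring suggests) and a 2D array `image` to render:

    from yt.visualization.image_writer import write_projection

    write_projection(image, "proj.pdf")  # FigureCanvasPdf is used
    write_projection(image, "proj")      # no suffix: saved as "proj.png"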
diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -7,6 +7,8 @@
 Affiliation: UC Berkeley
 Author: Stephen Skory <s at skory.us>
 Affiliation: UC San Diego
+Author: Anthony Scopatz <scopatz at gmail.com>
+Affiliation: The University of Chicago
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2008-2011 Matthew Turk, JS Oishi, Stephen Skory.  All Rights Reserved.
@@ -211,7 +213,7 @@
 class ContourCallback(PlotCallback):
     _type_name = "contour"
     def __init__(self, field, ncont=5, factor=4, clim=None,
-                 plot_args = None):
+                 plot_args = None, label = False, label_args = None):
         """
         annotate_contour(self, field, ncont=5, factor=4, take_log=False, clim=None,
                          plot_args = None):
@@ -230,6 +232,10 @@
         self.clim = clim
         if plot_args is None: plot_args = {'colors':'k'}
         self.plot_args = plot_args
+        self.label = label
+        if label_args is None:
+            label_args = {}
+        self.label_args = label_args
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -288,12 +294,16 @@
             self.clim = (np.log10(self.clim[0]), np.log10(self.clim[1]))
         
         if self.clim is not None: 
-            self.ncont = np.linspace(self.clim[0], self.clim[1], ncont)
+            self.ncont = np.linspace(self.clim[0], self.clim[1], self.ncont)
         
-        plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
+        cset = plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
+        
+        if self.label:
+            plot._axes.clabel(cset, **self.label_args)
+        
 
 class GridBoundaryCallback(PlotCallback):
     _type_name = "grids"
@@ -364,38 +374,23 @@
 
 class StreamlineCallback(PlotCallback):
     _type_name = "streamlines"
-    def __init__(self, field_x, field_y, factor=6.0, nx=16, ny=16,
-                 xstart=(0,1), ystart=(0,1), nsample=256,
-                 start_at_xedge=False, start_at_yedge=False,
-                 plot_args=None):
+    def __init__(self, field_x, field_y, factor = 16,
+                 density = 1, plot_args=None):
         """
-        annotate_streamlines(field_x, field_y, factor=6.0, nx=16, ny=16,
-                             xstart=(0,1), ystart=(0,1), nsample=256,
-                             start_at_xedge=False, start_at_yedge=False,
-                             plot_args=None):
+        annotate_streamlines(field_x, field_y, factor = 16,
+                             density = 1, plot_args=None):
 
         Add streamlines to any plot, using the *field_x* and *field_y*
-        from the associated data, using *nx* and *ny* starting points
-        that are bounded by *xstart* and *ystart*.  To begin
-        streamlines from the left edge of the plot, set
-        *start_at_xedge* to True; for the bottom edge, use
-        *start_at_yedge*.  A line with the qmean vector magnitude will
-        cover 1.0/*factor* of the image.
+        from the associated data, skipping every *factor* datapoints like
+        'quiver'. *density* sets how densely packed the streamlines are,
+        as in matplotlib's streamplot().
         """
         PlotCallback.__init__(self)
         self.field_x = field_x
         self.field_y = field_y
-        self.xstart = xstart
-        self.ystart = ystart
-        self.nsample = nsample
+        self.bv_x = self.bv_y = 0
         self.factor = factor
-        if start_at_xedge:
-            self.data_size = (1,ny)
-        elif start_at_yedge:
-            self.data_size = (nx,1)
-        else:
-            self.data_size = (nx,ny)
-        if plot_args is None: plot_args = {'color':'k', 'linestyle':'-'}
+        self.dens = density
+        if plot_args is None: plot_args = {}
         self.plot_args = plot_args
         
     def __call__(self, plot):
@@ -404,43 +399,26 @@
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         plot._axes.hold(True)
-        nx = plot.image._A.shape[0]
-        ny = plot.image._A.shape[1]
+        nx = plot.image._A.shape[0] / self.factor
+        ny = plot.image._A.shape[1] / self.factor
         pixX = _MPL.Pixelize(plot.data['px'],
                              plot.data['py'],
                              plot.data['pdx'],
                              plot.data['pdy'],
-                             plot.data[self.field_x],
+                             plot.data[self.field_x] - self.bv_x,
                              int(nx), int(ny),
-                           (x0, x1, y0, y1),)
+                           (x0, x1, y0, y1),).transpose()
         pixY = _MPL.Pixelize(plot.data['px'],
                              plot.data['py'],
                              plot.data['pdx'],
                              plot.data['pdy'],
-                             plot.data[self.field_y],
+                             plot.data[self.field_y] - self.bv_y,
                              int(nx), int(ny),
-                           (x0, x1, y0, y1),)
-        r0 = np.mgrid[self.xstart[0]*nx:self.xstart[1]*nx:self.data_size[0]*1j,
-                      self.ystart[0]*ny:self.ystart[1]*ny:self.data_size[1]*1j]
-        lines = np.zeros((self.nsample, 2, self.data_size[0], self.data_size[1]))
-        lines[0,:,:,:] = r0
-        mag = np.sqrt(pixX**2 + pixY**2)
-        scale = np.sqrt(nx*ny) / (self.factor * mag.mean())
-        dt = 1.0 / (self.nsample-1)
-        for i in range(1,self.nsample):
-            xt = lines[i-1,0,:,:]
-            yt = lines[i-1,1,:,:]
-            ix = np.maximum(np.minimum((xt).astype('int'), nx-1), 0)
-            iy = np.maximum(np.minimum((yt).astype('int'), ny-1), 0)
-            lines[i,0,:,:] = xt + dt * pixX[ix,iy] * scale
-            lines[i,1,:,:] = yt + dt * pixY[ix,iy] * scale
-        # scale into data units
-        lines[:,0,:,:] = lines[:,0,:,:] * (xx1 - xx0) / nx + xx0
-        lines[:,1,:,:] = lines[:,1,:,:] * (yy1 - yy0) / ny + yy0
-        for i in range(self.data_size[0]):
-            for j in range(self.data_size[1]):
-                plot._axes.plot(lines[:,0,i,j], lines[:,1,i,j],
-                                **self.plot_args)
+                           (x0, x1, y0, y1),).transpose()
+        X,Y = (np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
+        plot._axes.streamplot(X,Y, pixX, pixY, density = self.dens,
+                              **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
@@ -461,30 +439,6 @@
         plot._axes.set_xlabel(self.label)
         plot._axes.set_ylabel(self.label)
 
-class TimeCallback(PlotCallback):
-    _type_name = "time"
-    def __init__(self, format_code='10.7e'):
-        """
-        This annotates the plot with the current simulation time.
-        For now, the time is displayed in seconds.
-        *format_code* can be optionally set, allowing a custom 
-        c-style format code for the time display.
-        """
-        self.format_code = format_code
-        PlotCallback.__init__(self)
-    
-    def __call__(self, plot):
-        current_time = plot.pf.current_time/plot.pf['Time']
-        timestring = format(current_time,self.format_code)
-        base = timestring[:timestring.find('e')]
-        exponent = timestring[timestring.find('e')+1:]
-        if exponent[0] == '+':
-            exponent = exponent[1:]
-        timestring = r'$t\/=\/'+base+''+r'\times\,10^{'+exponent+r'}\, \rm{s}$'
-        from mpl_toolkits.axes_grid1.anchored_artists import AnchoredText
-        at = AnchoredText(timestring, prop=dict(size=12), frameon=True, loc=4)
-        plot._axes.add_artist(at)
-
 def get_smallest_appropriate_unit(v, pf):
     max_nu = 1e30
     good_u = None
@@ -679,6 +633,9 @@
         y0, y1 = plot.ylim
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
+
+        extent = [xx0,xx1,yy0,yy1]
+
         plot._axes.hold(True)
 
         px_index = x_dict[plot.data.axis]
@@ -708,7 +665,7 @@
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, len(self.clumps)+1,
-                                     **self.plot_args)
+                                     extent=extent,**self.plot_args)
         plot._axes.hold(False)
 
 class ArrowCallback(PlotCallback):
@@ -778,13 +735,18 @@
         self.plot_args = plot_args
 
     def __call__(self, plot):
-        if len(self.pos) == 3:
+        xx0, xx1 = plot._axes.get_xlim()
+        yy0, yy1 = plot._axes.get_ylim()
+        if np.array(self.pos).shape == (3,):
             pos = (self.pos[x_dict[plot.data.axis]],
                    self.pos[y_dict[plot.data.axis]])
-        else: pos = self.pos
+        elif np.array(self.pos).shape == (2,):
+            pos = self.pos
         x,y = self.convert_to_plot(plot, pos)
         plot._axes.hold(True)
-        plot._axes.plot((x,),(y,),self.marker, **self.plot_args)
+        plot._axes.scatter(x,y, marker = self.marker, **self.plot_args)
+        plot._axes.set_xlim(xx0,xx1)
+        plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
 
 class SphereCallback(PlotCallback):
@@ -850,6 +812,7 @@
 
     def __call__(self, plot):
         from matplotlib.patches import Circle
+        num = len(self.hop_output[:self.max_number])
         for halo in self.hop_output[:self.max_number]:
             size = halo.get_size()
             if size < self.min_size or size > self.max_size: continue
@@ -866,18 +829,19 @@
             (xi, yi) = (x_dict[plot.data.axis], y_dict[plot.data.axis])
 
             (center_x,center_y) = self.convert_to_plot(plot,(center[xi], center[yi]))
-            cir = Circle((center_x, center_y), radius, fill=False)
+            color = np.ones(3) * (0.4 * (num - halo.id)/ num) + 0.6
+            cir = Circle((center_x, center_y), radius, fill=False, color=color)
             plot._axes.add_patch(cir)
             if self.annotate:
                 if self.print_halo_size:
-                    plot._axes.text(center_x, center_y, "%s" % size,
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % size,
+                    fontsize=self.font_size, color=color)
                 elif self.print_halo_mass:
-                    plot._axes.text(center_x, center_y, "%s" % halo.total_mass(),
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % halo.total_mass(),
+                    fontsize=self.font_size, color=color)
                 else:
-                    plot._axes.text(center_x, center_y, "%s" % halo.id,
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % halo.id,
+                    fontsize=self.font_size, color=color)
 
 class HopParticleCallback(PlotCallback):
     _type_name = "hop_particles"
@@ -1124,3 +1088,170 @@
     def __call__(self,plot):
         plot._axes.set_title(self.title)
 
+class FlashRayDataCallback(PlotCallback):
+    _type_name = "flash_ray_data"
+    def __init__(self, cmap_name='bone', sample=None):
+        """ 
+        annotate_flash_ray_data(cmap_name='bone', sample=None)
+
+        Adds ray trace data to the plot.  *cmap_name* is the name of the color map 
+        ('bone', 'jet', 'hot', etc).  *sample* dictates the amount of down sampling 
+        to do to prevent all of the rays from being  plotted.  This may be None 
+        (plot all rays, default), an integer (step size), or a slice object.
+        """
+        self.cmap_name = cmap_name
+        self.sample = sample if isinstance(sample, slice) else slice(None, None, sample)
+
+    def __call__(self, plot):
+        ray_data = plot.data.pf._handle["RayData"][:]
+        idx = ray_data[:,0].argsort(kind="mergesort")
+        ray_data = ray_data[idx]
+
+        tags = ray_data[:,0]
+        coords = ray_data[:,1:3]
+        power = ray_data[:,4]
+        power /= power.max()
+        cx, cy = self.convert_to_plot(plot, coords.T)
+        coords[:,0], coords[:,1] = cx, cy
+        splitidx = np.argwhere(0 < (tags[1:] - tags[:-1])) + 1
+        coords = np.split(coords, splitidx.flat)[self.sample]
+        power = np.split(power, splitidx.flat)[self.sample]
+        cmap = matplotlib.cm.get_cmap(self.cmap_name)
+
+        plot._axes.hold(True)
+        colors = [cmap(p.max()) for p in power]
+        lc = matplotlib.collections.LineCollection(coords, colors=colors)
+        plot._axes.add_collection(lc)
+        plot._axes.hold(False)
+
+
+class TimestampCallback(PlotCallback):
+    _type_name = "timestamp"
+    _time_conv = {
+          'as': 1e-18,
+          'attosec': 1e-18,
+          'attosecond': 1e-18,
+          'attoseconds': 1e-18,
+          'fs': 1e-15,
+          'femtosec': 1e-15,
+          'femtosecond': 1e-15,
+          'femtoseconds': 1e-15,
+          'ps': 1e-12,
+          'picosec': 1e-12,
+          'picosecond': 1e-12,
+          'picoseconds': 1e-12,
+          'ns': 1e-9,
+          'nanosec': 1e-9,
+          'nanosecond':1e-9,
+          'nanoseconds' : 1e-9,
+          'us': 1e-6,
+          'microsec': 1e-6,
+          'microsecond': 1e-6,
+          'microseconds': 1e-6,
+          'ms': 1e-3,
+          'millisec': 1e-3,
+          'millisecond': 1e-3,
+          'milliseconds': 1e-3,
+          's': 1.0,
+          'sec': 1.0,
+          'second':1.0,
+          'seconds': 1.0,
+          'm': 60.0,
+          'min': 60.0,
+          'minute': 60.0,
+          'minutes': 60.0,
+          'h': 3600.0,
+          'hour': 3600.0,
+          'hours': 3600.0,
+          'd': 86400.0,
+          'day': 86400.0,
+          'days': 86400.0,
+          'y': 86400.0*365.25,
+          'year': 86400.0*365.25,
+          'years': 86400.0*365.25,
+          'ev': 1e-9 * 7.6e-8 / 6.03,
+          'kev': 1e-12 * 7.6e-8 / 6.03,
+          'mev': 1e-15 * 7.6e-8 / 6.03,
+          }
+    _bbox_dict = {'boxstyle': 'square,pad=0.6', 'fc': 'white', 'ec': 'black', 'alpha': 1.0}
+
+    def __init__(self, x, y, units=None, format="{time:.3G} {units}", normalized=False, 
+                 bbox_dict=None, **kwargs):
+        """ 
+        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}",
+                           normalized=False, bbox_dict=None, **kwargs)
+
+        Adds the current time to the plot at point given by *x* and *y*.  If *units* 
+        is given ('s', 'ms', 'ns', etc), it will convert the time to this basis.  If 
+        *units* is None, it will attempt to figure out the correct value by which to 
+        scale.  The *format* keyword is a template string that will be evaluated and 
+        displayed on the plot.  If *normalized* is true, *x* and *y* are interpreted 
+        as normalized plot coordinates (0,0 is lower-left and 1,1 is upper-right) 
+        otherwise *x* and *y* are assumed to be in plot coordinates. The *bbox_dict* 
+        is an optional dict of arguments for the bbox that frames the timestamp, see 
+        matplotlib's text annotation guide for more details. All other *kwargs* will 
+        be passed to the text() method on the plot axes.  See matplotlib's text() 
+        functions for more information.
+        """
+        self.x = x
+        self.y = y
+        self.format = format
+        self.units = units
+        self.normalized = normalized
+        if bbox_dict is not None:
+            self.bbox_dict = bbox_dict
+        else:
+            self.bbox_dict = self._bbox_dict
+        self.kwargs = {'color': 'w'}
+        self.kwargs.update(kwargs)
+
+    def __call__(self, plot):
+        if self.units is None:
+            t = plot.data.pf.current_time * plot.data.pf['Time']
+            scale_keys = ['as', 'fs', 'ps', 'ns', 'us', 'ms', 's']
+            self.units = 's'
+            for k in scale_keys:
+                if t < self._time_conv[k]:
+                    break
+                self.units = k
+        t = plot.data.pf.current_time * plot.data.pf['Time'] 
+        t /= self._time_conv[self.units.lower()]
+        if self.units == 'us':
+            self.units = '$\\mu s$'
+        s = self.format.format(time=t, units=self.units)
+        plot._axes.hold(True)
+        if self.normalized:
+            plot._axes.text(self.x, self.y, s, horizontalalignment='center',
+                            verticalalignment='center', 
+                            transform = plot._axes.transAxes, bbox=self.bbox_dict)
+        else:
+            plot._axes.text(self.x, self.y, s, bbox=self.bbox_dict, **self.kwargs)
+        plot._axes.hold(False)
+
+
+class MaterialBoundaryCallback(ContourCallback):
+    _type_name = "material_boundary"
+    def __init__(self, field='targ', ncont=1, factor=4, clim=(0.9, 1.0), **kwargs):
+        """ 
+        annotate_material_boundary(self, field='targ', ncont=1, factor=4, 
+                                   clim=(0.9, 1.0), **kwargs):
+
+        Add the limiting contours of *field* to the plot.  Nominally, *field* is 
+        the target material but may be any other field present in the hierarchy.
+        The number of contours generated is given by *ncont*, *factor* governs 
+        the number of points used in the interpolation, and *clim* gives the 
+        (lower, upper) limits for contouring.  For this to truly be the boundary,
+        *clim* should be close to the edge.  For example, the default is (0.9, 1.0)
+        for 'targ' which is defined on the range [0.0, 1.0].  All other *kwargs* 
+        will be passed to the contour() method on the plot axes.  See matplotlib
+        for more information.
+        """
+        plot_args = {'colors': 'w'}
+        plot_args.update(kwargs)
+        super(MaterialBoundaryCallback, self).__init__(field=field, ncont=ncont,
+                                                       factor=factor, clim=clim,
+                                                       plot_args=plot_args)
+
+    def __call__(self, plot):
+        super(MaterialBoundaryCallback, self).__call__(plot)
+

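A sketch tying the callback changes together (assumes a loaded pf): the
streamlines annotation now rides on matplotlib's streamplot, and the new
timestamp annotation replaces the removed TimeCallback.

    from yt.mods import SlicePlot

    p = SlicePlot(pf, "z", "Density")
    p.annotate_streamlines("x-velocity", "y-velocity", density=2)
    p.annotate_timestamp(0.5, 0.95, normalized=True)  # auto-picks units
    p.save()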

diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -60,6 +60,10 @@
     axis_labels
 from yt.utilities.math_utils import \
     ortho_find
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    GroupOwnership
+from yt.data_objects.time_series import \
+    TimeSeriesData
 
 def invalidate_data(f):
     @wraps(f)
@@ -209,7 +213,7 @@
     _vector_info = None
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
-                 periodic=True, origin='center-window', oblique=False):
+                 periodic=True, origin='center-window', oblique=False, fontsize=15):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
         
@@ -239,6 +243,10 @@
             rendering is used during data deposition.
 
         """
+        if not hasattr(self, "pf"):
+            self.pf = data_source.pf
+            ts = self._initialize_dataset(self.pf) 
+            self.ts = ts
         self._initfinished = False
         self.center = None
         self.plots = {}
@@ -249,11 +257,40 @@
         self.antialias = True
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
+        self.fontsize = fontsize
         if self.data_source.center is not None and oblique == False:
             center = [self.data_source.center[i] for i in range(len(self.data_source.center)) if i != self.data_source.axis]
             self.set_center(center)
         self._initfinished = True
 
+    def _initialize_dataset(self, ts):
+        if not isinstance(ts, TimeSeriesData):
+            if not iterable(ts): ts = [ts]
+            ts = TimeSeriesData(ts)
+        return ts
+
+    def __iter__(self):
+        for pf in self.ts:
+            mylog.warning("Switching to %s", pf)
+            self._switch_pf(pf)
+            yield self
+
+    def piter(self, *args, **kwargs):
+        for pf in self.ts.piter(*args, **kwargs):
+            self._switch_pf(pf)
+            yield self
+
+    def _switch_pf(self, new_pf):
+        ds = self.data_source
+        name = ds._type_name
+        kwargs = dict((n, getattr(ds, n)) for n in ds._con_args)
+        new_ds = getattr(new_pf.h, name)(**kwargs)
+        self.pf = new_pf
+        self.data_source = new_ds
+        self._data_valid = self._plot_valid = False
+        self._recreate_frb()
+        self._setup_plots()
+
     def __getitem__(self, item):
         return self.plots[item]
 
@@ -273,7 +310,6 @@
             self._frb._get_data_source_fields()
         else:
             for key in old_fields: self._frb[key]
-        self.pf = self._frb.pf
         self._data_valid = True
         
     def _setup_plots(self):
@@ -695,6 +731,15 @@
 
     """
     _current_field = None
+    _frb_generator = None
+    _plot_type = None
+
+    def __init__(self, *args, **kwargs):
+        if self._frb_generator is None:
+            self._frb_generator = kwargs.pop("frb_generator")
+        if self._plot_type is None:
+            self._plot_type = kwargs.pop("plot_type")
+        PWViewer.__init__(self, *args, **kwargs)
 
     def _setup_plots(self):
         if self._current_field is not None:
@@ -758,8 +803,10 @@
                 labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
                           r'$\rm{Image\/y'+axes_unit_label+'}$']
 
-            self.plots[f].axes.set_xlabel(labels[0])
-            self.plots[f].axes.set_ylabel(labels[1])
+            self.plots[f].axes.set_xlabel(labels[0],fontsize=self.fontsize)
+            self.plots[f].axes.set_ylabel(labels[1],fontsize=self.fontsize)
+
+            self.plots[f].axes.tick_params(labelsize=self.fontsize)
 
             field_name = self.data_source.pf.field_info[f].display_name
 
@@ -774,17 +821,18 @@
             except ParseFatalException, err:
                 raise YTCannotParseFieldDisplayName(f,field_name,str(err))
 
-            try:
-                parser.parse(r'$'+md['units']+r'$')
-            except ParseFatalException, err:
-                raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
-
             if md['units'] == None or md['units'] == '':
                 label = field_name
             else:
+                try:
+                    parser.parse(r'$'+md['units']+r'$')
+                except ParseFatalException, err:
+                    raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
                 label = field_name+r'$\/\/('+md['units']+r')$'
 
-            self.plots[f].cb.set_label(label)
+            self.plots[f].cb.set_label(label,fontsize=self.fontsize)
+
+            self.plots[f].cb.ax.tick_params(labelsize=self.fontsize)
 
             self.run_callbacks(f)
 
@@ -845,17 +893,22 @@
         >>> slc.save(mpl_kwargs={'bbox_inches':'tight'})
 
         """
+        names = []
+        if mpl_kwargs is None: mpl_kwargs = {}
         if name == None:
             name = str(self.pf)
-        elif name.endswith('.png'):
-            return v.save(name)
-        if mpl_kwargs is None: mpl_kwargs = {}
+        suffix = os.path.splitext(name)[1]
+        if suffix != '':
+            for k, v in self.plots.iteritems():
+                names.append(v.save(name,mpl_kwargs))
+            return names
         axis = axis_names[self.data_source.axis]
         weight = None
         type = self._plot_type
         if type in ['Projection','OffAxisProjection']:
             weight = self.data_source.weight_field
-        names = []
+        if 'Cutting' in self.data_source.__class__.__name__:
+            type = 'OffAxisSlice'
         for k, v in self.plots.iteritems():
             if axis:
                 n = "%s_%s_%s_%s" % (name, type, axis, k)
@@ -906,7 +959,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 origin='center-window'):
+                 origin='center-window', fontsize=15):
         r"""Creates a slice plot from a parameter file
         
         Given a pf object, an axis to slice along, and a field name
@@ -962,6 +1015,8 @@
              to the bottom-left hand corner of the simulation domain, 'center-domain',
             corresponding to the center of the simulation domain, or 'center-window' for
              the center of the plot window.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
              
         Examples
         --------
@@ -973,8 +1028,12 @@
         >>> p.save('sliceplot')
         
         """
+        # This will handle time series data and controllers
+        ts = self._initialize_dataset(pf) 
+        self.ts = ts
+        pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds,center) = GetBoundsAndCenter(axis, center, width, pf)
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
         slc = pf.h.slice(axis, center[axis], fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
@@ -984,7 +1043,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window'):
+                 weight_field=None, max_level=None, origin='center-window', fontsize=15):
         r"""Creates a projection plot from a parameter file
         
         Given a pf object, an axis to project along, and a field name
@@ -1044,6 +1103,8 @@
             The name of the weighting field.  Set to None for no weight.
         max_level: int
             The maximum level to project to.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         
         Examples
         --------
@@ -1055,8 +1116,11 @@
         >>> p.save('sliceplot')
         
         """
+        ts = self._initialize_dataset(pf) 
+        self.ts = ts
+        pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds,center) = GetBoundsAndCenter(axis,center,width,pf)
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
         proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1066,7 +1130,7 @@
     _frb_generator = ObliqueFixedResolutionBuffer
 
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
-                 axes_unit=None, north_vector=None):
+                 axes_unit=None, north_vector=None, fontsize=15):
         r"""Creates an off axis slice plot from a parameter file
 
         Given a pf object, a normal vector defining a slicing plane, and
@@ -1103,7 +1167,8 @@
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
             set, an arbitrary grid-aligned north-vector is chosen.
-
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         """
         (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
         cutting = pf.h.cutting(normal,center,fields=fields,north_vector=north_vector)
@@ -1142,7 +1207,7 @@
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
                  depth=(1,'unitary'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 
-                 le=None, re=None, interpolated=False):
+                 le=None, re=None, interpolated=False, fontsize=15):
         r"""Creates an off axis projection plot from a parameter file
 
         Given a pf object, a normal vector to project along, and
@@ -1220,6 +1285,7 @@
     _ext_widget_id = None
     _current_field = None
     _widget_name = "plot_window"
+    _frb_generator = FixedResolutionBuffer
 
     def _setup_plots(self):
         from yt.gui.reason.bottle_mods import PayloadHandler
@@ -1397,24 +1463,25 @@
             self.cax = self.figure.add_axes(caxrect)
             
     def save(self, name, mpl_kwargs, canvas = None):
-        if name[-4:] == '.png':
-            suffix = ''
+        suffix = os.path.splitext(name)[1]
+        
+        if suffix == '':
+            suffix = '.png'
+            name = "%s%s" % (name, suffix)
+        mylog.info("Saving plot %s", name)
+        if suffix == ".png":
+            canvas = FigureCanvasAgg(self.figure)
+        elif suffix == ".pdf":
+            canvas = FigureCanvasPdf(self.figure)
+        elif suffix in (".eps", ".ps"):
+            canvas = FigureCanvasPS(self.figure)
         else:
-            suffix = '.png'
-        fn = "%s%s" % (name, suffix)
-        mylog.info("Saving plot %s", fn)
-        if canvas is None:
-            if suffix == ".png":
-                canvas = FigureCanvasAgg(self.figure)
-            elif suffix == ".pdf":
-                canvas = FigureCanvasPdf(self.figure)
-            elif suffix in (".eps", ".ps"):
-                canvas = FigureCanvasPS
-            else:
-                mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
-                canvas = FigureCanvasAgg(self.figure)
-        canvas.print_figure(fn,**mpl_kwargs)
-        return fn
+            mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
+            canvas = FigureCanvasAgg(self.figure)
+
+
+        canvas.print_figure(name,**mpl_kwargs)
+        return name
 
     def _get_best_layout(self, size):
         aspect = 1.0*size[0]/size[1]
@@ -1476,4 +1543,3 @@
                                       norm = norm, vmin = self.zmin, 
                                       vmax = self.zmax, cmap = cmap)
         self.image.axes.ticklabel_format(scilimits=(-4,3))
-
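Taken together, the plot_window changes let a single plot object walk an entire
time series, take a fontsize keyword, and choose its output canvas from the
filename suffix. A minimal sketch of the intended workflow, assuming
TimeSeriesData.from_filenames accepts the glob shown and that the outputs carry
a 'Density' field (both are illustrative):

>>> from yt.mods import SlicePlot
>>> from yt.data_objects.time_series import TimeSeriesData
>>> ts = TimeSeriesData.from_filenames("DD*/DD*.hierarchy")
>>> slc = SlicePlot(ts, 'x', 'Density', fontsize=20)
>>> for p in slc.piter():    # parallel-aware iteration over the outputs
...     p.save()             # auto-named PNG per output
>>> slc.save('final.pdf')    # an explicit suffix picks the matching canvas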


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -118,7 +118,9 @@
         if length is None:
             length = np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
         self.length = length
-        self.steps = int(length/dx)
+        self.steps = int(length/dx)+1
+        # Fix up the dx.
+        self.dx = 1.0*self.length/self.steps
         self.streamlines = np.zeros((self.N,self.steps,3), dtype='float64')
         self.magnitudes = None
         if self.get_magnitude:
@@ -146,7 +148,8 @@
     @parallel_passthrough
     def _finalize_parallel(self,data):
         self.streamlines = self.comm.mpi_allreduce(self.streamlines, op='sum')
-        self.magnitudes = self.comm.mpi_allreduce(self.magnitudes, op='sum')
+        if self.get_magnitude:
+            self.magnitudes = self.comm.mpi_allreduce(self.magnitudes, op='sum')
         
     def _integrate_through_brick(self, node, stream, step,
                                  periodic=False, mag=None):
@@ -205,5 +208,6 @@
         >>> matplotlib.pylab.semilogy(stream['t'], stream['Density'], '-x')
         
         """
-        return AMRStreamlineBase(self.streamlines[streamline_id], pf=self.pf)
+        return self.pf.h.streamline(self.streamlines[streamline_id],
+                                    length = self.length)
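The step-count fix rounds up instead of truncating and then recomputes dx, so
that steps * dx reproduces the requested integration length exactly; a quick
sketch of the arithmetic with illustrative numbers:

>>> length, dx = 1.0, 0.3
>>> steps = int(length/dx) + 1    # 4 steps rather than a truncated 3
>>> dx = 1.0*length/steps         # 0.25, the fixed-up step size
>>> steps * dx
1.0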
         


diff -r 923899f7509b566ba4193bfea7a5deb918e630fd -r 5c39b3425b478d238602b80fb730c3a2bb8f9ea7 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1449,7 +1449,7 @@
             yield self.snapshot()
 
 def allsky_projection(pf, center, radius, nside, field, weight = None,
-                      inner_radius = 10, rotation = None):
+                      inner_radius = 10, rotation = None, source = None):
     r"""Project through a parameter file with an all-sky HEALpix
     decomposition, and return the image plane.
 
@@ -1484,6 +1484,9 @@
         If supplied, the vectors will be rotated by this.  You can construct
         this by, for instance, calling np.array([v1,v2,v3]) where those are the
         three reference planes of an orthogonal frame (see ortho_find).
+    source : data container, default None
+        If supplied, this gives the data source from which the all-sky
+        projection pulls its data.
 
     Returns
     -------
@@ -1527,12 +1530,20 @@
     positions += inner_radius * dx * vs
     vs *= radius
     uv = np.ones(3, dtype='float64')
-    grids = pf.h.sphere(center, radius)._grids
+    if source is not None:
+        grids = source._grids
+    else:
+        grids = pf.h.sphere(center, radius)._grids
     sampler = ProjectionSampler(positions, vs, center, (0.0, 0.0, 0.0, 0.0),
                                 image, uv, uv, np.zeros(3, dtype='float64'))
     pb = get_pbar("Sampling ", len(grids))
     for i,grid in enumerate(grids):
-        data = [grid[field] * grid.child_mask.astype('float64')
+        if source is not None:
+            data = [grid[field] * source._get_cut_mask(grid) * \
+                grid.child_mask.astype('float64')
+                for field in fields]
+        else:
+            data = [grid[field] * grid.child_mask.astype('float64')
                 for field in fields]
         pg = PartitionedGrid(
             grid.id, data,
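The new source keyword lets allsky_projection sample an arbitrary data
container, applying that container's cut mask grid by grid, rather than always
using the enclosing sphere. A minimal sketch, assuming a loaded dataset (the
output name, center, radius, and disk geometry are all illustrative):

>>> from yt.mods import load
>>> from yt.visualization.volume_rendering.camera import allsky_projection
>>> pf = load("DD0010/DD0010")
>>> disk = pf.h.disk([0.5]*3, [0.0, 0.0, 1.0], 0.2, 0.05)
>>> image = allsky_projection(pf, [0.5]*3, 0.5, 64, "Density", source=disk)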



https://bitbucket.org/yt_analysis/yt/changeset/588ee2163479/
changeset:   588ee2163479
branch:      yt
user:        MatthewTurk
date:        2012-11-14 21:47:28
summary:     Merged in jzuhone/yt (pull request #339)
affected #:  5 files

diff -r b0dcdb34be9ee1eebdab4008ffdea8ca39e58f48 -r 588ee216347993c8b57a2be32f5fa9f2c85e1eb9 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -326,7 +326,12 @@
         refine_by = None
         if refine_by is None: refine_by = 2
         self.refine_by = refine_by
-        self.dimensionality = 3
+        dimensionality = 3
+        if grid['dimensions'][2] == 1 :
+            dimensionality = 2
+        if grid['dimensions'][1] == 1 :
+            dimensionality = 1
+        self.dimensionality = dimensionality
         self.current_time = grid["time"]
         self.unique_identifier = self._handle.__hash__()
         self.cosmological_simulation = False
@@ -334,7 +339,8 @@
         self.field_ordering = 'fortran'
         self.boundary_conditions = [1]*6
 
-        self.nvtk = int(np.product(self.domain_dimensions/(grid['dimensions']-1)))
+        ND = self.dimensionality
+        self.nvtk = int(np.product(self.domain_dimensions[:ND]/(grid['dimensions'][:ND]-1)))
 
         self.current_redshift = self.omega_lambda = self.omega_matter = \
             self.hubble_constant = self.cosmological_simulation = 0.0
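The dimensionality is now read off the per-block 'dimensions' entry (corner
counts, hence the -1 below), and the VTK-file count multiplies only over the
active axes. A sketch of that logic for an illustrative 2D domain split into
four blocks:

>>> import numpy as np
>>> dims = np.array([65, 65, 1])     # block corner counts from the vtk header
>>> ND = 3
>>> if dims[2] == 1: ND = 2
>>> if dims[1] == 1: ND = 1
>>> ND
2
>>> domain_dimensions = np.array([128, 128, 1])
>>> int(np.product(domain_dimensions[:ND]/(dims[:ND]-1)))
4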


diff -r b0dcdb34be9ee1eebdab4008ffdea8ca39e58f48 -r 588ee216347993c8b57a2be32f5fa9f2c85e1eb9 yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -26,6 +26,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import numpy as np
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \
@@ -36,6 +37,8 @@
     ValidateGridType, \
     NullFunc, \
     TranslationFunc
+from yt.utilities.physical_constants import \
+    kboltz,mh
 import yt.data_objects.universal_fields
 
 log_translation_dict = {}
@@ -44,10 +47,7 @@
                     "Pressure": "pressure",
                     "x-velocity": "velocity_x",
                     "y-velocity": "velocity_y",
-                    "z-velocity": "velocity_z",
-                    "mag_field_x": "cell_centered_B_x ",
-                    "mag_field_y": "cell_centered_B_y ",
-                    "mag_field_z": "cell_centered_B_z "}
+                    "z-velocity": "velocity_z"}
 
 AthenaFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = AthenaFieldInfo.add_field
@@ -86,3 +86,30 @@
 for f,v in translation_dict.items():
     add_field(f, TranslationFunc(v), take_log=False)
 
+def _Temperature(fields, data):
+    if data.has_field_parameter("mu") :
+        mu = data.get_field_parameter("mu")
+    else:
+        mu = 0.6
+    return mu*mh*data["Pressure"]/data["Density"]/kboltz
+add_field("Temperature", function=_Temperature, take_log=False,
+          units=r"\rm{K}")
+
+def _Bx(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_x']*factor
+add_field("Bx", function=_Bx, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_x")
+
+def _By(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_y']*factor
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_y")
+
+def _Bz(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_z']*factor
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_z")
+
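The new derived fields implement T = mu * m_H * P / (rho * k_B), with mu
falling back to 0.6 when no field parameter is set, and rescale Athena's
code-unit magnetic fields (which absorb a factor of 1/sqrt(4 pi)) into Gauss.
A sketch of the arithmetic, using approximate CGS constants and illustrative
field values:

>>> import numpy as np
>>> kboltz, mh = 1.38e-16, 1.67e-24    # erg/K and g, approximate
>>> P, rho, mu = 1.0e-12, 1.0e-24, 0.6 # illustrative Pressure, Density, mu
>>> round(mu*mh*P/rho/kboltz)          # Temperature in Kelvin
7261.0
>>> Bx = 2.0e-7*np.sqrt(4.*np.pi)      # cell_centered_B_x of 2e-7 -> Gauss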


diff -r b0dcdb34be9ee1eebdab4008ffdea8ca39e58f48 -r 588ee216347993c8b57a2be32f5fa9f2c85e1eb9 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -36,7 +36,7 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
-    kboltz
+    kboltz, mh
 KnownFLASHFields = FieldInfoContainer()
 add_flash_field = KnownFLASHFields.add_field

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


