[yt-svn] commit/yt-3.0: 60 new changesets

commits-noreply at bitbucket.org
Wed Oct 9 04:57:35 PDT 2013


60 new commits in yt-3.0:

https://bitbucket.org/yt_analysis/yt-3.0/commits/c307f12e6dee/
Changeset:   c307f12e6dee
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-04 20:53:50
Summary:     Enable passing a class and arguments so that difficult files can be loaded.
Affected #:  1 file

diff -r 73a7cf95d67d589de7b672580bd98852a2f005e5 -r c307f12e6dee208deeeee978a15b4c6c77ebf27b yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -252,26 +252,33 @@
     yield
     os.chdir(oldcwd)
 
-def can_run_pf(pf_fn):
+def can_run_pf(pf_fn, file_check):
     if isinstance(pf_fn, StaticOutput):
         return AnswerTestingTest.result_storage is not None
     path = ytcfg.get("yt", "test_data_dir")
     if not os.path.isdir(path):
         return False
     with temp_cwd(path):
+        if file_check:
+            return os.path.isfile(pf_fn)
         try:
             load(pf_fn)
         except YTOutputNotIdentified:
             return False
     return AnswerTestingTest.result_storage is not None
 
-def data_dir_load(pf_fn):
+def data_dir_load(pf_fn, cls = None, args = None, kwargs = None):
     path = ytcfg.get("yt", "test_data_dir")
     if isinstance(pf_fn, StaticOutput): return pf_fn
     if not os.path.isdir(path):
         return False
     with temp_cwd(path):
-        pf = load(pf_fn)
+        if cls is None:
+            pf = load(pf_fn)
+        else:
+            args = args or ()
+            kwargs = kwargs or {}
+            pf = cls(pf_fn, *args, **kwargs)
         pf.h
         return pf
 
@@ -610,14 +617,14 @@
         assert compare_images(fns[0], fns[1], 10**(-self.decimals)) == None
         for fn in fns: os.remove(fn)
 
-def requires_pf(pf_fn, big_data = False):
+def requires_pf(pf_fn, big_data = False, file_check = False):
     def ffalse(func):
         return lambda: None
     def ftrue(func):
         return func
     if run_big_data == False and big_data == True:
         return ffalse
-    elif not can_run_pf(pf_fn):
+    elif not can_run_pf(pf_fn, file_check):
         return ffalse
     else:
         return ftrue
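
For reference, a hedged sketch of how the extended data_dir_load signature is
meant to be used (the dataset path and keyword arguments here mirror the Tipsy
tests added in the next changeset):

# Illustrative use of the new signature: pass the StaticOutput subclass and
# its constructor arguments explicitly when load() cannot identify the file.
from yt.utilities.answer_testing.framework import data_dir_load, requires_pf
from yt.frontends.sph.api import TipsyStaticOutput

tipsy = "halo1e11_run1.00400/halo1e11_run1.00400"

@requires_pf(tipsy, file_check = True)   # only check that the file exists
def test_load_tipsy():
    pf = data_dir_load(tipsy, TipsyStaticOutput, (), {"n_ref": 64})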


https://bitbucket.org/yt_analysis/yt-3.0/commits/1aa9f0a4d412/
Changeset:   1aa9f0a4d412
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-04 20:54:20
Summary:     Adding some Tipsy tests.
Affected #:  2 files

diff -r c307f12e6dee208deeeee978a15b4c6c77ebf27b -r 1aa9f0a4d4122a00dfcbb70f4a86370cffa5d467 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -436,7 +436,10 @@
 
         self._unit_base = unit_base or {}
         self._cosmology_parameters = cosmology_parameters
+        if parameter_file is not None:
+            parameter_file = os.path.abspath(parameter_file)
         self._param_file = parameter_file
+        filename = os.path.abspath(filename)
         super(TipsyStaticOutput, self).__init__(filename, data_style)
 
     def __repr__(self):

diff -r c307f12e6dee208deeeee978a15b4c6c77ebf27b -r 1aa9f0a4d4122a00dfcbb70f4a86370cffa5d467 yt/frontends/sph/tests/test_tipsy.py
--- /dev/null
+++ b/yt/frontends/sph/tests/test_tipsy.py
@@ -0,0 +1,89 @@
+"""
+Tipsy tests using the AGORA dataset
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load, \
+    PixelizedProjectionValuesTest, \
+    FieldValuesTest
+from yt.frontends.sph.api import TipsyStaticOutput
+
+_fields = (("deposit", "all_density"),
+           ("deposit", "all_count"),
+           ("deposit", "DarkMatter_density"),
+)
+
+pkdgrav = "halo1e11_run1.00400/halo1e11_run1.00400"
+@requires_pf(pkdgrav, file_check = True)
+def test_pkdgrav():
+    cosmology_parameters = dict(current_redshift = 0.0,
+                                omega_lambda = 0.728,
+                                omega_matter = 0.272,
+                                hubble_constant = 0.702)
+    kwargs = dict(endian="<",
+                  field_dtypes = {"Coordinates": "d"},
+                  cosmology_parameters = cosmology_parameters,
+                  unit_base = {'mpchcm': 1.0/60.0},
+                  n_ref = 64)
+    pf = data_dir_load(pkdgrav, TipsyStaticOutput, (), kwargs)
+    yield assert_equal, str(pf), "halo1e11_run1.00400"
+    dso = [ None, ("sphere", ("c", (0.3, 'unitary')))]
+    dd = pf.h.all_data()
+    yield assert_equal, dd["Coordinates"].shape, (26847360, 3)
+    tot = sum(dd[ptype,"Coordinates"].shape[0]
+              for ptype in pf.particle_types if ptype != "all")
+    yield assert_equal, tot, 26847360
+    for field in _fields:
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        pf, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf, field, ds)
+
+gasoline = "agora_1e11.00400/agora_1e11.00400"
+@requires_pf(gasoline, file_check = True)
+def test_gasoline():
+    cosmology_parameters = dict(current_redshift = 0.0,
+                                omega_lambda = 0.728,
+                                omega_matter = 0.272,
+                                hubble_constant = 0.702)
+    kwargs = dict(cosmology_parameters = cosmology_parameters,
+                  unit_base = {'mpchcm': 1.0/60.0},
+                  n_ref = 64)
+    pf = data_dir_load(gasoline, TipsyStaticOutput, (), kwargs)
+    yield assert_equal, str(pf), "agora_1e11.00400"
+    dso = [ None, ("sphere", ("c", (0.3, 'unitary')))]
+    dd = pf.h.all_data()
+    yield assert_equal, dd["Coordinates"].shape, (26847360, 3)
+    tot = sum(dd[ptype,"Coordinates"].shape[0]
+              for ptype in pf.particle_types if ptype != "all")
+    yield assert_equal, tot, 26847360
+    for field in _fields:
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        pf, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf, field, ds)
+


https://bitbucket.org/yt_analysis/yt-3.0/commits/64758cd4b498/
Changeset:   64758cd4b498
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-04 21:57:26
Summary:     Switching the order of a bunch of these operations, reducing test count.
Affected #:  5 files
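
Each reordered test also gains a consistency check, repeated per frontend in
the diffs below: the number of cells selected via the per-block masks must
agree with the sum of the "Ones" field over the same data object.  A minimal
sketch, assuming ds is any yt data object (e.g. pf.h.all_data()):

def check_block_consistency(ds):
    # Summing "Ones" counts selected cells; the per-block masks must agree.
    s1 = ds["Ones"].sum()
    s2 = sum(mask.sum() for block, mask in ds.blocks)
    assert s1 == s2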

diff -r 1aa9f0a4d4122a00dfcbb70f4a86370cffa5d467 -r 64758cd4b498bf469190823944589e80ff15130a yt/frontends/artio/tests/test_outputs.py
--- a/yt/frontends/artio/tests/test_outputs.py
+++ b/yt/frontends/artio/tests/test_outputs.py
@@ -31,12 +31,15 @@
     pf = data_dir_load(sizmbhloz)
     yield assert_equal, str(pf), "sizmbhloz-clref04SNth-rs9_a0.9011.art"
     dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
-    for field in _fields:
-        for axis in [0, 1, 2]:
-            for ds in dso:
+    for ds in dso:
+        for field in _fields:
+            for axis in [0, 1, 2]:
                 for weight_field in [None, "Density"]:
                     yield PixelizedProjectionValuesTest(
                         sizmbhloz, axis, field, weight_field,
                         ds)
-                yield FieldValuesTest(
-                        sizmbhloz, field, ds)
+            yield FieldValuesTest(sizmbhloz, field, ds)
+        if ds is None: ds = pf.h.all_data()
+        s1 = ds["Ones"].sum()
+        s2 = sum(mask.sum() for block, mask in ds.blocks)
+        yield assert_equal, s1, s2

diff -r 1aa9f0a4d4122a00dfcbb70f4a86370cffa5d467 -r 64758cd4b498bf469190823944589e80ff15130a yt/frontends/moab/tests/test_c5.py
--- a/yt/frontends/moab/tests/test_c5.py
+++ b/yt/frontends/moab/tests/test_c5.py
@@ -49,7 +49,6 @@
             ray = pf.h.ray(p1, p2)
             yield assert_almost_equal, ray["dts"].sum(dtype="float64"), 1.0, 8
     for field in _fields:
-        for axis in [0, 1, 2]:
-            for ds in dso:
-                yield FieldValuesTest(c5, field, ds)
+        for ds in dso:
+            yield FieldValuesTest(c5, field, ds)
 

diff -r 1aa9f0a4d4122a00dfcbb70f4a86370cffa5d467 -r 64758cd4b498bf469190823944589e80ff15130a yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -31,13 +31,15 @@
     pf = data_dir_load(output_00080)
     yield assert_equal, str(pf), "info_00080"
     dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
-    for field in _fields:
-        for axis in [0, 1, 2]:
-            for ds in dso:
+    for ds in dso:
+        for field in _fields:
+            for axis in [0, 1, 2]:
                 for weight_field in [None, "Density"]:
                     yield PixelizedProjectionValuesTest(
                         output_00080, axis, field, weight_field,
                         ds)
-                yield FieldValuesTest(
-                        output_00080, field, ds)
-
+            yield FieldValuesTest(output_00080, field, ds)
+        if ds is None: ds = pf.h.all_data()
+        s1 = ds["Ones"].sum()
+        s2 = sum(mask.sum() for block, mask in ds.blocks)
+        yield assert_equal, s1, s2

diff -r 1aa9f0a4d4122a00dfcbb70f4a86370cffa5d467 -r 64758cd4b498bf469190823944589e80ff15130a yt/frontends/sph/tests/test_owls.py
--- a/yt/frontends/sph/tests/test_owls.py
+++ b/yt/frontends/sph/tests/test_owls.py
@@ -40,13 +40,16 @@
     tot = sum(dd[ptype,"Coordinates"].shape[0]
               for ptype in pf.particle_types if ptype != "all")
     yield assert_equal, tot, (2*128*128*128)
-    for field in _fields:
-        for axis in [0, 1, 2]:
-            for ds in dso:
+    for ds in dso:
+        for field in _fields:
+            for axis in [0, 1, 2]:
                 for weight_field in [None, "Density"]:
                     yield PixelizedProjectionValuesTest(
                         os33, axis, field, weight_field,
                         ds)
-                yield FieldValuesTest(
-                        os33, field, ds)
+            yield FieldValuesTest(os33, field, ds)
+        if ds is None: ds = pf.h.all_data()
+        s1 = ds["Ones"].sum()
+        s2 = sum(mask.sum() for block, mask in ds.blocks)
+        yield assert_equal, s1, s2
 

diff -r 1aa9f0a4d4122a00dfcbb70f4a86370cffa5d467 -r 64758cd4b498bf469190823944589e80ff15130a yt/frontends/sph/tests/test_tipsy.py
--- a/yt/frontends/sph/tests/test_tipsy.py
+++ b/yt/frontends/sph/tests/test_tipsy.py
@@ -49,15 +49,18 @@
     tot = sum(dd[ptype,"Coordinates"].shape[0]
               for ptype in pf.particle_types if ptype != "all")
     yield assert_equal, tot, 26847360
-    for field in _fields:
-        for axis in [0, 1, 2]:
-            for ds in dso:
+    for ds in dso:
+        for field in _fields:
+            for axis in [0, 1, 2]:
                 for weight_field in [None, "Density"]:
                     yield PixelizedProjectionValuesTest(
                         pf, axis, field, weight_field,
                         ds)
-                yield FieldValuesTest(
-                        pf, field, ds)
+            yield FieldValuesTest(pf, field, ds)
+        if ds is None: ds = pf.h.all_data()
+        s1 = ds["Ones"].sum()
+        s2 = sum(mask.sum() for block, mask in ds.blocks)
+        yield assert_equal, s1, s2
 
 gasoline = "agora_1e11.00400/agora_1e11.00400"
 @requires_pf(gasoline, file_check = True)
@@ -77,13 +80,15 @@
     tot = sum(dd[ptype,"Coordinates"].shape[0]
               for ptype in pf.particle_types if ptype != "all")
     yield assert_equal, tot, 26847360
-    for field in _fields:
-        for axis in [0, 1, 2]:
-            for ds in dso:
+    for ds in dso:
+        for field in _fields:
+            for axis in [0, 1, 2]:
                 for weight_field in [None, "Density"]:
                     yield PixelizedProjectionValuesTest(
                         pf, axis, field, weight_field,
                         ds)
-                yield FieldValuesTest(
-                        pf, field, ds)
-
+            yield FieldValuesTest(pf, field, ds)
+        if ds is None: ds = pf.h.all_data()
+        s1 = ds["Ones"].sum()
+        s2 = sum(mask.sum() for block, mask in ds.blocks)
+        yield assert_equal, s1, s2


https://bitbucket.org/yt_analysis/yt-3.0/commits/ecf6b2f78392/
Changeset:   ecf6b2f78392
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-04 21:57:44
Summary:     Merging with bugfix
Affected #:  1 file

diff -r 64758cd4b498bf469190823944589e80ff15130a -r ecf6b2f78392d75e54e16281c479b62b96543cb9 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -402,11 +402,11 @@
     def mask(self, SelectorObject selector, np.int64_t num_cells = -1,
              int domain_id = -1):
         if num_cells == -1:
-            num_cells = selector.count_oct_cells(self, domain_id)
+            num_cells = selector.count_octs(self, domain_id)
         cdef np.ndarray[np.uint8_t, ndim=1] coords
         cdef OctVisitorData data
         self.setup_data(&data, domain_id)
-        coords = np.zeros((num_cells), dtype="uint8")
+        coords = np.zeros((num_cells*8), dtype="uint8")
         data.array = <void *> coords.data
         self.visit_all_octs(selector, oct_visitors.mask_octs, &data)
         return coords.astype("bool")
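
A brief note on the sizing fix above: each oct contains 2**3 = 8 cells, so a
per-cell mask needs eight entries per oct.  A minimal standalone sketch:

import numpy as np

num_octs = 4                                  # illustrative oct count
mask = np.zeros(num_octs * 8, dtype="uint8")  # 8 cells per oct
assert mask.size == 32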


https://bitbucket.org/yt_analysis/yt-3.0/commits/14ae35ea6abf/
Changeset:   14ae35ea6abf
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-04 22:13:11
Summary:     This fixes a failing test for Octree datasets with multiple octrees.
Affected #:  2 files

diff -r ecf6b2f78392d75e54e16281c479b62b96543cb9 -r 14ae35ea6abf1fe5716cf62ab07cdaa3e8ca9179 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -113,7 +113,7 @@
     _domain_ind = None
 
     def select_blocks(self, selector):
-        mask = self.oct_handler.mask(selector)
+        mask = self.oct_handler.mask(selector, domain_id = self.domain_id)
         mask = self._reshape_vals(mask)
         slicer = OctreeSubsetBlockSlice(self)
         for i, sl in slicer:

diff -r ecf6b2f78392d75e54e16281c479b62b96543cb9 -r 14ae35ea6abf1fe5716cf62ab07cdaa3e8ca9179 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -1170,7 +1170,9 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def mask(self, SelectorObject selector, np.int64_t num_cells = -1):
+    def mask(self, SelectorObject selector, np.int64_t num_cells = -1,
+             int domain_id = -1): 
+        # We take a domain_id here to avoid subclassing
         cdef int i
         cdef np.float64_t pos[3]
         cdef np.int64_t sfc


https://bitbucket.org/yt_analysis/yt-3.0/commits/ccaa7271e6d7/
Changeset:   ccaa7271e6d7
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-04 22:21:33
Summary:     Blocks of size 1 need this change.
Affected #:  1 file

diff -r 14ae35ea6abf1fe5716cf62ab07cdaa3e8ca9179 -r ccaa7271e6d763dff5f63fbaf38d6e4ea60c7fac yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -271,12 +271,14 @@
 
     @property
     def LeftEdge(self):
-        LE = self._fcoords[0,0,0,self.ind,:] - self._fwidth[0,0,0,self.ind,:]*0.5
+        LE = (self._fcoords[0,0,0,self.ind,:]
+            - self._fwidth[0,0,0,self.ind,:])*0.5
         return LE
 
     @property
     def RightEdge(self):
-        RE = self._fcoords[1,1,1,self.ind,:] + self._fwidth[1,1,1,self.ind,:]*0.5
+        RE = (self._fcoords[-1,-1,-1,self.ind,:]
+            + self._fwidth[-1,-1,-1,self.ind,:])*0.5
         return RE
 
     @property


https://bitbucket.org/yt_analysis/yt-3.0/commits/7d5d40d1f42d/
Changeset:   7d5d40d1f42d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-05 21:44:48
Summary:     This fixes ARTIO blocks issues.

Previously, the number of cells returned through:

for block, mask in something.blocks:
    count += mask.sum()

and:

something.ires.size

did not return the same result.  This is because of how masks were set up for
the RootMesh for ARTIO datasets.  Note that this did not produce bad or
incorrect values in nearly *any* situation, as .blocks is hardly used; but
where it was used, the results were wrong.  This corrects that by changing how
the root mesh is exposed: we now completely excise any root mesh SFC cells
that have octs within them.

The alternative to this would be a double-selection call, which is why this is
a problem *now* when it previously was not: the culprit was the use of
self.selector_fill inside the self.select_icoords call of OctreeSubset.  For
non-Forest datasets this was not an issue, as the octs were covered and
selected in recursively_visit_octs.  Here, however, since they were
disconnected, the selection needs to be added explicitly.  By masking here, we
are essentially mocking up the behavior of recursively_visit_octs.
Affected #:  1 file
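
In sketch form, the excision described above amounts to the following (a
plain-Python rendering of the sfc_mask construction in the diff below):

def build_sfc_mask(oct_count, sfc_start, sfc_end):
    # SFC cells that contain octs are excised from the root mesh entirely,
    # so every cell is counted exactly once.
    sfc_mask = []
    nsfc = 0
    for sfci in range(sfc_end - sfc_start + 1):
        if oct_count[sfci] > 0:
            sfc_mask.append(0)   # refined: lives in the octree, not here
        else:
            sfc_mask.append(1)   # unrefined root cell: stays in the mesh
            nsfc += 1
    return sfc_mask, nsfc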

diff -r ccaa7271e6d763dff5f63fbaf38d6e4ea60c7fac -r 7d5d40d1f42d673fe370788f2f792686304f733c yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -3,7 +3,8 @@
 cimport numpy as np
 import sys 
 
-from yt.geometry.selection_routines cimport SelectorObject, AlwaysSelector
+from yt.geometry.selection_routines cimport \
+    SelectorObject, AlwaysSelector, OctreeSubsetSelector
 from yt.utilities.lib.fp_utils cimport imax
 from yt.geometry.oct_container cimport \
     SparseOctreeContainer
@@ -561,6 +562,7 @@
     cdef np.float64_t dds[3]
     cdef np.int64_t dims[3]
     cdef public np.int64_t total_octs
+    cdef np.int64_t *doct_count
 
     def __init__(self, domain_dimensions, # cells
                  domain_left_edge,
@@ -619,12 +621,14 @@
             status = artio_grid_read_root_cell_end( self.handle )
             check_artio_status(status)
         free(num_octs_per_level)
+        self.oct_count = oct_count
+        self.doct_count = <np.int64_t *> oct_count.data
         self.root_mesh_handler = ARTIORootMeshContainer(self)
-        self.oct_count = oct_count
 
     def free_mesh(self):
         self.octree_handler = None
         self.root_mesh_handler = None
+        self.doct_count = NULL
         self.oct_count = None
 
 def get_coords(artio_fileset handle, np.int64_t s):
@@ -1013,9 +1017,12 @@
     cdef public object _last_mask
     cdef public object _last_selector_id
     cdef ARTIOSFCRangeHandler range_handler
+    cdef np.uint8_t *sfc_mask
+    cdef np.int64_t nsfc
 
     def __init__(self, ARTIOSFCRangeHandler range_handler):
         cdef int i
+        cdef np.int64_t sfci
         for i in range(3):
             self.DLE[i] = range_handler.DLE[i]
             self.DRE[i] = range_handler.DRE[i]
@@ -1027,6 +1034,22 @@
         self.sfc_start = range_handler.sfc_start
         self.sfc_end = range_handler.sfc_end
         self.range_handler = range_handler
+        # We assume that the number of octs has been created and filled
+        # already.  We no longer care about ANY of the SFCs that have octs
+        # inside them -- this goes for every operation that this object
+        # performs.
+        self.sfc_mask = <np.uint8_t *>malloc(sizeof(np.uint8_t) *
+          self.sfc_end - self.sfc_start + 1)
+        self.nsfc = 0
+        for sfci in range(self.sfc_end - self.sfc_start + 1):
+            if self.range_handler.oct_count[sfci] > 0:
+                self.sfc_mask[sfci] = 0
+            else:
+                self.sfc_mask[sfci] = 1
+                self.nsfc += 1
+
+    def __dealloc__(self):
+        free(self.sfc_mask)
 
     @cython.cdivision(True)
     cdef np.int64_t pos_to_sfc(self, np.float64_t pos[3]) nogil:
@@ -1057,7 +1080,7 @@
     def icoords(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         # Note that num_octs does not have to equal sfc_end - sfc_start + 1.
-        cdef np.int64_t sfc
+        cdef np.int64_t sfc, sfci = -1
         cdef int acoords[3], i
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
@@ -1066,7 +1089,9 @@
         coords = np.empty((num_cells, 3), dtype="int64")
         cdef int filled = 0
         for sfc in range(self.sfc_start, self.sfc_end + 1):
-            if mask[sfc - self.sfc_start] == 0: continue
+            if self.sfc_mask[sfc - self.sfc_start] == 0: continue
+            sfci += 1
+            if mask[sfci] == 0: continue
             # Note that we do *no* checks on refinement here.  In fact, this
             # entire setup should not need to touch the disk except if the
             # artio sfc calculators need to.
@@ -1079,7 +1104,7 @@
     def fcoords(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         # Note that num_cells does not have to equal sfc_end - sfc_start + 1.
-        cdef np.int64_t sfc
+        cdef np.int64_t sfc, sfci = -1
         cdef np.float64_t pos[3]
         cdef int acoords[3], i
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
@@ -1089,7 +1114,9 @@
         coords = np.empty((num_cells, 3), dtype="float64")
         cdef int filled = 0
         for sfc in range(self.sfc_start, self.sfc_end + 1):
-            if mask[sfc - self.sfc_start] == 0: continue
+            if self.sfc_mask[sfc - self.sfc_start] == 0: continue
+            sfci += 1
+            if mask[sfci] == 0: continue
             # Note that we do *no* checks on refinement here.  In fact, this
             # entire setup should not need to touch the disk except if the
             # artio sfc calculators need to.
@@ -1132,7 +1159,7 @@
         # other.  Note that we *do* apply the selector here.
         cdef np.int64_t num_cells = -1
         cdef np.int64_t ind
-        cdef np.int64_t sfc
+        cdef np.int64_t sfc, sfci = -1
         cdef np.float64_t pos[3]
         cdef np.float64_t dpos[3]
         cdef int dim, status, filled = 0
@@ -1158,7 +1185,9 @@
         ddata = (<char*>dest.data) + offset*ss*dims
         ind = 0
         for sfc in range(self.sfc_start, self.sfc_end + 1):
-            if mask[sfc - self.sfc_start] == 0: continue
+            if self.sfc_mask[sfc - self.sfc_start] == 0: continue
+            sfci += 1
+            if mask[sfci] == 0: continue
             memcpy(ddata, sdata + ind, dims * ss)
             ddata += dims * ss
             filled += 1
@@ -1175,21 +1204,16 @@
         # We take a domain_id here to avoid subclassing
         cdef int i
         cdef np.float64_t pos[3]
-        cdef np.int64_t sfc
-        cdef np.ndarray[np.int64_t, ndim=1] oct_count
+        cdef np.int64_t sfc, sfci = -1
         if self._last_selector_id == hash(selector):
             return self._last_mask
-        if num_cells == -1:
-            # We need to count, but this process will only occur one time,
-            # since num_cells will later be cached.
-            num_cells = self.sfc_end - self.sfc_start + 1
-        mask = np.zeros((num_cells), dtype="uint8")
-        oct_count = self.range_handler.oct_count
+        mask = np.zeros((self.nsfc), dtype="uint8")
         for sfc in range(self.sfc_start, self.sfc_end + 1):
-            if oct_count[sfc - self.sfc_start] > 0: continue
+            if self.sfc_mask[sfc - self.sfc_start] == 0: continue
+            sfci += 1
             self.sfc_to_pos(sfc, pos)
             if selector.select_cell(pos, self.dds) == 0: continue
-            mask[sfc - self.sfc_start] = 1
+            mask[sfci] = 1
         self._last_mask = mask.astype("bool")
         self._last_selector_id = hash(selector)
         return self._last_mask
@@ -1203,7 +1227,7 @@
     def fill_sfc(self, SelectorObject selector, field_indices):
         cdef np.ndarray[np.float64_t, ndim=1] dest
         cdef int n, status, i, di, num_oct_levels, nf, ngv, max_level
-        cdef np.int64_t sfc, num_cells
+        cdef np.int64_t sfc, num_cells, sfci = -1
         cdef np.float64_t val
         cdef artio_fileset_handle *handle = self.artio_handle.handle
         cdef double dpos[3]
@@ -1238,7 +1262,9 @@
             self.sfc_start, self.sfc_end )
         check_artio_status(status) 
         for sfc in range(self.sfc_start, self.sfc_end + 1):
-            if mask[sfc - self.sfc_start] == 0: continue
+            if self.sfc_mask[sfc - self.sfc_start] == 0: continue
+            sfci += 1
+            if mask[sfci] == 0: continue
             status = artio_grid_read_root_cell_begin( handle, sfc, 
                     dpos, grid_variables, &num_oct_levels,
                     num_octs_per_level)
@@ -1308,5 +1334,72 @@
                 for j in range(nf):
                     field_pointers[j][i] = field_vals[j] 
 
+cdef class SFCRangeSelector(SelectorObject):
+    
+    cdef SelectorObject base_selector
+    cdef ARTIOSFCRangeHandler range_handler
+    cdef ARTIORootMeshContainer mesh_container
+    cdef np.int64_t sfc_start, sfc_end
+
+    def __init__(self, dobj):
+        self.base_selector = dobj.base_selector
+        self.mesh_container = dobj.oct_handler
+        self.range_handler = self.mesh_container.range_handler
+        self.sfc_start = self.mesh_container.sfc_start
+        self.sfc_end = self.mesh_container.sfc_end
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def select_grids(self,
+                     np.ndarray[np.float64_t, ndim=2] left_edges,
+                     np.ndarray[np.float64_t, ndim=2] right_edges,
+                     np.ndarray[np.int32_t, ndim=2] levels):
+        raise RuntimeError
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_sphere(self, np.float64_t pos[3], np.float64_t radius) nogil:
+        return 1
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil:
+        return self.select_point(pos)
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_point(self, np.float64_t pos[3]) nogil:
+        cdef np.int64_t sfc = self.mesh_container.pos_to_sfc(pos)
+        if sfc > self.sfc_end: return 0
+        cdef np.int64_t oc = self.range_handler.doct_count[
+            sfc - self.sfc_start]
+        if oc > 0: return 0
+        return 1
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_bbox(self, np.float64_t left_edge[3],
+                               np.float64_t right_edge[3]) nogil:
+        return self.base_selector.select_bbox(left_edge, right_edge)
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_grid(self, np.float64_t left_edge[3],
+                         np.float64_t right_edge[3], np.int32_t level,
+                         Oct *o = NULL) nogil:
+        # Because visitors now use select_grid, we should be explicitly
+        # checking this.
+        return self.base_selector.select_grid(left_edge, right_edge, level, o)
+    
+    def _hash_vals(self):
+        return (hash(self.base_selector), self.sfc_start, self.sfc_end)
+
 sfc_subset_selector = AlwaysSelector
+#sfc_subset_selector = SFCRangeSelector
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/73eceed0e23d/
Changeset:   73eceed0e23d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-07 14:51:48
Summary:     Fixing .deposit for ARTIORootMeshContainer.
Affected #:  1 file

diff -r 7d5d40d1f42d673fe370788f2f792686304f733c -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -1290,21 +1290,26 @@
         # This implements the necessary calls to enable particle deposition to
         # occur as needed.
         cdef int nf, i, j
+        cdef np.int64_t sfc, sfci
         if fields is None:
             fields = []
         nf = len(fields)
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector, -1)
         cdef np.ndarray[np.int64_t, ndim=1] domain_ind
-        domain_ind = np.zeros(mask.shape[0], dtype="int64") - 1
+        domain_ind = np.zeros(self.sfc_end - self.sfc_start + 1,
+                              dtype="int64") - 1
         j = 0
-        for i in range(mask.shape[0]):
-            if mask[i] == 1:
-                domain_ind[i] = j
-                j += 1
+        sfci = -1
+        for sfc in range(self.sfc_start, self.sfc_end + 1):
+            if self.sfc_mask[sfc - self.sfc_start] == 0: continue
+            sfci += 1
+            if mask[sfci] == 0: 
+                continue
+            domain_ind[sfc - self.sfc_start] = j
+            j += 1
         cdef np.float64_t **field_pointers, *field_vals, pos[3], left_edge[3]
         cdef int coords[3]
-        cdef np.int64_t sfc
         cdef np.ndarray[np.float64_t, ndim=1] tarr
         field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
         field_vals = <np.float64_t*>alloca(sizeof(np.float64_t) * nf)
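
A plain-Python sketch of the domain_ind construction above: every root SFC
slot starts at -1, and selected, unrefined cells receive a compacted index in
selection order.

def build_domain_ind(sfc_start, sfc_end, sfc_mask, mask):
    # sfc_mask: 1 where the SFC cell belongs to the root mesh (no octs)
    # mask: selection over only those root-mesh cells, in SFC order
    domain_ind = [-1] * (sfc_end - sfc_start + 1)
    j = 0
    sfci = -1
    for sfc in range(sfc_start, sfc_end + 1):
        if sfc_mask[sfc - sfc_start] == 0:
            continue              # excised: handled by the octree
        sfci += 1
        if mask[sfci] == 0:
            continue              # not selected
        domain_ind[sfc - sfc_start] = j
        j += 1
    return domain_ind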


https://bitbucket.org/yt_analysis/yt-3.0/commits/a80979a5173c/
Changeset:   a80979a5173c
Branch:      yt
User:        jwise77
Date:        2013-09-30 01:47:01
Summary:     Fixed a couple of misplaced license headers.
Affected #:  2 files

diff -r b154dfce6acc5a293d7abfe20d64acc542919627 -r a80979a5173c27eced97c5f35b572d29adaddd1c yt/gui/reason/extdirect_router.py
--- a/yt/gui/reason/extdirect_router.py
+++ b/yt/gui/reason/extdirect_router.py
@@ -9,6 +9,13 @@
 This code was released under the BSD License.
 """
 
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 import inspect
 
 class DirectException(Exception):
@@ -186,12 +193,4 @@
 
 
 
-"""
 
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r b154dfce6acc5a293d7abfe20d64acc542919627 -r a80979a5173c27eced97c5f35b572d29adaddd1c yt/visualization/volume_rendering/multi_texture.py
--- a/yt/visualization/volume_rendering/multi_texture.py
+++ b/yt/visualization/volume_rendering/multi_texture.py
@@ -35,6 +35,14 @@
 I hope this helps,
   Almar
 """
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 from yt.mods import *
 from yt.funcs import *
 
@@ -300,14 +308,3 @@
     ax.Draw()
 
     return mtex, ax
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------


https://bitbucket.org/yt_analysis/yt-3.0/commits/6144554f5038/
Changeset:   6144554f5038
Branch:      yt
User:        ChrisMalone
Date:        2013-10-01 19:13:31
Summary:     make transfer_function a keyword argument instead of a positional one
Affected #:  1 file

diff -r a80979a5173c27eced97c5f35b572d29adaddd1c -r 6144554f5038e1fade96aaba6648544fbbf3b135 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -72,6 +72,9 @@
         cubical, but if not, it is left/right, top/bottom, front/back.
     resolution : int or list of ints
         The number of pixels in each direction.
+    transfer_function : `yt.visualization.volume_rendering.TransferFunction`
+        The transfer function used to map values to colors in an image.  If
+        not specified, defaults to a ProjectionTransferFunction.
     north_vector : array_like, optional
         The 'up' direction for the plane of rays.  If not specific, calculated
         automatically.
@@ -184,7 +187,7 @@
     _tf_figure = None
     _render_figure = None
     def __init__(self, center, normal_vector, width,
-                 resolution, transfer_function,
+                 resolution, transfer_function = None,
                  north_vector = None, steady_north=False,
                  volume = None, fields = None,
                  log_fields = None,
@@ -1465,7 +1468,7 @@
 
 class MosaicCamera(Camera):
     def __init__(self, center, normal_vector, width,
-                 resolution, transfer_function,
+                 resolution, transfer_function = None,
                  north_vector = None, steady_north=False,
                  volume = None, fields = None,
                  log_fields = None,
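
With this change the transfer function may be omitted at construction time; a
hedged sketch (the center, normal vector, width, and resolution values are
illustrative, and pf is assumed to be a previously loaded dataset):

from yt.visualization.volume_rendering.camera import Camera

cam = Camera([0.5, 0.5, 0.5],       # center (illustrative)
             [1.0, 0.0, 0.0],       # normal vector
             (1.0, "unitary"),      # width
             256,                   # resolution; no transfer_function given
             pf = pf)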


https://bitbucket.org/yt_analysis/yt-3.0/commits/309891bcf064/
Changeset:   309891bcf064
Branch:      yt
User:        jzuhone
Date:        2013-05-13 20:15:32
Summary:     SZ maps using SZpack
Affected #:  1 file

diff -r b0d1232c9eadea6447edbd7ce925ed67d11028e3 -r 309891bcf064d830355e7bf803da5e3c53d06df8 yt/analysis_modules/SZmaps.py
--- /dev/null
+++ b/yt/analysis_modules/SZmaps.py
@@ -0,0 +1,94 @@
+from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mp
+from yt.data_objects.image_array import ImageArray
+import numpy as np
+
+Tcmb = 2.726
+mueinv = 0.875
+
+try:
+    import SZpack
+except:
+    raise ImportError
+
+def _t_squared(field, data):
+    return data["TempkeV"]*data["TempkeV"]
+add_field("TSquared", function=_t_squared)
+
+def _beta_perp_squared(field, data):
+    axis = data.get_field_parameter("axis")
+    if axis == "x":
+	vv = np.sqrt(data["y-velocity"]**2+data["z-velocity"]**2)
+    elif axis == "y":
+	vv = np.sqrt(data["x-velocity"]**2+data["z-velocity"]**2)
+    elif axis == "z":
+	vv = np.sqrt(data["x-velocity"]**2+data["y-velocity"]**2)
+    return vv/clight/clight
+add_field("BetaPerpSquared", function=_beta_perp_squared)
+
+def _beta_par(field, data):
+    axis = data.get_field_parameter("axis")
+    return data["%s-velocity" % (axis)]/clight
+add_field("BetaPar", function=_beta_par)
+
+def _beta_par_squared(field, data):
+    return data["BetaPar"]**2
+add_field("BetaParSquared", function=_beta_par_squared)
+
+def _t_beta_par(field, data):
+    return data["TempkeV"]*data["BetaPar"]
+add_field("TBetaPar", function=_t_beta_par)
+
+vlist = 'xyz'
+
+def SZProjection(pf, axis, freqs, width=(1, "unitary"), nx=800, ny=800)
+
+    num_freqs = len(freqs)
+    freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs]
+    xo = hcgs*freqs*1.0e9/(kboltz*Tcmb)
+
+    proj1 = pf.h.proj("TempkeV", weight_field="Density")
+    proj2 = pf.h.proj("Density")
+
+    if axis in vlist:
+	vfield = "velocity_%s" % (axis)
+	proj1.set_field_parameter("axis", axis)
+    elif axis in xrange(0,3) :
+	vfield = "velocity_%s" % (vlist[axis])
+	proj1.set_field_parameter("axis", vlist[axis])
+    
+    frb1 = proj1.to_frb(width, n)
+    frb2 = proj2.to_frb(width, n)
+    
+    TeSZ = frb1["TempkeV"]
+    omega1 = frb1["Tsquared"]/(TeSZ*TeSZ) - 1.
+    sigma1 = frb1["TBetaPar"]/TeSZ - betac_par
+    kappa1 = frb1["BetaParSquared"] - betac_par
+    
+    frb1["tau"] = sigma_thompson*frb2["Density"]*mueinv/mp
+    frb1["omega1"] = ImageArray(omega1)
+    frb1["kappa1"] = ImageArray(kappa1)
+    frb1["sigma1"] = ImageArray(sigma1)
+
+    SZsignal = np.zeros((num_freqs,nx,ny))
+    omega = np.zeros((3))
+    sigma = np.zeros((3))
+    
+    for i in xrange(nx):
+
+	for j in xrange(ny):
+		
+	    tau = frb1["tau"][i,j]
+	    Te = frb1["TempkeV"][i,j]
+	    bpar = frb1["BetaPar"][i,j]
+	    bperp2 = frb["BetaPerpSquared"][i,j]
+	    omega[0] = frb1["omega1"][i,j]
+	    sigma[0] = frb1["sigma1"][i,j]
+	    kappa = frb1["kappa1"][i,j]
+	
+	    SZsignal[:,i,j] = SZpack.compute_combo_means_ex(xo, tau, Te, bpar, omega,
+							    sigma, kappa, bperp2)
+
+    for i in xrange(num_freqs) :
+	frb1[freq_fields[i]] = ImageArray(SZsignal[i,:,:])
+	
+    return frb1
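
A hypothetical call matching the signature above (the dataset name and the
GHz frequencies are illustrative):

import numpy as np
from yt.mods import load

pf = load("my_dataset")                   # assumed dataset
freqs = np.array([90.0, 180.0, 240.0])    # frequencies in GHz
frb = SZProjection(pf, "z", freqs, width=(1, "unitary"), nx=800, ny=800)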


https://bitbucket.org/yt_analysis/yt-3.0/commits/4f749ff73844/
Changeset:   4f749ff73844
Branch:      yt
User:        jzuhone
Date:        2013-05-13 20:15:47
Summary:     Merging
Affected #:  1 file

diff -r 309891bcf064d830355e7bf803da5e3c53d06df8 -r 4f749ff7384491a1da1a0396ccd16134654f8412 yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -115,7 +115,7 @@
 add_field("y-velocity", function=_yvelocity, take_log=False,
           units=r"\rm{cm}/\rm{s}", convert_function=_convertVelocity)
 def _zvelocity(field, data):
-    if "velocity_z" in data.pf.field.info:
+    if "velocity_z" in data.pf.field_info:
         return data["velocity_z"]
     else:
         return data["momentum_z"]/data["density"]


https://bitbucket.org/yt_analysis/yt-3.0/commits/7b35c3296567/
Changeset:   7b35c3296567
Branch:      yt
User:        jzuhone
Date:        2013-05-13 23:29:42
Summary:     Fixed a few bugs
Affected #:  2 files

diff -r 4f749ff7384491a1da1a0396ccd16134654f8412 -r 7b35c3296567868aaffcde67a574f5e28b797785 yt/analysis_modules/SZmaps.py
--- a/yt/analysis_modules/SZmaps.py
+++ b/yt/analysis_modules/SZmaps.py
@@ -1,5 +1,6 @@
 from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mp
 from yt.data_objects.image_array import ImageArray
+from yt.data_objects.field_info_container import add_field
 import numpy as np
 
 Tcmb = 2.726
@@ -40,25 +41,27 @@
 
 vlist = 'xyz'
 
-def SZProjection(pf, axis, freqs, width=(1, "unitary"), nx=800, ny=800)
+def SZProjection(pf, axis, freqs, center="c", width=(1, "unitary"), nx=800, ny=800):
 
     num_freqs = len(freqs)
     freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs]
     xo = hcgs*freqs*1.0e9/(kboltz*Tcmb)
 
-    proj1 = pf.h.proj("TempkeV", weight_field="Density")
-    proj2 = pf.h.proj("Density")
-
-    if axis in vlist:
-	vfield = "velocity_%s" % (axis)
-	proj1.set_field_parameter("axis", axis)
-    elif axis in xrange(0,3) :
-	vfield = "velocity_%s" % (vlist[axis])
-	proj1.set_field_parameter("axis", vlist[axis])
-    
-    frb1 = proj1.to_frb(width, n)
-    frb2 = proj2.to_frb(width, n)
-    
+    if isinstance(axis, np.ndarray) :
+        frb1["TempkeV"] = off_axis_projection(pf, center, axis, width, nx, field="TempkeV", weight="Density")
+        frb2["Density"] = off_axis_projection(pf, center, axis, width, nx, field="Density")
+    else :
+        if axis in vlist:
+            vfield = "velocity_%s" % (axis)
+            proj1.set_field_parameter("axis", axis)
+        elif axis in xrange(0,3) :
+            vfield = "velocity_%s" % (vlist[axis])
+            proj1.set_field_parameter("axis", vlist[axis])
+        proj1 = pf.h.proj(axis, "TempkeV", weight_field="Density")
+        proj2 = pf.h.proj(axis, "Density")
+        frb1 = proj1.to_frb(width, nx)
+        frb2 = proj2.to_frb(width, ny)
+                    
     TeSZ = frb1["TempkeV"]
     omega1 = frb1["Tsquared"]/(TeSZ*TeSZ) - 1.
     sigma1 = frb1["TBetaPar"]/TeSZ - betac_par

diff -r 4f749ff7384491a1da1a0396ccd16134654f8412 -r 7b35c3296567868aaffcde67a574f5e28b797785 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -117,3 +117,5 @@
 from .two_point_functions.api import \
     TwoPointFunctions, \
     FcnSet
+
+from .SZmaps import SZprojection


https://bitbucket.org/yt_analysis/yt-3.0/commits/6f00933abb02/
Changeset:   6f00933abb02
Branch:      yt
User:        jzuhone
Date:        2013-05-28 15:16:49
Summary:     Merging
Affected #:  45 files

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -32,6 +32,7 @@
 yt/utilities/lib/GridTree.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
+yt/utilities/lib/write_array.c
 syntax: glob
 *.pyc
 .*.swp

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -565,7 +565,7 @@
             then
                 sed -i.bak 's/soname/install_name/' Makefile-libbz2_so
             else
-                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so
+                sed -i.bak -e 's/soname/install_name/' -e "s|CC=gcc|CC=${CC}|" Makefile-libbz2_so
             fi
         fi
         ( make install CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -119,3 +119,7 @@
     FcnSet
 
 from .SZmaps import SZprojection
+
+from .radmc3d_export.api import \
+    RadMC3DWriter
+

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -108,6 +108,7 @@
         self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
 
         self.light_ray_solution = []
+        self.halo_lists = {}
         self._data = {}
 
         # Get list of datasets for light ray solution.
@@ -192,6 +193,7 @@
                        get_los_velocity=False,
                        get_nearest_halo=False,
                        nearest_halo_fields=None,
+                       halo_list_file=None,
                        halo_profiler_parameters=None,
                        njobs=1, dynamic=False):
         """
@@ -229,6 +231,10 @@
             A list of fields to be calculated for the halos nearest to
             every lixel in the ray.
             Default: None.
+        halo_list_file : str
+            Filename containing a list of halo properties to be used 
+            for getting the nearest halos to absorbers.
+            Default: None.
         halo_profiler_parameters: dict
             A dictionary of parameters to be passed to the HaloProfiler
             to create the appropriate data used to get properties for
@@ -287,7 +293,7 @@
         >>> # Make the profiles.
         >>> halo_profiler_actions.append({'function': make_profiles,
         ...                           'args': None,
-        ...                           'kwargs': {'filename': 'VirializedHalos.out'}})
+        ...                           'kwargs': {'filename': 'VirializedHalos.h5'}})
         ...
         >>> halo_list = 'filtered'
         >>> halo_profiler_parameters = dict(halo_profiler_kwargs=halo_profiler_kwargs,
@@ -305,6 +311,7 @@
         ...                   get_nearest_halo=True,
         ...                   nearest_halo_fields=['TotalMassMsun_100',
         ...                                        'RadiusMpc_100'],
+        ...                   halo_list_file='VirializedHalos.h5',
         ...                   halo_profiler_parameters=halo_profiler_parameters,
         ...                   get_los_velocity=True)
         
@@ -321,17 +328,18 @@
         # Initialize data structures.
         self._data = {}
         if fields is None: fields = []
-        all_fields = [field for field in fields]
+        data_fields = fields[:]
+        all_fields = fields[:]
         all_fields.extend(['dl', 'dredshift', 'redshift'])
         if get_nearest_halo:
             all_fields.extend(['x', 'y', 'z', 'nearest_halo'])
             all_fields.extend(['nearest_halo_%s' % field \
                                for field in nearest_halo_fields])
-            fields.extend(['x', 'y', 'z'])
+            data_fields.extend(['x', 'y', 'z'])
         if get_los_velocity:
             all_fields.extend(['x-velocity', 'y-velocity',
                                'z-velocity', 'los_velocity'])
-            fields.extend(['x-velocity', 'y-velocity', 'z-velocity'])
+            data_fields.extend(['x-velocity', 'y-velocity', 'z-velocity'])
 
         all_ray_storage = {}
         for my_storage, my_segment in parallel_objects(self.light_ray_solution,
@@ -348,10 +356,6 @@
                        (my_segment['redshift'], my_segment['start'],
                         my_segment['end']))
 
-            if get_nearest_halo:
-                halo_list = self._get_halo_list(my_segment['filename'],
-                                                **halo_profiler_parameters)
-
             # Load dataset for segment.
             pf = load(my_segment['filename'])
 
@@ -373,7 +377,7 @@
                                                  (sub_ray['dts'] *
                                                   vector_length(sub_segment[0],
                                                                 sub_segment[1]))])
-                for field in fields:
+                for field in data_fields:
                     sub_data[field] = np.concatenate([sub_data[field],
                                                       (sub_ray[field])])
 
@@ -400,6 +404,9 @@
 
             # Calculate distance to nearest object on halo list for each lixel.
             if get_nearest_halo:
+                halo_list = self._get_halo_list(pf, fields=nearest_halo_fields,
+                                                filename=halo_list_file,
+                                                **halo_profiler_parameters)
                 sub_data.update(self._get_nearest_halo_properties(sub_data, halo_list,
                                 fields=nearest_halo_fields))
                 sub_data['nearest_halo'] *= pf.units['mpccm']
@@ -434,58 +441,92 @@
         self._data = all_data
         return all_data
 
-    def _get_halo_list(self, dataset, halo_profiler_kwargs=None,
+    def _get_halo_list(self, pf, fields=None, filename=None, 
+                       halo_profiler_kwargs=None,
                        halo_profiler_actions=None, halo_list='all'):
-        "Load a list of halos for the dataset."
+        "Load a list of halos for the pf."
+
+        if str(pf) in self.halo_lists:
+            return self.halo_lists[str(pf)]
+
+        if fields is None: fields = []
+
+        if filename is not None and \
+                os.path.exists(os.path.join(pf.fullpath, filename)):
+
+            my_filename = os.path.join(pf.fullpath, filename)
+            mylog.info("Loading halo list from %s." % my_filename)
+            my_list = {}
+            in_file = h5py.File(my_filename, 'r')
+            for field in fields + ['center']:
+                my_list[field] = in_file[field][:]
+            in_file.close()
+
+        else:
+            my_list = self._halo_profiler_list(pf, fields=fields,
+                                               halo_profiler_kwargs=halo_profiler_kwargs,
+                                               halo_profiler_actions=halo_profiler_actions,
+                                               halo_list=halo_list)
+
+        self.halo_lists[str(pf)] = my_list
+        return self.halo_lists[str(pf)]
+
+    def _halo_profiler_list(self, pf, fields=None, 
+                            halo_profiler_kwargs=None,
+                            halo_profiler_actions=None, halo_list='all'):
+        "Run the HaloProfiler to get the halo list."
 
         if halo_profiler_kwargs is None: halo_profiler_kwargs = {}
         if halo_profiler_actions is None: halo_profiler_actions = []
 
-        hp = HaloProfiler(dataset, **halo_profiler_kwargs)
+        hp = HaloProfiler(pf, **halo_profiler_kwargs)
         for action in halo_profiler_actions:
             if not action.has_key('args'): action['args'] = ()
             if not action.has_key('kwargs'): action['kwargs'] = {}
             action['function'](hp, *action['args'], **action['kwargs'])
 
         if halo_list == 'all':
-            return_list = copy.deepcopy(hp.all_halos)
+            hp_list = copy.deepcopy(hp.all_halos)
         elif halo_list == 'filtered':
-            return_list = copy.deepcopy(hp.filtered_halos)
+            hp_list = copy.deepcopy(hp.filtered_halos)
         else:
             mylog.error("Keyword, halo_list, must be either 'all' or 'filtered'.")
-            return_list = None
+            hp_list = None
 
         del hp
+
+        # Create position array from halo list.
+        return_list = dict([(field, []) for field in fields + ['center']])
+        for halo in hp_list:
+            for field in fields + ['center']:
+                return_list[field].append(halo[field])
+        for field in fields + ['center']:
+            return_list[field] = np.array(return_list[field])
         return return_list
-
+        
     def _get_nearest_halo_properties(self, data, halo_list, fields=None):
         """
         Calculate distance to nearest object in halo list for each lixel in data.
-        Return list of distances and masses of nearest objects.
+        Return list of distances and other properties of nearest objects.
         """
 
         if fields is None: fields = []
+        field_data = dict([(field, np.zeros_like(data['x'])) \
+                           for field in fields])
+        nearest_distance = np.zeros_like(data['x'])
 
-        # Create position array from halo list.
-        halo_centers = np.array(map(lambda halo: halo['center'], halo_list))
-        halo_field_values = dict([(field, np.array(map(lambda halo: halo[field],
-                                                       halo_list))) \
-                                  for field in fields])
-
-        nearest_distance = np.zeros(data['x'].shape)
-        field_data = dict([(field, np.zeros(data['x'].shape)) \
-                           for field in fields])
-        for index in xrange(nearest_distance.size):
-            nearest = np.argmin(periodic_distance(np.array([data['x'][index],
-                                                            data['y'][index],
-                                                            data['z'][index]]),
-                                                  halo_centers))
-            nearest_distance[index] = periodic_distance(np.array([data['x'][index],
-                                                                  data['y'][index],
-                                                                  data['z'][index]]),
-                                                        halo_centers[nearest])
-            for field in fields:
-                field_data[field][index] = halo_field_values[field][nearest]
+        if halo_list['center'].size > 0:
+            for index in xrange(nearest_distance.size):
+                nearest = np.argmin(periodic_distance(np.array([data['x'][index],
+                                                                data['y'][index],
+                                                                data['z'][index]]),
+                                                      halo_list['center']))
+                nearest_distance[index] = periodic_distance(np.array([data['x'][index],
+                                                                      data['y'][index],
+                                                                      data['z'][index]]),
+                                                            halo_list['center'][nearest])
+                for field in fields:
+                    field_data[field][index] = halo_list[field][nearest]
 
         return_data = {'nearest_halo': nearest_distance}
         for field in fields:

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1062,7 +1062,7 @@
     def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
-        *dm_only* is set, only run it on the dark matter particles, otherwise
+        *dm_only* is True (default), only run it on the dark matter particles, otherwise
         on all particles.  Returns an iterable collection of *HopGroup* items.
         """
         self._data_source = data_source
@@ -1097,7 +1097,7 @@
     def _get_dm_indices(self):
         if 'creation_time' in self._data_source.hierarchy.field_list:
             mylog.debug("Differentiating based on creation time")
-            return (self._data_source["creation_time"] < 0)
+            return (self._data_source["creation_time"] <= 0)
         elif 'particle_type' in self._data_source.hierarchy.field_list:
             mylog.debug("Differentiating based on particle type")
             return (self._data_source["particle_type"] == 1)
@@ -1458,7 +1458,7 @@
 class HOPHaloList(HaloList):
     """
     Run hop on *data_source* with a given density *threshold*.  If
-    *dm_only* is set, only run it on the dark matter particles, otherwise
+    *dm_only* is True (default), only run it on the dark matter particles, otherwise
     on all particles.  Returns an iterable collection of *HopGroup* items.
     """
     _name = "HOP"
@@ -1657,7 +1657,7 @@
 class parallelHOPHaloList(HaloList, ParallelAnalysisInterface):
     """
     Run hop on *data_source* with a given density *threshold*.  If
-    *dm_only* is set, only run it on the dark matter particles, otherwise
+    *dm_only* is True (default), only run it on the dark matter particles, otherwise
     on all particles.  Returns an iterable collection of *HopGroup* items.
     """
     _name = "parallelHOP"
@@ -2009,13 +2009,11 @@
         --------
         >>> halos.write_out("HopAnalysis.out")
         """
-        # if path denoted in filename, assure path exists
-        if len(filename.split('/')) > 1:
-            mkdir_rec('/'.join(filename.split('/')[:-1]))
-
+        ensure_dir_exists(filename)
         f = self.comm.write_on_root(filename)
         HaloList.write_out(self, f, ellipsoid_data)
 
+
     def write_particle_lists_txt(self, prefix):
         r"""Write out the names of the HDF5 files containing halo particle data
         to a text file.
@@ -2032,13 +2030,11 @@
         --------
         >>> halos.write_particle_lists_txt("halo-parts")
         """
-        # if path denoted in prefix, assure path exists
-        if len(prefix.split('/')) > 1:
-            mkdir_rec('/'.join(prefix.split('/')[:-1]))
-
+        ensure_dir_exists(prefix)
         f = self.comm.write_on_root("%s.txt" % prefix)
         HaloList.write_particle_lists_txt(self, prefix, fp=f)
 
+
     @parallel_blocking_call
     def write_particle_lists(self, prefix):
         r"""Write out the particle data for halos to HDF5 files.
@@ -2059,10 +2055,7 @@
         --------
         >>> halos.write_particle_lists("halo-parts")
         """
-        # if path denoted in prefix, assure path exists
-        if len(prefix.split('/')) > 1:
-            mkdir_rec('/'.join(prefix.split('/')[:-1]))
-
+        ensure_dir_exists(prefix)
         fn = "%s.h5" % self.comm.get_filename(prefix)
         f = h5py.File(fn, "w")
         for halo in self._groups:
@@ -2091,15 +2084,12 @@
         ellipsoid_data : bool.
             Whether to save the ellipsoidal information to the files.
             Default = False.
-        
+
         Examples
         --------
         >>> halos.dump("MyHalos")
         """
-        # if path denoted in basename, assure path exists
-        if len(basename.split('/')) > 1:
-            mkdir_rec('/'.join(basename.split('/')[:-1]))
-
+        ensure_dir_exists(basename)
         self.write_out("%s.out" % basename, ellipsoid_data)
         self.write_particle_lists(basename)
         self.write_particle_lists_txt(basename)
@@ -2132,7 +2122,7 @@
         The density threshold used when building halos. Default = 160.0.
     dm_only : bool
         If True, only dark matter particles are used when building halos.
-        Default = False.
+        Default = True.
     resize : bool
         Turns load-balancing on or off. Default = True.
     kdtree : string
@@ -2461,7 +2451,7 @@
         The density threshold used when building halos. Default = 160.0.
     dm_only : bool
         If True, only dark matter particles are used when building halos.
-        Default = False.
+        Default = True.
     padding : float
         When run in parallel, the finder needs to surround each subvolume
         with duplicated particles for halo finding to work. This number
@@ -2566,7 +2556,7 @@
         applied.  Default = 0.2.
     dm_only : bool
         If True, only dark matter particles are used when building halos.
-        Default = False.
+        Default = True.
     padding : float
         When run in parallel, the finder needs to surround each subvolume
         with duplicated particles for halo finidng to work. This number

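With *dm_only* now defaulting to True across these finders, a minimal
sketch of the new call pattern (the dataset path here is hypothetical):

>>> from yt.mods import load
>>> from yt.analysis_modules.halo_finding.api import HaloFinder
>>> pf = load("galaxy0030/galaxy0030")         # hypothetical dataset
>>> halos = HaloFinder(pf)                     # dark matter particles only
>>> halos_all = HaloFinder(pf, dm_only=False)  # previous default: all particles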
diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -338,6 +338,8 @@
                     hires_only = (self.hires_dm_mass is not None),
                     **kwargs)
         # Make the directory to store the halo lists in.
+        if not self.outbase:
+            self.outbase = os.getcwd()
         if self.comm.rank == 0:
             if not os.path.exists(self.outbase):
                 os.makedirs(self.outbase)

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/analysis_modules/halo_profiler/halo_filters.py
--- a/yt/analysis_modules/halo_profiler/halo_filters.py
+++ b/yt/analysis_modules/halo_profiler/halo_filters.py
@@ -105,7 +105,8 @@
 
     if use_log:
         for field in temp_profile.keys():
-            temp_profile[field] = np.log10(temp_profile[field])
+            temp_profile[field] = np.log10(np.clip(temp_profile[field], 1e-90, 
+                                                   max(temp_profile[field])))
 
     virial = dict((field, 0.0) for field in fields)
 

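The clip guards against taking log10 of zero-valued profile bins, which
would otherwise produce -inf entries; a standalone illustration with
hypothetical values:

>>> import numpy as np
>>> prof = np.array([0.0, 1.0e-3, 2.5])
>>> np.log10(np.clip(prof, 1e-90, max(prof)))  # zero bin floors at -90, not -inf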
diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import gc
 import numpy as np
 import os
 import h5py
@@ -583,7 +584,7 @@
 
             r_min = 2 * self.pf.h.get_smallest_dx() * self.pf['mpc']
             if (halo['r_max'] / r_min < PROFILE_RADIUS_THRESHOLD):
-                mylog.error("Skipping halo with r_max / r_min = %f." % (halo['r_max']/r_min))
+                mylog.debug("Skipping halo with r_max / r_min = %f." % (halo['r_max']/r_min))
                 return None
 
             # get a sphere object to profile
@@ -630,6 +631,10 @@
                 g.clear_data()
             sphere.clear_data()
             del sphere
+            # Currently, this seems to be the only way to prevent large
+            # halo profiling runs from running out of RAM.
+            # It would be good to track down the real cause at some point.
+            gc.collect()
 
         return profile
 

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/analysis_modules/halo_profiler/standard_analysis.py
--- a/yt/analysis_modules/halo_profiler/standard_analysis.py
+++ b/yt/analysis_modules/halo_profiler/standard_analysis.py
@@ -68,8 +68,10 @@
         self.prof = prof
 
     def plot_everything(self, dirname = None):
-        if dirname is None: dirname = "%s_profile_plots/" % (self.pf)
-        if not os.path.isdir(dirname): os.makedirs(dirname)
+        if not dirname:
+            dirname = "%s_profile_plots/" % (self.pf)
+        if not os.path.isdir(dirname):
+            os.makedirs(dirname)
         import matplotlib; matplotlib.use("Agg")
         import pylab
         for field in self.prof.keys():

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- /dev/null
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -0,0 +1,334 @@
+"""
+Code to export from yt to RadMC3D
+
+Author: Andrew Myers <atmyers2 at gmail.com>
+Affiliation: UCB
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Andrew Myers.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from yt.mods import *
+from yt.utilities.lib.write_array import \
+    write_3D_array, write_3D_vector_array
+
+class RadMC3DLayer:
+    '''
+
+    This class represents an AMR "layer" of the style described in
+    the radmc3d manual. Unlike yt grids, layers may not have more
+    than one parent, so level L grids will need to be split up
+    if they straddle two or more level L - 1 grids. 
+
+    '''
+    def __init__(self, level, parent, unique_id, LE, RE, dim):
+        self.level = level
+        self.parent = parent
+        self.LeftEdge = LE
+        self.RightEdge = RE
+        self.ActiveDimensions = dim
+        self.id = unique_id
+
+    def get_overlap_with(self, grid):
+        '''
+
+        Returns the overlapping region between two Layers, or between
+        a Layer and a grid. RE < LE in any direction means no overlap.
+
+        '''
+        LE = np.maximum(self.LeftEdge,  grid.LeftEdge)
+        RE = np.minimum(self.RightEdge, grid.RightEdge)
+        return LE, RE
+
+    def overlaps(self, grid):
+        '''
+
+        Returns whether or not this layer overlaps a given grid.
+        
+        '''
+        LE, RE = self.get_overlap_with(grid)
+        if np.any(RE <= LE):
+            return False
+        else:
+            return True
+
+class RadMC3DWriter:
+    '''
+
+    This class provides a mechanism for writing out data files in a format
+    readable by radmc3d. Currently, only the ASCII "Layer" style file format
+    is supported. For more information please see the radmc3d manual at:
+    http://www.ita.uni-heidelberg.de/~dullemond/software/radmc-3d
+
+    Parameters
+    ----------
+
+    pf : `StaticOutput`
+        This is the parameter file object corresponding to the
+        simulation output to be written out.
+
+    max_level : int
+        An int corresponding to the maximum number of levels of refinement
+        to include in the output. Often, this does not need to be very large
+        as information on very high levels is frequently unobservable.
+        Default = 2. 
+
+    Examples
+    --------
+
+    This will create a field called "DustDensity" and write it out to the
+    file "dust_density.inp" in a form readable by radmc3d. It will also write
+    a "dust_temperature.inp" file with everything set to 10.0 K: 
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.radmc3d_export.api import *
+
+    >>> dust_to_gas = 0.01
+    >>> def _DustDensity(field, data):
+    ...     return dust_to_gas*data["Density"]
+    >>> add_field("DustDensity", function=_DustDensity)
+
+    >>> def _DustTemperature(field, data):
+    ...     return 10.0*data["Ones"]
+    >>> add_field("DustTemperature", function=_DustTemperature)
+    
+    >>> pf = load("galaxy0030/galaxy0030")
+    >>> writer = RadMC3DWriter(pf)
+    
+    >>> writer.write_amr_grid()
+    >>> writer.write_dust_file("DustDensity", "dust_density.inp")
+    >>> writer.write_dust_file("DustTemperature", "dust_temperature.inp")
+
+    This will create a field called "NumberDensityCO" and write it out to
+    the file "numberdens_co.inp". It will also write out information about
+    the gas velocity to "gas_velocity.inp" so that this broadening may be
+    included in the radiative transfer calculation by radmc3d:
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.radmc3d_export.api import *
+
+    >>> x_co = 1.0e-4
+    >>> mu_h = 2.34e-24
+    >>> def _NumberDensityCO(field, data):
+    ...     return (x_co/mu_h)*data["Density"]
+    >>> add_field("NumberDensityCO", function=_NumberDensityCO)
+    
+    >>> pf = load("galaxy0030/galaxy0030")
+    >>> writer = RadMC3DWriter(pf)
+    
+    >>> writer.write_amr_grid()
+    >>> writer.write_line_file("NumberDensityCO", "numberdens_co.inp")
+    >>> velocity_fields = ["x-velocity", "y-velocity", "z-velocity"]
+    >>> writer.write_line_file(velocity_fields, "gas_velocity.inp") 
+
+    '''
+
+    def __init__(self, pf, max_level=2):
+        self.max_level = max_level
+        self.cell_count = 0 
+        self.layers = []
+        self.domain_dimensions = pf.domain_dimensions
+        self.domain_left_edge  = pf.domain_left_edge
+        self.domain_right_edge = pf.domain_right_edge
+        self.grid_filename = "amr_grid.inp"
+        self.pf = pf
+
+        base_layer = RadMC3DLayer(0, None, 0, \
+                                  self.domain_left_edge, \
+                                  self.domain_right_edge, \
+                                  self.domain_dimensions)
+
+        self.layers.append(base_layer)
+        self.cell_count += np.product(pf.domain_dimensions)
+
+        for grid in pf.h.grids:
+            if grid.Level <= self.max_level:
+                self._add_grid_to_layers(grid)
+
+    def _get_parents(self, grid):
+        parents = []  
+        for potential_parent in self.layers:
+            if potential_parent.level == grid.Level - 1:
+                if potential_parent.overlaps(grid):
+                    parents.append(potential_parent)
+        return parents
+
+    def _add_grid_to_layers(self, grid):
+        parents = self._get_parents(grid)
+        for parent in parents:
+            LE, RE = parent.get_overlap_with(grid)
+            N = (RE - LE) / grid.dds
+            N = np.array([int(n + 0.5) for n in N])
+            new_layer = RadMC3DLayer(grid.Level, parent.id, \
+                                     len(self.layers), \
+                                     LE, RE, N)
+            self.layers.append(new_layer)
+            self.cell_count += np.product(N)
+            
+    def write_amr_grid(self):
+        '''
+        This routine writes the "amr_grid.inp" file that describes the mesh
+        radmc3d will use.
+
+        '''
+        dims = self.domain_dimensions
+        LE   = self.domain_left_edge
+        RE   = self.domain_right_edge
+
+        # calculate cell wall positions
+        xs = [str(x) for x in np.linspace(LE[0], RE[0], dims[0]+1)]
+        ys = [str(y) for y in np.linspace(LE[1], RE[1], dims[1]+1)]
+        zs = [str(z) for z in np.linspace(LE[2], RE[2], dims[2]+1)]
+
+        # write the file header
+        grid_file = open(self.grid_filename, 'w')
+        grid_file.write('1 \n') # iformat is always 1
+        if self.max_level == 0:
+            grid_file.write('0 \n')
+        else:
+            grid_file.write('10 \n') # only layer-style AMR files are supported
+        grid_file.write('1 \n') # only cartesian coordinates are supported
+        grid_file.write('0 \n') 
+        grid_file.write('{}    {}    {} \n'.format(1, 1, 1)) # assume 3D
+        grid_file.write('{}    {}    {} \n'.format(dims[0], dims[1], dims[2]))
+        if self.max_level != 0:
+            s = str(self.max_level) + '    ' + str(len(self.layers)-1) + '\n'
+            grid_file.write(s)
+
+        # write base grid cell wall positions
+        for x in xs:
+            grid_file.write(x + '    ')
+        grid_file.write('\n')
+
+        for y in ys:
+            grid_file.write(y + '    ')
+        grid_file.write('\n')
+
+        for z in zs:
+            grid_file.write(z + '    ')
+        grid_file.write('\n')
+
+        # write information about fine layers, skipping the base layer:
+        for layer in self.layers[1:]:
+            p = layer.parent
+            dds = (layer.RightEdge - layer.LeftEdge) / (layer.ActiveDimensions)
+            if p == 0:
+                ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
+            else:
+                LE = np.zeros(3)
+                for potential_parent in self.layers:
+                    if potential_parent.id == p:
+                        LE = potential_parent.LeftEdge
+                ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
+            ix  = int(ind[0]+0.5)
+            iy  = int(ind[1]+0.5)
+            iz  = int(ind[2]+0.5)
+            nx, ny, nz = layer.ActiveDimensions / 2
+            s = '{}    {}    {}    {}    {}    {}    {} \n'
+            s = s.format(p, ix, iy, iz, nx, ny, nz)
+            grid_file.write(s)
+
+        grid_file.close()
+
+    def _write_layer_data_to_file(self, fhandle, field, level, LE, dim):
+        cg = self.pf.h.covering_grid(level, LE, dim, num_ghost_zones=1)
+        if isinstance(field, list):
+            data_x = cg[field[0]]
+            data_y = cg[field[1]]
+            data_z = cg[field[2]]
+            write_3D_vector_array(data_x, data_y, data_z, fhandle)
+        else:
+            data = cg[field]
+            write_3D_array(data, fhandle)
+
+    def write_dust_file(self, field, filename):
+        '''
+        This method writes out fields in the format radmc3d needs to compute
+        thermal dust emission. In particular, if you have a field called
+        "DustDensity", you can write out a dust_density.inp file.
+
+        Parameters
+        ----------
+
+        field : string
+            The name of the field to be written out
+        filename : string
+            The name of the file to write the data to. The filenames radmc3d
+            expects for its various modes of operation are described in the
+            radmc3d manual.
+
+        '''
+        fhandle = open(filename, 'w')
+
+        # write header
+        fhandle.write('1 \n')
+        fhandle.write(str(self.cell_count) + ' \n')
+        fhandle.write('1 \n')
+
+        # now write fine layers:
+        for layer in self.layers:
+            lev = layer.level
+            if lev == 0:
+                LE = self.domain_left_edge
+                N  = self.domain_dimensions
+            else:
+                LE = layer.LeftEdge
+                N  = layer.ActiveDimensions
+
+            self._write_layer_data_to_file(fhandle, field, lev, LE, N)
+            
+        fhandle.close()
+
+    def write_line_file(self, field, filename):
+        '''
+        This method writes out fields in the format radmc3d needs to compute
+        line emission.
+
+        Parameters
+        ----------
+
+        field : string or list of 3 strings
+            If a string, the name of the field to be written out. If a list,
+            three fields that will be written to the file as a vector quantity.
+        filename : string
+            The name of the file to write the data to. The filenames radmc3d
+            expects for its various modes of operation are described in the
+            radmc3d manual.
+
+        '''
+        fhandle = open(filename, 'w')
+
+        # write header
+        fhandle.write('1 \n')
+        fhandle.write(str(self.cell_count) + ' \n')
+
+        # now write fine layers:
+        for layer in self.layers:
+            lev = layer.level
+            if lev == 0:
+                LE = self.domain_left_edge
+                N  = self.domain_dimensions
+            else:
+                LE = layer.LeftEdge
+                N  = layer.ActiveDimensions
+
+            self._write_layer_data_to_file(fhandle, field, lev, LE, N)
+
+        fhandle.close()

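The overlap test in RadMC3DLayer reduces to an axis-aligned bounding-box
intersection; a self-contained sketch of the same check:

>>> import numpy as np
>>> LE1, RE1 = np.array([0., 0., 0.]), np.array([0.5, 0.5, 0.5])
>>> LE2, RE2 = np.array([0.4, 0.4, 0.4]), np.array([1., 1., 1.])
>>> LE = np.maximum(LE1, LE2)
>>> RE = np.minimum(RE1, RE2)
>>> not np.any(RE <= LE)    # True: the regions share a small corner volume
True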
diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/analysis_modules/radmc3d_export/api.py
--- /dev/null
+++ b/yt/analysis_modules/radmc3d_export/api.py
@@ -0,0 +1,30 @@
+"""
+API for RadMC3D Export code
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Author: Andrew Myers <atmyers2 at gmail.com>
+Affiliation: UCB
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .RadMC3DInterface import \
+    RadMC3DWriter

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -20,4 +20,5 @@
     config.add_subpackage("spectral_integrator")
     config.add_subpackage("star_analysis")
     config.add_subpackage("two_point_functions")
+    config.add_subpackage("radmc3d_export")
     return config

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,7 +62,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold007',
+    gold_standard_filename = 'gold008',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None'
     )

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -4080,7 +4080,7 @@
             if region in ["OR", "AND", "NOT", "(", ")"]:
                 s += region
             else:
-                s += region.__repr__(clean = True)
+                s += region.__repr__()
             if i < (len(self.regions) - 1): s += ", "
         s += "]"
         return s

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -271,12 +271,14 @@
                 else: self[item] = vv.ravel()
                 return self[item]
         self.requested.append(item)
-        return defaultdict.__missing__(self, item)
+        if item not in self:
+            self[item] = self._read_data(item)
+        return self[item]
 
     def _read_data(self, field_name):
         self.requested.append(field_name)
         FI = getattr(self.pf, "field_info", FieldInfo)
-        if FI.has_key(field_name) and FI[field_name].particle_type:
+        if field_name in FI and FI[field_name].particle_type:
             self.requested.append(field_name)
             return np.ones(self.NumberOfParticles)
         return defaultdict.__missing__(self, field_name)

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -165,7 +165,7 @@
     def get_smallest_appropriate_unit(self, v):
         max_nu = 1e30
         good_u = None
-        for unit in ['mpc', 'kpc', 'pc', 'au', 'rsun', 'cm']:
+        for unit in ['mpc', 'kpc', 'pc', 'au', 'rsun', 'km', 'cm']:
             vv = v*self[unit]
             if vv < max_nu and vv > 1.0:
                 good_u = unit

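The unit selector keeps the largest unit whose converted value still
exceeds 1, so adding 'km' between 'rsun' and 'cm' lets lengths around
10^6 to 10^13 cm print as kilometers rather than centimeters. A hedged
standalone sketch of the selection loop (the cm-based conversion factors
below are assumptions, abbreviated for illustration):

import numpy as np

conversions = {'mpc': 3.24e-25, 'kpc': 3.24e-22, 'pc': 3.24e-19,
               'au': 6.68e-14, 'rsun': 1.44e-11, 'km': 1e-5, 'cm': 1.0}

def smallest_appropriate_unit(v_cm):
    # keep the unit giving the smallest converted value above 1.0
    max_nu, good_u = 1e30, None
    for unit in ['mpc', 'kpc', 'pc', 'au', 'rsun', 'km', 'cm']:
        vv = v_cm * conversions[unit]
        if 1.0 < vv < max_nu:
            good_u, max_nu = unit, vv
    return good_u

print(smallest_appropriate_unit(3.0e7))   # 'km' rather than 'cm'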
diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -44,7 +44,8 @@
     NeedsOriginalGrid, \
     NeedsDataField, \
     NeedsProperty, \
-    NeedsParameter
+    NeedsParameter, \
+    NullFunc
 
 from yt.utilities.physical_constants import \
      mh, \
@@ -440,7 +441,7 @@
           convert_function=_convertCellMassCode)
 
 def _TotalMass(field,data):
-    return (data["Density"]+data["Dark_Matter_Density"]) * data["CellVolume"]
+    return (data["Density"]+data["particle_density"]) * data["CellVolume"]
 add_field("TotalMass", function=_TotalMass, units=r"\rm{g}")
 add_field("TotalMassMsun", units=r"M_{\odot}",
           function=_TotalMass,
@@ -453,7 +454,7 @@
           convert_function=_convertCellMassMsun)
 
 def _Matter_Density(field,data):
-    return (data['Density'] + data['Dark_Matter_Density'])
+    return (data['Density'] + data['particle_density'])
 add_field("Matter_Density",function=_Matter_Density,units=r"\rm{g}/\rm{cm^3}")
 
 def _ComovingDensity(field, data):
@@ -982,22 +983,29 @@
 add_field("JeansMassMsun",function=_JeansMassMsun,
           units=r"\rm{M_{\odot}}")
 
-def _convertDensity(data):
-    return data.convert("Density")
+# We add these fields so that the field detector can use them
+for field in ["particle_position_%s" % ax for ax in "xyz"] + \
+             ["ParticleMass"]:
+    # This marker should let everyone know not to use the fields, but NullFunc
+    # should do that, too.
+    add_field(field, function=NullFunc, particle_type = True,
+        units=r"UNDEFINED")
+
 def _pdensity(field, data):
-    blank = np.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float64')
     if data["particle_position_x"].size == 0: return blank
     CICDeposit_3(data["particle_position_x"].astype(np.float64),
                  data["particle_position_y"].astype(np.float64),
                  data["particle_position_z"].astype(np.float64),
-                 data["particle_mass"].astype(np.float32),
+                 data["ParticleMass"],
                  data["particle_position_x"].size,
                  blank, np.array(data.LeftEdge).astype(np.float64),
                  np.array(data.ActiveDimensions).astype(np.int32),
                  np.float64(data['dx']))
+    np.divide(blank, data["CellVolume"], blank)
     return blank
 add_field("particle_density", function=_pdensity,
-          validators=[ValidateGridType()], convert_function=_convertDensity,
+          validators=[ValidateGridType()],
           display_name=r"\mathrm{Particle}\/\mathrm{Density}")
 
 def _MagneticEnergy(field,data):

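The added np.divide turns the CIC-deposited cell masses into a true
density; a one-cell toy check with hypothetical numbers:

>>> import numpy as np
>>> deposited = np.array([[[2.0e33]]])   # grams placed in the cell by CICDeposit_3
>>> volume    = np.array([[[3.0e63]]])   # CellVolume in cm^3
>>> np.divide(deposited, volume, deposited)   # in-place, as in _pdensity
>>> float(deposited)                     # ~6.7e-31 g/cm^3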
diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -280,12 +280,12 @@
         projection_conversion="1")
 
 def _spdensity(field, data):
-    grid_mass = np.zeros(data.ActiveDimensions, dtype='float32')
+    grid_mass = np.zeros(data.ActiveDimensions, dtype='float64')
     if data.star_mass.shape[0] ==0 : return grid_mass 
     amr_utils.CICDeposit_3(data.star_position_x,
                            data.star_position_y,
                            data.star_position_z,
-                           data.star_mass.astype('float32'),
+                           data.star_mass,
                            data.star_mass.shape[0],
                            grid_mass, 
                            np.array(data.LeftEdge).astype(np.float64),

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -105,6 +105,9 @@
         """
         Intelligently set the filename.
         """
+        if filename is None:
+            self.filename = filename
+            return
         if self.hierarchy._strip_path:
             self.filename = os.path.join(self.hierarchy.directory,
                                          os.path.basename(filename))
@@ -302,7 +305,7 @@
             LE.append(_next_token_line("GridLeftEdge", f))
             RE.append(_next_token_line("GridRightEdge", f))
             nb = int(_next_token_line("NumberOfBaryonFields", f)[0])
-            fn.append(["-1"])
+            fn.append([None])
             if nb > 0: fn[-1] = _next_token_line("BaryonFileName", f)
             npart.append(int(_next_token_line("NumberOfParticles", f)[0]))
             if nb == 0 and npart[-1] > 0: fn[-1] = _next_token_line("ParticleFileName", f)
@@ -373,6 +376,7 @@
         giter = izip(grids, levels, procs, parents)
         bn = self._bn % (self.pf)
         pmap = [(bn % P,) for P in xrange(procs.max()+1)]
+        pmap.append((None, )) # Now, P==-1 will give None
         for grid,L,P,Pid in giter:
             grid.Level = L
             grid._parent_id = Pid
@@ -405,7 +409,10 @@
                 parents.append(g.Parent.id)
             else:
                 parents.append(-1)
-            procs.append(int(self.filenames[i][0][-4:]))
+            if self.filenames[i][0] is None:
+                procs.append(-1)
+            else:
+                procs.append(int(self.filenames[i][0][-4:]))
             levels.append(g.Level)
 
         parents = np.array(parents, dtype='int64')

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -352,14 +352,14 @@
     f.take_log = False
 
 def _spdensity(field, data):
-    blank = np.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float64')
     if data["particle_position_x"].size == 0: return blank
     filter = data['creation_time'] > 0.0
     if not filter.any(): return blank
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
                            data["particle_position_y"][filter].astype(np.float64),
                            data["particle_position_z"][filter].astype(np.float64),
-                           data["particle_mass"][filter].astype(np.float32),
+                           data["particle_mass"][filter],
                            np.int64(np.where(filter)[0].size),
                            blank, np.array(data.LeftEdge).astype(np.float64),
                            np.array(data.ActiveDimensions).astype(np.int32), 
@@ -369,7 +369,7 @@
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
 
 def _dmpdensity(field, data):
-    blank = np.zeros(data.ActiveDimensions, dtype='float32')
+    blank = np.zeros(data.ActiveDimensions, dtype='float64')
     if data["particle_position_x"].size == 0: return blank
     if 'creation_time' in data.pf.field_info:
         filter = data['creation_time'] <= 0.0
@@ -381,7 +381,7 @@
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
                            data["particle_position_y"][filter].astype(np.float64),
                            data["particle_position_z"][filter].astype(np.float64),
-                           data["particle_mass"][filter].astype(np.float32),
+                           data["particle_mass"][filter],
                            num,
                            blank, np.array(data.LeftEdge).astype(np.float64),
                            np.array(data.ActiveDimensions).astype(np.int32), 
@@ -396,24 +396,24 @@
     using cloud-in-cell deposit.
     """
     particle_field = field.name[4:]
-    top = np.zeros(data.ActiveDimensions, dtype='float32')
+    top = np.zeros(data.ActiveDimensions, dtype='float64')
     if data["particle_position_x"].size == 0: return top
     particle_field_data = data[particle_field] * data['particle_mass']
     amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
                            data["particle_position_y"].astype(np.float64),
                            data["particle_position_z"].astype(np.float64),
-                           particle_field_data.astype(np.float32),
+                           particle_field_data,
                            data["particle_position_x"].size,
                            top, np.array(data.LeftEdge).astype(np.float64),
                            np.array(data.ActiveDimensions).astype(np.int32), 
                            np.float64(data['dx']))
     del particle_field_data
 
-    bottom = np.zeros(data.ActiveDimensions, dtype='float32')
+    bottom = np.zeros(data.ActiveDimensions, dtype='float64')
     amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
                            data["particle_position_y"].astype(np.float64),
                            data["particle_position_z"].astype(np.float64),
-                           data["particle_mass"].astype(np.float32),
+                           data["particle_mass"],
                            data["particle_position_x"].size,
                            bottom, np.array(data.LeftEdge).astype(np.float64),
                            np.array(data.ActiveDimensions).astype(np.int32), 
@@ -435,7 +435,7 @@
     Create a grid field for star quantities, weighted by star mass.
     """
     particle_field = field.name[5:]
-    top = np.zeros(data.ActiveDimensions, dtype='float32')
+    top = np.zeros(data.ActiveDimensions, dtype='float64')
     if data["particle_position_x"].size == 0: return top
     filter = data['creation_time'] > 0.0
     if not filter.any(): return top
@@ -443,18 +443,18 @@
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
                           data["particle_position_y"][filter].astype(np.float64),
                           data["particle_position_z"][filter].astype(np.float64),
-                          particle_field_data.astype(np.float32),
+                          particle_field_data,
                           np.int64(np.where(filter)[0].size),
                           top, np.array(data.LeftEdge).astype(np.float64),
                           np.array(data.ActiveDimensions).astype(np.int32), 
                           np.float64(data['dx']))
     del particle_field_data
 
-    bottom = np.zeros(data.ActiveDimensions, dtype='float32')
+    bottom = np.zeros(data.ActiveDimensions, dtype='float64')
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
                           data["particle_position_y"][filter].astype(np.float64),
                           data["particle_position_z"][filter].astype(np.float64),
-                          data["particle_mass"][filter].astype(np.float32),
+                          data["particle_mass"][filter],
                           np.int64(np.where(filter)[0].size),
                           bottom, np.array(data.LeftEdge).astype(np.float64),
                           np.array(data.ActiveDimensions).astype(np.int32), 

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -119,9 +119,13 @@
         files_keys = defaultdict(lambda: [])
         pf_field_list = grids[0].pf.h.field_list
         sets = [dset for dset in list(sets) if dset in pf_field_list]
-        for g in grids: files_keys[g.filename].append(g)
+        for g in grids:
+            files_keys[g.filename].append(g)
         exc = self._read_exception
         for file in files_keys:
+            # This is funny business with Enzo files that are DM-only,
+            # where grids can exist but have *no* data.
+            if file is None: continue
             mylog.debug("Starting read %s (%s)", file, sets)
             nodes = [g.id for g in files_keys[file]]
             nodes.sort()

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -64,7 +64,6 @@
 translation_dict = {"x-velocity": "velx",
                     "y-velocity": "vely",
                     "z-velocity": "velz",
-                    "VelocityMagnitude": "velo",
                     "Density": "dens",
                     "Temperature": "temp",
                     "Pressure" : "pres", 

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/frontends/pluto/data_structures.py
--- a/yt/frontends/pluto/data_structures.py
+++ b/yt/frontends/pluto/data_structures.py
@@ -99,7 +99,7 @@
 
     grid = PlutoGrid
 
-    def __init__(self,pf,data_style='chombo_hdf5'):
+    def __init__(self,pf,data_style='pluto_hdf5'):
         self.domain_left_edge = pf.domain_left_edge
         self.domain_right_edge = pf.domain_right_edge
         self.data_style = data_style
@@ -187,7 +187,7 @@
     _fieldinfo_fallback = PlutoFieldInfo
     _fieldinfo_known = KnownPlutoFields
 
-    def __init__(self, filename, data_style='chombo_hdf5',
+    def __init__(self, filename, data_style='pluto_hdf5',
                  storage_filename = None, ini_filename = None):
         self._handle = h5py.File(filename,'r')
         self.current_time = self._handle.attrs['time']

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -603,17 +603,11 @@
     suffix = os.path.splitext(name)[1]
     return suffix if suffix in ['.png', '.eps', '.ps', '.pdf'] else ''
 
-def mkdir_rec(path):
-    """
-    Recursive mkdir, so that if you mkdir two levels deep and the first 
-    one doesn't exist, it creates the first, and then any subsequent dirs.
 
-    Examples
-    --------
-    mkdir_rec("a/b/c")
-    """
-    dir_list = path.split("/")
-    basedir = "."
-    for dir in dir_list:
-        basedir = "%s/%s" % (basedir, dir)
-        if not os.path.isdir(basedir): os.mkdir(basedir)
+def ensure_dir_exists(path):
+    r"""Create all directories in path recursively in a parallel safe manner"""
+    my_dir = os.path.dirname(path)
+    if not my_dir:
+        return
+    if not os.path.exists(my_dir):
+        only_on_root(os.makedirs, my_dir)

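Note that the new helper takes a *file* path and creates only its
directory component, so callers can pass the eventual output filename
directly; a usage sketch:

>>> from yt.funcs import ensure_dir_exists
>>> ensure_dir_exists("run42/halos/HopAnalysis.out")  # creates run42/halos/
>>> ensure_dir_exists("HopAnalysis.out")              # no directory part: no-op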
diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/gui/reason/widget_store.py
--- a/yt/gui/reason/widget_store.py
+++ b/yt/gui/reason/widget_store.py
@@ -76,7 +76,8 @@
         sl = pf.h.slice(axis, coord, center = center, periodic = True)
         xax, yax = x_dict[axis], y_dict[axis]
         DLE, DRE = pf.domain_left_edge, pf.domain_right_edge
-        pw = PWViewerExtJS(sl, (DLE[xax], DRE[xax], DLE[yax], DRE[yax]), setup = False)
+        pw = PWViewerExtJS(sl, (DLE[xax], DRE[xax], DLE[yax], DRE[yax]), 
+                           setup = False, plot_type='SlicePlot')
         pw.set_current_field(field)
         field_list = list(set(pf.h.field_list + pf.h.derived_field_list))
         field_list = [dict(text = f) for f in sorted(field_list)]
@@ -96,7 +97,7 @@
         xax, yax = x_dict[axis], y_dict[axis]
         DLE, DRE = pf.domain_left_edge, pf.domain_right_edge
         pw = PWViewerExtJS(proj, (DLE[xax], DRE[xax], DLE[yax], DRE[yax]),
-                           setup = False)
+                           setup = False, plot_type='ProjectionPlot')
         pw.set_current_field(field)
         field_list = list(set(pf.h.field_list + pf.h.derived_field_list))
         field_list = [dict(text = f) for f in sorted(field_list)]

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -142,8 +142,9 @@
 elif exe_name in \
         ["mpi4py", "embed_enzo",
          "python"+sys.version[:3]+"-mpi"] \
-    or '_parallel' in dir(sys) \
-    or any(["ipengine" in arg for arg in sys.argv]):
+        or '_parallel' in dir(sys) \
+        or any(["ipengine" in arg for arg in sys.argv]) \
+        or any(["cluster-id" in arg for arg in sys.argv]):
     parallel_capable = turn_on_parallelism()
 else:
     parallel_capable = False

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -613,7 +613,7 @@
         fns = ['old.png', 'new.png']
         mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result[0])))
         mpimg.imsave(fns[1], np.loads(zlib.decompress(new_result[0])))
-        compare_images(fns[0], fns[1], 10**(-self.decimals))
+        assert compare_images(fns[0], fns[1], 10**(-self.decimals)) == None
         for fn in fns: os.remove(fn)
 
 def requires_pf(pf_fn, big_data = False):

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1327,7 +1327,7 @@
             plt.set_log(args.field, args.takelog)
             if args.zlim:
                 plt.set_zlim(args.field,*args.zlim)
-            if not os.path.isdir(args.output): os.makedirs(args.output)
+            ensure_dir_exists(args.output)
             plt.save(os.path.join(args.output,"%s" % (pf)))
 
 class YTRenderCmd(YTCommand):

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/utilities/definitions.py
--- a/yt/utilities/definitions.py
+++ b/yt/utilities/definitions.py
@@ -27,8 +27,8 @@
 
 from .physical_constants import \
    mpc_per_mpc, kpc_per_mpc, pc_per_mpc, au_per_mpc, rsun_per_mpc, \
-   miles_per_mpc, cm_per_mpc, sec_per_Gyr, sec_per_Myr, sec_per_year, \
-   sec_per_day
+   miles_per_mpc, km_per_mpc, cm_per_mpc, sec_per_Gyr, sec_per_Myr, \
+   sec_per_year, sec_per_day
 
 # The number of levels we expect to have at most
 MAXLEVEL=48
@@ -55,8 +55,14 @@
                   'au'    : au_per_mpc,
                   'rsun'  : rsun_per_mpc,
                   'miles' : miles_per_mpc,
+                  'km'    : km_per_mpc,
                   'cm'    : cm_per_mpc}
 
+# Nicely formatted versions of common length units
+formatted_length_unit_names = {'mpc'     : 'Mpc',
+                               'au'      : 'AU',
+                               'rsun'    : 'R_\odot'}
+
 # How many seconds are in each thing
 sec_conversion = {'Gyr'   : sec_per_Gyr,
                   'Myr'   : sec_per_Myr,

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -208,3 +208,10 @@
         s = "There are too many vertices (%s) to upload to Sketchfab. " % (self.nv)
         s += "Your model has been saved as %s .  You should upload manually." % (self.fn)
         return s
+
+class YTInvalidWidthError(YTException):
+    def __init__(self, error):
+        self.error = error
+
+    def __str__(self):
+        return str(self.error)

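A minimal sketch of how the new exception is meant to be raised and
rendered (the message text here is hypothetical):

>>> from yt.utilities.exceptions import YTInvalidWidthError
>>> try:
...     raise YTInvalidWidthError("width must be a (value, unit) tuple")
... except YTInvalidWidthError as e:
...     print(str(e))
width must be a (value, unit) tuple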
diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/utilities/lib/CICDeposit.pyx
--- a/yt/utilities/lib/CICDeposit.pyx
+++ b/yt/utilities/lib/CICDeposit.pyx
@@ -34,22 +34,22 @@
 def CICDeposit_3(np.ndarray[np.float64_t, ndim=1] posx,
                  np.ndarray[np.float64_t, ndim=1] posy,
                  np.ndarray[np.float64_t, ndim=1] posz,
-                 np.ndarray[np.float32_t, ndim=1] mass,
+                 np.ndarray[np.float64_t, ndim=1] mass,
                  np.int64_t npositions,
-                 np.ndarray[np.float32_t, ndim=3] field,
+                 np.ndarray[np.float64_t, ndim=3] field,
                  np.ndarray[np.float64_t, ndim=1] leftEdge,
                  np.ndarray[np.int32_t, ndim=1] gridDimension,
                  np.float64_t cellSize):
 
     cdef int i1, j1, k1, n
-    cdef double xpos, ypos, zpos
-    cdef double fact, edge0, edge1, edge2
-    cdef double le0, le1, le2
-    cdef float dx, dy, dz, dx2, dy2, dz2
+    cdef np.float64_t xpos, ypos, zpos
+    cdef np.float64_t fact, edge0, edge1, edge2
+    cdef np.float64_t le0, le1, le2
+    cdef np.float64_t dx, dy, dz, dx2, dy2, dz2
 
-    edge0 = (<float> gridDimension[0]) - 0.5001
-    edge1 = (<float> gridDimension[1]) - 0.5001
-    edge2 = (<float> gridDimension[2]) - 0.5001
+    edge0 = (<np.float64_t> gridDimension[0]) - 0.5001
+    edge1 = (<np.float64_t> gridDimension[1]) - 0.5001
+    edge2 = (<np.float64_t> gridDimension[2]) - 0.5001
     fact = 1.0 / cellSize
 
     le0 = leftEdge[0]
@@ -68,9 +68,9 @@
         k1  = <int> (zpos + 0.5)
 
         # Compute the weights
-        dx = (<float> i1) + 0.5 - xpos
-        dy = (<float> j1) + 0.5 - ypos
-        dz = (<float> k1) + 0.5 - zpos
+        dx = (<np.float64_t> i1) + 0.5 - xpos
+        dy = (<np.float64_t> j1) + 0.5 - ypos
+        dz = (<np.float64_t> k1) + 0.5 - zpos
         dx2 =  1.0 - dx
         dy2 =  1.0 - dy
         dz2 =  1.0 - dz

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/utilities/lib/__init__.py
--- a/yt/utilities/lib/__init__.py
+++ b/yt/utilities/lib/__init__.py
@@ -39,3 +39,4 @@
 from .grid_traversal import *
 from .marching_cubes import *
 from .GridTree import *
+from .write_array import *

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path, glob
+import os, sys, os.path, glob, \
+  tempfile, subprocess, shutil
 
 def check_for_png():
     # First up: HDF5_DIR in environment
@@ -97,11 +98,50 @@
     print "You can locate this by looking for the file ft2build.h"
     sys.exit(1)
 
+def check_for_openmp():
+    # Create a temporary directory
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    # Get compiler invocation
+    compiler = os.getenv('CC', 'cc')
+
+    # Attempt to compile a test script.
+    # See http://openmp.org/wp/openmp-compilers/
+    filename = r'test.c'
+    file = open(filename,'w', 0)
+    file.write(
+        "#include <omp.h>\n"
+        "#include <stdio.h>\n"
+        "int main() {\n"
+        "#pragma omp parallel\n"
+        "printf(\"Hello from thread %d, nthreads %d\\n\", omp_get_thread_num(), omp_get_num_threads());\n"
+        "}"
+        )
+    with open(os.devnull, 'w') as fnull:
+        exit_code = subprocess.call([compiler, '-fopenmp', filename],
+                                    stdout=fnull, stderr=fnull)
+        
+    # Clean up
+    file.close()
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+    if exit_code == 0:
+        return True
+    else:
+        return False
+
 def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('lib',parent_package,top_path)
     png_inc, png_lib = check_for_png()
     freetype_inc, freetype_lib = check_for_freetype()
+    if check_for_openmp() == True:
+        omp_args = ['-fopenmp']
+    else:
+        omp_args = None
     # Because setjmp.h is included by lots of things, and because libpng hasn't
     # always properly checked its header files (see
     # https://bugzilla.redhat.com/show_bug.cgi?id=494579 ) we simply disable
@@ -129,8 +169,8 @@
                 depends=["yt/utilities/lib/freetype_includes.h"])
     config.add_extension("geometry_utils", 
                 ["yt/utilities/lib/geometry_utils.pyx"],
-               extra_compile_args=['-fopenmp'],
-               extra_link_args=['-fopenmp'],
+               extra_compile_args=omp_args,
+               extra_link_args=omp_args,
                 libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
     config.add_extension("Interpolators", 
                 ["yt/utilities/lib/Interpolators.pyx"],
@@ -194,8 +234,8 @@
                  glob.glob("yt/utilities/lib/healpix_*.c"), 
                include_dirs=["yt/utilities/lib/"],
                libraries=["m"], 
-               extra_compile_args=['-fopenmp'],
-               extra_link_args=['-fopenmp'],
+               extra_compile_args=omp_args,
+               extra_link_args=omp_args,
                depends = ["yt/utilities/lib/VolumeIntegrator.pyx",
                           "yt/utilities/lib/fp_utils.pxd",
                           "yt/utilities/lib/kdtree.h",
@@ -204,6 +244,8 @@
                           "yt/utilities/lib/field_interpolation_tables.pxd",
                           ]
           )
+    config.add_extension("write_array",
+                         ["yt/utilities/lib/write_array.pyx"])
     config.add_extension("GridTree", 
     ["yt/utilities/lib/GridTree.pyx"],
         libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/utilities/lib/write_array.pyx
--- /dev/null
+++ b/yt/utilities/lib/write_array.pyx
@@ -0,0 +1,66 @@
+"""
+Faster, cythonized file IO
+
+Author: Andrew Myers <atmyers2 at gmail.com>
+Affiliation: UCB
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Andrew Myers.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+cimport numpy as np
+cimport cython
+
+DTYPE = np.float64
+ctypedef np.float64_t DTYPE_t
+
+ at cython.boundscheck(False)
+def write_3D_array(np.ndarray[DTYPE_t, ndim=3] data, fhandle):
+    assert data.dtype == DTYPE
+    cdef int Nx, Ny, Nz
+    Nx = data.shape[0]
+    Ny = data.shape[1]
+    Nz = data.shape[2]
+    cdef unsigned int i, j, k
+
+    for i in np.arange(Nz):
+        for j in np.arange(Ny):
+            for k in np.arange(Nx):
+                fhandle.write(str(data[k, j, i]) + '\n')
+
+ at cython.boundscheck(False)
+def write_3D_vector_array(np.ndarray[DTYPE_t, ndim=3] data_x, 
+                          np.ndarray[DTYPE_t, ndim=3] data_y,
+                          np.ndarray[DTYPE_t, ndim=3] data_z,
+                          fhandle):
+
+    assert data_x.dtype == DTYPE
+    cdef int Nx, Ny, Nz
+    Nx = data_x.shape[0]
+    Ny = data_x.shape[1]
+    Nz = data_x.shape[2]
+    cdef unsigned int i, j, k
+
+    for i in np.arange(Nz):
+        for j in np.arange(Ny):
+            for k in np.arange(Nx):
+                fx = data_x[k, j, i]
+                fy = data_y[k, j, i]
+                fz = data_z[k, j, i]
+                fhandle.write('{}    {}    {} \n'.format(fx, fy, fz))

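write_3D_array streams the values one per line with x varying fastest,
matching the ordering the RadMC3D writer above relies on; a hedged usage
sketch (the target filename is hypothetical, and the header is assumed
to have been written already):

>>> import numpy as np
>>> from yt.utilities.lib.write_array import write_3D_array
>>> data = np.random.random((4, 4, 4))   # already float64
>>> fh = open("dust_density.inp", "a")
>>> write_3D_array(data, fh)
>>> fh.close()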
diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/utilities/linear_interpolators.py
--- a/yt/utilities/linear_interpolators.py
+++ b/yt/utilities/linear_interpolators.py
@@ -31,12 +31,44 @@
 
 class UnilinearFieldInterpolator:
     def __init__(self, table, boundaries, field_names, truncate=False):
+        r"""Initialize a 1D interpolator for field data.
+
+        table : array
+            The data table over which interpolation is performed.
+        boundaries : tuple or array
+            If a tuple, this should specify the upper and lower bounds 
+            for the bins of the data table.  This assumes the bins are 
+            evenly spaced.  If an array, this specifies the bins 
+            explicitly.
+        field_names : str
+            Name of the field to be used as input data for interpolation.
+        truncate : bool
+            If False, an exception is raised if the input values are 
+            outside the bounds of the table.  If True, extrapolation is 
+            performed.
+        
+        Examples
+        --------
+
+        ad = pf.h.all_data()
+        table_data = np.random.random(64)
+        interp = UnilinearFieldInterpolator(table_data, (0.0, 1.0), "x",
+                                            truncate=True)
+        field_data = interp(ad)
+        
+        """
         self.table = table.astype('float64')
         self.truncate = truncate
-        x0, x1 = boundaries
         self.x_name = field_names
-        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
-
+        if isinstance(boundaries, np.ndarray):
+            if boundaries.size != table.shape[0]:
+                mylog.error("Bins array not the same length as the data.")
+                raise ValueError
+            self.x_bins = boundaries
+        else:
+            x0, x1 = boundaries
+            self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+        
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
         x_vals = data_object[self.x_name].ravel().astype('float64')
@@ -57,12 +89,51 @@
 
 class BilinearFieldInterpolator:
     def __init__(self, table, boundaries, field_names, truncate=False):
+        r"""Initialize a 2D interpolator for field data.
+
+        table : array
+            The data table over which interpolation is performed.
+        boundaries : tuple
+            Either a tuple of lower and upper bounds for the x and y bins 
+            given as (x0, x1, y0, y1) or a tuple of two arrays containing the 
+            x and y bins.
+        field_names : list
+            Names of the fields to be used as input data for interpolation.
+        truncate : bool
+            If False, an exception is raised if the input values are 
+            outside the bounds of the table.  If True, extrapolation is 
+            performed.
+        
+        Examples
+        --------
+
+        ad = pf.h.all_data()
+        table_data = np.random.random((64, 64))
+        interp = BilinearFieldInterpolator(table_data, (0.0, 1.0, 0.0, 1.0), 
+                                           ["x", "y"],
+                                           truncate=True)
+        field_data = interp(ad)
+        
+        """
         self.table = table.astype('float64')
         self.truncate = truncate
-        x0, x1, y0, y1 = boundaries
         self.x_name, self.y_name = field_names
-        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
-        self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
+        if len(boundaries) == 4:
+            x0, x1, y0, y1 = boundaries
+            self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+            self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
+        elif len(boundaries) == 2:
+            if boundaries[0].size != table.shape[0]:
+                mylog.error("X bins array not the same length as the data.")
+                raise ValueError
+            if boundaries[1].size != table.shape[1]:
+                mylog.error("Y bins array not the same length as the data.")
+                raise ValueError
+            self.x_bins = boundaries[0]
+            self.y_bins = boundaries[1]
+        else:
+            mylog.error("Boundaries must be given as (x0, x1, y0, y1) or as (x_bins, y_bins)")
+            raise ValueError
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
@@ -90,14 +161,58 @@
 
 class TrilinearFieldInterpolator:
     def __init__(self, table, boundaries, field_names, truncate = False):
+        r"""Initialize a 3D interpolator for field data.
+
+        table : array
+            The data table over which interpolation is performed.
+        boundaries : tuple
+            Either a tuple of lower and upper bounds for the x, y, and z bins 
+            given as (x0, x1, y0, y1, z0, z1) or a tuple of three arrays 
+            containing the x, y, and z bins.
+        field_names : list
+            Names of the fields to be used as input data for interpolation.
+        truncate : bool
+            If False, an exception is raised if the input values are 
+            outside the bounds of the table.  If True, extrapolation is 
+            performed.
+        
+        Examples
+        --------
+
+        ad = pf.h.all_data()
+        table_data = np.random.random((64, 64, 64))
+        interp = TrilinearFieldInterpolator(table_data,
+                                            (0.0, 1.0, 0.0, 1.0, 0.0, 1.0),
+                                            ["x", "y", "z"],
+                                            truncate=True)
+        field_data = interp(ad)
+        
+        """
         self.table = table.astype('float64')
         self.truncate = truncate
-        x0, x1, y0, y1, z0, z1 = boundaries
         self.x_name, self.y_name, self.z_name = field_names
-        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
-        self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
-        self.z_bins = np.linspace(z0, z1, table.shape[2]).astype('float64')
-
+        if len(boundaries) == 6:
+            x0, x1, y0, y1, z0, z1 = boundaries
+            self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+            self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
+            self.z_bins = np.linspace(z0, z1, table.shape[2]).astype('float64')
+        elif len(boundaries) == 3:
+            if boundaries[0].size != table.shape[0]:
+                mylog.error("X bins array not the same length as the data.")
+                raise ValueError
+            if boundaries[1].size != table.shape[1]:
+                mylog.error("Y bins array not the same length as the data.")
+                raise ValueError
+            if boundaries[2].size != table.shape[2]:
+                mylog.error("Z bins array not the same length as the data.")
+                raise ValueError
+            self.x_bins = boundaries[0]
+            self.y_bins = boundaries[1]
+            self.z_bins = boundaries[2]
+        else:
+            mylog.error("Boundaries must be given as (x0, x1, y0, y1, z0, z1) or as (x_bins, y_bins, z_bins)")
+            raise ValueError
+        
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
         x_vals = data_object[self.x_name].ravel().astype('float64')

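Because the interpolators only index their argument by field name, the
new uneven-bin path can be exercised with a plain dict standing in for a
yt data object, much as the updated tests in test_interpolators.py below
do:

>>> import numpy as np
>>> import yt.utilities.linear_interpolators as lin
>>> bins = np.linspace(0.0, 1.0, 8) + 0.01 * np.random.random(8)
>>> table = np.random.random(8)
>>> interp = lin.UnilinearFieldInterpolator(table, bins, "x", truncate=True)
>>> values = interp({"x": bins})    # recovers table at the bin points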
diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -40,9 +40,10 @@
 mpc_per_au    = 4.84813682e-12
 mpc_per_rsun  = 2.253962e-14
 mpc_per_miles = 5.21552871e-20
+mpc_per_km    = 3.24077929e-20
 mpc_per_cm    = 3.24077929e-25
 kpc_per_cm    = mpc_per_cm / mpc_per_kpc
-km_per_pc     = 1.3806504e13
+km_per_pc     = 3.08567758e13
 km_per_m      = 1e-3
 km_per_cm     = 1e-5
 pc_per_cm     = 3.24077929e-19
@@ -54,6 +55,7 @@
 au_per_mpc    = 1.0 / mpc_per_au
 rsun_per_mpc  = 1.0 / mpc_per_rsun
 miles_per_mpc = 1.0 / mpc_per_miles
+km_per_mpc    = 1.0 / mpc_per_km
 cm_per_mpc    = 1.0 / mpc_per_cm
 cm_per_kpc    = 1.0 / kpc_per_cm
 cm_per_km     = 1.0 / km_per_cm

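The corrected km_per_pc now agrees with the mpc-based constants defined
in this file; a quick consistency check:

>>> mpc_per_km = 3.24077929e-20
>>> km_per_mpc = 1.0 / mpc_per_km
>>> round(km_per_mpc * 1.0e-6 / 3.08567758e13, 6)   # 1 pc = 1e-6 Mpc
1.0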
diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/utilities/tests/test_interpolators.py
--- a/yt/utilities/tests/test_interpolators.py
+++ b/yt/utilities/tests/test_interpolators.py
@@ -7,21 +7,58 @@
 def test_linear_interpolator_1d():
     random_data = np.random.random(64)
     fv = {'x': np.mgrid[0.0:1.0:64j]}
+    # evenly spaced bins
     ufi = lin.UnilinearFieldInterpolator(random_data, (0.0, 1.0), "x", True)
-    assert_array_equal(ufi(fv), random_data)
+    yield assert_array_equal, ufi(fv), random_data
+    
+    # randomly spaced bins
+    size = 64
+    shift = (1. / size) * np.random.random(size) - (0.5 / size)
+    fv["x"] += shift
+    ufi = lin.UnilinearFieldInterpolator(random_data, 
+                                         np.linspace(0.0, 1.0, size) + shift, 
+                                         "x", True)
+    yield assert_array_almost_equal, ufi(fv), random_data, 15
 
 def test_linear_interpolator_2d():
     random_data = np.random.random((64, 64))
+    # evenly spaced bins
     fv = dict((ax, v) for ax, v in zip("xyz",
                np.mgrid[0.0:1.0:64j, 0.0:1.0:64j]))
     bfi = lin.BilinearFieldInterpolator(random_data,
             (0.0, 1.0, 0.0, 1.0), "xy", True)
-    assert_array_equal(bfi(fv), random_data)
+    yield assert_array_equal, bfi(fv), random_data
+
+    # randomly spaced bins
+    size = 64
+    bins = np.linspace(0.0, 1.0, size)
+    shifts = dict((ax, (1. / size) * np.random.random(size) - (0.5 / size)) \
+                  for ax in "xy")
+    fv["x"] += shifts["x"][:, np.newaxis]
+    fv["y"] += shifts["y"]
+    bfi = lin.BilinearFieldInterpolator(random_data,
+            (bins + shifts["x"], bins + shifts["y"]), "xy", True)
+    yield assert_array_almost_equal, bfi(fv), random_data, 15
 
 def test_linear_interpolator_3d():
     random_data = np.random.random((64, 64, 64))
+    # evenly spaced bins
     fv = dict((ax, v) for ax, v in zip("xyz",
                np.mgrid[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j]))
     tfi = lin.TrilinearFieldInterpolator(random_data,
             (0.0, 1.0, 0.0, 1.0, 0.0, 1.0), "xyz", True)
-    assert_array_equal(tfi(fv), random_data)
+    yield assert_array_equal, tfi(fv), random_data
+
+    # randomly spaced bins
+    size = 64
+    bins = np.linspace(0.0, 1.0, size)
+    shifts = dict((ax, (1. / size) * np.random.random(size) - (0.5 / size)) \
+                  for ax in "xyz")
+    fv["x"] += shifts["x"][:, np.newaxis, np.newaxis]
+    fv["y"] += shifts["y"][:, np.newaxis]
+    fv["z"] += shifts["z"]
+    tfi = lin.TrilinearFieldInterpolator(random_data,
+            (bins + shifts["x"], bins + shifts["y"], 
+             bins + shifts["z"]), "xyz", True)
+    yield assert_array_almost_equal, tfi(fv), random_data, 15
+    

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -82,3 +82,4 @@
         self.image = self.axes.imshow(data, origin='lower', extent=extent,
                                       norm=norm, vmin=self.zmin, aspect=aspect,
                                       vmax=self.zmax, cmap=cmap)
+        self.cb = self.figure.colorbar(self.image, self.cax)

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -172,7 +172,7 @@
         
         info['label'] = self.data_source.pf.field_info[item].display_name
         if info['label'] is None:
-            info['label'] = r'$\rm{'+item+r'}$'
+            info['label'] = r'$\rm{'+item.replace('_','\/').title()+r'}$'
         elif info['label'].find('$') == -1:
             info['label'] = info['label'].replace(' ','\/')
             info['label'] = r'$\rm{'+info['label']+r'}$'
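
For illustration, the new fallback turns an underscored field name into
a mathtext label (a sketch of the string transformation only):

    item = "kinetic_energy"
    label = r'$\rm{' + item.replace('_', '\/').title() + r'}$'
    # label == r'$\rm{Kinetic\/Energy}$'; matplotlib's mathtext renders
    # the \/ as a small space between the capitalized words.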

diff -r 7b35c3296567868aaffcde67a574f5e28b797785 -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -472,7 +472,7 @@
 def get_smallest_appropriate_unit(v, pf):
     max_nu = 1e30
     good_u = None
-    for unit in ['mpc','kpc','pc','au','rsun','cm']:
+    for unit in ['mpc', 'kpc', 'pc', 'au', 'rsun', 'km', 'cm']:
         vv = v*pf[unit]
         if vv < max_nu and vv > 1.0:
             good_u = unit
@@ -694,7 +694,7 @@
                                  int(nx), int(ny),
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)
-        self.rv = plot._axes.contour(buff, len(self.clumps)+1,
+        self.rv = plot._axes.contour(buff, np.unique(buff),
                                      extent=extent,**self.plot_args)
         plot._axes.hold(False)
 

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt-3.0/commits/d2de8bb9615a/
Changeset:   d2de8bb9615a
Branch:      yt
User:        jzuhone
Date:        2013-07-31 22:54:24
Summary:     Merged yt_analysis/yt into yt
Affected #:  53 files

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -4,8 +4,16 @@
 juxtaposicion at gmail.com = cemoody at ucsc.edu
 chummels at gmail.com = chummels at astro.columbia.edu
 jwise at astro.princeton.edu = jwise at physics.gatech.edu
-atmyers = atmyers at berkeley.edu
 sam.skillman at gmail.com = samskillman at gmail.com
 casey at thestarkeffect.com = caseywstark at gmail.com
 chiffre = chiffre at posteo.de
 Christian Karch = chiffre at posteo.de
+atmyers at berkeley.edu = atmyers2 at gmail.com
+atmyers = atmyers2 at gmail.com
+drudd = drudd at uchicago.edu
+awetzel = andrew.wetzel at yale.edu
+David Collins (dcollins4096 at gmail.com) = dcollins4096 at gmail.com
+dcollins at physics.ucsd.edu = dcollins4096 at gmail.com
+tabel = tabel at slac.stanford.edu
+sername=kayleanelson = kaylea.nelson at yale.edu
+kayleanelson = kaylea.nelson at yale.edu

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -12,6 +12,7 @@
 yt/utilities/kdtree/forthonf2c.h
 yt/utilities/libconfig_wrapper.c
 yt/utilities/spatial/ckdtree.c
+yt/utilities/lib/amr_kdtools.c
 yt/utilities/lib/CICDeposit.c
 yt/utilities/lib/ContourFinding.c
 yt/utilities/lib/DepthFirstOctree.c

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -10,14 +10,15 @@
 # subversion checkout of yt, you can set YT_DIR, too.  (It'll already
 # check the current directory and one up.
 #
-# And, feel free to drop me a line: matthewturk at gmail.com
+# If you experience problems, please visit the Help section at 
+# http://yt-project.org.
 #
 
 DEST_SUFFIX="yt-`uname -m`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
 BRANCH="yt" # This is the branch to which we will forcibly update.
 
-if [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
+if [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
 then
     DEST_DIR=${YT_DEST}
 fi
@@ -97,6 +98,48 @@
 
 LOG_FILE="${DEST_DIR}/yt_install.log"
 
+function write_config
+{
+    CONFIG_FILE=${DEST_DIR}/.yt_config
+
+    echo INST_HG=${INST_HG} > ${CONFIG_FILE}
+    echo INST_ZLIB=${INST_ZLIB} >> ${CONFIG_FILE}
+    echo INST_BZLIB=${INST_BZLIB} >> ${CONFIG_FILE}
+    echo INST_PNG=${INST_PNG} >> ${CONFIG_FILE}
+    echo INST_FTYPE=${INST_FTYPE} >> ${CONFIG_FILE}
+    echo INST_ENZO=${INST_ENZO} >> ${CONFIG_FILE}
+    echo INST_SQLITE3=${INST_SQLITE3} >> ${CONFIG_FILE}
+    echo INST_PYX=${INST_PYX} >> ${CONFIG_FILE}
+    echo INST_0MQ=${INST_0MQ} >> ${CONFIG_FILE}
+    echo INST_ROCKSTAR=${INST_ROCKSTAR} >> ${CONFIG_FILE}
+    echo INST_SCIPY=${INST_SCIPY} >> ${CONFIG_FILE}
+    echo YT_DIR=${YT_DIR} >> ${CONFIG_FILE}
+    echo MPL_SUPP_LDFLAGS=${MPL_SUPP_LDFLAGS} >> ${CONFIG_FILE}
+    echo MPL_SUPP_CFLAGS=${MPL_SUPP_CFLAGS} >> ${CONFIG_FILE}
+    echo MPL_SUPP_CXXFLAGS=${MPL_SUPP_CXXFLAGS} >> ${CONFIG_FILE}
+    echo MAKE_PROCS=${MAKE_PROCS} >> ${CONFIG_FILE}
+    if [ ${HDF5_DIR} ]
+    then
+        echo ${HDF5_DIR} >> ${CONFIG_FILE}
+    fi
+    if [ ${NUMPY_ARGS} ]
+    then
+        echo ${NUMPY_ARGS} >> ${CONFIG_FILE}
+    fi
+}
+
+# Write config settings to file.
+CONFIG_FILE=${DEST_DIR}/.yt_config
+mkdir -p ${DEST_DIR}
+if [ -z ${REINST_YT} ] || [ ${REINST_YT} -ne 1 ]
+then
+    write_config
+elif [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -f ${CONFIG_FILE} ]
+then
+    USED_CONFIG=1
+    source ${CONFIG_FILE}
+fi
+
 function get_willwont
 {
     if [ $1 -eq 1 ]
@@ -375,6 +418,10 @@
 get_willwont ${INST_0MQ}
 echo "be installing ZeroMQ"
 
+printf "%-15s = %s so I " "INST_ROCKSTAR" "${INST_ROCKSTAR}"
+get_willwont ${INST_ROCKSTAR}
+echo "be installing Rockstar"
+
 echo
 
 if [ -z "$HDF5_DIR" ]
@@ -396,6 +443,12 @@
 echo "hit Ctrl-C."
 echo
 host_specific
+if [ ${USED_CONFIG} ]
+then
+    echo "Settings were loaded from ${CONFIG_FILE}."
+    echo "Remove this file if you wish to return to the default settings."
+    echo
+fi
 echo "========================================================================"
 echo
 read -p "[hit enter] "

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -1,6 +1,6 @@
 #!python
 import os, re
-from distutils import version
+from distutils.version import LooseVersion
 from yt.mods import *
 from yt.data_objects.data_containers import AMRData
 namespace = locals().copy()
@@ -23,10 +23,12 @@
     code.interact(doc, None, namespace)
     sys.exit()
 
-if version.LooseVersion(IPython.__version__) <= version.LooseVersion('0.10'):
+if LooseVersion(IPython.__version__) <= LooseVersion('0.10'):
     api_version = '0.10'
+elif LooseVersion(IPython.__version__) <= LooseVersion('1.0'):
+    api_version = '0.11'
 else:
-    api_version = '0.11'
+    api_version = '1.0'
 
 if api_version == "0.10" and "DISPLAY" in os.environ:
     from matplotlib import rcParams
@@ -42,13 +44,18 @@
         ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
 elif api_version == "0.10":
     ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
-elif api_version == "0.11":
-    from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
+else:
+    if api_version == "0.11":
+        from IPython.frontend.terminal.interactiveshell import \
+            TerminalInteractiveShell
+    elif api_version == "1.0":
+        from IPython.terminal.interactiveshell import TerminalInteractiveShell
+    else:
+        raise RuntimeError
     ip_shell = TerminalInteractiveShell(user_ns=namespace, banner1 = doc,
                     display_banner = True)
     if "DISPLAY" in os.environ: ip_shell.enable_pylab(import_all=False)
-else:
-    raise RuntimeError
+
 
 # The rest is a modified version of the IPython default profile code
 
@@ -77,7 +84,7 @@
     ip = ip_shell.IP.getapi()
     try_next = IPython.ipapi.TryNext
     kwargs = dict(sys_exit=1, banner=doc)
-elif api_version == "0.11":
+elif api_version in ("0.11", "1.0"):
     ip = ip_shell
     try_next = IPython.core.error.TryNext
     kwargs = dict()
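
The version branching above reduces to ordered LooseVersion comparisons;
a standalone sketch of the selection logic:

    from distutils.version import LooseVersion

    def ipython_api_version(v):
        if LooseVersion(v) <= LooseVersion('0.10'):
            return '0.10'
        elif LooseVersion(v) <= LooseVersion('1.0'):
            return '0.11'
        return '1.0'

    assert ipython_api_version('0.13.2') == '0.11'
    assert ipython_api_version('1.1.0') == '1.0'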

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,8 +6,11 @@
 import subprocess
 import shutil
 import glob
-import distribute_setup
-distribute_setup.use_setuptools()
+import setuptools
+from distutils.version import StrictVersion
+if StrictVersion(setuptools.__version__) < StrictVersion('0.7.0'):
+    import distribute_setup
+    distribute_setup.use_setuptools()
 
 from distutils.command.build_py import build_py
 from numpy.distutils.misc_util import appendpath
@@ -153,8 +156,6 @@
 # End snippet
 ######
 
-import setuptools
-
 VERSION = "2.6dev"
 
 if os.path.exists('MANIFEST'):

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -83,3 +83,26 @@
 """
 
 __version__ = "2.5-dev"
+
+def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
+    import nose, os, sys
+    from yt.config import ytcfg
+    nose_argv = sys.argv
+    nose_argv += ['--exclude=answer_testing','--detailed-errors']
+    if verbose:
+        nose_argv.append('-v')
+    if run_answer_tests:
+        nose_argv.append('--with-answer-testing')
+    if answer_big_data:
+        nose_argv.append('--answer-big-data')
+    log_suppress = ytcfg.getboolean("yt","suppressStreamLogging")
+    ytcfg["yt","suppressStreamLogging"] = 'True'
+    initial_dir = os.getcwd()
+    yt_file = os.path.abspath(__file__)
+    yt_dir = os.path.dirname(yt_file)
+    os.chdir(yt_dir)
+    try:
+        nose.run(argv=nose_argv)
+    finally:
+        os.chdir(initial_dir)
+        ytcfg["yt","suppressStreamLogging"] = log_suppress

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -143,10 +143,10 @@
             return self.CoM
         pm = self["ParticleMassMsun"]
         c = {}
-        c[0] = self["particle_position_x"]
-        c[1] = self["particle_position_y"]
-        c[2] = self["particle_position_z"]
-        c_vec = np.zeros(3)
+        # We shift into a box where the origin is the left edge
+        c[0] = self["particle_position_x"] - self.pf.domain_left_edge[0]
+        c[1] = self["particle_position_y"] - self.pf.domain_left_edge[1]
+        c[2] = self["particle_position_z"] - self.pf.domain_left_edge[2]
         com = []
         for i in range(3):
             # A halo is likely periodic around a boundary if the distance 
@@ -159,13 +159,12 @@
                 com.append(c[i])
                 continue
             # Now we want to flip around only those close to the left boundary.
-            d_left = c[i] - self.pf.domain_left_edge[i]
-            sel = (d_left <= (self.pf.domain_width[i]/2))
+            sel = (c[i] <= (self.pf.domain_width[i]/2))
             c[i][sel] += self.pf.domain_width[i]
             com.append(c[i])
         com = np.array(com)
         c = (com * pm).sum(axis=1) / pm.sum()
-        return c%self.pf.domain_width
+        return c%self.pf.domain_width + self.pf.domain_left_edge
 
     def maximum_density(self):
         r"""Return the HOP-identified maximum density. Not applicable to

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -212,7 +212,7 @@
             dis[self.num_sigma_bins-i-3] += dis[self.num_sigma_bins-i-2]
             if i == (self.num_sigma_bins - 3): break
 
-        self.dis = dis  / self.pf['CosmologyComovingBoxSize']**3.0 * self.hubble0**3.0
+        self.dis = dis  / (self.pf.domain_width * self.pf.units["mpccm"]).prod()
 
     def sigmaM(self):
         """

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -158,7 +158,8 @@
         self.layers.append(base_layer)
         self.cell_count += np.product(pf.domain_dimensions)
 
-        for grid in pf.h.grids:
+        sorted_grids = sorted(pf.h.grids, key=lambda x: x.Level)
+        for grid in sorted_grids:
             if grid.Level <= self.max_level:
                 self._add_grid_to_layers(grid)
 
@@ -232,11 +233,11 @@
             if p == 0:
                 ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
             else:
-                LE = np.zeros(3)
+                parent_LE = np.zeros(3)
                 for potential_parent in self.layers:
                     if potential_parent.id == p:
-                        LE = potential_parent.LeftEdge
-                ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
+                        parent_LE = potential_parent.LeftEdge
+                ind = (layer.LeftEdge - parent_LE) / (2.0*dds) + 1
             ix  = int(ind[0]+0.5)
             iy  = int(ind[1]+0.5)
             iz  = int(ind[2]+0.5)

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -28,7 +28,7 @@
 import ConfigParser, os, os.path, types
 
 ytcfgDefaults = dict(
-    serialize = 'True',
+    serialize = 'False',
     onlydeserialize = 'False',
     timefunctions = 'False',
     logfile = 'False',
@@ -62,7 +62,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold008',
+    gold_standard_filename = 'gold009',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None'
     )

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3121,6 +3121,8 @@
     In-line extracted regions accept a base region and a set of field_cuts to
     determine which points in a grid should be included.
     """
+    _type_name = "cut_region"
+    _con_args = ("_base_region", "_field_cuts")
     def __init__(self, base_region, field_cuts, **kwargs):
         cen = base_region.get_field_parameter("center")
         AMR3DData.__init__(self, center=cen,
@@ -3701,7 +3703,8 @@
         self.left_edge = np.array(left_edge)
         self.level = level
         rdx = self.pf.domain_dimensions*self.pf.refine_by**level
-        self.dds = self.pf.domain_width/rdx.astype("float64")
+        rdx[np.where(dims - 2 * num_ghost_zones <= 1)] = 1   # issue 602
+        self.dds = self.pf.domain_width / rdx.astype("float64")
         self.ActiveDimensions = np.array(dims, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
         self._num_ghost_zones = num_ghost_zones

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/data_objects/setup.py
--- a/yt/data_objects/setup.py
+++ b/yt/data_objects/setup.py
@@ -9,5 +9,6 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('data_objects', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
+    config.add_subpackage("tests")
     #config.make_svn_version_py()
     return config

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -57,14 +57,20 @@
     def __new__(cls, filename=None, *args, **kwargs):
         if not isinstance(filename, types.StringTypes):
             obj = object.__new__(cls)
-            obj.__init__(filename, *args, **kwargs)
+            # The Stream frontend uses a StreamHandler object to pass metadata
+            # to __init__.
+            is_stream = (hasattr(filename, 'get_fields') and
+                         hasattr(filename, 'get_particle_type'))
+            if not is_stream:
+                obj.__init__(filename, *args, **kwargs)
             return obj
         apath = os.path.abspath(filename)
         if not os.path.exists(apath): raise IOError(filename)
         if apath not in _cached_pfs:
             obj = object.__new__(cls)
-            _cached_pfs[apath] = obj
-        return _cached_pfs[apath]
+            if obj._skip_cache is False:
+                _cached_pfs[apath] = obj
+        return obj
 
     def __init__(self, filename, data_style=None, file_style=None):
         """
@@ -132,6 +138,10 @@
     def _mrep(self):
         return MinimalStaticOutput(self)
 
+    @property
+    def _skip_cache(self):
+        return False
+
     def hub_upload(self):
         self._mrep.upload()
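
A condensed, standalone sketch of the caching pattern (not yt's exact
code): instances are cached by key unless the class opts out via
_skip_cache.

    class Cached(object):
        _cache = {}
        _skip_cache = False

        def __new__(cls, key):
            if key in cls._cache:
                return cls._cache[key]
            obj = object.__new__(cls)
            if not cls._skip_cache:
                cls._cache[key] = obj
            return obj

    class Uncached(Cached):
        _skip_cache = True

    assert Cached("a") is Cached("a")
    assert Uncached("b") is not Uncached("b")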
 

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -801,6 +801,8 @@
         rdw = radius.copy()
     for i, ax in enumerate('xyz'):
         np.subtract(data["%s%s" % (field_prefix, ax)], center[i], r)
+        if data.pf.dimensionality < i+1:
+            break
         if data.pf.periodicity[i] == True:
             np.abs(r, r)
             np.subtract(r, DW[i], rdw)
@@ -1082,7 +1084,7 @@
 
     return get_sph_r_component(Bfields, theta, phi, normal)
 
-add_field("BRadial", function=_BPoloidal,
+add_field("BRadial", function=_BRadial,
           units=r"\rm{Gauss}",
           validators=[ValidateParameter("normal")])
 
@@ -1415,7 +1417,7 @@
     domegax_dt = data["VorticityX"] / data["VorticityGrowthX"]
     domegay_dt = data["VorticityY"] / data["VorticityGrowthY"]
     domegaz_dt = data["VorticityZ"] / data["VorticityGrowthZ"]
-    return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt)
+    return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
 add_field("VorticityGrowthTimescale", function=_VorticityGrowthTimescale,
           validators=[ValidateSpatial(1, 
                       ["x-velocity", "y-velocity", "z-velocity"])],

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -309,6 +309,10 @@
         self.grid_left_edge = np.round(self.parameter_file.domain_left_edge + dx*glis, decimals=6)
         self.grid_dimensions = gdims.astype("int32")
         self.grid_right_edge = np.round(self.grid_left_edge + dx*self.grid_dimensions, decimals=6)
+        if self.parameter_file.dimensionality <= 2:
+            self.grid_right_edge[:,2] = self.parameter_file.domain_right_edge[2]
+        if self.parameter_file.dimensionality == 1:
+            self.grid_right_edge[:,1:] = self.parameter_file.domain_right_edge[1:]
         self.grid_particle_count = np.zeros([self.num_grids, 1], dtype='int64')
 
     def _populate_grid_objects(self):
@@ -335,7 +339,9 @@
     _data_style = "athena"
 
     def __init__(self, filename, data_style='athena',
-                 storage_filename=None, parameters={}):
+                 storage_filename=None, parameters=None):
+        if parameters is None:
+            parameters = {}
         self.specified_parameters = parameters
         StaticOutput.__init__(self, filename, data_style)
         self.filename = filename
@@ -467,6 +473,10 @@
             pass
         return False
 
+    @property
+    def _skip_cache(self):
+        return True
+
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
 

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -128,15 +128,16 @@
     if "pressure" in data.pf.field_info:
         return data["pressure"]/(data.pf["Gamma"]-1.0)/data["density"]
     else:
-        return (data["total_energy"] - 
-                0.5*(data["cell_centered_B_x"]**2 +
-                     data["cell_centered_B_y"]**2 +
-                     data["cell_centered_B_z"]**2) - 
-                0.5*(data["momentum_x"]**2 +
-                     data["momentum_y"]**2 +
-                     data["momentum_z"]**2)/data["density"])/data["density"]
+        eint = data["total_energy"] - 0.5*(data["momentum_x"]**2 +
+                                           data["momentum_y"]**2 +
+                                           data["momentum_z"]**2)/data["density"]
+        if "cell_centered_B_x" in data.pf.field_info:
+            eint -= 0.5*(data["cell_centered_B_x"]**2 +
+                         data["cell_centered_B_y"]**2 +
+                         data["cell_centered_B_z"]**2)
+        return eint/data["density"]
 add_field("Gas_Energy", function=_gasenergy, take_log=False,
-          units=r"\rm{erg}/\rm{g}")
+          convert_function=_convertEnergy, units=r"\rm{erg}/\rm{g}")
 
 def _convertPressure(data) :
     return data.convert("Density")*data.convert("x-velocity")**2
@@ -144,15 +145,17 @@
     if "pressure" in data.pf.field_info:
         return data["pressure"]
     else:
-        return (data["total_energy"] -
-                0.5*(data["cell_centered_B_x"]**2 +
-                     data["cell_centered_B_y"]**2 +
-                     data["cell_centered_B_z"]**2) -
-                0.5*(data["momentum_x"]**2 +
-                     data["momentum_y"]**2 +
-                     data["momentum_z"]**2)/data["density"])*(data.pf["Gamma"]-1.0)
-add_field("Pressure", function=_pressure, take_log=False, convert_function=_convertPressure,
-          units=r"\rm{erg}/\rm{cm}^3", projected_units=r"\rm{erg}/\rm{cm}^2")
+        eint = data["total_energy"] - 0.5*(data["momentum_x"]**2 +
+                                           data["momentum_y"]**2 +
+                                           data["momentum_z"]**2)/data["density"]
+        if "cell_centered_B_x" in data.pf.field_info:
+            eint -= 0.5*(data["cell_centered_B_x"]**2 +
+                         data["cell_centered_B_y"]**2 +
+                         data["cell_centered_B_z"]**2)
+        return eint*(data.pf["Gamma"]-1.0)
+add_field("Pressure", function=_pressure, take_log=False,
+          convert_function=_convertPressure, units=r"\rm{erg}/\rm{cm}^3",
+          projected_units=r"\rm{erg}/\rm{cm}^2")
 
 def _temperature(field, data):
     if data.has_field_parameter("mu"):
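
Both fields now share the same internal-energy expression,
e_int = E_tot - |momentum|**2/(2*density) - |B|**2/2, with the magnetic
term subtracted only when cell-centered B fields are present.  A sketch
with plain arrays:

    def internal_energy(E_tot, momentum, density, B=None):
        eint = E_tot - 0.5*sum(m**2 for m in momentum)/density
        if B is not None:
            eint -= 0.5*sum(b**2 for b in B)
        return eint

    # Then: Pressure = eint*(Gamma - 1) and Gas_Energy = eint/density.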

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -608,7 +608,7 @@
         self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
         self.dimensionality = self.parameters["TopGridRank"]
         self.periodicity = ensure_tuple(self.fparameters['castro.lo_bc'] == 0)
-        self.domain_dimensions = self.parameters["TopGridDimensions"]
+        self.domain_dimensions = np.array(self.parameters["TopGridDimensions"])
         self.refine_by = self.parameters.get("RefineBy", 2)
 
         if (self.parameters.has_key("ComovingCoordinates") and

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -163,10 +163,12 @@
                         "angmomen_y",
                         "angmomen_z",
                         "mlast",
+                        "r",
                         "mdeut",
                         "n",
                         "mdot",
                         "burnstate",
+                        "luminosity",
                         "id"]
 
 for pf in _particle_field_list:

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -77,6 +77,15 @@
         parses the Orion Star Particle text files
              
         """
+
+        fn = grid.pf.fullplotdir[:-4] + "sink"
+
+        # Figure out the format of the particle file
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+        line = lines[1]
+
+        # The basic fields that all sink particles have
         index = {'particle_mass': 0,
                  'particle_position_x': 1,
                  'particle_position_y': 2,
@@ -87,15 +96,38 @@
                  'particle_angmomen_x': 7,
                  'particle_angmomen_y': 8,
                  'particle_angmomen_z': 9,
-                 'particle_mlast': 10,
-                 'particle_mdeut': 11,
-                 'particle_n': 12,
-                 'particle_mdot': 13,
-                 'particle_burnstate': 14,
-                 'particle_id': 15}
+                 'particle_id': -1}
 
+        if len(line.strip().split()) == 11:
+            # these are vanilla sinks, do nothing
+            pass  
+
+        elif len(line.strip().split()) == 17:
+            # these are old-style stars, add stellar model parameters
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14
+            index['particle_burnstate'] = 15
+
+        elif len(line.strip().split()) == 18:
+            # these are the newer style, add luminosity as well
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14
+            index['particle_burnstate'] = 15
+            index['particle_luminosity'] = 16
+
+        else:
+            # give a warning if none of the above apply:
+            mylog.warning('Could not determine the format of the particle output file.')
+            mylog.warning('These results could be nonsense!')
+            
         def read(line, field):
-            return float(line.split(' ')[index[field]])
+            return float(line.strip().split(' ')[index[field]])
 
         fn = grid.pf.fullplotdir[:-4] + "sink"
         with open(fn, 'r') as f:
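
The format detection keys on the column count of the second line of the
sink file; a sketch of the branching (column layouts as listed above):

    def sink_file_flavor(line):
        n = len(line.strip().split())
        if n == 11:
            return "vanilla sink"
        elif n == 17:
            return "old-style star"    # adds mlast ... burnstate
        elif n == 18:
            return "new-style star"    # adds luminosity as well
        return "unknown"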

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -37,7 +37,8 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
-    mh
+    mh, \
+    kpc_per_cm
 from yt.funcs import *
 
 import yt.utilities.lib as amr_utils
@@ -77,6 +78,7 @@
 
 def _convertCellMassMsun(data):
     return 5.027854e-34 # g^-1
+
 def _ConvertNumberDensity(data):
     return 1.0/mh
 
@@ -315,6 +317,10 @@
 
 def _convertDensity(data):
     return data.convert("Density")
+
+def _convertCmToKpc(data):
+    return 1/(kpc_per_cm)**3
+
 for field in ["Density"] + [ "%s_Density" % sp for sp in _speciesList ] + \
         ["SN_Colour"]:
     KnownEnzoFields[field]._units = r"\rm{g}/\rm{cm}^3"
@@ -365,8 +371,35 @@
                            np.array(data.ActiveDimensions).astype(np.int32), 
                            np.float64(data['dx']))
     return blank
+
 add_field("star_density", function=_spdensity,
-          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
+          validators=[ValidateSpatial(0)], convert_function=_convertDensity,
+          units = r"\rm{g}/\rm{cm}^3",
+          projected_units = r"\rm{g}/\rm{cm}^2",
+          display_name = "Stellar\/Density")
+
+def _tpdensity(field, data): 
+    blank = np.zeros(data.ActiveDimensions, dtype='float64')
+    if data["particle_position_x"].size == 0: return blank
+    filter = data['particle_type'] == 3 # tracer particles
+    if not filter.any(): return blank
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                           data["particle_position_y"][filter].astype(np.float64),
+                           data["particle_position_z"][filter].astype(np.float64),
+                           np.ones(filter.sum(), dtype="float64"),
+                           np.int64(np.where(filter)[0].size),
+                           blank, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
+    blank /= data['CellVolume']
+    return blank
+
+add_field("tracer_number_density", function=_tpdensity,
+          validators=[ValidateSpatial(0)], convert_function=_convertCmToKpc,
+          units = r"\rm{particles}/\rm{kpc}^3",
+          projected_units = r"\rm{particles}/\rm{kpc}^2",
+          display_name = "Tracer\/Particle\/Number\/Density",
+          projection_conversion='kpc')
 
 def _dmpdensity(field, data):
     blank = np.zeros(data.ActiveDimensions, dtype='float64')
@@ -387,8 +420,12 @@
                            np.array(data.ActiveDimensions).astype(np.int32), 
                            np.float64(data['dx']))
     return blank
+
 add_field("dm_density", function=_dmpdensity,
-          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
+          validators=[ValidateSpatial(0)], convert_function=_convertDensity,
+          units = r"\rm{g}/\rm{cm}^3",
+          projected_units = r"\rm{g}/\rm{cm}^2",
+          display_name = "Dark\/Matter\/Density")
 
 def _cic_particle_field(field, data):
     """

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -458,7 +458,7 @@
         try: 
             self.parameters["usecosmology"]
             self.cosmological_simulation = 1
-            self.current_redshift = self.parameters['redshift']
+            self.current_redshift = 1.0/self.parameters['scalefactor'] - 1.0
             self.omega_lambda = self.parameters['cosmologicalconstant']
             self.omega_matter = self.parameters['omegamatter']
             self.hubble_constant = self.parameters['hubbleconstant']
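
The corrected relation is the standard z = 1/a - 1, so for example a
scale factor of 0.25 gives:

    a = 0.25
    z = 1.0/a - 1.0
    assert z == 3.0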

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -366,5 +366,21 @@
 add_field('nion', function=_nion, take_log=True, units=r"\rm{cm}^{-3}")
 
 def _abar(field, data):
-    return 1.0 / data['sumy']
+    try:
+        return 1.0 / data['sumy']
+    except:
+        pass
+    return data['dens']*Na*kboltz*data['temp']/data['pres']
 add_field('abar', function=_abar, take_log=False)
+	
+
+def _NumberDensity(fields,data) :
+    try:
+        return data["nele"]+data["nion"]
+    except:
+        pass
+    return data['pres']/(data['temp']*kboltz)
+add_field("NumberDensity", function=_NumberDensity,
+        units=r'\rm{cm}^{-3}')
+
+
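
Both fallbacks are applications of the ideal-gas law, p = n*k_B*T, so
abar = dens*Na*kboltz*temp/pres and n = pres/(kboltz*temp).  A sketch in
cgs (the kboltz value here is an assumption, not taken from the diff):

    kboltz = 1.3806488e-16  # erg/K

    def number_density(pres, temp):
        return pres / (kboltz*temp)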

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -74,8 +74,9 @@
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
             self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        if self.pf.data_software != "piernik":
+            if self.pf.dimensionality < 2: self.dds[1] = 1.0
+            if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
 class GDFHierarchy(AMRHierarchy):
@@ -214,6 +215,11 @@
 
     def _parse_parameter_file(self):
         self._handle = h5py.File(self.parameter_filename, "r")
+        if 'data_software' in self._handle['gridded_data_format'].attrs:
+            self.data_software = \
+                self._handle['gridded_data_format'].attrs['data_software']
+        else:
+            self.data_software = "unknown"
         sp = self._handle["/simulation_parameters"].attrs
         self.domain_left_edge = sp["domain_left_edge"][:]
         self.domain_right_edge = sp["domain_right_edge"][:]

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -163,10 +163,12 @@
                         "angmomen_y",
                         "angmomen_z",
                         "mlast",
+                        "r",
                         "mdeut",
                         "n",
                         "mdot",
                         "burnstate",
+                        "luminosity",
                         "id"]
 
 for pf in _particle_field_list:

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/frontends/orion/io.py
--- a/yt/frontends/orion/io.py
+++ b/yt/frontends/orion/io.py
@@ -44,6 +44,17 @@
         parses the Orion Star Particle text files
         
         """
+
+        fn = grid.pf.fullplotdir + "/StarParticles"
+        if not os.path.exists(fn):
+            fn = grid.pf.fullplotdir + "/SinkParticles"
+
+        # Figure out the format of the particle file
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+        line = lines[1]
+        
+        # The basic fields that all sink particles have
         index = {'particle_mass': 0,
                  'particle_position_x': 1,
                  'particle_position_y': 2,
@@ -54,17 +65,39 @@
                  'particle_angmomen_x': 7,
                  'particle_angmomen_y': 8,
                  'particle_angmomen_z': 9,
-                 'particle_mlast': 10,
-                 'particle_mdeut': 11,
-                 'particle_n': 12,
-                 'particle_mdot': 13,
-                 'particle_burnstate': 14,
-                 'particle_id': 15}
+                 'particle_id': -1}
+
+        if len(line.strip().split()) == 11:
+            # these are vanilla sinks, do nothing
+            pass  
+
+        elif len(line.strip().split()) == 17:
+            # these are old-style stars, add stellar model parameters
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14
+            index['particle_burnstate'] = 15
+
+        elif len(line.strip().split()) == 18:
+            # these are the newer style, add luminosity as well
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14
+            index['particle_burnstate'] = 15
+            index['particle_luminosity'] = 16
+
+        else:
+            # give a warning if none of the above apply:
+            mylog.warning('Could not determine the format of the particle output file.')
+            mylog.warning('These results could be nonsense!')
 
         def read(line, field):
-            return float(line.split(' ')[index[field]])
+            return float(line.strip().split(' ')[index[field]])
 
-        fn = grid.pf.fullplotdir + "/StarParticles"
         with open(fn, 'r') as f:
             lines = f.readlines()
             particles = []

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -21,4 +21,9 @@
     config.add_subpackage("castro")
     config.add_subpackage("stream")
     config.add_subpackage("pluto")
+    config.add_subpackage("flash/tests")
+    config.add_subpackage("enzo/tests")
+    config.add_subpackage("orion/tests")
+    config.add_subpackage("stream/tests")
+    config.add_subpackage("chombo/tests")
     return config

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -339,6 +339,10 @@
     def _is_valid(cls, *args, **kwargs):
         return False
 
+    @property
+    def _skip_cache(self):
+        return True
+
 class StreamDictFieldHandler(dict):
 
     @property

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -28,7 +28,7 @@
 import contextlib
 import warnings, struct, subprocess
 import numpy as np
-from distutils import version
+from distutils.version import LooseVersion
 from math import floor, ceil
 
 from yt.utilities.exceptions import *
@@ -260,10 +260,12 @@
     """
 
     import IPython
-    if version.LooseVersion(IPython.__version__) <= version.LooseVersion('0.10'):
+    if LooseVersion(IPython.__version__) <= LooseVersion('0.10'):
         api_version = '0.10'
+    elif LooseVersion(IPython.__version__) <= LooseVersion('1.0'):
+        api_version = '0.11'
     else:
-        api_version = '0.11'
+        api_version = '1.0'
 
     stack = inspect.stack()
     frame = inspect.stack()[num_up]
@@ -281,7 +283,10 @@
         cfg.InteractiveShellEmbed.local_ns = loc
         cfg.InteractiveShellEmbed.global_ns = glo
         IPython.embed(config=cfg, banner2 = __header % dd)
-        from IPython.frontend.terminal.embed import InteractiveShellEmbed
+        if api_version == '0.11':
+            from IPython.frontend.terminal.embed import InteractiveShellEmbed
+        else:
+            from IPython.terminal.embed import InteractiveShellEmbed
         ipshell = InteractiveShellEmbed(config=cfg)
 
     del ipshell
@@ -369,6 +374,20 @@
     if ytcfg.getint("yt", cfg_option) > 0: return
     return func(*args, **kwargs)
 
+def is_root():
+    """
+    This function returns True if it is on the root processor of the
+    topcomm and False otherwise.
+    """
+    from yt.config import ytcfg
+    cfg_option = "__topcomm_parallel_rank"
+    if not ytcfg.getboolean("yt","__parallel"):
+        return True
+    if ytcfg.getint("yt", cfg_option) > 0: 
+        return False
+    return True
+
+
 #
 # Our signal and traceback handling functions
 #
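
Typical usage of the new helper (a sketch):

    from yt.funcs import is_root

    if is_root():
        print "only the root processor of the topcomm reports this"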

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -98,7 +98,17 @@
         if param == "loglevel": # special case
             mylog.setLevel(int(val))
 
-parser = argparse.ArgumentParser(description = 'yt command line arguments')
+class YTParser(argparse.ArgumentParser):
+    def error(self, message):
+        """error(message: string)
+
+        Prints a help message that is more detailed than the argparse default
+        and then exits.
+        """
+        self.print_help(sys.stderr)
+        self.exit(2, '%s: error: %s\n' % (self.prog, message))
+
+parser = YTParser(description = 'yt command line arguments')
 parser.add_argument("--config", action=SetConfigOption,
     help = "Set configuration option, in the form param=value")
 parser.add_argument("--paste", action=SetExceptionHandling,

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/utilities/amr_kdtree/amr_kdtools.py
--- a/yt/utilities/amr_kdtree/amr_kdtools.py
+++ b/yt/utilities/amr_kdtree/amr_kdtools.py
@@ -1,5 +1,5 @@
 """
-AMR kD-Tree Tools 
+AMR kD-Tree Tools
 
 Authors: Samuel Skillman <samskillman at gmail.com>
 Affiliation: University of Colorado at Boulder
@@ -25,435 +25,10 @@
 """
 import numpy as np
 from yt.funcs import *
-from yt.utilities.lib import kdtree_get_choices
-
-def _lchild_id(node_id): return (node_id<<1)
-def _rchild_id(node_id): return (node_id<<1) + 1
-def _parent_id(node_id): return (node_id-1) >> 1
-
-class Node(object):
-    def __init__(self, parent, left, right,
-            left_edge, right_edge, grid_id, node_id):
-        self.left = left
-        self.right = right
-        self.left_edge = left_edge
-        self.right_edge = right_edge
-        self.grid = grid_id
-        self.parent = parent
-        self.id = node_id
-        self.data = None
-        self.split = None
-
-class Split(object):
-    def __init__(self, dim, pos):
-        self.dim = dim
-        self.pos = pos
-
-def should_i_build(node, rank, size):
-    if (node.id < size) or (node.id >= 2*size):
-        return True
-    elif node.id - size == rank:
-        return True
-    else:
-        return False
-
-
-def add_grid(node, gle, gre, gid, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    if kd_is_leaf(node):
-        insert_grid(node, gle, gre, gid, rank, size)
-    else:
-        less_id = gle[node.split.dim] < node.split.pos
-        if less_id:
-            add_grid(node.left, gle, gre,
-                     gid, rank, size)
-
-        greater_id = gre[node.split.dim] > node.split.pos
-        if greater_id:
-            add_grid(node.right, gle, gre,
-                     gid, rank, size)
-
-
-def insert_grid(node, gle, gre, grid_id, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    # If we should continue to split based on parallelism, do so!
-    if should_i_split(node, rank, size):
-        geo_split(node, gle, gre, grid_id, rank, size)
-        return
-
-    if np.all(gle <= node.left_edge) and \
-            np.all(gre >= node.right_edge):
-        node.grid = grid_id
-        assert(node.grid is not None)
-        return
-
-    # Split the grid
-    check = split_grid(node, gle, gre, grid_id, rank, size)
-    # If check is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if check == -1:
-        node.grid = None
-    return
-
-
-def add_grids(node, gles, gres, gids, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    if kd_is_leaf(node):
-        insert_grids(node, gles, gres, gids, rank, size)
-    else:
-        less_ids = gles[:,node.split.dim] < node.split.pos
-        if len(less_ids) > 0:
-            add_grids(node.left, gles[less_ids], gres[less_ids],
-                      gids[less_ids], rank, size)
-
-        greater_ids = gres[:,node.split.dim] > node.split.pos
-        if len(greater_ids) > 0:
-            add_grids(node.right, gles[greater_ids], gres[greater_ids],
-                      gids[greater_ids], rank, size)
-
-
-def should_i_split(node, rank, size):
-    return node.id < size
-
-
-def geo_split_grid(node, gle, gre, grid_id, rank, size):
-    big_dim = np.argmax(gre-gle)
-    new_pos = (gre[big_dim] + gle[big_dim])/2.
-    old_gre = gre.copy()
-    new_gle = gle.copy()
-    new_gle[big_dim] = new_pos
-    gre[big_dim] = new_pos
-
-    split = Split(big_dim, new_pos)
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grid(node.left, gle, gre,
-                grid_id, rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grid(node.right, new_gle, old_gre,
-                grid_id, rank, size)
-    return
-
-
-def geo_split(node, gles, gres, grid_ids, rank, size):
-    big_dim = np.argmax(gres[0]-gles[0])
-    new_pos = (gres[0][big_dim] + gles[0][big_dim])/2.
-    old_gre = gres[0].copy()
-    new_gle = gles[0].copy()
-    new_gle[big_dim] = new_pos
-    gres[0][big_dim] = new_pos
-    gles = np.append(gles, np.array([new_gle]), axis=0)
-    gres = np.append(gres, np.array([old_gre]), axis=0)
-    grid_ids = np.append(grid_ids, grid_ids, axis=0)
-
-    split = Split(big_dim, new_pos)
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grids(node.left, gles[:1], gres[:1],
-            grid_ids[:1], rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grids(node.right, gles[1:], gres[1:],
-            grid_ids[1:], rank, size)
-    return
-
-def insert_grids(node, gles, gres, grid_ids, rank, size):
-    if not should_i_build(node, rank, size) or grid_ids.size == 0:
-        return
-
-    if len(grid_ids) == 1:
-        # If we should continue to split based on parallelism, do so!
-        if should_i_split(node, rank, size):
-            geo_split(node, gles, gres, grid_ids, rank, size)
-            return
-
-        if np.all(gles[0] <= node.left_edge) and \
-                np.all(gres[0] >= node.right_edge):
-            node.grid = grid_ids[0]
-            assert(node.grid is not None)
-            return
-
-    # Split the grids
-    check = split_grids(node, gles, gres, grid_ids, rank, size)
-    # If check is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if check == -1:
-        node.grid = None
-    return
-
-def split_grid(node, gle, gre, grid_id, rank, size):
-    # Find a Split
-    data = np.array([(gle[:], gre[:])],  copy=False)
-    best_dim, split_pos, less_id, greater_id = \
-        kdtree_get_choices(data, node.left_edge, node.right_edge)
-
-    # If best_dim is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if best_dim == -1:
-        return -1
-
-    split = Split(best_dim, split_pos)
-
-    del data, best_dim, split_pos
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    if less_id:
-        insert_grid(node.left, gle, gre,
-                     grid_id, rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    if greater_id:
-        insert_grid(node.right, gle, gre,
-                     grid_id, rank, size)
-
-    return
-
-
-def split_grids(node, gles, gres, grid_ids, rank, size):
-    # Find a Split
-    data = np.array([(gles[i,:], gres[i,:]) for i in
-        xrange(grid_ids.shape[0])], copy=False)
-    best_dim, split_pos, less_ids, greater_ids = \
-        kdtree_get_choices(data, node.left_edge, node.right_edge)
-
-    # If best_dim is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if best_dim == -1:
-        return -1
-
-    split = Split(best_dim, split_pos)
-
-    del data, best_dim, split_pos
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grids(node.left, gles[less_ids], gres[less_ids],
-                 grid_ids[less_ids], rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grids(node.right, gles[greater_ids], gres[greater_ids],
-                 grid_ids[greater_ids], rank, size)
-
-    return
-
-def new_right(Node, split):
-    new_right = Node.right_edge.copy()
-    new_right[split.dim] = split.pos
-    return new_right
-
-def new_left(Node, split):
-    new_left = Node.left_edge.copy()
-    new_left[split.dim] = split.pos
-    return new_left
-
-def divide(node, split):
-    # Create a Split
-    node.split = split
-    node.left = Node(node, None, None,
-            node.left_edge, new_right(node, split), node.grid,
-                     _lchild_id(node.id))
-    node.right = Node(node, None, None,
-            new_left(node, split), node.right_edge, node.grid,
-                      _rchild_id(node.id))
-    return
-
-def kd_sum_volume(node):
-    if (node.left is None) and (node.right is None):
-        if node.grid is None:
-            return 0.0
-        return np.prod(node.right_edge - node.left_edge)
-    else:
-        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-
-def kd_sum_cells(node):
-    if (node.left is None) and (node.right is None):
-        if node.grid is None:
-            return 0.0
-        return np.prod(node.right_edge - node.left_edge)
-    else:
-        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-
-
-def kd_node_check(node):
-    assert (node.left is None) == (node.right is None)
-    if (node.left is None) and (node.right is None):
-        if node.grid is not None:
-            return np.prod(node.right_edge - node.left_edge)
-        else: return 0.0
-    else:
-        return kd_node_check(node.left)+kd_node_check(node.right)
-
-def kd_is_leaf(node):
-    has_l_child = node.left is None
-    has_r_child = node.right is None
-    assert has_l_child == has_r_child
-    return has_l_child
-
-def step_depth(current, previous):
-    '''
-    Takes a single step in the depth-first traversal
-    '''
-    if kd_is_leaf(current): # At a leaf, move back up
-        previous = current
-        current = current.parent
-
-    elif current.parent is previous: # Moving down, go left first
-        previous = current
-        if current.left is not None:
-            current = current.left
-        elif current.right is not None:
-            current = current.right
-        else:
-            current = current.parent
-
-    elif current.left is previous: # Moving up from left, go right 
-        previous = current
-        if current.right is not None:
-            current = current.right
-        else:
-            current = current.parent
-
-    elif current.right is previous: # Moving up from right child, move up
-        previous = current
-        current = current.parent
-
-    return current, previous
-
-def depth_traverse(tree, max_node=None):
-    '''
-    Yields a depth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    if max_node is None:
-        max_node = np.inf
-    while current is not None:
-        yield current
-        current, previous = step_depth(current, previous)
-        if current is None: break
-        if current.id >= max_node:
-            current = current.parent
-            previous = current.right
-
-def depth_first_touch(tree, max_node=None):
-    '''
-    Yields a depth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    if max_node is None:
-        max_node = np.inf
-    while current is not None:
-        if previous is None or previous.parent != current:
-            yield current
-        current, previous = step_depth(current, previous)
-        if current is None: break
-        if current.id >= max_node:
-            current = current.parent
-            previous = current.right
-
-def breadth_traverse(tree):
-    '''
-    Yields a breadth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    while current is not None:
-        yield current
-        current, previous = step_depth(current, previous)
-
-
-def viewpoint_traverse(tree, viewpoint):
-    '''
-    Yields a viewpoint dependent traversal of the kd-tree.  Starts
-    with nodes furthest away from viewpoint.
-    '''
-
-    current = tree.trunk
-    previous = None
-    while current is not None:
-        yield current
-        current, previous = step_viewpoint(current, previous, viewpoint)
-
-def step_viewpoint(current, previous, viewpoint):
-    '''
-    Takes a single step in the viewpoint based traversal.  Always
-    goes to the node furthest away from viewpoint first.
-    '''
-    if kd_is_leaf(current): # At a leaf, move back up
-        previous = current
-        current = current.parent
-    elif current.split.dim is None: # This is a dead node
-        previous = current
-        current = current.parent
-
-    elif current.parent is previous: # Moving down
-        previous = current
-        if viewpoint[current.split.dim] <= current.split.pos:
-            if current.right is not None:
-                current = current.right
-            else:
-                previous = current.right
-        else:
-            if current.left is not None:
-                current = current.left
-            else:
-                previous = current.left
-
-    elif current.right is previous: # Moving up from right 
-        previous = current
-        if viewpoint[current.split.dim] <= current.split.pos:
-            if current.left is not None:
-                current = current.left
-            else:
-                current = current.parent
-        else:
-            current = current.parent
-
-    elif current.left is previous: # Moving up from left child
-        previous = current
-        if viewpoint[current.split.dim] > current.split.pos:
-            if current.right is not None:
-                current = current.right
-            else:
-                current = current.parent
-        else:
-            current = current.parent
-
-    return current, previous
 
 
 def receive_and_reduce(comm, incoming_rank, image, add_to_front):
-    mylog.debug( 'Receiving image from %04i' % incoming_rank)
+    mylog.debug('Receiving image from %04i' % incoming_rank)
     #mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
     arr2 = comm.recv_array(incoming_rank, incoming_rank).reshape(
         (image.shape[0], image.shape[1], image.shape[2]))
@@ -470,36 +45,24 @@
         np.add(image, front, image)
         return image
 
-    ta = 1.0 - front[:,:,3]
+    ta = 1.0 - front[:, :, 3]
     np.maximum(ta, 0.0, ta)
     # This now does the following calculation, but in a memory
     # conservative fashion
     # image[:,:,i  ] = front[:,:,i] + ta*back[:,:,i]
     image = back.copy()
     for i in range(4):
-        np.multiply(image[:,:,i], ta, image[:,:,i])
+        np.multiply(image[:, :, i], ta, image[:, :, i])
     np.add(image, front, image)
     return image
 
+
 def send_to_parent(comm, outgoing_rank, image):
-    mylog.debug( 'Sending image to %04i' % outgoing_rank)
+    mylog.debug('Sending image to %04i' % outgoing_rank)
     comm.send_array(image, outgoing_rank, tag=comm.rank)
 
+
 def scatter_image(comm, root, image):
-    mylog.debug( 'Scattering from %04i' % root)
+    mylog.debug('Scattering from %04i' % root)
     image = comm.mpi_bcast(image, root=root)
     return image
-
-def find_node(node, pos):
-    """
-    Find the AMRKDTree node enclosing a position
-    """
-    assert(np.all(node.left_edge <= pos))
-    assert(np.all(node.right_edge > pos))
-    while not kd_is_leaf(node):
-        if pos[node.split.dim] < node.split.pos:
-            node = node.left
-        else:
-            node = node.right
-    return node
-
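
For reference, the memory-conservative loop in receive_and_reduce above is
standard front-to-back alpha compositing: image[:,:,i] = front[:,:,i] +
ta*back[:,:,i], with the transmission ta clamped at zero.  A minimal,
self-contained NumPy sketch of the same identity (array and function names
are illustrative, not part of yt):

    import numpy as np

    def composite(front, back):
        # front, back: (N, M, 4) RGBA images; channel 3 is alpha.
        ta = 1.0 - front[:, :, 3]
        np.maximum(ta, 0.0, ta)        # clamp transmission at zero
        image = back.copy()
        for i in range(4):
            # in-place multiply avoids one temporary per channel
            np.multiply(image[:, :, i], ta, image[:, :, i])
        np.add(image, front, image)    # image = front + ta * back
        return image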

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -26,10 +26,13 @@
 from yt.funcs import *
 import numpy as np
 import h5py
-from amr_kdtools import Node, Split, kd_is_leaf, kd_sum_volume, kd_node_check, \
-        depth_traverse, viewpoint_traverse, add_grids, \
-        receive_and_reduce, send_to_parent, scatter_image, find_node, \
-        depth_first_touch, add_grid
+from amr_kdtools import \
+        receive_and_reduce, send_to_parent, scatter_image
+
+from yt.utilities.lib.amr_kdtools import Node, add_pygrids, find_node, \
+        kd_is_leaf, depth_traverse, depth_first_touch, viewpoint_traverse, \
+        kd_traverse, \
+        get_left_edge, get_right_edge, kd_sum_volume, kd_node_check
 from yt.utilities.parallel_tools.parallel_analysis_interface \
     import ParallelAnalysisInterface 
 from yt.utilities.lib.grid_traversal import PartitionedGrid
@@ -67,12 +70,11 @@
         self.comm_rank = comm_rank
         self.comm_size = comm_size
         self.trunk = Node(None, None, None,
-                left, right, None, 1)
+                left, right, -1, 1)
         if grids is None:
-            self.grids = pf.h.region((left+right)/2., left, right)._grids
-        else:
-            self.grids = grids
-        self.build(grids)
+            grids = pf.h.region((left+right)/2., left, right)._grids
+        self.grids = grids
+        self.build(self.grids)
 
     def add_grids(self, grids):
         lvl_range = range(self.min_level, self.max_level+1)
@@ -91,7 +93,8 @@
                     gles = np.array([g.LeftEdge for g in grids])[gmask]
                     gres = np.array([g.RightEdge for g in grids])[gmask]
                     gids = np.array([g.id for g in grids])[gmask]
-                    add_grids(self.trunk, gles, gres, gids, self.comm_rank,
+                    add_pygrids(self.trunk, gids.size, gles, gres, gids, 
+                              self.comm_rank,
                               self.comm_size)
                     grids_added += grids.size
                     del gles, gres, gids, grids
@@ -99,31 +102,36 @@
                     grids_added += grids.size
                     [add_grid(self.trunk, g.LeftEdge, g.RightEdge, g.id,
                               self.comm_rank, self.comm_size) for g in grids]
-        else:
-            gles = np.array([g.LeftEdge for g in grids])
-            gres = np.array([g.RightEdge for g in grids])
-            gids = np.array([g.id for g in grids])
+            return
 
-            add_grids(self.trunk, gles, gres, gids, self.comm_rank, self.comm_size)
-            del gles, gres, gids, grids
+        for lvl in lvl_range:
+            gles = np.array([g.LeftEdge for g in grids if g.Level == lvl])
+            gres = np.array([g.RightEdge for g in grids if g.Level == lvl])
+            gids = np.array([g.id for g in grids if g.Level == lvl],
+                            dtype="int64")
 
+            add_pygrids(self.trunk, len(gids), gles, gres, gids, self.comm_rank, self.comm_size)
+            del gles, gres, gids
 
-    def build(self, grids = None):
+
+    def build(self, grids=None):
         self.add_grids(grids)
 
     def check_tree(self):
-        for node in depth_traverse(self):
-            if node.grid is None:
+        for node in depth_traverse(self.trunk):
+            if node.grid == -1:
                 continue
             grid = self.pf.h.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge
             gre = grid.RightEdge
-            li = np.rint((node.left_edge-gle)/dds).astype('int32')
-            ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+            nle = get_left_edge(node)
+            nre = get_right_edge(node)
+            li = np.rint((nle-gle)/dds).astype('int32')
+            ri = np.rint((nre-gle)/dds).astype('int32')
             dims = (ri - li).astype('int32')
-            assert(np.all(grid.LeftEdge <= node.left_edge))
-            assert(np.all(grid.RightEdge >= node.right_edge))
+            assert(np.all(grid.LeftEdge <= nle))
+            assert(np.all(grid.RightEdge >= nre))
             assert(np.all(dims > 0))
             # print grid, dims, li, ri
 
@@ -134,19 +142,20 @@
 
     def sum_cells(self, all_cells=False):
         cells = 0
-        for node in depth_traverse(self):
-            if node.grid is None:
+        for node in depth_traverse(self.trunk):
+            if node.grid == -1:
                 continue
             if not all_cells and not kd_is_leaf(node):
                 continue
             grid = self.pf.h.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge
-            li = np.rint((node.left_edge-gle)/dds).astype('int32')
-            ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+            nle = get_left_edge(node)
+            nre = get_right_edge(node)
+            li = np.rint((nle-gle)/dds).astype('int32')
+            ri = np.rint((nre-gle)/dds).astype('int32')
             dims = (ri - li).astype('int32')
             cells += np.prod(dims)
-
         return cells
 
 class AMRKDTree(ParallelAnalysisInterface):
@@ -204,14 +213,8 @@
         self._initialized = True
 
     def traverse(self, viewpoint=None):
-        if viewpoint is None:
-            for node in depth_traverse(self.tree):
-                if kd_is_leaf(node) and node.grid is not None:
-                    yield self.get_brick_data(node)
-        else:
-            for node in viewpoint_traverse(self.tree, viewpoint):
-                if kd_is_leaf(node) and node.grid is not None:
-                    yield self.get_brick_data(node)
+        for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
+            yield self.get_brick_data(node)
 
     def get_node(self, nodeid):
         path = np.binary_repr(nodeid)
@@ -232,13 +235,13 @@
         owners = {}
         for bottom_id in range(self.comm.size, 2*self.comm.size):
             temp = self.get_node(bottom_id)
-            owners[temp.id] = temp.id - self.comm.size
+            owners[temp.node_id] = temp.node_id - self.comm.size
             while temp is not None:
                 if temp.parent is None: break
                 if temp == temp.parent.right:
                     break
                 temp = temp.parent
-                owners[temp.id] = owners[temp.left.id]
+                owners[temp.node_id] = owners[temp.left.node_id]
         return owners
 
     def reduce_tree_images(self, image, viewpoint):
@@ -248,33 +251,32 @@
         owners = self.get_reduce_owners()
         node = self.get_node(nprocs + myrank)
 
-        while True:
-            if owners[node.parent.id] == myrank:
-                split = node.parent.split
-                left_in_front = viewpoint[split.dim] < node.parent.split.pos
-                #add_to_front = (left_in_front == (node == node.parent.right))
-                add_to_front = not left_in_front
-                image = receive_and_reduce(self.comm, owners[node.parent.right.id],
-                                  image, add_to_front)
-                if node.parent.id == 1: break
-                else: node = node.parent
-            else:
-                send_to_parent(self.comm, owners[node.parent.id], image)
-                break
-        image = scatter_image(self.comm, owners[1], image)
-        return image
+        while owners[node.parent.node_id] == myrank:
+            split_dim = node.parent.get_split_dim()
+            split_pos = node.parent.get_split_pos()
+            add_to_front = viewpoint[split_dim] >= split_pos
+            image = receive_and_reduce(self.comm,
+                                       owners[node.parent.right.node_id],
+                                       image, add_to_front)
+            if node.parent.node_id == 1: break
+            else: node = node.parent
+        else:
+            send_to_parent(self.comm, owners[node.parent.node_id], image)
+
+        return scatter_image(self.comm, owners[1], image)
 
     def get_brick_data(self, node):
         if node.data is not None: return node.data
         grid = self.pf.h.grids[node.grid - self._id_offset]
         dds = grid.dds
         gle = grid.LeftEdge
-        gre = grid.RightEdge
-        li = np.rint((node.left_edge-gle)/dds).astype('int32')
-        ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+        nle = get_left_edge(node)
+        nre = get_right_edge(node)
+        li = np.rint((nle-gle)/dds).astype('int32')
+        ri = np.rint((nre-gle)/dds).astype('int32')
         dims = (ri - li).astype('int32')
-        assert(np.all(grid.LeftEdge <= node.left_edge))
-        assert(np.all(grid.RightEdge >= node.right_edge))
+        assert(np.all(grid.LeftEdge <= nle))
+        assert(np.all(grid.RightEdge >= nre))
 
         if grid in self.current_saved_grids:
             dds = self.current_vcds[self.current_saved_grids.index(grid)]
@@ -292,8 +294,8 @@
                   li[2]:ri[2]+1].copy() for d in dds]
 
         brick = PartitionedGrid(grid.id, data,
-                                node.left_edge.copy(),
-                                node.right_edge.copy(),
+                                nle.copy(),
+                                nre.copy(),
                                 dims.astype('int64'))
         node.data = brick
         if not self._initialized: self.brick_dimensions.append(dims)
@@ -405,7 +407,7 @@
             self.comm.recv_array(self.comm.rank-1, tag=self.comm.rank-1)
         f = h5py.File(fn,'w')
         for node in depth_traverse(self.tree):
-            i = node.id
+            i = node.node_id
             if node.data is not None:
                 for fi,field in enumerate(self.fields):
                     try:
@@ -426,8 +428,8 @@
         try:
             f = h5py.File(fn,"a")
             for node in depth_traverse(self.tree):
-                i = node.id
-                if node.grid is not None:
+                i = node.node_id
+                if node.grid != -1:
                     data = [f["brick_%s_%s" %
                               (hex(i), field)][:].astype('float64') for field in self.fields]
                     node.data = PartitionedGrid(node.grid.id, data,
@@ -476,32 +478,28 @@
         gridids = []
         splitdims = []
         splitposs = []
-        for node in depth_first_touch(self.tree):
-            nids.append(node.id) 
-            les.append(node.left_edge) 
-            res.append(node.right_edge) 
+        for node in depth_first_touch(self.tree.trunk):
+            nids.append(node.node_id) 
+            les.append(node.get_left_edge()) 
+            res.append(node.get_right_edge()) 
             if node.left is None:
                 leftids.append(-1) 
             else:
-                leftids.append(node.left.id) 
+                leftids.append(node.left.node_id) 
             if node.right is None:
                 rightids.append(-1) 
             else:
-                rightids.append(node.right.id) 
+                rightids.append(node.right.node_id) 
             if node.parent is None:
                 parentids.append(-1) 
             else:
-                parentids.append(node.parent.id) 
+                parentids.append(node.parent.node_id) 
             if node.grid is None:
                 gridids.append(-1) 
             else:
                 gridids.append(node.grid) 
-            if node.split is None:
-                splitdims.append(-1)
-                splitposs.append(np.nan)
-            else:
-                splitdims.append(node.split.dim)
-                splitposs.append(node.split.pos)
+            splitdims.append(node.get_split_dim())
+            splitposs.append(node.get_split_pos())
 
         return nids, parentids, leftids, rightids, les, res, gridids,\
                 splitdims, splitposs
@@ -518,19 +516,23 @@
         N = nids.shape[0]
         for i in xrange(N):
             n = self.get_node(nids[i])
-            n.left_edge = les[i]
-            n.right_edge = res[i]
+            n.set_left_edge(les[i])
+            n.set_right_edge(res[i])
             if lids[i] != -1 and n.left is None:
-                n.left = Node(n, None, None, None,  
-                                      None, None, lids[i])
+                n.left = Node(n, None, None, 
+                              np.zeros(3, dtype='float64'),  
+                              np.zeros(3, dtype='float64'),  
+                              -1, lids[i])
             if rids[i] != -1 and n.right is None:
-                n.right = Node(n, None, None, None, 
-                                      None, None, rids[i])
+                n.right = Node(n, None, None, 
+                               np.zeros(3, dtype='float64'),  
+                               np.zeros(3, dtype='float64'),  
+                               -1, rids[i])
             if gids[i] != -1:
                 n.grid = gids[i]
 
             if splitdims[i] != -1:
-                n.split = Split(splitdims[i], splitposs[i])
+                n.create_split(splitdims[i], splitposs[i])
 
         mylog.info('AMRKDTree rebuilt, Final Volume: %e' % kd_sum_volume(self.tree.trunk))
         return self.tree.trunk
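
The net effect of the refactor above is that the kd-tree node machinery now
lives in Cython (yt.utilities.lib.amr_kdtools): traversals start from
tree.trunk rather than the tree object, attribute access goes through
accessors (node_id, get_left_edge, get_right_edge, get_split_dim,
get_split_pos), and "no grid" is encoded as -1 instead of None.  A minimal
sketch of a leaf walk against the new interface, using only calls that
appear in this diff (leaf_edges is an illustrative helper, not part of yt):

    from yt.utilities.lib.amr_kdtools import \
        depth_traverse, kd_is_leaf, get_left_edge, get_right_edge

    def leaf_edges(trunk):
        # Yield (node_id, left_edge, right_edge) for every leaf that
        # actually holds a grid; node.grid == -1 marks an empty node.
        for node in depth_traverse(trunk):
            if not kd_is_leaf(node) or node.grid == -1:
                continue
            yield node.node_id, get_left_edge(node), get_right_edge(node)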

diff -r 6f00933abb0249c3557ed7eeb7719c42618b2e13 -r d2de8bb9615a5a345e09be63294f85f4062754c1 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1401,7 +1401,7 @@
         tf = ColorTransferFunction((mi-2, ma+2))
         tf.add_layers(n_contours,w=contour_width,col_bounds = (mi,ma), colormap=cmap)
 
-        cam = pf.h.camera(center, L, width, (N,N), transfer_function=tf)
+        cam = pf.h.camera(center, L, width, (N,N), transfer_function=tf, fields=[field])
         image = cam.snapshot()
 
         if args.enhance:
@@ -1456,7 +1456,12 @@
         """
     def __call__(self, args):
         kwargs = {}
-        from IPython.frontend.html.notebook.notebookapp import NotebookApp
+        try:
+            # IPython 1.0+
+            from IPython.html.notebookapp import NotebookApp
+        except ImportError:
+            # pre-IPython v1.0
+            from IPython.frontend.html.notebook.notebookapp import NotebookApp
         pw = ytcfg.get("yt", "notebook_password")
         if len(pw) == 0 and not args.no_password:
             import IPython.lib
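
The notebook fix above is the usual guarded-import idiom for the IPython 1.0
package reorganization: try the new location first and fall back on
ImportError.  Catching ImportError, rather than parsing a version string,
tests the actual module layout, so it also works for development builds.  A
minimal sketch of the same idiom as a reusable helper (the helper name is
illustrative):

    def import_notebook_app():
        try:
            # IPython 1.0+ layout
            from IPython.html.notebookapp import NotebookApp
        except ImportError:
            # pre-1.0 layout
            from IPython.frontend.html.notebook.notebookapp import NotebookApp
        return NotebookApp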

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt-3.0/commits/c2c55742d4d8/
Changeset:   c2c55742d4d8
Branch:      yt
User:        jzuhone
Date:        2013-09-20 16:34:07
Summary:     First pass at making projections with SZpack
Affected #:  4 files

diff -r d2de8bb9615a5a345e09be63294f85f4062754c1 -r c2c55742d4d83bd75de56d74dfe8b22665f1dcdf yt/analysis_modules/SZmaps.py
--- a/yt/analysis_modules/SZmaps.py
+++ /dev/null
@@ -1,97 +0,0 @@
-from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mp
-from yt.data_objects.image_array import ImageArray
-from yt.data_objects.field_info_container import add_field
-import numpy as np
-
-Tcmb = 2.726
-mueinv = 0.875
-
-try:
-    import SZpack
-except:
-    raise ImportError
-
-def _t_squared(field, data):
-    return data["TempkeV"]*data["TempkeV"]
-add_field("TSquared", function=_t_squared)
-
-def _beta_perp_squared(field, data):
-    axis = data.get_field_parameter("axis")
-    if axis == "x":
-	vv = np.sqrt(data["y-velocity"]**2+data["z-velocity"]**2)
-    elif axis == "y":
-	vv = np.sqrt(data["x-velocity"]**2+data["z-velocity"]**2)
-    elif axis == "z":
-	vv = np.sqrt(data["x-velocity"]**2+data["y-velocity"]**2)
-    return vv/clight/clight
-add_field("BetaPerpSquared", function=_beta_perp_squared)
-
-def _beta_par(field, data):
-    axis = data.get_field_parameter("axis")
-    return data["%s-velocity" % (axis)]/clight
-add_field("BetaPar", function=_beta_par)
-
-def _beta_par_squared(field, data):
-    return data["BetaPar"]**2
-add_field("BetaParSquared", function=_beta_par_squared)
-
-def _t_beta_par(field, data):
-    return data["TempkeV"]*data["BetaPar"]
-add_field("TBetaPar", function=_t_beta_par)
-
-vlist = 'xyz'
-
-def SZProjection(pf, axis, freqs, center="c", width=(1, "unitary"), nx=800, ny=800):
-
-    num_freqs = len(freqs)
-    freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs]
-    xo = hcgs*freqs*1.0e9/(kboltz*Tcmb)
-
-    if isinstance(axis, np.ndarray) :
-        frb1["TempkeV"] = off_axis_projection(pf, center, axis, width, nx, field="TempkeV", weight="Density")
-        frb2["Density"] = off_axis_projection(pf, center, axis, width, nx, field="Density")
-    else :
-        if axis in vlist:
-            vfield = "velocity_%s" % (axis)
-            proj1.set_field_parameter("axis", axis)
-        elif axis in xrange(0,3) :
-            vfield = "velocity_%s" % (vlist[axis])
-            proj1.set_field_parameter("axis", vlist[axis])
-        proj1 = pf.h.proj(axis, "TempkeV", weight_field="Density")
-        proj2 = pf.h.proj(axis, "Density")
-        frb1 = proj1.to_frb(width, nx)
-        frb2 = proj2.to_frb(width, ny)
-                    
-    TeSZ = frb1["TempkeV"]
-    omega1 = frb1["Tsquared"]/(TeSZ*TeSZ) - 1.
-    sigma1 = frb1["TBetaPar"]/TeSZ - betac_par
-    kappa1 = frb1["BetaParSquared"] - betac_par
-    
-    frb1["tau"] = sigma_thompson*frb2["Density"]*mueinv/mp
-    frb1["omega1"] = ImageArray(omega1)
-    frb1["kappa1"] = ImageArray(kappa1)
-    frb1["sigma1"] = ImageArray(sigma1)
-
-    SZsignal = np.zeros((num_freqs,nx,ny))
-    omega = np.zeros((3))
-    sigma = np.zeros((3))
-    
-    for i in xrange(nx):
-
-	for j in xrange(ny):
-		
-	    tau = frb1["tau"][i,j]
-	    Te = frb1["TempkeV"][i,j]
-	    bpar = frb1["BetaPar"][i,j]
-	    bperp2 = frb["BetaPerpSquared"][i,j]
-	    omega[0] = frb1["omega1"][i,j]
-	    sigma[0] = frb1["sigma1"][i,j]
-	    kappa = frb1["kappa1"][i,j]
-	
-	    SZsignal[:,i,j] = SZpack.compute_combo_means_ex(xo, tau, Te, bpar, omega,
-							    sigma, kappa, bperp2)
-
-    for i in xrange(num_freqs) :
-	frb1[freq_fields[i]] = ImageArray(SZsignal[i,:,:])
-	
-    return frb1

diff -r d2de8bb9615a5a345e09be63294f85f4062754c1 -r c2c55742d4d83bd75de56d74dfe8b22665f1dcdf yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -118,7 +118,7 @@
     TwoPointFunctions, \
     FcnSet
 
-from .SZmaps import SZprojection
+from .sunyaev_zeldovich.api import SZProjection
 
 from .radmc3d_export.api import \
     RadMC3DWriter

diff -r d2de8bb9615a5a345e09be63294f85f4062754c1 -r c2c55742d4d83bd75de56d74dfe8b22665f1dcdf yt/analysis_modules/sunyaev_zeldovich/api.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/api.py
@@ -0,0 +1,1 @@
+from .projection import SZProjection

diff -r d2de8bb9615a5a345e09be63294f85f4062754c1 -r c2c55742d4d83bd75de56d74dfe8b22665f1dcdf yt/analysis_modules/sunyaev_zeldovich/projection.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -0,0 +1,114 @@
+from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mp
+from yt.data_objects.image_array import ImageArray
+from yt.data_objects.field_info_container import add_field
+from yt.funcs import fix_axis, mylog, iterable, get_pbar
+from yt.utilities.definitions import inv_axis_names
+from yt.visualization.volume_rendering.camera import off_axis_projection
+import numpy as np
+
+Tcmb = 2.726
+
+try:
+    import SZpack
+except ImportError:
+    raise ImportError("SZpack not installed.")
+
+vlist = "xyz"
+nvec = int(0)
+
+def _t_squared(field, data):
+    return data["TempkeV"]*data["TempkeV"]
+add_field("TSquared", function=_t_squared)
+
+def _beta_perp_squared(field, data):
+    return data["VelocityMagnitude"]**2/clight/clight - data["BetaParSquared"]
+add_field("BetaPerpSquared", function=_beta_perp_squared)
+
+def _beta_par(field, data):
+    if iterable(nvec):
+        vpar = (data["x-velocity"]*nvec[0]+
+                data["y-velocity"]*nvec[1]+
+                data["z-velocity"]*nvec[2])
+    else:
+        vpar = data["%s-velocity" % (vlist[nvec])]
+    return vpar/clight
+add_field("BetaPar", function=_beta_par)
+
+def _beta_par_squared(field, data):
+    return data["BetaPar"]**2
+add_field("BetaParSquared", function=_beta_par_squared)
+
+def _t_beta_par(field, data):
+    return data["TempkeV"]*data["BetaPar"]
+add_field("TBetaPar", function=_t_beta_par)
+
+def SZProjection(pf, axis, freqs, center="c", width=(1, "unitary"), nx=800, mue=None):
+
+    global nvec # Ugly!
+    
+    if mue is None:
+        mueinv = 0.875
+    else:
+        mueinv = 1./mue
+        
+    num_freqs = len(freqs)
+    freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs]
+    xinit = hcgs*freqs*1.0e9/(kboltz*Tcmb)
+
+    if isinstance(axis, basestring) or not iterable(axis):
+        axis = fix_axis(axis)
+        nvec = axis
+        proj1 = pf.h.proj(axis, "TempkeV", weight_field="Density")
+        proj2 = pf.h.proj(axis, "Density")
+        frb1 = proj1.to_frb(width, nx)
+        frb2 = proj2.to_frb(width, nx)
+        Te = frb1["TempkeV"]
+        bpar = frb1["BetaPar"]
+        bperp2 = frb1["BetaPerpSquared"]
+        omega1 = frb1["TSquared"]/(Te*Te) - 1.
+        sigma1 = frb1["TBetaPar"]/Te - bpar
+        kappa1 = frb1["BetaParSquared"] - bpar
+        tau = sigma_thompson*frb2["Density"]*mueinv/mp
+    else:
+        nvec = axis
+        if iterable(width):
+            w = width[0]/pf.units[width[1]]
+        else:
+            w = width
+        Te      = off_axis_projection(pf, center, axis, w, nx, "TempkeV", weight="Density")
+        bpar    = off_axis_projection(pf, center, axis, w, nx, "BetaPar", weight="Density")
+        bperp2  = off_axis_projection(pf, center, axis, w, nx, "BetaPerpSquared", weight="Density")
+        omega1  = off_axis_projection(pf, center, axis, w, nx, "TSquared", weight="Density")
+        omega1  = omega1/(Te*Te) - 1.
+        sigma1  = off_axis_projection(pf, center, axis, w, nx, "TBetaPar", weight="Density")
+        sigma1  = sigma1/Te - bpar
+        kappa1  = off_axis_projection(pf, center, axis, w, nx, "BetaParSquared", weight="Density")
+        kappa1 -= bpar
+        tau     = off_axis_projection(pf, center, axis, w, nx, "Density")
+        tau    *= sigma_thompson*mueinv/mp
+        
+    SZsignal = np.zeros((num_freqs,nx,nx))
+    xo = np.zeros((num_freqs))
+    
+    k = int(0)
+
+    pbar = get_pbar("Computing SZ signal.", nx*nx)
+    
+    for i in xrange(nx):
+        for j in xrange(nx):
+            xo[:] = xinit[:]
+            SZpack.compute_combo_means(xo, tau[i,j], Te[i,j],
+                                       bpar[i,j], omega1[i,j],
+                                       sigma1[i,j], kappa1[i,j], bperp2[i,j])
+            SZsignal[:,i,j] = -xo[:]
+            pbar.update(k)
+            k += 1
+
+    pbar.finish()
+    
+    outimg = {}
+    for i in xrange(num_freqs):
+        outimg[freq_fields[i]] = ImageArray(SZsignal[i,:,:])
+
+    return outimg
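
As a usage sketch for this first pass (the dataset path, axis, and
frequencies below are illustrative, SZpack must be importable, and the
function is called directly from its module):

    import numpy as np
    from yt.mods import load
    from yt.analysis_modules.sunyaev_zeldovich.projection import SZProjection

    pf = load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")  # example data
    freqs = np.array([90., 180., 240.])  # observing frequencies in GHz
    images = SZProjection(pf, "z", freqs, width=(1.0, "unitary"), nx=400)
    # images maps "90_GHz", "180_GHz", ... to ImageArrays of the SZ signal.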


https://bitbucket.org/yt_analysis/yt-3.0/commits/fdcac0502890/
Changeset:   fdcac0502890
Branch:      yt
User:        jzuhone
Date:        2013-09-21 19:18:24
Summary:     Merged yt_analysis/yt into yt
Affected #:  448 files

diff -r c2c55742d4d83bd75de56d74dfe8b22665f1dcdf -r fdcac050289069bc6b6ef61949d765a88b3972bb .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -17,3 +17,4 @@
 tabel = tabel at slac.stanford.edu
 sername=kayleanelson = kaylea.nelson at yale.edu
 kayleanelson = kaylea.nelson at yale.edu
+jcforbes at ucsc.edu = jforbes at ucolick.org

diff -r c2c55742d4d83bd75de56d74dfe8b22665f1dcdf -r fdcac050289069bc6b6ef61949d765a88b3972bb .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5156,3 +5156,4 @@
 0000000000000000000000000000000000000000 mpi-opaque
 f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
 0000000000000000000000000000000000000000 hop callback
+079e456c38a87676472a458210077e2be325dc85 last_gplv3

diff -r c2c55742d4d83bd75de56d74dfe8b22665f1dcdf -r fdcac050289069bc6b6ef61949d765a88b3972bb CITATION
--- /dev/null
+++ b/CITATION
@@ -0,0 +1,31 @@
+To cite yt in publications, please use:
+
+Turk, M. J., Smith, B. D., Oishi, J. S., et al. 2011, ApJS, 192, 9
+
+In the body of the text, please add a footnote to the yt webpage:
+
+http://yt-project.org/
+
+For LaTeX and BibTeX users:
+
+\bibitem[Turk et al.(2011)]{2011ApJS..192....9T} Turk, M.~J., Smith, B.~D.,
+Oishi, J.~S., et al.\ 2011, \apjs, 192, 9
+
+@ARTICLE{2011ApJS..192....9T,
+   author = {{Turk}, M.~J. and {Smith}, B.~D. and {Oishi}, J.~S. and {Skory}, S. and
+{Skillman}, S.~W. and {Abel}, T. and {Norman}, M.~L.},
+    title = "{yt: A Multi-code Analysis Toolkit for Astrophysical Simulation Data}",
+  journal = {\apjs},
+archivePrefix = "arXiv",
+   eprint = {1011.3514},
+ primaryClass = "astro-ph.IM",
+ keywords = {cosmology: theory, methods: data analysis, methods: numerical},
+     year = 2011,
+    month = jan,
+   volume = 192,
+      eid = {9},
+    pages = {9},
+      doi = {10.1088/0067-0049/192/1/9},
+   adsurl = {http://adsabs.harvard.edu/abs/2011ApJS..192....9T},
+  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+}

diff -r c2c55742d4d83bd75de56d74dfe8b22665f1dcdf -r fdcac050289069bc6b6ef61949d765a88b3972bb COPYING.txt
--- /dev/null
+++ b/COPYING.txt
@@ -0,0 +1,81 @@
+===============================
+ The yt project licensing terms
+===============================
+
+yt is licensed under the terms of the Modified BSD License (also known as New
+or Revised BSD), as follows:
+
+Copyright (c) 2013-, yt Development Team
+Copyright (c) 2006-2013, Matthew Turk <matthewturk at gmail.com>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice, this
+list of conditions and the following disclaimer in the documentation and/or
+other materials provided with the distribution.
+
+Neither the name of the yt Development Team nor the names of its
+contributors may be used to endorse or promote products derived from this
+software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+About the yt Development Team
+-----------------------------
+
+Matthew Turk began yt in 2006 and remains the project lead.  Over time yt has
+grown to include contributions from a large number of individuals from many
+diverse institutions and scientific and technical backgrounds.
+
+Until the fall of 2013, yt was licensed under the GPLv3.  However, with consent
+from all developers and on a public mailing list, yt has been relicensed under
+the BSD 3-clause under a shared copyright model.  For more information, see:
+http://lists.spacepope.org/pipermail/yt-dev-spacepope.org/2013-July/003239.html
+All versions of yt prior to this licensing change are available under the
+GPLv3; all subsequent versions are available under the BSD 3-clause license.
+
+The yt Development Team is the set of all contributors to the yt project.  This
+includes all of the yt subprojects.
+
+The core team that coordinates development on BitBucket can be found here:
+http://bitbucket.org/yt_analysis/ 
+
+
+Our Copyright Policy
+--------------------
+
+yt uses a shared copyright model. Each contributor maintains copyright
+over their contributions to yt. But, it is important to note that these
+contributions are typically only changes to the repositories. Thus, the yt
+source code, in its entirety is not the copyright of any single person or
+institution.  Instead, it is the collective copyright of the entire yt
+Development Team.  If individual contributors want to maintain a record of what
+changes/contributions they have specific copyright on, they should indicate
+their copyright in the commit message of the change, when they commit the
+change to one of the yt repositories.
+
+With this in mind, the following banner should be used in any source code file
+to indicate the copyright and license terms:
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r c2c55742d4d83bd75de56d74dfe8b22665f1dcdf -r fdcac050289069bc6b6ef61949d765a88b3972bb CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -1,51 +1,55 @@
-YT is a group effort.
+yt is a group effort.
 
-Contributors:                   Tom Abel (tabel at stanford.edu)
-				David Collins (dcollins at physics.ucsd.edu)
-				Brian Crosby (crosby.bd at gmail.com)
-				Andrew Cunningham (ajcunn at gmail.com)
-				Nathan Goldbaum (goldbaum at ucolick.org)
-				Markus Haider (markus.haider at uibk.ac.at)
-				Cameron Hummels (chummels at gmail.com)
-				Christian Karch (chiffre at posteo.de)
-				Ji-hoon Kim (me at jihoonkim.org)
-				Steffen Klemer (sklemer at phys.uni-goettingen.de)
-				Kacper Kowalik (xarthisius.kk at gmail.com)
-				Michael Kuhlen (mqk at astro.berkeley.edu)
-				Eve Lee (elee at cita.utoronto.ca)
-				Yuan Li (yuan at astro.columbia.edu)
-				Chris Malone (chris.m.malone at gmail.com)
-				Josh Maloney (joshua.moloney at colorado.edu)
-				Chris Moody (cemoody at ucsc.edu)
-				Andrew Myers (atmyers at astro.berkeley.edu)
-				Jeff Oishi (jsoishi at gmail.com)
-				Jean-Claude Passy (jcpassy at uvic.ca)
-				Mark Richardson (Mark.L.Richardson at asu.edu)
-				Thomas Robitaille (thomas.robitaille at gmail.com)
-				Anna Rosen (rosen at ucolick.org)
-				Anthony Scopatz (scopatz at gmail.com)
-				Devin Silvia (devin.silvia at colorado.edu)
-				Sam Skillman (samskillman at gmail.com)
-				Stephen Skory (s at skory.us)
-				Britton Smith (brittonsmith at gmail.com)
-				Geoffrey So (gsiisg at gmail.com)
-				Casey Stark (caseywstark at gmail.com)
-				Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
-				Stephanie Tonnesen (stonnes at gmail.com)
-				Matthew Turk (matthewturk at gmail.com)
-				Rich Wagner (rwagner at physics.ucsd.edu)
-				John Wise (jwise at physics.gatech.edu)
-				John ZuHone (jzuhone at gmail.com)
+Contributors:   
+                Tom Abel (tabel at stanford.edu)
+                David Collins (dcollins at physics.ucsd.edu)
+                Brian Crosby (crosby.bd at gmail.com)
+                Andrew Cunningham (ajcunn at gmail.com)
+                Hilary Egan (hilaryye at gmail.com)
+                John Forbes (jforbes at ucolick.org)
+                Nathan Goldbaum (goldbaum at ucolick.org)
+                Markus Haider (markus.haider at uibk.ac.at)
+                Cameron Hummels (chummels at gmail.com)
+                Christian Karch (chiffre at posteo.de)
+                Ji-hoon Kim (me at jihoonkim.org)
+                Steffen Klemer (sklemer at phys.uni-goettingen.de)
+                Kacper Kowalik (xarthisius.kk at gmail.com)
+                Michael Kuhlen (mqk at astro.berkeley.edu)
+                Eve Lee (elee at cita.utoronto.ca)
+                Sam Leitner (sam.leitner at gmail.com)
+                Yuan Li (yuan at astro.columbia.edu)
+                Chris Malone (chris.m.malone at gmail.com)
+                Josh Maloney (joshua.moloney at colorado.edu)
+                Chris Moody (cemoody at ucsc.edu)
+                Andrew Myers (atmyers at astro.berkeley.edu)
+                Jill Naiman (jnaiman at ucolick.org)
+                Kaylea Nelson (kaylea.nelson at yale.edu)
+                Jeff Oishi (jsoishi at gmail.com)
+                Jean-Claude Passy (jcpassy at uvic.ca)
+                Mark Richardson (Mark.L.Richardson at asu.edu)
+                Thomas Robitaille (thomas.robitaille at gmail.com)
+                Anna Rosen (rosen at ucolick.org)
+                Douglas Rudd (drudd at uchicago.edu)
+                Anthony Scopatz (scopatz at gmail.com)
+                Noel Scudder (noel.scudder at stonybrook.edu)
+                Devin Silvia (devin.silvia at colorado.edu)
+                Sam Skillman (samskillman at gmail.com)
+                Stephen Skory (s at skory.us)
+                Britton Smith (brittonsmith at gmail.com)
+                Geoffrey So (gsiisg at gmail.com)
+                Casey Stark (caseywstark at gmail.com)
+                Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
+                Stephanie Tonnesen (stonnes at gmail.com)
+                Matthew Turk (matthewturk at gmail.com)
+                Rich Wagner (rwagner at physics.ucsd.edu)
+                Andrew Wetzel (andrew.wetzel at yale.edu)
+                John Wise (jwise at physics.gatech.edu)
+                John ZuHone (jzuhone at gmail.com)
 
-We also include the Delaunay Triangulation module written by Robert Kern of
-Enthought, the cmdln.py module by Trent Mick, and the progressbar module by
+Several items included in the yt/extern directory were written by other
+individuals and may bear their own license, including the progressbar module by
 Nilton Volpato.  The PasteBin interface code (as well as the PasteBin itself)
-was written by the Pocoo collective (pocoo.org).  The RamsesRead++ library was
-developed by Oliver Hahn.  yt also includes a slightly-modified version of
-libconfig (http://www.hyperrealm.com/libconfig/) and an unmodified version of
-several routines from HEALpix (http://healpix.jpl.nasa.gov/).
-
-Large parts of development of yt were guided by discussions with Tom Abel, Ralf
-Kaehler, Mike Norman and Greg Bryan.
+was written by the Pocoo collective (pocoo.org).
 
 Thanks to everyone for all your contributions!

diff -r c2c55742d4d83bd75de56d74dfe8b22665f1dcdf -r fdcac050289069bc6b6ef61949d765a88b3972bb FUNDING
--- a/FUNDING
+++ /dev/null
@@ -1,35 +0,0 @@
-The development of yt has benefited from funding from many different sources
-and institutions.  Here is an incomplete list of these sources:
-
-  * NSF grant OCI-1048505
-  * NSF grant AST-0239709 
-  * NSF grant AST-0707474
-  * NSF grant AST-0708960
-  * NSF grant AST-0808184
-  * NSF grant AST-0807215 
-  * NSF grant AST-0807312
-  * NSF grant AST-0807075
-  * NSF grant AST-0908199
-  * NSF grant AST-0908553 
-  * NASA grant ATFP NNX08-AH26G
-  * NASA grant ATFP NNX09-AD80G
-  * NASA grant ATFP NNZ07-AG77G
-  * DOE Computational Science Graduate Fellowship under grant number DE-FG02-97ER25308
-
-Additionally, development of yt has benefited from the hospitality and hosting
-of the following institutions:
-
-  * Columbia University
-  * Harvard-Smithsonian Center for Astrophysics
-  * Institute for Advanced Study
-  * Kavli Institute for Particle Astrophysics and Cosmology
-  * Kavli Institute for Theoretical Physics
-  * Los Alamos National Lab
-  * Michigan State University
-  * Princeton University
-  * Stanford University
-  * University of California High-Performance Astro-Computing Center
-  * University of California at Berkeley
-  * University of California at San Diego
-  * University of California at Santa Cruz
-  * University of Colorado at Boulder

diff -r c2c55742d4d83bd75de56d74dfe8b22665f1dcdf -r fdcac050289069bc6b6ef61949d765a88b3972bb LICENSE.txt
--- a/LICENSE.txt
+++ /dev/null
@@ -1,674 +0,0 @@
-                    GNU GENERAL PUBLIC LICENSE
-                       Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-                            Preamble
-
-  The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
-  The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works.  By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users.  We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors.  You can apply it to
-your programs, too.
-
-  When we speak of free software, we are referring to freedom, not
-price.  Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
-  To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights.  Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
-  For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received.  You must make sure that they, too, receive
-or can get the source code.  And you must show them these terms so they
-know their rights.
-
-  Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
-  For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software.  For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
-  Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so.  This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software.  The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable.  Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products.  If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
-  Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary.  To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.
-
-                       TERMS AND CONDITIONS
-
-  0. Definitions.
-
-  "This License" refers to version 3 of the GNU General Public License.
-
-  "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
-  "The Program" refers to any copyrightable work licensed under this
-License.  Each licensee is addressed as "you".  "Licensees" and
-"recipients" may be individuals or organizations.
-
-  To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy.  The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
-  A "covered work" means either the unmodified Program or a work based
-on the Program.
-
-  To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy.  Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
-  To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies.  Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
-  An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License.  If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
-  1. Source Code.
-
-  The "source code" for a work means the preferred form of the work
-for making modifications to it.  "Object code" means any non-source
-form of a work.
-
-  A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
-  The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form.  A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
-  The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities.  However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work.  For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
-  The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
-  The Corresponding Source for a work in source code form is that
-same work.
-
-  2. Basic Permissions.
-
-  All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met.  This License explicitly affirms your unlimited
-permission to run the unmodified Program.  The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work.  This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
-  You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force.  You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright.  Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
-  Conveying under any other circumstances is permitted solely under
-the conditions stated below.  Sublicensing is not allowed; section 10
-makes it unnecessary.
-
-  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
-  No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
-  When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
-  4. Conveying Verbatim Copies.
-
-  You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
-  You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
-  5. Conveying Modified Source Versions.
-
-  You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
-    a) The work must carry prominent notices stating that you modified
-    it, and giving a relevant date.
-
-    b) The work must carry prominent notices stating that it is
-    released under this License and any conditions added under section
-    7.  This requirement modifies the requirement in section 4 to
-    "keep intact all notices".
-
-    c) You must license the entire work, as a whole, under this
-    License to anyone who comes into possession of a copy.  This
-    License will therefore apply, along with any applicable section 7
-    additional terms, to the whole of the work, and all its parts,
-    regardless of how they are packaged.  This License gives no
-    permission to license the work in any other way, but it does not
-    invalidate such permission if you have separately received it.
-
-    d) If the work has interactive user interfaces, each must display
-    Appropriate Legal Notices; however, if the Program has interactive
-    interfaces that do not display Appropriate Legal Notices, your
-    work need not make them do so.
-
-  A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit.  Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
-  6. Conveying Non-Source Forms.
-
-  You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
-    a) Convey the object code in, or embodied in, a physical product
-    (including a physical distribution medium), accompanied by the
-    Corresponding Source fixed on a durable physical medium
-    customarily used for software interchange.
-
-    b) Convey the object code in, or embodied in, a physical product
-    (including a physical distribution medium), accompanied by a
-    written offer, valid for at least three years and valid for as
-    long as you offer spare parts or customer support for that product
-    model, to give anyone who possesses the object code either (1) a
-    copy of the Corresponding Source for all the software in the
-    product that is covered by this License, on a durable physical
-    medium customarily used for software interchange, for a price no
-    more than your reasonable cost of physically performing this
-    conveying of source, or (2) access to copy the
-    Corresponding Source from a network server at no charge.
-
-    c) Convey individual copies of the object code with a copy of the
-    written offer to provide the Corresponding Source.  This
-    alternative is allowed only occasionally and noncommercially, and
-    only if you received the object code with such an offer, in accord
-    with subsection 6b.
-
-    d) Convey the object code by offering access from a designated
-    place (gratis or for a charge), and offer equivalent access to the
-    Corresponding Source in the same way through the same place at no
-    further charge.  You need not require recipients to copy the
-    Corresponding Source along with the object code.  If the place to
-    copy the object code is a network server, the Corresponding Source
-    may be on a different server (operated by you or a third party)
-    that supports equivalent copying facilities, provided you maintain
-    clear directions next to the object code saying where to find the
-    Corresponding Source.  Regardless of what server hosts the
-    Corresponding Source, you remain obligated to ensure that it is
-    available for as long as needed to satisfy these requirements.
-
-    e) Convey the object code using peer-to-peer transmission, provided
-    you inform other peers where the object code and Corresponding
-    Source of the work are being offered to the general public at no
-    charge under subsection 6d.
-
-  A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
-  A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling.  In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage.  For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product.  A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
-  "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source.  The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
-  If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information.  But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
-  The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed.  Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
-  Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
-  7. Additional Terms.
-
-  "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law.  If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
-  When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it.  (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.)  You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
-  Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
-    a) Disclaiming warranty or limiting liability differently from the
-    terms of sections 15 and 16 of this License; or
-
-    b) Requiring preservation of specified reasonable legal notices or
-    author attributions in that material or in the Appropriate Legal
-    Notices displayed by works containing it; or
-
-    c) Prohibiting misrepresentation of the origin of that material, or
-    requiring that modified versions of such material be marked in
-    reasonable ways as different from the original version; or
-
-    d) Limiting the use for publicity purposes of names of licensors or
-    authors of the material; or
-
-    e) Declining to grant rights under trademark law for use of some
-    trade names, trademarks, or service marks; or
-
-    f) Requiring indemnification of licensors and authors of that
-    material by anyone who conveys the material (or modified versions of
-    it) with contractual assumptions of liability to the recipient, for
-    any liability that these contractual assumptions directly impose on
-    those licensors and authors.
-
-  All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10.  If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term.  If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
-  If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
-  Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
-  8. Termination.
-
-  You may not propagate or modify a covered work except as expressly
-provided under this License.  Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
-  However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
-  Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
-  Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License.  If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
-  9. Acceptance Not Required for Having Copies.
-
-  You are not required to accept this License in order to receive or
-run a copy of the Program.  Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance.  However,
-nothing other than this License grants you permission to propagate or
-modify any covered work.  These actions infringe copyright if you do
-not accept this License.  Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
-  10. Automatic Licensing of Downstream Recipients.
-
-  Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License.  You are not responsible
-for enforcing compliance by third parties with this License.
-
-  An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations.  If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
-  You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License.  For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
-  11. Patents.
-
-  A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based.  The
-work thus licensed is called the contributor's "contributor version".
-
-  A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version.  For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
-  Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
-  In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement).  To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
-  If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients.  "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
-  If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
-  A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License.  You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
-  Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
-  12. No Surrender of Others' Freedom.
-
-  If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all.  For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
-  13. Use with the GNU Affero General Public License.
-
-  Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work.  The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
-  14. Revised Versions of this License.
-
-  The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time.  Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-  Each version is given a distinguishing version number.  If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation.  If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
-  If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
-  Later license versions may give you additional or different
-permissions.  However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
-  15. Disclaimer of Warranty.
-
-  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-  16. Limitation of Liability.
-
-  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
-  17. Interpretation of Sections 15 and 16.
-
-  If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
-  If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-    <program>  Copyright (C) <year>  <name of author>
-    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
-  You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<http://www.gnu.org/licenses/>.
-
-  The GNU General Public License does not permit incorporating your program
-into proprietary programs.  If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library.  If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.  But first, please read
-<http://www.gnu.org/philosophy/why-not-lgpl.html>.

diff -r c2c55742d4d83bd75de56d74dfe8b22665f1dcdf -r fdcac050289069bc6b6ef61949d765a88b3972bb MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,4 @@
 include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
 recursive-include yt *.pyx *.pxd *.hh *.h README*
+recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE
\ No newline at end of file

diff -r c2c55742d4d83bd75de56d74dfe8b22665f1dcdf -r fdcac050289069bc6b6ef61949d765a88b3972bb doc/how_to_develop_yt.txt
--- a/doc/how_to_develop_yt.txt
+++ b/doc/how_to_develop_yt.txt
@@ -25,7 +25,7 @@
 Licenses
 --------
 
-All code in yt should be under the GPL-3 (preferred) or a compatible license.
+All code in yt should be under the BSD 3-clause license.
 
 How To Get The Source Code
 --------------------------

diff -r c2c55742d4d83bd75de56d74dfe8b22665f1dcdf -r fdcac050289069bc6b6ef61949d765a88b3972bb doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -419,7 +419,7 @@
 echo "be installing ZeroMQ"
 
 printf "%-15s = %s so I " "INST_ROCKSTAR" "${INST_ROCKSTAR}"
-get_willwont ${INST_0MQ}
+get_willwont ${INST_ROCKSTAR}
 echo "be installing Rockstar"
 
 echo
@@ -473,11 +473,18 @@
 function do_setup_py
 {
     [ -e $1/done ] && return
-    echo "Installing $1 (arguments: '$*')"
-    [ ! -e $1/extracted ] && tar xfz $1.tar.gz
-    touch $1/extracted
-    cd $1
-    if [ ! -z `echo $1 | grep h5py` ]
+    LIB=$1
+    shift
+    if [ -z "$@" ]
+    then
+        echo "Installing $LIB"
+    else
+        echo "Installing $LIB (arguments: '$@')"
+    fi
+    [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
+    touch $LIB/extracted
+    cd $LIB
+    if [ ! -z `echo $LIB | grep h5py` ]
     then
         shift
 	( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -519,8 +526,8 @@
 
 function get_ytproject
 {
+    [ -e $1 ] && return
     echo "Downloading $1 from yt-project.org"
-    [ -e $1 ] && return
     ${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
     ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
@@ -551,67 +558,93 @@
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
+CYTHON='Cython-0.19.1'
+FORTHON='Forthon-0.8.11'
+PYX='PyX-0.12.1'
+PYTHON='Python-2.7.5'
+BZLIB='bzip2-1.0.6'
+FREETYPE_VER='freetype-2.4.12'
+H5PY='h5py-2.1.3'
+HDF5='hdf5-1.8.11'
+IPYTHON='ipython-1.0.0'
+LAPACK='lapack-3.4.2'
+PNG=libpng-1.6.3
+MATPLOTLIB='matplotlib-1.3.0'
+MERCURIAL='mercurial-2.7'
+NOSE='nose-1.3.0'
+NUMPY='numpy-1.7.1'
+PYTHON_HGLIB='python-hglib-1.0'
+PYZMQ='pyzmq-13.1.0'
+ROCKSTAR='rockstar-0.99.6'
+SCIPY='scipy-0.12.0'
+SQLITE='sqlite-autoconf-3071700'
+SYMPY='sympy-0.7.3'
+TORNADO='tornado-3.1'
+ZEROMQ='zeromq-3.2.3'
+ZLIB='zlib-1.2.8'
+
 # Now we dump all our SHA512 files out.
-echo 'fb85d71bb4f80b35f0d0f1735c650dd75c5f84b05635ddf91d6241ff103b5a49158c5b851a20c15e05425f6dde32a4971b35fcbd7445f61865b4d61ffd1fbfa1  Cython-0.18.tar.gz' > Cython-0.18.tar.gz.sha512
+echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0  Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
+echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo '3349152c47ed2b63c5c9aabcfa92b8497ea9d71ca551fd721e827fcb8f91ff9fbbee6bba8f8cb2dea185701b8798878b4b2435c1496b63d4b4a37c624a625299  Python-2.7.4.tgz' > Python-2.7.4.tgz.sha512
+echo 'd6580eb170b36ad50f3a30023fe6ca60234156af91ccb3971b0b0983119b86f3a9f6c717a515c3c6cb72b3dcbf1d02695c6d0b92745f460b46a3defd3ff6ef2f  Python-2.7.5.tgz' > Python-2.7.5.tgz.sha512
+echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
+echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3  freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
-echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3  h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
-echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
-echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554  libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
-echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208  matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
-echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2  mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
-echo 'a485daa556f6c76003de1dbb3e42b3daeee0a320c69c81b31a7d2ebbc2cf8ab8e96c214a4758e5e7bf814295dc1d6aa563092b714db7e719678d8462135861a8  numpy-1.7.0.tar.gz' > numpy-1.7.0.tar.gz.sha512
-echo '293d78d14a9347cb83e1a644e5f3e4447ed6fc21642c51683e5495dda08d2312194a73d1fc3c1d78287e33ed065aa251ecbaa7c0ea9189456c1702e96d78becd  sqlite-autoconf-3071601.tar.gz' > sqlite-autoconf-3071601.tar.gz.sha512
-echo 'b1c073ad26684e354f7c522c14655840592e03872bc0a94690f89cae2ff88f146fce1dad252ff27a889dac4a32ff9f8ab63ba940671f9da89e9ba3e19f1bf58d  zlib-1.2.7.tar.gz' > zlib-1.2.7.tar.gz.sha512
-echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
-echo 'b9d061ca49e54ea917e0aed2b2a48faef33061dbf6d17eae7f8c3fff0b35ca883e7324f6cb24bda542443f669dcd5748037a5f2309f4c359d68adef520894865  zeromq-3.2.2.tar.gz' > zeromq-3.2.2.tar.gz.sha512
-echo '852fce8a8308c4e1e4b19c77add2b2055ca2ba570b28e8364888df490af92b860c72e860adfb075b3405a9ceb62f343889f20a8711c9353a7d9059adee910f83  pyzmq-13.0.2.tar.gz' > pyzmq-13.0.2.tar.gz.sha512
-echo '303bd3fbea22be57fddf7df78ddf5a783d355a0c8071b1363250daafc20232ddd28eedc44aa1194f4a7afd82f9396628c5bb06819e02b065b6a1b1ae8a7c19e1  tornado-3.0.tar.gz' > tornado-3.0.tar.gz.sha512
-echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
-echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
-echo 'd67de9567256e6f1649e4f3f7dfee63371d5f00fd3fd4f92426198f862e97c57f70e827d19f4e5e1929ad85ef2ce7aa5a0596b101cafdac71672e97dc115b397  python-hglib-0.3.tar.gz' > python-hglib-0.3.tar.gz.sha512
-echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
+echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
+echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202  h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
+echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1  hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
+echo '1b309c08009583e66d1725a2d2051e6de934db246129568fa6d5ba33ad6babd3b443e7c2782d817128d2b112e21bcdd71e66be34fbd528badd900f1d0ed3db56  ipython-1.0.0.tar.gz' > ipython-1.0.0.tar.gz.sha512
+echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
+echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
+echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
+echo 'e425778edb0f71c34e719e04561ee3de37feaa1be4d60b94c780aebdbe6d41f8f4ab15103a8bbe8894ebeb228c42f0e2cd41b8db840f8384e1cd7cd2d5b67b97  mercurial-2.7.tar.gz' > mercurial-2.7.tar.gz.sha512
+echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4  nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
+echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
+echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
+echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b  pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
-echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa  scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
-echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
-echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
+echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4  scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
+echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
+echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
+echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf  tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
+echo '34ffb6aa645f62bd1158a8f2888bf92929ccf90917a6c50ed51ed1240732f498522e164d1536f26480c87ad5457fe614a93bf0e15f2f89b0b168e64a30de68ca  zeromq-3.2.3.tar.gz' > zeromq-3.2.3.tar.gz.sha512
+echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
 # Individual processes
-[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.11.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3071601.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.12.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-3.2.2.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-13.0.2.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-3.0.tar.gz
-[ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
+[ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject $ZLIB.tar.gz
+[ $INST_BZLIB -eq 1 ] && get_ytproject $BZLIB.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject $PNG.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject $FREETYPE_VER.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject $SQLITE.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject $PYX.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $ZEROMQ.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $PYZMQ.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $TORNADO.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject $SCIPY.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
-[ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
-get_ytproject Python-2.7.4.tgz
-get_ytproject numpy-1.7.0.tar.gz
-get_ytproject matplotlib-1.2.1.tar.gz
-get_ytproject mercurial-2.5.4.tar.gz
-get_ytproject ipython-0.13.1.tar.gz
-get_ytproject h5py-2.1.2.tar.gz
-get_ytproject Cython-0.18.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject $LAPACK.tar.gz
+get_ytproject $PYTHON.tgz
+get_ytproject $NUMPY.tar.gz
+get_ytproject $MATPLOTLIB.tar.gz
+get_ytproject $MERCURIAL.tar.gz
+get_ytproject $IPYTHON.tar.gz
+get_ytproject $H5PY.tar.gz
+get_ytproject $CYTHON.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject Forthon-0.8.11.tar.gz
-get_ytproject nose-1.2.1.tar.gz
-get_ytproject python-hglib-0.3.tar.gz
-get_ytproject sympy-0.7.2.tar.gz
-get_ytproject rockstar-0.99.6.tar.gz
+get_ytproject $FORTHON.tar.gz
+get_ytproject $NOSE.tar.gz
+get_ytproject $PYTHON_HGLIB.tar.gz
+get_ytproject $SYMPY.tar.gz
+get_ytproject $ROCKSTAR.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
-    if [ ! -e bzip2-1.0.6/done ]
+    if [ ! -e $BZLIB/done ]
     then
-        [ ! -e bzip2-1.0.6 ] && tar xfz bzip2-1.0.6.tar.gz
+        [ ! -e $BZLIB ] && tar xfz $BZLIB.tar.gz
         echo "Installing BZLIB"
-        cd bzip2-1.0.6
+        cd $BZLIB
         if [ `uname` = "Darwin" ]
         then
             if [ -z "${CC}" ]
@@ -634,11 +667,11 @@
 
 if [ $INST_ZLIB -eq 1 ]
 then
-    if [ ! -e zlib-1.2.7/done ]
+    if [ ! -e $ZLIB/done ]
     then
-        [ ! -e zlib-1.2.7 ] && tar xfz zlib-1.2.7.tar.gz
+        [ ! -e $ZLIB ] && tar xfz $ZLIB.tar.gz
         echo "Installing ZLIB"
-        cd zlib-1.2.7
+        cd $ZLIB
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -652,11 +685,11 @@
 
 if [ $INST_PNG -eq 1 ]
 then
-    if [ ! -e libpng-1.6.1/done ]
+    if [ ! -e $PNG/done ]
     then
-        [ ! -e libpng-1.6.1 ] && tar xfz libpng-1.6.1.tar.gz
+        [ ! -e $PNG ] && tar xfz $PNG.tar.gz
         echo "Installing PNG"
-        cd libpng-1.6.1
+        cd $PNG
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -670,13 +703,14 @@
 
 if [ $INST_FTYPE -eq 1 ]
 then
-    if [ ! -e freetype-2.4.11/done ]
+    if [ ! -e $FREETYPE_VER/done ]
     then
-        [ ! -e freetype-2.4.11 ] && tar xfz freetype-2.4.11.tar.gz
+        [ ! -e $FREETYPE_VER ] && tar xfz $FREETYPE_VER.tar.gz
         echo "Installing FreeType2"
-        cd freetype-2.4.11
+        cd $FREETYPE_VER
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make 2>&1 ) 1>> ${LOG_FILE} || do_exit
+		( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
@@ -688,11 +722,11 @@
 
 if [ -z "$HDF5_DIR" ]
 then
-    if [ ! -e hdf5-1.8.9/done ]
+    if [ ! -e $HDF5/done ]
     then
-        [ ! -e hdf5-1.8.9 ] && tar xfz hdf5-1.8.9.tar.gz
+        [ ! -e $HDF5 ] && tar xfz $HDF5.tar.gz
         echo "Installing HDF5"
-        cd hdf5-1.8.9
+        cd $HDF5
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -707,11 +741,11 @@
 
 if [ $INST_SQLITE3 -eq 1 ]
 then
-    if [ ! -e sqlite-autoconf-3071601/done ]
+    if [ ! -e $SQLITE/done ]
     then
-        [ ! -e sqlite-autoconf-3071601 ] && tar xfz sqlite-autoconf-3071601.tar.gz
+        [ ! -e $SQLITE ] && tar xfz $SQLITE.tar.gz
         echo "Installing SQLite3"
-        cd sqlite-autoconf-3071601
+        cd $SQLITE
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -720,11 +754,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.4/done ]
+if [ ! -e $PYTHON/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e Python-2.7.4 ] && tar xfz Python-2.7.4.tgz
-    cd Python-2.7.4
+    [ ! -e $PYTHON ] && tar xfz $PYTHON.tgz
+    cd $PYTHON
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -739,7 +773,7 @@
 
 if [ $INST_HG -eq 1 ]
 then
-    do_setup_py mercurial-2.5.4
+    do_setup_py $MERCURIAL
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
@@ -788,9 +822,9 @@
 
 if [ $INST_SCIPY -eq 0 ]
 then
-    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
+    do_setup_py $NUMPY ${NUMPY_ARGS}
 else
-    if [ ! -e scipy-0.11.0/done ]
+    if [ ! -e $SCIPY/done ]
     then
 	if [ ! -e BLAS/done ]
 	then
@@ -798,27 +832,27 @@
 	    echo "Building BLAS"
 	    cd BLAS
 	    gfortran -O2 -fPIC -fno-second-underscore -c *.f
-	    ar r libfblas.a *.o 1>> ${LOG_FILE}
-	    ranlib libfblas.a 1>> ${LOG_FILE}
+	    ( ar r libfblas.a *.o 2>&1 ) 1>> ${LOG_FILE}
+	    ( ranlib libfblas.a 2>&1 ) 1>> ${LOG_FILE}
 	    rm -rf *.o
 	    touch done
 	    cd ..
 	fi
-	if [ ! -e lapack-3.4.2/done ]
+	if [ ! -e $LAPACK/done ]
 	then
-	    tar xfz lapack-3.4.2.tar.gz
+	    tar xfz $LAPACK.tar.gz
 	    echo "Building LAPACK"
-	    cd lapack-3.4.2/
+	    cd $LAPACK/
 	    cp INSTALL/make.inc.gfortran make.inc
-	    make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
+	    ( make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 2>&1 ) 1>> ${LOG_FILE} || do_exit
 	    touch done
 	    cd ..
 	fi
     fi
     export BLAS=$PWD/BLAS/libfblas.a
-    export LAPACK=$PWD/lapack-3.4.2/liblapack.a
-    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
-    do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
+    export LAPACK=$PWD/$LAPACK/liblapack.a
+    do_setup_py $NUMPY ${NUMPY_ARGS}
+    do_setup_py $SCIPY ${NUMPY_ARGS}
 fi
 
 if [ -n "${MPL_SUPP_LDFLAGS}" ]
@@ -840,10 +874,15 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.2.1
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
-do_setup_py matplotlib-1.2.1
+mkdir -p ${DEST_DIR}/src/$MATPLOTLIB
+echo "[directories]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+if [ `uname` = "Darwin" ]
+then
+   echo "[gui_support]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+   echo "macosx = False" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+fi
+do_setup_py $MATPLOTLIB
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -855,36 +894,36 @@
 # Now we do our IPython installation, which has two optional dependencies.
 if [ $INST_0MQ -eq 1 ]
 then
-    if [ ! -e zeromq-3.2.2/done ]
+    if [ ! -e $ZEROMQ/done ]
     then
-        [ ! -e zeromq-3.2.2 ] && tar xfz zeromq-3.2.2.tar.gz
+        [ ! -e $ZEROMQ ] && tar xfz $ZEROMQ.tar.gz
         echo "Installing ZeroMQ"
-        cd zeromq-3.2.2
+        cd $ZEROMQ
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
-    do_setup_py pyzmq-13.0.2 --zmq=${DEST_DIR}
-    do_setup_py tornado-3.0
+    do_setup_py $PYZMQ --zmq=${DEST_DIR}
+    do_setup_py $TORNADO
 fi
 
-do_setup_py ipython-0.13.1
-do_setup_py h5py-2.1.2
-do_setup_py Cython-0.18
-do_setup_py Forthon-0.8.11
-do_setup_py nose-1.2.1
-do_setup_py python-hglib-0.3
-do_setup_py sympy-0.7.2
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.12.1
+do_setup_py $IPYTHON
+do_setup_py $H5PY
+do_setup_py $CYTHON
+do_setup_py $FORTHON
+do_setup_py $NOSE
+do_setup_py $PYTHON_HGLIB
+do_setup_py $SYMPY
+[ $INST_PYX -eq 1 ] && do_setup_py $PYX
 
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
 then
     if [ ! -e Rockstar/done ]
     then
-        [ ! -e Rockstar ] && tar xfz rockstar-0.99.6.tar.gz
+        [ ! -e Rockstar ] && tar xfz $ROCKSTAR.tar.gz
         echo "Building Rockstar"
         cd Rockstar
         ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -909,10 +948,10 @@
 touch done
 cd $MY_PWD
 
-if !(${DEST_DIR}/bin/python2.7 -c "import readline" >> ${LOG_FILE})
+if !( ( ${DEST_DIR}/bin/python2.7 -c "import readline" 2>&1 )>> ${LOG_FILE})
 then
     echo "Installing pure-python readline"
-    ${DEST_DIR}/bin/pip install readline 1>> ${LOG_FILE}
+    ( ${DEST_DIR}/bin/pip install readline 2>&1 ) 1>> ${LOG_FILE}
 fi
 
 if [ $INST_ENZO -eq 1 ]

diff -r c2c55742d4d83bd75de56d74dfe8b22665f1dcdf -r fdcac050289069bc6b6ef61949d765a88b3972bb setup.py
--- a/setup.py
+++ b/setup.py
@@ -248,7 +248,7 @@
         classifiers=["Development Status :: 5 - Production/Stable",
                      "Environment :: Console",
                      "Intended Audience :: Science/Research",
-                     "License :: OSI Approved :: GNU General Public License (GPL)",
+                     "License :: OSI Approved :: BSD License",
                      "Operating System :: MacOS :: MacOS X",
                      "Operating System :: POSIX :: AIX",
                      "Operating System :: POSIX :: Linux",
@@ -269,7 +269,7 @@
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
         url="http://yt-project.org/",
-        license="GPL-3",
+        license="BSD",
         configuration=configuration,
         zip_safe=False,
         data_files=REASON_FILES,

diff -r c2c55742d4d83bd75de56d74dfe8b22665f1dcdf -r fdcac050289069bc6b6ef61949d765a88b3972bb yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -60,27 +60,17 @@
 All broadly useful code that doesn't clearly fit in one of the other
 categories goes here.
 
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
 
-  This file is part of yt.
 
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
+"""
 
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 
 __version__ = "2.5-dev"
 
@@ -96,7 +86,7 @@
     if answer_big_data:
         nose_argv.append('--answer-big-data')
     log_suppress = ytcfg.getboolean("yt","suppressStreamLogging")
-    ytcfg["yt","suppressStreamLogging"] = 'True'
+    ytcfg.set("yt","suppressStreamLogging", 'True')
     initial_dir = os.getcwd()
     yt_file = os.path.abspath(__file__)
     yt_dir = os.path.dirname(yt_file)
@@ -105,4 +95,4 @@
         nose.run(argv=nose_argv)
     finally:
         os.chdir(initial_dir)
-        ytcfg["yt","suppressStreamLogging"] = log_suppress
+        ytcfg.set("yt","suppressStreamLogging", str(log_suppress))

diff -r c2c55742d4d83bd75de56d74dfe8b22665f1dcdf -r fdcac050289069bc6b6ef61949d765a88b3972bb yt/analysis_modules/absorption_spectrum/__init__.py
--- a/yt/analysis_modules/absorption_spectrum/__init__.py
+++ b/yt/analysis_modules/absorption_spectrum/__init__.py
@@ -1,24 +1,14 @@
 """
 Import stuff for light cone generator.
 
-Author: Britton Smith <brittons at origins.colorado.edu>
-Affiliation: CASA/University of CO, Boulder
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2008-2011 Britton Smith.  All Rights Reserved.
 
-  This file is part of yt.
 
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
+"""
 
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r c2c55742d4d83bd75de56d74dfe8b22665f1dcdf -r fdcac050289069bc6b6ef61949d765a88b3972bb yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -1,27 +1,17 @@
 """
 Absorption line generating functions.
 
-Author: Britton Smith <brittonsmith at gmail.com>
-Affiliation: Michigan State University
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2008-2011 Britton Smith.  All Rights Reserved.
 
-  This file is part of yt.
 
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
+"""
 
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 
 import numpy as np
 

diff -r c2c55742d4d83bd75de56d74dfe8b22665f1dcdf -r fdcac050289069bc6b6ef61949d765a88b3972bb yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -1,27 +1,17 @@
 """
 AbsorptionSpectrum class and member functions.
 
-Author: Britton Smith <brittonsmith at gmail.com>
-Affiliation: Michigan State University
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2008-2011 Britton Smith.  All Rights Reserved.
 
-  This file is part of yt.
 
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
+"""
 
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 
 import h5py
 import numpy as np

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt-3.0/commits/72eb0870c6a4/
Changeset:   72eb0870c6a4
Branch:      yt
User:        jzuhone
Date:        2013-09-22 15:01:39
Summary:     Improvements to FITS writing. This small refactor consolidates all of the FITS writing into one versatile standalone routine (write_fits), which may be called on its own or from within a method such as frb.export_fits.

Also made some small tweaks to write_projection so that axis ticks and labels may optionally be included.
Affected #:  2 files
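
Before the diff, a minimal usage sketch of the two routines as they look after this refactor; the module path, field name, bounds, and header values below are illustrative placeholders, not taken from the changeset:

    import numpy as np
    from yt.visualization.image_writer import write_fits, write_projection

    # write_fits now accepts a dict of 2D arrays; each key becomes one
    # named image block (HDU) in the output file.
    data = {"Density": np.random.random((64, 64))}   # placeholder field
    coords = {"units": "cm", "xctr": 0.0, "yctr": 0.0,
              "dx": 3.0e21, "dy": 3.0e21}            # invented pixel scale
    write_fits(data, "my_slice", clobber=True, coords=coords,
               other_keys={"Time": 0.0})

    # write_projection gains extent/xlabel/ylabel, so physical axis ticks
    # and labels can be drawn instead of being suppressed.
    write_projection(data["Density"], "my_proj.png",
                     colorbar_label="Density",
                     limits=(1.0e-2, 1.0), take_log=True,
                     extent=[-0.5, 0.5, -0.5, 0.5],
                     xlabel="x (Mpc)", ylabel="y (Mpc)")

The coords keys in this sketch mirror the dictionary that export_fits assembles in the diff below.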

diff -r fdcac050289069bc6b6ef61949d765a88b3972bb -r 72eb0870c6a44aa934193148972198bcc1aad437 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -19,6 +19,7 @@
     y_dict, \
     axis_names
 from .volume_rendering.api import off_axis_projection
+from image_writer import write_fits
 from yt.data_objects.image_array import ImageArray
 import _MPL
 import numpy as np
@@ -263,8 +264,8 @@
             output.create_dataset(field,data=self[field])
         output.close()
 
-    def export_fits(self, filename_prefix, fields = None, clobber=False,
-                    other_keys=None, gzip_file=False, units="1"):
+    def export_fits(self, filename_prefix, fields=None, clobber=False,
+                    other_keys=None, units="cm", sky_center=(0.0,0.0), D_A=None):
 
         """
         This will export a set of FITS images of either the fields specified
@@ -273,106 +274,80 @@
         existing FITS file.
 
         This requires the *pyfits* module, which is a standalone module
-        provided by STSci to interface with FITS-format files.
+        provided by STSci to interface with FITS-format files, and is also
+        part of AstroPy.
         """
         r"""Export a set of pixelized fields to a FITS file.
 
         This will export a set of FITS images of either the fields specified
-        or all the fields already in the object.  The output filename is the
-        the specified prefix.
+        or all the fields already in the object.
 
         Parameters
         ----------
         filename_prefix : string
-            This prefix will be prepended to every FITS file name.
+            This prefix will be prepended to the FITS file name.
         fields : list of strings
             These fields will be pixelized and output.
         clobber : boolean
             If the file exists, this governs whether we will overwrite.
         other_keys : dictionary, optional
             A set of header keys and values to write into the FITS header.
-        gzip_file : boolean, optional
-            gzip the file after writing, default False
         units : string, optional
-            the length units that the coordinates are written in, default '1'
+            the length units that the coordinates are written in, default 'cm'
+            If units are set to "deg" then assume that sky coordinates are
+            requested.
+        sky_center : array_like, optional
+            Center of the image in (ra,dec) in degrees if sky coordinates
+            (units="deg") are requested.
+        D_A : float or tuple, optional
+            Angular diameter distance, given in code units as a float or
+            a tuple containing the value and the length unit. Required if
+            using sky coordinates.                                                                                            
         """
-        
-        import pyfits
-        from os import system
-        
+
+        if units == "deg" and D_A is None:
+            mylog.error("Sky coordinates require an angular diameter distance. Please specify D_A.")    
+        if iterable(D_A):
+            dist = D_A[0]/self.pf.units[D_A[1]]
+        else:
+            dist = D_A
+
+        if other_keys is None:
+            hdu_keys = {}
+        else:
+            hdu_keys = other_keys
+            
         extra_fields = ['x','y','z','px','py','pz','pdx','pdy','pdz','weight_field']
-        if filename_prefix.endswith('.fits'): filename_prefix=filename_prefix[:-5]
         if fields is None: 
             fields = [field for field in self.data_source.fields 
                       if field not in extra_fields]
 
+        coords = {}
         nx, ny = self.buff_size
-        dx = (self.bounds[1]-self.bounds[0])/nx*self.pf[units]
-        dy = (self.bounds[3]-self.bounds[2])/ny*self.pf[units]
-        xmin = self.bounds[0]*self.pf[units]
-        ymin = self.bounds[2]*self.pf[units]
-        simtime = self.pf.current_time
+        dx = (self.bounds[1]-self.bounds[0])/nx
+        dy = (self.bounds[3]-self.bounds[2])/ny
+        if units == "deg":  
+            coords["dx"] = -np.rad2deg(dx/dist)
+            coords["dy"] = np.rad2deg(dy/dist)
+            coords["xctr"] = sky_center[0]
+            coords["yctr"] = sky_center[1]
+            hdu_keys["MTYPE1"] = "EQPOS"
+            hdu_keys["MFORM1"] = "RA,DEC"
+            hdu_keys["CTYPE1"] = "RA---TAN"
+            hdu_keys["CTYPE2"] = "DEC--TAN"
+        else:
+            coords["dx"] = dx*self.pf.units[units]
+            coords["dy"] = dy*self.pf.units[units]
+            coords["xctr"] = 0.5*(self.bounds[0]+self.bounds[1])*self.pf.units[units]
+            coords["yctr"] = 0.5*(self.bounds[2]+self.bounds[3])*self.pf.units[units]
+        coords["units"] = units
+        
+        hdu_keys["Time"] = self.pf.current_time
 
-        hdus = []
-
-        first = True
-        
-        for field in fields:
-
-            if (first) :
-                hdu = pyfits.PrimaryHDU(self[field])
-                first = False
-            else :
-                hdu = pyfits.ImageHDU(self[field])
+        data = dict([(field,self[field]) for field in fields])
+        write_fits(data, filename_prefix, clobber=clobber, coords=coords,
+                   other_keys=hdu_keys)
                 
-            if self.data_source.has_key('weight_field'):
-                weightname = self.data_source._weight
-                if weightname is None: weightname = 'None'
-                field = field +'_'+weightname
-
-            hdu.header.update("Field", field)
-            hdu.header.update("Time", simtime)
-
-            hdu.header.update('WCSNAMEP', "PHYSICAL")            
-            hdu.header.update('CTYPE1P', "LINEAR")
-            hdu.header.update('CTYPE2P', "LINEAR")
-            hdu.header.update('CRPIX1P', 0.5)
-            hdu.header.update('CRPIX2P', 0.5)
-            hdu.header.update('CRVAL1P', xmin)
-            hdu.header.update('CRVAL2P', ymin)
-            hdu.header.update('CDELT1P', dx)
-            hdu.header.update('CDELT2P', dy)
-                    
-            hdu.header.update('CTYPE1', "LINEAR")
-            hdu.header.update('CTYPE2', "LINEAR")                                
-            hdu.header.update('CUNIT1', units)
-            hdu.header.update('CUNIT2', units)
-            hdu.header.update('CRPIX1', 0.5)
-            hdu.header.update('CRPIX2', 0.5)
-            hdu.header.update('CRVAL1', xmin)
-            hdu.header.update('CRVAL2', ymin)
-            hdu.header.update('CDELT1', dx)
-            hdu.header.update('CDELT2', dy)
-
-            if (other_keys is not None) :
-
-                for k,v in other_keys.items() :
-
-                    hdu.header.update(k,v)
-
-            hdus.append(hdu)
-
-            del hdu
-            
-        hdulist = pyfits.HDUList(hdus)
-
-        hdulist.writeto("%s.fits" % (filename_prefix), clobber=clobber)
-        
-        if (gzip_file) :
-            clob = ""
-            if (clobber) : clob = "-f"
-            system("gzip "+clob+" %s.fits" % (filename_prefix))
-        
     def open_in_ds9(self, field, take_log=True):
         """
         This will open a given field in the DS9 viewer.
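
For the sky-coordinate path added above, a hypothetical export_fits call might look like the following; the dataset name, field, width, and angular diameter distance are invented for illustration, and units="deg" is what switches on the RA/DEC headers:

    from yt.mods import load

    pf = load("my_dataset")                  # placeholder dataset
    proj = pf.h.proj(0, "Density")           # on-axis projection
    frb = proj.to_frb((1.0, "mpc"), 512)     # fixed-resolution buffer
    # D_A may be a float in code units or a (value, unit) tuple; it is
    # required whenever units="deg".
    frb.export_fits("my_image", fields=["Density"], clobber=True,
                    units="deg", sky_center=(30.0, 45.0),
                    D_A=(500.0, "mpc"))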

diff -r fdcac050289069bc6b6ef61949d765a88b3972bb -r 72eb0870c6a44aa934193148972198bcc1aad437 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -333,7 +333,8 @@
 
 def write_projection(data, filename, colorbar=True, colorbar_label=None, 
                      title=None, limits=None, take_log=True, figsize=(8,6),
-                     dpi=100, cmap_name='algae'):
+                     dpi=100, cmap_name='algae', extent=None, xlabel=None,
+                     ylabel=None):
     r"""Write a projection or volume rendering to disk with a variety of 
     pretty parameters such as limits, title, colorbar, etc.  write_projection
     uses the standard matplotlib interface to create the figure.  N.B. This code
@@ -392,16 +393,22 @@
     # Create the figure and paint the data on
     fig = matplotlib.figure.Figure(figsize=figsize)
     ax = fig.add_subplot(111)
-    fig.tight_layout()
-
-    cax = ax.imshow(data, vmin=limits[0], vmax=limits[1], norm=norm, cmap=cmap_name)
+    
+    cax = ax.imshow(data, vmin=limits[0], vmax=limits[1], norm=norm,
+                    extent=extent, cmap=cmap_name)
     
     if title:
         ax.set_title(title)
 
+    if xlabel:
+        ax.set_xlabel(xlabel)
+    if ylabel:
+        ax.set_ylabel(ylabel)
+
     # Suppress the x and y pixel counts
-    ax.set_xticks(())
-    ax.set_yticks(())
+    if extent is None:
+        ax.set_xticks(())
+        ax.set_yticks(())
 
     # Add a color bar and label if requested
     if colorbar:
@@ -409,6 +416,8 @@
         if colorbar_label:
             cbar.ax.set_ylabel(colorbar_label)
 
+    fig.tight_layout()
+        
     suffix = get_image_suffix(filename)
 
     if suffix == '':
@@ -429,70 +438,89 @@
     return filename
 
 
-def write_fits(image, filename_prefix, clobber=True, coords=None, gzip_file=False) :
+def write_fits(image, filename_prefix, clobber=True, coords=None,
+               other_keys=None):
     """
     This will export a FITS image of a floating point array. The output filename is
     *filename_prefix*. If clobber is set to True, this will overwrite any existing
     FITS file.
     
     This requires the *pyfits* module, which is a standalone module
-    provided by STSci to interface with FITS-format files.
+    provided by STSci to interface with FITS-format files, and is also part of
+    AstroPy.
     """
-    r"""Write out a floating point array directly to a FITS file, optionally
-    adding coordinates. 
+    r"""Write out floating point arrays directly to a FITS file, optionally
+    adding coordinates and header keywords.
         
     Parameters
     ----------
-    image : array_like
-        This is an (unscaled) array of floating point values, shape (N,N,) to save
-        in a FITS file.
+    image : array_like, or dict of array_like objects
+        This is either an (unscaled) array of floating point values, or a dict of
+        such arrays, shape (N,N,) to save in a FITS file. 
     filename_prefix : string
         This prefix will be prepended to every FITS file name.
     clobber : boolean
         If the file exists, this governs whether we will overwrite.
     coords : dictionary, optional
         A set of header keys and values to write to the FITS header to set up
-        a coordinate system. 
-    gzip_file : boolean, optional
-        gzip the file after writing, default False
+        a coordinate system, which is assumed to be linear unless specified otherwise
+        in *other_keys*
+        "units": the length units
+        "xctr","yctr": the center of the image
+        "dx","dy": the pixel width in each direction                                                
+    other_keys : dictionary, optional
+        A set of header keys and values to write into the FITS header.    
     """
+
+    try:
+        import pyfits
+    except ImportError:
+        try:
+            import astropy.io.fits as pyfits
+        except:
+            raise ImportError("You don't have pyFITS or AstroPy installed.")
     
-    import pyfits
     from os import system
     
-    if filename_prefix.endswith('.fits'): filename_prefix=filename_prefix[:-5]
-    
-    hdu = pyfits.PrimaryHDU(image)
+    try:
+        image.keys()
+        image_dict = image
+    except:
+        image_dict = dict(yt_data=image)
 
-    if (coords is not None) :
+    hdulist = [pyfits.PrimaryHDU()]
 
-        hdu.header.update('WCSNAMEP', "PHYSICAL")
-        hdu.header.update('CTYPE1P', "LINEAR")
-        hdu.header.update('CTYPE2P', "LINEAR")
-        hdu.header.update('CRPIX1P', 0.5)
-        hdu.header.update('CRPIX2P', 0.5)
-        hdu.header.update('CRVAL1P', coords["xmin"])
-        hdu.header.update('CRVAL2P', coords["ymin"])
-        hdu.header.update('CDELT1P', coords["dx"])
-        hdu.header.update('CDELT2P', coords["dy"])
+    for key in image_dict.keys():
+
+        mylog.info("Writing image block \"%s\"" % (key))
+        hdu = pyfits.ImageHDU(image_dict[key])
+        hdu.update_ext_name(key)
         
-        hdu.header.update('CTYPE1', "LINEAR")
-        hdu.header.update('CTYPE2', "LINEAR")
-        hdu.header.update('CUNIT1', coords["units"])
-        hdu.header.update('CUNIT2', coords["units"])
-        hdu.header.update('CRPIX1', 0.5)
-        hdu.header.update('CRPIX2', 0.5)
-        hdu.header.update('CRVAL1', coords["xmin"])
-        hdu.header.update('CRVAL2', coords["ymin"])
-        hdu.header.update('CDELT1', coords["dx"])
-        hdu.header.update('CDELT2', coords["dy"])
+        if coords is not None:
 
-    hdu.writeto("%s.fits" % (filename_prefix), clobber=clobber)
+            nx, ny = image_dict[key].shape
 
-    if (gzip_file) :
-        clob = ""
-        if (clobber) : clob="-f"
-        system("gzip "+clob+" %s.fits" % (filename_prefix))
+            hdu.header.update('CUNIT1', coords["units"])
+            hdu.header.update('CUNIT2', coords["units"])
+            hdu.header.update('CRPIX1', 0.5*(nx+1))
+            hdu.header.update('CRPIX2', 0.5*(ny+1))
+            hdu.header.update('CRVAL1', coords["xctr"])
+            hdu.header.update('CRVAL2', coords["yctr"])
+            hdu.header.update('CDELT1', coords["dx"])
+            hdu.header.update('CDELT2', coords["dy"])
+            # These are the defaults, but will get overwritten if
+            # the caller has specified them
+            hdu.header.update('CTYPE1', "LINEAR")
+            hdu.header.update('CTYPE2', "LINEAR")
+                                    
+        if other_keys is not None:
+            for k,v in other_keys.items():
+                hdu.header.update(k,v)
+
+        hdulist.append(hdu)
+
+    hdulist = pyfits.HDUList(hdulist)
+    hdulist.writeto("%s.fits" % (filename_prefix), clobber=clobber)                    
 
 def display_in_notebook(image, max_val=None):
     """


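The reworked write_fits above now accepts either a single array or a dict of arrays, writing each as its own image HDU, and the coords convention switched from edge-based (xmin/ymin) to center-based (xctr/yctr). A minimal usage sketch, with hypothetical array values:

    import numpy as np
    from yt.visualization.image_writer import write_fits

    images = {"Density": np.random.rand(64, 64),
              "Temperature": np.random.rand(64, 64)}
    coords = {"units": "kpc",            # length units for both axes
              "xctr": 0.0, "yctr": 0.0,  # image center
              "dx": 10.0, "dy": 10.0}    # pixel widths
    # Produces my_images.fits with one ImageHDU per dict key.
    write_fits(images, "my_images", clobber=True, coords=coords,
               other_keys={"Time": 0.0})
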
https://bitbucket.org/yt_analysis/yt-3.0/commits/d0b4d51fced2/
Changeset:   d0b4d51fced2
Branch:      yt
User:        jzuhone
Date:        2013-09-22 15:03:07
Summary:     Fully operational projection of the SZ signal distortion using SZpack.
Affected #:  4 files

diff -r 72eb0870c6a44aa934193148972198bcc1aad437 -r d0b4d51fced2ea9f52fc811aff668461b58ec7f6 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -103,7 +103,7 @@
     TwoPointFunctions, \
     FcnSet
 
-from .sunyaev_zeldovich.api import SZprojection
+from .sunyaev_zeldovich.api import SZProjection
 
 from .radmc3d_export.api import \
     RadMC3DWriter

diff -r 72eb0870c6a44aa934193148972198bcc1aad437 -r d0b4d51fced2ea9f52fc811aff668461b58ec7f6 yt/analysis_modules/sunyaev_zeldovich/api.py
--- a/yt/analysis_modules/sunyaev_zeldovich/api.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/api.py
@@ -1,1 +1,1 @@
-from .projection import SZprojection
+from projection import SZProjection

diff -r 72eb0870c6a44aa934193148972198bcc1aad437 -r d0b4d51fced2ea9f52fc811aff668461b58ec7f6 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -2,113 +2,204 @@
 from yt.data_objects.image_array import ImageArray
 from yt.data_objects.field_info_container import add_field
 from yt.funcs import fix_axis, mylog, iterable, get_pbar
-from yt.definitions import inv_axis_names
+from yt.utilities.definitions import inv_axis_names
+from yt.visualization.image_writer import write_fits, write_projection
 from yt.visualization.volume_rendering.camera import off_axis_projection
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+     communication_system, parallel_root_only
 import numpy as np
 
 Tcmb = 2.726
-
+I0 = 2*(kboltz*Tcmb)**3/((hcgs*clight)**2)*1.0e17
+        
 try:
     import SZpack
 except:
     raise ImportError("SZpack not installed.")
 
 vlist = "xyz"
-nvec = int(0)
 
 def _t_squared(field, data):
-    return data["TempkeV"]*data["TempkeV"]
+    return data["Density"]*data["TempkeV"]*data["TempkeV"]
 add_field("TSquared", function=_t_squared)
 
 def _beta_perp_squared(field, data):
-    return data["VelocityMagnitude"]**2/clight/clight - data["BetaParSquared"]
+    return data["Density"]*(data["VelocityMagnitude"]**2/clight/clight - data["BetaParSquared"])
 add_field("BetaPerpSquared", function=_beta_perp_squared)
 
-def _beta_par(field, data):
-    axis = data.get_field_parameter("axis")
-    if iterable(nvec):
-        vpar = (data["x-velocity"]*nvec[0]+
-                data["y-velocity"]*nvec[1]+
-                data["z-velocity"]*nvec[2])
-    else:
-        vpar = data["%s-velocity" % (vlist[nvec])]
-    return vpar/clight
-add_field("BetaPar", function=_beta_par)
-
 def _beta_par_squared(field, data):
-    return data["BetaPar"]**2
+    return data["Density"]*data["BetaPar"]**2
 add_field("BetaParSquared", function=_beta_par_squared)
 
 def _t_beta_par(field, data):
-    return data["TempkeV"]*data["BetaPar"]
+    return data["Density"]*data["TempkeV"]*data["BetaPar"]
 add_field("TBetaPar", function=_t_beta_par)
 
-def SZProjection(pf, axis, freqs, center="c", width=(1, "unitary"), nx=800, mue=None):
+def _t_sz(field, data):
+    return data["Density"]*data["TempkeV"]
+add_field("TeSZ", function=_t_sz)
 
-    global nvec # Ugly!
-    
-    if mue is None:
-        mueinv = 0.875
-    else:
-        mueinv = 1./mue
+class SZProjection(object):
+
+    def __init__(self, pf, freqs, mue=1.143, high_order=False):
+
+        self.pf = pf
+        self.num_freqs = len(freqs)
+        self.high_order = high_order
+        self.freqs = freqs
+        self.mueinv = 1./mue
+        self.xinit = hcgs*freqs*1.0e9/(kboltz*Tcmb)
+        self.freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs]
+        self.field_dict = {}
+
+        self.units = {}
+        self.units["TeSZ"] = r"$\mathrm{keV}$"
+        self.units["Tau"] = None
+
+        self.display_names = {}
+        self.display_names["TeSZ"] = r"$\mathrm{T_e}$"
+        self.display_names["Tau"] = r"$\mathrm{\tau}$"
+
+        for f, field in zip(self.freqs, self.freq_fields):
+            self.units[field] = r"$\mathrm{MJy\ sr^{-1}}$"
+            self.display_names[field] = r"$\mathrm{\Delta{I}_{%d\ GHz}}$" % (int(freq))
+            
+    def on_axis(self, axis, center="c", width=(1, "unitary"), nx=800):
+
+        axis = fix_axis(axis)
+
+        def _beta_par(field, data):
+            axis = data.get_field_parameter("axis")
+            vpar = data["Density"]*data["%s-velocity" % (vlist[axis])]
+            return vpar/clight
+        add_field("BetaPar", function=_beta_par)    
+
+        proj = self.pf.h.proj(axis, "Density")
+        proj.set_field_parameter("axis", axis)
+        frb = proj.to_frb(width, nx)
+        dens = frb["Density"]
+        Te = frb["TeSZ"]/dens
+        bpar = frb["BetaPar"]/dens
+        omega1 = frb["TSquared"]/dens/(Te*Te) - 1.
+        if self.high_order:
+            bperp2 = frb["BetaPerpSquared"]/dens
+            sigma1 = frb["TBetaPar"]/dens/Te - bpar
+            kappa1 = frb["BetaParSquared"]/dens - bpar
+        else:
+            bperp2 = np.zeros((nx,nx))
+            sigma1 = np.zeros((nx,nx))
+            kappa1 = np.zeros((nx,nx))
+        tau = sigma_thompson*dens*self.mueinv/mp
+
+        nx,ny = frb.buff_size
+        self.bounds = frb.bounds
+        self.dx = (frb.bounds[1]-frb.bounds[0])/nx
+        self.dy = (frb.bounds[3]-frb.bounds[2])/ny
         
-    num_freqs = len(freqs)
-    freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs]
-    xinit = hcgs*freqs*1.0e9/(kboltz*Tcmb)
+        self._compute_intensity(tau, Te, bpar, omega1, sigma1, kappa1, bperp2)
+                                                                                                                
+    def off_axis(self, L, center="c", width=(1, "unitary"), nx=800):
 
-    if isinstance(axis, basestring) or not iterable(axis):
-        axis = fix_axis(axis)
-        nvec = axis
-        proj1 = pf.h.proj(axis, "TempkeV", weight_field="Density")
-        proj2 = pf.h.proj(axis, "Density")
-        frb1 = proj1.to_frb(width, nx)
-        frb2 = proj2.to_frb(width, nx)
-        Te = frb1["TempkeV"]
-        bpar = frb1["BetaPar"]
-        bperp2 = frb1["BetaPerpSquared"]
-        omega1 = frb1["TSquared"]/(Te*Te) - 1.
-        sigma1 = frb1["TBetaPar"]/Te - bpar
-        kappa1 = frb1["BetaParSquared"] - bpar
-        tau = sigma_thompson*frb2["Density"]*mueinv/mp
-    else:
-        nvec = axis
         if iterable(width):
             w = width[0]/pf.units[width[1]]
         else:
             w = width
-        Te      = off_axis_projection(pf, center, axis, w, nx, "TempkeV", weight="Density")
-        bpar    = off_axis_projection(pf, center, axis, w, nx, "BetaPar", weight="Density")
-        bperp2  = off_axis_projection(pf, center, axis, w, nx, "BetaPerpSquared", weight="Density")
-        omega1  = off_axis_projection(pf, center, axis, w, nx, "TSquared", weight="Density")
+
+        def _beta_par(field, data):
+            vpar = data["Density"]*(data["x-velocity"]*L[0]+
+                                    data["y-velocity"]*L[1]+
+                                    data["z-velocity"]*L[2])
+            return vpar/clight
+        add_field("BetaPar", function=_beta_par)
+
+        dens    = off_axis_projection(self.pf, center, L, w, nx, "Density")
+        Te      = off_axis_projection(self.pf, center, L, w, nx, "TeSZ")/dens
+        bpar    = off_axis_projection(self.pf, center, L, w, nx, "BetaPar")/dens
+        omega1  = off_axis_projection(self.pf, center, L, w, nx, "TSquared")/dens
         omega1  = omega1/(Te*Te) - 1.
-        sigma1  = off_axis_projection(pf, center, axis, w, nx, "TBetaPar", weight="Density")
-        sigma1  = sigma1/Te - bpar
-        kappa1  = off_axis_projection(pf, center, axis, w, nx, "BetaParSquared", weight="Density")
-        kappa1 -= bpar
-        tau     = off_axis_projection(pf, center, axis, w, nx, "Density")
-        tau    *= sigma_thompson*mueinv/mp
+        if self.high_order:
+            bperp2  = off_axis_projection(self.pf, center, L, w, nx, "BetaPerpSquared")/dens
+            sigma1  = off_axis_projection(self.pf, center, L, w, nx, "TBetaPar")/dens
+            sigma1  = sigma1/Te - bpar
+            kappa1  = off_axis_projection(self.pf, center, L, w, nx, "BetaParSquared")/dens
+            kappa1 -= bpar
+        else:
+            bperp2 = np.zeros((nx,nx))
+            sigma1 = np.zeros((nx,nx))
+            kappa1 = np.zeros((nx,nx))
+        tau = sigma_thompson*dens*mueinv/mp
+
+        self.bounds = np.array([-0.5*w, 0.5*w, -0.5*w, 0.5*w])
+        self.dx = w/nx
+        self.dy = w/nx
         
-    SZsignal = np.zeros((num_freqs,nx,nx))
-    xo = np.zeros((num_freqs))
-    
-    k = int(0)
+        self._compute_intensity(tau, Te, bpar, omega1, sigma1, kappa1, bperp2)
 
-    pbar = get_pbar("Computing SZ signal.", nx*nx)
-    
-    for i in xrange(nx):
-	for j in xrange(nx):
-            xo[:] = xinit[:]
-	    SZpack.compute_combo_means(xo, tau[i,j], Te[i,j],
-                                       bpar[i,j], omega[i,j],
-                                       sigma[i,j], kappa[i,j], bperp2[i,j])
-            SZsignal[:,i,j] = -xo[:]
-            pbar.update(k)
-            k += 1
+    def _compute_intensity(self, tau, Te, bpar, omega1, sigma1, kappa1, bperp2):
 
-    pbar.finish()
-    
-    outimg = {}
-    for i in xrange(num_freqs) :
-	outimg[freq_fields[i]] = ImageArray(SZsignal[i,:,:])
-	
-    return outimg
+        comm = communication_system.communicators[-1]
+        
+        nx, ny = tau.shape
+        signal = np.zeros((self.num_freqs,nx,nx))
+        xo = np.zeros((self.num_freqs))
+        
+        k = int(0)
+
+        start_i = comm.rank*nx/comm.size
+        end_i = (comm.rank+1)*nx/comm.size
+                        
+        pbar = get_pbar("Computing SZ signal.", nx*nx)
+        
+        for i in xrange(start_i, end_i):
+            for j in xrange(ny):
+                xo[:] = self.xinit[:]
+                SZpack.compute_combo_means(xo, tau[i,j], Te[i,j],
+                                           bpar[i,j], omega1[i,j],
+                                           sigma1[i,j], kappa1[i,j], bperp2[i,j])
+                signal[:,i,j] = -xo[:]
+                pbar.update(k)
+                k += 1
+
+        signal = comm.mpi_allreduce(signal)
+        
+        pbar.finish()
+                
+        for i, field in enumerate(self.freq_fields):
+            self.field_dict[field] = ImageArray(I0*self.xinit[i]**3*signal[i,:,:])
+        self.field_dict["Tau"] = ImageArray(tau)
+        self.field_dict["TeSZ"] = ImageArray(Te)
+
+    @parallel_root_only
+    def write_fits(self, filename_prefix, clobber=True):
+
+        coords = {}
+        coords["dx"] = self.dx*self.pf.units["kpc"]
+        coords["dy"] = self.dy*self.pf.units["kpc"]
+        coords["xctr"] = 0.0
+        coords["yctr"] = 0.0
+        coords["units"] = "kpc"
+        other_keys = {"Time" : self.pf.current_time}
+        write_fits(self.field_dict, filename_prefix, clobber=clobber, coords=coords,
+                   other_keys=other_keys)
+
+    @parallel_root_only
+    def write_png(self, filename_prefix):
+
+        extent = tuple([bound*self.pf.units["kpc"] for bound in self.bounds])
+        for field, image in self.field_dict.items():
+            filename=filename_prefix+"_"+field+".png"
+            label = self.display_names[field]
+            if self.units[field] is not None:
+                label += " ("+self.units[field]+")"
+            write_projection(image, filename, colorbar_label=label, take_log=False,
+                             extent=extent, xlabel=r"$\mathrm{x\ (kpc)}$",
+                             ylabel=r"$\mathrm{y\ (kpc)}$")
+
+    def keys(self):
+        return self.field_dict.keys()
+
+    def has_key(self, key):
+        return key in self.field_dict.keys()
+
+    def __getitem__(self, key):
+        return self.field_dict[key]


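A point worth spelling out about the fields defined above: TSquared, TBetaPar, TeSZ and friends are premultiplied by Density so that dividing an unweighted projection of each field by the projected density yields the density-weighted line-of-sight average. A minimal numpy sketch of that identity, with hypothetical cell values:

    import numpy as np

    rho = np.array([1.0, 2.0, 4.0])  # hypothetical cell densities along a ray
    T = np.array([3.0, 5.0, 7.0])    # hypothetical cell temperatures

    # Projecting rho*T and rho separately, then dividing, equals the
    # density-weighted average of T along the ray.
    Te = np.sum(rho * T) / np.sum(rho)
    assert np.isclose(Te, np.average(T, weights=rho))
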
https://bitbucket.org/yt_analysis/yt-3.0/commits/47722556465d/
Changeset:   47722556465d
Branch:      yt
User:        jzuhone
Date:        2013-09-22 16:12:08
Summary:     Docstrings, copyright, and a bug fix
Affected #:  2 files

diff -r d0b4d51fced2ea9f52fc811aff668461b58ec7f6 -r 47722556465d3f11353c132a5354a362585ef1eb yt/analysis_modules/sunyaev_zeldovich/api.py
--- a/yt/analysis_modules/sunyaev_zeldovich/api.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/api.py
@@ -1,1 +1,12 @@
+"""
+API for sunyaev_zeldovich
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
 from projection import SZProjection

diff -r d0b4d51fced2ea9f52fc811aff668461b58ec7f6 -r 47722556465d3f11353c132a5354a362585ef1eb yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -1,3 +1,23 @@
+"""
+Projection class for the Sunyaev-Zeldovich effect. Requires SZpack (at least
+version 1.1.1) to be downloaded and installed:
+
+http://www.chluba.de/SZpack/
+
+For details on the computations involved please refer to the following references:
+
+Chluba, Nagai, Sazonov, Nelson, MNRAS, 2012, arXiv:1205.5778
+Chluba, Switzer, Nagai, Nelson, MNRAS, 2012, arXiv:1211.3206 
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
 from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mp
 from yt.data_objects.image_array import ImageArray
 from yt.data_objects.field_info_container import add_field
@@ -15,7 +35,7 @@
 try:
     import SZpack
 except:
-    raise ImportError("SZpack not installed.")
+    raise ImportError("SZpack not installed. It can be obtained from from http://www.chluba.de/SZpack/.")
 
 vlist = "xyz"
 
@@ -40,13 +60,30 @@
 add_field("TeSZ", function=_t_sz)
 
 class SZProjection(object):
+    r""" Initialize a SZProjection object.
 
+    Parameters
+    ----------
+    pf : parameter_file
+        The parameter file.
+    freqs : array_like
+        The frequencies (in GHz) at which to compute the SZ spectral distortion.
+    mue : float, optional
+        Mean molecular weight for determining the electron number density.
+    high_order : boolean, optional
+        Should we calculate high-order moments of velocity and temperature?
+
+    Examples
+    --------
+    >>> freqs = [90., 180., 240.]
+    >>> szprj = SZProjection(pf, freqs, high_order=True)
+    """
     def __init__(self, pf, freqs, mue=1.143, high_order=False):
-
+            
         self.pf = pf
         self.num_freqs = len(freqs)
         self.high_order = high_order
-        self.freqs = freqs
+        self.freqs = np.array(freqs)
         self.mueinv = 1./mue
         self.xinit = hcgs*freqs*1.0e9/(kboltz*Tcmb)
         self.freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs]
@@ -64,8 +101,26 @@
             self.units[field] = r"$\mathrm{MJy\ sr^{-1}}$"
             self.display_names[field] = r"$\mathrm{\Delta{I}_{%d\ GHz}}$" % (int(freq))
             
-    def on_axis(self, axis, center="c", width=(1, "unitary"), nx=800):
+    def on_axis(self, axis, center="c", width=(1, "unitary"), nx=800, source=None):
+        r""" Make an on-axis projection of the SZ signal.
 
+        Parameters
+        ----------
+        axis : integer or string
+            The axis of the simulation domain along which to make the SZ projection.
+        center : array_like or string, optional
+            The center of the projection.
+        width : float or tuple
+            The width of the projection.
+        nx : integer, optional
+            The dimensions on a side of the projection image.
+        source : yt.data_objects.api.AMRData, optional
+            If specified, this will be the data source used for selecting regions to project.
+
+        Examples
+        --------
+        >>> szprj.on_axis("y", center="max", width=(1.0, "mpc"), source=my_sphere)
+        """
         axis = fix_axis(axis)
 
         def _beta_par(field, data):
@@ -74,7 +129,7 @@
             return vpar/clight
         add_field("BetaPar", function=_beta_par)    
 
-        proj = self.pf.h.proj(axis, "Density")
+        proj = self.pf.h.proj(axis, "Density", source=source)
         proj.set_field_parameter("axis", axis)
         frb = proj.to_frb(width, nx)
         dens = frb["Density"]
@@ -99,12 +154,35 @@
         self._compute_intensity(tau, Te, bpar, omega1, sigma1, kappa1, bperp2)
                                                                                                                 
     def off_axis(self, L, center="c", width=(1, "unitary"), nx=800):
-
+        r""" Make an off-axis projection of the SZ signal.
+        
+        Parameters
+        ----------
+        L : array_like
+            The normal vector of the projection. 
+        center : array_like or string, optional
+            The center of the projection.
+        width : float or tuple
+            The width of the projection.
+        nx : integer, optional
+            The dimensions on a side of the projection image.
+                    
+        Examples
+        --------
+        >>> L = np.array([0.5, 1.0, 0.75])
+        >>> szprj.off_axis(L, center="c", width=(2.0, "mpc"))
+        """
         if iterable(width):
-            w = width[0]/pf.units[width[1]]
+            w = width[0]/self.pf.units[width[1]]
         else:
             w = width
-
+        if center == "c":
+            ctr = self.pf.domain_center
+        elif center == "max":
+            ctr = self.pf.h.find_max("Density")
+        else:
+            ctr = center
+            
         def _beta_par(field, data):
             vpar = data["Density"]*(data["x-velocity"]*L[0]+
                                     data["y-velocity"]*L[1]+
@@ -112,22 +190,22 @@
             return vpar/clight
         add_field("BetaPar", function=_beta_par)
 
-        dens    = off_axis_projection(self.pf, center, L, w, nx, "Density")
-        Te      = off_axis_projection(self.pf, center, L, w, nx, "TeSZ")/dens
-        bpar    = off_axis_projection(self.pf, center, L, w, nx, "BetaPar")/dens
-        omega1  = off_axis_projection(self.pf, center, L, w, nx, "TSquared")/dens
+        dens    = off_axis_projection(self.pf, ctr, L, w, nx, "Density")
+        Te      = off_axis_projection(self.pf, ctr, L, w, nx, "TeSZ")/dens
+        bpar    = off_axis_projection(self.pf, ctr, L, w, nx, "BetaPar")/dens
+        omega1  = off_axis_projection(self.pf, ctr, L, w, nx, "TSquared")/dens
         omega1  = omega1/(Te*Te) - 1.
         if self.high_order:
-            bperp2  = off_axis_projection(self.pf, center, L, w, nx, "BetaPerpSquared")/dens
-            sigma1  = off_axis_projection(self.pf, center, L, w, nx, "TBetaPar")/dens
+            bperp2  = off_axis_projection(self.pf, ctr, L, w, nx, "BetaPerpSquared")/dens
+            sigma1  = off_axis_projection(self.pf, ctr, L, w, nx, "TBetaPar")/dens
             sigma1  = sigma1/Te - bpar
-            kappa1  = off_axis_projection(self.pf, center, L, w, nx, "BetaParSquared")/dens
+            kappa1  = off_axis_projection(self.pf, ctr, L, w, nx, "BetaParSquared")/dens
             kappa1 -= bpar
         else:
             bperp2 = np.zeros((nx,nx))
             sigma1 = np.zeros((nx,nx))
             kappa1 = np.zeros((nx,nx))
-        tau = sigma_thompson*dens*mueinv/mp
+        tau = sigma_thompson*dens*self.mueinv/mp
 
         self.bounds = np.array([-0.5*w, 0.5*w, -0.5*w, 0.5*w])
         self.dx = w/nx
@@ -171,7 +249,21 @@
 
     @parallel_root_only
     def write_fits(self, filename_prefix, clobber=True):
-
+        r""" Export images to a FITS file. Writes the SZ distortion in all
+        specified frequencies as well as the mass-weighted temperature and the
+        optical depth. Distance units are in kpc.  
+        
+        Parameters
+        ----------
+        filename_prefix : string
+            The prefix of the FITS filename.
+        clobber : boolean, optional
+            If the file already exists, do we overwrite?
+                    
+        Examples
+        --------
+        >>> szprj.write_fits("SZbullet", clobber=False)
+        """
         coords = {}
         coords["dx"] = self.dx*self.pf.units["kpc"]
         coords["dy"] = self.dy*self.pf.units["kpc"]
@@ -184,7 +276,19 @@
 
     @parallel_root_only
     def write_png(self, filename_prefix):
-
+        r""" Export images to PNG files. Writes the SZ distortion in all
+        specified frequencies as well as the mass-weighted temperature and the
+        optical depth. Distance units are in kpc. 
+        
+        Parameters
+        ----------
+        filename_prefix : string
+            The prefix of the image filenames.
+                
+        Examples
+        --------
+        >>> szprj.write_png("SZsloshing")
+        """     
         extent = tuple([bound*self.pf.units["kpc"] for bound in self.bounds])
         for field, image in self.field_dict.items():
             filename=filename_prefix+"_"+field+".png"


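Putting the documented calls together, a hedged end-to-end sketch (the dataset path is hypothetical):

    from yt.mods import load
    from yt.analysis_modules.api import SZProjection

    pf = load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")  # hypothetical path
    szprj = SZProjection(pf, [90., 180., 240.], high_order=True)
    szprj.on_axis("z", width=(1.0, "mpc"), nx=400)
    szprj.write_fits("SZ_example")  # SZ distortion, Tau, and TeSZ with kpc headers
    szprj.write_png("SZ_example")   # one PNG per field
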
https://bitbucket.org/yt_analysis/yt-3.0/commits/5780cf148e35/
Changeset:   5780cf148e35
Branch:      yt
User:        jzuhone
Date:        2013-09-22 20:43:21
Summary:     Small bug fixes
Affected #:  1 file

diff -r 47722556465d3f11353c132a5354a362585ef1eb -r 5780cf148e35d18e96e89425c5d4f59b7b07c6e0 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -85,7 +85,7 @@
         self.high_order = high_order
         self.freqs = np.array(freqs)
         self.mueinv = 1./mue
-        self.xinit = hcgs*freqs*1.0e9/(kboltz*Tcmb)
+        self.xinit = hcgs*self.freqs*1.0e9/(kboltz*Tcmb)
         self.freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs]
         self.field_dict = {}
 
@@ -99,7 +99,7 @@
 
         for f, field in zip(self.freqs, self.freq_fields):
             self.units[field] = r"$\mathrm{MJy\ sr^{-1}}$"
-            self.display_names[field] = r"$\mathrm{\Delta{I}_{%d\ GHz}}$" % (int(freq))
+            self.display_names[field] = r"$\mathrm{\Delta{I}_{%d\ GHz}}$" % (int(f))
             
     def on_axis(self, axis, center="c", width=(1, "unitary"), nx=800, source=None):
         r""" Make an on-axis projection of the SZ signal.
@@ -234,7 +234,7 @@
                 SZpack.compute_combo_means(xo, tau[i,j], Te[i,j],
                                            bpar[i,j], omega1[i,j],
                                            sigma1[i,j], kappa1[i,j], bperp2[i,j])
-                signal[:,i,j] = -xo[:]
+                signal[:,i,j] = xo[:]
                 pbar.update(k)
                 k += 1
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/3064a4e68cc2/
Changeset:   3064a4e68cc2
Branch:      yt
User:        jzuhone
Date:        2013-09-28 19:50:24
Summary:     Adding a requires_module decorator to allow certain tests to pass if optional modules cannot be imported.
Affected #:  1 file

diff -r 5780cf148e35d18e96e89425c5d4f59b7b07c6e0 -r 3064a4e68cc22786d39b281277668a14c01c70eb yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -14,6 +14,7 @@
 
 import itertools as it
 import numpy as np
+import importlib
 from yt.funcs import *
 from numpy.testing import assert_array_equal, assert_almost_equal, \
     assert_approx_equal, assert_array_almost_equal, assert_equal, \
@@ -251,3 +252,23 @@
                     list_of_kwarg_dicts[i][key] = keywords[key][0]
 
     return list_of_kwarg_dicts
+
+def requires_module(module):
+    """
+    Decorator that takes a module name as an argument and tries to import it.
+    If the module imports without issue, the function is returned, but if not, 
+    a null function is returned. This is so tests that depend on certain modules
+    being imported will not fail if the module is not installed on the testing
+    platform.
+    """
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        return func
+    try:
+        importlib.import_module(module)
+    except ImportError:
+        return ffalse
+    else:
+        return ftrue
+    


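A short usage sketch of the new decorator (the test name is hypothetical). When the module imports cleanly the wrapped test runs; otherwise it is replaced by a no-op lambda, so nose records a trivial pass instead of an ImportError:

    from yt.testing import requires_module

    @requires_module("SZpack")
    def test_szpack_dependent():
        import SZpack  # guaranteed importable if this body runs
        assert SZpack is not None
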
https://bitbucket.org/yt_analysis/yt-3.0/commits/e596f8824970/
Changeset:   e596f8824970
Branch:      yt
User:        jzuhone
Date:        2013-09-28 19:51:04
Summary:     Adding the current CMB temperature as a physical "constant".
Affected #:  1 file

diff -r 3064a4e68cc22786d39b281277668a14c01c70eb -r e596f8824970a29ca62c178a9b741963df1e7b00 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -8,7 +8,7 @@
 # http://physics.nist.gov/cuu/Constants/index.html
 
 # Masses
-mass_electron_cgs = 9.109382-28  # g
+mass_electron_cgs = 9.109382e-28  # g
 amu_cgs           = 1.660538921e-24  # g
 mass_hydrogen_cgs = 1.007947*amu_cgs  # g
 mass_sun_cgs = 1.98841586e33  # g
@@ -84,6 +84,7 @@
 erg_per_keV = erg_per_eV * 1.0e3
 K_per_keV = erg_per_keV / boltzmann_constant_cgs
 keV_per_K = 1.0 / K_per_keV
+Tcmb = 2.726 # Current CMB temperature
 
 #Short cuts
 G = gravitational_constant_cgs


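The electron-mass fix above is subtler than it looks: Python parses the old constant as a subtraction, so it failed silently rather than raising. A one-line demonstration:

    print(9.109382-28)    # -18.890618: parsed as subtraction, a negative "mass"
    print(9.109382e-28)   # 9.109382e-28: the intended scientific notation
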
https://bitbucket.org/yt_analysis/yt-3.0/commits/439f0735086b/
Changeset:   439f0735086b
Branch:      yt
User:        jzuhone
Date:        2013-09-28 19:53:12
Summary:     1) Fixing bugs related to mass weighting and velocity projections.
2) Adding a write_hdf5 method for writing the projections to an HDF5 file.
3) Adding dict-like attributes and methods.
Affected #:  1 file

diff -r e596f8824970a29ca62c178a9b741963df1e7b00 -r 439f0735086b4cd264c2c5477b843dfe3a7d480f yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -18,7 +18,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mp
+from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
 from yt.data_objects.image_array import ImageArray
 from yt.data_objects.field_info_container import add_field
 from yt.funcs import fix_axis, mylog, iterable, get_pbar
@@ -27,9 +27,9 @@
 from yt.visualization.volume_rendering.camera import off_axis_projection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only
+from yt.utilities.exceptions import YTException
 import numpy as np
 
-Tcmb = 2.726
 I0 = 2*(kboltz*Tcmb)**3/((hcgs*clight)**2)*1.0e17
         
 try:
@@ -44,15 +44,15 @@
 add_field("TSquared", function=_t_squared)
 
 def _beta_perp_squared(field, data):
-    return data["Density"]*(data["VelocityMagnitude"]**2/clight/clight - data["BetaParSquared"])
+    return data["Density"]*data["VelocityMagnitude"]**2/clight/clight - data["BetaParSquared"]
 add_field("BetaPerpSquared", function=_beta_perp_squared)
 
 def _beta_par_squared(field, data):
-    return data["Density"]*data["BetaPar"]**2
+    return data["BetaPar"]**2/data["Density"]
 add_field("BetaParSquared", function=_beta_par_squared)
 
 def _t_beta_par(field, data):
-    return data["Density"]*data["TempkeV"]*data["BetaPar"]
+    return data["TempkeV"]*data["BetaPar"]
 add_field("TBetaPar", function=_t_beta_par)
 
 def _t_sz(field, data):
@@ -136,24 +136,24 @@
         Te = frb["TeSZ"]/dens
         bpar = frb["BetaPar"]/dens
         omega1 = frb["TSquared"]/dens/(Te*Te) - 1.
+        bperp2 = np.zeros((nx,nx))
+        sigma1 = np.zeros((nx,nx))
+        kappa1 = np.zeros((nx,nx))                                    
         if self.high_order:
             bperp2 = frb["BetaPerpSquared"]/dens
             sigma1 = frb["TBetaPar"]/dens/Te - bpar
-            kappa1 = frb["BetaParSquared"]/dens - bpar
-        else:
-            bperp2 = np.zeros((nx,nx))
-            sigma1 = np.zeros((nx,nx))
-            kappa1 = np.zeros((nx,nx))
-        tau = sigma_thompson*dens*self.mueinv/mp
+            kappa1 = frb["BetaParSquared"]/dens - bpar*bpar
+        tau = sigma_thompson*dens*self.mueinv/mh
 
         nx,ny = frb.buff_size
         self.bounds = frb.bounds
         self.dx = (frb.bounds[1]-frb.bounds[0])/nx
         self.dy = (frb.bounds[3]-frb.bounds[2])/ny
+        self.nx = nx
         
         self._compute_intensity(tau, Te, bpar, omega1, sigma1, kappa1, bperp2)
                                                                                                                 
-    def off_axis(self, L, center="c", width=(1, "unitary"), nx=800):
+    def off_axis(self, L, center="c", width=(1, "unitary"), nx=800, source=None):
         r""" Make an off-axis projection of the SZ signal.
         
         Parameters
@@ -166,6 +166,9 @@
             The width of the projection.
         nx : integer, optional
             The dimensions on a side of the projection image.
+        source : yt.data_objects.api.AMRData, optional
+            If specified, this will be the data source used for selecting regions to project.
+            Currently unsupported in yt 2.x.
                     
         Examples
         --------
@@ -182,7 +185,10 @@
             ctr = self.pf.h.find_max("Density")
         else:
             ctr = center
-            
+
+        if source is not None:
+            raise YTException("Source argument is not currently supported for off-axis S-Z projections.")
+        
         def _beta_par(field, data):
             vpar = data["Density"]*(data["x-velocity"]*L[0]+
                                     data["y-velocity"]*L[1]+
@@ -205,20 +211,25 @@
             bperp2 = np.zeros((nx,nx))
             sigma1 = np.zeros((nx,nx))
             kappa1 = np.zeros((nx,nx))
-        tau = sigma_thompson*dens*self.mueinv/mp
+        tau = sigma_thompson*dens*self.mueinv/mh
 
         self.bounds = np.array([-0.5*w, 0.5*w, -0.5*w, 0.5*w])
         self.dx = w/nx
         self.dy = w/nx
-        
+        self.nx = nx
+
         self._compute_intensity(tau, Te, bpar, omega1, sigma1, kappa1, bperp2)
 
     def _compute_intensity(self, tau, Te, bpar, omega1, sigma1, kappa1, bperp2):
 
+        # Bad hack, but we get NaNs if we don't do something like this
+        small_beta = np.abs(bpar) < 1.0e-20
+        bpar[small_beta] = 1.0e-20
+                                                                   
         comm = communication_system.communicators[-1]
-        
-        nx, ny = tau.shape
-        signal = np.zeros((self.num_freqs,nx,nx))
+
+        nx, ny = self.nx,self.nx
+        signal = np.zeros((self.num_freqs,nx,ny))
         xo = np.zeros((self.num_freqs))
         
         k = int(0)
@@ -227,7 +238,7 @@
         end_i = (comm.rank+1)*nx/comm.size
                         
         pbar = get_pbar("Computing SZ signal.", nx*nx)
-        
+
         for i in xrange(start_i, end_i):
             for j in xrange(ny):
                 xo[:] = self.xinit[:]
@@ -290,7 +301,7 @@
         >>> szprj.write_png("SZsloshing")
         """     
         extent = tuple([bound*self.pf.units["kpc"] for bound in self.bounds])
-        for field, image in self.field_dict.items():
+        for field, image in self.items():
             filename=filename_prefix+"_"+field+".png"
             label = self.display_names[field]
             if self.units[field] is not None:
@@ -299,11 +310,40 @@
                              extent=extent, xlabel=r"$\mathrm{x\ (kpc)}$",
                              ylabel=r"$\mathrm{y\ (kpc)}$")
 
+    @parallel_root_only
+    def write_hdf5(self, filename):
+        r"""Export the set of S-Z fields to a set of HDF5 datasets.
+        
+        Parameters
+        ----------
+        filename : string
+            This file will be opened in "write" mode.
+        
+        Examples
+        --------
+        >>> szprj.write_hdf5("SZsloshing.h5")                        
+        """
+        import h5py
+        f = h5py.File(filename, "w")
+        for field, data in self.items():
+            f.create_dataset(field,data=data)
+        f.close()
+                                                
     def keys(self):
         return self.field_dict.keys()
 
+    def items(self):
+        return self.field_dict.items()
+
+    def values(self):
+        return self.field_dict.values()
+    
     def has_key(self, key):
         return key in self.field_dict.keys()
 
     def __getitem__(self, key):
         return self.field_dict[key]
+
+    @property
+    def shape(self):
+        return (self.nx,self.nx)


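A minimal sketch of reading back the datasets written by the new write_hdf5 method (filename taken from the docstring example):

    import h5py

    with h5py.File("SZsloshing.h5", "r") as f:
        for name, dset in f.items():
            print(name, dset.shape)  # e.g. 90_GHz, Tau, TeSZ, each (nx, nx)
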
https://bitbucket.org/yt_analysis/yt-3.0/commits/ab6f76a445c0/
Changeset:   ab6f76a445c0
Branch:      yt
User:        jzuhone
Date:        2013-09-28 20:31:58
Summary:     Unit test for the SZ analysis module.
Affected #:  4 files

diff -r 439f0735086b4cd264c2c5477b843dfe3a7d480f -r ab6f76a445c066e0fa807cc35d6c59a026bbd4a0 yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -21,4 +21,5 @@
     config.add_subpackage("star_analysis")
     config.add_subpackage("two_point_functions")
     config.add_subpackage("radmc3d_export")
+    config.add_subpackage("sunyaev_zeldovich")    
     return config

diff -r 439f0735086b4cd264c2c5477b843dfe3a7d480f -r ab6f76a445c066e0fa807cc35d6c59a026bbd4a0 yt/analysis_modules/sunyaev_zeldovich/setup.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/setup.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('sunyaev_zeldovich', parent_package, top_path)
+    config.add_subpackage("tests")
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 439f0735086b4cd264c2c5477b843dfe3a7d480f -r ab6f76a445c066e0fa807cc35d6c59a026bbd4a0 yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -0,0 +1,103 @@
+"""
+Unit test the sunyaev_zeldovich analysis module.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.frontends.stream.api import load_uniform_grid
+from yt.funcs import get_pbar
+from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, \
+     mh, cm_per_km, kboltz, Tcmb, hcgs, clight, sigma_thompson
+from yt.testing import *
+from yt.analysis_modules.sunyaev_zeldovich.projection import SZProjection, I0
+import numpy as np
+try:
+    import SZpack
+except ImportError:
+    pass
+
+mue = 1./0.88
+freqs = np.array([30., 90., 240.])
+    
+def setup():
+    pass
+
+def full_szpack3d(pf, xo):
+    data = pf.h.grids[0]
+    dz = pf.h.get_smallest_dx()*pf.units["cm"]
+    nx,ny,nz = data["Density"].shape
+    dn = np.zeros((nx,ny,nz))
+    Dtau = sigma_thompson*data["Density"]/(mh*mue)*dz
+    Te = data["Temperature"]/K_per_keV
+    betac = data["z-velocity"]/clight
+    pbar = get_pbar("Computing 3-D cell-by-cell S-Z signal for comparison.", nx) 
+    for i in xrange(nx):
+        pbar.update(i)
+        for j in xrange(ny):
+            for k in xrange(nz):
+                dn[i,j,k] = SZpack.compute_3d(xo, Dtau[i,j,k],
+                                              Te[i,j,k], betac[i,j,k],
+                                              1.0, 0.0, 0.0, 1.0e-5)
+    pbar.finish()
+    return I0*xo**3*np.sum(dn, axis=2)
+
+def setup_cluster():
+
+    R = 1000.
+    r_c = 100.
+    rho_c = 1.673e-26
+    beta = 1.
+    T0 = 4.
+    nx,ny,nz = 16,16,16
+    c = 0.17
+    a_c = 30.
+    a = 200.
+    v0 = 300.*cm_per_km
+    ddims = (nx,ny,nz)
+    
+    x, y, z = np.mgrid[-R:R:nx*1j,
+                       -R:R:ny*1j,
+                       -R:R:nz*1j]
+
+    r = np.sqrt(x**2+y**2+z**2)
+
+    dens = np.zeros(ddims)
+    dens = rho_c*(1.+(r/r_c)**2)**(-1.5*beta)
+    temp = T0*K_per_keV/(1.+r/a)*(c+r/a_c)/(1.+r/a_c)
+    velz = v0*temp/(T0*K_per_keV)
+
+    data = {}
+    data["Density"] = dens
+    data["Temperature"] = temp
+    data["x-velocity"] = np.zeros(ddims)
+    data["y-velocity"] = np.zeros(ddims)
+    data["z-velocity"] = velz
+
+    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
+    
+    L = 2*R*cm_per_kpc
+    dl = L/nz
+
+    pf = load_uniform_grid(data, ddims, L, bbox=bbox)
+
+    return pf
+
+pf = setup_cluster()
+
+@requires_module("SZpack")
+def test_projection():
+    nx,ny,nz = pf.domain_dimensions
+    xinit = 1.0e9*hcgs*freqs/(kboltz*Tcmb)
+    szprj = SZProjection(pf, freqs, mue=mue, high_order=True)
+    szprj.on_axis(2, nx=nx)
+    deltaI = np.zeros((3,nx,ny))
+    for i in xrange(3):
+        deltaI[i,:,:] = full_szpack3d(pf, xinit[i])
+        yield assert_almost_equal, deltaI[i,:,:], szprj["%d_GHz" % int(freqs[i])], 6
+        


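For reference, the analytic beta-model used by setup_cluster above, pulled out as a standalone sketch with the same parameter values:

    import numpy as np

    def beta_model(r, rho_c=1.673e-26, r_c=100., beta=1.):
        # rho(r) = rho_c * (1 + (r/r_c)**2) ** (-1.5*beta)
        return rho_c * (1. + (r / r_c)**2)**(-1.5 * beta)

    # Central, core, and outskirt densities (g/cm**3) for r in kpc.
    print(beta_model(np.array([0., 100., 1000.])))
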
https://bitbucket.org/yt_analysis/yt-3.0/commits/b1642de56ef4/
Changeset:   b1642de56ef4
Branch:      yt
User:        jzuhone
Date:        2013-09-29 20:28:09
Summary:     Answer test, but it doesn't work yet for some reason.
Affected #:  2 files

diff -r ab6f76a445c066e0fa807cc35d6c59a026bbd4a0 -r b1642de56ef468aa58ac954778f977726edefbc0 yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -15,6 +15,8 @@
 from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, \
      mh, cm_per_km, kboltz, Tcmb, hcgs, clight, sigma_thompson
 from yt.testing import *
+from yt.utilities.answer_testing.framework import requires_pf, \
+     GenericArrayTest, data_dir_load
 from yt.analysis_modules.sunyaev_zeldovich.projection import SZProjection, I0
 import numpy as np
 try:
@@ -26,7 +28,9 @@
 freqs = np.array([30., 90., 240.])
     
 def setup():
-    pass
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"        
 
 def full_szpack3d(pf, xo):
     data = pf.h.grids[0]
@@ -88,10 +92,9 @@
 
     return pf
 
-pf = setup_cluster()
-
 @requires_module("SZpack")
 def test_projection():
+    pf = setup_cluster()
     nx,ny,nz = pf.domain_dimensions
     xinit = 1.0e9*hcgs*freqs/(kboltz*Tcmb)
     szprj = SZProjection(pf, freqs, mue=mue, high_order=True)
@@ -100,4 +103,24 @@
     for i in xrange(3):
         deltaI[i,:,:] = full_szpack3d(pf, xinit[i])
         yield assert_almost_equal, deltaI[i,:,:], szprj["%d_GHz" % int(freqs[i])], 6
+
+M7 = "DD0010/moving7_0010"
+@requires_module("SZpack")
+@requires_pf(M7)
+def test_M7_onaxis():
+    pf = data_dir_load(M7)
+    def onaxis_func():
+        szprj = SZProjection(pf, freqs)
+        szprj.on_axis(2)
+        return szprj
+    yield GenericArrayTest(pf, onaxis_func)
         
+@requires_module("SZpack")
+@requires_pf(M7)
+def test_M7_offaxis():
+    pf = data_dir_load(sloshing)
+    def offaxis_func():
+        szprj = SZProjection(pf, freqs)
+        szprj.off_axis(np.array([0.1,-0.2,0.4]))
+        return szprj                    
+    yield GenericArrayTest(pf, offaxis_func)

diff -r ab6f76a445c066e0fa807cc35d6c59a026bbd4a0 -r b1642de56ef468aa58ac954778f977726edefbc0 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -609,6 +609,25 @@
         assert compare_images(fns[0], fns[1], 10**(-self.decimals)) == None
         for fn in fns: os.remove(fn)
 
+class GenericArrayTest(AnswerTestingTest):
+    _type_name = "GenericArray"
+    _attrs = ('array_func','args','kwargs')
+    def __init__(self, pf_fn, array_func, args=None, kwargs=None, decimals=None):
+        super(AnalysisModuleResultTest, self).__init__(pf_fn)
+        self.array_func = array_func
+        self.decimals = decimals
+    def run(self):
+        return self.array_func(*args, **kwargs)
+    def compare(self, new_result, old_result):
+        assert_equal(len(new_result), len(old_result),
+                                          err_msg="Number of outputs not equal.",
+                                          verbose=True)
+        for k in new_result:
+            if self.decimals is None:
+                assert_equal(new_result[k], old_result[k])
+            else:
+                assert_allclose(new_result[k], old_result[k], 10**(-self.decimals))
+            
 def requires_pf(pf_fn, big_data = False):
     def ffalse(func):
         return lambda: None


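The yield statements above rely on nose's test-generator protocol: each yielded callable (or callable-plus-arguments tuple) is collected and run as a separate test case. A minimal sketch of the pattern, independent of yt:

    def check_even(n):
        assert n % 2 == 0

    def test_many():
        # nose runs each yielded (callable, argument) pair as its own test
        for i in range(3):
            yield check_even, 2 * i
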
https://bitbucket.org/yt_analysis/yt-3.0/commits/83b142845ae0/
Changeset:   83b142845ae0
Branch:      yt
User:        jzuhone
Date:        2013-10-01 18:53:55
Summary:     Fixed the GenericArrayTest and added GenericImageTest.
Affected #:  1 file

diff -r b1642de56ef468aa58ac954778f977726edefbc0 -r 83b142845ae0f2f7a5698c70b5517bdb403cc94f yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -24,6 +24,7 @@
 import shelve
 import zlib
 import tempfile
+import glob
 
 from matplotlib.testing.compare import compare_images
 from nose.plugins import Plugin
@@ -612,12 +613,14 @@
 class GenericArrayTest(AnswerTestingTest):
     _type_name = "GenericArray"
     _attrs = ('array_func','args','kwargs')
-    def __init__(self, pf_fn, array_func, args=None, kwargs=None, decimals=None):
-        super(AnalysisModuleResultTest, self).__init__(pf_fn)
+    def __init__(self, pf_fn, array_func, args=[], kwargs={}, decimals=None):
+        super(GenericArrayTest, self).__init__(pf_fn)
         self.array_func = array_func
+        self.args = args
+        self.kwargs = kwargs
         self.decimals = decimals
     def run(self):
-        return self.array_func(*args, **kwargs)
+        return self.array_func(*self.args, **self.kwargs)
     def compare(self, new_result, old_result):
         assert_equal(len(new_result), len(old_result),
                                           err_msg="Number of outputs not equal.",
@@ -627,7 +630,36 @@
                 assert_equal(new_result[k], old_result[k])
             else:
                 assert_allclose(new_result[k], old_result[k], 10**(-self.decimals))
-            
+
+class GenericImageTest(AnswerTestingTest):
+    _type_name = "GenericImage"
+    _attrs = ('image_func','args','kwargs')
+    def __init__(self, pf_fn, image_func, decimals, args=[], kwargs={}):
+        super(GenericImageTest, self).__init__(pf_fn)
+        self.image_func = image_func
+        self.args = args
+        self.kwargs = kwargs
+        self.decimals = decimals
+    def run(self):
+        comp_imgs = []
+        tmpdir = tempfile.mkdtemp()
+        image_prefix = os.path.join(tmpdir,"test_img_")
+        self.image_func(image_prefix, *self.args, **self.kwargs)
+        imgs = glob.glob(image_prefix)
+        for img in imgs:
+            img_data = mpimg.imread(img)
+            os.remove(img)
+            comp_imgs.append(zlib.compress(img_data.dumps()))
+        return comp_imgs
+    def compare(self, new_result, old_result):
+        fns = ['old.png', 'new.png']
+        num_images = len(old_result)
+        for i in xrange(num_images):
+            mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result[i])))
+            mpimg.imsave(fns[1], np.loads(zlib.decompress(new_result[i])))
+            assert compare_images(fns[0], fns[1], 10**(-self.decimals)) == None
+            for fn in fns: os.remove(fn)
+
 def requires_pf(pf_fn, big_data = False):
     def ffalse(func):
         return lambda: None


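GenericImageTest.run stores each image by pickling the array with ndarray.dumps(), compressing with zlib, and later restoring it via np.loads (both current numpy API at the time). A round-trip sketch:

    import numpy as np
    import zlib

    img = np.random.rand(4, 4, 3)        # stand-in for an mpimg.imread result
    blob = zlib.compress(img.dumps())    # what gets stored as the answer
    restored = np.loads(zlib.decompress(blob))
    assert np.array_equal(img, restored)
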
https://bitbucket.org/yt_analysis/yt-3.0/commits/d231865b1e74/
Changeset:   d231865b1e74
Branch:      yt
User:        jzuhone
Date:        2013-10-01 18:54:51
Summary:     Temporary commit... onaxis test does not work yet.
Affected #:  1 file

diff -r 83b142845ae0f2f7a5698c70b5517bdb403cc94f -r d231865b1e741415078e057b2585b61f4ce7aaea yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -11,12 +11,12 @@
 #-----------------------------------------------------------------------------
 
 from yt.frontends.stream.api import load_uniform_grid
-from yt.funcs import get_pbar
+from yt.funcs import get_pbar, mylog
 from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, \
      mh, cm_per_km, kboltz, Tcmb, hcgs, clight, sigma_thompson
 from yt.testing import *
 from yt.utilities.answer_testing.framework import requires_pf, \
-     GenericArrayTest, data_dir_load
+     GenericArrayTest, data_dir_load, GenericImageTest
 from yt.analysis_modules.sunyaev_zeldovich.projection import SZProjection, I0
 import numpy as np
 try:
@@ -91,7 +91,7 @@
     pf = load_uniform_grid(data, ddims, L, bbox=bbox)
 
     return pf
-
+"""
 @requires_module("SZpack")
 def test_projection():
     pf = setup_cluster()
@@ -103,18 +103,23 @@
     for i in xrange(3):
         deltaI[i,:,:] = full_szpack3d(pf, xinit[i])
         yield assert_almost_equal, deltaI[i,:,:], szprj["%d_GHz" % int(freqs[i])], 6
+"""
 
 M7 = "DD0010/moving7_0010"
 @requires_module("SZpack")
 @requires_pf(M7)
 def test_M7_onaxis():
     pf = data_dir_load(M7)
-    def onaxis_func():
-        szprj = SZProjection(pf, freqs)
-        szprj.on_axis(2)
+    szprj = SZProjection(pf, freqs)
+    szprj.on_axis(2, nx=200)
+    def array_func():
         return szprj
-    yield GenericArrayTest(pf, onaxis_func)
-        
+    def image_func(filename_prefix):
+        szprj.write_png(filename_prefix)
+    yield GenericArrayTest(pf, array_func)
+    yield GenericImageTest(pf, image_func, 3)
+    
+"""        
 @requires_module("SZpack")
 @requires_pf(M7)
 def test_M7_offaxis():
@@ -124,3 +129,4 @@
         szprj.off_axis(np.array([0.1,-0.2,0.4]))
         return szprj                    
     yield GenericArrayTest(pf, offaxis_func)
+"""


https://bitbucket.org/yt_analysis/yt-3.0/commits/26e9489dc3e5/
Changeset:   26e9489dc3e5
Branch:      yt
User:        jzuhone
Date:        2013-10-01 21:00:34
Summary:     All tests now pass.
Affected #:  1 file

diff -r d231865b1e741415078e057b2585b61f4ce7aaea -r 26e9489dc3e5b678af77cbd35372344c72334b6d yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -91,7 +91,7 @@
     pf = load_uniform_grid(data, ddims, L, bbox=bbox)
 
     return pf
-"""
+
 @requires_module("SZpack")
 def test_projection():
     pf = setup_cluster()
@@ -103,7 +103,6 @@
     for i in xrange(3):
         deltaI[i,:,:] = full_szpack3d(pf, xinit[i])
         yield assert_almost_equal, deltaI[i,:,:], szprj["%d_GHz" % int(freqs[i])], 6
-"""
 
 M7 = "DD0010/moving7_0010"
 @requires_module("SZpack")
@@ -111,22 +110,23 @@
 def test_M7_onaxis():
     pf = data_dir_load(M7)
     szprj = SZProjection(pf, freqs)
-    szprj.on_axis(2, nx=200)
-    def array_func():
-        return szprj
-    def image_func(filename_prefix):
+    szprj.on_axis(2, nx=100)
+    def onaxis_array_func():
+        return szprj.data
+    def onaxis_image_func(filename_prefix):
         szprj.write_png(filename_prefix)
-    yield GenericArrayTest(pf, array_func)
-    yield GenericImageTest(pf, image_func, 3)
-    
-"""        
+    yield GenericArrayTest(pf, onaxis_array_func)
+    yield GenericImageTest(pf, onaxis_image_func, 3)
+       
 @requires_module("SZpack")
 @requires_pf(M7)
 def test_M7_offaxis():
-    pf = data_dir_load(sloshing)
-    def offaxis_func():
-        szprj = SZProjection(pf, freqs)
-        szprj.off_axis(np.array([0.1,-0.2,0.4]))
-        return szprj                    
-    yield GenericArrayTest(pf, offaxis_func)
-"""
+    pf = data_dir_load(M7)
+    szprj = SZProjection(pf, freqs)
+    szprj.off_axis(np.array([0.1,-0.2,0.4]), nx=100)
+    def offaxis_array_func():
+        return szprj.data
+    def offaxis_image_func(filename_prefix):
+        szprj.write_png(filename_prefix)
+    yield GenericArrayTest(pf, offaxis_array_func)
+    yield GenericImageTest(pf, offaxis_image_func, 3)


https://bitbucket.org/yt_analysis/yt-3.0/commits/08a40ea64be8/
Changeset:   08a40ea64be8
Branch:      yt
User:        jzuhone
Date:        2013-10-01 21:01:00
Summary:     Small naming change.
Affected #:  1 file

diff -r 26e9489dc3e5b678af77cbd35372344c72334b6d -r 08a40ea64be8141c193079cd1722f1c0fc86cd6d yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -87,7 +87,7 @@
         self.mueinv = 1./mue
         self.xinit = hcgs*self.freqs*1.0e9/(kboltz*Tcmb)
         self.freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs]
-        self.field_dict = {}
+        self.data = {}
 
         self.units = {}
         self.units["TeSZ"] = r"$\mathrm{keV}$"
@@ -254,9 +254,9 @@
         pbar.finish()
                 
         for i, field in enumerate(self.freq_fields):
-            self.field_dict[field] = ImageArray(I0*self.xinit[i]**3*signal[i,:,:])
-        self.field_dict["Tau"] = ImageArray(tau)
-        self.field_dict["TeSZ"] = ImageArray(Te)
+            self.data[field] = ImageArray(I0*self.xinit[i]**3*signal[i,:,:])
+        self.data["Tau"] = ImageArray(tau)
+        self.data["TeSZ"] = ImageArray(Te)
 
     @parallel_root_only
     def write_fits(self, filename_prefix, clobber=True):
@@ -282,7 +282,7 @@
         coords["yctr"] = 0.0
         coords["units"] = "kpc"
         other_keys = {"Time" : self.pf.current_time}
-        write_fits(self.field_dict, filename_prefix, clobber=clobber, coords=coords,
+        write_fits(self.data, filename_prefix, clobber=clobber, coords=coords,
                    other_keys=other_keys)
 
     @parallel_root_only
@@ -328,21 +328,21 @@
         for field, data in self.items():
             f.create_dataset(field,data=data)
         f.close()
-                                                
+   
     def keys(self):
-        return self.field_dict.keys()
+        return self.data.keys()
 
     def items(self):
-        return self.field_dict.items()
+        return self.data.items()
 
     def values(self):
-        return self.field_dict.values()
+        return self.data.values()
     
     def has_key(self, key):
-        return key in self.field_dict.keys()
+        return key in self.data.keys()
 
     def __getitem__(self, key):
-        return self.field_dict[key]
+        return self.data[key]
 
     @property
     def shape(self):


https://bitbucket.org/yt_analysis/yt-3.0/commits/c95e7c8d1eb9/
Changeset:   c95e7c8d1eb9
Branch:      yt
User:        jzuhone
Date:        2013-10-01 21:01:53
Summary:     Bug fixes for GenericArrayTest and GenericImageTest
Affected #:  1 file

diff -r 08a40ea64be8141c193079cd1722f1c0fc86cd6d -r c95e7c8d1eb99005ddf666d483fb4381253ce70c yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -612,15 +612,24 @@
 
 class GenericArrayTest(AnswerTestingTest):
     _type_name = "GenericArray"
-    _attrs = ('array_func','args','kwargs')
-    def __init__(self, pf_fn, array_func, args=[], kwargs={}, decimals=None):
+    _attrs = ('array_func_name','args','kwargs')
+    def __init__(self, pf_fn, array_func, args=None, kwargs=None, decimals=None):
         super(GenericArrayTest, self).__init__(pf_fn)
         self.array_func = array_func
+        self.array_func_name = array_func.func_name
         self.args = args
         self.kwargs = kwargs
         self.decimals = decimals
     def run(self):
-        return self.array_func(*self.args, **self.kwargs)
+        if self.args is None:
+            args = []
+        else:
+            args = self.args
+        if self.kwargs is None:
+            kwargs = {}
+        else:
+            kwargs = self.kwargs
+        return self.array_func(*args, **kwargs)
     def compare(self, new_result, old_result):
         assert_equal(len(new_result), len(old_result),
                                           err_msg="Number of outputs not equal.",
@@ -633,19 +642,29 @@
 
 class GenericImageTest(AnswerTestingTest):
     _type_name = "GenericImage"
-    _attrs = ('image_func','args','kwargs')
-    def __init__(self, pf_fn, image_func, decimals, args=[], kwargs={}):
+    _attrs = ('image_func_name','args','kwargs')
+    def __init__(self, pf_fn, image_func, decimals, args=None, kwargs=None):
         super(GenericImageTest, self).__init__(pf_fn)
         self.image_func = image_func
+        self.image_func_name = image_func.func_name
         self.args = args
         self.kwargs = kwargs
         self.decimals = decimals
     def run(self):
+        if self.args is None:
+            args = []
+        else:
+            args = self.args
+        if self.kwargs is None:
+            kwargs = {}
+        else:
+            kwargs = self.kwargs
         comp_imgs = []
         tmpdir = tempfile.mkdtemp()
-        image_prefix = os.path.join(tmpdir,"test_img_")
-        self.image_func(image_prefix, *self.args, **self.kwargs)
-        imgs = glob.glob(image_prefix)
+        image_prefix = os.path.join(tmpdir,"test_img")
+        self.image_func(image_prefix, *args, **kwargs)
+        imgs = glob.glob(image_prefix+"*")
+        assert(len(imgs) > 0)
         for img in imgs:
             img_data = mpimg.imread(img)
             os.remove(img)
@@ -654,6 +673,7 @@
     def compare(self, new_result, old_result):
         fns = ['old.png', 'new.png']
         num_images = len(old_result)
+        assert(num_images > 0)
         for i in xrange(num_images):
             mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result[i])))
             mpimg.imsave(fns[1], np.loads(zlib.decompress(new_result[i])))
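
Two things happen in this fix: the mutable default arguments (`args=[]`, `kwargs={}`) are replaced by `None` sentinels resolved inside run(), and the stored test attribute becomes the function *name* rather than the function object. A standalone sketch of the mutable-default pitfall being avoided (illustrative only, not yt code):

    def bad(values=[]):
        values.append(1)     # mutates the one shared default list
        return values

    bad(); print bad()       # -> [1, 1]: state leaks between calls

    def good(values=None):
        if values is None:   # fresh list on every call
            values = []
        values.append(1)
        return values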


https://bitbucket.org/yt_analysis/yt-3.0/commits/bdf0a0603ef5/
Changeset:   bdf0a0603ef5
Branch:      yt
User:        jzuhone
Date:        2013-10-01 21:32:15
Summary:     Small refactor to make GenericArrayTest and PlotWindowAttributeTest use the same comparison code.
Affected #:  3 files

diff -r c95e7c8d1eb99005ddf666d483fb4381253ce70c -r bdf0a0603ef5a8b40a508e4d88a3d7fdca04769e nose.cfg
--- a/nose.cfg
+++ b/nose.cfg
@@ -1,4 +1,4 @@
 [nosetests]
 detailed-errors=1
 where=yt
-exclude=answer_testing
+exclude=

diff -r c95e7c8d1eb99005ddf666d483fb4381253ce70c -r bdf0a0603ef5a8b40a508e4d88a3d7fdca04769e yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -499,7 +499,7 @@
             points[:, 2] = points[:, 2] / self.period[2]
             fKD.qv_many = points.T
             fKD.nn_tags = np.asfortranarray(np.empty((1, points.shape[0]), dtype='int64'))
-            find_many_nn_nearest_neighbors()
+            fKD.find_many_nn_nearest_neighbors()
             # The -1 is for fortran counting.
             n = fKD.nn_tags[0,:] - 1
         return n

diff -r c95e7c8d1eb99005ddf666d483fb4381253ce70c -r bdf0a0603ef5a8b40a508e4d88a3d7fdca04769e yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -577,6 +577,16 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newp == oldp)
 
+def compare_image_lists(new_result, old_result, decimals):
+    fns = ['old.png', 'new.png']
+    num_images = len(old_result)
+    assert(num_images > 0)
+    for i in xrange(num_images):
+        mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result[i])))
+        mpimg.imsave(fns[1], np.loads(zlib.decompress(new_result[i])))
+        assert compare_images(fns[0], fns[1], 10**(-decimals)) == None
+        for fn in fns: os.remove(fn)
+            
 class PlotWindowAttributeTest(AnswerTestingTest):
     _type_name = "PlotWindowAttribute"
     _attrs = ('plot_type', 'plot_field', 'plot_axis', 'attr_name', 'attr_args')
@@ -604,12 +614,8 @@
         return [zlib.compress(image.dumps())]
 
     def compare(self, new_result, old_result):
-        fns = ['old.png', 'new.png']
-        mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result[0])))
-        mpimg.imsave(fns[1], np.loads(zlib.decompress(new_result[0])))
-        assert compare_images(fns[0], fns[1], 10**(-self.decimals)) == None
-        for fn in fns: os.remove(fn)
-
+        compare_image_lists(new_result, old_result, self.decimals)
+        
 class GenericArrayTest(AnswerTestingTest):
     _type_name = "GenericArray"
     _attrs = ('array_func_name','args','kwargs')
@@ -671,15 +677,8 @@
             comp_imgs.append(zlib.compress(img_data.dumps()))
         return comp_imgs
     def compare(self, new_result, old_result):
-        fns = ['old.png', 'new.png']
-        num_images = len(old_result)
-        assert(num_images > 0)
-        for i in xrange(num_images):
-            mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result[i])))
-            mpimg.imsave(fns[1], np.loads(zlib.decompress(new_result[i])))
-            assert compare_images(fns[0], fns[1], 10**(-self.decimals)) == None
-            for fn in fns: os.remove(fn)
-
+        compare_image_lists(new_result, old_result, self.decimals)
+        
 def requires_pf(pf_fn, big_data = False):
     def ffalse(func):
         return lambda: None
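
The new compare_image_lists() helper centralizes the round-trip both image-based tests rely on: run() stores each image as a zlib-compressed pickled array, and compare() inflates the old and new results and diffs the resulting PNGs with matplotlib's compare_images. A minimal sketch of that storage round-trip (standalone, illustrative):

    import zlib
    import numpy as np

    img = np.random.random((16, 16, 3))
    blob = zlib.compress(img.dumps())            # what run() returns
    restored = np.loads(zlib.decompress(blob))   # what compare() reloads
    assert (img == restored).all()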


https://bitbucket.org/yt_analysis/yt-3.0/commits/b6e7b3ebe1fa/
Changeset:   b6e7b3ebe1fa
Branch:      yt
User:        jzuhone
Date:        2013-10-01 21:34:52
Summary:     Don't know how that happened.
Affected #:  1 file

diff -r bdf0a0603ef5a8b40a508e4d88a3d7fdca04769e -r b6e7b3ebe1fab2371c1691543461c6db85163c81 nose.cfg
--- a/nose.cfg
+++ b/nose.cfg
@@ -1,4 +1,4 @@
 [nosetests]
 detailed-errors=1
 where=yt
-exclude=
+exclude=answer_testing


https://bitbucket.org/yt_analysis/yt-3.0/commits/820b8c0950a7/
Changeset:   820b8c0950a7
Branch:      yt
User:        jzuhone
Date:        2013-10-01 22:35:01
Summary:     Don't check this field
Affected #:  1 file

diff -r b6e7b3ebe1fab2371c1691543461c6db85163c81 -r 820b8c0950a76ec1802e94e628377e7ead24e46d yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -87,5 +87,6 @@
         if field.startswith("CIC"): continue
         if field.startswith("WeakLensingConvergence"): continue
         if FieldInfo[field].particle_type: continue
+        if field.startswith("BetaPar"): continue
         for nproc in [1, 4, 8]:
             yield TestFieldAccess(field, nproc)


https://bitbucket.org/yt_analysis/yt-3.0/commits/5b946b3fec4c/
Changeset:   5b946b3fec4c
Branch:      yt
User:        jzuhone
Date:        2013-10-01 22:37:33
Summary:     Forget that, doing it another way
Affected #:  1 file

diff -r 820b8c0950a76ec1802e94e628377e7ead24e46d -r 5b946b3fec4cf414bf2a8c39e9812d2cd4f8bef7 yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -87,6 +87,5 @@
         if field.startswith("CIC"): continue
         if field.startswith("WeakLensingConvergence"): continue
         if FieldInfo[field].particle_type: continue
-        if field.startswith("BetaPar"): continue
         for nproc in [1, 4, 8]:
             yield TestFieldAccess(field, nproc)


https://bitbucket.org/yt_analysis/yt-3.0/commits/88991ae4a811/
Changeset:   88991ae4a811
Branch:      yt
User:        jzuhone
Date:        2013-10-01 22:37:55
Summary:     Attempting to resolve a test failure
Affected #:  1 file

diff -r 5b946b3fec4cf414bf2a8c39e9812d2cd4f8bef7 -r 88991ae4a811450d372c04aa97a325b186829fe7 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -124,13 +124,13 @@
         axis = fix_axis(axis)
 
         def _beta_par(field, data):
-            axis = data.get_field_parameter("axis")
+            axis = data.get_field_parameter("SZaxis")
             vpar = data["Density"]*data["%s-velocity" % (vlist[axis])]
             return vpar/clight
         add_field("BetaPar", function=_beta_par)    
 
         proj = self.pf.h.proj(axis, "Density", source=source)
-        proj.set_field_parameter("axis", axis)
+        proj.set_field_parameter("SZaxis", axis)
         frb = proj.to_frb(width, nx)
         dens = frb["Density"]
         Te = frb["TeSZ"]/dens


https://bitbucket.org/yt_analysis/yt-3.0/commits/c8a641ed885b/
Changeset:   c8a641ed885b
Branch:      yt
User:        jzuhone
Date:        2013-10-01 23:59:16
Summary:     Preventing test suite failures
Affected #:  1 file

diff -r 88991ae4a811450d372c04aa97a325b186829fe7 -r c8a641ed885bd0a3c16cfde118d94eaca3cd64e0 yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -86,6 +86,9 @@
         if field.startswith("particle"): continue
         if field.startswith("CIC"): continue
         if field.startswith("WeakLensingConvergence"): continue
+        if field.startswith("BetaPar"): continue
+        if field.startswith("TBetaPar"): continue
+        if field.startswith("BetaPerp"): continue
         if FieldInfo[field].particle_type: continue
         for nproc in [1, 4, 8]:
             yield TestFieldAccess(field, nproc)


https://bitbucket.org/yt_analysis/yt-3.0/commits/bf51b2a74330/
Changeset:   bf51b2a74330
Branch:      yt
User:        jzuhone
Date:        2013-10-02 00:00:47
Summary:     Preventing test suite failures
Affected #:  2 files

diff -r c8a641ed885bd0a3c16cfde118d94eaca3cd64e0 -r bf51b2a7433079b468028373c8ddcc31086486aa yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -124,13 +124,13 @@
         axis = fix_axis(axis)
 
         def _beta_par(field, data):
-            axis = data.get_field_parameter("SZaxis")
+            axis = data.get_field_parameter("axis")
             vpar = data["Density"]*data["%s-velocity" % (vlist[axis])]
             return vpar/clight
         add_field("BetaPar", function=_beta_par)    
 
         proj = self.pf.h.proj(axis, "Density", source=source)
-        proj.set_field_parameter("SZaxis", axis)
+        proj.set_field_parameter("axis", axis)
         frb = proj.to_frb(width, nx)
         dens = frb["Density"]
         Te = frb["TeSZ"]/dens

diff -r c8a641ed885bd0a3c16cfde118d94eaca3cd64e0 -r bf51b2a7433079b468028373c8ddcc31086486aa yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -17,7 +17,10 @@
 from yt.testing import *
 from yt.utilities.answer_testing.framework import requires_pf, \
      GenericArrayTest, data_dir_load, GenericImageTest
-from yt.analysis_modules.sunyaev_zeldovich.projection import SZProjection, I0
+try:
+    from yt.analysis_modules.sunyaev_zeldovich.projection import SZProjection, I0
+except ImportError:
+    pass
 import numpy as np
 try:
     import SZpack


https://bitbucket.org/yt_analysis/yt-3.0/commits/8dbd32272376/
Changeset:   8dbd32272376
Branch:      yt
User:        jzuhone
Date:        2013-10-02 06:12:22
Summary:     A small change to exception-raising here.
Affected #:  1 file

diff -r bf51b2a7433079b468028373c8ddcc31086486aa -r 8dbd32272376d5d6ddbe3c31b492360cf4f1d283 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -27,7 +27,6 @@
 from yt.visualization.volume_rendering.camera import off_axis_projection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only
-from yt.utilities.exceptions import YTException
 import numpy as np
 
 I0 = 2*(kboltz*Tcmb)**3/((hcgs*clight)**2)*1.0e17
@@ -187,8 +186,9 @@
             ctr = center
 
         if source is not None:
-            raise YTException("Source argument is not currently supported for off-axis S-Z projections.")
-        
+            mylog.error("Source argument is not currently supported for off-axis S-Z projections.")
+            raise NotImplementedError
+                
         def _beta_par(field, data):
             vpar = data["Density"]*(data["x-velocity"]*L[0]+
                                     data["y-velocity"]*L[1]+


https://bitbucket.org/yt_analysis/yt-3.0/commits/f393c060da9b/
Changeset:   f393c060da9b
Branch:      yt
User:        MatthewTurk
Date:        2013-10-02 12:10:31
Summary:     Merged in jzuhone/yt (pull request #602)

Projections of the S-Z effect using SZpack
Affected #:  16 files

diff -r 6144554f5038e1fade96aaba6648544fbbf3b135 -r f393c060da9bdc22d6e28937ba87c8d8347b6dfc yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -103,5 +103,8 @@
     TwoPointFunctions, \
     FcnSet
 
+from .sunyaev_zeldovich.api import SZProjection
+
 from .radmc3d_export.api import \
     RadMC3DWriter
+

diff -r 6144554f5038e1fade96aaba6648544fbbf3b135 -r f393c060da9bdc22d6e28937ba87c8d8347b6dfc yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -21,4 +21,5 @@
     config.add_subpackage("star_analysis")
     config.add_subpackage("two_point_functions")
     config.add_subpackage("radmc3d_export")
+    config.add_subpackage("sunyaev_zeldovich")    
     return config

diff -r 6144554f5038e1fade96aaba6648544fbbf3b135 -r f393c060da9bdc22d6e28937ba87c8d8347b6dfc yt/analysis_modules/sunyaev_zeldovich/api.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/api.py
@@ -0,0 +1,12 @@
+"""
+API for sunyaev_zeldovich
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from projection import SZProjection

diff -r 6144554f5038e1fade96aaba6648544fbbf3b135 -r f393c060da9bdc22d6e28937ba87c8d8347b6dfc yt/analysis_modules/sunyaev_zeldovich/projection.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -0,0 +1,349 @@
+"""
+Projection class for the Sunyaev-Zeldovich effect. Requires SZpack (at least
+version 1.1.1) to be downloaded and installed:
+
+http://www.chluba.de/SZpack/
+
+For details on the computations involved please refer to the following references:
+
+Chluba, Nagai, Sazonov, Nelson, MNRAS, 2012, arXiv:1205.5778
+Chluba, Switzer, Nagai, Nelson, MNRAS, 2012, arXiv:1211.3206 
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
+from yt.data_objects.image_array import ImageArray
+from yt.data_objects.field_info_container import add_field
+from yt.funcs import fix_axis, mylog, iterable, get_pbar
+from yt.utilities.definitions import inv_axis_names
+from yt.visualization.image_writer import write_fits, write_projection
+from yt.visualization.volume_rendering.camera import off_axis_projection
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+     communication_system, parallel_root_only
+import numpy as np
+
+I0 = 2*(kboltz*Tcmb)**3/((hcgs*clight)**2)*1.0e17
+        
+try:
+    import SZpack
+except:
+    raise ImportError("SZpack not installed. It can be obtained from http://www.chluba.de/SZpack/.")
+
+vlist = "xyz"
+
+def _t_squared(field, data):
+    return data["Density"]*data["TempkeV"]*data["TempkeV"]
+add_field("TSquared", function=_t_squared)
+
+def _beta_perp_squared(field, data):
+    return data["Density"]*data["VelocityMagnitude"]**2/clight/clight - data["BetaParSquared"]
+add_field("BetaPerpSquared", function=_beta_perp_squared)
+
+def _beta_par_squared(field, data):
+    return data["BetaPar"]**2/data["Density"]
+add_field("BetaParSquared", function=_beta_par_squared)
+
+def _t_beta_par(field, data):
+    return data["TempkeV"]*data["BetaPar"]
+add_field("TBetaPar", function=_t_beta_par)
+
+def _t_sz(field, data):
+    return data["Density"]*data["TempkeV"]
+add_field("TeSZ", function=_t_sz)
+
+class SZProjection(object):
+    r""" Initialize a SZProjection object.
+
+    Parameters
+    ----------
+    pf : parameter_file
+        The parameter file.
+    freqs : array_like
+        The frequencies (in GHz) at which to compute the SZ spectral distortion.
+    mue : float, optional
+        Mean molecular weight for determining the electron number density.
+    high_order : boolean, optional
+        Should we calculate high-order moments of velocity and temperature?
+
+    Examples
+    --------
+    >>> freqs = [90., 180., 240.]
+    >>> szprj = SZProjection(pf, freqs, high_order=True)
+    """
+    def __init__(self, pf, freqs, mue=1.143, high_order=False):
+            
+        self.pf = pf
+        self.num_freqs = len(freqs)
+        self.high_order = high_order
+        self.freqs = np.array(freqs)
+        self.mueinv = 1./mue
+        self.xinit = hcgs*self.freqs*1.0e9/(kboltz*Tcmb)
+        self.freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs]
+        self.data = {}
+
+        self.units = {}
+        self.units["TeSZ"] = r"$\mathrm{keV}$"
+        self.units["Tau"] = None
+
+        self.display_names = {}
+        self.display_names["TeSZ"] = r"$\mathrm{T_e}$"
+        self.display_names["Tau"] = r"$\mathrm{\tau}$"
+
+        for f, field in zip(self.freqs, self.freq_fields):
+            self.units[field] = r"$\mathrm{MJy\ sr^{-1}}$"
+            self.display_names[field] = r"$\mathrm{\Delta{I}_{%d\ GHz}}$" % (int(f))
+            
+    def on_axis(self, axis, center="c", width=(1, "unitary"), nx=800, source=None):
+        r""" Make an on-axis projection of the SZ signal.
+
+        Parameters
+        ----------
+        axis : integer or string
+            The axis of the simulation domain along which to make the SZ projection.
+        center : array_like or string, optional
+            The center of the projection.
+        width : float or tuple
+            The width of the projection.
+        nx : integer, optional
+            The dimensions on a side of the projection image.
+        source : yt.data_objects.api.AMRData, optional
+            If specified, this will be the data source used for selecting regions to project.
+
+        Examples
+        --------
+        >>> szprj.on_axis("y", center="max", width=(1.0, "mpc"), source=my_sphere)
+        """
+        axis = fix_axis(axis)
+
+        def _beta_par(field, data):
+            axis = data.get_field_parameter("axis")
+            vpar = data["Density"]*data["%s-velocity" % (vlist[axis])]
+            return vpar/clight
+        add_field("BetaPar", function=_beta_par)    
+
+        proj = self.pf.h.proj(axis, "Density", source=source)
+        proj.set_field_parameter("axis", axis)
+        frb = proj.to_frb(width, nx)
+        dens = frb["Density"]
+        Te = frb["TeSZ"]/dens
+        bpar = frb["BetaPar"]/dens
+        omega1 = frb["TSquared"]/dens/(Te*Te) - 1.
+        bperp2 = np.zeros((nx,nx))
+        sigma1 = np.zeros((nx,nx))
+        kappa1 = np.zeros((nx,nx))                                    
+        if self.high_order:
+            bperp2 = frb["BetaPerpSquared"]/dens
+            sigma1 = frb["TBetaPar"]/dens/Te - bpar
+            kappa1 = frb["BetaParSquared"]/dens - bpar*bpar
+        tau = sigma_thompson*dens*self.mueinv/mh
+
+        nx,ny = frb.buff_size
+        self.bounds = frb.bounds
+        self.dx = (frb.bounds[1]-frb.bounds[0])/nx
+        self.dy = (frb.bounds[3]-frb.bounds[2])/ny
+        self.nx = nx
+        
+        self._compute_intensity(tau, Te, bpar, omega1, sigma1, kappa1, bperp2)
+                                                                                                                
+    def off_axis(self, L, center="c", width=(1, "unitary"), nx=800, source=None):
+        r""" Make an off-axis projection of the SZ signal.
+        
+        Parameters
+        ----------
+        L : array_like
+            The normal vector of the projection. 
+        center : array_like or string, optional
+            The center of the projection.
+        width : float or tuple
+            The width of the projection.
+        nx : integer, optional
+            The dimensions on a side of the projection image.
+        source : yt.data_objects.api.AMRData, optional
+            If specified, this will be the data source used for selecting regions to project.
+            Currently unsupported in yt 2.x.
+                    
+        Examples
+        --------
+        >>> L = np.array([0.5, 1.0, 0.75])
+        >>> szprj.off_axis(L, center="c", width=(2.0, "mpc"))
+        """
+        if iterable(width):
+            w = width[0]/self.pf.units[width[1]]
+        else:
+            w = width
+        if center == "c":
+            ctr = self.pf.domain_center
+        elif center == "max":
+            ctr = self.pf.h.find_max("Density")
+        else:
+            ctr = center
+
+        if source is not None:
+            mylog.error("Source argument is not currently supported for off-axis S-Z projections.")
+            raise NotImplementedError
+                
+        def _beta_par(field, data):
+            vpar = data["Density"]*(data["x-velocity"]*L[0]+
+                                    data["y-velocity"]*L[1]+
+                                    data["z-velocity"]*L[2])
+            return vpar/clight
+        add_field("BetaPar", function=_beta_par)
+
+        dens    = off_axis_projection(self.pf, ctr, L, w, nx, "Density")
+        Te      = off_axis_projection(self.pf, ctr, L, w, nx, "TeSZ")/dens
+        bpar    = off_axis_projection(self.pf, ctr, L, w, nx, "BetaPar")/dens
+        omega1  = off_axis_projection(self.pf, ctr, L, w, nx, "TSquared")/dens
+        omega1  = omega1/(Te*Te) - 1.
+        if self.high_order:
+            bperp2  = off_axis_projection(self.pf, ctr, L, w, nx, "BetaPerpSquared")/dens
+            sigma1  = off_axis_projection(self.pf, ctr, L, w, nx, "TBetaPar")/dens
+            sigma1  = sigma1/Te - bpar
+            kappa1  = off_axis_projection(self.pf, ctr, L, w, nx, "BetaParSquared")/dens
+            kappa1 -= bpar
+        else:
+            bperp2 = np.zeros((nx,nx))
+            sigma1 = np.zeros((nx,nx))
+            kappa1 = np.zeros((nx,nx))
+        tau = sigma_thompson*dens*self.mueinv/mh
+
+        self.bounds = np.array([-0.5*w, 0.5*w, -0.5*w, 0.5*w])
+        self.dx = w/nx
+        self.dy = w/nx
+        self.nx = nx
+
+        self._compute_intensity(tau, Te, bpar, omega1, sigma1, kappa1, bperp2)
+
+    def _compute_intensity(self, tau, Te, bpar, omega1, sigma1, kappa1, bperp2):
+
+        # Bad hack, but we get NaNs if we don't do something like this
+        small_beta = np.abs(bpar) < 1.0e-20
+        bpar[small_beta] = 1.0e-20
+                                                                   
+        comm = communication_system.communicators[-1]
+
+        nx, ny = self.nx,self.nx
+        signal = np.zeros((self.num_freqs,nx,ny))
+        xo = np.zeros((self.num_freqs))
+        
+        k = int(0)
+
+        start_i = comm.rank*nx/comm.size
+        end_i = (comm.rank+1)*nx/comm.size
+                        
+        pbar = get_pbar("Computing SZ signal.", nx*nx)
+
+        for i in xrange(start_i, end_i):
+            for j in xrange(ny):
+                xo[:] = self.xinit[:]
+                SZpack.compute_combo_means(xo, tau[i,j], Te[i,j],
+                                           bpar[i,j], omega1[i,j],
+                                           sigma1[i,j], kappa1[i,j], bperp2[i,j])
+                signal[:,i,j] = xo[:]
+                pbar.update(k)
+                k += 1
+
+        signal = comm.mpi_allreduce(signal)
+        
+        pbar.finish()
+                
+        for i, field in enumerate(self.freq_fields):
+            self.data[field] = ImageArray(I0*self.xinit[i]**3*signal[i,:,:])
+        self.data["Tau"] = ImageArray(tau)
+        self.data["TeSZ"] = ImageArray(Te)
+
+    @parallel_root_only
+    def write_fits(self, filename_prefix, clobber=True):
+        r""" Export images to a FITS file. Writes the SZ distortion in all
+        specified frequencies as well as the mass-weighted temperature and the
+        optical depth. Distance units are in kpc.  
+        
+        Parameters
+        ----------
+        filename_prefix : string
+            The prefix of the FITS filename.
+        clobber : boolean, optional
+            If the file already exists, do we overwrite?
+                    
+        Examples
+        --------
+        >>> szprj.write_fits("SZbullet", clobber=False)
+        """
+        coords = {}
+        coords["dx"] = self.dx*self.pf.units["kpc"]
+        coords["dy"] = self.dy*self.pf.units["kpc"]
+        coords["xctr"] = 0.0
+        coords["yctr"] = 0.0
+        coords["units"] = "kpc"
+        other_keys = {"Time" : self.pf.current_time}
+        write_fits(self.data, filename_prefix, clobber=clobber, coords=coords,
+                   other_keys=other_keys)
+
+    @parallel_root_only
+    def write_png(self, filename_prefix):
+        r""" Export images to PNG files. Writes the SZ distortion in all
+        specified frequencies as well as the mass-weighted temperature and the
+        optical depth. Distance units are in kpc. 
+        
+        Parameters
+        ----------
+        filename_prefix : string
+            The prefix of the image filenames.
+                
+        Examples
+        --------
+        >>> szprj.write_png("SZsloshing")
+        """     
+        extent = tuple([bound*self.pf.units["kpc"] for bound in self.bounds])
+        for field, image in self.items():
+            filename=filename_prefix+"_"+field+".png"
+            label = self.display_names[field]
+            if self.units[field] is not None:
+                label += " ("+self.units[field]+")"
+            write_projection(image, filename, colorbar_label=label, take_log=False,
+                             extent=extent, xlabel=r"$\mathrm{x\ (kpc)}$",
+                             ylabel=r"$\mathrm{y\ (kpc)}$")
+
+    @parallel_root_only
+    def write_hdf5(self, filename):
+        r"""Export the set of S-Z fields to a set of HDF5 datasets.
+        
+        Parameters
+        ----------
+        filename : string
+            This file will be opened in "write" mode.
+        
+        Examples
+        --------
+        >>> szprj.write_hdf5("SZsloshing.h5")                        
+        """
+        import h5py
+        f = h5py.File(filename, "w")
+        for field, data in self.items():
+            f.create_dataset(field,data=data)
+        f.close()
+   
+    def keys(self):
+        return self.data.keys()
+
+    def items(self):
+        return self.data.items()
+
+    def values(self):
+        return self.data.values()
+    
+    def has_key(self, key):
+        return key in self.data.keys()
+
+    def __getitem__(self, key):
+        return self.data[key]
+
+    @property
+    def shape(self):
+        return (self.nx,self.nx)
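
Taken together, the new module supports a short end-to-end workflow. A hedged sketch, assuming a loaded parameter file `pf` and a working SZpack install (frequencies and file names are illustrative):

    from yt.analysis_modules.sunyaev_zeldovich.api import SZProjection

    freqs = [90., 180., 240.]                  # GHz
    szprj = SZProjection(pf, freqs, high_order=True)
    szprj.on_axis("z", width=(1.0, "mpc"), nx=400)
    szprj.write_fits("SZ_example")             # one FITS HDU per field
    szprj.write_png("SZ_example")              # one PNG per field
    szprj.write_hdf5("SZ_example.h5")          # one HDF5 dataset per field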

diff -r 6144554f5038e1fade96aaba6648544fbbf3b135 -r f393c060da9bdc22d6e28937ba87c8d8347b6dfc yt/analysis_modules/sunyaev_zeldovich/setup.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/setup.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('sunyaev_zeldovich', parent_package, top_path)
+    config.add_subpackage("tests")
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 6144554f5038e1fade96aaba6648544fbbf3b135 -r f393c060da9bdc22d6e28937ba87c8d8347b6dfc yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -0,0 +1,135 @@
+"""
+Unit test the sunyaev_zeldovich analysis module.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.frontends.stream.api import load_uniform_grid
+from yt.funcs import get_pbar, mylog
+from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, \
+     mh, cm_per_km, kboltz, Tcmb, hcgs, clight, sigma_thompson
+from yt.testing import *
+from yt.utilities.answer_testing.framework import requires_pf, \
+     GenericArrayTest, data_dir_load, GenericImageTest
+try:
+    from yt.analysis_modules.sunyaev_zeldovich.projection import SZProjection, I0
+except ImportError:
+    pass
+import numpy as np
+try:
+    import SZpack
+except ImportError:
+    pass
+
+mue = 1./0.88
+freqs = np.array([30., 90., 240.])
+    
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"        
+
+def full_szpack3d(pf, xo):
+    data = pf.h.grids[0]
+    dz = pf.h.get_smallest_dx()*pf.units["cm"]
+    nx,ny,nz = data["Density"].shape
+    dn = np.zeros((nx,ny,nz))
+    Dtau = sigma_thompson*data["Density"]/(mh*mue)*dz
+    Te = data["Temperature"]/K_per_keV
+    betac = data["z-velocity"]/clight
+    pbar = get_pbar("Computing 3-D cell-by-cell S-Z signal for comparison.", nx) 
+    for i in xrange(nx):
+        pbar.update(i)
+        for j in xrange(ny):
+            for k in xrange(nz):
+                dn[i,j,k] = SZpack.compute_3d(xo, Dtau[i,j,k],
+                                              Te[i,j,k], betac[i,j,k],
+                                              1.0, 0.0, 0.0, 1.0e-5)
+    pbar.finish()
+    return I0*xo**3*np.sum(dn, axis=2)
+
+def setup_cluster():
+
+    R = 1000.
+    r_c = 100.
+    rho_c = 1.673e-26
+    beta = 1.
+    T0 = 4.
+    nx,ny,nz = 16,16,16
+    c = 0.17
+    a_c = 30.
+    a = 200.
+    v0 = 300.*cm_per_km
+    ddims = (nx,ny,nz)
+    
+    x, y, z = np.mgrid[-R:R:nx*1j,
+                       -R:R:ny*1j,
+                       -R:R:nz*1j]
+
+    r = np.sqrt(x**2+y**2+z**2)
+
+    dens = np.zeros(ddims)
+    dens = rho_c*(1.+(r/r_c)**2)**(-1.5*beta)
+    temp = T0*K_per_keV/(1.+r/a)*(c+r/a_c)/(1.+r/a_c)
+    velz = v0*temp/(T0*K_per_keV)
+
+    data = {}
+    data["Density"] = dens
+    data["Temperature"] = temp
+    data["x-velocity"] = np.zeros(ddims)
+    data["y-velocity"] = np.zeros(ddims)
+    data["z-velocity"] = velz
+
+    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
+    
+    L = 2*R*cm_per_kpc
+    dl = L/nz
+
+    pf = load_uniform_grid(data, ddims, L, bbox=bbox)
+
+    return pf
+
+@requires_module("SZpack")
+def test_projection():
+    pf = setup_cluster()
+    nx,ny,nz = pf.domain_dimensions
+    xinit = 1.0e9*hcgs*freqs/(kboltz*Tcmb)
+    szprj = SZProjection(pf, freqs, mue=mue, high_order=True)
+    szprj.on_axis(2, nx=nx)
+    deltaI = np.zeros((3,nx,ny))
+    for i in xrange(3):
+        deltaI[i,:,:] = full_szpack3d(pf, xinit[i])
+        yield assert_almost_equal, deltaI[i,:,:], szprj["%d_GHz" % int(freqs[i])], 6
+
+M7 = "DD0010/moving7_0010"
+@requires_module("SZpack")
+@requires_pf(M7)
+def test_M7_onaxis():
+    pf = data_dir_load(M7)
+    szprj = SZProjection(pf, freqs)
+    szprj.on_axis(2, nx=100)
+    def onaxis_array_func():
+        return szprj.data
+    def onaxis_image_func(filename_prefix):
+        szprj.write_png(filename_prefix)
+    yield GenericArrayTest(pf, onaxis_array_func)
+    yield GenericImageTest(pf, onaxis_image_func, 3)
+       
+@requires_module("SZpack")
+@requires_pf(M7)
+def test_M7_offaxis():
+    pf = data_dir_load(M7)
+    szprj = SZProjection(pf, freqs)
+    szprj.off_axis(np.array([0.1,-0.2,0.4]), nx=100)
+    def offaxis_array_func():
+        return szprj.data
+    def offaxis_image_func(filename_prefix):
+        szprj.write_png(filename_prefix)
+    yield GenericArrayTest(pf, offaxis_array_func)
+    yield GenericImageTest(pf, offaxis_image_func, 3)

diff -r 6144554f5038e1fade96aaba6648544fbbf3b135 -r f393c060da9bdc22d6e28937ba87c8d8347b6dfc yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -499,7 +499,7 @@
             points[:, 2] = points[:, 2] / self.period[2]
             fKD.qv_many = points.T
             fKD.nn_tags = np.asfortranarray(np.empty((1, points.shape[0]), dtype='int64'))
-            find_many_nn_nearest_neighbors()
+            fKD.find_many_nn_nearest_neighbors()
             # The -1 is for fortran counting.
             n = fKD.nn_tags[0,:] - 1
         return n

diff -r 6144554f5038e1fade96aaba6648544fbbf3b135 -r f393c060da9bdc22d6e28937ba87c8d8347b6dfc yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -86,6 +86,9 @@
         if field.startswith("particle"): continue
         if field.startswith("CIC"): continue
         if field.startswith("WeakLensingConvergence"): continue
+        if field.startswith("BetaPar"): continue
+        if field.startswith("TBetaPar"): continue
+        if field.startswith("BetaPerp"): continue
         if FieldInfo[field].particle_type: continue
         for nproc in [1, 4, 8]:
             yield TestFieldAccess(field, nproc)

diff -r 6144554f5038e1fade96aaba6648544fbbf3b135 -r f393c060da9bdc22d6e28937ba87c8d8347b6dfc yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -14,6 +14,7 @@
 
 import itertools as it
 import numpy as np
+import importlib
 from yt.funcs import *
 from numpy.testing import assert_array_equal, assert_almost_equal, \
     assert_approx_equal, assert_array_almost_equal, assert_equal, \
@@ -251,3 +252,23 @@
                     list_of_kwarg_dicts[i][key] = keywords[key][0]
 
     return list_of_kwarg_dicts
+
+def requires_module(module):
+    """
+    Decorator that takes a module name as an argument and tries to import it.
+    If the module imports without issue, the function is returned, but if not, 
+    a null function is returned. This is so tests that depend on certain modules
+    being imported will not fail if the module is not installed on the testing
+    platform.
+    """
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        return func
+    try:
+        importlib.import_module(module)
+    except ImportError:
+        return ffalse
+    else:
+        return ftrue
+    
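
Because the decorator swaps the test in for a do-nothing lambda when the import fails, suites degrade gracefully on machines without the optional dependency. Usage sketch (the test body is illustrative):

    from yt.testing import requires_module

    @requires_module("SZpack")
    def test_needs_szpack():
        import SZpack   # only reached when the module is importable
        assert SZpack is not None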

diff -r 6144554f5038e1fade96aaba6648544fbbf3b135 -r f393c060da9bdc22d6e28937ba87c8d8347b6dfc yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -24,6 +24,7 @@
 import shelve
 import zlib
 import tempfile
+import glob
 
 from matplotlib.testing.compare import compare_images
 from nose.plugins import Plugin
@@ -576,6 +577,16 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newp == oldp)
 
+def compare_image_lists(new_result, old_result, decimals):
+    fns = ['old.png', 'new.png']
+    num_images = len(old_result)
+    assert(num_images > 0)
+    for i in xrange(num_images):
+        mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result[i])))
+        mpimg.imsave(fns[1], np.loads(zlib.decompress(new_result[i])))
+        assert compare_images(fns[0], fns[1], 10**(-decimals)) == None
+        for fn in fns: os.remove(fn)
+            
 class PlotWindowAttributeTest(AnswerTestingTest):
     _type_name = "PlotWindowAttribute"
     _attrs = ('plot_type', 'plot_field', 'plot_axis', 'attr_name', 'attr_args')
@@ -603,12 +614,71 @@
         return [zlib.compress(image.dumps())]
 
     def compare(self, new_result, old_result):
-        fns = ['old.png', 'new.png']
-        mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result[0])))
-        mpimg.imsave(fns[1], np.loads(zlib.decompress(new_result[0])))
-        assert compare_images(fns[0], fns[1], 10**(-self.decimals)) == None
-        for fn in fns: os.remove(fn)
+        compare_image_lists(new_result, old_result, self.decimals)
+        
+class GenericArrayTest(AnswerTestingTest):
+    _type_name = "GenericArray"
+    _attrs = ('array_func_name','args','kwargs')
+    def __init__(self, pf_fn, array_func, args=None, kwargs=None, decimals=None):
+        super(GenericArrayTest, self).__init__(pf_fn)
+        self.array_func = array_func
+        self.array_func_name = array_func.func_name
+        self.args = args
+        self.kwargs = kwargs
+        self.decimals = decimals
+    def run(self):
+        if self.args is None:
+            args = []
+        else:
+            args = self.args
+        if self.kwargs is None:
+            kwargs = {}
+        else:
+            kwargs = self.kwargs
+        return self.array_func(*args, **kwargs)
+    def compare(self, new_result, old_result):
+        assert_equal(len(new_result), len(old_result),
+                                          err_msg="Number of outputs not equal.",
+                                          verbose=True)
+        for k in new_result:
+            if self.decimals is None:
+                assert_equal(new_result[k], old_result[k])
+            else:
+                assert_allclose(new_result[k], old_result[k], 10**(-self.decimals))
 
+class GenericImageTest(AnswerTestingTest):
+    _type_name = "GenericImage"
+    _attrs = ('image_func_name','args','kwargs')
+    def __init__(self, pf_fn, image_func, decimals, args=None, kwargs=None):
+        super(GenericImageTest, self).__init__(pf_fn)
+        self.image_func = image_func
+        self.image_func_name = image_func.func_name
+        self.args = args
+        self.kwargs = kwargs
+        self.decimals = decimals
+    def run(self):
+        if self.args is None:
+            args = []
+        else:
+            args = self.args
+        if self.kwargs is None:
+            kwargs = {}
+        else:
+            kwargs = self.kwargs
+        comp_imgs = []
+        tmpdir = tempfile.mkdtemp()
+        image_prefix = os.path.join(tmpdir,"test_img")
+        self.image_func(image_prefix, *args, **kwargs)
+        imgs = glob.glob(image_prefix+"*")
+        assert(len(imgs) > 0)
+        for img in imgs:
+            img_data = mpimg.imread(img)
+            os.remove(img)
+            comp_imgs.append(zlib.compress(img_data.dumps()))
+        return comp_imgs
+    def compare(self, new_result, old_result):
+        compare_image_lists(new_result, old_result, self.decimals)
+        
 def requires_pf(pf_fn, big_data = False):
     def ffalse(func):
         return lambda: None

diff -r 6144554f5038e1fade96aaba6648544fbbf3b135 -r f393c060da9bdc22d6e28937ba87c8d8347b6dfc yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -84,6 +84,7 @@
 erg_per_keV = erg_per_eV * 1.0e3
 K_per_keV = erg_per_keV / boltzmann_constant_cgs
 keV_per_K = 1.0 / K_per_keV
+Tcmb = 2.726 # Current CMB temperature
 
 #Short cuts
 G = gravitational_constant_cgs

diff -r 6144554f5038e1fade96aaba6648544fbbf3b135 -r f393c060da9bdc22d6e28937ba87c8d8347b6dfc yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -19,6 +19,7 @@
     y_dict, \
     axis_names
 from .volume_rendering.api import off_axis_projection
+from image_writer import write_fits
 from yt.data_objects.image_array import ImageArray
 import _MPL
 import numpy as np
@@ -263,8 +264,8 @@
             output.create_dataset(field,data=self[field])
         output.close()
 
-    def export_fits(self, filename_prefix, fields = None, clobber=False,
-                    other_keys=None, gzip_file=False, units="1"):
+    def export_fits(self, filename_prefix, fields=None, clobber=False,
+                    other_keys=None, units="cm", sky_center=(0.0,0.0), D_A=None):
 
         """
         This will export a set of FITS images of either the fields specified
@@ -273,106 +274,80 @@
         existing FITS file.
 
         This requires the *pyfits* module, which is a standalone module
-        provided by STSci to interface with FITS-format files.
+        provided by STSci to interface with FITS-format files, and is also
+        part of AstroPy.
         """
         r"""Export a set of pixelized fields to a FITS file.
 
         This will export a set of FITS images of either the fields specified
-        or all the fields already in the object.  The output filename is the
-        the specified prefix.
+        or all the fields already in the object.
 
         Parameters
         ----------
         filename_prefix : string
-            This prefix will be prepended to every FITS file name.
+            This prefix will be prepended to the FITS file name.
         fields : list of strings
             These fields will be pixelized and output.
         clobber : boolean
             If the file exists, this governs whether we will overwrite.
         other_keys : dictionary, optional
             A set of header keys and values to write into the FITS header.
-        gzip_file : boolean, optional
-            gzip the file after writing, default False
         units : string, optional
-            the length units that the coordinates are written in, default '1'
+            the length units that the coordinates are written in, default 'cm'.
+            If units are set to "deg" then assume that sky coordinates are
+            requested.
+        sky_center : array_like, optional
+            Center of the image in (ra,dec) in degrees if sky coordinates
+            (units="deg") are requested.
+        D_A : float or tuple, optional
+            Angular diameter distance, given in code units as a float or
+            a tuple containing the value and the length unit. Required if
+            using sky coordinates.                                                                                            
         """
-        
-        import pyfits
-        from os import system
-        
+
+        if units == "deg" and D_A is None:
+            mylog.error("Sky coordinates require an angular diameter distance. Please specify D_A.")    
+        if iterable(D_A):
+            dist = D_A[0]/self.pf.units[D_A[1]]
+        else:
+            dist = D_A
+
+        if other_keys is None:
+            hdu_keys = {}
+        else:
+            hdu_keys = other_keys
+            
         extra_fields = ['x','y','z','px','py','pz','pdx','pdy','pdz','weight_field']
-        if filename_prefix.endswith('.fits'): filename_prefix=filename_prefix[:-5]
         if fields is None: 
             fields = [field for field in self.data_source.fields 
                       if field not in extra_fields]
 
+        coords = {}
         nx, ny = self.buff_size
-        dx = (self.bounds[1]-self.bounds[0])/nx*self.pf[units]
-        dy = (self.bounds[3]-self.bounds[2])/ny*self.pf[units]
-        xmin = self.bounds[0]*self.pf[units]
-        ymin = self.bounds[2]*self.pf[units]
-        simtime = self.pf.current_time
+        dx = (self.bounds[1]-self.bounds[0])/nx
+        dy = (self.bounds[3]-self.bounds[2])/ny
+        if units == "deg":  
+            coords["dx"] = -np.rad2deg(dx/dist)
+            coords["dy"] = np.rad2deg(dy/dist)
+            coords["xctr"] = sky_center[0]
+            coords["yctr"] = sky_center[1]
+            hdu_keys["MTYPE1"] = "EQPOS"
+            hdu_keys["MFORM1"] = "RA,DEC"
+            hdu_keys["CTYPE1"] = "RA---TAN"
+            hdu_keys["CTYPE2"] = "DEC--TAN"
+        else:
+            coords["dx"] = dx*self.pf.units[units]
+            coords["dy"] = dy*self.pf.units[units]
+            coords["xctr"] = 0.5*(self.bounds[0]+self.bounds[1])*self.pf.units[units]
+            coords["yctr"] = 0.5*(self.bounds[2]+self.bounds[3])*self.pf.units[units]
+        coords["units"] = units
+        
+        hdu_keys["Time"] = self.pf.current_time
 
-        hdus = []
-
-        first = True
-        
-        for field in fields:
-
-            if (first) :
-                hdu = pyfits.PrimaryHDU(self[field])
-                first = False
-            else :
-                hdu = pyfits.ImageHDU(self[field])
+        data = dict([(field,self[field]) for field in fields])
+        write_fits(data, filename_prefix, clobber=clobber, coords=coords,
+                   other_keys=hdu_keys)
                 
-            if self.data_source.has_key('weight_field'):
-                weightname = self.data_source._weight
-                if weightname is None: weightname = 'None'
-                field = field +'_'+weightname
-
-            hdu.header.update("Field", field)
-            hdu.header.update("Time", simtime)
-
-            hdu.header.update('WCSNAMEP', "PHYSICAL")            
-            hdu.header.update('CTYPE1P', "LINEAR")
-            hdu.header.update('CTYPE2P', "LINEAR")
-            hdu.header.update('CRPIX1P', 0.5)
-            hdu.header.update('CRPIX2P', 0.5)
-            hdu.header.update('CRVAL1P', xmin)
-            hdu.header.update('CRVAL2P', ymin)
-            hdu.header.update('CDELT1P', dx)
-            hdu.header.update('CDELT2P', dy)
-                    
-            hdu.header.update('CTYPE1', "LINEAR")
-            hdu.header.update('CTYPE2', "LINEAR")                                
-            hdu.header.update('CUNIT1', units)
-            hdu.header.update('CUNIT2', units)
-            hdu.header.update('CRPIX1', 0.5)
-            hdu.header.update('CRPIX2', 0.5)
-            hdu.header.update('CRVAL1', xmin)
-            hdu.header.update('CRVAL2', ymin)
-            hdu.header.update('CDELT1', dx)
-            hdu.header.update('CDELT2', dy)
-
-            if (other_keys is not None) :
-
-                for k,v in other_keys.items() :
-
-                    hdu.header.update(k,v)
-
-            hdus.append(hdu)
-
-            del hdu
-            
-        hdulist = pyfits.HDUList(hdus)
-
-        hdulist.writeto("%s.fits" % (filename_prefix), clobber=clobber)
-        
-        if (gzip_file) :
-            clob = ""
-            if (clobber) : clob = "-f"
-            system("gzip "+clob+" %s.fits" % (filename_prefix))
-        
     def open_in_ds9(self, field, take_log=True):
         """
         This will open a given field in the DS9 viewer.
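
With the new signature, writing an FRB out in sky coordinates needs both units="deg" and an angular diameter distance. A hedged usage sketch (the buffer, center, and distance are illustrative):

    # frb is a FixedResolutionBuffer, e.g. from proj.to_frb(width, nx)
    frb.export_fits("cluster_sky", fields=["Density"], clobber=True,
                    units="deg", sky_center=(30.0, 45.0),  # (ra, dec) in degrees
                    D_A=(780.0, "mpc"))                    # angular diameter distance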

diff -r 6144554f5038e1fade96aaba6648544fbbf3b135 -r f393c060da9bdc22d6e28937ba87c8d8347b6dfc yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -333,7 +333,8 @@
 
 def write_projection(data, filename, colorbar=True, colorbar_label=None, 
                      title=None, limits=None, take_log=True, figsize=(8,6),
-                     dpi=100, cmap_name='algae'):
+                     dpi=100, cmap_name='algae', extent=None, xlabel=None,
+                     ylabel=None):
     r"""Write a projection or volume rendering to disk with a variety of 
     pretty parameters such as limits, title, colorbar, etc.  write_projection
     uses the standard matplotlib interface to create the figure.  N.B. This code
@@ -392,16 +393,22 @@
     # Create the figure and paint the data on
     fig = matplotlib.figure.Figure(figsize=figsize)
     ax = fig.add_subplot(111)
-    fig.tight_layout()
-
-    cax = ax.imshow(data, vmin=limits[0], vmax=limits[1], norm=norm, cmap=cmap_name)
+    
+    cax = ax.imshow(data, vmin=limits[0], vmax=limits[1], norm=norm,
+                    extent=extent, cmap=cmap_name)
     
     if title:
         ax.set_title(title)
 
+    if xlabel:
+        ax.set_xlabel(xlabel)
+    if ylabel:
+        ax.set_ylabel(ylabel)
+
     # Suppress the x and y pixel counts
-    ax.set_xticks(())
-    ax.set_yticks(())
+    if extent is None:
+        ax.set_xticks(())
+        ax.set_yticks(())
 
     # Add a color bar and label if requested
     if colorbar:
@@ -409,6 +416,8 @@
         if colorbar_label:
             cbar.ax.set_ylabel(colorbar_label)
 
+    fig.tight_layout()
+        
     suffix = get_image_suffix(filename)
 
     if suffix == '':
@@ -429,70 +438,89 @@
     return filename
 
 
-def write_fits(image, filename_prefix, clobber=True, coords=None, gzip_file=False) :
+def write_fits(image, filename_prefix, clobber=True, coords=None,
+               other_keys=None):
     """
     This will export a FITS image of a floating point array. The output filename is
     *filename_prefix*. If clobber is set to True, this will overwrite any existing
     FITS file.
     
     This requires the *pyfits* module, which is a standalone module
-    provided by STSci to interface with FITS-format files.
+    provided by STSci to interface with FITS-format files, and is also part of
+    AstroPy.
     """
-    r"""Write out a floating point array directly to a FITS file, optionally
-    adding coordinates. 
+    r"""Write out floating point arrays directly to a FITS file, optionally
+    adding coordinates and header keywords.
         
     Parameters
     ----------
-    image : array_like
-        This is an (unscaled) array of floating point values, shape (N,N,) to save
-        in a FITS file.
+    image : array_like, or dict of array_like objects
+        This is either an (unscaled) array of floating point values, or a dict of
+        such arrays, shape (N,N,) to save in a FITS file. 
     filename_prefix : string
         This prefix will be prepended to every FITS file name.
     clobber : boolean
         If the file exists, this governs whether we will overwrite.
     coords : dictionary, optional
         A set of header keys and values to write to the FITS header to set up
-        a coordinate system. 
-    gzip_file : boolean, optional
-        gzip the file after writing, default False
+        a coordinate system, which is assumed to be linear unless specified otherwise
+        in *other_keys*
+        "units": the length units
+        "xctr","yctr": the center of the image
+        "dx","dy": the pixel width in each direction                                                
+    other_keys : dictionary, optional
+        A set of header keys and values to write into the FITS header.    
     """
+
+    try:
+        import pyfits
+    except ImportError:
+        try:
+            import astropy.io.fits as pyfits
+        except:
+            raise ImportError("You don't have pyFITS or AstroPy installed.")
     
-    import pyfits
     from os import system
     
-    if filename_prefix.endswith('.fits'): filename_prefix=filename_prefix[:-5]
-    
-    hdu = pyfits.PrimaryHDU(image)
+    try:
+        image.keys()
+        image_dict = image
+    except:
+        image_dict = dict(yt_data=image)
 
-    if (coords is not None) :
+    hdulist = [pyfits.PrimaryHDU()]
 
-        hdu.header.update('WCSNAMEP', "PHYSICAL")
-        hdu.header.update('CTYPE1P', "LINEAR")
-        hdu.header.update('CTYPE2P', "LINEAR")
-        hdu.header.update('CRPIX1P', 0.5)
-        hdu.header.update('CRPIX2P', 0.5)
-        hdu.header.update('CRVAL1P', coords["xmin"])
-        hdu.header.update('CRVAL2P', coords["ymin"])
-        hdu.header.update('CDELT1P', coords["dx"])
-        hdu.header.update('CDELT2P', coords["dy"])
+    for key in image_dict.keys():
+
+        mylog.info("Writing image block \"%s\"" % (key))
+        hdu = pyfits.ImageHDU(image_dict[key])
+        hdu.update_ext_name(key)
         
-        hdu.header.update('CTYPE1', "LINEAR")
-        hdu.header.update('CTYPE2', "LINEAR")
-        hdu.header.update('CUNIT1', coords["units"])
-        hdu.header.update('CUNIT2', coords["units"])
-        hdu.header.update('CRPIX1', 0.5)
-        hdu.header.update('CRPIX2', 0.5)
-        hdu.header.update('CRVAL1', coords["xmin"])
-        hdu.header.update('CRVAL2', coords["ymin"])
-        hdu.header.update('CDELT1', coords["dx"])
-        hdu.header.update('CDELT2', coords["dy"])
+        if coords is not None:
 
-    hdu.writeto("%s.fits" % (filename_prefix), clobber=clobber)
+            nx, ny = image_dict[key].shape
 
-    if (gzip_file) :
-        clob = ""
-        if (clobber) : clob="-f"
-        system("gzip "+clob+" %s.fits" % (filename_prefix))
+            hdu.header.update('CUNIT1', coords["units"])
+            hdu.header.update('CUNIT2', coords["units"])
+            hdu.header.update('CRPIX1', 0.5*(nx+1))
+            hdu.header.update('CRPIX2', 0.5*(ny+1))
+            hdu.header.update('CRVAL1', coords["xctr"])
+            hdu.header.update('CRVAL2', coords["yctr"])
+            hdu.header.update('CDELT1', coords["dx"])
+            hdu.header.update('CDELT2', coords["dy"])
+            # These are the defaults, but will get overwritten if
+            # the caller has specified them
+            hdu.header.update('CTYPE1', "LINEAR")
+            hdu.header.update('CTYPE2', "LINEAR")
+                                    
+        if other_keys is not None:
+            for k,v in other_keys.items():
+                hdu.header.update(k,v)
+
+        hdulist.append(hdu)
+
+    hdulist = pyfits.HDUList(hdulist)
+    hdulist.writeto("%s.fits" % (filename_prefix), clobber=clobber)                    
 
 def display_in_notebook(image, max_val=None):
     """


https://bitbucket.org/yt_analysis/yt-3.0/commits/824ba0b66e92/
Changeset:   824ba0b66e92
Branch:      yt
User:        xarthisius
Date:        2013-10-02 15:15:39
Summary:     Give meaningful names to answer tests
Affected #:  6 files

diff -r f393c060da9bdc22d6e28937ba87c8d8347b6dfc -r 824ba0b66e9229d070e9323032301e061e57a916 yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -29,11 +29,11 @@
 
 mue = 1./0.88
 freqs = np.array([30., 90., 240.])
-    
+
 def setup():
     """Test specific setup."""
     from yt.config import ytcfg
-    ytcfg["yt", "__withintesting"] = "True"        
+    ytcfg["yt", "__withintesting"] = "True"
 
 def full_szpack3d(pf, xo):
     data = pf.h.grids[0]
@@ -43,7 +43,7 @@
     Dtau = sigma_thompson*data["Density"]/(mh*mue)*dz
     Te = data["Temperature"]/K_per_keV
     betac = data["z-velocity"]/clight
-    pbar = get_pbar("Computing 3-D cell-by-cell S-Z signal for comparison.", nx) 
+    pbar = get_pbar("Computing 3-D cell-by-cell S-Z signal for comparison.", nx)
     for i in xrange(nx):
         pbar.update(i)
         for j in xrange(ny):
@@ -67,7 +67,7 @@
     a = 200.
     v0 = 300.*cm_per_km
     ddims = (nx,ny,nz)
-    
+
     x, y, z = np.mgrid[-R:R:nx*1j,
                        -R:R:ny*1j,
                        -R:R:nz*1j]
@@ -87,7 +87,7 @@
     data["z-velocity"] = velz
 
     bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
-    
+
     L = 2*R*cm_per_kpc
     dl = L/nz
 
@@ -118,9 +118,11 @@
         return szprj.data
     def onaxis_image_func(filename_prefix):
         szprj.write_png(filename_prefix)
-    yield GenericArrayTest(pf, onaxis_array_func)
-    yield GenericImageTest(pf, onaxis_image_func, 3)
-       
+    for test in [GenericArrayTest(pf, onaxis_array_func),
+                 GenericImageTest(pf, onaxis_image_func, 3)]:
+        test_M7_onaxis.__name__ = test.description
+        yield test
+
 @requires_module("SZpack")
 @requires_pf(M7)
 def test_M7_offaxis():
@@ -131,5 +133,7 @@
         return szprj.data
     def offaxis_image_func(filename_prefix):
         szprj.write_png(filename_prefix)
-    yield GenericArrayTest(pf, offaxis_array_func)
-    yield GenericImageTest(pf, offaxis_image_func, 3)
+    for test in [GenericArrayTest(pf, offaxis_array_func),
+                 GenericImageTest(pf, offaxis_image_func, 3)]:
+        test_M7_offaxis.__name__ = test.description
+        yield test

diff -r f393c060da9bdc22d6e28937ba87c8d8347b6dfc -r 824ba0b66e9229d070e9323032301e061e57a916 yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -29,6 +29,7 @@
     pf = data_dir_load(gc)
     yield assert_equal, str(pf), "data.0077.3d.hdf5"
     for test in small_patch_amr(gc, _fields):
+        test_gc.__name__ = test.description
         yield test
 
 tb = "TurbBoxLowRes/data.0005.3d.hdf5"
@@ -37,4 +38,5 @@
     pf = data_dir_load(tb)
     yield assert_equal, str(pf), "data.0005.3d.hdf5"
     for test in small_patch_amr(tb, _fields):
+        test_tb.__name__ = test.description
         yield test

diff -r f393c060da9bdc22d6e28937ba87c8d8347b6dfc -r 824ba0b66e9229d070e9323032301e061e57a916 yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -30,6 +30,7 @@
     pf = data_dir_load(m7)
     yield assert_equal, str(pf), "moving7_0010"
     for test in small_patch_amr(m7, _fields):
+        test_moving7.__name__ = test.description
         yield test
 
 g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
@@ -38,4 +39,5 @@
     pf = data_dir_load(g30)
     yield assert_equal, str(pf), "galaxy0030"
     for test in big_patch_amr(g30, _fields):
+        test_galaxy0030.__name__ = test.description
         yield test

diff -r f393c060da9bdc22d6e28937ba87c8d8347b6dfc -r 824ba0b66e9229d070e9323032301e061e57a916 yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -29,6 +29,7 @@
     pf = data_dir_load(sloshing)
     yield assert_equal, str(pf), "sloshing_low_res_hdf5_plt_cnt_0300"
     for test in small_patch_amr(sloshing, _fields):
+        test_sloshing.__name__ = test.description
         yield test
 
 _fields_2d = ("Temperature", "Density")
@@ -39,4 +40,5 @@
     pf = data_dir_load(wt)
     yield assert_equal, str(pf), "windtunnel_4lev_hdf5_plt_cnt_0030"
     for test in small_patch_amr(wt, _fields_2d):
+        test_wind_tunnel.__name__ = test.description
         yield test

diff -r f393c060da9bdc22d6e28937ba87c8d8347b6dfc -r 824ba0b66e9229d070e9323032301e061e57a916 yt/frontends/orion/tests/test_outputs.py
--- a/yt/frontends/orion/tests/test_outputs.py
+++ b/yt/frontends/orion/tests/test_outputs.py
@@ -29,6 +29,7 @@
     pf = data_dir_load(radadvect)
     yield assert_equal, str(pf), "plt00000"
     for test in small_patch_amr(radadvect, _fields):
+        test_radadvect.__name__ = test.description
         yield test
 
 rt = "RadTube/plt00500"
@@ -37,4 +38,5 @@
     pf = data_dir_load(rt)
     yield assert_equal, str(pf), "plt00500"
     for test in small_patch_amr(rt, _fields):
+        test_radtube.__name__ = test.description
         yield test

diff -r f393c060da9bdc22d6e28937ba87c8d8347b6dfc -r 824ba0b66e9229d070e9323032301e061e57a916 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -94,8 +94,10 @@
     for ax in 'xyz':
         for attr_name in ATTR_ARGS.keys():
             for args in ATTR_ARGS[attr_name]:
-                yield PlotWindowAttributeTest(pf, plot_field, ax, attr_name,
-                                              args, decimals)
+                test = PlotWindowAttributeTest(pf, plot_field, ax, attr_name,
+                                               args, decimals)
+                test_attributes.__name__ = test.description
+                yield test
 
 
 @requires_pf(WT)
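
The pattern repeated in each file above works because nose labels a yielded
test with the generator function's __name__ as it stands at the moment of the
yield; reassigning __name__ before each yield therefore gives every sub-test
a distinct, readable name. A minimal, self-contained sketch of the idiom
(DummyTest is a stand-in, not a yt class):

    class DummyTest(object):
        def __init__(self, description):
            self.description = description
        def __call__(self):
            assert True

    def test_named():
        # Each yielded callable is reported under the name assigned here.
        for test in [DummyTest("case_%d" % i) for i in range(3)]:
            test_named.__name__ = test.description
            yield test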


https://bitbucket.org/yt_analysis/yt-3.0/commits/da308f4c50c7/
Changeset:   da308f4c50c7
Branch:      yt
User:        samskillman
Date:        2013-06-30 00:22:39
Summary:     Add a new TransferFunctionHelper object to help build, plot, and manage a
transfer function.  Also add the capability of overplotting a 1D profile (of,
say, CellMass) on the transfer function to help guide choices.
Affected #:  2 files

diff -r 8e8bd5b353df99c1510058a7c982329e5212fcfe -r da308f4c50c7b632169cb3e37036a9651c262864 yt/visualization/volume_rendering/transfer_function_helper.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -0,0 +1,216 @@
+"""
+A helper class to build, display, and modify transfer functions for volume
+rendering.
+
+Author: Samuel Skillman <samskillman at gmail.com>
+Affiliation: DOE CSGF, U. of Colorado at Boulder
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Samuel Skillman.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+from yt.funcs import mylog
+from yt.data_objects.profiles import BinnedProfile1D
+from yt.visualization.volume_rendering.api import ColorTransferFunction
+from yt.visualization._mpl_imports import FigureCanvasAgg
+from matplotlib.figure import Figure
+from IPython.core.display import Image
+import cStringIO
+import numpy as np
+
+
+class TransferFunctionHelper(object):
+
+    profiles = {}
+
+    def __init__(self, pf):
+        r"""A transfer function helper.
+
+        This attempts to help set up a good transfer function by finding
+        bounds, handling linear/log options, and displaying the transfer
+        function combined with 1D profiles of the rendering quantity.
+
+        Parameters
+        ----------
+        pf: A StaticOutput instance
+            A static output that is currently being rendered. This is used to
+            help set up data bounds.
+
+        Notes
+        -----
+        """
+        self.pf = pf
+        self.field = None
+        self.log = False
+        self.tf = None
+        self.bounds = None
+        self.grey_opacity = True
+
+    def set_bounds(self, bounds=None):
+        """
+        Set the bounds of the transfer function.
+
+        Parameters
+        ----------
+        bounds: array-like, length 2, optional
+            A length 2 list/array in the form [min, max]. These should be the
+            raw values and not the logarithm of the min and max. If bounds is
+            None, the bounds of the data are calculated from all of the data
+            in the dataset.  This can be slow for very large datasets.
+        """
+        if bounds is None:
+            self.bounds = \
+                self.pf.h.all_data().quantities['Extrema'](self.field)[0]
+
+        # Do some error checking.
+        assert(len(self.bounds) == 2)
+        if self.log:
+            assert(self.bounds[0] > 0.0)
+            assert(self.bounds[1] > 0.0)
+        return
+
+    def set_field(self, field):
+        """
+        Set the field to be rendered
+
+        Parameters
+        ----------
+        field: string
+            The field to be rendered.
+        """
+        self.field = field
+
+    def set_log(self, log):
+        """
+        Set whether or not the transfer function should be in log or linear
+        space. Also modifies the pf.field_info[field].take_log attribute to
+        stay in sync with this setting.
+
+        Parameters
+        ----------
+        log: boolean
+            Sets whether the transfer function should use log or linear space.
+        """
+        self.log = log
+        self.pf.h
+        self.pf.field_info[self.field].take_log = log
+
+    def build_transfer_function(self):
+        """
+        Builds the transfer function according to the current state of the
+        TransferFunctionHelper.
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+
+        A ColorTransferFunction object.
+
+        """
+        if self.bounds is None:
+            mylog.info('Calculating data bounds. This may take a while.' +
+                       '  Set the .bounds to avoid this.')
+            self.set_bounds()
+
+        if self.log:
+            mi, ma = np.log10(self.bounds[0]), np.log10(self.bounds[1])
+        else:
+            mi, ma = self.bounds
+        self.tf = ColorTransferFunction((mi, ma),
+                                        grey_opacity=self.grey_opacity,
+                                        nbins=512)
+        return self.tf
+
+    def plot(self, fn=None, profile_field=None, profile_weight=None):
+        """
+        Save the current transfer function to a bitmap, or display
+        it inline.
+
+        Parameters
+        ----------
+        fn: string, optional
+            Filename to save the image to. If None, an image is returned
+            for display in an IPython session.
+
+        Returns
+        -------
+
+        If fn is None, will return an image to an IPython notebook.
+
+        """
+        if self.tf is None:
+            self.build_transfer_function()
+        tf = self.tf
+        if self.log:
+            xfunc = np.logspace
+            xmi, xma = np.log10(self.bounds[0]), np.log10(self.bounds[1])
+        else:
+            xfunc = np.linspace
+            xmi, xma = self.bounds
+
+        x = xfunc(xmi, xma, tf.nbins)
+        y = tf.funcs[3].y
+        w = np.append(x[1:]-x[:-1], x[-1]-x[-2])
+        colors = np.array([tf.funcs[0].y, tf.funcs[1].y, tf.funcs[2].y,
+                           np.ones_like(x)]).T
+
+        fig = Figure(figsize=[6, 3])
+        canvas = FigureCanvasAgg(fig)
+        ax = fig.add_axes([0.2, 0.2, 0.75, 0.75])
+        ax.bar(x, tf.funcs[3].y, w, edgecolor=[0.0, 0.0, 0.0, 0.0],
+               log=True, color=colors)
+
+        if profile_field is not None:
+            try:
+                prof = self.profiles[self.field]
+            except KeyError:
+                self.setup_profile(profile_field, profile_weight)
+                prof = self.profiles[self.field]
+            ax.plot(prof[self.field], prof[profile_field]*tf.funcs[3].y.max() /
+                    prof[profile_field].max(), color='w', linewidth=3)
+            ax.plot(prof[self.field], prof[profile_field]*tf.funcs[3].y.max() /
+                    prof[profile_field].max(), color='k')
+
+        ax.set_xscale({True: 'log', False: 'linear'}[self.log])
+        ax.set_xlim(x.min(), x.max())
+        ax.set_xlabel(self.pf.field_info[self.field].get_label())
+        ax.set_ylabel(r'$\mathrm{alpha}$')
+        ax.set_ylim(y.max()*1.0e-3, y.max()*2)
+
+        if fn is None:
+            f = cStringIO.StringIO()
+            canvas.print_figure(f)
+            f.seek(0)
+            img = f.read()
+            return Image(img)
+        else:
+            fig.savefig(fn)
+
+    def setup_profile(self, profile_field=None, profile_weight=None):
+        if profile_field is None:
+            profile_field = 'CellVolume'
+        prof = BinnedProfile1D(self.pf.h.all_data(), 128, self.field,
+                               self.bounds[0], self.bounds[1],
+                               log_space=self.log,
+                               lazy_reader=False, end_collect=False)
+        prof.add_fields([profile_field], fractional=False,
+                        weight=profile_weight)
+        self.profiles[self.field] = prof
+        return

diff -r 8e8bd5b353df99c1510058a7c982329e5212fcfe -r da308f4c50c7b632169cb3e37036a9651c262864 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -641,6 +641,8 @@
             self.x_bounds[0]))
         rel1 = int(self.nbins*(ma - self.x_bounds[0])/(self.x_bounds[1] -
             self.x_bounds[0]))
+        rel0 = max(rel0, 0)
+        rel1 = min(rel1, self.nbins-1)
         tomap = np.linspace(0.,1.,num=rel1-rel0)
         cmap = get_cmap(colormap)
         cc = cmap(tomap)
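
A minimal usage sketch of the new helper (the dataset path is hypothetical;
bounds are set directly on the instance, as the mylog message in
build_transfer_function suggests, to avoid a full-dataset Extrema pass):

    from yt.mods import load
    from yt.visualization.volume_rendering.transfer_function_helper import \
        TransferFunctionHelper

    pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    tfh = TransferFunctionHelper(pf)
    tfh.set_field("Density")
    tfh.set_log(True)
    tfh.bounds = (1.0e-30, 1.0e-24)   # raw (not log) bounds, per set_bounds
    tfh.build_transfer_function()
    tfh.plot("tf.png", profile_field="CellMass")  # overplot a 1D profile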


https://bitbucket.org/yt_analysis/yt-3.0/commits/1fb87198e5e9/
Changeset:   1fb87198e5e9
Branch:      yt
User:        samskillman
Date:        2013-06-30 04:10:52
Summary:     Fix the case where an existing profile is later asked for a different field.
Affected #:  1 file

diff -r da308f4c50c7b632169cb3e37036a9651c262864 -r 1fb87198e5e9d7cdf793da6a8150d4b1d2a07092 yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -183,6 +183,9 @@
             except KeyError:
                 self.setup_profile(profile_field, profile_weight)
                 prof = self.profiles[self.field]
+            if profile_field not in prof.keys():
+                prof.add_fields([profile_field], fractional=False,
+                                weight=profile_weight)
             ax.plot(prof[self.field], prof[profile_field]*tf.funcs[3].y.max() /
                     prof[profile_field].max(), color='w', linewidth=3)
             ax.plot(prof[self.field], prof[profile_field]*tf.funcs[3].y.max() /


https://bitbucket.org/yt_analysis/yt-3.0/commits/a588666af47f/
Changeset:   a588666af47f
Branch:      yt
User:        samskillman
Date:        2013-07-08 22:59:58
Summary:     Fixing up the bounds to be respected, working around a matplotlib bug for
logarithmic bar plots, and initializing the profiles more properly.
Affected #:  1 file

diff -r 1fb87198e5e9d7cdf793da6a8150d4b1d2a07092 -r a588666af47f6e39cd931225df084a8633195e14 yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -35,7 +35,7 @@
 
 class TransferFunctionHelper(object):
 
-    profiles = {}
+    profiles = None
 
     def __init__(self, pf):
         r"""A transfer function helper.
@@ -59,6 +59,7 @@
         self.tf = None
         self.bounds = None
         self.grey_opacity = True
+        self.profiles = {}
 
     def set_bounds(self, bounds=None):
         """
@@ -73,8 +74,8 @@
             in the dataset.  This can be slow for very large datasets.
         """
         if bounds is None:
-            self.bounds = \
-                self.pf.h.all_data().quantities['Extrema'](self.field)[0]
+            bounds = self.pf.h.all_data().quantities['Extrema'](self.field)[0]
+        self.bounds = bounds
 
         # Do some error checking.
         assert(len(self.bounds) == 2)
@@ -175,7 +176,7 @@
         canvas = FigureCanvasAgg(fig)
         ax = fig.add_axes([0.2, 0.2, 0.75, 0.75])
         ax.bar(x, tf.funcs[3].y, w, edgecolor=[0.0, 0.0, 0.0, 0.0],
-               log=True, color=colors)
+               log=True, color=colors, bottom=[0])
 
         if profile_field is not None:
             try:
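
The profiles change above fixes a classic Python pitfall: a mutable class
attribute is shared by every instance, so one helper's cached profiles would
have leaked into all others. A minimal illustration (stand-alone, not yt
code):

    class Shared(object):
        profiles = {}           # one dict shared across all instances

    class PerInstance(object):
        profiles = None         # class-level placeholder only
        def __init__(self):
            self.profiles = {}  # fresh dict per instance, as in the fix

    a, b = Shared(), Shared()
    a.profiles["x"] = 1
    assert b.profiles == {"x": 1}   # surprise: b sees a's entry

    c, d = PerInstance(), PerInstance()
    c.profiles["x"] = 1
    assert d.profiles == {}         # isolated, as intended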


https://bitbucket.org/yt_analysis/yt-3.0/commits/51157128e185/
Changeset:   51157128e185
Branch:      yt
User:        MatthewTurk
Date:        2013-10-02 21:32:13
Summary:     Merged in samskillman/yt (pull request #538)

Transfer Function Helper
Affected #:  1 file

diff -r 824ba0b66e9229d070e9323032301e061e57a916 -r 51157128e18557c82dc00a9f5853b6bcd361e63a yt/visualization/volume_rendering/transfer_function_helper.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -0,0 +1,220 @@
+"""
+A helper class to build, display, and modify transfer functions for volume
+rendering.
+
+Author: Samuel Skillman <samskillman at gmail.com>
+Affiliation: DOE CSGF, U. of Colorado at Boulder
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Samuel Skillman.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+from yt.funcs import mylog
+from yt.data_objects.profiles import BinnedProfile1D
+from yt.visualization.volume_rendering.api import ColorTransferFunction
+from yt.visualization._mpl_imports import FigureCanvasAgg
+from matplotlib.figure import Figure
+from IPython.core.display import Image
+import cStringIO
+import numpy as np
+
+
+class TransferFunctionHelper(object):
+
+    profiles = None
+
+    def __init__(self, pf):
+        r"""A transfer function helper.
+
+        This attempts to help set up a good transfer function by finding
+        bounds, handling linear/log options, and displaying the transfer
+        function combined with 1D profiles of the rendering quantity.
+
+        Parameters
+        ----------
+        pf: A StaticOutput instance
+            A static output that is currently being rendered. This is used to
+            help set up data bounds.
+
+        Notes
+        -----
+        """
+        self.pf = pf
+        self.field = None
+        self.log = False
+        self.tf = None
+        self.bounds = None
+        self.grey_opacity = True
+        self.profiles = {}
+
+    def set_bounds(self, bounds=None):
+        """
+        Set the bounds of the transfer function.
+
+        Parameters
+        ----------
+        bounds: array-like, length 2, optional
+            A length 2 list/array in the form [min, max]. These should be the
+            raw values and not the logarithm of the min and max. If bounds is
+            None, the bounds of the data are calculated from all of the data
+            in the dataset.  This can be slow for very large datasets.
+        """
+        if bounds is None:
+            bounds = self.pf.h.all_data().quantities['Extrema'](self.field)[0]
+        self.bounds = bounds
+
+        # Do some error checking.
+        assert(len(self.bounds) == 2)
+        if self.log:
+            assert(self.bounds[0] > 0.0)
+            assert(self.bounds[1] > 0.0)
+        return
+
+    def set_field(self, field):
+        """
+        Set the field to be rendered
+
+        Parameters
+        ----------
+        field: string
+            The field to be rendered.
+        """
+        self.field = field
+
+    def set_log(self, log):
+        """
+        Set whether or not the transfer function should be in log or linear
+        space. Also modifies the pf.field_info[field].take_log attribute to
+        stay in sync with this setting.
+
+        Parameters
+        ----------
+        log: boolean
+            Sets whether the transfer function should use log or linear space.
+        """
+        self.log = log
+        self.pf.h
+        self.pf.field_info[self.field].take_log = log
+
+    def build_transfer_function(self):
+        """
+        Builds the transfer function according to the current state of the
+        TransferFunctionHelper.
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+
+        A ColorTransferFunction object.
+
+        """
+        if self.bounds is None:
+            mylog.info('Calculating data bounds. This may take a while.' +
+                       '  Set the .bounds to avoid this.')
+            self.set_bounds()
+
+        if self.log:
+            mi, ma = np.log10(self.bounds[0]), np.log10(self.bounds[1])
+        else:
+            mi, ma = self.bounds
+        self.tf = ColorTransferFunction((mi, ma),
+                                        grey_opacity=self.grey_opacity,
+                                        nbins=512)
+        return self.tf
+
+    def plot(self, fn=None, profile_field=None, profile_weight=None):
+        """
+        Save the current transfer function to a bitmap, or display
+        it inline.
+
+        Parameters
+        ----------
+        fn: string, optional
+            Filename to save the image to. If None, an image is returned
+            for display in an IPython session.
+
+        Returns
+        -------
+
+        If fn is None, will return an image to an IPython notebook.
+
+        """
+        if self.tf is None:
+            self.build_transfer_function()
+        tf = self.tf
+        if self.log:
+            xfunc = np.logspace
+            xmi, xma = np.log10(self.bounds[0]), np.log10(self.bounds[1])
+        else:
+            xfunc = np.linspace
+            xmi, xma = self.bounds
+
+        x = xfunc(xmi, xma, tf.nbins)
+        y = tf.funcs[3].y
+        w = np.append(x[1:]-x[:-1], x[-1]-x[-2])
+        colors = np.array([tf.funcs[0].y, tf.funcs[1].y, tf.funcs[2].y,
+                           np.ones_like(x)]).T
+
+        fig = Figure(figsize=[6, 3])
+        canvas = FigureCanvasAgg(fig)
+        ax = fig.add_axes([0.2, 0.2, 0.75, 0.75])
+        ax.bar(x, tf.funcs[3].y, w, edgecolor=[0.0, 0.0, 0.0, 0.0],
+               log=True, color=colors, bottom=[0])
+
+        if profile_field is not None:
+            try:
+                prof = self.profiles[self.field]
+            except KeyError:
+                self.setup_profile(profile_field, profile_weight)
+                prof = self.profiles[self.field]
+            if profile_field not in prof.keys():
+                prof.add_fields([profile_field], fractional=False,
+                                weight=profile_weight)
+            ax.plot(prof[self.field], prof[profile_field]*tf.funcs[3].y.max() /
+                    prof[profile_field].max(), color='w', linewidth=3)
+            ax.plot(prof[self.field], prof[profile_field]*tf.funcs[3].y.max() /
+                    prof[profile_field].max(), color='k')
+
+        ax.set_xscale({True: 'log', False: 'linear'}[self.log])
+        ax.set_xlim(x.min(), x.max())
+        ax.set_xlabel(self.pf.field_info[self.field].get_label())
+        ax.set_ylabel(r'$\mathrm{alpha}$')
+        ax.set_ylim(y.max()*1.0e-3, y.max()*2)
+
+        if fn is None:
+            f = cStringIO.StringIO()
+            canvas.print_figure(f)
+            f.seek(0)
+            img = f.read()
+            return Image(img)
+        else:
+            fig.savefig(fn)
+
+    def setup_profile(self, profile_field=None, profile_weight=None):
+        if profile_field is None:
+            profile_field = 'CellVolume'
+        prof = BinnedProfile1D(self.pf.h.all_data(), 128, self.field,
+                               self.bounds[0], self.bounds[1],
+                               log_space=self.log,
+                               lazy_reader=False, end_collect=False)
+        prof.add_fields([profile_field], fractional=False,
+                        weight=profile_weight)
+        self.profiles[self.field] = prof
+        return


https://bitbucket.org/yt_analysis/yt-3.0/commits/736abff6337f/
Changeset:   736abff6337f
Branch:      yt
User:        ngoldbaum
Date:        2013-10-03 00:13:15
Summary:     Updating TransferFunctionHelper's license blurb.
Affected #:  1 file

diff -r 51157128e18557c82dc00a9f5853b6bcd361e63a -r 736abff6337f5c1c037638c1ccfac2b6c4ef3508 yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -2,27 +2,18 @@
 A helper class to build, display, and modify transfer functions for volume
 rendering.
 
-Author: Samuel Skillman <samskillman at gmail.com>
-Affiliation: DOE CSGF, U. of Colorado at Boulder
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2013 Samuel Skillman.  All Rights Reserved.
 
-  This file is part of yt.
 
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
+"""
 
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
 from yt.funcs import mylog
 from yt.data_objects.profiles import BinnedProfile1D
 from yt.visualization.volume_rendering.api import ColorTransferFunction


https://bitbucket.org/yt_analysis/yt-3.0/commits/4628e092abdc/
Changeset:   4628e092abdc
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-07 15:00:04
Summary:     Merging from yt-2.x
Affected #:  25 files

diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -103,5 +103,8 @@
     TwoPointFunctions, \
     FcnSet
 
+from .sunyaev_zeldovich.api import SZProjection
+
 from .radmc3d_export.api import \
     RadMC3DWriter
+

diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -21,4 +21,5 @@
     config.add_subpackage("star_analysis")
     config.add_subpackage("two_point_functions")
     config.add_subpackage("radmc3d_export")
+    config.add_subpackage("sunyaev_zeldovich")    
     return config

diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/analysis_modules/sunyaev_zeldovich/api.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/api.py
@@ -0,0 +1,12 @@
+"""
+API for sunyaev_zeldovich
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from projection import SZProjection

diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/analysis_modules/sunyaev_zeldovich/projection.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -0,0 +1,349 @@
+"""
+Projection class for the Sunyaev-Zeldovich effect. Requires SZpack (at least
+version 1.1.1) to be downloaded and installed:
+
+http://www.chluba.de/SZpack/
+
+For details on the computations involved please refer to the following references:
+
+Chluba, Nagai, Sazonov, Nelson, MNRAS, 2012, arXiv:1205.5778
+Chluba, Switzer, Nagai, Nelson, MNRAS, 2012, arXiv:1211.3206 
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
+from yt.data_objects.image_array import ImageArray
+from yt.data_objects.field_info_container import add_field
+from yt.funcs import fix_axis, mylog, iterable, get_pbar
+from yt.utilities.definitions import inv_axis_names
+from yt.visualization.image_writer import write_fits, write_projection
+from yt.visualization.volume_rendering.camera import off_axis_projection
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+     communication_system, parallel_root_only
+import numpy as np
+
+I0 = 2*(kboltz*Tcmb)**3/((hcgs*clight)**2)*1.0e17
+        
+try:
+    import SZpack
+except:
+    raise ImportError("SZpack not installed. It can be obtained from from http://www.chluba.de/SZpack/.")
+
+vlist = "xyz"
+
+def _t_squared(field, data):
+    return data["Density"]*data["TempkeV"]*data["TempkeV"]
+add_field("TSquared", function=_t_squared)
+
+def _beta_perp_squared(field, data):
+    return data["Density"]*data["VelocityMagnitude"]**2/clight/clight - data["BetaParSquared"]
+add_field("BetaPerpSquared", function=_beta_perp_squared)
+
+def _beta_par_squared(field, data):
+    return data["BetaPar"]**2/data["Density"]
+add_field("BetaParSquared", function=_beta_par_squared)
+
+def _t_beta_par(field, data):
+    return data["TempkeV"]*data["BetaPar"]
+add_field("TBetaPar", function=_t_beta_par)
+
+def _t_sz(field, data):
+    return data["Density"]*data["TempkeV"]
+add_field("TeSZ", function=_t_sz)
+
+class SZProjection(object):
+    r""" Initialize a SZProjection object.
+
+    Parameters
+    ----------
+    pf : parameter_file
+        The parameter file.
+    freqs : array_like
+        The frequencies (in GHz) at which to compute the SZ spectral distortion.
+    mue : float, optional
+        Mean molecular weight for determining the electron number density.
+    high_order : boolean, optional
+        Should we calculate high-order moments of velocity and temperature?
+
+    Examples
+    --------
+    >>> freqs = [90., 180., 240.]
+    >>> szprj = SZProjection(pf, freqs, high_order=True)
+    """
+    def __init__(self, pf, freqs, mue=1.143, high_order=False):
+            
+        self.pf = pf
+        self.num_freqs = len(freqs)
+        self.high_order = high_order
+        self.freqs = np.array(freqs)
+        self.mueinv = 1./mue
+        self.xinit = hcgs*self.freqs*1.0e9/(kboltz*Tcmb)
+        self.freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs]
+        self.data = {}
+
+        self.units = {}
+        self.units["TeSZ"] = r"$\mathrm{keV}$"
+        self.units["Tau"] = None
+
+        self.display_names = {}
+        self.display_names["TeSZ"] = r"$\mathrm{T_e}$"
+        self.display_names["Tau"] = r"$\mathrm{\tau}$"
+
+        for f, field in zip(self.freqs, self.freq_fields):
+            self.units[field] = r"$\mathrm{MJy\ sr^{-1}}$"
+            self.display_names[field] = r"$\mathrm{\Delta{I}_{%d\ GHz}}$" % (int(f))
+            
+    def on_axis(self, axis, center="c", width=(1, "unitary"), nx=800, source=None):
+        r""" Make an on-axis projection of the SZ signal.
+
+        Parameters
+        ----------
+        axis : integer or string
+            The axis of the simulation domain along which to make the SZ projection.
+        center : array_like or string, optional
+            The center of the projection.
+        width : float or tuple
+            The width of the projection.
+        nx : integer, optional
+            The dimensions on a side of the projection image.
+        source : yt.data_objects.api.AMRData, optional
+            If specified, this will be the data source used for selecting regions to project.
+
+        Examples
+        --------
+        >>> szprj.on_axis("y", center="max", width=(1.0, "mpc"), source=my_sphere)
+        """
+        axis = fix_axis(axis)
+
+        def _beta_par(field, data):
+            axis = data.get_field_parameter("axis")
+            vpar = data["Density"]*data["%s-velocity" % (vlist[axis])]
+            return vpar/clight
+        add_field("BetaPar", function=_beta_par)    
+
+        proj = self.pf.h.proj(axis, "Density", source=source)
+        proj.set_field_parameter("axis", axis)
+        frb = proj.to_frb(width, nx)
+        dens = frb["Density"]
+        Te = frb["TeSZ"]/dens
+        bpar = frb["BetaPar"]/dens
+        omega1 = frb["TSquared"]/dens/(Te*Te) - 1.
+        bperp2 = np.zeros((nx,nx))
+        sigma1 = np.zeros((nx,nx))
+        kappa1 = np.zeros((nx,nx))                                    
+        if self.high_order:
+            bperp2 = frb["BetaPerpSquared"]/dens
+            sigma1 = frb["TBetaPar"]/dens/Te - bpar
+            kappa1 = frb["BetaParSquared"]/dens - bpar*bpar
+        tau = sigma_thompson*dens*self.mueinv/mh
+
+        nx,ny = frb.buff_size
+        self.bounds = frb.bounds
+        self.dx = (frb.bounds[1]-frb.bounds[0])/nx
+        self.dy = (frb.bounds[3]-frb.bounds[2])/ny
+        self.nx = nx
+        
+        self._compute_intensity(tau, Te, bpar, omega1, sigma1, kappa1, bperp2)
+                                                                                                                
+    def off_axis(self, L, center="c", width=(1, "unitary"), nx=800, source=None):
+        r""" Make an off-axis projection of the SZ signal.
+        
+        Parameters
+        ----------
+        L : array_like
+            The normal vector of the projection. 
+        center : array_like or string, optional
+            The center of the projection.
+        width : float or tuple
+            The width of the projection.
+        nx : integer, optional
+            The dimensions on a side of the projection image.
+        source : yt.data_objects.api.AMRData, optional
+            If specified, this will be the data source used for selecting regions to project.
+            Currently unsupported in yt 2.x.
+                    
+        Examples
+        --------
+        >>> L = np.array([0.5, 1.0, 0.75])
+        >>> szprj.off_axis(L, center="c", width=(2.0, "mpc"))
+        """
+        if iterable(width):
+            w = width[0]/self.pf.units[width[1]]
+        else:
+            w = width
+        if center == "c":
+            ctr = self.pf.domain_center
+        elif center == "max":
+            ctr = self.pf.h.find_max("Density")
+        else:
+            ctr = center
+
+        if source is not None:
+            mylog.error("Source argument is not currently supported for off-axis S-Z projections.")
+            raise NotImplementedError
+                
+        def _beta_par(field, data):
+            vpar = data["Density"]*(data["x-velocity"]*L[0]+
+                                    data["y-velocity"]*L[1]+
+                                    data["z-velocity"]*L[2])
+            return vpar/clight
+        add_field("BetaPar", function=_beta_par)
+
+        dens    = off_axis_projection(self.pf, ctr, L, w, nx, "Density")
+        Te      = off_axis_projection(self.pf, ctr, L, w, nx, "TeSZ")/dens
+        bpar    = off_axis_projection(self.pf, ctr, L, w, nx, "BetaPar")/dens
+        omega1  = off_axis_projection(self.pf, ctr, L, w, nx, "TSquared")/dens
+        omega1  = omega1/(Te*Te) - 1.
+        if self.high_order:
+            bperp2  = off_axis_projection(self.pf, ctr, L, w, nx, "BetaPerpSquared")/dens
+            sigma1  = off_axis_projection(self.pf, ctr, L, w, nx, "TBetaPar")/dens
+            sigma1  = sigma1/Te - bpar
+            kappa1  = off_axis_projection(self.pf, ctr, L, w, nx, "BetaParSquared")/dens
+            kappa1 -= bpar
+        else:
+            bperp2 = np.zeros((nx,nx))
+            sigma1 = np.zeros((nx,nx))
+            kappa1 = np.zeros((nx,nx))
+        tau = sigma_thompson*dens*self.mueinv/mh
+
+        self.bounds = np.array([-0.5*w, 0.5*w, -0.5*w, 0.5*w])
+        self.dx = w/nx
+        self.dy = w/nx
+        self.nx = nx
+
+        self._compute_intensity(tau, Te, bpar, omega1, sigma1, kappa1, bperp2)
+
+    def _compute_intensity(self, tau, Te, bpar, omega1, sigma1, kappa1, bperp2):
+
+        # Bad hack, but we get NaNs if we don't do something like this
+        small_beta = np.abs(bpar) < 1.0e-20
+        bpar[small_beta] = 1.0e-20
+                                                                   
+        comm = communication_system.communicators[-1]
+
+        nx, ny = self.nx,self.nx
+        signal = np.zeros((self.num_freqs,nx,ny))
+        xo = np.zeros((self.num_freqs))
+        
+        k = int(0)
+
+        start_i = comm.rank*nx/comm.size
+        end_i = (comm.rank+1)*nx/comm.size
+                        
+        pbar = get_pbar("Computing SZ signal.", nx*nx)
+
+        for i in xrange(start_i, end_i):
+            for j in xrange(ny):
+                xo[:] = self.xinit[:]
+                SZpack.compute_combo_means(xo, tau[i,j], Te[i,j],
+                                           bpar[i,j], omega1[i,j],
+                                           sigma1[i,j], kappa1[i,j], bperp2[i,j])
+                signal[:,i,j] = xo[:]
+                pbar.update(k)
+                k += 1
+
+        signal = comm.mpi_allreduce(signal)
+        
+        pbar.finish()
+                
+        for i, field in enumerate(self.freq_fields):
+            self.data[field] = ImageArray(I0*self.xinit[i]**3*signal[i,:,:])
+        self.data["Tau"] = ImageArray(tau)
+        self.data["TeSZ"] = ImageArray(Te)
+
+    @parallel_root_only
+    def write_fits(self, filename_prefix, clobber=True):
+        r""" Export images to a FITS file. Writes the SZ distortion in all
+        specified frequencies as well as the mass-weighted temperature and the
+        optical depth. Distance units are in kpc.  
+        
+        Parameters
+        ----------
+        filename_prefix : string
+            The prefix of the FITS filename.
+        clobber : boolean, optional
+            If the file already exists, do we overwrite?
+                    
+        Examples
+        --------
+        >>> szprj.write_fits("SZbullet", clobber=False)
+        """
+        coords = {}
+        coords["dx"] = self.dx*self.pf.units["kpc"]
+        coords["dy"] = self.dy*self.pf.units["kpc"]
+        coords["xctr"] = 0.0
+        coords["yctr"] = 0.0
+        coords["units"] = "kpc"
+        other_keys = {"Time" : self.pf.current_time}
+        write_fits(self.data, filename_prefix, clobber=clobber, coords=coords,
+                   other_keys=other_keys)
+
+    @parallel_root_only
+    def write_png(self, filename_prefix):
+        r""" Export images to PNG files. Writes the SZ distortion in all
+        specified frequencies as well as the mass-weighted temperature and the
+        optical depth. Distance units are in kpc. 
+        
+        Parameters
+        ----------
+        filename_prefix : string
+            The prefix of the image filenames.
+                
+        Examples
+        --------
+        >>> szprj.write_png("SZsloshing")
+        """     
+        extent = tuple([bound*self.pf.units["kpc"] for bound in self.bounds])
+        for field, image in self.items():
+            filename=filename_prefix+"_"+field+".png"
+            label = self.display_names[field]
+            if self.units[field] is not None:
+                label += " ("+self.units[field]+")"
+            write_projection(image, filename, colorbar_label=label, take_log=False,
+                             extent=extent, xlabel=r"$\mathrm{x\ (kpc)}$",
+                             ylabel=r"$\mathrm{y\ (kpc)}$")
+
+    @parallel_root_only
+    def write_hdf5(self, filename):
+        r"""Export the set of S-Z fields to a set of HDF5 datasets.
+        
+        Parameters
+        ----------
+        filename : string
+            This file will be opened in "write" mode.
+        
+        Examples
+        --------
+        >>> szprj.write_hdf5("SZsloshing.h5")                        
+        """
+        import h5py
+        f = h5py.File(filename, "w")
+        for field, data in self.items():
+            f.create_dataset(field,data=data)
+        f.close()
+   
+    def keys(self):
+        return self.data.keys()
+
+    def items(self):
+        return self.data.items()
+
+    def values(self):
+        return self.data.values()
+    
+    def has_key(self, key):
+        return key in self.data.keys()
+
+    def __getitem__(self, key):
+        return self.data[key]
+
+    @property
+    def shape(self):
+        return (self.nx,self.nx)
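
An end-to-end sketch of the workflow the new module enables (the dataset path
comes from the tests below; the frequencies and image size are hypothetical):

    from yt.mods import load
    from yt.analysis_modules.sunyaev_zeldovich.api import SZProjection

    pf = load("DD0010/moving7_0010")
    szprj = SZProjection(pf, [90., 180., 240.], high_order=True)
    szprj.on_axis("z", width=(1.0, "mpc"), nx=400)  # project along z
    szprj.write_fits("SZ_moving7")   # SZ_moving7.fits, one HDU per field
    szprj.write_png("SZ_moving7")    # one PNG per frequency plus Tau, TeSZ
    print szprj["90_GHz"].shape      # ImageArray, (400, 400)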

diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/analysis_modules/sunyaev_zeldovich/setup.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/setup.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('sunyaev_zeldovich', parent_package, top_path)
+    config.add_subpackage("tests")
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -0,0 +1,139 @@
+"""
+Unit test the sunyaev_zeldovich analysis module.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.frontends.stream.api import load_uniform_grid
+from yt.funcs import get_pbar, mylog
+from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, \
+     mh, cm_per_km, kboltz, Tcmb, hcgs, clight, sigma_thompson
+from yt.testing import *
+from yt.utilities.answer_testing.framework import requires_pf, \
+     GenericArrayTest, data_dir_load, GenericImageTest
+try:
+    from yt.analysis_modules.sunyaev_zeldovich.projection import SZProjection, I0
+except ImportError:
+    pass
+import numpy as np
+try:
+    import SZpack
+except ImportError:
+    pass
+
+mue = 1./0.88
+freqs = np.array([30., 90., 240.])
+
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+def full_szpack3d(pf, xo):
+    data = pf.h.grids[0]
+    dz = pf.h.get_smallest_dx()*pf.units["cm"]
+    nx,ny,nz = data["Density"].shape
+    dn = np.zeros((nx,ny,nz))
+    Dtau = sigma_thompson*data["Density"]/(mh*mue)*dz
+    Te = data["Temperature"]/K_per_keV
+    betac = data["z-velocity"]/clight
+    pbar = get_pbar("Computing 3-D cell-by-cell S-Z signal for comparison.", nx)
+    for i in xrange(nx):
+        pbar.update(i)
+        for j in xrange(ny):
+            for k in xrange(nz):
+                dn[i,j,k] = SZpack.compute_3d(xo, Dtau[i,j,k],
+                                              Te[i,j,k], betac[i,j,k],
+                                              1.0, 0.0, 0.0, 1.0e-5)
+    pbar.finish()
+    return I0*xo**3*np.sum(dn, axis=2)
+
+def setup_cluster():
+
+    R = 1000.
+    r_c = 100.
+    rho_c = 1.673e-26
+    beta = 1.
+    T0 = 4.
+    nx,ny,nz = 16,16,16
+    c = 0.17
+    a_c = 30.
+    a = 200.
+    v0 = 300.*cm_per_km
+    ddims = (nx,ny,nz)
+
+    x, y, z = np.mgrid[-R:R:nx*1j,
+                       -R:R:ny*1j,
+                       -R:R:nz*1j]
+
+    r = np.sqrt(x**2+y**2+z**2)
+
+    dens = np.zeros(ddims)
+    dens = rho_c*(1.+(r/r_c)**2)**(-1.5*beta)
+    temp = T0*K_per_keV/(1.+r/a)*(c+r/a_c)/(1.+r/a_c)
+    velz = v0*temp/(T0*K_per_keV)
+
+    data = {}
+    data["Density"] = dens
+    data["Temperature"] = temp
+    data["x-velocity"] = np.zeros(ddims)
+    data["y-velocity"] = np.zeros(ddims)
+    data["z-velocity"] = velz
+
+    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
+
+    L = 2*R*cm_per_kpc
+    dl = L/nz
+
+    pf = load_uniform_grid(data, ddims, L, bbox=bbox)
+
+    return pf
+
+ at requires_module("SZpack")
+def test_projection():
+    pf = setup_cluster()
+    nx,ny,nz = pf.domain_dimensions
+    xinit = 1.0e9*hcgs*freqs/(kboltz*Tcmb)
+    szprj = SZProjection(pf, freqs, mue=mue, high_order=True)
+    szprj.on_axis(2, nx=nx)
+    deltaI = np.zeros((3,nx,ny))
+    for i in xrange(3):
+        deltaI[i,:,:] = full_szpack3d(pf, xinit[i])
+        yield assert_almost_equal, deltaI[i,:,:], szprj["%d_GHz" % int(freqs[i])], 6
+
+M7 = "DD0010/moving7_0010"
+ at requires_module("SZpack")
+ at requires_pf(M7)
+def test_M7_onaxis():
+    pf = data_dir_load(M7)
+    szprj = SZProjection(pf, freqs)
+    szprj.on_axis(2, nx=100)
+    def onaxis_array_func():
+        return szprj.data
+    def onaxis_image_func(filename_prefix):
+        szprj.write_png(filename_prefix)
+    for test in [GenericArrayTest(pf, onaxis_array_func),
+                 GenericImageTest(pf, onaxis_image_func, 3)]:
+        test_M7_onaxis.__name__ = test.description
+        yield test
+
+ at requires_module("SZpack")
+ at requires_pf(M7)
+def test_M7_offaxis():
+    pf = data_dir_load(M7)
+    szprj = SZProjection(pf, freqs)
+    szprj.off_axis(np.array([0.1,-0.2,0.4]), nx=100)
+    def offaxis_array_func():
+        return szprj.data
+    def offaxis_image_func(filename_prefix):
+        szprj.write_png(filename_prefix)
+    for test in [GenericArrayTest(pf, offaxis_array_func),
+                 GenericImageTest(pf, offaxis_image_func, 3)]:
+        test_M7_offaxis.__name__ = test.description
+        yield test

diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -498,7 +498,7 @@
             points[:, 2] = points[:, 2] / self.period[2]
             fKD.qv_many = points.T
             fKD.nn_tags = np.asfortranarray(np.empty((1, points.shape[0]), dtype='int64'))
-            find_many_nn_nearest_neighbors()
+            fKD.find_many_nn_nearest_neighbors()
             # The -1 is for fortran counting.
             n = fKD.nn_tags[0,:] - 1
         return n

diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -29,6 +29,7 @@
     pf = data_dir_load(gc)
     yield assert_equal, str(pf), "data.0077.3d.hdf5"
     for test in small_patch_amr(gc, _fields):
+        test_gc.__name__ = test.description
         yield test
 
 tb = "TurbBoxLowRes/data.0005.3d.hdf5"
@@ -37,4 +38,5 @@
     pf = data_dir_load(tb)
     yield assert_equal, str(pf), "data.0005.3d.hdf5"
     for test in small_patch_amr(tb, _fields):
+        test_tb.__name__ = test.description
         yield test

diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -29,6 +29,7 @@
     pf = data_dir_load(m7)
     yield assert_equal, str(pf), "moving7_0010"
     for test in small_patch_amr(m7, _fields):
+        test_moving7.__name__ = test.description
         yield test
 
 g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
@@ -37,4 +38,5 @@
     pf = data_dir_load(g30)
     yield assert_equal, str(pf), "galaxy0030"
     for test in big_patch_amr(g30, _fields):
+        test_galaxy0030.__name__ = test.description
         yield test

diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -29,6 +29,7 @@
     pf = data_dir_load(sloshing)
     yield assert_equal, str(pf), "sloshing_low_res_hdf5_plt_cnt_0300"
     for test in small_patch_amr(sloshing, _fields):
+        test_sloshing.__name__ = test.description
         yield test
 
 _fields_2d = ("Temperature", "Density")
@@ -39,4 +40,5 @@
     pf = data_dir_load(wt)
     yield assert_equal, str(pf), "windtunnel_4lev_hdf5_plt_cnt_0030"
     for test in small_patch_amr(wt, _fields_2d):
+        test_wind_tunnel.__name__ = test.description
         yield test

diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/frontends/orion/tests/test_outputs.py
--- a/yt/frontends/orion/tests/test_outputs.py
+++ b/yt/frontends/orion/tests/test_outputs.py
@@ -29,6 +29,7 @@
     pf = data_dir_load(radadvect)
     yield assert_equal, str(pf), "plt00000"
     for test in small_patch_amr(radadvect, _fields):
+        test_radadvect.__name__ = test.description
         yield test
 
 rt = "RadTube/plt00500"
@@ -37,4 +38,5 @@
     pf = data_dir_load(rt)
     yield assert_equal, str(pf), "plt00500"
     for test in small_patch_amr(rt, _fields):
+        test_radtube.__name__ = test.description
         yield test

diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/gui/reason/extdirect_router.py
--- a/yt/gui/reason/extdirect_router.py
+++ b/yt/gui/reason/extdirect_router.py
@@ -9,6 +9,13 @@
 This code was released under the BSD License.
 """
 
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 import inspect
 
 class DirectException(Exception):
@@ -186,12 +193,4 @@
 
 
 
-"""
 
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -14,6 +14,7 @@
 
 import itertools as it
 import numpy as np
+import importlib
 from yt.funcs import *
 from numpy.testing import assert_array_equal, assert_almost_equal, \
     assert_approx_equal, assert_array_almost_equal, assert_equal, \
@@ -252,3 +253,23 @@
                     list_of_kwarg_dicts[i][key] = keywords[key][0]
 
     return list_of_kwarg_dicts
+
+def requires_module(module):
+    """
+    Decorator that takes a module name as an argument and tries to import it.
+    If the module imports without issue, the function is returned, but if not, 
+    a null function is returned. This is so tests that depend on certain modules
+    being imported will not fail if the module is not installed on the testing
+    platform.
+    """
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        return func
+    try:
+        importlib.import_module(module)
+    except ImportError:
+        return ffalse
+    else:
+        return ftrue
+    
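
A minimal sketch of the decorator in use (module name borrowed from the tests
in this changeset):

    from yt.testing import requires_module

    @requires_module("SZpack")
    def test_needs_szpack():
        import SZpack            # only reached when SZpack is importable
        assert SZpack is not None

    # If SZpack is missing, requires_module replaces the function with a
    # no-op lambda, so the suite passes over it instead of erroring out.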

diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -24,6 +24,7 @@
 import shelve
 import zlib
 import tempfile
+import glob
 
 from matplotlib.testing.compare import compare_images
 from nose.plugins import Plugin
@@ -584,6 +585,16 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newp == oldp)
 
+def compare_image_lists(new_result, old_result, decimals):
+    fns = ['old.png', 'new.png']
+    num_images = len(old_result)
+    assert(num_images > 0)
+    for i in xrange(num_images):
+        mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result[i])))
+        mpimg.imsave(fns[1], np.loads(zlib.decompress(new_result[i])))
+        assert compare_images(fns[0], fns[1], 10**(-decimals)) == None
+        for fn in fns: os.remove(fn)
+            
 class PlotWindowAttributeTest(AnswerTestingTest):
     _type_name = "PlotWindowAttribute"
     _attrs = ('plot_type', 'plot_field', 'plot_axis', 'attr_name', 'attr_args')
@@ -611,11 +622,71 @@
         return [zlib.compress(image.dumps())]
 
     def compare(self, new_result, old_result):
-        fns = ['old.png', 'new.png']
-        mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result[0])))
-        mpimg.imsave(fns[1], np.loads(zlib.decompress(new_result[0])))
-        assert compare_images(fns[0], fns[1], 10**(-self.decimals)) == None
-        for fn in fns: os.remove(fn)
+        compare_image_lists(new_result, old_result, self.decimals)
+
+class GenericArrayTest(AnswerTestingTest):
+    _type_name = "GenericArray"
+    _attrs = ('array_func_name','args','kwargs')
+    def __init__(self, pf_fn, array_func, args=None, kwargs=None, decimals=None):
+        super(GenericArrayTest, self).__init__(pf_fn)
+        self.array_func = array_func
+        self.array_func_name = array_func.func_name
+        self.args = args
+        self.kwargs = kwargs
+        self.decimals = decimals
+    def run(self):
+        if self.args is None:
+            args = []
+        else:
+            args = self.args
+        if self.kwargs is None:
+            kwargs = {}
+        else:
+            kwargs = self.kwargs
+        return self.array_func(*args, **kwargs)
+    def compare(self, new_result, old_result):
+        assert_equal(len(new_result), len(old_result),
+                                          err_msg="Number of outputs not equal.",
+                                          verbose=True)
+        for k in new_result:
+            if self.decimals is None:
+                assert_equal(new_result[k], old_result[k])
+            else:
+                assert_allclose(new_result[k], old_result[k], 10**(-self.decimals))
+
+class GenericImageTest(AnswerTestingTest):
+    _type_name = "GenericImage"
+    _attrs = ('image_func_name','args','kwargs')
+    def __init__(self, pf_fn, image_func, decimals, args=None, kwargs=None):
+        super(GenericImageTest, self).__init__(pf_fn)
+        self.image_func = image_func
+        self.image_func_name = image_func.func_name
+        self.args = args
+        self.kwargs = kwargs
+        self.decimals = decimals
+    def run(self):
+        if self.args is None:
+            args = []
+        else:
+            args = self.args
+        if self.kwargs is None:
+            kwargs = {}
+        else:
+            kwargs = self.kwargs
+        comp_imgs = []
+        tmpdir = tempfile.mkdtemp()
+        image_prefix = os.path.join(tmpdir,"test_img")
+        self.image_func(image_prefix, *args, **kwargs)
+        imgs = glob.glob(image_prefix+"*")
+        assert(len(imgs) > 0)
+        for img in imgs:
+            img_data = mpimg.imread(img)
+            os.remove(img)
+            comp_imgs.append(zlib.compress(img_data.dumps()))
+        return comp_imgs
+    def compare(self, new_result, old_result):
+        compare_image_lists(new_result, old_result, self.decimals)
+        
 
 def requires_pf(pf_fn, big_data = False, file_check = False):
     def ffalse(func):
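
A minimal sketch of wiring the two new generic tests into an answer test
(the dataset and field are borrowed from the Enzo tests; the array/image
functions here are hypothetical):

    from yt.utilities.answer_testing.framework import \
        requires_pf, data_dir_load, GenericArrayTest, GenericImageTest

    M7 = "DD0010/moving7_0010"

    @requires_pf(M7)
    def test_generic_m7():
        pf = data_dir_load(M7)
        def array_func():
            return {"Density": pf.h.grids[0]["Density"]}
        def image_func(filename_prefix):
            # Write one or more images starting with filename_prefix; the
            # test globs that prefix and stores whatever it finds.
            import matplotlib.pyplot as plt
            plt.imsave(filename_prefix + "_dens.png",
                       pf.h.grids[0]["Density"][:, :, 0])
        yield GenericArrayTest(pf, array_func, decimals=12)
        yield GenericImageTest(pf, image_func, 12)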

diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -84,6 +84,7 @@
 erg_per_keV = erg_per_eV * 1.0e3
 K_per_keV = erg_per_keV / boltzmann_constant_cgs
 keV_per_K = 1.0 / K_per_keV
+Tcmb = 2.726 # Current CMB temperature in Kelvin
 
 #Short cuts
 G = gravitational_constant_cgs

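As a quick sanity check on the new constant, the existing conversion
factors give the CMB thermal energy directly (a sketch; the numerical
value of K_per_keV is quoted here only to keep the snippet
self-contained):

    # Tcmb is in Kelvin; K_per_keV = erg_per_keV / boltzmann_constant_cgs.
    Tcmb = 2.726                 # K
    K_per_keV = 1.1604505e7      # K per keV
    kT_cmb = Tcmb / K_per_keV    # ~2.35e-7 keV, i.e. ~0.235 meV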
diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -19,6 +19,7 @@
     y_dict, \
     axis_names
 from .volume_rendering.api import off_axis_projection
+from image_writer import write_fits
 from yt.data_objects.image_array import ImageArray
 from yt.utilities.lib.misc_utilities import \
     pixelize_cylinder
@@ -269,8 +270,8 @@
             output.create_dataset(field,data=self[field])
         output.close()
 
-    def export_fits(self, filename_prefix, fields = None, clobber=False,
-                    other_keys=None, gzip_file=False, units="1"):
+    def export_fits(self, filename_prefix, fields=None, clobber=False,
+                    other_keys=None, units="cm", sky_center=(0.0,0.0), D_A=None):
 
         """
         This will export a set of FITS images of either the fields specified
@@ -279,106 +280,80 @@
         existing FITS file.
 
         This requires the *pyfits* module, which is a standalone module
-        provided by STSci to interface with FITS-format files.
+        provided by STScI to interface with FITS-format files, and is also
+        part of AstroPy.
         """
         r"""Export a set of pixelized fields to a FITS file.
 
         This will export a set of FITS images of either the fields specified
-        or all the fields already in the object.  The output filename is the
-        the specified prefix.
+        or all the fields already in the object.
 
         Parameters
         ----------
         filename_prefix : string
-            This prefix will be prepended to every FITS file name.
+            This prefix will be prepended to the FITS file name.
         fields : list of strings
             These fields will be pixelized and output.
         clobber : boolean
             If the file exists, this governs whether we will overwrite.
         other_keys : dictionary, optional
             A set of header keys and values to write into the FITS header.
-        gzip_file : boolean, optional
-            gzip the file after writing, default False
         units : string, optional
-            the length units that the coordinates are written in, default '1'
+            the length units that the coordinates are written in, default 'cm'.
+            If units is set to "deg", sky coordinates are assumed to be
+            requested.
+        sky_center : array_like, optional
+            Center of the image in (ra,dec) in degrees if sky coordinates
+            (units="deg") are requested.
+        D_A : float or tuple, optional
+            Angular diameter distance, given in code units as a float or
+            a tuple containing the value and the length unit. Required if
+            using sky coordinates.
         """
-        
-        import pyfits
-        from os import system
-        
+
+        if units == "deg" and D_A is None:
+            mylog.error("Sky coordinates require an angular diameter distance. Please specify D_A.")
+            raise ValueError("Sky coordinates require D_A to be specified.")
+        if iterable(D_A):
+            dist = D_A[0]/self.pf.units[D_A[1]]
+        else:
+            dist = D_A
+
+        if other_keys is None:
+            hdu_keys = {}
+        else:
+            hdu_keys = other_keys
+            
         extra_fields = ['x','y','z','px','py','pz','pdx','pdy','pdz','weight_field']
-        if filename_prefix.endswith('.fits'): filename_prefix=filename_prefix[:-5]
         if fields is None: 
             fields = [field for field in self.data_source.fields 
                       if field not in extra_fields]
 
+        coords = {}
         nx, ny = self.buff_size
-        dx = (self.bounds[1]-self.bounds[0])/nx*self.pf[units]
-        dy = (self.bounds[3]-self.bounds[2])/ny*self.pf[units]
-        xmin = self.bounds[0]*self.pf[units]
-        ymin = self.bounds[2]*self.pf[units]
-        simtime = self.pf.current_time
+        dx = (self.bounds[1]-self.bounds[0])/nx
+        dy = (self.bounds[3]-self.bounds[2])/ny
+        if units == "deg":  
+            coords["dx"] = -np.rad2deg(dx/dist)
+            coords["dy"] = np.rad2deg(dy/dist)
+            coords["xctr"] = sky_center[0]
+            coords["yctr"] = sky_center[1]
+            hdu_keys["MTYPE1"] = "EQPOS"
+            hdu_keys["MFORM1"] = "RA,DEC"
+            hdu_keys["CTYPE1"] = "RA---TAN"
+            hdu_keys["CTYPE2"] = "DEC--TAN"
+        else:
+            coords["dx"] = dx*self.pf.units[units]
+            coords["dy"] = dy*self.pf.units[units]
+            coords["xctr"] = 0.5*(self.bounds[0]+self.bounds[1])*self.pf.units[units]
+            coords["yctr"] = 0.5*(self.bounds[2]+self.bounds[3])*self.pf.units[units]
+        coords["units"] = units
+        
+        hdu_keys["Time"] = self.pf.current_time
 
-        hdus = []
-
-        first = True
-        
-        for field in fields:
-
-            if (first) :
-                hdu = pyfits.PrimaryHDU(self[field])
-                first = False
-            else :
-                hdu = pyfits.ImageHDU(self[field])
+        data = dict([(field,self[field]) for field in fields])
+        write_fits(data, filename_prefix, clobber=clobber, coords=coords,
+                   other_keys=hdu_keys)
                 
-            if self.data_source.has_key('weight_field'):
-                weightname = self.data_source._weight
-                if weightname is None: weightname = 'None'
-                field = field +'_'+weightname
-
-            hdu.header.update("Field", field)
-            hdu.header.update("Time", simtime)
-
-            hdu.header.update('WCSNAMEP', "PHYSICAL")            
-            hdu.header.update('CTYPE1P', "LINEAR")
-            hdu.header.update('CTYPE2P', "LINEAR")
-            hdu.header.update('CRPIX1P', 0.5)
-            hdu.header.update('CRPIX2P', 0.5)
-            hdu.header.update('CRVAL1P', xmin)
-            hdu.header.update('CRVAL2P', ymin)
-            hdu.header.update('CDELT1P', dx)
-            hdu.header.update('CDELT2P', dy)
-                    
-            hdu.header.update('CTYPE1', "LINEAR")
-            hdu.header.update('CTYPE2', "LINEAR")                                
-            hdu.header.update('CUNIT1', units)
-            hdu.header.update('CUNIT2', units)
-            hdu.header.update('CRPIX1', 0.5)
-            hdu.header.update('CRPIX2', 0.5)
-            hdu.header.update('CRVAL1', xmin)
-            hdu.header.update('CRVAL2', ymin)
-            hdu.header.update('CDELT1', dx)
-            hdu.header.update('CDELT2', dy)
-
-            if (other_keys is not None) :
-
-                for k,v in other_keys.items() :
-
-                    hdu.header.update(k,v)
-
-            hdus.append(hdu)
-
-            del hdu
-            
-        hdulist = pyfits.HDUList(hdus)
-
-        hdulist.writeto("%s.fits" % (filename_prefix), clobber=clobber)
-        
-        if (gzip_file) :
-            clob = ""
-            if (clobber) : clob = "-f"
-            system("gzip "+clob+" %s.fits" % (filename_prefix))
-        
     def open_in_ds9(self, field, take_log=True):
         """
         This will open a given field in the DS9 viewer.

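A sketch of the new sky-coordinate path through export_fits; the dataset,
slice parameters, and angular diameter distance below are placeholders:

    from yt.mods import load

    pf = load("MyData/data0001")               # hypothetical dataset
    slc = pf.h.slice(2, 0.5)
    frb = slc.to_frb((1.0, 'unitary'), 512)
    # units="deg" requests RA/Dec axes, so D_A (the angular diameter
    # distance) must be given, either in code units or as (value, unit).
    frb.export_fits("my_image", fields=["Density"], clobber=True,
                    units="deg", sky_center=(30.0, 45.0),
                    D_A=(100.0, "mpc"))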
diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -333,7 +333,8 @@
 
 def write_projection(data, filename, colorbar=True, colorbar_label=None, 
                      title=None, limits=None, take_log=True, figsize=(8,6),
-                     dpi=100, cmap_name='algae'):
+                     dpi=100, cmap_name='algae', extent=None, xlabel=None,
+                     ylabel=None):
     r"""Write a projection or volume rendering to disk with a variety of 
     pretty parameters such as limits, title, colorbar, etc.  write_projection
     uses the standard matplotlib interface to create the figure.  N.B. This code
@@ -392,16 +393,22 @@
     # Create the figure and paint the data on
     fig = matplotlib.figure.Figure(figsize=figsize)
     ax = fig.add_subplot(111)
-    fig.tight_layout()
-
-    cax = ax.imshow(data, vmin=limits[0], vmax=limits[1], norm=norm, cmap=cmap_name)
+    
+    cax = ax.imshow(data, vmin=limits[0], vmax=limits[1], norm=norm,
+                    extent=extent, cmap=cmap_name)
     
     if title:
         ax.set_title(title)
 
+    if xlabel:
+        ax.set_xlabel(xlabel)
+    if ylabel:
+        ax.set_ylabel(ylabel)
+
     # Suppress the x and y pixel counts
-    ax.set_xticks(())
-    ax.set_yticks(())
+    if extent is None:
+        ax.set_xticks(())
+        ax.set_yticks(())
 
     # Add a color bar and label if requested
     if colorbar:
@@ -409,6 +416,8 @@
         if colorbar_label:
             cbar.ax.set_ylabel(colorbar_label)
 
+    fig.tight_layout()
+        
     suffix = get_image_suffix(filename)
 
     if suffix == '':
@@ -429,70 +438,89 @@
     return filename
 
 
-def write_fits(image, filename_prefix, clobber=True, coords=None, gzip_file=False) :
+def write_fits(image, filename_prefix, clobber=True, coords=None,
+               other_keys=None):
     """
     This will export a FITS image of a floating point array. The output filename is
     *filename_prefix*. If clobber is set to True, this will overwrite any existing
     FITS file.
     
     This requires the *pyfits* module, which is a standalone module
-    provided by STSci to interface with FITS-format files.
+    provided by STScI to interface with FITS-format files, and is also part of
+    AstroPy.
     """
-    r"""Write out a floating point array directly to a FITS file, optionally
-    adding coordinates. 
+    r"""Write out floating point arrays directly to a FITS file, optionally
+    adding coordinates and header keywords.
         
     Parameters
     ----------
-    image : array_like
-        This is an (unscaled) array of floating point values, shape (N,N,) to save
-        in a FITS file.
+    image : array_like, or dict of array_like objects
+        This is either an (unscaled) array of floating point values, or a dict of
+        such arrays, shape (N,N,) to save in a FITS file. 
     filename_prefix : string
         This prefix will be prepended to every FITS file name.
     clobber : boolean
         If the file exists, this governs whether we will overwrite.
     coords : dictionary, optional
         A set of header keys and values to write to the FITS header to set up
-        a coordinate system. 
-    gzip_file : boolean, optional
-        gzip the file after writing, default False
+        a coordinate system, which is assumed to be linear unless specified
+        otherwise in *other_keys*. Recognized keys:
+        "units": the length units
+        "xctr", "yctr": the center of the image
+        "dx", "dy": the pixel width in each direction
+    other_keys : dictionary, optional
+        A set of header keys and values to write into the FITS header.
     """
+
+    try:
+        import pyfits
+    except ImportError:
+        try:
+            import astropy.io.fits as pyfits
+        except ImportError:
+            raise ImportError("You don't have pyFITS or AstroPy installed.")
     
-    import pyfits
     from os import system
     
-    if filename_prefix.endswith('.fits'): filename_prefix=filename_prefix[:-5]
-    
-    hdu = pyfits.PrimaryHDU(image)
+    try:
+        image.keys()
+        image_dict = image
+    except AttributeError:
+        image_dict = dict(yt_data=image)
 
-    if (coords is not None) :
+    hdulist = [pyfits.PrimaryHDU()]
 
-        hdu.header.update('WCSNAMEP', "PHYSICAL")
-        hdu.header.update('CTYPE1P', "LINEAR")
-        hdu.header.update('CTYPE2P', "LINEAR")
-        hdu.header.update('CRPIX1P', 0.5)
-        hdu.header.update('CRPIX2P', 0.5)
-        hdu.header.update('CRVAL1P', coords["xmin"])
-        hdu.header.update('CRVAL2P', coords["ymin"])
-        hdu.header.update('CDELT1P', coords["dx"])
-        hdu.header.update('CDELT2P', coords["dy"])
+    for key in image_dict.keys():
+
+        mylog.info("Writing image block \"%s\"" % (key))
+        hdu = pyfits.ImageHDU(image_dict[key])
+        hdu.update_ext_name(key)
         
-        hdu.header.update('CTYPE1', "LINEAR")
-        hdu.header.update('CTYPE2', "LINEAR")
-        hdu.header.update('CUNIT1', coords["units"])
-        hdu.header.update('CUNIT2', coords["units"])
-        hdu.header.update('CRPIX1', 0.5)
-        hdu.header.update('CRPIX2', 0.5)
-        hdu.header.update('CRVAL1', coords["xmin"])
-        hdu.header.update('CRVAL2', coords["ymin"])
-        hdu.header.update('CDELT1', coords["dx"])
-        hdu.header.update('CDELT2', coords["dy"])
+        if coords is not None:
 
-    hdu.writeto("%s.fits" % (filename_prefix), clobber=clobber)
+            nx, ny = image_dict[key].shape
 
-    if (gzip_file) :
-        clob = ""
-        if (clobber) : clob="-f"
-        system("gzip "+clob+" %s.fits" % (filename_prefix))
+            hdu.header.update('CUNIT1', coords["units"])
+            hdu.header.update('CUNIT2', coords["units"])
+            hdu.header.update('CRPIX1', 0.5*(nx+1))
+            hdu.header.update('CRPIX2', 0.5*(ny+1))
+            hdu.header.update('CRVAL1', coords["xctr"])
+            hdu.header.update('CRVAL2', coords["yctr"])
+            hdu.header.update('CDELT1', coords["dx"])
+            hdu.header.update('CDELT2', coords["dy"])
+            # These are the defaults, but will get overwritten if
+            # the caller has specified them
+            hdu.header.update('CTYPE1', "LINEAR")
+            hdu.header.update('CTYPE2', "LINEAR")
+                                    
+        if other_keys is not None:
+            for k,v in other_keys.items():
+                hdu.header.update(k,v)
+
+        hdulist.append(hdu)
+
+    hdulist = pyfits.HDUList(hdulist)
+    hdulist.writeto("%s.fits" % (filename_prefix), clobber=clobber)                    
 
 def display_in_notebook(image, max_val=None):
     """

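With the dict-of-arrays support above, several image blocks can go into a
single FITS file; a minimal sketch with synthetic data (the header values
are illustrative):

    import numpy as np
    from yt.visualization.image_writer import write_fits

    nx = ny = 64
    images = {"Density": np.random.random((nx, ny)),
              "Temperature": np.random.random((nx, ny))}
    coords = {"units": "cm",
              "xctr": 0.0, "yctr": 0.0,     # image center
              "dx": 1.0e21, "dy": 1.0e21}   # pixel widths
    # Each dict entry becomes a named ImageHDU; other_keys go into
    # every header.
    write_fits(images, "two_fields", clobber=True, coords=coords,
               other_keys={"EXPOSURE": 1.0e5})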
diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -94,8 +94,10 @@
     for ax in 'xyz':
         for attr_name in ATTR_ARGS.keys():
             for args in ATTR_ARGS[attr_name]:
-                yield PlotWindowAttributeTest(pf, plot_field, ax, attr_name,
-                                              args, decimals)
+                test = PlotWindowAttributeTest(pf, plot_field, ax, attr_name,
+                                               args, decimals)
+                test_attributes.__name__ = test.description
+                yield test
 
 
 @requires_pf(WT)

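The __name__ reassignment above matters because nose labels yielded tests
with the generator's current __name__; a schematic sketch of the pattern
(make_test is a hypothetical factory, not part of the changeset):

    def test_attributes():
        for case in ("zoom", "pan"):
            test = make_test(case)  # hypothetical test factory
            # nose picks up the generator's __name__ at yield time, so
            # each yielded test gets a distinguishable label.
            test_attributes.__name__ = "test_attributes_%s" % case
            yield test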
diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -72,6 +72,9 @@
         cubical, but if not, it is left/right, top/bottom, front/back.
     resolution : int or list of ints
         The number of pixels in each direction.
+    transfer_function : `yt.visualization.volume_rendering.TransferFunction`
+        The transfer function used to map values to colors in an image.  If
+        not specified, defaults to a ProjectionTransferFunction.
     north_vector : array_like, optional
         The 'up' direction for the plane of rays.  If not specified, calculated
         automatically.
@@ -152,7 +155,7 @@
     _tf_figure = None
     _render_figure = None
     def __init__(self, center, normal_vector, width,
-                 resolution, transfer_function,
+                 resolution, transfer_function = None,
                  north_vector = None, steady_north=False,
                  volume = None, fields = None,
                  log_fields = None,
@@ -1425,7 +1428,7 @@
 
 class MosaicCamera(Camera):
     def __init__(self, center, normal_vector, width,
-                 resolution, transfer_function,
+                 resolution, transfer_function = None,
                  north_vector = None, steady_north=False,
                  volume = None, fields = None,
                  log_fields = None,

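With transfer_function now optional, a camera can be constructed without
one, assuming the constructor substitutes a ProjectionTransferFunction as
the docstring above states; a sketch with a placeholder dataset:

    from yt.mods import load

    pf = load("MyData/data0001")            # hypothetical dataset
    cam = pf.h.camera([0.5, 0.5, 0.5],      # center
                      [1.0, 1.0, 1.0],      # normal vector
                      0.3,                  # width
                      256,                  # resolution
                      fields=["Density"])   # no transfer_function passed
    image = cam.snapshot()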
diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/visualization/volume_rendering/multi_texture.py
--- a/yt/visualization/volume_rendering/multi_texture.py
+++ b/yt/visualization/volume_rendering/multi_texture.py
@@ -35,6 +35,14 @@
 I hope this helps,
   Almar
 """
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 from yt.mods import *
 from yt.funcs import *
 
@@ -300,14 +308,3 @@
     ax.Draw()
 
     return mtex, ax
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r 73eceed0e23d36d8e56a0066d15b2be873c704a2 -r 4628e092abdca5ac1b29449e093cbe3887d26c1e yt/visualization/volume_rendering/transfer_function_helper.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -0,0 +1,211 @@
+"""
+A helper class to build, display, and modify transfer functions for volume
+rendering.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.funcs import mylog
+from yt.data_objects.profiles import BinnedProfile1D
+from yt.visualization.volume_rendering.api import ColorTransferFunction
+from yt.visualization._mpl_imports import FigureCanvasAgg
+from matplotlib.figure import Figure
+from IPython.core.display import Image
+import cStringIO
+import numpy as np
+
+
+class TransferFunctionHelper(object):
+
+    profiles = None
+
+    def __init__(self, pf):
+        r"""A transfer function helper.
+
+        This attempts to help set up a good transfer function by finding
+        bounds, handling linear/log options, and displaying the transfer
+        function combined with 1D profiles of rendering quantity.
+
+        Parameters
+        ----------
+        pf: A StaticOutput instance
+            A static output that is currently being rendered. This is used to
+            help set up data bounds.
+
+        Notes
+        -----
+        """
+        self.pf = pf
+        self.field = None
+        self.log = False
+        self.tf = None
+        self.bounds = None
+        self.grey_opacity = True
+        self.profiles = {}
+
+    def set_bounds(self, bounds=None):
+        """
+        Set the bounds of the transfer function.
+
+        Parameters
+        ----------
+        bounds: array-like, length 2, optional
+            A length 2 list/array in the form [min, max]. These should be the
+            raw values and not the logarithm of the min and max. If bounds is
+            None, the bounds of the data are calculated from all of the data
+            in the dataset.  This can be slow for very large datasets.
+        """
+        if bounds is None:
+            bounds = self.pf.h.all_data().quantities['Extrema'](self.field)[0]
+        self.bounds = bounds
+
+        # Do some error checking.
+        assert(len(self.bounds) == 2)
+        if self.log:
+            assert(self.bounds[0] > 0.0)
+            assert(self.bounds[1] > 0.0)
+        return
+
+    def set_field(self, field):
+        """
+        Set the field to be rendered
+
+        Parameters
+        ----------
+        field: string
+            The field to be rendered.
+        """
+        self.field = field
+
+    def set_log(self, log):
+        """
+        Set whether or not the transfer function should be in log or linear
+        space. Also modifies the pf.field_info[field].take_log attribute to
+        stay in sync with this setting.
+
+        Parameters
+        ----------
+        log: boolean
+            Sets whether the transfer function should use log or linear space.
+        """
+        self.log = log
+        self.pf.h
+        self.pf.field_info[self.field].take_log = log
+
+    def build_transfer_function(self):
+        """
+        Builds the transfer function according to the current state of the
+        TransferFunctionHelper.
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+
+        A ColorTransferFunction object.
+
+        """
+        if self.bounds is None:
+            mylog.info('Calculating data bounds. This may take a while.' +
+                       '  Set the .bounds to avoid this.')
+            self.set_bounds()
+
+        if self.log:
+            mi, ma = np.log10(self.bounds[0]), np.log10(self.bounds[1])
+        else:
+            mi, ma = self.bounds
+        self.tf = ColorTransferFunction((mi, ma),
+                                        grey_opacity=self.grey_opacity,
+                                        nbins=512)
+        return self.tf
+
+    def plot(self, fn=None, profile_field=None, profile_weight=None):
+        """
+        Save the current transfer function to a bitmap, or display
+        it inline.
+
+        Parameters
+        ----------
+        fn: string, optional
+            Filename to save the image to. If None, an image is returned
+            for inline display in an IPython session.
+
+        Returns
+        -------
+
+        If fn is None, an IPython Image object is returned for inline display.
+
+        """
+        if self.tf is None:
+            self.build_transfer_function()
+        tf = self.tf
+        if self.log:
+            xfunc = np.logspace
+            xmi, xma = np.log10(self.bounds[0]), np.log10(self.bounds[1])
+        else:
+            xfunc = np.linspace
+            xmi, xma = self.bounds
+
+        x = xfunc(xmi, xma, tf.nbins)
+        y = tf.funcs[3].y
+        w = np.append(x[1:]-x[:-1], x[-1]-x[-2])
+        colors = np.array([tf.funcs[0].y, tf.funcs[1].y, tf.funcs[2].y,
+                           np.ones_like(x)]).T
+
+        fig = Figure(figsize=[6, 3])
+        canvas = FigureCanvasAgg(fig)
+        ax = fig.add_axes([0.2, 0.2, 0.75, 0.75])
+        ax.bar(x, tf.funcs[3].y, w, edgecolor=[0.0, 0.0, 0.0, 0.0],
+               log=True, color=colors, bottom=[0])
+
+        if profile_field is not None:
+            try:
+                prof = self.profiles[self.field]
+            except KeyError:
+                self.setup_profile(profile_field, profile_weight)
+                prof = self.profiles[self.field]
+            if profile_field not in prof.keys():
+                prof.add_fields([profile_field], fractional=False,
+                                weight=profile_weight)
+            ax.plot(prof[self.field], prof[profile_field]*tf.funcs[3].y.max() /
+                    prof[profile_field].max(), color='w', linewidth=3)
+            ax.plot(prof[self.field], prof[profile_field]*tf.funcs[3].y.max() /
+                    prof[profile_field].max(), color='k')
+
+        ax.set_xscale({True: 'log', False: 'linear'}[self.log])
+        ax.set_xlim(x.min(), x.max())
+        ax.set_xlabel(self.pf.field_info[self.field].get_label())
+        ax.set_ylabel(r'$\mathrm{alpha}$')
+        ax.set_ylim(y.max()*1.0e-3, y.max()*2)
+
+        if fn is None:
+            f = cStringIO.StringIO()
+            canvas.print_figure(f)
+            f.seek(0)
+            img = f.read()
+            return Image(img)
+        else:
+            fig.savefig(fn)
+
+    def setup_profile(self, profile_field=None, profile_weight=None):
+        if profile_field is None:
+            profile_field = 'CellVolume'
+        prof = BinnedProfile1D(self.pf.h.all_data(), 128, self.field,
+                               self.bounds[0], self.bounds[1],
+                               log_space=self.log,
+                               lazy_reader=False, end_collect=False)
+        prof.add_fields([profile_field], fractional=False,
+                        weight=profile_weight)
+        self.profiles[self.field] = prof
+        return

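A sketch of the intended TransferFunctionHelper workflow (the dataset
path is a placeholder; note that set_field must precede set_bounds, since
the bounds are computed for the current field):

    from yt.mods import load
    from yt.visualization.volume_rendering.transfer_function_helper \
        import TransferFunctionHelper

    pf = load("MyData/data0001")   # hypothetical dataset
    tfh = TransferFunctionHelper(pf)
    tfh.set_field("Density")
    tfh.set_log(True)
    tfh.set_bounds()               # scans the dataset if no bounds given
    tf = tfh.build_transfer_function()
    tfh.plot("tf.png", profile_field="CellMassMsun")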

https://bitbucket.org/yt_analysis/yt-3.0/commits/46521c80a5bd/
Changeset:   46521c80a5bd
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-07 15:29:40
Summary:     Refactoring create_obj and upping gold standard name.
Affected #:  6 files

diff -r 4628e092abdca5ac1b29449e093cbe3887d26c1e -r 46521c80a5bd530c809a35e3845cb87d0f1716f1 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -52,7 +52,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold310',
+    gold_standard_filename = 'gold311',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None',
     thread_field_detection = 'False'

diff -r 4628e092abdca5ac1b29449e093cbe3887d26c1e -r 46521c80a5bd530c809a35e3845cb87d0f1716f1 yt/frontends/artio/tests/test_outputs.py
--- a/yt/frontends/artio/tests/test_outputs.py
+++ b/yt/frontends/artio/tests/test_outputs.py
@@ -19,7 +19,8 @@
     requires_pf, \
     data_dir_load, \
     PixelizedProjectionValuesTest, \
-    FieldValuesTest
+    FieldValuesTest, \
+    create_obj
 from yt.frontends.artio.api import ARTIOStaticOutput
 
 _fields = ("Temperature", "Density", "VelocityMagnitude",
@@ -39,7 +40,7 @@
                         sizmbhloz, axis, field, weight_field,
                         ds)
             yield FieldValuesTest(sizmbhloz, field, ds)
-        if ds is None: ds = pf.h.all_data()
-        s1 = ds["Ones"].sum()
-        s2 = sum(mask.sum() for block, mask in ds.blocks)
+        dobj = create_obj(pf, ds)
+        s1 = dobj["Ones"].sum()
+        s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2

diff -r 4628e092abdca5ac1b29449e093cbe3887d26c1e -r 46521c80a5bd530c809a35e3845cb87d0f1716f1 yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -19,7 +19,8 @@
     requires_pf, \
     data_dir_load, \
     PixelizedProjectionValuesTest, \
-    FieldValuesTest
+    FieldValuesTest, \
+    create_obj
 from yt.frontends.artio.api import ARTIOStaticOutput
 
 _fields = ("Temperature", "Density", "VelocityMagnitude",
@@ -39,7 +40,7 @@
                         output_00080, axis, field, weight_field,
                         ds)
             yield FieldValuesTest(output_00080, field, ds)
-        if ds is None: ds = pf.h.all_data()
-        s1 = ds["Ones"].sum()
-        s2 = sum(mask.sum() for block, mask in ds.blocks)
+        dobj = create_obj(pf, ds)
+        s1 = dobj["Ones"].sum()
+        s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2

diff -r 4628e092abdca5ac1b29449e093cbe3887d26c1e -r 46521c80a5bd530c809a35e3845cb87d0f1716f1 yt/frontends/sph/tests/test_owls.py
--- a/yt/frontends/sph/tests/test_owls.py
+++ b/yt/frontends/sph/tests/test_owls.py
@@ -21,7 +21,8 @@
     big_patch_amr, \
     data_dir_load, \
     PixelizedProjectionValuesTest, \
-    FieldValuesTest
+    FieldValuesTest, \
+    create_obj
 from yt.frontends.sph.api import OWLSStaticOutput
 
 _fields = (("deposit", "all_density"), ("deposit", "all_count"),
@@ -48,8 +49,7 @@
                         os33, axis, field, weight_field,
                         ds)
             yield FieldValuesTest(os33, field, ds)
-        if ds is None: ds = pf.h.all_data()
-        s1 = ds["Ones"].sum()
-        s2 = sum(mask.sum() for block, mask in ds.blocks)
+        dobj = create_obj(pf, ds)
+        s1 = dobj["Ones"].sum()
+        s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2
-

diff -r 4628e092abdca5ac1b29449e093cbe3887d26c1e -r 46521c80a5bd530c809a35e3845cb87d0f1716f1 yt/frontends/sph/tests/test_tipsy.py
--- a/yt/frontends/sph/tests/test_tipsy.py
+++ b/yt/frontends/sph/tests/test_tipsy.py
@@ -21,7 +21,8 @@
     big_patch_amr, \
     data_dir_load, \
     PixelizedProjectionValuesTest, \
-    FieldValuesTest
+    FieldValuesTest, \
+    create_obj
 from yt.frontends.sph.api import TipsyStaticOutput
 
 _fields = (("deposit", "all_density"),
@@ -57,9 +58,9 @@
                         pf, axis, field, weight_field,
                         ds)
             yield FieldValuesTest(pf, field, ds)
-        if ds is None: ds = pf.h.all_data()
-        s1 = ds["Ones"].sum()
-        s2 = sum(mask.sum() for block, mask in ds.blocks)
+        dobj = create_obj(pf, ds)
+        s1 = dobj["Ones"].sum()
+        s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2
 
 gasoline = "agora_1e11.00400/agora_1e11.00400"
@@ -88,7 +89,7 @@
                         pf, axis, field, weight_field,
                         ds)
             yield FieldValuesTest(pf, field, ds)
-        if ds is None: ds = pf.h.all_data()
-        s1 = ds["Ones"].sum()
-        s2 = sum(mask.sum() for block, mask in ds.blocks)
+        dobj = create_obj(pf, ds)
+        s1 = dobj["Ones"].sum()
+        s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2

diff -r 4628e092abdca5ac1b29449e093cbe3887d26c1e -r 46521c80a5bd530c809a35e3845cb87d0f1716f1 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -321,15 +321,6 @@
     def compare(self, new_result, old_result):
         raise RuntimeError
 
-    def create_obj(self, pf, obj_type):
-        # obj_type should be tuple of
-        #  ( obj_name, ( args ) )
-        if obj_type is None:
-            return pf.h.all_data()
-        cls = getattr(pf.h, obj_type[0])
-        obj = cls(*obj_type[1])
-        return obj
-
     def create_plot(self, pf, plot_type, plot_field, plot_axis, plot_kwargs = None):
         # plot_type should be a string
         # plot_args should be a tuple
@@ -385,7 +376,7 @@
         self.decimals = decimals
 
     def run(self):
-        obj = self.create_obj(self.pf, self.obj_type)
+        obj = create_obj(self.pf, self.obj_type)
         avg = obj.quantities["WeightedAverageQuantity"](self.field,
                              weight="Ones")
         (mi, ma), = obj.quantities["Extrema"](self.field)
@@ -412,7 +403,7 @@
         self.decimals = decimals
 
     def run(self):
-        obj = self.create_obj(self.pf, self.obj_type)
+        obj = create_obj(self.pf, self.obj_type)
         return obj[self.field]
 
     def compare(self, new_result, old_result):
@@ -439,7 +430,7 @@
 
     def run(self):
         if self.obj_type is not None:
-            obj = self.create_obj(self.pf, self.obj_type)
+            obj = create_obj(self.pf, self.obj_type)
         else:
             obj = None
         if self.pf.domain_dimensions[self.axis] == 1: return None
@@ -480,7 +471,7 @@
 
     def run(self):
         if self.obj_type is not None:
-            obj = self.create_obj(self.pf, self.obj_type)
+            obj = create_obj(self.pf, self.obj_type)
         else:
             obj = None
         proj = self.pf.h.proj(self.field, self.axis, 
@@ -730,6 +721,15 @@
                         pf_fn, axis, field, weight_field,
                         ds)
 
+def create_obj(pf, obj_type):
+    # obj_type should be tuple of
+    #  ( obj_name, ( args ) )
+    if obj_type is None:
+        return pf.h.all_data()
+    cls = getattr(pf.h, obj_type[0])
+    obj = cls(*obj_type[1])
+    return obj
+
 class AssertWrapper(object):
     """
     Used to wrap a numpy testing assertion, in order to provide a useful name

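The module-level create_obj mirrors the frontend tests above: obj_type is
either None (meaning all_data) or a (name, args) tuple naming a hierarchy
method; a short sketch with a placeholder dataset:

    from yt.utilities.answer_testing.framework import \
        create_obj, data_dir_load

    pf = data_dir_load("MyData/data0001")   # hypothetical dataset
    ad = create_obj(pf, None)               # pf.h.all_data()
    sp = create_obj(pf, ("sphere", ("c", (0.3, 'unitary'))))
    # The block-consistency check used by the tests above:
    assert ad["Ones"].sum() == \
        sum(mask.sum() for block, mask in ad.blocks)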

https://bitbucket.org/yt_analysis/yt-3.0/commits/7d65b4e5b1b1/
Changeset:   7d65b4e5b1b1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-07 16:01:13
Summary:     A few optimizations with considerable payoff for ARTIO.
Affected #:  1 file

diff -r 46521c80a5bd530c809a35e3845cb87d0f1716f1 -r 7d65b4e5b1b10f538f8fe1d3898cb9cb60ce2073 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -832,6 +832,9 @@
                                 0, fields)
         return rv
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 cdef read_sfc_particles(artio_fileset artio_handle,
                         np.int64_t sfc_start, np.int64_t sfc_end,
                         int read_unrefined, fields):
@@ -1015,7 +1018,8 @@
     cdef np.uint64_t sfc_start
     cdef np.uint64_t sfc_end
     cdef public object _last_mask
-    cdef public object _last_selector_id
+    cdef public np.int64_t _last_selector_id
+    cdef np.int64_t _last_mask_sum
     cdef ARTIOSFCRangeHandler range_handler
     cdef np.uint8_t *sfc_mask
     cdef np.int64_t nsfc
@@ -1030,7 +1034,8 @@
             self.dds[i] = range_handler.dds[i]
         self.handle = range_handler.handle
         self.artio_handle = range_handler.artio_handle
-        self._last_mask = self._last_selector_id = None
+        self._last_mask = None
+        self._last_selector_id = -1
         self.sfc_start = range_handler.sfc_start
         self.sfc_end = range_handler.sfc_end
         self.range_handler = range_handler
@@ -1077,6 +1082,9 @@
         cdef int i
         return self.mask(selector).sum()
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def icoords(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         # Note that num_octs does not have to equal sfc_end - sfc_start + 1.
@@ -1084,7 +1092,7 @@
         cdef int acoords[3], i
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_cells = mask.sum()
+        num_cells = self._last_mask_sum
         cdef np.ndarray[np.int64_t, ndim=2] coords
         coords = np.empty((num_cells, 3), dtype="int64")
         cdef int filled = 0
@@ -1101,6 +1109,9 @@
             filled += 1
         return coords
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def fcoords(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         # Note that num_cells does not have to equal sfc_end - sfc_start + 1.
@@ -1109,7 +1120,7 @@
         cdef int acoords[3], i
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_cells = mask.sum()
+        num_cells = self._last_mask_sum
         cdef np.ndarray[np.float64_t, ndim=2] coords
         coords = np.empty((num_cells, 3), dtype="float64")
         cdef int filled = 0
@@ -1126,23 +1137,29 @@
             filled += 1
         return coords
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def fwidth(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         cdef int i
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_cells = mask.sum()
+        num_cells = self._last_mask_sum
         cdef np.ndarray[np.float64_t, ndim=2] width
         width = np.zeros((num_cells, 3), dtype="float64")
         for i in range(3):
             width[:,i] = self.dds[i]
         return width
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def ires(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_cells = mask.sum()
+        num_cells = self._last_mask_sum
         cdef np.ndarray[np.int64_t, ndim=1] res
         res = np.zeros(num_cells, dtype="int64")
         return res
@@ -1176,7 +1193,7 @@
             # Note that RAMSES can have partial refinement inside an Oct.  This
             # means we actually do want the number of Octs, not the number of
             # cells.
-            num_cells = mask.sum()
+            num_cells = self._last_mask_sum
             if dims > 1:
                 dest = np.zeros((num_cells, dims), dtype=source.dtype,
                     order='C')
@@ -1208,22 +1225,28 @@
         if self._last_selector_id == hash(selector):
             return self._last_mask
         mask = np.zeros((self.nsfc), dtype="uint8")
+        self._last_mask_sum = 0
         for sfc in range(self.sfc_start, self.sfc_end + 1):
             if self.sfc_mask[sfc - self.sfc_start] == 0: continue
             sfci += 1
             self.sfc_to_pos(sfc, pos)
             if selector.select_cell(pos, self.dds) == 0: continue
             mask[sfci] = 1
+            self._last_mask_sum += 1
         self._last_mask = mask.astype("bool")
         self._last_selector_id = hash(selector)
         return self._last_mask
 
+
     def fill_sfc_particles(self, fields):
         rv = read_sfc_particles(self.artio_handle,
                                 self.sfc_start, self.sfc_end,
                                 1, fields)
         return rv
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def fill_sfc(self, SelectorObject selector, field_indices):
         cdef np.ndarray[np.float64_t, ndim=1] dest
         cdef int n, status, i, di, num_oct_levels, nf, ngv, max_level
@@ -1238,7 +1261,7 @@
         max_level = self.artio_handle.max_level
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector, -1)
-        num_cells = mask.sum()
+        num_cells = self._last_mask_sum
         tr = []
         for i in range(nf):
             tr.append(np.zeros(num_cells, dtype="float64"))

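The _last_mask_sum changes above follow a simple caching pattern: key the
cached mask and its precomputed sum on hash(selector), so repeated
queries with the same selector skip the cell loop entirely. A pure-Python
sketch of the idea (select_cell here stands in for the Cython selector
call):

    class MaskCache(object):
        def __init__(self):
            self._last_selector_id = -1
            self._last_mask = None
            self._last_mask_sum = 0

        def mask(self, selector, cells):
            # Reuse the cached mask if the selector is unchanged.
            if self._last_selector_id == hash(selector):
                return self._last_mask
            mask = [bool(selector.select_cell(c)) for c in cells]
            self._last_mask = mask
            self._last_mask_sum = sum(mask)  # computed once, not per query
            self._last_selector_id = hash(selector)
            return mask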

https://bitbucket.org/yt_analysis/yt-3.0/commits/b514603c5fd4/
Changeset:   b514603c5fd4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-07 17:38:00
Summary:     Updating tests for Gasoline
Affected #:  2 files

diff -r 7d65b4e5b1b10f538f8fe1d3898cb9cb60ce2073 -r b514603c5fd4e8833e4e52be43a2ec998ac4a832 yt/frontends/sph/tests/test_tipsy.py
--- a/yt/frontends/sph/tests/test_tipsy.py
+++ b/yt/frontends/sph/tests/test_tipsy.py
@@ -77,10 +77,10 @@
     yield assert_equal, str(pf), "agora_1e11.00400"
     dso = [ None, ("sphere", ("c", (0.3, 'unitary')))]
     dd = pf.h.all_data()
-    yield assert_equal, dd["Coordinates"].shape, (26847360, 3)
+    yield assert_equal, dd["Coordinates"].shape, (10550576, 3)
     tot = sum(dd[ptype,"Coordinates"].shape[0]
               for ptype in pf.particle_types if ptype != "all")
-    yield assert_equal, tot, 26847360
+    yield assert_equal, tot, 10550576
     for ds in dso:
         for field in _fields:
             for axis in [0, 1, 2]:

diff -r 7d65b4e5b1b10f538f8fe1d3898cb9cb60ce2073 -r b514603c5fd4e8833e4e52be43a2ec998ac4a832 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -253,7 +253,7 @@
     yield
     os.chdir(oldcwd)
 
-def can_run_pf(pf_fn, file_check):
+def can_run_pf(pf_fn, file_check = False):
     if isinstance(pf_fn, StaticOutput):
         return AnswerTestingTest.result_storage is not None
     path = ytcfg.get("yt", "test_data_dir")


https://bitbucket.org/yt_analysis/yt-3.0/commits/2f750152aa19/
Changeset:   2f750152aa19
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-07 19:12:39
Summary:     Fix for can_run_pf from Kacper.
Affected #:  1 file

diff -r b514603c5fd4e8833e4e52be43a2ec998ac4a832 -r 2f750152aa19f38763b636f7c914a7128e23744f yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -261,7 +261,8 @@
         return False
     with temp_cwd(path):
         if file_check:
-            return os.path.isfile(pf_fn)
+            return os.path.isfile(pf_fn) and \
+                AnswerTestingTest.result_storage is not None
         try:
             load(pf_fn)
         except YTOutputNotIdentified:

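With this fix, file_check behaves like the load-based path: it only
enables the test when answer storage is actually set up. A sketch of how
it is meant to be used for Tipsy-style outputs that load() cannot
auto-identify (reusing the gasoline path from the tests above):

    from yt.utilities.answer_testing.framework import \
        requires_pf, data_dir_load
    from yt.frontends.sph.api import TipsyStaticOutput

    gasoline = "agora_1e11.00400/agora_1e11.00400"

    # file_check=True only verifies the file exists, since load()
    # cannot identify a bare Tipsy file.
    @requires_pf(gasoline, file_check=True)
    def test_gasoline():
        pf = data_dir_load(gasoline, TipsyStaticOutput)
        assert str(pf) == "agora_1e11.00400"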

https://bitbucket.org/yt_analysis/yt-3.0/commits/104a961a811f/
Changeset:   104a961a811f
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-07 20:45:09
Summary:     A few more changes which have medium level impact.
Affected #:  2 files

diff -r 2f750152aa19f38763b636f7c914a7128e23744f -r 104a961a811fe636f7732276297ad709b8828937 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -123,7 +123,9 @@
     void artio_sfc_coords( artio_fileset_handle *handle, int64_t index, int coords[3] ) nogil
 
 cdef void check_artio_status(int status, char *fname="[unknown]"):
-    if status!=ARTIO_SUCCESS :
+    if status != ARTIO_SUCCESS:
+        import traceback
+        traceback.print_stack()
         callername = sys._getframe().f_code.co_name
         nline = sys._getframe().f_lineno
         raise RuntimeError('failure with status', status, 'in function',fname,'from caller', callername, nline)
@@ -665,9 +667,11 @@
     # this, we will avoid creating it as long as possible.
 
     cdef public artio_fileset artio_handle
+    cdef ARTIOSFCRangeHandler range_handler
     cdef np.int64_t level_indices[32]
 
     def __init__(self, ARTIOSFCRangeHandler range_handler):
+        self.range_handler = range_handler
         self.artio_handle = range_handler.artio_handle
         # Note the final argument is partial_coverage, which indicates whether
         # or not an Oct can be partially refined.
@@ -785,6 +789,12 @@
         #   * Enable preloading during mesh initialization
         #   * Calculate domain indices on the fly rather than with a
         #     double-loop to calculate domain_counts
+        # The cons should be in order
+        cdef np.int64_t sfc_start, sfc_end
+        sfc_start = self.domains[0].con_id
+        sfc_end = self.domains[self.num_domains - 1].con_id
+        status = artio_grid_cache_sfc_range(handle, sfc_start, sfc_end )
+        check_artio_status(status) 
         cdef np.int64_t offset = 0 
         for si in range(self.num_domains):
             sfc = self.domains[si].con_id
@@ -818,6 +828,8 @@
                     dest[i + offset] = source[oct_ind, cell_inds[i + offset]]
             # Now, we offset by the actual number filled here.
             offset += domain_counts[si]
+        status = artio_grid_clear_sfc_cache(handle)
+        check_artio_status(status)
         free(field_ind)
         free(field_vals)
         free(grid_variables)
@@ -829,7 +841,7 @@
         sfc_start = self.domains[0].con_id
         sfc_end = self.domains[self.num_domains - 1].con_id
         rv = read_sfc_particles(self.artio_handle, sfc_start, sfc_end,
-                                0, fields)
+                                0, fields, self.range_handler.doct_count)
         return rv
 
 @cython.boundscheck(False)
@@ -837,7 +849,8 @@
 @cython.cdivision(True)
 cdef read_sfc_particles(artio_fileset artio_handle,
                         np.int64_t sfc_start, np.int64_t sfc_end,
-                        int read_unrefined, fields):
+                        int read_unrefined, fields,
+                        np.int64_t *doct_count):
     cdef int status, ispec, subspecies
     cdef np.int64_t sfc, particle, pid, ind, vind
     cdef int num_species = artio_handle.num_species
@@ -887,19 +900,12 @@
             sfc_start, sfc_end ) 
     check_artio_status(status)
 
-    # We cache so we can figure out if the cell is refined or not.
-    status = artio_grid_cache_sfc_range(handle, sfc_start, sfc_end)
-    check_artio_status(status) 
-
     # Pass through once.  We want every single particle.
+    cdef np.int64_t c 
     for sfc in range(sfc_start, sfc_end + 1):
-        status = artio_grid_read_root_cell_begin( handle,
-            sfc, dpos, NULL, &num_oct_levels, num_octs_per_level)
-        check_artio_status(status)
-        status = artio_grid_read_root_cell_end(handle)
-        check_artio_status(status)
-        if read_unrefined == 1 and num_oct_levels > 0: continue
-        if read_unrefined == 0 and num_oct_levels == 0: continue
+        c = doct_count[sfc - sfc_start]
+        if read_unrefined == 1 and c > 0: continue
+        if read_unrefined == 0 and c == 0: continue
         status = artio_particle_read_root_cell_begin( handle, sfc,
                 num_particles_per_species )
         check_artio_status(status)
@@ -951,13 +957,10 @@
             vp.n_s += 1
 
     for sfc in range(sfc_start, sfc_end + 1):
-        status = artio_grid_read_root_cell_begin( handle,
-            sfc, dpos, NULL, &num_oct_levels, num_octs_per_level)
+        c = doct_count[sfc - sfc_start]
         check_artio_status(status)
-        status = artio_grid_read_root_cell_end(handle)
-        check_artio_status(status)
-        if read_unrefined == 1 and num_oct_levels > 0: continue
-        if read_unrefined == 0 and num_oct_levels == 0: continue
+        if read_unrefined == 1 and c > 0: continue
+        if read_unrefined == 0 and c == 0: continue
         status = artio_particle_read_root_cell_begin( handle, sfc,
                 num_particles_per_species )
         check_artio_status(status)
@@ -993,11 +996,8 @@
         status = artio_particle_read_root_cell_end( handle )
         check_artio_status(status)
 
-    #status = artio_particle_clear_sfc_cache(handle)
-    #check_artio_status(status)
-
-    #status = artio_grid_clear_sfc_cache(handle)
-    #check_artio_status(status)
+    status = artio_particle_clear_sfc_cache(handle)
+    check_artio_status(status)
 
     free(num_octs_per_level)
     free(num_particles_per_species)
@@ -1224,6 +1224,7 @@
         cdef np.int64_t sfc, sfci = -1
         if self._last_selector_id == hash(selector):
             return self._last_mask
+        cdef np.ndarray[np.uint8_t, ndim=1] mask
         mask = np.zeros((self.nsfc), dtype="uint8")
         self._last_mask_sum = 0
         for sfc in range(self.sfc_start, self.sfc_end + 1):
@@ -1241,7 +1242,7 @@
     def fill_sfc_particles(self, fields):
         rv = read_sfc_particles(self.artio_handle,
                                 self.sfc_start, self.sfc_end,
-                                1, fields)
+                                1, fields, self.range_handler.doct_count)
         return rv
 
     @cython.boundscheck(False)
@@ -1306,6 +1307,9 @@
         free(num_octs_per_level)
         return tr
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def deposit(self, ParticleDepositOperation pdeposit,
                 SelectorObject selector,
                 np.ndarray[np.float64_t, ndim=2] positions,

diff -r 2f750152aa19f38763b636f7c914a7128e23744f -r 104a961a811fe636f7732276297ad709b8828937 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -72,6 +72,7 @@
         return self.sfc_end
 
     def fill(self, fields, selector):
+        if len(fields) == 0: return []
         handle = self.oct_handler.artio_handle
         field_indices = [handle.parameters["grid_variable_labels"].index(
                         yt_to_art[f]) for (ft, f) in fields]
@@ -88,6 +89,7 @@
         return tr
 
     def fill_particles(self, fields):
+        if len(fields) == 0: return {}
         art_fields = []
         for s, f in fields:
             fn = yt_to_art[f]
@@ -127,6 +129,7 @@
 
     def fill(self, fields, selector):
         # We know how big these will be.
+        if len(fields) == 0: return []
         handle = self.pf._handle
         field_indices = [handle.parameters["grid_variable_labels"].index(
                         yt_to_art[f]) for (ft, f) in fields]
@@ -250,6 +253,8 @@
                 list_sfc_ranges = self.pf._handle.root_sfc_ranges(
                     dobj.selector)
             ci = []
+            #v = np.array(list_sfc_ranges)
+            #list_sfc_ranges = [ (v.min(), v.max()) ]
             for (start, end) in list_sfc_ranges:
                 range_handler = ARTIOSFCRangeHandler(
                     self.pf.domain_dimensions,


https://bitbucket.org/yt_analysis/yt-3.0/commits/ab21fdb8513b/
Changeset:   ab21fdb8513b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-07 21:21:45
Summary:     Adding option for max_range to be passed in
Affected #:  1 file

diff -r 104a961a811fe636f7732276297ad709b8828937 -r ab21fdb8513b1f7610b18e78ef9532d7a8d46c60 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -490,8 +490,7 @@
 
         return (fcoords, ires, data)
 
-    def root_sfc_ranges_all(self) :
-        cdef int max_range_size = 1024
+    def root_sfc_ranges_all(self, int max_range_size = 1024) :
         cdef int64_t sfc_start, sfc_end
         cdef artio_selection *selection
 
@@ -505,8 +504,8 @@
         artio_selection_destroy(selection)
         return sfc_ranges
 
-    def root_sfc_ranges(self, SelectorObject selector) :
-        cdef int max_range_size = 1024
+    def root_sfc_ranges(self, SelectorObject selector,
+                        int max_range_size = 1024):
         cdef int coords[3]
         cdef int64_t sfc_start, sfc_end
         cdef np.float64_t left[3]
@@ -1299,8 +1298,8 @@
             status = artio_grid_read_root_cell_end( handle )
             check_artio_status(status)
         # Now we have all our sources.
-        #status = artio_grid_clear_sfc_cache(handle)
-        #check_artio_status(status)
+        status = artio_grid_clear_sfc_cache(handle)
+        check_artio_status(status)
         free(field_ind)
         free(field_vals)
         free(grid_variables)


https://bitbucket.org/yt_analysis/yt-3.0/commits/32515ac758a5/
Changeset:   32515ac758a5
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-07 22:36:06
Summary:     A currently oddly-segfaulting optimization for particle counts.
Affected #:  1 file

diff -r ab21fdb8513b1f7610b18e78ef9532d7a8d46c60 -r 32515ac758a5c65330c13c2cb81833fa8b85f17d yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -564,6 +564,7 @@
     cdef np.int64_t dims[3]
     cdef public np.int64_t total_octs
     cdef np.int64_t *doct_count
+    cdef np.int64_t **pcount
 
     def __init__(self, domain_dimensions, # cells
                  domain_left_edge,
@@ -571,6 +572,7 @@
                  artio_fileset artio_handle,
                  sfc_start, sfc_end):
         cdef int i
+        cdef np.int64_t sfc
         self.sfc_start = sfc_start
         self.sfc_end = sfc_end
         self.artio_handle = artio_handle
@@ -578,23 +580,39 @@
         self.octree_handler = None
         self.handle = artio_handle.handle
         self.oct_count = None
+        self.pcount = <np.int64_t **> malloc(sizeof(np.int64_t*)
+            * artio_handle.num_species)
+        for i in range(artio_handle.num_species):
+            self.pcount[i] = <np.int64_t*> malloc(sizeof(np.int64_t)
+                * (self.sfc_end - self.sfc_start + 1))
+            for sfc in range(self.sfc_end - self.sfc_start + 1):
+                self.pcount[i][sfc] = 0
         for i in range(3):
             self.dims[i] = domain_dimensions[i]
             self.DLE[i] = domain_left_edge[i]
             self.DRE[i] = domain_right_edge[i]
             self.dds[i] = (self.DRE[i] - self.DLE[i])/self.dims[i]
 
+    def __dealloc__(self):
+        cdef int i
+        for i in range(self.artio_handle.num_species):
+            free(self.pcount[i])
+        free(self.pcount)
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
     def construct_mesh(self):
         cdef int status, level
-        cdef np.int64_t sfc, oc
+        cdef np.int64_t sfc, oc, i
         cdef double dpos[3]
         cdef int num_oct_levels
         cdef int max_level = self.artio_handle.max_level
         cdef int *num_octs_per_level = <int *>malloc(
             (max_level + 1)*sizeof(int))
+        cdef int num_species = self.artio_handle.num_species
+        cdef int *num_particles_per_species =  <int *>malloc(
+            sizeof(int)*num_species) 
         cdef ARTIOOctreeContainer octree
         self.octree_handler = octree = ARTIOOctreeContainer(self)
         # We want to pre-allocate an array of root pointers.  In the future,
@@ -621,7 +639,30 @@
                     num_octs_per_level, sfc)
             status = artio_grid_read_root_cell_end( self.handle )
             check_artio_status(status)
+        status = artio_grid_clear_sfc_cache( self.handle)
+        check_artio_status(status)
+        # Now particles
+        status = artio_particle_cache_sfc_range(self.handle, self.sfc_start,
+                                            self.sfc_end)
+        check_artio_status(status) 
+        for sfc in range(self.sfc_start, self.sfc_end + 1):
+            # Now particles
+            status = artio_particle_read_root_cell_begin( self.handle,
+                    sfc, num_particles_per_species )
+            check_artio_status(status)
+
+            for i in range(num_species):
+                self.pcount[i][sfc - self.sfc_start] = \
+                    num_particles_per_species[i]
+
+            status = artio_particle_read_root_cell_end( self.handle)
+            check_artio_status(status)
+
+        status = artio_particle_clear_sfc_cache( self.handle)
+        check_artio_status(status)
+
         free(num_octs_per_level)
+        free(num_particles_per_species)
         self.oct_count = oct_count
         self.doct_count = <np.int64_t *> oct_count.data
         self.root_mesh_handler = ARTIORootMeshContainer(self)
@@ -840,7 +881,8 @@
         sfc_start = self.domains[0].con_id
         sfc_end = self.domains[self.num_domains - 1].con_id
         rv = read_sfc_particles(self.artio_handle, sfc_start, sfc_end,
-                                0, fields, self.range_handler.doct_count)
+                                0, fields, self.range_handler.doct_count,
+                                self.range_handler.pcount)
         return rv
 
 @cython.boundscheck(False)
@@ -849,7 +891,8 @@
 cdef read_sfc_particles(artio_fileset artio_handle,
                         np.int64_t sfc_start, np.int64_t sfc_end,
                         int read_unrefined, fields,
-                        np.int64_t *doct_count):
+                        np.int64_t *doct_count,
+                        np.int64_t **pcount):
     cdef int status, ispec, subspecies
     cdef np.int64_t sfc, particle, pid, ind, vind
     cdef int num_species = artio_handle.num_species
@@ -895,25 +938,16 @@
         vpoints[ispec].n_p = 0
         vpoints[ispec].n_s = 0
 
-    status = artio_particle_cache_sfc_range( handle,
-            sfc_start, sfc_end ) 
-    check_artio_status(status)
-
     # Pass through once.  We want every single particle.
-    cdef np.int64_t c 
+    tp = 0
+    cdef np.int64_t c
     for sfc in range(sfc_start, sfc_end + 1):
         c = doct_count[sfc - sfc_start]
         if read_unrefined == 1 and c > 0: continue
         if read_unrefined == 0 and c == 0: continue
-        status = artio_particle_read_root_cell_begin( handle, sfc,
-                num_particles_per_species )
-        check_artio_status(status)
 
         for ispec in range(num_species):
-            total_particles[ispec] += num_particles_per_species[ispec]
-
-        status = artio_particle_read_root_cell_end( handle )
-        check_artio_status(status)
+            total_particles[ispec] += pcount[ispec][sfc - sfc_start]
 
     # Now we allocate our final fields, which will be filled
     #for ispec in range(num_species):
@@ -930,30 +964,40 @@
         vp = &vpoints[species]
         if field == "MASS":
             vp.n_mass = 1
-            npf64arr = data[(species, field)] = np.zeros(tp, dtype="float64")
+            data[(species, field)] = np.zeros(tp, dtype="float64")
+            npf64arr = data[(species, field)]
             # We fill this *now*
             npf64arr += params["particle_species_mass"][species]
             vp.mass = <np.float64_t*> npf64arr.data
         elif field == "PID":
             vp.n_pid = 1
-            npi64arr = data[(species, field)] = np.zeros(tp, dtype="int64")
+            data[(species, field)] = np.zeros(tp, dtype="int64")
+            npi64arr = data[(species, field)]
             vp.pid = <np.int64_t*> npi64arr.data
         elif field == "SPECIES":
             vp.n_species = 1
-            npi8arr = data[(species, field)] = np.zeros(tp, dtype="int8")
+            data[(species, field)] = np.zeros(tp, dtype="int8")
+            npi8arr = data[(species, field)]
             # We fill this *now*
             npi8arr += species
             vp.species = <np.int8_t*> npi8arr.data
         elif npri_vars[species] > 0 and field in pri_vars :
-            npf64arr = data[(species, field)] = np.zeros(tp, dtype="float64")
+            data[(species, field)] = np.zeros(tp, dtype="float64")
+            npf64arr = data[(species, field)]
             vp.p_ind[vp.n_p] = pri_vars.index(field)
             vp.pvars[vp.n_p] = <np.float64_t *> npf64arr.data
             vp.n_p += 1
         elif nsec_vars[species] > 0 and field in sec_vars :
-            npf64arr = data[(species, field)] = np.zeros(tp, dtype="float64")
+            data[(species, field)] = np.zeros(tp, dtype="float64")
+            npf64arr = data[(species, field)]
             vp.s_ind[vp.n_s] = sec_vars.index(field)
             vp.svars[vp.n_s] = <np.float64_t *> npf64arr.data
             vp.n_s += 1
+        print "Allocated ", species, field, data[species, field].size
+
+    status = artio_particle_cache_sfc_range( handle,
+            sfc_start, sfc_end ) 
+    check_artio_status(status)
 
     for sfc in range(sfc_start, sfc_end + 1):
         c = doct_count[sfc - sfc_start]
@@ -1241,7 +1285,8 @@
     def fill_sfc_particles(self, fields):
         rv = read_sfc_particles(self.artio_handle,
                                 self.sfc_start, self.sfc_end,
-                                1, fields, self.range_handler.doct_count)
+                                1, fields, self.range_handler.doct_count,
+                                self.range_handler.pcount)
         return rv
 
     @cython.boundscheck(False)
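
The hunk above moves per-SFC particle counting into construct_mesh: counts for every species are cached in pcount once, and read_sfc_particles takes the cache instead of re-reading each root cell's particle header during its sizing pass. A minimal Python sketch of the same precompute-then-reuse pattern (read_root_cell_counts is a hypothetical stand-in for the artio_particle_read_root_cell_* calls):

    import numpy as np

    def build_particle_count_cache(handle, sfc_start, sfc_end, num_species,
                                   read_root_cell_counts):
        # One pass over the SFC range; later passes never touch the
        # particle headers again.
        n = sfc_end - sfc_start + 1
        counts = np.zeros((num_species, n), dtype=np.int64)
        for sfc in range(sfc_start, sfc_end + 1):
            # hypothetical reader returning num_species counts for one cell
            counts[:, sfc - sfc_start] = read_root_cell_counts(handle, sfc)
        return counts

    def total_particles(counts, species):
        # The sizing pass becomes an in-memory sum instead of file reads.
        return int(counts[species].sum())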


https://bitbucket.org/yt_analysis/yt-3.0/commits/be6e9372fa06/
Changeset:   be6e9372fa06
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-07 22:51:21
Summary:     A few fixes, and a major speedup for ARTIO.
Affected #:  1 file

diff -r 32515ac758a5c65330c13c2cb81833fa8b85f17d -r be6e9372fa06490ebc2f690903ad2dfdf3cb6d29 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -962,7 +962,7 @@
             "species_%02u_secondary_variable_labels" % (species,), [])
         tp = total_particles[species]
         vp = &vpoints[species]
-        if field == "MASS":
+        if field == "MASS" and params["particle_species_mass"][species] != 0.0:
             vp.n_mass = 1
             data[(species, field)] = np.zeros(tp, dtype="float64")
             npf64arr = data[(species, field)]
@@ -993,7 +993,6 @@
             vp.s_ind[vp.n_s] = sec_vars.index(field)
             vp.svars[vp.n_s] = <np.float64_t *> npf64arr.data
             vp.n_s += 1
-        print "Allocated ", species, field, data[species, field].size
 
     status = artio_particle_cache_sfc_range( handle,
             sfc_start, sfc_end ) 
@@ -1004,6 +1003,11 @@
         check_artio_status(status)
         if read_unrefined == 1 and c > 0: continue
         if read_unrefined == 0 and c == 0: continue
+        c = 0
+        for ispec in range(num_species) : 
+            if accessed_species[ispec] == 0: continue
+            c += pcount[ispec][sfc - sfc_start]
+        if c == 0: continue
         status = artio_particle_read_root_cell_begin( handle, sfc,
                 num_particles_per_species )
         check_artio_status(status)
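
The speedup here comes from two early exits: a species whose particle_species_mass parameter is zero no longer gets a pre-filled MASS buffer, and a root cell in which none of the accessed species has any particles is skipped before any file read is issued. A sketch of the skip test, assuming accessed_species and pcount mirror the Cython arrays:

    def sfc_has_work(sfc_index, accessed_species, pcount):
        # Skip the whole root cell when the species we were asked to read
        # contribute zero particles -- saves one seek per empty cell.
        total = 0
        for ispec, accessed in enumerate(accessed_species):
            if not accessed:
                continue
            total += pcount[ispec][sfc_index]
        return total > 0

    # inside the SFC loop:
    #     if not sfc_has_work(sfc - sfc_start, accessed_species, pcount):
    #         continue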


https://bitbucket.org/yt_analysis/yt-3.0/commits/7f9535d0a16f/
Changeset:   7f9535d0a16f
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-08 04:14:19
Summary:     Cache root mesh data on instantiation and avoid any reloading.
Affected #:  1 file

diff -r be6e9372fa06490ebc2f690903ad2dfdf3cb6d29 -r 7f9535d0a16f4a3a4b60b5d40cd6b0da2e2c2a07 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -565,6 +565,7 @@
     cdef public np.int64_t total_octs
     cdef np.int64_t *doct_count
     cdef np.int64_t **pcount
+    cdef float **root_mesh_data
 
     def __init__(self, domain_dimensions, # cells
                  domain_left_edge,
@@ -580,6 +581,7 @@
         self.octree_handler = None
         self.handle = artio_handle.handle
         self.oct_count = None
+        self.root_mesh_data = NULL
         self.pcount = <np.int64_t **> malloc(sizeof(np.int64_t*)
             * artio_handle.num_species)
         for i in range(artio_handle.num_species):
@@ -597,13 +599,16 @@
         cdef int i
         for i in range(self.artio_handle.num_species):
             free(self.pcount[i])
+        for i in range(self.artio_handle.num_grid_variables):
+            free(self.root_mesh_data[i])
         free(self.pcount)
+        free(self.root_mesh_data)
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
     def construct_mesh(self):
-        cdef int status, level
+        cdef int status, level, ngv
         cdef np.int64_t sfc, oc, i
         cdef double dpos[3]
         cdef int num_oct_levels
@@ -614,7 +619,14 @@
         cdef int *num_particles_per_species =  <int *>malloc(
             sizeof(int)*num_species) 
         cdef ARTIOOctreeContainer octree
+        ngv = self.artio_handle.num_grid_variables
+        cdef float *grid_variables = <float *>malloc(
+            ngv * sizeof(float))
         self.octree_handler = octree = ARTIOOctreeContainer(self)
+        self.root_mesh_data = <float **>malloc(sizeof(float *) * ngv)
+        for i in range(ngv):
+            self.root_mesh_data[i] = <float *>malloc(sizeof(float) * \
+                self.sfc_end - self.sfc_start + 1)
         # We want to pre-allocate an array of root pointers.  In the future,
         # this will be pre-determined by the ARTIO library.  However, because
         # realloc plays havoc with our tree searching, we can't utilize an
@@ -627,7 +639,11 @@
         check_artio_status(status) 
         for sfc in range(self.sfc_start, self.sfc_end + 1):
             status = artio_grid_read_root_cell_begin( self.handle,
-                sfc, dpos, NULL, &num_oct_levels, num_octs_per_level)
+                sfc, dpos, grid_variables, &num_oct_levels,
+                num_octs_per_level)
+            for i in range(ngv):
+                self.root_mesh_data[i][sfc - self.sfc_start] = \
+                    grid_variables[i]
             check_artio_status(status)
             if num_oct_levels > 0:
                 oc = 0
@@ -661,6 +677,7 @@
         status = artio_particle_clear_sfc_cache( self.handle)
         check_artio_status(status)
 
+        free(grid_variables)
         free(num_octs_per_level)
         free(num_particles_per_species)
         self.oct_count = oct_count
@@ -1301,23 +1318,17 @@
         cdef int n, status, i, di, num_oct_levels, nf, ngv, max_level
         cdef np.int64_t sfc, num_cells, sfci = -1
         cdef np.float64_t val
-        cdef artio_fileset_handle *handle = self.artio_handle.handle
         cdef double dpos[3]
         # We duplicate some of the grid_variables stuff here so that we can
         # potentially release the GIL
         nf = len(field_indices)
         ngv = self.artio_handle.num_grid_variables
-        max_level = self.artio_handle.max_level
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector, -1)
         num_cells = self._last_mask_sum
         tr = []
         for i in range(nf):
             tr.append(np.zeros(num_cells, dtype="float64"))
-        cdef int *num_octs_per_level = <int *>malloc(
-            (max_level + 1)*sizeof(int))
-        cdef float *grid_variables = <float *>malloc(
-            ngv * sizeof(float))
         cdef int* field_ind = <int*> malloc(
             nf * sizeof(int))
         cdef np.float64_t **field_vals = <np.float64_t**> malloc(
@@ -1330,29 +1341,18 @@
         # First we need to walk the mesh in the file.  Then we fill in the dest
         # location based on the file index.
         cdef int filled = 0
-        status = artio_grid_cache_sfc_range(handle,
-            self.sfc_start, self.sfc_end )
-        check_artio_status(status) 
+        cdef float **mesh_data = self.range_handler.root_mesh_data
         for sfc in range(self.sfc_start, self.sfc_end + 1):
             if self.sfc_mask[sfc - self.sfc_start] == 0: continue
             sfci += 1
             if mask[sfci] == 0: continue
-            status = artio_grid_read_root_cell_begin( handle, sfc, 
-                    dpos, grid_variables, &num_oct_levels,
-                    num_octs_per_level)
-            check_artio_status(status) 
             for i in range(nf):
-                field_vals[i][filled] = grid_variables[field_ind[i]]
+                field_vals[i][filled] = mesh_data[field_ind[i]][
+                    sfc - self.sfc_start]
             filled += 1
-            status = artio_grid_read_root_cell_end( handle )
-            check_artio_status(status)
         # Now we have all our sources.
-        status = artio_grid_clear_sfc_cache(handle)
-        check_artio_status(status)
         free(field_ind)
         free(field_vals)
-        free(grid_variables)
-        free(num_octs_per_level)
         return tr
 
     @cython.boundscheck(False)
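
With this change construct_mesh reads the ngv grid variables of every root cell exactly once into root_mesh_data, and fill_sfc later answers selections straight from that in-memory table, dropping the cache/read_root_cell_begin/read_root_cell_end round trip per selected cell. A numpy sketch of the layout, with read_root_cell standing in (hypothetically) for the artio reader:

    import numpy as np

    def cache_root_mesh(handle, sfc_start, sfc_end, ngv, read_root_cell):
        # Variable-major layout, matching the C arrays:
        # cache[variable][sfc - sfc_start]
        n = sfc_end - sfc_start + 1
        cache = np.empty((ngv, n), dtype=np.float32)
        for sfc in range(sfc_start, sfc_end + 1):
            cache[:, sfc - sfc_start] = read_root_cell(handle, sfc)[:ngv]
        return cache

    def fill_fields(cache, field_indices, selected_sfcs, sfc_start):
        # Selection-time fill is now pure memory traffic.
        idx = np.asarray(selected_sfcs) - sfc_start
        return [cache[fi, idx].astype(np.float64) for fi in field_indices]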


https://bitbucket.org/yt_analysis/yt-3.0/commits/d596ac6902a5/
Changeset:   d596ac6902a5
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-08 04:31:56
Summary:     Making max_range_size a property of the parameter file.
Affected #:  1 file

diff -r 7f9535d0a16f4a3a4b60b5d40cd6b0da2e2c2a07 -r d596ac6902a5294c06a2f9a64a0bb338a3c62702 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -170,6 +170,10 @@
         self.float_type = np.float64
         super(ARTIOGeometryHandler, self).__init__(pf, data_style)
 
+    @property
+    def max_range(self):
+        return self.parameter_file.max_range
+
     def _setup_geometry(self):
         mylog.debug("Initializing Geometry Handler empty for now.")
 
@@ -244,14 +248,15 @@
             nz = getattr(dobj, "_num_zones", 0)
             if all_data:
                 mylog.debug("Selecting entire artio domain")
-                list_sfc_ranges = self.pf._handle.root_sfc_ranges_all()
+                list_sfc_ranges = self.pf._handle.root_sfc_ranges_all(
+                    max_range_size = self.max_range)
             elif sfc_start is not None and sfc_end is not None:
                 mylog.debug("Restricting to %s .. %s", sfc_start, sfc_end)
                 list_sfc_ranges = [(sfc_start, sfc_end)]
             else:
                 mylog.debug("Running selector on artio base grid")
                 list_sfc_ranges = self.pf._handle.root_sfc_ranges(
-                    dobj.selector)
+                    dobj.selector, max_range_size = self.max_range)
             ci = []
             #v = np.array(list_sfc_ranges)
             #list_sfc_ranges = [ (v.min(), v.max()) ]
@@ -327,6 +332,7 @@
     _fieldinfo_known = KnownARTIOFields
     _particle_mass_name = "particle_mass"
     _particle_coordinates_name = "Coordinates"
+    max_range = 1024
 
     def __init__(self, filename, data_style='artio',
                  storage_filename=None):
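
The geometry handler now forwards max_range to the parameter file, where it lives as a plain class attribute, so the SFC chunk size can be tuned per dataset without subclassing the handler. A small sketch of the forwarding pattern (class names are illustrative, not the yt ones):

    class StaticOutputSketch(object):
        max_range = 1024            # class-level default

    class GeometryHandlerSketch(object):
        def __init__(self, pf):
            self.parameter_file = pf

        @property
        def max_range(self):
            # Always read through to the parameter file, so a runtime
            # override takes effect everywhere the handler uses it.
            return self.parameter_file.max_range

    pf = StaticOutputSketch()
    handler = GeometryHandlerSketch(pf)
    pf.max_range = 4096             # tune chunking on the instance
    assert handler.max_range == 4096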


https://bitbucket.org/yt_analysis/yt-3.0/commits/4ea6055dd0a0/
Changeset:   4ea6055dd0a0
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-08 13:43:39
Summary:     Missing parentheses resulted in a bad allocation size.
Affected #:  1 file

diff -r d596ac6902a5294c06a2f9a64a0bb338a3c62702 -r 4ea6055dd0a07519f96828c16b21a55572241129 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -626,7 +626,7 @@
         self.root_mesh_data = <float **>malloc(sizeof(float *) * ngv)
         for i in range(ngv):
             self.root_mesh_data[i] = <float *>malloc(sizeof(float) * \
-                self.sfc_end - self.sfc_start + 1)
+                (self.sfc_end - self.sfc_start + 1))
         # We want to pre-allocate an array of root pointers.  In the future,
         # this will be pre-determined by the ARTIO library.  However, because
         # realloc plays havoc with our tree searching, we can't utilize an
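
The one-line fix is a C operator-precedence trap: without the parentheses, sizeof(float) multiplies only self.sfc_end, so the buffer is sized as sizeof(float)*sfc_end - sfc_start + 1 bytes instead of sizeof(float)*(sfc_end - sfc_start + 1). The same arithmetic in plain Python:

    sizeof_float = 4
    sfc_start, sfc_end = 100, 227                      # 128 cells

    wrong = sizeof_float * sfc_end - sfc_start + 1     # 4*227 - 100 + 1 = 809
    right = sizeof_float * (sfc_end - sfc_start + 1)   # 4*128         = 512
    assert wrong != right    # the unparenthesized form mis-sizes the buffer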


https://bitbucket.org/yt_analysis/yt-3.0/commits/b4b67d5c46f9/
Changeset:   b4b67d5c46f9
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-08 14:17:05
Summary:     Partial deallocation means we can't rely on artio_handle existing.
Affected #:  1 file

diff -r 4ea6055dd0a07519f96828c16b21a55572241129 -r b4b67d5c46f933a3b8e16c2c92bc6eeb68fd5c9b yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -566,6 +566,7 @@
     cdef np.int64_t *doct_count
     cdef np.int64_t **pcount
     cdef float **root_mesh_data
+    cdef np.int64_t nvars[2]
 
     def __init__(self, domain_dimensions, # cells
                  domain_left_edge,
@@ -584,6 +585,8 @@
         self.root_mesh_data = NULL
         self.pcount = <np.int64_t **> malloc(sizeof(np.int64_t*)
             * artio_handle.num_species)
+        self.nvars[0] = artio_handle.num_species
+        self.nvars[1] = artio_handle.num_grid_variables
         for i in range(artio_handle.num_species):
             self.pcount[i] = <np.int64_t*> malloc(sizeof(np.int64_t)
                 * (self.sfc_end - self.sfc_start + 1))
@@ -597,9 +600,9 @@
 
     def __dealloc__(self):
         cdef int i
-        for i in range(self.artio_handle.num_species):
+        for i in range(self.nvars[0]):
             free(self.pcount[i])
-        for i in range(self.artio_handle.num_grid_variables):
+        for i in range(self.nvars[1]):
             free(self.root_mesh_data[i])
         free(self.pcount)
         free(self.root_mesh_data)
@@ -619,7 +622,7 @@
         cdef int *num_particles_per_species =  <int *>malloc(
             sizeof(int)*num_species) 
         cdef ARTIOOctreeContainer octree
-        ngv = self.artio_handle.num_grid_variables
+        ngv = self.nvars[1]
         cdef float *grid_variables = <float *>malloc(
             ngv * sizeof(float))
         self.octree_handler = octree = ARTIOOctreeContainer(self)
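
Because Cython's __dealloc__ can run after Python-level attributes such as artio_handle have already been torn down, the sizes needed to free the C arrays are snapshotted into the nvars C field at construction and never read back through the handle. A Python-flavoured sketch of the pattern (teardown plays the role of __dealloc__; the handle attributes are assumed):

    class RangeHandlerSketch(object):
        def __init__(self, artio_handle, n_sfc):
            self.artio_handle = artio_handle
            # Snapshot the sizes now; teardown must not reach back
            # through artio_handle, which may already be invalid then.
            self.nvars = (artio_handle.num_species,
                          artio_handle.num_grid_variables)
            self.pcount = [[0] * n_sfc for _ in range(self.nvars[0])]

        def teardown(self):
            # Uses only the cached sizes -- safe even if the handle is gone.
            for i in range(self.nvars[0]):
                self.pcount[i] = None
            self.pcount = None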


https://bitbucket.org/yt_analysis/yt-3.0/commits/90e60994750b/
Changeset:   90e60994750b
Branch:      yt-3.0
User:        xarthisius
Date:        2013-10-09 13:57:26
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #111)

Add a few tests and fix errors in ARTIO that the tests uncovered.
Affected #:  35 files

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -103,5 +103,8 @@
     TwoPointFunctions, \
     FcnSet
 
+from .sunyaev_zeldovich.api import SZProjection
+
 from .radmc3d_export.api import \
     RadMC3DWriter
+

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -21,4 +21,5 @@
     config.add_subpackage("star_analysis")
     config.add_subpackage("two_point_functions")
     config.add_subpackage("radmc3d_export")
+    config.add_subpackage("sunyaev_zeldovich")    
     return config

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/analysis_modules/sunyaev_zeldovich/api.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/api.py
@@ -0,0 +1,12 @@
+"""
+API for sunyaev_zeldovich
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from projection import SZProjection

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/analysis_modules/sunyaev_zeldovich/projection.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -0,0 +1,349 @@
+"""
+Projection class for the Sunyaev-Zeldovich effect. Requires SZpack (at least
+version 1.1.1) to be downloaded and installed:
+
+http://www.chluba.de/SZpack/
+
+For details on the computations involved please refer to the following references:
+
+Chluba, Nagai, Sazonov, Nelson, MNRAS, 2012, arXiv:1205.5778
+Chluba, Switzer, Nagai, Nelson, MNRAS, 2012, arXiv:1211.3206 
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
+from yt.data_objects.image_array import ImageArray
+from yt.data_objects.field_info_container import add_field
+from yt.funcs import fix_axis, mylog, iterable, get_pbar
+from yt.utilities.definitions import inv_axis_names
+from yt.visualization.image_writer import write_fits, write_projection
+from yt.visualization.volume_rendering.camera import off_axis_projection
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+     communication_system, parallel_root_only
+import numpy as np
+
+I0 = 2*(kboltz*Tcmb)**3/((hcgs*clight)**2)*1.0e17
+        
+try:
+    import SZpack
+except ImportError:
+    raise ImportError("SZpack not installed. It can be obtained from http://www.chluba.de/SZpack/.")
+
+vlist = "xyz"
+
+def _t_squared(field, data):
+    return data["Density"]*data["TempkeV"]*data["TempkeV"]
+add_field("TSquared", function=_t_squared)
+
+def _beta_perp_squared(field, data):
+    return data["Density"]*data["VelocityMagnitude"]**2/clight/clight - data["BetaParSquared"]
+add_field("BetaPerpSquared", function=_beta_perp_squared)
+
+def _beta_par_squared(field, data):
+    return data["BetaPar"]**2/data["Density"]
+add_field("BetaParSquared", function=_beta_par_squared)
+
+def _t_beta_par(field, data):
+    return data["TempkeV"]*data["BetaPar"]
+add_field("TBetaPar", function=_t_beta_par)
+
+def _t_sz(field, data):
+    return data["Density"]*data["TempkeV"]
+add_field("TeSZ", function=_t_sz)
+
+class SZProjection(object):
+    r""" Initialize a SZProjection object.
+
+    Parameters
+    ----------
+    pf : parameter_file
+        The parameter file.
+    freqs : array_like
+        The frequencies (in GHz) at which to compute the SZ spectral distortion.
+    mue : float, optional
+        Mean molecular weight for determining the electron number density.
+    high_order : boolean, optional
+        Should we calculate high-order moments of velocity and temperature?
+
+    Examples
+    --------
+    >>> freqs = [90., 180., 240.]
+    >>> szprj = SZProjection(pf, freqs, high_order=True)
+    """
+    def __init__(self, pf, freqs, mue=1.143, high_order=False):
+            
+        self.pf = pf
+        self.num_freqs = len(freqs)
+        self.high_order = high_order
+        self.freqs = np.array(freqs)
+        self.mueinv = 1./mue
+        self.xinit = hcgs*self.freqs*1.0e9/(kboltz*Tcmb)
+        self.freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs]
+        self.data = {}
+
+        self.units = {}
+        self.units["TeSZ"] = r"$\mathrm{keV}$"
+        self.units["Tau"] = None
+
+        self.display_names = {}
+        self.display_names["TeSZ"] = r"$\mathrm{T_e}$"
+        self.display_names["Tau"] = r"$\mathrm{\tau}$"
+
+        for f, field in zip(self.freqs, self.freq_fields):
+            self.units[field] = r"$\mathrm{MJy\ sr^{-1}}$"
+            self.display_names[field] = r"$\mathrm{\Delta{I}_{%d\ GHz}}$" % (int(f))
+            
+    def on_axis(self, axis, center="c", width=(1, "unitary"), nx=800, source=None):
+        r""" Make an on-axis projection of the SZ signal.
+
+        Parameters
+        ----------
+        axis : integer or string
+            The axis of the simulation domain along which to make the SZ projection.
+        center : array_like or string, optional
+            The center of the projection.
+        width : float or tuple
+            The width of the projection.
+        nx : integer, optional
+            The dimensions on a side of the projection image.
+        source : yt.data_objects.api.AMRData, optional
+            If specified, this will be the data source used for selecting regions to project.
+
+        Examples
+        --------
+        >>> szprj.on_axis("y", center="max", width=(1.0, "mpc"), source=my_sphere)
+        """
+        axis = fix_axis(axis)
+
+        def _beta_par(field, data):
+            axis = data.get_field_parameter("axis")
+            vpar = data["Density"]*data["%s-velocity" % (vlist[axis])]
+            return vpar/clight
+        add_field("BetaPar", function=_beta_par)    
+
+        proj = self.pf.h.proj(axis, "Density", source=source)
+        proj.set_field_parameter("axis", axis)
+        frb = proj.to_frb(width, nx)
+        dens = frb["Density"]
+        Te = frb["TeSZ"]/dens
+        bpar = frb["BetaPar"]/dens
+        omega1 = frb["TSquared"]/dens/(Te*Te) - 1.
+        bperp2 = np.zeros((nx,nx))
+        sigma1 = np.zeros((nx,nx))
+        kappa1 = np.zeros((nx,nx))                                    
+        if self.high_order:
+            bperp2 = frb["BetaPerpSquared"]/dens
+            sigma1 = frb["TBetaPar"]/dens/Te - bpar
+            kappa1 = frb["BetaParSquared"]/dens - bpar*bpar
+        tau = sigma_thompson*dens*self.mueinv/mh
+
+        nx,ny = frb.buff_size
+        self.bounds = frb.bounds
+        self.dx = (frb.bounds[1]-frb.bounds[0])/nx
+        self.dy = (frb.bounds[3]-frb.bounds[2])/ny
+        self.nx = nx
+        
+        self._compute_intensity(tau, Te, bpar, omega1, sigma1, kappa1, bperp2)
+                                                                                                                
+    def off_axis(self, L, center="c", width=(1, "unitary"), nx=800, source=None):
+        r""" Make an off-axis projection of the SZ signal.
+        
+        Parameters
+        ----------
+        L : array_like
+            The normal vector of the projection. 
+        center : array_like or string, optional
+            The center of the projection.
+        width : float or tuple
+            The width of the projection.
+        nx : integer, optional
+            The dimensions on a side of the projection image.
+        source : yt.data_objects.api.AMRData, optional
+            If specified, this will be the data source used for selecting regions to project.
+            Currently unsupported in yt 2.x.
+                    
+        Examples
+        --------
+        >>> L = np.array([0.5, 1.0, 0.75])
+        >>> szprj.off_axis(L, center="c", width=(2.0, "mpc"))
+        """
+        if iterable(width):
+            w = width[0]/self.pf.units[width[1]]
+        else:
+            w = width
+        if center == "c":
+            ctr = self.pf.domain_center
+        elif center == "max":
+            ctr = self.pf.h.find_max("Density")
+        else:
+            ctr = center
+
+        if source is not None:
+            mylog.error("Source argument is not currently supported for off-axis S-Z projections.")
+            raise NotImplementedError
+                
+        def _beta_par(field, data):
+            vpar = data["Density"]*(data["x-velocity"]*L[0]+
+                                    data["y-velocity"]*L[1]+
+                                    data["z-velocity"]*L[2])
+            return vpar/clight
+        add_field("BetaPar", function=_beta_par)
+
+        dens    = off_axis_projection(self.pf, ctr, L, w, nx, "Density")
+        Te      = off_axis_projection(self.pf, ctr, L, w, nx, "TeSZ")/dens
+        bpar    = off_axis_projection(self.pf, ctr, L, w, nx, "BetaPar")/dens
+        omega1  = off_axis_projection(self.pf, ctr, L, w, nx, "TSquared")/dens
+        omega1  = omega1/(Te*Te) - 1.
+        if self.high_order:
+            bperp2  = off_axis_projection(self.pf, ctr, L, w, nx, "BetaPerpSquared")/dens
+            sigma1  = off_axis_projection(self.pf, ctr, L, w, nx, "TBetaPar")/dens
+            sigma1  = sigma1/Te - bpar
+            kappa1  = off_axis_projection(self.pf, ctr, L, w, nx, "BetaParSquared")/dens
+            kappa1 -= bpar
+        else:
+            bperp2 = np.zeros((nx,nx))
+            sigma1 = np.zeros((nx,nx))
+            kappa1 = np.zeros((nx,nx))
+        tau = sigma_thompson*dens*self.mueinv/mh
+
+        self.bounds = np.array([-0.5*w, 0.5*w, -0.5*w, 0.5*w])
+        self.dx = w/nx
+        self.dy = w/nx
+        self.nx = nx
+
+        self._compute_intensity(tau, Te, bpar, omega1, sigma1, kappa1, bperp2)
+
+    def _compute_intensity(self, tau, Te, bpar, omega1, sigma1, kappa1, bperp2):
+
+        # Bad hack, but we get NaNs if we don't do something like this
+        small_beta = np.abs(bpar) < 1.0e-20
+        bpar[small_beta] = 1.0e-20
+                                                                   
+        comm = communication_system.communicators[-1]
+
+        nx, ny = self.nx,self.nx
+        signal = np.zeros((self.num_freqs,nx,ny))
+        xo = np.zeros((self.num_freqs))
+        
+        k = int(0)
+
+        start_i = comm.rank*nx/comm.size
+        end_i = (comm.rank+1)*nx/comm.size
+                        
+        pbar = get_pbar("Computing SZ signal.", nx*nx)
+
+        for i in xrange(start_i, end_i):
+            for j in xrange(ny):
+                xo[:] = self.xinit[:]
+                SZpack.compute_combo_means(xo, tau[i,j], Te[i,j],
+                                           bpar[i,j], omega1[i,j],
+                                           sigma1[i,j], kappa1[i,j], bperp2[i,j])
+                signal[:,i,j] = xo[:]
+                pbar.update(k)
+                k += 1
+
+        signal = comm.mpi_allreduce(signal)
+        
+        pbar.finish()
+                
+        for i, field in enumerate(self.freq_fields):
+            self.data[field] = ImageArray(I0*self.xinit[i]**3*signal[i,:,:])
+        self.data["Tau"] = ImageArray(tau)
+        self.data["TeSZ"] = ImageArray(Te)
+
+    @parallel_root_only
+    def write_fits(self, filename_prefix, clobber=True):
+        r""" Export images to a FITS file. Writes the SZ distortion in all
+        specified frequencies as well as the mass-weighted temperature and the
+        optical depth. Distance units are in kpc.  
+        
+        Parameters
+        ----------
+        filename_prefix : string
+            The prefix of the FITS filename.
+        clobber : boolean, optional
+            If the file already exists, do we overwrite?
+                    
+        Examples
+        --------
+        >>> szprj.write_fits("SZbullet", clobber=False)
+        """
+        coords = {}
+        coords["dx"] = self.dx*self.pf.units["kpc"]
+        coords["dy"] = self.dy*self.pf.units["kpc"]
+        coords["xctr"] = 0.0
+        coords["yctr"] = 0.0
+        coords["units"] = "kpc"
+        other_keys = {"Time" : self.pf.current_time}
+        write_fits(self.data, filename_prefix, clobber=clobber, coords=coords,
+                   other_keys=other_keys)
+
+    @parallel_root_only
+    def write_png(self, filename_prefix):
+        r""" Export images to PNG files. Writes the SZ distortion in all
+        specified frequencies as well as the mass-weighted temperature and the
+        optical depth. Distance units are in kpc. 
+        
+        Parameters
+        ----------
+        filename_prefix : string
+            The prefix of the image filenames.
+                
+        Examples
+        --------
+        >>> szprj.write_png("SZsloshing")
+        """     
+        extent = tuple([bound*self.pf.units["kpc"] for bound in self.bounds])
+        for field, image in self.items():
+            filename=filename_prefix+"_"+field+".png"
+            label = self.display_names[field]
+            if self.units[field] is not None:
+                label += " ("+self.units[field]+")"
+            write_projection(image, filename, colorbar_label=label, take_log=False,
+                             extent=extent, xlabel=r"$\mathrm{x\ (kpc)}$",
+                             ylabel=r"$\mathrm{y\ (kpc)}$")
+
+    @parallel_root_only
+    def write_hdf5(self, filename):
+        r"""Export the set of S-Z fields to a set of HDF5 datasets.
+        
+        Parameters
+        ----------
+        filename : string
+            This file will be opened in "write" mode.
+        
+        Examples
+        --------
+        >>> szprj.write_hdf5("SZsloshing.h5")                        
+        """
+        import h5py
+        f = h5py.File(filename, "w")
+        for field, data in self.items():
+            f.create_dataset(field,data=data)
+        f.close()
+   
+    def keys(self):
+        return self.data.keys()
+
+    def items(self):
+        return self.data.items()
+
+    def values(self):
+        return self.data.values()
+    
+    def has_key(self, key):
+        return key in self.data.keys()
+
+    def __getitem__(self, key):
+        return self.data[key]
+
+    @property
+    def shape(self):
+        return (self.nx,self.nx)
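
For orientation, a hedged end-to-end usage sketch of the new module, following the docstring examples above; it assumes SZpack is installed and that the dataset path is loadable from the working directory:

    from yt.mods import load
    from yt.analysis_modules.api import SZProjection

    pf = load("DD0010/moving7_0010")
    freqs = [90., 180., 240.]                       # GHz
    szprj = SZProjection(pf, freqs, high_order=True)

    szprj.on_axis("z", width=(1.0, "mpc"), nx=400)  # project along z
    szprj.write_fits("SZ_moving7")                  # FITS, kpc coordinates
    szprj.write_png("SZ_moving7")                   # one PNG per field
    image = szprj["90_GHz"]                         # ImageArray, (400, 400)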

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/analysis_modules/sunyaev_zeldovich/setup.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/setup.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('sunyaev_zeldovich', parent_package, top_path)
+    config.add_subpackage("tests")
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -0,0 +1,139 @@
+"""
+Unit test the sunyaev_zeldovich analysis module.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.frontends.stream.api import load_uniform_grid
+from yt.funcs import get_pbar, mylog
+from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, \
+     mh, cm_per_km, kboltz, Tcmb, hcgs, clight, sigma_thompson
+from yt.testing import *
+from yt.utilities.answer_testing.framework import requires_pf, \
+     GenericArrayTest, data_dir_load, GenericImageTest
+try:
+    from yt.analysis_modules.sunyaev_zeldovich.projection import SZProjection, I0
+except ImportError:
+    pass
+import numpy as np
+try:
+    import SZpack
+except ImportError:
+    pass
+
+mue = 1./0.88
+freqs = np.array([30., 90., 240.])
+
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+def full_szpack3d(pf, xo):
+    data = pf.h.grids[0]
+    dz = pf.h.get_smallest_dx()*pf.units["cm"]
+    nx,ny,nz = data["Density"].shape
+    dn = np.zeros((nx,ny,nz))
+    Dtau = sigma_thompson*data["Density"]/(mh*mue)*dz
+    Te = data["Temperature"]/K_per_keV
+    betac = data["z-velocity"]/clight
+    pbar = get_pbar("Computing 3-D cell-by-cell S-Z signal for comparison.", nx)
+    for i in xrange(nx):
+        pbar.update(i)
+        for j in xrange(ny):
+            for k in xrange(nz):
+                dn[i,j,k] = SZpack.compute_3d(xo, Dtau[i,j,k],
+                                              Te[i,j,k], betac[i,j,k],
+                                              1.0, 0.0, 0.0, 1.0e-5)
+    pbar.finish()
+    return I0*xo**3*np.sum(dn, axis=2)
+
+def setup_cluster():
+
+    R = 1000.
+    r_c = 100.
+    rho_c = 1.673e-26
+    beta = 1.
+    T0 = 4.
+    nx,ny,nz = 16,16,16
+    c = 0.17
+    a_c = 30.
+    a = 200.
+    v0 = 300.*cm_per_km
+    ddims = (nx,ny,nz)
+
+    x, y, z = np.mgrid[-R:R:nx*1j,
+                       -R:R:ny*1j,
+                       -R:R:nz*1j]
+
+    r = np.sqrt(x**2+y**2+z**2)
+
+    dens = np.zeros(ddims)
+    dens = rho_c*(1.+(r/r_c)**2)**(-1.5*beta)
+    temp = T0*K_per_keV/(1.+r/a)*(c+r/a_c)/(1.+r/a_c)
+    velz = v0*temp/(T0*K_per_keV)
+
+    data = {}
+    data["Density"] = dens
+    data["Temperature"] = temp
+    data["x-velocity"] = np.zeros(ddims)
+    data["y-velocity"] = np.zeros(ddims)
+    data["z-velocity"] = velz
+
+    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
+
+    L = 2*R*cm_per_kpc
+    dl = L/nz
+
+    pf = load_uniform_grid(data, ddims, L, bbox=bbox)
+
+    return pf
+
+@requires_module("SZpack")
+def test_projection():
+    pf = setup_cluster()
+    nx,ny,nz = pf.domain_dimensions
+    xinit = 1.0e9*hcgs*freqs/(kboltz*Tcmb)
+    szprj = SZProjection(pf, freqs, mue=mue, high_order=True)
+    szprj.on_axis(2, nx=nx)
+    deltaI = np.zeros((3,nx,ny))
+    for i in xrange(3):
+        deltaI[i,:,:] = full_szpack3d(pf, xinit[i])
+        yield assert_almost_equal, deltaI[i,:,:], szprj["%d_GHz" % int(freqs[i])], 6
+
+M7 = "DD0010/moving7_0010"
+@requires_module("SZpack")
+@requires_pf(M7)
+def test_M7_onaxis():
+    pf = data_dir_load(M7)
+    szprj = SZProjection(pf, freqs)
+    szprj.on_axis(2, nx=100)
+    def onaxis_array_func():
+        return szprj.data
+    def onaxis_image_func(filename_prefix):
+        szprj.write_png(filename_prefix)
+    for test in [GenericArrayTest(pf, onaxis_array_func),
+                 GenericImageTest(pf, onaxis_image_func, 3)]:
+        test_M7_onaxis.__name__ = test.description
+        yield test
+
+@requires_module("SZpack")
+@requires_pf(M7)
+def test_M7_offaxis():
+    pf = data_dir_load(M7)
+    szprj = SZProjection(pf, freqs)
+    szprj.off_axis(np.array([0.1,-0.2,0.4]), nx=100)
+    def offaxis_array_func():
+        return szprj.data
+    def offaxis_image_func(filename_prefix):
+        szprj.write_png(filename_prefix)
+    for test in [GenericArrayTest(pf, offaxis_array_func),
+                 GenericImageTest(pf, offaxis_image_func, 3)]:
+        test_M7_offaxis.__name__ = test.description
+        yield test
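
For reference, setup_cluster builds a standard beta-model cluster; in the notation of the code its analytic profiles are

    \rho(r) = \rho_c \left[ 1 + (r/r_c)^2 \right]^{-3\beta/2}
    T(r)    = \frac{T_0}{1 + r/a} \, \frac{c + r/a_c}{1 + r/a_c}
    v_z(r)  = v_0 \, \frac{T(r)}{T_0}

with r = sqrt(x^2 + y^2 + z^2). Tying the z-velocity to the temperature profile makes the kinetic (velocity-dependent) terms handed to SZpack vary across the grid, so the comparison against full_szpack3d exercises more than just the thermal signal.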

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -498,7 +498,7 @@
             points[:, 2] = points[:, 2] / self.period[2]
             fKD.qv_many = points.T
             fKD.nn_tags = np.asfortranarray(np.empty((1, points.shape[0]), dtype='int64'))
-            find_many_nn_nearest_neighbors()
+            fKD.find_many_nn_nearest_neighbors()
             # The -1 is for fortran counting.
             n = fKD.nn_tags[0,:] - 1
         return n

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -52,7 +52,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold310',
+    gold_standard_filename = 'gold311',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None',
     thread_field_detection = 'False'

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -113,7 +113,7 @@
     _domain_ind = None
 
     def select_blocks(self, selector):
-        mask = self.oct_handler.mask(selector)
+        mask = self.oct_handler.mask(selector, domain_id = self.domain_id)
         mask = self._reshape_vals(mask)
         slicer = OctreeSubsetBlockSlice(self)
         for i, sl in slicer:
@@ -271,12 +271,14 @@
 
     @property
     def LeftEdge(self):
-        LE = self._fcoords[0,0,0,self.ind,:] - self._fwidth[0,0,0,self.ind,:]*0.5
+        LE = (self._fcoords[0,0,0,self.ind,:]
+            - self._fwidth[0,0,0,self.ind,:])*0.5
         return LE
 
     @property
     def RightEdge(self):
-        RE = self._fcoords[1,1,1,self.ind,:] + self._fwidth[1,1,1,self.ind,:]*0.5
+        RE = (self._fcoords[-1,-1,-1,self.ind,:]
+            + self._fwidth[-1,-1,-1,self.ind,:])*0.5
         return RE
 
     @property

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -3,7 +3,8 @@
 cimport numpy as np
 import sys 
 
-from yt.geometry.selection_routines cimport SelectorObject, AlwaysSelector
+from yt.geometry.selection_routines cimport \
+    SelectorObject, AlwaysSelector, OctreeSubsetSelector
 from yt.utilities.lib.fp_utils cimport imax
 from yt.geometry.oct_container cimport \
     SparseOctreeContainer
@@ -122,7 +123,9 @@
     void artio_sfc_coords( artio_fileset_handle *handle, int64_t index, int coords[3] ) nogil
 
 cdef void check_artio_status(int status, char *fname="[unknown]"):
-    if status!=ARTIO_SUCCESS :
+    if status != ARTIO_SUCCESS:
+        import traceback
+        traceback.print_stack()
         callername = sys._getframe().f_code.co_name
         nline = sys._getframe().f_lineno
         raise RuntimeError('failure with status', status, 'in function',fname,'from caller', callername, nline)
@@ -487,8 +490,7 @@
 
         return (fcoords, ires, data)
 
-    def root_sfc_ranges_all(self) :
-        cdef int max_range_size = 1024
+    def root_sfc_ranges_all(self, int max_range_size = 1024) :
         cdef int64_t sfc_start, sfc_end
         cdef artio_selection *selection
 
@@ -502,8 +504,8 @@
         artio_selection_destroy(selection)
         return sfc_ranges
 
-    def root_sfc_ranges(self, SelectorObject selector) :
-        cdef int max_range_size = 1024
+    def root_sfc_ranges(self, SelectorObject selector,
+                        int max_range_size = 1024):
         cdef int coords[3]
         cdef int64_t sfc_start, sfc_end
         cdef np.float64_t left[3]
@@ -561,6 +563,10 @@
     cdef np.float64_t dds[3]
     cdef np.int64_t dims[3]
     cdef public np.int64_t total_octs
+    cdef np.int64_t *doct_count
+    cdef np.int64_t **pcount
+    cdef float **root_mesh_data
+    cdef np.int64_t nvars[2]
 
     def __init__(self, domain_dimensions, # cells
                  domain_left_edge,
@@ -568,6 +574,7 @@
                  artio_fileset artio_handle,
                  sfc_start, sfc_end):
         cdef int i
+        cdef np.int64_t sfc
         self.sfc_start = sfc_start
         self.sfc_end = sfc_end
         self.artio_handle = artio_handle
@@ -575,25 +582,54 @@
         self.octree_handler = None
         self.handle = artio_handle.handle
         self.oct_count = None
+        self.root_mesh_data = NULL
+        self.pcount = <np.int64_t **> malloc(sizeof(np.int64_t*)
+            * artio_handle.num_species)
+        self.nvars[0] = artio_handle.num_species
+        self.nvars[1] = artio_handle.num_grid_variables
+        for i in range(artio_handle.num_species):
+            self.pcount[i] = <np.int64_t*> malloc(sizeof(np.int64_t)
+                * (self.sfc_end - self.sfc_start + 1))
+            for sfc in range(self.sfc_end - self.sfc_start + 1):
+                self.pcount[i][sfc] = 0
         for i in range(3):
             self.dims[i] = domain_dimensions[i]
             self.DLE[i] = domain_left_edge[i]
             self.DRE[i] = domain_right_edge[i]
             self.dds[i] = (self.DRE[i] - self.DLE[i])/self.dims[i]
 
+    def __dealloc__(self):
+        cdef int i
+        for i in range(self.nvars[0]):
+            free(self.pcount[i])
+        for i in range(self.nvars[1]):
+            free(self.root_mesh_data[i])
+        free(self.pcount)
+        free(self.root_mesh_data)
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
     def construct_mesh(self):
-        cdef int status, level
-        cdef np.int64_t sfc, oc
+        cdef int status, level, ngv
+        cdef np.int64_t sfc, oc, i
         cdef double dpos[3]
         cdef int num_oct_levels
         cdef int max_level = self.artio_handle.max_level
         cdef int *num_octs_per_level = <int *>malloc(
             (max_level + 1)*sizeof(int))
+        cdef int num_species = self.artio_handle.num_species
+        cdef int *num_particles_per_species =  <int *>malloc(
+            sizeof(int)*num_species) 
         cdef ARTIOOctreeContainer octree
+        ngv = self.nvars[1]
+        cdef float *grid_variables = <float *>malloc(
+            ngv * sizeof(float))
         self.octree_handler = octree = ARTIOOctreeContainer(self)
+        self.root_mesh_data = <float **>malloc(sizeof(float *) * ngv)
+        for i in range(ngv):
+            self.root_mesh_data[i] = <float *>malloc(sizeof(float) * \
+                (self.sfc_end - self.sfc_start + 1))
         # We want to pre-allocate an array of root pointers.  In the future,
         # this will be pre-determined by the ARTIO library.  However, because
         # realloc plays havoc with our tree searching, we can't utilize an
@@ -606,7 +642,11 @@
         check_artio_status(status) 
         for sfc in range(self.sfc_start, self.sfc_end + 1):
             status = artio_grid_read_root_cell_begin( self.handle,
-                sfc, dpos, NULL, &num_oct_levels, num_octs_per_level)
+                sfc, dpos, grid_variables, &num_oct_levels,
+                num_octs_per_level)
+            for i in range(ngv):
+                self.root_mesh_data[i][sfc - self.sfc_start] = \
+                    grid_variables[i]
             check_artio_status(status)
             if num_oct_levels > 0:
                 oc = 0
@@ -618,13 +658,39 @@
                     num_octs_per_level, sfc)
             status = artio_grid_read_root_cell_end( self.handle )
             check_artio_status(status)
+        status = artio_grid_clear_sfc_cache( self.handle)
+        check_artio_status(status)
+        # Now particles
+        status = artio_particle_cache_sfc_range(self.handle, self.sfc_start,
+                                            self.sfc_end)
+        check_artio_status(status) 
+        for sfc in range(self.sfc_start, self.sfc_end + 1):
+            # Now particles
+            status = artio_particle_read_root_cell_begin( self.handle,
+                    sfc, num_particles_per_species )
+            check_artio_status(status)
+
+            for i in range(num_species):
+                self.pcount[i][sfc - self.sfc_start] = \
+                    num_particles_per_species[i]
+
+            status = artio_particle_read_root_cell_end( self.handle)
+            check_artio_status(status)
+
+        status = artio_particle_clear_sfc_cache( self.handle)
+        check_artio_status(status)
+
+        free(grid_variables)
         free(num_octs_per_level)
+        free(num_particles_per_species)
+        self.oct_count = oct_count
+        self.doct_count = <np.int64_t *> oct_count.data
         self.root_mesh_handler = ARTIORootMeshContainer(self)
-        self.oct_count = oct_count
 
     def free_mesh(self):
         self.octree_handler = None
         self.root_mesh_handler = None
+        self.doct_count = NULL
         self.oct_count = None
 
 def get_coords(artio_fileset handle, np.int64_t s):
@@ -661,9 +727,11 @@
     # this, we will avoid creating it as long as possible.
 
     cdef public artio_fileset artio_handle
+    cdef ARTIOSFCRangeHandler range_handler
     cdef np.int64_t level_indices[32]
 
     def __init__(self, ARTIOSFCRangeHandler range_handler):
+        self.range_handler = range_handler
         self.artio_handle = range_handler.artio_handle
         # Note the final argument is partial_coverage, which indicates whether
         # or not an Oct can be partially refined.
@@ -781,6 +849,12 @@
         #   * Enable preloading during mesh initialization
         #   * Calculate domain indices on the fly rather than with a
         #     double-loop to calculate domain_counts
+        # The cons should be in order
+        cdef np.int64_t sfc_start, sfc_end
+        sfc_start = self.domains[0].con_id
+        sfc_end = self.domains[self.num_domains - 1].con_id
+        status = artio_grid_cache_sfc_range(handle, sfc_start, sfc_end )
+        check_artio_status(status) 
         cdef np.int64_t offset = 0 
         for si in range(self.num_domains):
             sfc = self.domains[si].con_id
@@ -814,6 +888,8 @@
                     dest[i + offset] = source[oct_ind, cell_inds[i + offset]]
             # Now, we offset by the actual number filled here.
             offset += domain_counts[si]
+        status = artio_grid_clear_sfc_cache(handle)
+        check_artio_status(status)
         free(field_ind)
         free(field_vals)
         free(grid_variables)
@@ -825,12 +901,18 @@
         sfc_start = self.domains[0].con_id
         sfc_end = self.domains[self.num_domains - 1].con_id
         rv = read_sfc_particles(self.artio_handle, sfc_start, sfc_end,
-                                0, fields)
+                                0, fields, self.range_handler.doct_count,
+                                self.range_handler.pcount)
         return rv
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 cdef read_sfc_particles(artio_fileset artio_handle,
                         np.int64_t sfc_start, np.int64_t sfc_end,
-                        int read_unrefined, fields):
+                        int read_unrefined, fields,
+                        np.int64_t *doct_count,
+                        np.int64_t **pcount):
     cdef int status, ispec, subspecies
     cdef np.int64_t sfc, particle, pid, ind, vind
     cdef int num_species = artio_handle.num_species
@@ -876,32 +958,16 @@
         vpoints[ispec].n_p = 0
         vpoints[ispec].n_s = 0
 
-    status = artio_particle_cache_sfc_range( handle,
-            sfc_start, sfc_end ) 
-    check_artio_status(status)
-
-    # We cache so we can figure out if the cell is refined or not.
-    status = artio_grid_cache_sfc_range(handle, sfc_start, sfc_end)
-    check_artio_status(status) 
-
     # Pass through once.  We want every single particle.
+    tp = 0
+    cdef np.int64_t c
     for sfc in range(sfc_start, sfc_end + 1):
-        status = artio_grid_read_root_cell_begin( handle,
-            sfc, dpos, NULL, &num_oct_levels, num_octs_per_level)
-        check_artio_status(status)
-        status = artio_grid_read_root_cell_end(handle)
-        check_artio_status(status)
-        if read_unrefined == 1 and num_oct_levels > 0: continue
-        if read_unrefined == 0 and num_oct_levels == 0: continue
-        status = artio_particle_read_root_cell_begin( handle, sfc,
-                num_particles_per_species )
-        check_artio_status(status)
+        c = doct_count[sfc - sfc_start]
+        if read_unrefined == 1 and c > 0: continue
+        if read_unrefined == 0 and c == 0: continue
 
         for ispec in range(num_species):
-            total_particles[ispec] += num_particles_per_species[ispec]
-
-        status = artio_particle_read_root_cell_end( handle )
-        check_artio_status(status)
+            total_particles[ispec] += pcount[ispec][sfc - sfc_start]
 
     # Now we allocate our final fields, which will be filled
     #for ispec in range(num_species):
@@ -916,41 +982,52 @@
             "species_%02u_secondary_variable_labels" % (species,), [])
         tp = total_particles[species]
         vp = &vpoints[species]
-        if field == "MASS":
+        if field == "MASS" and params["particle_species_mass"][species] != 0.0:
             vp.n_mass = 1
-            npf64arr = data[(species, field)] = np.zeros(tp, dtype="float64")
+            data[(species, field)] = np.zeros(tp, dtype="float64")
+            npf64arr = data[(species, field)]
             # We fill this *now*
             npf64arr += params["particle_species_mass"][species]
             vp.mass = <np.float64_t*> npf64arr.data
         elif field == "PID":
             vp.n_pid = 1
-            npi64arr = data[(species, field)] = np.zeros(tp, dtype="int64")
+            data[(species, field)] = np.zeros(tp, dtype="int64")
+            npi64arr = data[(species, field)]
             vp.pid = <np.int64_t*> npi64arr.data
         elif field == "SPECIES":
             vp.n_species = 1
-            npi8arr = data[(species, field)] = np.zeros(tp, dtype="int8")
+            data[(species, field)] = np.zeros(tp, dtype="int8")
+            npi8arr = data[(species, field)]
             # We fill this *now*
             npi8arr += species
             vp.species = <np.int8_t*> npi8arr.data
         elif npri_vars[species] > 0 and field in pri_vars :
-            npf64arr = data[(species, field)] = np.zeros(tp, dtype="float64")
+            data[(species, field)] = np.zeros(tp, dtype="float64")
+            npf64arr = data[(species, field)]
             vp.p_ind[vp.n_p] = pri_vars.index(field)
             vp.pvars[vp.n_p] = <np.float64_t *> npf64arr.data
             vp.n_p += 1
         elif nsec_vars[species] > 0 and field in sec_vars :
-            npf64arr = data[(species, field)] = np.zeros(tp, dtype="float64")
+            data[(species, field)] = np.zeros(tp, dtype="float64")
+            npf64arr = data[(species, field)]
             vp.s_ind[vp.n_s] = sec_vars.index(field)
             vp.svars[vp.n_s] = <np.float64_t *> npf64arr.data
             vp.n_s += 1
 
+    status = artio_particle_cache_sfc_range( handle,
+            sfc_start, sfc_end ) 
+    check_artio_status(status)
+
     for sfc in range(sfc_start, sfc_end + 1):
-        status = artio_grid_read_root_cell_begin( handle,
-            sfc, dpos, NULL, &num_oct_levels, num_octs_per_level)
+        c = doct_count[sfc - sfc_start]
         check_artio_status(status)
-        status = artio_grid_read_root_cell_end(handle)
-        check_artio_status(status)
-        if read_unrefined == 1 and num_oct_levels > 0: continue
-        if read_unrefined == 0 and num_oct_levels == 0: continue
+        if read_unrefined == 1 and c > 0: continue
+        if read_unrefined == 0 and c == 0: continue
+        c = 0
+        for ispec in range(num_species) : 
+            if accessed_species[ispec] == 0: continue
+            c += pcount[ispec][sfc - sfc_start]
+        if c == 0: continue
         status = artio_particle_read_root_cell_begin( handle, sfc,
                 num_particles_per_species )
         check_artio_status(status)
@@ -986,11 +1063,8 @@
         status = artio_particle_read_root_cell_end( handle )
         check_artio_status(status)
 
-    #status = artio_particle_clear_sfc_cache(handle)
-    #check_artio_status(status)
-
-    #status = artio_grid_clear_sfc_cache(handle)
-    #check_artio_status(status)
+    status = artio_particle_clear_sfc_cache(handle)
+    check_artio_status(status)
 
     free(num_octs_per_level)
     free(num_particles_per_species)
@@ -1011,11 +1085,15 @@
     cdef np.uint64_t sfc_start
     cdef np.uint64_t sfc_end
     cdef public object _last_mask
-    cdef public object _last_selector_id
+    cdef public np.int64_t _last_selector_id
+    cdef np.int64_t _last_mask_sum
     cdef ARTIOSFCRangeHandler range_handler
+    cdef np.uint8_t *sfc_mask
+    cdef np.int64_t nsfc
 
     def __init__(self, ARTIOSFCRangeHandler range_handler):
         cdef int i
+        cdef np.int64_t sfci
         for i in range(3):
             self.DLE[i] = range_handler.DLE[i]
             self.DRE[i] = range_handler.DRE[i]
@@ -1023,10 +1101,27 @@
             self.dds[i] = range_handler.dds[i]
         self.handle = range_handler.handle
         self.artio_handle = range_handler.artio_handle
-        self._last_mask = self._last_selector_id = None
+        self._last_mask = None
+        self._last_selector_id = -1
         self.sfc_start = range_handler.sfc_start
         self.sfc_end = range_handler.sfc_end
         self.range_handler = range_handler
+        # We assume that the number of octs has been created and filled
+        # already.  We no longer care about ANY of the SFCs that have octs
+        # inside them -- this goes for every operation that this object
+        # performs.
+        self.sfc_mask = <np.uint8_t *>malloc(sizeof(np.uint8_t) *
+          self.sfc_end - self.sfc_start + 1)
+        self.nsfc = 0
+        for sfci in range(self.sfc_end - self.sfc_start + 1):
+            if self.range_handler.oct_count[sfci] > 0:
+                self.sfc_mask[sfci] = 0
+            else:
+                self.sfc_mask[sfci] = 1
+                self.nsfc += 1
+
+    def __dealloc__(self):
+        free(self.sfc_mask)
 
     @cython.cdivision(True)
     cdef np.int64_t pos_to_sfc(self, np.float64_t pos[3]) nogil:
@@ -1054,19 +1149,24 @@
         cdef int i
         return self.mask(selector).sum()
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def icoords(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         # Note that num_octs does not have to equal sfc_end - sfc_start + 1.
-        cdef np.int64_t sfc
+        cdef np.int64_t sfc, sfci = -1
         cdef int acoords[3], i
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_cells = mask.sum()
+        num_cells = self._last_mask_sum
         cdef np.ndarray[np.int64_t, ndim=2] coords
         coords = np.empty((num_cells, 3), dtype="int64")
         cdef int filled = 0
         for sfc in range(self.sfc_start, self.sfc_end + 1):
-            if mask[sfc - self.sfc_start] == 0: continue
+            if self.sfc_mask[sfc - self.sfc_start] == 0: continue
+            sfci += 1
+            if mask[sfci] == 0: continue
             # Note that we do *no* checks on refinement here.  In fact, this
             # entire setup should not need to touch the disk except if the
             # artio sfc calculators need to.
@@ -1076,20 +1176,25 @@
             filled += 1
         return coords
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def fcoords(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         # Note that num_cells does not have to equal sfc_end - sfc_start + 1.
-        cdef np.int64_t sfc
+        cdef np.int64_t sfc, sfci = -1
         cdef np.float64_t pos[3]
         cdef int acoords[3], i
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_cells = mask.sum()
+        num_cells = self._last_mask_sum
         cdef np.ndarray[np.float64_t, ndim=2] coords
         coords = np.empty((num_cells, 3), dtype="float64")
         cdef int filled = 0
         for sfc in range(self.sfc_start, self.sfc_end + 1):
-            if mask[sfc - self.sfc_start] == 0: continue
+            if self.sfc_mask[sfc - self.sfc_start] == 0: continue
+            sfci += 1
+            if mask[sfci] == 0: continue
             # Note that we do *no* checks on refinement here.  In fact, this
             # entire setup should not need to touch the disk except if the
             # artio sfc calculators need to.
@@ -1099,23 +1204,29 @@
             filled += 1
         return coords
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def fwidth(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         cdef int i
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_cells = mask.sum()
+        num_cells = self._last_mask_sum
         cdef np.ndarray[np.float64_t, ndim=2] width
         width = np.zeros((num_cells, 3), dtype="float64")
         for i in range(3):
             width[:,i] = self.dds[i]
         return width
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def ires(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_cells = mask.sum()
+        num_cells = self._last_mask_sum
         cdef np.ndarray[np.int64_t, ndim=1] res
         res = np.zeros(num_cells, dtype="int64")
         return res
@@ -1132,7 +1243,7 @@
         # other.  Note that we *do* apply the selector here.
         cdef np.int64_t num_cells = -1
         cdef np.int64_t ind
-        cdef np.int64_t sfc
+        cdef np.int64_t sfc, sfci = -1
         cdef np.float64_t pos[3]
         cdef np.float64_t dpos[3]
         cdef int dim, status, filled = 0
@@ -1149,7 +1260,7 @@
             # Note that RAMSES can have partial refinement inside an Oct.  This
             # means we actually do want the number of Octs, not the number of
             # cells.
-            num_cells = mask.sum()
+            num_cells = self._last_mask_sum
             if dims > 1:
                 dest = np.zeros((num_cells, dims), dtype=source.dtype,
                     order='C')
@@ -1158,7 +1269,9 @@
         ddata = (<char*>dest.data) + offset*ss*dims
         ind = 0
         for sfc in range(self.sfc_start, self.sfc_end + 1):
-            if mask[sfc - self.sfc_start] == 0: continue
+            if self.sfc_mask[sfc - self.sfc_start] == 0: continue
+            sfci += 1
+            if mask[sfci] == 0: continue
             memcpy(ddata, sdata + ind, dims * ss)
             ddata += dims * ss
             filled += 1
@@ -1170,56 +1283,55 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def mask(self, SelectorObject selector, np.int64_t num_cells = -1):
+    def mask(self, SelectorObject selector, np.int64_t num_cells = -1,
+             int domain_id = -1): 
+        # We take a domain_id here to avoid subclassing
         cdef int i
         cdef np.float64_t pos[3]
-        cdef np.int64_t sfc
-        cdef np.ndarray[np.int64_t, ndim=1] oct_count
+        cdef np.int64_t sfc, sfci = -1
         if self._last_selector_id == hash(selector):
             return self._last_mask
-        if num_cells == -1:
-            # We need to count, but this process will only occur one time,
-            # since num_cells will later be cached.
-            num_cells = self.sfc_end - self.sfc_start + 1
-        mask = np.zeros((num_cells), dtype="uint8")
-        oct_count = self.range_handler.oct_count
+        cdef np.ndarray[np.uint8_t, ndim=1] mask
+        mask = np.zeros((self.nsfc), dtype="uint8")
+        self._last_mask_sum = 0
         for sfc in range(self.sfc_start, self.sfc_end + 1):
-            if oct_count[sfc - self.sfc_start] > 0: continue
+            if self.sfc_mask[sfc - self.sfc_start] == 0: continue
+            sfci += 1
             self.sfc_to_pos(sfc, pos)
             if selector.select_cell(pos, self.dds) == 0: continue
-            mask[sfc - self.sfc_start] = 1
+            mask[sfci] = 1
+            self._last_mask_sum += 1
         self._last_mask = mask.astype("bool")
         self._last_selector_id = hash(selector)
         return self._last_mask
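
mask() now memoizes on hash(selector) and caches the mask's population count
in _last_mask_sum, which icoords/fcoords/fwidth/ires/fill_sfc use to size
their outputs without another mask.sum() pass.  A minimal pure-Python sketch
of that caching (CachedMask and the callable selector are hypothetical):

    class CachedMask(object):
        def __init__(self, cells):
            self.cells = cells
            self._last_mask = None
            self._last_selector_id = -1   # -1 marks "nothing cached yet"
            self._last_mask_sum = 0

        def mask(self, selector):
            # Reuse the previous mask when the same selector comes back.
            if self._last_selector_id == hash(selector):
                return self._last_mask
            mask = [bool(selector(c)) for c in self.cells]
            self._last_mask_sum = sum(mask)
            self._last_mask = mask
            self._last_selector_id = hash(selector)
            return self._last_mask

-1 is a safe empty sentinel here because CPython's hash() never returns -1
(it is reserved for error signaling and remapped to -2), and it keeps the
attribute integer-typed rather than a None placeholder.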
 
+
     def fill_sfc_particles(self, fields):
         rv = read_sfc_particles(self.artio_handle,
                                 self.sfc_start, self.sfc_end,
-                                1, fields)
+                                1, fields, self.range_handler.doct_count,
+                                self.range_handler.pcount)
         return rv
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def fill_sfc(self, SelectorObject selector, field_indices):
         cdef np.ndarray[np.float64_t, ndim=1] dest
         cdef int n, status, i, di, num_oct_levels, nf, ngv, max_level
-        cdef np.int64_t sfc, num_cells
+        cdef np.int64_t sfc, num_cells, sfci = -1
         cdef np.float64_t val
-        cdef artio_fileset_handle *handle = self.artio_handle.handle
         cdef double dpos[3]
         # We duplicate some of the grid_variables stuff here so that we can
         # potentially release the GIL
         nf = len(field_indices)
         ngv = self.artio_handle.num_grid_variables
-        max_level = self.artio_handle.max_level
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector, -1)
-        num_cells = mask.sum()
+        num_cells = self._last_mask_sum
         tr = []
         for i in range(nf):
             tr.append(np.zeros(num_cells, dtype="float64"))
-        cdef int *num_octs_per_level = <int *>malloc(
-            (max_level + 1)*sizeof(int))
-        cdef float *grid_variables = <float *>malloc(
-            ngv * sizeof(float))
         cdef int* field_ind = <int*> malloc(
             nf * sizeof(int))
         cdef np.float64_t **field_vals = <np.float64_t**> malloc(
@@ -1232,29 +1344,23 @@
         # First we need to walk the mesh in the file.  Then we fill in the dest
         # location based on the file index.
         cdef int filled = 0
-        status = artio_grid_cache_sfc_range(handle,
-            self.sfc_start, self.sfc_end )
-        check_artio_status(status) 
+        cdef float **mesh_data = self.range_handler.root_mesh_data
         for sfc in range(self.sfc_start, self.sfc_end + 1):
-            if mask[sfc - self.sfc_start] == 0: continue
-            status = artio_grid_read_root_cell_begin( handle, sfc, 
-                    dpos, grid_variables, &num_oct_levels,
-                    num_octs_per_level)
-            check_artio_status(status) 
+            if self.sfc_mask[sfc - self.sfc_start] == 0: continue
+            sfci += 1
+            if mask[sfci] == 0: continue
             for i in range(nf):
-                field_vals[i][filled] = grid_variables[field_ind[i]]
+                field_vals[i][filled] = mesh_data[field_ind[i]][
+                    sfc - self.sfc_start]
             filled += 1
-            status = artio_grid_read_root_cell_end( handle )
-            check_artio_status(status)
         # Now we have all our sources.
-        #status = artio_grid_clear_sfc_cache(handle)
-        #check_artio_status(status)
         free(field_ind)
         free(field_vals)
-        free(grid_variables)
-        free(num_octs_per_level)
         return tr
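
fill_sfc no longer walks the file per selected root cell; it gathers from the
root_mesh_data arrays the range handler has already read.  A NumPy sketch of
that gather, collapsing the two-level mask into one boolean array for brevity
(all names and sizes illustrative):

    import numpy as np

    nsfc = 1000                                  # root cells in this SFC range
    ngv = 4                                      # grid variables on disk
    mesh_data = np.random.random((ngv, nsfc))    # preloaded by the range handler
    field_ind = [2, 0]                           # requested variable indices
    selected = np.random.random(nsfc) < 0.1      # selector mask over root cells

    # One contiguous gather per field replaces a begin/end read per cell.
    tr = [mesh_data[fi][selected].astype("float64") for fi in field_ind]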
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def deposit(self, ParticleDepositOperation pdeposit,
                 SelectorObject selector,
                 np.ndarray[np.float64_t, ndim=2] positions,
@@ -1262,21 +1368,26 @@
         # This implements the necessary calls to enable particle deposition to
         # occur as needed.
         cdef int nf, i, j
+        cdef np.int64_t sfc, sfci
         if fields is None:
             fields = []
         nf = len(fields)
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector, -1)
         cdef np.ndarray[np.int64_t, ndim=1] domain_ind
-        domain_ind = np.zeros(mask.shape[0], dtype="int64") - 1
+        domain_ind = np.zeros(self.sfc_end - self.sfc_start + 1,
+                              dtype="int64") - 1
         j = 0
-        for i in range(mask.shape[0]):
-            if mask[i] == 1:
-                domain_ind[i] = j
-                j += 1
+        sfci = -1
+        for sfc in range(self.sfc_start, self.sfc_end + 1):
+            if self.sfc_mask[sfc - self.sfc_start] == 0: continue
+            sfci += 1
+            if mask[sfci] == 0: 
+                continue
+            domain_ind[sfc - self.sfc_start] = j
+            j += 1
         cdef np.float64_t **field_pointers, *field_vals, pos[3], left_edge[3]
         cdef int coords[3]
-        cdef np.int64_t sfc
         cdef np.ndarray[np.float64_t, ndim=1] tarr
         field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
         field_vals = <np.float64_t*>alloca(sizeof(np.float64_t) * nf)
@@ -1306,5 +1417,72 @@
                 for j in range(nf):
                     field_pointers[j][i] = field_vals[j] 
 
+cdef class SFCRangeSelector(SelectorObject):
+    
+    cdef SelectorObject base_selector
+    cdef ARTIOSFCRangeHandler range_handler
+    cdef ARTIORootMeshContainer mesh_container
+    cdef np.int64_t sfc_start, sfc_end
+
+    def __init__(self, dobj):
+        self.base_selector = dobj.base_selector
+        self.mesh_container = dobj.oct_handler
+        self.range_handler = self.mesh_container.range_handler
+        self.sfc_start = self.mesh_container.sfc_start
+        self.sfc_end = self.mesh_container.sfc_end
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def select_grids(self,
+                     np.ndarray[np.float64_t, ndim=2] left_edges,
+                     np.ndarray[np.float64_t, ndim=2] right_edges,
+                     np.ndarray[np.int32_t, ndim=2] levels):
+        raise RuntimeError
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_sphere(self, np.float64_t pos[3], np.float64_t radius) nogil:
+        return 1
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil:
+        return self.select_point(pos)
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_point(self, np.float64_t pos[3]) nogil:
+        cdef np.int64_t sfc = self.mesh_container.pos_to_sfc(pos)
+        if sfc > self.sfc_end: return 0
+        cdef np.int64_t oc = self.range_handler.doct_count[
+            sfc - self.sfc_start]
+        if oc > 0: return 0
+        return 1
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_bbox(self, np.float64_t left_edge[3],
+                               np.float64_t right_edge[3]) nogil:
+        return self.base_selector.select_bbox(left_edge, right_edge)
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_grid(self, np.float64_t left_edge[3],
+                         np.float64_t right_edge[3], np.int32_t level,
+                         Oct *o = NULL) nogil:
+        # Because visitors now use select_grid, we should be explicitly
+        # checking this.
+        return self.base_selector.select_grid(left_edge, right_edge, level, o)
+    
+    def _hash_vals(self):
+        return (hash(self.base_selector), self.sfc_start, self.sfc_end)
+
 sfc_subset_selector = AlwaysSelector
+#sfc_subset_selector = SFCRangeSelector
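
SFCRangeSelector delegates box-level queries to the data object's base
selector and accepts a point only when its SFC is in range and its root cell
holds no octs; note it is left disabled for now (sfc_subset_selector still
points at AlwaysSelector).  A toy pure-Python analogue of the delegation
(names hypothetical, and taking an SFC index directly instead of converting a
position):

    class ToyRangeSelector(object):
        def __init__(self, base_selector, sfc_start, sfc_end, oct_count):
            self.base_selector = base_selector
            self.sfc_start, self.sfc_end = sfc_start, sfc_end
            self.oct_count = oct_count      # octs per SFC, offset by sfc_start

        def select_point(self, sfc):
            if sfc < self.sfc_start or sfc > self.sfc_end:
                return 0
            if self.oct_count[sfc - self.sfc_start] > 0:
                return 0                    # refined: the oct container owns it
            return 1

        def select_bbox(self, left_edge, right_edge):
            # Coarse culling defers to whatever the data object selected.
            return self.base_selector.select_bbox(left_edge, right_edge)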
 

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -72,6 +72,7 @@
         return self.sfc_end
 
     def fill(self, fields, selector):
+        if len(fields) == 0: return []
         handle = self.oct_handler.artio_handle
         field_indices = [handle.parameters["grid_variable_labels"].index(
                         yt_to_art[f]) for (ft, f) in fields]
@@ -88,6 +89,7 @@
         return tr
 
     def fill_particles(self, fields):
+        if len(fields) == 0: return {}
         art_fields = []
         for s, f in fields:
             fn = yt_to_art[f]
@@ -127,6 +129,7 @@
 
     def fill(self, fields, selector):
         # We know how big these will be.
+        if len(fields) == 0: return []
         handle = self.pf._handle
         field_indices = [handle.parameters["grid_variable_labels"].index(
                         yt_to_art[f]) for (ft, f) in fields]
@@ -167,6 +170,10 @@
         self.float_type = np.float64
         super(ARTIOGeometryHandler, self).__init__(pf, data_style)
 
+    @property
+    def max_range(self):
+        return self.parameter_file.max_range
+
     def _setup_geometry(self):
         mylog.debug("Initializing Geometry Handler empty for now.")
 
@@ -241,15 +248,18 @@
             nz = getattr(dobj, "_num_zones", 0)
             if all_data:
                 mylog.debug("Selecting entire artio domain")
-                list_sfc_ranges = self.pf._handle.root_sfc_ranges_all()
+                list_sfc_ranges = self.pf._handle.root_sfc_ranges_all(
+                    max_range_size = self.max_range)
             elif sfc_start is not None and sfc_end is not None:
                 mylog.debug("Restricting to %s .. %s", sfc_start, sfc_end)
                 list_sfc_ranges = [(sfc_start, sfc_end)]
             else:
                 mylog.debug("Running selector on artio base grid")
                 list_sfc_ranges = self.pf._handle.root_sfc_ranges(
-                    dobj.selector)
+                    dobj.selector, max_range_size = self.max_range)
             ci = []
+            #v = np.array(list_sfc_ranges)
+            #list_sfc_ranges = [ (v.min(), v.max()) ]
             for (start, end) in list_sfc_ranges:
                 range_handler = ARTIOSFCRangeHandler(
                     self.pf.domain_dimensions,
@@ -322,6 +332,7 @@
     _fieldinfo_known = KnownARTIOFields
     _particle_mass_name = "particle_mass"
     _particle_coordinates_name = "Coordinates"
+    max_range = 1024
 
     def __init__(self, filename, data_style='artio',
                  storage_filename=None):
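
max_range caps how many root SFCs a single range handler spans, and both
root_sfc_ranges calls now pass it through as max_range_size, so one selection
can produce several chunked handlers.  A sketch of that chunking, illustrative
only -- the real splitting happens inside the ARTIO layer:

    def split_sfc_range(sfc_start, sfc_end, max_range_size=1024):
        # Yield (start, end) chunks covering [sfc_start, sfc_end], each
        # spanning at most max_range_size SFCs.
        start = sfc_start
        while start <= sfc_end:
            end = min(start + max_range_size - 1, sfc_end)
            yield (start, end)
            start = end + 1

    # list(split_sfc_range(0, 2500)) -> [(0, 1023), (1024, 2047), (2048, 2500)]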

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/frontends/artio/tests/test_outputs.py
--- a/yt/frontends/artio/tests/test_outputs.py
+++ b/yt/frontends/artio/tests/test_outputs.py
@@ -19,7 +19,8 @@
     requires_pf, \
     data_dir_load, \
     PixelizedProjectionValuesTest, \
-    FieldValuesTest
+    FieldValuesTest, \
+    create_obj
 from yt.frontends.artio.api import ARTIOStaticOutput
 
 _fields = ("Temperature", "Density", "VelocityMagnitude",
@@ -31,12 +32,15 @@
     pf = data_dir_load(sizmbhloz)
     yield assert_equal, str(pf), "sizmbhloz-clref04SNth-rs9_a0.9011.art"
     dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
-    for field in _fields:
-        for axis in [0, 1, 2]:
-            for ds in dso:
+    for ds in dso:
+        for field in _fields:
+            for axis in [0, 1, 2]:
                 for weight_field in [None, "Density"]:
                     yield PixelizedProjectionValuesTest(
                         sizmbhloz, axis, field, weight_field,
                         ds)
-                yield FieldValuesTest(
-                        sizmbhloz, field, ds)
+            yield FieldValuesTest(sizmbhloz, field, ds)
+        dobj = create_obj(pf, ds)
+        s1 = dobj["Ones"].sum()
+        s2 = sum(mask.sum() for block, mask in dobj.blocks)
+        yield assert_equal, s1, s2

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -29,6 +29,7 @@
     pf = data_dir_load(gc)
     yield assert_equal, str(pf), "data.0077.3d.hdf5"
     for test in small_patch_amr(gc, _fields):
+        test_gc.__name__ = test.description
         yield test
 
 tb = "TurbBoxLowRes/data.0005.3d.hdf5"
@@ -37,4 +38,5 @@
     pf = data_dir_load(tb)
     yield assert_equal, str(pf), "data.0005.3d.hdf5"
     for test in small_patch_amr(tb, _fields):
+        test_tb.__name__ = test.description
         yield test
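
The __name__ assignments here and in the tests below exploit how nose names
generated tests: the label appears to be taken from the generator function's
__name__ as each case is yielded, so rebinding it before each yield makes
every answer test report under its own description instead of a shared name.
A self-contained sketch of the pattern (all_answer_tests is a hypothetical
stand-in):

    def all_answer_tests():
        # Stand-in yielding objects with a .description attribute.
        class T(object):
            description = "FieldValues_data.0077.3d.hdf5_Density"
            def __call__(self):
                pass
        return [T()]

    def test_example():
        for test in all_answer_tests():
            test_example.__name__ = test.description
            yield test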

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -29,6 +29,7 @@
     pf = data_dir_load(m7)
     yield assert_equal, str(pf), "moving7_0010"
     for test in small_patch_amr(m7, _fields):
+        test_moving7.__name__ = test.description
         yield test
 
 g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
@@ -37,4 +38,5 @@
     pf = data_dir_load(g30)
     yield assert_equal, str(pf), "galaxy0030"
     for test in big_patch_amr(g30, _fields):
+        test_galaxy0030.__name__ = test.description
         yield test

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -29,6 +29,7 @@
     pf = data_dir_load(sloshing)
     yield assert_equal, str(pf), "sloshing_low_res_hdf5_plt_cnt_0300"
     for test in small_patch_amr(sloshing, _fields):
+        test_sloshing.__name__ = test.description
         yield test
 
 _fields_2d = ("Temperature", "Density")
@@ -39,4 +40,5 @@
     pf = data_dir_load(wt)
     yield assert_equal, str(pf), "windtunnel_4lev_hdf5_plt_cnt_0030"
     for test in small_patch_amr(wt, _fields_2d):
+        test_wind_tunnel.__name__ = test.description
         yield test

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/frontends/moab/tests/test_c5.py
--- a/yt/frontends/moab/tests/test_c5.py
+++ b/yt/frontends/moab/tests/test_c5.py
@@ -49,7 +49,6 @@
             ray = pf.h.ray(p1, p2)
             yield assert_almost_equal, ray["dts"].sum(dtype="float64"), 1.0, 8
     for field in _fields:
-        for axis in [0, 1, 2]:
-            for ds in dso:
-                yield FieldValuesTest(c5, field, ds)
+        for ds in dso:
+            yield FieldValuesTest(c5, field, ds)
 

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/frontends/orion/tests/test_outputs.py
--- a/yt/frontends/orion/tests/test_outputs.py
+++ b/yt/frontends/orion/tests/test_outputs.py
@@ -29,6 +29,7 @@
     pf = data_dir_load(radadvect)
     yield assert_equal, str(pf), "plt00000"
     for test in small_patch_amr(radadvect, _fields):
+        test_radadvect.__name__ = test.description
         yield test
 
 rt = "RadTube/plt00500"
@@ -37,4 +38,5 @@
     pf = data_dir_load(rt)
     yield assert_equal, str(pf), "plt00500"
     for test in small_patch_amr(rt, _fields):
+        test_radtube.__name__ = test.description
         yield test

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -19,7 +19,8 @@
     requires_pf, \
     data_dir_load, \
     PixelizedProjectionValuesTest, \
-    FieldValuesTest
+    FieldValuesTest, \
+    create_obj
 from yt.frontends.artio.api import ARTIOStaticOutput
 
 _fields = ("Temperature", "Density", "VelocityMagnitude",
@@ -31,13 +32,15 @@
     pf = data_dir_load(output_00080)
     yield assert_equal, str(pf), "info_00080"
     dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
-    for field in _fields:
-        for axis in [0, 1, 2]:
-            for ds in dso:
+    for ds in dso:
+        for field in _fields:
+            for axis in [0, 1, 2]:
                 for weight_field in [None, "Density"]:
                     yield PixelizedProjectionValuesTest(
                         output_00080, axis, field, weight_field,
                         ds)
-                yield FieldValuesTest(
-                        output_00080, field, ds)
-
+            yield FieldValuesTest(output_00080, field, ds)
+        dobj = create_obj(pf, ds)
+        s1 = dobj["Ones"].sum()
+        s2 = sum(mask.sum() for block, mask in dobj.blocks)
+        yield assert_equal, s1, s2

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -436,7 +436,10 @@
 
         self._unit_base = unit_base or {}
         self._cosmology_parameters = cosmology_parameters
+        if parameter_file is not None:
+            parameter_file = os.path.abspath(parameter_file)
         self._param_file = parameter_file
+        filename = os.path.abspath(filename)
         super(TipsyStaticOutput, self).__init__(filename, data_style)
 
     def __repr__(self):

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/frontends/sph/tests/test_owls.py
--- a/yt/frontends/sph/tests/test_owls.py
+++ b/yt/frontends/sph/tests/test_owls.py
@@ -21,7 +21,8 @@
     big_patch_amr, \
     data_dir_load, \
     PixelizedProjectionValuesTest, \
-    FieldValuesTest
+    FieldValuesTest, \
+    create_obj
 from yt.frontends.sph.api import OWLSStaticOutput
 
 _fields = (("deposit", "all_density"), ("deposit", "all_count"),
@@ -40,13 +41,15 @@
     tot = sum(dd[ptype,"Coordinates"].shape[0]
               for ptype in pf.particle_types if ptype != "all")
     yield assert_equal, tot, (2*128*128*128)
-    for field in _fields:
-        for axis in [0, 1, 2]:
-            for ds in dso:
+    for ds in dso:
+        for field in _fields:
+            for axis in [0, 1, 2]:
                 for weight_field in [None, "Density"]:
                     yield PixelizedProjectionValuesTest(
                         os33, axis, field, weight_field,
                         ds)
-                yield FieldValuesTest(
-                        os33, field, ds)
-
+            yield FieldValuesTest(os33, field, ds)
+        dobj = create_obj(pf, ds)
+        s1 = dobj["Ones"].sum()
+        s2 = sum(mask.sum() for block, mask in dobj.blocks)
+        yield assert_equal, s1, s2

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/frontends/sph/tests/test_tipsy.py
--- /dev/null
+++ b/yt/frontends/sph/tests/test_tipsy.py
@@ -0,0 +1,95 @@
+"""
+Tipsy tests using the AGORA dataset
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load, \
+    PixelizedProjectionValuesTest, \
+    FieldValuesTest, \
+    create_obj
+from yt.frontends.sph.api import TipsyStaticOutput
+
+_fields = (("deposit", "all_density"),
+           ("deposit", "all_count"),
+           ("deposit", "DarkMatter_density"),
+)
+
+pkdgrav = "halo1e11_run1.00400/halo1e11_run1.00400"
+@requires_pf(pkdgrav, file_check = True)
+def test_pkdgrav():
+    cosmology_parameters = dict(current_redshift = 0.0,
+                                omega_lambda = 0.728,
+                                omega_matter = 0.272,
+                                hubble_constant = 0.702)
+    kwargs = dict(endian="<",
+                  field_dtypes = {"Coordinates": "d"},
+                  cosmology_parameters = cosmology_parameters,
+                  unit_base = {'mpchcm': 1.0/60.0},
+                  n_ref = 64)
+    pf = data_dir_load(pkdgrav, TipsyStaticOutput, (), kwargs)
+    yield assert_equal, str(pf), "halo1e11_run1.00400"
+    dso = [ None, ("sphere", ("c", (0.3, 'unitary')))]
+    dd = pf.h.all_data()
+    yield assert_equal, dd["Coordinates"].shape, (26847360, 3)
+    tot = sum(dd[ptype,"Coordinates"].shape[0]
+              for ptype in pf.particle_types if ptype != "all")
+    yield assert_equal, tot, 26847360
+    for ds in dso:
+        for field in _fields:
+            for axis in [0, 1, 2]:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        pf, axis, field, weight_field,
+                        ds)
+            yield FieldValuesTest(pf, field, ds)
+        dobj = create_obj(pf, ds)
+        s1 = dobj["Ones"].sum()
+        s2 = sum(mask.sum() for block, mask in dobj.blocks)
+        yield assert_equal, s1, s2
+
+gasoline = "agora_1e11.00400/agora_1e11.00400"
+@requires_pf(gasoline, file_check = True)
+def test_gasoline():
+    cosmology_parameters = dict(current_redshift = 0.0,
+                                omega_lambda = 0.728,
+                                omega_matter = 0.272,
+                                hubble_constant = 0.702)
+    kwargs = dict(cosmology_parameters = cosmology_parameters,
+                  unit_base = {'mpchcm': 1.0/60.0},
+                  n_ref = 64)
+    pf = data_dir_load(gasoline, TipsyStaticOutput, (), kwargs)
+    yield assert_equal, str(pf), "agora_1e11.00400"
+    dso = [ None, ("sphere", ("c", (0.3, 'unitary')))]
+    dd = pf.h.all_data()
+    yield assert_equal, dd["Coordinates"].shape, (10550576, 3)
+    tot = sum(dd[ptype,"Coordinates"].shape[0]
+              for ptype in pf.particle_types if ptype != "all")
+    yield assert_equal, tot, 10550576
+    for ds in dso:
+        for field in _fields:
+            for axis in [0, 1, 2]:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        pf, axis, field, weight_field,
+                        ds)
+            yield FieldValuesTest(pf, field, ds)
+        dobj = create_obj(pf, ds)
+        s1 = dobj["Ones"].sum()
+        s2 = sum(mask.sum() for block, mask in dobj.blocks)
+        yield assert_equal, s1, s2

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/gui/reason/extdirect_router.py
--- a/yt/gui/reason/extdirect_router.py
+++ b/yt/gui/reason/extdirect_router.py
@@ -9,6 +9,13 @@
 This code was released under the BSD License.
 """
 
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 import inspect
 
 class DirectException(Exception):
@@ -186,12 +193,4 @@
 
 
 
-"""
 
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -14,6 +14,7 @@
 
 import itertools as it
 import numpy as np
+import importlib
 from yt.funcs import *
 from numpy.testing import assert_array_equal, assert_almost_equal, \
     assert_approx_equal, assert_array_almost_equal, assert_equal, \
@@ -252,3 +253,23 @@
                     list_of_kwarg_dicts[i][key] = keywords[key][0]
 
     return list_of_kwarg_dicts
+
+def requires_module(module):
+    """
+    Decorator that takes a module name and tries to import it.  If the
+    module imports cleanly, the decorated function is returned unchanged;
+    otherwise a no-op function is returned, so tests that depend on the
+    module are silently skipped rather than failing when it is not
+    installed on the testing platform.
+    """
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        return func
+    try:
+        importlib.import_module(module)
+    except ImportError:
+        return ffalse
+    else:
+        return ftrue
+    
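
A usage sketch for the new decorator, assuming yt is importable and picking
astropy as an arbitrary optional dependency:

    from yt.testing import requires_module

    @requires_module("astropy")      # becomes a no-op test if astropy is absent
    def test_optional_dependency():
        import astropy               # safe: the decorator already imported it
        assert astropy is not None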

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -24,6 +24,7 @@
 import shelve
 import zlib
 import tempfile
+import glob
 
 from matplotlib.testing.compare import compare_images
 from nose.plugins import Plugin
@@ -252,26 +253,34 @@
     yield
     os.chdir(oldcwd)
 
-def can_run_pf(pf_fn):
+def can_run_pf(pf_fn, file_check = False):
     if isinstance(pf_fn, StaticOutput):
         return AnswerTestingTest.result_storage is not None
     path = ytcfg.get("yt", "test_data_dir")
     if not os.path.isdir(path):
         return False
     with temp_cwd(path):
+        if file_check:
+            return os.path.isfile(pf_fn) and \
+                AnswerTestingTest.result_storage is not None
         try:
             load(pf_fn)
         except YTOutputNotIdentified:
             return False
     return AnswerTestingTest.result_storage is not None
 
-def data_dir_load(pf_fn):
+def data_dir_load(pf_fn, cls = None, args = None, kwargs = None):
     path = ytcfg.get("yt", "test_data_dir")
     if isinstance(pf_fn, StaticOutput): return pf_fn
     if not os.path.isdir(path):
         return False
     with temp_cwd(path):
-        pf = load(pf_fn)
+        if cls is None:
+            pf = load(pf_fn)
+        else:
+            args = args or ()
+            kwargs = kwargs or {}
+            pf = cls(pf_fn, *args, **kwargs)
         pf.h
         return pf
 
@@ -313,15 +322,6 @@
     def compare(self, new_result, old_result):
         raise RuntimeError
 
-    def create_obj(self, pf, obj_type):
-        # obj_type should be tuple of
-        #  ( obj_name, ( args ) )
-        if obj_type is None:
-            return pf.h.all_data()
-        cls = getattr(pf.h, obj_type[0])
-        obj = cls(*obj_type[1])
-        return obj
-
     def create_plot(self, pf, plot_type, plot_field, plot_axis, plot_kwargs = None):
         # plot_type should be a string
         # plot_args should be a tuple
@@ -377,7 +377,7 @@
         self.decimals = decimals
 
     def run(self):
-        obj = self.create_obj(self.pf, self.obj_type)
+        obj = create_obj(self.pf, self.obj_type)
         avg = obj.quantities["WeightedAverageQuantity"](self.field,
                              weight="Ones")
         (mi, ma), = obj.quantities["Extrema"](self.field)
@@ -404,7 +404,7 @@
         self.decimals = decimals
 
     def run(self):
-        obj = self.create_obj(self.pf, self.obj_type)
+        obj = create_obj(self.pf, self.obj_type)
         return obj[self.field]
 
     def compare(self, new_result, old_result):
@@ -431,7 +431,7 @@
 
     def run(self):
         if self.obj_type is not None:
-            obj = self.create_obj(self.pf, self.obj_type)
+            obj = create_obj(self.pf, self.obj_type)
         else:
             obj = None
         if self.pf.domain_dimensions[self.axis] == 1: return None
@@ -472,7 +472,7 @@
 
     def run(self):
         if self.obj_type is not None:
-            obj = self.create_obj(self.pf, self.obj_type)
+            obj = create_obj(self.pf, self.obj_type)
         else:
             obj = None
         proj = self.pf.h.proj(self.field, self.axis, 
@@ -577,6 +577,16 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newp == oldp)
 
+def compare_image_lists(new_result, old_result, decimals):
+    fns = ['old.png', 'new.png']
+    num_images = len(old_result)
+    assert(num_images > 0)
+    for i in xrange(num_images):
+        mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result[i])))
+        mpimg.imsave(fns[1], np.loads(zlib.decompress(new_result[i])))
+        assert compare_images(fns[0], fns[1], 10**(-decimals)) == None
+        for fn in fns: os.remove(fn)
+            
 class PlotWindowAttributeTest(AnswerTestingTest):
     _type_name = "PlotWindowAttribute"
     _attrs = ('plot_type', 'plot_field', 'plot_axis', 'attr_name', 'attr_args')
@@ -604,20 +614,80 @@
         return [zlib.compress(image.dumps())]
 
     def compare(self, new_result, old_result):
-        fns = ['old.png', 'new.png']
-        mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result[0])))
-        mpimg.imsave(fns[1], np.loads(zlib.decompress(new_result[0])))
-        assert compare_images(fns[0], fns[1], 10**(-self.decimals)) == None
-        for fn in fns: os.remove(fn)
+        compare_image_lists(new_result, old_result, self.decimals)
 
-def requires_pf(pf_fn, big_data = False):
+class GenericArrayTest(AnswerTestingTest):
+    _type_name = "GenericArray"
+    _attrs = ('array_func_name','args','kwargs')
+    def __init__(self, pf_fn, array_func, args=None, kwargs=None, decimals=None):
+        super(GenericArrayTest, self).__init__(pf_fn)
+        self.array_func = array_func
+        self.array_func_name = array_func.func_name
+        self.args = args
+        self.kwargs = kwargs
+        self.decimals = decimals
+    def run(self):
+        if self.args is None:
+            args = []
+        else:
+            args = self.args
+        if self.kwargs is None:
+            kwargs = {}
+        else:
+            kwargs = self.kwargs
+        return self.array_func(*args, **kwargs)
+    def compare(self, new_result, old_result):
+        assert_equal(len(new_result), len(old_result),
+                                          err_msg="Number of outputs not equal.",
+                                          verbose=True)
+        for k in new_result:
+            if self.decimals is None:
+                assert_equal(new_result[k], old_result[k])
+            else:
+                assert_allclose(new_result[k], old_result[k], 10**(-self.decimals))
+
+class GenericImageTest(AnswerTestingTest):
+    _type_name = "GenericImage"
+    _attrs = ('image_func_name','args','kwargs')
+    def __init__(self, pf_fn, image_func, decimals, args=None, kwargs=None):
+        super(GenericImageTest, self).__init__(pf_fn)
+        self.image_func = image_func
+        self.image_func_name = image_func.func_name
+        self.args = args
+        self.kwargs = kwargs
+        self.decimals = decimals
+    def run(self):
+        if self.args is None:
+            args = []
+        else:
+            args = self.args
+        if self.kwargs is None:
+            kwargs = {}
+        else:
+            kwargs = self.kwargs
+        comp_imgs = []
+        tmpdir = tempfile.mkdtemp()
+        image_prefix = os.path.join(tmpdir,"test_img")
+        self.image_func(image_prefix, *args, **kwargs)
+        imgs = glob.glob(image_prefix+"*")
+        assert(len(imgs) > 0)
+        for img in imgs:
+            img_data = mpimg.imread(img)
+            os.remove(img)
+            comp_imgs.append(zlib.compress(img_data.dumps()))
+        return comp_imgs
+    def compare(self, new_result, old_result):
+        compare_image_lists(new_result, old_result, self.decimals)
+        
+
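
GenericArrayTest answer-tests any array-returning callable, comparing
dictionaries of arrays key by key; GenericImageTest does the same for images
written by a plotting callable.  A usage sketch for the array variant (field
choice and decimals arbitrary; the dataset path is reused from the Enzo tests
above):

    from yt.utilities.answer_testing.framework import \
        requires_pf, data_dir_load, GenericArrayTest

    g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"

    @requires_pf(g30)
    def test_density_extrema():
        pf = data_dir_load(g30)
        def density_extrema():
            dd = pf.h.all_data()
            return {"Density": dd.quantities["Extrema"]("Density")}
        yield GenericArrayTest(g30, density_extrema, decimals=10)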
+def requires_pf(pf_fn, big_data = False, file_check = False):
     def ffalse(func):
         return lambda: None
     def ftrue(func):
         return func
     if run_big_data == False and big_data == True:
         return ffalse
-    elif not can_run_pf(pf_fn):
+    elif not can_run_pf(pf_fn, file_check):
         return ffalse
     else:
         return ftrue
@@ -652,6 +722,15 @@
                         pf_fn, axis, field, weight_field,
                         ds)
 
+def create_obj(pf, obj_type):
+    # obj_type should be tuple of
+    #  ( obj_name, ( args ) )
+    if obj_type is None:
+        return pf.h.all_data()
+    cls = getattr(pf.h, obj_type[0])
+    obj = cls(*obj_type[1])
+    return obj
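
create_obj is now module-level so both the test classes and the frontend
tests can share it.  Usage matches the dso tuples used throughout:

    obj = create_obj(pf, None)                                   # pf.h.all_data()
    obj = create_obj(pf, ("sphere", ("max", (0.1, 'unitary'))))  # pf.h.sphere(...)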
+
 class AssertWrapper(object):
     """
     Used to wrap a numpy testing assertion, in order to provide a useful name

diff -r 55bffcd057e21b219fdb8f1091b4ca66d8ec8591 -r 90e60994750b95e412851f6174d3087a0377b5af yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -84,6 +84,7 @@
 erg_per_keV = erg_per_eV * 1.0e3
 K_per_keV = erg_per_keV / boltzmann_constant_cgs
 keV_per_K = 1.0 / K_per_keV
+Tcmb = 2.726 # Current CMB temperature in Kelvin
 
 #Short cuts
 G = gravitational_constant_cgs

This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/

--

This is a commit notification from bitbucket.org. You are receiving
it because you have the commit notification service enabled for this
repository.


