[yt-svn] commit/yt: 63 new changesets

commits-noreply at bitbucket.org
Thu Jul 16 09:37:59 PDT 2015


63 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/79d55b06077c/
Changeset:   79d55b06077c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-02-23 01:40:46+00:00
Summary:     Add max_t and zbuffer to walk_volume and image samplers.
Affected #:  2 files

diff -r 48082ac3407b75b95fe281f41df7b6ccab7a1ace -r 79d55b06077cd459b0402b7080400d4feec29829 yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -59,7 +59,7 @@
                      sample_function *sampler,
                      void *data,
                      np.float64_t *return_t = *,
-                     np.float64_t enter_t = *) nogil
+                     np.float64_t max_t = *) nogil
 
 cdef inline int vc_index(VolumeContainer *vc, int i, int j, int k):
     return (i*vc.dims[1]+j)*vc.dims[2]+k

diff -r 48082ac3407b75b95fe281f41df7b6ccab7a1ace -r 79d55b06077cd459b0402b7080400d4feec29829 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -198,6 +198,7 @@
 
 cdef struct ImageContainer:
     np.float64_t *vp_pos, *vp_dir, *center, *image,
+    np.float64_t *zbuffer
     np.float64_t pdx, pdy, bounds[4]
     int nv[2]
     int vp_strides[3]
@@ -213,6 +214,7 @@
     cdef ImageContainer *image
     cdef sampler_function *sampler
     cdef public object avp_pos, avp_dir, acenter, aimage, ax_vec, ay_vec
+    cdef public object azbuffer
     cdef void *supp_data
     cdef np.float64_t width[3]
     def __init__(self, 
@@ -227,6 +229,8 @@
                   *args, **kwargs):
         self.image = <ImageContainer *> malloc(sizeof(ImageContainer))
         cdef ImageContainer *imagec = self.image
+        cdef np.ndarray[np.float64_t, ndim=2] zbuffer
+        zbuffer = kwargs.pop("zbuffer", None)
         self.sampler = NULL
         cdef int i, j
         # These assignments are so we can track the objects and prevent their
@@ -237,12 +241,16 @@
         self.aimage = image
         self.ax_vec = x_vec
         self.ay_vec = y_vec
+        self.azbuffer = zbuffer
         imagec.vp_pos = <np.float64_t *> vp_pos.data
         imagec.vp_dir = <np.float64_t *> vp_dir.data
         imagec.center = <np.float64_t *> center.data
         imagec.image = <np.float64_t *> image.data
         imagec.x_vec = <np.float64_t *> x_vec.data
         imagec.y_vec = <np.float64_t *> y_vec.data
+        imagec.zbuffer = NULL
+        if zbuffer is not None:
+            imagec.zbuffer = <np.float64_t *> zbuffer.data
         imagec.nv[0] = image.shape[0]
         imagec.nv[1] = image.shape[1]
         for i in range(4): imagec.bounds[i] = bounds[i]
@@ -316,7 +324,7 @@
         cdef ImageContainer *im = self.image
         self.setup(pg)
         if self.sampler == NULL: raise RuntimeError
-        cdef np.float64_t *v_pos, *v_dir, rgba[6], extrema[4]
+        cdef np.float64_t *v_pos, *v_dir, rgba[6], extrema[4], max_t
         hit = 0
         cdef np.int64_t nx, ny, size
         if im.vd_strides[0] == -1:
@@ -356,8 +364,12 @@
                     v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
                     offset = im.im_strides[0] * vi + im.im_strides[1] * vj
                     for i in range(Nch): idata.rgba[i] = im.image[i + offset]
+                    if im.zbuffer != NULL:
+                        max_t = im.zbuffer[im.nv[0] * vi + vj]
+                    else:
+                        max_t = 1.0
                     walk_volume(vc, v_pos, im.vp_dir, self.sampler,
-                                (<void *> idata))
+                                (<void *> idata), NULL, max_t)
                     for i in range(Nch): im.image[i + offset] = idata.rgba[i]
                 free(idata)
                 free(v_pos)
@@ -376,8 +388,12 @@
                     # Note that for Nch != 3 we need a different offset into
                     # the image object than for the vectors!
                     for i in range(Nch): idata.rgba[i] = im.image[i + Nch*j]
+                    if im.zbuffer != NULL:
+                        max_t = im.zbuffer[j]
+                    else:
+                        max_t = 1.0
                     walk_volume(vc, v_pos, v_dir, self.sampler, 
-                                (<void *> idata))
+                                (<void *> idata), NULL, max_t)
                     for i in range(Nch): im.image[i + Nch*j] = idata.rgba[i]
                 free(v_dir)
                 free(idata)
@@ -467,9 +483,9 @@
                   np.ndarray[np.float64_t, ndim=1] x_vec,
                   np.ndarray[np.float64_t, ndim=1] y_vec,
                   np.ndarray[np.float64_t, ndim=1] width,
-                  n_samples = 10):
+                  n_samples = 10, **kwargs):
         ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image,
-                               x_vec, y_vec, width)
+                               x_vec, y_vec, width, **kwargs)
         cdef int i
         # Now we handle tf_obj
         self.vra = <VolumeRenderAccumulator *> \
@@ -668,9 +684,9 @@
                   np.ndarray[np.float64_t, ndim=1] y_vec,
                   np.ndarray[np.float64_t, ndim=1] width,
                   tf_obj, n_samples = 10,
-                  star_list = None):
+                  star_list = None, **kwargs):
         ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image,
-                               x_vec, y_vec, width)
+                               x_vec, y_vec, width, **kwargs)
         cdef int i
         cdef np.ndarray[np.float64_t, ndim=1] temp
         # Now we handle tf_obj
@@ -740,9 +756,10 @@
                   np.ndarray[np.float64_t, ndim=1] width,
                   tf_obj, n_samples = 10,
                   light_dir=[1.,1.,1.],
-                  light_rgba=[1.,1.,1.,1.]):
+                  light_rgba=[1.,1.,1.,1.],
+                  **kwargs):
         ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image,
-                               x_vec, y_vec, width)
+                               x_vec, y_vec, width, **kwargs)
         cdef int i
         cdef np.ndarray[np.float64_t, ndim=1] temp
         # Now we handle tf_obj
@@ -912,12 +929,12 @@
                      sampler_function *sampler,
                      void *data,
                      np.float64_t *return_t = NULL,
-                     np.float64_t enter_t = -1.0) nogil:
+                     np.float64_t max_t = 1.0) nogil:
     cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
     cdef np.float64_t intersect_t = 1.1
     cdef np.float64_t iv_dir[3]
     cdef np.float64_t tmax[3], tdelta[3]
-    cdef np.float64_t dist, alpha, dt, exit_t
+    cdef np.float64_t dist, alpha, dt, exit_t, enter_t = -1.0
     cdef np.float64_t tr, tl, temp_x, temp_y, dv
     direction = -1
     if vc.left_edge[0] <= v_pos[0] and v_pos[0] <= vc.right_edge[0] and \
@@ -957,7 +974,7 @@
             direction = i
             intersect_t = tl
     if enter_t >= 0.0: intersect_t = enter_t 
-    if not ((0.0 <= intersect_t) and (intersect_t < 1.0)): return 0
+    if not ((0.0 <= intersect_t) and (intersect_t < max_t)): return 0
     for i in range(3):
         # Two things have to be set inside this loop.
         # cur_ind[i], the current index of the grid cell the ray is in
@@ -1001,12 +1018,12 @@
                 i = 1
             else:
                 i = 2
-        exit_t = fmin(tmax[i], 1.0)
+        exit_t = fmin(tmax[i], max_t)
         sampler(vc, v_pos, v_dir, enter_t, exit_t, cur_ind, data)
         cur_ind[i] += step[i]
         enter_t = tmax[i]
         tmax[i] += tdelta[i]
-        if cur_ind[i] < 0 or cur_ind[i] >= vc.dims[i] or enter_t >= 1.0:
+        if cur_ind[i] < 0 or cur_ind[i] >= vc.dims[i] or enter_t >= max_t:
             break
     if return_t != NULL: return_t[0] = exit_t
     return hit
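
The net effect of this changeset: walk_volume now takes a per-ray max_t (looked
up from the optional zbuffer, with 1.0 as the no-occlusion default), clamps each
sample interval to it, and stops stepping once the ray passes it. A minimal
pure-Python sketch of that termination logic; march_ray is an illustrative
stand-in, not part of yt:

def march_ray(tmax, tdelta, dims, max_t=1.0):
    # Step along one axis, mirroring exit_t = fmin(tmax[i], max_t) and the
    # enter_t >= max_t early break in walk_volume above.
    enter_t, cur_ind, hits = 0.0, 0, []
    while True:
        exit_t = min(tmax, max_t)               # clamp the sample interval
        hits.append((cur_ind, round(enter_t, 2), round(exit_t, 2)))
        cur_ind += 1
        enter_t = tmax
        tmax += tdelta
        if cur_ind >= dims or enter_t >= max_t:
            break
    return hits

# max_t = 1.0 (equivalent to an all-ones zbuffer) crosses every cell; a
# smaller depth, e.g. written by something opaque nearer the camera,
# terminates the ray early.
print(march_ray(tmax=0.2, tdelta=0.2, dims=5, max_t=1.0))   # 5 intervals
print(march_ray(tmax=0.2, tdelta=0.2, dims=5, max_t=0.5))   # stops after 3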


https://bitbucket.org/yt_analysis/yt/commits/db97fb0b7dc0/
Changeset:   db97fb0b7dc0
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-02-23 02:33:53+00:00
Summary:     Do front-to-back now instead of back-to-front.
Affected #:  1 file

diff -r 79d55b06077cd459b0402b7080400d4feec29829 -r db97fb0b7dc0ebdce4e01eee35019585f81eb61a yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -583,7 +583,7 @@
                     if np.any(np.isnan(data)):
                         raise RuntimeError
 
-        view_pos = self.front_center + self.orienter.unit_vectors[2] * 1.0e6 * self.width[2]
+        view_pos = self.back_center - self.orienter.unit_vectors[2] * 1.0e6 * self.width[2]
         for brick in self.volume.traverse(view_pos):
             sampler(brick, num_threads=num_threads)
             total_cells += np.prod(brick.my_data[0].shape)
@@ -2165,8 +2165,8 @@
             pass
 
     def get_sampler_args(self, image):
-        rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
-        args = (rotp, self.box_vectors[2], self.back_center,
+        rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.front_center.ravel()])
+        args = (rotp, -self.box_vectors[2], self.front_center,
             (-self.width[0]/2., self.width[0]/2.,
              -self.width[1]/2., self.width[1]/2.),
             image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
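
Front-to-back ordering is what makes the new zbuffer pay off: nearer samples
composite first, so a ray can quit as soon as its pixel saturates. A hedged
illustration of the front-to-back "over" operator; composite_front_to_back is
a hypothetical helper, not the sampler's actual code path:

import numpy as np

def composite_front_to_back(samples):
    # samples: iterable of (rgb, alpha) pairs ordered nearest-first.
    rgb, alpha = np.zeros(3), 0.0
    for src_rgb, src_a in samples:
        rgb += (1.0 - alpha) * src_a * np.asarray(src_rgb, dtype="float64")
        alpha += (1.0 - alpha) * src_a
        if alpha >= 1.0:        # saturated; everything behind is occluded
            break
    return rgb, alpha

# Two half-opaque samples, red in front of blue:
print(composite_front_to_back([([1, 0, 0], 0.5), ([0, 0, 1], 0.5)]))
# -> (array([0.5 , 0.  , 0.25]), 0.75)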


https://bitbucket.org/yt_analysis/yt/commits/18177fadc5e9/
Changeset:   18177fadc5e9
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-02-23 04:50:33+00:00
Summary:     Adding dummy zbuffer.
Affected #:  1 file

diff -r db97fb0b7dc0ebdce4e01eee35019585f81eb61a -r 18177fadc5e944aab2263e3a5c27249f15df1547 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -550,6 +550,7 @@
     star_trees = None
     def get_sampler(self, args):
         kwargs = {}
+        kwargs['zbuffer'] = np.ones((self.resolution[0], self.resolution[1]))
         if self.star_trees is not None:
             kwargs = {'star_list': self.star_trees}
         if self.use_light:


https://bitbucket.org/yt_analysis/yt/commits/51c539404363/
Changeset:   51c539404363
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-19 21:51:13+00:00
Summary:     Fixing up the FisheyeCamera on experimental bookmark.
Affected #:  2 files

diff -r 794514da3f86f4792efff526fdeeb9556e53b0b5 -r 51c539404363e2fc376c68b5ffd3d8c5f381006a yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1383,6 +1383,7 @@
         self.center = np.array(center, dtype='float64')
         self.radius = radius
         self.fov = fov
+        self.no_ghost = no_ghost
         if iterable(resolution):
             raise RuntimeError("Resolution must be a single int")
         self.resolution = resolution
@@ -1392,12 +1393,17 @@
         if fields is None: fields = ["density"]
         self.fields = fields
         self.sub_samples = sub_samples
+        dd = self.pf.h.all_data()
+        efields = dd._determine_fields(self.fields)
         self.log_fields = log_fields
+        if self.log_fields is None:
+            self.log_fields = [self.pf._get_field_info(*f).take_log for f in efields]
         if volume is None:
-            volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
-                               log_fields=log_fields)
+            volume = AMRKDTree(self.pf)
         self.volume = volume
 
+        self.initialize_source()
+
     def get_information(self):
         return {}
 

diff -r 794514da3f86f4792efff526fdeeb9556e53b0b5 -r 51c539404363e2fc376c68b5ffd3d8c5f381006a yt/visualization/volume_rendering/tests/test_vr_cameras.py
--- a/yt/visualization/volume_rendering/tests/test_vr_cameras.py
+++ b/yt/visualization/volume_rendering/tests/test_vr_cameras.py
@@ -22,7 +22,8 @@
 import numpy as np
 from yt.mods import ColorTransferFunction, ProjectionTransferFunction
 from yt.visualization.volume_rendering.api import \
-    PerspectiveCamera, StereoPairCamera, InteractiveCamera, ProjectionCamera
+    PerspectiveCamera, StereoPairCamera, InteractiveCamera, ProjectionCamera, \
+    FisheyeCamera
 from yt.visualization.tests.test_plotwindow import assert_fname
 from unittest import TestCase
 
@@ -156,3 +157,10 @@
             snap
         cam.snapshot('final.png')
         assert_fname('final.png')
+
+    def test_fisheye(self):
+        pf = self.pf
+        tf = self.setup_transfer_function('camera')
+        cam = FisheyeCamera(pf.domain_center, pf.domain_width[0],
+                            360.0, 256, transfer_function=tf, pf=pf)
+        cam.snapshot('fisheye.png')
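
Besides the test, the fix derives take_log defaults from the field definitions
instead of requiring log_fields up front. The pattern, pulled out of the diff
(pf.h.all_data and _determine_fields are yt-3.0-era internals, and pf stands
for a previously loaded dataset; treat this as a sketch):

dd = pf.h.all_data()
efields = dd._determine_fields(["density"])
# Default each field's log scaling to what its definition declares.
log_fields = [pf._get_field_info(*f).take_log for f in efields]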


https://bitbucket.org/yt_analysis/yt/commits/4247473adb0f/
Changeset:   4247473adb0f
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-23 22:11:31+00:00
Summary:     Pulling the camera apart.  Separating into RenderSource, Engine, Scene.
Affected #:  5 files

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/c4cda112cf21/
Changeset:   c4cda112cf21
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-24 05:08:22+00:00
Summary:     Disabling a bunch of cameras, and off axis projections, for now.
Affected #:  4 files

diff -r 4247473adb0f1191e0f04a57cdc72cc65b4226ab -r c4cda112cf2127c28a0da6c20d4aea2124db1d97 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -161,7 +161,11 @@
 
 from yt.visualization.volume_rendering.api import \
     ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \
-    HomogenizedVolume, Camera, off_axis_projection, MosaicFisheyeCamera
+    HomogenizedVolume, Camera
+
+# Not Currently Supported
+# from yt.visualization.volume_rendering.api import \
+#     off_axis_projection, MosaicFisheyeCamera
 
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects

diff -r 4247473adb0f1191e0f04a57cdc72cc65b4226ab -r c4cda112cf2127c28a0da6c20d4aea2124db1d97 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -18,7 +18,7 @@
     x_dict, \
     y_dict, \
     axis_names
-from .volume_rendering.api import off_axis_projection
+#from .volume_rendering.api import off_axis_projection
 from yt.data_objects.image_array import ImageArray
 from yt.utilities.lib.misc_utilities import \
     pixelize_cylinder
@@ -373,11 +373,12 @@
         width = self.pf.arr((self.bounds[1] - self.bounds[0],
                              self.bounds[3] - self.bounds[2],
                              self.bounds[5] - self.bounds[4]))
-        buff = off_axis_projection(ds.pf, ds.center, ds.normal_vector,
-                                   width, ds.resolution, item,
-                                   weight=ds.weight_field, volume=ds.volume,
-                                   no_ghost=ds.no_ghost, interpolated=ds.interpolated,
-                                   north_vector=ds.north_vector)
+        raise NotImplementedError
+        # buff = off_axis_projection(ds.pf, ds.center, ds.normal_vector,
+        #                            width, ds.resolution, item,
+        #                            weight=ds.weight_field, volume=ds.volume,
+        #                            no_ghost=ds.no_ghost, interpolated=ds.interpolated,
+        #                            north_vector=ds.north_vector)
         ia = ImageArray(buff.swapaxes(0,1), info=self._get_info(item))
         self[item] = ia
         return ia 

diff -r 4247473adb0f1191e0f04a57cdc72cc65b4226ab -r c4cda112cf2127c28a0da6c20d4aea2124db1d97 yt/visualization/volume_rendering/api.py
--- a/yt/visualization/volume_rendering/api.py
+++ b/yt/visualization/volume_rendering/api.py
@@ -23,6 +23,4 @@
 from image_handling import export_rgba, import_rgba, \
                            plot_channel, plot_rgb
 
-from camera import Camera, PerspectiveCamera, StereoPairCamera, \
-    off_axis_projection, FisheyeCamera, MosaicFisheyeCamera, \
-    HEALpixCamera, InteractiveCamera, ProjectionCamera
+from camera import Camera

diff -r 4247473adb0f1191e0f04a57cdc72cc65b4226ab -r c4cda112cf2127c28a0da6c20d4aea2124db1d97 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -13,21 +13,52 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+from yt.funcs import iterable
 from yt.utilities.orientation import Orientation
-from yt.utilities.parallel_tools.parallel_analysis_interface \
-    import ParallelAnalysisInterface
+from yt.units.yt_array import YTArray
+import numpy as np
 
 
-class Camera(ParallelAnalysisInterface):
+class Camera(Orientation):
 
     r"""    """
 
+    def __init__(self, data_source):
+        """Initialize a Camera Instance"""
+        self.data_source = data_source
+        self.position = data_source.pf.domain_right_edge
+        self.north_vector = np.array([0.0, 0.0, 1.0])
+        self.resolution = (512, 512)
 
-    def __init__(self):
-        """Initialize a Camera Instance"""
-        ParallelAnalysisInterface.__init__(self)
-        self.position = None
-        self.focus = None
-        self.orientation = None
+        width = data_source.pf.domain_width.max()
+        focus = data_source.pf.domain_center
+
+        if iterable(width) and len(width) > 1 and isinstance(width[1], str):
+            width = self.pf.quan(width[0], input_units=width[1])
+            # Now convert back to code length for subsequent manipulation
+            width = width.in_units("code_length").value
+        if not iterable(width):
+            width = (width, width, width)  # left/right, top/bottom, front/back
+        if not isinstance(width, YTArray):
+            width = self.data_source.pf.arr(width, input_units="code_length")
+        if not isinstance(focus, YTArray):
+            focus = self.pf.arr(focus, input_units="code_length")
+        self.width = width
+        self.focus = focus
+
+        super(Camera, self).__init__(self.focus - self.position,
+                                     self.north_vector, steady_north=True)
+
+        self._setup_box_properties()
         self.light = None
 
+    def _setup_box_properties(self):
+        unit_vectors = self.unit_vectors
+        width = self.width
+        center = self.focus
+        self.box_vectors = YTArray([unit_vectors[0] * width[0],
+                                    unit_vectors[1] * width[1],
+                                    unit_vectors[2] * width[2]])
+        self.origin = center - 0.5 * width.dot(YTArray(unit_vectors, ""))
+        self.back_center = center - 0.5 * width[2] * unit_vectors[2]
+        self.front_center = center + 0.5 * width[2] * unit_vectors[2]
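
For intuition, _setup_box_properties reduces to simple arithmetic in the
axis-aligned case. A numeric sketch assuming identity unit_vectors and a unit
cube in code units, with plain numpy arrays standing in for YTArrays:

import numpy as np

unit_vectors = np.eye(3)
width = np.array([1.0, 1.0, 1.0])
center = np.array([0.5, 0.5, 0.5])      # the camera focus
box_vectors = np.array([unit_vectors[i] * width[i] for i in range(3)])
origin = center - 0.5 * width.dot(unit_vectors)
back_center = center - 0.5 * width[2] * unit_vectors[2]
front_center = center + 0.5 * width[2] * unit_vectors[2]
print(origin)         # [0. 0. 0.]
print(back_center)    # [0.5 0.5 0. ]
print(front_center)   # [0.5 0.5 1. ]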


https://bitbucket.org/yt_analysis/yt/commits/95408d0da65b/
Changeset:   95408d0da65b
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-24 05:08:45+00:00
Summary:     Orientation needs to inherit from object
Affected #:  1 file

diff -r c4cda112cf2127c28a0da6c20d4aea2124db1d97 -r 95408d0da65ba2c1df5556cbd35c742f3133517c yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -19,7 +19,7 @@
 from yt.funcs import *
 from yt.utilities.math_utils import get_rotation_matrix
 
-class Orientation:
+class Orientation(object):
     def __init__(self, normal_vector, north_vector=None, steady_north=False):
         r"""An object that returns a set of basis vectors for orienting
         cameras and data containers.


https://bitbucket.org/yt_analysis/yt/commits/67fc4de2dd5c/
Changeset:   67fc4de2dd5c
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-24 05:09:23+00:00
Summary:     Fix up the render engine.
Affected #:  1 file

diff -r 95408d0da65ba2c1df5556cbd35c742f3133517c -r 67fc4de2dd5cf6cae4f6c5325f711bb646d63f75 yt/visualization/volume_rendering/engine.py
--- a/yt/visualization/volume_rendering/engine.py
+++ b/yt/visualization/volume_rendering/engine.py
@@ -36,46 +36,51 @@
     def __init__(self, scene, render_source):
         super(PlaneParallelEngine, self).__init__()
         self.scene = scene
+        self.camera = scene.camera
         self.render_source = render_source
-        self.sampler = self.get_sampler()
+        self.transfer_function = self.render_source.transfer_function
         self.sub_samples = 5
-        self.transfer_function = self.render_sourcce.transfer_function
         self.num_threads = 1
         self.double_check = False
+        self.sampler = self.get_sampler()
 
     def get_sampler(self):
         image = self.render_source.current_image
-        rotp = np.concatenate([self.scene.camera.orienter.inv_mat.ravel('F'),
+        rotp = np.concatenate([self.scene.camera.inv_mat.ravel('F'),
                                self.scene.camera.back_center.ravel()])
         args = (rotp, self.camera.box_vectors[2], self.camera.back_center,
                 (-self.camera.width[0] / 2.0, self.camera.width[0] / 2.0,
                  -self.camera.width[1] / 2.0, self.camera.width[1] / 2.0),
-                image, self.camera.orienter.unit_vectors[
-                    0], self.camera.orienter.unit_vectors[1],
+                image, self.camera.unit_vectors[
+                    0], self.camera.unit_vectors[1],
                 np.array(self.camera.width, dtype='float64'),
-                self.transfer_function, self.camera.sub_samples)
+                self.transfer_function, self.sub_samples)
         sampler = VolumeRenderSampler(*args)
         return sampler
 
     def run(self):
-        pbar = get_pbar("Ray casting",
-                        (self.render_source.volume.brick_dimensions
-                         + 1).prod(axis=-1).sum())
         total_cells = 0
         if self.double_check:
-            for brick in self.volume.bricks:
+            for brick in self.render_source.volume.bricks:
                 for data in brick.my_data:
                     if np.any(np.isnan(data)):
                         raise RuntimeError
 
         view_pos = self.camera.front_center + \
-            self.camera.orienter.unit_vectors[2] * 1.0e6 * self.camera.width[2]
-        for brick in self.volume.traverse(view_pos):
+            self.camera.unit_vectors[2] * 1.0e6 * self.camera.width[2]
+        for brick in self.render_source.volume.traverse(view_pos):
             self.sampler(brick, num_threads=self.num_threads)
             total_cells += np.prod(brick.my_data[0].shape)
-            pbar.update(total_cells)
 
-        pbar.finish()
         self.render_source.current_image = \
             self.finalize_image(self.sampler.aimage)
         return
+
+    def finalize_image(self, image):
+        cam = self.scene.camera
+        view_pos = cam.front_center + cam.unit_vectors[2] * \
+            1.0e6 * cam.width[2]
+        image = self.render_source.volume.reduce_tree_images(image, view_pos)
+        if self.transfer_function.grey_opacity is False:
+            image[:, :, 3] = 1.0
+        return image


https://bitbucket.org/yt_analysis/yt/commits/adde1f002bba/
Changeset:   adde1f002bba
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-24 05:09:50+00:00
Summary:     Render source should set up good defaults now.
Affected #:  1 file

diff -r 67fc4de2dd5cf6cae4f6c5325f711bb646d63f75 -r adde1f002bbacdc4ffa15f3ee86d9b1e22aa1583 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -13,10 +13,12 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.data_objects.api import ImageArray
+import numpy as np
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
 from yt.utilities.amr_kdtree.api import AMRKDTree
+from transfer_function_helper import TransferFunctionHelper
+from engine import PlaneParallelEngine
 
 
 class RenderSource(ParallelAnalysisInterface):
@@ -37,10 +39,19 @@
 
     """docstring for VolumeSource"""
 
-    def __init__(self, data_source):
+    def __init__(self, scene, data_source, field=None):
         super(VolumeSource, self).__init__()
+        self.scene = scene
         self.data_source = data_source
         self.volume = None
+        self.field = field
+        self.current_image = None
+        self.engine = None
+        self.setup()
+
+        # In the future these will merge
+        self.transfer_function = None
+        self.tfh = None
         self.setup()
 
     def validate(self):
@@ -51,9 +62,31 @@
         if self.volume is None:
             raise RuntimeError("Volume not initialized")
 
+        if self.transfer_function is None:
+            raise RuntimeError("Transfer Function not Supplied")
+
     def setup(self):
         """setup VolumeSource"""
-        self.volume = AMRKDTree(self.data_source)
+        self.current_image = self.new_image()
+        self.tfh = \
+            TransferFunctionHelper(self.data_source.pf)
+        self.tfh.set_field(self.field)
+        self.tfh.set_bounds([0.0, 1.0])
+        self.tfh.set_log(False)
+        self.tfh.build_transfer_function()
+        self.tfh.setup_default()
+        self.transfer_function = self.tfh.tf
+        self.engine = PlaneParallelEngine(self.scene, self)
+        self.volume = AMRKDTree(self.data_source.pf,
+                                data_source=self.data_source)
+        self.volume.initialize_source([self.field], [False], True)
+
+    def new_image(self):
+        cam = self.scene.camera
+        image = np.zeros((cam.resolution[0],
+                          cam.resolution[1], 4),
+                         dtype='float64', order='C')
+        return image
 
     def teardown(self):
         """docstring for teardown"""
@@ -65,25 +98,5 @@
 
     def request(self, *args, **kwargs):
         """docstring for request"""
-        pass
-
-    def _render(self, double_check, num_threads, image, sampler):
-        pbar = get_pbar("Ray casting",
-                        (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
-        total_cells = 0
-        if double_check:
-            for brick in self.volume.bricks:
-                for data in brick.my_data:
-                    if np.any(np.isnan(data)):
-                        raise RuntimeError
-
-        for brick in self.volume.traverse(self.front_center):
-            sampler(brick, num_threads=num_threads)
-            total_cells += np.prod(brick.my_data[0].shape)
-            pbar.update(total_cells)
-
-        pbar.finish()
-        image = self.finalize_image(sampler.aimage)
-        return image
-
-
+        self.engine.run()
+        return self.current_image
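
With these defaults in place, rendering becomes a request against the source
and the engine does the ray casting. A hedged usage sketch under this commit's
signatures, where scene and dd are hypothetical stand-ins for a Scene and a yt
data object:

source = VolumeSource(scene, dd, field="density")
source.validate()           # raises RuntimeError if volume or tf is missing
image = source.request()    # runs the PlaneParallelEngine, returns the image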


https://bitbucket.org/yt_analysis/yt/commits/97961e1059f9/
Changeset:   97961e1059f9
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-24 05:10:37+00:00
Summary:     Not quite the api that is right, but returning all the images is the first step before compositing.
Affected #:  1 file

diff -r adde1f002bbacdc4ffa15f3ee86d9b1e22aa1583 -r 97961e1059f9ba265d16f97151a0a24faf932130 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -12,7 +12,10 @@
 #-----------------------------------------------------------------------------
 
 
+from yt.funcs import mylog
 from yt.data_objects.static_output import Dataset
+from camera import Camera
+from render_source import VolumeSource
 
 
 class Scene(object):
@@ -26,6 +29,7 @@
         self.datasets = []
         self.camera = None
         self.sources = {}
+        self.camera_path = None
 
     def request(self):
         pass
@@ -57,21 +61,37 @@
 
     """docstring for RenderScene"""
 
-    def __init__(self, data_source=None,):
+    def __init__(self, data_source=None, field=None):
         super(RenderScene, self).__init__()
         if isinstance(data_source, Dataset):
             self.ds = data_source
-            data_source = ds.all_data()
+            data_source = data_source.all_data()
         else:
             self.ds = data_source.pf
+
+        print 'DATA SOURCE: ', data_source
         self.data_source = data_source
+        self.camera = Camera(data_source)
+        self.field = field
+        self.render_sources = {}
+        self.default_setup()
 
-        self.camera = Camera(data_source)
-        self.render_sources = {}
-        self.camera_path = CameraPath()
+    def default_setup(self):
+        """docstring for default_setup"""
+        if self.field is None:
+            self.ds.field_list
+            self.field = self.ds.field_list[0]
+            print 'WHAT FIELD AM I: ', self.field
+            mylog.info('Setting default field to %s' % self.field.__repr__())
 
         if self.data_source:
-            self.render_sources['vr1'] = VolumeSource(self.data_source)
+            self.render_sources['vr1'] = \
+                VolumeSource(self, self.data_source, self.field)
 
+    def render(self):
+        ims = {}
+        for k, v in self.render_sources.iteritems():
+            print 'Running', k, v
+            ims[k] = v.request()
 
-
+        return ims
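
Put together, a scene can now be driven end to end. A sketch of the intended
flow as of this commit; the import path is an assumption, and render() returns
a dict of images keyed by source name ('vr1' for the default volume rendering):

from yt.mods import load
from yt.visualization.volume_rendering.scene import RenderScene

pf = load("DD0010/data0010")             # any loaded dataset
scene = RenderScene(pf, field="density")
images = scene.render()                  # {'vr1': <image array>}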


https://bitbucket.org/yt_analysis/yt/commits/f7f5c243d5e1/
Changeset:   f7f5c243d5e1
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-24 05:10:54+00:00
Summary:     Add a default setup for the transfer function helper
Affected #:  1 file

diff -r 97961e1059f9ba265d16f97151a0a24faf932130 -r f7f5c243d5e1ad1ac8d0ab719f956747b9b0a5f3 yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -65,7 +65,7 @@
             in the dataset.  This can be slow for very large datasets.
         """
         if bounds is None:
-            bounds = self.pf.h.all_data().quantities['Extrema'](self.field)[0]
+            bounds = self.pf.h.all_data().quantities['Extrema'](self.field)
         self.bounds = bounds
 
         # Do some error checking.
@@ -130,6 +130,12 @@
                                         nbins=512)
         return self.tf
 
+    def setup_default(self):
+        """docstring for setup_default"""
+        mi, ma = self.bounds
+        print 'I AM MAPPING BETWEEN', mi, ma
+        self.tf.map_to_colormap(mi, ma, scale=10.0, colormap='RdBu_r')
+
     def plot(self, fn=None, profile_field=None, profile_weight=None):
         """
         Save the current transfer function to a bitmap, or display
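
The helper's default path, as exercised by VolumeSource.setup in the earlier
render_source.py commit, now looks roughly like this (a sketch; pf stands for
a loaded dataset, and bounds fall back to the field's extrema when not set
explicitly):

tfh = TransferFunctionHelper(pf)
tfh.set_field("density")
tfh.set_bounds([0.0, 1.0])     # or omit to use the computed extrema
tfh.set_log(False)
tfh.build_transfer_function()
tfh.setup_default()            # maps [mi, ma] onto the RdBu_r colormap
tf = tfh.tf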


https://bitbucket.org/yt_analysis/yt/commits/6e98dcf56473/
Changeset:   6e98dcf56473
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-24 22:07:48+00:00
Summary:     Adding lots of validation, moving pieces between engine and camera, not sure where they should end up.
Affected #:  7 files

diff -r f7f5c243d5e1ad1ac8d0ab719f956747b9b0a5f3 -r 6e98dcf564739c112ffa92393dd561f1264dd34b yt/frontends/halo_catalogs/halo_catalog/data_structures.py
--- a/yt/frontends/halo_catalogs/halo_catalog/data_structures.py
+++ b/yt/frontends/halo_catalogs/halo_catalog/data_structures.py
@@ -52,8 +52,11 @@
     _suffix = ".h5"
 
     def __init__(self, filename, dataset_type="halocatalog_hdf5",
-                 n_ref = 16, over_refine_factor = 1):
+                 n_ref = 16, over_refine_factor = 1, catalog=None):
         self.n_ref = n_ref
+        if catalog is None:
+            datalog = whatever_default
+        self.catalog = catalog
         self.over_refine_factor = over_refine_factor
         super(HaloCatalogDataset, self).__init__(filename, dataset_type)
 

diff -r f7f5c243d5e1ad1ac8d0ab719f956747b9b0a5f3 -r 6e98dcf564739c112ffa92393dd561f1264dd34b yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -60,7 +60,7 @@
         self.north_vector = north_vector
         self._setup_normalized_vectors(normal_vector, north_vector)
         if self.north_vector is None:
-            self.north_vector = self.unit_vectors[1] 
+            self.north_vector = self.unit_vectors[1]
 
     def _setup_normalized_vectors(self, normal_vector, north_vector):
         # Now we set up our various vectors

diff -r f7f5c243d5e1ad1ac8d0ab719f956747b9b0a5f3 -r 6e98dcf564739c112ffa92393dd561f1264dd34b yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -23,12 +23,27 @@
 
     r"""    """
 
-    def __init__(self, data_source):
+    _moved = True
+
+    def __init__(self, data_source=None):
         """Initialize a Camera Instance"""
         self.data_source = data_source
+        self.position = None
+        self.north_vector = np.array([0.0, 0.0, 1.0])
+        self.resolution = (256, 256)
+        self.light = None
+        self.width = None
+        self.focus = np.zeros(3)
+        self.position = np.ones(3)
+        if data_source is not None:
+            self.inherit_default_from_data_source()
+        else:
+            super(Camera, self).__init__(self.focus - self.position,
+                                         self.north_vector, steady_north=True)
+
+    def inherit_default_from_data_source(self):
+        data_source = self.data_source
         self.position = data_source.pf.domain_right_edge
-        self.north_vector = np.array([0.0, 0.0, 1.0])
-        self.resolution = (512, 512)
 
         width = data_source.pf.domain_width.max()
         focus = data_source.pf.domain_center
@@ -43,22 +58,11 @@
             width = self.data_source.pf.arr(width, input_units="code_length")
         if not isinstance(focus, YTArray):
             focus = self.pf.arr(focus, input_units="code_length")
+
         self.width = width
         self.focus = focus
 
         super(Camera, self).__init__(self.focus - self.position,
                                      self.north_vector, steady_north=True)
+        self._moved = True
 
-        self._setup_box_properties()
-        self.light = None
-
-    def _setup_box_properties(self):
-        unit_vectors = self.unit_vectors
-        width = self.width
-        center = self.focus
-        self.box_vectors = YTArray([unit_vectors[0] * width[0],
-                                    unit_vectors[1] * width[1],
-                                    unit_vectors[2] * width[2]])
-        self.origin = center - 0.5 * width.dot(YTArray(unit_vectors, ""))
-        self.back_center = center - 0.5 * width[2] * unit_vectors[2]
-        self.front_center = center + 0.5 * width[2] * unit_vectors[2]

diff -r f7f5c243d5e1ad1ac8d0ab719f956747b9b0a5f3 -r 6e98dcf564739c112ffa92393dd561f1264dd34b yt/visualization/volume_rendering/engine.py
--- a/yt/visualization/volume_rendering/engine.py
+++ b/yt/visualization/volume_rendering/engine.py
@@ -13,11 +13,11 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.funcs import get_pbar
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
 from yt.utilities.lib.grid_traversal import \
     VolumeRenderSampler
+from yt.units.yt_array import YTArray
 import numpy as np
 
 
@@ -28,6 +28,10 @@
     def __init__(self, ):
         super(Engine, self).__init__()
 
+    def camera_updated(self):
+        """docstring for update_camera"""
+        pass
+
 
 class PlaneParallelEngine(Engine):
 
@@ -42,13 +46,40 @@
         self.sub_samples = 5
         self.num_threads = 1
         self.double_check = False
+        self.box_vectors = None
+        self.origin = None
+        self.back_center = None
+        self.front_center = None
+
+        if scene.camera:
+            self._setup_box_properties()
         self.sampler = self.get_sampler()
 
+    def set_camera(self, camera):
+        """set the camera for this engine"""
+        self.camera = camera
+
+    def camera_updated(self):
+        if self.camera._moved:
+            self._setup_box_properties()
+
+    def _setup_box_properties(self):
+        unit_vectors = self.camera.unit_vectors
+        width = self.camera.width
+        center = self.camera.focus
+        self.box_vectors = YTArray([unit_vectors[0] * width[0],
+                                    unit_vectors[1] * width[1],
+                                    unit_vectors[2] * width[2]])
+        self.origin = center - 0.5 * width.dot(YTArray(unit_vectors, ""))
+        self.back_center = center - 0.5 * width[2] * unit_vectors[2]
+        self.front_center = center + 0.5 * width[2] * unit_vectors[2]
+
     def get_sampler(self):
+        self.render_source.prepare()
         image = self.render_source.current_image
         rotp = np.concatenate([self.scene.camera.inv_mat.ravel('F'),
-                               self.scene.camera.back_center.ravel()])
-        args = (rotp, self.camera.box_vectors[2], self.camera.back_center,
+                               self.back_center.ravel()])
+        args = (rotp, self.box_vectors[2], self.back_center,
                 (-self.camera.width[0] / 2.0, self.camera.width[0] / 2.0,
                  -self.camera.width[1] / 2.0, self.camera.width[1] / 2.0),
                 image, self.camera.unit_vectors[
@@ -66,7 +97,7 @@
                     if np.any(np.isnan(data)):
                         raise RuntimeError
 
-        view_pos = self.camera.front_center + \
+        view_pos = self.front_center + \
             self.camera.unit_vectors[2] * 1.0e6 * self.camera.width[2]
         for brick in self.render_source.volume.traverse(view_pos):
             self.sampler(brick, num_threads=self.num_threads)
@@ -78,7 +109,7 @@
 
     def finalize_image(self, image):
         cam = self.scene.camera
-        view_pos = cam.front_center + cam.unit_vectors[2] * \
+        view_pos = self.front_center + cam.unit_vectors[2] * \
             1.0e6 * cam.width[2]
         image = self.render_source.volume.reduce_tree_images(image, view_pos)
         if self.transfer_function.grey_opacity is False:

diff -r f7f5c243d5e1ad1ac8d0ab719f956747b9b0a5f3 -r 6e98dcf564739c112ffa92393dd561f1264dd34b yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -19,6 +19,7 @@
 from yt.utilities.amr_kdtree.api import AMRKDTree
 from transfer_function_helper import TransferFunctionHelper
 from engine import PlaneParallelEngine
+from camera import Camera
 
 
 class RenderSource(ParallelAnalysisInterface):
@@ -28,65 +29,104 @@
 
     def __init__(self):
         super(RenderSource, self).__init__()
-        pass
+        self.opaque = False
 
     def request(self, *args, **kwargs):
         """returns a new ImageArray"""
         pass
 
+    def setup(self):
+        """Set up data needed to render"""
+        pass
+
+
+class OpaqueSource(RenderSource):
+    """docstring for OpaqueSource"""
+    def __init__(self):
+        super(OpaqueSource, self).__init__()
+        self.opaque = True
+
 
 class VolumeSource(RenderSource):
 
     """docstring for VolumeSource"""
 
-    def __init__(self, scene, data_source, field=None):
+    def __init__(self, data_source, field=None):
         super(VolumeSource, self).__init__()
-        self.scene = scene
         self.data_source = data_source
+        self.field = field
+        self.scene = None
         self.volume = None
-        self.field = field
         self.current_image = None
         self.engine = None
-        self.setup()
 
         # In the future these will merge
         self.transfer_function = None
         self.tfh = None
-        self.setup()
+        self.build_default_volume()
+
+    def build_defaults(self):
+        if self.data_source is not None:
+            self.build_default_transfer_function()
+            self.build_default_engine()
 
     def validate(self):
-        """docstring for validate"""
+        """Make sure that all dependencies have been met"""
+        if self.scene is None:
+            raise RuntimeError("Scene not initialized")
+
         if self.data_source is None:
             raise RuntimeError("Data source not initialized")
 
         if self.volume is None:
             raise RuntimeError("Volume not initialized")
 
+        if self.engine is None:
+            raise RuntimeError("Engine not initialized")
+
         if self.transfer_function is None:
             raise RuntimeError("Transfer Function not Supplied")
+        self.setup()
 
-    def setup(self):
-        """setup VolumeSource"""
-        self.current_image = self.new_image()
+    def build_default_transfer_function(self):
         self.tfh = \
             TransferFunctionHelper(self.data_source.pf)
         self.tfh.set_field(self.field)
-        self.tfh.set_bounds([0.0, 1.0])
-        self.tfh.set_log(False)
         self.tfh.build_transfer_function()
         self.tfh.setup_default()
         self.transfer_function = self.tfh.tf
+
+    def build_default_engine(self):
         self.engine = PlaneParallelEngine(self.scene, self)
+
+    def build_default_volume(self):
         self.volume = AMRKDTree(self.data_source.pf,
                                 data_source=self.data_source)
-        self.volume.initialize_source([self.field], [False], True)
+        log_fields = [self.data_source.pf.field_info[self.field].take_log]
+        self.volume.set_fields([self.field], log_fields, False)
+
+    def set_scene(self, scene):
+        self.scene = scene
+        if self.engine is not None:
+            self.engine.set_camera(scene.camera)
+
+    def set_camera(self, camera):
+        """Set camera in this object, as well as any attributes"""
+        self.engine.set_camera(camera)
+
+    def prepare(self):
+        """prepare for rendering"""
+        self.scene.validate()
+        self.new_image()
 
     def new_image(self):
         cam = self.scene.camera
-        image = np.zeros((cam.resolution[0],
-                          cam.resolution[1], 4),
-                         dtype='float64', order='C')
-        return image
+        if cam is None:
+            cam = Camera(self.data_source)
+            self.scene.camera = cam
+        self.current_image = np.zeros((cam.resolution[0], cam.resolution[1],
+                                       4), dtype='float64', order='C')
+        return self.current_image
 
     def teardown(self):
         """docstring for teardown"""

diff -r f7f5c243d5e1ad1ac8d0ab719f956747b9b0a5f3 -r 6e98dcf564739c112ffa92393dd561f1264dd34b yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -15,7 +15,7 @@
 from yt.funcs import mylog
 from yt.data_objects.static_output import Dataset
 from camera import Camera
-from render_source import VolumeSource
+from render_source import VolumeSource, OpaqueSource
 
 
 class Scene(object):
@@ -31,9 +31,50 @@
         self.sources = {}
         self.camera_path = None
 
+    def set_camera(self, camera):
+        self.camera = camera
+
+        for source in self.sources.values():
+            source.set_camera(self.camera)
+
+    def setup_camera_links(self):
+        """
+        The camera object needs to be linked to:
+            * Engines
+            * Render Sources
+        """
+        if self.camera is None:
+            raise RuntimeError("Camera does not exist")
+
+        for source in self.sources.values():
+            source.set_camera(self.camera)
+
+    def iter_opaque_sources(self):
+        """
+        Iterate over opaque RenderSource objects,
+        returning a tuple of (key, source)
+        """
+        for k, source in self.sources.iteritems():
+            if isinstance(source, OpaqueSource):
+                yield k, source
+
+    def validate(self):
+        if self.camera is None:
+            for k, source in self.sources.iteritems():
+                try:
+                    self.camera = Camera(source.data_source)
+                    return
+                except:
+                    pass
+                raise RuntimeError("Couldn't build default camera")
+        return
+
     def request(self):
         pass
 
+    def composite(self):
+        pass
+
     @property
     def current(self):
         if self._current is None:
@@ -44,17 +85,29 @@
         """Add a dataset to the scene"""
         self.datasets.append(ds)
 
-    def add_volume_rendering(self):
-        """docstring for add_volume_rendering"""
-        pass
+    def add_source(self, render_source, keyname=None):
+        """
+        Add a render source to the scene.  This will autodetect the
+        type of source.
+        """
+        if keyname is None:
+            keyname = 'source_%02i' % len(self.sources)
 
-    def add_slice(self):
-        """docstring for add_slice"""
-        pass
+        render_source.set_scene(self)
 
-    def add_streamlines(self):
-        """docstring for add_streamlines"""
-        pass
+        self.sources[keyname] = render_source
+
+        return self
+
+    def render(self):
+        self.validate()
+        ims = {}
+        for k, v in self.sources.iteritems():
+            v.validate()
+            print 'Running', k, v
+            ims[k] = v.request()
+
+        return ims
 
 
 class RenderScene(Scene):
@@ -69,7 +122,6 @@
         else:
             self.ds = data_source.pf
 
-        print 'DATA SOURCE: ', data_source
         self.data_source = data_source
         self.camera = Camera(data_source)
         self.field = field
@@ -81,17 +133,11 @@
         if self.field is None:
             self.ds.field_list
             self.field = self.ds.field_list[0]
-            print 'WHAT FIELD AM I: ', self.field
             mylog.info('Setting default field to %s' % self.field.__repr__())
 
         if self.data_source:
-            self.render_sources['vr1'] = \
-                VolumeSource(self, self.data_source, self.field)
+            render_source = VolumeSource(self.data_source, self.field)
+            self.add_source(render_source)
+            render_source.build_defaults()
 
-    def render(self):
-        ims = {}
-        for k, v in self.render_sources.iteritems():
-            print 'Running', k, v
-            ims[k] = v.request()
 
-        return ims

diff -r f7f5c243d5e1ad1ac8d0ab719f956747b9b0a5f3 -r 6e98dcf564739c112ffa92393dd561f1264dd34b yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -125,6 +125,7 @@
             mi, ma = np.log10(self.bounds[0]), np.log10(self.bounds[1])
         else:
             mi, ma = self.bounds
+
         self.tf = ColorTransferFunction((mi, ma),
                                         grey_opacity=self.grey_opacity,
                                         nbins=512)
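
After this round of validation work the pieces compose explicitly instead of
through a monolithic camera. A hedged sketch of the wiring, with names taken
from the diffs and dd standing in for a yt data object:

sc = Scene()
vs = VolumeSource(dd, field="density")   # builds a default AMRKDTree volume
sc.add_source(vs)                        # auto-names the key, calls set_scene()
vs.build_defaults()                      # default transfer function + engine
images = sc.render()                     # validates, then renders each source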


https://bitbucket.org/yt_analysis/yt/commits/abbfa770503a/
Changeset:   abbfa770503a
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-24 22:11:45+00:00
Summary:     Merging in from tip
Affected #:  52 files

diff -r 6e98dcf564739c112ffa92393dd561f1264dd34b -r abbfa770503a6345f7d649e20bf606bd4e7e546c .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -36,6 +36,7 @@
 yt/utilities/lib/mesh_utilities.c
 yt/utilities/lib/misc_utilities.c
 yt/utilities/lib/Octree.c
+yt/utilities/lib/origami.c
 yt/utilities/lib/png_writer.c
 yt/utilities/lib/PointsInVolume.c
 yt/utilities/lib/QuadTree.c

diff -r 6e98dcf564739c112ffa92393dd561f1264dd34b -r abbfa770503a6345f7d649e20bf606bd4e7e546c CITATION
--- a/CITATION
+++ b/CITATION
@@ -29,3 +29,28 @@
    adsurl = {http://adsabs.harvard.edu/abs/2011ApJS..192....9T},
   adsnote = {Provided by the SAO/NASA Astrophysics Data System}
 }
+
+Using yt can also utilize other functionality.  If you utilize ORIGAMI, we ask
+that you please cite the ORIGAMI paper:
+
+@ARTICLE{2012ApJ...754..126F,
+   author = {{Falck}, B.~L. and {Neyrinck}, M.~C. and {Szalay}, A.~S.},
+    title = "{ORIGAMI: Delineating Halos Using Phase-space Folds}",
+  journal = {\apj},
+archivePrefix = "arXiv",
+   eprint = {1201.2353},
+ primaryClass = "astro-ph.CO",
+ keywords = {dark matter, galaxies: halos, large-scale structure of universe, methods: numerical},
+     year = 2012,
+    month = aug,
+   volume = 754,
+      eid = {126},
+    pages = {126},
+      doi = {10.1088/0004-637X/754/2/126},
+   adsurl = {http://adsabs.harvard.edu/abs/2012ApJ...754..126F},
+  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+}
+
+The main homepage for ORIGAMI can be found here:
+
+http://icg.port.ac.uk/~falckb/origami.html

diff -r 6e98dcf564739c112ffa92393dd561f1264dd34b -r abbfa770503a6345f7d649e20bf606bd4e7e546c doc/extensions/notebook_sphinxext.py
--- a/doc/extensions/notebook_sphinxext.py
+++ b/doc/extensions/notebook_sphinxext.py
@@ -138,7 +138,7 @@
     # Create evaluated version and save it to the dest path.
     # Always use --pylab so figures appear inline
     # perhaps this is questionable?
-    nb_runner = NotebookRunner(nb_in=nb_path, pylab=True)
+    nb_runner = NotebookRunner(nb_path, pylab=False)
     nb_runner.run_notebook(skip_exceptions=skip_exceptions)
     if dest_path is None:
         dest_path = 'temp_evaluated.ipynb'

diff -r 6e98dcf564739c112ffa92393dd561f1264dd34b -r abbfa770503a6345f7d649e20bf606bd4e7e546c doc/source/analyzing/creating_derived_fields.rst
--- a/doc/source/analyzing/creating_derived_fields.rst
+++ b/doc/source/analyzing/creating_derived_fields.rst
@@ -295,8 +295,6 @@
      (*Advanced*) Should this field appear in the dropdown box in Reason?
    ``not_in_all``
      (*Advanced*) If this is *True*, the field may not be in all the grids.
-   ``projection_conversion``
-     (*Advanced*) Which unit should we multiply by in a projection?
 
 How Do Units Work?
 ------------------

diff -r 6e98dcf564739c112ffa92393dd561f1264dd34b -r abbfa770503a6345f7d649e20bf606bd4e7e546c doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -40,8 +40,7 @@
 
    add_enzo_field("Cooling_Time", units=r"\rm{s}",
                   function=NullFunc,
-                  validators=ValidateDataField("Cooling_Time"),
-                  projection_conversion="1")
+                  validators=ValidateDataField("Cooling_Time"))
 
 Note that we used the ``NullFunc`` function here.  To add a derived field,
 which is not expected to necessarily exist on disk, use the standard

diff -r 6e98dcf564739c112ffa92393dd561f1264dd34b -r abbfa770503a6345f7d649e20bf606bd4e7e546c doc/source/examining/index.rst
--- a/doc/source/examining/index.rst
+++ b/doc/source/examining/index.rst
@@ -6,6 +6,6 @@
 .. toctree::
    :maxdepth: 2
 
-   supported_frontends_data
+   loading_data
    generic_array_data
    low_level_inspection

diff -r 6e98dcf564739c112ffa92393dd561f1264dd34b -r abbfa770503a6345f7d649e20bf606bd4e7e546c doc/source/examining/loading_data.rst
--- /dev/null
+++ b/doc/source/examining/loading_data.rst
@@ -0,0 +1,601 @@
+.. _loading-data:
+
+Loading Data
+============
+
+This section contains information on how to load data into ``yt``, as well as
+some important caveats about different data formats.
+
+.. _loading-enzo-data:
+
+Enzo Data
+---------
+
+Enzo data is fully supported and cared for by Matthew Turk.  To load an Enzo
+dataset, you can use the ``load`` command provided by ``yt.mods`` and supply to
+it the parameter file name.  This would be the name of the output file, and it
+contains no extension.  For instance, if you have the following files:
+
+.. code-block:: none
+
+   DD0010/
+   DD0010/data0010
+   DD0010/data0010.index
+   DD0010/data0010.cpu0000
+   DD0010/data0010.cpu0001
+   DD0010/data0010.cpu0002
+   DD0010/data0010.cpu0003
+
+You would feed the ``load`` command the filename ``DD0010/data0010`` as
+mentioned.
+
+.. code-block:: python
+
+   from yt.mods import *
+   pf = load("DD0010/data0010")
+
+.. rubric:: Caveats
+
+* There are no major caveats for Enzo usage
+* Units should be correct, if you utilize standard unit-setting routines.  yt
+  will notify you if it cannot determine the units, although this
+  notification will be passive.
+* 2D and 1D data are supported, but the extraneous dimensions are set to be
+  of length 1.0 in "code length" which may produce strange results for volume
+  quantities.
+
+.. _loading-orion-data:
+
+Boxlib Data
+-----------
+
+yt has been tested with Boxlib data generated by Orion, Nyx, Maestro and
+Castro.  Currently it is cared for by a combination of Andrew Myers, Chris
+Malone, and Matthew Turk.
+
+To load a Boxlib dataset, you can use the ``load`` command provided by
+``yt.mods`` and supply to it the directory file name.  **You must also have the
+``inputs`` file in the base directory.**  For instance, if you were in a
+directory with the following files:
+
+.. code-block:: none
+
+   inputs
+   pltgmlcs5600/
+   pltgmlcs5600/Header
+   pltgmlcs5600/Level_0
+   pltgmlcs5600/Level_0/Cell_H
+   pltgmlcs5600/Level_1
+   pltgmlcs5600/Level_1/Cell_H
+   pltgmlcs5600/Level_2
+   pltgmlcs5600/Level_2/Cell_H
+   pltgmlcs5600/Level_3
+   pltgmlcs5600/Level_3/Cell_H
+   pltgmlcs5600/Level_4
+   pltgmlcs5600/Level_4/Cell_H
+
+You would feed it the filename ``pltgmlcs5600``:
+
+.. code-block:: python
+
+   from yt.mods import *
+   pf = load("pltgmlcs5600")
+
+.. _loading-flash-data:
+
+FLASH Data
+----------
+
+FLASH HDF5 data is *mostly* supported and cared for by John ZuHone.  To load a
+FLASH dataset, you can use the ``load`` command provided by ``yt.mods`` and
+supply to it the file name of a plot file or checkpoint file, but particle
+files are not currently directly loadable by themselves, due to the fact that
+they typically lack grid information. For instance, if you were in a directory
+with the following files:
+
+.. code-block:: none
+
+   cosmoSim_coolhdf5_chk_0026
+
+You would feed it the filename ``cosmoSim_coolhdf5_chk_0026``:
+
+.. code-block:: python
+
+   from yt.mods import *
+   pf = load("cosmoSim_coolhdf5_chk_0026")
+
+If you have a FLASH particle file that was created at the same time as
+a plotfile or checkpoint file (therefore having particle data
+consistent with the grid structure of the latter), its data may be loaded with the
+``particle_filename`` optional argument:
+
+.. code-block:: python
+
+    from yt.mods import *
+    pf = load("radio_halo_1kpc_hdf5_plt_cnt_0100", particle_filename="radio_halo_1kpc_hdf5_part_0100")
+
+.. rubric:: Caveats
+
+* Please be careful that the units are correctly utilized; yt assumes cgs.
+
+.. _loading-ramses-data:
+
+RAMSES Data
+-----------
+
+In yt-3.0, RAMSES data is fully supported.  If you are interested in taking a
+development or stewardship role, please contact the yt-dev mailing list.  To
+load a RAMSES dataset, you can use the ``load`` command provided by ``yt.mods``
+and supply to it the ``info*.txt`` filename.  For instance, if you were in a
+directory with the following files:
+
+.. code-block:: none
+
+   output_00007
+   output_00007/amr_00007.out00001
+   output_00007/grav_00007.out00001
+   output_00007/hydro_00007.out00001
+   output_00007/info_00007.txt
+   output_00007/part_00007.out00001
+
+You would feed it the filename ``output_00007/info_00007.txt``:
+
+.. code-block:: python
+
+   from yt.mods import *
+   pf = load("output_00007/info_00007.txt")
+
+yt will attempt to guess the fields in the file.  You may also specify a list
+of fields by supplying the ``fields`` keyword in your call to ``load``.
+
+.. _loading-gadget-data:
+
+Gadget Data
+-----------
+
+yt has support for reading Gadget data in both raw binary and HDF5 formats.  It
+is able to access the particles as it would any other particle dataset, and it
+can apply smoothing kernels to the data to produce both quantitative analysis
+and visualization.
+
+Gadget data in HDF5 format can be loaded with the ``load`` command:
+
+.. code-block:: python
+
+   from yt.mods import *
+   pf = load("snapshot_061.hdf5")
+
+However, yt cannot detect raw-binary Gadget data, and so you must specify the
+format as being Gadget:
+
+.. code-block:: python
+
+   from yt.mods import *
+   pf = GadgetDataset("snapshot_061")
+
+.. _particle-bbox:
+
+Units and Bounding Boxes
+++++++++++++++++++++++++
+
+There are two additional pieces of information that may be needed.  If your
+simulation is cosmological, yt can often guess the bounding box and the units
+of the simulation.  However, for isolated simulations and for cosmological
+simulations with non-standard units, these must be supplied.  For example, if
+a length unit of 1.0 corresponds to a kiloparsec, you can supply this in the
+constructor.  yt can accept units such as ``Mpc``, ``kpc``, ``cm``, ``Mpccm/h``
+and so on.  In particular, note that ``Mpc/h`` and ``Mpccm/h`` (where ``cm``
+denotes comoving) are both usable unit definitions.
+
+yt will attempt to use units for ``mass``, ``length`` and ``time`` as supplied
+in the argument ``unit_base``.  The ``bounding_box`` argument is a list of
+two-item tuples or lists that describe the left and right extents of the
+particles.
+
+.. code-block:: python
+
+   pf = GadgetDataset("snap_004",
+           unit_base = {'length': ('kpc', 1.0)},
+           bounding_box = [[-600.0, 600.0], [-600.0, 600.0], [-600.0, 600.0]])
+
+.. _particle-indexing-criteria:
+
+Indexing Criteria
++++++++++++++++++
+
+yt generates a global mesh index via an octree that governs the resolution of
+volume elements.  This is controlled by two parameters, ``n_ref`` and
+``over_refine_factor``, which act as weak proxies for each other.  The first,
+``n_ref``, governs how many particles an oct may contain before it is refined
+into eight child octs.  Lower values mean higher resolution; the default is
+64.  The second parameter, ``over_refine_factor``, governs how many cells are
+in a given oct; the default value of 1 corresponds to 8 cells, and in general
+an oct contains ``2**(3*over_refine_factor)`` cells.
+
+If you want higher resolution, it is recommended that you reduce ``n_ref`` to
+32 or 16.
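+
+A sketch of supplying both parameters at load time (the snapshot name is
+hypothetical):
+
+.. code-block:: python
+
+   from yt.mods import *
+   # n_ref=32 refines octs sooner; over_refine_factor=2 gives 2**(3*2) = 64
+   # cells per oct.
+   pf = GadgetDataset("snapshot_061", n_ref=32, over_refine_factor=2)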
+
+.. _gadget-field-spec:
+
+Field Specifications
+++++++++++++++++++++
+
+Binary Gadget outputs often have additional fields or particle types that are
+not part of the default Gadget distribution format.  These can be
+specified in the call to ``GadgetDataset`` by either supplying one of the
+sets of field specifications as a string or by supplying a field specification
+itself.  As an example, yt has built-in definitions for ``default`` (the
+default) and ``agora_unlv``.  Field specifications must be tuples, and must be
+of this format:
+
+.. code-block:: python
+
+   default = ( "Coordinates",
+               "Velocities",
+               "ParticleIDs",
+               "Mass",
+               ("InternalEnergy", "Gas"),
+               ("Density", "Gas"),
+               ("SmoothingLength", "Gas"),
+   )
+
+This is the default specification used by the Gadget frontend.  It means that
+the fields are, in order, Coordinates, Velocities, ParticleIDs, Mass, and the
+fields InternalEnergy, Density and SmoothingLength *only* for Gas particles.
+So for example, if you have defined a Metallicity field for the particle type
+Halo, which comes right after ParticleIDs in the file, you could define it like
+this:
+
+.. code-block:: python
+
+   my_field_def = ( "Coordinates",
+               "Velocities",
+               "ParticleIDs",
+               ("Metallicity", "Halo"),
+               "Mass",
+               ("InternalEnergy", "Gas"),
+               ("Density", "Gas"),
+               ("SmoothingLength", "Gas"),
+   )
+
+To save time, you can use yt's plugins file to add items to the dictionary
+where these definitions are stored.  You could do this like so:
+
+.. code-block:: python
+
+   from yt.frontends.sph.definitions import gadget_field_specs
+   gadget_field_specs["my_field_def"] = my_field_def
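+
+Once registered, the new specification can be requested by name; a sketch,
+assuming the keyword argument is ``field_spec`` (the snapshot name is
+hypothetical):
+
+.. code-block:: python
+
+   from yt.mods import *
+   pf = GadgetDataset("snapshot_061", field_spec="my_field_def")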
+
+Please also feel free to issue a pull request with any new field
+specifications, as we're happy to include them in the main distribution!
+
+.. _gadget-ptype-spec:
+
+Particle Type Definitions
++++++++++++++++++++++++++
+
+In some cases, research groups add new particle types or re-order them.  You
+can supply alternate particle types by using the keyword ``ptype_spec`` to the
+``GadgetDataset`` call.  The default for Gadget binary data is:
+
+.. code-block:: python
+
+    ( "Gas", "Halo", "Disk", "Bulge", "Stars", "Bndry" )
+
+You can specify alternate names, but note that this may cause problems with
+the field specification if none of the new names match the old ones.
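+
+For example, a sketch that renames the star particle type (the snapshot name
+is hypothetical; the other names are kept so the default field specification
+still lines up):
+
+.. code-block:: python
+
+   from yt.mods import *
+   pf = GadgetDataset("snapshot_061",
+                      ptype_spec=("Gas", "Halo", "Disk", "Bulge",
+                                  "NewStars", "Bndry"))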
+
+.. _gadget-header-spec:
+
+Header Specification
+++++++++++++++++++++
+
+If you have modified the header in your Gadget binary file, you can specify an
+alternate header specification with the keyword ``header_spec``.  This can
+either be a list of strings corresponding to individual header types known to
+yt, or it can be a combination of strings and header specifications.  The
+default header specification (found in ``yt/frontends/sph/definitions.py``) is:
+
+.. code-block:: python
+   
+    default      = (('Npart', 6, 'i'),
+                    ('Massarr', 6, 'd'),
+                    ('Time', 1, 'd'),
+                    ('Redshift', 1, 'd'),
+                    ('FlagSfr', 1, 'i'),
+                    ('FlagFeedback', 1, 'i'),
+                    ('Nall', 6, 'i'),
+                    ('FlagCooling', 1, 'i'),
+                    ('NumFiles', 1, 'i'),
+                    ('BoxSize', 1, 'd'),
+                    ('Omega0', 1, 'd'),
+                    ('OmegaLambda', 1, 'd'),
+                    ('HubbleParam', 1, 'd'),
+                    ('FlagAge', 1, 'i'),
+                    ('FlagMetals', 1, 'i'),
+                    ('NallHW', 6, 'i'),
+                    ('unused', 16, 'i'))
+
+These items will all be accessible inside the object ``pf.parameters``, which
+is a dictionary.  You can add combinations of new items, specified in the same
+way, or alternately other types of headers.  The other string keys defined are
+``pad32``, ``pad64``, ``pad128``, and ``pad256``, each of which corresponds to
+that many bytes of empty padding.  For example, if you have an additional 256
+bytes of
+padding at the end, you can specify this with:
+
+.. code-block:: python
+
+   header_spec = ["default", "pad256"]
+
+This can then be supplied to the constructor.  Note that you can also do this
+manually, for instance with:
+
+.. code-block:: python
+
+   header_spec = ["default", (('some_value', 8, 'd'),
+                              ('another_value', 1, 'i'))]
+
+The letters correspond to data types from the Python struct module.  Please
+feel free to submit alternate header types to the main yt repository.
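+
+The assembled specification is then passed through the ``header_spec``
+keyword; a sketch reusing the padded specification from above (the snapshot
+name is hypothetical):
+
+.. code-block:: python
+
+   from yt.mods import *
+
+   # 'i' maps to a 4-byte integer and 'd' to an 8-byte float, following the
+   # Python struct module's type codes.
+   header_spec = ["default", "pad256"]
+   pf = GadgetDataset("snapshot_061", header_spec=header_spec)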
+
+.. _specifying-gadget-units:
+
+Specifying Units
+++++++++++++++++
+
+If you are running a cosmology simulation, yt will be able to guess the units
+with some reliability.  However, if you are not, and you do not specify a
+parameter file, yt cannot guess them and will fall back to defaults: a length
+unit of 1.0 comoving Mpc/h, velocities in cm/s, and masses in units of 10^10
+Msun/h.  You can specify alternate units by supplying the ``unit_base`` keyword
+argument of this form:
+
+.. code-block:: python
+
+   unit_base = {'length': (1.0, 'cm'), 'mass': (1.0, 'g'), 'time': (1.0, 's')}
+
+yt will utilize length, mass and time to set up all other units.
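+
+A sketch of supplying ``unit_base`` at load time (the snapshot name is
+hypothetical); derived units such as density then follow as mass/length**3:
+
+.. code-block:: python
+
+   from yt.mods import *
+   unit_base = {'length': (1.0, 'cm'), 'mass': (1.0, 'g'), 'time': (1.0, 's')}
+   pf = GadgetDataset("snapshot_061", unit_base=unit_base)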
+
+.. _loading-tipsy-data:
+
+Tipsy Data
+----------
+
+yt also supports loading Tipsy data.  Many of its characteristics are similar
+to how Gadget data is loaded; specifically, it shares its definition of
+indexing and mesh-identification with that described in
+:ref:`particle-indexing-criteria`.  However, unlike Gadget, the Tipsy frontend
+has not yet implemented header specifications, field specifications, or
+particle type specifications.  *These are all excellent projects for new
+contributors!*
+
+Tipsy data cannot be automatically detected.  You can load it with a command
+similar to the following:
+
+.. code-block:: python
+
+    ds = TipsyDataset('test.00169',
+        parameter_file='test.param',
+        endian = '<',
+        domain_left_edge = domain_left_edge,
+        domain_right_edge = domain_right_edge,
+    )
+
+Not all of these arguments are necessary; additionally, yt accepts the
+arguments ``n_ref``, ``over_refine_factor``, ``cosmology_parameters``, and
+``unit_base``.  By default, yt will not utilize a parameter file, and it will
+assume the data is big-endian (``>``).  Optionally, you may
+specify ``field_dtypes``, which describe the size of various fields.  For
+example, if you have stored positions as 64-bit floats, you can specify this
+with:
+
+.. code-block:: python
+
+    ds = TipsyDataset("./halo1e11_run1.00400", endian="<",
+                           field_dtypes = {"Coordinates": "d"})
+
+.. _specifying-cosmology-tipsy:
+
+Specifying Tipsy Cosmological Parameters
+++++++++++++++++++++++++++++++++++++++++
+
+Cosmological parameters can be specified to Tipsy to enable computation of
+default units.  The parameters recognized are of this form:
+
+.. code-block:: python
+
+   cosmology_parameters = {'current_redshift': 0.0,
+                           'omega_lambda': 0.728,
+                           'omega_matter': 0.272,
+                           'hubble_constant': 0.702}
+
+If specified, these will be used to set the units.
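+
+A sketch of passing them through at load time, reusing the filename from the
+example above:
+
+.. code-block:: python
+
+    ds = TipsyDataset('test.00169',
+        cosmology_parameters=cosmology_parameters)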
+
+.. _loading-artio-data:
+
+ARTIO Data
+----------
+
+ARTIO data has a well-specified internal parameter system and has few free
+parameters.  However, for optimization purposes, the parameter that provides
+the most guidance to yt as to how to manage ARTIO data is ``max_range``.  This
+governs the maximum number of space-filling curve cells that will be used in a
+single "chunk" of data read from disk.  For small datasets, setting this number
+very large will enable more data to be loaded into memory at any given time;
+for very large datasets, this parameter can be left alone safely.  By default
+it is set to 1024; it can in principle be set as high as the total number of
+SFC cells.
+
+To load ARTIO data, you can use a command such as this:
+
+.. code-block:: python
+
+    ds = load("./A11QR1/s11Qzm1h2_a1.0000.art")
+
+.. _loading-art-data:
+
+ART Data
+--------
+
+ART data enjoys preliminary support and has been supported in the past by
+Christopher Moody.  Please contact the ``yt-dev`` mailing list if you are
+interested in using yt for ART data, or if you are interested in assisting with
+development of yt to work with ART data.
+
+To load an ART dataset, use the ``load`` command provided by ``yt.mods`` and
+pass it the gas mesh file.  yt will search for and attempt to find the
+complementary dark matter and stellar particle header and data files.
+However, your simulations may not follow the same naming convention.
+
+So for example, a single snapshot might have a series of files looking like
+this:
+
+.. code-block:: none
+
+   10MpcBox_csf512_a0.300.d    #Gas mesh
+   PMcrda0.300.DAT             #Particle header
+   PMcrs0a0.300.DAT            #Particle data (positions,velocities)
+   stars_a0.300.dat            #Stellar data (metallicities, ages, etc.)
+
+The ART frontend tries to find the associated files matching the above, but
+if that fails you can specify ``file_particle_header``, ``file_particle_data``,
+and ``file_star_data`` in addition to specifying the gas mesh.  You also have
+the option of gridding particles and assigning them onto the meshes.
+This process is in beta, and for the time being it's probably best to leave
+``do_grid_particles=False`` as the default.
+
+To speed up the loading of an ART file, you have a few options.  You can turn
+off the particles entirely by setting ``discover_particles=False``.  You can
+also grid octs only up to a certain level, e.g. ``limit_level=5``, which is
+useful when debugging, as it artificially creates a 'smaller' dataset to work
+with.
+
+Finally, when stellar ages are computed, we 'spread' the ages evenly within a
+smoothing window.  By default this is turned on and set to 10 Myr.  To turn
+this off you can set ``spread=False``, and you can tweak the age smoothing
+window by specifying the window in seconds, e.g.
+``spread=1.0e7*365*24*3600``.
+
+.. code-block:: python
+    
+   from yt.mods import *
+
+   pf = load("/u/cmoody3/data/art_snapshots/SFG1/10MpcBox_csf512_a0.460.d")
+
+.. _loading-moab-data:
+
+MOAB Data
+---------
+
+.. _loading-pyne-data:
+
+PyNE Data
+---------
+
+.. _loading-numpy-array:
+
+Generic Array Data
+------------------
+
+Even if your data is not strictly related to fields commonly used in
+astrophysical codes, or your code is not supported yet, you can still feed it
+to ``yt`` to use its advanced visualization and analysis facilities.  The only
+requirement is that your data can be represented as one or more uniform,
+three-dimensional numpy arrays.  Assuming that you have your data in ``arr``,
+the following code:
+
+.. code-block:: python
+
+   import numpy as np
+   from yt.frontends.stream.api import load_uniform_grid
+
+   data = dict(Density = arr)
+   bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])
+   pf = load_uniform_grid(data, arr.shape, 3.08e24, bbox=bbox, nprocs=12)
+
+will create a ``yt``-native parameter file ``pf`` that treats your array as a
+density field in a cubic domain 3 Mpc on a side (3 * 3.08e24 cm) and
+simultaneously divides the domain into 12 chunks, so that you can take
+advantage of the underlying parallelism.
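+
+As a quick check that the streamed data behaves like any other dataset, you
+can plot it directly (a sketch, assuming the snippet above has run):
+
+.. code-block:: python
+
+   from yt.mods import SlicePlot
+
+   slc = SlicePlot(pf, 'z', 'Density')
+   slc.save('stream_density_slice.png')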
+
+Particle fields are detected as one-dimensional fields. The number of
+particles is set by the ``number_of_particles`` key in
+``data``. Particle fields are then added as one-dimensional arrays in
+a similar manner as the three-dimensional grid fields:
+
+.. code-block:: python
+
+   import numpy as np
+   from yt.frontends.stream.api import load_uniform_grid
+
+   data = dict(Density = dens,
+               number_of_particles = 1000000,
+               particle_position_x = posx_arr,
+               particle_position_y = posy_arr,
+               particle_position_z = posz_arr)
+   bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])
+   pf = load_uniform_grid(data, dens.shape, 3.08e24, bbox=bbox, nprocs=12)
+
+where in this example the particle position fields have been assigned.  Each
+particle array must have a length equal to ``number_of_particles``.  If no
+particle arrays are supplied, then ``number_of_particles`` is assumed to be
+zero.
+
+.. rubric:: Caveats
+
+* Units will be incorrect unless the data has already been converted to cgs.
+* Particles may be difficult to integrate.
+* Data must already reside in memory.
+
+.. _loading-amr-data:
+
+Generic AMR Data
+----------------
+
+It is possible to create a native ``yt`` parameter file from a list of Python
+dictionaries, each describing a rectangular patch of data at a possibly
+different resolution.
+
+.. code-block:: python
+
+   import numpy as np
+   from yt.frontends.stream.api import load_amr_grids
+
+   grid_data = [
+       dict(left_edge = [0.0, 0.0, 0.0],
+            right_edge = [1.0, 1.0, 1.0],
+            level = 0,
+            dimensions = [32, 32, 32],
+            number_of_particles = 0),
+       dict(left_edge = [0.25, 0.25, 0.25],
+            right_edge = [0.75, 0.75, 0.75],
+            level = 1,
+            dimensions = [32, 32, 32],
+            number_of_particles = 0),
+   ]
+
+   for g in grid_data:
+       g["density"] = np.random.random(g["dimensions"]) * 2**g["level"]
+
+   pf = load_amr_grids(grid_data, [32, 32, 32], 1.0)
+
+Particle fields are supported by adding one-dimensional arrays and setting
+the ``number_of_particles`` key in each grid's dict:
+
+.. code-block:: python
+
+    for g in grid_data:
+        g["number_of_particles"] = 100000
+        g["particle_position_x"] = np.random.random((g["number_of_particles"]))
+
+.. rubric:: Caveats
+
+* Units will be incorrect unless the data has already been converted to cgs.
+* Some functions may behave oddly, and parallelism will be disappointing or
+  non-existent in most cases.
+* No consistency checks are performed on the index.
+* Data must already reside in memory.
+* Consistency between particle positions and grids is not checked;
+  ``load_amr_grids`` assumes that particle positions associated with one grid
+  are not contained within another grid at a higher level, so this must be
+  ensured by the user prior to loading the grid data.
+
+Generic Particle Data
+---------------------
+

diff -r 6e98dcf564739c112ffa92393dd561f1264dd34b -r abbfa770503a6345f7d649e20bf606bd4e7e546c doc/source/examining/supported_frontends_data.rst
--- a/doc/source/examining/supported_frontends_data.rst
+++ /dev/null
@@ -1,186 +0,0 @@
-.. _loading-data-from-supported-codes:
-
-Loading Data from Supported Codes
-=================================
-
-This section contains information on how to load data into ``yt`` from
-supported codes, as well as some important caveats about different
-data formats.
-
-.. _loading-enzo-data:
-
-Enzo Data
----------
-
-Enzo data is fully supported and cared for by Matthew Turk.  To load an Enzo
-dataset, you can use the ``load`` command provided by ``yt.mods`` and supply to
-it the parameter file name.  This would be the name of the output file, and it
-contains no extension.  For instance, if you have the following files:
-
-.. code-block:: none
-
-   DD0010/
-   DD0010/data0010
-   DD0010/data0010.index
-   DD0010/data0010.cpu0000
-   DD0010/data0010.cpu0001
-   DD0010/data0010.cpu0002
-   DD0010/data0010.cpu0003
-
-You would feed the ``load`` command the filename ``DD0010/data0010`` as
-mentioned.
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("DD0010/data0010")
-
-.. rubric:: Caveats
-
-* There are no major caveats for Enzo usage
-* Units should be correct, if you utilize standard unit-setting routines.  yt
-  will notify you if it cannot determine the units, although this
-  notification will be passive.
-* 2D and 1D data are supported, but the extraneous dimensions are set to be
-  of length 1.0
-
-.. _loading-orion-data:
-
-Orion Data
-----------
-
-Orion data is fully supported. To load an Orion dataset, you can use the
-``load`` command provided by ``yt.mods`` and supply to it the directory file
-name.  **You must also have the ``inputs`` file in the base directory.** For
-instance, if you were in a directory with the following files:
-
-.. code-block:: none
-
-   inputs
-   pltgmlcs5600/
-   pltgmlcs5600/Header
-   pltgmlcs5600/Level_0
-   pltgmlcs5600/Level_0/Cell_H
-   pltgmlcs5600/Level_1
-   pltgmlcs5600/Level_1/Cell_H
-   pltgmlcs5600/Level_2
-   pltgmlcs5600/Level_2/Cell_H
-   pltgmlcs5600/Level_3
-   pltgmlcs5600/Level_3/Cell_H
-   pltgmlcs5600/Level_4
-   pltgmlcs5600/Level_4/Cell_H
-
-You would feed it the filename ``pltgmlcs5600``:
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("pltgmlcs5600")
-
-.. rubric:: Caveats
-
-* There are no major caveats for Orion usage
-* Star particles are not supported at the current time
-
-.. _loading-flash-data:
-
-FLASH Data
-----------
-
-FLASH HDF5 data is fully supported and cared for by John ZuHone.  To load a
-FLASH dataset, you can use the ``load`` command provided by ``yt.mods`` and
-supply to it the file name of a plot file or checkpoint file.  Particle
-files are not currently directly loadable by themselves, due to the
-fact that they typically lack grid information. For instance, if you were in a directory with
-the following files:
-
-.. code-block:: none
-
-   cosmoSim_coolhdf5_chk_0026
-
-You would feed it the filename ``cosmoSim_coolhdf5_chk_0026``:
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("cosmoSim_coolhdf5_chk_0026")
-
-If you have a FLASH particle file that was created at the same time as
-a plotfile or checkpoint file (therefore having particle data
-consistent with the grid structure of the latter), its data may be loaded with the
-``particle_filename`` optional argument:
-
-.. code-block:: python
-
-    from yt.mods import *
-    pf = load("radio_halo_1kpc_hdf5_plt_cnt_0100", particle_filename="radio_halo_1kpc_hdf5_part_0100")
-
-.. rubric:: Caveats
-
-* Please be careful that the units are correctly utilized; yt assumes cgs
-* Velocities and length units will be scaled to comoving coordinates if yt is
-  able to discern you are examining a cosmology simulation; particle and grid
-  positions will not be.
-* Domains may be visualized assuming periodicity.
-
-Athena Data
------------
-
-Athena 4.x VTK data is *mostly* supported and cared for by John
-ZuHone. Both uniform grid and SMR datasets are supported. 
-
-Loading Athena datasets is slightly different depending on whether
-your dataset came from a serial or a parallel run. If the data came
-from a serial run or you have joined the VTK files together using the
-Athena tool ``join_vtk``, you can load the data like this:
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("kh.0010.vtk")
-
-The filename corresponds to the file on SMR level 0, whereas if there
-are multiple levels the corresponding files will be picked up
-automatically, assuming they are laid out in ``lev*`` subdirectories
-under the directory where the base file is located.
-
-For parallel datasets, yt assumes that they are laid out in
-directories named ``id*``, one for each processor number, each with
-``lev*`` subdirectories for additional refinement levels. To load this
-data, call ``load`` with the base file in the ``id0`` directory:
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("id0/kh.0010.vtk")
-
-which will pick up all of the files in the different ``id*`` directories for
-the entire dataset. 
-
-yt works in cgs ("Gaussian") units, but Athena data is not
-normally stored in these units. If you would like to convert data to
-cgs units, you may supply conversions for length, time, and density to ``load``:
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("id0/cluster_merger.0250.vtk", 
-          parameters={"LengthUnits":3.0856e24,
-                               "TimeUnits":3.1557e13,"DensityUnits":1.67e-24)
-
-This means that the yt fields (e.g. ``Density``, ``x-velocity``,
-``Bx``) will be in cgs units, but the Athena fields (e.g.,
-``density``, ``velocity_x``, ``cell_centered_B_x``) will be in code
-units. 
-
-.. rubric:: Caveats
-
-* yt primarily works with primitive variables. If the Athena
-  dataset contains conservative variables, the yt primitive fields will be generated from the
-  conserved variables on disk. 
-* Domains may be visualized assuming periodicity.
-* Particle list data is currently unsupported.
-* In some parallel Athena datasets, it is possible for a grid from one
-  refinement level to overlap with more than one grid on the parent
-  level. This may result in unpredictable behavior for some analysis
-  or visualization tasks. 

diff -r 6e98dcf564739c112ffa92393dd561f1264dd34b -r abbfa770503a6345f7d649e20bf606bd4e7e546c doc/source/loading_data.rst
--- a/doc/source/loading_data.rst
+++ /dev/null
@@ -1,601 +0,0 @@
-.. _loading-data:
-
-Loading Data
-============
-
-This section contains information on how to load data into ``yt``, as well as
-some important caveats about different data formats.
-
-.. _loading-enzo-data:
-
-Enzo Data
----------
-
-Enzo data is fully supported and cared for by Matthew Turk.  To load an Enzo
-dataset, you can use the ``load`` command provided by ``yt.mods`` and supply to
-it the parameter file name.  This would be the name of the output file, and it
-contains no extension.  For instance, if you have the following files:
-
-.. code-block:: none
-
-   DD0010/
-   DD0010/data0010
-   DD0010/data0010.index
-   DD0010/data0010.cpu0000
-   DD0010/data0010.cpu0001
-   DD0010/data0010.cpu0002
-   DD0010/data0010.cpu0003
-
-You would feed the ``load`` command the filename ``DD0010/data0010`` as
-mentioned.
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("DD0010/data0010")
-
-.. rubric:: Caveats
-
-* There are no major caveats for Enzo usage
-* Units should be correct, if you utilize standard unit-setting routines.  yt
-  will notify you if it cannot determine the units, although this
-  notification will be passive.
-* 2D and 1D data are supported, but the extraneous dimensions are set to be
-  of length 1.0 in "code length" which may produce strange results for volume
-  quantities.
-
-.. _loading-orion-data:
-
-Boxlib Data
------------
-
-yt has been tested with Boxlib data generated by Orion, Nyx, Maestro and
-Castro.  Currently it is cared for by a combination of Andrew Myers, Chris
-Malone, and Matthew Turk.
-
-To load a Boxlib dataset, you can use the ``load`` command provided by
-``yt.mods`` and supply to it the directory file name.  **You must also have the
-``inputs`` file in the base directory.**  For instance, if you were in a
-directory with the following files:
-
-.. code-block:: none
-
-   inputs
-   pltgmlcs5600/
-   pltgmlcs5600/Header
-   pltgmlcs5600/Level_0
-   pltgmlcs5600/Level_0/Cell_H
-   pltgmlcs5600/Level_1
-   pltgmlcs5600/Level_1/Cell_H
-   pltgmlcs5600/Level_2
-   pltgmlcs5600/Level_2/Cell_H
-   pltgmlcs5600/Level_3
-   pltgmlcs5600/Level_3/Cell_H
-   pltgmlcs5600/Level_4
-   pltgmlcs5600/Level_4/Cell_H
-
-You would feed it the filename ``pltgmlcs5600``:
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("pltgmlcs5600")
-
-.. _loading-flash-data:
-
-FLASH Data
-----------
-
-FLASH HDF5 data is *mostly* supported and cared for by John ZuHone.  To load a
-FLASH dataset, you can use the ``load`` command provided by ``yt.mods`` and
-supply to it the file name of a plot file or checkpoint file, but particle
-files are not currently directly loadable by themselves, due to the fact that
-they typically lack grid information. For instance, if you were in a directory
-with the following files:
-
-.. code-block:: none
-
-   cosmoSim_coolhdf5_chk_0026
-
-You would feed it the filename ``cosmoSim_coolhdf5_chk_0026``:
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("cosmoSim_coolhdf5_chk_0026")
-
-If you have a FLASH particle file that was created at the same time as
-a plotfile or checkpoint file (therefore having particle data
-consistent with the grid structure of the latter), its data may be loaded with the
-``particle_filename`` optional argument:
-
-.. code-block:: python
-
-    from yt.mods import *
-    pf = load("radio_halo_1kpc_hdf5_plt_cnt_0100", particle_filename="radio_halo_1kpc_hdf5_part_0100")
-
-.. rubric:: Caveats
-
-* Please be careful that the units are correctly utilized; yt assumes cgs.
-
-.. _loading-ramses-data:
-
-RAMSES Data
------------
-
-In yt-3.0, RAMSES data is fully supported.  If you are interested in taking a
-development or stewardship role, please contact the yt-dev mailing list.  To
-load a RAMSES dataset, you can use the ``load`` command provided by ``yt.mods``
-and supply to it the ``info*.txt`` filename.  For instance, if you were in a
-directory with the following files:
-
-.. code-block:: none
-
-   output_00007
-   output_00007/amr_00007.out00001
-   output_00007/grav_00007.out00001
-   output_00007/hydro_00007.out00001
-   output_00007/info_00007.txt
-   output_00007/part_00007.out00001
-
-You would feed it the filename ``output_00007/info_00007.txt``:
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("output_00007/info_00007.txt")
-
-yt will attempt to guess the fields in the file.  You may also specify a list
-of fields by supplying the ``fields`` keyword in your call to ``load``.
-
-.. _loading-gadget-data:
-
-Gadget Data
------------
-
-yt has support for reading Gadget data in both raw binary and HDF5 formats.  It
-is able to access the particles as it would any other particle dataset, and it
-can apply smoothing kernels to the data to produce both quantitative analysis
-and visualization.
-
-Gadget data in HDF5 format can be loaded with the ``load`` command:
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("snapshot_061.hdf5")
-
-However, yt cannot detect raw-binary Gadget data, and so you must specify the
-format as being Gadget:
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = GadgetDataset("snapshot_061")
-
-.. _particle-bbox:
-
-Units and Bounding Boxes
-++++++++++++++++++++++++
-
-There are two additional pieces of information that may be needed.  If your
-simulation is cosmological, yt can often guess the bounding box and the units
-of the simulation.  However, for isolated simulations and for cosmological
-simulations with non-standard units, these must be supplied.  For example, if
-a length unit of 1.0 corresponds to a kiloparsec, you can supply this in the
-constructor.  yt can accept units such as ``Mpc``, ``kpc``, ``cm``, ``Mpccm/h``
-and so on.  In particular, note that ``Mpc/h`` and ``Mpccm/h`` (``cm`` for
-comoving here) are usable unit definitions.
-
-yt will attempt to use units for ``mass``, ``length`` and ``time`` as supplied
-in the argument ``unit_base``.  The ``bounding_box`` argument is a list of
-two-item tuples or lists that describe the left and right extents of the
-particles.
-
-.. code-block:: python
-
-   pf = GadgetDataset("snap_004",
-           unit_base = {'length': ('kpc', 1.0)},
-           bounding_box = [[-600.0, 600.0], [-600.0, 600.0], [-600.0, 600.0]])
-
-.. _particle-indexing-criteria:
-
-Indexing Criteria
-+++++++++++++++++
-
-yt generates a global mesh index via octree that governs the resolution of
-volume elements.  This is governed by two parameters, ``n_ref`` and
-``over_refine_factor``.  They are weak proxies for each other.  The first,
-``n_ref``, governs how many particles in an oct results in that oct being
-refined into eight child octs.  Lower values mean higher resolution; the
-default is 64.  The secon parameter, ``over_refine_factor``, governs how many
-cells are in a given oct; the default value of 1 corresponds to 8 cells.
-The number of cells in an oct is defined by the expression
-``2**(3*over_refine_factor)``.
-
-It's recommended that if you want higher-resolution, try reducing the value of
-``n_ref`` to 32 or 16.
-
-.. _gadget-field-spec:
-
-Field Specifications
-++++++++++++++++++++
-
-Binary Gadget outputs often have additional fields or particle types that are
-non-standard from the default Gadget distribution format.  These can be
-specified in the call to ``GadgetDataset`` by either supplying one of the
-sets of field specifications as a string or by supplying a field specification
-itself.  As an example, yt has built-in definitions for ``default`` (the
-default) and ``agora_unlv``.  Field specifications must be tuples, and must be
-of this format:
-
-.. code-block:: python
-
-   default = ( "Coordinates",
-               "Velocities",
-               "ParticleIDs",
-               "Mass",
-               ("InternalEnergy", "Gas"),
-               ("Density", "Gas"),
-               ("SmoothingLength", "Gas"),
-   )
-
-This is the default specification used by the Gadget frontend.  It means that
-the fields are, in order, Coordinates, Velocities, ParticleIDs, Mass, and the
-fields InternalEnergy, Density and SmoothingLength *only* for Gas particles.
-So for example, if you have defined a Metallicity field for the particle type
-Halo, which comes right after ParticleIDs in the file, you could define it like
-this:
-
-.. code-block:: python
-
-   my_field_def = ( "Coordinates",
-               "Velocities",
-               "ParticleIDs",
-               ("Metallicity", "Halo"),
-               "Mass",
-               ("InternalEnergy", "Gas"),
-               ("Density", "Gas"),
-               ("SmoothingLength", "Gas"),
-   )
-
-To save time, you can utilize the plugins file for yt and use it to add items
-to the dictionary where these definitions are stored.  You could do this like
-so:
-
-.. code-block:: python
-
-   from yt.frontends.sph.definitions import gadget_field_specs
-   gadget_field_specs["my_field_def"] = my_field_def
-
-Please also feel free to issue a pull request with any new field
-specifications, as we're happy to include them in the main distribution!
-
-.. _gadget-ptype-spec:
-
-Particle Type Definitions
-+++++++++++++++++++++++++
-
-In some cases, research groups add new particle types or re-order them.  You
-can supply alternate particle types by using the keyword ``ptype_spec`` to the
-``GadgetDataset`` call.  The default for Gadget binary data is:
-
-.. code-block:: python
-
-    ( "Gas", "Halo", "Disk", "Bulge", "Stars", "Bndry" )
-
-You can specify alternate names, but note that this may cause problems with the
-field specification if none of the names match old names.
-
-.. _gadget-header-spec:
-
-Header Specification
-++++++++++++++++++++
-
-If you have modified the header in your Gadget binary file, you can specify an
-alternate header specification with the keyword ``header_spec``.  This can
-either be a list of strings corresponding to individual header types known to
-yt, or it can be a combination of strings and header specifications.  The
-default header specification (found in ``yt/frontends/sph/definitions.py``) is:
-
-.. code-block:: python
-   
-    default      = (('Npart', 6, 'i'),
-                    ('Massarr', 6, 'd'),
-                    ('Time', 1, 'd'),
-                    ('Redshift', 1, 'd'),
-                    ('FlagSfr', 1, 'i'),
-                    ('FlagFeedback', 1, 'i'),
-                    ('Nall', 6, 'i'),
-                    ('FlagCooling', 1, 'i'),
-                    ('NumFiles', 1, 'i'),
-                    ('BoxSize', 1, 'd'),
-                    ('Omega0', 1, 'd'),
-                    ('OmegaLambda', 1, 'd'),
-                    ('HubbleParam', 1, 'd'),
-                    ('FlagAge', 1, 'i'),
-                    ('FlagMEtals', 1, 'i'),
-                    ('NallHW', 6, 'i'),
-                    ('unused', 16, 'i'))
-
-These items will all be accessible inside the object ``pf.parameters``, which
-is a dictionary.  You can add combinations of new items, specified in the same
-way, or alternately other types of headers.  The other string keys defined are
-``pad32``, ``pad64``, ``pad128``, and ``pad256`` each of which corresponds to
-an empty padding in bytes.  For example, if you have an additional 256 bytes of
-padding at the end, you can specify this with:
-
-.. code-block:: python
-
-   header_spec = ["default", "pad256"]
-
-This can then be supplied to the constructor.  Note that you can also do this
-manually, for instance with:
-
-
-.. code-block:: python
-
-   header_spec = ["default", (('some_value', 8, 'd'),
-                              ('another_value', 1, 'i'))]
-
-The letters correspond to data types from the Python struct module.  Please
-feel free to submit alternate header types to the main yt repository.
-
-.. _specifying-gadget-units:
-
-Specifying Units
-++++++++++++++++
-
-If you are running a cosmology simulation, yt will be able to guess the units
-with some reliability.  However, if you are not and you do not specify a
-parameter file, yt will not be able to and will use the defaults of length
-being 1.0 Mpc/h (comoving), velocity being in cm/s, and mass being in 10^10
-Msun/h.  You can specify alternate units by supplying the ``unit_base`` keyword
-argument of this form:
-
-.. code-block:: python
-
-   unit_base = {'length': (1.0, 'cm'), 'mass': (1.0, 'g'), 'time': (1.0, 's')}
-
-yt will utilize length, mass and time to set up all other units.
-
-.. _loading-tipsy-data:
-
-Tipsy Data
-----------
-
-yt also supports loading Tipsy data.  Many of its characteristics are similar
-to how Gadget data is loaded; specifically, it shares its definition of
-indexing and mesh-identification with that described in
-:ref:`particle-indexing-criteria`.  However, unlike Gadget, the Tipsy frontend
-has not yet implemented header specifications, field specifications, or
-particle type specifications.  *These are all excellent projects for new
-contributors!*
-
-Tipsy data cannot be automatically detected.  You can load it with a command
-similar to the following:
-
-.. code-block:: python
-
-    ds = TipsyDataset('test.00169',
-        parameter_file='test.param',
-        endian = '<',
-        domain_left_edge = domain_left_edge,
-        domain_right_edge = domain_right_edge,
-    )
-
-Not all of these arguments are necessary; additionally, yt accepts the
-arguments ``n_ref``, ``over_refine_factor``, ``cosmology_parameters``, and
-``unit_base``.  By default, yt will not utilize a parameter file, and by
-default it will assume the data is "big" endian (`>`).  Optionally, you may
-specify ``field_dtypes``, which describe the size of various fields.  For
-example, if you have stored positions as 64-bit floats, you can specify this
-with:
-
-.. code-block:: python
-
-    ds = TipsyDataset("./halo1e11_run1.00400", endian="<",
-                           field_dtypes = {"Coordinates": "d"})
-
-.. _specifying-cosmology-tipsy:
-
-Specifying Tipsy Cosmological Parameters
-++++++++++++++++++++++++++++++++++++++++
-
-Cosmological parameters can be specified to Tipsy to enable computation of
-default units.  The parameters recognized are of this form:
-
-.. code-block:: python
-
-   cosmology_parameters = {'current_redshift': 0.0,
-                           'omega_lambda': 0.728,
-                           'omega_matter': 0.272,
-                           'hubble_constant': 0.702}
-
-These will be used set the units, if they are specified.
-
-.. _loading-artio-data:
-
-ARTIO Data
-----------
-
-ARTIO data has a well-specified internal parameter system and has few free
-parameters.  However, for optimization purposes, the parameter that provides
-the most guidance to yt as to how to manage ARTIO data is ``max_range``.  This
-governs the maximum number of space-filling curve cells that will be used in a
-single "chunk" of data read from disk.  For small datasets, setting this number
-very large will enable more data to be loaded into memory at any given time;
-for very large datasets, this parameter can be left alone safely.  By default
-it is set to 1024; it can in principle be set as high as the total number of
-SFC cells.
-
-To load ARTIO data, you can specify a command such as this:
-
-.. code-block:: python
-
-    ds = load("./A11QR1/s11Qzm1h2_a1.0000.art")
-
-.. _loading-art-data:
-
-ART Data
---------
-
-ART data enjoys preliminary support and has been supported in the past by
-Christopher Moody.  Please contact the ``yt-dev`` mailing list if you are
-interested in using yt for ART data, or if you are interested in assisting with
-development of yt to work with ART data.
-
-To load an ART dataset you can use the ``load`` command provided by 
-``yt.mods`` and passing the gas mesh file. It will search for and attempt 
-to find the complementary dark matter and stellar particle header and data 
-files. However, your simulations may not follow the same naming convention.
-
-So for example, a single snapshot might have a series of files looking like
-this:
-
-.. code-block:: none
-
-   10MpcBox_csf512_a0.300.d    #Gas mesh
-   PMcrda0.300.DAT             #Particle header
-   PMcrs0a0.300.DAT            #Particle data (positions,velocities)
-   stars_a0.300.dat            #Stellar data (metallicities, ages, etc.)
-
-The ART frontend tries to find the associated files matching the above, but
-if that fails you can specify ``file_particle_data``,``file_particle_data``,
-``file_star_data`` in addition to the specifying the gas mesh. You also have 
-the option of gridding particles, and assigning them onto the meshes.
-This process is in beta, and for the time being it's probably  best to leave
-``do_grid_particles=False`` as the default.
-
-To speed up the loading of an ART file, you have a few options. You can turn 
-off the particles entirely by setting ``discover_particles=False``. You can
-also only grid octs up to a certain level, ``limit_level=5``, which is useful
-when debugging by artificially creating a 'smaller' dataset to work with.
-
-Finally, when stellar ages are computed we 'spread' the ages evenly within a
-smoothing window. By default this is turned on and set to 10Myr. To turn this 
-off you can set ``spread=False``, and you can tweak the age smoothing window
-by specifying the window in seconds, ``spread=1.0e7*265*24*3600``. 
-
-.. code-block:: python
-    
-   from yt.mods import *
-
-   pf = load("/u/cmoody3/data/art_snapshots/SFG1/10MpcBox_csf512_a0.460.d")
-
-.. _loading-moab-data:
-
-MOAB Data
----------
-
-.. _loading-pyne-data:
-
-PyNE Data
----------
-
-.. _loading-numpy-array:
-
-Generic Array Data
-------------------
-
-Even if your data is not strictly related to fields commonly used in
-astrophysical codes or your code is not supported yet, you can still feed it to
-``yt`` to use its advanced visualization and analysis facilities. The only
-requirement is that your data can be represented as one or more uniform, three
-dimensional numpy arrays. Assuming that you have your data in ``arr``,
-the following code:
-
-.. code-block:: python
-
-   from yt.frontends.stream.api import load_uniform_grid
-
-   data = dict(Density = arr)
-   bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [1.5, 1.5]])
-   pf = load_uniform_grid(data, arr.shape, 3.08e24, bbox=bbox, nprocs=12)
-
-will create ``yt``-native parameter file ``pf`` that will treat your array as
-density field in cubic domain of 3 Mpc edge size (3 * 3.08e24 cm) and
-simultaneously divide the domain into 12 chunks, so that you can take advantage
-of the underlying parallelism. 
-
-Particle fields are detected as one-dimensional fields. The number of
-particles is set by the ``number_of_particles`` key in
-``data``. Particle fields are then added as one-dimensional arrays in
-a similar manner as the three-dimensional grid fields:
-
-.. code-block:: python
-
-   from yt.frontends.stream.api import load_uniform_grid
-
-   data = dict(Density = dens, 
-               number_of_particles = 1000000,
-               particle_position_x = posx_arr, 
-	       particle_position_y = posy_arr,
-	       particle_position_z = posz_arr)
-   bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [1.5, 1.5]])
-   pf = load_uniform_grid(data, arr.shape, 3.08e24, bbox=bbox, nprocs=12)
-
-where in this exampe the particle position fields have been assigned. ``number_of_particles`` must be the same size as the particle
-arrays. If no particle arrays are supplied then ``number_of_particles`` is assumed to be zero. 
-
-.. rubric:: Caveats
-
-* Units will be incorrect unless the data has already been converted to cgs.
-* Particles may be difficult to integrate.
-* Data must already reside in memory.
-
-.. _loading-amr-data:
-
-Generic AMR Data
-----------------
-
-It is possible to create native ``yt`` parameter file from Python's dictionary
-that describes set of rectangular patches of data of possibly varying
-resolution. 
-
-.. code-block:: python
-
-   from yt.frontends.stream.api import load_amr_grids
-
-   grid_data = [
-       dict(left_edge = [0.0, 0.0, 0.0],
-            right_edge = [1.0, 1.0, 1.],
-            level = 0,
-            dimensions = [32, 32, 32],
-            number_of_particles = 0)
-       dict(left_edge = [0.25, 0.25, 0.25],
-            right_edge = [0.75, 0.75, 0.75],
-            level = 1,
-            dimensions = [32, 32, 32],
-            number_of_particles = 0)
-   ]
-  
-   for g in grid_data:
-       g["density"] = np.random.random(g["dimensions"]) * 2**g["level"]
-  
-   pf = load_amr_grids(grid_data, [32, 32, 32], 1.0)
-
-Particle fields are supported by adding 1-dimensional arrays and
-setting the ``number_of_particles`` key to each ``grid``'s dict:
-
-.. code-block:: python
-
-    for g in grid_data:
-        g["number_of_particles"] = 100000
-        g["particle_position_x"] = np.random.random((g["number_of_particles"]))
-
-.. rubric:: Caveats
-
-* Units will be incorrect unless the data has already been converted to cgs.
-* Some functions may behave oddly, and parallelism will be disappointing or
-  non-existent in most cases.
-* No consistency checks are performed on the index
-* Data must already reside in memory.
-* Consistency between particle positions and grids is not checked;
-  ``load_amr_grids`` assumes that particle positions associated with one grid are
-  not bounded within another grid at a higher level, so this must be
-  ensured by the user prior to loading the grid data. 
-
-Generic Particle Data
----------------------
-

diff -r 6e98dcf564739c112ffa92393dd561f1264dd34b -r abbfa770503a6345f7d649e20bf606bd4e7e546c yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- /dev/null
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -0,0 +1,244 @@
+import numpy as np
+import os, sys
+cimport numpy as np
+cimport cython
+#from cpython.mem cimport PyMem_Malloc
+from libc.stdlib cimport malloc, free
+import sys
+
+
+
+# Import the relevant rockstar data types: particle, fof, halo
+
+cdef import from "particle.h":
+	struct particle:
+		np.int64_t id
+		float pos[6]
+
+cdef import from "fof.h":
+	struct fof:
+		np.int64_t num_p
+		particle *particles
+
+cdef import from "halo.h":
+	struct halo:
+		np.int64_t id
+		float pos[6], corevel[3], bulkvel[3]
+		float m, r, child_r, vmax_r, mgrav,	vmax, rvmax, rs, klypin_rs, vrms
+		float J[3], energy, spin, alt_m[4], Xoff, Voff, b_to_a, c_to_a, A[3]
+		float bullock_spin, kin_to_pot
+		np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
+		float min_pos_err, min_vel_err, min_bulkvel_err
+
+# For finding sub halos import finder function and global variable
+# rockstar uses to store the results
+
+cdef import from "groupies.h":
+	void find_subs(fof *f) 
+	halo *halos
+	np.int64_t num_halos
+	void calc_mass_definition()
+
+# For outputting halos, rockstar style
+
+cdef import from "meta_io.h":
+	void output_halos(np.int64_t id_offset, np.int64_t snap, np.int64_t chunk, float *bounds) 
+
+# For setting up the configuration of rockstar
+
+cdef import from "config.h":
+	void setup_config()
+
+cdef import from "config_vars.h":
+	# Rockstar cleverly puts all of the config variables inside a templated
+	# definition of their variables.
+	char *FILE_FORMAT
+	np.float64_t PARTICLE_MASS
+
+	char *MASS_DEFINITION
+	np.int64_t MIN_HALO_OUTPUT_SIZE
+	np.float64_t FORCE_RES
+
+	np.float64_t SCALE_NOW
+	np.float64_t h0
+	np.float64_t Ol
+	np.float64_t Om
+
+	np.int64_t GADGET_ID_BYTES
+	np.float64_t GADGET_MASS_CONVERSION
+	np.float64_t GADGET_LENGTH_CONVERSION
+	np.int64_t GADGET_SKIP_NON_HALO_PARTICLES
+	np.int64_t RESCALE_PARTICLE_MASS
+
+	np.int64_t PARALLEL_IO
+	char *PARALLEL_IO_SERVER_ADDRESS
+	char *PARALLEL_IO_SERVER_PORT
+	np.int64_t PARALLEL_IO_WRITER_PORT
+	char *PARALLEL_IO_SERVER_INTERFACE
+	char *RUN_ON_SUCCESS
+
+	char *INBASE
+	char *FILENAME
+	np.int64_t STARTING_SNAP
+	np.int64_t NUM_SNAPS
+	np.int64_t NUM_BLOCKS
+	np.int64_t NUM_READERS
+	np.int64_t PRELOAD_PARTICLES
+	char *SNAPSHOT_NAMES
+	char *LIGHTCONE_ALT_SNAPS
+	char *BLOCK_NAMES
+
+	char *OUTBASE
+	np.float64_t OVERLAP_LENGTH
+	np.int64_t NUM_WRITERS
+	np.int64_t FORK_READERS_FROM_WRITERS
+	np.int64_t FORK_PROCESSORS_PER_MACHINE
+
+	char *OUTPUT_FORMAT
+	np.int64_t DELETE_BINARY_OUTPUT_AFTER_FINISHED
+	np.int64_t FULL_PARTICLE_CHUNKS
+	char *BGC2_SNAPNAMES
+
+	np.int64_t BOUND_PROPS
+	np.int64_t BOUND_OUT_TO_HALO_EDGE
+	np.int64_t DO_MERGER_TREE_ONLY
+	np.int64_t IGNORE_PARTICLE_IDS
+	np.float64_t TRIM_OVERLAP
+	np.float64_t ROUND_AFTER_TRIM
+	np.int64_t LIGHTCONE
+	np.int64_t PERIODIC
+
+	np.float64_t LIGHTCONE_ORIGIN[3]
+	np.float64_t LIGHTCONE_ALT_ORIGIN[3]
+
+	np.float64_t LIMIT_CENTER[3]
+	np.float64_t LIMIT_RADIUS
+
+	np.int64_t SWAP_ENDIANNESS
+	np.int64_t GADGET_VARIANT
+
+	np.float64_t FOF_FRACTION
+	np.float64_t FOF_LINKING_LENGTH
+	np.float64_t INCLUDE_HOST_POTENTIAL_RATIO
+	np.float64_t DOUBLE_COUNT_SUBHALO_MASS_RATIO
+	np.int64_t TEMPORAL_HALO_FINDING
+	np.int64_t MIN_HALO_PARTICLES
+	np.float64_t UNBOUND_THRESHOLD
+	np.int64_t ALT_NFW_METRIC
+
+	np.int64_t TOTAL_PARTICLES
+	np.float64_t BOX_SIZE
+	np.int64_t OUTPUT_HMAD
+	np.int64_t OUTPUT_PARTICLES
+	np.int64_t OUTPUT_LEVELS
+	np.float64_t DUMP_PARTICLES[3]
+
+	np.float64_t AVG_PARTICLE_SPACING
+	np.int64_t SINGLE_SNAP
+
+
+
+cdef class RockstarGroupiesInterface:
+	
+	cdef public object pf
+	cdef public object fof
+
+	# For future use/consistency
+	def __cinit__(self,pf):
+		self.pf = pf
+
+	def setup_rockstar(self,
+						particle_mass,
+						int periodic = 1, force_res=None,
+						int min_halo_size = 25, outbase = "None",
+						callbacks = None):
+		global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
+		global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
+		global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
+		global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
+		global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
+		
+
+		if force_res is not None:
+			FORCE_RES=np.float64(force_res)
+
+		OVERLAP_LENGTH = 0.0
+		
+		FILENAME = "inline.<block>"
+		FILE_FORMAT = "GENERIC"
+		OUTPUT_FORMAT = "ASCII"
+		MIN_HALO_OUTPUT_SIZE=min_halo_size
+		
+		pf = self.pf
+
+		h0 = pf.hubble_constant
+		Ol = pf.omega_lambda
+		Om = pf.omega_matter
+		
+		SCALE_NOW = 1.0/(pf.current_redshift+1.0)
+		
+		if outbase != 'None':
+			# Output directory: since we can't change the output filenames,
+			# the workaround is to make a new directory.
+			OUTBASE = outbase
+
+
+		PARTICLE_MASS = particle_mass.in_units('Msun/h')
+		PERIODIC = periodic
+		BOX_SIZE = pf.domain_width.in_units('Mpccm/h')[0]
+
+		# Set up the configuration options
+		setup_config()
+
+		# Needs to be called so rockstar can use the particle mass parameter
+		# to calculate virial quantities properly
+		calc_mass_definition()
+
+
+
+	def make_rockstar_fof(self,fof_ids, pos, vel):
+
+		# Turn positions and velocities into units we want
+		pos = pos.in_units('Mpccm/h')
+		vel = vel.in_units('km/s')
+
+		# Define fof object
+		cdef fof fof_obj
+
+		# Find number of particles
+		cdef np.int64_t num_particles = len(fof_ids)
+
+		# Allocate space for correct number of particles
+		cdef particle* particles = <particle*> malloc(num_particles * sizeof(particle))
+
+		# Fill in array of particles with particle that fof identified
+		# This is possibly the slowest way to code this, but for now
+		# I just want it to work
+		for i,id in enumerate(fof_ids):
+			particles[i].id = id
+
+			# fill in locations & velocities
+			for j in range(3):
+				particles[i].pos[j] = pos[id][j]
+				particles[i].pos[j+3] = vel[id][j]
+
+
+		# Assign pointer to particles into FOF object 
+		fof_obj.particles = particles
+
+		# Assign number of particles into FOF object
+		fof_obj.num_p = num_particles
+
+		# Make pointer to fof object
+		cdef fof* fof_pointer = & fof_obj
+
+		# Find the sub halos using rockstar by passing a pointer to the fof object
+		find_subs(fof_pointer)
+
+		# Output the halos, rockstar style
+		output_halos(0, 0, 0, NULL) 
+
+		free(particles)
+
+
+

diff -r 6e98dcf564739c112ffa92393dd561f1264dd34b -r abbfa770503a6345f7d649e20bf606bd4e7e546c yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -39,26 +39,35 @@
     pass
 
 vlist = "xyz"
+def setup_sunyaev_zeldovich_fields(registry, ftype = "gas", slice_info = None):
+    def _t_squared(field, data):
+        return data["gas","density"]*data["gas","kT"]*data["gas","kT"]
+    registry.add_field(("gas", "t_squared"),
+                       function = _t_squared,
+                       units="g*keV**2/cm**3")
+    def _beta_perp_squared(field, data):
+        return data["gas","density"]*data["gas","velocity_magnitude"]**2/clight/clight - data["gas","beta_par_squared"]
+    registry.add_field(("gas","beta_perp_squared"), 
+                       function = _beta_perp_squared,
+                       units="g/cm**3")
 
- at derived_field(name=("gas","t_squared"), units="g*keV**2/cm**3")
-def _t_squared(field, data):
-    return data["gas","density"]*data["gas","kT"]*data["gas","kT"]
+    def _beta_par_squared(field, data):
+        return data["gas","beta_par"]**2/data["gas","density"]
+    registry.add_field("gas","beta_par_squared",
+                       function = _beta_par_squared,
+                       units="g/cm**3")
 
- at derived_field(name=("gas","beta_perp_squared"), units="g/cm**3")
-def _beta_perp_squared(field, data):
-    return data["gas","density"]*data["gas","velocity_magnitude"]**2/clight/clight - data["gas","beta_par_squared"]
+    def _t_beta_par(field, data):
+        return data["gas","kT"]*data["gas","beta_par"]
+    registry.add_field(("gas","t_beta_par"),
+                       function = _t_beta_par,
+                       units="keV*g/cm**3")
 
- at derived_field(name=("gas","beta_par_squared"), units="g/cm**3")
-def _beta_par_squared(field, data):
-    return data["gas","beta_par"]**2/data["gas","density"]
-
- at derived_field(name=("gas","t_beta_par"), units="keV*g/cm**3")
-def _t_beta_par(field, data):
-    return data["gas","kT"]*data["gas","beta_par"]
-
- at derived_field(name=("gas","t_sz"), units="keV*g/cm**3")
-def _t_sz(field, data):
-    return data["gas","density"]*data["gas","kT"]
+    def _t_sz(field, data):
+        return data["gas","density"]*data["gas","kT"]
+    registry.add_field(("gas","t_sz"),
+                       function = _t_sz,
+                       units="keV*g/cm**3")
 
 def generate_beta_par(L):
     def _beta_par(field, data):
@@ -90,6 +99,7 @@
     def __init__(self, pf, freqs, mue=1.143, high_order=False):
 
         self.pf = pf
+        pf.field_info.load_plugin(setup_sunyaev_zeldovich_fields)
         self.num_freqs = len(freqs)
         self.high_order = high_order
         self.freqs = pf.arr(freqs, "GHz")
@@ -139,7 +149,6 @@
 
         beta_par = generate_beta_par(L)
         self.pf.field_info.add_field(name=("gas","beta_par"), function=beta_par, units="g/cm**3")
-        proj = self.pf.proj("density", axis, center=ctr, data_source=source)
         frb = proj.to_frb(width, nx)
         dens = frb["density"]
         Te = frb["t_sz"]/dens

diff -r 6e98dcf564739c112ffa92393dd561f1264dd34b -r abbfa770503a6345f7d649e20bf606bd4e7e546c yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -517,7 +517,7 @@
 
     # Now all the object related stuff
     def all_data(self, find_max=False):
-        if find_max: c = self.find_max("Density")[1]
+        if find_max: c = self.find_max("density")[1]
         else: c = (self.domain_right_edge + self.domain_left_edge)/2.0
         return self.region(c,
             self.domain_left_edge, self.domain_right_edge)

diff -r 6e98dcf564739c112ffa92393dd561f1264dd34b -r abbfa770503a6345f7d649e20bf606bd4e7e546c yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -78,14 +78,11 @@
        Used for baryon fields from the data that are not in all the grids
     display_name : str
        A name used in the plots
-    projection_conversion : unit
-       which unit should we multiply by in a projection?
     """
     def __init__(self, name, function, units=None,
                  take_log=True, validators=None,
                  particle_type=False, vector_field=False, display_field=True,
-                 not_in_all=False, display_name=None,
-                 projection_conversion="cm"):
+                 not_in_all=False, display_name=None):
         self.name = name
         self.take_log = take_log
         self.display_name = display_name
@@ -124,7 +121,6 @@
         dd['display_field'] = True
         dd['not_in_all'] = self.not_in_all
         dd['display_name'] = self.display_name
-        dd['projection_conversion'] = self.projection_conversion
         return dd
 
     def get_units(self):

diff -r 6e98dcf564739c112ffa92393dd561f1264dd34b -r abbfa770503a6345f7d649e20bf606bd4e7e546c yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -128,7 +128,9 @@
                 return self[item]
         elif finfo is not None and finfo.particle_type:
             if "particle_position" in (item, item[1]) or \
-               "particle_velocity" in (item, item[1]):
+               "particle_velocity" in (item, item[1]) or \
+               "Velocity" in (item, item[1]) or \
+               "Coordinates" in (item, item[1]):
                 # A vector
                 self[item] = \
                   YTArray(np.ones((self.NumberOfParticles, 3)),

diff -r 6e98dcf564739c112ffa92393dd561f1264dd34b -r abbfa770503a6345f7d649e20bf606bd4e7e546c yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -39,7 +39,8 @@
     particle_deposition_functions, \
     particle_vector_functions, \
     particle_scalar_functions, \
-    standard_particle_fields
+    standard_particle_fields, \
+    add_volume_weighted_smoothed_field
 
 class FieldInfoContainer(dict):
     """
@@ -54,6 +55,7 @@
     known_particle_fields = ()
 
     def __init__(self, pf, field_list, slice_info = None):
+        self._show_field_errors = []
         self.pf = pf
         # Now we start setting things up.
         self.field_list = field_list
@@ -97,6 +99,26 @@
             self.add_output_field(field, 
                                   units = self.pf.field_units.get(field, ""),
                                   particle_type = True)
+        self.setup_smoothed_fields(ptype)
+
+    def setup_smoothed_fields(self, ptype, num_neighbors = 64, ftype = "gas"):
+        # We can in principle compute this, but it is not yet implemented.
+        if (ptype, "density") not in self:
+            return
+        if (ptype, "smoothing_length") in self:
+            sml_name = "smoothing_length"
+        else:
+            sml_name = None
+        new_aliases = []
+        for _, alias_name in self.field_aliases:
+            fn = add_volume_weighted_smoothed_field(ptype,
+                "particle_position", "particle_mass",
+                sml_name, "density", alias_name, self,
+                num_neighbors)
+            new_aliases.append(((ftype, alias_name), fn[0]))
+        for alias, source in new_aliases:
+            #print "Aliasing %s => %s" % (alias, source)
+            self.alias(alias, source)
 
     def setup_fluid_aliases(self):
         known_other_fields = dict(self.known_other_fields)
@@ -150,8 +172,11 @@
         self.find_dependencies(loaded)
 
     def load_plugin(self, plugin_name, ftype = "gas", skip_check = False):
+        if callable(plugin_name):
+            f = plugin_name
+        else:
+            f = field_plugins[plugin_name]
         orig = set(self.items())
-        f = field_plugins[plugin_name]
         f(self, ftype, slice_info = self.slice_info)
         loaded = [n for n, v in set(self.items()).difference(orig)]
         return loaded
@@ -288,6 +313,8 @@
             try:
                 fd = fi.get_dependencies(pf = self.pf)
             except Exception as e:
+                if field in self._show_field_errors:
+                    raise
                 if type(e) != YTFieldNotFound:
                     mylog.debug("Raises %s during field %s detection.",
                                 str(type(e)), field)

diff -r 6e98dcf564739c112ffa92393dd561f1264dd34b -r abbfa770503a6345f7d649e20bf606bd4e7e546c yt/fields/geometric_fields.py
--- a/yt/fields/geometric_fields.py
+++ b/yt/fields/geometric_fields.py
@@ -78,7 +78,6 @@
 
     registry.add_field(("index", "zeros"), function=_zeros,
               units = "",
-              projection_conversion="unitary",
               display_field=False)
 
     def _ones(field, data):
@@ -88,7 +87,6 @@
         return data.apply_units(arr, field.units)
 
     registry.add_field(("index", "ones"), function=_ones,
-              projection_conversion="unitary",
               units = "",
               display_field=False)
 

diff -r 6e98dcf564739c112ffa92393dd561f1264dd34b -r abbfa770503a6345f7d649e20bf606bd4e7e546c yt/fields/local_fields.py
--- a/yt/fields/local_fields.py
+++ b/yt/fields/local_fields.py
@@ -18,7 +18,8 @@
 from .field_plugin_registry import \
     register_field_plugin
 
-from .field_info_container import FieldInfoContainer
+from .field_info_container import \
+    FieldInfoContainer
 
 # Empty FieldInfoContainer
 local_fields = FieldInfoContainer(None, [], None)
@@ -31,4 +32,6 @@
     # info container, and since they are not mutable in any real way, we are
     # fine.
     # Note that we actually don't care about the ftype here.
+    for f in local_fields:
+        registry._show_field_errors.append(f)
     registry.update(local_fields)

diff -r 6e98dcf564739c112ffa92393dd561f1264dd34b -r abbfa770503a6345f7d649e20bf606bd4e7e546c yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -80,8 +80,7 @@
     registry.add_field(("deposit", "%s_count" % ptype),
              function = particle_count,
              validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Count}" % ptype,
-             projection_conversion = '1')
+             display_name = "\\mathrm{%s Count}" % ptype)
 
     def particle_mass(field, data):
         pos = data[ptype, coord_name]

This diff is so big that we needed to truncate the remainder.
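
A note on the load_plugin change above: it now accepts a callable directly instead of only a name registered in field_plugins. A minimal sketch of the callable form, assuming pf is a loaded dataset and using a hypothetical plugin that follows the f(registry, ftype, slice_info) signature the diff expects:

    def setup_my_fields(registry, ftype="gas", slice_info=None):
        # hypothetical plugin: register one trivial derived field
        def _twice_density(field, data):
            return 2.0 * data[ftype, "density"]
        registry.add_field((ftype, "twice_density"),
                           function=_twice_density, units="g/cm**3")

    # the callable path skips the field_plugins lookup entirely
    loaded = pf.field_info.load_plugin(setup_my_fields)
    # loaded lists the names of the fields the plugin added

This is exactly how the Sunyaev-Zeldovich change earlier in this diff uses it: pf.field_info.load_plugin(setup_sunyaev_zeldovich_fields).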

https://bitbucket.org/yt_analysis/yt/commits/9a9808333bca/
Changeset:   9a9808333bca
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-24 22:12:44+00:00
Summary:     Fixing small merge error
Affected #:  1 file

diff -r abbfa770503a6345f7d649e20bf606bd4e7e546c -r 9a9808333bca9271781ae65e68a2547870a0f681 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -161,7 +161,7 @@
 
 from yt.visualization.volume_rendering.api import \
     ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \
-    HomogenizedVolume, Camera
+    Camera
 
 # Not Currently Supported
 # from yt.visualization.volume_rendering.api import \


https://bitbucket.org/yt_analysis/yt/commits/a0e31374bfab/
Changeset:   a0e31374bfab
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-25 02:03:45+00:00
Summary:     Modifying a few defaults, making a create_volume_rendering function.
Affected #:  4 files

diff -r 9a9808333bca9271781ae65e68a2547870a0f681 -r a0e31374bfab7da071c1c483efd3a425050fa388 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -45,8 +45,11 @@
         data_source = self.data_source
         self.position = data_source.pf.domain_right_edge
 
-        width = data_source.pf.domain_width.max()
-        focus = data_source.pf.domain_center
+        width = 1.5 * data_source.pf.domain_width.max()
+        (xmi, xma), (ymi, yma), (zmi, zma) = \
+            data_source.quantities['Extrema'](['x', 'y', 'z'])
+        width = np.sqrt((xma-xmi)**2 + (yma-ymi)**2 + (zma-zmi)**2)
+        focus = data_source.get_field_parameter('center')
 
         if iterable(width) and len(width) > 1 and isinstance(width[1], str):
             width = self.pf.quan(width[0], input_units=width[1])

diff -r 9a9808333bca9271781ae65e68a2547870a0f681 -r a0e31374bfab7da071c1c483efd3a425050fa388 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -14,6 +14,7 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
+from yt.data_objects.api import ImageArray
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
 from yt.utilities.amr_kdtree.api import AMRKDTree
@@ -103,7 +104,7 @@
         self.volume = AMRKDTree(self.data_source.pf,
                                 data_source=self.data_source)
         log_fields = [self.data_source.pf.field_info[self.field].take_log]
-        self.volume.set_fields([self.field], log_fields, False)
+        self.volume.set_fields([self.field], log_fields, True)
 
     def set_scene(self, scene):
         self.scene = scene
@@ -124,8 +125,10 @@
         if cam is None:
             cam = Camera(self.data_source)
             self.scene.camera = cam
-        self.current_image = np.zeros((cam.resolution[0], cam.resolution[1],
-                                       4), dtype='float64', order='C')
+        self.current_image = ImageArray(
+            np.zeros((cam.resolution[0], cam.resolution[1],
+                      4), dtype='float64', order='C'),
+            info={'imtype': 'rendering'})
         return self.current_image
 
     def teardown(self):

diff -r 9a9808333bca9271781ae65e68a2547870a0f681 -r a0e31374bfab7da071c1c483efd3a425050fa388 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -16,6 +16,8 @@
 from yt.data_objects.static_output import Dataset
 from camera import Camera
 from render_source import VolumeSource, OpaqueSource
+from yt.data_objects.api import ImageArray
+import numpy as np
 
 
 class Scene(object):
@@ -99,7 +101,7 @@
 
         return self
 
-    def render(self):
+    def render(self, fname=None):
         self.validate()
         ims = {}
         for k, v in self.sources.iteritems():
@@ -107,14 +109,42 @@
             print 'Running', k, v
             ims[k] = v.request()
 
-        return ims
+        bmp = np.zeros_like(ims.values()[0])
+        for k, v in ims.iteritems():
+            bmp += v
+        bmp = ImageArray(bmp.d)
+        assert(isinstance(bmp, ImageArray))
 
+        if fname is not None:
+            bmp.write_png(fname, clip_ratio=4.0)
+        return bmp
+
+
+def create_volume_rendering(data_source, field=None):
+    if isinstance(data_source, Dataset):
+        pf = data_source
+        data_source = data_source.all_data()
+    else:
+        pf = data_source.pf
+
+    sc = Scene()
+    camera = Camera(data_source)
+    if field is None:
+        pf.field_list
+        field = pf.field_list[0]
+        mylog.info('Setting default field to %s' % field.__repr__())
+    render_source = VolumeSource(data_source, field)
+
+    sc.set_camera(camera)
+    sc.add_source(render_source)
+    render_source.build_defaults()
+    return sc
 
 class RenderScene(Scene):
 
     """docstring for RenderScene"""
 
-    def __init__(self, data_source=None, field=None):
+    def __init__(self, data_source, field=None):
         super(RenderScene, self).__init__()
         if isinstance(data_source, Dataset):
             self.ds = data_source
@@ -139,5 +169,3 @@
             render_source = VolumeSource(self.data_source, self.field)
             self.add_source(render_source)
             render_source.build_defaults()
-
-

diff -r 9a9808333bca9271781ae65e68a2547870a0f681 -r a0e31374bfab7da071c1c483efd3a425050fa388 yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -49,7 +49,7 @@
         self.log = False
         self.tf = None
         self.bounds = None
-        self.grey_opacity = True
+        self.grey_opacity = False
         self.profiles = {}
 
     def set_bounds(self, bounds=None):
@@ -66,7 +66,8 @@
         """
         if bounds is None:
             bounds = self.pf.h.all_data().quantities['Extrema'](self.field)
-        self.bounds = bounds
+            bounds = [b.ndarray_view() for b in bounds]
+            self.bounds = bounds
 
         # Do some error checking.
         assert(len(self.bounds) == 2)
@@ -85,6 +86,7 @@
             The field to be rendered.
         """
         self.field = field
+        self.log = self.pf._get_field_info(self.field).take_log
 
     def set_log(self, log):
         """
@@ -133,9 +135,11 @@
 
     def setup_default(self):
         """docstring for setup_default"""
-        mi, ma = self.bounds
-        print 'I AM MAPPING BETWEEN', mi, ma
-        self.tf.map_to_colormap(mi, ma, scale=10.0, colormap='RdBu_r')
+        if self.log:
+            mi, ma = np.log10(self.bounds[0]), np.log10(self.bounds[1])
+        else:
+            mi, ma = self.bounds
+        self.tf.add_layers(7, col_bounds=[mi, ma], colormap='RdBu_r')
 
     def plot(self, fn=None, profile_field=None, profile_weight=None):
         """


https://bitbucket.org/yt_analysis/yt/commits/8321d213dd00/
Changeset:   8321d213dd00
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-25 06:43:34+00:00
Summary:     Adding pitch/yaw/roll to camera, moving pieces from orientation into camera.
Affected #:  2 files

diff -r a0e31374bfab7da071c1c483efd3a425050fa388 -r 8321d213dd004bbca2446d60572b71f40a25b043 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -16,8 +16,9 @@
 
 import numpy as np
 
-from yt.funcs import *
-from yt.utilities.math_utils import get_rotation_matrix
+from yt.funcs import mylog
+from yt.units.yt_array import YTArray
+
 
 class Orientation(object):
     def __init__(self, normal_vector, north_vector=None, steady_north=False):
@@ -32,15 +33,15 @@
         normal_vector : array_like
            A vector normal to the image plane
         north_vector  : array_like, optional
-           The 'up' direction to orient the image plane.  
+           The 'up' direction to orient the image plane.
            If not specified, gets calculated automatically
         steady_north  : bool, optional
            Boolean to control whether to normalize the north_vector
-           by subtracting off the dot product of it and the normal 
+           by subtracting off the dot product of it and the normal
            vector.  Makes it easier to do rotations along a single
            axis.  If north_vector is specified, is switched to
            True.  Default: False
-           
+
         """
 
         # Make sure vectors are unitless
@@ -63,14 +64,16 @@
             self.north_vector = self.unit_vectors[1]
 
     def _setup_normalized_vectors(self, normal_vector, north_vector):
+        mylog.debug('Setting normalized vectors' + str(normal_vector)
+                    + str(north_vector))
         # Now we set up our various vectors
-        normal_vector /= np.sqrt( np.dot(normal_vector, normal_vector))
+        normal_vector /= np.sqrt(np.dot(normal_vector, normal_vector))
         if north_vector is None:
             vecs = np.identity(3)
             t = np.cross(normal_vector, vecs).sum(axis=1)
             ax = t.argmax()
-            east_vector = np.cross(vecs[ax,:], normal_vector).ravel()
-            # self.north_vector must remain None otherwise rotations about a fixed axis will break.  
+            east_vector = np.cross(vecs[ax, :], normal_vector).ravel()
+            # self.north_vector must remain None otherwise rotations about a fixed axis will break. 
             # The north_vector calculated here will still be included in self.unit_vectors.
             north_vector = np.cross(normal_vector, east_vector).ravel()
         else:
@@ -82,25 +85,5 @@
         self.normal_vector = normal_vector
         self.unit_vectors = YTArray([east_vector, north_vector, normal_vector], "")
         self.inv_mat = np.linalg.pinv(self.unit_vectors)
-        
-    def switch_orientation(self, normal_vector=None, north_vector=None):
-        r"""Change the view direction based on any of the orientation parameters.
 
-        This will recalculate all the necessary vectors and vector planes related
-        to an orientable object.
 
-        Parameters
-        ----------
-        normal_vector: array_like, optional
-            The new looking vector.
-        north_vector : array_like, optional
-            The 'up' direction for the plane of rays.  If not specific,
-            calculated automatically.
-        """
-        if north_vector is None:
-            north_vector = self.north_vector
-        if normal_vector is None:
-            normal_vector = self.normal_vector
-        self._setup_normalized_vectors(normal_vector, north_vector)
-
-        
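
For reference, the automatic 'up' selection in _setup_normalized_vectors amounts to: pick the coordinate axis whose cross product with the view normal has the largest component sum, cross it with the normal to get 'east', and complete the right-handed frame. A standalone NumPy restatement (the normal here is an arbitrary example):

    import numpy as np

    normal = np.array([0.3, 0.1, 0.9])
    normal /= np.sqrt(np.dot(normal, normal))
    vecs = np.identity(3)
    t = np.cross(normal, vecs).sum(axis=1)
    ax = t.argmax()                    # reference axis for the frame
    east = np.cross(vecs[ax, :], normal).ravel()
    north = np.cross(normal, east).ravel()  # completes the frame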

diff -r a0e31374bfab7da071c1c483efd3a425050fa388 -r 8321d213dd004bbca2446d60572b71f40a25b043 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -16,6 +16,7 @@
 from yt.funcs import iterable
 from yt.utilities.orientation import Orientation
 from yt.units.yt_array import YTArray
+from yt.utilities.math_utils import get_rotation_matrix
 import numpy as np
 
 
@@ -48,7 +49,8 @@
         width = 1.5 * data_source.pf.domain_width.max()
         (xmi, xma), (ymi, yma), (zmi, zma) = \
             data_source.quantities['Extrema'](['x', 'y', 'z'])
-        width = np.sqrt((xma-xmi)**2 + (yma-ymi)**2 + (zma-zmi)**2)
+        width = np.sqrt((xma-xmi)**2 + (yma-ymi)**2 + (zma-zmi)**2) /\
+            np.sqrt(3)
         focus = data_source.get_field_parameter('center')
 
         if iterable(width) and len(width) > 1 and isinstance(width[1], str):
@@ -68,3 +70,112 @@
         super(Camera, self).__init__(self.focus - self.position,
                                      self.north_vector, steady_north=True)
         self._moved = True
+
+    def switch_orientation(self, normal_vector=None, north_vector=None):
+        r"""Change the view direction based on any of the orientation parameters.
+
+        This will recalculate all the necessary vectors and vector planes related
+        to an orientable object.
+
+        Parameters
+        ----------
+        normal_vector: array_like, optional
+            The new looking vector.
+        north_vector : array_like, optional
+            The 'up' direction for the plane of rays.  If not specific,
+            calculated automatically.
+        """
+        if north_vector is None:
+            north_vector = self.north_vector
+        if normal_vector is None:
+            normal_vector = self.normal_vector
+        self._setup_normalized_vectors(normal_vector, north_vector)
+
+    def switch_view(self, normal_vector=None, north_vector=None):
+        r"""Change the view based on any of the view parameters.
+
+        This will recalculate the orientation and width based on any of
+        normal_vector, width, center, and north_vector.
+
+        Parameters
+        ----------
+        normal_vector: array_like, optional
+            The new looking vector.
+        north_vector : array_like, optional
+            The 'up' direction for the plane of rays.  If not specific,
+            calculated automatically.
+        """
+        if north_vector is None:
+            north_vector = self.north_vector
+        if normal_vector is None:
+            normal_vector = self.normal_vector
+        self.switch_orientation(normal_vector=normal_vector,
+                                         north_vector=north_vector)
+        self._moved = True
+
+    def pitch(self, theta):
+        r"""Rotate by a given angle about the horizontal axis
+
+        Pitch the view.
+
+        Parameters
+        ----------
+        theta : float, in radians
+             Angle (in radians) by which to pitch the view.
+
+        Examples
+        --------
+
+        >>> cam.pitch(np.pi/4)
+        """
+        rot_vector = self.unit_vectors[0]
+        R = get_rotation_matrix(theta, rot_vector)
+        self.switch_view(
+            normal_vector=np.dot(R, self.unit_vectors[2]),
+            north_vector=np.dot(R, self.unit_vectors[1]))
+        if self.steady_north:
+            self.north_vector = self.unit_vectors[1]
+
+    def yaw(self, theta):
+        r"""Rotate by a given angle about the vertical axis
+
+        Yaw the view.
+
+        Parameters
+        ----------
+        theta : float, in radians
+             Angle (in radians) by which to yaw the view.
+
+        Examples
+        --------
+
+        >>> cam.yaw(np.pi/4)
+        """
+        rot_vector = self.unit_vectors[1]
+        R = get_rotation_matrix(theta, rot_vector)
+        self.switch_view(
+            normal_vector=np.dot(R, self.unit_vectors[2]))
+
+    def roll(self, theta):
+        r"""Rotate by a given angle about the view normal axis
+
+        Roll the view.
+
+        Parameters
+        ----------
+        theta : float, in radians
+             Angle (in radians) by which to roll the view.
+
+        Examples
+        --------
+
+        >>> cam.roll(np.pi/4)
+        """
+        rot_vector = self.unit_vectors[2]
+        R = get_rotation_matrix(theta, rot_vector)
+        self.switch_view(
+            normal_vector=np.dot(R, self.unit_vectors[2]),
+            north_vector=np.dot(R, self.unit_vectors[1]))
+        if self.steady_north:
+            self.north_vector = np.dot(R, self.north_vector)
+
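
With pitch, yaw, and roll in place, a fly-around is just a loop of small rotations. A sketch, assuming sc and cam come from a scene set up as in the test script added later in this series:

    import numpy as np

    for i in range(36):
        cam.yaw(2 * np.pi / 36)        # one full turn in 36 steps
        sc.render("spin_%04i.png" % i)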


https://bitbucket.org/yt_analysis/yt/commits/a32889ff816e/
Changeset:   a32889ff816e
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-25 06:44:39+00:00
Summary:     Adding hooks for engine updating.
Affected #:  2 files

diff -r 8321d213dd004bbca2446d60572b71f40a25b043 -r a32889ff816e7ecdec18d9bcea97b46bcfa14cba yt/visualization/volume_rendering/engine.py
--- a/yt/visualization/volume_rendering/engine.py
+++ b/yt/visualization/volume_rendering/engine.py
@@ -13,6 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+from yt.funcs import mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
 from yt.utilities.lib.grid_traversal import \
@@ -62,6 +63,8 @@
     def camera_updated(self):
         if self.camera._moved:
             self._setup_box_properties()
+            self.sampler = self.get_sampler()
+            self.camera._moved = False
 
     def _setup_box_properties(self):
         unit_vectors = self.camera.unit_vectors
@@ -73,6 +76,9 @@
         self.origin = center - 0.5 * width.dot(YTArray(unit_vectors, ""))
         self.back_center = center - 0.5 * width[2] * unit_vectors[2]
         self.front_center = center + 0.5 * width[2] * unit_vectors[2]
+        mylog.debug('Setting box properties')
+        mylog.debug(self.back_center)
+        mylog.debug(self.front_center)
 
     def get_sampler(self):
         self.render_source.prepare()
@@ -85,11 +91,12 @@
                 image, self.camera.unit_vectors[
                     0], self.camera.unit_vectors[1],
                 np.array(self.camera.width, dtype='float64'),
-                self.transfer_function, self.sub_samples)
+                self.render_source.transfer_function, self.sub_samples)
         sampler = VolumeRenderSampler(*args)
         return sampler
 
     def run(self):
+        self.camera_updated()
         total_cells = 0
         if self.double_check:
             for brick in self.render_source.volume.bricks:

diff -r 8321d213dd004bbca2446d60572b71f40a25b043 -r a32889ff816e7ecdec18d9bcea97b46bcfa14cba yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -141,5 +141,6 @@
 
     def request(self, *args, **kwargs):
         """docstring for request"""
+        self.prepare()
         self.engine.run()
         return self.current_image
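
The camera_updated hook is a lazy dirty flag: moving the camera only sets _moved, and the sampler is rebuilt on the next run(). A minimal self-contained sketch of the pattern (the class names are stand-ins, not the yt objects):

    class FakeCamera(object):
        def __init__(self):
            self._moved = True

    class FakeEngine(object):
        def __init__(self, camera):
            self.camera = camera
            self.sampler = None

        def camera_updated(self):
            if self.camera._moved:
                self.sampler = object()    # stands in for get_sampler()
                self.camera._moved = False

        def run(self):
            self.camera_updated()          # rebuild only when stale
            # ... ray casting would happen here ...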


https://bitbucket.org/yt_analysis/yt/commits/8e195b23124e/
Changeset:   8e195b23124e
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-25 06:45:43+00:00
Summary:     Adding a SceneHandle class that grabs the full set of modifiable pieces of the Scene (scene, render_source, engine, camera).
Affected #:  1 file

diff -r a32889ff816e7ecdec18d9bcea97b46bcfa14cba -r 8e195b23124e949baedb508850467437e94d5e7a yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -19,6 +19,26 @@
 from yt.data_objects.api import ImageArray
 import numpy as np
 
+class SceneHandle(object):
+    """docstring for SceneHandle"""
+    def __init__(self, scene, camera, source, engine):
+        self.scene = scene
+        self.camera = camera
+        self.source = source
+        self.engine = engine
+
+    def __repr__(self):
+        desc = super(SceneHandle, self).__repr__()
+        desc += str(self)
+        return desc
+
+    def __str__(self):
+        desc = "Scene Handler\n"
+        desc += ".scene: " + self.scene.__repr__() + "\n"
+        desc += ".camera: " + self.camera.__repr__() + "\n"
+        desc += ".source: " + self.source.__repr__() + "\n"
+        desc += ".engine: " + self.engine.__repr__() + "\n"
+        return desc
 
 class Scene(object):
 
@@ -28,7 +48,6 @@
 
     def __init__(self):
         super(Scene, self).__init__()
-        self.datasets = []
         self.camera = None
         self.sources = {}
         self.camera_path = None
@@ -39,17 +58,14 @@
         for source in self.sources.values():
             source.set_camera(self.camera)
 
-    def setup_camera_links(self):
-        """
-        The camera object needs to be linked to:
-            * Engines
-            * Render Sources
-        """
-        if self.camera is None:
-            raise RuntimeError("Camera does not exist")
+    def get_handle(self, key=None):
+        """docstring for get_handle"""
 
-        for source in self.sources.values():
-            source.set_camera(self.camera)
+        if key is None:
+            key = self.sources.keys()[0]
+        handle = SceneHandle(self, self.camera, self.sources[key],
+                             self.sources[key].engine)
+        return handle
 
     def iter_opaque_sources(self):
         """
@@ -83,10 +99,6 @@
             self.request()
         return self._current
 
-    def register_dataset(self, ds):
-        """Add a dataset to the scene"""
-        self.datasets.append(ds)
-
     def add_source(self, render_source, keyname=None):
         """
         Add a render source to the scene.  This will autodetect the
@@ -101,7 +113,7 @@
 
         return self
 
-    def render(self, fname=None):
+    def render(self, fname=None, clip_ratio=None):
         self.validate()
         ims = {}
         for k, v in self.sources.iteritems():
@@ -116,7 +128,7 @@
         assert(isinstance(bmp, ImageArray))
 
         if fname is not None:
-            bmp.write_png(fname, clip_ratio=4.0)
+            bmp.write_png(fname, clip_ratio=clip_ratio)
         return bmp
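
In use, the handle gives a single object for interactive tweaking (assuming a scene like the one in the test script):

    import numpy as np

    h = sc.get_handle()                 # defaults to the first source
    h.source.transfer_function.grey_opacity = True
    h.camera.yaw(np.pi / 8)             # adjust the view via the handle
    sc.render("adjusted.png", clip_ratio=4.0)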
 
 


https://bitbucket.org/yt_analysis/yt/commits/065c644ffd6d/
Changeset:   065c644ffd6d
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-25 06:46:41+00:00
Summary:     Adding a testing script. It doesn't use answer testing at the moment, but it is useful for tying the code state to a working example.
Affected #:  1 file

diff -r 8e195b23124e949baedb508850467437e94d5e7a -r 065c644ffd6da33608e47a085e619af3973abd0e yt/visualization/volume_rendering/tests/test_scene.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/tests/test_scene.py
@@ -0,0 +1,73 @@
+
+"""
+Test for Volume Rendering Scene, and their movement.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+from yt.mods import *
+from yt.testing import \
+    fake_random_pf
+from yt.visualization.volume_rendering.scene import Scene, RenderScene, \
+    create_volume_rendering
+from yt.visualization.volume_rendering.camera import Camera
+from yt.visualization.volume_rendering.render_source import VolumeSource
+
+
+# def test_default(pf):
+#     sc = RenderScene(pf)
+#     for k, im in sc.render().iteritems():
+#         write_bitmap(im, 'scene_%s.png' % k)
+#     return sc
+# 
+# 
+# def test_data_source(pf):
+#     sc = Scene()
+#     ds = pf.h.sphere(pf.domain_center, pf.domain_width[0] / 3)
+#     vol = VolumeSource(ds, field=('gas', 'density'))
+#     cam = Camera(ds)
+#     sc.set_camera(cam)
+#     sc.add_source(vol)
+# 
+#     vol.build_defaults()
+#     for k, im in sc.render().iteritems():
+#         write_bitmap(im, 'data_scene_%s.png' % k)
+# 
+# 
+# def test_two_source(pf):
+#     sc = Scene()
+#     vol = VolumeSource(pf.h.sphere(pf.domain_center, pf.domain_width[0] / 3),
+#                        field=('gas', 'density'))
+#     sc.add_source(vol)
+#     vol.build_defaults()
+# 
+#     vol = VolumeSource(pf.h.sphere(pf.domain_center / 3,
+#                                    pf.domain_width[0] / 3),
+#                        field=('gas', 'density'))
+#     sc.add_source(vol)
+#     for k, im in sc.render().iteritems():
+#         write_bitmap(im, 'multiple_scene_%s.png' % k)
+# 
+
+#pf = load('/home/skillman/kipac/data/IsolatedGalaxy/galaxy0030/galaxy0030')
+pf = fake_random_pf(64)
+ds = pf.h.sphere(pf.domain_center, pf.domain_width[0] / 2)
+sc = create_volume_rendering(ds, field=('gas', 'density'))
+sc.render('test.png')
+h = sc.get_handle()
+h.source.transfer_function.grey_opacity = True
+h.source.transfer_function.map_to_colormap(-0.5, -0.2, scale=50.0, colormap='RdBu_r')
+cam = h.camera
+for i in range(10):
+    cam.yaw(np.pi / 10.)
+    sc.render('test_%04i.png' % i)
+


https://bitbucket.org/yt_analysis/yt/commits/b8e02ac99427/
Changeset:   b8e02ac99427
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-25 17:51:48+00:00
Summary:     Updating the north vector choices. I think not having one by default makes more sense.
Affected #:  5 files

diff -r 065c644ffd6da33608e47a085e619af3973abd0e -r b8e02ac994275d3d993cac793d7465daa7e5b1df yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -30,7 +30,7 @@
         """Initialize a Camera Instance"""
         self.data_source = data_source
         self.position = None
-        self.north_vector = np.array([0.0, 0.0, 1.0])
+        self.north_vector = None
         self.resolution = (256, 256)
         self.light = None
         self.width = None
@@ -40,7 +40,7 @@
             self.inherit_default_from_data_source()
         else:
             super(Camera, self).__init__(self.focus - self.position,
-                                         self.north_vector, steady_north=True)
+                                         self.north_vector, steady_north=False)
 
     def inherit_default_from_data_source(self):
         data_source = self.data_source
@@ -68,7 +68,7 @@
         self.focus = focus
 
         super(Camera, self).__init__(self.focus - self.position,
-                                     self.north_vector, steady_north=True)
+                                     self.north_vector, steady_north=False)
         self._moved = True
 
     def switch_orientation(self, normal_vector=None, north_vector=None):

diff -r 065c644ffd6da33608e47a085e619af3973abd0e -r b8e02ac994275d3d993cac793d7465daa7e5b1df yt/visualization/volume_rendering/engine.py
--- a/yt/visualization/volume_rendering/engine.py
+++ b/yt/visualization/volume_rendering/engine.py
@@ -45,7 +45,7 @@
         self.render_source = render_source
         self.transfer_function = self.render_source.transfer_function
         self.sub_samples = 5
-        self.num_threads = 1
+        self.num_threads = 0
         self.double_check = False
         self.box_vectors = None
         self.origin = None

diff -r 065c644ffd6da33608e47a085e619af3973abd0e -r b8e02ac994275d3d993cac793d7465daa7e5b1df yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -32,14 +32,15 @@
         super(RenderSource, self).__init__()
         self.opaque = False
 
-    def request(self, *args, **kwargs):
-        """returns a new ImageArray"""
-        pass
-
     def setup(self):
         """Set up data needed to render"""
         pass
 
+    def render(self, zbuffer=None):
+        """docstring for request"""
+        self.prepare()
+        self.engine.run()
+        return self.current_image
 
 class OpaqueSource(RenderSource):
     """docstring for OpaqueSource"""
@@ -139,7 +140,7 @@
         """docstring for add_sampler"""
         pass
 
-    def request(self, *args, **kwargs):
+    def render(self, zbuffer=None):
         """docstring for request"""
         self.prepare()
         self.engine.run()

diff -r 065c644ffd6da33608e47a085e619af3973abd0e -r b8e02ac994275d3d993cac793d7465daa7e5b1df yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -76,6 +76,16 @@
             if isinstance(source, OpaqueSource):
                 yield k, source
 
+    def iter_transparent_sources(self):
+        """
+        Iterate over opaque RenderSource objects,
+        returning a tuple of (key, source)
+        """
+        for k, source in self.sources.iteritems():
+            if not isinstance(source, OpaqueSource):
+                yield k, source
+
+
     def validate(self):
         if self.camera is None:
             for k, source in self.sources.iteritems():
@@ -119,7 +129,7 @@
         for k, v in self.sources.iteritems():
             v.validate()
             print 'Running', k, v
-            ims[k] = v.request()
+            ims[k] = v.render()
 
         bmp = np.zeros_like(ims.values()[0])
         for k, v in ims.iteritems():

diff -r 065c644ffd6da33608e47a085e619af3973abd0e -r b8e02ac994275d3d993cac793d7465daa7e5b1df yt/visualization/volume_rendering/tests/test_scene.py
--- a/yt/visualization/volume_rendering/tests/test_scene.py
+++ b/yt/visualization/volume_rendering/tests/test_scene.py
@@ -63,11 +63,13 @@
 ds = pf.h.sphere(pf.domain_center, pf.domain_width[0] / 2)
 sc = create_volume_rendering(ds, field=('gas', 'density'))
 sc.render('test.png')
+
 h = sc.get_handle()
 h.source.transfer_function.grey_opacity = True
-h.source.transfer_function.map_to_colormap(-0.5, -0.2, scale=50.0, colormap='RdBu_r')
+h.source.transfer_function.map_to_colormap(-2, 0.0, scale=50.0, colormap='RdBu_r')
+
 cam = h.camera
-for i in range(10):
-    cam.yaw(np.pi / 10.)
+for i in range(36):
+    cam.pitch(-2*np.pi / 36.)
     sc.render('test_%04i.png' % i)
 


https://bitbucket.org/yt_analysis/yt/commits/528eaddb85e8/
Changeset:   528eaddb85e8
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-25 17:52:44+00:00
Summary:     Adding a ZBuffer class to handle compositing. Much more work to be done.
Affected #:  2 files

diff -r b8e02ac994275d3d993cac793d7465daa7e5b1df -r 528eaddb85e8e33833535dc69664a494c18dc56b yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -17,6 +17,7 @@
 from camera import Camera
 from render_source import VolumeSource, OpaqueSource
 from yt.data_objects.api import ImageArray
+from zbuffer_array import ZBuffer
 import numpy as np
 
 class SceneHandle(object):
@@ -97,10 +98,19 @@
                 raise RuntimeError("Couldn't build default camera")
         return
 
-    def request(self):
-        pass
+    def composite(self):
+        opaque = ZBuffer(
+            np.zeros((self.camera.resolution[0],
+                      self.camera.resolution[1],
+                      4)),
+            np.ones(self.camera.resolution) * np.inf)
 
-    def composite(self):
+        for k, source in self.iter_opaque_sources():
+            opaque = opaque + source.zbuffer
+
+        for k, source in self.iter_transparent_sources():
+            source.render(zbuffer=opaque)
+            opaque = opaque + source.zbuffer
         pass
 
     @property

diff -r b8e02ac994275d3d993cac793d7465daa7e5b1df -r 528eaddb85e8e33833535dc69664a494c18dc56b yt/visualization/volume_rendering/zbuffer_array.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/zbuffer_array.py
@@ -0,0 +1,55 @@
+"""
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+
+from yt.funcs import mylog
+from yt.data_objects.api import ImageArray
+import numpy as np
+
+
+class ZBuffer(object):
+    """docstring for ZBuffer"""
+    def __init__(self, rgba, z):
+        super(ZBuffer, self).__init__()
+        assert(rgba.shape[:len(z.shape)] == z.shape)
+        self.rgba = rgba
+        self.z = z
+        self.shape = z.shape
+
+    def __add__(self, other):
+        assert(self.shape == other.shape)
+        f_or_b = self.z < other.z
+        rgba = self.rgba * f_or_b + other.rgba * (1 - f_or_b)
+        z = np.min([self.z, other.z], axis=0)
+        return ZBuffer(rgba, z)
+
+    def __eq__(self, other):
+        equal = True
+        equal *= np.all(self.rgba == other.rgba)
+        equal *= np.all(self.z == other.z)
+        return equal
+
+    def paint(self, ind, value, z):
+        if z < self.z[ind]:
+            self.rgba[ind] = value
+            self.z[ind] = z
+
+if __name__ == "__main__":
+    shape = (64, 64)
+    for shape in [(64, 64), (16, 16, 4), (128), (16, 32)]:
+        b1 = ZBuffer(np.random.random(shape), np.ones(shape))
+        b2 = ZBuffer(np.random.random(shape), np.zeros(shape))
+        c = b1 + b2
+        assert(np.all(c.rgba == b2.rgba))
+        assert(np.all(c.z == b2.z))
+        assert(np.all(c == b2))
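
The __add__ above is per-pixel depth compositing: whichever buffer has the smaller z wins that pixel. Restating the self-test with two uniform depths, and assuming the ZBuffer class above is importable:

    import numpy as np

    shape = (64, 64)
    far = ZBuffer(np.random.random(shape), np.ones(shape))    # z = 1
    near = ZBuffer(np.random.random(shape), np.zeros(shape))  # z = 0
    combined = far + near
    assert np.all(combined.rgba == near.rgba)  # nearer pixels win
    assert np.all(combined.z == near.z)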


https://bitbucket.org/yt_analysis/yt/commits/2e6a22284891/
Changeset:   2e6a22284891
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-25 18:04:13+00:00
Summary:     Merging in zbuffer work from a while ago.
Affected #:  4 files

diff -r 528eaddb85e8e33833535dc69664a494c18dc56b -r 2e6a2228489139dd4e4d83478b196a76751511f2 yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -59,7 +59,7 @@
                      sample_function *sampler,
                      void *data,
                      np.float64_t *return_t = *,
-                     np.float64_t enter_t = *) nogil
+                     np.float64_t max_t = *) nogil
 
 cdef inline int vc_index(VolumeContainer *vc, int i, int j, int k):
     return (i*vc.dims[1]+j)*vc.dims[2]+k

diff -r 528eaddb85e8e33833535dc69664a494c18dc56b -r 2e6a2228489139dd4e4d83478b196a76751511f2 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -181,6 +181,7 @@
 
 cdef struct ImageContainer:
     np.float64_t *vp_pos, *vp_dir, *center, *image,
+    np.float64_t *zbuffer
     np.float64_t pdx, pdy, bounds[4]
     int nv[2]
     int vp_strides[3]
@@ -196,6 +197,7 @@
     cdef ImageContainer *image
     cdef sampler_function *sampler
     cdef public object avp_pos, avp_dir, acenter, aimage, ax_vec, ay_vec
+    cdef public object azbuffer
     cdef void *supp_data
     cdef np.float64_t width[3]
     def __init__(self, 
@@ -210,6 +212,8 @@
                   *args, **kwargs):
         self.image = <ImageContainer *> malloc(sizeof(ImageContainer))
         cdef ImageContainer *imagec = self.image
+        cdef np.ndarray[np.float64_t, ndim=2] zbuffer
+        zbuffer = kwargs.pop("zbuffer", None)
         self.sampler = NULL
         cdef int i, j
         # These assignments are so we can track the objects and prevent their
@@ -220,12 +224,16 @@
         self.aimage = image
         self.ax_vec = x_vec
         self.ay_vec = y_vec
+        self.azbuffer = zbuffer
         imagec.vp_pos = <np.float64_t *> vp_pos.data
         imagec.vp_dir = <np.float64_t *> vp_dir.data
         imagec.center = <np.float64_t *> center.data
         imagec.image = <np.float64_t *> image.data
         imagec.x_vec = <np.float64_t *> x_vec.data
         imagec.y_vec = <np.float64_t *> y_vec.data
+        imagec.zbuffer = NULL
+        if zbuffer is not None:
+            imagec.zbuffer = <np.float64_t *> zbuffer.data
         imagec.nv[0] = image.shape[0]
         imagec.nv[1] = image.shape[1]
         for i in range(4): imagec.bounds[i] = bounds[i]
@@ -299,7 +307,7 @@
         cdef ImageContainer *im = self.image
         self.setup(pg)
         if self.sampler == NULL: raise RuntimeError
-        cdef np.float64_t *v_pos, *v_dir, rgba[6], extrema[4]
+        cdef np.float64_t *v_pos, *v_dir, rgba[6], extrema[4], max_t
         hit = 0
         cdef np.int64_t nx, ny, size
         if im.vd_strides[0] == -1:
@@ -339,8 +347,12 @@
                     v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
                     offset = im.im_strides[0] * vi + im.im_strides[1] * vj
                     for i in range(Nch): idata.rgba[i] = im.image[i + offset]
+                    if im.zbuffer != NULL:
+                        max_t = im.zbuffer[im.nv[0] * vi + vj]
+                    else:
+                        max_t = 1.0
                     walk_volume(vc, v_pos, im.vp_dir, self.sampler,
-                                (<void *> idata))
+                                (<void *> idata), NULL, max_t)
                     for i in range(Nch): im.image[i + offset] = idata.rgba[i]
                 free(idata)
                 free(v_pos)
@@ -359,8 +371,12 @@
                     # Note that for Nch != 3 we need a different offset into
                     # the image object than for the vectors!
                     for i in range(Nch): idata.rgba[i] = im.image[i + Nch*j]
+                    if im.zbuffer != NULL:
+                        max_t = im.zbuffer[j]
+                    else:
+                        max_t = 1.0
                     walk_volume(vc, v_pos, v_dir, self.sampler, 
-                                (<void *> idata))
+                                (<void *> idata), NULL, max_t)
                     for i in range(Nch): im.image[i + Nch*j] = idata.rgba[i]
                 free(v_dir)
                 free(idata)
@@ -450,9 +466,9 @@
                   np.ndarray[np.float64_t, ndim=1] x_vec,
                   np.ndarray[np.float64_t, ndim=1] y_vec,
                   np.ndarray[np.float64_t, ndim=1] width,
-                  n_samples = 10):
+                  n_samples = 10, **kwargs):
         ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image,
-                               x_vec, y_vec, width)
+                               x_vec, y_vec, width, **kwargs)
         cdef int i
         # Now we handle tf_obj
         self.vra = <VolumeRenderAccumulator *> \
@@ -651,9 +667,9 @@
                   np.ndarray[np.float64_t, ndim=1] y_vec,
                   np.ndarray[np.float64_t, ndim=1] width,
                   tf_obj, n_samples = 10,
-                  star_list = None):
+                  star_list = None, **kwargs):
         ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image,
-                               x_vec, y_vec, width)
+                               x_vec, y_vec, width, **kwargs)
         cdef int i
         cdef np.ndarray[np.float64_t, ndim=1] temp
         # Now we handle tf_obj
@@ -723,9 +739,10 @@
                   np.ndarray[np.float64_t, ndim=1] width,
                   tf_obj, n_samples = 10,
                   light_dir=[1.,1.,1.],
-                  light_rgba=[1.,1.,1.,1.]):
+                  light_rgba=[1.,1.,1.,1.],
+                  **kwargs):
         ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image,
-                               x_vec, y_vec, width)
+                               x_vec, y_vec, width, **kwargs)
         cdef int i
         cdef np.ndarray[np.float64_t, ndim=1] temp
         # Now we handle tf_obj
@@ -780,12 +797,12 @@
                      sampler_function *sampler,
                      void *data,
                      np.float64_t *return_t = NULL,
-                     np.float64_t enter_t = -1.0) nogil:
+                     np.float64_t max_t = 1.0) nogil:
     cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
     cdef np.float64_t intersect_t = 1.1
     cdef np.float64_t iv_dir[3]
     cdef np.float64_t tmax[3], tdelta[3]
-    cdef np.float64_t dist, alpha, dt, exit_t
+    cdef np.float64_t dist, alpha, dt, exit_t, enter_t = -1.0
     cdef np.float64_t tr, tl, temp_x, temp_y, dv
     direction = -1
     if vc.left_edge[0] <= v_pos[0] and v_pos[0] <= vc.right_edge[0] and \
@@ -825,7 +842,7 @@
             direction = i
             intersect_t = tl
     if enter_t >= 0.0: intersect_t = enter_t 
-    if not ((0.0 <= intersect_t) and (intersect_t < 1.0)): return 0
+    if not ((0.0 <= intersect_t) and (intersect_t < max_t)): return 0
     for i in range(3):
         # Two things have to be set inside this loop.
         # cur_ind[i], the current index of the grid cell the ray is in
@@ -869,12 +886,12 @@
                 i = 1
             else:
                 i = 2
-        exit_t = fmin(tmax[i], 1.0)
+        exit_t = fmin(tmax[i], max_t)
         sampler(vc, v_pos, v_dir, enter_t, exit_t, cur_ind, data)
         cur_ind[i] += step[i]
         enter_t = tmax[i]
         tmax[i] += tdelta[i]
-        if cur_ind[i] < 0 or cur_ind[i] >= vc.dims[i] or enter_t >= 1.0:
+        if cur_ind[i] < 0 or cur_ind[i] >= vc.dims[i] or enter_t >= max_t:
             break
     if return_t != NULL: return_t[0] = exit_t
     return hit

diff -r 528eaddb85e8e33833535dc69664a494c18dc56b -r 2e6a2228489139dd4e4d83478b196a76751511f2 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -113,6 +113,29 @@
                                          north_vector=north_vector)
         self._moved = True
 
+
+
+    # MOVE THIS
+    def get_sampler(self, args):
+        kwargs = {}
+        kwargs['zbuffer'] = np.ones((self.resolution[0], self.resolution[1]))
+        if self.star_trees is not None:
+            kwargs = {'star_list': self.star_trees}
+        if self.use_light:
+            if self.light_dir is None:
+                self.set_default_light_dir()
+            temp_dir = np.empty(3,dtype='float64')
+            temp_dir = self.light_dir[0] * self.orienter.unit_vectors[1] + \
+                    self.light_dir[1] * self.orienter.unit_vectors[2] + \
+                    self.light_dir[2] * self.orienter.unit_vectors[0]
+            if self.light_rgba is None:
+                self.set_default_light_rgba()
+            sampler = LightSourceRenderSampler(*args, light_dir=temp_dir,
+                    light_rgba=self.light_rgba, **kwargs)
+        else:
+            sampler = self._sampler_object(*args, **kwargs)
+        return sampler
+
     def pitch(self, theta):
         r"""Rotate by a given angle about the horizontal axis
 

diff -r 528eaddb85e8e33833535dc69664a494c18dc56b -r 2e6a2228489139dd4e4d83478b196a76751511f2 yt/visualization/volume_rendering/old_camera.py
--- a/yt/visualization/volume_rendering/old_camera.py
+++ b/yt/visualization/volume_rendering/old_camera.py
@@ -553,6 +553,7 @@
     star_trees = None
     def get_sampler(self, args):
         kwargs = {}
+        kwargs['zbuffer'] = np.ones((self.resolution[0], self.resolution[1]))
         if self.star_trees is not None:
             kwargs = {'star_list': self.star_trees}
         if self.use_light:
@@ -586,7 +587,7 @@
                     if np.any(np.isnan(data)):
                         raise RuntimeError
 
-        view_pos = self.front_center + self.orienter.unit_vectors[2] * 1.0e6 * self.width[2]
+        view_pos = self.back_center - self.orienter.unit_vectors[2] * 1.0e6 * self.width[2]
         for brick in self.volume.traverse(view_pos):
             sampler(brick, num_threads=num_threads)
             total_cells += np.prod(brick.my_data[0].shape)
@@ -2173,8 +2174,8 @@
             pass
 
     def get_sampler_args(self, image):
-        rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
-        args = (rotp, self.box_vectors[2], self.back_center,
+        rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.front_center.ravel()])
+        args = (rotp, -self.box_vectors[2], self.front_center,
             (-self.width[0]/2., self.width[0]/2.,
              -self.width[1]/2., self.width[1]/2.),
             image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
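
The net effect of the max_t plumbing: a z-buffer entry clamps that ray's integration, so sampling stops at the depth of any opaque pixel in front of it. A pure-Python sketch of the early-exit logic (cell_exits and emit are illustrative stand-ins, not the Cython API):

    def walk_ray(cell_exits, max_t, emit):
        enter_t = 0.0
        for exit_t in cell_exits:         # parametric exit of each cell
            exit_t = min(exit_t, max_t)   # never integrate past the z-buffer
            emit(enter_t, exit_t)
            enter_t = exit_t
            if enter_t >= max_t:          # opaque depth reached: stop
                break

    samples = []
    walk_ray([0.2, 0.5, 0.9, 1.0], 0.6,
             lambda a, b: samples.append((a, b)))
    assert samples == [(0.0, 0.2), (0.2, 0.5), (0.5, 0.6)]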


https://bitbucket.org/yt_analysis/yt/commits/e66249876050/
Changeset:   e66249876050
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-28 20:17:20+00:00
Summary:     Moving pieces around. About to change from Engine to Lens, which makes more sense from a naming perspective.
Affected #:  5 files

diff -r 2e6a2228489139dd4e4d83478b196a76751511f2 -r e66249876050d48f1574a68616ce12092e1fa1cb yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -113,29 +113,6 @@
                                          north_vector=north_vector)
         self._moved = True
 
-
-
-    # MOVE THIS
-    def get_sampler(self, args):
-        kwargs = {}
-        kwargs['zbuffer'] = np.ones((self.resolution[0], self.resolution[1]))
-        if self.star_trees is not None:
-            kwargs = {'star_list': self.star_trees}
-        if self.use_light:
-            if self.light_dir is None:
-                self.set_default_light_dir()
-            temp_dir = np.empty(3,dtype='float64')
-            temp_dir = self.light_dir[0] * self.orienter.unit_vectors[1] + \
-                    self.light_dir[1] * self.orienter.unit_vectors[2] + \
-                    self.light_dir[2] * self.orienter.unit_vectors[0]
-            if self.light_rgba is None:
-                self.set_default_light_rgba()
-            sampler = LightSourceRenderSampler(*args, light_dir=temp_dir,
-                    light_rgba=self.light_rgba, **kwargs)
-        else:
-            sampler = self._sampler_object(*args, **kwargs)
-        return sampler
-
     def pitch(self, theta):
         r"""Rotate by a given angle about the horizontal axis
 

diff -r 2e6a2228489139dd4e4d83478b196a76751511f2 -r e66249876050d48f1574a68616ce12092e1fa1cb yt/visualization/volume_rendering/engine.py
--- a/yt/visualization/volume_rendering/engine.py
+++ b/yt/visualization/volume_rendering/engine.py
@@ -18,6 +18,7 @@
     ParallelAnalysisInterface
 from yt.utilities.lib.grid_traversal import \
     VolumeRenderSampler
+from camera import Camera
 from yt.units.yt_array import YTArray
 import numpy as np
 
@@ -43,7 +44,6 @@
         self.scene = scene
         self.camera = scene.camera
         self.render_source = render_source
-        self.transfer_function = self.render_source.transfer_function
         self.sub_samples = 5
         self.num_threads = 0
         self.double_check = False
@@ -66,6 +66,17 @@
             self.sampler = self.get_sampler()
             self.camera._moved = False
 
+    def new_image(self):
+        cam = self.scene.camera
+        if cam is None:
+            cam = Camera(self.data_source)
+            self.scene.camera = cam
+        self.current_image = ImageArray(
+            np.zeros((cam.resolution[0], cam.resolution[1],
+                      4), dtype='float64', order='C'),
+            info={'imtype': 'rendering'})
+        return self.current_image
+
     def _setup_box_properties(self):
         unit_vectors = self.camera.unit_vectors
         width = self.camera.width
@@ -81,6 +92,10 @@
         mylog.debug(self.front_center)
 
     def get_sampler(self):
+        self._setup_box_properties()
+        kwargs = {}
+        if self.render_source.zbuffer is not None:
+            kwargs['zbuffer'] = self.render_source.zbuffer.z
         self.render_source.prepare()
         image = self.render_source.current_image
         rotp = np.concatenate([self.scene.camera.inv_mat.ravel('F'),
@@ -92,7 +107,7 @@
                     0], self.camera.unit_vectors[1],
                 np.array(self.camera.width, dtype='float64'),
                 self.render_source.transfer_function, self.sub_samples)
-        sampler = VolumeRenderSampler(*args)
+        sampler = VolumeRenderSampler(*args, **kwargs)
         return sampler
 
     def run(self):
@@ -114,11 +129,4 @@
             self.finalize_image(self.sampler.aimage)
         return
 
-    def finalize_image(self, image):
-        cam = self.scene.camera
-        view_pos = self.front_center + cam.unit_vectors[2] * \
-            1.0e6 * cam.width[2]
-        image = self.render_source.volume.reduce_tree_images(image, view_pos)
-        if self.transfer_function.grey_opacity is False:
-            image[:, :, 3] = 1.0
-        return image
+

diff -r 2e6a2228489139dd4e4d83478b196a76751511f2 -r e66249876050d48f1574a68616ce12092e1fa1cb yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -31,16 +31,34 @@
     def __init__(self):
         super(RenderSource, self).__init__()
         self.opaque = False
+        self.engine = None
+        self.zbuffer = None
 
     def setup(self):
         """Set up data needed to render"""
         pass
 
+    def set_scene(self, scene):
+        self.scene = scene
+        if self.engine is not None:
+            self.engine.set_camera(scene.camera)
+
     def render(self, zbuffer=None):
-        """docstring for request"""
-        self.prepare()
-        self.engine.run()
-        return self.current_image
+        pass
+
+    def validate(self):
+        pass
+
+    def new_image(self):
+        pass
+
+    def prepare(self):
+        pass
+
+    def get_default_camera(self):
+        """If possible, create a camera based on the render source"""
+        return None
+
 
 class OpaqueSource(RenderSource):
     """docstring for OpaqueSource"""
@@ -48,6 +66,8 @@
         super(OpaqueSource, self).__init__()
         self.opaque = True
 
+    def set_zbuffer(self, zbuffer):
+        self.zbuffer = zbuffer
 
 class VolumeSource(RenderSource):
 
@@ -98,6 +118,11 @@
         self.tfh.setup_default()
         self.transfer_function = self.tfh.tf
 
+    def prepare(self):
+        """prepare for rendering"""
+        self.scene.validate()
+        self.new_image()
+
     def build_default_engine(self):
         self.engine = PlaneParallelEngine(self.scene, self)
 
@@ -107,31 +132,10 @@
         log_fields = [self.data_source.pf.field_info[self.field].take_log]
         self.volume.set_fields([self.field], log_fields, True)
 
-    def set_scene(self, scene):
-        self.scene = scene
-        if self.engine is not None:
-            self.engine.set_camera(scene.camera)
-
     def set_camera(self, camera):
         """Set camera in this object, as well as any attributes"""
         self.engine.set_camera(camera)
 
-    def prepare(self):
-        """prepare for rendering"""
-        self.scene.validate()
-        self.new_image()
-
-    def new_image(self):
-        cam = self.scene.camera
-        if cam is None:
-            cam = Camera(self.data_source)
-            self.scene.camera = cam
-        self.current_image = ImageArray(
-            np.zeros((cam.resolution[0], cam.resolution[1],
-                      4), dtype='float64', order='C'),
-            info={'imtype': 'rendering'})
-        return self.current_image
-
     def teardown(self):
         """docstring for teardown"""
         pass
@@ -142,6 +146,34 @@
 
     def render(self, zbuffer=None):
         """docstring for request"""
+        self.zbuffer = zbuffer
         self.prepare()
         self.engine.run()
+
+        self.camera_updated()
+        total_cells = 0
+        if self.double_check:
+            for brick in self.render_source.volume.bricks:
+                for data in brick.my_data:
+                    if np.any(np.isnan(data)):
+                        raise RuntimeError
+
+        view_pos = self.front_center + \
+            self.camera.unit_vectors[2] * 1.0e6 * self.camera.width[2]
+        for brick in self.render_source.volume.traverse(view_pos):
+            self.sampler(brick, num_threads=self.num_threads)
+            total_cells += np.prod(brick.my_data[0].shape)
+
+        self.current_image = \
+            self.finalize_image(self.sampler.aimage)
+
         return self.current_image
+
+    def finalize_image(self, image):
+        cam = self.scene.camera
+        view_pos = self.front_center + cam.unit_vectors[2] * \
+            1.0e6 * cam.width[2]
+        image = self.render_source.volume.reduce_tree_images(image, view_pos)
+        if self.transfer_function.grey_opacity is False:
+            image[:, :, 3] = 1.0
+        return image

diff -r 2e6a2228489139dd4e4d83478b196a76751511f2 -r e66249876050d48f1574a68616ce12092e1fa1cb yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -20,6 +20,7 @@
 from zbuffer_array import ZBuffer
 import numpy as np
 
+
 class SceneHandle(object):
     """docstring for SceneHandle"""
     def __init__(self, scene, camera, source, engine):
@@ -41,6 +42,7 @@
         desc += ".engine: " + self.engine.__repr__() + "\n"
         return desc
 
+
 class Scene(object):
 
     """Skeleton Class for 3D Scenes"""
@@ -49,24 +51,8 @@
 
     def __init__(self):
         super(Scene, self).__init__()
-        self.camera = None
         self.sources = {}
-        self.camera_path = None
-
-    def set_camera(self, camera):
-        self.camera = camera
-
-        for source in self.sources.values():
-            source.set_camera(self.camera)
-
-    def get_handle(self, key=None):
-        """docstring for get_handle"""
-
-        if key is None:
-            key = self.sources.keys()[0]
-        handle = SceneHandle(self, self.camera, self.sources[key],
-                             self.sources[key].engine)
-        return handle
+        self.default_camera = None
 
     def iter_opaque_sources(self):
         """
@@ -86,38 +72,20 @@
             if not isinstance(source, OpaqueSource):
                 yield k, source
 
-
-    def validate(self):
-        if self.camera is None:
-            for k, source in self.sources.iteritems():
-                try:
-                    self.camera = Camera(source.data_source)
-                    return
-                except:
-                    pass
-                raise RuntimeError("Couldn't build default camera")
-        return
-
-    def composite(self):
-        opaque = ZBuffer(
-            np.zeros(self.camera.resolution[0],
-                     self.camera.resolution[1],
-                     4),
-            np.ones(self.camera.resolution) * np.inf)
-
-        for k, source in self.iter_opaque_sources():
-            opaque = opaque + source.zbuffer
-
-        for k, source in self.iter_transparent_sources():
-            source.render(zbuffer=opaque)
-            opaque = opaque + source.zbuffer
-        pass
-
-    @property
-    def current(self):
-        if self._current is None:
-            self.request()
-        return self._current
+    def get_default_camera(self):
+        """
+        Use existing sources and their data sources to
+        build a default camera. If no useful source is
+        available, create a default Camera at (1, 1, 1)
+        looking in the (1, 0, 0) direction."""
+        for k, source in self.sources.iteritems():
+            cam = source.get_default_camera()
+            if cam is not None:
+                break
+        if cam is None:
+            cam = Camera()
+        self.default_camera = cam
+        return cam
 
     def add_source(self, render_source, keyname=None):
         """
@@ -151,6 +119,39 @@
             bmp.write_png(fname, clip_ratio=clip_ratio)
         return bmp
 
+    def validate(self):
+        for k, source in self.sources.iteritems():
+            source.validate()
+        return
+
+    def composite(self):
+        opaque = ZBuffer(
+            np.zeros((self.camera.resolution[0],
+                      self.camera.resolution[1],
+                      4)),
+            np.ones(self.camera.resolution) * np.inf)
+
+        for k, source in self.iter_opaque_sources():
+            if source.zbuffer is not None:
+                opaque = opaque + source.zbuffer
+
+        for k, source in self.iter_transparent_sources():
+            source.render(zbuffer=opaque)
+            opaque = opaque + source.zbuffer
+        pass
+
+    def set_default_camera(self, camera):
+        self.default_camera = camera
+
+    def get_handle(self, key=None):
+        """docstring for get_handle"""
+
+        if key is None:
+            key = self.sources.keys()[0]
+        handle = SceneHandle(self, self.camera, self.sources[key],
+                             self.sources[key].engine)
+        return handle
+
 
 def create_volume_rendering(data_source, field=None):
     if isinstance(data_source, Dataset):
@@ -171,33 +172,3 @@
     sc.add_source(render_source)
     render_source.build_defaults()
     return sc
-
-class RenderScene(Scene):
-
-    """docstring for RenderScene"""
-
-    def __init__(self, data_source, field=None):
-        super(RenderScene, self).__init__()
-        if isinstance(data_source, Dataset):
-            self.ds = data_source
-            data_source = data_source.all_data()
-        else:
-            self.ds = data_source.pf
-
-        self.data_source = data_source
-        self.camera = Camera(data_source)
-        self.field = field
-        self.render_sources = {}
-        self.default_setup()
-
-    def default_setup(self):
-        """docstring for default_setup"""
-        if self.field is None:
-            self.ds.field_list
-            self.field = self.ds.field_list[0]
-            mylog.info('Setting default field to %s' % self.field.__repr__())
-
-        if self.data_source:
-            render_source = VolumeSource(self.data_source, self.field)
-            self.add_source(render_source)
-            render_source.build_defaults()
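
composite() above folds sources together by adding ZBuffer objects; a self-contained sketch of the per-pixel rule such a depth composite implies (plain NumPy, hypothetical helper; not the ZBuffer API): the nearer depth wins at each pixel.

    import numpy as np

    def composite_pair(rgba_a, z_a, rgba_b, z_b):
        front = z_a <= z_b                        # True where A is nearer
        rgba = np.where(front[..., None], rgba_a, rgba_b)
        return rgba, np.minimum(z_a, z_b)

    a, b = np.random.rand(4, 4, 4), np.random.rand(4, 4, 4)
    za = np.ones((4, 4)) * np.inf   # empty buffer: infinitely far away
    zb = np.zeros((4, 4))           # opaque source right at the camera
    rgba, z = composite_pair(a, za, b, zb)
    assert (rgba == b).all() and (z == 0.0).all()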

diff -r 2e6a2228489139dd4e4d83478b196a76751511f2 -r e66249876050d48f1574a68616ce12092e1fa1cb yt/visualization/volume_rendering/tests/test_composite.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/tests/test_composite.py
@@ -0,0 +1,30 @@
+from yt.mods import *
+from yt.testing import \
+    fake_random_pf
+from yt.visualization.volume_rendering.scene import Scene, RenderScene, \
+    create_volume_rendering
+from yt.visualization.volume_rendering.camera import Camera
+from yt.visualization.volume_rendering.zbuffer_array import ZBuffer
+from yt.visualization.volume_rendering.render_source import VolumeSource,\
+    OpaqueSource
+
+
+pf = fake_random_pf(64)
+ds = pf.h.sphere(pf.domain_center, pf.domain_width[0] / 2)
+
+sc = Scene()
+cam = Camera(ds)
+vr = VolumeSource(ds, field=('gas', 'density'))
+sc.add_source(vr)
+vr.build_defaults()
+
+op = OpaqueSource()
+op.set_scene(sc)
+empty = 0.0 * vr.new_image()
+z = np.ones(empty.shape[:2]) * np.inf
+zbuff = ZBuffer(empty, z)
+op.set_zbuffer(zbuff)
+
+sc.add_source(op)
+
+sc.render('test.png')
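
The "empty" buffer the test builds pairs a fully transparent image with an everywhere-infinite depth, so anything rendered later composites in front of it; the same construction with only NumPy:

    import numpy as np

    resolution = (256, 256)
    empty = np.zeros((resolution[0], resolution[1], 4), dtype='float64')
    z = np.ones(empty.shape[:2]) * np.inf  # every pixel infinitely far
    assert empty.shape == (256, 256, 4) and z.shape == (256, 256)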


https://bitbucket.org/yt_analysis/yt/commits/58083bdc28ff/
Changeset:   58083bdc28ff
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-28 20:19:01+00:00
Summary:     Swapping in Lens for Engine
Affected #:  4 files

diff -r e66249876050d48f1574a68616ce12092e1fa1cb -r 58083bdc28ff39f27df92f87d13d6ed0c3fca848 yt/visualization/volume_rendering/engine.py
--- a/yt/visualization/volume_rendering/engine.py
+++ /dev/null
@@ -1,132 +0,0 @@
-"""
-Engine Classes
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.funcs import mylog
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface
-from yt.utilities.lib.grid_traversal import \
-    VolumeRenderSampler
-from camera import Camera
-from yt.units.yt_array import YTArray
-import numpy as np
-
-
-class Engine(ParallelAnalysisInterface):
-
-    """docstring for Engine"""
-
-    def __init__(self, ):
-        super(Engine, self).__init__()
-
-    def camera_updated(self):
-        """docstring for update_camera"""
-        pass
-
-
-class PlaneParallelEngine(Engine):
-
-    """docstring for PlaneParallelEngine"""
-
-    def __init__(self, scene, render_source):
-        super(PlaneParallelEngine, self).__init__()
-        self.scene = scene
-        self.camera = scene.camera
-        self.render_source = render_source
-        self.sub_samples = 5
-        self.num_threads = 0
-        self.double_check = False
-        self.box_vectors = None
-        self.origin = None
-        self.back_center = None
-        self.front_center = None
-
-        if scene.camera:
-            self._setup_box_properties()
-        self.sampler = self.get_sampler()
-
-    def set_camera(self, camera):
-        """set the camera for this engine"""
-        self.camera = camera
-
-    def camera_updated(self):
-        if self.camera._moved:
-            self._setup_box_properties()
-            self.sampler = self.get_sampler()
-            self.camera._moved = False
-
-    def new_image(self):
-        cam = self.scene.camera
-        if cam is None:
-            cam = Camera(self.data_source)
-            self.scene.camera = cam
-        self.current_image = ImageArray(
-            np.zeros((cam.resolution[0], cam.resolution[1],
-                      4), dtype='float64', order='C'),
-            info={'imtype': 'rendering'})
-        return self.current_image
-
-    def _setup_box_properties(self):
-        unit_vectors = self.camera.unit_vectors
-        width = self.camera.width
-        center = self.camera.focus
-        self.box_vectors = YTArray([unit_vectors[0] * width[0],
-                                    unit_vectors[1] * width[1],
-                                    unit_vectors[2] * width[2]])
-        self.origin = center - 0.5 * width.dot(YTArray(unit_vectors, ""))
-        self.back_center = center - 0.5 * width[2] * unit_vectors[2]
-        self.front_center = center + 0.5 * width[2] * unit_vectors[2]
-        mylog.debug('Setting box properties')
-        mylog.debug(self.back_center)
-        mylog.debug(self.front_center)
-
-    def get_sampler(self):
-        self._setup_box_properties()
-        kwargs = {}
-        if self.render_source.zbuffer is not None:
-            kwargs['zbuffer'] = self.render_source.zbuffer.z
-        self.render_source.prepare()
-        image = self.render_source.current_image
-        rotp = np.concatenate([self.scene.camera.inv_mat.ravel('F'),
-                               self.back_center.ravel()])
-        args = (rotp, self.box_vectors[2], self.back_center,
-                (-self.camera.width[0] / 2.0, self.camera.width[0] / 2.0,
-                 -self.camera.width[1] / 2.0, self.camera.width[1] / 2.0),
-                image, self.camera.unit_vectors[
-                    0], self.camera.unit_vectors[1],
-                np.array(self.camera.width, dtype='float64'),
-                self.render_source.transfer_function, self.sub_samples)
-        sampler = VolumeRenderSampler(*args, **kwargs)
-        return sampler
-
-    def run(self):
-        self.camera_updated()
-        total_cells = 0
-        if self.double_check:
-            for brick in self.render_source.volume.bricks:
-                for data in brick.my_data:
-                    if np.any(np.isnan(data)):
-                        raise RuntimeError
-
-        view_pos = self.front_center + \
-            self.camera.unit_vectors[2] * 1.0e6 * self.camera.width[2]
-        for brick in self.render_source.volume.traverse(view_pos):
-            self.sampler(brick, num_threads=self.num_threads)
-            total_cells += np.prod(brick.my_data[0].shape)
-
-        self.render_source.current_image = \
-            self.finalize_image(self.sampler.aimage)
-        return
-
-

diff -r e66249876050d48f1574a68616ce12092e1fa1cb -r 58083bdc28ff39f27df92f87d13d6ed0c3fca848 yt/visualization/volume_rendering/lens.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/lens.py
@@ -0,0 +1,132 @@
+"""
+Lens Classes
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.funcs import mylog
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    ParallelAnalysisInterface
+from yt.utilities.lib.grid_traversal import \
+    VolumeRenderSampler
+from camera import Camera
+from yt.units.yt_array import YTArray
+import numpy as np
+
+
+class Lens(ParallelAnalysisInterface):
+
+    """docstring for Lens"""
+
+    def __init__(self, ):
+        super(Lens, self).__init__()
+
+    def camera_updated(self):
+        """docstring for update_camera"""
+        pass
+
+
+class PlaneParallelLens(Lens):
+
+    """docstring for PlaneParallelLens"""
+
+    def __init__(self, scene, render_source):
+        super(PlaneParallelLens, self).__init__()
+        self.scene = scene
+        self.camera = scene.camera
+        self.render_source = render_source
+        self.sub_samples = 5
+        self.num_threads = 0
+        self.double_check = False
+        self.box_vectors = None
+        self.origin = None
+        self.back_center = None
+        self.front_center = None
+
+        if scene.camera:
+            self._setup_box_properties()
+        self.sampler = self.get_sampler()
+
+    def set_camera(self, camera):
+        """set the camera for this lens"""
+        self.camera = camera
+
+    def camera_updated(self):
+        if self.camera._moved:
+            self._setup_box_properties()
+            self.sampler = self.get_sampler()
+            self.camera._moved = False
+
+    def new_image(self):
+        cam = self.scene.camera
+        if cam is None:
+            cam = Camera(self.data_source)
+            self.scene.camera = cam
+        self.current_image = ImageArray(
+            np.zeros((cam.resolution[0], cam.resolution[1],
+                      4), dtype='float64', order='C'),
+            info={'imtype': 'rendering'})
+        return self.current_image
+
+    def _setup_box_properties(self):
+        unit_vectors = self.camera.unit_vectors
+        width = self.camera.width
+        center = self.camera.focus
+        self.box_vectors = YTArray([unit_vectors[0] * width[0],
+                                    unit_vectors[1] * width[1],
+                                    unit_vectors[2] * width[2]])
+        self.origin = center - 0.5 * width.dot(YTArray(unit_vectors, ""))
+        self.back_center = center - 0.5 * width[2] * unit_vectors[2]
+        self.front_center = center + 0.5 * width[2] * unit_vectors[2]
+        mylog.debug('Setting box properties')
+        mylog.debug(self.back_center)
+        mylog.debug(self.front_center)
+
+    def get_sampler(self):
+        self._setup_box_properties()
+        kwargs = {}
+        if self.render_source.zbuffer is not None:
+            kwargs['zbuffer'] = self.render_source.zbuffer.z
+        self.render_source.prepare()
+        image = self.render_source.current_image
+        rotp = np.concatenate([self.scene.camera.inv_mat.ravel('F'),
+                               self.back_center.ravel()])
+        args = (rotp, self.box_vectors[2], self.back_center,
+                (-self.camera.width[0] / 2.0, self.camera.width[0] / 2.0,
+                 -self.camera.width[1] / 2.0, self.camera.width[1] / 2.0),
+                image, self.camera.unit_vectors[
+                    0], self.camera.unit_vectors[1],
+                np.array(self.camera.width, dtype='float64'),
+                self.render_source.transfer_function, self.sub_samples)
+        sampler = VolumeRenderSampler(*args, **kwargs)
+        return sampler
+
+    def run(self):
+        self.camera_updated()
+        total_cells = 0
+        if self.double_check:
+            for brick in self.render_source.volume.bricks:
+                for data in brick.my_data:
+                    if np.any(np.isnan(data)):
+                        raise RuntimeError
+
+        view_pos = self.front_center + \
+            self.camera.unit_vectors[2] * 1.0e6 * self.camera.width[2]
+        for brick in self.render_source.volume.traverse(view_pos):
+            self.sampler(brick, num_threads=self.num_threads)
+            total_cells += np.prod(brick.my_data[0].shape)
+
+        self.render_source.current_image = \
+            self.finalize_image(self.sampler.aimage)
+        return
+
+
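
run() above pushes the viewpoint roughly 1e6 box widths past the front plane so that the cast rays are effectively parallel; a standalone sketch of that trick (hypothetical helper name, plain NumPy):

    import numpy as np

    def parallel_viewpoint(front_center, view_dir, depth):
        # Far enough away that all rays are, to machine precision,
        # parallel to the view direction.
        return front_center + view_dir * 1.0e6 * depth

    front_center = np.array([0.5, 0.5, 1.0])
    view_dir = np.array([0.0, 0.0, 1.0])
    print(parallel_viewpoint(front_center, view_dir, depth=1.0))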

diff -r e66249876050d48f1574a68616ce12092e1fa1cb -r 58083bdc28ff39f27df92f87d13d6ed0c3fca848 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -19,7 +19,7 @@
     ParallelAnalysisInterface
 from yt.utilities.amr_kdtree.api import AMRKDTree
 from transfer_function_helper import TransferFunctionHelper
-from engine import PlaneParallelEngine
+from lens import PlaneParallelLens
 from camera import Camera
 
 
@@ -31,7 +31,7 @@
     def __init__(self):
         super(RenderSource, self).__init__()
         self.opaque = False
-        self.engine = None
+        self.lens = None
         self.zbuffer = None
 
     def setup(self):
@@ -40,8 +40,8 @@
 
     def set_scene(self, scene):
         self.scene = scene
-        if self.engine is not None:
-            self.engine.set_camera(scene.camera)
+        if self.lens is not None:
+            self.lens.set_camera(scene.camera)
 
     def render(self, zbuffer=None):
         pass
@@ -80,7 +80,7 @@
         self.scene = None
         self.volume = None
         self.current_image = None
-        self.engine = None
+        self.lens = None
 
         # In the future these will merge
         self.transfer_function = None
@@ -90,7 +90,7 @@
     def build_defaults(self):
         if self.data_source is not None:
             self.build_default_transfer_function()
-            self.build_default_engine()
+            self.build_default_lens()
 
     def validate(self):
         """Make sure that all dependencies have been met"""
@@ -103,8 +103,8 @@
         if self.volume is None:
             raise RuntimeError("Volume not initialized")
 
-        if self.engine is None:
-            raise RuntimeError("Engine not initialized")
+        if self.lens is None:
+            raise RuntimeError("Lens not initialized")
 
         if self.transfer_function is None:
             raise RuntimeError("Transfer Function not Supplied")
@@ -123,8 +123,8 @@
         self.scene.validate()
         self.new_image()
 
-    def build_default_engine(self):
-        self.engine = PlaneParallelEngine(self.scene, self)
+    def build_default_lens(self):
+        self.lens = PlaneParallelLens(self.scene, self)
 
     def build_default_volume(self):
         self.volume = AMRKDTree(self.data_source.pf,
@@ -134,7 +134,7 @@
 
     def set_camera(self, camera):
         """Set camera in this object, as well as any attributes"""
-        self.engine.set_camera(camera)
+        self.lens.set_camera(camera)
 
     def teardown(self):
         """docstring for teardown"""
@@ -148,7 +148,7 @@
         """docstring for request"""
         self.zbuffer = zbuffer
         self.prepare()
-        self.engine.run()
+        self.lens.run()
 
         self.camera_updated()
         total_cells = 0

diff -r e66249876050d48f1574a68616ce12092e1fa1cb -r 58083bdc28ff39f27df92f87d13d6ed0c3fca848 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -23,11 +23,11 @@
 
 class SceneHandle(object):
     """docstring for SceneHandle"""
-    def __init__(self, scene, camera, source, engine):
+    def __init__(self, scene, camera, source, lens):
         self.scene = scene
         self.camera = camera
         self.source = source
-        self.engine = engine
+        self.lens = lens
 
     def __repr__(self):
         desc = super(SceneHandle, self).__repr__()
@@ -39,7 +39,7 @@
         desc += ".scene: " + self.scene.__repr__() + "\n"
         desc += ".camera: " + self.camera.__repr__() + "\n"
         desc += ".source: " + self.source.__repr__() + "\n"
-        desc += ".engine: " + self.engine.__repr__() + "\n"
+        desc += ".lens: " + self.lens.__repr__() + "\n"
         return desc
 
 
@@ -149,7 +149,7 @@
         if key is None:
             key = self.sources.keys()[0]
         handle = SceneHandle(self, self.camera, self.sources[key],
-                             self.sources[key].engine)
+                             self.sources[key].lens)
         return handle
 
 


https://bitbucket.org/yt_analysis/yt/commits/db7eaea87011/
Changeset:   db7eaea87011
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-29 19:43:00+00:00
Summary:     Moving computation/composition around. test_scene runs, but I think the underpinnings are too complex
Affected #:  7 files

diff -r 58083bdc28ff39f27df92f87d13d6ed0c3fca848 -r db7eaea8701162df370ca685f68a5261a6934943 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -26,9 +26,9 @@
 
     _moved = True
 
-    def __init__(self, data_source=None):
+    def __init__(self):
         """Initialize a Camera Instance"""
-        self.data_source = data_source
+        self.lens = None
         self.position = None
         self.north_vector = None
         self.resolution = (256, 256)
@@ -36,21 +36,25 @@
         self.width = None
         self.focus = np.zeros(3)
         self.position = np.ones(3)
-        if data_source is not None:
-            self.inherit_default_from_data_source()
-        else:
-            super(Camera, self).__init__(self.focus - self.position,
-                                         self.north_vector, steady_north=False)
+        super(Camera, self).__init__(self.focus - self.position,
+                                     self.north_vector, steady_north=False)
 
-    def inherit_default_from_data_source(self):
-        data_source = self.data_source
+    def set_lens(self, lens):
+        self.lens = lens
+
+    def get_lens(self, lens):
+        if self.lens is None:
+            raise RuntimeError("I have no lens!")
+        return self.lens
+
+    def set_defaults_from_data_source(self, data_source):
         self.position = data_source.pf.domain_right_edge
 
         width = 1.5 * data_source.pf.domain_width.max()
         (xmi, xma), (ymi, yma), (zmi, zma) = \
             data_source.quantities['Extrema'](['x', 'y', 'z'])
-        width = np.sqrt((xma-xmi)**2 + (yma-ymi)**2 + (zma-zmi)**2) /\
-            np.sqrt(3)
+        width = np.sqrt((xma - xmi) ** 2 + (yma - ymi) ** 2 +
+                        (zma - zmi) ** 2) / np.sqrt(3)
         focus = data_source.get_field_parameter('center')
 
         if iterable(width) and len(width) > 1 and isinstance(width[1], str):
@@ -60,7 +64,7 @@
         if not iterable(width):
             width = (width, width, width)  # left/right, top/bottom, front/back
         if not isinstance(width, YTArray):
-            width = self.data_source.pf.arr(width, input_units="code_length")
+            width = data_source.pf.arr(width, input_units="code_length")
         if not isinstance(focus, YTArray):
             focus = self.pf.arr(focus, input_units="code_length")
 
@@ -72,10 +76,11 @@
         self._moved = True
 
     def switch_orientation(self, normal_vector=None, north_vector=None):
-        r"""Change the view direction based on any of the orientation parameters.
+        r"""
+        Change the view direction based on any of the orientation parameters.
 
-        This will recalculate all the necessary vectors and vector planes related
-        to an orientable object.
+        This will recalculate all the necessary vectors and vector planes
+        related to an orientable object.
 
         Parameters
         ----------
@@ -110,7 +115,7 @@
         if normal_vector is None:
             normal_vector = self.normal_vector
         self.switch_orientation(normal_vector=normal_vector,
-                                         north_vector=north_vector)
+                                north_vector=north_vector)
         self._moved = True
 
     def pitch(self, theta):
@@ -126,6 +131,7 @@
         Examples
         --------
 
+        >>> cam = Camera()
         >>> cam.roll(np.pi/4)
         """
         rot_vector = self.unit_vectors[0]
@@ -149,6 +155,7 @@
         Examples
         --------
 
+        >>> cam = Camera()
         >>> cam.roll(np.pi/4)
         """
         rot_vector = self.unit_vectors[1]
@@ -169,6 +176,7 @@
         Examples
         --------
 
+        >>> cam = Camera()
         >>> cam.roll(np.pi/4)
         """
         rot_vector = self.unit_vectors[2]
@@ -178,4 +186,3 @@
             north_vector=np.dot(R, self.unit_vectors[1]))
         if self.steady_north:
             self.north_vector = np.dot(R, self.north_vector)
-
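
The width heuristic in set_defaults_from_data_source reduces to the diagonal of the extrema box divided by sqrt(3), so a cubic region yields its edge length; a standalone sketch (hypothetical helper, plain tuples in place of the data-source extrema):

    import numpy as np

    def default_width(extrema):
        (xmi, xma), (ymi, yma), (zmi, zma) = extrema
        return np.sqrt((xma - xmi) ** 2 + (yma - ymi) ** 2 +
                       (zma - zmi) ** 2) / np.sqrt(3)

    print(default_width([(0.0, 1.0), (0.0, 1.0), (0.0, 1.0)]))  # -> 1.0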

diff -r 58083bdc28ff39f27df92f87d13d6ed0c3fca848 -r db7eaea8701162df370ca685f68a5261a6934943 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -20,6 +20,7 @@
     VolumeRenderSampler
 from camera import Camera
 from yt.units.yt_array import YTArray
+from yt.data_objects.image_array import ImageArray
 import numpy as np
 
 
@@ -30,20 +31,13 @@
     def __init__(self, ):
         super(Lens, self).__init__()
 
-    def camera_updated(self):
-        """docstring for update_camera"""
-        pass
-
 
 class PlaneParallelLens(Lens):
 
     """docstring for PlaneParallelLens"""
 
-    def __init__(self, scene, render_source):
+    def __init__(self):
         super(PlaneParallelLens, self).__init__()
-        self.scene = scene
-        self.camera = scene.camera
-        self.render_source = render_source
         self.sub_samples = 5
         self.num_threads = 0
         self.double_check = False
@@ -51,82 +45,83 @@
         self.origin = None
         self.back_center = None
         self.front_center = None
+        self.sampler = None
 
-        if scene.camera:
-            self._setup_box_properties()
-        self.sampler = self.get_sampler()
+    def expose(self, scene, camera, render_source):
+        self.setup_box_properties(camera)
+        self.sampler = self.get_sampler(scene, camera, render_source)
+        self.cast_rays(camera, self.sampler, render_source)
 
-    def set_camera(self, camera):
-        """set the camera for this lens"""
-        self.camera = camera
-
-    def camera_updated(self):
-        if self.camera._moved:
-            self._setup_box_properties()
-            self.sampler = self.get_sampler()
-            self.camera._moved = False
-
-    def new_image(self):
-        cam = self.scene.camera
+    def new_image(self, camera):
+        cam = camera
         if cam is None:
             cam = Camera(self.data_source)
-            self.scene.camera = cam
         self.current_image = ImageArray(
             np.zeros((cam.resolution[0], cam.resolution[1],
                       4), dtype='float64', order='C'),
             info={'imtype': 'rendering'})
         return self.current_image
 
-    def _setup_box_properties(self):
-        unit_vectors = self.camera.unit_vectors
-        width = self.camera.width
-        center = self.camera.focus
+    def setup_box_properties(self, camera):
+        unit_vectors = camera.unit_vectors
+        width = camera.width
+        center = camera.focus
         self.box_vectors = YTArray([unit_vectors[0] * width[0],
                                     unit_vectors[1] * width[1],
                                     unit_vectors[2] * width[2]])
         self.origin = center - 0.5 * width.dot(YTArray(unit_vectors, ""))
         self.back_center = center - 0.5 * width[2] * unit_vectors[2]
         self.front_center = center + 0.5 * width[2] * unit_vectors[2]
-        mylog.debug('Setting box properties')
-        mylog.debug(self.back_center)
-        mylog.debug(self.front_center)
 
-    def get_sampler(self):
-        self._setup_box_properties()
+    def get_sampler(self, scene, camera, render_source):
         kwargs = {}
-        if self.render_source.zbuffer is not None:
-            kwargs['zbuffer'] = self.render_source.zbuffer.z
-        self.render_source.prepare()
-        image = self.render_source.current_image
-        rotp = np.concatenate([self.scene.camera.inv_mat.ravel('F'),
+        if render_source.zbuffer is not None:
+            kwargs['zbuffer'] = render_source.zbuffer.z
+        render_source.prepare()
+        image = render_source.current_image
+        image = self.new_image(camera)
+        rotp = np.concatenate([camera.inv_mat.ravel('F'),
                                self.back_center.ravel()])
         args = (rotp, self.box_vectors[2], self.back_center,
-                (-self.camera.width[0] / 2.0, self.camera.width[0] / 2.0,
-                 -self.camera.width[1] / 2.0, self.camera.width[1] / 2.0),
-                image, self.camera.unit_vectors[
-                    0], self.camera.unit_vectors[1],
-                np.array(self.camera.width, dtype='float64'),
-                self.render_source.transfer_function, self.sub_samples)
+                (-camera.width[0] / 2.0, camera.width[0] / 2.0,
+                 -camera.width[1] / 2.0, camera.width[1] / 2.0),
+                image, camera.unit_vectors[
+                    0], camera.unit_vectors[1],
+                np.array(camera.width, dtype='float64'),
+                render_source.transfer_function, self.sub_samples)
         sampler = VolumeRenderSampler(*args, **kwargs)
         return sampler
 
-    def run(self):
-        self.camera_updated()
+    def cast_rays(self, camera, sampler, render_source):
+        mylog.debug("Casting rays")
         total_cells = 0
         if self.double_check:
-            for brick in self.render_source.volume.bricks:
+            for brick in render_source.volume.bricks:
                 for data in brick.my_data:
                     if np.any(np.isnan(data)):
                         raise RuntimeError
 
+        # This is a hack that should be replaced by an alternate plane-parallel
+        # traversal. Put the camera really far away so that the effective
+        # viewpoint is infinitely far away, making for parallel rays.
         view_pos = self.front_center + \
-            self.camera.unit_vectors[2] * 1.0e6 * self.camera.width[2]
-        for brick in self.render_source.volume.traverse(view_pos):
-            self.sampler(brick, num_threads=self.num_threads)
+            camera.unit_vectors[2] * 1.0e6 * camera.width[2]
+
+        for brick in render_source.volume.traverse(view_pos):
+            sampler(brick, num_threads=self.num_threads)
             total_cells += np.prod(brick.my_data[0].shape)
+        mylog.debug("Done casting rays")
 
-        self.render_source.current_image = \
-            self.finalize_image(self.sampler.aimage)
+        render_source.current_image = \
+            self.finalize_image(camera, render_source,
+                                self.sampler.aimage)
         return
 
-
+    def finalize_image(self, camera, render_source, image):
+        cam = camera
+        view_pos = self.front_center + cam.unit_vectors[2] * \
+            1.0e6 * cam.width[2]
+        image = render_source.volume.reduce_tree_images(image, view_pos)
+        if render_source.transfer_function.grey_opacity is False:
+            image[:, :, 3] = 1.0
+        return image
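
expose() now owns the whole exposure sequence; a stub sketch of just the orchestration (print statements stand in for the real work):

    class LensPipeline(object):
        def setup_box_properties(self, camera):
            print('1: derive origin, back_center, front_center')

        def get_sampler(self, scene, camera, render_source):
            print('2: build a sampler from camera + source')

        def cast_rays(self, camera, sampler, render_source):
            print('3: traverse bricks and sample each one')

        def expose(self, scene, camera, render_source):
            self.setup_box_properties(camera)
            sampler = self.get_sampler(scene, camera, render_source)
            self.cast_rays(camera, sampler, render_source)

    LensPipeline().expose(scene=None, camera=None, render_source=None)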

diff -r 58083bdc28ff39f27df92f87d13d6ed0c3fca848 -r db7eaea8701162df370ca685f68a5261a6934943 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -14,6 +14,7 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
+from yt.funcs import mylog
 from yt.data_objects.api import ImageArray
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
@@ -90,7 +91,6 @@
     def build_defaults(self):
         if self.data_source is not None:
             self.build_default_transfer_function()
-            self.build_default_lens()
 
     def validate(self):
         """Make sure that all dependencies have been met"""
@@ -103,13 +103,17 @@
         if self.volume is None:
             raise RuntimeError("Volume not initialized")
 
-        if self.lens is None:
-            raise RuntimeError("Lens not initialized")
+        #if self.lens is None:
+        #    raise RuntimeError("Lens not initialized")
 
         if self.transfer_function is None:
             raise RuntimeError("Transfer Function not Supplied")
         self.setup()
 
+    def switch_field(self, field):
+        self.field = field
+        log_fields = [self.data_source.pf.field_info[field].take_log]
+        self.volume.set_fields([field], log_fields, True)
+
     def build_default_transfer_function(self):
         self.tfh = \
             TransferFunctionHelper(self.data_source.pf)
@@ -123,13 +127,11 @@
         self.scene.validate()
         self.new_image()
 
-    def build_default_lens(self):
-        self.lens = PlaneParallelLens(self.scene, self)
-
     def build_default_volume(self):
         self.volume = AMRKDTree(self.data_source.pf,
                                 data_source=self.data_source)
         log_fields = [self.data_source.pf.field_info[self.field].take_log]
+        mylog.debug('Log Fields:' + str(log_fields))
         self.volume.set_fields([self.field], log_fields, True)
 
     def set_camera(self, camera):
@@ -169,11 +171,3 @@
 
         return self.current_image
 
-    def finalize_image(self, image):
-        cam = self.scene.camera
-        view_pos = self.front_center + cam.unit_vectors[2] * \
-            1.0e6 * cam.width[2]
-        image = self.render_source.volume.reduce_tree_images(image, view_pos)
-        if self.transfer_function.grey_opacity is False:
-            image[:, :, 3] = 1.0
-        return image

diff -r 58083bdc28ff39f27df92f87d13d6ed0c3fca848 -r db7eaea8701162df370ca685f68a5261a6934943 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -52,7 +52,7 @@
     def __init__(self):
         super(Scene, self).__init__()
         self.sources = {}
-        self.default_camera = None
+        self.camera = None
 
     def iter_opaque_sources(self):
         """

diff -r 58083bdc28ff39f27df92f87d13d6ed0c3fca848 -r db7eaea8701162df370ca685f68a5261a6934943 yt/visualization/volume_rendering/tests/test_scene.py
--- a/yt/visualization/volume_rendering/tests/test_scene.py
+++ b/yt/visualization/volume_rendering/tests/test_scene.py
@@ -17,59 +17,50 @@
 from yt.mods import *
 from yt.testing import \
     fake_random_pf
-from yt.visualization.volume_rendering.scene import Scene, RenderScene, \
+from yt.visualization.volume_rendering.scene import Scene, \
     create_volume_rendering
+from yt.visualization.volume_rendering.lens import PlaneParallelLens
 from yt.visualization.volume_rendering.camera import Camera
 from yt.visualization.volume_rendering.render_source import VolumeSource
 
+#pf = fake_random_pf(64)
+pf = load('/home/skillman/kipac/data/IsolatedGalaxy/galaxy0030/galaxy0030')
+ds = pf.h.sphere(pf.domain_center, pf.domain_width[0] / 50)
 
-# def test_default(pf):
-#     sc = RenderScene(pf)
-#     for k, im in sc.render().iteritems():
-#         write_bitmap(im, 'scene_%s.png' % k)
-#     return sc
-# 
-# 
-# def test_data_source(pf):
-#     sc = Scene()
-#     ds = pf.h.sphere(pf.domain_center, pf.domain_width[0] / 3)
-#     vol = VolumeSource(ds, field=('gas', 'density'))
-#     cam = Camera(ds)
-#     sc.set_camera(cam)
-#     sc.add_source(vol)
-# 
-#     vol.build_defaults()
-#     for k, im in sc.render().iteritems():
-#         write_bitmap(im, 'data_scene_%s.png' % k)
-# 
-# 
-# def test_two_source(pf):
-#     sc = Scene()
-#     vol = VolumeSource(pf.h.sphere(pf.domain_center, pf.domain_width[0] / 3),
-#                        field=('gas', 'density'))
-#     sc.add_source(vol)
-#     vol.build_defaults()
-# 
-#     vol = VolumeSource(pf.h.sphere(pf.domain_center / 3,
-#                                    pf.domain_width[0] / 3),
-#                        field=('gas', 'density'))
-#     sc.add_source(vol)
-#     for k, im in sc.render().iteritems():
-#         write_bitmap(im, 'muliple_scene_%s.png' % k)
-# 
 
-#pf = load('/home/skillman/kipac/data/IsolatedGalaxy/galaxy0030/galaxy0030')
-pf = fake_random_pf(64)
-ds = pf.h.sphere(pf.domain_center, pf.domain_width[0] / 2)
-sc = create_volume_rendering(ds, field=('gas', 'density'))
-sc.render('test.png')
+sc = Scene()
+vol = VolumeSource(ds, field=('gas', 'density'))
+cam = Camera()
+cam.resolution = (512, 512)
+lens = PlaneParallelLens()
+sc.camera = cam
 
-h = sc.get_handle()
-h.source.transfer_function.grey_opacity = True
-h.source.transfer_function.map_to_colormap(-2, 0.0, scale=50.0, colormap='RdBu_r')
+vol.build_defaults()
+vol.transfer_function.grey_opacity=False
+sc.add_source(vol)
+cam.set_defaults_from_data_source(ds)
+cam.set_lens(lens)
 
-cam = h.camera
-for i in range(36):
-    cam.pitch(-2*np.pi / 36.)
-    sc.render('test_%04i.png' % i)
+lens.expose(sc, cam, vol)
+vol.current_image.write_png('test.png', clip_ratio=6.0)
 
+vol.set_field(('io','Density'))
+vol.build_defaults()
+lens.expose(sc, cam, vol)
+vol.current_image.write_png('test_op.png', clip_ratio=6.0)
+
+#sc.render()
+
+
+#sc = create_volume_rendering(ds, field=('gas', 'density'))
+#sc.render('test.png')
+#
+#h = sc.get_handle()
+#h.source.transfer_function.grey_opacity = True
+#h.source.transfer_function.map_to_colormap(-2, 0.0, scale=50.0, colormap='RdBu_r')
+#
+#cam = h.camera
+#for i in range(36):
+#    cam.pitch(-2*np.pi / 36.)
+#    sc.render('test_%04i.png' % i)
+

diff -r 58083bdc28ff39f27df92f87d13d6ed0c3fca848 -r db7eaea8701162df370ca685f68a5261a6934943 yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -139,7 +139,7 @@
             mi, ma = np.log10(self.bounds[0]), np.log10(self.bounds[1])
         else:
             mi, ma = self.bounds
-        self.tf.add_layers(7, col_bounds=[mi, ma], colormap='RdBu_r')
+        self.tf.add_layers(10, colormap='spectral')
 
     def plot(self, fn=None, profile_field=None, profile_weight=None):
         """

diff -r 58083bdc28ff39f27df92f87d13d6ed0c3fca848 -r db7eaea8701162df370ca685f68a5261a6934943 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -740,7 +740,9 @@
             dist = (col_bounds[1] - col_bounds[0])
             if mi is None: mi = col_bounds[0] + dist/(10.0*N)
             if ma is None: ma = col_bounds[1] - dist/(10.0*N)
-        if w is None: w = 0.001 * (ma-mi)/N
+        if w is None:
+            w = 0.001 * (ma - mi) / N
+            w = max(w, 1.0 / self.nbins)
         if alpha is None and self.grey_opacity:
             alpha = np.ones(N, dtype="float64")
         elif alpha is None and not self.grey_opacity:
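
The clamp added to add_layers keeps a layer's Gaussian width from dropping below one transfer-function bin, where it would never be sampled; the rule as a minimal sketch (hypothetical standalone function):

    def layer_width(mi, ma, N, nbins):
        w = 0.001 * (ma - mi) / N   # default width per layer
        return max(w, 1.0 / nbins)  # never narrower than one bin

    print(layer_width(mi=-2.0, ma=0.0, N=10, nbins=256))  # 1/256, not 2e-4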


https://bitbucket.org/yt_analysis/yt/commits/f82283884e6a/
Changeset:   f82283884e6a
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-29 21:46:22+00:00
Summary:     I would go so far as to call this alpha. The reorg is nearly complete. This reduces the amount of work done in __init__s. Bundling
a few things in utils.py.
Affected #:  6 files

diff -r db7eaea8701162df370ca685f68a5261a6934943 -r f82283884e6af705ed15a94e51c2d87f42ebde45 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -13,10 +13,12 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.funcs import iterable
+from yt.funcs import iterable, mylog
 from yt.utilities.orientation import Orientation
 from yt.units.yt_array import YTArray
 from yt.utilities.math_utils import get_rotation_matrix
+from utils import data_source_or_all
+from lens import lenses
 import numpy as np
 
 
@@ -26,7 +28,8 @@
 
     _moved = True
 
-    def __init__(self):
+    def __init__(self, data_source=None, lens_type='plane-parallel'):
         """Initialize a Camera Instance"""
+        mylog.debug("Entering %s" % str(self))
         self.lens = None
         self.position = None
@@ -36,16 +39,24 @@
         self.width = None
         self.focus = np.zeros(3)
         self.position = np.ones(3)
+        self.set_lens(lens_type)
+        if data_source is not None:
+            data_source = data_source_or_all(data_source)
+            self.set_defaults_from_data_source(data_source)
+
         super(Camera, self).__init__(self.focus - self.position,
                                      self.north_vector, steady_north=False)
 
-    def set_lens(self, lens):
-        self.lens = lens
+    def get_sampler_params(self):
+        lens_params = self.lens.get_sampler_params(self)
+        lens_params.update(width=self.width)
+        return lens_params
 
-    def get_lens(self, lens):
-        if self.lens is None:
-            raise RuntimeError("I have no lens!")
-        return self.lens
+    def set_lens(self, lens_type):
+        if lens_type not in lenses:
+            mylog.error("Lens type not available")
+            raise RuntimeError()
+        self.lens = lenses[lens_type]()
 
     def set_defaults_from_data_source(self, data_source):
         self.position = data_source.pf.domain_right_edge
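
set_lens now resolves a string key against the module-level lenses registry; the pattern in a self-contained sketch (placeholder classes, not the real lenses):

    class PlaneParallel(object):
        pass

    class Perspective(object):
        pass

    lenses = {'plane-parallel': PlaneParallel, 'perspective': Perspective}

    def make_lens(lens_type):
        if lens_type not in lenses:
            raise RuntimeError("Lens type not available")
        return lenses[lens_type]()  # instantiate on demand

    print(type(make_lens('plane-parallel')).__name__)  # PlaneParallel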

diff -r db7eaea8701162df370ca685f68a5261a6934943 -r f82283884e6af705ed15a94e51c2d87f42ebde45 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -16,9 +16,6 @@
 from yt.funcs import mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
-from yt.utilities.lib.grid_traversal import \
-    VolumeRenderSampler
-from camera import Camera
 from yt.units.yt_array import YTArray
 from yt.data_objects.image_array import ImageArray
 import numpy as np
@@ -29,7 +26,9 @@
     """docstring for Lens"""
 
     def __init__(self, ):
+        mylog.debug("Entering %s" % str(self))
         super(Lens, self).__init__()
+        self.viewpoint = None
 
 
 class PlaneParallelLens(Lens):
@@ -37,6 +36,7 @@
     """docstring for PlaneParallelLens"""
 
     def __init__(self):
+        mylog.debug("Entering %s" % str(self))
         super(PlaneParallelLens, self).__init__()
         self.sub_samples = 5
         self.num_threads = 0
@@ -46,6 +46,10 @@
         self.back_center = None
         self.front_center = None
         self.sampler = None
+        self.viewpoint = None
+
+    def set_camera(self, camera):
+        self.setup_box_properties(camera)
 
     def expose(self, scene, camera, render_source):
         self.setup_box_properties(camera)
@@ -53,11 +57,8 @@
         self.cast_rays(camera, self.sampler, render_source)
 
     def new_image(self, camera):
-        cam = camera
-        if cam is None:
-            cam = Camera(self.data_source)
         self.current_image = ImageArray(
-            np.zeros((cam.resolution[0], cam.resolution[1],
+            np.zeros((camera.resolution[0], camera.resolution[1],
                       4), dtype='float64', order='C'),
             info={'imtype': 'rendering'})
         return self.current_image
@@ -73,55 +74,40 @@
         self.back_center = center - 0.5 * width[2] * unit_vectors[2]
         self.front_center = center + 0.5 * width[2] * unit_vectors[2]
 
-    def get_sampler(self, scene, camera, render_source):
-        kwargs = {}
-        if render_source.zbuffer is not None:
-            kwargs['zbuffer'] = render_source.zbuffer.z
-        render_source.prepare()
-        image = render_source.current_image
-        image = self.new_image(camera)
-        rotp = np.concatenate([camera.inv_mat.ravel('F'),
-                               self.back_center.ravel()])
-        args = (rotp, self.box_vectors[2], self.back_center,
-                (-camera.width[0] / 2.0, camera.width[0] / 2.0,
-                 -camera.width[1] / 2.0, camera.width[1] / 2.0),
-                image, camera.unit_vectors[
-                    0], camera.unit_vectors[1],
-                np.array(camera.width, dtype='float64'),
-                render_source.transfer_function, self.sub_samples)
-        sampler = VolumeRenderSampler(*args, **kwargs)
-        return sampler
-
-    def cast_rays(self, camera, sampler, render_source):
-        mylog.debug("Casting rays")
-        total_cells = 0
-        if self.double_check:
-            for brick in render_source.volume.bricks:
-                for data in brick.my_data:
-                    if np.any(np.isnan(data)):
-                        raise RuntimeError
-
         # This is a hack that should be replaced by an alternate plane-parallel
         # traversal. Put the camera really far away so that the effective
         # viewpoint is infinitely far away, making for parallel rays.
-        view_pos = self.front_center + \
+        self.viewpoint = self.front_center + \
             camera.unit_vectors[2] * 1.0e6 * camera.width[2]
 
-        for brick in render_source.volume.traverse(view_pos):
-            sampler(brick, num_threads=self.num_threads)
-            total_cells += np.prod(brick.my_data[0].shape)
-        mylog.debug("Done casting rays")
+    def get_sampler_params(self, camera):
+        sampler_params =\
+            dict(vp_pos=np.concatenate([camera.inv_mat.ravel('F'),
+                                        self.back_center.ravel()]),
+                 vp_dir=self.box_vectors[2],  # All the same
+                 center=self.back_center,
+                 bounds=(-camera.width[0] / 2.0, camera.width[0] / 2.0,
+                         -camera.width[1] / 2.0, camera.width[1] / 2.0),
+                 x_vec=camera.unit_vectors[0],
+                 y_vec=camera.unit_vectors[1],
+                 width=np.array(camera.width, dtype='float64'),
+                 image=self.new_image(camera))
+        return sampler_params
 
-        render_source.current_image = \
-            self.finalize_image(camera, render_source,
-                                         self.sampler.aimage)
-        return
 
-    def finalize_image(self, camera, render_source, image):
-        cam = camera
-        view_pos = self.front_center + cam.unit_vectors[2] * \
-            1.0e6 * cam.width[2]
-        image = render_source.volume.reduce_tree_images(image, view_pos)
-        if render_source.transfer_function.grey_opacity is False:
-            image[:, :, 3] = 1.0
-        return image
+class PerspectiveLens(Lens):
+    """docstring for PerspectiveLens"""
+    def __init__(self):
+        super(PerspectiveLens, self).__init__()
+        raise NotImplementedError
+
+
+class FisheyeLens(Lens):
+    """docstring for FisheyeLens"""
+    def __init__(self):
+        super(FisheyeLens, self).__init__()
+        raise NotImplementedError
+
+lenses = {'plane-parallel': PlaneParallelLens,
+          'perspective': PerspectiveLens,
+          'fisheye': FisheyeLens}
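
The lens now hands its inputs to the sampler as a plain dict that the camera extends before construction; a trimmed sketch of that handoff showing only the bounds and image entries (assuming NumPy; not the full parameter set):

    import numpy as np

    def get_sampler_params(width, resolution):
        return dict(bounds=(-width / 2.0, width / 2.0,
                            -width / 2.0, width / 2.0),
                    image=np.zeros((resolution[0], resolution[1], 4),
                                   dtype='float64'))

    params = get_sampler_params(1.0, (256, 256))
    params.update(width=1.0)  # the camera layers in its own entries
    print(sorted(params.keys()))  # ['bounds', 'image', 'width']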

diff -r db7eaea8701162df370ca685f68a5261a6934943 -r f82283884e6af705ed15a94e51c2d87f42ebde45 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -15,13 +15,11 @@
 
 import numpy as np
 from yt.funcs import mylog
-from yt.data_objects.api import ImageArray
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
 from yt.utilities.amr_kdtree.api import AMRKDTree
 from transfer_function_helper import TransferFunctionHelper
-from lens import PlaneParallelLens
-from camera import Camera
+from utils import new_volume_render_sampler, data_source_or_all
 
 
 class RenderSource(ParallelAnalysisInterface):
@@ -30,89 +28,69 @@
        streamlines, etc"""
 
     def __init__(self):
+        mylog.debug("Entering %s" % str(self))
         super(RenderSource, self).__init__()
         self.opaque = False
-        self.lens = None
         self.zbuffer = None
 
-    def setup(self):
-        """Set up data needed to render"""
-        pass
-
-    def set_scene(self, scene):
-        self.scene = scene
-        if self.lens is not None:
-            self.lens.set_camera(scene.camera)
-
     def render(self, zbuffer=None):
         pass
 
     def validate(self):
         pass
 
-    def new_image(self):
-        pass
-
-    def prepare(self):
-        pass
-
-    def get_default_camera(self):
-        """If possible, create a camera based on the render source"""
-        return None
-
 
 class OpaqueSource(RenderSource):
     """docstring for OpaqueSource"""
     def __init__(self):
+        mylog.debug("Entering %s" % str(self))
         super(OpaqueSource, self).__init__()
         self.opaque = True
 
     def set_zbuffer(self, zbuffer):
         self.zbuffer = zbuffer
 
+
 class VolumeSource(RenderSource):
 
     """docstring for VolumeSource"""
+    _image = None
 
-    def __init__(self, data_source, field=None):
+    def __init__(self, data_source, field, auto=True):
+        mylog.debug("Entering %s" % str(self))
         super(VolumeSource, self).__init__()
-        self.data_source = data_source
+        self.data_source = data_source_or_all(data_source)
         self.field = field
-        self.scene = None
         self.volume = None
         self.current_image = None
-        self.lens = None
+        self.double_check = False
+        self.num_threads = 0
+        self.num_samples = 10
+
+        # Error checking
+        assert(self.field is not None)
+        assert(self.data_source is not None)
 
         # In the future these will merge
         self.transfer_function = None
         self.tfh = None
-        self.build_default_volume()
+        if auto:
+            self.build_defaults()
 
     def build_defaults(self):
-        if self.data_source is not None:
-            self.build_default_transfer_function()
+        self.build_default_volume()
+        self.build_default_transfer_function()
 
     def validate(self):
         """Make sure that all dependencies have been met"""
-        if self.scene is None:
-            raise RuntimeError("Scene not initialized")
-
         if self.data_source is None:
             raise RuntimeError("Data source not initialized")
 
         if self.volume is None:
             raise RuntimeError("Volume not initialized")
 
-        #if self.lens is None:
-        #    raise RuntimeError("Lens not initialized")
-
         if self.transfer_function is None:
             raise RuntimeError("Transfer Function not Supplied")
-        self.setup()
-
-    def switch_field(self, field):
-        self.volume.set_fields([self.field], log_fields, True)
-
 
     def build_default_transfer_function(self):
         self.tfh = \
@@ -122,11 +100,6 @@
         self.tfh.setup_default()
         self.transfer_function = self.tfh.tf
 
-    def prepare(self):
-        """prepare for rendering"""
-        self.scene.validate()
-        self.new_image()
-
     def build_default_volume(self):
         self.volume = AMRKDTree(self.data_source.pf,
                                 data_source=self.data_source)
@@ -134,40 +107,49 @@
         mylog.debug('Log Fields:' + str(log_fields))
         self.volume.set_fields([self.field], log_fields, True)
 
-    def set_camera(self, camera):
-        """Set camera in this object, as well as any attributes"""
-        self.lens.set_camera(camera)
+    def set_volume(self, volume):
+        assert(isinstance(volume, AMRKDTree))
+        del self.volume
+        self.volume = volume
 
-    def teardown(self):
-        """docstring for teardown"""
-        pass
+    def set_fields(self, fields, no_ghost=True):
+        log_fields = [self.data_source.pf.field_info[self.field].take_log
+                      for field in fields]
+        self.volume.set_fields(fields, log_fields, no_ghost)
 
-    def add_sampler(self, sampler):
+    def set_sampler(self, camera, sampler_type='volume-render'):
         """docstring for add_sampler"""
-        pass
+        if sampler_type == 'volume-render':
+            sampler = new_volume_render_sampler(camera, self)
+        else:
+            raise NotImplementedError("%s not implemented yet" % sampler_type)
+        self.sampler = sampler
+        assert(self.sampler is not None)
 
-    def render(self, zbuffer=None):
-        """docstring for request"""
-        self.zbuffer = zbuffer
-        self.prepare()
-        self.lens.run()
+    def render(self, camera):
+        camera.lens.set_camera(camera)
+        self.set_sampler(camera)
+        assert (self.sampler is not None)
 
-        self.camera_updated()
+        mylog.debug("Casting rays")
         total_cells = 0
         if self.double_check:
-            for brick in self.render_source.volume.bricks:
+            for brick in self.volume.bricks:
                 for data in brick.my_data:
                     if np.any(np.isnan(data)):
                         raise RuntimeError
 
-        view_pos = self.front_center + \
-            self.camera.unit_vectors[2] * 1.0e6 * self.camera.width[2]
-        for brick in self.render_source.volume.traverse(view_pos):
+        for brick in self.volume.traverse(camera.lens.viewpoint):
             self.sampler(brick, num_threads=self.num_threads)
             total_cells += np.prod(brick.my_data[0].shape)
+        mylog.debug("Done casting rays")
 
-        self.current_image = \
-            self.finalize_image(self.sampler.aimage)
-
+        self.current_image = self.finalize_image(camera, self.sampler.aimage)
         return self.current_image
 
+    def finalize_image(self, camera, image):
+        image = self.volume.reduce_tree_images(image,
+                                               camera.lens.viewpoint)
+        if self.transfer_function.grey_opacity is False:
+            image[:, :, 3] = 1.0
+        return image
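
render() walks the kd-tree bricks from the lens viewpoint, sampling each one and tallying cells; a stand-in sketch of the bookkeeping (NumPy arrays in place of bricks, a no-op sampler):

    import numpy as np

    def sampler(brick):
        pass  # stands in for the compiled ray-casting sampler

    bricks = [np.zeros((8, 8, 8)), np.zeros((4, 4, 4))]  # fake leaves
    total_cells = 0
    for brick in bricks:
        sampler(brick)
        total_cells += np.prod(brick.shape)
    print(total_cells)  # 512 + 64 = 576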

diff -r db7eaea8701162df370ca685f68a5261a6934943 -r f82283884e6af705ed15a94e51c2d87f42ebde45 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -13,17 +13,18 @@
 
 
 from yt.funcs import mylog
-from yt.data_objects.static_output import Dataset
 from camera import Camera
 from render_source import VolumeSource, OpaqueSource
 from yt.data_objects.api import ImageArray
 from zbuffer_array import ZBuffer
+from .utils import data_source_or_all
 import numpy as np
 
 
 class SceneHandle(object):
     """docstring for SceneHandle"""
     def __init__(self, scene, camera, source, lens):
+        mylog.debug("Entering %s" % str(self))
         self.scene = scene
         self.camera = camera
         self.source = source
@@ -50,9 +51,10 @@
     _current = None
 
     def __init__(self):
+        mylog.debug("Entering %s" % str(self))
         super(Scene, self).__init__()
         self.sources = {}
-        self.camera = None
+        self.default_camera = None
 
     def iter_opaque_sources(self):
         """
@@ -95,19 +97,20 @@
         if keyname is None:
             keyname = 'source_%02i' % len(self.sources)
 
-        render_source.set_scene(self)
-
         self.sources[keyname] = render_source
 
         return self
 
-    def render(self, fname=None, clip_ratio=None):
+    def render(self, fname=None, clip_ratio=None, camera=None):
+        if camera is None:
+            camera = self.default_camera
+        assert(camera is not None)
         self.validate()
         ims = {}
         for k, v in self.sources.iteritems():
             v.validate()
             print 'Running', k, v
-            ims[k] = v.render()
+            ims[k] = v.render(camera)
 
         bmp = np.zeros_like(ims.values()[0])
         for k, v in ims.iteritems():
@@ -153,22 +156,16 @@
         return handle
 
 
-def create_volume_rendering(data_source, field=None):
-    if isinstance(data_source, Dataset):
-        pf = data_source
-        data_source = data_source.all_data()
-    else:
-        pf = data_source.pf
+def volume_render(data_source, field=None, fname=None):
+    data_source = data_source_or_all(data_source)
+    sc = Scene()
+    if field is None:
+        data_source.pf.index
+        field = data_source.pf.field_list[0]
+        mylog.info('Setting default field to %s' % field.__repr__())
 
-    sc = Scene()
-    camera = Camera(data_source)
-    if field is None:
-        pf.field_list
-        field = pf.field_list[0]
-        mylog.info('Setting default field to %s' % field.__repr__())
-    render_source = VolumeSource(data_source, field)
-
-    sc.set_camera(camera)
-    sc.add_source(render_source)
-    render_source.build_defaults()
-    return sc
+    vol = VolumeSource(data_source, field=field)
+    cam = Camera(data_source)
+    sc.set_default_camera(cam)
+    sc.add_source(vol)
+    return sc.render(fname=fname), sc

diff -r db7eaea8701162df370ca685f68a5261a6934943 -r f82283884e6af705ed15a94e51c2d87f42ebde45 yt/visualization/volume_rendering/tests/test_scene.py
--- a/yt/visualization/volume_rendering/tests/test_scene.py
+++ b/yt/visualization/volume_rendering/tests/test_scene.py
@@ -18,8 +18,7 @@
 from yt.testing import \
     fake_random_pf
 from yt.visualization.volume_rendering.scene import Scene, \
-    create_volume_rendering
-from yt.visualization.volume_rendering.lens import PlaneParallelLens
+    volume_render
 from yt.visualization.volume_rendering.camera import Camera
 from yt.visualization.volume_rendering.render_source import VolumeSource
 
@@ -30,37 +29,10 @@
 
 sc = Scene()
 vol = VolumeSource(ds, field=('gas', 'density'))
-cam = Camera()
-cam.resolution = (512, 512)
-lens = PlaneParallelLens()
-sc.camera = cam
+cam = Camera(ds)
+sc.set_default_camera(cam)
+sc.add_source(vol)
+sc.render('test.png')
 
-vol.build_defaults()
-vol.transfer_function.grey_opacity=False
-sc.add_source(vol)
-cam.set_defaults_from_data_source(ds)
-cam.set_lens(lens)
-
-lens.expose(sc, cam, vol)
-vol.current_image.write_png('test.png', clip_ratio=6.0)
-
-vol.set_field(('io','Density'))
-vol.build_defaults()
-lens.expose(sc, cam, vol)
-vol.current_image.write_png('test_op.png', clip_ratio=6.0)
-
-#sc.render()
-
-
-#sc = create_volume_rendering(ds, field=('gas', 'density'))
-#sc.render('test.png')
-#
-#h = sc.get_handle()
-#h.source.transfer_function.grey_opacity = True
-#h.source.transfer_function.map_to_colormap(-2, 0.0, scale=50.0, colormap='RdBu_r')
-#
-#cam = h.camera
-#for i in range(36):
-#    cam.pitch(-2*np.pi / 36.)
-#    sc.render('test_%04i.png' % i)
-
+im, sc2 = volume_render(ds, field=('gas', 'temperature'))
+im.write_png('test2.png')

diff -r db7eaea8701162df370ca685f68a5261a6934943 -r f82283884e6af705ed15a94e51c2d87f42ebde45 yt/visualization/volume_rendering/utils.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/utils.py
@@ -0,0 +1,33 @@
+
+from yt.data_objects.static_output import Dataset
+from yt.utilities.lib.grid_traversal import \
+    VolumeRenderSampler
+
+
+def data_source_or_all(data_source):
+    if isinstance(data_source, Dataset):
+        data_source = data_source.all_data()
+    return data_source
+
+
+def new_volume_render_sampler(camera, render_source):
+    params = camera.get_sampler_params()
+    params.update(transfer_function=render_source.transfer_function)
+    params.update(num_samples=render_source.num_samples)
+    args = (
+        params['vp_pos'],
+        params['vp_dir'],
+        params['center'],
+        params['bounds'],
+        params['image'],
+        params['x_vec'],
+        params['y_vec'],
+        params['width'],
+        params['transfer_function'],
+        params['num_samples'],
+    )
+    kwargs = {}
+    if render_source.zbuffer is not None:
+        kwargs['zbuffer'] = render_source.zbuffer.z
+    sampler = VolumeRenderSampler(*args, **kwargs)
+    return sampler

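The helper above is a small dispatch shim; a hedged sketch of how it behaves (sphere construction as in the tests elsewhere in this digest):

    from yt.testing import fake_random_pf
    from yt.visualization.volume_rendering.utils import data_source_or_all

    pf = fake_random_pf(16)
    ad = data_source_or_all(pf)            # a Dataset is widened to all_data()
    sp = pf.h.sphere(pf.domain_center, pf.domain_width[0] / 4)
    assert data_source_or_all(sp) is sp    # anything else passes through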

https://bitbucket.org/yt_analysis/yt/commits/d8485baaf019/
Changeset:   d8485baaf019
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-29 22:43:10+00:00
Summary:     Add a get_source(source_num) method to Scene. We might want to switch around whether the dict or the list is the primary source holder. Also, testing out simple compositing... it works remarkably well
Affected #:  2 files

diff -r f82283884e6af705ed15a94e51c2d87f42ebde45 -r d8485baaf019f01eb5fd76f743b8ae920b4c4ad8 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -56,6 +56,9 @@
         self.sources = {}
         self.default_camera = None
 
+    def get_source(self, source_num):
+        return self.sources.values()[source_num]
+
     def iter_opaque_sources(self):
         """
         Iterate over opaque RenderSource objects,

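A hedged sketch of the new accessor; note that self.sources is a plain dict, so with several sources the integer index follows dict ordering on Python 2 rather than insertion order, which is presumably why the summary above muses about making a list the primary holder:

    from yt.testing import fake_random_pf
    from yt.visualization.volume_rendering.scene import Scene
    from yt.visualization.volume_rendering.render_source import VolumeSource

    pf = fake_random_pf(16)
    sc = Scene()
    sc.add_source(VolumeSource(pf, field=('gas', 'density')))
    vol = sc.get_source(0)    # equivalent to sc.sources.values()[0]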
diff -r f82283884e6af705ed15a94e51c2d87f42ebde45 -r d8485baaf019f01eb5fd76f743b8ae920b4c4ad8 yt/visualization/volume_rendering/tests/test_scene.py
--- a/yt/visualization/volume_rendering/tests/test_scene.py
+++ b/yt/visualization/volume_rendering/tests/test_scene.py
@@ -23,16 +23,31 @@
 from yt.visualization.volume_rendering.render_source import VolumeSource
 
 #pf = fake_random_pf(64)
-pf = load('/home/skillman/kipac/data/IsolatedGalaxy/galaxy0030/galaxy0030')
-ds = pf.h.sphere(pf.domain_center, pf.domain_width[0] / 50)
+#pf = load('/home/skillman/kipac/data/IsolatedGalaxy/galaxy0030/galaxy0030')
+pf = load('/home/skillman/kipac/data/enzo_cosmology_plus/DD0046/DD0046')
+ds = pf.h.sphere(pf.domain_center, pf.domain_width[0] / 2)
 
+im, sc = volume_render(ds, field=('gas', 'density'))
+im.write_png('test.png')
 
-sc = Scene()
-vol = VolumeSource(ds, field=('gas', 'density'))
-cam = Camera(ds)
-sc.set_default_camera(cam)
-sc.add_source(vol)
-sc.render('test.png')
+sc.default_camera.resolution = (512, 512)
+vol = sc.get_source(0)
+tf = vol.transfer_function
+tf.clear()
+tf.map_to_colormap(-30.5, -28, scale=0.001, colormap='Blues_r')
 
-im, sc2 = volume_render(ds, field=('gas', 'temperature'))
-im.write_png('test2.png')
+vol2 = VolumeSource(ds, field=('gas', 'temperature'))
+sc.add_source(vol2)
+
+tf = vol2.transfer_function
+tf.clear()
+tf.map_to_colormap(6.0, 7.0, scale=0.01, colormap='Reds_r')
+
+sc.render('test_composite.png', clip_ratio=6.0)
+
+nrot = 10
+for i in range(nrot):
+    sc.default_camera.pitch(2*np.pi/4/nrot)
+    sc.render('test_rot_%04i.png' % i, clip_ratio=6.0)
+
+

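For reference, each step of the rotation loop above pitches the camera by (2*pi/4)/nrot radians, so the ten frames together sweep a quarter turn (pi/2, i.e. 90 degrees).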

https://bitbucket.org/yt_analysis/yt/commits/94483d7d519a/
Changeset:   94483d7d519a
Branch:      yt-3.0
User:        samskillman
Date:        2014-02-23 19:08:42+00:00
Summary:     front-to-back ordering of bricks, and modifying the alpha-compositing.
Affected #:  2 files

diff -r 18177fadc5e944aab2263e3a5c27249f15df1547 -r 94483d7d519a54f860377f63b4dc94a495d7d878 yt/utilities/lib/field_interpolation_tables.pxd
--- a/yt/utilities/lib/field_interpolation_tables.pxd
+++ b/yt/utilities/lib/field_interpolation_tables.pxd
@@ -86,13 +86,13 @@
         trgba[i] = istorage[field_table_ids[i]]
 
     if grey_opacity == 1:
-        ta = fmax(1.0 - dt*trgba[3],0.0)
+        ta = fmax(1.0 - rgba[3],0.0)
         for i in range(4):
-            rgba[i] = dt*trgba[i] + ta*rgba[i]
+            rgba[i] = ta*dt*trgba[i] + rgba[i]
     else:
         for i in range(3):
             ta = fmax(1.0-dt*trgba[i], 0.0)
-            rgba[i] = dt*trgba[i] + ta*rgba[i]
+            rgba[i] = ta*dt*trgba[i] + rgba[i]
 
 @cython.boundscheck(False)
 @cython.wraparound(False)

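For readers following the compositing change: the hunk above replaces the back-to-front "over" update with a front-to-back "under" update (note that changeset 1092314c05cd later in this digest swaps it back). A pure-Python sketch of the two rules, mirroring the grey_opacity branch; the non-grey branch uses a per-channel transmittance instead of the accumulated alpha:

    def composite_over(rgba, trgba, dt):
        # back-to-front "over": the new sample covers the accumulated color
        ta = max(1.0 - dt * trgba[3], 0.0)
        return [dt * t + ta * r for r, t in zip(rgba, trgba)]

    def composite_under(rgba, trgba, dt):
        # front-to-back "under": the new sample is attenuated by the
        # opacity already accumulated in rgba[3]
        ta = max(1.0 - rgba[3], 0.0)
        return [ta * dt * t + r for r, t in zip(rgba, trgba)]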
diff -r 18177fadc5e944aab2263e3a5c27249f15df1547 -r 94483d7d519a54f860377f63b4dc94a495d7d878 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -569,7 +569,7 @@
         return sampler
 
     def finalize_image(self, image):
-        view_pos = self.front_center + self.orienter.unit_vectors[2] * 1.0e6 * self.width[2]
+        view_pos = self.front_center - self.orienter.unit_vectors[2] * 1.0e6 * self.width[2]
         image = self.volume.reduce_tree_images(image, view_pos)
         if self.transfer_function.grey_opacity is False:
             image[:,:,3]=1.0
@@ -584,7 +584,7 @@
                     if np.any(np.isnan(data)):
                         raise RuntimeError
 
-        view_pos = self.back_center - self.orienter.unit_vectors[2] * 1.0e6 * self.width[2]
+        view_pos = self.front_center + self.orienter.unit_vectors[2] * 1.0e6 * self.width[2]
         for brick in self.volume.traverse(view_pos):
             sampler(brick, num_threads=num_threads)
             total_cells += np.prod(brick.my_data[0].shape)


https://bitbucket.org/yt_analysis/yt/commits/ebc9d5de40bb/
Changeset:   ebc9d5de40bb
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-30 15:33:34+00:00
Summary:     Re-implementing Perspective renderings with a PerspectiveLens.
Affected #:  3 files

diff -r d8485baaf019f01eb5fd76f743b8ae920b4c4ad8 -r ebc9d5de40bbde03910ac0d8de6ae0e39c20210d yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -69,7 +69,7 @@
         focus = data_source.get_field_parameter('center')
 
         if iterable(width) and len(width) > 1 and isinstance(width[1], str):
-            width = self.pf.quan(width[0], input_units=width[1])
+            width = data_source.pf.quan(width[0], input_units=width[1])
             # Now convert back to code length for subsequent manipulation
             width = width.in_units("code_length").value
         if not iterable(width):
@@ -86,6 +86,12 @@
                                      self.north_vector, steady_north=False)
         self._moved = True
 
+    def set_width(self, width):
+        if not iterable(width):
+            width = YTArray([width, width, width], input_units="code_length")
+        self.width = width
+        self.switch_orientation()
+
     def switch_orientation(self, normal_vector=None, north_vector=None):
         r"""
         Change the view direction based on any of the orientation parameters.

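A minimal sketch of the new Camera.set_width, assuming (as in the tests in this digest) that constructing a Camera from a dataset fills in sensible defaults:

    from yt.testing import fake_random_pf
    from yt.visualization.volume_rendering.camera import Camera

    pf = fake_random_pf(16)
    cam = Camera(pf)
    # A scalar is promoted to YTArray([0.5, 0.5, 0.5], 'code_length') and
    # the orientation vectors are recomputed via switch_orientation().
    cam.set_width(0.5)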
diff -r d8485baaf019f01eb5fd76f743b8ae920b4c4ad8 -r ebc9d5de40bbde03910ac0d8de6ae0e39c20210d yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -29,15 +29,6 @@
         mylog.debug("Entering %s" % str(self))
         super(Lens, self).__init__()
         self.viewpoint = None
-
-
-class PlaneParallelLens(Lens):
-
-    """docstring for PlaneParallelLens"""
-
-    def __init__(self):
-        mylog.debug("Entering %s" % str(self))
-        super(PlaneParallelLens, self).__init__()
         self.sub_samples = 5
         self.num_threads = 0
         self.double_check = False
@@ -46,16 +37,10 @@
         self.back_center = None
         self.front_center = None
         self.sampler = None
-        self.viewpoint = None
 
     def set_camera(self, camera):
         self.setup_box_properties(camera)
 
-    def expose(self, scene, camera, render_source):
-        self.setup_box_properties(camera)
-        self.sampler = self.get_sampler(scene, camera, render_source)
-        self.cast_rays(camera, self.sampler, render_source)
-
     def new_image(self, camera):
         self.current_image = ImageArray(
             np.zeros((camera.resolution[0], camera.resolution[1],
@@ -74,11 +59,24 @@
         self.back_center = center - 0.5 * width[2] * unit_vectors[2]
         self.front_center = center + 0.5 * width[2] * unit_vectors[2]
 
-        # This is a hack that should be replaced by an alternate plane-parallel
-        # traversal. Put the camera really far away so that the effective
-        # viewpoint is infinitely far away, making for parallel rays.
-        self.viewpoint = self.front_center + \
-            camera.unit_vectors[2] * 1.0e6 * camera.width[2]
+        self.set_viewpoint(camera)
+
+    def set_viewpoint(self, camera):
+        """
+        Set the viewpoint used for AMRKDTree traversal such that bricks are
+        yielded back to front or front to back with respect to this point.
+        Must be implemented by each Lens subclass.
+        """
+        raise NotImplementedError("Need to choose viewpoint for this class")
+
+
+class PlaneParallelLens(Lens):
+
+    """docstring for PlaneParallelLens"""
+
+    def __init__(self):
+        mylog.debug("Entering %s" % str(self))
+        super(PlaneParallelLens, self).__init__()
 
     def get_sampler_params(self, camera):
         sampler_params =\
@@ -94,16 +92,92 @@
                  image=self.new_image(camera))
         return sampler_params
 
+    def set_viewpoint(self, camera):
+        # This is a hack that should be replaced by an alternate plane-parallel
+        # traversal. Put the camera really far away so that the effective
+        # viewpoint is infinitely far away, making for parallel rays.
+        self.viewpoint = self.front_center + \
+            camera.unit_vectors[2] * 1.0e6 * camera.width[2]
+
 
 class PerspectiveLens(Lens):
+
     """docstring for PerspectiveLens"""
+
     def __init__(self):
         super(PerspectiveLens, self).__init__()
-        raise NotImplementedError
+        self.expand_factor = 1.5
+
+    def new_image(self, camera):
+        self.current_image = ImageArray(
+            np.zeros((camera.resolution[0]*camera.resolution[1], 1,
+                      4), dtype='float64', order='C'),
+            info={'imtype': 'rendering'})
+        return self.current_image
+
+    def get_sampler_params(self, camera):
+        # We should move away from pre-generation of vectors like this and into
+        # the usage of on-the-fly generation in the VolumeIntegrator module
+        # We might have a different width and back_center
+        #dl = (self.back_center - self.front_center)
+        #self.front_center += self.expand_factor*dl
+        #self.back_center -= dl
+
+        px = np.linspace(-camera.width[0]/2.0, camera.width[0]/2.0,
+                         camera.resolution[0])[:, None].d
+        py = np.linspace(-camera.width[1]/2.0, camera.width[1]/2.0,
+                         camera.resolution[1])[None, :].d
+        inv_mat = camera.inv_mat.d
+        positions = np.zeros((camera.resolution[0], camera.resolution[1], 3),
+                             dtype='float64', order='C')
+        positions[:, :, 0] = inv_mat[0, 0]*px + \
+            inv_mat[0, 1]*py + self.back_center.d[0]
+        positions[:, :, 1] = inv_mat[1, 0]*px + \
+            inv_mat[1, 1]*py + self.back_center.d[1]
+        positions[:, :, 2] = inv_mat[2, 0]*px + \
+            inv_mat[2, 1]*py + self.back_center.d[2]
+        # Can we use bounds for anything here?
+        # bounds = (px.min(), px.max(), py.min(), py.max())
+
+        # We are likely adding on an odd cutting condition here
+        vectors = self.front_center.d - positions
+        vectors = vectors / (vectors**2).sum()**0.5
+        positions = self.front_center.d - 1.0 * \
+            (((self.back_center.d-self.front_center.d)**2).sum())**0.5*vectors
+        vectors = (self.back_center.d - positions)
+
+        uv = np.ones(3, dtype='float64')
+        vectors.shape = (camera.resolution[0]**2, 1, 3)
+        positions.shape = (camera.resolution[0]**2, 1, 3)
+        image = self.new_image(camera)
+
+        sampler_params =\
+            dict(vp_pos=positions,
+                 vp_dir=vectors,
+                 center=self.back_center.d,
+                 bounds=(0.0, 1.0, 0.0, 1.0),
+                 x_vec=uv,
+                 y_vec=uv,
+                 width=np.zeros(3, dtype='float64'),
+                 image=image
+                 )
+
+        mylog.debug(positions)
+        mylog.debug(vectors)
+
+        return sampler_params
+
+    def set_viewpoint(self, camera):
+        """
+        For a PerspectiveLens, the viewpoint is the front center.
+        """
+        self.viewpoint = self.front_center
 
 
 class FisheyeLens(Lens):
+
     """docstring for FisheyeLens"""
+
     def __init__(self):
         super(FisheyeLens, self).__init__()
         raise NotImplementedError

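Illustrative only (this is not the yt code above): the geometry that PerspectiveLens.get_sampler_params is building is the standard one, where every ray starts at a single eye point and fans out through a pixel grid on the image plane, with each direction normalized per ray:

    import numpy as np

    eye = np.array([0.0, 0.0, -1.0])
    nx, ny = 4, 3
    px, py = np.meshgrid(np.linspace(-0.5, 0.5, nx),
                         np.linspace(-0.5, 0.5, ny), indexing='ij')
    plane = np.stack([px, py, np.zeros_like(px)], axis=-1)  # image plane at z = 0
    dirs = plane - eye                                      # one ray per pixel
    dirs /= np.linalg.norm(dirs, axis=-1, keepdims=True)    # per-ray normalization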
diff -r d8485baaf019f01eb5fd76f743b8ae920b4c4ad8 -r ebc9d5de40bbde03910ac0d8de6ae0e39c20210d yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -148,6 +148,7 @@
         return self.current_image
 
     def finalize_image(self, camera, image):
+        image.shape = camera.resolution[0], camera.resolution[1], 4
         image = self.volume.reduce_tree_images(image,
                                                camera.lens.viewpoint)
         if self.transfer_function.grey_opacity is False:

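In numpy terms, the reshape added to finalize_image undoes the flattening the perspective lens performs when it builds one sample per ray:

    import numpy as np

    res = (4, 3)                               # camera.resolution
    flat = np.zeros((res[0] * res[1], 1, 4))   # per-ray RGBA, as the lens builds it
    flat.shape = res[0], res[1], 4             # back to an (nx, ny, RGBA) image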

https://bitbucket.org/yt_analysis/yt/commits/13158900ee72/
Changeset:   13158900ee72
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-30 15:34:48+00:00
Summary:     I don't know how these images were added... argh.
Affected #:  0 files



https://bitbucket.org/yt_analysis/yt/commits/6e7fc1b2f37d/
Changeset:   6e7fc1b2f37d
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-30 15:52:29+00:00
Summary:     Merging in front-to-back ordering. There will be more things to fix
Affected #:  4 files

diff -r 13158900ee7242ca171d64004b46bae0d0f30ff7 -r 6e7fc1b2f37d27c4d995aa7e0f1049508e477319 yt/utilities/lib/field_interpolation_tables.pxd
--- a/yt/utilities/lib/field_interpolation_tables.pxd
+++ b/yt/utilities/lib/field_interpolation_tables.pxd
@@ -86,13 +86,13 @@
         trgba[i] = istorage[field_table_ids[i]]
 
     if grey_opacity == 1:
-        ta = fmax(1.0 - dt*trgba[3],0.0)
+        ta = fmax(1.0 - rgba[3],0.0)
         for i in range(4):
-            rgba[i] = dt*trgba[i] + ta*rgba[i]
+            rgba[i] = ta*dt*trgba[i] + rgba[i]
     else:
         for i in range(3):
             ta = fmax(1.0-dt*trgba[i], 0.0)
-            rgba[i] = dt*trgba[i] + ta*rgba[i]
+            rgba[i] = ta*dt*trgba[i] + rgba[i]
 
 @cython.boundscheck(False)
 @cython.wraparound(False)

diff -r 13158900ee7242ca171d64004b46bae0d0f30ff7 -r 6e7fc1b2f37d27c4d995aa7e0f1049508e477319 yt/visualization/volume_rendering/camera.py.orig
--- /dev/null
+++ b/yt/visualization/volume_rendering/camera.py.orig
@@ -0,0 +1,205 @@
+"""
+Import the components of the volume rendering extension
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.funcs import iterable, mylog
+from yt.utilities.orientation import Orientation
+from yt.units.yt_array import YTArray
+from yt.utilities.math_utils import get_rotation_matrix
+from utils import data_source_or_all
+from lens import lenses
+import numpy as np
+
+
+class Camera(Orientation):
+
+    r"""    """
+
+    _moved = True
+
+    def __init__(self, data_source=None, lens_type='plane-parallel'):
+        mylog.debug("Entering %s" % str(self))
+        """Initialize a Camera Instance"""
+        self.lens = None
+        self.position = None
+        self.north_vector = None
+        self.resolution = (256, 256)
+        self.light = None
+        self.width = None
+        self.focus = np.zeros(3)
+        self.position = np.ones(3)
+        self.set_lens(lens_type)
+        if data_source is not None:
+            data_source = data_source_or_all(data_source)
+            self.set_defaults_from_data_source(data_source)
+
+        super(Camera, self).__init__(self.focus - self.position,
+                                     self.north_vector, steady_north=False)
+
+    def get_sampler_params(self):
+        lens_params = self.lens.get_sampler_params(self)
+        lens_params.update(width=self.width)
+        return lens_params
+
+    def set_lens(self, lens_type):
+        if lens_type not in lenses:
+            mylog.error("Lens type not available")
+            raise RuntimeError()
+        self.lens = lenses[lens_type]()
+
+    def set_defaults_from_data_source(self, data_source):
+        self.position = data_source.pf.domain_right_edge
+
+        width = 1.5 * data_source.pf.domain_width.max()
+        (xmi, xma), (ymi, yma), (zmi, zma) = \
+            data_source.quantities['Extrema'](['x', 'y', 'z'])
+        width = np.sqrt((xma - xmi) ** 2 + (yma - ymi) ** 2 +
+                        (zma - zmi) ** 2) / np.sqrt(3)
+        focus = data_source.get_field_parameter('center')
+
+        if iterable(width) and len(width) > 1 and isinstance(width[1], str):
+            width = data_source.pf.quan(width[0], input_units=width[1])
+            # Now convert back to code length for subsequent manipulation
+            width = width.in_units("code_length").value
+        if not iterable(width):
+            width = (width, width, width)  # left/right, top/bottom, front/back
+        if not isinstance(width, YTArray):
+            width = data_source.pf.arr(width, input_units="code_length")
+        if not isinstance(focus, YTArray):
+            focus = self.pf.arr(focus, input_units="code_length")
+
+        self.width = width
+        self.focus = focus
+
+        super(Camera, self).__init__(self.focus - self.position,
+                                     self.north_vector, steady_north=False)
+        self._moved = True
+
+    def set_width(self, width):
+        if not iterable(width):
+            width = YTArray([width, width, width], input_units="code_length")
+        self.width = width
+        self.switch_orientation()
+
+    def switch_orientation(self, normal_vector=None, north_vector=None):
+        r"""
+        Change the view direction based on any of the orientation parameters.
+
+        This will recalculate all the necessary vectors and vector planes
+        related to an orientable object.
+
+        Parameters
+        ----------
+        normal_vector: array_like, optional
+            The new looking vector.
+        north_vector : array_like, optional
+            The 'up' direction for the plane of rays.  If not specified,
+            calculated automatically.
+        """
+        if north_vector is None:
+            north_vector = self.north_vector
+        if normal_vector is None:
+            normal_vector = self.normal_vector
+        self._setup_normalized_vectors(normal_vector, north_vector)
+
+    def switch_view(self, normal_vector=None, north_vector=None):
+        r"""Change the view based on any of the view parameters.
+
+        This will recalculate the orientation and width based on any of
+        normal_vector, width, center, and north_vector.
+
+        Parameters
+        ----------
+        normal_vector: array_like, optional
+            The new looking vector.
+        north_vector : array_like, optional
+            The 'up' direction for the plane of rays.  If not specified,
+            calculated automatically.
+        """
+        if north_vector is None:
+            north_vector = self.north_vector
+        if normal_vector is None:
+            normal_vector = self.normal_vector
+        self.switch_orientation(normal_vector=normal_vector,
+                                north_vector=north_vector)
+        self._moved = True
+
+    def pitch(self, theta):
+        r"""Rotate by a given angle about the horizontal axis
+
+        Pitch the view.
+
+        Parameters
+        ----------
+        theta : float, in radians
+             Angle (in radians) by which to pitch the view.
+
+        Examples
+        --------
+
+        >>> cam = Camera()
+        >>> cam.pitch(np.pi/4)
+        """
+        rot_vector = self.unit_vectors[0]
+        R = get_rotation_matrix(theta, rot_vector)
+        self.switch_view(
+            normal_vector=np.dot(R, self.unit_vectors[2]),
+            north_vector=np.dot(R, self.unit_vectors[1]))
+        if self.steady_north:
+            self.north_vector = self.unit_vectors[1]
+
+    def yaw(self, theta):
+        r"""Rotate by a given angle about the vertical axis
+
+        Yaw the view.
+
+        Parameters
+        ----------
+        theta : float, in radians
+             Angle (in radians) by which to yaw the view.
+
+        Examples
+        --------
+
+        >>> cam = Camera()
+        >>> cam.yaw(np.pi/4)
+        """
+        rot_vector = self.unit_vectors[1]
+        R = get_rotation_matrix(theta, rot_vector)
+        self.switch_view(
+            normal_vector=np.dot(R, self.unit_vectors[2]))
+
+    def roll(self, theta):
+        r"""Rotate by a given angle about the view normal axis
+
+        Roll the view.
+
+        Parameters
+        ----------
+        theta : float, in radians
+             Angle (in radians) by which to roll the view.
+
+        Examples
+        --------
+
+        >>> cam = Camera()
+        >>> cam.roll(np.pi/4)
+        """
+        rot_vector = self.unit_vectors[2]
+        R = get_rotation_matrix(theta, rot_vector)
+        self.switch_view(
+            normal_vector=np.dot(R, self.unit_vectors[2]),
+            north_vector=np.dot(R, self.unit_vectors[1]))
+        if self.steady_north:
+            self.north_vector = np.dot(R, self.north_vector)

diff -r 13158900ee7242ca171d64004b46bae0d0f30ff7 -r 6e7fc1b2f37d27c4d995aa7e0f1049508e477319 yt/visualization/volume_rendering/old_camera.py
--- a/yt/visualization/volume_rendering/old_camera.py
+++ b/yt/visualization/volume_rendering/old_camera.py
@@ -572,7 +572,7 @@
         return sampler
 
     def finalize_image(self, image):
-        view_pos = self.front_center + self.orienter.unit_vectors[2] * 1.0e6 * self.width[2]
+        view_pos = self.front_center - self.orienter.unit_vectors[2] * 1.0e6 * self.width[2]
         image = self.volume.reduce_tree_images(image, view_pos)
         if self.transfer_function.grey_opacity is False:
             image[:,:,3]=1.0
@@ -587,7 +587,7 @@
                     if np.any(np.isnan(data)):
                         raise RuntimeError
 
-        view_pos = self.back_center - self.orienter.unit_vectors[2] * 1.0e6 * self.width[2]
+        view_pos = self.front_center + self.orienter.unit_vectors[2] * 1.0e6 * self.width[2]
         for brick in self.volume.traverse(view_pos):
             sampler(brick, num_threads=num_threads)
             total_cells += np.prod(brick.my_data[0].shape)

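A note on the two viewpoint flips above: offsetting view_pos by 1.0e6 * width along the view normal pushes the effective eye essentially to infinity, approximating parallel rays; which side of the volume that pseudo-eye sits on decides whether the kd-tree traversal yields bricks front-to-back or back-to-front, and that order has to match the compositing rule in field_interpolation_tables.pxd.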

https://bitbucket.org/yt_analysis/yt/commits/27a254eb1fa5/
Changeset:   27a254eb1fa5
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-30 15:54:02+00:00
Summary:     Don't know how a .orig file got committed here
Affected #:  1 file

diff -r 6e7fc1b2f37d27c4d995aa7e0f1049508e477319 -r 27a254eb1fa5893267945448fc70991e8e904b97 yt/visualization/volume_rendering/camera.py.orig
--- a/yt/visualization/volume_rendering/camera.py.orig
+++ /dev/null
@@ -1,205 +0,0 @@
-"""
-Import the components of the volume rendering extension
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.funcs import iterable, mylog
-from yt.utilities.orientation import Orientation
-from yt.units.yt_array import YTArray
-from yt.utilities.math_utils import get_rotation_matrix
-from utils import data_source_or_all
-from lens import lenses
-import numpy as np
-
-
-class Camera(Orientation):
-
-    r"""    """
-
-    _moved = True
-
-    def __init__(self, data_source=None, lens_type='plane-parallel'):
-        mylog.debug("Entering %s" % str(self))
-        """Initialize a Camera Instance"""
-        self.lens = None
-        self.position = None
-        self.north_vector = None
-        self.resolution = (256, 256)
-        self.light = None
-        self.width = None
-        self.focus = np.zeros(3)
-        self.position = np.ones(3)
-        self.set_lens(lens_type)
-        if data_source is not None:
-            data_source = data_source_or_all(data_source)
-            self.set_defaults_from_data_source(data_source)
-
-        super(Camera, self).__init__(self.focus - self.position,
-                                     self.north_vector, steady_north=False)
-
-    def get_sampler_params(self):
-        lens_params = self.lens.get_sampler_params(self)
-        lens_params.update(width=self.width)
-        return lens_params
-
-    def set_lens(self, lens_type):
-        if lens_type not in lenses:
-            mylog.error("Lens type not available")
-            raise RuntimeError()
-        self.lens = lenses[lens_type]()
-
-    def set_defaults_from_data_source(self, data_source):
-        self.position = data_source.pf.domain_right_edge
-
-        width = 1.5 * data_source.pf.domain_width.max()
-        (xmi, xma), (ymi, yma), (zmi, zma) = \
-            data_source.quantities['Extrema'](['x', 'y', 'z'])
-        width = np.sqrt((xma - xmi) ** 2 + (yma - ymi) ** 2 +
-                        (zma - zmi) ** 2) / np.sqrt(3)
-        focus = data_source.get_field_parameter('center')
-
-        if iterable(width) and len(width) > 1 and isinstance(width[1], str):
-            width = data_source.pf.quan(width[0], input_units=width[1])
-            # Now convert back to code length for subsequent manipulation
-            width = width.in_units("code_length").value
-        if not iterable(width):
-            width = (width, width, width)  # left/right, top/bottom, front/back
-        if not isinstance(width, YTArray):
-            width = data_source.pf.arr(width, input_units="code_length")
-        if not isinstance(focus, YTArray):
-            focus = self.pf.arr(focus, input_units="code_length")
-
-        self.width = width
-        self.focus = focus
-
-        super(Camera, self).__init__(self.focus - self.position,
-                                     self.north_vector, steady_north=False)
-        self._moved = True
-
-    def set_width(self, width):
-        if not iterable(width):
-            width = YTArray([width, width, width], input_units="code_length")
-        self.width = width
-        self.switch_orientation()
-
-    def switch_orientation(self, normal_vector=None, north_vector=None):
-        r"""
-        Change the view direction based on any of the orientation parameters.
-
-        This will recalculate all the necessary vectors and vector planes
-        related to an orientable object.
-
-        Parameters
-        ----------
-        normal_vector: array_like, optional
-            The new looking vector.
-        north_vector : array_like, optional
-            The 'up' direction for the plane of rays.  If not specified,
-            calculated automatically.
-        """
-        if north_vector is None:
-            north_vector = self.north_vector
-        if normal_vector is None:
-            normal_vector = self.normal_vector
-        self._setup_normalized_vectors(normal_vector, north_vector)
-
-    def switch_view(self, normal_vector=None, north_vector=None):
-        r"""Change the view based on any of the view parameters.
-
-        This will recalculate the orientation and width based on any of
-        normal_vector, width, center, and north_vector.
-
-        Parameters
-        ----------
-        normal_vector: array_like, optional
-            The new looking vector.
-        north_vector : array_like, optional
-            The 'up' direction for the plane of rays.  If not specified,
-            calculated automatically.
-        """
-        if north_vector is None:
-            north_vector = self.north_vector
-        if normal_vector is None:
-            normal_vector = self.normal_vector
-        self.switch_orientation(normal_vector=normal_vector,
-                                north_vector=north_vector)
-        self._moved = True
-
-    def pitch(self, theta):
-        r"""Rotate by a given angle about the horizontal axis
-
-        Pitch the view.
-
-        Parameters
-        ----------
-        theta : float, in radians
-             Angle (in radians) by which to pitch the view.
-
-        Examples
-        --------
-
-        >>> cam = Camera()
-        >>> cam.pitch(np.pi/4)
-        """
-        rot_vector = self.unit_vectors[0]
-        R = get_rotation_matrix(theta, rot_vector)
-        self.switch_view(
-            normal_vector=np.dot(R, self.unit_vectors[2]),
-            north_vector=np.dot(R, self.unit_vectors[1]))
-        if self.steady_north:
-            self.north_vector = self.unit_vectors[1]
-
-    def yaw(self, theta):
-        r"""Rotate by a given angle about the vertical axis
-
-        Yaw the view.
-
-        Parameters
-        ----------
-        theta : float, in radians
-             Angle (in radians) by which to yaw the view.
-
-        Examples
-        --------
-
-        >>> cam = Camera()
-        >>> cam.yaw(np.pi/4)
-        """
-        rot_vector = self.unit_vectors[1]
-        R = get_rotation_matrix(theta, rot_vector)
-        self.switch_view(
-            normal_vector=np.dot(R, self.unit_vectors[2]))
-
-    def roll(self, theta):
-        r"""Rotate by a given angle about the view normal axis
-
-        Roll the view.
-
-        Parameters
-        ----------
-        theta : float, in radians
-             Angle (in radians) by which to roll the view.
-
-        Examples
-        --------
-
-        >>> cam = Camera()
-        >>> cam.roll(np.pi/4)
-        """
-        rot_vector = self.unit_vectors[2]
-        R = get_rotation_matrix(theta, rot_vector)
-        self.switch_view(
-            normal_vector=np.dot(R, self.unit_vectors[2]),
-            north_vector=np.dot(R, self.unit_vectors[1]))
-        if self.steady_north:
-            self.north_vector = np.dot(R, self.north_vector)


https://bitbucket.org/yt_analysis/yt/commits/0f292d9717d9/
Changeset:   0f292d9717d9
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-30 16:04:15+00:00
Summary:     Adding a lens test script; not sure what is going on with units and width...
Affected #:  1 file

diff -r 27a254eb1fa5893267945448fc70991e8e904b97 -r 0f292d9717d99995428d9eb53688c541ac76dff9 yt/visualization/volume_rendering/tests/test_lenses.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/tests/test_lenses.py
@@ -0,0 +1,34 @@
+"""
+Test for Volume Rendering Lenses.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+from yt.mods import *
+from yt.testing import \
+    fake_random_pf
+from yt.visualization.volume_rendering.scene import Scene
+from yt.visualization.volume_rendering.camera import Camera
+from yt.visualization.volume_rendering.render_source import VolumeSource
+
+pf = fake_random_pf(64)
+sc = Scene()
+cam = Camera(pf, lens_type='perspective')
+w = (pf.domain_width[0]*1000).in_units('code_length')
+print "WIDTH: ", w
+cam.set_width(w)
+vol = VolumeSource(pf, field=('gas', 'density'))
+vol.transfer_function.clear()
+vol.transfer_function.grey_opacity = True
+vol.transfer_function.map_to_colormap(-1., 0., scale=100000.)
+sc.set_default_camera(cam)
+sc.add_source(vol)
+sc.render('test_perspective.png')


https://bitbucket.org/yt_analysis/yt/commits/1092314c05cd/
Changeset:   1092314c05cd
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-30 17:22:28+00:00
Summary:     This was mistakenly swapped back. *THIS* is correct for front-to-back.
Affected #:  1 file

diff -r 0f292d9717d99995428d9eb53688c541ac76dff9 -r 1092314c05cda57553719ae7e02e5ec0c51c6b05 yt/utilities/lib/field_interpolation_tables.pxd
--- a/yt/utilities/lib/field_interpolation_tables.pxd
+++ b/yt/utilities/lib/field_interpolation_tables.pxd
@@ -86,13 +86,13 @@
         trgba[i] = istorage[field_table_ids[i]]
 
     if grey_opacity == 1:
-        ta = fmax(1.0 - rgba[3],0.0)
+        ta = fmax(1.0 - dt*trgba[3],0.0)
         for i in range(4):
-            rgba[i] = ta*dt*trgba[i] + rgba[i]
+            rgba[i] = dt*trgba[i] + ta*rgba[i]
     else:
         for i in range(3):
             ta = fmax(1.0-dt*trgba[i], 0.0)
-            rgba[i] = ta*dt*trgba[i] + rgba[i]
+            rgba[i] = dt*trgba[i] + ta*rgba[i]
 
 @cython.boundscheck(False)
 @cython.wraparound(False)


https://bitbucket.org/yt_analysis/yt/commits/bcbf96d9504f/
Changeset:   bcbf96d9504f
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-30 17:45:51+00:00
Summary:     FisheyeLens implemented. Needs modification functions.
Affected #:  3 files

diff -r 1092314c05cda57553719ae7e02e5ec0c51c6b05 -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -92,6 +92,10 @@
         self.width = width
         self.switch_orientation()
 
+    def set_position(self, position):
+        self.position = position
+        self.switch_orientation()
+
     def switch_orientation(self, normal_vector=None, north_vector=None):
         r"""
         Change the view direction based on any of the orientation parameters.

diff -r 1092314c05cda57553719ae7e02e5ec0c51c6b05 -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -20,6 +20,9 @@
 from yt.data_objects.image_array import ImageArray
 import numpy as np
 
+from yt.utilities.lib.grid_traversal import \
+    arr_fisheye_vectors
+
 
 class Lens(ParallelAnalysisInterface):
 
@@ -180,7 +183,55 @@
 
     def __init__(self):
         super(FisheyeLens, self).__init__()
-        raise NotImplementedError
+        self.fov = 180.0
+        self.radius = 1.0
+        self.center = None
+        self.rotation_matrix = np.eye(3)
+
+    def setup_box_properties(self, camera):
+        self.radius = camera.width.max()
+        super(FisheyeLens, self).setup_box_properties(camera)
+
+    def new_image(self, camera):
+        self.current_image = ImageArray(
+            np.zeros((camera.resolution[0]**2, 1,
+                      4), dtype='float64', order='C'),
+            info={'imtype': 'rendering'})
+        return self.current_image
+
+    def get_sampler_params(self, camera):
+        vp = arr_fisheye_vectors(camera.resolution[0], self.fov)
+        vp.shape = (camera.resolution[0]**2, 1, 3)
+        vp2 = vp.copy()
+        for i in range(3):
+            vp[:, :, i] = (vp2 * self.rotation_matrix[:, i]).sum(axis=2)
+        del vp2
+        vp *= self.radius
+        uv = np.ones(3, dtype='float64')
+        positions = np.ones((camera.resolution[0]**2, 1, 3),
+                            dtype='float64') * camera.position
+
+        image = self.new_image(camera)
+
+        sampler_params =\
+            dict(vp_pos=positions,
+                 vp_dir=vp,
+                 center=self.center,
+                 bounds=(0.0, 1.0, 0.0, 1.0),
+                 x_vec=uv,
+                 y_vec=uv,
+                 width=np.zeros(3, dtype='float64'),
+                 image=image
+                 )
+
+        return sampler_params
+
+    def set_viewpoint(self, camera):
+        """
+        For a FisheyeLens, the viewpoint is the lens center.
+        """
+        self.viewpoint = self.center
+
 
 lenses = {'plane-parallel': PlaneParallelLens,
           'perspective': PerspectiveLens,

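A hedged sketch of how the registry is consumed: Camera.set_lens() resolves the lens_type string through the lenses dict, and lens-specific knobs then live on the lens object, as in the fisheye test below:

    from yt.testing import fake_random_pf
    from yt.visualization.volume_rendering.camera import Camera

    pf = fake_random_pf(16)
    cam = Camera(pf, lens_type='fisheye')   # looked up in the lenses registry
    cam.lens.fov = 360.0                    # fisheye-specific field of view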
diff -r 1092314c05cda57553719ae7e02e5ec0c51c6b05 -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b yt/visualization/volume_rendering/tests/test_lenses.py
--- a/yt/visualization/volume_rendering/tests/test_lenses.py
+++ b/yt/visualization/volume_rendering/tests/test_lenses.py
@@ -18,17 +18,42 @@
 from yt.visualization.volume_rendering.scene import Scene
 from yt.visualization.volume_rendering.camera import Camera
 from yt.visualization.volume_rendering.render_source import VolumeSource
+from time import time
 
-pf = fake_random_pf(64)
+#pf = fake_random_pf(8)
+#w = (pf.domain_width[0]*30).in_units('code_length')
+#sc = Scene()
+#cam = Camera(pf, lens_type='perspective')
+#print "WIDTH: ", w
+#cam.set_width(w)
+#vol = VolumeSource(pf, field=('gas', 'density'))
+#sc.set_default_camera(cam)
+#sc.add_source(vol)
+#
+#t = -time()
+#sc.render('test_perspective.png', clip_ratio=None)
+#t += time()
+#print 'Total time: %e' % t
+
+pf = load('/home/skillman/kipac/data/IsolatedGalaxy/galaxy0030/galaxy0030')
+ds = pf.h.sphere(pf.domain_center, pf.domain_width[0] / 100)
 sc = Scene()
-cam = Camera(pf, lens_type='perspective')
-w = (pf.domain_width[0]*1000).in_units('code_length')
-print "WIDTH: ", w
-cam.set_width(w)
+cam = Camera(pf, lens_type='fisheye')
+cam.lens.fov=180.0
+cam.resolution=(512,512)
+cam.set_width(1.0)
+v,c = pf.find_max('density')
+p = pf.domain_center.copy()
+cam.set_position(c)
+#pf.field_info[('gas','density')].take_log=False
 vol = VolumeSource(pf, field=('gas', 'density'))
-vol.transfer_function.clear()
-vol.transfer_function.grey_opacity = True
-vol.transfer_function.map_to_colormap(-1., 0., scale=100000.)
+tf = vol.transfer_function
+tf.grey_opacity=True
+#tf.map_to_colormap(tf.x_bounds[0], tf.x_bounds[1], scale=3000.0, colormap='RdBu')
 sc.set_default_camera(cam)
 sc.add_source(vol)
-sc.render('test_perspective.png')
+
+t = -time()
+sc.render('test_fisheye.png', clip_ratio=6.0)
+t += time()
+print 'Total time: %e' % t


https://bitbucket.org/yt_analysis/yt/commits/4b557418552d/
Changeset:   4b557418552d
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-30 23:57:43+00:00
Summary:     Merging from experimental into the scene work
Affected #:  67 files

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 doc/extensions/notebook_sphinxext.py
--- a/doc/extensions/notebook_sphinxext.py
+++ b/doc/extensions/notebook_sphinxext.py
@@ -1,9 +1,10 @@
-import os, shutil, string, glob
+import os, shutil, string, glob, re
 from sphinx.util.compat import Directive
 from docutils import nodes
 from docutils.parsers.rst import directives
 from IPython.nbconvert import html, python
-from runipy.notebook_runner import NotebookRunner
+from IPython.nbformat.current import read, write
+from runipy.notebook_runner import NotebookRunner, NotebookError
 
 class NotebookDirective(Directive):
     """Insert an evaluated notebook into a document
@@ -57,12 +58,8 @@
 
         skip_exceptions = 'skip_exceptions' in self.options
 
-        try:
-            evaluated_text = evaluate_notebook(nb_abs_path, dest_path_eval,
-                                               skip_exceptions=skip_exceptions)
-        except:
-            # bail
-            return []
+        evaluated_text = evaluate_notebook(nb_abs_path, dest_path_eval,
+                                           skip_exceptions=skip_exceptions)
 
         # Create link to notebook and script files
         link_rst = "(" + \
@@ -138,11 +135,20 @@
     # Create evaluated version and save it to the dest path.
     # Always use --pylab so figures appear inline
     # perhaps this is questionable?
-    nb_runner = NotebookRunner(nb_path, pylab=False)
-    nb_runner.run_notebook(skip_exceptions=skip_exceptions)
+    notebook = read(open(nb_path), 'json')
+    nb_runner = NotebookRunner(notebook, pylab=False)
+    try:
+        nb_runner.run_notebook(skip_exceptions=skip_exceptions)
+    except NotebookError as e:
+        print ''
+        print e
+        # Return the traceback, filtering out ANSI color codes.
+        # http://stackoverflow.com/questions/13506033/filtering-out-ansi-escape-sequences
+        return 'Notebook conversion failed with the following traceback: \n%s' % \
+            re.sub(r'\\033[\[\]]([0-9]{1,2}([;@][0-9]{0,2})*)*[mKP]?', '', str(e))
     if dest_path is None:
         dest_path = 'temp_evaluated.ipynb'
-    nb_runner.save_notebook(dest_path)
+    write(nb_runner.nb, open(dest_path, 'w'), 'json')
     ret = nb_to_html(dest_path)
     if dest_path is 'temp_evaluated.ipynb':
         os.remove(dest_path)

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 doc/extensions/notebookcell_sphinxext.py
--- a/doc/extensions/notebookcell_sphinxext.py
+++ b/doc/extensions/notebookcell_sphinxext.py
@@ -35,12 +35,7 @@
 
         skip_exceptions = 'skip_exceptions' in self.options
 
-        try:
-            evaluated_text = \
-                evaluate_notebook('temp.ipynb', skip_exceptions=skip_exceptions)
-        except:
-            # bail
-            return []
+        evaluated_text = evaluate_notebook('temp.ipynb', skip_exceptions=skip_exceptions)
 
         # create notebook node
         attributes = {'format': 'html', 'source': 'nb_path'}

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -242,15 +242,15 @@
 .. notebook-cell::
 
    from yt.mods import *
-   pf = load("enzo_tiny_cosmology/DD0046/DD0046")
-   ad = pf.h.all_data()
-   total_mass = ad.quantities["TotalQuantity"]("cell_mass")
+   ds = load("enzo_tiny_cosmology/DD0046/DD0046")
+   ad = ds.all_data()
+   total_mass = ad.quantities.total_mass()
    # now select only gas with 1e5 K < T < 1e7 K.
    new_region = ad.cut_region(['obj["temperature"] > 1e5',
                                'obj["temperature"] < 1e7'])
-   cut_mass = new_region.quantities["TotalQuantity"]("cell_mass")
+   cut_mass = new_region.quantities.total_mass()
    print "The fraction of mass in this temperature range is %f." % \
-     (cut_mass[0] / total_mass[0])
+     (cut_mass / total_mass)
 
 The ``cut_region`` function generates a new object containing only the cells 
 that meet all of the specified criteria.  The sole argument to ``cut_region`` 

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 doc/source/bootcamp/3)_Simple_Visualization.ipynb
--- a/doc/source/bootcamp/3)_Simple_Visualization.ipynb
+++ b/doc/source/bootcamp/3)_Simple_Visualization.ipynb
@@ -243,7 +243,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "s = SlicePlot(pf, \"x\", [\"density\"], center=\"max\")\n",
+      "s = SlicePlot(ds, \"x\", [\"density\"], center=\"max\")\n",
       "s.annotate_contour(\"temperature\")\n",
       "s.zoom(2.5)"
      ],
@@ -272,4 +272,4 @@
    "metadata": {}
   }
  ]
-}
\ No newline at end of file
+}

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
--- a/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
+++ b/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
@@ -68,7 +68,7 @@
       "for ds in ts:\n",
       "    dd = ds.all_data()\n",
       "    rho_ex.append(dd.quantities.extrema(\"density\"))\n",
-      "    times.append(pf.current_time.in_units(\"Gyr\"))\n",
+      "    times.append(ds.current_time.in_units(\"Gyr\"))\n",
       "rho_ex = np.array(rho_ex)"
      ],
      "language": "python",
@@ -211,7 +211,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
+      "ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
       "v, c = ds.find_max(\"density\")\n",
       "sl = ds.slice(0, c[0])\n",
       "print sl[\"index\", \"x\"], sl[\"index\", \"z\"], sl[\"pdx\"]\n",
@@ -361,4 +361,4 @@
    "metadata": {}
   }
  ]
-}
\ No newline at end of file
+}

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 doc/source/cookbook/embedded_javascript_animation.ipynb
--- a/doc/source/cookbook/embedded_javascript_animation.ipynb
+++ b/doc/source/cookbook/embedded_javascript_animation.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:578ca4fbc3831e9093489f06939abce9cde845b6cf75d901a3c429abc270f550"
+  "signature": "sha256:4f7d409d15ecc538096d15212923312e2cb4a911ebf5a9cf7edc9bd63a8335e9"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -47,7 +47,8 @@
       "import matplotlib.pyplot as plt\n",
       "from matplotlib.backends.backend_agg import FigureCanvasAgg\n",
       "\n",
-      "prj = ProjectionPlot(load('Enzo_64/DD0000/data0000'), 0, 'density', weight_field='density',width=(180,'mpccm'))\n",
+      "prj = ProjectionPlot(load('Enzo_64/DD0000/data0000'), 0, 'density', weight_field='density',width=(180,'Mpccm'))\n",
+      "prj.set_figure_size(5)\n",
       "prj.set_zlim('density',1e-32,1e-26)\n",
       "fig = prj.plots['density'].figure\n",
       "fig.canvas = FigureCanvasAgg(fig)\n",

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 doc/source/cookbook/embedded_webm_animation.ipynb
--- a/doc/source/cookbook/embedded_webm_animation.ipynb
+++ b/doc/source/cookbook/embedded_webm_animation.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:67844f8c2c184fc51aa62440cc05623ee85f252edde6faaa0d7b6617c3f33dfe"
+  "signature": "sha256:0090176ae6299b2310bf613404cbfbb42a54e19a03d1469d1429a01170a63aa0"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -96,7 +96,7 @@
       "import matplotlib.pyplot as plt\n",
       "from matplotlib.backends.backend_agg import FigureCanvasAgg\n",
       "\n",
-      "prj = ProjectionPlot(load('Enzo_64/DD0000/data0000'), 0, 'density', weight_field='density',width=(180,'mpccm'))\n",
+      "prj = ProjectionPlot(load('Enzo_64/DD0000/data0000'), 0, 'density', weight_field='density',width=(180,'Mpccm'))\n",
       "prj.set_zlim('density',1e-32,1e-26)\n",
       "fig = prj.plots['density'].figure\n",
       "fig.canvas = FigureCanvasAgg(fig)\n",

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -367,35 +367,11 @@
 yt also supports loading Tipsy data.  Many of its characteristics are similar
 to how Gadget data is loaded; specifically, it shares its definition of
 indexing and mesh-identification with that described in
-:ref:`particle-indexing-criteria`.  However, unlike Gadget, the Tipsy frontend
-has not yet implemented header specifications, field specifications, or
-particle type specifications.  *These are all excellent projects for new
-contributors!*
-
-Tipsy data cannot be automatically detected.  You can load it with a command
-similar to the following:
+:ref:`particle-indexing-criteria`.  
 
 .. code-block:: python
 
-    ds = TipsyDataset('test.00169',
-        parameter_file='test.param',
-        endian = '<',
-        domain_left_edge = domain_left_edge,
-        domain_right_edge = domain_right_edge,
-    )
-
-Not all of these arguments are necessary; additionally, yt accepts the
-arguments ``n_ref``, ``over_refine_factor``, ``cosmology_parameters``, and
-``unit_base``.  By default, yt will not utilize a parameter file, and by
-default it will assume the data is "big" endian (`>`).  Optionally, you may
-specify ``field_dtypes``, which describe the size of various fields.  For
-example, if you have stored positions as 64-bit floats, you can specify this
-with:
-
-.. code-block:: python
-
-    ds = TipsyDataset("./halo1e11_run1.00400", endian="<",
-                           field_dtypes = {"Coordinates": "d"})
+    ds = load("./halo1e11_run1.00400")
 
 .. _specifying-cosmology-tipsy:
 

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -604,13 +604,12 @@
 
    ~yt.data_objects.derived_quantities.DerivedQuantity
    ~yt.data_objects.derived_quantities.DerivedQuantityCollection
-   ~yt.data_objects.derived_quantities.WeightedAverage
-   ~yt.data_objects.derived_quantities.TotalValue
+   ~yt.data_objects.derived_quantities.WeightedAverageQuantity
+   ~yt.data_objects.derived_quantities.TotalQuantity
    ~yt.data_objects.derived_quantities.TotalMass
    ~yt.data_objects.derived_quantities.CenterOfMass
    ~yt.data_objects.derived_quantities.BulkVelocity
    ~yt.data_objects.derived_quantities.AngularMomentumVector
-   ~yt.data_objects.derived_quantities.ParticleAngularMomentumVector
    ~yt.data_objects.derived_quantities.Extrema
    ~yt.data_objects.derived_quantities.MaxLocation
    ~yt.data_objects.derived_quantities.MinLocation
@@ -719,12 +718,11 @@
 
    ~yt.config.YTConfigParser
    ~yt.utilities.parameter_file_storage.ParameterFileStore
-   ~yt.data_objects.data_containers.FakeGridForParticles
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ObjectIterator
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelAnalysisInterface
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelObjectIterator
-   ~yt.analysis_modules.index_subset.index_subset.ConstructedRootGrid
-   ~yt.analysis_modules.index_subset.index_subset.ExtractedHierarchy
+   ~yt.analysis_modules.hierarchy_subset.hierarchy_subset.ConstructedRootGrid
+   ~yt.analysis_modules.hierarchy_subset.hierarchy_subset.ExtractedHierarchy
 
 
 Testing Infrastructure

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -9,9 +9,9 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'), center='max')
-   slc.annotate_arrow((0.53, 0.53, 0.53), 1/pf['kpc'])
+   slc.annotate_arrow((0.5, 0.5, 0.5), (1, 'kpc'))
    slc.save()
 
 -------------
@@ -30,7 +30,7 @@
 
    pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    data_source = pf.disk([0.5, 0.5, 0.5], [0., 0., 1.],
-                           8./pf.units['kpc'], 1./pf.units['kpc'])
+                           (8., 'kpc'), (1., 'kpc'))
 
    c_min = 10**na.floor(na.log10(data_source['density']).min()  )
    c_max = 10**na.floor(na.log10(data_source['density']).max()+1)
@@ -79,7 +79,7 @@
    from yt.mods import *
    pf = load("Enzo_64/DD0043/data0043")
    s = OffAxisSlicePlot(pf, [1,1,0], ["density"], center="c")
-   s.annotate_cquiver('CuttingPlaneVelocityX', 'CuttingPlaneVelocityY', 10)
+   s.annotate_cquiver('cutting_plane_velocity_x', 'cutting_plane_velocity_y', 10)
    s.zoom(1.5)
    s.save()
 
@@ -97,7 +97,7 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'), center='max')
    slc.annotate_grids()
    slc.save()
@@ -153,7 +153,7 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    p = ProjectionPlot(pf, 'z', 'density', center='m', width=(10, 'kpc'))
    p.annotate_image_line((0.3, 0.4), (0.8, 0.9), plot_args={'linewidth':5})
    p.save()
@@ -169,7 +169,7 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    p = ProjectionPlot(pf, 'z', 'density', center='m', width=(10, 'kpc'))
    p.annotate_line([-6, -4, -2, 0, 2, 4, 6], [3.6, 1.6, 0.4, 0, 0.4, 1.6, 3.6], plot_args={'linewidth':5})
    p.save()
@@ -212,9 +212,9 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    s = SlicePlot(pf, 'z', 'density', center='m', width=(10, 'kpc'))
-   s.annotate_marker([0.53, 0.53, 0.53], plot_args={'s':10000})
+   s.annotate_marker([0.5, 0.5, 0.5], plot_args={'s':10000})
    s.save()   
 
 -------------
@@ -237,7 +237,7 @@
    from yt.mods import *
    pf = load("Enzo_64/DD0043/data0043")
    p = ProjectionPlot(pf, "x", "density", center='m', width=(10, 'Mpc'))
-   p.annotate_particles(10/pf['Mpc'])
+   p.annotate_particles((10, 'Mpc'))
    p.save()
 
 -------------
@@ -253,9 +253,9 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    p = ProjectionPlot(pf, 'z', 'density', center='m', width=(10, 'kpc'))
-   p.annotate_point([0.53, 0.526, 0.53], "What's going on here?", text_args={'size':'xx-large', 'color':'w'})
+   p.annotate_point([0.5, 0.496, 0.5], "What's going on here?", text_args={'size':'xx-large', 'color':'w'})
    p.save()
 
 -------------
@@ -273,8 +273,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   p = ProjectionPlot(pf, 'z', 'density', center=[0.53, 0.53, 0.53], 
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   p = ProjectionPlot(pf, 'z', 'density', center=[0.5, 0.5, 0.5], 
                       weight_field='density', width=(20, 'kpc'))
    p.annotate_quiver('velocity_x', 'velocity_y', 16)
    p.save()
@@ -292,9 +292,9 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   p = ProjectionPlot(pf, 'z', 'density', center=[0.53, 0.53, 0.53], width=(20, 'kpc'))
-   p.annotate_sphere([0.53, 0.53, 0.53], 2/pf['kpc'], {'fill':True})
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   p = ProjectionPlot(pf, 'z', 'density', center='c', width=(20, 'kpc'))
+   p.annotate_sphere([0.5, 0.5, 0.5], (2, 'kpc'), {'fill':True})
    p.save()
 
 -------------
@@ -314,8 +314,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   s = SlicePlot(pf, 'z', 'density', center=[0.53, 0.53, 0.53], width=(20, 'kpc'))
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   s = SlicePlot(pf, 'z', 'density', center='c', width=(20, 'kpc'))
    s.annotate_streamlines('velocity_x', 'velocity_y')
    s.save()
 
@@ -333,9 +333,9 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    s = SlicePlot(pf, 'z', 'density', center='m', width=(10, 'kpc'))
-   s.annotate_text((0.53, 0.53), 'Sample text', text_args={'size':'xx-large', 'color':'w'})
+   s.annotate_text((0.5, 0.5), 'Sample text', text_args={'size':'xx-large', 'color':'w'})
    s.save()
 
 -------------
@@ -349,8 +349,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   p = ProjectionPlot(pf, 'z', 'density', center=[0.53, 0.53, 0.53], width=(20, 'kpc'))
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   p = ProjectionPlot(pf, 'z', 'density', center='c', width=(20, 'kpc'))
    p.annotate_title('Density plot')
    p.save()
 
@@ -373,7 +373,7 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    p = SlicePlot(pf, 'z', 'density', center='m', width=(10, 'kpc'))
    p.annotate_velocity()
    p.save()

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 doc/source/visualizing/manual_plotting.rst
--- a/doc/source/visualizing/manual_plotting.rst
+++ b/doc/source/visualizing/manual_plotting.rst
@@ -39,13 +39,13 @@
    pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
    c = pf.h.find_max('density')[1]
-   proj = pf.proj(0, 'density')
+   proj = pf.proj('density', 0)
 
-   width = 10/pf['kpc'] # we want a 1.5 mpc view
+   width = (10, 'kpc') # we want a 10 kpc view
    res = [1000, 1000] # create an image with 1000x1000 pixels
    frb = proj.to_frb(width, res, center=c)
 
-   P.imshow(frb['density'])
+   P.imshow(np.array(frb['density']))
    P.savefig('my_perfect_figure.png')
    
 The FRB is a very small object that can be deleted and recreated quickly (in
@@ -76,10 +76,10 @@
    ray = pf.ortho_ray(ax, (c[1], c[2])) # cutting through the y0,z0 such that we hit the max density
 
    P.subplot(211)
-   P.semilogy(ray['x'], ray['density'])
+   P.semilogy(np.array(ray['x']), np.array(ray['density']))
    P.ylabel('density')
    P.subplot(212)
-   P.semilogy(ray['x'], ray['temperature'])
+   P.semilogy(np.array(ray['x']), np.array(ray['temperature']))
    P.xlabel('x')
    P.ylabel('temperature')
 

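Since the projection object caches its 2D data, the FRB above can be thrown
away and rebuilt at a different width almost for free. A minimal sketch of
that workflow, assuming the same dataset and pylab alias as the surrounding
docs:

    from yt.mods import *
    import matplotlib.pylab as P

    pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    c = pf.h.find_max('density')[1]
    proj = pf.proj('density', 0)

    # Rebuilding the FRB reuses the cached projection; only the
    # pixelization is redone, so looping over widths is cheap.
    for width in [(10, 'kpc'), (100, 'kpc')]:
        frb = proj.to_frb(width, [1000, 1000], center=c)
        P.clf()
        P.imshow(np.array(frb['density']))
        P.savefig('frb_%i%s.png' % width)
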
diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -108,13 +108,13 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'density', center=[0.53, 0.53, 0.53], width=(20,'kpc'))
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', center=[0.5, 0.5, 0.5], width=(20,'kpc'))
    slc.save()
 
 The above example will display an annotated plot of a slice of the
 Density field in a 20 kpc square window centered on the coordinate
-(0.53,0.53) in the x-y plane.  The axis to slice along is keyed to the
+(0.5, 0.5, 0.5) in the x-y plane.  The axis to slice along is keyed to the
 letter 'z', corresponding to the z-axis.  Finally, the image is saved to
 a png file.
 
@@ -124,18 +124,19 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z','Pressure', center=[0.53, 0.53, 0.53])
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'pressure', center='c')
    slc.save()
    slc.zoom(30)
    slc.save('zoom')
 
-will save a slice of the pressure field in a slice along the z
+will save a plot of the pressure field in a slice along the z
 axis across the entire simulation domain followed by another plot that
 is zoomed in by a factor of 30 with respect to the original
-image. With these sorts of manipulations, one can easily pan and zoom
-onto an interesting region in the simulation and adjust the
-boundaries of the region to visualize on the fly.
+image. Both plots will be centered on the center of the simulation box. 
+With these sorts of manipulations, one can easily pan and zoom onto an 
+interesting region in the simulation and adjust the boundaries of the
+region to visualize on the fly.
 
 A slice object can also add annotations like a title, an overlying
 quiver plot, the location of grid boundaries, halo-finder annotations,
@@ -145,12 +146,12 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
    slc.annotate_grids()
    slc.save()
 
-will plot the VorticitySquared in a 10 kiloparsec slice through the
+will plot the density field in a 10 kiloparsec slice through the
 z-axis, centered by default on the center of the simulation domain.
 Before saving the plot, the script annotates it with the grid
 boundaries, which are drawn as thick black lines by default.
@@ -174,9 +175,9 @@
 .. python-script::
  
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   prj = ProjectionPlot(pf, 2, 'density', center=[0.53, 0.53, 0.53],
-                        width=(25, 'kpc'), weight_field=None)
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   prj = ProjectionPlot(pf, 2, 'density', width=(25, 'kpc'), 
+                        weight_field=None)
    prj.save()
 
 will create a projection of the density field along the z axis, plot it,
@@ -205,11 +206,11 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    L = [1,1,0] # vector normal to cutting plane
    north_vector = [-1,1,0]
-   cut = OffAxisSlicePlot(pf, L, 'density', width=(25, 'kpc'),
-                          center=[0.53, 0.53, 0.53], north_vector=north_vector)
+   cut = OffAxisSlicePlot(pf, L, 'density', width=(25, 'kpc'), 
+                          north_vector=north_vector)
    cut.save()
 
 creates an off-axis slice in the plane perpendicular to ``L``,
@@ -246,11 +247,11 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    L = [1,1,0] # vector normal to cutting plane
    north_vector = [-1,1,0]
    W = [0.02, 0.02, 0.02]
-   c = [0.53, 0.53, 0.53]
+   c = [0.5, 0.5, 0.5]
    N = 512
    image = off_axis_projection(pf, c, L, W, N, "density")
    write_image(na.log10(image), "%s_offaxis_projection.png" % pf)
@@ -268,11 +269,10 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    L = [1,1,0] # vector normal to cutting plane
    north_vector = [-1,1,0]
    prj = OffAxisProjectionPlot(pf,L,'density',width=(25, 'kpc'), 
-                               center=[0.53, 0.53, 0.53], 
                                north_vector=north_vector)
    prj.save()
 
@@ -292,8 +292,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
    slc.save()
 
 Panning and zooming
@@ -307,9 +307,10 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
-   slc.pan((2/pf['kpc'],2/pf['kpc']))
+   from yt.units import kpc
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
+   slc.pan((2*kpc, 2*kpc))
    slc.save()
 
 :class:`~yt.visualization.plot_window.SlicePlot.pan_rel` accepts deltas in units relative
@@ -318,8 +319,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
    slc.pan_rel((0.1, -0.1))
    slc.save()
 
@@ -328,8 +329,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
    slc.zoom(2)
    slc.save()
 
@@ -342,8 +343,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
    slc.set_axes_unit('Mpc')
    slc.save()
 
@@ -356,9 +357,9 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
-   slc.set_center((0.53, 0.53))
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
+   slc.set_center((0.5, 0.5))
    slc.save()
 
 Fonts
@@ -369,8 +370,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
    slc.set_font({'family': 'sans-serif', 'style': 'italic','weight': 'bold', 'size': 24})
    slc.save()
 
@@ -388,9 +389,9 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
-   slc.set_cmap('VorticitySquared', 'RdBu_r')
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
+   slc.set_cmap('density', 'RdBu_r')
    slc.save()
 
 The :class:`~yt.visualization.plot_window.SlicePlot.set_log` function accepts a field name
@@ -400,9 +401,9 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
-   slc.set_log('VorticitySquared', False)
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
+   slc.set_log('density', False)
    slc.save()
 
 Lastly, the :class:`~yt.visualization.plot_window.SlicePlot.set_zlim` function makes it
@@ -411,9 +412,9 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
-   slc.set_zlim('VorticitySquared', 1e-30, 1e-25)
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
+   slc.set_zlim('density', 1e-30, 1e-25)
    slc.save()
 
 Set the size of the plot
@@ -427,8 +428,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
    slc.set_window_size(10)
    slc.save()
 
@@ -438,8 +439,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
    slc.set_buff_size(1600)
    slc.save()
 
@@ -464,8 +465,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   my_galaxy = pf.disk([0.53, 0.53, 0.53], [0.0, 0.0, 1.0], 0.01, 0.003)
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   my_galaxy = pf.disk([0.5, 0.5, 0.5], [0.0, 0.0, 1.0], 0.01, 0.003)
    plot = ProfilePlot(my_galaxy, "density", ["temperature"])
    plot.save()
 
@@ -483,8 +484,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   my_sphere = pf.sphere([0.53, 0.53, 0.53], (100, "pc"))
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   my_sphere = pf.sphere([0.5, 0.5, 0.5], (100, "kpc"))
    plot = ProfilePlot(my_sphere, "temperature", ["cell_mass"],
                       weight_field=None)
    plot.save()
@@ -589,7 +590,7 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    my_sphere = pf.sphere("c", (50, "kpc"))
    plot = PhasePlot(my_sphere, "density", "temperature", ["cell_mass"],
                     weight_field=None)
@@ -602,9 +603,9 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    my_sphere = pf.sphere("c", (50, "kpc"))
-   plot = PhasePlot(my_sphere, "density", "temperature", ["HI_Fraction"],
+   plot = PhasePlot(my_sphere, "density", "temperature", ["H_fraction"],
                     weight_field="cell_mass")
    plot.save()
 
@@ -646,7 +647,7 @@
 .. notebook-cell::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    p = ProjectionPlot(pf, "x", "density", center='m', width=(10,'kpc'),
                       weight_field='density')
    p.show()

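Pulled together, the modernized examples in this file amount to a
plot-adjustment workflow roughly like the following (a sketch over the same
dataset, using only the calls shown above):

    from yt.mods import *
    from yt.units import kpc

    pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    slc = SlicePlot(pf, 'z', 'density', width=(10, 'kpc'))
    slc.pan((2*kpc, 2*kpc))                 # absolute, unit-aware pan
    slc.zoom(2)                             # relative zoom factor
    slc.set_cmap('density', 'RdBu_r')       # per-field colormap
    slc.set_zlim('density', 1e-30, 1e-25)   # per-field color limits
    slc.save('adjusted_slice')
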
diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -142,8 +142,7 @@
 :class:`~yt.visualization.volume_rendering.camera.Camera`, which represents a
 viewpoint into a volume.  The camera optionally accepts a volume, which can be
 either an instance of
-:class:`~yt.visualization.volume_rendering.grid_partitioner.HomogenizedVolume`
-or an instance of :class:`~yt.utilities.amr_kdtree.amr_kdtree.AMRKDTree` that
+:class:`~yt.utilities.amr_kdtree.amr_kdtree.AMRKDTree` that
 has already been initialized.  If one is not supplied, the camera will generate
 one itself.  This can also be specified if you wish to save bricks between
 repeated calls, thus saving considerable amounts of time.

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -74,25 +74,84 @@
 
 __version__ = "3.0-dev"
 
-def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
-    import nose, os, sys
-    from yt.config import ytcfg
-    nose_argv = sys.argv
-    nose_argv += ['--exclude=answer_testing','--detailed-errors']
-    if verbose:
-        nose_argv.append('-v')
-    if run_answer_tests:
-        nose_argv.append('--with-answer-testing')
-    if answer_big_data:
-        nose_argv.append('--answer-big-data')
-    log_suppress = ytcfg.getboolean("yt","suppressStreamLogging")
-    ytcfg.set("yt","suppressStreamLogging", 'True')
-    initial_dir = os.getcwd()
-    yt_file = os.path.abspath(__file__)
-    yt_dir = os.path.dirname(yt_file)
-    os.chdir(yt_dir)
-    try:
-        nose.run(argv=nose_argv)
-    finally:
-        os.chdir(initial_dir)
-        ytcfg.set("yt","suppressStreamLogging", str(log_suppress))
+# First module imports
+import numpy as np # For modern purposes
+import numpy # In case anyone wishes to use it by name
+
+from yt.funcs import \
+    iterable, \
+    get_memory_usage, \
+    print_tb, \
+    rootonly, \
+    insert_ipython, \
+    get_pbar, \
+    only_on_root, \
+    is_root, \
+    get_version_stack, \
+    get_yt_supp, \
+    get_yt_version, \
+    parallel_profile, \
+    enable_plugins, \
+    memory_checker, \
+    deprecated_class
+from yt.utilities.logger import ytLogger as mylog
+
+import yt.utilities.physical_constants as physical_constants
+import yt.units as units
+from yt.units.yt_array import YTArray, YTQuantity
+
+from yt.fields.api import \
+    field_plugins, \
+    DerivedField, \
+    FieldDetector, \
+    FieldInfoContainer, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType, \
+    add_field, \
+    derived_field
+
+from yt.data_objects.api import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
+    DatasetSeries, \
+    ImageArray, particle_filter, create_profile, \
+    Profile1D, Profile2D, Profile3D
+
+from yt.frontends.api import _frontend_container
+frontends = _frontend_container()
+
+from yt.frontends.stream.api import \
+    load_uniform_grid, load_amr_grids, \
+    load_particles, load_hexahedral_mesh, load_octree
+
+# For backwards compatibility
+GadgetDataset = frontends.sph.GadgetDataset
+GadgetStaticOutput = deprecated_class(GadgetDataset)
+TipsyDataset = frontends.sph.TipsyDataset
+TipsyStaticOutput = deprecated_class(TipsyDataset)
+
+# Now individual component imports from the visualization API
+from yt.visualization.api import \
+    PlotCollection, PlotCollectionInteractive, \
+    get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
+    write_bitmap, write_image, \
+    apply_colormap, scale_image, write_projection, \
+    SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
+    ProjectionPlot, OffAxisProjectionPlot, \
+    show_colormaps, ProfilePlot, PhasePlot
+
+from yt.visualization.volume_rendering.api import \
+    off_axis_projection
+
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_objects, enable_parallelism
+
+from yt.convenience import \
+    load, simulation
+
+# Import some helpful math utilities
+from yt.utilities.math_utils import \
+    ortho_find, quartiles, periodic_position
+

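With run_nose removed in favor of real top-level imports, the bare yt
namespace should now support usage along these lines (a sketch; the exact
exports are whatever the APIs imported above provide):

    import yt

    pf = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    slc = yt.SlicePlot(pf, 'z', 'density', width=(10, 'kpc'))
    slc.save()

    # Frontend api modules hang off the container built above:
    yt.frontends.enzo                        # yt.frontends.enzo.api
    arr = yt.YTArray([1.0, 2.0, 3.0], 'kpc')
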
diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -63,14 +63,13 @@
     HaloProfiler, \
     FakeProfile
 
-from .index_subset.api import \
+from .hierarchy_subset.api import \
     ConstructedRootGrid, \
     AMRExtractedGridProxy, \
     ExtractedHierarchy, \
     ExtractedParameterFile
 
 from .level_sets.api import \
-    coalesce_join_tree, \
     identify_contours, \
     Clump, \
     find_clumps, \

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -23,9 +23,9 @@
     parallel_blocking_call
 from yt.utilities.physical_constants import \
     cm_per_mpc, \
-    mass_sun_cgs, \
-    rho_crit_now
-
+    mass_sun_cgs
+from yt.utilities.physical_ratios import \
+    rho_crit_g_cm3_h2
 
 class HaloMassFcn(ParallelAnalysisInterface):
     """
@@ -256,7 +256,7 @@
 
         # rho0 in units of h^2 Msolar/Mpc^3
         rho0 = self.omega_matter0 * \
-                rho_crit_now * cm_per_mpc**3 / mass_sun_cgs
+                rho_crit_g_cm3_h2 * cm_per_mpc**3 / mass_sun_cgs
 
         # spacing in mass of our sigma calculation
         dm = (float(self.log_mass_max) - self.log_mass_min)/self.num_sigma_bins;
@@ -293,7 +293,7 @@
         # constants - set these before calling any functions!
         # rho0 in units of h^2 Msolar/Mpc^3
         rho0 = self.omega_matter0 * \
-                rho_crit_now * cm_per_mpc**3 / mass_sun_cgs
+            rho_crit_g_cm3_h2 * cm_per_mpc**3 / mass_sun_cgs
         self.delta_c0 = 1.69;  # critical density for turnaround (Press-Schechter)
         
         nofmz_cum = 0.0;  # keep track of cumulative number density

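The renamed constant carries its units in its name (h^2 g/cm^3), which makes
the rho0 conversion in both hunks easy to sanity-check. With approximate
values assumed here only for the arithmetic:

    rho_crit_g_cm3_h2 = 1.8788e-29  # critical density today, h**2 g/cm**3
    cm_per_mpc = 3.0857e24          # centimeters per megaparsec
    mass_sun_cgs = 1.989e33         # grams per solar mass

    omega_matter0 = 0.27
    rho0 = omega_matter0 * rho_crit_g_cm3_h2 * cm_per_mpc**3 / mass_sun_cgs
    # The critical density alone comes to ~2.775e11 h**2 Msun/Mpc**3,
    # so rho0 is ~7.5e10 h**2 Msun/Mpc**3 for omega_matter0 = 0.27.
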
diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -44,8 +44,9 @@
     parallel_root_only, \
     parallel_objects
 from yt.utilities.physical_constants import \
-    mass_sun_cgs, \
-    rho_crit_now
+    mass_sun_cgs
+from yt.utilities.physical_ratios import \
+    rho_crit_g_cm3_h2
 from yt.visualization.fixed_resolution import \
     FixedResolutionBuffer
 from yt.visualization.image_writer import write_image
@@ -932,7 +933,7 @@
         if 'ActualOverdensity' in profile.keys():
             return
 
-        rhocritnow = rho_crit_now * self.pf.hubble_constant**2 # g cm^-3
+        rhocritnow = rho_crit_g_cm3_h2 * self.pf.hubble_constant**2 # g cm^-3
         rho_crit = rhocritnow * ((1.0 + self.pf.current_redshift)**3.0)
         if not self.use_critical_density: rho_crit *= self.pf.omega_matter
 

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/analysis_modules/hierarchy_subset/api.py
--- a/yt/analysis_modules/hierarchy_subset/api.py
+++ b/yt/analysis_modules/hierarchy_subset/api.py
@@ -1,5 +1,5 @@
 """
-API for index_subset
+API for hierarchy_subset
 
 
 
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from .index_subset import \
+from .hierarchy_subset import \
     ConstructedRootGrid, \
     AMRExtractedGridProxy, \
     ExtractedHierarchy, \

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/analysis_modules/hierarchy_subset/setup.py
--- a/yt/analysis_modules/hierarchy_subset/setup.py
+++ b/yt/analysis_modules/hierarchy_subset/setup.py
@@ -7,7 +7,7 @@
 
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('index_subset', parent_package, top_path)
+    config = Configuration('hierarchy_subset', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -33,11 +33,9 @@
 from yt import units
 from yt.units.yt_array import YTQuantity
 import h5py
-try:
-    import astropy.io.fits as pyfits
-    import astropy.wcs as pywcs
-except ImportError:
-    pass
+from yt.frontends.fits.data_structures import ap
+pyfits = ap.pyfits
+pywcs = ap.pywcs
 
 comm = communication_system.communicators[-1]
 

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -17,13 +17,14 @@
 from yt import units
 import h5py
 try:
-    import astropy.io.fits as pyfits
     import xspec
     from scipy.integrate import cumtrapz
     from scipy import stats        
 except ImportError:
     pass
-    
+from yt.frontends.fits.data_structures import ap
+pyfits = ap.pyfits
+
 from yt.utilities.physical_constants import hcgs, clight, erg_per_keV, amu_cgs
 
 hc = (hcgs*clight).in_units("keV*angstrom")

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -53,7 +53,7 @@
 
     def _beta_par_squared(field, data):
         return data["gas","beta_par"]**2/data["gas","density"]
-    registry.add_field("gas","beta_par_squared",
+    registry.add_field(("gas","beta_par_squared"),
                        function = _beta_par_squared,
                        units="g/cm**3")
 
@@ -148,7 +148,9 @@
         L[axis] = 1.0
 
         beta_par = generate_beta_par(L)
-        self.pf.field_info.add_field(name=("gas","beta_par"), function=beta_par, units="g/cm**3")
+        self.pf.field_info.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
+        proj = self.pf.h.proj("density", axis, center=ctr, data_source=source)
+        proj.set_field_parameter("axis", axis)
         frb = proj.to_frb(width, nx)
         dens = frb["density"]
         Te = frb["t_sz"]/dens
@@ -211,7 +213,7 @@
             raise NotImplementedError
 
         beta_par = generate_beta_par(L)
-        self.pf.field_info.add_field(name=("gas","beta_par"), function=beta_par, units="g/cm**3")
+        self.pf.field_info.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
 
         dens    = off_axis_projection(self.pf, ctr, L, w, nx, "density")
         Te      = off_axis_projection(self.pf, ctr, L, w, nx, "t_sz")/dens

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -216,7 +216,7 @@
     """
     _key_fields = YTSelectionContainer2D._key_fields + ['weight_field']
     _type_name = "proj"
-    _con_args = ('axis', 'weight_field')
+    _con_args = ('axis', 'field', 'weight_field')
     _container_fields = ('px', 'py', 'pdx', 'pdy', 'weight_field')
     def __init__(self, field, axis, weight_field = None,
                  center = None, pf = None, data_source=None, 
@@ -241,6 +241,10 @@
         return self.data_source.blocks
 
     @property
+    def field(self):
+        return [k for k in self.field_data.keys() if k not in self._container_fields]
+
+    @property
     def _mrep(self):
         return MinimalProjectionData(self)
 

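Adding 'field' to _con_args means a projection must be able to report its
own fields when it is reconstructed; the new property derives that list from
field_data instead of storing it separately. In use, something like this
(output hypothetical):

    proj = pf.proj('density', 0)
    proj['density']        # fill the projection
    print proj.field       # e.g. [('gas', 'density')]; the container
                           # fields (px, py, pdx, pdy, ...) are filtered out
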
diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -802,6 +802,8 @@
             if center is None:
                 center = (self.pf.domain_right_edge
                         + self.pf.domain_left_edge)/2.0
+        elif iterable(center) and not isinstance(center, YTArray):
+            center = self.pf.arr(center, 'code_length')
         if iterable(width):
             w, u = width
             width = self.pf.arr(w, input_units = u)
@@ -1262,8 +1264,7 @@
     def _get_cut_mask(self, grid, field=None):
         if self._is_fully_enclosed(grid):
             return True # We do not want child masking here
-        if not isinstance(grid, (FakeGridForParticles,)) \
-             and grid.id in self._cut_masks:
+        if grid.id in self._cut_masks:
             return self._cut_masks[grid.id]
         # If we get this far, we have to generate the cut_mask.
         return self._get_level_mask(self.regions, grid)
@@ -1320,6 +1321,5 @@
                     this_cut_mask)
             if item == "OR":
                 np.bitwise_or(this_cut_mask, level_masks[i+1], this_cut_mask)
-        if not isinstance(grid, FakeGridForParticles):
-            self._cut_masks[grid.id] = this_cut_mask
+        self._cut_masks[grid.id] = this_cut_mask
         return this_cut_mask

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -279,7 +279,10 @@
                 self, dataset_type=self.dataset_type)
             # Now we do things that we need an instantiated index for
             # ...first off, we create our field_info now.
+            oldsettings = np.geterr()
+            np.seterr(all='ignore')
             self.create_field_info()
+            np.seterr(**oldsettings)
         return self._instantiated_index
     
     _index_proxy = None
@@ -361,11 +364,16 @@
         # No string lookups here, we need an actual union.
         f = self.particle_fields_by_type
         fields = set_intersection([f[s] for s in union
-                                   if s in self.particle_types_raw])
+                                   if s in self.particle_types_raw
+                                   and len(f[s]) > 0])
         for field in fields:
             units = set([])
             for s in union:
-                units.add(self.field_units.get((s, field), ""))
+                # First we check our existing fields for units
+                funits = self._get_field_info(s, field).units
+                # Then we override with field_units settings.
+                funits = self.field_units.get((s, field), funits)
+                units.add(funits)
             if len(units) == 1:
                 self.field_units[union.name, field] = list(units)[0]
         self.particle_types += (union.name,)

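The np.seterr guard is the standard save/restore pattern for NumPy's
floating-point error state. In isolation (do_field_setup() is a hypothetical
stand-in for create_field_info(), and a try/finally is added for safety):

    import numpy as np

    oldsettings = np.geterr()     # remember the caller's error handling
    np.seterr(all='ignore')       # silence divide/invalid warnings for setup
    try:
        do_field_setup()          # hypothetical noisy field setup
    finally:
        np.seterr(**oldsettings)  # restore exactly what was there before
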
diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -188,6 +188,8 @@
             return rv
         elif param == "fof_groups":
             return None
+        elif param == "mu":
+            return 1.0
         else:
             return 0.0
 

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -66,8 +66,9 @@
     def setup_fluid_fields(self):
         pass
 
-    def setup_particle_fields(self, ptype):
+    def setup_particle_fields(self, ptype, ftype='gas', num_neighbors=64 ):
         for f, (units, aliases, dn) in sorted(self.known_particle_fields):
+            units = self.pf.field_units.get((ptype, f), units)
             self.add_output_field((ptype, f),
                 units = units, particle_type = True, display_name = dn)
             if (ptype, f) not in self.field_list:
@@ -99,7 +100,9 @@
             self.add_output_field(field, 
                                   units = self.pf.field_units.get(field, ""),
                                   particle_type = True)
-        self.setup_smoothed_fields(ptype)
+        self.setup_smoothed_fields(ptype, 
+                                   num_neighbors=num_neighbors,
+                                   ftype=ftype)
 
     def setup_smoothed_fields(self, ptype, num_neighbors = 64, ftype = "gas"):
         # We can in principle compute this, but it is not yet implemented.

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/fields/fluid_vector_fields.py
--- a/yt/fields/fluid_vector_fields.py
+++ b/yt/fields/fluid_vector_fields.py
@@ -28,8 +28,9 @@
     just_one
 
 from .vector_operations import \
-     create_magnitude_field
-    
+     create_magnitude_field, \
+     create_squared_field
+
 @register_field_plugin
 def setup_fluid_vector_fields(registry, ftype = "gas", slice_info = None):
     # slice_info would be the left, the right, and the factor.
@@ -126,6 +127,9 @@
     create_magnitude_field(registry, "vorticity", "1/s",
                            ftype=ftype, slice_info=slice_info,
                            validators=vort_validators)
+    create_squared_field(registry, "vorticity", "1/s**2",
+                         ftype=ftype, slice_info=slice_info,
+                         validators=vort_validators)
 
     def _vorticity_stretching_x(field, data):
         return data[ftype, "velocity_divergence"] * data[ftype, "vorticity_x"]

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -59,34 +59,46 @@
             * data[ftype,'density']
     return _density
 
-def add_species_field_by_density(registry, ftype, species):
+def add_species_field_by_density(registry, ftype, species, 
+                                 particle_type = False):
     """
     This takes a field registry, a fluid type, and a species name and then
     adds the other fluids based on that.  This assumes that the field
     "SPECIES_density" already exists and refers to mass density.
     """
     registry.add_field((ftype, "%s_fraction" % species), 
-                        function = _create_fraction_func(ftype, species),
-                        units = "")
+                       function = _create_fraction_func(ftype, species),
+                       particle_type = particle_type,
+                       units = "")
+
     registry.add_field((ftype, "%s_mass" % species),
-                        function = _create_mass_func(ftype, species),
-                        units = "g")
+                       function = _create_mass_func(ftype, species),
+                       particle_type = particle_type,
+                       units = "g")
+
     registry.add_field((ftype, "%s_number_density" % species),
-                        function = _create_number_density_func(ftype, species),
-                        units = "cm**-3")
+                       function = _create_number_density_func(ftype, species),
+                       particle_type = particle_type,
+                       units = "cm**-3")
 
-def add_species_field_by_fraction(registry, ftype, species):
+def add_species_field_by_fraction(registry, ftype, species, 
+                                  particle_type = False):
     """
     This takes a field registry, a fluid type, and a species name and then
     adds the other fluids based on that.  This assumes that the field
     "SPECIES_fraction" already exists and refers to mass fraction.
     """
     registry.add_field((ftype, "%s_density" % species), 
-                        function = _create_density_func(ftype, species),
-                        units = "g/cm**3")
+                       function = _create_density_func(ftype, species),
+                       particle_type = particle_type,
+                       units = "g/cm**3")
+
     registry.add_field((ftype, "%s_mass" % species),
-                        function = _create_mass_func(ftype, species),
-                        units = "g")
+                       function = _create_mass_func(ftype, species),
+                       particle_type = particle_type,
+                       units = "g")
+
     registry.add_field((ftype, "%s_number_density" % species),
-                        function = _create_number_density_func(ftype, species),
-                        units = "cm**-3")
+                       function = _create_number_density_func(ftype, species),
+                       particle_type = particle_type,
+                       units = "cm**-3")

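Both helpers still assume the anchor field already exists; the new flag just
forwards through to add_field. A usage sketch, with the registry and species
names hypothetical:

    # Given an existing ("gas", "HI_density") field, register the derived
    # fraction/mass/number-density fields for that species:
    add_species_field_by_density(registry, "gas", "HI")

    # For a particle species whose ("io", "He_fraction") field exists:
    add_species_field_by_fraction(registry, "io", "He", particle_type=True)
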
diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/fields/vector_operations.py
--- a/yt/fields/vector_operations.py
+++ b/yt/fields/vector_operations.py
@@ -61,6 +61,28 @@
                        function = _magnitude, units = field_units,
                        validators = validators, particle_type = particle_type)
 
+def create_squared_field(registry, basename, field_units,
+                         ftype = "gas", slice_info = None,
+                         validators = None, particle_type=False):
+
+    xn, yn, zn = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
+
+    # Is this safe?
+    if registry.pf.dimensionality < 3:
+        zn = ("index", "zeros")
+    if registry.pf.dimensionality < 2:
+        yn = ("index", "zeros")
+
+    def _squared(field, data):
+        squared  = data[xn] * data[xn]
+        squared += data[yn] * data[yn]
+        squared += data[zn] * data[zn]
+        return squared
+
+    registry.add_field((ftype, "%s_squared" % basename),
+                       function = _squared, units = field_units,
+                       validators = validators, particle_type = particle_type)
+
 def create_vector_fields(registry, basename, field_units,
                          ftype = "gas", slice_info = None):
     # slice_info would be the left, the right, and the factor.

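Together with the fluid_vector_fields hunk above, this restores a
vorticity_squared field assembled from the vector components. Conceptually:

    # What create_squared_field(registry, "vorticity", "1/s**2", ...) sets up:
    #   vorticity_squared = vorticity_x**2 + vorticity_y**2 + vorticity_z**2
    # (missing components fall back to ("index", "zeros") below 3D)
    ad = pf.h.all_data()
    vsq = ad["gas", "vorticity_squared"]   # carries units of 1/s**2
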
diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -12,3 +12,29 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
+
+import sys, types, os, glob, cPickle, time, importlib
+
+_frontends = [
+    'art',
+    'artio',
+    'athena',
+    'boxlib',
+    'chombo',
+    'enzo',
+    'fits',
+    'flash',
+    'gdf',
+    'halo_catalogs',
+    'moab',
+    #'pluto',
+    'ramses',
+    'sph',
+    'stream',
+]
+
+class _frontend_container:
+    def __init__(self):
+        for frontend in _frontends:
+            _mod = "yt.frontends.%s.api" % frontend
+            setattr(self, frontend, importlib.import_module(_mod))

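The container eagerly imports each frontend's api module at construction and
exposes it as an attribute, so downstream code can do, roughly:

    frontends = _frontend_container()
    frontends.enzo     # the yt.frontends.enzo.api module
    frontends.fits     # the yt.frontends.fits.api module
    # e.g., assuming the api module exports its Dataset class:
    EnzoDataset = frontends.enzo.EnzoDataset
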
diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -14,7 +14,6 @@
 #-----------------------------------------------------------------------------
 import numpy as np
 import os.path
-import glob
 import stat
 import weakref
 import cStringIO
@@ -203,14 +202,17 @@
         particle header, star files, etc.
         """
         base_prefix, base_suffix = filename_pattern['amr']
+        aexpstr = 'a'+file_amr.rsplit('a',1)[1].replace(base_suffix,'')
         possibles = glob.glob(os.path.dirname(file_amr)+"/*")
         for filetype, (prefix, suffix) in filename_pattern.iteritems():
             # if this attribute is already set skip it
             if getattr(self, "_file_"+filetype, None) is not None:
                 continue
-            stripped = file_amr.replace(base_prefix, prefix)
-            stripped = stripped.replace(base_suffix, suffix)
-            match, = difflib.get_close_matches(stripped, possibles, 1, 0.6)
+            match = None
+            for possible in possibles:
+                if possible.endswith(aexpstr+suffix):
+                    if os.path.basename(possible).startswith(prefix):
+                        match = possible
             if match is not None:
                 mylog.info('discovered %s:%s', filetype, match)
                 setattr(self, "_file_"+filetype, match)

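The new matching keys companion files off the expansion-factor string
instead of fuzzy difflib matching. Traced on a hypothetical file name:

    file_amr = "10MpcBox_HartGal_csf_a0.500.d"
    base_suffix = ".d"
    aexpstr = 'a' + file_amr.rsplit('a', 1)[1].replace(base_suffix, '')
    # aexpstr == "a0.500"; a candidate in the same directory matches a
    # file type only if it ends with aexpstr + that type's suffix and
    # its basename starts with that type's prefix.
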
diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/frontends/art/tests/test_outputs.py
--- a/yt/frontends/art/tests/test_outputs.py
+++ b/yt/frontends/art/tests/test_outputs.py
@@ -1,5 +1,5 @@
 """
-ART frontend tests using SFG1 a=0.330
+ART frontend tests using D9p a=0.500
 
 
 
@@ -22,20 +22,22 @@
     data_dir_load
 from yt.frontends.art.api import ARTDataset
 
-_fields = ("Density", "particle_mass", ("all", "particle_position_x"))
+_fields = ("Temperature", "Density", "particle_mass", ("all", "particle_position_x"))
 
-sfg1 = "10MpcBox_csf512_a0.330.d"
+d9p = "D9p_500/10MpcBox_HartGal_csf_a0.500.d"
 
-
-@requires_pf(sfg1, big_data=True)
-def test_sfg1():
-    pf = data_dir_load(sfg1)
-    yield assert_equal, str(pf), "10MpcBox_csf512_a0.330.d"
+@requires_pf(d9p, big_data=True)
+def test_d9p():
+    pf = data_dir_load(d9p)
+    yield assert_equal, str(pf), "10MpcBox_HartGal_csf_a0.500.d"
+    for test in big_patch_amr(d9p, _fields):
+        test_d9p.__name__ = test.description
+        yield test
     dso = [None, ("sphere", ("max", (0.1, 'unitary')))]
     for field in _fields:
         for axis in [0, 1, 2]:
             for ds in dso:
                 for weight_field in [None, "Density"]:
                     yield PixelizedProjectionValuesTest(
-                        sfg1, axis, field, weight_field,
+                        d9p, axis, field, weight_field,
                         ds)

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -177,7 +177,7 @@
             if self.dimensionality < 3:
                 dx[i].append(DRE[2] - DLE[1])
         self.level_dds = np.array(dx, dtype="float64")
-        coordinate_type = int(header_file.next())
+        header_file.next()
         if self.pf.geometry == "cartesian":
             default_ybounds = (0.0, 1.0)
             default_zbounds = (0.0, 1.0)
@@ -580,7 +580,11 @@
         header_file.readline()
         self._header_mesh_start = header_file.tell()
         header_file.next()
-        coordinate_type = int(header_file.next())
+        next_line = header_file.next()
+        if len(next_line.split()) == 1:
+            coordinate_type = int(next_line)
+        else:
+            coordinate_type = 0
         if coordinate_type == 0:
             self.geometry = "cartesian"
         elif coordinate_type == 1:

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -532,7 +532,7 @@
         self.dataset_type = dataset_type
         self.float_type = 'float64'
         self.parameter_file = weakref.proxy(pf) # for _obtain_enzo
-        self.float_type = self.enzo.index_information["GridLeftEdge"].dtype
+        self.float_type = self.enzo.hierarchy_information["GridLeftEdge"].dtype
         self.directory = os.getcwd()
         GridIndex.__init__(self, pf, dataset_type)
 
@@ -540,12 +540,12 @@
         pass
 
     def _count_grids(self):
-        self.num_grids = self.enzo.index_information["GridDimensions"].shape[0]
+        self.num_grids = self.enzo.hierarchy_information["GridDimensions"].shape[0]
 
     def _parse_index(self):
         self._copy_index_structure()
         mylog.debug("Copying reverse tree")
-        reverse_tree = self.enzo.index_information["GridParentIDs"].ravel().tolist()
+        reverse_tree = self.enzo.hierarchy_information["GridParentIDs"].ravel().tolist()
         # Initial setup:
         mylog.debug("Reconstructing parent-child relationships")
         grids = []
@@ -574,14 +574,14 @@
 
     def _copy_index_structure(self):
         # Dimensions are important!
-        self.grid_dimensions[:] = self.enzo.index_information["GridEndIndices"][:]
-        self.grid_dimensions -= self.enzo.index_information["GridStartIndices"][:]
+        self.grid_dimensions[:] = self.enzo.hierarchy_information["GridEndIndices"][:]
+        self.grid_dimensions -= self.enzo.hierarchy_information["GridStartIndices"][:]
         self.grid_dimensions += 1
-        self.grid_left_edge[:] = self.enzo.index_information["GridLeftEdge"][:]
-        self.grid_right_edge[:] = self.enzo.index_information["GridRightEdge"][:]
-        self.grid_levels[:] = self.enzo.index_information["GridLevels"][:]
-        self.grid_procs = self.enzo.index_information["GridProcs"].copy()
-        self.grid_particle_count[:] = self.enzo.index_information["GridNumberOfParticles"][:]
+        self.grid_left_edge[:] = self.enzo.hierarchy_information["GridLeftEdge"][:]
+        self.grid_right_edge[:] = self.enzo.hierarchy_information["GridRightEdge"][:]
+        self.grid_levels[:] = self.enzo.hierarchy_information["GridLevels"][:]
+        self.grid_procs = self.enzo.hierarchy_information["GridProcs"].copy()
+        self.grid_particle_count[:] = self.enzo.hierarchy_information["GridNumberOfParticles"][:]
 
     def save_data(self, *args, **kwargs):
         pass
@@ -829,12 +829,11 @@
         else:
             if "LengthUnits" in self.parameters:
                 length_unit = self.parameters["LengthUnits"]
-                mass_unit = self.parameters["MassUnits"]
+                mass_unit = self.parameters["DensityUnits"] * length_unit**3
                 time_unit = self.parameters["TimeUnits"]
             else:
                 mylog.warning("Setting 1.0 in code units to be 1.0 cm")
                 mylog.warning("Setting 1.0 in code units to be 1.0 s")
-                mylog.warning("Setting 1.0 in code units to be 1.0 g")
                 length_unit = mass_unit = time_unit = 1.0
 
             self.length_unit = self.quan(length_unit, "cm")
@@ -899,6 +898,7 @@
         return obj
 
     def __init__(self, parameter_override=None, conversion_override=None):
+        self.fluid_types += ("enzo",)
         if parameter_override is None: parameter_override = {}
         self._parameter_override = parameter_override
         if conversion_override is None: conversion_override = {}

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -233,37 +233,8 @@
                       slice(ghost_zones,-ghost_zones))
         BaseIOHandler.__init__(self, pf)
 
-    def _read_data_set(self, grid, field):
-        if grid.id not in self.grids_in_memory:
-            mylog.error("Was asked for %s but I have %s", grid.id, self.grids_in_memory.keys())
-            raise KeyError
-        tr = self.grids_in_memory[grid.id][field]
-        # If it's particles, we copy.
-        if len(tr.shape) == 1: return tr.copy()
-        # New in-place unit conversion breaks if we don't copy first
-        return tr.swapaxes(0,2)[self.my_slice].copy()
-        # We don't do this, because we currently do not interpolate
-        coef1 = max((grid.Time - t1)/(grid.Time - t2), 0.0)
-        coef2 = 1.0 - coef1
-        t1 = enzo.yt_parameter_file["InitialTime"]
-        t2 = enzo.index_information["GridOldTimes"][grid.id]
-        return (coef1*self.grids_in_memory[grid.id][field] + \
-                coef2*self.old_grids_in_memory[grid.id][field])\
-                [self.my_slice]
-
-    def modify(self, field):
-        return field.swapaxes(0,2)
-
     def _read_field_names(self, grid):
-        return self.grids_in_memory[grid.id].keys()
-
-    def _read_data_slice(self, grid, field, axis, coord):
-        sl = [slice(3,-3), slice(3,-3), slice(3,-3)]
-        sl[axis] = slice(coord + 3, coord + 4)
-        sl = tuple(reversed(sl))
-        tr = self.grids_in_memory[grid.id][field][sl].swapaxes(0,2)
-        # In-place unit conversion requires we return a copy
-        return tr.copy()
+        return [("enzo", field) for field in self.grids_in_memory[grid.id].keys()]
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         rv = {}
@@ -292,13 +263,10 @@
         for chunk in chunks:
             for g in chunk.objs:
                 if g.id not in self.grids_in_memory: continue
-
-                data = np.empty(g.ActiveDimensions[::-1], dtype="float64")
-                data_view = data.swapaxes(0,2)
                 for field in fields:
                     ftype, fname = field
-                    data_view = self.grids_in_memory[g.id][fname]
-                    nd = g.select(selector, data_view, rv[field], ind)
+                    data_view = self.grids_in_memory[g.id][fname][self.my_slice]
+                    ind += g.select(selector, data_view, rv[field], ind)
         return rv
 
     def _read_particle_coords(self, chunks, ptf):
@@ -333,10 +301,6 @@
                             data = data * g.dds.prod(dtype="f8")
                         yield (ptype, field), data[mask]
 
-    @property
-    def _read_exception(self):
-        return KeyError
-
 class IOHandlerPacked2D(IOHandlerPackedHDF5):
 
     _dataset_type = "enzo_packed_2d"

diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -10,15 +10,11 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-try:
-    import astropy.io.fits as pyfits
-    import astropy.wcs as pywcs
-except ImportError:
-    pass
-
 import stat
+import types
 import numpy as np
 import weakref
+import warnings
 
 from yt.config import ytcfg
 from yt.funcs import *
@@ -38,6 +34,37 @@
 from yt.utilities.decompose import \
     decompose_array, get_psize
 
+class astropy_imports:
+    _pyfits = None
+    @property
+    def pyfits(self):
+        if self._pyfits is None:
+            import astropy.io.fits as pyfits
+            self.log
+            self._pyfits = pyfits
+        return self._pyfits
+
+    _pywcs = None
+    @property
+    def pywcs(self):
+        if self._pywcs is None:
+            import astropy.wcs as pywcs
+            self.log
+            self._pywcs = pywcs
+        return self._pywcs
+
+    _log = None
+    @property
+    def log(self):
+        if self._log is None:
+            from astropy import log
+            if log.exception_logging_enabled():
+                log.disable_exception_logging()
+            self._log = log
+        return self._log
+
+ap = astropy_imports()
+
 angle_units = ["deg","arcsec","arcmin","mas"]
 all_units = angle_units + mpc_conversion.keys()
 
@@ -52,11 +79,11 @@
 
     def __repr__(self):
         return "FITSGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
-    
+
 class FITSHierarchy(GridIndex):
 
     grid = FITSGrid
-    
+
     def __init__(self,pf,dataset_type='fits'):
         self.dataset_type = dataset_type
         self.field_indexes = {}
@@ -76,10 +103,10 @@
         for h in self._handle[self.parameter_file.first_image:]:
             if h.is_image:
                 self.field_list.append(("fits", h.name.lower()))
-                        
+
     def _count_grids(self):
         self.num_grids = self.pf.nprocs
-                
+
     def _parse_index(self):
         f = self._handle # shortcut
         pf = self.parameter_file # shortcut
@@ -98,12 +125,12 @@
             self.grid_left_edge[0,:] = pf.domain_left_edge
             self.grid_right_edge[0,:] = pf.domain_right_edge
             self.grid_dimensions[0] = pf.domain_dimensions
-        
+
         self.grid_levels.flat[:] = 0
         self.grids = np.empty(self.num_grids, dtype='object')
         for i in xrange(self.num_grids):
             self.grids[i] = self.grid(i, self, self.grid_levels[i,0])
-        
+
     def _populate_grid_objects(self):
         for i in xrange(self.num_grids):
             self.grids[i]._prepare_grid()
@@ -112,7 +139,7 @@
 
     def _setup_derived_fields(self):
         super(FITSHierarchy, self)._setup_derived_fields()
-        [self.parameter_file.conversion_factors[field] 
+        [self.parameter_file.conversion_factors[field]
          for field in self.field_list]
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -122,8 +149,8 @@
             f = self.parameter_file.field_info[field]
             if f._function.func_name == "_TranslationFunc":
                 # Translating an already-converted field
-                self.parameter_file.conversion_factors[field] = 1.0 
-                
+                self.parameter_file.conversion_factors[field] = 1.0
+
     def _setup_data_io(self):
         self.io = io_registry[self.dataset_type](self.parameter_file)
 
@@ -132,7 +159,7 @@
     _field_info_class = FITSFieldInfo
     _dataset_type = "fits"
     _handle = None
-    
+
     def __init__(self, filename, dataset_type='fits',
                  primary_header = None,
                  sky_conversion = None,
@@ -142,24 +169,24 @@
         self.fluid_types += ("fits",)
         self.mask_nans = mask_nans
         self.nprocs = nprocs
-        if isinstance(filename, pyfits.HDUList):
+        if isinstance(filename, ap.pyfits.HDUList):
             self._handle = filename
             fname = filename.filename()
         else:
-            self._handle = pyfits.open(filename)
+            self._handle = ap.pyfits.open(filename)
             fname = filename
         for i, h in enumerate(self._handle):
             if h.is_image and h.data is not None:
                 self.first_image = i
                 break
-        
+
         if primary_header is None:
             self.primary_header = self._handle[self.first_image].header
         else:
             self.primary_header = primary_header
         self.shape = self._handle[self.first_image].shape
 
-        self.wcs = pywcs.WCS(header=self.primary_header)
+        self.wcs = ap.pywcs.WCS(header=self.primary_header)
 
         self.file_unit = None
         for i, unit in enumerate(self.wcs.wcs.cunit):
@@ -177,10 +204,11 @@
             self.new_unit = self.file_unit
             self.pixel_scale = self.wcs.wcs.cdelt[idx]
 
+        self.refine_by = 2
+
         Dataset.__init__(self, fname, dataset_type)
         self.storage_filename = storage_filename
-            
-        self.refine_by = 2
+
         # For plotting to APLpy
         self.hdu_list = self._handle
 
@@ -198,7 +226,7 @@
         self.length_unit = self.quan(length_factor,length_unit)
         self.mass_unit = self.quan(1.0, "g")
         self.time_unit = self.quan(1.0, "s")
-        self.velocity_unit = self.quan(1.0, "cm/s")        
+        self.velocity_unit = self.quan(1.0, "cm/s")
 
     def _parse_parameter_file(self):
         self.unique_identifier = \
@@ -216,14 +244,14 @@
         if self.dimensionality == 2:
             self.domain_dimensions = np.append(self.domain_dimensions,
                                                [int(1)])
-            
+
         self.domain_left_edge = np.array([0.5]*3)
         self.domain_right_edge = np.array([float(dim)+0.5 for dim in self.domain_dimensions])
 
         if self.dimensionality == 2:
             self.domain_left_edge[-1] = 0.5
             self.domain_right_edge[-1] = 1.5
-            
+
         # Get the simulation time
         try:
             self.current_time = self.parameters["time"]
@@ -231,7 +259,7 @@
             mylog.warning("Cannot find time")
             self.current_time = 0.0
             pass
-        
+
         # For now we'll ignore these
         self.periodicity = (False,)*3
         self.current_redshift = self.omega_lambda = self.omega_matter = \
@@ -242,15 +270,24 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
+        if isinstance(args[0], types.StringTypes):
+            ext = args[0].rsplit(".", 1)[-1]
+            if ext.upper() == "GZ":
+                # We don't know for sure that there will be > 1
+                ext = args[0].rsplit(".", 1)[0].rsplit(".", 1)[-1]
+            if ext.upper() not in ("FITS", "FTS"):
+                return False
         try:
-            if isinstance(args[0], pyfits.HDUList):
+            if args[0].__class__.__name__ == "HDUList":
                 for h in args[0]:
                     if h.is_image and h.data is not None:
                         return True
         except:
             pass
         try:
-            fileh = pyfits.open(args[0])
+            with warnings.catch_warnings():
+                warnings.filterwarnings('ignore', category=UserWarning, append=True)
+                fileh = ap.pyfits.open(args[0])
             for h in fileh:
                 if h.is_image and h.data is not None:
                     fileh.close()

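astropy_imports is an instance-cached lazy loader: the heavy astropy import
happens on first attribute access and is memoized afterwards. The pattern in
miniature (file name hypothetical):

    class lazy_astropy(object):
        _pyfits = None
        @property
        def pyfits(self):
            if self._pyfits is None:
                import astropy.io.fits as pyfits   # deferred until needed
                self._pyfits = pyfits
            return self._pyfits

    ap = lazy_astropy()
    hdus = ap.pyfits.open("example.fits")   # astropy imported only here
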
diff -r bcbf96d9504fffc8bd878f82b2f06f9cc174852b -r 4b557418552ddcd5a27098adcab482d10c0c94e4 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -11,10 +11,6 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-try:
-    import astropy.io.fits as pyfits
-except ImportError:
-    pass
 
 from yt.utilities.math_utils import prec_accum
 

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/096f25767bee/
Changeset:   096f25767bee
Branch:      yt-3.0
User:        samskillman
Date:        2014-03-31 01:41:22+00:00
Summary:     Disable off-axis-proj for now.
Affected #:  1 file

diff -r 4b557418552ddcd5a27098adcab482d10c0c94e4 -r 096f25767beef16b61184eb9c4ffe7925aba115c yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -142,8 +142,8 @@
     ProjectionPlot, OffAxisProjectionPlot, \
     show_colormaps, ProfilePlot, PhasePlot
 
-from yt.visualization.volume_rendering.api import \
-    off_axis_projection
+#from yt.visualization.volume_rendering.api import \
+#    off_axis_projection
 
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects, enable_parallelism


https://bitbucket.org/yt_analysis/yt/commits/8df33c242511/
Changeset:   8df33c242511
Branch:      yt-3.0
User:        samskillman
Date:        2014-04-11 05:34:34+00:00
Summary:     First go at opaque source automatic compositing. Works ok, but very manual right now
Affected #:  7 files

diff -r 096f25767beef16b61184eb9c4ffe7925aba115c -r 8df33c24251188120ea456251fbb8c0652b36371 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -47,8 +47,8 @@
         super(Camera, self).__init__(self.focus - self.position,
                                      self.north_vector, steady_north=False)
 
-    def get_sampler_params(self):
-        lens_params = self.lens.get_sampler_params(self)
+    def get_sampler_params(self, render_source):
+        lens_params = self.lens.get_sampler_params(self, render_source)
         lens_params.update(width=self.width)
         return lens_params
 
@@ -207,3 +207,14 @@
             north_vector=np.dot(R, self.unit_vectors[1]))
         if self.steady_north:
             self.north_vector = np.dot(R, self.north_vector)
+
+    def project_to_plane(self, pos, res=None):
+        if res is None:
+            res = self.resolution
+        dx = np.dot(pos - self.position.d, self.unit_vectors[1])
+        dy = np.dot(pos - self.position.d, self.unit_vectors[0])
+        dz = np.dot(pos - self.position.d, self.unit_vectors[2])
+        # Transpose into image coords.
+        py = (res[0]/2 + res[0]*(dx/self.width[0].d)).astype('int')
+        px = (res[1]/2 + res[1]*(dy/self.width[1].d)).astype('int')
+        return px, py, dz
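
The new `Camera.project_to_plane` maps world-space positions to integer pixel
coordinates plus a depth along the view axis, which is what the z-buffer
compositing in this changeset keys on. A hedged standalone sketch of the same
arithmetic, assuming plain NumPy arrays for the camera position, orthonormal
basis vectors, width, and resolution:

    import numpy as np

    def project_to_plane(pos, position, unit_vectors, width, res):
        # Offsets from the camera position along the two image-plane basis
        # vectors (dx, dy) and along the view axis (dz, the depth).
        dx = np.dot(pos - position, unit_vectors[1])
        dy = np.dot(pos - position, unit_vectors[0])
        dz = np.dot(pos - position, unit_vectors[2])
        # Rescale into pixel indices, centered on the image.
        py = (res[0]/2 + res[0]*(dx/width[0])).astype('int')
        px = (res[1]/2 + res[1]*(dy/width[1])).astype('int')
        return px, py, dz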

diff -r 096f25767beef16b61184eb9c4ffe7925aba115c -r 8df33c24251188120ea456251fbb8c0652b36371 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -81,7 +81,12 @@
         mylog.debug("Entering %s" % str(self))
         super(PlaneParallelLens, self).__init__()
 
-    def get_sampler_params(self, camera):
+    def get_sampler_params(self, camera, render_source):
+        if render_source.zbuffer is not None:
+            image = render_source.zbuffer.rgba
+        else:
+            image = self.new_image(camera)
+
         sampler_params =\
             dict(vp_pos=np.concatenate([camera.inv_mat.ravel('F'),
                                         self.back_center.ravel()]),
@@ -92,7 +97,7 @@
                  x_vec=camera.unit_vectors[0],
                  y_vec=camera.unit_vectors[1],
                  width=np.array(camera.width, dtype='float64'),
-                 image=self.new_image(camera))
+                 image=image)
         return sampler_params
 
     def set_viewpoint(self, camera):
@@ -102,6 +107,16 @@
         self.viewpoint = self.front_center + \
             camera.unit_vectors[2] * 1.0e6 * camera.width[2]
 
+    def project_to_plane(self, camera, pos, res=None):
+        if res is None:
+            res = camera.resolution
+        dx = np.dot(pos - self.origin.d, camera.unit_vectors[1])
+        dy = np.dot(pos - self.origin.d, camera.unit_vectors[0])
+        dz = np.dot(pos - self.front_center.d, camera.unit_vectors[2])
+        # Transpose into image coords.
+        py = (res[0]*(dx/camera.width[0].d)).astype('int')
+        px = (res[1]*(dy/camera.width[1].d)).astype('int')
+        return px, py, dz
 
 class PerspectiveLens(Lens):
 
@@ -199,7 +214,7 @@
             info={'imtype': 'rendering'})
         return self.current_image
 
-    def get_sampler_params(self, camera):
+    def get_sampler_params(self, camera, render_source):
         vp = arr_fisheye_vectors(camera.resolution[0], self.fov)
         vp.shape = (camera.resolution[0]**2, 1, 3)
         vp2 = vp.copy()
@@ -211,7 +226,10 @@
         positions = np.ones((camera.resolution[0]**2, 1, 3),
                             dtype='float64') * camera.position
 
-        image = self.new_image(camera)
+        if render_source.zbuffer is not None:
+            image = render_source.zbuffer.rgba
+        else:
+            image = self.new_image(camera)
 
         sampler_params =\
             dict(vp_pos=positions,

diff -r 096f25767beef16b61184eb9c4ffe7925aba115c -r 8df33c24251188120ea456251fbb8c0652b36371 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -19,7 +19,9 @@
     ParallelAnalysisInterface
 from yt.utilities.amr_kdtree.api import AMRKDTree
 from transfer_function_helper import TransferFunctionHelper
+from transfer_functions import TransferFunction
 from utils import new_volume_render_sampler, data_source_or_all
+from zbuffer_array import ZBuffer
 
 
 class RenderSource(ParallelAnalysisInterface):
@@ -33,7 +35,7 @@
         self.opaque = False
         self.zbuffer = None
 
-    def render(self, zbuffer=None):
+    def render(self, camera, zbuffer=None):
         pass
 
     def validate(self):
@@ -50,6 +52,10 @@
     def set_zbuffer(self, zbuffer):
         self.zbuffer = zbuffer
 
+    def render(self, camera, zbuffer=None):
+        # This is definitely wrong for now
+        return self.zbuffer
+
 
 class VolumeSource(RenderSource):
 
@@ -81,6 +87,15 @@
         self.build_default_volume()
         self.build_default_transfer_function()
 
+    def set_transfer_function(self, transfer_function):
+        """
+        Set transfer function for this source
+        """
+        if not isinstance(transfer_function, TransferFunction):
+            raise RuntimeError("transfer_function not of correct type")
+        self.transfer_function = transfer_function
+        return self
+
     def validate(self):
         """Make sure that all dependencies have been met"""
         if self.data_source is None:
@@ -126,8 +141,9 @@
         self.sampler = sampler
         assert(self.sampler is not None)
 
-    def render(self, camera):
+    def render(self, camera, zbuffer=None):
         camera.lens.set_camera(camera)
+        self.zbuffer = zbuffer
         self.set_sampler(camera)
         assert (self.sampler is not None)
 
@@ -145,6 +161,7 @@
         mylog.debug("Done casting rays")
 
         self.current_image = self.finalize_image(camera, self.sampler.aimage)
+        self.zbuffer = ZBuffer(self.current_image, 0.0*zbuffer.z)
         return self.current_image
 
     def finalize_image(self, camera, image):
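
The `set_transfer_function` added above type-checks its argument and returns
`self`. A hedged usage sketch against an existing VolumeSource `vr` (the
bounds and gaussian parameters here are made up):

    from yt.visualization.volume_rendering.transfer_functions import \
        TransferFunction

    # Anything that is not a TransferFunction instance raises RuntimeError.
    tf = TransferFunction((-28.0, -24.0))
    tf.add_gaussian(-26.0, 0.5, 1.0)
    vr.set_transfer_function(tf)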

diff -r 096f25767beef16b61184eb9c4ffe7925aba115c -r 8df33c24251188120ea456251fbb8c0652b36371 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -131,20 +131,25 @@
         return
 
     def composite(self):
+        cam = self.default_camera
         opaque = ZBuffer(
-            np.zeros(self.camera.resolution[0],
-                     self.camera.resolution[1],
-                     4),
-            np.ones(self.camera.resolution) * np.inf)
+            np.zeros([cam.resolution[0],
+                     cam.resolution[1],
+                     4]),
+            np.ones(cam.resolution) * np.inf)
 
         for k, source in self.iter_opaque_sources():
+            print "Adding opaque source:", source
             if source.zbuffer is not None:
                 opaque = opaque + source.zbuffer
 
         for k, source in self.iter_transparent_sources():
-            source.render(zbuffer=opaque)
-            opaque = opaque + source.zbuffer
-        pass
+            print "Adding transparent source:", source
+            print opaque.z.min(), opaque.z.max()
+            print opaque.rgba[:,:,:3].max()
+            im = source.render(cam, zbuffer=opaque)
+            #opaque = opaque + source.zbuffer
+        return im
 
     def set_default_camera(self, camera):
         self.default_camera = camera
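
Stripped of the debugging prints, the compositing pass above reduces to:
seed a fully transparent buffer at infinite depth, fold in each opaque
source's buffer (nearer pixels win), then hand the accumulated buffer to
every transparent source as it renders. A condensed sketch, assuming a
populated Scene `sc`:

    import numpy as np
    from yt.visualization.volume_rendering.zbuffer_array import ZBuffer

    cam = sc.default_camera
    opaque = ZBuffer(np.zeros([cam.resolution[0], cam.resolution[1], 4]),
                     np.ones(cam.resolution) * np.inf)
    for k, source in sc.iter_opaque_sources():
        if source.zbuffer is not None:
            opaque = opaque + source.zbuffer
    for k, source in sc.iter_transparent_sources():
        im = source.render(cam, zbuffer=opaque)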

diff -r 096f25767beef16b61184eb9c4ffe7925aba115c -r 8df33c24251188120ea456251fbb8c0652b36371 yt/visualization/volume_rendering/tests/test_composite.py
--- a/yt/visualization/volume_rendering/tests/test_composite.py
+++ b/yt/visualization/volume_rendering/tests/test_composite.py
@@ -1,30 +1,59 @@
 from yt.mods import *
 from yt.testing import \
     fake_random_pf
-from yt.visualization.volume_rendering.scene import Scene, RenderScene, \
-    create_volume_rendering
+from yt.visualization.volume_rendering.scene import Scene
 from yt.visualization.volume_rendering.camera import Camera
 from yt.visualization.volume_rendering.zbuffer_array import ZBuffer
 from yt.visualization.volume_rendering.render_source import VolumeSource,\
     OpaqueSource
-
+from yt.utilities.lib.misc_utilities import \
+    lines
+np.random.seed(0)
 
 pf = fake_random_pf(64)
-ds = pf.h.sphere(pf.domain_center, pf.domain_width[0] / 2)
+ds = pf.h.sphere(pf.domain_center, pf.domain_width[0] / 3.)
+pf.field_info[pf.field_list[0]].take_log=False
 
 sc = Scene()
 cam = Camera(ds)
-vr = VolumeSource(ds, field=('gas', 'density'))
+sc.set_default_camera(cam)
+vr = VolumeSource(ds, field=pf.field_list[0])
+vr.transfer_function.clear()
+vr.transfer_function.grey_opacity=True
+vr.transfer_function.map_to_colormap(0.0, 1.0, scale=10.0, colormap="Reds")
 sc.add_source(vr)
-vr.build_defaults()
 
 op = OpaqueSource()
-op.set_scene(sc)
-empty = 0.0 * vr.new_image()
+empty = 0.0 * sc.default_camera.lens.new_image(cam)
 z = np.ones(empty.shape[:2]) * np.inf
+
 zbuff = ZBuffer(empty, z)
 op.set_zbuffer(zbuff)
 
 sc.add_source(op)
 
-sc.render('test.png')
+cam.set_width(1.0)
+
+# DRAW SOME LINES
+npoints = 100
+vertices = 0.5 * np.random.random([npoints, 3])
+#vertices = np.array([pf.domain_center, [2., 2., 1.]])
+cam.lens.setup_box_properties(cam)
+px, py, dz = cam.lens.project_to_plane(cam, vertices)
+print dz
+colors = np.random.random([npoints, 4])
+colors[:,3] = 1.0 
+lines(empty, px, py, colors, 24)
+#empty[px, py, :] = 1.0
+#z[px, py] = dz
+dummy = -np.ones_like(empty)
+lines(dummy, px, py, np.vstack([dz, dz]), 24)
+print dummy[dummy!=-1]
+z[:,:] = dummy[:,:,3]
+z[z==-1] = np.inf
+print z.min(), z.max()
+
+im = sc.composite()
+im = ImageArray(im.d)
+im.write_png("composite.png")
+#write_bitmap(zbuff.rgba[:, :, :3], 'composite.png')

diff -r 096f25767beef16b61184eb9c4ffe7925aba115c -r 8df33c24251188120ea456251fbb8c0652b36371 yt/visualization/volume_rendering/utils.py
--- a/yt/visualization/volume_rendering/utils.py
+++ b/yt/visualization/volume_rendering/utils.py
@@ -11,7 +11,7 @@
 
 
 def new_volume_render_sampler(camera, render_source):
-    params = camera.get_sampler_params()
+    params = camera.get_sampler_params(render_source)
     params.update(transfer_function=render_source.transfer_function)
     params.update(transfer_function=render_source.transfer_function)
     params.update(num_samples=render_source.num_samples)

diff -r 096f25767beef16b61184eb9c4ffe7925aba115c -r 8df33c24251188120ea456251fbb8c0652b36371 yt/visualization/volume_rendering/zbuffer_array.py
--- a/yt/visualization/volume_rendering/zbuffer_array.py
+++ b/yt/visualization/volume_rendering/zbuffer_array.py
@@ -29,7 +29,7 @@
     def __add__(self, other):
         assert(self.shape == other.shape)
         f_or_b = self.z < other.z
-        rgba = self.rgba * f_or_b + other.rgba * (1 - f_or_b)
+        rgba = (self.rgba.T * f_or_b).T + (other.rgba.T * (1 - f_or_b)).T
         z = np.min([self.z, other.z], axis=0)
         return ZBuffer(rgba, z)
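
The transposes in `__add__` above exist because `rgba` has shape
(Nx, Ny, 4) while the depth mask `f_or_b` has shape (Nx, Ny), and NumPy
broadcasting aligns trailing axes. A small self-contained demonstration
(the mask is transposed here as well so non-square images also line up;
for square images the untransposed mask broadcasts too):

    import numpy as np

    nx, ny = 5, 3
    front = np.random.random((nx, ny, 4))   # rgba, shape (Nx, Ny, 4)
    back = np.random.random((nx, ny, 4))
    z_front = np.random.random((nx, ny))
    z_back = np.random.random((nx, ny))

    # True wherever the "front" buffer is actually nearer the camera.
    f_or_b = z_front < z_back
    # (Nx, Ny, 4) * (Nx, Ny) does not broadcast directly; transposing the
    # rgba arrays to (4, Ny, Nx) lines the axes up.
    rgba = (front.T * f_or_b.T).T + (back.T * (1 - f_or_b).T).T
    z = np.min([z_front, z_back], axis=0)
    print rgba.shape, z.shape  # (5, 3, 4) (5, 3)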
 


https://bitbucket.org/yt_analysis/yt/commits/d4a6c2805356/
Changeset:   d4a6c2805356
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-17 20:01:24+00:00
Summary:     Merging from mainline.
Affected #:  149 files

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5160,3 +5160,4 @@
 954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
 f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3
 079e456c38a87676472a458210077e2be325dc85 last_gplv3
+f327552a6ede406b82711fb800ebcd5fe692d1cb yt-3.0a4

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -7,4 +7,9 @@
 include doc/extensions/README doc/Makefile
 prune doc/source/reference/api/generated
 prune doc/build/
+recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
+prune yt/frontends/_skeleton
+prune tests
+graft yt/gui/reason/html/resources
+exclude clean.sh .hgchurn
 recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -34,11 +34,11 @@
  * Do not import "*" from anything other than "yt.funcs".
  * Internally, only import from source files directly -- instead of:
 
-   from yt.visualization.api import PlotCollection
+   from yt.visualization.api import ProjectionPlot
 
    do:
 
-   from yt.visualization.plot_collection import PlotCollection
+   from yt.visualization.plot_window import ProjectionPlot
 
  * Numpy is to be imported as "np", after a long time of using "na".
  * Do not use too many keyword arguments.  If you have a lot of keyword

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/docstring_idioms.txt
--- a/doc/docstring_idioms.txt
+++ b/doc/docstring_idioms.txt
@@ -43,7 +43,7 @@
 To indicate the return type of a given object, you can reference it using this
 construction:
 
-    This function returns a :class:`PlotCollection`.
+    This function returns a :class:`ProjectionPlot`.
 
 To reference a function, you can use:
 
@@ -51,4 +51,4 @@
 
 To reference a method, you can use:
 
-    To add a projection, use :meth:`PlotCollection.add_projection`.
+    To add a projection, use :meth:`ProjectionPlot.set_width`.

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/extensions/notebook_sphinxext.py
--- a/doc/extensions/notebook_sphinxext.py
+++ b/doc/extensions/notebook_sphinxext.py
@@ -15,8 +15,13 @@
     required_arguments = 1
     optional_arguments = 1
     option_spec = {'skip_exceptions' : directives.flag}
+    final_argument_whitespace = True
 
-    def run(self):
+    def run(self): # check if there are spaces in the notebook name
+        nb_path = self.arguments[0]
+        if ' ' in nb_path: raise ValueError(
+            "Due to issues with docutils stripping spaces from links, white "
+            "space is not allowed in notebook filenames '{0}'".format(nb_path))
         # check if raw html is supported
         if not self.state.document.settings.raw_enabled:
             raise self.warning('"%s" directive disabled.' % self.name)
@@ -24,10 +29,11 @@
         # get path to notebook
         source_dir = os.path.dirname(
             os.path.abspath(self.state.document.current_source))
-        nb_basename = os.path.basename(self.arguments[0])
+        nb_filename = self.arguments[0]
+        nb_basename = os.path.basename(nb_filename)
         rst_file = self.state_machine.document.attributes['source']
         rst_dir = os.path.abspath(os.path.dirname(rst_file))
-        nb_abs_path = os.path.join(rst_dir, nb_basename)
+        nb_abs_path = os.path.abspath(os.path.join(rst_dir, nb_filename))
 
         # Move files around.
         rel_dir = os.path.relpath(rst_dir, setup.confdir)
@@ -89,7 +95,6 @@
         return [nb_node]
 
 
-
 class notebook_node(nodes.raw):
     pass
 
@@ -109,6 +114,7 @@
     # http://imgur.com/eR9bMRH
     header = header.replace('<style', '<style scoped="scoped"')
     header = header.replace('body {\n  overflow: visible;\n  padding: 8px;\n}\n', '')
+    header = header.replace("code,pre{", "code{")
 
     # Filter out styles that conflict with the sphinx theme.
     filter_strings = [
@@ -120,8 +126,16 @@
     ]
     filter_strings.extend(['h%s{' % (i+1) for i in range(6)])
 
+    line_begin_strings = [
+        'pre{',
+        'p{margin'
+        ]
+
     header_lines = filter(
         lambda x: not any([s in x for s in filter_strings]), header.split('\n'))
+    header_lines = filter(
+        lambda x: not any([x.startswith(s) for s in line_begin_strings]), header_lines)
+
     header = '\n'.join(header_lines)
 
     # concatenate raw html lines

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
@@ -0,0 +1,401 @@
+{
+ "metadata": {
+  "name": ""
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "heading",
+     "level": 1,
+     "metadata": {},
+     "source": [
+      "Full Halo Analysis"
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Creating a Catalog"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Here we put everything together to perform some realistic analysis. First we load a full simulation dataset."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.mods import *\n",
+      "from yt.analysis_modules.halo_analysis.api import *\n",
+      "path = ytcfg.get(\"yt\", \"test_data_dir\")\n",
+      "\n",
+      "# Load the data set with the full simulation information\n",
+      "data_pf = load(path+'Enzo_64/RD0006/RedshiftOutput0006')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now we load a rockstar halos binary file. This is the output from running the rockstar halo finder on the dataset loaded above. It is also possible to require the HaloCatalog to find the halos in the full simulation dataset at runtime by specifying a `finder_method` keyword."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Load the rockstar data files\n",
+      "halos_pf = load(path+'rockstar_halos/halos_0.0.bin')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "From these two loaded datasets we create a halo catalog object. No analysis is done at this point, we are simply defining an object we can add analysis tasks to. These analysis tasks will be run in the order they are added to the halo catalog object."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Instantiate a catalog using those two paramter files\n",
+      "hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf, \n",
+      "                 output_dir = path+'halo_catalog')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The first analysis task we add is a filter for the most massive halos; those with masses great than $10^{14}~M_\\odot$. Note that all following analysis will only be performed on these massive halos and we will not waste computational time calculating quantities for halos we are not interested in. This is a result of adding this filter first. If we had called `add_filter` after some other `add_quantity` or `add_callback` to the halo catalog, the quantity and callback calculations would have been performed for all halos, not just those which pass the filter."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": true,
+     "input": [
+      "# Filter out less massive halos\n",
+      "hc.add_filter(\"quantity_value\", \"particle_mass\", \">\", 1e14, \"Msun\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Finding Radial Profiles"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Our first analysis goal is going to be constructing radial profiles for our halos. We would like these profiles to be in terms of the virial radius. Unfortunately we have no guarantee that values of center and virial radius recorded by the halo finder are actually physical. Therefore we should recalculate these quantities ourselves using the values recorded by the halo finder as a starting point."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The first step is going to be creating a sphere object that we will create radial profiles along. This attaches a sphere data object to every halo left in the catalog."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# attach a sphere object to each halo whose radius extends to twice the radius of the halo\n",
+      "hc.add_callback(\"sphere\", factor=2.0)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Next we find the radial profile of the gas overdensity along the sphere object in order to find the virial radius. `radius` is the axis along which we make bins for the radial profiles. `[(\"gas\",\"overdensity\")]` is the quantity that we are profiling. This is a list so we can profile as many quantities as we want. The `weight_field` indicates how the cells should be weighted, but note that this is not a list, so all quantities will be weighted in the same way. The `accumulation` keyword indicates if the profile should be cummulative; this is useful for calculating profiles such as enclosed mass. The `storage` keyword indicates the name of the attribute of a halo where these profiles will be stored. Setting the storage keyword to \"virial_quantities_profiles\" means that the profiles will be stored in a dictionary that can be accessed by `halo.virial_quantities_profiles`."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# use the sphere to calculate radial profiles of gas density weighted by cell volume in terms of the virial radius\n",
+      "hc.add_callback(\"profile\", x_field=\"radius\",\n",
+      "                y_fields=[(\"gas\", \"overdensity\")],\n",
+      "                weight_field=\"cell_volume\", \n",
+      "                accumulation=False,\n",
+      "                storage=\"virial_quantities_profiles\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now we calculate the virial radius of halo using the sphere object. As this is a callback, not a quantity, the virial radius will not be written out with the rest of the halo properties in the final halo catalog. This also has a `profile_storage` keyword to specify where the radial profiles are stored that will allow the callback to calculate the relevant virial quantities. We supply this keyword with the same string we gave to `storage` in the last `profile` callback."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Define a virial radius for the halo.\n",
+      "hc.add_callback(\"virial_quantities\", [\"radius\"], \n",
+      "                profile_storage = \"virial_quantities_profiles\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now that we have calculated the virial radius, we delete the profiles we used to find it."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "hc.add_callback('delete_attribute','virial_quantities_profiles')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now that we have calculated virial quantities we can add a new sphere that is aware of the virial radius we calculated above."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "hc.add_callback('sphere', radius_field='radius_200', factor = 5,\n",
+      "        field_parameters = dict(virial_radius=('quantity','radius_200')))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Using this new sphere, we calculate a gas temperature profile along the virial radius, weighted by the cell mass."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "hc.add_callback('profile', 'virial_radius', [('gas','temperature')],\n",
+      "        storage = 'virial_profiles',\n",
+      "        weight_field = 'cell_mass', \n",
+      "        accumulation=False, output_dir='profiles')\n"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "As profiles are not quantities they will not automatically be written out in the halo catalog; thus in order to be reloadable we must write them out explicitly through a callback of `save_profiles`. This makes sense because they have an extra dimension for each halo along the profile axis. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Save the profiles\n",
+      "hc.add_callback(\"save_profiles\", storage=\"virial_profiles\", output_dir=\"profiles\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We then create the halo catalog. Remember, no analysis is done before this call to create. By adding callbacks and filters we are simply queuing up the actions we want to take that will all run now."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": true,
+     "input": [
+      "hc.create()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Reloading HaloCatalogs"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally we load these profiles back in and make a pretty plot. It is not strictly necessary to reload the profiles in this notebook, but we show this process here to illustrate that this step may be performed completely separately from the rest of the script. This workflow allows you to create a single script that will allow you to perform all of the analysis that requires the full dataset. The output can then be saved in a compact form where only the necessarily halo quantities are stored. You can then download this smaller dataset to a local computer and run any further non-computationally intense analysis and design the appropriate plots."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can load a previously saved halo catalog by using the `load` command. We then create a `HaloCatalog` object from just this dataset."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "halos_pf =  load(path+'halo_catalog/halo_catalog.0.h5')\n",
+      "\n",
+      "hc_reloaded = HaloCatalog(halos_pf=halos_pf, output_dir=path+'halo_catalog')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      " Just as profiles are saved seperately throught the `save_profiles` callback they also must be loaded separately using the `load_profiles` callback."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "hc_reloaded.add_callback('load_profiles',storage='virial_profiles',\n",
+      "        output_dir='profiles')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Calling `load` is the equivalent of calling `create` earlier, but defaults to to not saving new information. This means that the callback to `load_profiles` is not run until we call `load` here."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": true,
+     "input": [
+      "hc_reloaded.load()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Plotting Radial Profiles"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "In the future ProfilePlot will be able to properly interpret the loaded profiles of `Halo` and `HaloCatalog` objects, but this functionality is not yet implemented. In the meantime, we show a quick method of viewing a profile for a single halo."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The individual `Halo` objects contained in the `HaloCatalog` can be accessed through the `halo_list` attribute. This gives us access to the dictionary attached to each halo where we stored the radial profiles."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "halo = hc_reloaded.halo_list[0]\n",
+      "\n",
+      "radius = halo.virial_profiles['virial_radius']\n",
+      "temperature = halo.virial_profiles[u\"('gas', 'temperature')\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Here we quickly use matplotlib to create a basic plot of the radial profile of this halo. When `ProfilePlot` is properly configured to accept Halos and HaloCatalogs the full range of yt plotting tools will be accessible."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%matplotlib inline\n",
+      "import matplotlib.pyplot as plt\n",
+      "\n",
+      "plt.plot(radius,temperature)\n",
+      "\n",
+      "plt.semilogy()\n",
+      "plt.xlabel('$\\mathrm{R/R_{vir}}$')\n",
+      "plt.ylabel('$\\mathrm{Temperature~[K]}$')\n",
+      "\n",
+      "plt.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/analyzing/analysis_modules/halo_analysis.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis.rst
@@ -1,14 +1,13 @@
 Halo Analysis
 =============
 
-Halo finding, mass functions, merger trees, and profiling.
+Using halo catalogs, understanding the different halo finding methods,
+and using the halo mass function.
 
 .. toctree::
    :maxdepth: 1
 
-   running_halofinder
+   halo_catalogs
+   halo_finding
    halo_mass_function
-   hmf_howto
-   merger_tree
-   halo_profiling
-   ellipsoid_analysis
+   halo_analysis_example

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/analyzing/analysis_modules/halo_analysis_example.rst
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/halo_analysis_example.rst
@@ -0,0 +1,4 @@
+Using HaloCatalogs to do Analysis
+---------------------------------
+
+.. notebook:: Halo_Analysis.ipynb

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -0,0 +1,221 @@
+
+Creating Halo Catalogs
+======================
+
+In yt 3.0, operations relating to the analysis of halos (halo finding,
+merger tree creation, and individual halo analysis) are all brought 
+together into a single framework. This framework is substantially
+different from the limited framework included in yt-2.x and is only 
+backwards compatible in that output from old halo finders may be loaded.
+
+A catalog of halos can be created from any initial dataset given to the
+halo catalog through data_pf. These halos can be found using
+friends-of-friends, HOP, or Rockstar. The finder_method keyword dictates
+which halo finder to use. The available arguments are 'fof', 'hop', and
+'rockstar'. For more details on the relative differences between these
+halo finders see :ref:`halo_finding`.
+
+.. code-block:: python
+
+    from yt.mods import *
+    from yt.analysis_modules.halo_analysis.api import HaloCatalog
+    data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
+    hc = HaloCatalog(data_pf=data_pf, finder_method='hop')
+
+A halo catalog may also be created from already run rockstar outputs. 
+This method is not implemented for previously run friends-of-friends or 
+HOP finders. Even though rockstar creates one file per processor, 
+specifying any one file allows the full catalog to be loaded. Here we 
+only specify the file output by the processor with ID 0. Note that the 
+argument for supplying a rockstar output is `halos_pf`, not `data_pf`.
+
+.. code-block:: python
+
+    halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
+    hc = HaloCatalog(halos_pf=halos_pf)
+
+Although supplying only the binary output of the rockstar halo finder 
+is sufficient for creating a halo catalog, it is not possible to find 
+any new information about the identified halos. To associate the halos 
+with the dataset from which they were found, supply arguments to both 
+halos_pf and data_pf.
+
+.. code-block:: python
+
+    halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
+    data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
+    hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf)
+
+A data container can also be supplied via keyword data_source, 
+associated with either dataset, to control the spatial region in 
+which halo analysis will be performed.
+
+Analysis Using Halo Catalogs
+============================
+
+Analysis is done by adding actions to the HaloCatalog. Each action is 
+represented by a callback function that will be run on each halo. 
+There are three types of actions:
+
+    - Filters
+    - Quantities
+    - Callbacks
+
+All interaction with this analysis can be performed by importing from 
+halo_analysis.
+
+Filters
+-------
+
+A filter is a function that returns True or False. If the return value 
+is True, any further queued analysis will proceed and the halo in 
+question will be added to the final catalog. If the return value is False,
+further analysis will not be performed and the halo will not be included 
+in the final catalog.
+
+An example of adding a filter:
+
+.. code-block:: python
+
+    hc.add_filter('quantity_value', 'particle_mass', '>', 1E13, 'Msun')
+
+Currently quantity_value is the only available filter, but more can be 
+added by the user by defining a function that accepts a halo object as 
+the first argument and then adding it as an available filter. If you 
+think that your filter may be of use to the general community, you can 
+add it to yt/analysis_modules/halo_analysis/halo_filters.py and issue a 
+pull request.
+
+An example of defining your own filter:
+
+.. code-block:: python
+
+    def my_filter_function(halo):
+        
+        # Define condition for filter
+        filter_value = True
+        
+        # Return a boolean value 
+        return filter_value
+
+    # Add your filter to the filter registry
+    add_filter("my_filter", my_filter_function)
+
+    # ... Later on in your script
+    hc.add_filter("my_filter")
+
+Quantities
+----------
+
+A quantity is a callback that returns a value or values. The return values 
+are stored within the halo object in a dictionary called “quantities.” At 
+the end of the analysis, all of these quantities will be written to disk as 
+the final form of the generated “halo catalog.”
+
+Quantities may be available in the initial fields found in the halo catalog, 
+or calculated from a function after supplying a definition. An example 
+definition of center of mass is shown below. Currently available quantities 
+are center_of_mass and bulk_velocity. Their definitions are available in 
+yt/analysis_modules/halo_analysis/halo_quantities.py. If you think that 
+your quantity may be of use to the general community, add it to 
+halo_quantities.py and issue a pull request.
+
+An example of adding a quantity:
+
+.. code-block:: python
+
+    hc.add_quantity('center_of_mass')
+
+An example of defining your own quantity:
+
+.. code-block:: python
+
+    def my_quantity_function(halo):
+        # Define quantity to return
+        quantity = 5
+        
+        return quantity
+
+    # Add your quantity to the quantity registry
+    add_quantity('my_quantity', my_quantity_function)
+
+
+    # ... Later on in your script
+    hc.add_quantity("my_quantity") 
+
+Callbacks
+---------
+
+A callback is actually the superclass for quantities and filters and 
+is a general purpose function that does something, anything, to a Halo 
+object. This can include hanging new attributes off the Halo object, 
+performing analysis and writing to disk, etc. A callback does not return 
+anything.
+
+An example of using a pre-defined callback, where we create a sphere for 
+each halo with a radius that is twice the saved “radius”:
+
+.. code-block:: python
+
+    hc.add_callback("sphere", factor=2.0)
+    
+Currently available callbacks are located in 
+yt/analysis_modules/halo_analysis/halo_callbacks.py. New callbacks may 
+be added by using the syntax shown below. If you think that your 
+callback may be of use to the general community, add it to 
+halo_callbacks.py and issue a pull request.
+
+An example of defining your own callback:
+
+.. code-block:: python
+
+    def my_callback_function(halo):
+        # Perform some callback actions here
+        x = 2
+        halo.x_val = x
+
+    # Add the callback to the callback registry
+    add_callback('my_callback', my_callback_function)
+
+
+    # ...  Later on in your script
+    hc.add_callback("my_callback")
+
+Running Analysis
+================
+
+After all callbacks, quantities, and filters have been added, the 
+analysis begins with a call to HaloCatalog.create.
+
+.. code-block:: python
+
+    hc.create()
+
+The save_halos keyword determines whether the actual Halo objects 
+are saved after analysis on them has completed or whether just the 
+contents of their quantities dicts will be retained for creating the 
+final catalog. The looping over halos uses a call to parallel_objects 
+allowing the user to control how many processors work on each halo. 
+The final catalog is written to disk in the output directory given 
+when the HaloCatalog object was created.
+
+All callbacks, quantities, and filters are stored in an “actions” list, 
+meaning that they are executed in the same order in which they were added. 
+This enables the use of simple, reusable, single action callbacks that 
+depend on each other. This also prevents unnecessary computation by allowing 
+the user to add filters at multiple stages to skip remaining analysis if it 
+is not warranted.
+
+Saving and Reloading Halo Catalogs
+==================================
+
+A HaloCatalog saved to disk can be reloaded as a yt dataset with the 
+standard call to load. Any side data, such as profiles, can be reloaded 
+with a load_profiles callback and a call to HaloCatalog.load.
+
+.. code-block:: python
+
+    hpf = load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
+    hc = HaloCatalog(halos_pf=hpf,
+                     output_dir="halo_catalogs/catalog_0046")
+    hc.add_callback("load_profiles", output_dir="profiles",
+                    filename="virial_profiles")
+    hc.load()
+
+Summary
+=======
+
+For a full example of how to use these methods together see 
+:ref:`halo_analysis_example`.
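
As a concrete illustration of the Running Analysis section above: a create
call that discards the Halo objects and lets parallel_objects spread the
halo loop across processors might look like the following (the njobs
keyword is an assumption about what gets forwarded to parallel_objects):

    # save_halos is described in the docs above; njobs is assumed here.
    hc.create(save_halos=False, njobs=-1)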

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:52f186664831f5290b31ec433114927b9771e224bd79d0c82dd3d9a8d9c09bf6"
+  "signature": "sha256:5d881061b9e82bd9df5d3598983c8ddc5fbec35e3bf7ae4524430dc558e27489"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -307,7 +307,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "For convenience, `unit_quantity` is also available via `uq` and `unit_array` is available via `ua`:"
+      "For convenience, `unit_quantity` is also available via `uq` and `unit_array` is available via `ua`.  You can use these arrays to create dummy arrays with the same units as another array - this is sometimes easier than manually creating a new array or quantity."
      ]
     },
     {
@@ -402,11 +402,13 @@
       "\n",
       "print a/b\n",
       "print (a/b).in_cgs()\n",
+      "print (a/b).in_mks()\n",
       "print (a/b).in_units('km/s')\n",
       "print ''\n",
       "\n",
       "print a*b\n",
-      "print (a*b).in_cgs()"
+      "print (a*b).in_cgs()\n",
+      "print (a*b).in_mks()"
      ],
      "language": "python",
      "metadata": {},
@@ -433,7 +435,10 @@
       "from yt.utilities.physical_constants import G, kboltz\n",
       "\n",
       "print \"Newton's constant: \", G\n",
-      "print \"Boltzmann constant: \", kboltz"
+      "print \"Newton's constant in MKS: \", G.in_mks(), \"\\n\"\n",
+      "\n",
+      "print \"Boltzmann constant: \", kboltz\n",
+      "print \"Boltzmann constant in MKS: \", kboltz.in_mks()"
      ],
      "language": "python",
      "metadata": {},

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
--- a/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
+++ b/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:8e1a5db9e3869bcf761ff39c5a95d21458b7c4205f00da3d3f973d398422a466"
+  "signature": "sha256:9e7ac626b3609cf5f3fb2d4ebc6e027ed923ab1c22f0acc212e42fc7535e3205"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -73,6 +73,7 @@
       "mass = dd['cell_mass']\n",
       "\n",
       "print \"Cell Masses in CGS: \\n\", mass, \"\\n\"\n",
+      "print \"Cell Masses in MKS: \\n\", mass.in_mks(), \"\\n\"\n",
       "print \"Cell Masses in Solar Masses: \\n\", mass.in_units('Msun'), \"\\n\"\n",
       "print \"Cell Masses in code units: \\n\", mass.in_units('code_mass'), \"\\n\""
      ],
@@ -87,6 +88,7 @@
       "dx = dd['dx']\n",
       "print \"Cell dx in code units: \\n\", dx, \"\\n\"\n",
       "print \"Cell dx in centimeters: \\n\", dx.in_cgs(), \"\\n\"\n",
+      "print \"Cell dx in meters: \\n\", dx.in_units('m'), \"\\n\"\n",
       "print \"Cell dx in megaparsecs: \\n\", dx.in_units('Mpc'), \"\\n\""
      ],
      "language": "python",
@@ -109,8 +111,10 @@
       "\n",
       "* `in_units`\n",
       "* `in_cgs`\n",
+      "* `in_mks`\n",
       "* `convert_to_units`\n",
-      "* `convert_to_cgs`"
+      "* `convert_to_cgs`\n",
+      "* `convert_to_mks`"
      ]
     },
     {
@@ -134,15 +138,16 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "The second, `in_cgs`, returns a copy of the array converted into the base units of yt's CGS unit system:"
+      "`in_cgs` and `in_mks` return a copy of the array converted CGS and MKS units, respectively:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "print (dd['pressure']/dd['density'])\n",
-      "print (dd['pressure']/dd['density']).in_cgs()"
+      "print (dd['pressure'])\n",
+      "print (dd['pressure']).in_cgs()\n",
+      "print (dd['pressure']).in_mks()"
      ],
      "language": "python",
      "metadata": {},

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
--- a/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
+++ b/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:a07224c25b1d938bc1014b6d9d09c1a2392912f21b821b07615e65302677ef9b"
+  "signature": "sha256:242d7005d45a82744713bfe6389e49d47f39b524d1e7fcbf5ceb2e65dc473e68"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -20,77 +20,6 @@
      "level": 3,
      "metadata": {},
      "source": [
-      "The unit registry"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "When a dataset is loaded, we attempt to detect and assign conversion factors from the internal simulation coordinate system and the physical CGS system.  These conversion factors are stored in a `unit_registry` along with conversion factors to the other known unit symbols:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.mods import *\n",
-      "\n",
-      "ds = load('Enzo_64/DD0043/data0043')\n",
-      "\n",
-      "ds.unit_registry"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.unit_registry.lut"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "\n",
-      "It is not necessary to specify a unit registry when creating a new `YTArray` or `YTQuantity` since `yt` ships with a default unit registry:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.units.unit_object import default_unit_registry as reg\n",
-      "\n",
-      "unit_names = reg.lut.keys()\n",
-      "unit_names.sort()\n",
-      "\n",
-      "# Print out the first 10 unit names\n",
-      "for i in range(10):\n",
-      "    print unit_names[i], reg.lut[unit_names[i]]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Each entry in the lookup table is the string name of a base unit and a tuple containing the CGS conversion factor and dimensions of the unit symbol."
-     ]
-    },
-    {
-     "cell_type": "heading",
-     "level": 3,
-     "metadata": {},
-     "source": [
       "Code units"
      ]
     },
@@ -98,25 +27,6 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Some of the most interesting unit symbols are the ones for \"code\" units:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "code_unit_names = [un for un in unit_names if 'code_' in un]\n",
-      "\n",
-      "print code_unit_names"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
       "Let's take a look at a cosmological enzo dataset to play with converting between physical units and code units:"
      ]
     },
@@ -132,13 +42,22 @@
      "outputs": []
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The conversion factors between Enzo's internal unit system and the physical CGS system are stored in the dataset's `unit_registry` object.  Code units have names like `code_length` and `code_time`. Let's take a look at the names of all of the code units, along with their CGS conversion factors for this cosmological enzo dataset:"
+     ]
+    },
+    {
      "cell_type": "code",
      "collapsed": false,
      "input": [
       "reg = ds.unit_registry\n",
       "\n",
-      "for un in code_unit_names:\n",
-      "    print un, reg.lut[un]"
+      "for un in reg.keys():\n",
+      "    if un.startswith('code_'):\n",
+      "        fmt_tup = (un, reg.lut[un][0], reg.lut[un][1])\n",
+      "        print \"Unit name:      {:<15}\\nCGS conversion: {:<15}\\nDimensions:     {:<15}\\n\".format(*fmt_tup)"
      ],
      "language": "python",
      "metadata": {},
@@ -295,6 +214,95 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "The unit registry"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "When you create a `YTArray` without referring to a unit registry, `yt` uses the default unit registry, which does not include code units or comoving units."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "a = YTQuantity(3, 'cm')\n",
+      "\n",
+      "print a.units.registry.keys()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "When a dataset is loaded, `yt` infers conversion factors from the internal simulation unit system to the CGS unit system.  These conversion factors are stored in a `unit_registry` along with conversion factors to the other known unit symbols.  For the cosmological Enzo dataset we loaded earlier, we can see there are a number of additional unit symbols not defined in the default unit lookup table:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print sorted([k for k in ds.unit_registry.keys() if k not in a.units.registry.keys()])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Since code units do not appear in the default unit symbol lookup table, one must explicitly refer to a unit registry when creating a `YTArray` to be able to convert to the unit system of a simulation."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To make this as clean as possible, there are array and quantity-creating convenience functions attached to the `Dataset` object:\n",
+      "\n",
+      "* `ds.arr()`\n",
+      "* `ds.quan()`\n",
+      "\n",
+      "These functions make it straightforward to create arrays and quantities that can be converted to code units or comoving units.  For example:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "a = ds.quan(3, 'code_length')\n",
+      "\n",
+      "print a\n",
+      "print a.in_cgs()\n",
+      "print a.in_units('Mpccm/h')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "b = ds.arr([3, 4, 5], 'Mpccm/h')\n",
+      "print b\n",
+      "print b.in_cgs()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -59,7 +59,7 @@
 master_doc = 'index'
 
 # General information about the project.
-project = u'yt'
+project = u'The yt Project'
 copyright = u'2013, the yt Project'
 
 # The version info for the project you're documenting, acts as replacement for
@@ -119,11 +119,16 @@
 # documentation.
 html_theme_options = dict(
     bootstrap_version = "3",
-    bootswatch_theme = "readable"
+    bootswatch_theme = "readable",
+    navbar_links = [
+        ("How to get help", "help/index"),
+        ("Bootcamp notebooks", "bootcamp/index"),
+        ("Cookbook", "cookbook/index"),
+        ],
+    navbar_sidebarrel = False,
+    globaltoc_depth = 2,
 )
 
-#html_style = "agogo_yt.css"
-
 # Add any paths that contain custom themes here, relative to this directory.
 #html_theme_path = []
 

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -43,7 +43,7 @@
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v2.png", clip_ratio=6.0)
 
 # This looks better.  Now let's try turning on opacity.
@@ -56,7 +56,7 @@
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=10.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v3.png", clip_ratio=6.0)
 
 # This looks pretty good, now lets go back to the full resolution AMRKDTree

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -19,8 +19,8 @@
 # Now we set some sane min/max values between which we want to find contours.
 # This is how we tell the clump finder what to look for -- it won't look for
 # contours connected below or above these threshold values.
-c_min = 10**na.floor(na.log10(data_source[field]).min()  )
-c_max = 10**na.floor(na.log10(data_source[field]).max()+1)
+c_min = 10**np.floor(np.log10(data_source[field]).min()  )
+c_max = 10**np.floor(np.log10(data_source[field]).max()+1)
 
 # keep only clumps with at least 20 cells
 function = 'self.data[\'%s\'].size > 20' % field

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/cookbook/multi_plot_slice_and_proj.py
--- a/doc/source/cookbook/multi_plot_slice_and_proj.py
+++ b/doc/source/cookbook/multi_plot_slice_and_proj.py
@@ -1,4 +1,5 @@
 from yt.mods import * # set up our namespace
+from yt.visualization.base_plot_types import get_multi_plot
 import matplotlib.colorbar as cb
 from matplotlib.colors import LogNorm
 
@@ -18,7 +19,7 @@
 
 slc = pf.slice(2, 0.0, fields=["density","temperature","velocity_magnitude"], 
                  center=pf.domain_center)
-proj = pf.proj(2, "density", weight_field="density", center=pf.domain_center)
+proj = pf.proj("density", 2, weight_field="density", center=pf.domain_center)
 
 slc_frb = slc.to_frb((1.0, "mpc"), 512)
 proj_frb = proj.to_frb((1.0, "mpc"), 512)

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/cookbook/offaxis_projection.py
--- a/doc/source/cookbook/offaxis_projection.py
+++ b/doc/source/cookbook/offaxis_projection.py
@@ -31,4 +31,4 @@
 # relating to what our dataset is called.
 # We save the log of the values so that the colors do not span
 # many orders of magnitude.  Try it without and see what happens.
-write_image(na.log10(image), "%s_offaxis_projection.png" % pf)
+write_image(np.log10(image), "%s_offaxis_projection.png" % pf)

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -21,13 +21,13 @@
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5], colormap = 'RdBu_r')
 cam.snapshot("v1.png", clip_ratio=6.0)
 
-# In this case, the default alphas used (na.logspace(-3,0,Nbins)) does not
+# In this case, the default alphas used (np.logspace(-3,0,Nbins)) does not
 # accentuate the outer regions of the galaxy. Let's start by bringing up the
 # alpha values for each contour to go between 0.1 and 1.0
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=na.logspace(0,0,4), colormap = 'RdBu_r')
+        alpha=np.logspace(0,0,4), colormap = 'RdBu_r')
 cam.snapshot("v2.png", clip_ratio=6.0)
 
 # Now let's set the grey_opacity to True.  This should make the inner portions
@@ -40,14 +40,14 @@
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=10.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v4.png", clip_ratio=6.0)
 
 # Let's bump up again to see if we can obscure the inner contour.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=30.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=30.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v5.png", clip_ratio=6.0)
 
 # Now we are losing sight of everything.  Let's see if we can obscure the next
@@ -55,7 +55,7 @@
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=100.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=100.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v6.png", clip_ratio=6.0)
 
 # That is very opaque!  Now let's go back and see what it would look like with

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -12,7 +12,7 @@
 
 # Create a transfer function to map field values to colors.
 # We bump up our minimum to cut out some of the background fluid
-tf = ColorTransferFunction((na.log10(mi)+2.0, na.log10(ma)))
+tf = ColorTransferFunction((np.log10(mi)+2.0, np.log10(ma)))
 
 # Add three Gaussians, evenly spaced between the min and
 # max specified above with widths of 0.02 and using the

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/cookbook/save_profiles.py
--- a/doc/source/cookbook/save_profiles.py
+++ b/doc/source/cookbook/save_profiles.py
@@ -33,7 +33,7 @@
 # separate columns into separate NumPy arrays, it is essential to set unpack=True.
 
 r, dens, std_dens, temp, std_temp = \
-	na.loadtxt("sloshing_nomag2_hdf5_plt_cnt_0150_profile.dat", unpack=True)
+	np.loadtxt("sloshing_nomag2_hdf5_plt_cnt_0150_profile.dat", unpack=True)
 
 fig1 = plt.figure()
 

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/cookbook/simple_slice_matplotlib_example.py
--- a/doc/source/cookbook/simple_slice_matplotlib_example.py
+++ b/doc/source/cookbook/simple_slice_matplotlib_example.py
@@ -21,7 +21,7 @@
 rect = (0.2,0.2,0.2,0.2)
 new_ax = fig.add_axes(rect)
 
-n, bins, patches = new_ax.hist(na.random.randn(1000)+20, 50,
+n, bins, patches = new_ax.hist(np.random.randn(1000)+20, 50,
     facecolor='yellow', edgecolor='yellow')
 new_ax.set_xlabel('Dinosaurs per furlong')
 

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -379,7 +379,7 @@
    something_else``.  Python is more forgiving than C.
  * Avoid copying memory when possible. For example, don't do ``a =
    a.reshape(3,4)`` when ``a.shape = (3,4)`` will do, and ``a = a * 3`` should be
-   ``na.multiply(a, 3, a)``.
+   ``np.multiply(a, 3, a)``.
  * In general, avoid all double-underscore method names: ``__something`` is
    usually unnecessary.
  * Doc strings should describe input, output, behavior, and any state changes

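To make the memory advice above concrete, here is a minimal sketch of the two
in-place idioms it recommends (the array contents are illustrative only):

    import numpy as np

    a = np.arange(12, dtype='float64')

    # Reshape in place by assigning to .shape; a = a.reshape(3, 4)
    # would rebind a to a new array object instead.
    a.shape = (3, 4)

    # Multiply in place by passing a as the output argument;
    # a = a * 3 would allocate a temporary of the same size.
    np.multiply(a, 3, a)
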
diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -12,7 +12,7 @@
 computation engine, Matplotlib for some visualization tasks and Mercurial for
 version control.  Because installation of all of these interlocking parts can 
 be time-consuming, yt provides an installation script which downloads and builds
-a fully-isolated Python + Numpy + Matplotlib + HDF5 + Mercurial installation.  
+a fully-isolated Python + NumPy + Matplotlib + HDF5 + Mercurial installation.  
 yt supports Linux and OSX deployment, with the possibility of deployment on 
 other Unix-like systems (XSEDE resources, clusters, etc.).  Windows is not 
 supported.
@@ -86,16 +86,41 @@
 Alternative Installation Methods
 --------------------------------
 
-If you want to forego the use of the install script, you need to make sure 
-you have yt's dependencies installed on your system.  These include: a C compiler, 
-``HDF5``, ``Freetype``, ``libpng``, ``python``, ``cython``, ``numpy``, and 
-``matplotlib``.  From here, you can use ``pip`` (which comes with ``Python``)
-to install yt as:
+If you want to forego the use of the install script, you need to make sure you
+have yt's dependencies installed on your system.  These include: a C compiler,
+``HDF5``, ``Freetype``, ``libpng``, ``python``, ``cython``, ``NumPy``, and
+``matplotlib``.  From here, you can use ``pip`` (which comes with ``Python``) to
+install yt as:
 
 .. code-block:: bash
 
   $ pip install yt
 
+The source code for yt may be found at the Bitbucket project site and can also
+be used for installation. If you prefer to use it instead of relying on external
+tools, you will need ``mercurial`` to clone the official repo:
+
+.. code-block:: bash
+
+  $ hg clone https://bitbucket.org/yt_analysis/yt
+  $ cd yt
+  $ hg update yt
+  $ python setup.py install --user
+
+This will install yt into ``$HOME/.local/lib64/python2.7/site-packages``.
+Please refer to the ``setuptools`` documentation for additional options.
+
+Provided that the required dependencies are in a predictable location, yt should
+be able to find them automatically. However, you can manually specify the prefix
+used for the installation of ``HDF5``, ``Freetype`` and ``libpng`` by using
+``hdf5.cfg``, ``freetype.cfg``, ``png.cfg`` or by setting the ``HDF5_DIR``,
+``FTYPE_DIR``, ``PNG_DIR`` environment variables respectively, e.g.
+
+.. code-block:: bash
+
+  $ echo '/usr/local' > hdf5.cfg
+  $ export FTYPE_DIR=/opt/freetype
+
 If you choose this installation method, you do not need to run the activation
 script as it is unnecessary.
 

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -15,18 +15,6 @@
    ~yt.visualization.plot_window.ProjectionPlot
    ~yt.visualization.plot_window.OffAxisProjectionPlot
 
-PlotCollection
-^^^^^^^^^^^^^^
-
-.. autosummary::
-   :toctree: generated/
-
-   ~yt.visualization.plot_collection.PlotCollection
-   ~yt.visualization.plot_collection.PlotCollectionInteractive
-   ~yt.visualization.fixed_resolution.FixedResolutionBuffer
-   ~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer
-   ~yt.visualization.base_plot_types.get_multi_plot
-
 Data Sources
 ------------
 
@@ -721,8 +709,6 @@
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ObjectIterator
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelAnalysisInterface
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelObjectIterator
-   ~yt.analysis_modules.hierarchy_subset.hierarchy_subset.ConstructedRootGrid
-   ~yt.analysis_modules.hierarchy_subset.hierarchy_subset.ExtractedHierarchy
 
 
 Testing Infrastructure

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
--- a/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
+++ b/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
@@ -1,6 +1,7 @@
 {
  "metadata": {
-  "name": ""
+  "name": "",
+  "signature": "sha256:d75e416150ccb017cfdf89973f8d4463e780da4d9bdc9a3783001d22021d9081"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -154,7 +155,7 @@
       "Npixels = 512 \n",
       "cam = pf.h.camera(c, L, W, Npixels, tfh.tf, fields=['temperature'],\n",
       "                  north_vector=[1.,0.,0.], steady_north=True, \n",
-      "                  sub_samples=5, no_ghost=False, l_max=0)\n",
+      "                  sub_samples=5, no_ghost=False)\n",
       "\n",
       "# Here we substitute the TransferFunction we constructed earlier.\n",
       "cam.transfer_function = tfh.tf\n",

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -32,8 +32,8 @@
    data_source = pf.disk([0.5, 0.5, 0.5], [0., 0., 1.],
                            (8., 'kpc'), (1., 'kpc'))
 
-   c_min = 10**na.floor(na.log10(data_source['density']).min()  )
-   c_max = 10**na.floor(na.log10(data_source['density']).max()+1)
+   c_min = 10**np.floor(np.log10(data_source['density']).min()  )
+   c_max = 10**np.floor(np.log10(data_source['density']).max()+1)
 
    function = 'self.data[\'Density\'].size > 20'
    master_clump = Clump(data_source, None, 'density', function=function)

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -254,7 +254,7 @@
    c = [0.5, 0.5, 0.5]
    N = 512
    image = off_axis_projection(pf, c, L, W, N, "density")
-   write_image(na.log10(image), "%s_offaxis_projection.png" % pf)
+   write_image(np.log10(image), "%s_offaxis_projection.png" % pf)
 
 Here, ``W`` is the width of the projection in the x, y, *and* z
 directions.

diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -119,6 +119,9 @@
     ImageArray, particle_filter, create_profile, \
     Profile1D, Profile2D, Profile3D
 
+# For backwards compatibility
+TimeSeriesData = deprecated_class(DatasetSeries)
+
 from yt.frontends.api import _frontend_container
 frontends = _frontend_container()
 
@@ -134,8 +137,7 @@
 
 # Now individual component imports from the visualization API
 from yt.visualization.api import \
-    PlotCollection, PlotCollectionInteractive, \
-    get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
+    FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
     write_bitmap, write_image, \
     apply_colormap, scale_image, write_projection, \
     SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
@@ -143,7 +145,8 @@
     show_colormaps, ProfilePlot, PhasePlot
 
 #from yt.visualization.volume_rendering.api import \
-#    off_axis_projection
+#    off_axis_projection, ColorTransferFunction, \
+#    TransferFunctionHelper
 
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects, enable_parallelism

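For context, the backwards-compatibility line above follows a common
deprecation-shim pattern; a hedged sketch of what such a helper might look
like (an assumption, not the actual yt implementation of deprecated_class):

    import warnings

    def deprecated_class(cls):
        # Wrap the renamed class so the old name keeps working but
        # emits a DeprecationWarning when instantiated.
        def _wrapped(*args, **kwargs):
            warnings.warn("This class has been renamed; use %s instead."
                          % cls.__name__, DeprecationWarning, stacklevel=2)
            return cls(*args, **kwargs)
        return _wrapped

    # The old name stays usable, with a warning:
    #   TimeSeriesData = deprecated_class(DatasetSeries)
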
diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -4,10 +4,9 @@
 from yt.analysis_modules.absorption_spectrum.absorption_line \
         import voigt
 
-
 def generate_total_fit(x, fluxData, orderFits, speciesDicts, 
-        minError=1E-5, complexLim=.999,
-        fitLim=.99, minLength=3, 
+        minError=1E-4, complexLim=.995,
+        fitLim=.97, minLength=3, 
         maxLength=1000, splitLim=.99,
         output_file=None):
 
@@ -90,6 +89,7 @@
     fluxData[0]=1
     fluxData[-1]=1
 
+
     #Find all regions where lines/groups of lines are present
     cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
             complexLim=complexLim, minLength=minLength,
@@ -111,6 +111,7 @@
             yDatBounded=fluxData[b[1]:b[2]]
             yFitBounded=yFit[b[1]:b[2]]
 
+
             #Find init redshift
             z=(xBounded[yDatBounded.argmin()]-initWl)/initWl
 
@@ -121,24 +122,33 @@
 
             #Fit Using complex tools
             newLinesP,flag=_complex_fit(xBounded,yDatBounded,yFitBounded,
-                    z,fitLim,minError*(b[2]-b[1]),speciesDict)
+                    z,fitLim,minError,speciesDict)
+
+            #If flagged as a bad fit, species is lyman alpha,
+            #   and it may be a saturated line, use special tools
+            if flag and species=='lya' and min(yDatBounded)<.1:
+               newLinesP=_large_flag_fit(xBounded,yDatBounded,
+                        yFitBounded,z,speciesDict,
+                        minSize,minError)
+
+            if na.size(newLinesP)> 0:
+
+                #Check for EXPLOOOOSIIONNNSSS
+                newLinesP = _check_numerical_instability(x, newLinesP, speciesDict,b)
+
 
             #Check existence of partner lines if applicable
             if len(speciesDict['wavelength']) != 1:
                 newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData, 
-                        b, minError*(b[2]-b[1]),
-                        x0, xRes, speciesDict)
+                        b, minError, x0, xRes, speciesDict)
 
-            #If flagged as a bad fit, species is lyman alpha,
-            #   and it may be a saturated line, use special tools
-            if flag and species=='lya' and min(yDatBounded)<.1:
-                newLinesP=_large_flag_fit(xBounded,yDatBounded,
-                        yFitBounded,z,speciesDict,
-                        minSize,minError*(b[2]-b[1]))
+
+
 
             #Adjust total current fit
             yFit=yFit*_gen_flux_lines(x,newLinesP,speciesDict)
 
+
             #Add new group to all fitted lines
             if na.size(newLinesP)>0:
                 speciesLines['N']=na.append(speciesLines['N'],newLinesP[:,0])
@@ -149,6 +159,7 @@
 
         allSpeciesLines[species]=speciesLines
 
+
     if output_file:
         _output_fit(allSpeciesLines, output_file)
 
@@ -205,10 +216,12 @@
     #Setup initial line guesses
     if initP==None: #Regular fit
         initP = [0,0,0] 
-        if min(yDat)<.5: #Large lines get larger initial guess 
-            initP[0] = 10**16
+        if min(yDat)<.01: #Large lines get larger initial guess 
+            initP[0] = speciesDict['init_N']*10**2
+        elif min(yDat)<.5:
+            initP[0] = speciesDict['init_N']*10**1
         elif min(yDat)>.9: #Small lines get smaller initial guess
-            initP[0] = 10**12.5
+            initP[0] = speciesDict['init_N']*10**-1
         else:
             initP[0] = speciesDict['init_N']
         initP[1] = speciesDict['init_b']
@@ -225,9 +238,16 @@
         return [],False
     
     #Values to proceed through first run
-    errSq,prevErrSq=1,1000
+    errSq,prevErrSq,prevLinesP=1,10*len(x),[]
 
+    if errBound == None:
+        errBound = len(yDat)*(max(1-yDat)*1E-2)**2
+    else:
+        errBound = errBound*len(yDat)
+
+    flag = False
     while True:
+
         #Initial parameter guess from joining parameters from all lines
         #   in lines into a single array
         initP = linesP.flatten()
@@ -237,6 +257,7 @@
                 args=(x,yDat,yFit,speciesDict),
                 epsfcn=1E-10,maxfev=1000)
 
+
         #Set results of optimization
         linesP = na.reshape(fitP,(-1,3))
 
@@ -247,17 +268,23 @@
         #Sum to get idea of goodness of fit
         errSq=sum(dif**2)
 
+        if any(linesP[:,1]==speciesDict['init_b']):
+         #   linesP = prevLinesP
+
+            flag = True
+            break
+            
         #If good enough, break
-        if errSq < errBound: 
+        if errSq < errBound:        
             break
 
         #If last fit was worse, reject the last line and revert to last fit
-        if errSq > prevErrSq*10:
+        if errSq > prevErrSq*10 :
             #If its still pretty damn bad, cut losses and try flag fit tools
             if prevErrSq >1E2*errBound and speciesDict['name']=='HI lya':
                 return [],True
             else:
-                yNewFit=_gen_flux_lines(x,prevLinesP,speciesDict)
+                linesP = prevLinesP
                 break
 
         #If too many lines 
@@ -266,21 +293,26 @@
             if errSq >1E2*errBound and speciesDict['name']=='HI lya':
                 return [],True
             else:
-                break 
+                flag = True
+                break
 
         #Store previous data in case reject next fit
         prevErrSq = errSq
         prevLinesP = linesP
 
-
         #Set up initial condition for new line
         newP = [0,0,0] 
-        if min(dif)<.1:
-            newP[0]=10**12
-        elif min(dif)>.9:
-            newP[0]=10**16
+
+        yAdjusted = 1+yFit*yNewFit-yDat
+ 
+        if min(yAdjusted)<.01: #Large lines get larger initial guess 
+            newP[0] = speciesDict['init_N']*10**2
+        elif min(yAdjusted)<.5:
+            newP[0] = speciesDict['init_N']*10**1
+        elif min(yAdjusted)>.9: #Small lines get smaller initial guess
+            newP[0] = speciesDict['init_N']*10**-1
         else:
-            newP[0]=10**14
+            newP[0] = speciesDict['init_N']
         newP[1] = speciesDict['init_b']
         newP[2]=(x[dif.argmax()]-wl0)/wl0
         linesP=na.append(linesP,[newP],axis=0)
@@ -290,12 +322,12 @@
     #   acceptable range, as given in dict ref
     remove=[]
     for i,p in enumerate(linesP):
-        check=_check_params(na.array([p]),speciesDict)
+        check=_check_params(na.array([p]),speciesDict,x)
         if check: 
             remove.append(i)
     linesP = na.delete(linesP,remove,axis=0)
 
-    return linesP,False
+    return linesP,flag
 
 def _large_flag_fit(x, yDat, yFit, initz, speciesDict, minSize, errBound):
     """
@@ -489,6 +521,9 @@
     #List of lines to remove
     removeLines=[]
 
+    #Set error
+
+
     #Iterate through all sets of line parameters
     for i,p in enumerate(linesP):
 
@@ -501,16 +536,23 @@
             lb = _get_bounds(p[2],b,wl,x0,xRes)
             xb,yb=x[lb[0]:lb[1]],y[lb[0]:lb[1]]
 
+            if errBound == None:
+                errBound = 10*len(yb)*(max(1-yb)*1E-2)**2
+            else:
+                errBound = 10*errBound*len(yb)
+
             #Generate a fit and find the difference to data
             yFitb=_gen_flux_lines(xb,na.array([p]),speciesDict)
             dif =yb-yFitb
 
+
+
             #Only counts as an error if line is too big ---------------<
             dif = [k for k in dif if k>0]
             err = sum(dif)
 
             #If the fit is too bad then add the line to list of removed lines
-            if err > errBound*1E2:
+            if err > errBound:
                 removeLines.append(i)
                 break
 
@@ -640,21 +682,13 @@
         #Check if the region needs to be divided
         if b[2]-b[1]>maxLength:
 
-            #Find the minimum absorption in the middle two quartiles of
-            #   the large complex
-            q=(b[2]-b[1])/4
-            cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+            split = _split_region(yDat,b,splitLim)
 
-            #Only break it up if the minimum absorption is actually low enough
-            if yDat[cut]>splitLim:
-
-                #Get the new two peaks
-                b1Peak = yDat[b[1]:cut].argmin()+b[1]
-                b2Peak = yDat[cut:b[2]].argmin()+cut
+            if split:
 
                 #add the two regions separately
-                cBounds.insert(i+1,[b1Peak,b[1],cut])
-                cBounds.insert(i+2,[b2Peak,cut,b[2]])
+                cBounds.insert(i+1,split[0])
+                cBounds.insert(i+2,split[1])
 
                 #Remove the original region
                 cBounds.pop(i)
@@ -663,7 +697,33 @@
 
     return cBounds
 
-def _gen_flux_lines(x, linesP, speciesDict):
+
+def _split_region(yDat,b,splitLim):
+    #Find the minimum absorption in the middle two quartiles of
+    #   the large complex
+
+    q=(b[2]-b[1])/4
+    cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+
+    #Only break it up if the minimum absorption is actually low enough
+    if yDat[cut]>splitLim:
+
+        #Get the new two peaks
+        b1Peak = yDat[b[1]:cut].argmin()+b[1]
+        b2Peak = yDat[cut:b[2]].argmin()+cut
+
+        region_1 = [b1Peak,b[1],cut]
+        region_2 = [b2Peak,cut,b[2]]
+
+        return [region_1,region_2]
+
+    else:
+
+        return []
+
+
+
+def _gen_flux_lines(x, linesP, speciesDict,firstLine=False):
     """
     Calculates the normalized flux for a region of wavelength space
     generated by a set of absorption lines.
@@ -692,6 +752,9 @@
             g=speciesDict['Gamma'][i]
             wl=speciesDict['wavelength'][i]
             y = y+ _gen_tau(x,p,f,g,wl)
+            if firstLine: 
+                break
+
     flux = na.exp(-y)
     return flux
 
@@ -744,21 +807,25 @@
         the difference between the fit generated by the parameters
         given in pTotal multiplied by the previous fit and the desired
         flux profile, w/ first index modified appropriately for bad 
-        parameter choices
+        parameter choices and additional penalty for fitting with a lower
+        flux than observed.
     """
 
     pTotal.shape = (-1,3)
     yNewFit = _gen_flux_lines(x,pTotal,speciesDict)
 
     error = yDat-yFit*yNewFit
-    error[0] = _check_params(pTotal,speciesDict)
+    error_plus = (yDat-yFit*yNewFit).clip(min=0)
+
+    error = error+error_plus
+    error[0] = _check_params(pTotal,speciesDict,x)
 
     return error
 
-def _check_params(p, speciesDict):
+def _check_params(p, speciesDict,xb):
     """
     Check to see if any of the parameters in p fall outside the range 
-        given in speciesDict.
+        given in speciesDict or on the boundaries.
 
     Parameters
     ----------
@@ -767,6 +834,8 @@
     speciesDict : dictionary
         dictionary with properties giving the max and min
         values appropriate for each parameter N,b, and z.
+    xb : (N) ndarray
+        wavelength array [nm]
 
     Returns
     -------
@@ -774,16 +843,137 @@
         0 if all values are fine
         999 if any values fall outside acceptable range
     """
+
+    minz = (xb[0])/speciesDict['wavelength'][0]-1
+    maxz = (xb[-1])/speciesDict['wavelength'][0]-1
+
     check = 0
-    if any(p[:,0] > speciesDict['maxN']) or\
-          any(p[:,0] < speciesDict['minN']) or\
-          any(p[:,1] > speciesDict['maxb']) or\
-          any(p[:,1] < speciesDict['minb']) or\
-          any(p[:,2] > speciesDict['maxz']) or\
-          any(p[:,2] < speciesDict['minz']):
+    if any(p[:,0] >= speciesDict['maxN']) or\
+          any(p[:,0] <= speciesDict['minN']) or\
+          any(p[:,1] >= speciesDict['maxb']) or\
+          any(p[:,1] <= speciesDict['minb']) or\
+          any(p[:,2] >= maxz) or\
+          any(p[:,2] <= minz):
               check = 999
+              
     return check
 
+def _check_optimization_init(p,speciesDict,initz,xb,yDat,yFit,minSize,errorBound):
+
+    """
+    Check to see if any of the parameters in p are the
+    same as the initial parameters and, if so, attempt to 
+    split the region and refit it.
+
+    Parameters
+    ----------
+    p : (3,) ndarray
+        array with form [[N1, b1, z1], ...] 
+    speciesDict : dictionary
+        dictionary with properties giving the max and min
+        values appropriate for each parameter N,b, and z.
+    xb : (N) ndarray
+        wavelength array [nm]
+    """
+
+    # Check if anything is a default parameter
+    if any(p[:,0] == speciesDict['init_N']) or\
+          any(p[:,0] == speciesDict['init_N']*10) or\
+          any(p[:,0] == speciesDict['init_N']*100) or\
+          any(p[:,0] == speciesDict['init_N']*.1) or\
+          any(p[:,1] == speciesDict['init_b']) or\
+          any(p[:,1] == speciesDict['maxb']):
+
+            # These are the initial bounds
+            init_bounds = [yDat.argmin(),0,len(xb)-1]
+
+            # Gratuitous limit for splitting the region
+            newSplitLim = 1 - (1-min(yDat))*.5
+
+            # Attempt to split region
+            split = _split_region(yDat,init_bounds,newSplitLim)
+            
+            # If we can't split it, just reject it. It's unphysical
+            # to just keep the default parameters and we're out of
+            # options at this point
+            if not split:
+                return []
+
+            # Else set up the bounds for each region and fit separately
+            b1,b2 = split[0][2], split[1][1]
+
+            p1,flag = _complex_fit(xb[:b1], yDat[:b1], yFit[:b1],
+                            initz, minSize, errorBound, speciesDict)
+
+            p2,flag = _complex_fit(xb[b2:], yDat[b2:], yFit[b2:],
+                            initz, minSize, errorBound, speciesDict)
+
+            # Make the final line parameters. It's annoying because
+            # one or both regions may have fit to nothing
+            if na.size(p1)> 0 and na.size(p2)>0:
+                p = na.r_[p1,p2]
+            elif na.size(p1) > 0:
+                p = p1
+            else:
+                p = p2
+
+    return p
+
+
+def _check_numerical_instability(x, p, speciesDict,b):
+
+    """
+    Check to see if any of the parameters in p are causing
+    unstable numerical effects outside the region of fit
+
+    Parameters
+    ----------
+    p : (3,) ndarray
+        array with form [[N1, b1, z1], ...] 
+    speciesDict : dictionary
+        dictionary with properties giving the max and min
+        values appropriate for each parameter N,b, and z.
+    x : (N) ndarray
+        wavelength array [nm]
+    b : (3) list
+        list of integers indicating bounds of region fit in x
+    """
+
+    remove_lines = []
+
+
+    for i,line in enumerate(p):
+
+        # First, check whether the line is at risk of instability
+        if line[1]<5 or line[0] < 1E12:
+
+
+            # Get all flux that isn't part of the fit, plus a little wiggle
+            # room; max and min prevent boundary errors
+
+            flux = _gen_flux_lines(x,[line],speciesDict,firstLine=True)
+            flux = na.r_[flux[:max(b[1]-10,0)], flux[min(b[2]+10,len(x)):]]
+
+            #Find regions that are absorbing outside the region we fit
+            flux_dif = 1 - flux
+            absorbing_coefficient = max(abs(flux_dif))
+
+
+            #Really there shouldn't be any absorption outside
+            #the region we fit, but we'll give some leeway.
+            #For high-resolution spectra the tiny bits on the edges
+            #can give a non-negligible amount of flux. Plus the errors
+            #we are looking for are HUGE.
+            if absorbing_coefficient > .1:
+
+                # we just set it to no fit because we've tried
+                # everything else at this point. this region just sucks :(
+                remove_lines.append(i)
+    
+    if remove_lines:
+        p = na.delete(p, remove_lines, axis=0)
+
+    return p
 
 def _output_fit(lineDic, file_name = 'spectrum_fit.h5'):
     """
@@ -815,4 +1005,5 @@
         f.create_dataset("{0}/z".format(ion),data=params['z'])
         f.create_dataset("{0}/complex".format(ion),data=params['group#'])
     print 'Writing spectrum fit to {0}'.format(file_name)
+    f.close()
 

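The fitting loop above drives scipy.optimize.leastsq with a residual
function; a self-contained toy version of that call pattern (using a
made-up Gaussian model rather than the Voigt profiles used here):

    import numpy as np
    from scipy import optimize

    def _residuals(p, x, y):
        # Per-sample difference between a toy model and the data;
        # leastsq minimizes the sum of squares of this vector.
        amp, width = p
        return y - amp * np.exp(-(x / width) ** 2)

    x = np.linspace(-5, 5, 100)
    y = 2.0 * np.exp(-(x / 1.5) ** 2)
    fitP, ier = optimize.leastsq(_residuals, [1.0, 1.0], args=(x, y),
                                 epsfcn=1E-10, maxfev=1000)
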
diff -r 8df33c24251188120ea456251fbb8c0652b36371 -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -63,12 +63,6 @@
     HaloProfiler, \
     FakeProfile
 
-from .hierarchy_subset.api import \
-    ConstructedRootGrid, \
-    AMRExtractedGridProxy, \
-    ExtractedHierarchy, \
-    ExtractedParameterFile
-
 from .level_sets.api import \
     identify_contours, \
     Clump, \

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/88ebca8c09cd/
Changeset:   88ebca8c09cd
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-17 20:07:01+00:00
Summary:     Switch out set_width.
Affected #:  2 files

diff -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 -r 88ebca8c09cd951f22bfef04be45d1562a955cb2 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -88,7 +88,7 @@
 
     def set_width(self, width):
         if not iterable(width):
-            width = YTArray([width, width, width], input_units="code_length")
+            width = [width, width, width] # No way to get code units.
         self.width = width
         self.switch_orientation()
 

diff -r d4a6c2805356ffbd192cbb3bb4d3cf44005be6d4 -r 88ebca8c09cd951f22bfef04be45d1562a955cb2 yt/visualization/volume_rendering/tests/test_composite.py
--- a/yt/visualization/volume_rendering/tests/test_composite.py
+++ b/yt/visualization/volume_rendering/tests/test_composite.py
@@ -32,7 +32,7 @@
 
 sc.add_source(op)
 
-cam.set_width(1.0)
+cam.set_width( pf.domain_width )
 
 # DRAW SOME LINES
 npoints = 100


https://bitbucket.org/yt_analysis/yt/commits/a6ac57b7a424/
Changeset:   a6ac57b7a424
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-17 22:18:37+00:00
Summary:     A few fixes for lines to make things clearer.
Affected #:  1 file

diff -r 88ebca8c09cd951f22bfef04be45d1562a955cb2 -r a6ac57b7a42460943f9827ba4e967e9614b88fdc yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -18,6 +18,7 @@
 cimport numpy as np
 cimport cython
 cimport libc.math as math
+from libc.math cimport abs
 from fp_utils cimport fmin, fmax
 
 cdef extern from "stdlib.h":
@@ -229,7 +230,10 @@
     cdef int has_alpha = (image.shape[2] == 4)
     for j in range(0, nl, 2):
         # From wikipedia http://en.wikipedia.org/wiki/Bresenham's_line_algorithm
-        x0 = xs[j]; y0 = ys[j]; x1 = xs[j+1]; y1 = ys[j+1]
+        x0 = xs[j] 
+        y0 = ys[j]
+        x1 = xs[j+1]
+        y1 = ys[j+1]
         dx = abs(x1-x0)
         dy = abs(y1-y0)
         err = dx - dy

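For reference, the dx/dy/err variables above set up the classic integer
error-accumulation form of Bresenham's algorithm; a minimal pure-Python
sketch of the full loop (not the Cython code itself, and assuming integer
endpoints):

    def bresenham(x0, y0, x1, y1):
        # Walk from (x0, y0) to (x1, y1), one pixel at a time, letting
        # the accumulated error decide which axis advances.
        dx, dy = abs(x1 - x0), abs(y1 - y0)
        sx = 1 if x0 < x1 else -1
        sy = 1 if y0 < y1 else -1
        err = dx - dy
        points = []
        while True:
            points.append((x0, y0))
            if x0 == x1 and y0 == y1:
                break
            e2 = 2 * err
            if e2 > -dy:
                err -= dy
                x0 += sx
            if e2 < dx:
                err += dx
                y0 += sy
        return points
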

https://bitbucket.org/yt_analysis/yt/commits/b97de4cd8760/
Changeset:   b97de4cd8760
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-03 14:57:23+00:00
Summary:     Merging from upstream.
Affected #:  75 files

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -660,7 +660,6 @@
    ~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_passthrough
    ~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_root_only
    ~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_simple_proxy
-   ~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_splitter
 
 Math Utilities
 --------------

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -104,42 +104,31 @@
 
 -------------
 
-.. function:: annotate_hop_circles(self, hop_output, max_number=None, annotate=False, min_size=20, max_size=10000000, font_size=8, print_halo_size=False, print_halo_mass=False, width=None):
+.. function:: annotate_halos(self, halo_catalog, col='white', alpha =1, width= None):
 
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.HopCircleCallback`.)
+   (This is a proxy for :class:`~yt.visualization.plot_modifications.HaloCatalogCallback`.)
 
-   Accepts a :class:`yt.HopList` *hop_output* and plots up
-   to *max_number* (None for unlimited) halos as circles.
+   Accepts a :class:`yt.HaloCatalog` *HaloCatalog* and plots 
+   a circle at the location of each halo with the radius of
+   the circle corresponding to the virial radius of the halo.
+   If *width* is set to None (default) all halos are plotted.
+   Otherwise, only halos that fall within a slab of width
+   *width* centered on the center of the plot data are plotted. The 
+   color and transparency of the circles can be controlled with
+   *col* and *alpha* respectively.
 
 .. python-script::
+   
+   from yt.mods import *
+   data_pf = load('Enzo_64/RD0006/RD0006')
+   halos_pf = load('rockstar_halos/halos_0.0.bin')
 
-   from yt.mods import *
-   pf = load("Enzo_64/DD0043/data0043")
-   halos = HaloFinder(pf)
-   p = ProjectionPlot(pf, "z", "density")
-   p.annotate_hop_circles(halos)
-   p.save()
+   hc = HaloCatalog(halos_pf=halos_pf)
+   hc.create()
 
--------------
-
-.. function:: annotate_hop_particles(self, hop_output, max_number, p_size=1.0, min_size=20, alpha=0.2):
-
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.HopParticleCallback`.)
-
-   Adds particle positions for the members of each halo as
-   identified by HOP. Along *axis* up to *max_number* groups
-   in *hop_output* that are larger than *min_size* are
-   plotted with *p_size* pixels per particle;  *alpha*
-   determines the opacity of each particle.
-
-.. python-script::
-
-   from yt.mods import *
-   pf = load("Enzo_64/DD0043/data0043")
-   halos = HaloFinder(pf)
-   p = ProjectionPlot(pf, "x", "density", center='m', width=(10, 'Mpc'))
-   p.annotate_hop_particles(halos, max_number=100, p_size=5.0)
-   p.save()
+   prj = ProjectionPlot(data_pf, 'z', 'density')
+   prj.annotate_halos(hc)
+   prj.save()
 
 -------------
 

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -154,6 +154,9 @@
 from yt.convenience import \
     load, simulation
 
+from yt.testing import \
+    run_nose
+
 # Import some helpful math utilities
 from yt.utilities.math_utils import \
     ortho_find, quartiles, periodic_position

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/analysis_modules/halo_analysis/api.py
--- a/yt/analysis_modules/halo_analysis/api.py
+++ b/yt/analysis_modules/halo_analysis/api.py
@@ -20,6 +20,9 @@
 from .halo_callbacks import \
      add_callback
 
+from .halo_finding_methods import \
+     add_finding_method
+
 from .halo_filters import \
      add_filter
      

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/analysis_modules/halo_analysis/finding_methods.py
--- a/yt/analysis_modules/halo_analysis/finding_methods.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-Halo Finding methods
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .operator_registry import \
-    hf_registry
-
-class HaloFindingMethod(object):
-    pass

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/analysis_modules/halo_analysis/halo_catalog.py
--- a/yt/analysis_modules/halo_analysis/halo_catalog.py
+++ b/yt/analysis_modules/halo_analysis/halo_catalog.py
@@ -30,20 +30,9 @@
 from .operator_registry import \
      callback_registry, \
      filter_registry, \
-     hf_registry, \
+     finding_method_registry, \
      quantity_registry
 
-from yt.analysis_modules.halo_finding.halo_objects import \
-    FOFHaloFinder, HOPHaloFinder
-from yt.frontends.halo_catalogs.halo_catalog.data_structures import \
-    HaloCatalogDataset
-from yt.frontends.stream.data_structures import \
-    load_particles
-from yt.frontends.halo_catalogs.rockstar.data_structures import \
-    RockstarDataset
-from yt.analysis_modules.halo_finding.rockstar.api import \
-    RockstarHaloFinder
-
 class HaloCatalog(ParallelAnalysisInterface):
     r"""Create a HaloCatalog: an object that allows for the creation and association
     of data with a set of halo objects.
@@ -103,7 +92,7 @@
 
     See Also
     --------
-    add_callback, add_filter, add_quantity
+    add_callback, add_filter, add_finding_method, add_quantity
     
     """
     
@@ -113,7 +102,6 @@
         ParallelAnalysisInterface.__init__(self)
         self.halos_pf = halos_pf
         self.data_pf = data_pf
-        self.finder_method = finder_method
         self.output_dir = ensure_dir(output_dir)
         if os.path.basename(self.output_dir) != ".":
             self.output_prefix = os.path.basename(self.output_dir)
@@ -133,6 +121,10 @@
                 data_source = data_pf.h.all_data()
         self.data_source = data_source
 
+        if finder_method is not None:
+            finder_method = finding_method_registry.find(finder_method)
+        self.finder_method = finder_method            
+        
         # all of the analysis actions to be performed: callbacks, filters, and quantities
         self.actions = []
         # fields to be written to the halo catalog
@@ -358,16 +350,22 @@
 
         if self.halos_pf is None:
             # Find the halos and make a dataset of them
-            particles_pf = self.find_halos()
+            self.halos_pf = self.finder_method(self.data_pf)
+            if self.halos_pf is None:
+                mylog.warning('No halos were found for {0}'.format(\
+                        self.data_pf.basename))
+                if save_catalog:
+                    self.halos_pf = self.data_pf
+                    self.save_catalog()
+                    self.halos_pf = None
+                return
 
             # Assign pf and data sources appropriately
-            self.halos_pf = particles_pf
-            self.data_source = particles_pf.all_data()
+            self.data_source = self.halos_pf.all_data()
 
             # Add all of the default quantities that all halos must have
             self.add_default_quantities('all')
 
-
         my_index = np.argsort(self.data_source["particle_identifier"])
         for i in parallel_objects(my_index, njobs=njobs, dynamic=dynamic):
             new_halo = Halo(self)
@@ -400,80 +398,6 @@
         if save_catalog:
             self.save_catalog()
 
-    def find_halos(self):
-
-        finder_method = (self.finder_method).lower()
-
-        if finder_method == "hop":
-            halo_list = HOPHaloFinder(self.data_pf)
-            halos_pf = self._parse_old_halo_list(halo_list)
-
-        elif finder_method == "fof":
-            halo_list = FOFHaloFinder(self.data_pf)
-            halos_pf = self._parse_old_halo_list(halo_list)
-            
-        elif finder_method == 'rockstar':
-            rh = RockstarHaloFinder(self.data_pf, 
-                outbase='{0}/rockstar_halos'.format(self.output_prefix))
-            rh.run()
-            halos_pf = RockstarDataset('{0}/rockstar_halos/halos_0.0.bin'.format(self.output_prefix))
-            halos_pf.create_field_info()
-        else:
-            raise RuntimeError("finder_method must be 'fof', 'hop', or 'rockstar'")
-
-        for attr in ["current_redshift", "current_time",
-                     "domain_dimensions",
-                     "cosmological_simulation", "omega_lambda",
-                     "omega_matter", "hubble_constant"]:
-            attr_val = getattr(self.data_pf, attr)
-            setattr(halos_pf, attr, attr_val)
-        halos_pf.current_time = halos_pf.current_time.in_cgs()
-
-        return halos_pf
-
-    def _parse_old_halo_list(self, halo_list):
-
-
-        data_pf = self.data_pf
-        num_halos = len(halo_list)
-
-        # Set up fields that we want to pull from identified halos and their units
-        new_fields = ['particle_identifier', 'particle_mass', 'particle_position_x', 
-            'particle_position_y','particle_position_z',
-            'virial_radius']
-        new_units = [ '', 'g', 'cm', 'cm','cm','cm']
-
-        # Set up a dictionary based on those fields 
-        # with empty arrays where we will fill in their values
-        halo_properties = { f : (np.zeros(num_halos),unit) \
-            for f, unit in zip(new_fields,new_units)}
-
-        # Iterate through the halos pulling out their positions and virial quantities
-        # and filling in the properties dictionary
-        for i,halo in enumerate(halo_list):
-            halo_properties['particle_identifier'][0][i] = i
-            halo_properties['particle_mass'][0][i] = halo.virial_mass().in_cgs()
-            halo_properties['virial_radius'][0][i] = halo.virial_radius().in_cgs()
-
-            com = halo.center_of_mass().in_cgs()
-            halo_properties['particle_position_x'][0][i] = com[0]
-            halo_properties['particle_position_y'][0][i] = com[1]
-            halo_properties['particle_position_z'][0][i] = com[2]
-
-        # Define a bounding box based on original data pf
-        bbox = np.array([data_pf.domain_left_edge.in_cgs(),
-                data_pf.domain_right_edge.in_cgs()]).T
-
-        # Create a pf with the halos as particles
-        particle_pf = load_particles(halo_properties, 
-                bbox=bbox, length_unit = 1, mass_unit=1)
-
-        # Create the field info dictionary so we can reference those fields
-        particle_pf.create_field_info()
-
-        return particle_pf
-
-
     def save_catalog(self):
         "Write out hdf5 file with all halo quantities."
 
@@ -513,4 +437,3 @@
         self.add_quantity("particle_position_z", field_type=field_type)
         self.add_quantity("virial_radius", field_type=field_type)
 
-

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/analysis_modules/halo_analysis/halo_filters.py
--- a/yt/analysis_modules/halo_analysis/halo_filters.py
+++ b/yt/analysis_modules/halo_analysis/halo_filters.py
@@ -13,6 +13,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import numpy as np
+
+from yt.utilities.spatial import KDTree
+
 from .halo_callbacks import HaloCallback
 from .operator_registry import filter_registry
 
@@ -58,3 +62,44 @@
     return eval("%s %s %s" % (h_value, operator, value))
 
 add_filter("quantity_value", quantity_value)
+
+def _not_subhalo(halo, field_type="halos"):
+    """
+    Only return true if this halo is not a subhalo.
+    
+    This is used for halo finders, such as Rockstar, that output parent
+    halos and subhalos together.
+    """
+
+    if not hasattr(halo.halo_catalog, "parent_dict"):
+        halo.halo_catalog.parent_dict = \
+          create_parent_dict(halo.halo_catalog.data_source, ptype=field_type)
+    return halo.halo_catalog.parent_dict[int(halo.quantities["particle_identifier"])] == -1
+add_filter("not_subhalo", _not_subhalo)
+
+def create_parent_dict(data_source, ptype="halos"):
+    """
+    Create a dictionary of halo parents to allow for filtering of subhalos.
+
+    For a pair of halos whose distance is smaller than the radius of at least 
+    one of the halos, the parent is defined as the halo with the larger radius.
+    Parent halos (halos with no parents of their own) have parent index values of -1.
+    """
+    pos = np.rollaxis(
+        np.array([data_source[ptype, "particle_position_x"].in_units("Mpc"),
+                  data_source[ptype, "particle_position_y"].in_units("Mpc"),
+                  data_source[ptype, "particle_position_z"].in_units("Mpc")]), 1)
+    rad = data_source[ptype, "virial_radius"].in_units("Mpc").to_ndarray()
+    ids = data_source[ptype, "particle_identifier"].to_ndarray().astype("int")
+    parents = -1 * np.ones_like(ids, dtype="int")
+    my_tree = KDTree(pos)
+
+    for i in xrange(ids.size):
+        neighbors = np.array(
+            my_tree.query_ball_point(pos[i], rad[i], p=2))
+        if neighbors.size > 1:
+            parents[neighbors] = ids[neighbors[np.argmax(rad[neighbors])]]
+
+    parents[ids == parents] = -1
+    parent_dict = dict(zip(ids, parents))
+    return parent_dict

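To see what create_parent_dict computes, here is a toy run of the same logic
on three made-up halos, substituting SciPy's cKDTree for
yt.utilities.spatial.KDTree (an assumption made for this sketch):

    import numpy as np
    from scipy.spatial import cKDTree

    # Toy halos: positions (Mpc), virial radii (Mpc), integer ids.
    pos = np.array([[0.0, 0.0, 0.0], [0.1, 0.0, 0.0], [5.0, 5.0, 5.0]])
    rad = np.array([1.0, 0.2, 0.5])
    ids = np.array([0, 1, 2])

    parents = -1 * np.ones_like(ids)
    tree = cKDTree(pos)
    for i in range(ids.size):
        neighbors = np.array(tree.query_ball_point(pos[i], rad[i], p=2))
        if neighbors.size > 1:
            # The largest halo in the overlapping set is the parent.
            parents[neighbors] = ids[neighbors[np.argmax(rad[neighbors])]]
    parents[ids == parents] = -1
    print(dict(zip(ids, parents)))   # {0: -1, 1: 0, 2: -1}
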
diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/analysis_modules/halo_analysis/halo_finding_methods.py
--- /dev/null
+++ b/yt/analysis_modules/halo_analysis/halo_finding_methods.py
@@ -0,0 +1,141 @@
+"""
+Halo Finding methods
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.analysis_modules.halo_finding.halo_objects import \
+    FOFHaloFinder, HOPHaloFinder
+from yt.frontends.halo_catalogs.halo_catalog.data_structures import \
+    HaloCatalogDataset
+from yt.frontends.stream.data_structures import \
+    load_particles
+
+from .operator_registry import \
+    finding_method_registry
+
+
+def add_finding_method(name, function):
+    finding_method_registry[name] = HaloFindingMethod(function)
+    
+class HaloFindingMethod(object):
+    r"""
+    A halo finding method is a callback that performs halo finding on a 
+    dataset and returns a new dataset that is the loaded halo finder output.
+    """
+    def __init__(self, function, args=None, kwargs=None):
+        self.function = function
+        self.args = args
+        if self.args is None: self.args = []
+        self.kwargs = kwargs
+        if self.kwargs is None: self.kwargs = {}
+
+    def __call__(self, ds):
+        return self.function(ds, *self.args, **self.kwargs)
+
+def _hop_method(pf):
+    r"""
+    Run the Hop halo finding method.
+    """
+    
+    halo_list = HOPHaloFinder(pf)
+    halos_pf = _parse_old_halo_list(pf, halo_list)
+    return halos_pf
+add_finding_method("hop", _hop_method)
+
+def _fof_method(pf):
+    r"""
+    Run the FoF halo finding method.
+    """
+
+    halo_list = FOFHaloFinder(pf)
+    halos_pf = _parse_old_halo_list(pf, halo_list)
+    return halos_pf
+add_finding_method("fof", _fof_method)
+
+def _rockstar_method(pf):
+    r"""
+    Run the Rockstar halo finding method.
+    """
+
+    from yt.frontends.halo_catalogs.rockstar.data_structures import \
+     RockstarDataset
+    from yt.analysis_modules.halo_finding.rockstar.api import \
+     RockstarHaloFinder
+    
+    rh = RockstarHaloFinder(pf)
+    rh.run()
+
+
+    halos_pf = RockstarDataset("rockstar_halos/halos_0.0.bin")
+    try:
+        halos_pf.create_field_info()
+    except ValueError:
+        return None
+
+    return halos_pf
+add_finding_method("rockstar", _rockstar_method)
+
+def _parse_old_halo_list(data_pf, halo_list):
+    r"""
+    Convert the halo list into a loaded dataset.
+    """
+
+    num_halos = len(halo_list)
+
+    if num_halos == 0: return None
+
+    # Set up fields that we want to pull from identified halos and their units
+    new_fields = ['particle_identifier', 'particle_mass', 'particle_position_x', 
+        'particle_position_y','particle_position_z',
+        'virial_radius']
+    new_units = [ '', 'g', 'cm', 'cm','cm','cm']
+
+    # Set up a dictionary based on those fields 
+    # with empty arrays where we will fill in their values
+    halo_properties = { f : (np.zeros(num_halos),unit) \
+        for f, unit in zip(new_fields,new_units)}
+
+    # Iterate through the halos pulling out their positions and virial quantities
+    # and filling in the properties dictionary
+    for i,halo in enumerate(halo_list):
+        halo_properties['particle_identifier'][0][i] = i
+        halo_properties['particle_mass'][0][i] = halo.virial_mass().in_cgs()
+        halo_properties['virial_radius'][0][i] = halo.virial_radius().in_cgs()
+
+        com = halo.center_of_mass().in_cgs()
+        halo_properties['particle_position_x'][0][i] = com[0]
+        halo_properties['particle_position_y'][0][i] = com[1]
+        halo_properties['particle_position_z'][0][i] = com[2]
+
+    # Define a bounding box based on original data pf
+    bbox = np.array([data_pf.domain_left_edge.in_cgs(),
+            data_pf.domain_right_edge.in_cgs()]).T
+
+    # Create a pf with the halos as particles
+    particle_pf = load_particles(halo_properties, 
+            bbox=bbox, length_unit = 1, mass_unit=1)
+
+    # Create the field info dictionary so we can reference those fields
+    particle_pf.create_field_info()
+
+    for attr in ["current_redshift", "current_time",
+                 "domain_dimensions",
+                 "cosmological_simulation", "omega_lambda",
+                 "omega_matter", "hubble_constant"]:
+        attr_val = getattr(data_pf, attr)
+        setattr(particle_pf, attr, attr_val)
+    particle_pf.current_time = particle_pf.current_time.in_cgs()
+    
+    return particle_pf

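The registry above makes finder_method a plain string lookup, so custom
finders can be plugged in by name; a hedged usage sketch (the import paths
and the FOFHaloFinder 'link' keyword are assumptions):

    from yt.mods import load
    from yt.analysis_modules.halo_analysis.api import \
        HaloCatalog, add_finding_method
    from yt.analysis_modules.halo_finding.halo_objects import FOFHaloFinder
    from yt.analysis_modules.halo_analysis.halo_finding_methods import \
        _parse_old_halo_list

    def _my_fof_method(pf):
        # Run FoF with a custom linking length and convert the result
        # into a loaded halo dataset.
        halo_list = FOFHaloFinder(pf, link=0.3)
        return _parse_old_halo_list(pf, halo_list)
    add_finding_method("my_fof", _my_fof_method)

    data_pf = load('Enzo_64/RD0006/RD0006')
    hc = HaloCatalog(data_pf=data_pf, finder_method="my_fof")
    hc.create()
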
diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/analysis_modules/halo_analysis/operator_registry.py
--- a/yt/analysis_modules/halo_analysis/operator_registry.py
+++ b/yt/analysis_modules/halo_analysis/operator_registry.py
@@ -27,5 +27,5 @@
 
 callback_registry = OperatorRegistry()
 filter_registry = OperatorRegistry()
-hf_registry = OperatorRegistry()
+finding_method_registry = OperatorRegistry()
 quantity_registry = OperatorRegistry()

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -110,7 +110,9 @@
         if self._name == "RockstarHalo":
             ds = self.pf.sphere(self.CoM, self._radjust * self.max_radius)
         elif self._name == "LoadedHalo":
-            ds = self.pf.sphere(self.CoM, self._radjust * self.max_radius)
+            ds = self.pf.sphere(self.CoM, np.maximum(self._radjust * \
+	    self.pf.quan(self.max_radius, 'code_length'), \
+	    self.pf.index.get_smallest_dx()))
         sp_pid = ds['particle_index']
         self._ds_sort = sp_pid.argsort()
         sp_pid = sp_pid[self._ds_sort]
@@ -217,7 +219,7 @@
         vx = (self["particle_velocity_x"] * pm).sum()
         vy = (self["particle_velocity_y"] * pm).sum()
         vz = (self["particle_velocity_z"] * pm).sum()
-        return np.array([vx, vy, vz]) / pm.sum()
+        return self.pf.arr([vx, vy, vz], vx.units) / pm.sum()
 
     def rms_velocity(self):
         r"""Returns the mass-weighted RMS velocity for the halo
@@ -331,9 +333,11 @@
         handle.create_group("/%s" % gn)
         for field in ["particle_position_%s" % ax for ax in 'xyz'] \
                    + ["particle_velocity_%s" % ax for ax in 'xyz'] \
-                   + ["particle_index"] + ["particle_mass"].in_units('Msun'):
+                   + ["particle_index"]:
             handle.create_dataset("/%s/%s" % (gn, field), data=self[field])
-        if 'creation_time' in self.data.pf.field_list:
+	handle.create_dataset("/%s/particle_mass" % gn,
+		data=self["particle_mass"].in_units('Msun'))
+        if ('io','creation_time') in self.data.pf.field_list:
             handle.create_dataset("/%s/creation_time" % gn,
                 data=self['creation_time'])
         n = handle["/%s" % gn]
@@ -848,6 +852,7 @@
         self._saved_fields = {}
         self._ds_sort = None
         self._particle_mask = None
+	self._pid_sort = None
 
 
     def __getitem__(self, key):
@@ -865,14 +870,28 @@
             self.size, key)
         if field_data is not None:
             if key == 'particle_index':
-                field_data = field_data[field_data.argsort()]
+                #this is an index for turning data sorted by particle index 
+		#into the same order as the fields on disk
+		self._pid_sort = field_data.argsort().argsort()
+	    #convert to YTArray using the data from disk
+	    if key == 'particle_mass':
+		field_data = self.pf.arr(field_data, 'Msun')
+	    else:
+	        field_data = self.pf.arr(field_data, 
+		    self.pf._get_field_info('unknown',key).units)
             self._saved_fields[key] = field_data
             return self._saved_fields[key]
         # We won't store this field below in saved_fields because
         # that would mean keeping two copies of it, one in the yt
         # machinery and one here.
-        ds = self.pf.sphere(self.CoM, 1.05 * self.max_radius)
-        return np.take(ds[key][self._ds_sort], self.particle_mask)
+        ds = self.pf.sphere(self.CoM, np.maximum(self._radjust * \
+	    self.pf.quan(self.max_radius, 'code_length'), \
+	    self.pf.index.get_smallest_dx()))
+	# If particle_mask hasn't been called once then _ds_sort won't have
+	# the proper values set yet
+        if self._particle_mask is None:
+	    self.particle_mask
+        return ds[key][self._ds_sort][self.particle_mask][self._pid_sort]
 
     def _get_particle_data(self, halo, fnames, size, field):
         # Given a list of file names, a halo, its size, and the desired field,
@@ -1087,10 +1106,10 @@
         gc.collect()
 
     def _get_dm_indices(self):
-        if 'creation_time' in self._data_source.index.field_list:
+        if ('io','creation_time') in self._data_source.index.field_list:
             mylog.debug("Differentiating based on creation time")
             return (self._data_source["creation_time"] <= 0)
-        elif 'particle_type' in self._data_source.index.field_list:
+        elif ('io','particle_type') in self._data_source.index.field_list:
             mylog.debug("Differentiating based on particle type")
             return (self._data_source["particle_type"] == 1)
         else:
@@ -2141,7 +2160,7 @@
         elif fancy_padding and self._distributed:
             LE_padding = np.empty(3, dtype='float64')
             RE_padding = np.empty(3, dtype='float64')
-            avg_spacing = (float(vol) / data.size) ** (1. / 3.)
+            avg_spacing = (vol / data.size) ** (1. / 3.)
             base_padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
                 avg_spacing
             for dim in xrange(3):
@@ -2388,7 +2407,7 @@
                 total_mass = \
                     self.comm.mpi_allreduce((self._data_source['all', "particle_mass"][select].in_units('Msun')).sum(dtype='float64'), op='sum')
             else:
-                total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("particle_mass")[0].in_units('Msun'), op='sum')
+                total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("particle_mass").in_units('Msun'), op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles
         # are all on different processors, we should instead construct an
         # object representing the entire domain and sum it "lazily" with
@@ -2412,7 +2431,7 @@
             sub_mass = self._data_source["particle_mass"][select].in_units('Msun').sum(dtype='float64')
         else:
             sub_mass = \
-                self._data_source.quantities["TotalQuantity"]("particle_mass")[0].in_units('Msun')
+                self._data_source.quantities["TotalQuantity"]("particle_mass").in_units('Msun')
         HOPHaloList.__init__(self, self._data_source,
             threshold * total_mass / sub_mass, dm_only)
         self._parse_halolist(total_mass / sub_mass)

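The double argsort() stored as _pid_sort above computes the rank of each
element, which is exactly the permutation needed to map index-sorted data
back to on-disk order; a tiny sketch:

    import numpy as np

    pid = np.array([42, 7, 19])       # particle indices in on-disk order
    order = pid.argsort()             # [1, 2, 0]: disk order -> sorted order
    rank = pid.argsort().argsort()    # [2, 0, 1]: rank of each disk element
    sorted_pid = pid[order]           # [ 7, 19, 42]
    assert (sorted_pid[rank] == pid).all()   # ranks restore disk order
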
diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -53,7 +53,7 @@
         self.zpos = particle_fields.pop("particle_position_z")
         self.real_size = len(self.xpos)
         self.index = particle_fields.pop("particle_index")
-        self.mass = particle_fields.pop("ParticleMassMsun")
+        self.mass = particle_fields.pop("particle_mass")
         self.padded_particles = []
         self.nMerge = 4
         self.tree = tree

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -220,12 +220,11 @@
         cdef np.int64_t last_fof_tag = 1
         cdef np.int64_t k = 0
         for i in range(num_particles):
-            if fof_tags[i] == 0:
+            if fof_tags[i] < 0:
                 continue
             if fof_tags[i] != last_fof_tag:
                 last_fof_tag = fof_tags[i]
                 if k > 16:
-                    print "Finding subs", k, i
                     fof_obj.num_p = k
                     find_subs(&fof_obj)
                 k = 0
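
For the `fof_tags` test above, the apparent convention is that unassigned particles carry negative tags, so the new `< 0` check filters all of them while tag 0 becomes a valid group (the old `== 0` test discarded it). A small illustration under that assumed convention:

    import numpy as np

    fof_tags = np.array([-1, -1, 0, 0, 3, 3, 3])
    assigned = fof_tags[fof_tags >= 0]   # the tag-0 group now survives: [0 0 3 3 3]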

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -22,7 +22,6 @@
 from yt.fields.local_fields import add_field, derived_field
 from yt.data_objects.image_array import ImageArray
 from yt.funcs import fix_axis, mylog, iterable, get_pbar
-from yt.utilities.definitions import inv_axis_names
 from yt.visualization.volume_rendering.camera import off_axis_projection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only
@@ -134,7 +133,7 @@
         --------
         >>> szprj.on_axis("y", center="max", width=(1.0, "Mpc"), source=my_sphere)
         """
-        axis = fix_axis(axis)
+        axis = fix_axis(axis, self.pf)
 
         if center == "c":
             ctr = self.pf.domain_center

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -24,6 +24,7 @@
     logfile = 'False',
     coloredlogs = 'False',
     suppressstreamlogging = 'False',
+    StdoutStreamLogging = 'False',
     loglevel = '20',
     inline = 'False',
     numthreads = '-1',
@@ -53,6 +54,7 @@
     answer_testing_bitwise = 'False',
     gold_standard_filename = 'gold311',
     local_standard_filename = 'local001',
+    answer_tests_url = 'http://answers.yt-project.org/%s_%s',
     sketchfab_api_key = 'None',
     thread_field_detection = 'False',
     ignore_invalid_unit_operation_errors = 'False'
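
Both new options join the string-valued defaults table, so they are read back through yt's usual ConfigParser-style accessors. A hedged usage sketch (the accessor pattern mirrors existing yt code; treat the exact calls as illustrative):

    from yt.config import ytcfg

    if ytcfg.getboolean("yt", "StdoutStreamLogging"):
        pass  # send log records to stdout rather than stderr
    url = ytcfg.get("yt", "answer_tests_url") % ("gold311", "local001")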

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -41,7 +41,6 @@
     march_cubes_grid, march_cubes_grid_flux
 from yt.utilities.data_point_utilities import CombineGrids,\
     DataCubeRefine, DataCubeReplace, FillRegion, FillBuffer
-from yt.utilities.definitions import axis_names, x_dict, y_dict
 from yt.utilities.minimal_representation import \
     MinimalProjectionData
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -182,25 +181,12 @@
         weight value before being integrated, and at the conclusion of the
         projection the resultant values will be divided by the projected
         `weight_field`.
-    max_level : int
-        If supplied, only cells at or below this level will be projected.
     center : array_like, optional
         The 'center' supplied to fields that use it.  Note that this does
         not have to have `coord` as one value.  Strictly optional.
     data_source : `yt.data_objects.api.AMRData`, optional
         If specified, this will be the data source used for selecting
         regions to project.
-    node_name: string, optional
-        The node in the .yt file to find or store this slice at.  Should
-        probably not be used.
-    field_cuts : list of strings, optional
-        If supplied, each of these strings will be evaluated to cut a
-        region of a grid out.  They can be of the form "grid['Temperature']
-        > 100" for instance.
-    preload_style : string
-        Either 'level', 'all', or None (default).  Defines how grids are
-        loaded -- either level by level, or all at once.  Only applicable
-        during parallel runs.
     serialize : bool, optional
         Whether we should store this projection in the .yt file or not.
     kwargs : dict of items
@@ -219,7 +205,7 @@
     _con_args = ('axis', 'field', 'weight_field')
     _container_fields = ('px', 'py', 'pdx', 'pdy', 'weight_field')
     def __init__(self, field, axis, weight_field = None,
-                 center = None, pf = None, data_source=None, 
+                 center = None, pf = None, data_source = None,
                  style = "integrate", field_parameters = None):
         YTSelectionContainer2D.__init__(self, axis, pf, field_parameters)
         self.proj_style = style
@@ -252,8 +238,8 @@
         self._mrep.upload()
 
     def _get_tree(self, nvals):
-        xax = x_dict[self.axis]
-        yax = y_dict[self.axis]
+        xax = self.pf.coordinates.x_axis[self.axis]
+        yax = self.pf.coordinates.y_axis[self.axis]
         xd = self.pf.domain_dimensions[xax]
         yd = self.pf.domain_dimensions[yax]
         bounds = (self.pf.domain_left_edge[xax],
@@ -292,18 +278,20 @@
         else:
             raise NotImplementedError
         # TODO: Add the combine operation
-        ox = self.pf.domain_left_edge[x_dict[self.axis]]
-        oy = self.pf.domain_left_edge[y_dict[self.axis]]
+        xax = self.pf.coordinates.x_axis[self.axis]
+        yax = self.pf.coordinates.y_axis[self.axis]
+        ox = self.pf.domain_left_edge[xax]
+        oy = self.pf.domain_left_edge[yax]
         px, py, pdx, pdy, nvals, nwvals = tree.get_all(False, merge_style)
         nvals = self.comm.mpi_allreduce(nvals, op=op)
         nwvals = self.comm.mpi_allreduce(nwvals, op=op)
-        np.multiply(px, self.pf.domain_width[x_dict[self.axis]], px)
+        np.multiply(px, self.pf.domain_width[xax], px)
         np.add(px, ox, px)
-        np.multiply(pdx, self.pf.domain_width[x_dict[self.axis]], pdx)
+        np.multiply(pdx, self.pf.domain_width[xax], pdx)
 
-        np.multiply(py, self.pf.domain_width[y_dict[self.axis]], py)
+        np.multiply(py, self.pf.domain_width[yax], py)
         np.add(py, oy, py)
-        np.multiply(pdy, self.pf.domain_width[y_dict[self.axis]], pdy)
+        np.multiply(pdy, self.pf.domain_width[yax], pdy)
         if self.weight_field is not None:
             np.divide(nvals, nwvals[:,None], nvals)
         # We now convert to half-widths and center-points
@@ -348,8 +336,10 @@
 
     def _initialize_chunk(self, chunk, tree):
         icoords = chunk.icoords
-        i1 = icoords[:,x_dict[self.axis]]
-        i2 = icoords[:,y_dict[self.axis]]
+        xax = self.pf.coordinates.x_axis[self.axis]
+        yax = self.pf.coordinates.y_axis[self.axis]
+        i1 = icoords[:,xax]
+        i2 = icoords[:,yax]
         ilevel = chunk.ires * self.pf.ires_factor
         tree.initialize_chunk(i1, i2, ilevel)
 
@@ -370,8 +360,10 @@
         else:
             w = np.ones(chunk.ires.size, dtype="float64")
         icoords = chunk.icoords
-        i1 = icoords[:,x_dict[self.axis]]
-        i2 = icoords[:,y_dict[self.axis]]
+        xax = self.pf.coordinates.x_axis[self.axis]
+        yax = self.pf.coordinates.y_axis[self.axis]
+        i1 = icoords[:,xax]
+        i2 = icoords[:,yax]
         ilevel = chunk.ires * self.pf.ires_factor
         tree.add_chunk_to_tree(i1, i2, ilevel, v, w)
 
@@ -478,9 +470,14 @@
         return tuple(self.ActiveDimensions.tolist())
 
     def _setup_data_source(self):
-        self._data_source = self.pf.region(self.center,
-            self.left_edge - self.base_dds,
-            self.right_edge + self.base_dds)
+        LE = self.left_edge - self.base_dds
+        RE = self.right_edge + self.base_dds
+        if not all(self.pf.periodicity):
+            for i in range(3):
+                if self.pf.periodicity[i]: continue
+                LE[i] = max(LE[i], self.pf.domain_left_edge[i])
+                RE[i] = min(RE[i], self.pf.domain_right_edge[i])
+        self._data_source = self.pf.region(self.center, LE, RE)
         self._data_source.min_level = 0
         self._data_source.max_level = self.level
         self._pdata_source = self.pf.region(self.center,
@@ -592,7 +589,7 @@
     ----------
     left_edge : array_like
         The left edge of the region to be extracted
-    rigth_edge : array_like
+    right_edge : array_like
         The right edge of the region to be extracted
     dims : array_like
         Number of cells along each axis of resulting grid.
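
Earlier in this file's diff, `_setup_data_source` now clamps the padded region to the domain along non-periodic axes only. A worked numpy rendition with made-up values:

    import numpy as np

    periodicity = (True, False, True)
    LE = np.array([-0.1, -0.1, -0.1]); RE = np.array([1.1, 1.1, 1.1])
    dom_LE, dom_RE = np.zeros(3), np.ones(3)
    for i in range(3):
        if periodicity[i]:
            continue                   # periodic axes may pad past the edge
        LE[i] = max(LE[i], dom_LE[i])
        RE[i] = min(RE[i], dom_RE[i])
    # LE -> [-0.1, 0.0, -0.1]; RE -> [1.1, 1.0, 1.1]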

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -28,7 +28,6 @@
 from yt.data_objects.particle_io import particle_handler_registry
 from yt.utilities.lib.marching_cubes import \
     march_cubes_grid, march_cubes_grid_flux
-from yt.utilities.definitions import  x_dict, y_dict
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
 from yt.utilities.parameter_file_storage import \
@@ -726,9 +725,10 @@
     _spatial = False
     def __init__(self, axis, pf, field_parameters):
         ParallelAnalysisInterface.__init__(self)
-        self.axis = fix_axis(axis)
         super(YTSelectionContainer2D, self).__init__(
             pf, field_parameters)
+        # We need the pf, which will exist by now, for fix_axis.
+        self.axis = fix_axis(axis, self.pf)
         self.set_field_parameter("axis", axis)
 
     def _convert_field_name(self, field):
@@ -821,8 +821,8 @@
         if not iterable(resolution):
             resolution = (resolution, resolution)
         from yt.visualization.fixed_resolution import FixedResolutionBuffer
-        xax = x_dict[self.axis]
-        yax = y_dict[self.axis]
+        xax = self.pf.coordinates.x_axis[self.axis]
+        yax = self.pf.coordinates.y_axis[self.axis]
         bounds = (center[xax] - width*0.5, center[xax] + width*0.5,
                   center[yax] - height*0.5, center[yax] + height*0.5)
         frb = FixedResolutionBuffer(self, bounds, resolution,
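
Throughout this changeset the module-level `x_dict`/`y_dict` tables give way to per-dataset `pf.coordinates.x_axis`/`y_axis` maps, so each geometry can define its own orderings. For a Cartesian dataset the maps are presumably equivalent to the old tables; a sketch of the bounds computation above under that assumption:

    x_axis = {0: 1, 1: 0, 2: 0}    # image x-axis for a cut along axis i
    y_axis = {0: 2, 1: 2, 2: 1}    # image y-axis for a cut along axis i
    axis, width, height = 2, 1.0, 1.0
    center = [0.5, 0.5, 0.5]
    xax, yax = x_axis[axis], y_axis[axis]
    bounds = (center[xax] - width * 0.5, center[xax] + width * 0.5,
              center[yax] - height * 0.5, center[yax] + height * 0.5)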

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -20,13 +20,11 @@
 import numpy as np
 
 from yt.funcs import *
-from yt.utilities.definitions import x_dict, y_dict
 
 from yt.data_objects.data_containers import \
     YTFieldData, \
     YTDataContainer, \
     YTSelectionContainer
-from yt.utilities.definitions import x_dict, y_dict
 from yt.fields.field_exceptions import \
     NeedsGridType, \
     NeedsOriginalGrid, \
@@ -379,9 +377,9 @@
 
     def count_particles(self, selector, x, y, z):
         # We don't cache the selector results
-        count = selector.count_points(x,y,z)
+        count = selector.count_points(x,y,z, 0.0)
         return count
 
     def select_particles(self, selector, x, y, z):
-        mask = selector.select_points(x,y,z)
+        mask = selector.select_points(x,y,z, 0.0)
         return mask

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -249,11 +249,11 @@
 
     def count_particles(self, selector, x, y, z):
         # We don't cache the selector results
-        count = selector.count_points(x,y,z)
+        count = selector.count_points(x,y,z, 0.0)
         return count
 
     def select_particles(self, selector, x, y, z):
-        mask = selector.select_points(x,y,z)
+        mask = selector.select_points(x,y,z, 0.0)
         return mask
 
 class ParticleOctreeSubset(OctreeSubset):

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -25,8 +25,6 @@
     YTSelectionContainer1D, YTSelectionContainer2D, YTSelectionContainer3D
 from yt.data_objects.derived_quantities import \
     DerivedQuantityCollection
-from yt.utilities.definitions import \
-    x_dict, y_dict, axis_names
 from yt.utilities.exceptions import YTSphereTooSmall
 from yt.utilities.linear_interpolators import TrilinearFieldInterpolator
 from yt.utilities.minimal_representation import \
@@ -56,9 +54,6 @@
     fields : list of strings, optional
         If you want the object to pre-retrieve a set of fields, supply them
         here.  This is not necessary.
-    kwargs : dict of items
-        Any additional values are passed as field parameters that can be
-        accessed by generated fields.
 
     Examples
     --------
@@ -73,12 +68,15 @@
     def __init__(self, axis, coords, pf=None, field_parameters=None):
         super(YTOrthoRayBase, self).__init__(pf, field_parameters)
         self.axis = axis
-        self.px_ax = x_dict[self.axis]
-        self.py_ax = y_dict[self.axis]
-        self.px_dx = 'd%s'%(axis_names[self.px_ax])
-        self.py_dx = 'd%s'%(axis_names[self.py_ax])
+        xax = self.pf.coordinates.x_axis[self.axis]
+        yax = self.pf.coordinates.y_axis[self.axis]
+        self.px_ax = xax
+        self.py_ax = yax
+        # Even if the axes are not actually named x, y, z, we use those labels here.
+        self.px_dx = 'd%s'%('xyz'[self.px_ax])
+        self.py_dx = 'd%s'%('xyz'[self.py_ax])
         self.px, self.py = coords
-        self.sort_by = axis_names[self.axis]
+        self.sort_by = 'xyz'[self.axis]
 
     @property
     def coords(self):
@@ -105,9 +103,6 @@
     fields : list of strings, optional
         If you want the object to pre-retrieve a set of fields, supply them
         here.  This is not necessary.
-    kwargs : dict of items
-        Any additional values are passed as field parameters that can be
-        accessed by generated fields.
 
     Examples
     --------
@@ -168,9 +163,6 @@
     field_parameters : dictionary
          A dictionary of field parameters than can be accessed by derived
          fields.
-    kwargs : dict of items
-        Any additional values are passed as field parameters that can be
-        accessed by generated fields.
 
     Examples
     --------
@@ -191,16 +183,18 @@
         self.coord = coord
 
     def _generate_container_field(self, field):
+        xax = self.pf.coordinates.x_axis[self.axis]
+        yax = self.pf.coordinates.y_axis[self.axis]
         if self._current_chunk is None:
             self.index._identify_base_chunk(self)
         if field == "px":
-            return self._current_chunk.fcoords[:,x_dict[self.axis]]
+            return self._current_chunk.fcoords[:,xax]
         elif field == "py":
-            return self._current_chunk.fcoords[:,y_dict[self.axis]]
+            return self._current_chunk.fcoords[:,yax]
         elif field == "pdx":
-            return self._current_chunk.fwidth[:,x_dict[self.axis]] * 0.5
+            return self._current_chunk.fwidth[:,xax] * 0.5
         elif field == "pdy":
-            return self._current_chunk.fwidth[:,y_dict[self.axis]] * 0.5
+            return self._current_chunk.fwidth[:,yax] * 0.5
         else:
             raise KeyError(field)
 
@@ -247,9 +241,6 @@
     node_name: string, optional
         The node in the .yt file to find or store this slice at.  Should
         probably not be used.
-    kwargs : dict of items
-        Any additional values are passed as field parameters that can be
-        accessed by generated fields.
 
     Notes
     -----
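
With the handler-based maps, the ortho-ray bookkeeping above reduces to indexing into the string 'xyz'. A worked example for a ray along the y axis (Cartesian maps assumed, as in the earlier sketch):

    axis = 1
    x_axis = {0: 1, 1: 0, 2: 0}; y_axis = {0: 2, 1: 2, 2: 1}
    px_ax, py_ax = x_axis[axis], y_axis[axis]                  # (0, 2)
    px_dx, py_dx = 'd%s' % 'xyz'[px_ax], 'd%s' % 'xyz'[py_ax]  # ('dx', 'dz')
    sort_by = 'xyz'[axis]                                      # 'y'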

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -574,6 +574,8 @@
         self.unit_registry.add("code_magnetic", 1.0, dimensions.magnetic_field)
         self.unit_registry.add("code_temperature", 1.0, dimensions.temperature)
         self.unit_registry.add("code_velocity", 1.0, dimensions.velocity)
+        self.unit_registry.add("code_metallicity", 1.0,
+                               dimensions.dimensionless)
 
     def set_units(self):
         """
@@ -624,16 +626,14 @@
         self.unit_registry.modify("code_length", self.length_unit)
         self.unit_registry.modify("code_mass", self.mass_unit)
         self.unit_registry.modify("code_time", self.time_unit)
-        vel_unit = getattr(self, "code_velocity",
+        vel_unit = getattr(self, "velocity_unit",
                     self.length_unit / self.time_unit)
         self.unit_registry.modify("code_velocity", vel_unit)
         # domain_width does not yet exist
-        if self.domain_left_edge is None or self.domain_right_edge is None:
-            DW = np.zeros(3)
-        else:
+        if None not in (self.domain_left_edge, self.domain_right_edge):
             DW = self.arr(self.domain_right_edge - self.domain_left_edge, "code_length")
-        self.unit_registry.add("unitary", float(DW.max() * DW.units.cgs_value),
-                               DW.units.dimensions)
+            self.unit_registry.add("unitary", float(DW.max() * DW.units.cgs_value),
+                                   DW.units.dimensions)
 
     _arr = None
     @property
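
The reworked `set_units` now only defines the "unitary" unit when both domain edges exist, since it is pegged to the widest axis of the domain. A numeric sketch of that definition (the cgs value per code length is an invented example):

    import numpy as np

    cm_per_code_length = 3.0857e24     # assumed: 1 code length = 1 Mpc
    DW = np.array([2.0, 1.0, 1.0])     # domain width in code lengths
    unitary_in_cm = float(DW.max()) * cm_per_code_length
    # 0.5 "unitary" is then half-way across the widest axis of the box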

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/data_objects/tests/test_chunking.py
--- a/yt/data_objects/tests/test_chunking.py
+++ b/yt/data_objects/tests/test_chunking.py
@@ -3,7 +3,7 @@
 def _get_dobjs(c):
     dobjs = [("sphere", ("center", (1.0, "unitary"))),
              ("sphere", ("center", (0.1, "unitary"))),
-             ("ortho_ray", (0, (c[x_dict[0]], c[y_dict[0]]))),
+             ("ortho_ray", (0, (c[1], c[2]))),
              ("slice", (0, c[0])),
              #("disk", ("center", [0.1, 0.3, 0.6],
              #           (0.2, 'unitary'), (0.1, 'unitary'))),

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -29,8 +29,8 @@
         uc = [np.unique(c) for c in coords]
         # Some simple projection tests with single grids
         for ax, an in enumerate("xyz"):
-            xax = x_dict[ax]
-            yax = y_dict[ax]
+            xax = pf.coordinates.x_axis[ax]
+            yax = pf.coordinates.y_axis[ax]
             for wf in ["density", None]:
                 fns = []
                 proj = pf.proj(["ones", "density"], ax, weight_field = wf)

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -17,8 +17,6 @@
 from nose.tools import raises
 from yt.testing import \
     fake_random_pf, assert_equal, assert_array_equal, YTArray
-from yt.utilities.definitions import \
-    x_dict, y_dict
 from yt.utilities.exceptions import \
     YTNoDataInObjectError
 from yt.units.unit_object import Unit
@@ -50,8 +48,8 @@
         slc_pos = 0.5
         # Some simple slice tests with single grids
         for ax, an in enumerate("xyz"):
-            xax = x_dict[ax]
-            yax = y_dict[ax]
+            xax = pf.coordinates.x_axis[ax]
+            yax = pf.coordinates.y_axis[ax]
             for wf in ["density", None]:
                 fns = []
                 slc = pf.slice(ax, slc_pos)

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -171,9 +171,9 @@
 
     def count_particles(self, selector, x, y, z):
         # We don't cache the selector results
-        count = selector.count_points(x,y,z)
+        count = selector.count_points(x,y,z, 0.0)
         return count
 
     def select_particles(self, selector, x, y, z):
-        mask = selector.select_points(x,y,z)
+        mask = selector.select_points(x,y,z, 0.0)
         return mask

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -25,11 +25,13 @@
 from yt.funcs import *
 from yt.utilities.chemical_formulas import \
     ChemicalFormula
+from .field_plugin_registry import \
+    register_field_plugin
 
 _primordial_mass_fraction = \
   {"H": primordial_H_mass_fraction,
    "He" : (1 - primordial_H_mass_fraction)}
-    
+
 # See YTEP-0003 for details, but we want to ensure these fields are all
 # populated:
 #
@@ -159,3 +161,20 @@
     if loc == len(my_split) - 1 or not my_split[loc + 1].isdigit():
         return 1
     return int(my_split[loc + 1])
+
+@register_field_plugin
+def setup_species_fields(registry, ftype = "gas", slice_info = None):
+    # We have to check what type of field this is -- if it's particles, then we
+    # set particle_type to True.
+    particle_type = ftype not in registry.pf.fluid_types
+    for species in registry.species_names:
+        # These are all the species we should be looking for fractions or
+        # densities of.
+        if (ftype, "%s_density" % species) in registry:
+            func = add_species_field_by_density
+        elif (ftype, "%s_fraction" % species) in registry:
+            func = add_species_field_by_fraction
+        else:
+            # Skip it
+            continue
+        func(registry, ftype, species, particle_type)
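
The new `setup_species_fields` plugin dispatches per species on whichever on-disk field is present. The decision logic, reduced to a standalone sketch (registry contents hypothetical):

    def pick_species_setup(registry_fields, ftype, species):
        if (ftype, "%s_density" % species) in registry_fields:
            return "add_species_field_by_density"
        elif (ftype, "%s_fraction" % species) in registry_fields:
            return "add_species_field_by_fraction"
        return None    # neither stored on disk: skip this species

    pick_species_setup({("gas", "HI_density")}, "gas", "HI")
    # -> "add_species_field_by_density"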

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/frontends/_skeleton/api.py
--- a/yt/frontends/_skeleton/api.py
+++ b/yt/frontends/_skeleton/api.py
@@ -20,7 +20,7 @@
 
 from .fields import \
       SkeletonFieldInfo, \
-      add_flash_field
+      add_skeleton_field
 
 from .io import \
       IOHandlerSkeleton

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/frontends/_skeleton/data_structures.py
--- a/yt/frontends/_skeleton/data_structures.py
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -13,36 +13,30 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
-import stat
 import numpy as np
-import weakref
 
-from yt.funcs import *
 from yt.data_objects.grid_patch import \
     AMRGridPatch
-from yt.data_objects.index import \
-    AMRHierarchy
+from yt.geometry.grid_geometry_handler import \
+    GridIndex
 from yt.data_objects.static_output import \
     Dataset
-from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
-from yt.utilities.io_handler import \
-    io_registry
-from yt.utilities.physical_constants import cm_per_mpc
-from .fields import SkeletonFieldInfo, add_flash_field, KnownSkeletonFields
-from yt.fields.field_info_container import \
-    FieldInfoContainer, NullFunc, ValidateDataField, TranslationFunc
+from yt.utilities.lib.misc_utilities import \
+    get_box_grids_level
 
 class SkeletonGrid(AMRGridPatch):
     _id_offset = 0
-    #__slots__ = ["_level_id", "stop_index"]
-    def __init__(self, id, index, level):
-        AMRGridPatch.__init__(self, id, filename = index.index_filename,
-                              index = index)
-        self.Parent = None
+    def __init__(self, id, index, level, start, dimensions):
+        AMRGridPatch.__init__(self, id, filename=index.index_filename,
+                              index=index)
+        self.Parent = []
         self.Children = []
         self.Level = level
+        self.start_index = start.copy()
+        self.stop_index = self.start_index + dimensions
+        self.ActiveDimensions = dimensions.copy()
 
     def __repr__(self):
         return "SkeletonGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
@@ -50,7 +44,6 @@
 class SkeletonHierarchy(GridIndex):
 
     grid = SkeletonGrid
-    float_type = np.float64
     
     def __init__(self, pf, dataset_type='skeleton'):
         self.dataset_type = dataset_type
@@ -66,6 +59,10 @@
     def _detect_output_fields(self):
         # This needs to set a self.field_list that contains all the available,
         # on-disk fields.
+        # NOTE: Each should be a tuple, where the first element is the on-disk
+        # fluid type or particle type.  Convention suggests that the on-disk
+        # fluid type is usually the dataset_type and the on-disk particle type
+        # (for a single population of particles) is "io".
         pass
     
     def _count_grids(self):
@@ -96,30 +93,34 @@
 
 class SkeletonDataset(Dataset):
     _index_class = SkeletonHierarchy
-    _fieldinfo_fallback = SkeletonFieldInfo
-    _fieldinfo_known = KnownSkeletonFields
-    _handle = None
+    _field_info_class = SkeletonFieldInfo
     
-    def __init__(self, filename, dataset_type='skeleton',
-                 storage_filename = None,
-                 conversion_override = None):
-
-        if conversion_override is None: conversion_override = {}
-        self._conversion_override = conversion_override
-
+    def __init__(self, filename, dataset_type='skeleton', storage_filename=None):
+        self.fluid_types += ('skeleton',)
         Dataset.__init__(self, filename, dataset_type)
         self.storage_filename = storage_filename
 
-    def _set_units(self):
-        # This needs to set up the dictionaries that convert from code units to
-        # CGS.  The needed items are listed in the second entry:
-        #   self.time_units         <= sec_conversion
-        #   self.conversion_factors <= mpc_conversion
-        #   self.units              <= On-disk fields
+    def _set_code_unit_attributes(self):
+        # This is where quantities are created that represent the various
+        # on-disk units.  These are the currently available quantities which
+        # should be set, along with examples of how to set them to standard
+        # values.
+        #
+        # self.length_unit = self.quan(1.0, "cm")
+        # self.mass_unit = self.quan(1.0, "g")
+        # self.time_unit = self.quan(1.0, "s")
+        #
+        # These can also be set:
+        # self.velocity_unit = self.quan(1.0, "cm/s")
+        # self.magnetic_unit = self.quan(1.0, "gauss")
         pass
 
     def _parse_parameter_file(self):
-        # This needs to set up the following items:
+        # This needs to set up the following items.  Note that these are all
+        # assumed to be in code units; domain_left_edge and domain_right_edge
+        # will be updated to be in code units at a later time.  This includes
+        # the cosmological parameters.
         #
         #   self.unique_identifier
         #   self.parameters             <= full of code-specific items of use
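
To make the commented skeleton concrete, `_set_code_unit_attributes` for an imaginary code that stores lengths in kpc and masses in solar masses might read (all values invented for illustration):

    def _set_code_unit_attributes(self):
        self.length_unit = self.quan(1.0, "kpc")
        self.mass_unit = self.quan(1.0, "Msun")
        self.time_unit = self.quan(1.0, "Myr")
        # optional, if the code defines them independently of the above:
        self.velocity_unit = self.quan(1.0, "km/s")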

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/frontends/_skeleton/definitions.py
--- a/yt/frontends/_skeleton/definitions.py
+++ b/yt/frontends/_skeleton/definitions.py
@@ -0,0 +1,1 @@
+# This file is often empty.  It can hold definitions related to a frontend.

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/frontends/_skeleton/fields.py
--- a/yt/frontends/_skeleton/fields.py
+++ b/yt/frontends/_skeleton/fields.py
@@ -13,79 +13,35 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import numpy as np
+from yt.funcs import mylog
 from yt.fields.field_info_container import \
-    FieldInfoContainer, \
-    NullFunc, \
-    TranslationFunc, \
-    FieldInfo, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType
-from yt.utilities.physical_constants import \
-    kboltz
+    FieldInfoContainer
 
-# The first field container is where any fields that exist on disk go, along
-# with their conversion factors, display names, etc.
+# We need to specify which fields we might have in our dataset.  The field info
+# container subclass here will define which fields it knows about.  It also
+# has optional methods, called during field setup, that can be overridden.
 
-KnownSkeletonFields = FieldInfoContainer()
-add_skeleton_field = KnownSkeletonFields.add_field
+class SkeletonFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+        # Each entry here is of the form
+        # ( "name", ("units", ["fields", "to", "alias"], # "display_name")),
+    )
 
-SkeletonFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_field = SkeletonFieldInfo.add_field
+    known_particle_fields = (
+        # Identical form to above
+        # ( "name", ("units", ["fields", "to", "alias"], # "display_name")),
+    )
 
-# Often, we want to translate between fields on disk and fields in yt.  This
-# construct shows how to do that.  Note that we use TranslationFunc.
+    def __init__(self, pf):
+        super(SkeletonFieldInfo, self).__init__(pf)
+        # If you want, you can check self.field_list
 
-translation_dict = {"x-velocity": "velx",
-                    "y-velocity": "vely",
-                    "z-velocity": "velz",
-                    "Density": "dens",
-                    "Temperature": "temp",
-                    "Pressure" : "pres", 
-                    "Grav_Potential" : "gpot",
-                    "particle_position_x" : "particle_posx",
-                    "particle_position_y" : "particle_posy",
-                    "particle_position_z" : "particle_posz",
-                    "particle_velocity_x" : "particle_velx",
-                    "particle_velocity_y" : "particle_vely",
-                    "particle_velocity_z" : "particle_velz",
-                    "particle_index" : "particle_tag",
-                    "Electron_Fraction" : "elec",
-                    "HI_Fraction" : "h   ",
-                    "HD_Fraction" : "hd  ",
-                    "HeI_Fraction": "hel ",
-                    "HeII_Fraction": "hep ",
-                    "HeIII_Fraction": "hepp",
-                    "HM_Fraction": "hmin",
-                    "HII_Fraction": "hp  ",
-                    "H2I_Fraction": "htwo",
-                    "H2II_Fraction": "htwp",
-                    "DI_Fraction": "deut",
-                    "DII_Fraction": "dplu",
-                    "ParticleMass": "particle_mass",
-                    "Flame_Fraction": "flam"}
+    def setup_fluid_fields(self):
+        # Here we do anything that might need info about the parameter file.
+        # You can use self.alias, self.add_output_field and self.add_field.
+        pass
 
-for f,v in translation_dict.items():
-    if v not in KnownSkeletonFields:
-        pfield = v.startswith("particle")
-        add_skeleton_field(v, function=NullFunc, take_log=False,
-                  validators = [ValidateDataField(v)],
-                  particle_type = pfield)
-    if f.endswith("_Fraction") :
-        dname = "%s\/Fraction" % f.split("_")[0]
-    else :
-        dname = f                    
-    ff = KnownSkeletonFields[v]
-    pfield = f.startswith("particle")
-    add_field(f, TranslationFunc(v),
-              take_log=KnownSkeletonFields[v].take_log,
-              units = ff.units, display_name=dname,
-              particle_type = pfield)
-
-# Here's an example of adding a new field:
-
-add_skeleton_field("dens", function=NullFunc, take_log=True,
-                convert_function=_get_convert("dens"),
-                units=r"g / cm**3")
+    def setup_particle_fields(self, ptype):
+        # This will get called for every particle type.
+        pass
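
Filled-in entries for the two tuples above might look like the following, for a hypothetical code whose on-disk names are "dens" and "temp" (units and aliases invented for illustration):

    known_other_fields = (
        ("dens", ("g/cm**3", ["density"], None)),
        ("temp", ("K", ["temperature"], None)),
    )
    known_particle_fields = (
        ("particle_mass", ("g", [], None)),
    )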

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/frontends/_skeleton/io.py
--- a/yt/frontends/_skeleton/io.py
+++ b/yt/frontends/_skeleton/io.py
@@ -23,12 +23,31 @@
     _particle_reader = False
     _dataset_type = "skeleton"
 
-    def _read_data(self, grid, field):
-        # This must return the array, of size/shape grid.ActiveDimensions, that
-        # corresponds to 'field'.
+    def _read_particle_coords(self, chunks, ptf):
+        # This needs to *yield* a series of tuples of (ptype, (x, y, z)).
+        # chunks is a list of chunks, and ptf is a dict where the keys are
+        # ptypes and the values are lists of fields.
         pass
 
-    def _read_data_slice(self, grid, field, axis, coord):
-        # If this is not implemented, the IO handler will just slice a
-        # _read_data item.
+    def _read_particle_fields(self, chunks, ptf, selector):
+        # This gets called after the arrays have been allocated.  It needs to
+        # yield ((ptype, field), data) where data is the masked results of
+        # reading ptype, field and applying the selector to the data read in.
+        # Selector objects have a .select_points(x, y, z, radius) method that
+        # returns a mask, so you need to do your masking here.
         pass
+
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        # This needs to allocate a set of arrays inside a dictionary, where the
+        # keys are the (ftype, fname) tuples and the values are arrays that
+        # have been masked using whatever selector method is appropriate.  The
+        # dict gets returned at the end and it should be flat, with selected
+        # data.  Note that if you're reading grid data, you might need to
+        # special-case a grid selector object.
+        pass
+
+    def _read_chunk_data(self, chunk, fields):
+        # This reads the data from a single chunk, and is only used for
+        # caching.
+        pass
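
A minimal sketch of the generator contract `_read_particle_coords` describes, assuming a hypothetical in-memory store `self._data[ptype]["positions"]` holding an (N, 3) array:

    def _read_particle_coords(self, chunks, ptf):
        for ptype in sorted(ptf):
            pos = self._data[ptype]["positions"]
            yield ptype, (pos[:, 0], pos[:, 1], pos[:, 2])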

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -74,7 +74,7 @@
         pbool, idxa, idxb = _determine_field_size(pf, ftype, self.ls, ptmax)
         pstr = 'particle_position_%s'
         x,y,z = [self._get_field((ftype, pstr % ax)) for ax in 'xyz']
-        mask = selector.select_points(x, y, z)
+        mask = selector.select_points(x, y, z, 0.0)
         if self.caching:
             self.masks[key] = mask
             return self.masks[key]

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/frontends/artio/io.py
--- a/yt/frontends/artio/io.py
+++ b/yt/frontends/artio/io.py
@@ -64,7 +64,7 @@
                 for ptype, field_list in sorted(ptf.items()):
                     x, y, z = (np.asarray(rv[ptype][pn % ax], dtype="=f8")
                                for ax in 'XYZ')
-                    mask = selector.select_points(x, y, z)
+                    mask = selector.select_points(x, y, z, 0.0)
                     if mask is None: continue
                     for field in field_list:
                         data = np.asarray(rv[ptype][field], "=f8")

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -243,12 +243,17 @@
                 self.add_field(name = ("gas", "%s_density" % nice_name),
                                function = func,
                                units = "g/cm**3")
-                # We know this will either have one letter, or two.
-                if field[3] in string.letters:
-                    element, weight = field[2:4], field[4:-1]
-                else:
-                    element, weight = field[2:3], field[3:-1]
-                weight = int(weight)
+                # Most of the time our species will be of the form
+                # element name + atomic weight (e.g. C12), but
+                # sometimes we make up descriptive names (e.g. ash)
+                if any(char.isdigit() for char in field):
+                    # We know this will either have one letter, or two.
+                    if field[3] in string.letters:
+                        element, weight = field[2:4], field[4:-1]
+                    else:
+                        element, weight = field[2:3], field[3:-1]
+                    weight = int(weight)
+
                 # Here we can, later, add number density.
             if field.startswith("omegadot("):
                 nice_name = field[9:-1]
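
A worked pass through the guarded split above, assuming species fields named like "X(C12)" or "X(Ne20)" while descriptive names such as "X(ash)" carry no digits and skip the parse (string.ascii_letters stands in for the Python 2-only string.letters):

    import string

    field = "X(Ne20)"
    if any(char.isdigit() for char in field):
        if field[3] in string.ascii_letters:   # two-letter element, e.g. Ne
            element, weight = field[2:4], field[4:-1]
        else:                                  # one-letter element, e.g. C
            element, weight = field[2:3], field[3:-1]
        weight = int(weight)                   # -> ("Ne", 20)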

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -124,7 +124,6 @@
         if species != "Electron":
             self.alias(("gas", "%s_density" % yt_name),
                        ("enzo", "%s_Density" % species))
-        add_species_field_by_density(self, "gas", yt_name)
 
     def setup_species_fields(self):
         species_names = [fn.rsplit("_Density")[0] for ft, fn in 

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -104,7 +104,7 @@
                             r"particle_position_%s")
                     x, y, z = (np.asarray(pds.get(pn % ax).value, dtype="=f8")
                                for ax in 'xyz')
-                    mask = selector.select_points(x, y, z)
+                    mask = selector.select_points(x, y, z, 0.0)
                     if mask is None: continue
                     for field in field_list:
                         data = np.asarray(pds.get(field).value, "=f8")

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -24,6 +24,32 @@
 _fields = ("temperature", "density", "velocity_magnitude",
            "velocity_divergence")
 
+def check_color_conservation(pf):
+    species_names = pf.field_info.species_names
+    dd = pf.all_data()
+    dens_yt = dd["density"].copy()
+    # Enumerate our species here
+    for s in sorted(species_names):
+        if s == "El": continue
+        dens_yt -= dd["%s_density" % s]
+    dens_yt -= dd["metal_density"]
+    delta_yt = np.abs(dens_yt / dd["density"])
+
+    # Now we compare color conservation to Enzo's color conservation
+    dd = pf.all_data()
+    dens_enzo = dd["Density"].copy()
+    for f in sorted(pf.field_list):
+        if not f[1].endswith("_Density") or \
+               f[1].startswith("Dark_Matter_")  or \
+               f[1].startswith("Electron_") or \
+               f[1].startswith("SFR_") or \
+               f[1].startswith("Forming_Stellar_") or \
+               f[1].startswith("Star_Particle_"):
+            continue
+        dens_enzo -= dd[f]
+    delta_enzo = np.abs(dens_enzo / dd["Density"])
+    return assert_almost_equal, delta_yt, delta_enzo
+
 m7 = "DD0010/moving7_0010"
 @requires_pf(m7)
 def test_moving7():
@@ -37,7 +63,15 @@
 @requires_pf(g30, big_data=True)
 def test_galaxy0030():
     pf = data_dir_load(g30)
+    yield check_color_conservation(pf)
     yield assert_equal, str(pf), "galaxy0030"
     for test in big_patch_amr(g30, _fields):
         test_galaxy0030.__name__ = test.description
         yield test
+
+ecp = "enzo_cosmology_plus/DD0046/DD0046"
+@requires_pf(ecp, big_data=True)
+def test_ecp():
+    pf = data_dir_load(ecp)
+    # Now we test our species fields
+    yield check_color_conservation(pf)

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -93,7 +93,7 @@
                 x = p_fields[start:end, px]
                 y = p_fields[start:end, py]
                 z = p_fields[start:end, pz]
-                mask = selector.select_points(x, y, z)
+                mask = selector.select_points(x, y, z, 0.0)
                 if mask is None: continue
                 for field in field_list:
                     fi = self._particle_fields[field]

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/frontends/halo_catalogs/halo_catalog/io.py
--- a/yt/frontends/halo_catalogs/halo_catalog/io.py
+++ b/yt/frontends/halo_catalogs/halo_catalog/io.py
@@ -68,7 +68,7 @@
                     x = f['particle_position_x'].value.astype("float64")
                     y = f['particle_position_y'].value.astype("float64")
                     z = f['particle_position_z'].value.astype("float64")
-                    mask = selector.select_points(x, y, z)
+                    mask = selector.select_points(x, y, z, 0.0)
                     del x, y, z
                     if mask is None: continue
                     for field in field_list:

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/frontends/halo_catalogs/rockstar/io.py
--- a/yt/frontends/halo_catalogs/rockstar/io.py
+++ b/yt/frontends/halo_catalogs/rockstar/io.py
@@ -74,7 +74,7 @@
                     x = halos['particle_position_x'].astype("float64")
                     y = halos['particle_position_y'].astype("float64")
                     z = halos['particle_position_z'].astype("float64")
-                    mask = selector.select_points(x, y, z)
+                    mask = selector.select_points(x, y, z, 0.0)
                     del x, y, z
                     if mask is None: continue
                     for field in field_list:

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -77,7 +77,7 @@
                 for ptype, field_list in sorted(ptf.items()):
                     x, y, z = (np.asarray(rv[ptype, pn % ax], "=f8")
                                for ax in 'xyz')
-                    mask = selector.select_points(x, y, z)
+                    mask = selector.select_points(x, y, z, 0.0)
                     for field in field_list:
                         data = np.asarray(rv.pop((ptype, field))[mask], "=f8")
                         yield (ptype, field), data

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -471,16 +471,20 @@
         self.current_time = hvals["time"]
         nz = 1 << self.over_refine_factor
         self.domain_dimensions = np.ones(3, "int32") * nz
-        if self.parameters.get('bPeriodic', True):
-            self.periodicity = (True, True, True)
+        periodic = self.parameters.get('bPeriodic', True)
+        period = self.parameters.get('dPeriod', None)
+        comoving = self.parameters.get('bComove', False)
+        self.periodicity = (periodic, periodic, periodic)
+        if comoving and period is None:
+            period = 1.0
+        if periodic and period is not None:
             # If we are periodic, that sets our domain width to either 1 or dPeriod.
-            self.domain_left_edge = np.zeros(3, "float64") - 0.5*self.parameters.get('dPeriod', 1)
-            self.domain_right_edge = np.zeros(3, "float64") + 0.5*self.parameters.get('dPeriod', 1)
+            self.domain_left_edge = np.zeros(3, "float64") - 0.5*period
+            self.domain_right_edge = np.zeros(3, "float64") + 0.5*period
         else:
-            self.periodicity = (False, False, False)
             self.domain_left_edge = None
             self.domain_right_edge = None
-        if self.parameters.get('bComove', False):
+        if comoving:
             cosm = self._cosmology_parameters or {}
             self.scale_factor = hvals["time"]  # in comoving simulations, "time" stores the scale factor a
             self.cosmological_simulation = 1
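
A worked pass through the reorganized header logic above, for a comoving run whose parameter file sets bPeriodic but omits dPeriod:

    parameters = {"bPeriodic": True, "bComove": True}
    periodic = parameters.get("bPeriodic", True)
    period = parameters.get("dPeriod", None)
    comoving = parameters.get("bComove", False)
    if comoving and period is None:
        period = 1.0     # comoving boxes default to unit width
    if periodic and period is not None:
        left, right = -0.5 * period, 0.5 * period   # domain spans [-P/2, P/2]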

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -31,7 +31,8 @@
 from yt.utilities.physical_constants import mh
 from yt.fields.species_fields import \
     add_species_field_by_fraction, \
-    add_species_field_by_density
+    add_species_field_by_density, \
+    setup_species_fields
 
 from yt.fields.particle_fields import \
     add_volume_weighted_smoothed_field
@@ -63,9 +64,30 @@
         ("Metals", ("code_metallicity", ["metallicity"], None)),
         ("Phi", ("code_length", [], None)),
         ("FormationTime", ("code_time", ["creation_time"], None)),
+        # These are metallicity fields that get discovered for FIRE simulations
+        ("Metallicity_00", ("", ["metallicity"], None)),
+        ("Metallicity_01", ("", ["He_fraction"], None)),
+        ("Metallicity_02", ("", ["C_fraction"], None)),
+        ("Metallicity_03", ("", ["N_fraction"], None)),
+        ("Metallicity_04", ("", ["O_fraction"], None)),
+        ("Metallicity_05", ("", ["Ne_fraction"], None)),
+        ("Metallicity_06", ("", ["Mg_fraction"], None)),
+        ("Metallicity_07", ("", ["Si_fraction"], None)),
+        ("Metallicity_08", ("", ["S_fraction"], None)),
+        ("Metallicity_09", ("", ["Ca_fraction"], None)),
+        ("Metallicity_10", ("", ["Fe_fraction"], None)),
     )
 
+    def __init__(self, *args, **kwargs):
+        super(SPHFieldInfo, self).__init__(*args, **kwargs)
+        # Special case for FIRE
+        if ("PartType0", "Metallicity_00") in self.field_list:
+            self.species_names += ["He", "C", "N", "O", "Ne", "Mg", "Si", "S",
+                "Ca", "Fe"]
 
+    def setup_particle_fields(self, ptype, *args, **kwargs):
+        super(SPHFieldInfo, self).setup_particle_fields(ptype, *args, **kwargs)
+        setup_species_fields(self, ptype)
 
 class TipsyFieldInfo(SPHFieldInfo):
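
The Metallicity_NN aliases above encode a column convention, assumed here to follow the FIRE outputs: column 0 is the total metallicity and columns 1-10 are individual species fractions. Spelled out:

    species = ["total", "He", "C", "N", "O", "Ne", "Mg", "Si", "S", "Ca", "Fe"]
    for col, s in enumerate(species):
        alias = "metallicity" if col == 0 else "%s_fraction" % s
        print("Metallicity_%02i -> %s" % (col, alias))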
 

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -99,7 +99,7 @@
                 g = f["/%s" % ptype]
                 coords = g["Coordinates"][:].astype("float64")
                 mask = selector.select_points(
-                            coords[:,0], coords[:,1], coords[:,2])
+                            coords[:,0], coords[:,1], coords[:,2], 0.0)
                 del coords
                 if mask is None: continue
                 for field in field_list:
@@ -113,7 +113,9 @@
                     elif field in self._element_names:
                         rfield = 'ElementAbundance/' + field
                         data = g[rfield][:][mask,...]
-
+                    elif field.startswith("Metallicity_"):
+                        col = int(field.rsplit("_", 1)[-1])
+                        data = g["Metallicity"][:,col][mask]
                     else:
                         data = g[field][:][mask,...]
 
@@ -190,6 +192,10 @@
                     for j in gp.keys():
                         kk = j
                         fields.append((ptype, str(kk)))
+                elif k == 'Metallicity' and len(g[k].shape) > 1:
+                    # Vector of metallicity
+                    for i in range(g[k].shape[1]):
+                        fields.append((ptype, "Metallicity_%02i" % i))
                 else:
                     kk = k
                     if not hasattr(g[kk], "shape"): continue
@@ -275,7 +281,7 @@
                 pos = self._read_field_from_file(f,
                             tp[ptype], "Coordinates")
                 mask = selector.select_points(
-                    pos[:,0], pos[:,1], pos[:,2])
+                    pos[:,0], pos[:,1], pos[:,2], 0.0)
                 del pos
                 if mask is None: continue
                 for field in field_list:
@@ -528,7 +534,7 @@
                 mask = selector.select_points(
                     p["Coordinates"]['x'].astype("float64"),
                     p["Coordinates"]['y'].astype("float64"),
-                    p["Coordinates"]['z'].astype("float64"))
+                    p["Coordinates"]['z'].astype("float64"), 0.0)
                 if mask is None: continue
                 tf = self._fill_fields(field_list, p, mask, data_file)
                 for field in field_list:
@@ -551,6 +557,8 @@
             pf.domain_left_edge = 0
             pf.domain_right_edge = 0
             f.seek(pf._header_offset)
+            mi =   np.array([1e30, 1e30, 1e30], dtype="float64")
+            ma =  -np.array([1e30, 1e30, 1e30], dtype="float64")
             for iptype, ptype in enumerate(self._ptypes):
                 # We'll just add the individual types separately
                 count = data_file.total_particles[ptype]
@@ -560,19 +568,23 @@
                     c = min(CHUNKSIZE, stop - ind)
                     pp = np.fromfile(f, dtype = self._pdtypes[ptype],
                                      count = c)
-                    for ax in 'xyz':
-                        mi = pp["Coordinates"][ax].min()
-                        ma = pp["Coordinates"][ax].max()
-                        outlier = self.arr(np.max(np.abs((mi,ma))), 'code_length')
-                        if outlier > pf.domain_right_edge or -outlier < pf.domain_left_edge:
-                            # scale these up so the domain is slightly
-                            # larger than the most distant particle position
-                            pf.domain_left_edge = -1.01*outlier
-                            pf.domain_right_edge = 1.01*outlier
+                    eps = np.finfo(pp["Coordinates"]["x"].dtype).eps
+                    np.minimum(mi, [pp["Coordinates"]["x"].min(),
+                                    pp["Coordinates"]["y"].min(),
+                                    pp["Coordinates"]["z"].min()], mi)
+                    np.maximum(ma, [pp["Coordinates"]["x"].max(),
+                                    pp["Coordinates"]["y"].max(),
+                                    pp["Coordinates"]["z"].max()], ma)
                     ind += c
-        pf.domain_left_edge = np.ones(3)*pf.domain_left_edge
-        pf.domain_right_edge = np.ones(3)*pf.domain_right_edge
-        pf.domain_width = np.ones(3)*2*pf.domain_right_edge
+        # We extend by 1%.
+        DW = ma - mi
+        mi -= 0.01 * DW
+        ma += 0.01 * DW
+        pf.domain_left_edge = pf.arr(mi, 'code_length')
+        pf.domain_right_edge = pf.arr(ma, 'code_length')
+        pf.domain_width = DW = pf.domain_right_edge - pf.domain_left_edge
+        pf.unit_registry.add("unitary", float(DW.max() * DW.units.cgs_value),
+                                 DW.units.dimensions)
 
     def _initialize_index(self, data_file, regions):
         pf = data_file.pf
@@ -739,7 +751,7 @@
                 c = np.frombuffer(s, dtype="float64")
                 c.shape = (c.shape[0]/3.0, 3)
                 mask = selector.select_points(
-                            c[:,0], c[:,1], c[:,2])
+                            c[:,0], c[:,1], c[:,2], 0.0)
                 del c
                 if mask is None: continue
                 for field in field_list:

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -85,7 +85,7 @@
                 for ptype, field_list in sorted(ptf.items()):
                     x, y, z  = (gf[ptype, "particle_position_%s" % ax]
                                 for ax in 'xyz')
-                    mask = selector.select_points(x, y, z)
+                    mask = selector.select_points(x, y, z, 0.0)
                     if mask is None: continue
                     for field in field_list:
                         data = np.asarray(gf[ptype, field])
@@ -127,7 +127,7 @@
             for ptype, field_list in sorted(ptf.items()):
                 x, y, z = (f[ptype, "particle_position_%s" % ax]
                            for ax in 'xyz')
-                mask = selector.select_points(x, y, z)
+                mask = selector.select_points(x, y, z, 0.0)
                 if mask is None: continue
                 for field in field_list:
                     data = f[ptype, field][mask]

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -24,7 +24,6 @@
 
 from yt.utilities.exceptions import *
 from yt.utilities.logger import ytLogger as mylog
-from yt.utilities.definitions import inv_axis_names, axis_names, x_dict, y_dict
 import yt.extern.progressbar as pb
 import yt.utilities.rpdb as rpdb
 from yt.units.yt_array import YTArray, YTQuantity
@@ -637,8 +636,8 @@
         return os.environ.get("OMP_NUM_THREADS", 0)
     return nt
 
-def fix_axis(axis):
-    return inv_axis_names.get(axis, axis)
+def fix_axis(axis, pf):
+    return pf.coordinates.axis_id.get(axis, axis)
 
 def get_image_suffix(name):
     suffix = os.path.splitext(name)[1]
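
The new `fix_axis` defers to the coordinate handler's `axis_id` map, which evidently accepts axis names and passes already-numeric axes through unchanged. A sketch under the assumption of a Cartesian map:

    axis_id = {"x": 0, "y": 1, "z": 2}    # assumed Cartesian name -> index map

    def fix_axis(axis, pf=None):          # pf unused in this standalone sketch
        return axis_id.get(axis, axis)

    fix_axis("y")   # -> 1
    fix_axis(2)     # -> 2 (integers pass through)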

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/geometry/coordinate_handler.py
--- a/yt/geometry/coordinate_handler.py
+++ b/yt/geometry/coordinate_handler.py
@@ -24,7 +24,7 @@
 from yt.utilities.io_handler import io_registry
 from yt.utilities.logger import ytLogger as mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface, parallel_splitter
+    ParallelAnalysisInterface
 from yt.utilities.lib.misc_utilities import \
     pixelize_cylinder
 import yt.visualization._MPL as _MPL

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -36,7 +36,7 @@
 from yt.utilities.io_handler import io_registry
 from yt.utilities.logger import ytLogger as mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface, parallel_splitter
+    ParallelAnalysisInterface, parallel_root_only
 from yt.utilities.exceptions import YTFieldNotFound
 
 class Index(ParallelAnalysisInterface):
@@ -126,7 +126,8 @@
         if getattr(self, "io", None) is not None: return
         self.io = io_registry[self.dataset_type](self.parameter_file)
 
-    def _save_data(self, array, node, name, set_attr=None, force=False, passthrough = False):
+    @parallel_root_only
+    def save_data(self, array, node, name, set_attr=None, force=False, passthrough = False):
         """
         Arbitrary numpy data will be saved to the region in the datafile
         described by *node* and *name*.  If data file does not exist, it throws
@@ -157,14 +158,6 @@
         del self._data_file
         self._data_file = h5py.File(self.__data_filename, self._data_mode)
 
-    save_data = parallel_splitter(_save_data, _reload_data_file)
-
-    def _reset_save_data(self,round_robin=False):
-        if round_robin:
-            self.save_data = self._save_data
-        else:
-            self.save_data = parallel_splitter(self._save_data, self._reload_data_file)
-
     def save_object(self, obj, name):
         """
         Save an object (*obj*) to the data_file using the Pickle protocol,
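
Replacing the `parallel_splitter` construction with a `@parallel_root_only` decorator turns `save_data` back into an ordinary method. A simplified stand-in for what such a decorator does (the rank query is a placeholder, not yt's API):

    def get_rank():
        return 0    # placeholder; the real decorator queries the MPI communicator

    def parallel_root_only(func):
        def wrapper(*args, **kwargs):
            if get_rank() == 0:          # only the root process performs the write
                return func(*args, **kwargs)
        return wrapper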

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -31,7 +31,7 @@
 from yt.utilities.physical_constants import sec_per_year
 from yt.utilities.io_handler import io_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface, parallel_splitter
+    ParallelAnalysisInterface
 from yt.utilities.lib.GridTree import GridTree, MatchPointsToGrids
 
 from yt.data_objects.data_containers import data_object_registry

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/geometry/object_finding_mixin.py
--- a/yt/geometry/object_finding_mixin.py
+++ b/yt/geometry/object_finding_mixin.py
@@ -38,10 +38,12 @@
         # So if gRE > coord, we get a mask, if not, we get a zero
         #    if gLE > coord, we get a zero, if not, mask
         # Thus, if the coordinate is between the two edges, we win!
-        np.choose(np.greater(self.grid_right_edge[:,x_dict[axis]],coord[0]),(0,mask),mask)
-        np.choose(np.greater(self.grid_left_edge[:,x_dict[axis]],coord[0]),(mask,0),mask)
-        np.choose(np.greater(self.grid_right_edge[:,y_dict[axis]],coord[1]),(0,mask),mask)
-        np.choose(np.greater(self.grid_left_edge[:,y_dict[axis]],coord[1]),(mask,0),mask)
+        xax = self.pf.coordinates.x_axis[axis]
+        yax = self.pf.coordinates.y_axis[axis]
+        np.choose(np.greater(self.grid_right_edge[:,xax],coord[0]),(0,mask),mask)
+        np.choose(np.greater(self.grid_left_edge[:,xax],coord[0]),(mask,0),mask)
+        np.choose(np.greater(self.grid_right_edge[:,yax],coord[1]),(0,mask),mask)
+        np.choose(np.greater(self.grid_left_edge[:,yax],coord[1]),(mask,0),mask)
         ind = np.where(mask == 1)
         return self.grids[ind], ind
 

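The np.choose calls above implement the in-place masking trick described in the comments: a grid survives only if the coordinate lies between its left and right edges along both image axes. A small self-contained NumPy illustration of one axis, with made-up edge values:

    import numpy as np

    grid_left  = np.array([0.00, 0.25, 0.50, 0.75])
    grid_right = np.array([0.25, 0.50, 0.75, 1.00])
    coord = 0.6
    mask = np.ones(4, dtype="int64")

    # Where right edge > coord keep mask, else write 0 (out=mask).
    np.choose(np.greater(grid_right, coord), (0, mask), mask)
    # Where left edge > coord write 0, else keep mask.
    np.choose(np.greater(grid_left, coord), (mask, 0), mask)

    print(mask)  # [0 0 1 0] -- only the grid straddling 0.6 remains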
diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/geometry/oct_geometry_handler.py
--- a/yt/geometry/oct_geometry_handler.py
+++ b/yt/geometry/oct_geometry_handler.py
@@ -30,7 +30,7 @@
 from yt.utilities.definitions import MAXLEVEL
 from yt.utilities.io_handler import io_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface, parallel_splitter
+    ParallelAnalysisInterface
 
 from yt.data_objects.data_containers import data_object_registry
 

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -32,7 +32,7 @@
 from yt.utilities.definitions import MAXLEVEL
 from yt.utilities.io_handler import io_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface, parallel_splitter
+    ParallelAnalysisInterface
 
 from yt.data_objects.data_containers import data_object_registry
 from yt.data_objects.octree_subset import ParticleOctreeSubset

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -455,10 +455,10 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def count_points(self, np.ndarray[np.float64_t, ndim=1] x,
-                           np.ndarray[np.float64_t, ndim=1] y,
-                           np.ndarray[np.float64_t, ndim=1] z,
-                           np.float64_t radius = 0.0):
+    def count_points(self, np.ndarray[anyfloat, ndim=1] x,
+                           np.ndarray[anyfloat, ndim=1] y,
+                           np.ndarray[anyfloat, ndim=1] z,
+                           np.float64_t radius):
         cdef int count = 0
         cdef int i
         cdef np.float64_t pos[3]
@@ -483,10 +483,10 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def select_points(self, np.ndarray[np.float64_t, ndim=1] x,
-                            np.ndarray[np.float64_t, ndim=1] y,
-                            np.ndarray[np.float64_t, ndim=1] z,
-                            np.float64_t radius = 0.0):
+    def select_points(self, np.ndarray[anyfloat, ndim=1] x,
+                            np.ndarray[anyfloat, ndim=1] y,
+                            np.ndarray[anyfloat, ndim=1] z,
+                            np.float64_t radius):
         cdef int count = 0
         cdef int i
         cdef np.float64_t pos[3]

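Dropping the hard-coded np.float64_t in favor of the anyfloat fused type lets count_points and select_points accept float32 or float64 coordinate arrays without an up-front copy. A minimal Cython sketch of such a fused type (the actual yt definition lives elsewhere in the module and may differ):

    cimport numpy as np

    # Hedged sketch: one fused type, two compiled specializations.
    ctypedef fused anyfloat:
        np.float32_t
        np.float64_t

    def first_point(np.ndarray[anyfloat, ndim=1] x):
        # Cython emits a float32 and a float64 version of this body.
        return x[0]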
diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e yt/gui/reason/extdirect_repl.py
--- a/yt/gui/reason/extdirect_repl.py
+++ b/yt/gui/reason/extdirect_repl.py
@@ -39,7 +39,6 @@
 
 from yt.funcs import *
 from yt.utilities.logger import ytLogger, ufstring
-from yt.utilities.definitions import inv_axis_names
 from yt.visualization.image_writer import apply_colormap
 from yt.visualization.api import Streamlines
 from .widget_store import WidgetStore

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/a9ff5830466c/
Changeset:   a9ff5830466c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-03 14:58:19+00:00
Summary:     Merging from SDF
Affected #:  18 files

diff -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e -r a9ff5830466c1218ca6d6bf25ce7b9b9e10ba124 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -6,8 +6,6 @@
 from libc.stdlib cimport malloc, free
 import sys
 
-
-
 # Importing relevant rockstar data types particle, fof halo, halo
 
 cdef import from "particle.h":
@@ -15,6 +13,10 @@
         np.int64_t id
         float pos[6]
 
+cdef import from "rockstar.h":
+    particle *global_particles "p"
+    void rockstar_cleanup()
+
 cdef import from "fof.h":
     struct fof:
         np.int64_t num_p
@@ -23,13 +25,34 @@
 cdef import from "halo.h":
     struct halo:
         np.int64_t id
-        float pos[6], corevel[3], bulkvel[3]
+        float pos[6]
+        float corevel[3]
+        float bulkvel[3]
         float m, r, child_r, vmax_r, mgrav,    vmax, rvmax, rs, klypin_rs, vrms
-        float J[3], energy, spin, alt_m[4], Xoff, Voff, b_to_a, c_to_a, A[3]
+        float J[3]
+        float energy, spin
+        float alt_m[4]
+        float Xoff, Voff, b_to_a, c_to_a
+        float A[3]
         float bullock_spin, kin_to_pot
         np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
         float min_pos_err, min_vel_err, min_bulkvel_err
 
+ctypedef packed struct haloflat:
+    np.int64_t id
+    float pos_x, pos_y, pos_z, pos_v, pos_u, pos_w
+    float corevel_x, corevel_y, corevel_z
+    float bulkvel_x, bulkvel_y, bulkvel_z
+    float m, r, child_r, vmax_r, mgrav,    vmax, rvmax, rs, klypin_rs, vrms
+    float J1, J2, J3
+    float energy, spin
+    float alt_m1, alt_m2, alt_m3, alt_m4
+    float Xoff, Voff, b_to_a, c_to_a
+    float A1, A2, A3
+    float bullock_spin, kin_to_pot
+    np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
+    float min_pos_err, min_vel_err, min_bulkvel_err
+
 # For finding sub halos import finder function and global variable
 # rockstar uses to store the results
 
@@ -38,6 +61,9 @@
     halo *halos
     np.int64_t num_halos
     void calc_mass_definition() nogil
+    void free_particle_copies() nogil
+    void alloc_particle_copies(np.int64_t total_copies) nogil
+    void free_halos() nogil
 
 # For outputing halos, rockstar style
 
@@ -48,6 +74,7 @@
 
 cdef import from "config.h":
     void setup_config() nogil
+    void output_config(char *fn) nogil
 
 cdef import from "config_vars.h":
     # Rockstar cleverly puts all of the config variables inside a templated
@@ -197,45 +224,87 @@
     def output_halos(self):
         output_halos(0, 0, 0, NULL) 
 
+    def return_halos(self):
+        cdef haloflat[:] haloview = <haloflat[:num_halos]> (<haloflat*> halos)
+        rv = np.asarray(haloview).copy()
+        rockstar_cleanup()
+        free_halos()
+        return rv
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    def make_rockstar_fof(self, np.ndarray[np.int64_t, ndim=1] pid,
+    def make_rockstar_fof(self, np.ndarray[np.int64_t, ndim=1] pind,
+                                np.ndarray[np.int64_t, ndim=1] fof_tags,
                                 np.ndarray[np.float64_t, ndim=2] pos,
-                                np.ndarray[np.float64_t, ndim=2] vel,
-                                np.ndarray[np.int64_t, ndim=1] fof_tags,
-                                np.int64_t nfof,
-                                np.int64_t npart_max):
+                                np.ndarray[np.float64_t, ndim=2] vel):
 
         # Define fof object
 
         # Find number of particles
-        cdef np.int64_t i, j
-        cdef np.int64_t num_particles = pid.shape[0]
+        cdef np.int64_t i, j, k, ind, offset
+        cdef np.int64_t num_particles = pind.shape[0]
+        global global_particles
 
         # Allocate space for correct number of particles
-        cdef particle* particles = <particle*> malloc(npart_max * sizeof(particle))
         cdef fof fof_obj
-        fof_obj.particles = particles
 
-        cdef np.int64_t last_fof_tag = 1
-        cdef np.int64_t k = 0
-        for i in range(num_particles):
-            if fof_tags[i] < 0:
+        cdef np.int64_t max_count = 0
+        cdef np.int64_t next_tag, local_tag, last_fof_tag = -1
+        fof_obj.num_p = 0
+        j = 0
+        # One pass to find the count of the most frequent tag (largest group).
+        for i in range(pind.shape[0]):
+            ind = pind[i]
+            local_tag = fof_tags[ind]
+            # Don't count the null group
+            if local_tag == -1: continue
+            if local_tag != last_fof_tag:
+                if j > max_count:
+                    max_count = j
+                last_fof_tag = local_tag
+                j = 1
+            else:
+                j += 1
+        if j > max_count:
+            max_count = j
+        #print >> sys.stderr, "Most frequent occurrence: %s" % max_count
+        fof_obj.particles = <particle*> malloc(max_count * sizeof(particle))
+        j = 0
+        cdef int counter = 0, ndone = 0
+        cdef np.ndarray[np.int64_t, ndim=1] pcounts 
+        pcounts = np.zeros(np.unique(fof_tags).size, dtype="int64")
+        cdef np.int64_t frac = <np.int64_t> (pcounts.shape[0] / 20.0)
+        free_halos()
+        for i in range(pind.shape[0]):
+            ind = pind[i]
+            local_tag = fof_tags[ind]
+            # Skip this one -- it means no group.
+            if local_tag == -1:
                 continue
-            if fof_tags[i] != last_fof_tag:
-                last_fof_tag = fof_tags[i]
-                if k > 16:
-                    fof_obj.num_p = k
-                    find_subs(&fof_obj)
-                k = 0
-            particles[k].id = pid[i]
-
-            # fill in locations & velocities
-            for j in range(3):
-                particles[k].pos[j] = pos[i,j]
-                particles[k].pos[j+3] = vel[i,j]
-            k += 1
-        free(particles)
-
-
-
+            if i == pind.shape[0] - 1:
+                next_tag = local_tag + 1
+            else:
+                next_tag = fof_tags[pind[i+1]]
+            for k in range(3):
+                fof_obj.particles[j].pos[k] = pos[ind,k]
+                fof_obj.particles[j].pos[k+3] = vel[ind,k]
+            fof_obj.particles[j].id = j
+            fof_obj.num_p += 1
+            j += 1
+            # Now we check if we're the last one
+            if local_tag != next_tag:
+                pcounts[ndone] = fof_obj.num_p
+                counter += 1
+                ndone += 1
+                if counter == frac:
+                    print >> sys.stderr, "R*-ing % 5.1f%% done (%0.3f -> %0.3f)" % (
+                        (100.0 * ndone)/pcounts.size,
+                        fof_obj.particles[0].pos[2],
+                        halos[num_halos - 1].pos[2])
+                    counter = 0
+                global_particles = &fof_obj.particles[0]
+                find_subs(&fof_obj)
+                # Now we reset
+                fof_obj.num_p = j = 0
+        free(fof_obj.particles)
+        return pcounts

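The rewritten make_rockstar_fof walks the grouped FOF tags once up front to find the largest group, so a single particle buffer of that size can be reused for every group instead of allocating npart_max. A pure-Python sketch of that sizing pass, on made-up tags:

    def largest_group_size(tags):
        # Longest run of equal, non-negative tags in a grouped sequence,
        # mirroring the first pass in make_rockstar_fof.
        max_count = run = 0
        last = None
        for t in tags:
            if t == -1:        # skip the null group
                continue
            if t != last:
                max_count = max(max_count, run)
                last, run = t, 1
            else:
                run += 1
        return max(max_count, run)

    print(largest_group_size([-1, 3, 3, 3, 7, 7, 9]))  # 3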
diff -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e -r a9ff5830466c1218ca6d6bf25ce7b9b9e10ba124 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -204,7 +204,7 @@
     p[0] = <particle *> malloc(sizeof(particle) * local_parts)
 
     conv[0] = conv[1] = conv[2] = pf.length_unit.in_units("Mpccm/h")
-    conv[3] = conv[4] = conv[5] = 1e-5
+    conv[3] = conv[4] = conv[5] = pf.velocity_unit.in_units("km/s")
     left_edge[0] = pf.domain_left_edge[0]
     left_edge[1] = pf.domain_left_edge[1]
     left_edge[2] = pf.domain_left_edge[2]

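Rather than assuming velocities arrive in cm/s (the old 1e-5 factor converted to km/s), the conversion factor is now taken from the dataset's own unit system. Illustratively, with a hypothetical dataset path:

    import yt

    pf = yt.load("snapshot_0100")  # hypothetical file name
    # YTQuantity giving the code-velocity -> km/s conversion:
    print(pf.velocity_unit.in_units("km/s"))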
diff -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e -r a9ff5830466c1218ca6d6bf25ce7b9b9e10ba124 yt/analysis_modules/halo_finding/rockstar/setup.py
--- a/yt/analysis_modules/halo_finding/rockstar/setup.py
+++ b/yt/analysis_modules/halo_finding/rockstar/setup.py
@@ -21,6 +21,8 @@
                          "yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx",
                          library_dirs=[rd],
                          libraries=["rockstar"],
+                         #define_macros = [("THREADSAFE", "__thread")],
+                         define_macros = [("THREADSAFE", "")],
                          include_dirs=[rd,
                                        os.path.join(rd, "io"),
                                        os.path.join(rd, "util")])
@@ -28,6 +30,8 @@
                          "yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx",
                          library_dirs=[rd],
                          libraries=["rockstar"],
+                         #define_macros = [("THREADSAFE", "__thread")],
+                         define_macros = [("THREADSAFE", "")],
                          include_dirs=[rd,
                                        os.path.join(rd, "io"),
                                        os.path.join(rd, "util")])

diff -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e -r a9ff5830466c1218ca6d6bf25ce7b9b9e10ba124 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -29,6 +29,7 @@
     'moab',
     #'pluto',
     'ramses',
+    'sdf',
     'sph',
     'stream',
 ]

diff -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e -r a9ff5830466c1218ca6d6bf25ce7b9b9e10ba124 yt/frontends/sdf/__init__.py
--- /dev/null
+++ b/yt/frontends/sdf/__init__.py
@@ -0,0 +1,15 @@
+"""
+__init__ for yt.frontends.sdf
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e -r a9ff5830466c1218ca6d6bf25ce7b9b9e10ba124 yt/frontends/sdf/api.py
--- /dev/null
+++ b/yt/frontends/sdf/api.py
@@ -0,0 +1,24 @@
+"""
+API for yt.frontends.sdf
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+      SDFDataset
+
+from .io import \
+      IOHandlerSDF
+
+from .fields import \
+      SDFFieldInfo

diff -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e -r a9ff5830466c1218ca6d6bf25ce7b9b9e10ba124 yt/frontends/sdf/data_structures.py
--- /dev/null
+++ b/yt/frontends/sdf/data_structures.py
@@ -0,0 +1,139 @@
+"""
+Data structures for a generic SDF frontend
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+import stat
+import weakref
+import struct
+import glob
+import time
+import os
+import types
+
+from yt.utilities.logger import ytLogger as mylog
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
+from yt.data_objects.static_output import \
+    Dataset, ParticleFile
+from yt.utilities.physical_constants import \
+    G, \
+    cm_per_kpc, \
+    mass_sun_cgs
+from yt.utilities.cosmology import Cosmology
+from .fields import \
+    SDFFieldInfo
+from .io import \
+    IOHandlerSDF, \
+    SDFRead,\
+    SDFIndex
+
+class SDFFile(ParticleFile):
+    pass
+
+class SDFDataset(Dataset):
+    _index_class = ParticleIndex
+    _file_class = SDFFile
+    _field_info_class = SDFFieldInfo
+    _particle_mass_name = None
+    _particle_coordinates_name = None
+    _particle_velocity_name = None
+    _sindex = None
+
+    def __init__(self, filename, dataset_type = "sdf_particles",
+                 n_ref = 64, over_refine_factor = 1,
+                 bounding_box = None,
+                 sdf_header = None,
+                 idx_filename = None,
+                 idx_header = None,
+                 idx_level = 9):
+        self.n_ref = n_ref
+        self.over_refine_factor = over_refine_factor
+        if bounding_box is not None:
+            bbox = np.array(bounding_box, dtype="float64")
+            if bbox.shape == (2, 3):
+                bbox = bbox.transpose()
+            self.domain_left_edge = bbox[:,0]
+            self.domain_right_edge = bbox[:,1]
+        else:
+            self.domain_left_edge = self.domain_right_edge = None
+        self.sdf_header = sdf_header
+        self.idx_filename = idx_filename
+        self.idx_header = idx_header
+        self.idx_level = idx_level
+        super(SDFDataset, self).__init__(filename, dataset_type)
+
+    def _parse_parameter_file(self):
+        self.sdf_container = SDFRead(self.parameter_filename,
+                                     header=self.sdf_header)
+        # Reference
+        self.parameters = self.sdf_container.parameters
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+
+        if None in (self.domain_left_edge, self.domain_right_edge):
+            R0 = self.parameters['R0']
+            self.domain_left_edge = np.array([
+              -self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+            self.domain_right_edge = np.array([
+              +self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+            self.domain_left_edge *= self.parameters.get("a", 1.0)
+            self.domain_right_edge *= self.parameters.get("a", 1.0)
+
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
+        self.periodicity = (True, True, True)
+
+        self.cosmological_simulation = 1
+
+        self.current_redshift = self.parameters.get("redshift", 0.0)
+        self.omega_lambda = self.parameters["Omega0_lambda"]
+        self.omega_matter = self.parameters["Omega0_m"]
+        self.hubble_constant = self.parameters["h_100"]
+        # Now we calculate our time based on the cosmology.
+        cosmo = Cosmology(self.hubble_constant,
+                          self.omega_matter, self.omega_lambda)
+        self.current_time = cosmo.hubble_time(self.current_redshift)
+        mylog.info("Calculating time to be %0.3e seconds", self.current_time)
+        self.filename_template = self.parameter_filename
+        self.file_count = 1
+
+    @property
+    def sindex(self):
+        if self._sindex is None:
+            if self.idx_filename is not None:
+                indexdata = SDFRead(self.idx_filename,
+                                    header=self.idx_header)
+                self._sindex = SDFIndex(self.sdf_container, indexdata, level=self.idx_level)
+            else:
+                raise RuntimeError("SDF index0 file not supplied in load.")
+        return self._sindex
+
+    def _set_code_unit_attributes(self):
+        self.length_unit = self.quan(1.0, "kpc")
+        self.velocity_unit = self.quan(1.0, "kpc/Gyr")
+        self.time_unit = self.quan(1.0, "Gyr")
+        self.mass_unit = self.quan(1e10, "Msun")
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        if not os.path.isfile(args[0]): return False
+        with open(args[0], "r") as f:
+            line = f.readline().strip()
+            return line == "# SDF 1.0"

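A short usage sketch of the new frontend; the file names and bounding box below are invented for illustration:

    from yt.frontends.sdf.api import SDFDataset

    pf = SDFDataset("halos.sdf",
                    bounding_box=[[-1.0, 1.0],   # x: (left, right), code units
                                  [-1.0, 1.0],   # y
                                  [-1.0, 1.0]],  # z
                    idx_filename="halos.sdf.idx",  # optional index file
                    idx_level=9)
    print(pf.domain_left_edge, pf.domain_right_edge)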
diff -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e -r a9ff5830466c1218ca6d6bf25ce7b9b9e10ba124 yt/frontends/sdf/fields.py
--- /dev/null
+++ b/yt/frontends/sdf/fields.py
@@ -0,0 +1,47 @@
+"""
+SDF-specific fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import numpy as np
+
+from yt.funcs import *
+
+from yt.fields.field_info_container import \
+    FieldInfoContainer
+
+from yt.config import ytcfg
+from yt.utilities.physical_constants import mh
+from yt.fields.species_fields import \
+    add_species_field_by_fraction, \
+    add_species_field_by_density, \
+    setup_species_fields
+
+from yt.fields.particle_fields import \
+    add_volume_weighted_smoothed_field
+
+class SDFFieldInfo(FieldInfoContainer):
+    known_other_fields = ()
+
+    known_particle_fields = (
+        ("mass", ("code_mass", ["particle_mass"], None)),
+        ("x", ("code_length", ["particle_position_x"], None)),
+        ("y", ("code_length", ["particle_position_y"], None)),
+        ("z", ("code_length", ["particle_position_z"], None)),
+        ("vx", ("code_velocity", ["particle_velocity_x"], None)),
+        ("vy", ("code_velocity", ["particle_velocity_y"], None)),
+        ("vz", ("code_velocity", ["particle_velocity_z"], None)),
+        ("ident", ("", ["particle_index"], None)),
+    )

diff -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e -r a9ff5830466c1218ca6d6bf25ce7b9b9e10ba124 yt/frontends/sdf/io.py
--- /dev/null
+++ b/yt/frontends/sdf/io.py
@@ -0,0 +1,564 @@
+"""
+SDF data-file handling function
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import glob
+import h5py
+import numpy as np
+from yt.funcs import *
+from yt.utilities.exceptions import *
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+
+from yt.utilities.fortran_utils import read_record
+from yt.utilities.lib.geometry_utils import compute_morton
+
+from yt.geometry.oct_container import _ORDER_MAX
+CHUNKSIZE = 32**3
+
+class IOHandlerSDF(BaseIOHandler):
+    _dataset_type = "sdf_particles"
+
+    @property
+    def _handle(self):
+        return self.pf.sdf_container
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        raise NotImplementedError
+
+    def _read_particle_coords(self, chunks, ptf):
+        chunks = list(chunks)
+        data_files = set([])
+        assert(len(ptf) == 1)
+        assert(ptf.keys()[0] == "dark_matter")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        assert(len(data_files) == 1)
+        for data_file in data_files:
+            pcount = self._handle['x'].size
+            yield "dark_matter", (
+                self._handle['x'], self._handle['y'], self._handle['z'])
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        chunks = list(chunks)
+        data_files = set([])
+        assert(len(ptf) == 1)
+        assert(ptf.keys()[0] == "dark_matter")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        assert(len(data_files) == 1)
+        for data_file in data_files:
+            pcount = self._handle['x'].size
+            for ptype, field_list in sorted(ptf.items()):
+                x = self._handle['x']
+                y = self._handle['y']
+                z = self._handle['z']
+                mask = selector.select_points(x, y, z, 0.0)
+                del x, y, z
+                if mask is None: continue
+                for field in field_list:
+                    if field == "mass":
+                        data = np.ones(mask.sum(), dtype="float64")
+                        data *= self.pf.parameters["particle_mass"]
+                    else:
+                        data = self._handle[field][mask]
+                    yield (ptype, field), data
+
+    def _initialize_index(self, data_file, regions):
+        x, y, z = (self._handle[ax] for ax in 'xyz')
+        pcount = x.size
+        morton = np.empty(pcount, dtype='uint64')
+        ind = 0
+        while ind < pcount:
+            npart = min(CHUNKSIZE, pcount - ind)
+            pos = np.empty((npart, 3), dtype=x.dtype)
+            pos[:,0] = x[ind:ind+npart]
+            pos[:,1] = y[ind:ind+npart]
+            pos[:,2] = z[ind:ind+npart]
+            if np.any(pos.min(axis=0) < self.pf.domain_left_edge) or \
+               np.any(pos.max(axis=0) > self.pf.domain_right_edge):
+                raise YTDomainOverflow(pos.min(axis=0),
+                                       pos.max(axis=0),
+                                       self.pf.domain_left_edge,
+                                       self.pf.domain_right_edge)
+            regions.add_data_file(pos, data_file.file_id)
+            morton[ind:ind+npart] = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.pf.domain_left_edge,
+                data_file.pf.domain_right_edge)
+            ind += CHUNKSIZE
+        return morton
+
+    def _count_particles(self, data_file):
+        return {'dark_matter': self._handle['x'].size}
+
+    def _identify_fields(self, data_file):
+        fields = [("dark_matter", v) for v in self._handle.keys()]
+        fields.append(("dark_matter", "mass"))
+        return fields, {}
+
+import re
+import os
+
+_types = {
+    'int': 'int32',
+    'int64_t': 'int64',
+    'float': 'float32',
+    'double': 'float64',
+    'unsigned int': 'I',
+    'unsigned char': 'B',
+}
+
+def get_type(vtype, len=None):
+    try:
+        t = _types[vtype]
+        if len is not None:
+            t = np.dtype((t, len))
+        else:
+            t = np.dtype(t)
+    except KeyError:
+        t = eval("np."+vtype)
+    return t
+
+def lstrip(text_list):
+    return [t.strip() for t in text_list]
+
+def get_struct_vars(line):
+    spl = lstrip(line.split(";"))
+    multiv = lstrip(spl[0].split(","))
+    ret = lstrip(multiv[0].split())
+    ctype = ret[0]
+    vnames = [ret[-1]] + multiv[1:]
+    vnames = [v.strip() for v in vnames]
+    for vtype in ret[1:-1]:
+        ctype += ' ' + vtype
+    num = None
+    if len(vnames) == 1:
+        if '[' in vnames[0]:
+            num = int(vnames[0].split('[')[-1].strip(']'))
+            #num = int(re.sub("\D", "", vnames[0]))
+    ctype = get_type(ctype, len=num)
+    return ctype, vnames
+
+class DataStruct(object):
+    """docstring for DataStruct"""
+
+    _offset = 0
+
+    def __init__(self, dtypes, num, filename):
+        self.filename = filename
+        self.dtype = np.dtype(dtypes)
+        self.size = num
+        self.itemsize = self.dtype.itemsize
+        self.data = {}
+        self.handle = None
+
+    def set_offset(self, offset):
+        self._offset = offset
+        if self.size == -1:
+            file_size = os.path.getsize(self.filename)
+            file_size -= offset
+            self.size = float(file_size) / self.itemsize
+            assert(int(self.size) == self.size)
+            # np.memmap needs an integer shape, so cast once validated.
+            self.size = int(self.size)
+
+    def build_memmap(self):
+        assert(self.size != -1)
+        self.handle = np.memmap(self.filename, dtype=self.dtype,
+                        mode='r', shape=self.size, offset=self._offset)
+        for k in self.dtype.names:
+            self.data[k] = self.handle[k]
+
+class SDFRead(dict):
+
+    """docstring for SDFRead"""
+
+    _eof = 'SDF-EOH'
+
+    def __init__(self, filename, header=None):
+        self.filename = filename
+        if header is None:
+            header = filename
+        self.header = header
+        self.parameters = {}
+        self.structs = []
+        self.comments = []
+        self.parse_header()
+        self.set_offsets()
+        self.load_memmaps()
+
+    def parse_header(self):
+        """docstring for parse_header"""
+        # Pre-process
+        ascfile = open(self.header, 'r')
+        while True:
+            l = ascfile.readline()
+            if self._eof in l: break
+
+            self.parse_line(l, ascfile)
+
+        hoff = ascfile.tell()
+        ascfile.close()
+        if self.header != self.filename:
+            hoff = 0
+        self.parameters['header_offset'] = hoff
+
+    def parse_line(self, line, ascfile):
+        """Parse a line of sdf"""
+
+
+        if 'struct' in line:
+            self.parse_struct(line, ascfile)
+            return
+
+        if "#" in line:
+            self.comments.append(line)
+            return
+
+        spl = lstrip(line.split("="))
+        vtype, vname = lstrip(spl[0].split())
+        vname = vname.strip("[]")
+        vval = spl[-1].strip(";")
+        if vtype == 'parameter':
+            self.parameters[vname] = vval
+            return
+        elif vtype == "char":
+            vtype = "str"
+
+        try:
+            vval = eval("np."+vtype+"(%s)" % vval)
+        except AttributeError:
+            vval = eval("np."+_types[vtype]+"(%s)" % vval)
+
+        self.parameters[vname] = vval
+
+    def parse_struct(self, line, ascfile):
+        assert 'struct' in line
+
+        str_types = []
+        comments = []
+        str_lines = []
+        l = ascfile.readline()
+        while "}" not in l:
+            vtype, vnames = get_struct_vars(l)
+            for v in vnames:
+                str_types.append((v, vtype))
+            l = ascfile.readline()
+        num = l.strip("}[]")
+        num = num.strip("\;\\\n]")
+        if len(num) == 0:
+            # We need to compute the number of records.  The DataStruct will
+            # handle this.
+            num = '-1'
+        num = int(num)
+        struct = DataStruct(str_types, num, self.filename)
+        self.structs.append(struct)
+        return
+
+    def set_offsets(self):
+        running_off = self.parameters['header_offset']
+        for struct in self.structs:
+            struct.set_offset(running_off)
+            running_off += struct.size * struct.itemsize
+        return
+
+    def load_memmaps(self):
+        for struct in self.structs:
+            struct.build_memmap()
+            self.update(struct.data)
+
+
+class SDFIndex(object):
+
+    """docstring for SDFIndex
+
+    This provides an index mechanism into the full SDF Dataset.
+
+    Most useful class methods:
+        get_cell_data(level, cell_iarr, fields)
+        iter_bbox_data(left, right, fields)
+        iter_ibbox_data(left, right, fields)
+
+    """
+    def __init__(self, sdfdata, indexdata, level=9):
+        super(SDFIndex, self).__init__()
+        self.sdfdata = sdfdata
+        self.indexdata = indexdata
+        self.level = level
+        self.rmin = None
+        self.rmax = None
+        self.domain_width = None
+        self.domain_buffer = 0
+        self.domain_dims = 0
+        self.domain_active_dims = 0
+        self.masks = {
+            "p" : int("011"*level, 2),
+            "t" : int("101"*level, 2),
+            "r" : int("110"*level, 2),
+            "z" : int("011"*level, 2),
+            "y" : int("101"*level, 2),
+            "x" : int("110"*level, 2),
+            2 : int("011"*level, 2),
+            1 : int("101"*level, 2),
+            0 : int("110"*level, 2),
+        }
+        self.dim_slices = {
+            "p" : slice(0, None, 3),
+            "t" : slice(1, None, 3),
+            "r" : slice(2, None, 3),
+            "z" : slice(0, None, 3),
+            "y" : slice(1, None, 3),
+            "x" : slice(2, None, 3),
+            2 : slice(0, None, 3),
+            1 : slice(1, None, 3),
+            0 : slice(2, None, 3),
+        }
+        self.set_bounds()
+
+    def set_bounds(self):
+        r_0 = self.sdfdata.parameters['R0']
+        DW = 2.0 * r_0
+
+        self.rmin = np.zeros(3)
+        self.rmax = np.zeros(3)
+        sorted_rtp = self.sdfdata.parameters.get("sorted_rtp", False)
+        if sorted_rtp:
+            self.rmin[:] = [0.0, 0.0, -np.pi]
+            self.rmax[:] = [r_0*1.01, 2*np.pi, np.pi]
+        else:
+            self.rmin[0] -= self.sdfdata.parameters.get('Rx', 0.0)
+            self.rmin[1] -= self.sdfdata.parameters.get('Ry', 0.0)
+            self.rmin[2] -= self.sdfdata.parameters.get('Rz', 0.0)
+            self.rmax[0] += self.sdfdata.parameters.get('Rx', r_0)
+            self.rmax[1] += self.sdfdata.parameters.get('Ry', r_0)
+            self.rmax[2] += self.sdfdata.parameters.get('Rz', r_0)
+
+        #/* expand root for non-power-of-two */
+        expand_root = 0.0
+        ic_Nmesh = self.sdfdata.parameters.get('ic_Nmesh',0)
+        if ic_Nmesh != 0:
+            f2 = 1<<int(np.log2(ic_Nmesh-1)+1)
+            if (f2 != ic_Nmesh):
+                expand_root = 1.0*f2/ic_Nmesh - 1.0;
+            print 'Expanding: ', f2, ic_Nmesh, expand_root
+        self.rmin *= 1.0 + expand_root
+        self.rmax *= 1.0 + expand_root
+        self.domain_width = self.rmax - self.rmin
+        self.domain_dims = 1 << self.level
+        self.domain_buffer = (self.domain_dims - int(self.domain_dims/(1.0 + expand_root)))/2
+        self.domain_active_dims = self.domain_dims - 2*self.domain_buffer
+        print 'Domain stuff:', self.domain_width, self.domain_dims, self.domain_active_dims
+
+    def get_key(self, iarr, level=None):
+        if level is None:
+            level = self.level
+        i1, i2, i3 = iarr
+        rep1 = np.binary_repr(i1, width=self.level)
+        rep2 = np.binary_repr(i2, width=self.level)
+        rep3 = np.binary_repr(i3, width=self.level)
+        inter = np.zeros(self.level*3, dtype='c')
+        inter[self.dim_slices[0]] = rep1
+        inter[self.dim_slices[1]] = rep2
+        inter[self.dim_slices[2]] = rep3
+        return int(inter.tostring(), 2)
+
+    def get_key_ijk(self, i1, i2, i3, level=None):
+        return self.get_key(np.array([i1, i2, i3]), level=level)
+
+    def get_slice_key(self, ind, dim='r'):
+        slb = np.binary_repr(ind, width=self.level)
+        expanded = np.array([0]*self.level*3, dtype='c')
+        expanded[self.dim_slices[dim]] = slb
+        return int(expanded.tostring(), 2)
+
+    def get_slice_chunks(self, slice_dim, slice_index):
+        sl_key = self.get_slice_key(slice_index, dim=slice_dim)
+        mask = (self.indexdata['index'] & ~self.masks[slice_dim]) == sl_key
+        offsets = self.indexdata['base'][mask]
+        lengths = self.indexdata['len'][mask]
+        return mask, offsets, lengths
+
+    def get_ibbox_slow(self, ileft, iright):
+        """
+        Given left and right indices, return a mask and
+        set of offsets+lengths into the sdf data.
+        """
+        mask = np.zeros(self.indexdata['index'].shape, dtype='bool')
+        ileft = np.array(ileft)
+        iright = np.array(iright)
+        for i in range(3):
+            left_key = self.get_slice_key(ileft[i], dim=i)
+            right_key= self.get_slice_key(iright[i], dim=i)
+            dim_inds = (self.indexdata['index'] & ~self.masks[i])
+            mask *= (dim_inds >= left_key) * (dim_inds <= right_key)
+            del dim_inds
+
+        offsets = self.indexdata['base'][mask]
+        lengths = self.indexdata['len'][mask]
+        return mask, offsets, lengths
+
+    def get_ibbox(self, ileft, iright):
+        """
+        Given left and right indices, return the index-file keys
+        covering that region of the sdf data.
+        """
+        mask = np.zeros(self.indexdata['index'].shape, dtype='bool')
+
+        print 'Getting data from ileft to iright:',  ileft, iright
+
+        X, Y, Z = np.mgrid[ileft[0]:iright[0]+1,
+                           ileft[1]:iright[1]+1,
+                           ileft[2]:iright[2]+1]
+
+        X = X.ravel()
+        Y = Y.ravel()
+        Z = Z.ravel()
+        # Correct For periodicity
+        X[X < self.domain_buffer] += self.domain_active_dims
+        X[X >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
+        Y[Y < self.domain_buffer] += self.domain_active_dims
+        Y[Y >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
+        Z[Z < self.domain_buffer] += self.domain_active_dims
+        Z[Z >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
+
+        print 'periodic:',  X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()
+
+        indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
+        indices = indices[indices < self.indexdata['index'].shape[0]]
+        return indices
+
+    def get_bbox(self, left, right):
+        """
+        Given left and right edges in position space, convert them to
+        cell indices and return the covering index-file keys.
+        """
+        ileft = np.floor((left - self.rmin) / self.domain_width *  self.domain_dims)
+        iright = np.floor((right - self.rmin) / self.domain_width * self.domain_dims)
+
+        return self.get_ibbox(ileft, iright)
+
+    def get_data(self, chunk, fields):
+        data = {}
+        for field in fields:
+            data[field] = self.sdfdata[field][chunk]
+        return data
+
+    def iter_data(self, inds, fields):
+        num_inds = len(inds)
+        num_reads = 0
+        print 'Reading %i chunks' % num_inds
+        i = 0
+        while (i < num_inds):
+            ind = inds[i]
+            base = self.indexdata['base'][ind]
+            length = self.indexdata['len'][ind]
+            # Concatenate aligned reads
+            nexti = i+1
+            combined = 0
+            while nexti < len(inds):
+                nextind = inds[nexti]
+                #        print 'b: %i l: %i end: %i  next: %i' % ( base, length, base + length, self.indexdata['base'][nextind] )
+                if base + length == self.indexdata['base'][nextind]:
+                    length += self.indexdata['len'][nextind]
+                    i += 1
+                    nexti += 1
+                    combined += 1
+                else:
+                    break
+
+            chunk = slice(base, base+length)
+            print 'Reading chunk %i of length %i after catting %i' % (i, length, combined)
+            num_reads += 1
+            data = self.get_data(chunk, fields)
+            yield data
+            del data
+            i += 1
+        print 'Read %i chunks, batched into %i reads' % (num_inds, num_reads)
+
+    def iter_bbox_data(self, left, right, fields):
+        print 'Loading region from ', left, 'to', right
+        inds = self.get_bbox(left, right)
+        return self.iter_data(inds, fields)
+
+    def iter_ibbox_data(self, left, right, fields):
+        print 'Loading region from ', left, 'to', right
+        inds = self.get_ibbox(left, right)
+        return self.iter_data(inds, fields)
+
+    def get_contiguous_chunk(self, left_key, right_key, fields):
+        max_key = self.indexdata['index'][-1]
+        if left_key > max_key:
+            raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (left_key, max_key))
+        base = self.indexdata['base'][left_key]
+        right_key = min(right_key, self.indexdata['index'][-1])
+        length = self.indexdata['base'][right_key] + \
+            self.indexdata['len'][right_key] - base
+        print 'Getting contiguous chunk of size %i starting at %i' % (length, base)
+        return self.get_data(slice(base, base + length), fields)
+
+    def iter_slice_data(self, slice_dim, slice_index, fields):
+        mask, offsets, lengths = self.get_slice_chunks(slice_dim, slice_index)
+        for off, l in zip(offsets, lengths):
+            data = {}
+            chunk = slice(off, off+l)
+            for field in fields:
+                data[field] = self.sdfdata[field][chunk]
+            yield data
+            del data
+
+    def get_key_bounds(self, level, cell_iarr):
+        """
+        Get the index keys bounding *cell_iarr* at the requested level.
+
+        level: int
+            Requested level
+        cell_iarr: array-like, length 3
+            Requested cell from given level.
+
+        Returns:
+            lmax_lk, lmax_rk
+        """
+        shift = self.level-level
+        level_buff = 0
+        level_lk = self.get_key(cell_iarr + level_buff)
+        level_rk = self.get_key(cell_iarr + level_buff) + 1
+        lmax_lk = (level_lk << shift*3)
+        lmax_rk = (((level_rk) << shift*3) -1)
+        #print "Level ", level, np.binary_repr(level_lk, width=self.level*3), np.binary_repr(level_rk, width=self.level*3)
+        #print "Level ", self.level, np.binary_repr(lmax_lk, width=self.level*3), np.binary_repr(lmax_rk, width=self.level*3)
+        return lmax_lk, lmax_rk
+
+    def get_cell_data(self, level, cell_iarr, fields):
+        """
+        Get data from requested cell
+
+        This uses the raw cell index, and doesn't account for periodicity or
+        an expanded domain (non-power of 2).
+
+        level: int
+            Requested level
+        cell_iarr: array-like, length 3
+            Requested cell from given level.
+        fields: list
+            Requested fields
+
+        Returns:
+            cell_data: dict
+                Dictionary of field_name, field_data
+        """
+        cell_iarr = np.array(cell_iarr)
+        lk, rk = self.get_key_bounds(level, cell_iarr)
+        return self.get_contiguous_chunk(lk, rk, fields)

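SDFIndex.get_key bit-interleaves the three cell indices of a level into a single Morton-style key, with x occupying the lowest slot of each triplet. A pure-Python rendering of the same interleaving (illustrative only):

    import numpy as np

    def interleaved_key(i1, i2, i3, level):
        # inter[2 - d::3] matches dim_slices above: x -> slice(2, None, 3).
        rep = [np.binary_repr(i, width=level) for i in (i1, i2, i3)]
        inter = [''] * (level * 3)
        for d in range(3):
            inter[2 - d::3] = rep[d]
        return int(''.join(inter), 2)

    print(interleaved_key(1, 0, 1, level=2))  # 0b000101 == 5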
diff -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e -r a9ff5830466c1218ca6d6bf25ce7b9b9e10ba124 yt/frontends/sdf/setup.py
--- /dev/null
+++ b/yt/frontends/sdf/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+import glob
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('sdf', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e -r a9ff5830466c1218ca6d6bf25ce7b9b9e10ba124 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -22,6 +22,7 @@
 #    config.add_subpackage("artio2")
     config.add_subpackage("pluto")
     config.add_subpackage("ramses")
+    config.add_subpackage("sdf")
     config.add_subpackage("sph")
     config.add_subpackage("stream")
     config.add_subpackage("boxlib/tests")

diff -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e -r a9ff5830466c1218ca6d6bf25ce7b9b9e10ba124 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -116,7 +116,7 @@
 except ImportError:
     pass
 
-def get_memory_usage():
+def get_memory_usage(subtract_share = False):
     """
     Return the resident memory size in megabytes.
     """
@@ -130,6 +130,7 @@
         return -1024
     line = open(status_file).read()
     size, resident, share, text, library, data, dt = [int(i) for i in line.split()]
+    if subtract_share: resident -= share
     return resident * pagesize / (1024 * 1024) # return in megs
 
 def time_execution(func):
@@ -681,7 +682,7 @@
     return s
 
 @contextlib.contextmanager
-def memory_checker(interval = 15):
+def memory_checker(interval = 15, dest = None):
     r"""This is a context manager that monitors memory usage.
 
     Parameters
@@ -699,6 +700,8 @@
     ...     del arr
     """
     import threading
+    if dest is None:
+        dest = sys.stdout
     class MemoryChecker(threading.Thread):
         def __init__(self, event, interval):
             self.event = event
@@ -707,7 +710,7 @@
 
         def run(self):
             while not self.event.wait(self.interval):
-                print "MEMORY: %0.3e gb" % (get_memory_usage()/1024.)
+                print >> dest, "MEMORY: %0.3e gb" % (get_memory_usage()/1024.)
 
     e = threading.Event()
     mem_check = MemoryChecker(e, interval)

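With the new dest argument, the periodic MEMORY report can be sent to any writable file-like object instead of stdout:

    import sys
    from yt.funcs import memory_checker

    with memory_checker(interval=5, dest=sys.stderr):
        big = [0.0] * 10000000
        del big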
diff -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e -r a9ff5830466c1218ca6d6bf25ce7b9b9e10ba124 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -67,7 +67,8 @@
     cdef np.float64_t DLE[3], DRE[3]
     cdef public np.int64_t nocts
     cdef public int num_domains
-    cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
+    cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = ?,
+                  int max_level = ?)
     cdef int get_root(self, int ind[3], Oct **o)
     cdef Oct **neighbors(self, OctInfo *oinfo, np.int64_t *nneighbors,
                          Oct *o)

diff -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e -r a9ff5830466c1218ca6d6bf25ce7b9b9e10ba124 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -264,24 +264,25 @@
     @cython.wraparound(False)
     @cython.cdivision(True)
     cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = NULL,
-                  ):
+                  int max_level = 99):
         #Given a floating point position, retrieve the most
         #refined oct at that time
-        cdef int ind[3], level
+        cdef int ind32[3]
         cdef np.int64_t ipos[3]
         cdef np.float64_t dds[3], cp[3], pp[3]
         cdef Oct *cur, *next
         cdef int i
         cur = next = NULL
-        level = -1
+        cdef np.int64_t ind[3], level = -1
         for i in range(3):
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
             ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
             cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
             ipos[i] = 0
-        self.get_root(ind, &next)
+            ind32[i] = ind[i]
+        self.get_root(ind32, &next)
         # We want to stop recursing when there's nowhere else to go
-        while next != NULL:
+        while next != NULL and level <= max_level:
             level += 1
             for i in range(3):
                 ipos[i] = (ipos[i] << 1) + ind[i]
@@ -345,7 +346,8 @@
         # grow a stack of parent Octs.
         # Note that in the first iteration, we will just find the up-to-27
         # neighbors, including the main oct.
-        cdef int i, j, k, n, level, ind[3], ii, nfound = 0
+        cdef np.int64_t i, j, k, n, level, ii, nfound = 0, dlevel
+        cdef int ind[3]
         cdef OctList *olist, *my_list
         my_list = olist = NULL
         cdef Oct *cand
@@ -355,6 +357,7 @@
         # ndim is the oct dimensions of the level, not the cell dimensions.
         for i in range(3):
             ndim[i] = <np.int64_t> ((self.DRE[i] - self.DLE[i]) / oi.dds[i])
+            # Here we adjust for oi.dds meaning *cell* width.
             ndim[i] = (ndim[i] >> self.oref)
         my_list = olist = OctList_append(NULL, o)
         for i in range(3):
@@ -379,9 +382,10 @@
                     # correctly, but we might.
                     if cand == NULL: continue
                     for level in range(1, oi.level+1):
+                        dlevel = oi.level - level
                         if cand.children == NULL: break
                         for n in range(3):
-                            ind[n] = (npos[n] >> (oi.level - (level))) & 1
+                            ind[n] = (npos[n] >> dlevel) & 1
                         ii = cind(ind[0],ind[1],ind[2])
                         if cand.children[ii] == NULL: break
                         cand = cand.children[ii]
@@ -908,6 +912,7 @@
                 domain_left_edge, domain_right_edge, partial_coverage,
                  over_refine)
         self.fill_func = oct_visitors.fill_file_indices_rind
+
 cdef OctList *OctList_subneighbor_find(OctList *olist, Oct *top,
                                        int i, int j, int k):
     if top.children == NULL: return olist
@@ -920,7 +925,7 @@
     # For now, we assume we will not be doing this along all three zeros,
     # because that would be pretty tricky.
     if i == j == k == 0: return olist
-    cdef int n[3], ind[3], off[3][2], ii, ij, ik, ci
+    cdef np.int64_t n[3], ind[3], off[3][2], ii, ij, ik, ci
     ind[0] = 1 - i
     ind[1] = 1 - j
     ind[2] = 1 - k

diff -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e -r a9ff5830466c1218ca6d6bf25ce7b9b9e10ba124 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -36,6 +36,9 @@
     np.int64_t pn       # Particle number
     np.float64_t r2     # radius**2
 
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
 cdef inline np.float64_t r2dist(np.float64_t ppos[3],
                                 np.float64_t cpos[3],
                                 np.float64_t DW[3],

diff -r b97de4cd8760e2af4f8a17d30f1bfbef3f67b69e -r a9ff5830466c1218ca6d6bf25ce7b9b9e10ba124 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -28,8 +28,9 @@
 from .amr_kdtools cimport _find_node, Node
 from .grid_traversal cimport VolumeContainer, PartitionedGrid, \
     vc_index, vc_pos_index
+import sys
 
-cdef ContourID *contour_create(np.int64_t contour_id,
+cdef inline ContourID *contour_create(np.int64_t contour_id,
                                ContourID *prev = NULL):
     node = <ContourID *> malloc(sizeof(ContourID))
     #print "Creating contour with id", contour_id
@@ -40,12 +41,12 @@
     if prev != NULL: prev.next = node
     return node
 
-cdef void contour_delete(ContourID *node):
+cdef inline void contour_delete(ContourID *node):
     if node.prev != NULL: node.prev.next = node.next
     if node.next != NULL: node.next.prev = node.prev
     free(node)
 
-cdef ContourID *contour_find(ContourID *node):
+cdef inline ContourID *contour_find(ContourID *node):
     cdef ContourID *temp, *root
     root = node
     # First we find the root
@@ -61,7 +62,7 @@
         node = temp
     return root
 
-cdef void contour_union(ContourID *node1, ContourID *node2):
+cdef inline void contour_union(ContourID *node1, ContourID *node2):
     node1 = contour_find(node1)
     node2 = contour_find(node2)
     if node1.contour_id < node2.contour_id:
@@ -69,7 +70,7 @@
     elif node2.contour_id < node1.contour_id:
         node1.parent = node2
 
-cdef int candidate_contains(CandidateContour *first,
+cdef inline int candidate_contains(CandidateContour *first,
                             np.int64_t contour_id,
                             np.int64_t join_id = -1):
     while first != NULL:
@@ -78,7 +79,7 @@
         first = first.next
     return 0
 
-cdef CandidateContour *candidate_add(CandidateContour *first,
+cdef inline CandidateContour *candidate_add(CandidateContour *first,
                                      np.int64_t contour_id,
                                      np.int64_t join_id = -1):
     cdef CandidateContour *node
@@ -617,7 +618,7 @@
 
 cdef class ParticleContourTree(ContourTree):
     cdef np.float64_t linking_length, linking_length2
-    cdef np.float64_t DW[3]
+    cdef np.float64_t DW[3], DLE[3], DRE[3]
     cdef bint periodicity[3]
 
     def __init__(self, linking_length):
@@ -625,6 +626,7 @@
         self.linking_length2 = linking_length * linking_length
         self.first = self.last = NULL
 
+    @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def identify_contours(self, OctreeContainer octree,
@@ -633,7 +635,7 @@
                                 np.ndarray[np.int64_t, ndim=1] particle_ids,
                                 int domain_id = -1, int domain_offset = 0,
                                 periodicity = (True, True, True),
-                                minimum_count = 8):
+                                int minimum_count = 8):
         cdef np.ndarray[np.int64_t, ndim=1] pdoms, pcount, pind, doff
         cdef np.float64_t pos[3]
         cdef Oct *oct = NULL, **neighbors = NULL
@@ -641,22 +643,27 @@
         cdef ContourID *c0, *c1
         cdef np.int64_t moff = octree.get_domain_offset(domain_id + domain_offset)
         cdef np.int64_t i, j, k, n, nneighbors, pind0, offset
+        cdef int counter = 0
         pcount = np.zeros_like(dom_ind)
         doff = np.zeros_like(dom_ind) - 1
         # First, we find the oct for each particle.
-        pdoms = np.zeros(positions.shape[0], dtype="int64") - 1
+        pdoms = np.zeros(positions.shape[0], dtype="int64")
+        pdoms -= 1
         cdef np.int64_t *pdom = <np.int64_t*> pdoms.data
         # First we allocate our container
         cdef ContourID **container = <ContourID**> malloc(
             sizeof(ContourID*) * positions.shape[0])
         for i in range(3):
             self.DW[i] = (octree.DRE[i] - octree.DLE[i])
+            self.DLE[i] = octree.DLE[i]
+            self.DRE[i] = octree.DRE[i]
             self.periodicity[i] = periodicity[i]
         for i in range(positions.shape[0]):
+            counter += 1
             container[i] = NULL
             for j in range(3):
                 pos[j] = positions[i, j]
-            oct = octree.get(pos)
+            oct = octree.get(pos, NULL)
             if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
                 continue
             offset = oct.domain_ind - moff
@@ -670,14 +677,18 @@
             offset = pdoms[pind[i]]
             if doff[offset] < 0:
                 doff[offset] = i
+        del pdoms
         cdef int nsize = 27
         cdef np.int64_t *nind = <np.int64_t *> malloc(sizeof(np.int64_t)*nsize)
-        cdef int counter = 0
+        counter = 0
+        cdef np.int64_t frac = <np.int64_t> (doff.shape[0] / 20.0)
+        print >> sys.stderr, "Will be outputting every", frac
+        cdef int inside, skip_early
         for i in range(doff.shape[0]):
+            if counter >= frac:
+                counter = 0
+                print >> sys.stderr, "FOF-ing % 5.1f%% done" % ((100.0 * i)/doff.size)
             counter += 1
-            if counter == 10000:
-                counter = 0
-                #print "FOF-ing % 5.1f%% done" % ((100.0 * i)/doff.size)
             # Any particles found for this oct?
             if doff[i] < 0: continue
             offset = pind[doff[i]]
@@ -703,7 +714,8 @@
                     break
             # This is allocated by the neighbors function, so we deallocate it.
             free(neighbors)
-            # Now we look at each particle.
+            # We might know that all our internal particles are linked.
+            # Otherwise, we look at each particle.
             for j in range(pcount[i]):
                 # Note that this offset is the particle index
                 pind0 = pind[doff[i] + j]
@@ -721,28 +733,32 @@
                                         offset, pind0, 
                                         doff[i] + j)
         cdef np.ndarray[np.int64_t, ndim=1] contour_ids
-        contour_ids = -1 * np.ones(positions.shape[0], dtype="int64")
+        contour_ids = np.ones(positions.shape[0], dtype="int64")
+        contour_ids *= -1
         # Sort on our particle IDs.
         for i in range(doff.shape[0]):
             if doff[i] < 0: continue
             for j in range(pcount[i]):
-                poffset = doff[i] + j
-                c1 = container[poffset]
+                offset = pind[doff[i] + j]
+                c1 = container[offset]
                 c0 = contour_find(c1)
-                contour_ids[pind[poffset]] = c0.contour_id
+                contour_ids[offset] = c0.contour_id
                 c0.count += 1
         for i in range(doff.shape[0]):
             if doff[i] < 0: continue
             for j in range(pcount[i]):
-                poffset = doff[i] + j
-                c1 = container[poffset]
+                offset = pind[doff[i] + j]
+                c1 = container[offset]
                 if c1 == NULL: continue
                 c0 = contour_find(c1)
+                offset = pind[offset]
                 if c0.count < minimum_count:
-                    contour_ids[pind[poffset]] = -1
+                    contour_ids[offset] = -1
         free(container)
+        del pind
         return contour_ids
 
+    @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
     cdef void link_particles(self, ContourID **container, 
@@ -753,7 +769,8 @@
                                    np.int64_t pind0,
                                    np.int64_t poffset):
         # Now we look at each particle and evaluate it
-        cdef np.float64_t pos0[3], pos1[3], d
+        cdef np.float64_t pos0[3], pos1[3], edges[2][3]
+        cdef int link
         cdef ContourID *c0, *c1
         cdef np.int64_t pind1
         cdef int i, j, k
@@ -765,23 +782,62 @@
             self.last = c0
             if self.first == NULL:
                 self.first = c0
-        c0 = contour_find(c0)
-        container[pind0] = c0
+        c0 = container[pind0] = contour_find(c0)
         for i in range(3):
+            # We make a very conservative guess here about the edges.
             pos0[i] = positions[pind0*3 + i]
+            edges[0][i] = pos0[i] - self.linking_length*1.01
+            edges[1][i] = pos0[i] + self.linking_length*1.01
+            if edges[0][i] < self.DLE[i] or edges[0][i] > self.DRE[i]:
+                # We skip this one, since we're close to the boundary
+                edges[0][i] = -1e30
+                edges[1][i] = 1e30
+        # Let's set up some bounds for the particles.  Maybe we can get away
+        # with reducing our number of calls to r2dist_early.
         for i in range(pcount):
             pind1 = pind[noffset + i]
             if pind1 == pind0: continue
+            c1 = container[pind1]
+            if c1 != NULL and c1.contour_id == c0.contour_id:
+                # Already linked.
+                continue
             for j in range(3):
                 pos1[j] = positions[pind1*3 + j]
-            d = r2dist(pos0, pos1, self.DW, self.periodicity)
-            if d > self.linking_length2:
-                continue
-            c1 = container[pind1]
+            link = r2dist_early(pos0, pos1, self.DW, self.periodicity,
+                                self.linking_length2, edges)
+            if link == 0: continue
             if c1 == NULL:
-                container[pind1] = c1 = contour_create(
-                    noffset + i, self.last)
-            contour_union(c0, c1)
-            c0 = c1 = contour_find(c0)
-            container[pind1] = c0
-            container[pind0] = c0
+                container[pind1] = c0
+            elif c0.contour_id != c1.contour_id:
+                contour_union(c0, c1)
+                c0 = container[pind1] = container[pind0] = contour_find(c0)
+
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef inline int r2dist_early(np.float64_t ppos[3],
+                             np.float64_t cpos[3],
+                             np.float64_t DW[3],
+                             bint periodicity[3],
+                             np.float64_t max_r2,
+                             np.float64_t edges[2][3]):
+    cdef int i
+    cdef np.float64_t r2, DR
+    r2 = 0.0
+    cdef int inside = 0
+    for i in range(3):
+        if cpos[i] < edges[0][i]:
+            return 0
+        if cpos[i] > edges[1][i]:
+            return 0
+    for i in range(3):
+        DR = (ppos[i] - cpos[i])
+        if not periodicity[i]:
+            pass
+        elif (DR > DW[i]/2.0):
+            DR -= DW[i]
+        elif (DR < -DW[i]/2.0):
+            DR += DW[i]
+        r2 += DR * DR
+        if r2 > max_r2: return 0
+    return 1
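
The new r2dist_early above is an early-exit distance test: the precomputed
edges give a cheap bounding-box rejection, and the squared distance is
accumulated one axis at a time under the minimum-image convention, bailing
out as soon as it exceeds the squared linking length (contour_find and
contour_union are the usual disjoint-set operations).  A minimal
pure-Python restatement of the same logic, for readers skimming the diff
(the Cython above is authoritative):

def r2dist_early(ppos, cpos, DW, periodicity, max_r2, edges):
    # Bounding-box pre-check: reject before computing any distance.
    for i in range(3):
        if cpos[i] < edges[0][i] or cpos[i] > edges[1][i]:
            return False
    r2 = 0.0
    for i in range(3):
        dr = ppos[i] - cpos[i]
        if periodicity[i]:          # minimum-image convention
            if dr > DW[i] / 2.0:
                dr -= DW[i]
            elif dr < -DW[i] / 2.0:
                dr += DW[i]
        r2 += dr * dr
        if r2 > max_r2:             # early termination, axis by axis
            return False
    return True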


https://bitbucket.org/yt_analysis/yt/commits/291f78f26b65/
Changeset:   291f78f26b65
Branch:      yt-3.0
User:        samskillman
Date:        2014-04-18 00:56:39+00:00
Summary:     Modify lines further to allow injecting values directly onto a numpy array. What we really need is a zdepth-aware lines function that blends/masks/etc.; that would be the place where fancier things like tubes are drawn. Also change the dz calculation to be positive. Not sure I understand why this works yet, but it seems to.
Affected #:  2 files

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r 291f78f26b65348d5c6a197f54b0b524b43df9d7 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -228,6 +228,7 @@
     cdef int dx, dy, sx, sy, e2, err
     cdef np.int64_t x0, x1, y0, y1
     cdef int has_alpha = (image.shape[2] == 4)
+    cdef int no_color = (image.shape[2] < 3)
     for j in range(0, nl, 2):
         # From wikipedia http://en.wikipedia.org/wiki/Bresenham's_line_algorithm
         x0 = xs[j] 
@@ -237,13 +238,18 @@
         dx = abs(x1-x0)
         dy = abs(y1-y0)
         err = dx - dy
-        if has_alpha:
+
+        if no_color:
+            for i in range(4):
+                alpha[i] = colors[j, 0]
+        elif has_alpha:
             for i in range(4):
                 alpha[i] = colors[j/points_per_color,i]
         else:
             for i in range(3):
                 alpha[i] = colors[j/points_per_color,3]*\
                         colors[j/points_per_color,i]
+
         if x0 < x1:
             sx = 1
         else:
@@ -260,7 +266,9 @@
             if x0 >= thick and x0 < nx-thick and y0 >= thick and y0 < ny-thick:
                 for xi in range(x0-thick/2, x0+(1+thick)/2):
                     for yi in range(y0-thick/2, y0+(1+thick)/2):
-                        if has_alpha:
+                        if no_color:
+                            image[xi, yi, 0] = fmin(alpha[i], image[xi, yi, 0])
+                        elif has_alpha:
                             image[xi, yi, 3] = outa = alpha[3] + image[xi, yi, 3]*(1-alpha[3])
                             if outa != 0.0:
                                 outa = 1.0/outa
@@ -273,6 +281,7 @@
                                 image[xi, yi, i] = \
                                         (1.-alpha[i])*image[xi,yi,i] + alpha[i]
 
+
             if (x0 == x1 and y0 == y1):
                 break
             e2 = 2*err
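
An aside on the blending above: the has_alpha branch follows the standard
straight-alpha "over" operator (new alpha is src_a + dst_a * (1 - src_a),
with colors renormalized by the result), while the new no_color branch
simply min-injects a scalar into a single-channel buffer.  A hedged NumPy
sketch of the "over" step for one pixel (not the exact yt arithmetic,
part of which is cut off in the hunk above):

import numpy as np

def blend_over(src, dst):
    # src, dst: length-4 arrays of straight (non-premultiplied) RGBA.
    sa, da = src[3], dst[3]
    out_a = sa + da * (1.0 - sa)
    if out_a == 0.0:
        return np.zeros(4)
    out_rgb = (src[:3] * sa + dst[:3] * da * (1.0 - sa)) / out_a
    return np.append(out_rgb, out_a)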

diff -r a6ac57b7a42460943f9827ba4e967e9614b88fdc -r 291f78f26b65348d5c6a197f54b0b524b43df9d7 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -112,7 +112,7 @@
             res = camera.resolution
         dx = np.dot(pos - self.origin.d, camera.unit_vectors[1])
         dy = np.dot(pos - self.origin.d, camera.unit_vectors[0])
-        dz = np.dot(pos - self.front_center.d, camera.unit_vectors[2])
+        dz = np.dot(pos - self.front_center.d, -camera.unit_vectors[2])
         # Transpose into image coords.
         py = (res[0]*(dx/camera.width[0].d)).astype('int')
         px = (res[1]*(dy/camera.width[1].d)).astype('int')


https://bitbucket.org/yt_analysis/yt/commits/7e4cfd0d5b1c/
Changeset:   7e4cfd0d5b1c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-03 15:14:49+00:00
Summary:     Commenting out import
Affected #:  1 file

diff -r a9ff5830466c1218ca6d6bf25ce7b9b9e10ba124 -r 7e4cfd0d5b1c4c77903b6b4741077bf5d35b7168 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -14,7 +14,7 @@
 #-----------------------------------------------------------------------------
 
 from yt.funcs import *
-from .volume_rendering.api import off_axis_projection
+#from .volume_rendering.api import off_axis_projection
 from yt.data_objects.image_array import ImageArray
 from yt.utilities.lib.misc_utilities import \
     pixelize_cylinder


https://bitbucket.org/yt_analysis/yt/commits/3d359d34eedb/
Changeset:   3d359d34eedb
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-03 15:15:05+00:00
Summary:     Merging from Sam
Affected #:  2 files

diff -r 7e4cfd0d5b1c4c77903b6b4741077bf5d35b7168 -r 3d359d34eedbb0c03f6d13ba24a2ce73f4da4002 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -228,6 +228,7 @@
     cdef int dx, dy, sx, sy, e2, err
     cdef np.int64_t x0, x1, y0, y1
     cdef int has_alpha = (image.shape[2] == 4)
+    cdef int no_color = (image.shape[2] < 3)
     for j in range(0, nl, 2):
         # From wikipedia http://en.wikipedia.org/wiki/Bresenham's_line_algorithm
         x0 = xs[j] 
@@ -237,13 +238,18 @@
         dx = abs(x1-x0)
         dy = abs(y1-y0)
         err = dx - dy
-        if has_alpha:
+
+        if no_color:
+            for i in range(4):
+                alpha[i] = colors[j, 0]
+        elif has_alpha:
             for i in range(4):
                 alpha[i] = colors[j/points_per_color,i]
         else:
             for i in range(3):
                 alpha[i] = colors[j/points_per_color,3]*\
                         colors[j/points_per_color,i]
+
         if x0 < x1:
             sx = 1
         else:
@@ -260,7 +266,9 @@
             if x0 >= thick and x0 < nx-thick and y0 >= thick and y0 < ny-thick:
                 for xi in range(x0-thick/2, x0+(1+thick)/2):
                     for yi in range(y0-thick/2, y0+(1+thick)/2):
-                        if has_alpha:
+                        if no_color:
+                            image[xi, yi, 0] = fmin(alpha[i], image[xi, yi, 0])
+                        elif has_alpha:
                             image[xi, yi, 3] = outa = alpha[3] + image[xi, yi, 3]*(1-alpha[3])
                             if outa != 0.0:
                                 outa = 1.0/outa
@@ -273,6 +281,7 @@
                                 image[xi, yi, i] = \
                                         (1.-alpha[i])*image[xi,yi,i] + alpha[i]
 
+
             if (x0 == x1 and y0 == y1):
                 break
             e2 = 2*err

diff -r 7e4cfd0d5b1c4c77903b6b4741077bf5d35b7168 -r 3d359d34eedbb0c03f6d13ba24a2ce73f4da4002 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -112,7 +112,7 @@
             res = camera.resolution
         dx = np.dot(pos - self.origin.d, camera.unit_vectors[1])
         dy = np.dot(pos - self.origin.d, camera.unit_vectors[0])
-        dz = np.dot(pos - self.front_center.d, camera.unit_vectors[2])
+        dz = np.dot(pos - self.front_center.d, -camera.unit_vectors[2])
         # Transpose into image coords.
         py = (res[0]*(dx/camera.width[0].d)).astype('int')
         px = (res[1]*(dy/camera.width[1].d)).astype('int')


https://bitbucket.org/yt_analysis/yt/commits/82a15c662863/
Changeset:   82a15c662863
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-03 15:38:39+00:00
Summary:     Fixing unbound self.bounds.
Affected #:  1 file

diff -r 3d359d34eedbb0c03f6d13ba24a2ce73f4da4002 -r 82a15c6628632866542c7afb0f985d355a316de8 yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -67,7 +67,7 @@
         if bounds is None:
             bounds = self.pf.h.all_data().quantities['Extrema'](self.field)
             bounds = [b.ndarray_view() for b in bounds]
-            self.bounds = bounds
+        self.bounds = bounds
 
         # Do some error checking.
         assert(len(self.bounds) == 2)


https://bitbucket.org/yt_analysis/yt/commits/87b01e823a5c/
Changeset:   87b01e823a5c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-03 17:38:31+00:00
Summary:     Max out max_t at 1.0.
Affected #:  1 file

diff -r 82a15c6628632866542c7afb0f985d355a316de8 -r 87b01e823a5c69f29b809123b1e6c5ee726bdbbf yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -804,6 +804,7 @@
     cdef np.float64_t tmax[3], tdelta[3]
     cdef np.float64_t dist, alpha, dt, exit_t, enter_t = -1.0
     cdef np.float64_t tr, tl, temp_x, temp_y, dv
+    if max_t > 1.0: max_t = 1.0
     direction = -1
     if vc.left_edge[0] <= v_pos[0] and v_pos[0] <= vc.right_edge[0] and \
        vc.left_edge[1] <= v_pos[1] and v_pos[1] <= vc.right_edge[1] and \


https://bitbucket.org/yt_analysis/yt/commits/9896f03c418c/
Changeset:   9896f03c418c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-05 02:43:50+00:00
Summary:     First pass at project_to_plane for fisheye lens.
Affected #:  1 file

diff -r 87b01e823a5c69f29b809123b1e6c5ee726bdbbf -r 9896f03c418c10e8c94511c1b4b0df7b775080c6 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -250,6 +250,24 @@
         """
         self.viewpoint = self.center
 
+    def project_to_plane(self, camera, pos, res=None):
+        if res is None:
+            res = camera.resolution
+        # the return values here need to be px, py, dz
+        # these are the coordinates and dz for the resultant image.
+        # Basically, what we need is an inverse projection from the fisheye
+        # vectors back onto the plane.  arr_fisheye_vectors goes from px, py to
+        # vector, and we need the reverse.
+        theta = np.arccos(pos[:,2]) 
+        fov_rad = self.fov * np.pi / 180.0
+        r = 2.0 * theta / fov_rad
+        phi = np.arccos(pos[:,0] / np.sin(theta))
+        px = r * np.cos(phi) + 1.0
+        py = r * np.sin(phi) + 1.0
+        dz = np.zeros_like(py) + 1.0
+        px = np.rint(px * res)
+        py = np.rint(py * res)
+        return px, py, dz
 
 lenses = {'plane-parallel': PlaneParallelLens,
           'perspective': PerspectiveLens,
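
One caveat in this first pass at project_to_plane, which the following
commits address: phi is recovered with arccos, which can only return
angles in [0, pi], so any point with a negative y component lands in the
wrong quadrant (and pos is not yet normalized or made camera-relative).
arctan2 keeps the sign:

import numpy as np

x, y = 0.5, -0.5
phi_acos = np.arccos(x / np.hypot(x, y))   # 0.785..., sign of y lost
phi_atan2 = np.arctan2(y, x)               # -0.785..., correct quadrant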


https://bitbucket.org/yt_analysis/yt/commits/8043451eb47a/
Changeset:   8043451eb47a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-06 03:50:25+00:00
Summary:     More changes to try to get fisheye working.
Affected #:  4 files

diff -r 9896f03c418c10e8c94511c1b4b0df7b775080c6 -r 8043451eb47ab152a5666ab360fd20fefc01a8a2 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -261,12 +261,15 @@
         theta = np.arccos(pos[:,2]) 
         fov_rad = self.fov * np.pi / 180.0
         r = 2.0 * theta / fov_rad
-        phi = np.arccos(pos[:,0] / np.sin(theta))
-        px = r * np.cos(phi) + 1.0
-        py = r * np.sin(phi) + 1.0
-        dz = np.zeros_like(py) + 1.0
-        px = np.rint(px * res)
-        py = np.rint(py * res)
+        phi = np.arctan2(pos[:,1], pos[:,0])
+        px = r * np.cos(phi) + 0.0
+        py = r * np.sin(phi) + 0.0
+        u = camera.focus.uq
+        # dz is distance the ray would travel
+        dp = pos - camera.position
+        dz = (dp * dp).sum(axis=1)**0.5 / self.radius
+        px = (u * np.rint(px * res[0])).astype("int64")
+        py = (u * np.rint(py * res[1])).astype("int64")
         return px, py, dz
 
 lenses = {'plane-parallel': PlaneParallelLens,

diff -r 9896f03c418c10e8c94511c1b4b0df7b775080c6 -r 8043451eb47ab152a5666ab360fd20fefc01a8a2 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -165,7 +165,7 @@
         return self.current_image
 
     def finalize_image(self, camera, image):
-        image.shape = camera.resolution[0], camera.resolution[1], 4
+        #image.shape = camera.resolution[0], camera.resolution[1], 4
         image = self.volume.reduce_tree_images(image,
                                                camera.lens.viewpoint)
         if self.transfer_function.grey_opacity is False:

diff -r 9896f03c418c10e8c94511c1b4b0df7b775080c6 -r 8043451eb47ab152a5666ab360fd20fefc01a8a2 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -131,12 +131,10 @@
         return
 
     def composite(self):
+        # TODO: Sam, does this look right?
         cam = self.default_camera
-        opaque = ZBuffer(
-            np.zeros([cam.resolution[0],
-                     cam.resolution[1],
-                     4]),
-            np.ones(cam.resolution) * np.inf)
+        empty = cam.lens.new_image(cam)
+        opaque = ZBuffer(empty, np.ones(empty.shape[:2]) * np.inf)
 
         for k, source in self.iter_opaque_sources():
             print "Adding opaque source:", source

diff -r 9896f03c418c10e8c94511c1b4b0df7b775080c6 -r 8043451eb47ab152a5666ab360fd20fefc01a8a2 yt/visualization/volume_rendering/zbuffer_array.py
--- a/yt/visualization/volume_rendering/zbuffer_array.py
+++ b/yt/visualization/volume_rendering/zbuffer_array.py
@@ -29,7 +29,12 @@
     def __add__(self, other):
         assert(self.shape == other.shape)
         f_or_b = self.z < other.z
-        rgba = (self.rgba.T * f_or_b).T + (other.rgba.T * (1 - f_or_b)).T
+        if self.z.shape[1] == 1:
+            # Non-rectangular
+            rgba = (self.rgba * f_or_b[:,None,:])
+            rgba += (other.rgba * (1.0 - f_or_b)[:,None,:])
+        else:
+            rgba = (self.rgba.T * f_or_b).T + (other.rgba.T * (1 - f_or_b)).T
         z = np.min([self.z, other.z], axis=0)
         return ZBuffer(rgba, z)
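
The __add__ above composites two buffers by depth: per pixel, keep the
RGBA of whichever buffer is nearer, along with the smaller z.  A NumPy
sketch of the rectangular branch (hypothetical free function, assuming
matching (nx, ny, 4) color and (nx, ny) depth shapes):

import numpy as np

def zbuffer_add(rgba_a, z_a, rgba_b, z_b):
    front = z_a < z_b                    # True where buffer A is nearer
    rgba = rgba_a * front[..., None] + rgba_b * (~front)[..., None]
    return rgba, np.minimum(z_a, z_b)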
 


https://bitbucket.org/yt_analysis/yt/commits/7225395f6070/
Changeset:   7225395f6070
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-06 04:06:13+00:00
Summary:     Nearing the right fisheye projection.
Affected #:  1 file

diff -r 8043451eb47ab152a5666ab360fd20fefc01a8a2 -r 7225395f6070f6ca4e46e7877b61668858bced98 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -258,18 +258,21 @@
         # Basically, what we need is an inverse projection from the fisheye
         # vectors back onto the plane.  arr_fisheye_vectors goes from px, py to
         # vector, and we need the reverse.
-        theta = np.arccos(pos[:,2]) 
+        lpos = camera.position - pos
+        theta = np.arccos(lpos[:,2]) 
         fov_rad = self.fov * np.pi / 180.0
         r = 2.0 * theta / fov_rad
-        phi = np.arctan2(pos[:,1], pos[:,0])
-        px = r * np.cos(phi) + 0.0
-        py = r * np.sin(phi) + 0.0
+        phi = np.arctan2(lpos[:,1], lpos[:,0])
+        px = r * np.cos(phi)
+        py = r * np.sin(phi)
         u = camera.focus.uq
         # dz is distance the ray would travel
         dp = pos - camera.position
         dz = (dp * dp).sum(axis=1)**0.5 / self.radius
-        px = (u * np.rint(px * res[0])).astype("int64")
-        py = (u * np.rint(py * res[1])).astype("int64")
+        px = (px + 1.0) * res[0] / 2.0
+        py = (py + 1.0) * res[1] / 2.0
+        px = (u * np.rint(px)).astype("int64")
+        py = (u * np.rint(py)).astype("int64")
         return px, py, dz
 
 lenses = {'plane-parallel': PlaneParallelLens,


https://bitbucket.org/yt_analysis/yt/commits/f85dbaeabc54/
Changeset:   f85dbaeabc54
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-06 11:03:50+00:00
Summary:     Fisheye plane projection.
Affected #:  1 file

diff -r 7225395f6070f6ca4e46e7877b61668858bced98 -r f85dbaeabc54429b1a51a9f449c39701b306d34c yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -259,7 +259,9 @@
         # vectors back onto the plane.  arr_fisheye_vectors goes from px, py to
         # vector, and we need the reverse.
         lpos = camera.position - pos
-        theta = np.arccos(lpos[:,2]) 
+        mag = (lpos * lpos).sum(axis=1)**0.5
+        dz = mag / self.radius
+        theta = np.arccos(lpos[:,2] / mag) 
         fov_rad = self.fov * np.pi / 180.0
         r = 2.0 * theta / fov_rad
         phi = np.arctan2(lpos[:,1], lpos[:,0])
@@ -267,8 +269,6 @@
         py = r * np.sin(phi)
         u = camera.focus.uq
         # dz is distance the ray would travel
-        dp = pos - camera.position
-        dz = (dp * dp).sum(axis=1)**0.5 / self.radius
         px = (px + 1.0) * res[0] / 2.0
         py = (py + 1.0) * res[1] / 2.0
         px = (u * np.rint(px)).astype("int64")
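
At this point project_to_plane is a complete inverse of the fisheye map:
normalize the camera-relative position, read the polar angle off the z
component, convert it to an image-plane radius, and recover the azimuth
with arctan2.  A condensed NumPy restatement of the committed code,
dropping the unit bookkeeping through u (fov in degrees, res = (nx, ny)):

import numpy as np

def fisheye_project_to_plane(lpos, fov_deg, radius, res):
    mag = np.sqrt((lpos * lpos).sum(axis=1))
    dz = mag / radius                      # depth in units of the lens radius
    theta = np.arccos(lpos[:, 2] / mag)    # polar angle from the view axis
    r = 2.0 * theta / np.radians(fov_deg)  # equidistant model: r ~ theta
    phi = np.arctan2(lpos[:, 1], lpos[:, 0])
    px = (r * np.cos(phi) + 1.0) * res[0] / 2.0
    py = (r * np.sin(phi) + 1.0) * res[1] / 2.0
    return np.rint(px).astype("int64"), np.rint(py).astype("int64"), dz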


https://bitbucket.org/yt_analysis/yt/commits/b29cafcf81d8/
Changeset:   b29cafcf81d8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-06 14:35:53+00:00
Summary:     Fix rotation matrices that weren't set correctly for the fisheye lens, and speed things up.
Affected #:  2 files

diff -r f85dbaeabc54429b1a51a9f449c39701b306d34c -r b29cafcf81d8858d9d9cf18c0b05e029820aaf49 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -19,7 +19,7 @@
 #cimport healpix_interface
 from libc.stdlib cimport malloc, free, abs
 from libc.math cimport exp, floor, log2, \
-    lrint, fabs, atan, asin, cos, sin, sqrt
+    lrint, fabs, atan, atan2, asin, cos, sin, sqrt
 from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip
 from field_interpolation_tables cimport \
     FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer,\
@@ -1091,22 +1091,16 @@
     cdef np.ndarray[np.float64_t, ndim=3] vp
     cdef int i, j, k
     cdef np.float64_t r, phi, theta, px, py
-    cdef np.float64_t pi = 3.1415926
-    cdef np.float64_t fov_rad = fov * pi / 180.0
+    cdef np.float64_t fov_rad = fov * np.pi / 180.0
     cdef int nx = resolution/nimx
     cdef int ny = resolution/nimy
     vp = np.zeros((nx,ny, 3), dtype="float64")
     for i in range(nx):
-        px = 2.0 * (nimi*nx + i) / (resolution) - 1.0
+        px = (2.0 * (nimi*nx + i)) / resolution - 1.0
         for j in range(ny):
-            py = 2.0 * (nimj*ny + j) / (resolution) - 1.0
+            py = (2.0 * (nimj*ny + j)) / resolution - 1.0
             r = (px*px + py*py)**0.5
-            if r == 0.0:
-                phi = 0.0
-            elif px < 0:
-                phi = pi - asin(py / r)
-            else:
-                phi = asin(py / r)
+            phi = atan2(py, px)
             theta = r * fov_rad / 2.0
             theta += off_theta
             phi += off_phi
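
For context, arr_fisheye_vectors is the forward map from pixels to unit
view vectors: the radius in the image plane sets the polar angle
(theta = r * fov / 2, the equidistant fisheye model), and atan2 now gives
the azimuth directly instead of the old asin branching.  A NumPy sketch of
the whole map; the final spherical-to-Cartesian assembly sits outside the
hunk above, so the standard form is assumed here:

import numpy as np

def fisheye_vectors(resolution, fov_deg):
    fov_rad = np.radians(fov_deg)
    p = 2.0 * np.arange(resolution) / resolution - 1.0
    PX, PY = np.meshgrid(p, p, indexing="ij")
    r = np.sqrt(PX * PX + PY * PY)
    phi = np.arctan2(PY, PX)            # replaces the asin() branches
    theta = r * fov_rad / 2.0
    vp = np.empty((resolution, resolution, 3))
    vp[..., 0] = np.sin(theta) * np.cos(phi)
    vp[..., 1] = np.sin(theta) * np.sin(phi)
    vp[..., 2] = np.cos(theta)
    return vp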

diff -r f85dbaeabc54429b1a51a9f449c39701b306d34c -r b29cafcf81d8858d9d9cf18c0b05e029820aaf49 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -215,7 +215,7 @@
         return self.current_image
 
     def get_sampler_params(self, camera, render_source):
-        vp = arr_fisheye_vectors(camera.resolution[0], self.fov)
+        vp = -arr_fisheye_vectors(camera.resolution[0], self.fov)
         vp.shape = (camera.resolution[0]**2, 1, 3)
         vp2 = vp.copy()
         for i in range(3):
@@ -260,8 +260,9 @@
         # vector, and we need the reverse.
         lpos = camera.position - pos
         mag = (lpos * lpos).sum(axis=1)**0.5
+        lpos /= mag[:,None]
         dz = mag / self.radius
-        theta = np.arccos(lpos[:,2] / mag) 
+        theta = np.arccos(lpos[:,2])
         fov_rad = self.fov * np.pi / 180.0
         r = 2.0 * theta / fov_rad
         phi = np.arctan2(lpos[:,1], lpos[:,0])


https://bitbucket.org/yt_analysis/yt/commits/618c4642b08e/
Changeset:   618c4642b08e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-20 19:43:40+00:00
Summary:     Fixing project_to_plane with rotation matrix
Affected #:  1 file

diff -r b29cafcf81d8858d9d9cf18c0b05e029820aaf49 -r 618c4642b08e9ec2020cc741628be672553defe5 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -259,6 +259,9 @@
         # vectors back onto the plane.  arr_fisheye_vectors goes from px, py to
         # vector, and we need the reverse.
         lpos = camera.position - pos
+        lpos2 = lpos.copy()
+        for i in range(3):
+          lpos[:,i] = (lpos2 * self.rotation_matrix[i,:]).sum()
         mag = (lpos * lpos).sum(axis=1)**0.5
         lpos /= mag[:,None]
         dz = mag / self.radius


https://bitbucket.org/yt_analysis/yt/commits/833961c85ba5/
Changeset:   833961c85ba5
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-20 19:58:43+00:00
Summary:     Re-enabling CTF in imports, fixing plane projection for fisheye.
Affected #:  2 files

diff -r 618c4642b08e9ec2020cc741628be672553defe5 -r 833961c85ba51310608a13c437603e6a3efd7790 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -144,7 +144,8 @@
     ProjectionPlot, OffAxisProjectionPlot, \
     show_colormaps, ProfilePlot, PhasePlot
 
-#from yt.visualization.volume_rendering.api import \
+from yt.visualization.volume_rendering.api import \
+    ColorTransferFunction
 #    off_axis_projection, ColorTransferFunction, \
 #    TransferFunctionHelper
 

diff -r 618c4642b08e9ec2020cc741628be672553defe5 -r 833961c85ba51310608a13c437603e6a3efd7790 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -261,7 +261,7 @@
         lpos = camera.position - pos
         lpos2 = lpos.copy()
         for i in range(3):
-          lpos[:,i] = (lpos2 * self.rotation_matrix[i,:]).sum()
+          lpos[:,i] = (lpos2 * self.rotation_matrix[i,:]).sum(axis=1)
         mag = (lpos * lpos).sum(axis=1)**0.5
         lpos /= mag[:,None]
         dz = mag / self.radius


https://bitbucket.org/yt_analysis/yt/commits/117cf71eb7d8/
Changeset:   117cf71eb7d8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-21 20:25:32+00:00
Summary:     Change to using dot products.
Affected #:  1 file

diff -r 833961c85ba51310608a13c437603e6a3efd7790 -r 117cf71eb7d88248e4c3290809f32bb7ad6f4e49 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -217,10 +217,7 @@
     def get_sampler_params(self, camera, render_source):
         vp = -arr_fisheye_vectors(camera.resolution[0], self.fov)
         vp.shape = (camera.resolution[0]**2, 1, 3)
-        vp2 = vp.copy()
-        for i in range(3):
-            vp[:, :, i] = (vp2 * self.rotation_matrix[:, i]).sum(axis=2)
-        del vp2
+        vp = vp.dot(self.rotation_matrix)
         vp *= self.radius
         uv = np.ones(3, dtype='float64')
         positions = np.ones((camera.resolution[0]**2, 1, 3),
@@ -258,10 +255,11 @@
         # Basically, what we need is an inverse projection from the fisheye
         # vectors back onto the plane.  arr_fisheye_vectors goes from px, py to
         # vector, and we need the reverse.
+        # First, we transform lpos into camera-relative coordinates.
         lpos = camera.position - pos
-        lpos2 = lpos.copy()
-        for i in range(3):
-          lpos[:,i] = (lpos2 * self.rotation_matrix[i,:]).sum(axis=1)
+        inv_mat = np.linalg.inv(self.rotation_matrix)
+        lpos = lpos.dot(self.rotation_matrix)
+        #lpos = lpos.dot(self.rotation_matrix)
         mag = (lpos * lpos).sum(axis=1)**0.5
         lpos /= mag[:,None]
         dz = mag / self.radius


https://bitbucket.org/yt_analysis/yt/commits/6831f1590421/
Changeset:   6831f1590421
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-21 21:18:59+00:00
Summary:     One more fix.
Affected #:  1 file

diff -r 117cf71eb7d88248e4c3290809f32bb7ad6f4e49 -r 6831f159042134e19b1bf8b2cd045463df532c21 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -217,7 +217,7 @@
     def get_sampler_params(self, camera, render_source):
         vp = -arr_fisheye_vectors(camera.resolution[0], self.fov)
         vp.shape = (camera.resolution[0]**2, 1, 3)
-        vp = vp.dot(self.rotation_matrix)
+        vp = vp.dot(np.linalg.inv(self.rotation_matrix))
         vp *= self.radius
         uv = np.ones(3, dtype='float64')
         positions = np.ones((camera.resolution[0]**2, 1, 3),
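
A side note on this fix: camera rotation matrices are orthonormal, so
np.linalg.inv here is equivalent (up to round-off) to a plain transpose,
which is cheaper and never singular.  A quick standalone check:

import numpy as np

t = np.pi / 2.0
R = np.array([[np.cos(t), -np.sin(t), 0.0],
              [np.sin(t),  np.cos(t), 0.0],
              [0.0,        0.0,       1.0]])   # rotate 90 deg about z
vp = np.random.rand(8, 3)
assert np.allclose(vp.dot(np.linalg.inv(R)), vp.dot(R.T))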


https://bitbucket.org/yt_analysis/yt/commits/030712f22927/
Changeset:   030712f22927
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-17 21:28:16+00:00
Summary:     Adding better thread support
Affected #:  1 file

diff -r 6831f159042134e19b1bf8b2cd045463df532c21 -r 030712f229270477d6aba434397760f201ccf816 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -357,22 +357,25 @@
                 free(idata)
                 free(v_pos)
         else:
-            with nogil, parallel():
+            with nogil, parallel(num_threads = num_threads):
                 idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
                 idata.supp_data = self.supp_data
                 v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
                 v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
                 # If we do not have a simple image plane, we have to cast all
                 # our rays 
-                for j in prange(size, schedule="dynamic", chunksize=100):
+                every = <int> (size / 25.0)
+                for j in prange(size, schedule="static", chunksize=1):
                     offset = j * 3
                     for i in range(3): v_pos[i] = im.vp_pos[i + offset]
                     for i in range(3): v_dir[i] = im.vp_dir[i + offset]
+                    if v_dir[0] == v_dir[1] == v_dir[2] == 0.0:
+                        continue
                     # Note that for Nch != 3 we need a different offset into
                     # the image object than for the vectors!
                     for i in range(Nch): idata.rgba[i] = im.image[i + Nch*j]
                     if im.zbuffer != NULL:
-                        max_t = im.zbuffer[j]
+                        max_t = fclip(im.zbuffer[j], 0.0, 1.0)
                     else:
                         max_t = 1.0
                     walk_volume(vc, v_pos, v_dir, self.sampler, 
@@ -1100,6 +1103,9 @@
         for j in range(ny):
             py = (2.0 * (nimj*ny + j)) / resolution - 1.0
             r = (px*px + py*py)**0.5
+            if r > 1.01:
+                vp[i,j,0] = vp[i,j,1] = vp[i,j,2] = 0.0
+                continue
             phi = atan2(py, px)
             theta = r * fov_rad / 2.0
             theta += off_theta
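
Two things change in the ray loop above: the scheduler moves from
dynamic/chunksize=100 to static/chunksize=1, which interleaves rows
across threads and balances work when some rays exit early, and
num_threads is now forwarded from the caller.  A minimal, self-contained
Cython sketch of that prange pattern (hypothetical sum_squares, not yt
code):

from cython.parallel import prange

def sum_squares(double[:] data, int num_threads):
    cdef Py_ssize_t j
    cdef double total = 0.0
    # Static scheduling with chunksize=1 interleaves iterations across
    # threads; Cython turns the += into an OpenMP reduction.
    for j in prange(data.shape[0], nogil=True, schedule="static",
                    chunksize=1, num_threads=num_threads):
        total += data[j] * data[j]
    return total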


https://bitbucket.org/yt_analysis/yt/commits/058a3bab1c61/
Changeset:   058a3bab1c61
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-17 21:28:25+00:00
Summary:     Adding a black-to-blueish colormap.
Affected #:  1 file

diff -r 030712f229270477d6aba434397760f201ccf816 -r 058a3bab1c6188cf74db80b2106aef9f521dc515 yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -99,6 +99,15 @@
 
 add_cmap('black_green', cdict)
 
+cdict = {'red':   ((0.0, 0.0, 0.0),
+                   (1.0, 0.2, 0.2)),
+         'green': ((0.0, 0.0, 0.0),
+                   (1.0, 0.2, 0.2)),
+         'blue':  ((0.0, 0.0, 0.0),
+                   (1.0, 1.0, 1.0))}
+
+add_cmap('black_blueish', cdict)
+
 # This one is a variant of a colormap commonly
 # used for X-ray observations by Maxim Markevitch
 


https://bitbucket.org/yt_analysis/yt/commits/33daa5b208b6/
Changeset:   33daa5b208b6
Branch:      yt-3.0
User:        samskillman
Date:        2014-10-14 13:53:41+00:00
Summary:     Fixing issue when zbuffer is not passed in
Affected #:  3 files

diff -r 291f78f26b65348d5c6a197f54b0b524b43df9d7 -r 33daa5b208b600875948e2f2c0da0522da0c6140 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -65,7 +65,7 @@
         (xmi, xma), (ymi, yma), (zmi, zma) = \
             data_source.quantities['Extrema'](['x', 'y', 'z'])
         width = np.sqrt((xma - xmi) ** 2 + (yma - ymi) ** 2 +
-                        (zma - zmi) ** 2) / np.sqrt(3)
+                        (zma - zmi) ** 2)
         focus = data_source.get_field_parameter('center')
 
         if iterable(width) and len(width) > 1 and isinstance(width[1], str):

diff -r 291f78f26b65348d5c6a197f54b0b524b43df9d7 -r 33daa5b208b600875948e2f2c0da0522da0c6140 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -161,7 +161,12 @@
         mylog.debug("Done casting rays")
 
         self.current_image = self.finalize_image(camera, self.sampler.aimage)
-        self.zbuffer = ZBuffer(self.current_image, 0.0*zbuffer.z)
+        if zbuffer is None:
+            self.zbuffer = ZBuffer(self.current_image,
+                                   np.zeros(self.current_image.shape[:2]))
+        else:
+            self.zbuffer = ZBuffer(self.current_image, 0.0*zbuffer.z)
+
         return self.current_image
 
     def finalize_image(self, camera, image):

diff -r 291f78f26b65348d5c6a197f54b0b524b43df9d7 -r 33daa5b208b600875948e2f2c0da0522da0c6140 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -146,7 +146,7 @@
         for k, source in self.iter_transparent_sources():
             print "Adding transparent source:", source
             print opaque.z.min(), opaque.z.max()
-            print opaque.rgba[:,:,:3].max()
+            print opaque.rgba[:, :, :3].max()
             im = source.render(cam, zbuffer=opaque)
             #opaque = opaque + source.zbuffer
         return im
@@ -164,7 +164,7 @@
         return handle
 
 
-def volume_render(data_source, field=None, fname=None):
+def volume_render(data_source, field=None, fname=None, clip_ratio=None):
     data_source = data_source_or_all(data_source)
     sc = Scene()
     if field is None:
@@ -176,4 +176,4 @@
     cam = Camera(data_source)
     sc.set_default_camera(cam)
     sc.add_source(vol)
-    return sc.render(fname=fname), sc
+    return sc.render(fname=fname, clip_ratio=clip_ratio), sc


https://bitbucket.org/yt_analysis/yt/commits/f70baad06843/
Changeset:   f70baad06843
Branch:      yt-3.0
User:        samskillman
Date:        2014-10-14 14:12:05+00:00
Summary:     Adding docstrings for volume_rendering, new test for simple volume rendering.
Affected #:  2 files

diff -r 33daa5b208b600875948e2f2c0da0522da0c6140 -r f70baad0684369338432bdbfbf0883d7d0ced67d yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -165,6 +165,45 @@
 
 
 def volume_render(data_source, field=None, fname=None, clip_ratio=None):
+    r""" Create a simple volume rendering of a data source.
+
+    A helper function that creates a default camera view, transfer
+    function, and image size. Using these, it returns an image and
+    an instance of the Scene class, allowing one to further modify
+    the rendering.
+
+    Parameters
+    ----------
+    data_source : :class:`yt.data_objects.data_containers.AMR3DData`
+        This is the source to be rendered, which can be any arbitrary yt
+        data object or dataset.
+    field: string, tuple, optional
+        The field to be rendered. By default, this will use the first
+        field in data_source.ds.field_list.  A default transfer function
+        will be built that spans the range of values for that given field,
+        and the field will be logarithmically scaled if the field_info
+        object specifies as such.
+    fname: string, optional
+        If specified, the resulting rendering will be saved to this filename
+        in png format.
+    clip_ratio: float, optional
+        If specified, the resulting image will be clipped before saving,
+        using a threshold based on clip_ratio multiplied by the standard
+        deviation of the pixel values. Recommended values are between 2 and 6.
+
+    Returns
+    -------
+    im: ImageArray
+        The resulting image, stored as an ImageArray object.
+    sc: Scene
+        A :class:`yt.visualization.volume_rendering.scene.Scene` object
+        that was constructed during the rendering. Useful for further
+        modifications, rotations, etc.
+
+    Example:
+    >>> import yt
+    >>> ds = yt.load("Enzo_64/DD0046/DD0046")
+    >>> im, sc = yt.volume_render(ds, fname='test.png', clip_ratio=4.0)
+    """
     data_source = data_source_or_all(data_source)
     sc = Scene()
     if field is None:
@@ -176,4 +215,5 @@
     cam = Camera(data_source)
     sc.set_default_camera(cam)
     sc.add_source(vol)
-    return sc.render(fname=fname, clip_ratio=clip_ratio), sc
+    im = sc.render(fname=fname, clip_ratio=clip_ratio)
+    return im, sc

diff -r 33daa5b208b600875948e2f2c0da0522da0c6140 -r f70baad0684369338432bdbfbf0883d7d0ced67d yt/visualization/volume_rendering/tests/test_simple_vr.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/tests/test_simple_vr.py
@@ -0,0 +1,19 @@
+"""
+Test Simple Volume Rendering Scene
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+from yt.mods import *
+from yt.testing import \
+    fake_random_pf
+from yt.visualization.volume_rendering.scene import volume_render
+
+ds = fake_random_pf(32)
+im, sc = volume_render(ds, fname='test.png', clip_ratio=4.0)
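
On clip_ratio: per the docstring above, the saved image is clipped at a
multiple of the pixel standard deviation.  A hedged one-line sketch of
that idea (hypothetical helper; the actual clipping happens inside yt's
image-writing path):

import numpy as np

def clip_image(im, clip_ratio):
    # Saturate everything above clip_ratio standard deviations.
    return np.clip(im, 0.0, clip_ratio * im.std())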


https://bitbucket.org/yt_analysis/yt/commits/e454f8bb5a83/
Changeset:   e454f8bb5a83
Branch:      yt-3.0
User:        samskillman
Date:        2014-10-14 15:20:35+00:00
Summary:     Fixing perspective rendering and some units issues
Affected #:  3 files

diff -r f70baad0684369338432bdbfbf0883d7d0ced67d -r e454f8bb5a835f0f3b818179176971bd993191c4 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -42,7 +42,7 @@
         self.set_lens(lens_type)
         if data_source is not None:
             data_source = data_source_or_all(data_source)
-            self.set_defaults_from_data_source(data_source)
+        self.set_defaults_from_data_source(data_source)
 
         super(Camera, self).__init__(self.focus - self.position,
                                      self.north_vector, steady_north=False)
@@ -71,9 +71,11 @@
         if iterable(width) and len(width) > 1 and isinstance(width[1], str):
             width = data_source.pf.quan(width[0], input_units=width[1])
             # Now convert back to code length for subsequent manipulation
-            width = width.in_units("code_length").value
+            width = width.in_units("code_length")  # .value
         if not iterable(width):
-            width = (width, width, width)  # left/right, top/bottom, front/back
+            width = data_source.pf.arr([width, width, width],
+                                       input_units='code_length')
+            # left/right, top/bottom, front/back
         if not isinstance(width, YTArray):
             width = data_source.pf.arr(width, input_units="code_length")
         if not isinstance(focus, YTArray):
@@ -88,7 +90,7 @@
 
     def set_width(self, width):
         if not iterable(width):
-            width = [width, width, width] # No way to get code units.
+            width = np.array([width, width, width])  # Can't get code units.
         self.width = width
         self.switch_orientation()
 

diff -r f70baad0684369338432bdbfbf0883d7d0ced67d -r e454f8bb5a835f0f3b818179176971bd993191c4 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -118,6 +118,7 @@
         px = (res[1]*(dy/camera.width[1].d)).astype('int')
         return px, py, dz
 
+
 class PerspectiveLens(Lens):
 
     """docstring for PerspectiveLens"""
@@ -133,7 +134,7 @@
             info={'imtype': 'rendering'})
         return self.current_image
 
-    def get_sampler_params(self, camera):
+    def get_sampler_params(self, camera, render_source):
         # We should move away from pre-generation of vectors like this and into
         # the usage of on-the-fly generation in the VolumeIntegrator module
         # We might have a different width and back_center
@@ -206,6 +207,7 @@
     def setup_box_properties(self, camera):
         self.radius = camera.width.max()
         super(FisheyeLens, self).setup_box_properties(camera)
+        self.set_viewpoint(camera)
 
     def new_image(self, camera):
         self.current_image = ImageArray(
@@ -248,7 +250,7 @@
         """
         For a PerspectiveLens, the viewpoint is the front center.
         """
-        self.viewpoint = self.center
+        self.viewpoint = camera.position
 
 
 lenses = {'plane-parallel': PlaneParallelLens,

diff -r f70baad0684369338432bdbfbf0883d7d0ced67d -r e454f8bb5a835f0f3b818179176971bd993191c4 yt/visualization/volume_rendering/tests/test_lenses.py
--- a/yt/visualization/volume_rendering/tests/test_lenses.py
+++ b/yt/visualization/volume_rendering/tests/test_lenses.py
@@ -21,35 +21,39 @@
 from time import time
 
 #pf = fake_random_pf(8)
-#w = (pf.domain_width[0]*30).in_units('code_length')
-#sc = Scene()
-#cam = Camera(pf, lens_type='perspective')
-#print "WIDTH: ", w
-#cam.set_width(w)
-#vol = VolumeSource(pf, field=('gas', 'density'))
-#sc.set_default_camera(cam)
-#sc.add_source(vol)
-#
-#t = -time()
-#sc.render('test_perspective.png', clip_ratio=None)
-#t += time()
-#print 'Total time: %e' % t
+pf = load('/home/skillman/kipac/data/IsolatedGalaxy/galaxy0030/galaxy0030')
+w = (pf.domain_width*30).in_units('code_length')
+w = pf.arr(w, 'code_length')
+sc = Scene()
+cam = Camera(pf, lens_type='perspective')
+print "WIDTH: ", w
+cam.set_width(w)
+vol = VolumeSource(pf, field=('gas', 'density'))
+sc.set_default_camera(cam)
+sc.add_source(vol)
+
+t = -time()
+sc.render('test_perspective.png', clip_ratio=None)
+t += time()
+print 'Total time: %e' % t
 
 pf = load('/home/skillman/kipac/data/IsolatedGalaxy/galaxy0030/galaxy0030')
 ds = pf.h.sphere(pf.domain_center, pf.domain_width[0] / 100)
 sc = Scene()
 cam = Camera(pf, lens_type='fisheye')
-cam.lens.fov=180.0
-cam.resolution=(512,512)
-cam.set_width(1.0)
-v,c = pf.find_max('density')
+cam.lens.fov = 180.0
+cam.resolution = (512,512)
+cam.set_width(pf.domain_width)
+v, c = pf.find_max('density')
+c = pf.arr(c, 'code_length')
 p = pf.domain_center.copy()
-cam.set_position(c)
+cam.set_position(c-0.05*pf.domain_width)
 #pf.field_info[('gas','density')].take_log=False
 vol = VolumeSource(pf, field=('gas', 'density'))
 tf = vol.transfer_function
-tf.grey_opacity=True
-#tf.map_to_colormap(tf.x_bounds[0], tf.x_bounds[1], scale=3000.0, colormap='RdBu')
+tf.grey_opacity = True
+# tf.map_to_colormap(tf.x_bounds[0], tf.x_bounds[1], scale=3000.0, 
+#                    colormap='RdBu')
 sc.set_default_camera(cam)
 sc.add_source(vol)
 


https://bitbucket.org/yt_analysis/yt/commits/96630969d06d/
Changeset:   96630969d06d
Branch:      yt-3.0
User:        samskillman
Date:        2014-10-14 15:23:57+00:00
Summary:     Adding some notes to the vr dir
Affected #:  1 file

diff -r e454f8bb5a835f0f3b818179176971bd993191c4 -r 96630969d06d79c5ffe2937606fbe83426c8dc50 yt/visualization/volume_rendering/notes.md
--- /dev/null
+++ b/yt/visualization/volume_rendering/notes.md
@@ -0,0 +1,39 @@
+
+Overview of Volume Rendering
+============================
+
+In 3.0, we have moved away from the "god class" that was Camera, and have
+attempted to break down the VR system into a hierarchy of classes.  So far
+we are at:
+
+1. Scene 
+2. Camera
+3. Lens 
+4. Source
+
+For now, a scene only has one camera, i.e. one viewpoint. I would like this to be
+extended to multiple cameras at some point, but not in this pass.
+
+A Camera can have many lenses. When taking a snapshot, the Camera will loop 
+over the lenses that have been added by the user.  We should come up with a
+naming convention and storage system.
+
+
+A Lens defines how the vectors are oriented pointing outward from the camera
+position.  Plane-parallel, Perspective, Fisheye are the first set that need to
+be implemented. As much of the Lens as possible will be set up using defaults 
+derived from the scene, such as the width/depth/etc.
+
+A Source is a data source with intent on how to visualize it.  For example, a
+VolumeSource should be treated volumetrically, with a transfer function defined
+for a given field or set of fields.  A generic OpaqueSource should define
+a method for pixelizing a ZBuffer object, carrying information about both the
+color and depth of the surface/streamline/annotation. These will be used for
+compositing later.
+
+
+sc = Scene(data_source)
+cam = sc.add_camera(cam)  # triggers cam.set_defaults_from_data_source(data_source)
+lens = PlaneParallelLens()
+cam.set_lens(lens) # This sets up lens based on camera.
+

Repository URL: https://bitbucket.org/yt_analysis/yt/
