[yt-svn] commit/yt: 11 new changesets

Bitbucket commits-noreply at bitbucket.org
Sun May 6 10:27:28 PDT 2012


11 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/b4877489f654/
changeset:   b4877489f654
branch:      yt
user:        ngoldbaum
date:        2012-05-03 03:39:58
summary:     First stab at a general orientation interface.  I've commented out all of the
cameras except for the Camera for debugging purposes.
affected #:  5 files
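
The changeset replaces the Camera's internal vector bookkeeping with a new Orientation helper, exposed on the camera as self.orienter.  A minimal usage sketch, based only on the calls that appear in the camera.py hunks below (the Orientation class itself lives in yt/utilities/orientation.py and is not part of this diff), might look like:

>>> import numpy as na
>>> from yt.utilities.orientation import Orientation
>>> normal_vector = na.array([0.1, 0.3, -0.9])   # viewing direction (illustrative values)
>>> north_vector = na.array([0., 0., 1.])        # requested "up" direction
>>> orienter = Orientation(normal_vector, north_vector=north_vector, steady_north=True)
>>> east, north, normal = orienter.unit_vectors  # orthonormal camera frame
>>> orienter.switch_orientation(normal_vector=-normal_vector, north_vector=north_vector)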

diff -r 39cfea952be1f77e33e05b99c781aa3ecc7edc7c -r b4877489f6548bd0a0cfd8dfb18c495463fb4ec2 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1158,7 +1158,7 @@
     _type_name = "cutting"
     _con_args = ('normal', 'center')
     def __init__(self, normal, center, fields = None, node_name = None,
-                 **kwargs):
+                 north_vector = None, **kwargs):
         """
         This is a data object corresponding to an oblique slice through the
         simulation domain.
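
The cutting-plane data object now accepts a north_vector keyword, mirroring the camera interface changed below.  A hypothetical call, assuming a parameter file has already been loaded as pf (the dataset path is illustrative only):

>>> from yt.mods import *
>>> pf = load("DD0010/moving7_0010")
>>> cut = pf.h.cutting([0.2, 0.3, 0.9], [0.5, 0.5, 0.5], north_vector=[0., 0., 1.])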


diff -r 39cfea952be1f77e33e05b99c781aa3ecc7edc7c -r b4877489f6548bd0a0cfd8dfb18c495463fb4ec2 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -120,7 +120,7 @@
 
 from yt.visualization.volume_rendering.api import \
     ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \
-    HomogenizedVolume, Camera, off_axis_projection, MosaicFisheyeCamera
+    HomogenizedVolume, Camera, off_axis_projection#, MosaicFisheyeCamera
 
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects


diff -r 39cfea952be1f77e33e05b99c781aa3ecc7edc7c -r b4877489f6548bd0a0cfd8dfb18c495463fb4ec2 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -619,3 +619,16 @@
         # and check, use out array.
         result.append(na.mean(sorted[indexer], axis=axis, out=out))
     return na.array(result)
+
+def get_rotation_matrix(theta, rot_vector):
+    ux = rot_vector[0]
+    uy = rot_vector[1]
+    uz = rot_vector[2]
+    cost = na.cos(theta)
+    sint = na.sin(theta)
+    
+    R = na.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
+                  [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],
+                  [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
+    
+    return R
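
get_rotation_matrix is the standard Rodrigues rotation matrix for an angle theta about the axis rot_vector, which is assumed to be a unit vector (it is not normalized here).  A quick sanity check, using the numpy-as-na convention from the rest of yt:

>>> import numpy as na
>>> from yt.utilities.math_utils import get_rotation_matrix
>>> R = get_rotation_matrix(na.pi/2, na.array([0., 0., 1.]))  # 90 degrees about z
>>> na.dot(R, na.array([1., 0., 0.]))  # ~[0, 1, 0]: x-hat rotates onto y-hat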


diff -r 39cfea952be1f77e33e05b99c781aa3ecc7edc7c -r b4877489f6548bd0a0cfd8dfb18c495463fb4ec2 yt/visualization/volume_rendering/api.py
--- a/yt/visualization/volume_rendering/api.py
+++ b/yt/visualization/volume_rendering/api.py
@@ -39,5 +39,7 @@
                              import_partitioned_grids
 from image_handling import export_rgba, import_rgba, \
                            plot_channel, plot_rgb
-from camera import Camera, PerspectiveCamera, StereoPairCamera, \
-    off_axis_projection, FisheyeCamera, MosaicFisheyeCamera
+from camera import Camera, off_axis_projection
+
+#from camera import Camera, PerspectiveCamera, StereoPairCamera, \
+#    off_axis_projection, FisheyeCamera, MosaicFisheyeCamera


diff -r 39cfea952be1f77e33e05b99c781aa3ecc7edc7c -r b4877489f6548bd0a0cfd8dfb18c495463fb4ec2 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -33,12 +33,13 @@
 from yt.utilities.amr_utils import TransferFunctionProxy, VectorPlane, \
     arr_vec2pix_nest, arr_pix2vec_nest, AdaptiveRaySource, \
     arr_ang2pix_nest, arr_fisheye_vectors, rotate_vectors
+from yt.utilities.math_utils import get_rotation_matrix
+from yt.utilities.orientation import Orientation
 from yt.visualization.image_writer import write_bitmap
 from yt.data_objects.data_containers import data_object_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, ProcessorPool
 from yt.utilities.amr_kdtree.api import AMRKDTree
-from numpy import pi
 
 class Camera(ParallelAnalysisInterface):
     def __init__(self, center, normal_vector, width,
@@ -48,8 +49,7 @@
                  log_fields = None,
                  sub_samples = 5, pf = None,
                  use_kd=True, l_max=None, no_ghost=True,
-                 tree_type='domain',expand_factor=1.0,
-                 le=None, re=None):
+                 tree_type='domain',le=None, re=None):
         r"""A viewpoint into a volume, for volume rendering.
 
         The camera represents the eye of an observer, which will be used to
@@ -74,7 +74,7 @@
             Boolean to control whether to normalize the north_vector
             by subtracting off the dot product of it and the normal
             vector.  Makes it easier to do rotations along a single
-            axis.  If north_vector is specifies, is switched to
+            axis.  If north_vector is specified, this is switched to
             True. Default: False
         volume : `yt.extensions.volume_rendering.HomogenizedVolume`, optional
             The volume to ray cast through.  Can be specified for finer-grained
@@ -136,12 +136,6 @@
             prone to longer data IO times.  If all the data can fit in
             memory on each cpu, this can be the fastest option for
             multiple ray casts on the same dataset.
-        expand_factor: float, optional
-            A parameter to be used with the PerspectiveCamera.
-            Controls how much larger a volume to render, which is
-            currently difficult to gauge for the PerspectiveCamera.
-            For full box renders, values in the 2.0-3.0 range seem to
-            produce desirable results. Default: 1.0
         le: array_like, optional
             Specifies the left edge of the volume to be rendered.
             Currently only works with use_kd=True.
@@ -188,23 +182,13 @@
         self.sub_samples = sub_samples
         if not iterable(width):
             width = (width, width, width) # left/right, top/bottom, front/back 
-        self.width = width
-        self.center = center
-        self.steady_north = steady_north
-        self.expand_factor = expand_factor
-        # This seems to be necessary for now.  Not sure what goes wrong when not true.
-        if na.all(north_vector == normal_vector):
-            mylog.error("North vector and normal vector are the same.  Disregarding north vector.")
-            north_vector == None
-        if north_vector is not None: self.steady_north=True
-        self.north_vector = north_vector
-        self.rotation_vector = north_vector
+        self.orienter = Orientation(normal_vector, north_vector=north_vector, steady_north=steady_north)
+        self._setup_box_properties(width, center, self.orienter.unit_vectors)
         if fields is None: fields = ["Density"]
         self.fields = fields
         if transfer_function is None:
             transfer_function = ProjectionTransferFunction()
         self.transfer_function = transfer_function
-        self._setup_normalized_vectors(normal_vector, north_vector)
         self.log_fields = log_fields
         self.use_kd = use_kd
         self.l_max = l_max
@@ -223,40 +207,21 @@
             self.use_kd = isinstance(volume, AMRKDTree)
         self.volume = volume
 
-    def _setup_normalized_vectors(self, normal_vector, north_vector):
-        # Now we set up our various vectors
-        normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
-        if north_vector is None:
-            vecs = na.identity(3)
-            t = na.cross(normal_vector, vecs).sum(axis=1)
-            ax = t.argmax()
-            north_vector = na.cross(vecs[ax,:], normal_vector).ravel()
-            if self.rotation_vector is None:
-                self.rotation_vector=north_vector
-        else:
-            if self.steady_north:
-                north_vector = north_vector - na.dot(north_vector,normal_vector)*normal_vector
-        north_vector /= na.sqrt(na.dot(north_vector, north_vector))
-        east_vector = -na.cross(north_vector, normal_vector).ravel()
-        east_vector /= na.sqrt(na.dot(east_vector, east_vector))
-        self.normal_vector = normal_vector
-        self.unit_vectors = [east_vector, north_vector, normal_vector]
-        self.box_vectors = na.array([self.unit_vectors[0]*self.width[0],
-                                     self.unit_vectors[1]*self.width[1],
-                                     self.unit_vectors[2]*self.width[2]])
-
-        self.origin = self.center - 0.5*self.width[0]*self.unit_vectors[0] \
-                                  - 0.5*self.width[1]*self.unit_vectors[1] \
-                                  - 0.5*self.width[2]*self.unit_vectors[2]
-        self.back_center = self.center - 0.5*self.width[2]*self.unit_vectors[2]
-        self.front_center = self.center + 0.5*self.width[2]*self.unit_vectors[2]
-        self.inv_mat = na.linalg.pinv(self.unit_vectors)
+    def _setup_box_properties(self, width, center, unit_vectors):
+        self.width = width
+        self.center = center
+        self.box_vectors = na.array([unit_vectors[0]*width[0],
+                                     unit_vectors[1]*width[1],
+                                     unit_vectors[2]*width[2]])
+        self.origin = center - 0.5*na.dot(width,unit_vectors)
+        self.back_center = center - 0.5*width[2]*unit_vectors[2]
+        self.front_center = center + 0.5*width[2]*unit_vectors[2]
 
     def look_at(self, new_center, north_vector = None):
         r"""Change the view direction based on a new focal point.
 
-        This will recalculate all the necessary vectors and vector planes related
-        to a camera to point at a new location.
+        This will recalculate all the necessary vectors and vector planes to orient
+        the image plane so that it points at a new location.
 
         Parameters
         ----------
@@ -267,14 +232,13 @@
             The "up" direction for the plane of rays.  If not specific,
             calculated automatically.
         """
-        normal_vector = self.front_center - new_center
-        self._setup_normalized_vectors(normal_vector, north_vector)
+        self.orienter.look_at(new_center, north_vector = north_vector)
 
     def switch_view(self, normal_vector=None, width=None, center=None, north_vector=None):
-        r"""Change the view direction based on any of the view parameters.
+        r"""Change the view based on any of the view parameters.
 
-        This will recalculate all the necessary vectors and vector planes related
-        to a camera with new normal vectors, widths, centers, or north vectors.
+        This will recalculate the orientation and width based on any of
+        normal_vector, width, center, and north_vector.
 
         Parameters
         ----------
@@ -297,11 +261,13 @@
         if center is not None:
             self.center = center
         if north_vector is None:
-            north_vector = self.north_vector
+            north_vector = self.orienter.north_vector
         if normal_vector is None:
-            normal_vector = self.front_center-self.center
-        self._setup_normalized_vectors(normal_vector, north_vector)
-        
+            normal_vector = self.front_center - self.center
+        self.orienter.switch_orientation(normal_vector = normal_vector, center = center,
+                                         north_vector = north_vector)
+        self._setup_box_properties(width, center, self.orienter.unit_vectors)
+
     def get_vector_plane(self, image):
         # We should move away from pre-generation of vectors like this and into
         # the usage of on-the-fly generation in the VolumeIntegrator module
@@ -311,7 +277,6 @@
         py = na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
                          self.resolution[1])[None,:]
         inv_mat = self.inv_mat
-        bc = self.back_center
         positions = na.zeros((self.resolution[0], self.resolution[1], 3),
                           dtype='float64', order='C')
         positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
@@ -320,8 +285,8 @@
         bounds = (px.min(), px.max(), py.min(), py.max())
         vector_plane = VectorPlane(positions, self.box_vectors[2],
                                       self.back_center, bounds, image,
-                                      self.unit_vectors[0],
-                                      self.unit_vectors[1])
+                                      self.orienter.unit_vectors[0],
+                                      self.orienter.unit_vectors[1])
         return vector_plane
 
     def snapshot(self, fn = None, clip_ratio = None, double_check = False):
@@ -393,8 +358,7 @@
 
         """
         self.width = [w / factor for w in self.width]
-        self._setup_normalized_vectors(
-                self.unit_vectors[2], self.unit_vectors[1])
+        self._setup_box_properties(self.width, self.center, self.orienter.unit_vectors)
 
     def zoomin(self, final, n_steps, clip_ratio = None):
         r"""Loop over a zoomin and return snapshots along the way.
@@ -506,15 +470,7 @@
         if rot_vector is None:
             rot_vector = self.rotation_vector
             
-        ux = rot_vector[0]
-        uy = rot_vector[1]
-        uz = rot_vector[2]
-        cost = na.cos(theta)
-        sint = na.sin(theta)
-        
-        R = na.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
-                      [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],
-                      [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
+        R = get_rotation_matrix(theta, rot_vector)
 
         normal_vector = self.front_center-self.center
 
@@ -555,718 +511,722 @@
 
 data_object_registry["camera"] = Camera
 
-class InteractiveCamera(Camera):
-    def __init__(self, center, normal_vector, width,
-                 resolution, transfer_function,
-                 north_vector = None, steady_north=False,
-                 volume = None, fields = None,
-                 log_fields = None,
-                 sub_samples = 5, pf = None,
-                 use_kd=True, l_max=None, no_ghost=True,
-                 tree_type='domain',expand_factor=1.0,
-                 le=None, re=None):
-        self.frames = []
-        Camera.__init__(self, center, normal_vector, width,
-                 resolution, transfer_function,
-                 north_vector = north_vector, steady_north=steady_north,
-                 volume = volume, fields = fields,
-                 log_fields = log_fields,
-                 sub_samples = sub_samples, pf = pf,
-                 use_kd=use_kd, l_max=l_max, no_ghost=no_ghost,
-                 tree_type=tree_type,expand_factor=expand_factor,
-                 le=le, re=re)
+# class InteractiveCamera(Camera):
+#     def __init__(self, center, normal_vector, width,
+#                  resolution, transfer_function,
+#                  north_vector = None, steady_north=False,
+#                  volume = None, fields = None,
+#                  log_fields = None,
+#                  sub_samples = 5, pf = None,
+#                  use_kd=True, l_max=None, no_ghost=True,
+#                  tree_type='domain',le=None, re=None):
+#         self.frames = []
+#         Camera.__init__(self, center, normal_vector, width,
+#                  resolution, transfer_function,
+#                  north_vector = north_vector, steady_north=steady_north,
+#                  volume = volume, fields = fields,
+#                  log_fields = log_fields,
+#                  sub_samples = sub_samples, pf = pf,
+#                  use_kd=use_kd, l_max=l_max, no_ghost=no_ghost,
+#                  tree_type=tree_type,le=le, re=re)
 
-    def snapshot(self, fn = None, clip_ratio = None):
-        import matplotlib
-        matplotlib.pylab.figure(2)
-        self.transfer_function.show()
-        matplotlib.pylab.draw()
-        im = Camera.snapshot(self, fn, clip_ratio)
-        matplotlib.pylab.figure(1)
-        matplotlib.pylab.imshow(im/im.max())
-        matplotlib.pylab.draw()
-        self.frames.append(im)
+#     def snapshot(self, fn = None, clip_ratio = None):
+#         import matplotlib
+#         matplotlib.pylab.figure(2)
+#         self.transfer_function.show()
+#         matplotlib.pylab.draw()
+#         im = Camera.snapshot(self, fn, clip_ratio)
+#         matplotlib.pylab.figure(1)
+#         matplotlib.pylab.imshow(im/im.max())
+#         matplotlib.pylab.draw()
+#         self.frames.append(im)
         
-    def rotation(self, theta, n_steps, rot_vector=None):
-        for frame in Camera.rotation(self, theta, n_steps, rot_vector):
-            if frame is not None:
-                self.frames.append(frame)
+#     def rotation(self, theta, n_steps, rot_vector=None):
+#         for frame in Camera.rotation(self, theta, n_steps, rot_vector):
+#             if frame is not None:
+#                 self.frames.append(frame)
                 
-    def zoomin(self, final, n_steps):
-        for frame in Camera.zoomin(self, final, n_steps):
-            if frame is not None:
-                self.frames.append(frame)
+#     def zoomin(self, final, n_steps):
+#         for frame in Camera.zoomin(self, final, n_steps):
+#             if frame is not None:
+#                 self.frames.append(frame)
                 
-    def clear_frames(self):
-        del self.frames
-        self.frames = []
+#     def clear_frames(self):
+#         del self.frames
+#         self.frames = []
         
-    def save_frames(self, basename, clip_ratio=None):
-        for i, frame in enumerate(self.frames):
-            fn = basename + '_%04i.png'%i
-            if clip_ratio is not None:
-                write_bitmap(frame, fn, clip_ratio*image.std())
-            else:
-                write_bitmap(frame, fn)
+#     def save_frames(self, basename, clip_ratio=None):
+#         for i, frame in enumerate(self.frames):
+#             fn = basename + '_%04i.png'%i
+#             if clip_ratio is not None:
+#                 write_bitmap(frame, fn, clip_ratio*image.std())
+#             else:
+#                 write_bitmap(frame, fn)
 
-data_object_registry["interactive_camera"] = InteractiveCamera
+# data_object_registry["interactive_camera"] = InteractiveCamera
 
-class PerspectiveCamera(Camera):
-    def get_vector_plane(self, image):
-        # We should move away from pre-generation of vectors like this and into
-        # the usage of on-the-fly generation in the VolumeIntegrator module
-        # We might have a different width and back_center
-        dl = (self.back_center - self.front_center)
-        self.front_center += dl
-        self.back_center -= dl
-        px = self.expand_factor*na.linspace(-self.width[0]/2.0, self.width[0]/2.0,
-                         self.resolution[0])[:,None]
-        py = self.expand_factor*na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
-                         self.resolution[1])[None,:]
-        inv_mat = self.inv_mat
-        bc = self.back_center
-        positions = na.zeros((self.resolution[0], self.resolution[1], 3),
-                          dtype='float64', order='C')
-        positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
-        positions[:,:,1] = inv_mat[1,0]*px+inv_mat[1,1]*py+self.back_center[1]
-        positions[:,:,2] = inv_mat[2,0]*px+inv_mat[2,1]*py+self.back_center[2]
-        bounds = (px.min(), px.max(), py.min(), py.max())
+# class PerspectiveCamera(Camera):
+#     def __init__(self, center, normal_vector, width,
+#                  resolution, transfer_function,
+#                  north_vector = None, steady_north=False,
+#                  volume = None, fields = None,
+#                  log_fields = None,
+#                  sub_samples = 5, pf = None,
+#                  use_kd=True, l_max=None, no_ghost=True,
+#                  tree_type='domain', expand_factor = 1.0,
+#                  le=None, re=None):
+#         self.expand_factor = 1.0
+#         Camera.__init__(self, center, normal_vector, width,
+#                  resolution, transfer_function,
+#                  north_vector = None, steady_north=False,
+#                  volume = None, fields = None,
+#                  log_fields = None,
+#                  sub_samples = 5, pf = None,
+#                  use_kd=True, l_max=None, no_ghost=True,
+#                  tree_type='domain', le=None, re=None)
         
-        # We are likely adding on an odd cutting condition here
-        vectors = self.front_center - positions
-        positions = self.front_center - 2.0*(((self.back_center-self.front_center)**2).sum())**0.5*vectors
-        vectors = (self.front_center - positions)
 
-        vector_plane = VectorPlane(positions, vectors,
-                                      self.back_center, bounds, image,
-                                      self.unit_vectors[0],
-                                      self.unit_vectors[1])
-        return vector_plane
+#     def get_vector_plane(self, image):
+#         # We should move away from pre-generation of vectors like this and into
+#         # the usage of on-the-fly generation in the VolumeIntegrator module
+#         # We might have a different width and back_center
+#         dl = (self.back_center - self.front_center)
+#         self.front_center += dl
+#         self.back_center -= dl
+#         px = self.expand_factor*na.linspace(-self.width[0]/2.0, self.width[0]/2.0,
+#                          self.resolution[0])[:,None]
+#         py = self.expand_factor*na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
+#                          self.resolution[1])[None,:]
+#         inv_mat = self.inv_mat
+#         positions = na.zeros((self.resolution[0], self.resolution[1], 3),
+#                           dtype='float64', order='C')
+#         positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
+#         positions[:,:,1] = inv_mat[1,0]*px+inv_mat[1,1]*py+self.back_center[1]
+#         positions[:,:,2] = inv_mat[2,0]*px+inv_mat[2,1]*py+self.back_center[2]
+#         bounds = (px.min(), px.max(), py.min(), py.max())
+        
+#         # We are likely adding on an odd cutting condition here
+#         vectors = self.front_center - positions
+#         positions = self.front_center - 2.0*(((self.back_center-self.front_center)**2).sum())**0.5*vectors
+#         vectors = (self.front_center - positions)
 
-def corners(left_edge, right_edge):
-    return na.array([
-      [left_edge[:,0], left_edge[:,1], left_edge[:,2]],
-      [right_edge[:,0], left_edge[:,1], left_edge[:,2]],
-      [right_edge[:,0], right_edge[:,1], left_edge[:,2]],
-      [right_edge[:,0], right_edge[:,1], right_edge[:,2]],
-      [left_edge[:,0], right_edge[:,1], right_edge[:,2]],
-      [left_edge[:,0], left_edge[:,1], right_edge[:,2]],
-      [right_edge[:,0], left_edge[:,1], right_edge[:,2]],
-      [left_edge[:,0], right_edge[:,1], left_edge[:,2]],
-    ], dtype='float64')
+#         vector_plane = VectorPlane(positions, vectors,
+#                                       self.back_center, bounds, image,
+#                                       self.unit_vectors[0],
+#                                       self.unit_vectors[1])
+#         return vector_plane
 
-class HEALpixCamera(Camera):
-    def __init__(self, center, radius, nside,
-                 transfer_function = None, fields = None,
-                 sub_samples = 5, log_fields = None, volume = None,
-                 pf = None, use_kd=True, no_ghost=False):
-        ParallelAnalysisInterface.__init__(self)
-        if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
-        self.radius = radius
-        self.nside = nside
-        self.use_kd = use_kd
-        if transfer_function is None:
-            transfer_function = ProjectionTransferFunction()
-        self.transfer_function = transfer_function
-        if fields is None: fields = ["Density"]
-        self.fields = fields
-        self.sub_samples = sub_samples
-        self.log_fields = log_fields
-        if volume is None:
-            volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
-                               log_fields=log_fields)
-        self.use_kd = isinstance(volume, AMRKDTree)
-        self.volume = volume
+# def corners(left_edge, right_edge):
+#     return na.array([
+#       [left_edge[:,0], left_edge[:,1], left_edge[:,2]],
+#       [right_edge[:,0], left_edge[:,1], left_edge[:,2]],
+#       [right_edge[:,0], right_edge[:,1], left_edge[:,2]],
+#       [right_edge[:,0], right_edge[:,1], right_edge[:,2]],
+#       [left_edge[:,0], right_edge[:,1], right_edge[:,2]],
+#       [left_edge[:,0], left_edge[:,1], right_edge[:,2]],
+#       [right_edge[:,0], left_edge[:,1], right_edge[:,2]],
+#       [left_edge[:,0], right_edge[:,1], left_edge[:,2]],
+#     ], dtype='float64')
 
-    def snapshot(self, fn = None, clim = None):
-        nv = 12*self.nside**2
-        image = na.zeros((nv,1,3), dtype='float64', order='C')
-        vs = arr_pix2vec_nest(self.nside, na.arange(nv))
-        vs *= self.radius
-        vs.shape = (nv,1,3)
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((nv, 1, 3), dtype='float64') * self.center
-        vector_plane = VectorPlane(positions, vs, self.center,
-                        (0.0, 1.0, 0.0, 1.0), image, uv, uv)
-        tfp = TransferFunctionProxy(self.transfer_function)
-        tfp.ns = self.sub_samples
-        self.volume.initialize_source()
-        mylog.info("Rendering equivalent of %0.2f^2 image", nv**0.5)
-        pbar = get_pbar("Ray casting",
-                        (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+# class HEALpixCamera(Camera):
+#     def __init__(self, center, radius, nside,
+#                  transfer_function = None, fields = None,
+#                  sub_samples = 5, log_fields = None, volume = None,
+#                  pf = None, use_kd=True, no_ghost=False):
+#         ParallelAnalysisInterface.__init__(self)
+#         if pf is not None: self.pf = pf
+#         self.center = na.array(center, dtype='float64')
+#         self.radius = radius
+#         self.nside = nside
+#         self.use_kd = use_kd
+#         if transfer_function is None:
+#             transfer_function = ProjectionTransferFunction()
+#         self.transfer_function = transfer_function
+#         if fields is None: fields = ["Density"]
+#         self.fields = fields
+#         self.sub_samples = sub_samples
+#         self.log_fields = log_fields
+#         if volume is None:
+#             volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
+#                                log_fields=log_fields)
+#         self.use_kd = isinstance(volume, AMRKDTree)
+#         self.volume = volume
 
-        total_cells = 0
-        for brick in self.volume.traverse(None, self.center, image):
-            brick.cast_plane(tfp, vector_plane)
-            total_cells += na.prod(brick.my_data[0].shape)
-            pbar.update(total_cells)
-        pbar.finish()
+#     def snapshot(self, fn = None, clim = None):
+#         nv = 12*self.nside**2
+#         image = na.zeros((nv,1,3), dtype='float64', order='C')
+#         vs = arr_pix2vec_nest(self.nside, na.arange(nv))
+#         vs *= self.radius
+#         vs.shape = (nv,1,3)
+#         uv = na.ones(3, dtype='float64')
+#         positions = na.ones((nv, 1, 3), dtype='float64') * self.center
+#         vector_plane = VectorPlane(positions, vs, self.center,
+#                         (0.0, 1.0, 0.0, 1.0), image, uv, uv)
+#         tfp = TransferFunctionProxy(self.transfer_function)
+#         tfp.ns = self.sub_samples
+#         self.volume.initialize_source()
+#         mylog.info("Rendering equivalent of %0.2f^2 image", nv**0.5)
+#         pbar = get_pbar("Ray casting",
+#                         (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
 
-        if self.comm.rank is 0 and fn is not None:
-            # This assumes Density; this is a relatively safe assumption.
-            import matplotlib.figure
-            import matplotlib.backends.backend_agg
-            phi, theta = na.mgrid[0.0:2*pi:800j, 0:pi:800j]
-            pixi = arr_ang2pix_nest(self.nside, theta.ravel(), phi.ravel())
-            image *= self.radius * self.pf['cm']
-            img = na.log10(image[:,0,0][pixi]).reshape((800,800))
+#         total_cells = 0
+#         for brick in self.volume.traverse(None, self.center, image):
+#             brick.cast_plane(tfp, vector_plane)
+#             total_cells += na.prod(brick.my_data[0].shape)
+#             pbar.update(total_cells)
+#         pbar.finish()
 
-            fig = matplotlib.figure.Figure((10, 5))
-            ax = fig.add_subplot(1,1,1,projection='mollweide')
-            implot = ax.imshow(img, extent=(-pi,pi,-pi/2,pi/2), clip_on=False, aspect=0.5)
-            cb = fig.colorbar(implot, orientation='horizontal')
-            cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
-            if clim is not None: cb.set_clim(*clim)
-            ax.xaxis.set_ticks(())
-            ax.yaxis.set_ticks(())
-            canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
-            canvas.print_figure(fn)
-        return image
+#         if self.comm.rank is 0 and fn is not None:
+#             # This assumes Density; this is a relatively safe assumption.
+#             import matplotlib.figure
+#             import matplotlib.backends.backend_agg
+#             phi, theta = na.mgrid[0.0:2*pi:800j, 0:pi:800j]
+#             pixi = arr_ang2pix_nest(self.nside, theta.ravel(), phi.ravel())
+#             image *= self.radius * self.pf['cm']
+#             img = na.log10(image[:,0,0][pixi]).reshape((800,800))
 
+#             fig = matplotlib.figure.Figure((10, 5))
+#             ax = fig.add_subplot(1,1,1,projection='mollweide')
+#             implot = ax.imshow(img, extent=(-pi,pi,-pi/2,pi/2), clip_on=False, aspect=0.5)
+#             cb = fig.colorbar(implot, orientation='horizontal')
+#             cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
+#             if clim is not None: cb.set_clim(*clim)
+#             ax.xaxis.set_ticks(())
+#             ax.yaxis.set_ticks(())
+#             canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
+#             canvas.print_figure(fn)
+#         return image
 
-class AdaptiveHEALpixCamera(Camera):
-    def __init__(self, center, radius, nside,
-                 transfer_function = None, fields = None,
-                 sub_samples = 5, log_fields = None, volume = None,
-                 pf = None, use_kd=True, no_ghost=False,
-                 rays_per_cell = 0.1, max_nside = 8192):
-        ParallelAnalysisInterface.__init__(self)
-        if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
-        self.radius = radius
-        self.use_kd = use_kd
-        if transfer_function is None:
-            transfer_function = ProjectionTransferFunction()
-        self.transfer_function = transfer_function
-        if fields is None: fields = ["Density"]
-        self.fields = fields
-        self.sub_samples = sub_samples
-        self.log_fields = log_fields
-        if volume is None:
-            volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
-                               log_fields=log_fields)
-        self.use_kd = isinstance(volume, AMRKDTree)
-        self.volume = volume
-        self.initial_nside = nside
-        self.rays_per_cell = rays_per_cell
-        self.max_nside = max_nside
 
-    def snapshot(self, fn = None):
-        tfp = TransferFunctionProxy(self.transfer_function)
-        tfp.ns = self.sub_samples
-        self.volume.initialize_source()
-        mylog.info("Adaptively rendering.")
-        pbar = get_pbar("Ray casting",
-                        (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
-        total_cells = 0
-        bricks = [b for b in self.volume.traverse(None, self.center, None)][::-1]
-        left_edges = na.array([b.LeftEdge for b in bricks])
-        right_edges = na.array([b.RightEdge for b in bricks])
-        min_dx = min(((b.RightEdge[0] - b.LeftEdge[0])/b.my_data[0].shape[0]
-                     for b in bricks))
-        # We jitter a bit if we're on a boundary of our initial grid
-        for i in range(3):
-            if bricks[0].LeftEdge[i] == self.center[i]:
-                self.center += 1e-2 * min_dx
-            elif bricks[0].RightEdge[i] == self.center[i]:
-                self.center -= 1e-2 * min_dx
-        ray_source = AdaptiveRaySource(self.center, self.rays_per_cell,
-                                       self.initial_nside, self.radius,
-                                       bricks, left_edges, right_edges, self.max_nside)
-        for i,brick in enumerate(bricks):
-            ray_source.integrate_brick(brick, tfp, i, left_edges, right_edges,
-                                       bricks)
-            total_cells += na.prod(brick.my_data[0].shape)
-            pbar.update(total_cells)
-        pbar.finish()
-        info, values = ray_source.get_rays()
-        return info, values
+# class AdaptiveHEALpixCamera(Camera):
+#     def __init__(self, center, radius, nside,
+#                  transfer_function = None, fields = None,
+#                  sub_samples = 5, log_fields = None, volume = None,
+#                  pf = None, use_kd=True, no_ghost=False,
+#                  rays_per_cell = 0.1, max_nside = 8192):
+#         ParallelAnalysisInterface.__init__(self)
+#         if pf is not None: self.pf = pf
+#         self.center = na.array(center, dtype='float64')
+#         self.radius = radius
+#         self.use_kd = use_kd
+#         if transfer_function is None:
+#             transfer_function = ProjectionTransferFunction()
+#         self.transfer_function = transfer_function
+#         if fields is None: fields = ["Density"]
+#         self.fields = fields
+#         self.sub_samples = sub_samples
+#         self.log_fields = log_fields
+#         if volume is None:
+#             volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
+#                                log_fields=log_fields)
+#         self.use_kd = isinstance(volume, AMRKDTree)
+#         self.volume = volume
+#         self.initial_nside = nside
+#         self.rays_per_cell = rays_per_cell
+#         self.max_nside = max_nside
 
-class StereoPairCamera(Camera):
-    def __init__(self, original_camera, relative_separation = 0.005):
-        ParallelAnalysisInterface.__init__(self)
-        self.original_camera = original_camera
-        self.relative_separation = relative_separation
+#     def snapshot(self, fn = None):
+#         tfp = TransferFunctionProxy(self.transfer_function)
+#         tfp.ns = self.sub_samples
+#         self.volume.initialize_source()
+#         mylog.info("Adaptively rendering.")
+#         pbar = get_pbar("Ray casting",
+#                         (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+#         total_cells = 0
+#         bricks = [b for b in self.volume.traverse(None, self.center, None)][::-1]
+#         left_edges = na.array([b.LeftEdge for b in bricks])
+#         right_edges = na.array([b.RightEdge for b in bricks])
+#         min_dx = min(((b.RightEdge[0] - b.LeftEdge[0])/b.my_data[0].shape[0]
+#                      for b in bricks))
+#         # We jitter a bit if we're on a boundary of our initial grid
+#         for i in range(3):
+#             if bricks[0].LeftEdge[i] == self.center[i]:
+#                 self.center += 1e-2 * min_dx
+#             elif bricks[0].RightEdge[i] == self.center[i]:
+#                 self.center -= 1e-2 * min_dx
+#         ray_source = AdaptiveRaySource(self.center, self.rays_per_cell,
+#                                        self.initial_nside, self.radius,
+#                                        bricks, left_edges, right_edges, self.max_nside)
+#         for i,brick in enumerate(bricks):
+#             ray_source.integrate_brick(brick, tfp, i, left_edges, right_edges,
+#                                        bricks)
+#             total_cells += na.prod(brick.my_data[0].shape)
+#             pbar.update(total_cells)
+#         pbar.finish()
+#         info, values = ray_source.get_rays()
+#         return info, values
 
-    def split(self):
-        oc = self.original_camera
-        uv = oc.unit_vectors
-        c = oc.center
-        fc = oc.front_center
-        wx, wy, wz = oc.width
-        left_normal = fc + uv[1] * 0.5*self.relative_separation * wx - c
-        right_normal = fc - uv[1] * 0.5*self.relative_separation * wx - c
-        left_camera = Camera(c, left_normal, oc.width,
-                             oc.resolution, oc.transfer_function, uv[0],
-                             oc.volume, oc.fields, oc.log_fields,
-                             oc.sub_samples, oc.pf)
-        right_camera = Camera(c, right_normal, oc.width,
-                             oc.resolution, oc.transfer_function, uv[0],
-                             oc.volume, oc.fields, oc.log_fields,
-                             oc.sub_samples, oc.pf)
-        return (left_camera, right_camera)
+# class StereoPairCamera(Camera):
+#     def __init__(self, original_camera, relative_separation = 0.005):
+#         ParallelAnalysisInterface.__init__(self)
+#         self.original_camera = original_camera
+#         self.relative_separation = relative_separation
 
-class FisheyeCamera(Camera):
-    def __init__(self, center, radius, fov, resolution,
-                 transfer_function = None, fields = None,
-                 sub_samples = 5, log_fields = None, volume = None,
-                 pf = None, no_ghost=False, rotation = None):
-        ParallelAnalysisInterface.__init__(self)
-        if rotation is None: rotation = na.eye(3)
-        self.rotation_matrix = rotation
-        if pf is not None: self.pf = pf
-        self.center = na.array(center, dtype='float64')
-        self.radius = radius
-        self.fov = fov
-        if iterable(resolution):
-            raise RuntimeError("Resolution must be a single int")
-        self.resolution = resolution
-        if transfer_function is None:
-            transfer_function = ProjectionTransferFunction()
-        self.transfer_function = transfer_function
-        if fields is None: fields = ["Density"]
-        self.fields = fields
-        self.sub_samples = sub_samples
-        self.log_fields = log_fields
-        if volume is None:
-            volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
-                               log_fields=log_fields)
-        self.volume = volume
+#     def split(self):
+#         oc = self.original_camera
+#         uv = oc.unit_vectors
+#         c = oc.center
+#         fc = oc.front_center
+#         wx, wy, wz = oc.width
+#         left_normal = fc + uv[1] * 0.5*self.relative_separation * wx - c
+#         right_normal = fc - uv[1] * 0.5*self.relative_separation * wx - c
+#         left_camera = Camera(c, left_normal, oc.width,
+#                              oc.resolution, oc.transfer_function, uv[0],
+#                              oc.volume, oc.fields, oc.log_fields,
+#                              oc.sub_samples, oc.pf)
+#         right_camera = Camera(c, right_normal, oc.width,
+#                              oc.resolution, oc.transfer_function, uv[0],
+#                              oc.volume, oc.fields, oc.log_fields,
+#                              oc.sub_samples, oc.pf)
+#         return (left_camera, right_camera)
 
-    def snapshot(self):
-        image = na.zeros((self.resolution**2,1,3), dtype='float64', order='C')
-        # We now follow figures 4-7 of:
-        # http://paulbourke.net/miscellaneous/domefisheye/fisheye/
-        # ...but all in Cython.
-        vp = arr_fisheye_vectors(self.resolution, self.fov)
-        vp.shape = (self.resolution**2,1,3)
-        vp2 = vp.copy()
-        for i in range(3):
-            vp[:,:,i] = (vp2 * self.rotation_matrix[:,i]).sum(axis=2)
-        del vp2
-        vp *= self.radius
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
-        vector_plane = VectorPlane(positions, vp, self.center,
-                        (0.0, 1.0, 0.0, 1.0), image, uv, uv)
-        tfp = TransferFunctionProxy(self.transfer_function)
-        tfp.ns = self.sub_samples
-        self.volume.initialize_source()
-        mylog.info("Rendering fisheye of %s^2", self.resolution)
-        pbar = get_pbar("Ray casting",
-                        (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+# class FisheyeCamera(Camera):
+#     def __init__(self, center, radius, fov, resolution,
+#                  transfer_function = None, fields = None,
+#                  sub_samples = 5, log_fields = None, volume = None,
+#                  pf = None, no_ghost=False, rotation = None):
+#         ParallelAnalysisInterface.__init__(self)
+#         if rotation is None: rotation = na.eye(3)
+#         self.rotation_matrix = rotation
+#         if pf is not None: self.pf = pf
+#         self.center = na.array(center, dtype='float64')
+#         self.radius = radius
+#         self.fov = fov
+#         if iterable(resolution):
+#             raise RuntimeError("Resolution must be a single int")
+#         self.resolution = resolution
+#         if transfer_function is None:
+#             transfer_function = ProjectionTransferFunction()
+#         self.transfer_function = transfer_function
+#         if fields is None: fields = ["Density"]
+#         self.fields = fields
+#         self.sub_samples = sub_samples
+#         self.log_fields = log_fields
+#         if volume is None:
+#             volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
+#                                log_fields=log_fields)
+#         self.volume = volume
 
-        total_cells = 0
-        for brick in self.volume.traverse(None, self.center, image):
-            brick.cast_plane(tfp, vector_plane)
-            total_cells += na.prod(brick.my_data[0].shape)
-            pbar.update(total_cells)
-        pbar.finish()
-        image.shape = (self.resolution, self.resolution, 3)
-        return image
+#     def snapshot(self):
+#         image = na.zeros((self.resolution**2,1,3), dtype='float64', order='C')
+#         # We now follow figures 4-7 of:
+#         # http://paulbourke.net/miscellaneous/domefisheye/fisheye/
+#         # ...but all in Cython.
+#         vp = arr_fisheye_vectors(self.resolution, self.fov)
+#         vp.shape = (self.resolution**2,1,3)
+#         vp2 = vp.copy()
+#         for i in range(3):
+#             vp[:,:,i] = (vp2 * self.rotation_matrix[:,i]).sum(axis=2)
+#         del vp2
+#         vp *= self.radius
+#         uv = na.ones(3, dtype='float64')
+#         positions = na.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
+#         vector_plane = VectorPlane(positions, vp, self.center,
+#                         (0.0, 1.0, 0.0, 1.0), image, uv, uv)
+#         tfp = TransferFunctionProxy(self.transfer_function)
+#         tfp.ns = self.sub_samples
+#         self.volume.initialize_source()
+#         mylog.info("Rendering fisheye of %s^2", self.resolution)
+#         pbar = get_pbar("Ray casting",
+#                         (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
 
-class MosaicFisheyeCamera(Camera):
-    def __init__(self, center, radius, fov, resolution, focal_center=None,
-                 transfer_function=None, fields=None,
-                 sub_samples=5, log_fields=None, volume=None,
-                 pf=None, l_max=None, no_ghost=False,nimx=1, nimy=1, procs_per_wg=None,
-                 rotation=None):
-        r"""A fisheye lens camera, taking adantage of image plane decomposition
-        for parallelism..
+#         total_cells = 0
+#         for brick in self.volume.traverse(None, self.center, image):
+#             brick.cast_plane(tfp, vector_plane)
+#             total_cells += na.prod(brick.my_data[0].shape)
+#             pbar.update(total_cells)
+#         pbar.finish()
+#         image.shape = (self.resolution, self.resolution, 3)
+#         return image
 
-        The camera represents the eye of an observer, which will be used to
-        generate ray-cast volume renderings of the domain. In this case, the
-        rays are defined by a fisheye lens
+# class MosaicFisheyeCamera(Camera):
+#     def __init__(self, center, radius, fov, resolution, focal_center=None,
+#                  transfer_function=None, fields=None,
+#                  sub_samples=5, log_fields=None, volume=None,
+#                  pf=None, l_max=None, no_ghost=False,nimx=1, nimy=1, procs_per_wg=None,
+#                  rotation=None):
+#         r"""A fisheye lens camera, taking advantage of image plane decomposition
+#         for parallelism.
 
-        Parameters
-        ----------
-        center : array_like
-            The current "center" of the observer, from which the rays will be
-            cast
-        radius : float
-            The radial distance to cast to
-        resolution : int
-            The number of pixels in each direction.  Must be a single int.
-        volume : `yt.extensions.volume_rendering.HomogenizedVolume`, optional
-            The volume to ray cast through.  Can be specified for finer-grained
-            control, but otherwise will be automatically generated.
-        fields : list of fields, optional
-            This is the list of fields we want to volume render; defaults to
-            Density.
-        log_fields : list of bool, optional
-            Whether we should take the log of the fields before supplying them to
-            the volume rendering mechanism.
-        sub_samples : int, optional
-            The number of samples to take inside every cell per ray.
-        pf : `~yt.data_objects.api.StaticOutput`
-            For now, this is a require parameter!  But in the future it will become
-            optional.  This is the parameter file to volume render.
-        l_max: int, optional
-            Specifies the maximum level to be rendered.  Also
-            specifies the maximum level used in the AMRKDTree
-            construction.  Defaults to None (all levels), and only
-            applies if use_kd=True.
-        no_ghost: bool, optional
-            Optimization option.  If True, homogenized bricks will
-            extrapolate out from grid instead of interpolating from
-            ghost zones that have to first be calculated.  This can
-            lead to large speed improvements, but at a loss of
-            accuracy/smoothness in resulting image.  The effects are
-            less notable when the transfer function is smooth and
-            broad. Default: False
-        nimx: int, optional
-            The number by which to decompose the image plane into in the x
-            direction.  Must evenly divide the resolution.
-        nimy: int, optional
-            The number by which to decompose the image plane into in the y 
-            direction.  Must evenly divide the resolution.
-        procs_per_wg: int, optional
-            The number of processors to use on each sub-image. Within each
-            subplane, the volume will be decomposed using the AMRKDTree with
-            procs_per_wg processors.  
+#         The camera represents the eye of an observer, which will be used to
+#         generate ray-cast volume renderings of the domain. In this case, the
+#         rays are defined by a fisheye lens
 
-        Notes
-        -----
-            The product of nimx*nimy*procs_per_wg must be equal to or less than
-            the total number of mpi processes.  
+#         Parameters
+#         ----------
+#         center : array_like
+#             The current "center" of the observer, from which the rays will be
+#             cast
+#         radius : float
+#             The radial distance to cast to
+#         resolution : int
+#             The number of pixels in each direction.  Must be a single int.
+#         volume : `yt.extensions.volume_rendering.HomogenizedVolume`, optional
+#             The volume to ray cast through.  Can be specified for finer-grained
+#             control, but otherwise will be automatically generated.
+#         fields : list of fields, optional
+#             This is the list of fields we want to volume render; defaults to
+#             Density.
+#         log_fields : list of bool, optional
+#             Whether we should take the log of the fields before supplying them to
+#             the volume rendering mechanism.
+#         sub_samples : int, optional
+#             The number of samples to take inside every cell per ray.
+#         pf : `~yt.data_objects.api.StaticOutput`
+#             For now, this is a required parameter!  But in the future it will become
+#             optional.  This is the parameter file to volume render.
+#         l_max: int, optional
+#             Specifies the maximum level to be rendered.  Also
+#             specifies the maximum level used in the AMRKDTree
+#             construction.  Defaults to None (all levels), and only
+#             applies if use_kd=True.
+#         no_ghost: bool, optional
+#             Optimization option.  If True, homogenized bricks will
+#             extrapolate out from grid instead of interpolating from
+#             ghost zones that have to first be calculated.  This can
+#             lead to large speed improvements, but at a loss of
+#             accuracy/smoothness in resulting image.  The effects are
+#             less notable when the transfer function is smooth and
+#             broad. Default: False
+#         nimx: int, optional
+#             The number by which to decompose the image plane into in the x
+#             direction.  Must evenly divide the resolution.
+#         nimy: int, optional
+#             The number by which to decompose the image plane into in the y 
+#             direction.  Must evenly divide the resolution.
+#         procs_per_wg: int, optional
+#             The number of processors to use on each sub-image. Within each
+#             subplane, the volume will be decomposed using the AMRKDTree with
+#             procs_per_wg processors.  
 
-            Unlike the non-Mosaic camera, this will only return each sub-image
-            to the root processor of each sub-image workgroup in order to save
-            memory.  To save the final image, one must then call
-            MosaicFisheyeCamera.save_image('filename')
+#         Notes
+#         -----
+#             The product of nimx*nimy*procs_per_wg must be equal to or less than
+#             the total number of mpi processes.  
 
-        Examples
-        --------
+#             Unlike the non-Mosaic camera, this will only return each sub-image
+#             to the root processor of each sub-image workgroup in order to save
+#             memory.  To save the final image, one must then call
+#             MosaicFisheyeCamera.save_image('filename')
 
-        >>> from yt.mods import *
+#         Examples
+#         --------
+
+#         >>> from yt.mods import *
         
-        >>> pf = load('DD1717')
+#         >>> pf = load('DD1717')
         
-        >>> N = 512 # Pixels (1024^2)
-        >>> c = (pf.domain_right_edge + pf.domain_left_edge)/2. # Center
-        >>> radius = (pf.domain_right_edge - pf.domain_left_edge)/2.
-        >>> fov = 180.0
+#         >>> N = 512 # Pixels (1024^2)
+#         >>> c = (pf.domain_right_edge + pf.domain_left_edge)/2. # Center
+#         >>> radius = (pf.domain_right_edge - pf.domain_left_edge)/2.
+#         >>> fov = 180.0
         
-        >>> field='Density'
-        >>> mi,ma = pf.h.all_data().quantities['Extrema']('Density')[0]
-        >>> mi,ma = na.log10(mi), na.log10(ma)
+#         >>> field='Density'
+#         >>> mi,ma = pf.h.all_data().quantities['Extrema']('Density')[0]
+#         >>> mi,ma = na.log10(mi), na.log10(ma)
         
-        # You may want to comment out the above lines and manually set the min and max
-        # of the log of the Density field. For example:
-        # mi,ma = -30.5,-26.5
+#         # You may want to comment out the above lines and manually set the min and max
+#         # of the log of the Density field. For example:
+#         # mi,ma = -30.5,-26.5
         
-        # Another good place to center the camera is close to the maximum density.
-        # v,c = pf.h.find_max('Density')
-        # c -= 0.1*radius
+#         # Another good place to center the camera is close to the maximum density.
+#         # v,c = pf.h.find_max('Density')
+#         # c -= 0.1*radius
         
        
-        # Construct transfer function
-        >>> tf = ColorTransferFunction((mi-1, ma+1),nbins=1024)
+#         # Construct transfer function
+#         >>> tf = ColorTransferFunction((mi-1, ma+1),nbins=1024)
         
-        # Sample transfer function with Nc gaussians.  Use col_bounds keyword to limit
-        # the color range to the min and max values, rather than the transfer function
-        # bounds.
-        >>> Nc = 5
-        >>> tf.add_layers(Nc,w=0.005, col_bounds = (mi,ma), alpha=na.logspace(-2,0,Nc),
-        >>>         colormap='RdBu_r')
-        >>> 
-        # Create the camera object. Use the keyword: no_ghost=True if a lot of time is
-        # spent creating vertex-centered data. In this case I'm running with 8
-        # processors, and am splitting the image plane into 4 pieces and using 2
-        # processors on each piece.
-        >>> cam = MosaicFisheyeCamera(c, radius, fov, N,
-        >>>         transfer_function = tf, 
-        >>>         sub_samples = 5, 
-        >>>         pf=pf, 
-        >>>         nimx=2,nimy=2,procs_per_wg=2)
+#         # Sample transfer function with Nc gaussians.  Use col_bounds keyword to limit
+#         # the color range to the min and max values, rather than the transfer function
+#         # bounds.
+#         >>> Nc = 5
+#         >>> tf.add_layers(Nc,w=0.005, col_bounds = (mi,ma), alpha=na.logspace(-2,0,Nc),
+#         >>>         colormap='RdBu_r')
+#         >>> 
+#         # Create the camera object. Use the keyword: no_ghost=True if a lot of time is
+#         # spent creating vertex-centered data. In this case I'm running with 8
+#         # processors, and am splitting the image plane into 4 pieces and using 2
+#         # processors on each piece.
+#         >>> cam = MosaicFisheyeCamera(c, radius, fov, N,
+#         >>>         transfer_function = tf, 
+#         >>>         sub_samples = 5, 
+#         >>>         pf=pf, 
+#         >>>         nimx=2,nimy=2,procs_per_wg=2)
         
-        # Take a snapshot
-        >>> im = cam.snapshot()
+#         # Take a snapshot
+#         >>> im = cam.snapshot()
         
-        # Save the image
-        >>> cam.save_image('fisheye_mosaic.png')
+#         # Save the image
+#         >>> cam.save_image('fisheye_mosaic.png')
 
-        """
+#         """
 
-        ParallelAnalysisInterface.__init__(self)
-        self.image_decomp = self.comm.size>1
-        if self.image_decomp:
-            PP = ProcessorPool()
-            npatches = nimy*nimx
-            if procs_per_wg is None:
-                if (PP.size % npatches):
-                    raise RuntimeError("Cannot evenly divide %i procs to %i patches" % (PP.size,npatches))
-                else:
-                    procs_per_wg = PP.size / npatches
-            if (PP.size != npatches*procs_per_wg):
-               raise RuntimeError("You need %i processors to utilize %i procs per one patch in [%i,%i] grid" 
-                     % (npatches*procs_per_wg,procs_per_wg,nimx,nimy))
+#         ParallelAnalysisInterface.__init__(self)
+#         self.image_decomp = self.comm.size>1
+#         if self.image_decomp:
+#             PP = ProcessorPool()
+#             npatches = nimy*nimx
+#             if procs_per_wg is None:
+#                 if (PP.size % npatches):
+#                     raise RuntimeError("Cannot evenly divide %i procs to %i patches" % (PP.size,npatches))
+#                 else:
+#                     procs_per_wg = PP.size / npatches
+#             if (PP.size != npatches*procs_per_wg):
+#                raise RuntimeError("You need %i processors to utilize %i procs per one patch in [%i,%i] grid" 
+#                      % (npatches*procs_per_wg,procs_per_wg,nimx,nimy))
  
-            for j in range(nimy):
-                for i in range(nimx):
-                    PP.add_workgroup(size=procs_per_wg, name='%04i_%04i'%(i,j))
+#             for j in range(nimy):
+#                 for i in range(nimx):
+#                     PP.add_workgroup(size=procs_per_wg, name='%04i_%04i'%(i,j))
                     
-            for wg in PP.workgroups:
-                if self.comm.rank in wg.ranks:
-                    my_wg = wg
+#             for wg in PP.workgroups:
+#                 if self.comm.rank in wg.ranks:
+#                     my_wg = wg
             
-            self.global_comm = self.comm
-            self.comm = my_wg.comm
-            self.wg = my_wg
-            self.imi = int(self.wg.name[0:4])
-            self.imj = int(self.wg.name[5:9])
-            print 'My new communicator has the name %s' % self.wg.name
-            self.nimx = nimx
-            self.nimy = nimy
-        else:
-            self.imi = 0
-            self.imj = 0
-            self.nimx = 1
-            self.nimy = 1
-        if pf is not None: self.pf = pf
+#             self.global_comm = self.comm
+#             self.comm = my_wg.comm
+#             self.wg = my_wg
+#             self.imi = int(self.wg.name[0:4])
+#             self.imj = int(self.wg.name[5:9])
+#             print 'My new communicator has the name %s' % self.wg.name
+#             self.nimx = nimx
+#             self.nimy = nimy
+#         else:
+#             self.imi = 0
+#             self.imj = 0
+#             self.nimx = 1
+#             self.nimy = 1
+#         if pf is not None: self.pf = pf
         
-        if rotation is None: rotation = na.eye(3)
-        self.rotation_matrix = rotation
+#         if rotation is None: rotation = na.eye(3)
+#         self.rotation_matrix = rotation
         
-        self.normal_vector = na.array([0.,0.,1])
-        self.north_vector = na.array([1.,0.,0.])
-        self.east_vector = na.array([0.,1.,0.])
-        self.rotation_vector = self.north_vector
+#         self.normal_vector = na.array([0.,0.,1])
+#         self.north_vector = na.array([1.,0.,0.])
+#         self.east_vector = na.array([0.,1.,0.])
+#         self.rotation_vector = self.north_vector
 
-        if iterable(resolution):
-            raise RuntimeError("Resolution must be a single int")
-        self.resolution = resolution
-        self.center = na.array(center, dtype='float64')
-        self.focal_center = focal_center
-        self.radius = radius
-        self.fov = fov
-        if transfer_function is None:
-            transfer_function = ProjectionTransferFunction()
-        self.transfer_function = transfer_function
-        if fields is None: fields = ["Density"]
-        self.fields = fields
-        self.sub_samples = sub_samples
-        self.log_fields = log_fields
-        if volume is None:
-            volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
-                               log_fields=log_fields,l_max=l_max)
-        self.volume = volume
-        self.vp = None
-        self.image = None 
+#         if iterable(resolution):
+#             raise RuntimeError("Resolution must be a single int")
+#         self.resolution = resolution
+#         self.center = na.array(center, dtype='float64')
+#         self.focal_center = focal_center
+#         self.radius = radius
+#         self.fov = fov
+#         if transfer_function is None:
+#             transfer_function = ProjectionTransferFunction()
+#         self.transfer_function = transfer_function
+#         if fields is None: fields = ["Density"]
+#         self.fields = fields
+#         self.sub_samples = sub_samples
+#         self.log_fields = log_fields
+#         if volume is None:
+#             volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
+#                                log_fields=log_fields,l_max=l_max)
+#         self.volume = volume
+#         self.vp = None
+#         self.image = None 
 
-    def get_vector_plane(self):
-        if self.focal_center is not None:
-            rvec =  na.array(self.focal_center) - na.array(self.center)
-            rvec /= (rvec**2).sum()**0.5
-            angle = na.arccos( (self.normal_vector*rvec).sum()/( (self.normal_vector**2).sum()**0.5 *
-                (rvec**2).sum()**0.5))
-            rot_vector = na.cross(rvec, self.normal_vector)
-            rot_vector /= (rot_vector**2).sum()**0.5
+#     def get_vector_plane(self):
+#         if self.focal_center is not None:
+#             rvec =  na.array(self.focal_center) - na.array(self.center)
+#             rvec /= (rvec**2).sum()**0.5
+#             angle = na.arccos( (self.normal_vector*rvec).sum()/( (self.normal_vector**2).sum()**0.5 *
+#                 (rvec**2).sum()**0.5))
+#             rot_vector = na.cross(rvec, self.normal_vector)
+#             rot_vector /= (rot_vector**2).sum()**0.5
             
-            self.rotation_matrix = self.get_rotation_matrix(angle,rot_vector)
-            self.normal_vector = na.dot(self.rotation_matrix,self.normal_vector)
-            self.north_vector = na.dot(self.rotation_matrix,self.north_vector)
-            self.east_vector = na.dot(self.rotation_matrix,self.east_vector)
-        else:
-            self.focal_center = self.center + self.radius*self.normal_vector  
-        dist = ((self.focal_center - self.center)**2).sum()**0.5
-        # We now follow figures 4-7 of:
-        # http://paulbourke.net/miscellaneous/domefisheye/fisheye/
-        # ...but all in Cython.
+#             self.rotation_matrix = get_rotation_matrix(angle,rot_vector)
+#             self.normal_vector = na.dot(self.rotation_matrix,self.normal_vector)
+#             self.north_vector = na.dot(self.rotation_matrix,self.north_vector)
+#             self.east_vector = na.dot(self.rotation_matrix,self.east_vector)
+#         else:
+#             self.focal_center = self.center + self.radius*self.normal_vector  
+#         dist = ((self.focal_center - self.center)**2).sum()**0.5
+#         # We now follow figures 4-7 of:
+#         # http://paulbourke.net/miscellaneous/domefisheye/fisheye/
+#         # ...but all in Cython.
         
-        self.vp = arr_fisheye_vectors(self.resolution, self.fov, self.nimx, 
-                self.nimy, self.imi, self.imj)
+#         self.vp = arr_fisheye_vectors(self.resolution, self.fov, self.nimx, 
+#                 self.nimy, self.imi, self.imj)
         
-        self.vp = rotate_vectors(self.vp, self.rotation_matrix)
+#         self.vp = rotate_vectors(self.vp, self.rotation_matrix)
 
-        self.center = self.focal_center - dist*self.normal_vector
-        self.vp *= self.radius
-        nx, ny = self.vp.shape[0], self.vp.shape[1]
-        self.vp.shape = (nx*ny,1,3)
+#         self.center = self.focal_center - dist*self.normal_vector
+#         self.vp *= self.radius
+#         nx, ny = self.vp.shape[0], self.vp.shape[1]
+#         self.vp.shape = (nx*ny,1,3)
 
-    def snapshot(self):
-        if self.vp is None:
-            self.get_vector_plane()
+#     def snapshot(self):
+#         if self.vp is None:
+#             self.get_vector_plane()
 
-        nx,ny = self.resolution/self.nimx, self.resolution/self.nimy
-        image = na.zeros((nx*ny,1,3), dtype='float64', order='C')
-        uv = na.ones(3, dtype='float64')
-        positions = na.ones((nx*ny, 1, 3), dtype='float64') * self.center
-        vector_plane = VectorPlane(positions, self.vp, self.center,
-                        (0.0, 1.0, 0.0, 1.0), image, uv, uv)
-        tfp = TransferFunctionProxy(self.transfer_function)
-        tfp.ns = self.sub_samples
-        self.volume.initialize_source()
-        mylog.info("Rendering fisheye of %s^2", self.resolution)
-        pbar = get_pbar("Ray casting",
-                        (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+#         nx,ny = self.resolution/self.nimx, self.resolution/self.nimy
+#         image = na.zeros((nx*ny,1,3), dtype='float64', order='C')
+#         uv = na.ones(3, dtype='float64')
+#         positions = na.ones((nx*ny, 1, 3), dtype='float64') * self.center
+#         vector_plane = VectorPlane(positions, self.vp, self.center,
+#                         (0.0, 1.0, 0.0, 1.0), image, uv, uv)
+#         tfp = TransferFunctionProxy(self.transfer_function)
+#         tfp.ns = self.sub_samples
+#         self.volume.initialize_source()
+#         mylog.info("Rendering fisheye of %s^2", self.resolution)
+#         pbar = get_pbar("Ray casting",
+#                         (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
 
-        total_cells = 0
-        for brick in self.volume.traverse(None, self.center, image):
-            brick.cast_plane(tfp, vector_plane)
-            total_cells += na.prod(brick.my_data[0].shape)
-            pbar.update(total_cells)
-        pbar.finish()
-        image.shape = (nx, ny, 3)
+#         total_cells = 0
+#         for brick in self.volume.traverse(None, self.center, image):
+#             brick.cast_plane(tfp, vector_plane)
+#             total_cells += na.prod(brick.my_data[0].shape)
+#             pbar.update(total_cells)
+#         pbar.finish()
+#         image.shape = (nx, ny, 3)
 
-        if self.image is not None:
-            del self.image
-        self.image = image
+#         if self.image is not None:
+#             del self.image
+#         self.image = image
        
-        return image
+#         return image
 
-    def save_image(self, fn, clip_ratio=None):
-        if '.png' not in fn:
-            fn = fn + '.png'
+#     def save_image(self, fn, clip_ratio=None):
+#         if '.png' not in fn:
+#             fn = fn + '.png'
         
-        try:
-            image = self.image
-        except:
-            mylog.error('You must first take a snapshot')
-            raise(UserWarning)
+#         try:
+#             image = self.image
+#         except:
+#             mylog.error('You must first take a snapshot')
+#             raise(UserWarning)
         
-        image = self.image
-        nx,ny = self.resolution/self.nimx, self.resolution/self.nimy
-        if self.image_decomp:
-            if self.comm.rank == 0:
-                if self.global_comm.rank == 0:
-                    final_image = na.empty((nx*self.nimx, 
-                        ny*self.nimy, 3),
-                        dtype='float64',order='C')
-                    final_image[:nx, :ny, :] = image
-                    for j in range(self.nimy):
-                        for i in range(self.nimx):
-                            if i==0 and j==0: continue
-                            arr = self.global_comm.recv_array((self.wg.size)*(j*self.nimx + i), tag = (self.wg.size)*(j*self.nimx + i))
+#         image = self.image
+#         nx,ny = self.resolution/self.nimx, self.resolution/self.nimy
+#         if self.image_decomp:
+#             if self.comm.rank == 0:
+#                 if self.global_comm.rank == 0:
+#                     final_image = na.empty((nx*self.nimx, 
+#                         ny*self.nimy, 3),
+#                         dtype='float64',order='C')
+#                     final_image[:nx, :ny, :] = image
+#                     for j in range(self.nimy):
+#                         for i in range(self.nimx):
+#                             if i==0 and j==0: continue
+#                             arr = self.global_comm.recv_array((self.wg.size)*(j*self.nimx + i), tag = (self.wg.size)*(j*self.nimx + i))
 
-                            final_image[i*nx:(i+1)*nx, j*ny:(j+1)*ny,:] = arr
-                            del arr
-                    if clip_ratio is not None:
-                        write_bitmap(final_image, fn, clip_ratio*final_image.std())
-                    else:
-                        write_bitmap(final_image, fn)
-                else:
-                    self.global_comm.send_array(image, 0, tag = self.global_comm.rank)
-        else:
-            if self.comm.rank == 0:
-                if clip_ratio is not None:
-                    write_bitmap(image, fn, clip_ratio*image.std())
-                else:
-                    write_bitmap(image, fn)
-        return
+#                             final_image[i*nx:(i+1)*nx, j*ny:(j+1)*ny,:] = arr
+#                             del arr
+#                     if clip_ratio is not None:
+#                         write_bitmap(final_image, fn, clip_ratio*final_image.std())
+#                     else:
+#                         write_bitmap(final_image, fn)
+#                 else:
+#                     self.global_comm.send_array(image, 0, tag = self.global_comm.rank)
+#         else:
+#             if self.comm.rank == 0:
+#                 if clip_ratio is not None:
+#                     write_bitmap(image, fn, clip_ratio*image.std())
+#                 else:
+#                     write_bitmap(image, fn)
+#         return
 
-    def rotate(self, theta, rot_vector=None, keep_focus=True):
-        r"""Rotate by a given angle
+#     def rotate(self, theta, rot_vector=None, keep_focus=True):
+#         r"""Rotate by a given angle
 
-        Rotate the view.  If `rot_vector` is None, rotation will occur
-        around the `north_vector`.
+#         Rotate the view.  If `rot_vector` is None, rotation will occur
+#         around the `north_vector`.
 
-        Parameters
-        ----------
-        theta : float, in radians
-             Angle (in radians) by which to rotate the view.
-        rot_vector  : array_like, optional
-            Specify the rotation vector around which rotation will
-            occur.  Defaults to None, which sets rotation around
-            `north_vector`
+#         Parameters
+#         ----------
+#         theta : float, in radians
+#              Angle (in radians) by which to rotate the view.
+#         rot_vector  : array_like, optional
+#             Specify the rotation vector around which rotation will
+#             occur.  Defaults to None, which sets rotation around
+#             `north_vector`
 
-        Examples
-        --------
+#         Examples
+#         --------
 
-        >>> cam.rotate(na.pi/4)
-        """
-        if rot_vector is None:
-            rot_vector = self.north_vector
+#         >>> cam.rotate(na.pi/4)
+#         """
+#         if rot_vector is None:
+#             rot_vector = self.north_vector
         
-        dist = ((self.focal_center - self.center)**2).sum()**0.5
+#         dist = ((self.focal_center - self.center)**2).sum()**0.5
         
-        R = self.get_rotation_matrix(theta, rot_vector)
+#         R = get_rotation_matrix(theta, rot_vector)
 
-        self.vp = rotate_vectors(self.vp, R)
-        self.normal_vector = na.dot(R,self.normal_vector)
-        self.north_vector = na.dot(R,self.north_vector)
-        self.east_vector = na.dot(R,self.east_vector)
+#         self.vp = rotate_vectors(self.vp, R)
+#         self.normal_vector = na.dot(R,self.normal_vector)
+#         self.north_vector = na.dot(R,self.north_vector)
+#         self.east_vector = na.dot(R,self.east_vector)
 
-        if keep_focus:
-            self.center = self.focal_center - dist*self.normal_vector
+#         if keep_focus:
+#             self.center = self.focal_center - dist*self.normal_vector
 
-    def rotation(self, theta, n_steps, rot_vector=None, keep_focus=True):
-        r"""Loop over rotate, creating a rotation
+#     def rotation(self, theta, n_steps, rot_vector=None, keep_focus=True):
+#         r"""Loop over rotate, creating a rotation
 
-        This will yield `n_steps` snapshots until the current view has been
-        rotated by an angle `theta`
+#         This will yield `n_steps` snapshots until the current view has been
+#         rotated by an angle `theta`
 
-        Parameters
-        ----------
-        theta : float, in radians
-            Angle (in radians) by which to rotate the view.
-        n_steps : int
-            The number of look_at snapshots to make.
-        rot_vector  : array_like, optional
-            Specify the rotation vector around which rotation will
-            occur.  Defaults to None, which sets rotation around the
-            original `north_vector`
+#         Parameters
+#         ----------
+#         theta : float, in radians
+#             Angle (in radians) by which to rotate the view.
+#         n_steps : int
+#             The number of look_at snapshots to make.
+#         rot_vector  : array_like, optional
+#             Specify the rotation vector around which rotation will
+#             occur.  Defaults to None, which sets rotation around the
+#             original `north_vector`
 
-        Examples
-        --------
+#         Examples
+#         --------
 
-        >>> for i, snapshot in enumerate(cam.rotation(na.pi, 10)):
-        ...     iw.write_bitmap(snapshot, 'rotation_%04i.png' % i)
-        """
+#         >>> for i, snapshot in enumerate(cam.rotation(na.pi, 10)):
+#         ...     iw.write_bitmap(snapshot, 'rotation_%04i.png' % i)
+#         """
 
-        dtheta = (1.0*theta)/n_steps
-        for i in xrange(n_steps):
-            self.rotate(dtheta, rot_vector=rot_vector, keep_focus=keep_focus)
-            yield self.snapshot()
+#         dtheta = (1.0*theta)/n_steps
+#         for i in xrange(n_steps):
+#             self.rotate(dtheta, rot_vector=rot_vector, keep_focus=keep_focus)
+#             yield self.snapshot()
 
-    def move_to(self,final,n_steps,exponential=False):
-        r"""Loop over a look_at
+#     def move_to(self,final,n_steps,exponential=False):
+#         r"""Loop over a look_at
 
-        This will yield `n_steps` snapshots until the current view has been
-        moved to a final center of `final`.
+#         This will yield `n_steps` snapshots until the current view has been
+#         moved to a final center of `final`.
 
-        Parameters
-        ----------
-        final : array_like
-            The final center to move to after `n_steps`
-        n_steps : int
-            The number of look_at snapshots to make.
-        exponential : boolean
-            Specifies whether the move/zoom transition follows an
-            exponential path toward the destination or linear
+#         Parameters
+#         ----------
+#         final : array_like
+#             The final center to move to after `n_steps`
+#         n_steps : int
+#             The number of look_at snapshots to make.
+#         exponential : boolean
+#             Specifies whether the move/zoom transition follows an
+#             exponential path toward the destination or linear
             
-        Examples
-        --------
+#         Examples
+#         --------
 
-        >>> for i, snapshot in enumerate(cam.move_to([0.2,0.3,0.6], 10)):
-        ...     cam.save_image("move_%04i.png" % i)
-        """
+#         >>> for i, snapshot in enumerate(cam.move_to([0.2,0.3,0.6], 10)):
+#         ...     cam.save_image("move_%04i.png" % i)
+#         """
 
-        if exponential:
-            position_diff = (na.array(final)/self.center)*1.0
-            dx = position_diff**(1.0/n_steps)
-        else:
-            dx = (na.array(final) - self.center)*1.0/n_steps
-        for i in xrange(n_steps):
-            if exponential:
-                self.center *= dx
-            else:
-                self.center += dx
-            yield self.snapshot()
-
-    def get_rotation_matrix(self, theta, rot_vector):
-        ux = rot_vector[0]
-        uy = rot_vector[1]
-        uz = rot_vector[2]
-        cost = na.cos(theta)
-        sint = na.sin(theta)
-        
-        R = na.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
-                      [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],
-                      [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
-
-        return R
+#         if exponential:
+#             position_diff = (na.array(final)/self.center)*1.0
+#             dx = position_diff**(1.0/n_steps)
+#         else:
+#             dx = (na.array(final) - self.center)*1.0/n_steps
+#         for i in xrange(n_steps):
+#             if exponential:
+#                 self.center *= dx
+#             else:
+#                 self.center += dx
+#             yield self.snapshot()
 
 def off_axis_projection(pf, center, normal_vector, width, resolution,
                         field, weight = None, volume = None, no_ghost = True,



https://bitbucket.org/yt_analysis/yt/changeset/c011e232034f/
changeset:   c011e232034f
branch:      yt
user:        ngoldbaum
date:        2012-05-03 03:45:50
summary:     Adding the Orientation logic to the AMRCuttingPlaneBase.
affected #:  1 file

diff -r b4877489f6548bd0a0cfd8dfb18c495463fb4ec2 -r c011e232034f0a4f1d6ad879813b4af95c25852b yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1207,16 +1207,11 @@
         self.set_field_parameter('center',center)
         # Let's set up our plane equation
         # ax + by + cz + d = 0
-        self._norm_vec = normal/na.sqrt(na.dot(normal,normal))
+        self.orienter = Orientation(normal, north_vector = north_vector)
+        self._norm_vec = self.orienter.normal_vector
         self._d = -1.0 * na.dot(self._norm_vec, self.center)
-        # First we try all three, see which has the best result:
-        vecs = na.identity(3)
-        _t = na.cross(self._norm_vec, vecs).sum(axis=1)
-        ax = _t.argmax()
-        self._x_vec = na.cross(vecs[ax,:], self._norm_vec).ravel()
-        self._x_vec /= na.sqrt(na.dot(self._x_vec, self._x_vec))
-        self._y_vec = na.cross(self._norm_vec, self._x_vec).ravel()
-        self._y_vec /= na.sqrt(na.dot(self._y_vec, self._y_vec))
+        self._x_vec = self.orienter.unit_vectors[0]
+        self._y_vec = self.orienter.unit_vectors[1]
         self._rot_mat = na.array([self._x_vec,self._y_vec,self._norm_vec])
         self._inv_mat = na.linalg.pinv(self._rot_mat)
         self.set_field_parameter('cp_x_vec',self._x_vec)
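
The hunk above replaces the cutting plane's inline basis construction with the new Orientation helper: the plane's x and y unit vectors now come from orienter.unit_vectors instead of being built by hand from cross products. For reference, here is a standalone numpy sketch of what the removed lines were doing (names are illustrative only, not yt API):

import numpy as na

def plane_basis(normal):
    # Build unit vectors spanning the plane orthogonal to `normal`,
    # mirroring the inline logic that the Orientation object now owns.
    normal = na.asarray(normal, dtype='float64')
    norm_vec = normal / na.sqrt(na.dot(normal, normal))
    # Pick a reference coordinate axis with the same heuristic as the old code.
    vecs = na.identity(3)
    t = na.cross(norm_vec, vecs).sum(axis=1)
    ax = t.argmax()
    x_vec = na.cross(vecs[ax, :], norm_vec).ravel()
    x_vec /= na.sqrt(na.dot(x_vec, x_vec))
    y_vec = na.cross(norm_vec, x_vec).ravel()
    y_vec /= na.sqrt(na.dot(y_vec, y_vec))
    return norm_vec, x_vec, y_vec

n, x, y = plane_basis([1.0, 1.0, 0.5])
# The three vectors form an orthonormal triad spanning the oblique slice.
assert na.allclose([na.dot(n, x), na.dot(n, y), na.dot(x, y)], 0.0)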



https://bitbucket.org/yt_analysis/yt/changeset/96d8ef9b9107/
changeset:   96d8ef9b9107
branch:      yt
user:        ngoldbaum
date:        2012-05-03 07:48:23
summary:     Forgot to add this: the new yt/utilities/orientation.py module.
affected #:  1 file

diff -r c011e232034f0a4f1d6ad879813b4af95c25852b -r 96d8ef9b910721fd07a33e81a945aa0f0e023d39 yt/utilities/orientation.py
--- /dev/null
+++ b/yt/utilities/orientation.py
@@ -0,0 +1,101 @@
+import numpy as na
+
+from yt.funcs import *
+from yt.utilities.math_utils import get_rotation_matrix
+
+class Orientation:
+    def __init__(self, normal_vector, north_vector=None, steady_north=False):
+        r"""An object that returns a set of basis vectors for orienting
+        cameras and data containers.
+
+        Parameters
+        ----------
+        center        : array_like
+           The current "center" of the view port -- the normal_vector connects
+           the center and the origin
+        normal_vector : array_like
+           A vector normal to the image plane
+        north_vector  : array_like, optional
+           The 'up' direction to orient the image plane.  
+           If not specified, gets calculated automatically
+        steady_north  : bool, optional
+           Boolean to control whether to normalize the north_vector
+           by subtracting off its component along the normal
+           vector.  Makes it easier to do rotations along a single
+           axis.  If north_vector is specified, this is switched to
+           True.  Default: False
+           
+        """
+        self.steady_north = steady_north
+        if na.all(north_vector == normal_vector):
+            mylog.error("North vector and normal vector are the same.  Disregarding north vector.")
+            north_vector = None
+        if north_vector is not None: self.steady_north = True
+        self._setup_normalized_vectors(normal_vector, north_vector)
+
+    def _setup_normalized_vectors(self, normal_vector, north_vector):
+        # Now we set up our various vectors
+        normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
+        if north_vector is None:
+            vecs = na.identity(3)
+            t = na.cross(normal_vector, vecs).sum(axis=1)
+            ax = t.argmax()
+            north_vector = na.cross(vecs[ax,:], normal_vector).ravel()
+            if self.rotation_vector is None:
+                self.rotation_vector=north_vector
+        else:
+            if self.steady_north:
+                north_vector = north_vector - na.dot(north_vector,normal_vector)*normal_vector
+        north_vector /= na.sqrt(na.dot(north_vector, north_vector))
+        east_vector = -na.cross(north_vector, normal_vector).ravel()
+        east_vector /= na.sqrt(na.dot(east_vector, east_vector))
+        self.normal_vector = normal_vector
+        self.north_vector = north_vector
+        self.unit_vectors = [east_vector, north_vector, normal_vector]
+        self.inv_mat = na.linalg.pinv(self.unit_vectors)
+
+    def look_at(self, new_center, north_vector = None):
+        r"""Change the view direction based on a new focal point.
+
+        This will recalculate all the necessary vectors and vector planes to orient
+        the image plane so that it points at a new location.
+
+        Parameters
+        ----------
+        new_center : array_like
+            The new "center" of the view port -- the focal point for the
+            camera.
+        north_vector : array_like, optional
+            The "up" direction for the plane of rays.  If not specific,
+            calculated automatically.
+        """
+        normal_vector = self.front_center - new_center
+        self._setup_normalized_vectors(normal_vector, north_vector)
+
+
+    def switch_orientation(self, normal_vector=None, center=None, north_vector=None):
+        r"""Change the view direction based on any of the orientation parameters.
+
+        This will recalculate all the necessary vectors and vector planes related
+        to a camera with new normal vectors, widths, centers, or north vectors.
+
+        Parameters
+        ----------
+        normal_vector: array_like, optional
+            The new looking vector.
+        width: float or array of floats, optional
+            The new width.  Can be a single value W -> [W,W,W] or an
+            array [W1, W2, W3] (left/right, top/bottom, front/back)
+        center: array_like, optional
+            Specifies the new center.
+        north_vector : array_like, optional
+            The 'up' direction for the plane of rays.  If not specified,
+            calculated automatically.
+        """
+        if north_vector is None:
+            north_vector = self.north_vector
+        if normal_vector is None:
+            normal_vector = self.front_center-center
+        self._setup_normalized_vectors(normal_vector, north_vector)
+
+        
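
This commit adds the Orientation class itself. Note that at this revision the north_vector=None branch still references self.rotation_vector, which is never set (it is removed a few changesets later in b0f829fa62ba), so the automatic-north path does not work yet. A minimal usage sketch with an explicit north_vector, assuming the module is importable as committed above (the specific vectors are just for illustration):

import numpy as na
from yt.utilities.orientation import Orientation

# Look along a tilted normal, with +z as the "up" direction.
orienter = Orientation(na.array([1., 1., 0.]),
                       north_vector=na.array([0., 0., 1.]))
east, north, normal = orienter.unit_vectors
# The basis comes back orthonormal; inv_mat is its pseudo-inverse, which the
# camera later uses to map image-plane offsets into world coordinates.
for v in (east, north, normal):
    assert abs(na.dot(v, v) - 1.0) < 1e-10
assert abs(na.dot(east, north)) < 1e-10
assert abs(na.dot(east, normal)) < 1e-10
assert abs(na.dot(north, normal)) < 1e-10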



https://bitbucket.org/yt_analysis/yt/changeset/58b3ec160d93/
changeset:   58b3ec160d93
branch:      yt
user:        ngoldbaum
date:        2012-05-03 08:15:45
summary:     Small bugfixes.  Seems to work with off_axis_projection (and thus Camera).
affected #:  1 file

diff -r 96d8ef9b910721fd07a33e81a945aa0f0e023d39 -r 58b3ec160d930b03109c16634be4588d2a6716d6 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -183,7 +183,7 @@
         if not iterable(width):
             width = (width, width, width) # left/right, top/bottom, front/back 
         self.orienter = Orientation(normal_vector, north_vector=north_vector, steady_north=steady_north)
-        self._setup_box_properties(width, center, orienter.unit_vectors)
+        self._setup_box_properties(width, center, self.orienter.unit_vectors)
         if fields is None: fields = ["Density"]
         self.fields = fields
         if transfer_function is None:
@@ -266,7 +266,7 @@
             normal_vector = self.front_center - self.center
         self.orienter.switch_orientation(normal_vector = normal_vector, center = center,
                                          north_vector = north_vector)
-        self._setup_box_properties(width, center, orienter.unit_vectors)
+        self._setup_box_properties(width, center, self.orienter.unit_vectors)
 
     def get_vector_plane(self, image):
         # We should move away from pre-generation of vectors like this and into
@@ -276,7 +276,7 @@
                          self.resolution[0])[:,None]
         py = na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
                          self.resolution[1])[None,:]
-        inv_mat = self.inv_mat
+        inv_mat = self.orienter.inv_mat
         positions = na.zeros((self.resolution[0], self.resolution[1], 3),
                           dtype='float64', order='C')
         positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
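
The last hunk shows how the camera consumes the orientation: inv_mat, the pseudo-inverse of the [east, north, normal] matrix, turns image-plane offsets into world-space ray positions anchored at back_center. A standalone numpy sketch of that mapping follows; the basis, width, and resolution values are made up purely for illustration and are not the camera's defaults:

import numpy as na

# Illustrative orthonormal camera basis (east, north, normal).
unit_vectors = na.array([[1., 0., 0.],    # east
                         [0., 0., 1.],    # north
                         [0., -1., 0.]])  # normal
inv_mat = na.linalg.pinv(unit_vectors)
back_center = na.array([0.5, 1.0, 0.5])
width = (1.0, 1.0)
resolution = (4, 4)

px = na.linspace(-width[0]/2.0, width[0]/2.0, resolution[0])[:, None]
py = na.linspace(-width[1]/2.0, width[1]/2.0, resolution[1])[None, :]
positions = na.zeros((resolution[0], resolution[1], 3), dtype='float64', order='C')
# Each (px, py) offset in the image plane is rotated into world space by
# inv_mat and shifted to back_center, mirroring the positions[...] assignments
# in the hunk above.
for i in range(3):
    positions[:, :, i] = inv_mat[i, 0]*px + inv_mat[i, 1]*py + back_center[i]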



https://bitbucket.org/yt_analysis/yt/changeset/4c5f8dd31136/
changeset:   4c5f8dd31136
branch:      yt
user:        ngoldbaum
date:        2012-05-03 09:05:37
summary:     Forgot to import Orientation
affected #:  1 file

diff -r 58b3ec160d930b03109c16634be4588d2a6716d6 -r 4c5f8dd31136ba5ba889ca25c9a3ac8602375189 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -55,6 +55,7 @@
     ParameterFileStore
 from yt.utilities.minimal_representation import \
     MinimalProjectionData, MinimalSliceData
+from yt.utilities.orientation import Orientation
 
 from .derived_quantities import DerivedQuantityCollection
 from .field_info_container import \



https://bitbucket.org/yt_analysis/yt/changeset/b0f829fa62ba/
changeset:   b0f829fa62ba
branch:      yt
user:        ngoldbaum
date:        2012-05-03 09:07:49
summary:     Don't need rotation_vector for the orientation object.
affected #:  1 file

diff -r 4c5f8dd31136ba5ba889ca25c9a3ac8602375189 -r b0f829fa62bad31c3e5fcf5c38b24324c6ee9c75 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -41,8 +41,6 @@
             t = na.cross(normal_vector, vecs).sum(axis=1)
             ax = t.argmax()
             north_vector = na.cross(vecs[ax,:], normal_vector).ravel()
-            if self.rotation_vector is None:
-                self.rotation_vector=north_vector
         else:
             if self.steady_north:
                 north_vector = north_vector - na.dot(north_vector,normal_vector)*normal_vector



https://bitbucket.org/yt_analysis/yt/changeset/2b5f02d15ac1/
changeset:   2b5f02d15ac1
branch:      yt
user:        ngoldbaum
date:        2012-05-03 22:13:12
summary:     More bugfixes; this works with the cutting plane and the normal camera.
affected #:  3 files

diff -r b0f829fa62bad31c3e5fcf5c38b24324c6ee9c75 -r 2b5f02d15ac1703bb5e94cca22e64737f024f735 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1332,7 +1332,7 @@
             This can either be a floating point value, in the native domain
             units of the simulation, or a tuple of the (value, unit) style.
             This will be the width of the FRB.
-        height : height specifier
+        height : height specifier, optional
             This will be the height of the FRB, by default it is equal to width.
         resolution : int or tuple of ints
             The number of pixels on a side of the final FRB.


diff -r b0f829fa62bad31c3e5fcf5c38b24324c6ee9c75 -r 2b5f02d15ac1703bb5e94cca22e64737f024f735 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -40,18 +40,19 @@
             vecs = na.identity(3)
             t = na.cross(normal_vector, vecs).sum(axis=1)
             ax = t.argmax()
-            north_vector = na.cross(vecs[ax,:], normal_vector).ravel()
+            east_vector = na.cross(vecs[ax,:], normal_vector).ravel()
+            north_vector = na.cross(normal_vector, east_vector).ravel()
         else:
             if self.steady_north:
                 north_vector = north_vector - na.dot(north_vector,normal_vector)*normal_vector
+            east_vector = na.cross(north_vector, normal_vector).ravel()
         north_vector /= na.sqrt(na.dot(north_vector, north_vector))
-        east_vector = -na.cross(north_vector, normal_vector).ravel()
         east_vector /= na.sqrt(na.dot(east_vector, east_vector))
         self.normal_vector = normal_vector
         self.north_vector = north_vector
         self.unit_vectors = [east_vector, north_vector, normal_vector]
         self.inv_mat = na.linalg.pinv(self.unit_vectors)
-
+        
     def look_at(self, new_center, north_vector = None):
         r"""Change the view direction based on a new focal point.
 


diff -r b0f829fa62bad31c3e5fcf5c38b24324c6ee9c75 -r 2b5f02d15ac1703bb5e94cca22e64737f024f735 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -374,6 +374,6 @@
                                self.data_source.center, self.data_source._inv_mat, indices,
                                self.data_source[item],
                                self.buff_size[0], self.buff_size[1],
-                               self.bounds).transpose()
+                               self.bounds)
         self[item] = buff
         return buff
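
The yt/utilities/orientation.py hunk in this commit changes how east and north are derived: with no north_vector given, east is now built first from the reference axis and north follows from normal x east; with an explicit north_vector, the minus sign on the cross product goes away. In the automatic case this appears to restore the same in-plane axes the cutting plane computed before the refactor, which is presumably why it now works with the cutting plane. A standalone numpy sketch of the updated logic (illustrative only, not the yt API), checking that both branches yield an orthonormal triad:

import numpy as na

def new_basis(normal_vector, north_vector=None):
    # Mirrors the updated _setup_normalized_vectors logic; steady_north is
    # assumed True whenever an explicit north_vector is supplied, as in __init__.
    normal_vector = na.array(normal_vector, dtype='float64')
    normal_vector /= na.sqrt(na.dot(normal_vector, normal_vector))
    if north_vector is None:
        vecs = na.identity(3)
        t = na.cross(normal_vector, vecs).sum(axis=1)
        ax = t.argmax()
        east_vector = na.cross(vecs[ax, :], normal_vector).ravel()
        north_vector = na.cross(normal_vector, east_vector).ravel()
    else:
        north_vector = na.array(north_vector, dtype='float64')
        north_vector -= na.dot(north_vector, normal_vector)*normal_vector
        east_vector = na.cross(north_vector, normal_vector).ravel()
    north_vector /= na.sqrt(na.dot(north_vector, north_vector))
    east_vector /= na.sqrt(na.dot(east_vector, east_vector))
    return east_vector, north_vector, normal_vector

for args in (([1., 2., 3.],), ([1., 1., 0.], [0., 0., 1.])):
    e, n, v = new_basis(*args)
    # Both branches should produce mutually orthogonal unit vectors.
    assert na.allclose([na.dot(e, n), na.dot(e, v), na.dot(n, v)], 0.0)
    assert na.allclose([na.dot(e, e), na.dot(n, n), na.dot(v, v)], 1.0)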



https://bitbucket.org/yt_analysis/yt/changeset/29b123c9da43/
changeset:   29b123c9da43
branch:      yt
user:        ngoldbaum
date:        2012-05-03 22:16:35
summary:     Updates to perspective camera so that it works with the new __init__ logic in
the base class.
affected #:  1 file

diff -r 2b5f02d15ac1703bb5e94cca22e64737f024f735 -r 29b123c9da434faefb099365d70e94876edd3560 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -511,722 +511,722 @@
 
 data_object_registry["camera"] = Camera
 
-# class InteractiveCamera(Camera):
-#     def __init__(self, center, normal_vector, width,
-#                  resolution, transfer_function,
-#                  north_vector = None, steady_north=False,
-#                  volume = None, fields = None,
-#                  log_fields = None,
-#                  sub_samples = 5, pf = None,
-#                  use_kd=True, l_max=None, no_ghost=True,
-#                  tree_type='domain',le=None, re=None):
-#         self.frames = []
-#         Camera.__init__(self, center, normal_vector, width,
-#                  resolution, transfer_function,
-#                  north_vector = north_vector, steady_north=steady_north,
-#                  volume = volume, fields = fields,
-#                  log_fields = log_fields,
-#                  sub_samples = sub_samples, pf = pf,
-#                  use_kd=use_kd, l_max=l_max, no_ghost=no_ghost,
-#                  tree_type=tree_type,le=le, re=re)
+class InteractiveCamera(Camera):
+    def __init__(self, center, normal_vector, width,
+                 resolution, transfer_function,
+                 north_vector = None, steady_north=False,
+                 volume = None, fields = None,
+                 log_fields = None,
+                 sub_samples = 5, pf = None,
+                 use_kd=True, l_max=None, no_ghost=True,
+                 tree_type='domain',le=None, re=None):
+        self.frames = []
+        Camera.__init__(self, center, normal_vector, width,
+                 resolution, transfer_function,
+                 north_vector = north_vector, steady_north=steady_north,
+                 volume = volume, fields = fields,
+                 log_fields = log_fields,
+                 sub_samples = sub_samples, pf = pf,
+                 use_kd=use_kd, l_max=l_max, no_ghost=no_ghost,
+                 tree_type=tree_type,le=le, re=re)
 
-#     def snapshot(self, fn = None, clip_ratio = None):
-#         import matplotlib
-#         matplotlib.pylab.figure(2)
-#         self.transfer_function.show()
-#         matplotlib.pylab.draw()
-#         im = Camera.snapshot(self, fn, clip_ratio)
-#         matplotlib.pylab.figure(1)
-#         matplotlib.pylab.imshow(im/im.max())
-#         matplotlib.pylab.draw()
-#         self.frames.append(im)
+    def snapshot(self, fn = None, clip_ratio = None):
+        import matplotlib
+        matplotlib.pylab.figure(2)
+        self.transfer_function.show()
+        matplotlib.pylab.draw()
+        im = Camera.snapshot(self, fn, clip_ratio)
+        matplotlib.pylab.figure(1)
+        matplotlib.pylab.imshow(im/im.max())
+        matplotlib.pylab.draw()
+        self.frames.append(im)
         
-#     def rotation(self, theta, n_steps, rot_vector=None):
-#         for frame in Camera.rotation(self, theta, n_steps, rot_vector):
-#             if frame is not None:
-#                 self.frames.append(frame)
+    def rotation(self, theta, n_steps, rot_vector=None):
+        for frame in Camera.rotation(self, theta, n_steps, rot_vector):
+            if frame is not None:
+                self.frames.append(frame)
                 
-#     def zoomin(self, final, n_steps):
-#         for frame in Camera.zoomin(self, final, n_steps):
-#             if frame is not None:
-#                 self.frames.append(frame)
+    def zoomin(self, final, n_steps):
+        for frame in Camera.zoomin(self, final, n_steps):
+            if frame is not None:
+                self.frames.append(frame)
                 
-#     def clear_frames(self):
-#         del self.frames
-#         self.frames = []
+    def clear_frames(self):
+        del self.frames
+        self.frames = []
         
-#     def save_frames(self, basename, clip_ratio=None):
-#         for i, frame in enumerate(self.frames):
-#             fn = basename + '_%04i.png'%i
-#             if clip_ratio is not None:
-#                 write_bitmap(frame, fn, clip_ratio*image.std())
-#             else:
-#                 write_bitmap(frame, fn)
+    def save_frames(self, basename, clip_ratio=None):
+        for i, frame in enumerate(self.frames):
+            fn = basename + '_%04i.png'%i
+            if clip_ratio is not None:
+                write_bitmap(frame, fn, clip_ratio*image.std())
+            else:
+                write_bitmap(frame, fn)
 
-# data_object_registry["interactive_camera"] = InteractiveCamera
+data_object_registry["interactive_camera"] = InteractiveCamera
 
-# class PerspectiveCamera(Camera):
-#     def __init__(self, center, normal_vector, width,
-#                  resolution, transfer_function,
-#                  north_vector = None, steady_north=False,
-#                  volume = None, fields = None,
-#                  log_fields = None,
-#                  sub_samples = 5, pf = None,
-#                  use_kd=True, l_max=None, no_ghost=True,
-#                  tree_type='domain', expand_factor = 1.0,
-#                  le=None, re=None):
-#         self.expand_factor = 1.0
-#         Camera.__init__(self, center, normal_vector, width,
-#                  resolution, transfer_function,
-#                  north_vector = None, steady_north=False,
-#                  volume = None, fields = None,
-#                  log_fields = None,
-#                  sub_samples = 5, pf = None,
-#                  use_kd=True, l_max=None, no_ghost=True,
-#                  tree_type='domain', le=None, re=None)
+class PerspectiveCamera(Camera):
+    def __init__(self, center, normal_vector, width,
+                 resolution, transfer_function,
+                 north_vector = None, steady_north=False,
+                 volume = None, fields = None,
+                 log_fields = None,
+                 sub_samples = 5, pf = None,
+                 use_kd=True, l_max=None, no_ghost=True,
+                 tree_type='domain', expand_factor = 1.0,
+                 le=None, re=None):
+        self.expand_factor = 1.0
+        Camera.__init__(self, center, normal_vector, width,
+                 resolution, transfer_function,
+                 north_vector = None, steady_north=False,
+                 volume = None, fields = None,
+                 log_fields = None,
+                 sub_samples = 5, pf = None,
+                 use_kd=True, l_max=None, no_ghost=True,
+                 tree_type='domain', le=None, re=None)
         
 
-#     def get_vector_plane(self, image):
-#         # We should move away from pre-generation of vectors like this and into
-#         # the usage of on-the-fly generation in the VolumeIntegrator module
-#         # We might have a different width and back_center
-#         dl = (self.back_center - self.front_center)
-#         self.front_center += dl
-#         self.back_center -= dl
-#         px = self.expand_factor*na.linspace(-self.width[0]/2.0, self.width[0]/2.0,
-#                          self.resolution[0])[:,None]
-#         py = self.expand_factor*na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
-#                          self.resolution[1])[None,:]
-#         inv_mat = self.inv_mat
-#         positions = na.zeros((self.resolution[0], self.resolution[1], 3),
-#                           dtype='float64', order='C')
-#         positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
-#         positions[:,:,1] = inv_mat[1,0]*px+inv_mat[1,1]*py+self.back_center[1]
-#         positions[:,:,2] = inv_mat[2,0]*px+inv_mat[2,1]*py+self.back_center[2]
-#         bounds = (px.min(), px.max(), py.min(), py.max())
+    def get_vector_plane(self, image):
+        # We should move away from pre-generation of vectors like this and into
+        # the usage of on-the-fly generation in the VolumeIntegrator module
+        # We might have a different width and back_center
+        dl = (self.back_center - self.front_center)
+        self.front_center += dl
+        self.back_center -= dl
+        px = self.expand_factor*na.linspace(-self.width[0]/2.0, self.width[0]/2.0,
+                         self.resolution[0])[:,None]
+        py = self.expand_factor*na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
+                         self.resolution[1])[None,:]
+        inv_mat = self.orienter.inv_mat
+        positions = na.zeros((self.resolution[0], self.resolution[1], 3),
+                          dtype='float64', order='C')
+        positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
+        positions[:,:,1] = inv_mat[1,0]*px+inv_mat[1,1]*py+self.back_center[1]
+        positions[:,:,2] = inv_mat[2,0]*px+inv_mat[2,1]*py+self.back_center[2]
+        bounds = (px.min(), px.max(), py.min(), py.max())
         
-#         # We are likely adding on an odd cutting condition here
-#         vectors = self.front_center - positions
-#         positions = self.front_center - 2.0*(((self.back_center-self.front_center)**2).sum())**0.5*vectors
-#         vectors = (self.front_center - positions)
+        # We are likely adding on an odd cutting condition here
+        vectors = self.front_center - positions
+        positions = self.front_center - 2.0*(((self.back_center-self.front_center)**2).sum())**0.5*vectors
+        vectors = (self.front_center - positions)
 
-#         vector_plane = VectorPlane(positions, vectors,
-#                                       self.back_center, bounds, image,
-#                                       self.unit_vectors[0],
-#                                       self.unit_vectors[1])
-#         return vector_plane
+        vector_plane = VectorPlane(positions, vectors,
+                                      self.back_center, bounds, image,
+                                      self.orienter.unit_vectors[0],
+                                      self.orienter.unit_vectors[1])
+        return vector_plane
 
-# def corners(left_edge, right_edge):
-#     return na.array([
-#       [left_edge[:,0], left_edge[:,1], left_edge[:,2]],
-#       [right_edge[:,0], left_edge[:,1], left_edge[:,2]],
-#       [right_edge[:,0], right_edge[:,1], left_edge[:,2]],
-#       [right_edge[:,0], right_edge[:,1], right_edge[:,2]],
-#       [left_edge[:,0], right_edge[:,1], right_edge[:,2]],
-#       [left_edge[:,0], left_edge[:,1], right_edge[:,2]],
-#       [right_edge[:,0], left_edge[:,1], right_edge[:,2]],
-#       [left_edge[:,0], right_edge[:,1], left_edge[:,2]],
-#     ], dtype='float64')
+def corners(left_edge, right_edge):
+    return na.array([
+      [left_edge[:,0], left_edge[:,1], left_edge[:,2]],
+      [right_edge[:,0], left_edge[:,1], left_edge[:,2]],
+      [right_edge[:,0], right_edge[:,1], left_edge[:,2]],
+      [right_edge[:,0], right_edge[:,1], right_edge[:,2]],
+      [left_edge[:,0], right_edge[:,1], right_edge[:,2]],
+      [left_edge[:,0], left_edge[:,1], right_edge[:,2]],
+      [right_edge[:,0], left_edge[:,1], right_edge[:,2]],
+      [left_edge[:,0], right_edge[:,1], left_edge[:,2]],
+    ], dtype='float64')
 
-# class HEALpixCamera(Camera):
-#     def __init__(self, center, radius, nside,
-#                  transfer_function = None, fields = None,
-#                  sub_samples = 5, log_fields = None, volume = None,
-#                  pf = None, use_kd=True, no_ghost=False):
-#         ParallelAnalysisInterface.__init__(self)
-#         if pf is not None: self.pf = pf
-#         self.center = na.array(center, dtype='float64')
-#         self.radius = radius
-#         self.nside = nside
-#         self.use_kd = use_kd
-#         if transfer_function is None:
-#             transfer_function = ProjectionTransferFunction()
-#         self.transfer_function = transfer_function
-#         if fields is None: fields = ["Density"]
-#         self.fields = fields
-#         self.sub_samples = sub_samples
-#         self.log_fields = log_fields
-#         if volume is None:
-#             volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
-#                                log_fields=log_fields)
-#         self.use_kd = isinstance(volume, AMRKDTree)
-#         self.volume = volume
+class HEALpixCamera(Camera):
+    def __init__(self, center, radius, nside,
+                 transfer_function = None, fields = None,
+                 sub_samples = 5, log_fields = None, volume = None,
+                 pf = None, use_kd=True, no_ghost=False):
+        ParallelAnalysisInterface.__init__(self)
+        if pf is not None: self.pf = pf
+        self.center = na.array(center, dtype='float64')
+        self.radius = radius
+        self.nside = nside
+        self.use_kd = use_kd
+        if transfer_function is None:
+            transfer_function = ProjectionTransferFunction()
+        self.transfer_function = transfer_function
+        if fields is None: fields = ["Density"]
+        self.fields = fields
+        self.sub_samples = sub_samples
+        self.log_fields = log_fields
+        if volume is None:
+            volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
+                               log_fields=log_fields)
+        self.use_kd = isinstance(volume, AMRKDTree)
+        self.volume = volume
 
-#     def snapshot(self, fn = None, clim = None):
-#         nv = 12*self.nside**2
-#         image = na.zeros((nv,1,3), dtype='float64', order='C')
-#         vs = arr_pix2vec_nest(self.nside, na.arange(nv))
-#         vs *= self.radius
-#         vs.shape = (nv,1,3)
-#         uv = na.ones(3, dtype='float64')
-#         positions = na.ones((nv, 1, 3), dtype='float64') * self.center
-#         vector_plane = VectorPlane(positions, vs, self.center,
-#                         (0.0, 1.0, 0.0, 1.0), image, uv, uv)
-#         tfp = TransferFunctionProxy(self.transfer_function)
-#         tfp.ns = self.sub_samples
-#         self.volume.initialize_source()
-#         mylog.info("Rendering equivalent of %0.2f^2 image", nv**0.5)
-#         pbar = get_pbar("Ray casting",
-#                         (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+    def snapshot(self, fn = None, clim = None):
+        nv = 12*self.nside**2
+        image = na.zeros((nv,1,3), dtype='float64', order='C')
+        vs = arr_pix2vec_nest(self.nside, na.arange(nv))
+        vs *= self.radius
+        vs.shape = (nv,1,3)
+        uv = na.ones(3, dtype='float64')
+        positions = na.ones((nv, 1, 3), dtype='float64') * self.center
+        vector_plane = VectorPlane(positions, vs, self.center,
+                        (0.0, 1.0, 0.0, 1.0), image, uv, uv)
+        tfp = TransferFunctionProxy(self.transfer_function)
+        tfp.ns = self.sub_samples
+        self.volume.initialize_source()
+        mylog.info("Rendering equivalent of %0.2f^2 image", nv**0.5)
+        pbar = get_pbar("Ray casting",
+                        (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
 
-#         total_cells = 0
-#         for brick in self.volume.traverse(None, self.center, image):
-#             brick.cast_plane(tfp, vector_plane)
-#             total_cells += na.prod(brick.my_data[0].shape)
-#             pbar.update(total_cells)
-#         pbar.finish()
+        total_cells = 0
+        for brick in self.volume.traverse(None, self.center, image):
+            brick.cast_plane(tfp, vector_plane)
+            total_cells += na.prod(brick.my_data[0].shape)
+            pbar.update(total_cells)
+        pbar.finish()
 
-#         if self.comm.rank is 0 and fn is not None:
-#             # This assumes Density; this is a relatively safe assumption.
-#             import matplotlib.figure
-#             import matplotlib.backends.backend_agg
-#             phi, theta = na.mgrid[0.0:2*pi:800j, 0:pi:800j]
-#             pixi = arr_ang2pix_nest(self.nside, theta.ravel(), phi.ravel())
-#             image *= self.radius * self.pf['cm']
-#             img = na.log10(image[:,0,0][pixi]).reshape((800,800))
+        if self.comm.rank is 0 and fn is not None:
+            # This assumes Density; this is a relatively safe assumption.
+            import matplotlib.figure
+            import matplotlib.backends.backend_agg
+            phi, theta = na.mgrid[0.0:2*pi:800j, 0:pi:800j]
+            pixi = arr_ang2pix_nest(self.nside, theta.ravel(), phi.ravel())
+            image *= self.radius * self.pf['cm']
+            img = na.log10(image[:,0,0][pixi]).reshape((800,800))
 
-#             fig = matplotlib.figure.Figure((10, 5))
-#             ax = fig.add_subplot(1,1,1,projection='mollweide')
-#             implot = ax.imshow(img, extent=(-pi,pi,-pi/2,pi/2), clip_on=False, aspect=0.5)
-#             cb = fig.colorbar(implot, orientation='horizontal')
-#             cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
-#             if clim is not None: cb.set_clim(*clim)
-#             ax.xaxis.set_ticks(())
-#             ax.yaxis.set_ticks(())
-#             canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
-#             canvas.print_figure(fn)
-#         return image
+            fig = matplotlib.figure.Figure((10, 5))
+            ax = fig.add_subplot(1,1,1,projection='mollweide')
+            implot = ax.imshow(img, extent=(-pi,pi,-pi/2,pi/2), clip_on=False, aspect=0.5)
+            cb = fig.colorbar(implot, orientation='horizontal')
+            cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
+            if clim is not None: cb.set_clim(*clim)
+            ax.xaxis.set_ticks(())
+            ax.yaxis.set_ticks(())
+            canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
+            canvas.print_figure(fn)
+        return image
 
 
-# class AdaptiveHEALpixCamera(Camera):
-#     def __init__(self, center, radius, nside,
-#                  transfer_function = None, fields = None,
-#                  sub_samples = 5, log_fields = None, volume = None,
-#                  pf = None, use_kd=True, no_ghost=False,
-#                  rays_per_cell = 0.1, max_nside = 8192):
-#         ParallelAnalysisInterface.__init__(self)
-#         if pf is not None: self.pf = pf
-#         self.center = na.array(center, dtype='float64')
-#         self.radius = radius
-#         self.use_kd = use_kd
-#         if transfer_function is None:
-#             transfer_function = ProjectionTransferFunction()
-#         self.transfer_function = transfer_function
-#         if fields is None: fields = ["Density"]
-#         self.fields = fields
-#         self.sub_samples = sub_samples
-#         self.log_fields = log_fields
-#         if volume is None:
-#             volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
-#                                log_fields=log_fields)
-#         self.use_kd = isinstance(volume, AMRKDTree)
-#         self.volume = volume
-#         self.initial_nside = nside
-#         self.rays_per_cell = rays_per_cell
-#         self.max_nside = max_nside
+class AdaptiveHEALpixCamera(Camera):
+    def __init__(self, center, radius, nside,
+                 transfer_function = None, fields = None,
+                 sub_samples = 5, log_fields = None, volume = None,
+                 pf = None, use_kd=True, no_ghost=False,
+                 rays_per_cell = 0.1, max_nside = 8192):
+        ParallelAnalysisInterface.__init__(self)
+        if pf is not None: self.pf = pf
+        self.center = na.array(center, dtype='float64')
+        self.radius = radius
+        self.use_kd = use_kd
+        if transfer_function is None:
+            transfer_function = ProjectionTransferFunction()
+        self.transfer_function = transfer_function
+        if fields is None: fields = ["Density"]
+        self.fields = fields
+        self.sub_samples = sub_samples
+        self.log_fields = log_fields
+        if volume is None:
+            volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
+                               log_fields=log_fields)
+        self.use_kd = isinstance(volume, AMRKDTree)
+        self.volume = volume
+        self.initial_nside = nside
+        self.rays_per_cell = rays_per_cell
+        self.max_nside = max_nside
 
-#     def snapshot(self, fn = None):
-#         tfp = TransferFunctionProxy(self.transfer_function)
-#         tfp.ns = self.sub_samples
-#         self.volume.initialize_source()
-#         mylog.info("Adaptively rendering.")
-#         pbar = get_pbar("Ray casting",
-#                         (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
-#         total_cells = 0
-#         bricks = [b for b in self.volume.traverse(None, self.center, None)][::-1]
-#         left_edges = na.array([b.LeftEdge for b in bricks])
-#         right_edges = na.array([b.RightEdge for b in bricks])
-#         min_dx = min(((b.RightEdge[0] - b.LeftEdge[0])/b.my_data[0].shape[0]
-#                      for b in bricks))
-#         # We jitter a bit if we're on a boundary of our initial grid
-#         for i in range(3):
-#             if bricks[0].LeftEdge[i] == self.center[i]:
-#                 self.center += 1e-2 * min_dx
-#             elif bricks[0].RightEdge[i] == self.center[i]:
-#                 self.center -= 1e-2 * min_dx
-#         ray_source = AdaptiveRaySource(self.center, self.rays_per_cell,
-#                                        self.initial_nside, self.radius,
-#                                        bricks, left_edges, right_edges, self.max_nside)
-#         for i,brick in enumerate(bricks):
-#             ray_source.integrate_brick(brick, tfp, i, left_edges, right_edges,
-#                                        bricks)
-#             total_cells += na.prod(brick.my_data[0].shape)
-#             pbar.update(total_cells)
-#         pbar.finish()
-#         info, values = ray_source.get_rays()
-#         return info, values
+    def snapshot(self, fn = None):
+        tfp = TransferFunctionProxy(self.transfer_function)
+        tfp.ns = self.sub_samples
+        self.volume.initialize_source()
+        mylog.info("Adaptively rendering.")
+        pbar = get_pbar("Ray casting",
+                        (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+        total_cells = 0
+        bricks = [b for b in self.volume.traverse(None, self.center, None)][::-1]
+        left_edges = na.array([b.LeftEdge for b in bricks])
+        right_edges = na.array([b.RightEdge for b in bricks])
+        min_dx = min(((b.RightEdge[0] - b.LeftEdge[0])/b.my_data[0].shape[0]
+                     for b in bricks))
+        # We jitter a bit if we're on a boundary of our initial grid
+        for i in range(3):
+            if bricks[0].LeftEdge[i] == self.center[i]:
+                self.center += 1e-2 * min_dx
+            elif bricks[0].RightEdge[i] == self.center[i]:
+                self.center -= 1e-2 * min_dx
+        ray_source = AdaptiveRaySource(self.center, self.rays_per_cell,
+                                       self.initial_nside, self.radius,
+                                       bricks, left_edges, right_edges, self.max_nside)
+        for i,brick in enumerate(bricks):
+            ray_source.integrate_brick(brick, tfp, i, left_edges, right_edges,
+                                       bricks)
+            total_cells += na.prod(brick.my_data[0].shape)
+            pbar.update(total_cells)
+        pbar.finish()
+        info, values = ray_source.get_rays()
+        return info, values
 
-# class StereoPairCamera(Camera):
-#     def __init__(self, original_camera, relative_separation = 0.005):
-#         ParallelAnalysisInterface.__init__(self)
-#         self.original_camera = original_camera
-#         self.relative_separation = relative_separation
+class StereoPairCamera(Camera):
+    def __init__(self, original_camera, relative_separation = 0.005):
+        ParallelAnalysisInterface.__init__(self)
+        self.original_camera = original_camera
+        self.relative_separation = relative_separation
 
-#     def split(self):
-#         oc = self.original_camera
-#         uv = oc.unit_vectors
-#         c = oc.center
-#         fc = oc.front_center
-#         wx, wy, wz = oc.width
-#         left_normal = fc + uv[1] * 0.5*self.relative_separation * wx - c
-#         right_normal = fc - uv[1] * 0.5*self.relative_separation * wx - c
-#         left_camera = Camera(c, left_normal, oc.width,
-#                              oc.resolution, oc.transfer_function, uv[0],
-#                              oc.volume, oc.fields, oc.log_fields,
-#                              oc.sub_samples, oc.pf)
-#         right_camera = Camera(c, right_normal, oc.width,
-#                              oc.resolution, oc.transfer_function, uv[0],
-#                              oc.volume, oc.fields, oc.log_fields,
-#                              oc.sub_samples, oc.pf)
-#         return (left_camera, right_camera)
+    def split(self):
+        oc = self.original_camera
+        uv = oc.unit_vectors
+        c = oc.center
+        fc = oc.front_center
+        wx, wy, wz = oc.width
+        left_normal = fc + uv[1] * 0.5*self.relative_separation * wx - c
+        right_normal = fc - uv[1] * 0.5*self.relative_separation * wx - c
+        left_camera = Camera(c, left_normal, oc.width,
+                             oc.resolution, oc.transfer_function, uv[0],
+                             oc.volume, oc.fields, oc.log_fields,
+                             oc.sub_samples, oc.pf)
+        right_camera = Camera(c, right_normal, oc.width,
+                             oc.resolution, oc.transfer_function, uv[0],
+                             oc.volume, oc.fields, oc.log_fields,
+                             oc.sub_samples, oc.pf)
+        return (left_camera, right_camera)
 
-# class FisheyeCamera(Camera):
-#     def __init__(self, center, radius, fov, resolution,
-#                  transfer_function = None, fields = None,
-#                  sub_samples = 5, log_fields = None, volume = None,
-#                  pf = None, no_ghost=False, rotation = None):
-#         ParallelAnalysisInterface.__init__(self)
-#         if rotation is None: rotation = na.eye(3)
-#         self.rotation_matrix = rotation
-#         if pf is not None: self.pf = pf
-#         self.center = na.array(center, dtype='float64')
-#         self.radius = radius
-#         self.fov = fov
-#         if iterable(resolution):
-#             raise RuntimeError("Resolution must be a single int")
-#         self.resolution = resolution
-#         if transfer_function is None:
-#             transfer_function = ProjectionTransferFunction()
-#         self.transfer_function = transfer_function
-#         if fields is None: fields = ["Density"]
-#         self.fields = fields
-#         self.sub_samples = sub_samples
-#         self.log_fields = log_fields
-#         if volume is None:
-#             volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
-#                                log_fields=log_fields)
-#         self.volume = volume
+class FisheyeCamera(Camera):
+    def __init__(self, center, radius, fov, resolution,
+                 transfer_function = None, fields = None,
+                 sub_samples = 5, log_fields = None, volume = None,
+                 pf = None, no_ghost=False, rotation = None):
+        ParallelAnalysisInterface.__init__(self)
+        if rotation is None: rotation = na.eye(3)
+        self.rotation_matrix = rotation
+        if pf is not None: self.pf = pf
+        self.center = na.array(center, dtype='float64')
+        self.radius = radius
+        self.fov = fov
+        if iterable(resolution):
+            raise RuntimeError("Resolution must be a single int")
+        self.resolution = resolution
+        if transfer_function is None:
+            transfer_function = ProjectionTransferFunction()
+        self.transfer_function = transfer_function
+        if fields is None: fields = ["Density"]
+        self.fields = fields
+        self.sub_samples = sub_samples
+        self.log_fields = log_fields
+        if volume is None:
+            volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
+                               log_fields=log_fields)
+        self.volume = volume
 
-#     def snapshot(self):
-#         image = na.zeros((self.resolution**2,1,3), dtype='float64', order='C')
-#         # We now follow figures 4-7 of:
-#         # http://paulbourke.net/miscellaneous/domefisheye/fisheye/
-#         # ...but all in Cython.
-#         vp = arr_fisheye_vectors(self.resolution, self.fov)
-#         vp.shape = (self.resolution**2,1,3)
-#         vp2 = vp.copy()
-#         for i in range(3):
-#             vp[:,:,i] = (vp2 * self.rotation_matrix[:,i]).sum(axis=2)
-#         del vp2
-#         vp *= self.radius
-#         uv = na.ones(3, dtype='float64')
-#         positions = na.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
-#         vector_plane = VectorPlane(positions, vp, self.center,
-#                         (0.0, 1.0, 0.0, 1.0), image, uv, uv)
-#         tfp = TransferFunctionProxy(self.transfer_function)
-#         tfp.ns = self.sub_samples
-#         self.volume.initialize_source()
-#         mylog.info("Rendering fisheye of %s^2", self.resolution)
-#         pbar = get_pbar("Ray casting",
-#                         (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+    def snapshot(self):
+        image = na.zeros((self.resolution**2,1,3), dtype='float64', order='C')
+        # We now follow figures 4-7 of:
+        # http://paulbourke.net/miscellaneous/domefisheye/fisheye/
+        # ...but all in Cython.
+        vp = arr_fisheye_vectors(self.resolution, self.fov)
+        vp.shape = (self.resolution**2,1,3)
+        vp2 = vp.copy()
+        for i in range(3):
+            vp[:,:,i] = (vp2 * self.rotation_matrix[:,i]).sum(axis=2)
+        del vp2
+        vp *= self.radius
+        uv = na.ones(3, dtype='float64')
+        positions = na.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
+        vector_plane = VectorPlane(positions, vp, self.center,
+                        (0.0, 1.0, 0.0, 1.0), image, uv, uv)
+        tfp = TransferFunctionProxy(self.transfer_function)
+        tfp.ns = self.sub_samples
+        self.volume.initialize_source()
+        mylog.info("Rendering fisheye of %s^2", self.resolution)
+        pbar = get_pbar("Ray casting",
+                        (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
 
-#         total_cells = 0
-#         for brick in self.volume.traverse(None, self.center, image):
-#             brick.cast_plane(tfp, vector_plane)
-#             total_cells += na.prod(brick.my_data[0].shape)
-#             pbar.update(total_cells)
-#         pbar.finish()
-#         image.shape = (self.resolution, self.resolution, 3)
-#         return image
+        total_cells = 0
+        for brick in self.volume.traverse(None, self.center, image):
+            brick.cast_plane(tfp, vector_plane)
+            total_cells += na.prod(brick.my_data[0].shape)
+            pbar.update(total_cells)
+        pbar.finish()
+        image.shape = (self.resolution, self.resolution, 3)
+        return image
 
-# class MosaicFisheyeCamera(Camera):
-#     def __init__(self, center, radius, fov, resolution, focal_center=None,
-#                  transfer_function=None, fields=None,
-#                  sub_samples=5, log_fields=None, volume=None,
-#                  pf=None, l_max=None, no_ghost=False,nimx=1, nimy=1, procs_per_wg=None,
-#                  rotation=None):
-#         r"""A fisheye lens camera, taking adantage of image plane decomposition
-#         for parallelism..
+class MosaicFisheyeCamera(Camera):
+    def __init__(self, center, radius, fov, resolution, focal_center=None,
+                 transfer_function=None, fields=None,
+                 sub_samples=5, log_fields=None, volume=None,
+                 pf=None, l_max=None, no_ghost=False,nimx=1, nimy=1, procs_per_wg=None,
+                 rotation=None):
+        r"""A fisheye lens camera, taking adantage of image plane decomposition
+        for parallelism..
 
-#         The camera represents the eye of an observer, which will be used to
-#         generate ray-cast volume renderings of the domain. In this case, the
-#         rays are defined by a fisheye lens
+        The camera represents the eye of an observer, which will be used to
+        generate ray-cast volume renderings of the domain. In this case, the
+        rays are defined by a fisheye lens.
 
-#         Parameters
-#         ----------
-#         center : array_like
-#             The current "center" of the observer, from which the rays will be
-#             cast
-#         radius : float
-#             The radial distance to cast to
-#         resolution : int
-#             The number of pixels in each direction.  Must be a single int.
-#         volume : `yt.extensions.volume_rendering.HomogenizedVolume`, optional
-#             The volume to ray cast through.  Can be specified for finer-grained
-#             control, but otherwise will be automatically generated.
-#         fields : list of fields, optional
-#             This is the list of fields we want to volume render; defaults to
-#             Density.
-#         log_fields : list of bool, optional
-#             Whether we should take the log of the fields before supplying them to
-#             the volume rendering mechanism.
-#         sub_samples : int, optional
-#             The number of samples to take inside every cell per ray.
-#         pf : `~yt.data_objects.api.StaticOutput`
-#             For now, this is a require parameter!  But in the future it will become
-#             optional.  This is the parameter file to volume render.
-#         l_max: int, optional
-#             Specifies the maximum level to be rendered.  Also
-#             specifies the maximum level used in the AMRKDTree
-#             construction.  Defaults to None (all levels), and only
-#             applies if use_kd=True.
-#         no_ghost: bool, optional
-#             Optimization option.  If True, homogenized bricks will
-#             extrapolate out from grid instead of interpolating from
-#             ghost zones that have to first be calculated.  This can
-#             lead to large speed improvements, but at a loss of
-#             accuracy/smoothness in resulting image.  The effects are
-#             less notable when the transfer function is smooth and
-#             broad. Default: False
-#         nimx: int, optional
-#             The number by which to decompose the image plane into in the x
-#             direction.  Must evenly divide the resolution.
-#         nimy: int, optional
-#             The number by which to decompose the image plane into in the y 
-#             direction.  Must evenly divide the resolution.
-#         procs_per_wg: int, optional
-#             The number of processors to use on each sub-image. Within each
-#             subplane, the volume will be decomposed using the AMRKDTree with
-#             procs_per_wg processors.  
+        Parameters
+        ----------
+        center : array_like
+            The current "center" of the observer, from which the rays will be
+            cast
+        radius : float
+            The radial distance to cast to
+        resolution : int
+            The number of pixels in each direction.  Must be a single int.
+        volume : `yt.extensions.volume_rendering.HomogenizedVolume`, optional
+            The volume to ray cast through.  Can be specified for finer-grained
+            control, but otherwise will be automatically generated.
+        fields : list of fields, optional
+            This is the list of fields we want to volume render; defaults to
+            Density.
+        log_fields : list of bool, optional
+            Whether we should take the log of the fields before supplying them to
+            the volume rendering mechanism.
+        sub_samples : int, optional
+            The number of samples to take inside every cell per ray.
+        pf : `~yt.data_objects.api.StaticOutput`
+            For now, this is a required parameter!  In the future it will become
+            optional.  This is the parameter file to volume render.
+        l_max: int, optional
+            Specifies the maximum level to be rendered.  Also
+            specifies the maximum level used in the AMRKDTree
+            construction.  Defaults to None (all levels), and only
+            applies if use_kd=True.
+        no_ghost: bool, optional
+            Optimization option.  If True, homogenized bricks will
+            extrapolate out from the grid instead of interpolating from
+            ghost zones that have to be calculated first.  This can
+            lead to large speed improvements, but at a loss of
+            accuracy/smoothness in the resulting image.  The effects are
+            less noticeable when the transfer function is smooth and
+            broad.  Default: False
+        nimx: int, optional
+            The number of sub-images into which to decompose the image plane
+            in the x direction.  Must evenly divide the resolution.
+        nimy: int, optional
+            The number of sub-images into which to decompose the image plane
+            in the y direction.  Must evenly divide the resolution.
+        procs_per_wg: int, optional
+            The number of processors to use on each sub-image. Within each
+            subplane, the volume will be decomposed using the AMRKDTree with
+            procs_per_wg processors.  
 
-#         Notes
-#         -----
-#             The product of nimx*nimy*procs_per_wg must be equal to or less than
-#             the total number of mpi processes.  
+        Notes
+        -----
+            The product of nimx*nimy*procs_per_wg must be equal to or less than
+            the total number of MPI processes.
 
-#             Unlike the non-Mosaic camera, this will only return each sub-image
-#             to the root processor of each sub-image workgroup in order to save
-#             memory.  To save the final image, one must then call
-#             MosaicFisheyeCamera.save_image('filename')
+            Unlike the non-Mosaic camera, this will only return each sub-image
+            to the root processor of each sub-image workgroup in order to save
+            memory.  To save the final image, one must then call
+            MosaicFisheyeCamera.save_image('filename')
 
-#         Examples
-#         --------
+        Examples
+        --------
 
-#         >>> from yt.mods import *
+        >>> from yt.mods import *
         
-#         >>> pf = load('DD1717')
+        >>> pf = load('DD1717')
         
-#         >>> N = 512 # Pixels (1024^2)
-#         >>> c = (pf.domain_right_edge + pf.domain_left_edge)/2. # Center
-#         >>> radius = (pf.domain_right_edge - pf.domain_left_edge)/2.
-#         >>> fov = 180.0
+        >>> N = 512 # Pixels (512^2)
+        >>> c = (pf.domain_right_edge + pf.domain_left_edge)/2. # Center
+        >>> radius = (pf.domain_right_edge - pf.domain_left_edge)/2.
+        >>> fov = 180.0
         
-#         >>> field='Density'
-#         >>> mi,ma = pf.h.all_data().quantities['Extrema']('Density')[0]
-#         >>> mi,ma = na.log10(mi), na.log10(ma)
+        >>> field='Density'
+        >>> mi,ma = pf.h.all_data().quantities['Extrema']('Density')[0]
+        >>> mi,ma = na.log10(mi), na.log10(ma)
         
-#         # You may want to comment out the above lines and manually set the min and max
-#         # of the log of the Density field. For example:
-#         # mi,ma = -30.5,-26.5
+        # You may want to comment out the above lines and manually set the min and max
+        # of the log of the Density field. For example:
+        # mi,ma = -30.5,-26.5
         
-#         # Another good place to center the camera is close to the maximum density.
-#         # v,c = pf.h.find_max('Density')
-#         # c -= 0.1*radius
+        # Another good place to center the camera is close to the maximum density.
+        # v,c = pf.h.find_max('Density')
+        # c -= 0.1*radius
         
        
-#         # Construct transfer function
-#         >>> tf = ColorTransferFunction((mi-1, ma+1),nbins=1024)
+        # Construct transfer function
+        >>> tf = ColorTransferFunction((mi-1, ma+1),nbins=1024)
         
-#         # Sample transfer function with Nc gaussians.  Use col_bounds keyword to limit
-#         # the color range to the min and max values, rather than the transfer function
-#         # bounds.
-#         >>> Nc = 5
-#         >>> tf.add_layers(Nc,w=0.005, col_bounds = (mi,ma), alpha=na.logspace(-2,0,Nc),
-#         >>>         colormap='RdBu_r')
-#         >>> 
-#         # Create the camera object. Use the keyword: no_ghost=True if a lot of time is
-#         # spent creating vertex-centered data. In this case I'm running with 8
-#         # processors, and am splitting the image plane into 4 pieces and using 2
-#         # processors on each piece.
-#         >>> cam = MosaicFisheyeCamera(c, radius, fov, N,
-#         >>>         transfer_function = tf, 
-#         >>>         sub_samples = 5, 
-#         >>>         pf=pf, 
-#         >>>         nimx=2,nimy=2,procs_per_wg=2)
+        # Sample transfer function with Nc gaussians.  Use col_bounds keyword to limit
+        # the color range to the min and max values, rather than the transfer function
+        # bounds.
+        >>> Nc = 5
+        >>> tf.add_layers(Nc,w=0.005, col_bounds = (mi,ma), alpha=na.logspace(-2,0,Nc),
+        ...         colormap='RdBu_r')
+
+        # Create the camera object. Use the keyword: no_ghost=True if a lot of time is
+        # spent creating vertex-centered data. In this case I'm running with 8
+        # processors, and am splitting the image plane into 4 pieces and using 2
+        # processors on each piece.
+        >>> cam = MosaicFisheyeCamera(c, radius, fov, N,
+        ...         transfer_function = tf,
+        ...         sub_samples = 5,
+        ...         pf=pf,
+        ...         nimx=2, nimy=2, procs_per_wg=2)
         
-#         # Take a snapshot
-#         >>> im = cam.snapshot()
+        # Take a snapshot
+        >>> im = cam.snapshot()
         
-#         # Save the image
-#         >>> cam.save_image('fisheye_mosaic.png')
+        # Save the image
+        >>> cam.save_image('fisheye_mosaic.png')
 
-#         """
+        """
 
-#         ParallelAnalysisInterface.__init__(self)
-#         self.image_decomp = self.comm.size>1
-#         if self.image_decomp:
-#             PP = ProcessorPool()
-#             npatches = nimy*nimx
-#             if procs_per_wg is None:
-#                 if (PP.size % npatches):
-#                     raise RuntimeError("Cannot evenly divide %i procs to %i patches" % (PP.size,npatches))
-#                 else:
-#                     procs_per_wg = PP.size / npatches
-#             if (PP.size != npatches*procs_per_wg):
-#                raise RuntimeError("You need %i processors to utilize %i procs per one patch in [%i,%i] grid" 
-#                      % (npatches*procs_per_wg,procs_per_wg,nimx,nimy))
+        ParallelAnalysisInterface.__init__(self)
+        self.image_decomp = self.comm.size>1
+        if self.image_decomp:
+            PP = ProcessorPool()
+            npatches = nimy*nimx
+            if procs_per_wg is None:
+                if (PP.size % npatches):
+                    raise RuntimeError("Cannot evenly divide %i procs to %i patches" % (PP.size,npatches))
+                else:
+                    procs_per_wg = PP.size / npatches
+            if (PP.size != npatches*procs_per_wg):
+               raise RuntimeError("You need %i processors to utilize %i procs per one patch in [%i,%i] grid" 
+                     % (npatches*procs_per_wg,procs_per_wg,nimx,nimy))
  
-#             for j in range(nimy):
-#                 for i in range(nimx):
-#                     PP.add_workgroup(size=procs_per_wg, name='%04i_%04i'%(i,j))
+            for j in range(nimy):
+                for i in range(nimx):
+                    PP.add_workgroup(size=procs_per_wg, name='%04i_%04i'%(i,j))
                     
-#             for wg in PP.workgroups:
-#                 if self.comm.rank in wg.ranks:
-#                     my_wg = wg
+            for wg in PP.workgroups:
+                if self.comm.rank in wg.ranks:
+                    my_wg = wg
             
-#             self.global_comm = self.comm
-#             self.comm = my_wg.comm
-#             self.wg = my_wg
-#             self.imi = int(self.wg.name[0:4])
-#             self.imj = int(self.wg.name[5:9])
-#             print 'My new communicator has the name %s' % self.wg.name
-#             self.nimx = nimx
-#             self.nimy = nimy
-#         else:
-#             self.imi = 0
-#             self.imj = 0
-#             self.nimx = 1
-#             self.nimy = 1
-#         if pf is not None: self.pf = pf
+            self.global_comm = self.comm
+            self.comm = my_wg.comm
+            self.wg = my_wg
+            self.imi = int(self.wg.name[0:4])
+            self.imj = int(self.wg.name[5:9])
+            print 'My new communicator has the name %s' % self.wg.name
+            self.nimx = nimx
+            self.nimy = nimy
+        else:
+            self.imi = 0
+            self.imj = 0
+            self.nimx = 1
+            self.nimy = 1
+        if pf is not None: self.pf = pf
         
-#         if rotation is None: rotation = na.eye(3)
-#         self.rotation_matrix = rotation
+        if rotation is None: rotation = na.eye(3)
+        self.rotation_matrix = rotation
         
-#         self.normal_vector = na.array([0.,0.,1])
-#         self.north_vector = na.array([1.,0.,0.])
-#         self.east_vector = na.array([0.,1.,0.])
-#         self.rotation_vector = self.north_vector
+        self.normal_vector = na.array([0.,0.,1])
+        self.north_vector = na.array([1.,0.,0.])
+        self.east_vector = na.array([0.,1.,0.])
+        self.rotation_vector = self.north_vector
 
-#         if iterable(resolution):
-#             raise RuntimeError("Resolution must be a single int")
-#         self.resolution = resolution
-#         self.center = na.array(center, dtype='float64')
-#         self.focal_center = focal_center
-#         self.radius = radius
-#         self.fov = fov
-#         if transfer_function is None:
-#             transfer_function = ProjectionTransferFunction()
-#         self.transfer_function = transfer_function
-#         if fields is None: fields = ["Density"]
-#         self.fields = fields
-#         self.sub_samples = sub_samples
-#         self.log_fields = log_fields
-#         if volume is None:
-#             volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
-#                                log_fields=log_fields,l_max=l_max)
-#         self.volume = volume
-#         self.vp = None
-#         self.image = None 
+        if iterable(resolution):
+            raise RuntimeError("Resolution must be a single int")
+        self.resolution = resolution
+        self.center = na.array(center, dtype='float64')
+        self.focal_center = focal_center
+        self.radius = radius
+        self.fov = fov
+        if transfer_function is None:
+            transfer_function = ProjectionTransferFunction()
+        self.transfer_function = transfer_function
+        if fields is None: fields = ["Density"]
+        self.fields = fields
+        self.sub_samples = sub_samples
+        self.log_fields = log_fields
+        if volume is None:
+            volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
+                               log_fields=log_fields,l_max=l_max)
+        self.volume = volume
+        self.vp = None
+        self.image = None 
 
-#     def get_vector_plane(self):
-#         if self.focal_center is not None:
-#             rvec =  na.array(self.focal_center) - na.array(self.center)
-#             rvec /= (rvec**2).sum()**0.5
-#             angle = na.arccos( (self.normal_vector*rvec).sum()/( (self.normal_vector**2).sum()**0.5 *
-#                 (rvec**2).sum()**0.5))
-#             rot_vector = na.cross(rvec, self.normal_vector)
-#             rot_vector /= (rot_vector**2).sum()**0.5
+    def get_vector_plane(self):
+        if self.focal_center is not None:
+            rvec =  na.array(self.focal_center) - na.array(self.center)
+            rvec /= (rvec**2).sum()**0.5
+            angle = na.arccos( (self.normal_vector*rvec).sum()/( (self.normal_vector**2).sum()**0.5 *
+                (rvec**2).sum()**0.5))
+            rot_vector = na.cross(rvec, self.normal_vector)
+            rot_vector /= (rot_vector**2).sum()**0.5
             
-#             self.rotation_matrix = get_rotation_matrix(angle,rot_vector)
-#             self.normal_vector = na.dot(self.rotation_matrix,self.normal_vector)
-#             self.north_vector = na.dot(self.rotation_matrix,self.north_vector)
-#             self.east_vector = na.dot(self.rotation_matrix,self.east_vector)
-#         else:
-#             self.focal_center = self.center + self.radius*self.normal_vector  
-#         dist = ((self.focal_center - self.center)**2).sum()**0.5
-#         # We now follow figures 4-7 of:
-#         # http://paulbourke.net/miscellaneous/domefisheye/fisheye/
-#         # ...but all in Cython.
+            self.rotation_matrix = get_rotation_matrix(angle,rot_vector)
+            self.normal_vector = na.dot(self.rotation_matrix,self.normal_vector)
+            self.north_vector = na.dot(self.rotation_matrix,self.north_vector)
+            self.east_vector = na.dot(self.rotation_matrix,self.east_vector)
+        else:
+            self.focal_center = self.center + self.radius*self.normal_vector  
+        dist = ((self.focal_center - self.center)**2).sum()**0.5
+        # We now follow figures 4-7 of:
+        # http://paulbourke.net/miscellaneous/domefisheye/fisheye/
+        # ...but all in Cython.
         
-#         self.vp = arr_fisheye_vectors(self.resolution, self.fov, self.nimx, 
-#                 self.nimy, self.imi, self.imj)
+        self.vp = arr_fisheye_vectors(self.resolution, self.fov, self.nimx, 
+                self.nimy, self.imi, self.imj)
         
-#         self.vp = rotate_vectors(self.vp, self.rotation_matrix)
+        self.vp = rotate_vectors(self.vp, self.rotation_matrix)
 
-#         self.center = self.focal_center - dist*self.normal_vector
-#         self.vp *= self.radius
-#         nx, ny = self.vp.shape[0], self.vp.shape[1]
-#         self.vp.shape = (nx*ny,1,3)
+        self.center = self.focal_center - dist*self.normal_vector
+        self.vp *= self.radius
+        nx, ny = self.vp.shape[0], self.vp.shape[1]
+        self.vp.shape = (nx*ny,1,3)
 
-#     def snapshot(self):
-#         if self.vp is None:
-#             self.get_vector_plane()
+    def snapshot(self):
+        if self.vp is None:
+            self.get_vector_plane()
 
-#         nx,ny = self.resolution/self.nimx, self.resolution/self.nimy
-#         image = na.zeros((nx*ny,1,3), dtype='float64', order='C')
-#         uv = na.ones(3, dtype='float64')
-#         positions = na.ones((nx*ny, 1, 3), dtype='float64') * self.center
-#         vector_plane = VectorPlane(positions, self.vp, self.center,
-#                         (0.0, 1.0, 0.0, 1.0), image, uv, uv)
-#         tfp = TransferFunctionProxy(self.transfer_function)
-#         tfp.ns = self.sub_samples
-#         self.volume.initialize_source()
-#         mylog.info("Rendering fisheye of %s^2", self.resolution)
-#         pbar = get_pbar("Ray casting",
-#                         (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+        nx,ny = self.resolution/self.nimx, self.resolution/self.nimy
+        image = na.zeros((nx*ny,1,3), dtype='float64', order='C')
+        uv = na.ones(3, dtype='float64')
+        positions = na.ones((nx*ny, 1, 3), dtype='float64') * self.center
+        vector_plane = VectorPlane(positions, self.vp, self.center,
+                        (0.0, 1.0, 0.0, 1.0), image, uv, uv)
+        tfp = TransferFunctionProxy(self.transfer_function)
+        tfp.ns = self.sub_samples
+        self.volume.initialize_source()
+        mylog.info("Rendering fisheye of %s^2", self.resolution)
+        pbar = get_pbar("Ray casting",
+                        (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
 
-#         total_cells = 0
-#         for brick in self.volume.traverse(None, self.center, image):
-#             brick.cast_plane(tfp, vector_plane)
-#             total_cells += na.prod(brick.my_data[0].shape)
-#             pbar.update(total_cells)
-#         pbar.finish()
-#         image.shape = (nx, ny, 3)
+        total_cells = 0
+        for brick in self.volume.traverse(None, self.center, image):
+            brick.cast_plane(tfp, vector_plane)
+            total_cells += na.prod(brick.my_data[0].shape)
+            pbar.update(total_cells)
+        pbar.finish()
+        image.shape = (nx, ny, 3)
 
-#         if self.image is not None:
-#             del self.image
-#         self.image = image
+        if self.image is not None:
+            del self.image
+        self.image = image
        
-#         return image
+        return image
 
-#     def save_image(self, fn, clip_ratio=None):
-#         if '.png' not in fn:
-#             fn = fn + '.png'
+    def save_image(self, fn, clip_ratio=None):
+        if '.png' not in fn:
+            fn = fn + '.png'
         
-#         try:
-#             image = self.image
-#         except:
-#             mylog.error('You must first take a snapshot')
-#             raise(UserWarning)
+        try:
+            image = self.image
+        except:
+            mylog.error('You must first take a snapshot')
+            raise(UserWarning)
         
-#         image = self.image
-#         nx,ny = self.resolution/self.nimx, self.resolution/self.nimy
-#         if self.image_decomp:
-#             if self.comm.rank == 0:
-#                 if self.global_comm.rank == 0:
-#                     final_image = na.empty((nx*self.nimx, 
-#                         ny*self.nimy, 3),
-#                         dtype='float64',order='C')
-#                     final_image[:nx, :ny, :] = image
-#                     for j in range(self.nimy):
-#                         for i in range(self.nimx):
-#                             if i==0 and j==0: continue
-#                             arr = self.global_comm.recv_array((self.wg.size)*(j*self.nimx + i), tag = (self.wg.size)*(j*self.nimx + i))
+        image = self.image
+        nx,ny = self.resolution/self.nimx, self.resolution/self.nimy
+        if self.image_decomp:
+            if self.comm.rank == 0:
+                if self.global_comm.rank == 0:
+                    final_image = na.empty((nx*self.nimx, 
+                        ny*self.nimy, 3),
+                        dtype='float64',order='C')
+                    final_image[:nx, :ny, :] = image
+                    for j in range(self.nimy):
+                        for i in range(self.nimx):
+                            if i==0 and j==0: continue
+                            arr = self.global_comm.recv_array((self.wg.size)*(j*self.nimx + i), tag = (self.wg.size)*(j*self.nimx + i))
 
-#                             final_image[i*nx:(i+1)*nx, j*ny:(j+1)*ny,:] = arr
-#                             del arr
-#                     if clip_ratio is not None:
-#                         write_bitmap(final_image, fn, clip_ratio*final_image.std())
-#                     else:
-#                         write_bitmap(final_image, fn)
-#                 else:
-#                     self.global_comm.send_array(image, 0, tag = self.global_comm.rank)
-#         else:
-#             if self.comm.rank == 0:
-#                 if clip_ratio is not None:
-#                     write_bitmap(image, fn, clip_ratio*image.std())
-#                 else:
-#                     write_bitmap(image, fn)
-#         return
+                            final_image[i*nx:(i+1)*nx, j*ny:(j+1)*ny,:] = arr
+                            del arr
+                    if clip_ratio is not None:
+                        write_bitmap(final_image, fn, clip_ratio*final_image.std())
+                    else:
+                        write_bitmap(final_image, fn)
+                else:
+                    self.global_comm.send_array(image, 0, tag = self.global_comm.rank)
+        else:
+            if self.comm.rank == 0:
+                if clip_ratio is not None:
+                    write_bitmap(image, fn, clip_ratio*image.std())
+                else:
+                    write_bitmap(image, fn)
+        return
 
-#     def rotate(self, theta, rot_vector=None, keep_focus=True):
-#         r"""Rotate by a given angle
+    def rotate(self, theta, rot_vector=None, keep_focus=True):
+        r"""Rotate by a given angle
 
-#         Rotate the view.  If `rot_vector` is None, rotation will occur
-#         around the `north_vector`.
+        Rotate the view.  If `rot_vector` is None, rotation will occur
+        around the `north_vector`.
 
-#         Parameters
-#         ----------
-#         theta : float, in radians
-#              Angle (in radians) by which to rotate the view.
-#         rot_vector  : array_like, optional
-#             Specify the rotation vector around which rotation will
-#             occur.  Defaults to None, which sets rotation around
-#             `north_vector`
+        Parameters
+        ----------
+        theta : float, in radians
+             Angle (in radians) by which to rotate the view.
+        rot_vector  : array_like, optional
+            Specify the rotation vector around which rotation will
+            occur.  Defaults to None, which sets rotation around
+            `north_vector`
 
-#         Examples
-#         --------
+        Examples
+        --------
 
-#         >>> cam.rotate(na.pi/4)
-#         """
-#         if rot_vector is None:
-#             rot_vector = self.north_vector
+        >>> cam.rotate(na.pi/4)
+        """
+        if rot_vector is None:
+            rot_vector = self.north_vector
         
-#         dist = ((self.focal_center - self.center)**2).sum()**0.5
+        dist = ((self.focal_center - self.center)**2).sum()**0.5
         
-#         R = get_rotation_matrix(theta, rot_vector)
+        R = get_rotation_matrix(theta, rot_vector)
 
-#         self.vp = rotate_vectors(self.vp, R)
-#         self.normal_vector = na.dot(R,self.normal_vector)
-#         self.north_vector = na.dot(R,self.north_vector)
-#         self.east_vector = na.dot(R,self.east_vector)
+        self.vp = rotate_vectors(self.vp, R)
+        self.normal_vector = na.dot(R,self.normal_vector)
+        self.north_vector = na.dot(R,self.north_vector)
+        self.east_vector = na.dot(R,self.east_vector)
 
-#         if keep_focus:
-#             self.center = self.focal_center - dist*self.normal_vector
+        if keep_focus:
+            self.center = self.focal_center - dist*self.normal_vector
 
-#     def rotation(self, theta, n_steps, rot_vector=None, keep_focus=True):
-#         r"""Loop over rotate, creating a rotation
+    def rotation(self, theta, n_steps, rot_vector=None, keep_focus=True):
+        r"""Loop over rotate, creating a rotation
 
-#         This will yield `n_steps` snapshots until the current view has been
-#         rotated by an angle `theta`
+        This will yield `n_steps` snapshots until the current view has been
+        rotated by an angle `theta`.
 
-#         Parameters
-#         ----------
-#         theta : float, in radians
-#             Angle (in radians) by which to rotate the view.
-#         n_steps : int
-#             The number of look_at snapshots to make.
-#         rot_vector  : array_like, optional
-#             Specify the rotation vector around which rotation will
-#             occur.  Defaults to None, which sets rotation around the
-#             original `north_vector`
+        Parameters
+        ----------
+        theta : float, in radians
+            Angle (in radians) by which to rotate the view.
+        n_steps : int
+            The number of look_at snapshots to make.
+        rot_vector  : array_like, optional
+            Specify the rotation vector around which rotation will
+            occur.  Defaults to None, which sets rotation around the
+            original `north_vector`
 
-#         Examples
-#         --------
+        Examples
+        --------
 
-#         >>> for i, snapshot in enumerate(cam.rotation(na.pi, 10)):
-#         ...     iw.write_bitmap(snapshot, 'rotation_%04i.png' % i)
-#         """
+        >>> for i, snapshot in enumerate(cam.rotation(na.pi, 10)):
+        ...     iw.write_bitmap(snapshot, 'rotation_%04i.png' % i)
+        """
 
-#         dtheta = (1.0*theta)/n_steps
-#         for i in xrange(n_steps):
-#             self.rotate(dtheta, rot_vector=rot_vector, keep_focus=keep_focus)
-#             yield self.snapshot()
+        dtheta = (1.0*theta)/n_steps
+        for i in xrange(n_steps):
+            self.rotate(dtheta, rot_vector=rot_vector, keep_focus=keep_focus)
+            yield self.snapshot()
 
-#     def move_to(self,final,n_steps,exponential=False):
-#         r"""Loop over a look_at
+    def move_to(self,final,n_steps,exponential=False):
+        r"""Loop over a look_at
 
-#         This will yield `n_steps` snapshots until the current view has been
-#         moved to a final center of `final`.
+        This will yield `n_steps` snapshots until the current view has been
+        moved to a final center of `final`.
 
-#         Parameters
-#         ----------
-#         final : array_like
-#             The final center to move to after `n_steps`
-#         n_steps : int
-#             The number of look_at snapshots to make.
-#         exponential : boolean
-#             Specifies whether the move/zoom transition follows an
-#             exponential path toward the destination or linear
+        Parameters
+        ----------
+        final : array_like
+            The final center to move to after `n_steps`
+        n_steps : int
+            The number of look_at snapshots to make.
+        exponential : boolean
+            Specifies whether the move/zoom transition follows an
+            exponential or a linear path toward the destination.
             
-#         Examples
-#         --------
+        Examples
+        --------
 
-#         >>> for i, snapshot in enumerate(cam.move_to([0.2,0.3,0.6], 10)):
-#         ...     cam.save_image("move_%04i.png" % i)
-#         """
+        >>> for i, snapshot in enumerate(cam.move_to([0.2,0.3,0.6], 10)):
+        ...     cam.save_image("move_%04i.png" % i)
+        """
 
-#         if exponential:
-#             position_diff = (na.array(final)/self.center)*1.0
-#             dx = position_diff**(1.0/n_steps)
-#         else:
-#             dx = (na.array(final) - self.center)*1.0/n_steps
-#         for i in xrange(n_steps):
-#             if exponential:
-#                 self.center *= dx
-#             else:
-#                 self.center += dx
-#             yield self.snapshot()
+        if exponential:
+            position_diff = (na.array(final)/self.center)*1.0
+            dx = position_diff**(1.0/n_steps)
+        else:
+            dx = (na.array(final) - self.center)*1.0/n_steps
+        for i in xrange(n_steps):
+            if exponential:
+                self.center *= dx
+            else:
+                self.center += dx
+            yield self.snapshot()
 
 def off_axis_projection(pf, center, normal_vector, width, resolution,
                         field, weight = None, volume = None, no_ghost = True,
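
The hunks above restore the StereoPairCamera, FisheyeCamera and MosaicFisheyeCamera
classes that an earlier changeset in this series had commented out.  As a rough usage
sketch (not part of the commit), the stereo camera wraps an existing Camera and splits
it into an offset left/right pair; the dataset name, field, view direction, and
transfer-function settings below are illustrative assumptions:

    import numpy as na
    from yt.mods import load
    from yt.visualization.image_writer import write_bitmap
    from yt.visualization.volume_rendering.api import ColorTransferFunction
    from yt.visualization.volume_rendering.camera import Camera, StereoPairCamera

    pf = load('DD1717')                               # illustrative dataset name
    c = (pf.domain_left_edge + pf.domain_right_edge) / 2.0
    L = na.array([0.1, 0.3, 0.9])                     # arbitrary viewing direction
    W = 0.5                                           # image plane width (domain units)
    N = 512                                           # pixels per side

    mi, ma = pf.h.all_data().quantities['Extrema']('Density')[0]
    tf = ColorTransferFunction((na.log10(mi) - 1, na.log10(ma) + 1))
    tf.add_layers(5, w=0.02)

    cam = Camera(c, L, W, N, tf, fields=['Density'], pf=pf)
    stereo = StereoPairCamera(cam, relative_separation=0.005)
    left_cam, right_cam = stereo.split()              # two Camera instances offset along the east vector
    write_bitmap(left_cam.snapshot(), 'stereo_left.png')
    write_bitmap(right_cam.snapshot(), 'stereo_right.png')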



https://bitbucket.org/yt_analysis/yt/changeset/e6ff5f2c2b30/
changeset:   e6ff5f2c2b30
branch:      yt
user:        ngoldbaum
date:        2012-05-05 08:46:11
summary:     The look_at and switch_view functionality was broken.  This fixes it.
affected #:  2 files

diff -r 29b123c9da434faefb099365d70e94876edd3560 -r e6ff5f2c2b303537dbee35afe21480ad4a64d0a6 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -53,40 +53,16 @@
         self.unit_vectors = [east_vector, north_vector, normal_vector]
         self.inv_mat = na.linalg.pinv(self.unit_vectors)
         
-    def look_at(self, new_center, north_vector = None):
-        r"""Change the view direction based on a new focal point.
-
-        This will recalculate all the necessary vectors and vector planes to orient
-        the image plane so that it points at a new location.
-
-        Parameters
-        ----------
-        new_center : array_like
-            The new "center" of the view port -- the focal point for the
-            camera.
-        north_vector : array_like, optional
-            The "up" direction for the plane of rays.  If not specific,
-            calculated automatically.
-        """
-        normal_vector = self.front_center - new_center
-        self._setup_normalized_vectors(normal_vector, north_vector)
-
-
-    def switch_orientation(self, normal_vector=None, center=None, north_vector=None):
+    def switch_orientation(self, normal_vector=None, north_vector=None):
         r"""Change the view direction based on any of the orientation parameters.
 
         This will recalculate all the necessary vectors and vector planes related
-        to a camera with new normal vectors, widths, centers, or north vectors.
+        to an orientable object.
 
         Parameters
         ----------
         normal_vector: array_like, optional
             The new looking vector.
-        width: float or array of floats, optional
-            The new width.  Can be a single value W -> [W,W,W] or an
-            array [W1, W2, W3] (left/right, top/bottom, front/back)
-        center: array_like, optional
-            Specifies the new center.
         north_vector : array_like, optional
             The 'up' direction for the plane of rays.  If not specific,
             calculated automatically.
@@ -94,7 +70,7 @@
         if north_vector is None:
             north_vector = self.north_vector
         if normal_vector is None:
-            normal_vector = self.front_center-center
+            normal_vector = self.normal_vector
         self._setup_normalized_vectors(normal_vector, north_vector)
 
         


diff -r 29b123c9da434faefb099365d70e94876edd3560 -r e6ff5f2c2b303537dbee35afe21480ad4a64d0a6 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -232,7 +232,9 @@
             The "up" direction for the plane of rays.  If not specific,
             calculated automatically.
         """
-        self.orienter.look_at(new_center, north_vector = north_vector)
+        normal_vector = self.front_center - new_center
+        self.orienter.switch_orientation(normal_vector=normal_vector,
+                                         north_vector = north_vector)
 
     def switch_view(self, normal_vector=None, width=None, center=None, north_vector=None):
         r"""Change the view based on any of the view parameters.
@@ -264,7 +266,7 @@
             north_vector = self.orienter.north_vector
         if normal_vector is None:
             normal_vector = self.front_cemter - self.center
-        self.orienter.switch_orientation(normal_vector = normal_vector, center = center,
+        self.orienter.switch_orientation(normal_vector = normal_vector,
                                          north_vector = north_vector)
         self._setup_box_properties(width, center, self.orienter.unit_vectors)
 



https://bitbucket.org/yt_analysis/yt/changeset/e7fa34220993/
changeset:   e7fa34220993
branch:      yt
user:        ngoldbaum
date:        2012-05-05 09:39:57
summary:     Adding copyright boilerplate.
affected #:  1 file

diff -r e6ff5f2c2b303537dbee35afe21480ad4a64d0a6 -r e7fa34220993cf34058bfa6da77c28ad27f2f3a2 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -1,3 +1,27 @@
+"""
+Commonly used mathematical functions.
+
+Author: Nathan Goldbaum <goldbaum at ucolick.org>
+Affiliation: UCSC Astronomy
+License:
+  Copyright (C) 2008-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
 import numpy as na
 
 from yt.funcs import *



https://bitbucket.org/yt_analysis/yt/changeset/c3ae8d65c043/
changeset:   c3ae8d65c043
branch:      yt
user:        ngoldbaum
date:        2012-05-05 09:44:30
summary:     Forgot to update descriptive blurb.
affected #:  1 file

diff -r e7fa34220993cf34058bfa6da77c28ad27f2f3a2 -r c3ae8d65c043fc9af2ecbb53ec5a02d962d3c06e yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -1,5 +1,6 @@
 """
-Commonly used mathematical functions.
+A class that manages the coordinate system for orientable data
+containers and cameras.
 
 Author: Nathan Goldbaum <goldbaum at ucolick.org>
 Affiliation: UCSC Astronomy

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


