[yt-svn] commit/yt: 8 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Thu Mar 10 06:37:55 PST 2016


8 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/416ccec2a998/
Changeset:   416ccec2a998
Branch:      yt
User:        ngoldbaum
Date:        2016-03-07 23:02:44+00:00
Summary:     Make the error message from ImageSampler get printed along with the RuntimeError
Affected #:  1 file

diff -r f5e332e8c41181fd5b1b771911e780bbf13cd527 -r 416ccec2a9984799e882a0e73e66f5d40013410a yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -281,11 +281,11 @@
         else:
             if not (vp_pos.shape[0] == vp_dir.shape[0] == image.shape[0]) or \
                not (vp_pos.shape[1] == vp_dir.shape[1] == image.shape[1]):
-                print "Bad lense shape / direction for %s" % (self.lens_type)
-                print "Shapes: (%s - %s - %s) and (%s - %s - %s)" % (
+                msg = "Bad lens shape / direction for %s\n" % (self.lens_type)
+                msg += "Shapes: (%s - %s - %s) and (%s - %s - %s)" % (
                     vp_pos.shape[0], vp_dir.shape[0], image.shape[0],
                     vp_pos.shape[1], vp_dir.shape[1], image.shape[1])
-                raise RuntimeError
+                raise RuntimeError(msg)
             self.extent_function = calculate_extent_null
             self.vector_function = generate_vector_info_null
         self.sampler = NULL
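
For context, a minimal pure-Python sketch of the pattern this patch adopts
(the function name is hypothetical, not part of yt): build the message first
and hand it to the exception, so it travels with the traceback instead of
only going to stdout.

    def check_shapes(vp_pos, vp_dir, image, lens_type="perspective"):
        # Standalone illustration mirroring the Cython check above.
        if not (vp_pos.shape[0] == vp_dir.shape[0] == image.shape[0]) or \
           not (vp_pos.shape[1] == vp_dir.shape[1] == image.shape[1]):
            msg = "Bad lens shape / direction for %s\n" % lens_type
            msg += "Shapes: (%s - %s - %s) and (%s - %s - %s)" % (
                vp_pos.shape[0], vp_dir.shape[0], image.shape[0],
                vp_pos.shape[1], vp_dir.shape[1], image.shape[1])
            raise RuntimeError(msg)  # the message is preserved with the error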


https://bitbucket.org/yt_analysis/yt/commits/990a49ba751f/
Changeset:   990a49ba751f
Branch:      yt
User:        ngoldbaum
Date:        2016-03-07 23:06:20+00:00
Summary:     Assume px, py, dz are already numpy arrays when calling zlines
Affected #:  1 file

diff -r 416ccec2a9984799e882a0e73e66f5d40013410a -r 990a49ba751ff21a731eefb63036d86d99b9d1ad yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -813,15 +813,18 @@
             z.shape = (camera.resolution[0], camera.resolution[1])
 
         if len(px.shape) == 1:
-            zlines(empty, z, px.d, py.d, dz.d, self.colors, self.color_stride)
+            zlines(empty, z, px, py, dz, self.colors, self.color_stride)
         else:
-            # For stereo-lens, two sets of pos for each eye are contained in px...pz
-            zlines(empty, z, px.d[0,:], py.d[0,:], dz.d[0,:], self.colors, self.color_stride)
-            zlines(empty, z, px.d[1,:], py.d[1,:], dz.d[1,:], self.colors, self.color_stride)
 
         if 'plane-parallel' not in str(camera.lens):
             empty.shape = (camera.resolution[0] * camera.resolution[1], 1, 4)
             z.shape = (camera.resolution[0] * camera.resolution[1], 1)
+            # For stereo-lens, two sets of pos for each eye are contained
+            # in px...pz
+            zlines(empty, z, px[0,:], py[0,:], dz[0,:], self.colors, 
+                   self.color_stride)
+            zlines(empty, z, px[1,:], py[1,:], dz[1,:], self.colors, 
+                   self.color_stride)
 
         self.zbuffer = zbuffer
         return zbuffer
@@ -1115,15 +1118,18 @@
             z.shape = (camera.resolution[0], camera.resolution[1])
 
         if len(px.shape) == 1:
-            zlines(empty, z, px.d, py.d, dz.d, self.colors, self.color_stride)
+            zlines(empty, z, px, py, dz, self.colors, self.color_stride)
         else:
-            # For stereo-lens, two sets of pos for each eye are contained in px...pz
-            zlines(empty, z, px.d[0,:], py.d[0,:], dz.d[0,:], self.colors, self.color_stride)
-            zlines(empty, z, px.d[1,:], py.d[1,:], dz.d[1,:], self.colors, self.color_stride)
 
         if 'plane-parallel' not in str(camera.lens):
             empty.shape = (camera.resolution[0] * camera.resolution[1], 1, 4)
             z.shape = (camera.resolution[0] * camera.resolution[1], 1)
+            # For stereo-lens, two sets of pos for each eye are contained
+            # in px...pz
+            zlines(empty, z, px[0,:], py[0,:], dz[0,:], self.colors,
+                   self.color_stride)
+            zlines(empty, z, px[1,:], py[1,:], dz[1,:], self.colors,
+                   self.color_stride)
 
         # Set the new zbuffer
         self.zbuffer = zbuffer
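
A small illustrative NumPy sketch (not yt API) of the array layout this
change relies on: a plane-parallel lens hands zlines 1D pixel arrays, while
a stereo lens packs one row per eye into a 2D array, hence the px[0,:] /
px[1,:] split above.

    import numpy as np

    px = np.vstack([np.arange(5),          # left-eye pixel columns
                    np.arange(5) + 100])   # right-eye pixel columns
    if len(px.shape) == 1:
        eyes = [px]                        # mono lens: a single 1D array
    else:
        eyes = [px[0, :], px[1, :]]        # stereo lens: one row per eye
    for eye in eyes:
        print(eye)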


https://bitbucket.org/yt_analysis/yt/commits/f273dc406050/
Changeset:   f273dc406050
Branch:      yt
User:        ngoldbaum
Date:        2016-03-07 23:06:55+00:00
Summary:     Remove unnecessary zbuffer reshaping in opaque sources
Affected #:  1 file

diff -r 990a49ba751ff21a731eefb63036d86d99b9d1ad -r f273dc406050370a3416db3267509fa0feb67a82 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -806,19 +806,9 @@
         camera.lens.setup_box_properties(camera)
         px, py, dz = camera.lens.project_to_plane(camera, vertices)
 
-        # Non-plane-parallel lenses only support 1D array
-        # 1D array needs to be transformed to 2D to get lines plotted
-        if 'plane-parallel' not in str(camera.lens):
-            empty.shape = (camera.resolution[0], camera.resolution[1], 4)
-            z.shape = (camera.resolution[0], camera.resolution[1])
-
         if len(px.shape) == 1:
             zlines(empty, z, px, py, dz, self.colors, self.color_stride)
         else:
-
-        if 'plane-parallel' not in str(camera.lens):
-            empty.shape = (camera.resolution[0] * camera.resolution[1], 1, 4)
-            z.shape = (camera.resolution[0] * camera.resolution[1], 1)
             # For stereo-lens, two sets of pos for each eye are contained
             # in px...pz
             zlines(empty, z, px[0,:], py[0,:], dz[0,:], self.colors, 
@@ -1111,19 +1101,9 @@
 
         # Draw the vectors
 
-        # Non-plane-parallel lenses only support 1D array
-        # 1D array needs to be transformed to 2D to get lines plotted
-        if 'plane-parallel' not in str(camera.lens):
-            empty.shape = (camera.resolution[0], camera.resolution[1], 4)
-            z.shape = (camera.resolution[0], camera.resolution[1])
-
         if len(px.shape) == 1:
             zlines(empty, z, px, py, dz, self.colors, self.color_stride)
         else:
-
-        if 'plane-parallel' not in str(camera.lens):
-            empty.shape = (camera.resolution[0] * camera.resolution[1], 1, 4)
-            z.shape = (camera.resolution[0] * camera.resolution[1], 1)
             # For stereo-lens, two sets of pos for each eye are contained
             # in px...pz
             zlines(empty, z, px[0,:], py[0,:], dz[0,:], self.colors,
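
In case the reshapes being dropped here look surprising, a tiny NumPy sketch
(illustrative only): assigning to .shape relabels the same buffer without
copying, which is why the round-trip to a 2D image layout and back could be
removed once zlines receives the 1D position arrays directly.

    import numpy as np

    empty = np.zeros((16, 1, 4))
    empty.shape = (4, 4, 4)     # same memory viewed as a 4x4 RGBA image
    empty.shape = (16, 1, 4)    # and back to the flattened layout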


https://bitbucket.org/yt_analysis/yt/commits/e1727adec3ad/
Changeset:   e1727adec3ad
Branch:      yt
User:        ngoldbaum
Date:        2016-03-07 23:07:26+00:00
Summary:     More carefully construct projected positions in project_to_plane
Affected #:  1 file

diff -r f273dc406050370a3416db3267509fa0feb67a82 -r e1727adec3ade1f769fe07476b9fd07d13c92cea yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -122,17 +122,23 @@
     def project_to_plane(self, camera, pos, res=None):
         if res is None:
             res = camera.resolution
-        dx = np.dot(pos - self.origin.d, camera.unit_vectors[1])
-        dy = np.dot(pos - self.origin.d, camera.unit_vectors[0])
-        dz = np.dot(pos - self.front_center.d, -camera.unit_vectors[2])
+
+        origin = self.origin.in_units('code_length').d
+        front_center = self.front_center.in_units('code_length').d
+        width = camera.width.in_units('code_length').d
+
+        dx = np.array(np.dot(pos - origin, camera.unit_vectors[1]))
+        dy = np.array(np.dot(pos - origin, camera.unit_vectors[0]))
+        dz = np.array(np.dot(pos - front_center, -camera.unit_vectors[2]))
         # Transpose into image coords.
-        py = (res[0]*(dx/camera.width[0].d)).astype('int')
-        px = (res[1]*(dy/camera.width[1].d)).astype('int')
+
+        py = (res[0]*(dx/width[0])).astype('int')
+        px = (res[1]*(dy/width[1])).astype('int')
         return px, py, dz
 
     def __repr__(self):
-        disp = "<Lens Object>:\n\tlens_type:plane-parallel\n\tviewpoint:%s" %\
-            (self.viewpoint)
+        disp = ("<Lens Object>:\n\tlens_type:plane-parallel\n\tviewpoint:%s" %
+                (self.viewpoint))
         return disp
 
 
@@ -226,38 +232,49 @@
     def project_to_plane(self, camera, pos, res=None):
         if res is None:
             res = camera.resolution
-        sight_vector = pos - camera.position.d
+        width = camera.width.in_units('code_length').d
+        position = camera.position.in_units('code_length').d
+
+        sight_vector = pos - position
         pos1 = sight_vector
         for i in range(0, sight_vector.shape[0]):
             sight_vector_norm = np.sqrt(np.dot(sight_vector[i], sight_vector[i]))
-            sight_vector[i] = sight_vector[i] / sight_vector_norm
+            if sight_vector_norm != 0:
+                sight_vector[i] = sight_vector[i] / sight_vector_norm
+
         sight_center = camera.position + camera.width[2] * camera.unit_vectors[2]
 
+        sight_center = sight_center.in_units('code_length').d
+
         for i in range(0, sight_vector.shape[0]):
             sight_angle_cos = np.dot(sight_vector[i], camera.unit_vectors[2])
+            # clip sight_angle_cos since floating point noise might
+            # go outside the domain of arccos
+            sight_angle_cos = np.clip(sight_angle_cos, -1.0, 1.0)
             if np.arccos(sight_angle_cos) < 0.5 * np.pi:
-                sight_length = camera.width[2] / sight_angle_cos
+                sight_length = width[2] / sight_angle_cos
             else:
                 # If the corner is on the backwards, then we put it outside of
                 # the image It can not be simply removed because it may connect
                 # to other corner within the image, which produces visible
                 # domain boundary line
-                sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2)
+                sight_length = np.sqrt(width[0]**2 + width[1]**2)
                 sight_length = sight_length / np.sqrt(1 - sight_angle_cos**2)
-            pos1[i] = camera.position + sight_length * sight_vector[i]
+            pos1[i] = position + sight_length * sight_vector[i]
 
-        dx = np.dot(pos1 - sight_center.d, camera.unit_vectors[0])
-        dy = np.dot(pos1 - sight_center.d, camera.unit_vectors[1])
-        dz = np.dot(pos - camera.position.d, camera.unit_vectors[2])
+        dx = np.dot(pos1 - sight_center, camera.unit_vectors[0])
+        dy = np.dot(pos1 - sight_center, camera.unit_vectors[1])
+        dz = np.dot(pos - position, camera.unit_vectors[2])
 
         # Transpose into image coords.
         px = (res[0] * 0.5 + res[0] / camera.width[0].d * dx).astype('int')
         py = (res[1] * 0.5 + res[1] / camera.width[1].d * dy).astype('int')
+
         return px, py, dz
 
     def __repr__(self):
-        disp = "<Lens Object>: lens_type:perspective viewpoint:%s" % \
-            (self.viewpoint)
+        disp = ("<Lens Object>:\n\tlens_type:perspective\n\tviewpoint:%s" %
+                (self.viewpoint))
         return disp
 
 
@@ -451,8 +468,8 @@
         self.viewpoint = self.front_center
 
     def __repr__(self):
-        disp = "<Lens Object>: lens_type:perspective viewpoint:%s" % \
-            (self.viewpoint)
+        disp = ("<Lens Object>:\n\tlens_type:perspective\n\tviewpoint:%s" %
+                (self.viewpoint))
         return disp
 
 
@@ -520,8 +537,9 @@
         self.viewpoint = camera.position
 
     def __repr__(self):
-        disp = "<Lens Object>: lens_type:fisheye viewpoint:%s fov:%s radius:" %\
-            (self.viewpoint, self.fov, self.radius)
+        disp = ("<Lens Object>:\n\tlens_type:fisheye\n\tviewpoint:%s"
+                "\nt\tfov:%s\n\tradius:%s" %
+                (self.viewpoint, self.fov, self.radius))
         return disp
 
     def project_to_plane(self, camera, pos, res=None):
@@ -533,26 +551,31 @@
         # vectors back onto the plane.  arr_fisheye_vectors goes from px, py to
         # vector, and we need the reverse.
         # First, we transform lpos into *relative to the camera* coordinates.
-        lpos = camera.position.d - pos
+
+        position = camera.position.in_units('code_length').d
+
+        lpos = position - pos
         lpos = lpos.dot(self.rotation_matrix)
-        # lpos = lpos.dot(self.rotation_matrix)
         mag = (lpos * lpos).sum(axis=1)**0.5
+
+        # screen out NaN values that would result from dividing by mag
+        mag[mag == 0] = 1
         lpos /= mag[:, None]
-        dz = mag / self.radius
+
+        dz = (mag / self.radius).in_units('1/code_length').d
         theta = np.arccos(lpos[:, 2])
         fov_rad = self.fov * np.pi / 180.0
         r = 2.0 * theta / fov_rad
         phi = np.arctan2(lpos[:, 1], lpos[:, 0])
         px = r * np.cos(phi)
         py = r * np.sin(phi)
-        u = camera.focus.uq
-        length_unit = u / u.d
+
         # dz is distance the ray would travel
         px = (px + 1.0) * res[0] / 2.0
         py = (py + 1.0) * res[1] / 2.0
         # px and py should be dimensionless
-        px = (u * np.rint(px) / length_unit).astype("int64")
-        py = (u * np.rint(py) / length_unit).astype("int64")
+        px = np.rint(px).astype("int64")
+        py = np.rint(py).astype("int64")
         return px, py, dz
 
 
@@ -638,11 +661,15 @@
             res = camera.resolution
         # Much of our setup here is the same as in the fisheye, except for the
         # actual conversion back to the px, py values.
-        lpos = camera.position.d - pos
-        # inv_mat = np.linalg.inv(self.rotation_matrix)
-        # lpos = lpos.dot(self.rotation_matrix)
+        position = camera.position.in_units('code_length').d
+
+        lpos = position - pos
         mag = (lpos * lpos).sum(axis=1)**0.5
+
+        # screen out NaN values that would result from dividing by mag
+        mag[mag == 0] = 1
         lpos /= mag[:, None]
+
         # originally:
         #  the x vector is cos(px) * cos(py)
         #  the y vector is sin(px) * cos(py)
@@ -654,14 +681,12 @@
         px = np.arctan2(lpos[:, 1], lpos[:, 0])
         py = np.arcsin(lpos[:, 2])
         dz = mag / self.radius
-        u = camera.focus.uq
-        length_unit = u / u.d
         # dz is distance the ray would travel
         px = ((-px + np.pi) / (2.0*np.pi)) * res[0]
         py = ((-py + np.pi/2.0) / np.pi) * res[1]
         # px and py should be dimensionless
-        px = (u * np.rint(px) / length_unit).astype("int64")
-        py = (u * np.rint(py) / length_unit).astype("int64")
+        px = np.rint(px).astype("int64")
+        py = np.rint(py).astype("int64")
         return px, py, dz
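
The two numerical guards added throughout this changeset can be seen in
isolation in the following sketch (names are illustrative, not yt API):
clip a cosine into [-1, 1] so floating point noise cannot push it outside
the domain of arccos, and replace zero-length norms before dividing.

    import numpy as np

    def safe_angles(vectors, axis):
        norms = np.sqrt((vectors * vectors).sum(axis=1))
        norms[norms == 0] = 1                     # screen out zero-length rows
        unit = vectors / norms[:, None]
        cos = np.clip(unit.dot(axis), -1.0, 1.0)  # stay inside the arccos domain
        return np.arccos(cos)

    angles = safe_angles(np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
                         np.array([1.0, 0.0, 0.0]))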
 
 


https://bitbucket.org/yt_analysis/yt/commits/8494b05deee1/
Changeset:   8494b05deee1
Branch:      yt
User:        ngoldbaum
Date:        2016-03-07 23:34:18+00:00
Summary:     Implement fixes for StereoPerspectiveLens
Affected #:  1 file

diff -r e1727adec3ade1f769fe07476b9fd07d13c92cea -r 8494b05deee1ddf124be7de92dcf777f9a0040dc yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -427,7 +427,9 @@
         normal_vec_rot = np.dot(R, normal_vec)
 
         camera_position_shift = camera.position + east_vec * disparity
-        sight_vector = pos - camera_position_shift.d
+        camera_position_shift = camera_position_shift.in_units('code_length').d
+        width = camera.width.in_units('code_length').d
+        sight_vector = pos - camera_position_shift
         pos1 = sight_vector
 
         for i in range(0, sight_vector.shape[0]):
@@ -437,21 +439,24 @@
 
         for i in range(0, sight_vector.shape[0]):
             sight_angle_cos = np.dot(sight_vector[i], normal_vec_rot)
+            # clip sight_angle_cos since floating point noise might
+            # cause it go outside the domain of arccos
+            sight_angle_cos = np.clip(sight_angle_cos, -1.0, 1.0)
             if np.arccos(sight_angle_cos) < 0.5 * np.pi:
-                sight_length = camera.width[2] / sight_angle_cos
+                sight_length = width[2] / sight_angle_cos
             else:
                 # If the corner is on the backwards, then we put it outside of
                 # the image It can not be simply removed because it may connect
                 # to other corner within the image, which produces visible
                 # domain boundary line
-                sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2)
+                sight_length = np.sqrt(width[0]**2 + width[1]**2)
                 sight_length = sight_length / np.sqrt(1 - sight_angle_cos**2)
             pos1[i] = camera_position_shift + sight_length * sight_vector[i]
 
-        dx = np.dot(pos1 - sight_center.d, east_vec_rot)
-        dy = np.dot(pos1 - sight_center.d, north_vec)
-        dz = np.dot(pos - camera_position_shift.d, normal_vec_rot)
-        
+        dx = np.dot(pos1 - sight_center, east_vec_rot)
+        dy = np.dot(pos1 - sight_center, north_vec)
+        dz = np.dot(pos - camera_position_shift, normal_vec_rot)
+
         # Transpose into image coords.
         if disparity > 0:
             px = (res0_h * 0.5 + res0_h / camera.width[0].d * dx).astype('int')
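
The recurring unit-handling pattern in these lens fixes, converting to a
fixed unit system before stripping units, looks like this in isolation (a
sketch using 'cm', since 'code_length' as used in the patch requires a
dataset-aware unit registry):

    from yt.units.yt_array import YTArray

    width = YTArray([1.0, 1.0, 1.0], 'Mpc')
    width_plain = width.in_units('cm').d   # plain ndarray on a consistent scale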


https://bitbucket.org/yt_analysis/yt/commits/c1b32f343db5/
Changeset:   c1b32f343db5
Branch:      yt
User:        ngoldbaum
Date:        2016-03-08 20:07:17+00:00
Summary:     Fix test for point sources
Affected #:  1 file

diff -r 8494b05deee1ddf124be7de92dcf777f9a0040dc -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -680,17 +680,7 @@
         camera.lens.setup_box_properties(camera)
         px, py, dz = camera.lens.project_to_plane(camera, vertices)
 
-        # Non-plane-parallel lenses only support 1D array
-        # 1D array needs to be transformed to 2D to get points plotted
-        if 'plane-parallel' not in str(camera.lens):
-            empty.shape = (camera.resolution[0], camera.resolution[1], 4)
-            z.shape = (camera.resolution[0], camera.resolution[1])
-
-        zpoints(empty, z, px.d, py.d, dz.d, self.colors, self.color_stride)
-
-        if 'plane-parallel' not in str(camera.lens):
-            empty.shape = (camera.resolution[0] * camera.resolution[1], 1, 4)
-            z.shape = (camera.resolution[0] * camera.resolution[1], 1)
+        zpoints(empty, z, px, py, dz, self.colors, self.color_stride)
 
         self.zbuffer = zbuffer
         return zbuffer


https://bitbucket.org/yt_analysis/yt/commits/972a43b4c388/
Changeset:   972a43b4c388
Branch:      yt
User:        ngoldbaum
Date:        2016-03-10 02:04:12+00:00
Summary:     Merging, clearing a conflict
Affected #:  33 files

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -843,7 +843,7 @@
    be avoided, they must be explained, even if they are only to be passed on to
    a nested function.
 
-.. _docstrings
+.. _docstrings:
 
 Docstrings
 ----------

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -246,6 +246,8 @@
     | A plane normal to a specified vector and intersecting a particular 
       coordinate.
 
+.. _region-reference:
+
 3D Objects
 """"""""""
 
@@ -256,8 +258,6 @@
       creating a Region covering the entire dataset domain.  It is effectively 
       ``ds.region(ds.domain_center, ds.domain_left_edge, ds.domain_right_edge)``.
 
-.. _region-reference:
-
 **Box Region** 
     | Class :class:`~yt.data_objects.selection_data_containers.YTRegion`
     | Usage: ``region(center, left_edge, right_edge, fields=None, ds=None, field_parameters=None, data_source=None)``

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -1,31 +1,30 @@
 import yt
 import numpy as np
 
-# Follow the simple_volume_rendering cookbook for the first part of this.
-ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # load data
+ds = yt.load("MOOSE_sample_data/out.e-s010")
 sc = yt.create_scene(ds)
 cam = sc.camera
-cam.resolution = (512, 512)
-cam.set_width(ds.domain_width/20.0)
 
-# Find the maximum density location, store it in max_c
-v, max_c = ds.find_max('density')
+# save an image at the starting position
+frame = 0
+sc.save('camera_movement_%04i.png' % frame)
+frame += 1
 
-frame = 0
-# Move to the maximum density location over 5 frames
-for _ in cam.iter_move(max_c, 5):
+# Zoom out by a factor of 2 over 5 frames
+for _ in cam.iter_zoom(0.5, 5):
     sc.render()
-    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1
 
-# Zoom in by a factor of 10 over 5 frames
-for _ in cam.iter_zoom(10.0, 5):
+# Move to the position [-10.0, 10.0, -10.0] over 5 frames
+pos = ds.arr([-10.0, 10.0, -10.0], 'code_length')
+for _ in cam.iter_move(pos, 5):
     sc.render()
-    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1
 
-# Do a rotation over 5 frames
+# Rotate by 180 degrees over 5 frames
 for _ in cam.iter_rotate(np.pi, 5):
     sc.render()
-    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -195,7 +195,11 @@
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 In this recipe, we move a camera through a domain and take multiple volume
-rendering snapshots.
+rendering snapshots. This recipe uses an unstructured mesh dataset (see
+:ref:`unstructured_mesh_rendering`), which makes it easier to visualize what 
+the Camera is doing, but you can manipulate the Camera for other dataset types 
+in exactly the same manner.
+
 See :ref:`camera_movement` for more information.
 
 .. yt_cookbook:: camera_movement.py

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 doc/source/index.rst
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -175,6 +175,7 @@
 .. toctree::
    :hidden:
 
+   intro/index
    installing
    yt Quickstart <quickstart/index>
    yt3differences

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -19,7 +19,7 @@
 * If you do not have root access on your computer, are not comfortable managing
   python packages, or are working on a supercomputer or cluster computer, you
   will probably want to use the bash all-in-one installation script.  This builds 
-  python, numpy, matplotlib, and yt from source to set up an isolated scientific 
+  Python, NumPy, Matplotlib, and yt from source to set up an isolated scientific 
   python environment inside of a single folder in your home directory. See
   :ref:`install-script` for more details.
 
@@ -35,9 +35,9 @@
   up python using a source-based package manager like `Homebrew
   <http://brew.sh>`_ or `MacPorts <http://www.macports.org/>`_ this choice will
   let you install yt using the python installed by the package manager. Similarly
-  for python environments set up via linux package managers so long as you
+  for python environments set up via Linux package managers so long as you
   have the the necessary compilers installed (e.g. the ``build-essentials``
-  package on debian and ubuntu).
+  package on Debian and Ubuntu).
 
 .. note::
   See `Parallel Computation
@@ -199,13 +199,12 @@
 
 If you do not want to install the full anaconda python distribution, you can
 install a bare-bones Python installation using miniconda.  To install miniconda,
-visit http://repo.continuum.io/miniconda/ and download a recent version of the
-``Miniconda-x.y.z`` script (corresponding to Python 2.7) for your platform and
-system architecture. Next, run the script, e.g.:
+visit http://repo.continuum.io/miniconda/ and download ``Miniconda-latest-...`` 
+script for your platform and system architecture. Next, run the script, e.g.:
 
 .. code-block:: bash
 
-  bash Miniconda-3.3.0-Linux-x86_64.sh
+  bash Miniconda-latest-Linux-x86_64.sh
 
 For both the Anaconda and Miniconda installations, make sure that the Anaconda
 ``bin`` directory is in your path, and then issue:
@@ -214,7 +213,28 @@
 
   conda install yt
 
-which will install yt along with all of its dependencies.
+which will install stable branch of yt along with all of its dependencies.
+
+If you would like to install latest development version of yt, you can download
+it from our custom anaconda channel:
+
+.. code-block:: bash
+
+  conda install -c http://use.yt/with_conda/ yt
+
+New packages for development branch are built after every pull request is
+merged. In order to make sure you are running latest version, it's recommended
+to update frequently:
+
+.. code-block:: bash
+
+  conda update -c http://use.yt/with_conda/ yt
+
+Location of our channel can be added to ``.condarc`` to avoid retyping it during
+each *conda* invocation. Please refer to `Conda Manual
+<http://conda.pydata.org/docs/config.html#channel-locations-channels>`_ for
+detailed instructions.
+
 
 Obtaining Source Code
 ^^^^^^^^^^^^^^^^^^^^^
@@ -252,7 +272,7 @@
 
   git clone https://github.com/conda/conda-recipes
 
-Then navigate to the repository root and invoke `conda build`:
+Then navigate to the repository root and invoke ``conda build``:
 
 .. code-block:: bash
 
@@ -290,7 +310,7 @@
 
 .. code-block:: bash
 
-  $ pip install numpy matplotlib cython cython h5py nose sympy
+  $ pip install numpy matplotlib cython h5py nose sympy
 
 If you're using IPython notebooks, you can install its dependencies
 with ``pip`` as well:
@@ -366,7 +386,7 @@
   yt update
 
 This will detect that you have installed yt from the mercurial repository, pull
-any changes from bitbucket, and then recompile yt if necessary.
+any changes from Bitbucket, and then recompile yt if necessary.
 
 .. _testing-installation:
 

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 doc/source/intro/index.rst
--- a/doc/source/intro/index.rst
+++ b/doc/source/intro/index.rst
@@ -49,7 +49,7 @@
 the :ref:`units system <units>` works to tag every individual field and 
 quantity with a physical unit (e.g. cm, AU, kpc, Mpc, etc.), and it describes 
 ways of analyzing multiple chronological data outputs from the same underlying 
-dataset known as :ref:`time series <time-series-analysis`.  Lastly, it includes 
+dataset known as :ref:`time series <time-series-analysis>`.  Lastly, it includes 
 information on how to enable yt to operate :ref:`in parallel over multiple 
 processors simultaneously <parallel-computation>`.
 

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 doc/source/reference/index.rst
--- a/doc/source/reference/index.rst
+++ b/doc/source/reference/index.rst
@@ -14,5 +14,6 @@
    command-line
    api/api
    configuration
+   python_introduction
    field_list
    changelog

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
--- a/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
+++ b/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
@@ -4,7 +4,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Here, we explain how to use TransferFunctionHelper to visualize and interpret yt volume rendering transfer functions.  TransferFunctionHelper is a utility class that makes it easy to visualize he probability density functions of yt fields that you might want to volume render.  This makes it easier to choose a nice transfer function that highlights interesting physical regimes.\n",
+    "Here, we explain how to use TransferFunctionHelper to visualize and interpret yt volume rendering transfer functions.  Creating a custom transfer function is a process that usually involves some trial-and-error. TransferFunctionHelper is a utility class designed to help you visualize the probability density functions of yt fields that you might want to volume render.  This makes it easier to choose a nice transfer function that highlights interesting physical regimes.\n",
     "\n",
     "First, we set up our namespace and define a convenience function to display volume renderings inline in the notebook.  Using `%matplotlib inline` makes it so matplotlib plots display inline in the notebook."
    ]
@@ -132,8 +132,8 @@
     "tfh.set_log(True)\n",
     "tfh.build_transfer_function()\n",
     "tfh.tf.add_layers(8, w=0.01, mi=4.0, ma=8.0, col_bounds=[4.,8.], alpha=np.logspace(-1,2,7), colormap='RdBu_r')\n",
-    "tfh.tf.map_to_colormap(6.0, 8.0, colormap='Reds', scale=10.0)\n",
-    "tfh.tf.map_to_colormap(-1.0, 6.0, colormap='Blues_r', scale=1.)\n",
+    "tfh.tf.map_to_colormap(6.0, 8.0, colormap='Reds')\n",
+    "tfh.tf.map_to_colormap(-1.0, 6.0, colormap='Blues_r')\n",
     "\n",
     "tfh.plot(profile_field='cell_mass')"
    ]
@@ -142,7 +142,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Finally, let's take a look at the volume rendering. First use the helper function to create a default rendering, then we override this with the transfer function we just created."
+    "Let's take a look at the volume rendering. First use the helper function to create a default rendering, then we override this with the transfer function we just created."
    ]
   },
   {
@@ -166,7 +166,55 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "We can clearly see that the hot gas is mostly associated with bound structures while the cool gas is associated with low-density voids."
+    "That looks okay, but the red gas (associated with temperatures between 1e6 and 1e8 K) is a bit hard to see in the image. To fix this, we can make that gas contribute a larger alpha value to the image by using the ``scale`` keyword argument in ``map_to_colormap``."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "tfh2 = TransferFunctionHelper(ds)\n",
+    "tfh2.set_field('temperature')\n",
+    "tfh2.set_bounds()\n",
+    "tfh2.set_log(True)\n",
+    "tfh2.build_transfer_function()\n",
+    "tfh2.tf.add_layers(8, w=0.01, mi=4.0, ma=8.0, col_bounds=[4.,8.], alpha=np.logspace(-1,2,7), colormap='RdBu_r')\n",
+    "tfh2.tf.map_to_colormap(6.0, 8.0, colormap='Reds', scale=5.0)\n",
+    "tfh2.tf.map_to_colormap(-1.0, 6.0, colormap='Blues_r', scale=1.0)\n",
+    "\n",
+    "tfh2.plot(profile_field='cell_mass')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note that the height of the red portion of the transfer function has increased by a factor of 5.0. If we use this transfer function to make the final image:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "source.set_transfer_function(tfh2.tf)\n",
+    "im3 = sc.render()\n",
+    "\n",
+    "showme(im3[:,:,:3])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The red gas is now much more prominant in the image. We can clearly see that the hot gas is mostly associated with bound structures while the cool gas is associated with low-density voids."
    ]
   }
  ],

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 doc/source/visualizing/index.rst
--- a/doc/source/visualizing/index.rst
+++ b/doc/source/visualizing/index.rst
@@ -16,7 +16,6 @@
    manual_plotting
    volume_rendering
    unstructured_mesh_rendering
-   hardware_volume_rendering
    sketchfab
    mapserver
    streamlines

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -236,12 +236,13 @@
 The :class:`~yt.visualization.volume_rendering.camera.Camera` object
 is what it sounds like, a camera within the Scene.  It possesses the 
 quantities:
- * :meth:`~yt.visualization.volume_rendering.camera.Camera.position` - the position of the camera in scene-space
- * :meth:`~yt.visualization.volume_rendering.camera.Camera.width` - the width of the plane the camera can see
- * :meth:`~yt.visualization.volume_rendering.camera.Camera.focus` - the point in space the camera is looking at
- * :meth:`~yt.visualization.volume_rendering.camera.Camera.resolution` - the image resolution
- * ``north_vector`` - a vector defining the "up" direction in an image
- * :ref:`lens <lenses>` - an object controlling how rays traverse the Scene
+ 
+* :meth:`~yt.visualization.volume_rendering.camera.Camera.position` - the position of the camera in scene-space
+* :meth:`~yt.visualization.volume_rendering.camera.Camera.width` - the width of the plane the camera can see
+* :meth:`~yt.visualization.volume_rendering.camera.Camera.focus` - the point in space the camera is looking at
+* :meth:`~yt.visualization.volume_rendering.camera.Camera.resolution` - the image resolution
+* ``north_vector`` - a vector defining the "up" direction in an image
+* :ref:`lens <lenses>` - an object controlling how rays traverse the Scene
 
 .. _camera_movement:
 
@@ -482,7 +483,7 @@
 their combination, are described below.
 
 MPI Parallelization
-+++++++++++++++++++
+^^^^^^^^^^^^^^^^^^^
 
 Currently the volume renderer is parallelized using MPI to decompose the volume
 by attempting to split up the
@@ -516,7 +517,7 @@
 For more information about enabling parallelism, see :ref:`parallel-computation`.
 
 OpenMP Parallelization
-++++++++++++++++++++++
+^^^^^^^^^^^^^^^^^^^^^^
 
 The volume rendering also parallelized using the OpenMP interface in Cython.
 While the MPI parallelization is done using domain decomposition, the OpenMP
@@ -532,7 +533,7 @@
 by default by modifying the environment variable OMP_NUM_THREADS. 
 
 Running in Hybrid MPI + OpenMP
-++++++++++++++++++++++++++++++
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 The two methods for volume rendering parallelization can be used together to
 leverage large supercomputing resources.  When choosing how to balance the

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 tests/nose_runner.py
--- a/tests/nose_runner.py
+++ b/tests/nose_runner.py
@@ -1,54 +1,100 @@
 import sys
 import os
 import yaml
-import multiprocessing as mp
+import multiprocessing
 import nose
-import glob
-from contextlib import closing
+from cStringIO import StringIO
 from yt.config import ytcfg
 from yt.utilities.answer_testing.framework import AnswerTesting
 
 
-def run_job(argv):
-    with closing(open(str(os.getpid()) + ".out", "w")) as fstderr:
-        cur_stderr = sys.stderr
-        sys.stderr = fstderr
-        answer = argv[0]
+class NoseWorker(multiprocessing.Process):
+
+    def __init__(self, task_queue, result_queue):
+        multiprocessing.Process.__init__(self)
+        self.task_queue = task_queue
+        self.result_queue = result_queue
+
+    def run(self):
+        proc_name = self.name
+        while True:
+            next_task = self.task_queue.get()
+            if next_task is None:
+                print("%s: Exiting" % proc_name)
+                self.task_queue.task_done()
+                break
+            print '%s: %s' % (proc_name, next_task)
+            result = next_task()
+            self.task_queue.task_done()
+            self.result_queue.put(result)
+        return
+
+class NoseTask(object):
+    def __init__(self, argv):
+        self.argv = argv
+        self.name = argv[0]
+
+    def __call__(self):
+        old_stderr = sys.stderr
+        sys.stderr = mystderr = StringIO()
         test_dir = ytcfg.get("yt", "test_data_dir")
         answers_dir = os.path.join(test_dir, "answers")
-        if not os.path.isdir(os.path.join(answers_dir, answer)):
-            nose.run(argv=argv + ['--answer-store'],
+        if '--with-answer-testing' in self.argv and \
+                not os.path.isdir(os.path.join(answers_dir, self.name)):
+            nose.run(argv=self.argv + ['--answer-store'],
                      addplugins=[AnswerTesting()], exit=False)
-        nose.run(argv=argv, addplugins=[AnswerTesting()], exit=False)
-    sys.stderr = cur_stderr
+        nose.run(argv=self.argv, addplugins=[AnswerTesting()], exit=False)
+        sys.stderr = old_stderr
+        return mystderr.getvalue()
 
-if __name__ == "__main__":
+    def __str__(self):
+        return 'WILL DO self.name = %s' % self.name
+
+
+def generate_tasks_input():
     test_dir = ytcfg.get("yt", "test_data_dir")
     answers_dir = os.path.join(test_dir, "answers")
     with open('tests/tests_%i.%i.yaml' % sys.version_info[:2], 'r') as obj:
         tests = yaml.load(obj)
 
-    base_argv = ['--local-dir=%s' % answers_dir, '-v', '-s', '--nologcapture',
+    base_argv = ['--local-dir=%s' % answers_dir, '-v',
                  '--with-answer-testing', '--answer-big-data', '--local']
-    args = [['unittests', '-v', '-s', '--nologcapture']]
-    for answer in list(tests.keys()):
+    args = []
+
+    for test in list(tests["other_tests"].keys()):
+        args.append([test] + tests["other_tests"][test])
+    for answer in list(tests["answer_tests"].keys()):
         argv = [answer]
         argv += base_argv
-        argv.append('--xunit-file=%s.xml' % answer)
         argv.append('--answer-name=%s' % answer)
-        argv += tests[answer]
+        argv += tests["answer_tests"][answer]
         args.append(argv)
-    
-    processes = [mp.Process(target=run_job, args=(args[i],))
-                 for i in range(len(args))]
-    for p in processes:
-        p.start()
-    for p in processes:
-        p.join(timeout=7200)
-        if p.is_alive():
-            p.terminate()
-            p.join(timeout=30)
-    for fname in glob.glob("*.out"):
-        with open(fname, 'r') as fin:
-            print(fin.read())
-        os.remove(fname)
+
+    args = [item + ['-s', '--nologcapture', '--xunit-file=%s.xml' % item[0]]
+            for item in args]
+    return args
+
+if __name__ == "__main__":
+    # multiprocessing.log_to_stderr(logging.DEBUG)
+    tasks = multiprocessing.JoinableQueue()
+    results = multiprocessing.Queue()
+
+    num_consumers = 6  # TODO 
+    consumers = [NoseWorker(tasks, results) for i in range(num_consumers)]
+    for w in consumers:
+        w.start()
+
+    num_jobs = 0
+    for job in generate_tasks_input():
+        tasks.put(NoseTask(job))
+        num_jobs += 1
+
+    for i in range(num_consumers):
+        tasks.put(None)
+
+    tasks.join()
+
+    while num_jobs:
+        result = results.get()
+        print(result)
+        num_jobs -= 1
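
The rewritten runner follows the standard multiprocessing producer/consumer
pattern; a minimal self-contained sketch of that pattern (no nose, the task
bodies are illustrative) is:

    import multiprocessing

    class Worker(multiprocessing.Process):
        def __init__(self, tasks, results):
            multiprocessing.Process.__init__(self)
            self.tasks, self.results = tasks, results

        def run(self):
            while True:
                task = self.tasks.get()
                if task is None:               # poison pill: shut this worker down
                    self.tasks.task_done()
                    break
                self.results.put(task * task)  # stand-in for running a test suite
                self.tasks.task_done()

    if __name__ == "__main__":
        tasks = multiprocessing.JoinableQueue()
        results = multiprocessing.Queue()
        workers = [Worker(tasks, results) for _ in range(2)]
        for w in workers:
            w.start()
        for job in range(4):
            tasks.put(job)
        for _ in workers:
            tasks.put(None)                    # one poison pill per worker
        tasks.join()
        for _ in range(4):
            print(results.get())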

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 tests/tests_2.7.yaml
--- a/tests/tests_2.7.yaml
+++ b/tests/tests_2.7.yaml
@@ -1,51 +1,63 @@
-local_artio_270:
-  - yt/frontends/artio/tests/test_outputs.py
+answer_tests:
+  local_artio_270:
+    - yt/frontends/artio/tests/test_outputs.py
 
-local_athena_270:
-  - yt/frontends/athena
+  local_athena_270:
+    - yt/frontends/athena
 
-local_chombo_270:
-  - yt/frontends/chombo/tests/test_outputs.py
+  local_chombo_270:
+    - yt/frontends/chombo/tests/test_outputs.py
 
-local_enzo_270:
-  - yt/frontends/enzo
+  local_enzo_270:
+    - yt/frontends/enzo
 
-local_fits_270:
-  - yt/frontends/fits/tests/test_outputs.py
+  local_fits_270:
+    - yt/frontends/fits/tests/test_outputs.py
 
-local_flash_270:
-  - yt/frontends/flash/tests/test_outputs.py
+  local_flash_270:
+    - yt/frontends/flash/tests/test_outputs.py
 
-local_gadget_270:
-  - yt/frontends/gadget/tests/test_outputs.py
+  local_gadget_270:
+    - yt/frontends/gadget/tests/test_outputs.py
 
-local_halos_270:
-  - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
-  - yt/analysis_modules/halo_finding/tests/test_rockstar.py
-  - yt/frontends/owls_subfind/tests/test_outputs.py
+  local_halos_270:
+    - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
+    - yt/analysis_modules/halo_finding/tests/test_rockstar.py
+    - yt/frontends/owls_subfind/tests/test_outputs.py
+  
+  local_owls_270:
+    - yt/frontends/owls/tests/test_outputs.py
+  
+  local_pw_270:
+    - yt/visualization/tests/test_plotwindow.py:test_attributes
+    - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+    - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
+    - yt/visualization/tests/test_particle_plot.py:test_particle_projection_answers
+    - yt/visualization/tests/test_particle_plot.py:test_particle_projection_filter
+    - yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers
+  
+  local_tipsy_270:
+    - yt/frontends/tipsy/tests/test_outputs.py
+  
+  local_varia_270:
+    - yt/analysis_modules/radmc3d_export
+    - yt/frontends/moab/tests/test_c5.py
+    - yt/analysis_modules/photon_simulator/tests/test_spectra.py
+    - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+    - yt/visualization/volume_rendering/tests/test_vr_orientation.py
 
-local_owls_270:
-  - yt/frontends/owls/tests/test_outputs.py
+  local_orion_270:
+    - yt/frontends/boxlib/tests/test_orion.py
+  
+  local_ramses_270:
+    - yt/frontends/ramses/tests/test_outputs.py
+  
+  local_ytdata_270:
+    - yt/frontends/ytdata
 
-local_pw_270:
-  - yt/visualization/tests/test_plotwindow.py:test_attributes
-  - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
-
-local_tipsy_270:
-  - yt/frontends/tipsy/tests/test_outputs.py
-
-local_varia_270:
-  - yt/analysis_modules/radmc3d_export
-  - yt/frontends/moab/tests/test_c5.py
-  - yt/analysis_modules/photon_simulator/tests/test_spectra.py
-  - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
-  - yt/visualization/volume_rendering/tests/test_vr_orientation.py
-
-local_orion_270:
-  - yt/frontends/boxlib/tests/test_orion.py
-
-local_ramses_270:
-  - yt/frontends/ramses/tests/test_outputs.py
-
-local_ytdata_270:
-  - yt/frontends/ytdata
\ No newline at end of file
+other_tests:
+  unittests:
+     - '-v'
+  cookbook:
+     - '-v'
+     - 'doc/source/cookbook/tests/test_cookbook.py'

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 tests/tests_3.4.yaml
--- a/tests/tests_3.4.yaml
+++ b/tests/tests_3.4.yaml
@@ -1,49 +1,57 @@
-local_artio_340:
-  - yt/frontends/artio/tests/test_outputs.py
+answer_tests:
+  local_artio_340:
+    - yt/frontends/artio/tests/test_outputs.py
 
-local_athena_340:
-  - yt/frontends/athena
+  local_athena_340:
+    - yt/frontends/athena
 
-local_chombo_340:
-  - yt/frontends/chombo/tests/test_outputs.py
+  local_chombo_340:
+    - yt/frontends/chombo/tests/test_outputs.py
 
-local_enzo_340:
-  - yt/frontends/enzo
+  local_enzo_340:
+    - yt/frontends/enzo
 
-local_fits_340:
-  - yt/frontends/fits/tests/test_outputs.py
+  local_fits_340:
+    - yt/frontends/fits/tests/test_outputs.py
 
-local_flash_340:
-  - yt/frontends/flash/tests/test_outputs.py
+  local_flash_340:
+    - yt/frontends/flash/tests/test_outputs.py
 
-local_gadget_340:
-  - yt/frontends/gadget/tests/test_outputs.py
+  local_gadget_340:
+    - yt/frontends/gadget/tests/test_outputs.py
 
-local_halos_340:
-  - yt/frontends/owls_subfind/tests/test_outputs.py
+  local_halos_340:
+    - yt/frontends/owls_subfind/tests/test_outputs.py
 
-local_owls_340:
-  - yt/frontends/owls/tests/test_outputs.py
+  local_owls_340:
+    - yt/frontends/owls/tests/test_outputs.py
 
-local_pw_340:
-  - yt/visualization/tests/test_plotwindow.py:test_attributes
-  - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+  local_pw_340:
+    - yt/visualization/tests/test_plotwindow.py:test_attributes
+    - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
 
-local_tipsy_340:
-  - yt/frontends/tipsy/tests/test_outputs.py
+  local_tipsy_340:
+    - yt/frontends/tipsy/tests/test_outputs.py
 
-local_varia_340:
-  - yt/analysis_modules/radmc3d_export
-  - yt/frontends/moab/tests/test_c5.py
-  - yt/analysis_modules/photon_simulator/tests/test_spectra.py
-  - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
-  - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+  local_varia_340:
+    - yt/analysis_modules/radmc3d_export
+    - yt/frontends/moab/tests/test_c5.py
+    - yt/analysis_modules/photon_simulator/tests/test_spectra.py
+    - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+    - yt/visualization/volume_rendering/tests/test_vr_orientation.py
 
-local_orion_340:
-  - yt/frontends/boxlib/tests/test_orion.py
+  local_orion_340:
+    - yt/frontends/boxlib/tests/test_orion.py
 
-local_ramses_340:
-  - yt/frontends/ramses/tests/test_outputs.py
+  local_ramses_340:
+    - yt/frontends/ramses/tests/test_outputs.py
 
-local_ytdata_340:
-  - yt/frontends/ytdata
\ No newline at end of file
+  local_ytdata_340:
+    - yt/frontends/ytdata
+
+other_tests:
+  unittests:
+    - '-v'
+  cookbook:
+    - 'doc/source/cookbook/tests/test_cookbook.py'
+    - '-P'

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -860,6 +860,7 @@
     """
     _type_name = "smoothed_covering_grid"
     filename = None
+    _min_level = None
     @wraps(YTCoveringGrid.__init__)
     def __init__(self, *args, **kwargs):
         ds = kwargs['ds']
@@ -886,12 +887,47 @@
         self._pdata_source.min_level = level_state.current_level
         self._pdata_source.max_level = level_state.current_level
 
+    def _compute_minimum_level(self):
+        # This attempts to determine the minimum level that we should be
+        # starting on for this box.  It does this by identifying the minimum
+        # level that could contribute to the minimum bounding box at that
+        # level; that means that all cells from coarser levels will be replaced.
+        if self._min_level is not None:
+            return self._min_level
+        ils = LevelState()
+        min_level = 0
+        for l in range(self.level, 0, -1):
+            dx = self._base_dx / self.ds.relative_refinement(0, l)
+            start_index, end_index, dims = self._minimal_box(dx)
+            ils.left_edge = start_index * dx + self.ds.domain_left_edge
+            ils.right_edge = ils.left_edge + dx * dims
+            ils.current_dx = dx
+            ils.current_level = l
+            self._setup_data_source(ils)
+            # Reset the max_level
+            ils.data_source.min_level = 0
+            ils.data_source.max_level = l
+            ils.data_source.loose_selection = False
+            min_level = self.level
+            for chunk in ils.data_source.chunks([], "io"):
+                # With our odd selection methods, we can sometimes get no-sized ires.
+                ir = chunk.ires
+                if ir.size == 0: continue
+                min_level = min(ir.min(), min_level)
+            if min_level >= l:
+                break
+        self._min_level = min_level
+        return min_level
 
     def _fill_fields(self, fields):
         fields = [f for f in fields if f not in self.field_data]
         if len(fields) == 0: return
         ls = self._initialize_level_state(fields)
+        min_level = self._compute_minimum_level()
         for level in range(self.level + 1):
+            if level < min_level:
+                self._update_level_state(ls)
+                continue
             domain_dims = self.ds.domain_dimensions.astype("int64") \
                         * self.ds.relative_refinement(0, ls.current_level)
             tot = ls.current_dims.prod()

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -43,6 +43,15 @@
     PlutoFieldInfo
 
 
+def is_chombo_hdf5(fn):
+    try:
+        with h5py.File(fn, 'r') as fileh:
+            valid = "Chombo_global" in fileh["/"]
+    except (KeyError, IOError, ImportError):
+        return False
+    return valid
+
+
 class ChomboGrid(AMRGridPatch):
     _id_offset = 0
     __slots__ = ["_level_id", "stop_index"]
@@ -351,6 +360,9 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
 
+        if not is_chombo_hdf5(args[0]):
+            return False
+
         pluto_ini_file_exists = False
         orion2_ini_file_exists = False
 
@@ -507,6 +519,9 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
 
+        if not is_chombo_hdf5(args[0]):
+            return False
+
         pluto_ini_file_exists = False
 
         if isinstance(args[0], six.string_types):
@@ -649,6 +664,9 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
 
+        if not is_chombo_hdf5(args[0]):
+            return False
+
         pluto_ini_file_exists = False
         orion2_ini_file_exists = False
 
@@ -703,6 +721,9 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
 
+        if not is_chombo_hdf5(args[0]):
+            return False
+
         pluto_ini_file_exists = False
         orion2_ini_file_exists = False
 

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -860,3 +860,55 @@
         return 'unitary'
     else:
         return u
+
+def get_hash(infile, algorithm='md5', BLOCKSIZE=65536):
+    """Generate file hash without reading in the entire file at once.
+
+    Original code licensed under MIT.  Source:
+    http://pythoncentral.io/hashing-files-with-python/
+
+    Parameters
+    ----------
+    infile : str
+        File of interest (including the path).
+    algorithm : str (optional)
+        Hash algorithm of choice. Defaults to 'md5'.
+    BLOCKSIZE : int (optional)
+        How much data in bytes to read in at once.
+
+    Returns
+    -------
+    hash : str
+        The hash of the file.
+
+    Examples
+    --------
+    >>> import yt.funcs as funcs
+    >>> funcs.get_hash('/path/to/test.png')
+    'd38da04859093d430fa4084fd605de60'
+
+    """
+    import hashlib
+
+    try:
+        hasher = getattr(hashlib, algorithm)()
+    except:
+        raise NotImplementedError("'%s' not available!  Available algorithms: %s" %
+                                  (algorithm, hashlib.algorithms))
+
+    filesize   = os.path.getsize(infile)
+    iterations = int(float(filesize)/float(BLOCKSIZE))
+
+    pbar = get_pbar('Generating %s hash' % algorithm, iterations)
+
+    iter = 0
+    with open(infile,'rb') as f:
+        buf = f.read(BLOCKSIZE)
+        while len(buf) > 0:
+            hasher.update(buf)
+            buf = f.read(BLOCKSIZE)
+            iter += 1
+            pbar.update(iter)
+        pbar.finish()
+
+    return hasher.hexdigest()

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -18,7 +18,7 @@
 cimport numpy as np
 cimport cython
 from libc.stdlib cimport malloc, free
-from yt.utilities.lib.fp_utils cimport fclip, iclip, fmax, fmin
+from yt.utilities.lib.fp_utils cimport fclip, iclip, fmax, fmin, imin, imax
 from .oct_container cimport OctreeContainer, OctAllocationContainer, Oct
 cimport oct_visitors
 from .oct_visitors cimport cind
@@ -806,6 +806,7 @@
     cdef np.float64_t right_edge[3]
     cdef np.float64_t right_edge_shift[3]
     cdef bint loose_selection
+    cdef bint check_period
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -813,16 +814,17 @@
         cdef int i
         # We are modifying dobj.left_edge and dobj.right_edge , so here we will
         # do an in-place conversion of those arrays.
-        cdef np.ndarray[np.float64_t, ndim=1] RE = _ensure_code(dobj.right_edge)
-        cdef np.ndarray[np.float64_t, ndim=1] LE = _ensure_code(dobj.left_edge)
-        cdef np.ndarray[np.float64_t, ndim=1] DW = _ensure_code(dobj.ds.domain_width)
-        cdef np.ndarray[np.float64_t, ndim=1] DLE = _ensure_code(dobj.ds.domain_left_edge)
-        cdef np.ndarray[np.float64_t, ndim=1] DRE = _ensure_code(dobj.ds.domain_right_edge)
+        cdef np.float64_t[:] RE = _ensure_code(dobj.right_edge)
+        cdef np.float64_t[:] LE = _ensure_code(dobj.left_edge)
+        cdef np.float64_t[:] DW = _ensure_code(dobj.ds.domain_width)
+        cdef np.float64_t[:] DLE = _ensure_code(dobj.ds.domain_left_edge)
+        cdef np.float64_t[:] DRE = _ensure_code(dobj.ds.domain_right_edge)
         cdef np.float64_t region_width[3]
         cdef bint p[3]
         # This is for if we want to include zones that overlap and whose
         # centers are not strictly included.
         self.loose_selection = getattr(dobj, "loose_selection", False)
+        self.check_period = False
 
         for i in range(3):
             region_width[i] = RE[i] - LE[i]
@@ -836,6 +838,11 @@
         for i in range(3):
 
             if p[i]:
+                # First, we check if any criteria requires a period check,
+                # without any adjustments.  This is for short-circuiting the
+                # short-circuit of the loop down below in mask filling.
+                if LE[i] < DLE[i] or LE[i] > DRE[i] or RE[i] > DRE[i]:
+                    self.check_period = True
                 # shift so left_edge guaranteed in domain
                 if LE[i] < DLE[i]:
                     LE[i] += DW[i]
@@ -902,6 +909,52 @@
                 return 0
         return 1
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int fill_mask_selector(self, np.float64_t left_edge[3],
+                                np.float64_t right_edge[3],
+                                np.float64_t dds[3], int dim[3],
+                                np.ndarray[np.uint8_t, ndim=3, cast=True] child_mask,
+                                np.ndarray[np.uint8_t, ndim=3] mask,
+                                int level):
+        cdef int i, j, k
+        cdef int total = 0, this_level = 0
+        cdef np.float64_t pos[3]
+        if level < self.min_level or level > self.max_level:
+            return 0
+        if level == self.max_level:
+            this_level = 1
+        cdef int si[3], ei[3]
+        #print self.left_edge[0], self.left_edge[1], self.left_edge[2],
+        #print self.right_edge[0], self.right_edge[1], self.right_edge[2],
+        #print self.right_edge_shift[0], self.right_edge_shift[1], self.right_edge_shift[2]
+        if not self.check_period:
+            for i in range(3):
+                si[i] = <int> ((self.left_edge[i] - left_edge[i])/dds[i])
+                ei[i] = <int> ((self.right_edge[i] - left_edge[i])/dds[i])
+                si[i] = iclip(si[i] - 1, 0, dim[i])
+                ei[i] = iclip(ei[i] + 1, 0, dim[i])
+        else:
+            for i in range(3):
+                si[i] = 0
+                ei[i] = dim[i]
+        with nogil:
+            pos[0] = left_edge[0] + (si[0] + 0.5) * dds[0]
+            for i in range(si[0], ei[0]):
+                pos[1] = left_edge[1] + (si[1] + 0.5) * dds[1]
+                for j in range(si[1], ei[1]):
+                    pos[2] = left_edge[2] + (si[2] + 0.5) * dds[2]
+                    for k in range(si[2], ei[2]):
+                        if child_mask[i, j, k] == 1 or this_level == 1:
+                            mask[i, j, k] = self.select_cell(pos, dds)
+                            total += mask[i, j, k]
+                        pos[2] += dds[2]
+                    pos[1] += dds[1]
+                pos[0] += dds[0]
+        return total
+
+
     def _hash_vals(self):
         return (("left_edge[0]", self.left_edge[0]),
                 ("left_edge[1]", self.left_edge[1]),

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -14,7 +14,7 @@
 
 from yt.units import dimensions
 from yt.utilities.physical_ratios import \
-    cm_per_pc, cm_per_ly, cm_per_au, cm_per_rsun, \
+    cm_per_pc, cm_per_ly, cm_per_au, cm_per_rsun, cm_per_m, \
     mass_sun_grams, sec_per_year, sec_per_day, sec_per_hr, \
     sec_per_min, temp_sun_kelvin, luminosity_sun_ergs_per_sec, \
     metallicity_sun, erg_per_eV, amu_grams, mass_electron_grams, \
@@ -122,6 +122,8 @@
     "me": (mass_electron_grams, dimensions.mass, 0.0, r"m_e"),
     "mp": (mass_hydrogen_grams, dimensions.mass, 0.0, r"m_p"),
     "mol": (1.0/amu_grams, dimensions.dimensionless, 0.0, r"\rm{mol}"),
+    'Sv': (cm_per_m**2, dimensions.specific_energy, 0.0,
+           r"\rm{Sv}"),
 
     # for AstroPy compatibility
     "solMass": (mass_sun_grams, dimensions.mass, 0.0, r"M_\odot"),
@@ -206,6 +208,7 @@
     "statV",
     "ohm",
     "statohm",
+    "Sv",
 )
 
 default_base_units = {
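
With the new "Sv" entry in place, sievert quantities should behave like any
other specific-energy unit.  A short example (requires this changeset; the
numbers follow from 1 Sv = 1 J/kg = 1e4 erg/g):

    from yt.units.yt_array import YTQuantity

    dose = YTQuantity(2.5, 'Sv')
    print(dose.in_units('erg/g'))   # 25000.0 erg/g
    print(dose.in_units('J/kg'))    # 2.5 J/kg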

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -28,6 +28,7 @@
     add_pygrids, \
     find_node, \
     kd_is_leaf, \
+    set_dirty, \
     depth_traverse, \
     depth_first_touch, \
     kd_traverse, \
@@ -165,7 +166,6 @@
         self.brick_dimensions = []
         self.sdx = ds.index.get_smallest_dx()
 
-        self.regenerate_data = True
         self._initialized = False
         try:
             self._id_offset = ds.index.grids[0]._id_offset
@@ -181,15 +181,15 @@
                          min_level=min_level, max_level=max_level,
                          data_source=data_source)
 
-    def set_fields(self, fields, log_fields, no_ghost):
+    def set_fields(self, fields, log_fields, no_ghost, force=False):
         new_fields = self.data_source._determine_fields(fields)
-        self.regenerate_data = \
-            self.fields is None or \
-            len(self.fields) != len(new_fields) or \
-            self.fields != new_fields
+        regenerate_data = self.fields is None or \
+                          len(self.fields) != len(new_fields) or \
+                          self.fields != new_fields or force
+        set_dirty(self.tree.trunk, regenerate_data)
         self.fields = new_fields
 
-        if self.log_fields is not None:
+        if self.log_fields is not None and not regenerate_data:
             flip_log = map(operator.ne, self.log_fields, log_fields)
         else:
             flip_log = [False] * len(log_fields)
@@ -199,6 +199,7 @@
         del self.bricks, self.brick_dimensions
         self.brick_dimensions = []
         bricks = []
+
         for b in self.traverse():
             map(_apply_log, b.my_data, flip_log, log_fields)
             bricks.append(b)
@@ -284,7 +285,7 @@
         return scatter_image(self.comm, owners[1], image)
 
     def get_brick_data(self, node):
-        if node.data is not None and not self.regenerate_data:
+        if node.data is not None and not node.dirty:
             return node.data
         grid = self.ds.index.grids[node.grid - self._id_offset]
         dds = grid.dds.ndarray_view()
@@ -297,7 +298,7 @@
         assert(np.all(grid.LeftEdge <= nle))
         assert(np.all(grid.RightEdge >= nre))
 
-        if grid in self.current_saved_grids and not self.regenerate_data:
+        if grid in self.current_saved_grids and not node.dirty:
             dds = self.current_vcds[self.current_saved_grids.index(grid)]
         else:
             dds = []
@@ -323,9 +324,9 @@
                                 nre.copy(),
                                 dims.astype('int64'))
         node.data = brick
+        node.dirty = False
         if not self._initialized:
             self.brick_dimensions.append(dims)
-        self.regenerate_data = False
         return brick
 
     def locate_brick(self, position):

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 yt/utilities/lib/amr_kdtools.pxd
--- a/yt/utilities/lib/amr_kdtools.pxd
+++ b/yt/utilities/lib/amr_kdtools.pxd
@@ -26,6 +26,7 @@
     cdef public Node right
     cdef public Node parent
     cdef public int grid
+    cdef public bint dirty
     cdef public np.int64_t node_id
     cdef public np.int64_t node_ind
     cdef np.float64_t left_edge[3]

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -38,6 +38,7 @@
                   np.ndarray[np.float64_t, ndim=1] right_edge,
                   int grid,
                   np.int64_t node_id):
+        self.dirty = False
         self.left = left
         self.right = right
         self.parent = parent
@@ -49,6 +50,7 @@
         self.node_id = node_id
         self.split == NULL
 
+
     def print_me(self):
         print 'Node %i' % self.node_id
         print '\t le: %e %e %e' % (self.left_edge[0], self.left_edge[1],
@@ -135,6 +137,10 @@
     else:
         return 0
 
+def set_dirty(Node trunk, bint state):
+    for node in depth_traverse(trunk):
+        node.dirty = state
+
 def kd_traverse(Node trunk, viewpoint=None):
     if viewpoint is None:
         for node in depth_traverse(trunk):
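
Taken together with the amr_kdtree.py changes above, the per-node dirty flag
replaces the old tree-wide regenerate_data switch: set_fields() marks every
node dirty via set_dirty(), and get_brick_data() only rebuilds the bricks of
nodes still flagged.  A rough usage sketch (the dataset path is only a
placeholder for any grid dataset):

    import yt
    from yt.utilities.amr_kdtree.api import AMRKDTree

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    tree = AMRKDTree(ds)
    # Passing force=True (or changing the field list) marks every node dirty,
    # so the vertex-centered brick data is regenerated on the next traversal.
    tree.set_fields(["density"], log_fields=[True], no_ghost=True, force=True)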

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 yt/utilities/lib/bounding_volume_hierarchy.pxd
--- a/yt/utilities/lib/bounding_volume_hierarchy.pxd
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pxd
@@ -38,8 +38,6 @@
 cdef class BVH:
     cdef BVHNode* root
     cdef Triangle* triangles
-    cdef np.int64_t leaf_size
-    cdef np.float64_t[:, ::1] vertices
     cdef np.int64_t _partition(self, np.int64_t begin, np.int64_t end,
                                np.int64_t ax, np.float64_t split) nogil
     cdef void intersect(self, Ray* ray) nogil

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -10,6 +10,10 @@
         MAX_NUM_TRI
     int triangulate_hex[MAX_NUM_TRI][3]
 
+# define some constants
+cdef np.float64_t DETERMINANT_EPS = 1.0e-10
+cdef np.float64_t INF = np.inf
+cdef np.int64_t   LEAF_SIZE = 16
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -58,7 +62,7 @@
 
     cdef np.float64_t det, inv_det
     det = dot(e1, P)
-    if(det > -1.0e-10 and det < 1.0e-10): 
+    if(det > -DETERMINANT_EPS and det < DETERMINANT_EPS): 
         return False
     inv_det = 1.0 / det
 
@@ -78,7 +82,7 @@
 
     cdef np.float64_t t = dot(e2, Q) * inv_det
 
-    if(t > 1.0e-10 and t < ray.t_far):
+    if(t > DETERMINANT_EPS and t < ray.t_far):
         ray.t_far = t
         ray.data_val = (1.0 - u - v)*tri.d0 + u*tri.d1 + v*tri.d2
         ray.elem_id = tri.elem_id
@@ -93,8 +97,8 @@
 cdef np.int64_t ray_bbox_intersect(Ray* ray, const BBox bbox) nogil:
 # https://tavianator.com/fast-branchless-raybounding-box-intersections/
 
-    cdef np.float64_t tmin = -1.0e300
-    cdef np.float64_t tmax =  1.0e300
+    cdef np.float64_t tmin = -INF
+    cdef np.float64_t tmax =  INF
  
     cdef np.int64_t i
     cdef np.float64_t t1, t2
@@ -127,9 +131,7 @@
                   np.float64_t[:, ::1] vertices,
                   np.int64_t[:, ::1] indices,
                   np.float64_t[:, ::1] field_data):
-        
-        self.leaf_size = 16
-        self.vertices = vertices
+
         cdef np.int64_t num_elem = indices.shape[0]
         cdef np.int64_t num_tri = 12*num_elem
 
@@ -162,7 +164,7 @@
         self.root = self._recursive_build(0, num_tri)
 
     cdef void _recursive_free(self, BVHNode* node) nogil:
-        if node.end - node.begin > self.leaf_size:
+        if node.end - node.begin > LEAF_SIZE:
             self._recursive_free(node.left)
             self._recursive_free(node.right)
         free(node)
@@ -224,7 +226,7 @@
         # check for leaf
         cdef np.int64_t i, hit
         cdef Triangle* tri
-        if (node.end - node.begin) <= self.leaf_size:
+        if (node.end - node.begin) <= LEAF_SIZE:
             for i in range(node.begin, node.end):
                 tri = &(self.triangles[i])
                 hit = ray_triangle_intersect(ray, tri)
@@ -245,7 +247,7 @@
         self._get_node_bbox(node, begin, end)
         
         # check for leaf
-        if (end - begin) <= self.leaf_size:
+        if (end - begin) <= LEAF_SIZE:
             return node
         
         # we use the "split in the middle of the longest axis approach"
@@ -300,7 +302,7 @@
         for i in prange(N):
             for j in range(3):
                 ray.origin[j] = origins[N*j + i]
-            ray.t_far = 1e30
+            ray.t_far = INF
             ray.t_near = 0.0
             ray.data_val = 0
             bvh.intersect(ray)
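
The ray_bbox_intersect routine above follows the branchless "slab" test from
the linked tavianator.com post.  For readers who want the idea without the
Cython details, an equivalent NumPy sketch (illustrative only):

    import numpy as np

    def ray_bbox_intersect(origin, inv_dir, bbox_min, bbox_max):
        # Intersect the ray with each pair of axis-aligned slabs and keep the
        # tightest [tmin, tmax] interval; division by zero yields +/-inf,
        # which the min/max reductions handle naturally.
        t1 = (bbox_min - origin) * inv_dir
        t2 = (bbox_max - origin) * inv_dir
        tmin = np.max(np.minimum(t1, t2))
        tmax = np.min(np.maximum(t1, t2))
        return tmax >= max(tmin, 0.0)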

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -844,6 +844,15 @@
     else:
         res_vector = np.resize(vector,(3,1))
     return res_vector
+    
+def normalize_vector(vector):
+    # Normalize an input vector.  Zero-length vectors are returned
+    # unchanged (their norm is replaced by 1.0 to avoid division by zero).
+    
+    L2 = np.atleast_1d(np.linalg.norm(vector))
+    L2[L2==0] = 1.0
+    vector = vector / L2
+    return vector
 
 def get_sph_theta(coords, normal):
     # The angle (theta) with respect to the normal (J), is the arccos
@@ -852,6 +861,10 @@
     
     res_normal = resize_vector(normal, coords)
 
+    # normalize the normal vector, since the arccos
+    # below requires a unit vector
+    res_normal = normalize_vector(res_normal)
+
     tile_shape = [1] + list(coords.shape)[1:]
     
     J = np.tile(res_normal,tile_shape)
@@ -871,6 +884,7 @@
     # yprime-component and the xprime-component of the coordinate 
     # vector.
 
+    normal = normalize_vector(normal)
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
     res_xprime = resize_vector(xprime, coords)
@@ -890,6 +904,7 @@
     # gives a vector of magnitude equal to the cylindrical radius.
 
     res_normal = resize_vector(normal, coords)
+    res_normal = normalize_vector(res_normal)
 
     tile_shape = [1] + list(coords.shape)[1:]
     J = np.tile(res_normal, tile_shape)
@@ -902,6 +917,7 @@
     # gives the cylindrical height.
 
     res_normal = resize_vector(normal, coords)
+    res_normal = normalize_vector(res_normal)
     
     tile_shape = [1] + list(coords.shape)[1:]
     J = np.tile(res_normal, tile_shape)
@@ -917,6 +933,7 @@
 def get_cyl_r_component(vectors, theta, normal):
     # The r of a vector is the vector dotted with rhat
 
+    normal = normalize_vector(normal)
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
     res_xprime = resize_vector(xprime, vectors)
@@ -933,6 +950,7 @@
 def get_cyl_theta_component(vectors, theta, normal):
     # The theta component of a vector is the vector dotted with thetahat
     
+    normal = normalize_vector(normal)
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
     res_xprime = resize_vector(xprime, vectors)
@@ -948,6 +966,7 @@
 
 def get_cyl_z_component(vectors, normal):
     # The z component of a vector is the vector dotted with zhat
+    normal = normalize_vector(normal)
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
     res_zprime = resize_vector(zprime, vectors)
@@ -959,7 +978,7 @@
 
 def get_sph_r_component(vectors, theta, phi, normal):
     # The r component of a vector is the vector dotted with rhat
-    
+    normal = normalize_vector(normal)
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
     res_xprime = resize_vector(xprime, vectors)
@@ -980,7 +999,7 @@
 
 def get_sph_phi_component(vectors, phi, normal):
     # The phi component of a vector is the vector dotted with phihat
-
+    normal = normalize_vector(normal)
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
     res_xprime = resize_vector(xprime, vectors)
@@ -996,7 +1015,7 @@
 
 def get_sph_theta_component(vectors, theta, phi, normal):
     # The theta component of a vector is the vector dotted with thetahat
-    
+    normal = normalize_vector(normal)
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
     res_xprime = resize_vector(xprime, vectors)
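
The net effect of these math_utils changes is that the various get_sph_* and
get_cyl_* helpers no longer assume the caller passes a unit normal.  A small
check (requires this changeset; numbers chosen purely for illustration):

    import numpy as np
    from yt.utilities.math_utils import normalize_vector, get_sph_theta

    normal = np.array([0.0, 0.0, 2.0])        # deliberately not unit length
    print(normalize_vector(normal))           # [0. 0. 1.]

    coords = np.array([[1.0], [0.0], [0.0]])  # one point on the x-axis
    print(get_sph_theta(coords, normal))      # ~pi/2, independent of |normal|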

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 yt/utilities/physical_ratios.py
--- a/yt/utilities/physical_ratios.py
+++ b/yt/utilities/physical_ratios.py
@@ -37,6 +37,7 @@
 km_per_pc     = 3.08567758e13
 km_per_m      = 1e-3
 km_per_cm     = 1e-5
+m_per_cm      = 1e-2
 ly_per_cm     = 1.05702341e-18
 rsun_per_cm   = 1.4378145e-11
 au_per_cm     = 6.68458712e-14
@@ -53,6 +54,7 @@
 cm_per_mpc    = 1.0 / mpc_per_cm
 cm_per_kpc    = 1.0 / kpc_per_cm
 cm_per_km     = 1.0 / km_per_cm
+cm_per_m      = 1.0 / m_per_cm
 pc_per_km     = 1.0 / km_per_pc
 cm_per_pc     = 1.0 / pc_per_cm
 cm_per_ly     = 1.0 / ly_per_cm

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -120,6 +120,12 @@
 
     def __init__(self, scene, data_source=None, lens_type='plane-parallel',
                  auto=False):
+        # import this here to avoid an import cycle
+        from .scene import Scene
+        if not isinstance(scene, Scene):
+            raise RuntimeError(
+                'The first argument passed to the Camera initializer is a '
+                '%s object, expected a %s object' % (type(scene), Scene))
         self.scene = scene
         self.lens = None
         self.north_vector = None
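
The new isinstance check turns a common mistake (passing a dataset or data
object as the first argument) into an immediate, readable error.  Roughly
(the dataset path is just an example):

    import yt
    from yt.visualization.volume_rendering.api import Scene, Camera

    sc = Scene()
    cam = Camera(sc)     # fine: the first argument is a Scene

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    Camera(ds)           # now raises RuntimeError up front instead of
                         # failing later in less obvious ways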

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -13,6 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+from __future__ import division
 from yt.funcs import mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
@@ -160,6 +161,10 @@
         return self.current_image
 
     def _get_sampler_params(self, camera, render_source):
+        # Enforce width[1] / width[0] = resolution[1] / resolution[0]
+        camera.width[1] = camera.width[0] \
+            * (camera.resolution[1] / camera.resolution[0])
+
         if render_source.zbuffer is not None:
             image = render_source.zbuffer.rgba
         else:
@@ -232,11 +237,16 @@
     def project_to_plane(self, camera, pos, res=None):
         if res is None:
             res = camera.resolution
+
         width = camera.width.in_units('code_length').d
         position = camera.position.in_units('code_length').d
 
+        width[1] = width[0] * res[1] / res[0]
+
         sight_vector = pos - position
+
         pos1 = sight_vector
+
         for i in range(0, sight_vector.shape[0]):
             sight_vector_norm = np.sqrt(np.dot(sight_vector[i], sight_vector[i]))
             if sight_vector_norm != 0:
@@ -294,9 +304,12 @@
         return self.current_image
 
     def _get_sampler_params(self, camera, render_source):
-        # We should move away from pre-generation of vectors like this and into
-        # the usage of on-the-fly generation in the VolumeIntegrator module
-        # We might have a different width and back_center
+        # Enforce width[1] / width[0] = 2 * resolution[1] / resolution[0]
+        # For a stereo-type lens the left- and right-eye images are pasted
+        # together, so a single-eye image gets half of the horizontal resolution.
+        camera.width[1] = camera.width[0] \
+            * (2. * camera.resolution[1] / camera.resolution[0])
+
         if self.disparity is None:
             self.disparity = camera.width[0] / 2.e3
 
@@ -398,6 +411,11 @@
         if res is None:
             res = camera.resolution
 
+        # Enforce width[1] / width[0] = 2 * resolution[1] / resolution[0]
+        # For a stereo-type lens the left- and right-eye images are pasted
+        # together, so a single-eye image gets half of the horizontal resolution.
+        camera.width[1] = camera.width[0] * (2. * res[1] / res[0])
+
         if self.disparity is None:
             self.disparity = camera.width[0] / 2.e3
 

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 yt/visualization/volume_rendering/off_axis_projection.py
--- a/yt/visualization/volume_rendering/off_axis_projection.py
+++ b/yt/visualization/volume_rendering/off_axis_projection.py
@@ -173,7 +173,7 @@
 
     sc.add_source(vol)
 
-    vol.set_sampler(camera)
+    vol.set_sampler(camera, interpolated=False)
     assert (vol.sampler is not None)
 
     mylog.debug("Casting rays")

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -20,7 +20,8 @@
 from .transfer_functions import TransferFunction, \
     ProjectionTransferFunction, ColorTransferFunction
 from .utils import new_volume_render_sampler, data_source_or_all, \
-    get_corners, new_projection_sampler, new_mesh_sampler
+    get_corners, new_projection_sampler, new_mesh_sampler, \
+    new_interpolated_projection_sampler
 from yt.visualization.image_writer import apply_colormap
 from yt.data_objects.image_array import ImageArray
 from .zbuffer_array import ZBuffer
@@ -160,6 +161,8 @@
             raise RuntimeError("transfer_function not of correct type")
         if isinstance(transfer_function, ProjectionTransferFunction):
             self.sampler_type = 'projection'
+            self.volume.set_fields([self.field], log_fields=[False], 
+                                   no_ghost=True, force=True)
 
         self.transfer_function = transfer_function
         return self
@@ -202,7 +205,8 @@
         """Set the source's fields to render
 
         Parameters
-        ---------
+        ----------
+
         fields: field name or list of field names
             The field or fields to render
         no_ghost: boolean
@@ -216,15 +220,24 @@
         self.volume.set_fields(fields, log_fields, no_ghost)
         self.field = fields
 
-    def set_sampler(self, camera):
+    def set_sampler(self, camera, interpolated=True):
         """Sets a volume render sampler
 
         The type of sampler is determined based on the ``sampler_type`` attribute
         of the VolumeSource. Currently the ``volume_render`` and ``projection``
         sampler types are supported.
+
+        The 'interpolated' argument is only meaningful for projections. If True,
+        the data is first interpolated to the cell vertices, and then tri-linearly
+        interpolated to the ray sampling positions. If False, then the cell-centered
+        data is simply accumulated along the ray. Interpolation is always performed
+        for volume renderings.
+
         """
         if self.sampler_type == 'volume-render':
             sampler = new_volume_render_sampler(camera, self)
+        elif self.sampler_type == 'projection' and interpolated:
+            sampler = new_interpolated_projection_sampler(camera, self)
         elif self.sampler_type == 'projection':
             sampler = new_projection_sampler(camera, self)
         else:
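
In practice the new keyword only matters when the source's sampler_type is
'projection'.  A hedged sketch, assuming `vol` is a VolumeSource whose
transfer function is a ProjectionTransferFunction and `cam` is the scene's
Camera:

    vol.set_sampler(cam)                      # interpolated projection (default)
    vol.set_sampler(cam, interpolated=False)  # accumulate cell-centered data,
                                              # as off_axis_projection now does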

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -230,6 +230,7 @@
         
     def clear(self):
         self.y[:]=0.0
+        self.features = []
 
     def __repr__(self):
         disp = "<Transfer Function Object>: x_bounds:(%3.2g, %3.2g) nbins:%3.2g features:%s" % \
@@ -807,6 +808,7 @@
     def clear(self):
         for f in self.funcs:
             f.clear()
+        self.features = []
 
     def __repr__(self):
         disp = "<Color Transfer Function Object>:\n" + \

diff -r c1b32f343db58da2b45a1d15ba2c7b98860beb92 -r 972a43b4c388663da1000ed66bee61934890ca37 yt/visualization/volume_rendering/utils.py
--- a/yt/visualization/volume_rendering/utils.py
+++ b/yt/visualization/volume_rendering/utils.py
@@ -51,7 +51,8 @@
     kwargs = {'lens_type': params['lens_type']}
     if render_source.zbuffer is not None:
         kwargs['zbuffer'] = render_source.zbuffer.z
-        args[4][:] = render_source.zbuffer.rgba[:]
+        args[4][:] = np.reshape(render_source.zbuffer.rgba[:], \
+            (camera.resolution[0], camera.resolution[1], 4))
     else:
         kwargs['zbuffer'] = np.ones(params['image'].shape[:2], "float64")
 


https://bitbucket.org/yt_analysis/yt/commits/483bf7e1cc94/
Changeset:   483bf7e1cc94
Branch:      yt
User:        ngoldbaum
Date:        2016-03-10 02:23:21+00:00
Summary:     Disable line annotation sources for lenses with nonlinear projection functions
Affected #:  1 file

diff -r 972a43b4c388663da1000ed66bee61934890ca37 -r 483bf7e1cc9484f761be737690ec586a420335e6 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -134,10 +134,21 @@
         if keyname is None:
             keyname = 'source_%02i' % len(self.sources)
 
-        if isinstance(render_source, (VolumeSource, MeshSource, GridSource)):
+        data_sources = (VolumeSource, MeshSource, GridSource)
+
+        if isinstance(render_source, data_sources):
             self.set_new_unit_registry(
                 render_source.data_source.ds.unit_registry)
 
+        line_annotation_sources = (MeshSource, BoxSource, CoordinateVectorSource)
+
+        if isinstance(render_source, line_annotation_sources):
+            lens_str = str(self.camera.lens)
+            if 'fisheye' in lens_str or 'spherical' in lens_str:
+                raise NotImplementedError(
+                    "Line annotation sources are not supported for %s."
+                    % (type(self.camera.lens).__name__), )
+
         self.sources[keyname] = render_source
 
         return self
@@ -489,7 +500,7 @@
         r"""
 
         Modifies this scene by drawing the edges of the AMR grids.
-        This adds a new BoxSource to the scene for each AMR grid 
+        This adds a new GridSource to the scene that represents the AMR grids
         and returns the resulting Scene object.
 
         Parameters
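
The practical consequence of the new check in add_source(): line-drawing
annotations are rejected up front when the camera's lens cannot project
straight lines sensibly.  A hedged sketch, assuming `sc` is an existing Scene
whose camera was created with a fisheye or spherical lens:

    import numpy as np
    from yt.visualization.volume_rendering.api import BoxSource

    box = BoxSource(np.zeros(3), np.ones(3))
    sc.add_source(box)   # raises NotImplementedError for these lens types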

Repository URL: https://bitbucket.org/yt_analysis/yt/
