[yt-svn] commit/yt: 25 new changesets

commits-noreply at bitbucket.org
Sat Jan 3 21:29:17 PST 2015


25 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/feaf123a5f4b/
Changeset:   feaf123a5f4b
Branch:      yt
User:        MatthewTurk
Date:        2014-11-15 16:25:05+00:00
Summary:     Adding private function for sanity check on spherical coords.
Affected #:  1 file

diff -r e8fb57e66ca42e26052dadf054a5c782740abec9 -r feaf123a5f4b6d8eafe44e9898beafc89b1a64fd yt/geometry/coordinates/spherical_coordinates.py
--- a/yt/geometry/coordinates/spherical_coordinates.py
+++ b/yt/geometry/coordinates/spherical_coordinates.py
@@ -219,3 +219,15 @@
             width = [self.ds.domain_width[0],
                      2.0*self.ds.domain_width[0]]
         return width
+
+    def _sanity_check(self):
+        # We just check a few things here.
+        dd = self.ds.all_data()
+        r0 = self.ds.domain_left_edge[0]
+        r1 = self.ds.domain_right_edge[0]
+        v1 = 4.0 * np.pi / 3.0 * (r1**3 - r0**3)
+        print "Total volume should be 4*pi*r**3 = %0.16e" % (v1)
+        v2 = dd.quantities.total_quantity("cell_volume")
+        print "Actual volume is                   %0.16e" % (v2)
+        print "Relative difference: %0.16e" % (np.abs(v2-v1)/(v2+v1))
+
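
A note on the diagnostic above: the printed label reads "4*pi*r**3", but the
value v1 actually computed is the shell volume 4*pi/3 * (r1**3 - r0**3), which
reduces to the familiar full-sphere volume when r0 = 0.  A minimal standalone
sketch of the same comparison, assuming only NumPy:

    import numpy as np

    def shell_volume(r0, r1):
        # Analytic volume of a spherical shell between radii r0 and r1.
        return 4.0 * np.pi / 3.0 * (r1**3 - r0**3)

    def relative_difference(v1, v2):
        # The same symmetric relative difference printed by _sanity_check.
        return np.abs(v2 - v1) / (v2 + v1)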


https://bitbucket.org/yt_analysis/yt/commits/b39160de4e59/
Changeset:   b39160de4e59
Branch:      yt
User:        MatthewTurk
Date:        2014-11-15 17:41:28+00:00
Summary:     Improve spherical projections and plotting.

This fixes an interesting bug where projections would access data_source for
pixelization, which resulted in a very large number of data source generation
calls.  This was harmless for slices, but caused problems for projections,
which do not use the same semantics for what a field means.
Affected #:  3 files

diff -r feaf123a5f4b6d8eafe44e9898beafc89b1a64fd -r b39160de4e5908458a6c10e566b22cec0f98b5c0 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -375,7 +375,9 @@
             dl = 1.0
         else:
             # This gets explicitly converted to cm
-            dl = chunk.fwidth[:, self.axis]
+            ax_name = self.ds.coordinates.axis_name[self.axis]
+            dl = chunk["index", "path_%s" % (ax_name)]
+            #dl = chunk.fwidth[:, self.axis]
             dl.convert_to_units("cm")
         v = np.empty((chunk.ires.size, len(fields)), dtype="float64")
         for i in range(len(fields)):

diff -r feaf123a5f4b6d8eafe44e9898beafc89b1a64fd -r b39160de4e5908458a6c10e566b22cec0f98b5c0 yt/geometry/coordinates/spherical_coordinates.py
--- a/yt/geometry/coordinates/spherical_coordinates.py
+++ b/yt/geometry/coordinates/spherical_coordinates.py
@@ -73,6 +73,26 @@
                  function=_SphericalVolume,
                  units = "code_length**3")
 
+        def _path_r(field, data):
+            return data["index", "dr"]
+        registry.add_field(("index", "path_r"),
+                 function = _path_r,
+                 units = "code_length")
+        def _path_theta(field, data):
+            # Note: this already assumes cell-centered
+            return data["index", "r"] * data["index", "dtheta"]
+        registry.add_field(("index", "path_theta"),
+                 function = _path_theta,
+                 units = "code_length")
+        def _path_phi(field, data):
+            # Note: this already assumes cell-centered
+            return data["index", "r"] \
+                    * data["index", "dphi"] \
+                    * np.sin(data["index", "theta"])
+        registry.add_field(("index", "path_phi"),
+                 function = _path_phi,
+                 units = "code_length")
+
     def pixelize(self, dimension, data_source, field, bounds, size,
                  antialias = True, periodic = True):
         self.period
@@ -102,16 +122,16 @@
     def _cyl_pixelize(self, data_source, field, bounds, size, antialias,
                       dimension):
         if dimension == 1:
-            buff = pixelize_cylinder(data_source['r'],
-                                     data_source['dr'] / 2.0,
-                                     data_source['phi'],
-                                     data_source['dphi'] / 2.0, # half-widths
+            buff = pixelize_cylinder(data_source['px'],
+                                     data_source['pdx'],
+                                     data_source['py'],
+                                     data_source['pdy'],
                                      size, data_source[field], bounds)
         elif dimension == 2:
-            buff = pixelize_cylinder(data_source['r'],
-                                     data_source['dr'] / 2.0,
-                                     data_source['theta'],
-                                     data_source['dtheta'] / 2.0, # half-widths
+            buff = pixelize_cylinder(data_source['px'],
+                                     data_source['pdx'],
+                                     data_source['py'],
+                                     data_source['pdy'],
                                      size, data_source[field], bounds)
             buff = buff.transpose()
         else:

diff -r feaf123a5f4b6d8eafe44e9898beafc89b1a64fd -r b39160de4e5908458a6c10e566b22cec0f98b5c0 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -80,7 +80,11 @@
     104923.1
     """
     _exclude_fields = ('pz','pdz','dx','x','y','z',
-                       ('index','dx'),('index','x'),('index','y'),('index','z'))
+        'r', 'dr', 'phi', 'dphi', 'theta', 'dtheta',
+                       ('index','dx'),('index','x'),('index','y'),('index','z'),
+                       ('index', 'r'), ('index', 'dr'),
+                       ('index', 'phi'), ('index', 'dphi'),
+                       ('index', 'theta'), ('index', 'dtheta'))
     def __init__(self, data_source, bounds, buff_size, antialias = True,
                  periodic = False):
         self.data_source = data_source
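
The three new path fields encode the spherical line elements that a projection
integrates along: dl_r = dr, dl_theta = r*dtheta, and dl_phi =
r*sin(theta)*dphi.  A small standalone sketch of that geometry, independent of
yt:

    import numpy as np

    def spherical_path_elements(r, theta, dr, dtheta, dphi):
        # Line elements of the spherical metric at a cell center:
        #   along r:     dl = dr
        #   along theta: dl = r * dtheta
        #   along phi:   dl = r * sin(theta) * dphi
        return dr, r * dtheta, r * np.sin(theta) * dphi

The projection machinery then picks the right element per axis through
chunk["index", "path_%s" % ax_name] instead of the Cartesian-only
chunk.fwidth[:, self.axis].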


https://bitbucket.org/yt_analysis/yt/commits/0078adb32955/
Changeset:   0078adb32955
Branch:      yt
User:        MatthewTurk
Date:        2014-11-15 21:34:01+00:00
Summary:     More adjustments to spherical coordinates and pixelization.
Affected #:  3 files

diff -r b39160de4e5908458a6c10e566b22cec0f98b5c0 -r 0078adb32955d4a5669e450953124435d52e9e56 yt/geometry/coordinates/spherical_coordinates.py
--- a/yt/geometry/coordinates/spherical_coordinates.py
+++ b/yt/geometry/coordinates/spherical_coordinates.py
@@ -107,18 +107,14 @@
 
     def _ortho_pixelize(self, data_source, field, bounds, size, antialias,
                         dim, periodic):
-        # We should be using fcoords
-        period = self.period[:2].copy() # dummy here
-        period[0] = self.period[self.x_axis[dim]]
-        period[1] = self.period[self.y_axis[dim]]
-        period = period.in_units("code_length").d
-        buff = _MPL.Pixelize(data_source['px'], data_source['py'],
-                             data_source['pdx'], data_source['pdy'],
-                             data_source[field], size[0], size[1],
-                             bounds, int(antialias),
-                             period, int(periodic)).transpose()
+        buff = pixelize_aitoff(data_source["py"], data_source["pdy"],
+                               data_source["px"], data_source["pdx"],
+                               size, data_source[field], None,
+                               None, theta_offset = 0,
+                               phi_offset = 0).transpose()
         return buff
 
+
     def _cyl_pixelize(self, data_source, field, bounds, size, antialias,
                       dimension):
         if dimension == 1:

diff -r b39160de4e5908458a6c10e566b22cec0f98b5c0 -r 0078adb32955d4a5669e450953124435d52e9e56 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -579,7 +579,9 @@
                     np.ndarray[np.float64_t, ndim=1] dphi,
                     buff_size,
                     np.ndarray[np.float64_t, ndim=1] field,
-                    extents, input_img = None):
+                    extents, input_img = None,
+                    np.float64_t theta_offset = 0.0,
+                    np.float64_t phi_offset = 0.0):
     # http://paulbourke.net/geometry/transformationprojection/
     # longitude is -pi to pi
     # latitude is -pi/2 to pi/2
@@ -610,9 +612,9 @@
     dx = 2.0 / (img.shape[0] - 1)
     dy = 2.0 / (img.shape[1] - 1)
     for fi in range(nf):
-        theta_p = theta[fi] - PI
+        theta_p = (theta[fi] + theta_offset) - PI
         dtheta_p = dtheta[fi]
-        phi_p = phi[fi] - PI/2.0
+        phi_p = (phi[fi] + phi_offset) - PI/2.0
         dphi_p = dphi[fi]
         # Four transformations
         aitoff_thetaphi_to_xy(theta_p - dtheta_p, phi_p - dphi_p, &x, &y)
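
For orientation: the Aitoff transform expects longitude in [-pi, pi] and
latitude in [-pi/2, pi/2] (hence the PI and PI/2 subtractions above), and the
new theta_offset/phi_offset arguments let callers shift coordinates whose
native ranges start at zero into that frame.  The underlying projection, per
the Bourke reference above, in a minimal NumPy sketch:

    import numpy as np

    def aitoff_xy(lon, lat):
        # Aitoff projection; lon in [-pi, pi], lat in [-pi/2, pi/2].
        alpha = np.arccos(np.cos(lat) * np.cos(lon / 2.0))
        sinc = np.sinc(alpha / np.pi)  # np.sinc(x) = sin(pi*x)/(pi*x)
        x = 2.0 * np.cos(lat) * np.sin(lon / 2.0) / sinc
        y = np.sin(lat) / sinc
        return x, y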

diff -r b39160de4e5908458a6c10e566b22cec0f98b5c0 -r 0078adb32955d4a5669e450953124435d52e9e56 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -81,7 +81,7 @@
     from pyparsing import ParseFatalException
 
 def get_window_parameters(axis, center, width, ds):
-    if ds.geometry in ("cartesian", "spectral_cube", "spherical"):
+    if ds.geometry in ("cartesian", "spectral_cube"):
         width = ds.coordinates.sanitize_width(axis, width, None)
         center, display_center = ds.coordinates.sanitize_center(center, axis)
     elif ds.geometry in ("polar", "cylindrical"):
@@ -89,6 +89,14 @@
         width = [ds.domain_right_edge[0]*2.0, ds.domain_right_edge[0]*2.0]
         center = ds.arr([0.0, 0.0, 0.0], "code_length")
         display_center = center.copy()
+    elif ds.geometry == "spherical":
+        center, display_center = ds.coordinates.sanitize_center(center, axis)
+        if axis == 0:
+            # latitude slice
+            width = ds.arr([2*np.pi, np.pi], "code_length")
+        else:
+            width = [2.0*ds.domain_right_edge[2], 2.0*ds.domain_right_edge[2]]
+            center[2] = 0.0
     elif ds.geometry == "geographic":
         c_r = ((ds.domain_right_edge + ds.domain_left_edge)/2.0)[2]
         center = ds.arr([0.0, 0.0, c_r], "code_length")


https://bitbucket.org/yt_analysis/yt/commits/f77b96367e00/
Changeset:   f77b96367e00
Branch:      yt
User:        MatthewTurk
Date:        2014-11-16 01:33:13+00:00
Summary:     Adding in path length for cartesian coordinates.
Affected #:  1 file

diff -r 0078adb32955d4a5669e450953124435d52e9e56 -r f77b96367e006867c10687f3ef9a2f1bca711811 yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -32,6 +32,9 @@
             registry.add_field(("index", "d%s" % ax), function = f1,
                                display_field = False,
                                units = "code_length")
+            registry.add_field(("index", "path_%s" % ax), function = f1,
+                               display_field = False,
+                               units = "code_length")
             registry.add_field(("index", "%s" % ax), function = f2,
                                display_field = False,
                                units = "code_length")


https://bitbucket.org/yt_analysis/yt/commits/401924a7b8a7/
Changeset:   401924a7b8a7
Branch:      yt
User:        MatthewTurk
Date:        2014-12-02 21:05:47+00:00
Summary:     Changing path_ to path_element_
Affected #:  3 files

diff -r f77b96367e006867c10687f3ef9a2f1bca711811 -r 401924a7b8a7765f35c449e29c411955980c2414 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -376,7 +376,7 @@
         else:
             # This gets explicitly converted to cm
             ax_name = self.ds.coordinates.axis_name[self.axis]
-            dl = chunk["index", "path_%s" % (ax_name)]
+            dl = chunk["index", "path_element_%s" % (ax_name)]
             #dl = chunk.fwidth[:, self.axis]
             dl.convert_to_units("cm")
         v = np.empty((chunk.ires.size, len(fields)), dtype="float64")

diff -r f77b96367e006867c10687f3ef9a2f1bca711811 -r 401924a7b8a7765f35c449e29c411955980c2414 yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -32,7 +32,7 @@
             registry.add_field(("index", "d%s" % ax), function = f1,
                                display_field = False,
                                units = "code_length")
-            registry.add_field(("index", "path_%s" % ax), function = f1,
+            registry.add_field(("index", "path_element_%s" % ax), function = f1,
                                display_field = False,
                                units = "code_length")
             registry.add_field(("index", "%s" % ax), function = f2,

diff -r f77b96367e006867c10687f3ef9a2f1bca711811 -r 401924a7b8a7765f35c449e29c411955980c2414 yt/geometry/coordinates/spherical_coordinates.py
--- a/yt/geometry/coordinates/spherical_coordinates.py
+++ b/yt/geometry/coordinates/spherical_coordinates.py
@@ -75,13 +75,13 @@
 
         def _path_r(field, data):
             return data["index", "dr"]
-        registry.add_field(("index", "path_r"),
+        registry.add_field(("index", "path_element_r"),
                  function = _path_r,
                  units = "code_length")
         def _path_theta(field, data):
             # Note: this already assumes cell-centered
             return data["index", "r"] * data["index", "dtheta"]
-        registry.add_field(("index", "path_theta"),
+        registry.add_field(("index", "path_element_theta"),
                  function = _path_theta,
                  units = "code_length")
         def _path_phi(field, data):
@@ -89,7 +89,7 @@
             return data["index", "r"] \
                     * data["index", "dphi"] \
                     * np.sin(data["index", "theta"])
-        registry.add_field(("index", "path_phi"),
+        registry.add_field(("index", "path_element_phi"),
                  function = _path_phi,
                  units = "code_length")
 


https://bitbucket.org/yt_analysis/yt/commits/32bef070f721/
Changeset:   32bef070f721
Branch:      yt
User:        MatthewTurk
Date:        2014-12-02 21:08:45+00:00
Summary:     Adding a note about the purpose of sanity_check.
Affected #:  1 file

diff -r 401924a7b8a7765f35c449e29c411955980c2414 -r 32bef070f721dd869d9a7a522d84b391e4399259 yt/geometry/coordinates/spherical_coordinates.py
--- a/yt/geometry/coordinates/spherical_coordinates.py
+++ b/yt/geometry/coordinates/spherical_coordinates.py
@@ -237,6 +237,8 @@
         return width
 
     def _sanity_check(self):
+        """This prints out a handful of diagnostics that help verify the
+        dataset is well formed."""
         # We just check a few things here.
         dd = self.ds.all_data()
         r0 = self.ds.domain_left_edge[0]


https://bitbucket.org/yt_analysis/yt/commits/b0796afabe85/
Changeset:   b0796afabe85
Branch:      yt
User:        MatthewTurk
Date:        2014-12-02 21:22:53+00:00
Summary:     Fixing calls to pixelize_cylinder for other coordinate handlers.
Affected #:  3 files

diff -r 32bef070f721dd869d9a7a522d84b391e4399259 -r b0796afabe85ea19287aab60e8f0cf145e6a3366 yt/geometry/coordinates/cylindrical_coordinates.py
--- a/yt/geometry/coordinates/cylindrical_coordinates.py
+++ b/yt/geometry/coordinates/cylindrical_coordinates.py
@@ -101,10 +101,10 @@
         return buff
 
     def _cyl_pixelize(self, data_source, field, bounds, size, antialias):
-        buff = pixelize_cylinder(data_source['r'],
-                                 data_source['dr'],
-                                 data_source['theta'],
-                                 data_source['dtheta']/2.0, # half-widths
+        buff = pixelize_cylinder(data_source['px'],
+                                 data_source['pdx'],
+                                 data_source['py'],
+                                 data_source['pdy'],
                                  size, data_source[field], bounds)
         return buff
 

diff -r 32bef070f721dd869d9a7a522d84b391e4399259 -r b0796afabe85ea19287aab60e8f0cf145e6a3366 yt/geometry/coordinates/geographic_coordinates.py
--- a/yt/geometry/coordinates/geographic_coordinates.py
+++ b/yt/geometry/coordinates/geographic_coordinates.py
@@ -131,20 +131,16 @@
 
     def _cyl_pixelize(self, data_source, field, bounds, size, antialias,
                       dimension):
-        if dimension == 0:
-            buff = pixelize_cylinder(data_source['r'],
-                                     data_source['dr'] / 2.0,
-                                     data_source['theta'],
-                                     data_source['dtheta'] / 2.0, # half-widths
-                                     size, data_source[field], bounds)
-        elif dimension == 1:
-            buff = pixelize_cylinder(data_source['r'],
-                                     data_source['dr'] / 2.0,
-                                     data_source['phi'],
-                                     data_source['dphi'] / 2.0, # half-widths
-                                     size, data_source[field], bounds)
-        else:
-            raise RuntimeError
+        surface_height = data_source.get_field_parameter("surface_height")
+        if surface_height is None:
+            surface_height = getattr(data_source.ds, "surface_height", 0.0)
+        r = data_source['py'] + surface_height
+        # Because of the axis-ordering, dimensions 0 and 1 both have r as py
+        # and the angular coordinate as px.
+        buff = pixelize_cylinder(r, data_source['pdy'],
+                                 data_source['px'],
+                                 data_source['pdx'], # half-widths
+                                 size, data_source[field], bounds)
         return buff
 
 

diff -r 32bef070f721dd869d9a7a522d84b391e4399259 -r b0796afabe85ea19287aab60e8f0cf145e6a3366 yt/geometry/coordinates/polar_coordinates.py
--- a/yt/geometry/coordinates/polar_coordinates.py
+++ b/yt/geometry/coordinates/polar_coordinates.py
@@ -88,10 +88,10 @@
     def _polar_pixelize(self, data_source, field, bounds, size, antialias):
         # Out bounds here will *always* be what plot window thinks are x0, x1,
         # y0, y1, but which will actually be rmin, rmax, thetamin, thetamax.
-        buff = pixelize_cylinder(data_source['r'],
-                                 data_source['dr'],
-                                 data_source['theta'],
-                                 data_source['dtheta'] / 2.0, # half-widths
+        buff = pixelize_cylinder(data_source['px'],
+                                 data_source['pdx'],
+                                 data_source['py'],
+                                 data_source['pdy'], # half-widths
                                  size, data_source[field], bounds)
         return buff
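
These handlers now hand the pixelization fields ('px', 'pdx', 'py', 'pdy') to
pixelize_cylinder instead of raw coordinate fields; pdx/pdy already store
half-widths, which is why the explicit "/ 2.0" factors disappear.  A
hypothetical usage sketch of the call pattern seen in these diffs (sl and
field are assumptions, e.g. a slice of a cylindrical dataset and a field
name):

    from yt.utilities.lib.misc_utilities import pixelize_cylinder

    buff = pixelize_cylinder(sl['px'],   # radius at pixel centers
                             sl['pdx'],  # half-width in radius
                             sl['py'],   # angle at pixel centers
                             sl['pdy'],  # half-width in angle
                             (800, 800), sl[field], (0.0, 1.0, 0.0, 1.0))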
 


https://bitbucket.org/yt_analysis/yt/commits/78a0b96fc01c/
Changeset:   78a0b96fc01c
Branch:      yt
User:        MatthewTurk
Date:        2014-12-02 21:25:08+00:00
Summary:     Adding cylindrical path elements.
Affected #:  1 file

diff -r b0796afabe85ea19287aab60e8f0cf145e6a3366 -r 78a0b96fc01c79946cb9ab0048a8faa6a9d9809a yt/geometry/coordinates/cylindrical_coordinates.py
--- a/yt/geometry/coordinates/cylindrical_coordinates.py
+++ b/yt/geometry/coordinates/cylindrical_coordinates.py
@@ -72,6 +72,22 @@
                  function=_CylindricalVolume,
                  units = "code_length**3")
 
+        def _path_r(field, data):
+            return data["index", "dr"]
+        registry.add_field(("index", "path_element_r"),
+                 function = _path_r,
+                 units = "code_length")
+        def _path_theta(field, data):
+            # Note: this already assumes cell-centered
+            return data["index", "r"] * data["index", "dtheta"]
+        registry.add_field(("index", "path_element_theta"),
+                 function = _path_theta,
+                 units = "code_length")
+        def _path_z(field, data):
+            return data["index", "dz"]
+        registry.add_field(("index", "path_element_z"),
+                 function = _path_z,
+                 units = "code_length")
 
     def pixelize(self, dimension, data_source, field, bounds, size,
                  antialias = True, periodic = True):


https://bitbucket.org/yt_analysis/yt/commits/0dc2e63c6afc/
Changeset:   0dc2e63c6afc
Branch:      yt
User:        MatthewTurk
Date:        2014-12-02 22:06:19+00:00
Summary:     Adding path element for geographic coordinates.
Affected #:  1 file

diff -r 78a0b96fc01c79946cb9ab0048a8faa6a9d9809a -r 0dc2e63c6afcf49c74a4b3ca409d28bce92bab0d yt/geometry/coordinates/geographic_coordinates.py
--- a/yt/geometry/coordinates/geographic_coordinates.py
+++ b/yt/geometry/coordinates/geographic_coordinates.py
@@ -74,6 +74,27 @@
                  function=_SphericalVolume,
                  units = "code_length**3")
 
+        def _path_altitude(field, data):
+            return data["index", "daltitude"]
+        registry.add_field(("index", "path_element_altitude"),
+                 function = _path_r,
+                 units = "code_length")
+        def _path_longitude(field, data):
+            # We use r here explicitly
+            return data["index", "r"] * \
+                data["index", "dlongitude"] * np.pi/180.0
+        registry.add_field(("index", "path_element_longitude"),
+                 function = _path_longitude,
+                 units = "code_length")
+        def _path_latitude(field, data):
+            # We use r here explicitly
+            return data["index", "r"] \
+                    * data["index", "dlatitude"] * np.pi/180.0 \
+                    * np.sin(data["index", "longitude"] * np.pi/180.0)
+        registry.add_field(("index", "path_element_latitude"),
+                 function = _path_latitude,
+                 units = "code_length")
+
         # Altitude is the radius from the central zone minus the radius of the
         # surface.
         def _altitude_to_radius(field, data):
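
Geographic angles are stored in degrees, hence the pi/180 factors, and the
radius entering the angular elements is altitude plus the surface height (the
("index", "r") field whose definition begins above).  Note that
("index", "path_element_altitude") is registered here with function = _path_r,
a name not defined in this handler; changeset 560590bee632 below corrects it
to _path_altitude and also swaps the latitude/longitude line elements.  A
minimal sketch of the corrected elements:

    import numpy as np

    def geographic_path_elements(r, lat_deg, dlat_deg, dlon_deg, dalt):
        # Angles in degrees; latitude runs -90..90, longitude -180..180.
        deg = np.pi / 180.0
        dl_alt = dalt
        dl_lat = r * dlat_deg * deg
        # sin((lat + 90) deg) = cos(lat deg): circles of constant latitude
        # shrink toward the poles.
        dl_lon = r * dlon_deg * deg * np.sin((lat_deg + 90.0) * deg)
        return dl_alt, dl_lat, dl_lon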


https://bitbucket.org/yt_analysis/yt/commits/51a09bc9d58a/
Changeset:   51a09bc9d58a
Branch:      yt
User:        MatthewTurk
Date:        2014-12-08 23:16:55+00:00
Summary:     Address issues for FITS cubes.
Affected #:  1 file

diff -r 0dc2e63c6afcf49c74a4b3ca409d28bce92bab0d -r 51a09bc9d58a784391b2338ed2ffbb2c4874a2c8 yt/geometry/coordinates/spec_cube_coordinates.py
--- a/yt/geometry/coordinates/spec_cube_coordinates.py
+++ b/yt/geometry/coordinates/spec_cube_coordinates.py
@@ -17,6 +17,8 @@
 import numpy as np
 from .cartesian_coordinates import \
     CartesianCoordinateHandler
+from .coordinate_handler import \
+    _get_coord_fields
 
 class SpectralCubeCoordinateHandler(CartesianCoordinateHandler):
 
@@ -58,6 +60,38 @@
         self.axis_field = {}
         self.axis_field[self.ds.spec_axis] = _spec_axis
 
+    def setup_fields(self, registry):
+        if self.ds.no_cgs_equiv_length == False:
+            return super(self, SpectralCubeCoordinateHandler
+                    ).setup_fields(registry)
+        for axi, ax in enumerate('xyz'):
+            f1, f2 = _get_coord_fields(axi)
+            def _get_length_func():
+                def _length_func(field, data):
+                    return 1.0
+                return _length_func
+            registry.add_field(("index", "d%s" % ax), function = f1,
+                               display_field = False,
+                               units = "code_length")
+            registry.add_field(("index", "path_element_%s" % ax),
+                               function = _get_length_func(),
+                               display_field = False,
+                               units = "")
+            registry.add_field(("index", "%s" % ax), function = f2,
+                               display_field = False,
+                               units = "code_length")
+        def _cell_volume(field, data):
+            rv  = data["index", "dx"].copy(order='K')
+            rv *= data["index", "dy"]
+            rv *= data["index", "dz"]
+            return rv
+        registry.add_field(("index", "cell_volume"), function=_cell_volume,
+                           display_field=False, units = "code_length**3")
+        registry.check_derived_fields(
+            [("index", "dx"), ("index", "dy"), ("index", "dz"),
+             ("index", "x"), ("index", "y"), ("index", "z"),
+             ("index", "cell_volume")])
+
     def convert_to_cylindrical(self, coord):
         raise NotImplementedError
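
When the dataset has no CGS length equivalent (ds.no_cgs_equiv_length), the
path elements are registered as dimensionless fields equal to one, so a
projection reduces to a plain sum of field values along the axis; the
conditional cm conversion added in changeset cc4174df3699 below relies on
exactly this.  Note also that super(self, SpectralCubeCoordinateHandler)
reverses the conventional Python 2 argument order,
super(SpectralCubeCoordinateHandler, self).  A tiny sketch of the
dimensionless-path behavior:

    import numpy as np

    field = np.random.random(100)
    dl = np.ones_like(field)  # stand-in for ("index", "path_element_x")
    # With dl == 1 everywhere, sum(field * dl) is just sum(field).
    assert np.allclose((field * dl).sum(), field.sum())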
 


https://bitbucket.org/yt_analysis/yt/commits/9b0c98097a7a/
Changeset:   9b0c98097a7a
Branch:      yt
User:        MatthewTurk
Date:        2014-12-09 17:05:52+00:00
Summary:     Adjust the axis name for path elements in spec cube.
Affected #:  1 file

diff -r 51a09bc9d58a784391b2338ed2ffbb2c4874a2c8 -r 9b0c98097a7a98448e00cffb4bcda6cb282cfc8b yt/geometry/coordinates/spec_cube_coordinates.py
--- a/yt/geometry/coordinates/spec_cube_coordinates.py
+++ b/yt/geometry/coordinates/spec_cube_coordinates.py
@@ -64,7 +64,7 @@
         if self.ds.no_cgs_equiv_length == False:
             return super(self, SpectralCubeCoordinateHandler
                     ).setup_fields(registry)
-        for axi, ax in enumerate('xyz'):
+        for axi, ax in enumerate(self.axis_name):
             f1, f2 = _get_coord_fields(axi)
             def _get_length_func():
                 def _length_func(field, data):


https://bitbucket.org/yt_analysis/yt/commits/85a21f3d5bc1/
Changeset:   85a21f3d5bc1
Branch:      yt
User:        MatthewTurk
Date:        2014-12-29 00:35:01+00:00
Summary:     First in a set of new tests.
Affected #:  1 file

diff -r 9b0c98097a7a98448e00cffb4bcda6cb282cfc8b -r 85a21f3d5bc17a2694980589dc521fc28a955318 yt/geometry/coordinates/tests/test_cartesian_coordinates.py
--- /dev/null
+++ b/yt/geometry/coordinates/tests/test_cartesian_coordinates.py
@@ -0,0 +1,25 @@
+# Some tests for the Cartesian coordinates handler
+
+from yt.testing import *
+
+# Our canonical tests are that we can access all of our fields and we can
+# compute our volume correctly.
+
+def test_cartesian_coordinates():
+    # We're going to load up a simple AMR grid and check its volume
+    # calculations and path length calculations.
+    ds = fake_amr_ds()
+    axes = list(set(ds.coordinates.axis_name.values()))
+    axes.sort()
+    for i, axis in enumerate(axes):
+        dd = ds.all_data()
+        fi = ("index", axis)
+        fd = ("index", "d%s" % axis)
+        fp = ("index", "path_element_%s" % axis)
+        ma = np.argmax(dd[fi])
+        yield assert_equal, dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i]
+        mi = np.argmin(dd[fi])
+        yield assert_equal, dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i]
+        yield assert_equal, dd[fd].min(), ds.index.get_smallest_dx()
+        yield assert_equal, dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i]
+        yield assert_equal, dd[fd], dd[fp]


https://bitbucket.org/yt_analysis/yt/commits/0669cfb14bf5/
Changeset:   0669cfb14bf5
Branch:      yt
User:        MatthewTurk
Date:        2014-12-29 01:11:50+00:00
Summary:     Support non-cartesian fake AMR datasets
Affected #:  1 file

diff -r 85a21f3d5bc17a2694980589dc521fc28a955318 -r 0669cfb14bf583061fce54038c660616052d3e53 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -179,11 +179,24 @@
     ug = load_uniform_grid(data, ndims, length_unit=length_unit, nprocs=nprocs)
     return ug
 
-def fake_amr_ds(fields = ("Density",)):
+_geom_transforms = {
+    # These are the bounds we want.  Cartesian we just assume goes 0 .. 1.
+    'cartesian'  : ( (0.0, 0.0, 0.0), (1.0, 1.0, 1.0) ),
+    'spherical'  : ( (0.0, 0.0, 0.0), (1.0, np.pi, 2*np.pi) ),
+    'cylindrical': ( (0.0, 0.0, 0.0), (1.0, 1.0, 2.0*np.pi) ), # rzt
+    'polar'      : ( (0.0, 0.0, 0.0), (1.0, 2.0*np.pi, 1.0) ), # rtz
+}
+
+def fake_amr_ds(fields = ("Density",), geometry = "cartesian"):
     from yt.frontends.stream.api import load_amr_grids
+    LE, RE = _geom_transforms[geometry]
+    LE = np.array(LE)
+    RE = np.array(RE)
     data = []
     for gspec in _amr_grid_index:
         level, left_edge, right_edge, dims = gspec
+        left_edge = left_edge * (RE - LE) + LE
+        right_edge = right_edge * (RE - LE) + LE
         gdata = dict(level = level,
                      left_edge = left_edge,
                      right_edge = right_edge,
@@ -191,7 +204,8 @@
         for f in fields:
             gdata[f] = np.random.random(dims)
         data.append(gdata)
-    return load_amr_grids(data, [32, 32, 32], 1.0)
+    bbox = np.array([LE, RE]).T
+    return load_amr_grids(data, [32, 32, 32], 1.0, geometry=geometry, bbox=bbox)
 
 def fake_particle_ds(
         fields = ("particle_position_x",
@@ -225,8 +239,6 @@
     ds = load_particles(data, 1.0, bbox=bbox)
     return ds
 
-
-
 def expand_keywords(keywords, full=False):
     """
     expand_keywords is a means for testing all possible keyword
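
With the new geometry keyword and the _geom_transforms bounds above, the same
canned AMR grid hierarchy (_amr_grid_index) can be instantiated in any of the
supported geometries.  A hedged usage sketch:

    from yt.testing import fake_amr_ds

    # Bounds come from _geom_transforms, e.g. spherical gives
    # r in [0, 1], theta in [0, pi], phi in [0, 2*pi].
    for geom in ("cartesian", "spherical", "cylindrical", "polar"):
        ds = fake_amr_ds(geometry=geom)
        dd = ds.all_data()
        print geom, dd["cell_volume"].sum(dtype="float64")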


https://bitbucket.org/yt_analysis/yt/commits/0d643a705751/
Changeset:   0d643a705751
Branch:      yt
User:        MatthewTurk
Date:        2014-12-29 01:12:03+00:00
Summary:     Add cylindrical geometry tests
Affected #:  2 files

diff -r 0669cfb14bf583061fce54038c660616052d3e53 -r 0d643a70575133eea75dcceabdce1c9aa4bbafe1 yt/geometry/coordinates/tests/test_cartesian_coordinates.py
--- a/yt/geometry/coordinates/tests/test_cartesian_coordinates.py
+++ b/yt/geometry/coordinates/tests/test_cartesian_coordinates.py
@@ -23,3 +23,5 @@
         yield assert_equal, dd[fd].min(), ds.index.get_smallest_dx()
         yield assert_equal, dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i]
         yield assert_equal, dd[fd], dd[fp]
+    yield assert_equal, dd["cell_volume"].sum(dtype="float64"), \
+                        ds.domain_width.prod()

diff -r 0669cfb14bf583061fce54038c660616052d3e53 -r 0d643a70575133eea75dcceabdce1c9aa4bbafe1 yt/geometry/coordinates/tests/test_cylindrical_coordinates.py
--- /dev/null
+++ b/yt/geometry/coordinates/tests/test_cylindrical_coordinates.py
@@ -0,0 +1,28 @@
+# Some tests for the Cylindrical coordinates handler
+
+from yt.testing import *
+
+# Our canonical tests are that we can access all of our fields and we can
+# compute our volume correctly.
+
+def test_cylindrical_coordinates():
+    # We're going to load up a simple AMR grid and check its volume
+    # calculations and path length calculations.
+    ds = fake_amr_ds(geometry="cylindrical")
+    axes = ["r", "z", "theta"]
+    for i, axis in enumerate(axes):
+        dd = ds.all_data()
+        fi = ("index", axis)
+        fd = ("index", "d%s" % axis)
+        fp = ("index", "path_element_%s" % axis)
+        ma = np.argmax(dd[fi])
+        yield assert_equal, dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d
+        mi = np.argmin(dd[fi])
+        yield assert_equal, dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d
+        yield assert_equal, dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i].d
+    yield assert_almost_equal, dd["cell_volume"].sum(dtype="float64"), \
+                        np.pi*ds.domain_width[0]**2 * ds.domain_width[1]
+    yield assert_equal, dd["index", "path_element_r"], dd["index", "dr"]
+    yield assert_equal, dd["index", "path_element_z"], dd["index", "dz"]
+    yield assert_equal, dd["index", "path_element_theta"], \
+                        dd["index", "r"] * dd["index", "dtheta"]
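
The analytic volume in the final check follows from the cylindrical bounds in
_geom_transforms (r in [0, 1], z in [0, 1], theta in [0, 2*pi], ordered
'rzt'): a full cylinder of volume pi * R**2 * H.  As a quick sketch:

    import numpy as np

    R, H = 1.0, 1.0  # domain_width[0] and domain_width[1] in 'rzt' ordering
    # Integral of r dr dtheta dz over the domain: (R**2/2) * 2*pi * H
    assert np.isclose(np.pi * R**2 * H, (R**2 / 2.0) * 2.0 * np.pi * H)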


https://bitbucket.org/yt_analysis/yt/commits/e75b031b1543/
Changeset:   e75b031b1543
Branch:      yt
User:        MatthewTurk
Date:        2014-12-29 01:24:51+00:00
Summary:     Simplify polar coords and add more tests.
Affected #:  3 files

diff -r 0d643a70575133eea75dcceabdce1c9aa4bbafe1 -r e75b031b1543393fa5cd4fdf22f3939974fd242c yt/geometry/coordinates/cylindrical_coordinates.py
--- a/yt/geometry/coordinates/cylindrical_coordinates.py
+++ b/yt/geometry/coordinates/cylindrical_coordinates.py
@@ -47,7 +47,7 @@
                            display_field = False,
                            units = "code_length")
 
-        f1, f2 = _get_coord_fields(1)
+        f1, f2 = _get_coord_fields(self.axis_id['z'])
         registry.add_field(("index", "dz"), function = f1,
                            display_field = False,
                            units = "code_length")
@@ -55,7 +55,7 @@
                            display_field = False,
                            units = "code_length")
 
-        f1, f2 = _get_coord_fields(2, "")
+        f1, f2 = _get_coord_fields(self.axis_id['theta'], "")
         registry.add_field(("index", "dtheta"), function = f1,
                            display_field = False,
                            units = "")

diff -r 0d643a70575133eea75dcceabdce1c9aa4bbafe1 -r e75b031b1543393fa5cd4fdf22f3939974fd242c yt/geometry/coordinates/polar_coordinates.py
--- a/yt/geometry/coordinates/polar_coordinates.py
+++ b/yt/geometry/coordinates/polar_coordinates.py
@@ -15,86 +15,22 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
+from yt.units.yt_array import YTArray
 from .coordinate_handler import \
     CoordinateHandler, \
     _unknown_coord, \
     _get_coord_fields
+from .cylindrical_coordinates import CylindricalCoordinateHandler
+import yt.visualization._MPL as _MPL
 from yt.utilities.lib.misc_utilities import \
     pixelize_cylinder
 
-class PolarCoordinateHandler(CoordinateHandler):
+class PolarCoordinateHandler(CylindricalCoordinateHandler):
 
     def __init__(self, ds, ordering = 'rtz'):
         if ordering != 'rtz': raise NotImplementedError
         super(PolarCoordinateHandler, self).__init__(ds)
 
-    def setup_fields(self, registry):
-        # return the fields for r, z, theta
-        registry.add_field("dx", function=_unknown_coord)
-        registry.add_field("dy", function=_unknown_coord)
-        registry.add_field("x", function=_unknown_coord)
-        registry.add_field("y", function=_unknown_coord)
-
-        f1, f2 = _get_coord_fields(0)
-        registry.add_field(("index", "dr"), function = f1,
-                           display_field = False,
-                           units = "code_length")
-        registry.add_field(("index", "r"), function = f2,
-                           display_field = False,
-                           units = "code_length")
-
-        f1, f2 = _get_coord_fields(1, "")
-        registry.add_field(("index", "dtheta"), function = f1,
-                           display_field = False,
-                           units = "")
-        registry.add_field(("index", "theta"), function = f2,
-                           display_field = False,
-                           units = "")
-
-        f1, f2 = _get_coord_fields(2) 
-        registry.add_field(("index", "dz"), function = f1,
-                           display_field = False,
-                           units = "code_length")
-        registry.add_field(("index", "z"), function = f2,
-                           display_field = False,
-                           units = "code_length")
-
-
-        def _CylindricalVolume(field, data):
-            return data["dtheta"] * data["r"] * data["dr"] * data["dz"]
-        registry.add_field("CellVolume", function=_CylindricalVolume)
-
-    def pixelize(self, dimension, data_source, field, bounds, size, antialias = True):
-        ax_name = self.axis_name[dimension]
-        if ax_name in ('r', 'theta'):
-            return self._ortho_pixelize(data_source, field, bounds, size,
-                                        antialias)
-        elif ax_name == "z":
-            return self._polar_pixelize(data_source, field, bounds, size,
-                                        antialias)
-        else:
-            # Pixelizing along a cylindrical surface is a bit tricky
-            raise NotImplementedError
-
-
-    def _ortho_pixelize(self, data_source, field, bounds, size, antialias):
-        buff = _MPL.Pixelize(data_source['px'], data_source['py'],
-                             data_source['pdx'], data_source['pdy'],
-                             data_source[field], size[0], size[1],
-                             bounds, int(antialias),
-                             True, self.period).transpose()
-        return buff
-
-    def _polar_pixelize(self, data_source, field, bounds, size, antialias):
-        # Out bounds here will *always* be what plot window thinks are x0, x1,
-        # y0, y1, but which will actually be rmin, rmax, thetamin, thetamax.
-        buff = pixelize_cylinder(data_source['px'],
-                                 data_source['pdx'],
-                                 data_source['py'],
-                                 data_source['pdy'], # half-widths
-                                 size, data_source[field], bounds)
-        return buff
-
     axis_name = { 0  : 'r',  1  : 'theta',  2  : 'z',
                  'r' : 'r', 'theta' : 'theta', 'z' : 'z',
                  'R' : 'r', 'Theta' : 'theta', 'Z' : 'z'}
@@ -108,23 +44,6 @@
     y_axis = { 'r' : 2, 'theta' : 2, 'z' : 1,
                 0  : 2,  1  : 2,  2  : 1}
 
-    _image_axis_name = None
-    @property
-    def image_axis_name(self):    
-        if self._image_axis_name is not None:
-            return self._image_axis_name
-        # This is the x and y axes labels that get displayed.  For
-        # non-Cartesian coordinates, we usually want to override these for
-        # Cartesian coordinates, since we transform them.
-        rv = {0: ('theta', 'z'),
-              1: ('x', 'y'),
-              2: ('r', 'z')}
-        for i in rv.keys():
-            rv[self.axis_name[i]] = rv[i]
-            rv[self.axis_name[i].upper()] = rv[i]
-        self._image_axis_name = rv
-        return rv
-
     def convert_from_cartesian(self, coord):
         return cartesian_to_cylindrical(coord)
 

diff -r 0d643a70575133eea75dcceabdce1c9aa4bbafe1 -r e75b031b1543393fa5cd4fdf22f3939974fd242c yt/geometry/coordinates/tests/test_polar_coordinates.py
--- /dev/null
+++ b/yt/geometry/coordinates/tests/test_polar_coordinates.py
@@ -0,0 +1,29 @@
+# Some tests for the polar coordinates handler
+# (Pretty similar to cylindrical, but different ordering)
+
+from yt.testing import *
+
+# Our canonical tests are that we can access all of our fields and we can
+# compute our volume correctly.
+
+def test_polar_coordinates():
+    # We're going to load up a simple AMR grid and check its volume
+    # calculations and path length calculations.
+    ds = fake_amr_ds(geometry="polar")
+    axes = ["r", "theta", "z"]
+    for i, axis in enumerate(axes):
+        dd = ds.all_data()
+        fi = ("index", axis)
+        fd = ("index", "d%s" % axis)
+        fp = ("index", "path_element_%s" % axis)
+        ma = np.argmax(dd[fi])
+        yield assert_equal, dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d
+        mi = np.argmin(dd[fi])
+        yield assert_equal, dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d
+        yield assert_equal, dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i].d
+    yield assert_almost_equal, dd["cell_volume"].sum(dtype="float64"), \
+                        np.pi*ds.domain_width[0]**2 * ds.domain_width[2]
+    yield assert_equal, dd["index", "path_element_r"], dd["index", "dr"]
+    yield assert_equal, dd["index", "path_element_z"], dd["index", "dz"]
+    yield assert_equal, dd["index", "path_element_theta"], \
+                        dd["index", "r"] * dd["index", "dtheta"]


https://bitbucket.org/yt_analysis/yt/commits/c63a3dd60e83/
Changeset:   c63a3dd60e83
Branch:      yt
User:        MatthewTurk
Date:        2014-12-29 01:33:56+00:00
Summary:     Adding spherical coordinate tests
Affected #:  1 file

diff -r e75b031b1543393fa5cd4fdf22f3939974fd242c -r c63a3dd60e83c88de51ef182c18153bfb95af021 yt/geometry/coordinates/tests/test_spherical_coordinates.py
--- /dev/null
+++ b/yt/geometry/coordinates/tests/test_spherical_coordinates.py
@@ -0,0 +1,35 @@
+# Some tests for the spherical coordinates handler
+
+from yt.testing import *
+
+# Our canonical tests are that we can access all of our fields and we can
+# compute our volume correctly.
+
+def test_spherical_coordinates():
+    # We're going to load up a simple AMR grid and check its volume
+    # calculations and path length calculations.
+    ds = fake_amr_ds(geometry="spherical")
+    axes = ["r", "theta", "phi"]
+    for i, axis in enumerate(axes):
+        dd = ds.all_data()
+        fi = ("index", axis)
+        fd = ("index", "d%s" % axis)
+        fp = ("index", "path_element_%s" % axis)
+        ma = np.argmax(dd[fi])
+        yield assert_equal, dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d
+        mi = np.argmin(dd[fi])
+        yield assert_equal, dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d
+        yield assert_equal, dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i].d
+    # Note that we're using a lot of funny transforms to get to this, so we do
+    # not expect to get actual agreement.  This is a bit of a shame, but I
+    # don't think it is avoidable as of right now.  Real datasets will almost
+    # certainly be correct, if this is correct to 3 decimal places.
+    yield assert_almost_equal, dd["cell_volume"].sum(dtype="float64"), \
+                        (4.0/3.0) * np.pi*ds.domain_width[0]**3, \
+                        3 # Allow for GENEROUS error on the volume ...
+    yield assert_equal, dd["index", "path_element_r"], dd["index", "dr"]
+    yield assert_equal, dd["index", "path_element_theta"], \
+                        dd["index", "r"] * dd["index", "dtheta"]
+    yield assert_equal, dd["index", "path_element_phi"], \
+                        dd["index", "r"] * dd["index", "dphi"] * \
+                          np.sin(dd["index","theta"])
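
The loose three-decimal tolerance on the volume is what you would expect from
midpoint-rule discretization: summing r**2 * sin(theta) * dr * dtheta * dphi
over cells only approximates the exact per-cell integral
(1/3)*(r_+**3 - r_-**3) * (cos(theta_-) - cos(theta_+)) * dphi.  A standalone
sketch of the size of that error on a coarse grid (assuming a midpoint-rule
cell volume; the actual _SphericalVolume implementation is not shown in these
diffs):

    import numpy as np

    nr = ntheta = 32
    dr, dth = 1.0 / nr, np.pi / ntheta
    r = (np.arange(nr) + 0.5) * dr
    th = (np.arange(ntheta) + 0.5) * dth
    # Midpoint-rule total volume; the phi sum contributes exactly 2*pi.
    approx = (r**2).sum() * dr * np.sin(th).sum() * dth * 2 * np.pi
    exact = 4.0 / 3.0 * np.pi
    print abs(approx - exact) / exact  # small, but far from machine precision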


https://bitbucket.org/yt_analysis/yt/commits/3eda6533789f/
Changeset:   3eda6533789f
Branch:      yt
User:        MatthewTurk
Date:        2014-12-29 02:46:52+00:00
Summary:     We now fail with a much more telling error.
Affected #:  1 file

diff -r c63a3dd60e83c88de51ef182c18153bfb95af021 -r 3eda6533789f33a662d1a8fa23d34f4be4de11e1 yt/geometry/coordinates/spec_cube_coordinates.py
--- a/yt/geometry/coordinates/spec_cube_coordinates.py
+++ b/yt/geometry/coordinates/spec_cube_coordinates.py
@@ -68,7 +68,10 @@
             f1, f2 = _get_coord_fields(axi)
             def _get_length_func():
                 def _length_func(field, data):
-                    return 1.0
+                    # Just use axis 0
+                    rv = data.ds.arr(data.fcoords[...,0].copy(), field.units)
+                    rv[:] = 1.0
+                    return rv
                 return _length_func
             registry.add_field(("index", "d%s" % ax), function = f1,
                                display_field = False,


https://bitbucket.org/yt_analysis/yt/commits/cc4174df3699/
Changeset:   cc4174df3699
Branch:      yt
User:        MatthewTurk
Date:        2014-12-29 04:12:13+00:00
Summary:     Only conditionally convert to cm.
Affected #:  1 file

diff -r 3eda6533789f33a662d1a8fa23d34f4be4de11e1 -r cc4174df3699d7c97e929d5045b5f0b2e81f3781 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -378,7 +378,8 @@
             ax_name = self.ds.coordinates.axis_name[self.axis]
             dl = chunk["index", "path_element_%s" % (ax_name)]
             #dl = chunk.fwidth[:, self.axis]
-            dl.convert_to_units("cm")
+            if not dl.units.is_dimensionless:
+                dl.convert_to_units("cm")
         v = np.empty((chunk.ires.size, len(fields)), dtype="float64")
         for i in range(len(fields)):
             v[:,i] = chunk[fields[i]] * dl


https://bitbucket.org/yt_analysis/yt/commits/6039e7748456/
Changeset:   6039e7748456
Branch:      yt
User:        MatthewTurk
Date:        2014-12-29 04:12:51+00:00
Summary:     Adding a comment.
Affected #:  1 file

diff -r cc4174df3699d7c97e929d5045b5f0b2e81f3781 -r 6039e7748456cc0c0b00d64079c72ba9cd02afce yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -377,7 +377,9 @@
             # This gets explicitly converted to cm
             ax_name = self.ds.coordinates.axis_name[self.axis]
             dl = chunk["index", "path_element_%s" % (ax_name)]
-            #dl = chunk.fwidth[:, self.axis]
+            # This is done for cases where our path element does not have a CGS
+            # equivalent.  Eventually it would be nice if we could avoid any
+            # conversion at all.
             if not dl.units.is_dimensionless:
                 dl.convert_to_units("cm")
         v = np.empty((chunk.ires.size, len(fields)), dtype="float64")


https://bitbucket.org/yt_analysis/yt/commits/be53b3b9973e/
Changeset:   be53b3b9973e
Branch:      yt
User:        MatthewTurk
Date:        2014-12-29 16:39:11+00:00
Summary:     Updating comment.
Affected #:  1 file

diff -r 6039e7748456cc0c0b00d64079c72ba9cd02afce -r be53b3b9973e6ccc38934f87a21dfd7ad00c3b6b yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -378,8 +378,9 @@
             ax_name = self.ds.coordinates.axis_name[self.axis]
             dl = chunk["index", "path_element_%s" % (ax_name)]
             # This is done for cases where our path element does not have a CGS
-            # equivalent.  Eventually it would be nice if we could avoid any
-            # conversion at all.
+            # equivalent.  Once "preferred units" have been implemented, this
+            # will not be necessary at all, as the final conversion will occur
+            # at the display layer.
             if not dl.units.is_dimensionless:
                 dl.convert_to_units("cm")
         v = np.empty((chunk.ires.size, len(fields)), dtype="float64")


https://bitbucket.org/yt_analysis/yt/commits/560590bee632/
Changeset:   560590bee632
Branch:      yt
User:        MatthewTurk
Date:        2014-12-29 19:23:41+00:00
Summary:     Geographic coordinates were pretty wrong.  This fixes them and adds tests.
Affected #:  4 files

diff -r be53b3b9973e6ccc38934f87a21dfd7ad00c3b6b -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -190,6 +190,8 @@
         self.requested_parameters.append(param)
         if param in ['bulk_velocity', 'center', 'normal']:
             return self.ds.arr(np.random.random(3) * 1e-2, self.fp_units[param])
+        elif param in ['surface_height']:
+            return self.ds.quan(0.0, 'code_length')
         elif param in ['axis']:
             return 0
         elif param.startswith("cp_"):

diff -r be53b3b9973e6ccc38934f87a21dfd7ad00c3b6b -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 yt/geometry/coordinates/geographic_coordinates.py
--- a/yt/geometry/coordinates/geographic_coordinates.py
+++ b/yt/geometry/coordinates/geographic_coordinates.py
@@ -77,58 +77,61 @@
         def _path_altitude(field, data):
             return data["index", "daltitude"]
         registry.add_field(("index", "path_element_altitude"),
-                 function = _path_r,
+                 function = _path_altitude,
+                 units = "code_length")
+        def _path_latitude(field, data):
+            # We use r here explicitly
+            return data["index", "r"] * \
+                data["index", "dlatitude"] * np.pi/180.0
+        registry.add_field(("index", "path_element_latitude"),
+                 function = _path_latitude,
                  units = "code_length")
         def _path_longitude(field, data):
             # We use r here explicitly
-            return data["index", "r"] * \
-                data["index", "dlongitude"] * np.pi/180.0
+            return data["index", "r"] \
+                    * data["index", "dlongitude"] * np.pi/180.0 \
+                    * np.sin((data["index", "latitude"] + 90.0) * np.pi/180.0)
         registry.add_field(("index", "path_element_longitude"),
                  function = _path_longitude,
                  units = "code_length")
-        def _path_latitude(field, data):
-            # We use r here explicitly
-            return data["index", "r"] \
-                    * data["index", "dlatitude"] * np.pi/180.0 \
-                    * np.sin(data["index", "longitude"] * np.pi/180.0)
-        registry.add_field(("index", "path_element_latitude"),
-                 function = _path_latitude,
-                 units = "code_length")
 
         # Altitude is the radius from the central zone minus the radius of the
         # surface.
         def _altitude_to_radius(field, data):
             surface_height = data.get_field_parameter("surface_height")
             if surface_height is None:
-                surface_height = getattr(data.ds, "surface_height", 0.0)
+                if hasattr(data.ds, "surface_height"):
+                    surface_height = data.ds.surface_height
+                else:
+                    surface_height = data.ds.quan(0.0, "code_length")
             return data["altitude"] + surface_height
         registry.add_field(("index", "r"),
                  function=_altitude_to_radius,
                  units = "code_length")
         registry.alias(("index", "dr"), ("index", "daltitude"))
 
-        def _longitude_to_theta(field, data):
-            # longitude runs from -180 to 180.
-            return (data["longitude"] + 180) * np.pi/180.0
+        def _latitude_to_theta(field, data):
+            # latitude runs from -90 to 90
+            return (data["latitude"] + 90) * np.pi/180.0
         registry.add_field(("index", "theta"),
-                 function = _longitude_to_theta,
+                 function = _latitude_to_theta,
                  units = "")
-        def _dlongitude_to_dtheta(field, data):
-            return data["dlongitude"] * np.pi/180.0
+        def _dlatitude_to_dtheta(field, data):
+            return data["dlatitude"] * np.pi/180.0
         registry.add_field(("index", "dtheta"),
-                 function = _dlongitude_to_dtheta,
+                 function = _dlatitude_to_dtheta,
                  units = "")
 
-        def _latitude_to_phi(field, data):
-            # latitude runs from -90 to 90
-            return (data["latitude"] + 90) * np.pi/180.0
+        def _longitude_to_phi(field, data):
+            # longitude runs from -180 to 180
+            return (data["longitude"] + 90) * np.pi/180.0
         registry.add_field(("index", "phi"),
-                 function = _latitude_to_phi,
+                 function = _longitude_to_phi,
                  units = "")
-        def _dlatitude_to_dphi(field, data):
-            return data["dlatitude"] * np.pi/180.0
+        def _dlongitude_to_dphi(field, data):
+            return data["dlongitude"] * np.pi/180.0
         registry.add_field(("index", "dphi"),
-                 function = _dlatitude_to_dphi,
+                 function = _dlongitude_to_dphi,
                  units = "")
 
     def pixelize(self, dimension, data_source, field, bounds, size,
@@ -154,7 +157,10 @@
                       dimension):
         surface_height = data_source.get_field_parameter("surface_height")
         if surface_height is None:
-            surface_height = getattr(data_source.ds, "surface_height", 0.0)
+            if hasattr(data_source.ds, "surface_height"):
+                surface_height = data_source.ds.surface_height
+            else:
+                surface_height = data_source.ds.quan(0.0, "code_length")
         r = data_source['py'] + surface_height
         # Because of the axis-ordering, dimensions 0 and 1 both have r as py
         # and the angular coordinate as px.

diff -r be53b3b9973e6ccc38934f87a21dfd7ad00c3b6b -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 yt/geometry/coordinates/tests/test_geographic_coordinates.py
--- /dev/null
+++ b/yt/geometry/coordinates/tests/test_geographic_coordinates.py
@@ -0,0 +1,52 @@
+# Some tests for the geographic coordinates handler
+
+from yt.testing import *
+
+# Our canonical tests are that we can access all of our fields and we can
+# compute our volume correctly.
+
+def test_geographic_coordinates():
+    # We're going to load up a simple AMR grid and check its volume
+    # calculations and path length calculations.
+
+    # Note that we are setting it up to have an altitude of 1000 maximum, which
+    # means our volume will be that of a shell 1000 wide, starting at r of
+    # whatever our surface_height is set to.
+    ds = fake_amr_ds(geometry="geographic")
+    ds.surface_height = ds.quan(5000, "code_length")
+    axes = ["latitude", "longitude", "altitude"]
+    for i, axis in enumerate(axes):
+        dd = ds.all_data()
+        fi = ("index", axis)
+        fd = ("index", "d%s" % axis)
+        fp = ("index", "path_element_%s" % axis)
+        ma = np.argmax(dd[fi])
+        yield assert_equal, dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d
+        mi = np.argmin(dd[fi])
+        yield assert_equal, dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d
+        yield assert_equal, dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i].d
+    inner_r = ds.surface_height
+    outer_r = ds.surface_height + ds.domain_width[2]
+    yield assert_equal, dd["index","dtheta"], \
+                        dd["index","dlatitude"]*np.pi/180.0
+    yield assert_equal, dd["index","dphi"], \
+                        dd["index","dlongitude"]*np.pi/180.0
+    # Note our terrible agreement here.
+    yield assert_rel_equal, dd["cell_volume"].sum(dtype="float64"), \
+                        (4.0/3.0) * np.pi * (outer_r**3 - inner_r**3), \
+                        3
+    yield assert_equal, dd["index", "path_element_altitude"], \
+                        dd["index", "daltitude"]
+    yield assert_equal, dd["index", "path_element_altitude"], \
+                        dd["index", "dr"]
+    # Note that latitude corresponds to theta, longitude to phi
+    yield assert_equal, dd["index", "path_element_latitude"], \
+                        dd["index", "r"] * \
+                        dd["index", "dlatitude"] * np.pi/180.0
+    yield assert_equal, dd["index", "path_element_longitude"], \
+                        dd["index", "r"] * \
+                        dd["index", "dlongitude"] * np.pi/180.0 * \
+                        np.sin((dd["index", "latitude"] + 90.0) * np.pi/180.0)
+    # We also want to check that our radius is correct
+    yield assert_equal, dd["index","r"], \
+                        dd["index","altitude"] + ds.surface_height

diff -r be53b3b9973e6ccc38934f87a21dfd7ad00c3b6b -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -185,6 +185,7 @@
     'spherical'  : ( (0.0, 0.0, 0.0), (1.0, np.pi, 2*np.pi) ),
     'cylindrical': ( (0.0, 0.0, 0.0), (1.0, 1.0, 2.0*np.pi) ), # rzt
     'polar'      : ( (0.0, 0.0, 0.0), (1.0, 2.0*np.pi, 1.0) ), # rtz
+    'geographic' : ( (-90.0, -180.0, 0.0), (90.0, 180.0, 1000.0) ), # latlonalt
 }
 
 def fake_amr_ds(fields = ("Density",), geometry = "cartesian"):
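
The corrected mapping makes latitude play the role of the spherical colatitude
theta and longitude that of phi: theta = (latitude + 90) * pi/180 sweeps
[0, pi] as latitude runs from -90 to 90.  A minimal sketch of the latitude
mapping:

    import numpy as np

    def latitude_to_theta(lat_deg):
        # latitude in [-90, 90] degrees -> colatitude theta in [0, pi]
        return (np.asarray(lat_deg) + 90.0) * np.pi / 180.0

    assert np.isclose(latitude_to_theta(-90.0), 0.0)
    assert np.isclose(latitude_to_theta(90.0), np.pi)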


https://bitbucket.org/yt_analysis/yt/commits/371e62cd0a58/
Changeset:   371e62cd0a58
Branch:      yt
User:        MatthewTurk
Date:        2014-12-29 22:42:36+00:00
Summary:     Merging from mainline
Affected #:  141 files

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -9,6 +9,7 @@
 yt/frontends/artio/_artio_caller.c
 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.c
 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
+yt/analysis_modules/ppv_cube/ppv_utils.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/frontends/sph/smoothing_kernel.c
 yt/geometry/fake_octree.c

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -16,12 +16,14 @@
                 Sam Geen (samgeen at gmail.com)
                 Nathan Goldbaum (goldbaum at ucolick.org)
                 Markus Haider (markus.haider at uibk.ac.at)
+                Eric Hallman (hallman13 at gmail.com)
                 Cameron Hummels (chummels at gmail.com)
                 Christian Karch (chiffre at posteo.de)
                 Ben W. Keller (kellerbw at mcmaster.ca)
                 Ji-hoon Kim (me at jihoonkim.org)
                 Steffen Klemer (sklemer at phys.uni-goettingen.de)
                 Kacper Kowalik (xarthisius.kk at gmail.com)
+                Mark Krumholz (mkrumhol at ucsc.edu)
                 Michael Kuhlen (mqk at astro.berkeley.edu)
                 Eve Lee (elee at cita.utoronto.ca)
                 Sam Leitner (sam.leitner at gmail.com)
@@ -38,6 +40,7 @@
                 Brian O'Shea (bwoshea at gmail.com)
                 Jean-Claude Passy (jcpassy at uvic.ca)
                 John Regan (john.regan at helsinki.fi)
+                Sherwood Richers (srichers at tapir.caltech.edu)
                 Mark Richardson (Mark.L.Richardson at asu.edu)
                 Thomas Robitaille (thomas.robitaille at gmail.com)
                 Anna Rosen (rosen at ucolick.org)
@@ -48,10 +51,14 @@
                 Devin Silvia (devin.silvia at gmail.com)
                 Sam Skillman (samskillman at gmail.com)
                 Stephen Skory (s at skory.us)
+                Aaron Smith (asmith at astro.as.utexas.edu)
                 Britton Smith (brittonsmith at gmail.com)
                 Geoffrey So (gsiisg at gmail.com)
                 Casey Stark (caseywstark at gmail.com)
+                Antoine Strugarek (strugarek at astro.umontreal.ca)
+                Ji Suoqing (jisuoqing at gmail.com)
                 Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
+                Benjamin Thompson (bthompson2090 at gmail.com)
                 Stephanie Tonnesen (stonnes at gmail.com)
                 Matthew Turk (matthewturk at gmail.com)
                 Rich Wagner (rwagner at physics.ucsd.edu)

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e distribute_setup.py
--- a/distribute_setup.py
+++ b/distribute_setup.py
@@ -269,7 +269,7 @@
 
 def _remove_flat_installation(placeholder):
     if not os.path.isdir(placeholder):
-        log.warn('Unkown installation at %s', placeholder)
+        log.warn('Unknown installation at %s', placeholder)
         return False
     found = False
     for file in os.listdir(placeholder):

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/extensions/notebook_sphinxext.py
--- a/doc/extensions/notebook_sphinxext.py
+++ b/doc/extensions/notebook_sphinxext.py
@@ -1,9 +1,16 @@
-import os, shutil, string, glob, re
+import errno
+import os
+import shutil
+import string
+import re
+import tempfile
+import uuid
 from sphinx.util.compat import Directive
 from docutils import nodes
 from docutils.parsers.rst import directives
+from IPython.config import Config
 from IPython.nbconvert import html, python
-from IPython.nbformat.current import read, write
+from IPython.nbformat import current as nbformat
 from runipy.notebook_runner import NotebookRunner, NotebookError
 
 class NotebookDirective(Directive):
@@ -14,7 +21,7 @@
     """
     required_arguments = 1
     optional_arguments = 1
-    option_spec = {'skip_exceptions' : directives.flag}
+    option_spec = {'skip_exceptions': directives.flag}
     final_argument_whitespace = True
 
     def run(self): # check if there are spaces in the notebook name
@@ -26,9 +33,11 @@
         if not self.state.document.settings.raw_enabled:
             raise self.warning('"%s" directive disabled.' % self.name)
 
+        cwd = os.getcwd()
+        tmpdir = tempfile.mkdtemp()
+        os.chdir(tmpdir)
+
         # get path to notebook
-        source_dir = os.path.dirname(
-            os.path.abspath(self.state.document.current_source))
         nb_filename = self.arguments[0]
         nb_basename = os.path.basename(nb_filename)
         rst_file = self.state_machine.document.attributes['source']
@@ -37,26 +46,24 @@
 
         # Move files around.
         rel_dir = os.path.relpath(rst_dir, setup.confdir)
-        rel_path = os.path.join(rel_dir, nb_basename)
         dest_dir = os.path.join(setup.app.builder.outdir, rel_dir)
         dest_path = os.path.join(dest_dir, nb_basename)
 
-        if not os.path.exists(dest_dir):
-            os.makedirs(dest_dir)
+        image_dir, image_rel_dir = make_image_dir(setup, rst_dir)
 
-        # Copy unevaluated script
-        try:
-            shutil.copyfile(nb_abs_path, dest_path)
-        except IOError:
-            raise RuntimeError("Unable to copy notebook to build destination.")
+        # Ensure destination build directory exists
+        thread_safe_mkdir(os.path.dirname(dest_path))
 
+        # Copy unevaluated notebook
+        shutil.copyfile(nb_abs_path, dest_path)
+
+        # Construct paths to versions getting copied over
         dest_path_eval = string.replace(dest_path, '.ipynb', '_evaluated.ipynb')
         dest_path_script = string.replace(dest_path, '.ipynb', '.py')
         rel_path_eval = string.replace(nb_basename, '.ipynb', '_evaluated.ipynb')
         rel_path_script = string.replace(nb_basename, '.ipynb', '.py')
 
         # Create python script version
-        unevaluated_text = nb_to_html(nb_abs_path)
         script_text = nb_to_python(nb_abs_path)
         f = open(dest_path_script, 'w')
         f.write(script_text.encode('utf8'))
@@ -64,8 +71,11 @@
 
         skip_exceptions = 'skip_exceptions' in self.options
 
-        evaluated_text = evaluate_notebook(nb_abs_path, dest_path_eval,
-                                           skip_exceptions=skip_exceptions)
+        evaluated_text, resources = evaluate_notebook(
+            nb_abs_path, dest_path_eval, skip_exceptions=skip_exceptions)
+
+        evaluated_text = write_notebook_output(
+            resources, image_dir, image_rel_dir, evaluated_text)
 
         # Create link to notebook and script files
         link_rst = "(" + \
@@ -85,12 +95,9 @@
         # add dependency
         self.state.document.settings.record_dependencies.add(nb_abs_path)
 
-        # clean up png files left behind by notebooks.
-        png_files = glob.glob("*.png")
-        fits_files = glob.glob("*.fits")
-        h5_files = glob.glob("*.h5")
-        for file in png_files:
-            os.remove(file)
+        # clean up
+        os.chdir(cwd)
+        shutil.rmtree(tmpdir, True)
 
         return [nb_node]
 
@@ -106,14 +113,18 @@
 
 def nb_to_html(nb_path):
     """convert notebook to html"""
-    exporter = html.HTMLExporter(template_file='full')
-    output, resources = exporter.from_filename(nb_path)
+    c = Config({'ExtractOutputPreprocessor':{'enabled':True}})
+
+    exporter = html.HTMLExporter(template_file='full', config=c)
+    notebook = nbformat.read(open(nb_path), 'json')
+    output, resources = exporter.from_notebook_node(notebook)
     header = output.split('<head>', 1)[1].split('</head>',1)[0]
     body = output.split('<body>', 1)[1].split('</body>',1)[0]
 
     # http://imgur.com/eR9bMRH
     header = header.replace('<style', '<style scoped="scoped"')
-    header = header.replace('body {\n  overflow: visible;\n  padding: 8px;\n}\n', '')
+    header = header.replace('body {\n  overflow: visible;\n  padding: 8px;\n}\n',
+                            '')
     header = header.replace("code,pre{", "code{")
 
     # Filter out styles that conflict with the sphinx theme.
@@ -124,17 +135,19 @@
         'uneditable-input{',
         'collapse{',
     ]
+
     filter_strings.extend(['h%s{' % (i+1) for i in range(6)])
 
-    line_begin_strings = [
+    line_begin = [
         'pre{',
         'p{margin'
-        ]
+    ]
 
-    header_lines = filter(
-        lambda x: not any([s in x for s in filter_strings]), header.split('\n'))
-    header_lines = filter(
-        lambda x: not any([x.startswith(s) for s in line_begin_strings]), header_lines)
+    filterfunc = lambda x: not any([s in x for s in filter_strings])
+    header_lines = filter(filterfunc, header.split('\n'))
+
+    filterfunc = lambda x: not any([x.startswith(s) for s in line_begin])
+    header_lines = filter(filterfunc, header_lines)
 
     header = '\n'.join(header_lines)
 
@@ -143,13 +156,11 @@
     lines.append(header)
     lines.append(body)
     lines.append('</div>')
-    return '\n'.join(lines)
+    return '\n'.join(lines), resources
 
 def evaluate_notebook(nb_path, dest_path=None, skip_exceptions=False):
     # Create evaluated version and save it to the dest path.
-    # Always use --pylab so figures appear inline
-    # perhaps this is questionable?
-    notebook = read(open(nb_path), 'json')
+    notebook = nbformat.read(open(nb_path), 'json')
     nb_runner = NotebookRunner(notebook, pylab=False)
     try:
         nb_runner.run_notebook(skip_exceptions=skip_exceptions)
@@ -158,11 +169,14 @@
         print e
         # Return the traceback, filtering out ANSI color codes.
         # http://stackoverflow.com/questions/13506033/filtering-out-ansi-escape-sequences
-        return 'Notebook conversion failed with the following traceback: \n%s' % \
-            re.sub(r'\\033[\[\]]([0-9]{1,2}([;@][0-9]{0,2})*)*[mKP]?', '', str(e))
+        return "Notebook conversion failed with the " \
+               "following traceback: \n%s" % \
+            re.sub(r'\\033[\[\]]([0-9]{1,2}([;@][0-9]{0,2})*)*[mKP]?', '',
+                   str(e))
+
     if dest_path is None:
         dest_path = 'temp_evaluated.ipynb'
-    write(nb_runner.nb, open(dest_path, 'w'), 'json')
+    nbformat.write(nb_runner.nb, open(dest_path, 'w'), 'json')
     ret = nb_to_html(dest_path)
     if dest_path == 'temp_evaluated.ipynb':
         os.remove(dest_path)
@@ -186,3 +200,37 @@
                  html=(visit_notebook_node, depart_notebook_node))
 
     app.add_directive('notebook', NotebookDirective)
+
+    retdict = dict(
+        version='0.1',
+        parallel_read_safe=True,
+        parallel_write_safe=True
+    )
+
+    return retdict
+
+def make_image_dir(setup, rst_dir):
+    image_dir = setup.app.builder.outdir + os.path.sep + '_images'
+    rel_dir = os.path.relpath(setup.confdir, rst_dir)
+    image_rel_dir = rel_dir + os.path.sep + '_images'
+    thread_safe_mkdir(image_dir)
+    return image_dir, image_rel_dir
+
+def write_notebook_output(resources, image_dir, image_rel_dir, evaluated_text):
+    my_uuid = uuid.uuid4().hex
+
+    for output in resources['outputs']:
+        new_name = image_dir + os.path.sep + my_uuid + output
+        new_relative_name = image_rel_dir + os.path.sep + my_uuid + output
+        evaluated_text = evaluated_text.replace(output, new_relative_name)
+        with open(new_name, 'wb') as f:
+            f.write(resources['outputs'][output])
+    return evaluated_text
+
+def thread_safe_mkdir(dirname):
+    try:
+        os.makedirs(dirname)
+    except OSError as e:
+        if e.errno != errno.EEXIST:
+            raise
+        pass

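``thread_safe_mkdir`` uses this try/except idiom because concurrent Sphinx builds may race to create the same directory; on Python 3.2+ the same race-free behavior is available directly (a sketch, not part of this patch):

    import os

    def thread_safe_mkdir(dirname):
        # exist_ok=True tolerates a concurrent creation of the directory.
        os.makedirs(dirname, exist_ok=True)
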
diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/extensions/notebookcell_sphinxext.py
--- a/doc/extensions/notebookcell_sphinxext.py
+++ b/doc/extensions/notebookcell_sphinxext.py
@@ -1,14 +1,14 @@
-import os, shutil, string, glob, io
+import os
+import shutil
+import io
+import tempfile
 from sphinx.util.compat import Directive
 from docutils.parsers.rst import directives
-from IPython.nbconvert import html, python
 from IPython.nbformat import current
-from runipy.notebook_runner import NotebookRunner
-from jinja2 import FileSystemLoader
 from notebook_sphinxext import \
-    notebook_node, nb_to_html, nb_to_python, \
-    visit_notebook_node, depart_notebook_node, \
-    evaluate_notebook
+    notebook_node, visit_notebook_node, depart_notebook_node, \
+    evaluate_notebook, make_image_dir, write_notebook_output
+
 
 class NotebookCellDirective(Directive):
     """Insert an evaluated notebook cell into a document
@@ -19,13 +19,22 @@
     required_arguments = 0
     optional_arguments = 1
     has_content = True
-    option_spec = {'skip_exceptions' : directives.flag}
+    option_spec = {'skip_exceptions': directives.flag}
 
     def run(self):
         # check if raw html is supported
         if not self.state.document.settings.raw_enabled:
             raise self.warning('"%s" directive disabled.' % self.name)
 
+        cwd = os.getcwd()
+        tmpdir = tempfile.mkdtemp()
+        os.chdir(tmpdir)
+
+        rst_file = self.state_machine.document.attributes['source']
+        rst_dir = os.path.abspath(os.path.dirname(rst_file))
+
+        image_dir, image_rel_dir = make_image_dir(setup, rst_dir)
+
         # Construct notebook from cell content
         content = "\n".join(self.content)
         with open("temp.py", "w") as f:
@@ -35,7 +44,11 @@
 
         skip_exceptions = 'skip_exceptions' in self.options
 
-        evaluated_text = evaluate_notebook('temp.ipynb', skip_exceptions=skip_exceptions)
+        evaluated_text, resources = evaluate_notebook(
+            'temp.ipynb', skip_exceptions=skip_exceptions)
+
+        evaluated_text = write_notebook_output(
+            resources, image_dir, image_rel_dir, evaluated_text)
 
         # create notebook node
         attributes = {'format': 'html', 'source': 'nb_path'}
@@ -44,9 +57,8 @@
             self.state_machine.get_source_and_line(self.lineno)
 
         # clean up
-        files = glob.glob("*.png") + ['temp.py', 'temp.ipynb']
-        for file in files:
-            os.remove(file)
+        os.chdir(cwd)
+        shutil.rmtree(tmpdir, True)
 
         return [nb_node]
 
@@ -60,6 +72,14 @@
 
     app.add_directive('notebook-cell', NotebookCellDirective)
 
+    retdict = dict(
+        version='0.1',
+        parallel_read_safe=True,
+        parallel_write_safe=True
+    )
+
+    return retdict
+
 def convert_to_ipynb(py_file, ipynb_file):
     with io.open(py_file, 'r', encoding='utf-8') as f:
         notebook = current.reads(f.read(), format='py')

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/extensions/numpydocmod/numpydoc.py
--- a/doc/extensions/numpydocmod/numpydoc.py
+++ b/doc/extensions/numpydocmod/numpydoc.py
@@ -99,6 +99,15 @@
     app.add_domain(NumpyPythonDomain)
     app.add_domain(NumpyCDomain)
 
+    retdict = dict(
+        version='0.1',
+        parallel_read_safe=True,
+        parallel_write_safe=True
+    )
+
+    return retdict
+
+
 #------------------------------------------------------------------------------
 # Docstring-mangling domains
 #------------------------------------------------------------------------------

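The ``retdict`` returned from ``setup()`` here (and in the other extension patches in this changeset) follows Sphinx's extension-metadata convention; declaring parallel safety is what allows Sphinx to read and write sources concurrently. A minimal sketch of the pattern:

    def setup(app):
        # ... register directives, nodes, and config values here ...
        return dict(version='0.1',
                    parallel_read_safe=True,
                    parallel_write_safe=True)
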
diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/extensions/pythonscript_sphinxext.py
--- a/doc/extensions/pythonscript_sphinxext.py
+++ b/doc/extensions/pythonscript_sphinxext.py
@@ -1,8 +1,13 @@
+import tempfile
+import os
+import glob
+import shutil
+import subprocess
+import uuid
 from sphinx.util.compat import Directive
-from subprocess import Popen,PIPE
-from docutils.parsers.rst import directives
 from docutils import nodes
-import os, glob, base64
+from notebook_sphinxext import make_image_dir
+
 
 class PythonScriptDirective(Directive):
     """Execute an inline python script and display images.
@@ -17,6 +22,15 @@
     has_content = True
 
     def run(self):
+        cwd = os.getcwd()
+        tmpdir = tempfile.mkdtemp()
+        os.chdir(tmpdir)
+
+        rst_file = self.state_machine.document.attributes['source']
+        rst_dir = os.path.abspath(os.path.dirname(rst_file))
+
+        image_dir, image_rel_dir = make_image_dir(setup, rst_dir)
+
         # Construct script from cell content
         content = "\n".join(self.content)
         with open("temp.py", "w") as f:
@@ -27,35 +41,44 @@
         print content
         print ""
 
-        codeproc = Popen(['python', 'temp.py'], stdout=PIPE)
-        out = codeproc.stdout.read()
+        subprocess.call(['python', 'temp.py'])
 
-        images = sorted(glob.glob("*.png"))
-        fns = []
         text = ''
-        for im in images:
-            text += get_image_tag(im)
-            os.remove(im)
-            
-        os.remove("temp.py")
+        for im in sorted(glob.glob("*.png")):
+            text += get_image_tag(im, image_dir, image_rel_dir)
 
         code = content
 
-        literal = nodes.literal_block(code,code)
+        literal = nodes.literal_block(code, code)
         literal['language'] = 'python'
 
         attributes = {'format': 'html'}
         img_node = nodes.raw('', text, **attributes)
-        
+
+        # clean up
+        os.chdir(cwd)
+        shutil.rmtree(tmpdir, True)
+
         return [literal, img_node]
 
+
 def setup(app):
     app.add_directive('python-script', PythonScriptDirective)
     setup.app = app
     setup.config = app.config
     setup.confdir = app.confdir
 
-def get_image_tag(filename):
-    with open(filename, "rb") as image_file:
-        encoded_string = base64.b64encode(image_file.read())
-        return '<img src="data:image/png;base64,%s" width="600"><br>' % encoded_string
+    retdict = dict(
+        version='0.1',
+        parallel_read_safe=True,
+        parallel_write_safe=True
+    )
+
+    return retdict
+
+
+def get_image_tag(filename, image_dir, image_rel_dir):
+    my_uuid = uuid.uuid4().hex
+    shutil.move(filename, image_dir + os.path.sep + my_uuid + filename)
+    relative_filename = image_rel_dir + os.path.sep + my_uuid + filename
+    return '<img src="%s" width="600"><br>' % relative_filename

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/extensions/youtube.py
--- a/doc/extensions/youtube.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# http://countergram.com/youtube-in-rst
-
-from docutils import nodes
-from docutils.parsers.rst import directives
-
-CODE = """\
-<object type="application/x-shockwave-flash"
-        width="%(width)s"
-        height="%(height)s"
-        class="youtube-embed"
-        data="http://www.youtube.com/v/%(yid)s">
-    <param name="movie" value="http://www.youtube.com/v/%(yid)s"></param>
-    <param name="wmode" value="transparent"></param>%(extra)s
-</object>
-"""
-
-PARAM = """\n    <param name="%s" value="%s"></param>"""
-
-def youtube(name, args, options, content, lineno,
-            contentOffset, blockText, state, stateMachine):
-    """ Restructured text extension for inserting youtube embedded videos """
-    if len(content) == 0:
-        return
-    string_vars = {
-        'yid': content[0],
-        'width': 425,
-        'height': 344,
-        'extra': ''
-        }
-    extra_args = content[1:] # Because content[0] is ID
-    extra_args = [ea.strip().split("=") for ea in extra_args] # key=value
-    extra_args = [ea for ea in extra_args if len(ea) == 2] # drop bad lines
-    extra_args = dict(extra_args)
-    if 'width' in extra_args:
-        string_vars['width'] = extra_args.pop('width')
-    if 'height' in extra_args:
-        string_vars['height'] = extra_args.pop('height')
-    if extra_args:
-        params = [PARAM % (key, extra_args[key]) for key in extra_args]
-        string_vars['extra'] = "".join(params)
-    return [nodes.raw('', CODE % (string_vars), format='html')]
-youtube.content = True
-directives.register_directive('youtube', youtube)

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/extensions/yt_colormaps.py
--- a/doc/extensions/yt_colormaps.py
+++ b/doc/extensions/yt_colormaps.py
@@ -15,6 +15,14 @@
     setup.config = app.config
     setup.confdir = app.confdir
 
+    retdict = dict(
+        version='0.1',
+        parallel_read_safe=True,
+        parallel_write_safe=True
+    )
+
+    return retdict
+
 class ColormapScript(Directive):
     required_arguments = 1
     optional_arguments = 0

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/extensions/yt_cookbook.py
--- a/doc/extensions/yt_cookbook.py
+++ b/doc/extensions/yt_cookbook.py
@@ -15,6 +15,14 @@
     setup.config = app.config
     setup.confdir = app.confdir
 
+    retdict = dict(
+        version='0.1',
+        parallel_read_safe=True,
+        parallel_write_safe=True
+    )
+
+    return retdict
+
 data_patterns = ["*.h5", "*.out", "*.dat"]
 
 class CookbookScript(Directive):

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/helper_scripts/show_fields.py
--- a/doc/helper_scripts/show_fields.py
+++ b/doc/helper_scripts/show_fields.py
@@ -38,7 +38,7 @@
 np.random.seed(int(0x4d3d3d3))
 units = [base_ds._get_field_info(*f).units for f in fields]
 fields = [_strip_ftype(f) for f in fields]
-ds = fake_random_ds(16, fields = fields, units = units)
+ds = fake_random_ds(16, fields=fields, units=units, particles=True)
 ds.parameters["HydroMethod"] = "streaming"
 ds.parameters["EOSType"] = 1.0
 ds.parameters["EOSSoundSpeed"] = 1.0
@@ -126,7 +126,7 @@
     if in_cgs:
         unit_object = unit_object.get_cgs_equivalent()
     latex = unit_object.latex_representation()
-    return latex.replace('\/','~')
+    return latex.replace('\/', '~')
 
 def print_all_fields(fl):
     for fn in sorted(fl):
@@ -163,22 +163,25 @@
 ds.index
 print_all_fields(ds.field_info)
 
-def print_frontend_field(ftype, field, ptype):
-    name = field[0]
-    units = field[1][0]
-    aliases = ["``%s``" % f for f in field[1][1]]
-    if ftype is not "particle_type":
-        ftype = "'"+ftype+"'"
-    s = "(%s, '%s')" % (ftype, name)
-    print s
-    print "^" * len(s)
-    print
-    if len(units) > 0:
-        print "   * Units: :math:`\mathrm{%s}`" % fix_units(units)
-    if len(aliases) > 0:
-        print "   * Aliased to: %s" % " ".join(aliases)
-    print "   * Particle Type: %s" % (ptype)
-    print
+
+class FieldInfo:
+    """ a simple container to hold the information about fields """
+    def __init__(self, ftype, field, ptype):
+        name = field[0]
+        self.units = ""
+        u = field[1][0]
+        if len(u) > 0:
+            self.units = ":math:`\mathrm{%s}`" % fix_units(u)
+        a = ["``%s``" % f for f in field[1][1]]
+        self.aliases = " ".join(a)
+        self.dname = ""
+        if field[1][2] is not None:
+            self.dname = ":math:`{}`".format(field[1][2])
+
+        if ftype != "particle_type":
+            ftype = "'"+ftype+"'"
+        self.name = "(%s, '%s')" % (ftype, name)
+        self.ptype = ptype
 
 current_frontends = [f for f in _frontends if f not in ["stream"]]
 
@@ -187,9 +190,11 @@
     field_info_names = [fi for fi in dir(this_f) if "FieldInfo" in fi]
     dataset_names = [dset for dset in dir(this_f) if "Dataset" in dset]
 
-    if frontend == "sph":
-        field_info_names = \
-          ['TipsyFieldInfo' if 'Tipsy' in d else 'SPHFieldInfo' for d in dataset_names]
+    if frontend == "gadget":
+        # Drop duplicate entry for GadgetHDF5, add special case for FieldInfo
+        # entry
+        dataset_names = ['GadgetDataset']
+        field_info_names = ['SPHFieldInfo']
     elif frontend == "boxlib":
         field_info_names = []
         for d in dataset_names:
@@ -199,6 +204,10 @@
                 field_info_names.append("CastroFieldInfo")
             else: 
                 field_info_names.append("BoxlibFieldInfo")
+    elif frontend == "chombo":
+        # remove low dimensional field info containers for ChomboPIC
+        field_info_names = [f for f in field_info_names if '1D' not in f
+                            and '2D' not in f]
 
     for dset_name, fi_name in zip(dataset_names, field_info_names):
         fi = getattr(this_f, fi_name)
@@ -220,12 +229,52 @@
             h = "%s-Specific Fields" % dset_name.replace("Dataset", "")
             print h
             print "-" * len(h) + "\n"
+
+            field_stuff = []
             for field in known_other_fields:
-                print_frontend_field(frontend, field, False)
+                field_stuff.append(FieldInfo(frontend, field, False))
             for field in known_particle_fields:
                 if frontend in ["sph", "halo_catalogs", "sdf"]:
-                    print_frontend_field("particle_type", field, True)
+                    field_stuff.append(FieldInfo("particle_type", field, True))
                 else:
-                    print_frontend_field("io", field, True)
+                    field_stuff.append(FieldInfo("io", field, True))
+
+            # output
+            len_name = 10
+            len_units = 5
+            len_aliases = 7
+            len_part = 9
+            len_disp = 12
+            for f in field_stuff:
+                len_name = max(len_name, len(f.name))
+                len_aliases = max(len_aliases, len(f.aliases))
+                len_units = max(len_units, len(f.units))
+                len_disp = max(len_disp, len(f.dname))
+
+            fstr = "{nm:{nw}}  {un:{uw}}  {al:{aw}}  {pt:{pw}}  {dp:{dw}}"
+            header = fstr.format(nm="field name", nw=len_name,
+                                 un="units", uw=len_units,
+                                 al="aliases", aw=len_aliases,
+                                 pt="particle?", pw=len_part,
+                                 dp="display name", dw=len_disp)
+
+            div = fstr.format(nm="="*len_name, nw=len_name,
+                              un="="*len_units, uw=len_units,
+                              al="="*len_aliases, aw=len_aliases,
+                              pt="="*len_part, pw=len_part,
+                              dp="="*len_disp, dw=len_disp)
+            print div
+            print header
+            print div
+
+            for f in field_stuff:
+                print fstr.format(nm=f.name, nw=len_name,
+                                  un=f.units, uw=len_units,
+                                  al=f.aliases, aw=len_aliases,
+                                  pt=f.ptype, pw=len_part,
+                                  dp=f.dname, dw=len_disp)
+                
+            print div
+            print ""
 
 print footer

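The table emitter above leans on nested replacement fields, where the column width itself is a format argument computed at runtime; a small self-contained sketch with made-up column data:

    # Width values computed at runtime are spliced into the format spec.
    fstr = "{nm:{nw}}  {un:{uw}}"
    print fstr.format(nm="=" * 12, nw=12, un="=" * 8, uw=8)
    print fstr.format(nm="field name", nw=12, un="units", uw=8)
    print fstr.format(nm="=" * 12, nw=12, un="=" * 8, uw=8)
    print fstr.format(nm="density", nw=12, un="g/cm**3", uw=8)
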
diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/how_to_develop_yt.txt
--- a/doc/how_to_develop_yt.txt
+++ /dev/null
@@ -1,157 +0,0 @@
-How To Develop yt
-=================
-
-We are very happy to accept patches, features, and bugfixes from any member of
-the community!  yt is developed using mercurial, primarily because it enables
-very easy and straightforward submission of changesets.  We're eager to hear
-from you, and if you are developing yt, we encourage you to subscribe to the
-developer mailing list:
-
-http://lists.spacepope.org/listinfo.cgi/yt-dev-spacepope.org
-
-Please feel free to hack around, commit changes, and send them upstream.  If
-you're new to Mercurial, these three resources are pretty great for learning
-the ins and outs:
-
-   * http://hginit.com
-   * http://hgbook.red-bean.com/read/
-   * http://mercurial.selenic.com/
-
-Keep in touch, and happy hacking!  We also provide doc/coding_styleguide.txt
-and an example of a fiducial docstring in doc/docstring_example.txt.  Please
-read them before hacking on the codebase, and feel free to email any of the
-mailing lists for help with the codebase.
-
-Licenses
---------
-
-All code in yt should be under the BSD 3-clause license.
-
-How To Get The Source Code
---------------------------
-
-yt is hosted on BitBucket, and you can see all of the yt repositories at
-http://hg.yt-project.org/ .  With the yt installation script you should have a
-copy of Mercurial.  You can clone the repository like so:
-
-   $ hg clone http://hg.yt-project.org/yt/
-
-You can update to any branch or revision by executing the command:
-
-   $ hg up -C some_revision_specifier
-
-Specifying a branch name in the revspec will update to the latest revision on
-that branch.  If you ran the installation script, you can tell Python to use a
-different version of the library by executing:
-
-   $ python2.6 setup.py develop
-
-This will rebuild all C modules as well.
-
-How To Submit Changes
----------------------
-
-You can submit changes a couple different ways, but the easiest is to use the
-"fork" mechanism on BitBucket.  Just go here:
-
-http://hg.yt-project.org/yt/fork
-
-and you're all set, ready to go.  You'll have to either clone a new copy of the
-repository or edit .hg/hgrc to point to the location of your new fork, first,
-though.
-
-When you're ready to submit them to the main repository, simply go to:
-
-http://hg.yt-project.org/yt/fork
-
-Make sure you notify "yt_analysis" and put in a little description.  That'll
-notify the core developers that you've got something ready to submit, and we
-will review it an (hopefully!) merge it in.  If it goes well, you may end up
-with push access to the main repository.
-
-How To Read The Source Code
----------------------------
-
-yt is organized into several sub-packages, each of which governs a different
-conceptual regime.
-
-   frontends
-      This is where interfaces to codes are created.  Within each subdirectory of
-      yt/frontends/ there must exist the following files, even if empty:
-
-      * data_structures.py, where subclasses of AMRGridPatch, Dataset and
-        GridIndex are defined.
-      * io.py, where a subclass of IOHandler is defined.
-      * misc.py, where any miscellaneous functions or classes are defined.
-      * definitions.py, where any definitions specific to the frontend are
-        defined.  (i.e., header formats, etc.)
-
-   visualization
-      This is where all visualization modules are stored.  This includes plot
-      collections, the volume rendering interface, and pixelization frontends.
-
-   data_objects
-      All objects that handle data, processed or unprocessed, not explicitly
-      defined as visualization are located in here.  This includes the base
-      classes for data regions, covering grids, time series, and so on.  This
-      also includes derived fields and derived quantities.
-
-   astro_objects
-      This is where all objects that represent astrophysical objects should
-      live -- for instance, galaxies, halos, disks, clusters and so on.  These
-      can be expressive, provide astrophysical analysis functionality and will
-      in general be more user-modifiable, user-tweakable, and much less of a
-      black box that data_objects.
-
-   analysis_modules
-      This is where all mechanisms for processing data live.  This includes
-      things like clump finding, halo profiling, halo finding, and so on.  This
-      is something of a catchall, but it serves as a level of greater
-      abstraction that simply data selection and modification.
-
-   gui
-      This is where all GUI components go.  Typically this will be some small
-      tool used for one or two things, which contains a launching mechanism on
-      the command line.
-
-   utilities
-      All broadly useful code that doesn't clearly fit in one of the other
-      categories goes here.
-
-How To Use Branching
---------------------
-
-If you are planning on making a large change to the code base that may not be
-ready for many commits, or if you are planning on breaking some functionality
-and rewriting it, you are encouraged to create a new named branch.  You can
-mark the current repository as a new named branch by executing:
-
-   $ hg branch new_feature_name
-
-The next commit and all subsequent commits will be contained within that named
-branch.  At this point, add your branch here:
-
-http://yt-project.org/wiki/ExistingBranches
-
-To merge changes in from another branch, you would execute:
-
-   $ hg merge some_other_branch
-
-Note also that you can use revision specifiers instead of "some_other_branch".
-When you are ready to merge back into the main branch, execute this process:
-
-   $ hg merge name_of_main_branch
-   $ hg commit --close-branch
-   $ hg up -C name_of_main_branch
-   $ hg merge name_of_feature_branch
-   $ hg commit
-
-When you execute the merge you may have to resolve conflicts.  Once you resolve
-conflicts in a file, you can mark it as resolved by doing:
-
-   $ hg resolve -m path/to/conflicting/file.py
-
-Please be careful when resolving conflicts in files.
-
-Once your branch has been merged in, mark it as closed on the wiki page.
-

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -264,8 +264,8 @@
         echo "Alternatively, download the Xcode command line tools from"
         echo "the Apple developer tools website."
         echo
-	echo "OS X 10.8.4 and 10.9: download Xcode 5.02 from the mac app store."
-	echo "(search for Xcode)."
+	echo "OS X 10.8.4, 10.9, and 10.10: download the appropriate version of"
+	echo "Xcode from the mac app store (search for Xcode)."
     echo
 	echo "Additionally, you will have to manually install the Xcode"
 	echo "command line tools."
@@ -273,7 +273,7 @@
     echo "For OS X 10.8, see:"
    	echo "http://stackoverflow.com/questions/9353444"
 	echo
-    echo "For OS X 10.9, the command line tools can be installed"
+    echo "For OS X 10.9 and 10.10, the command line tools can be installed"
     echo "with the following command:"
     echo "    xcode-select --install"
     echo

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/source/_static/custom.css
--- a/doc/source/_static/custom.css
+++ b/doc/source/_static/custom.css
@@ -99,3 +99,13 @@
   height: 45px; 
   visibility: hidden; 
 }
+
+/*
+
+Make tables span only half the page. 
+
+*/
+
+.table {
+    width: 50%
+}

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/source/about/index.rst
--- /dev/null
+++ b/doc/source/about/index.rst
@@ -0,0 +1,88 @@
+.. _aboutyt:
+
+About yt
+========
+
+.. contents::
+   :depth: 1
+   :local:
+   :backlinks: none
+
+What is yt?
+-----------
+
+yt is a toolkit for analyzing and visualizing quantitative data.  Originally
+written to analyze 3D grid-based astrophysical simulation data, 
+it has grown to handle any kind of data represented in a 2D or 3D volume.
+yt is a Python-based open source project and is open for anyone to use or 
+contribute code.  The entire source code and history are available to all 
+at https://bitbucket.org/yt_analysis/yt .
+
+.. _who-is-yt:
+
+Who is yt?
+----------
+
+As an open-source project, yt has a large number of user-developers.  
+In September of 2014, the yt developer community collectively decided to bestow 
+the title of *member* on individuals who had contributed in a significant way 
+to the project.  For a list of those members and a description of their 
+contributions to the code, see 
+`our members website. <http://yt-project.org/members.html>`_
+
+For an up-to-date list of everyone who has contributed to the yt codebase, 
+see the current `CREDITS <http://hg.yt-project.org/yt/src/yt/CREDITS>`_ file.  
+For a more detailed breakdown of contributions made by individual users, see our 
+`Open HUB page <https://www.openhub.net/p/yt_amr/contributors?query=&sort=commits>`_.
+
+History of yt
+-------------
+
+yt was originally begun by Matthew Turk in 2007 in the course of his graduate
+studies in computational astrophysics.  The code was developed
+as a simple data-reader and exporter for grid-based hydrodynamical simulation 
+data outputs from the *Enzo* code.  Over the next few years, he invited 
+collaborators and friends to contribute and use yt.  As the community grew,
+so did the capabilities of yt.  It is now a community-developed project with 
+contributions from many people, the hospitality of several institutions, and 
+benefiting from numerous grants.  With this community-driven approach 
+and contributions from a sizeable population of developers, it has evolved 
+into a fully-featured toolkit for analysis and visualization of 
+multidimensional data.  It relies on no proprietary software -- although it 
+can be and has been extended to interface with proprietary software and 
+libraries -- and has been designed from the ground up to enable users to be 
+as immersed in the data as they desire.
+
+How do I contact yt?
+--------------------
+
+If you have any questions about the code, please contact the `yt users email
+list <http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org>`_.  If
+you're having other problems, please follow the steps in 
+:ref:`asking-for-help`.
+
+How do I cite yt?
+-----------------
+
+If you use yt in a publication, we'd very much appreciate a citation!  You
+should feel free to cite the `ApJS paper
+<http://adsabs.harvard.edu/abs/2011ApJS..192....9T>`_ with the following BibTeX
+entry: ::
+
+   @ARTICLE{2011ApJS..192....9T,
+      author = {{Turk}, M.~J. and {Smith}, B.~D. and {Oishi}, J.~S. and {Skory}, S. and 
+   	{Skillman}, S.~W. and {Abel}, T. and {Norman}, M.~L.},
+       title = "{yt: A Multi-code Analysis Toolkit for Astrophysical Simulation Data}",
+     journal = {\apjs},
+   archivePrefix = "arXiv",
+      eprint = {1011.3514},
+    primaryClass = "astro-ph.IM",
+    keywords = {cosmology: theory, methods: data analysis, methods: numerical },
+        year = 2011,
+       month = jan,
+      volume = 192,
+       pages = {9-+},
+         doi = {10.1088/0067-0049/192/1/9},
+      adsurl = {http://adsabs.harvard.edu/abs/2011ApJS..192....9T},
+     adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+   }

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- a/doc/source/analyzing/analysis_modules/PPVCube.ipynb
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:b83e125278c2e58da4d99ac9d2ca2a136d01f1094e1b83497925e0f9b9b056c2"
+  "signature": "sha256:7ba601e381023e5b7c00a585ccaa55996316d2dfe7f8c96284b4539e1ee5b846"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -193,7 +193,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "cube = PPVCube(ds, L, \"density\", dims=(200,100,50), velocity_bounds=(-150.,150.,\"km/s\"))"
+      "cube = PPVCube(ds, L, \"density\", dims=(200,100,50), velocity_bounds=(-150.,150.,\"km/s\"), method=\"sum\")"
      ],
      "language": "python",
      "metadata": {},
@@ -335,7 +335,7 @@
      "collapsed": false,
      "input": [
       "cube2 = PPVCube(ds, L, \"density\", dims=(200,100,50), velocity_bounds=(-150,150,\"km/s\"), thermal_broad=True, \n",
-      "                atomic_weight=12.0)\n",
+      "                atomic_weight=12.0, method=\"sum\")\n",
       "cube2.write_fits(\"cube2.fits\", clobber=True, length_unit=\"kpc\")"
      ],
      "language": "python",

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/source/analyzing/analysis_modules/_images/dsquared.png
Binary file doc/source/analyzing/analysis_modules/_images/dsquared.png has changed

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -68,15 +68,17 @@
 but also `separately available <http://code.google.com/p/rockstar>`_. The lead 
 developer is Peter Behroozi, and the methods are described in `Behroozi
 et al. 2011 <http://rockstar.googlecode.com/files/rockstar_ap101911.pdf>`_. 
+In order to run the Rockstar halo finder in yt, make sure you've 
+:ref:`installed it so that it can integrate with yt <rockstar-installation>`.
 
-.. note:: At the moment, Rockstar does not support multiple particle masses, 
-  instead using a fixed particle mass. This will not affect most dark matter 
-  simulations, but does make it less useful for finding halos from the stellar
-  mass. In simulations where the highest-resolution particles all have the 
-  same mass (ie: zoom-in grid based simulations), one can set up a particle
-  filter to select the lowest mass particles and perform the halo finding
-  only on those.  See the this cookbook recipe for an example: 
-  :ref:`cookbook-rockstar-nested-grid`.
+At the moment, Rockstar does not support multiple particle masses, 
+instead using a fixed particle mass. This will not affect most dark matter 
+simulations, but does make it less useful for finding halos from the stellar
+mass. In simulations where the highest-resolution particles all have the 
+same mass (i.e., zoom-in grid-based simulations), one can set up a particle
+filter to select the lowest mass particles and perform the halo finding
+only on those (a short sketch follows this diff).  See this cookbook recipe 
+for an example: 
+:ref:`cookbook-rockstar-nested-grid`.
 
 To run the Rockstar Halo finding, you must launch python with MPI and 
 parallelization enabled. While Rockstar itself does not require MPI to run, 
@@ -201,11 +203,29 @@
 halo finder at a few paddings to find the right amount, especially 
 if you're analyzing many similar datasets.
 
+.. _rockstar-installation:
+
 Rockstar Installation
 ---------------------
 
-The Rockstar is slightly patched and modified to run as a library inside of 
-yt. By default it will be built with yt using the ``install_script.sh``.
-If it wasn't installed, please make sure that the installation setting
-``INST_ROCKSTAR=1`` is defined in the ``install_script.sh`` and re-run
-the installation script.
+Because of changes in the Rockstar API over time, yt only currently works with
+a slightly older version of Rockstar.  This version of Rockstar has been 
+slightly patched and modified to run as a library inside of yt. By default it 
+is not installed with yt, but installation is very easy.  The 
+:ref:`install-script` used to install yt from source has a line: 
+``INST_ROCKSTAR=0`` that must be changed to ``INST_ROCKSTAR=1``.  You can
+rerun this installer script over the top of an existing installation, and
+it will only install components missing from the existing installation.  
+You can do this as follows.  Put your freshly modified install_script in
+the parent directory of the yt installation directory (e.g. the parent of 
+``$YT_DEST``, ``yt-x86_64``, ``yt-i386``, etc.), and rerun the installer:
+
+.. code-block:: bash
+
+    cd $YT_DEST
+    cd ..
+    vi install_script.sh  # or your favorite editor, to change INST_ROCKSTAR=1
+    bash < install_script.sh
+
+This will download Rockstar and install it as a library in yt.  You should now
+be able to use Rockstar and yt together.

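A minimal sketch of the particle filter mentioned above (the mass threshold and dataset path are hypothetical; the cookbook recipe linked in the docs is the authoritative example):

    import yt
    from yt.data_objects.particle_filters import add_particle_filter

    def lowest_mass(pfilter, data):
        # Hypothetical cut keeping only the finest (lightest) particles.
        return data[pfilter.filtered_type, "particle_mass"] < \
            data.ds.quan(1.0e5, "Msun")

    add_particle_filter("lowest_mass", function=lowest_mass,
                        filtered_type="all", requires=["particle_mass"])

    ds = yt.load("DD0010/DD0010")  # hypothetical dataset
    ds.add_particle_filter("lowest_mass")
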
diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/source/analyzing/analysis_modules/index.rst
--- a/doc/source/analyzing/analysis_modules/index.rst
+++ b/doc/source/analyzing/analysis_modules/index.rst
@@ -1,12 +1,13 @@
 .. _analysis-modules:
 
-Analysis Modules
-================
+Topic-Specific Analysis Modules
+===============================
 
-These are "canned" analysis modules that can operate on datasets, performing a
-sequence of operations that result in a final result.  This functionality 
-interoperates with yt, but one needs to import the functions associated
-with each specific analysis module into python before using them.
+These semi-autonomous analysis modules are unique to specific subject matter
+like tracking halos, generating synthetic observations, exporting output to
+external visualization routines, and more.  Because they are somewhat 
+specialized, they exist in their own corners of yt, and they do not get loaded
+by default when you "import yt".  Read up on these advanced tools below.
 
 .. toctree::
    :maxdepth: 2

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -46,17 +46,23 @@
 .. code:: python
 
     import yt
+    #yt.enable_parallelism() # If you want to run in parallel this should go here!
     from yt.analysis_modules.photon_simulator.api import *
     from yt.utilities.cosmology import Cosmology
 
+.. note::
+
+    For parallel runs using ``mpi4py``, the call to ``yt.enable_parallelism`` should go *before*
+    the import of the ``photon_simulator`` module, as shown above.
+
 We're going to load up an Athena dataset of a galaxy cluster core:
 
 .. code:: python
 
     ds = yt.load("MHDSloshing/virgo_low_res.0054.vtk",
-                 parameters={"time_unit":(1.0,"Myr"),
-                            "length_unit":(1.0,"Mpc"),
-                            "mass_unit":(1.0e14,"Msun")}) 
+                 units_override={"time_unit":(1.0,"Myr"),
+                                 "length_unit":(1.0,"Mpc"),
+                                 "mass_unit":(1.0e14,"Msun")})
 
 First, to get a sense of what the resulting image will look like, let's
 make a new yt field called ``"density_squared"``, since the X-ray
@@ -67,7 +73,7 @@
 
     def _density_squared(field, data):
         return data["density"]**2
-    add_field("density_squared", function=_density_squared)
+    ds.add_field("density_squared", function=_density_squared, units="g**2/cm**6")
 
 Then we'll project this field along the z-axis.
 
@@ -141,7 +147,8 @@
 
 .. code:: python
 
-    thermal_model = ThermalPhotonModel(apec_model, X_H=0.75, Zmet=0.3)
+    thermal_model = ThermalPhotonModel(apec_model, X_H=0.75, Zmet=0.3,
+                                       photons_per_chunk=100000000)
 
 Where we pass in the ``SpectralModel``, and can optionally set values for
 the hydrogen mass fraction ``X_H`` and metallicity ``Z_met``. If
@@ -150,6 +157,14 @@
 assume that is the name of the metallicity field (which may be spatially
 varying).
 
+The ``ThermalPhotonModel`` iterates over "chunks" of the supplied data source
+to generate the photons, to reduce memory usage and make parallelization more
+efficient. For each chunk, memory is set aside for the photon energies that will
+be generated. ``photons_per_chunk`` is an optional keyword argument which controls
+the size of this array. For large numbers of photons you may need to set this
+parameter higher; if you are looking to decrease memory usage, set it lower.
+
 Next, we need to specify "fiducial" values for the telescope collecting
 area, exposure time, and cosmological redshift. Remember, the initial
 photon generation will act as a source for Monte-Carlo sampling for more
@@ -294,11 +309,11 @@
             187.49897546,  187.47307048]) degree, 
      'ysky': YTArray([ 12.33519996,  12.3544496 ,  12.32750903, ...,  12.34907707,
             12.33327653,  12.32955225]) degree, 
-     'ypix': YTArray([ 133.85374195,  180.68583074,  115.14110561, ...,  167.61447493,
-            129.17278711,  120.11508562]) (dimensionless), 
+     'ypix': array([ 133.85374195,  180.68583074,  115.14110561, ...,  167.61447493,
+            129.17278711,  120.11508562]), 
      'PI': array([ 27,  15,  25, ..., 609, 611, 672]), 
-     'xpix': YTArray([  86.26331108,  155.15934197,  111.06337043, ...,  114.39586907,
-            130.93509652,  192.50639633]) (dimensionless)}
+     'xpix': array([  86.26331108,  155.15934197,  111.06337043, ...,  114.39586907,
+            130.93509652,  192.50639633])}
 
 
 We can bin up the events into an image and save it to a FITS file. The
@@ -368,14 +383,23 @@
    files in subtle ways that we haven't been able to identify. Please email
    jzuhone at gmail.com if you find any bugs!
 
-Two ``EventList`` instances can be joined togther like this:
+Two ``EventList`` instances can be added together, which is useful if they were
+created using different data sources:
 
 .. code:: python
 
-    events3 = EventList.join_events(events1, events2)
+    events3 = events1+events2
 
-**WARNING**: This doesn't check for parameter consistency between the
-two lists!
+.. warning:: This only works if the two event lists were generated using
+    the same parameters!
+
+Finally, a new ``EventList`` can be created from a subset of an existing ``EventList``,
+defined by a ds9 region (this functionality requires the
+`pyregion <http://pyregion.readthedocs.org>`_ package to be installed):
+
+.. code:: python
+
+    circle_events = events.filter_events("circle.reg")
 
 Creating a X-ray observation from an in-memory dataset
 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
@@ -434,11 +458,11 @@
 .. code:: python
 
    data = {}
-   data["density"] = dens
-   data["temperature"] = temp
-   data["velocity_x"] = np.zeros(ddims)
-   data["velocity_y"] = np.zeros(ddims)
-   data["velocity_z"] = np.zeros(ddims)
+   data["density"] = (dens, "g/cm**3")
+   data["temperature"] = (temp, "K")
+   data["velocity_x"] = (np.zeros(ddims), "cm/s")
+   data["velocity_y"] = (np.zeros(ddims), "cm/s")
+   data["velocity_z"] = (np.zeros(ddims), "cm/s")
 
    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
 
@@ -451,7 +475,7 @@
 
 .. code:: python
 
-   sphere = ds.sphere(ds.domain_center, (1.0,"Mpc"))
+   sphere = ds.sphere("c", (1.0,"Mpc"))
        
    A = 6000.
    exp_time = 2.0e5

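The import-order requirement in the parallelism note above is worth restating as code; a minimal sketch of a parallel-safe script header:

    import yt
    yt.enable_parallelism()  # must precede the photon_simulator import
    from yt.analysis_modules.photon_simulator.api import *
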
diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/source/analyzing/analysis_modules/radmc3d_export.rst
--- a/doc/source/analyzing/analysis_modules/radmc3d_export.rst
+++ b/doc/source/analyzing/analysis_modules/radmc3d_export.rst
@@ -6,17 +6,11 @@
 .. sectionauthor:: Andrew Myers <atmyers2 at gmail.com>
 .. versionadded:: 2.6
 
-.. note:: 
-
-    As of :code:`yt-3.0`, the radial column density analysis module is not
-    currently functional.  This functionality is still available in
-    :code:`yt-2.x`.  If you would like to use these features in :code:`yt-3.x`,
-    help is needed to port them over.  Contact the yt-users mailing list if you
-    are interested in doing this.
-
 `RADMC-3D
-<http://www.ita.uni-heidelberg.de/~dullemond/software/radmc-3d/>`_ is a three-dimensional Monte-Carlo radiative transfer code
-that is capable of handling both line and continuum emission. The :class:`~yt.analysis_modules.radmc3d_export.RadMC3DInterface.RadMC3DWriter`
+<http://www.ita.uni-heidelberg.de/~dullemond/software/radmc-3d/>`_ is a 
+three-dimensional Monte-Carlo radiative transfer code that is capable of 
+handling both line and continuum emission. The 
+:class:`~yt.analysis_modules.radmc3d_export.RadMC3DInterface.RadMC3DWriter`
 class saves AMR data to disk in an ASCII format that RADMC-3D can read. 
 In principle, this allows one to use RADMC-3D to make synthetic observations 
 from any simulation data format that yt recognizes.
@@ -31,74 +25,77 @@
 
 .. code-block:: python
 
-    from yt.mods import *
-    from yt.analysis_modules.radmc3d_export.api import *
+    import yt
+    from yt.analysis_modules.radmc3d_export.api import RadMC3DWriter
 
-Then, define a field that calculates the dust density in each cell. Here, we assume
-a constant dust-to-gas mass ratio of 0.01:
+Then, define a field that calculates the dust density in each cell. Here, we 
+assume a constant dust-to-gas mass ratio of 0.01:
 
 .. code-block:: python
 
     dust_to_gas = 0.01
     def _DustDensity(field, data):
-        return dust_to_gas*data["density"]
-    add_field("DustDensity", function=_DustDensity)
+        return dust_to_gas * data["density"]
+    yt.add_field(("gas", "dust_density"), function=_DustDensity, units="g/cm**3")
 
 Now load up a dataset and call the
 :class:`~yt.analysis_modules.radmc3d_export.RadMC3DInterface.RadMC3DWriter`:
 
 .. code-block:: python
 
-    ds = load("galaxy0030/galaxy0030")
+    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
     writer = RadMC3DWriter(ds)
     
     writer.write_amr_grid()
-    writer.write_dust_file("DustDensity", "dust_density.inp")
+    writer.write_dust_file(("gas", "dust_density"), "dust_density.inp")
 
-The method write_amr_grid() creates an "amr_grid.inp" file that tells RADMC-3D how
-to interpret the rest of the data, while "dust_density.inp" contains the actual data field. 
+The method write_amr_grid() creates an "amr_grid.inp" file that tells RADMC-3D 
+how to interpret the rest of the data, while "dust_density.inp" contains the 
+actual data field. 
 
-We can also supply temperature information. The following code creates a "DustTemperature"
-field that is constant at 10 K, and saves it into a file called "dust_temperature.inp"
+We can also supply temperature information. The following code creates a 
+"dust_temperature" field that is constant at 10 K, and saves it into a file 
+called "dust_temperature.inp"
 
 .. code-block:: python
 
     def _DustTemperature(field, data):
-        return 10.0*data["Ones"]
-    add_field("DustTemperature", function=_DustTemperature)
+        return 0. * data["temperature"] + data.ds.quan(10, 'K')
+    yt.add_field(("gas", "dust_temperature"), function=_DustTemperature, units="K")
     
-    writer.write_dust_file("DustTemperature", "dust_temperature.inp")
+    writer.write_dust_file(("gas", "dust_temperature"), "dust_temperature.inp")
 
-With the "amr_grid.inp", "dust_density.inp", and "dust_temperature.inp" files, RADMC-3D
-has everything it needs to compute the thermal dust emission (you may also have to include
-the location and spectra of any sources, which currently must be done manually). 
-The result is something that looks like this:
+With the "amr_grid.inp", "dust_density.inp", and "dust_temperature.inp" files, 
+RADMC-3D has everything it needs to compute the thermal dust emission (you may 
+also have to include the location and spectra of any sources, which currently 
+must be done manually).  The result is something that looks like this:
 
 .. image:: _images/31micron.png
 
 Line Emission
 -------------
 
-The file format required for line emission is slightly different. The following script will generate 
-two files, one called "numderdens_co.inp", which contains the number density of CO molecules
-for every cell in the index, and another called "gas-velocity.inp", which is useful if you want 
-to include doppler broadening.
+The file format required for line emission is slightly different. The 
+following script will generate two files, one called "numberdens_co.inp", 
+which contains the number density of CO molecules for every cell in the index, 
+and another called "gas_velocity.inp", which is useful if you want to include 
+doppler broadening.
 
 .. code-block:: python
 
-    from yt.mods import *
-    from yt.analysis_modules.radmc3d_export.api import *
+    import yt
+    from yt.analysis_modules.radmc3d_export.api import RadMC3DWriter
 
     x_co = 1.0e-4
     mu_h = 2.34e-24
     def _NumberDensityCO(field, data):
         return (x_co/mu_h)*data["density"]
-    add_field("NumberDensityCO", function=_NumberDensityCO)
+    yt.add_field(("gas", "number_density_CO"), function=_NumberDensityCO, units="cm**-3")
     
-    ds = load("galaxy0030/galaxy0030")
+    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
     writer = RadMC3DWriter(ds)
     
     writer.write_amr_grid()
-    writer.write_line_file("NumberDensityCO", "numberdens_co.inp")
+    writer.write_line_file(("gas", "number_density_CO"), "numberdens_co.inp")
     velocity_fields = ["velocity_x", "velocity_y", "velocity_z"]
     writer.write_line_file(velocity_fields, "gas_velocity.inp") 

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/source/analyzing/analysis_modules/sunyaev_zeldovich.rst
--- a/doc/source/analyzing/analysis_modules/sunyaev_zeldovich.rst
+++ b/doc/source/analyzing/analysis_modules/sunyaev_zeldovich.rst
@@ -1,3 +1,5 @@
+.. _sunyaev-zeldovich:
+
 Mock Observations of the Sunyaev-Zeldovich Effect
 -------------------------------------------------
 

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/source/analyzing/analysis_modules/synthetic_observation.rst
--- a/doc/source/analyzing/analysis_modules/synthetic_observation.rst
+++ b/doc/source/analyzing/analysis_modules/synthetic_observation.rst
@@ -1,3 +1,5 @@
+.. _synthetic-observations:
+
 Synthetic Observation
 =====================
 

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/source/analyzing/external_analysis.rst
--- a/doc/source/analyzing/external_analysis.rst
+++ /dev/null
@@ -1,411 +0,0 @@
-Using yt with External Analysis Tools
-=====================================
-
-yt can be used as a ``glue`` code between simulation data and other methods of
-analyzing data.  Its facilities for understanding units, disk IO and data
-selection set it up ideally to use other mechanisms for analyzing, processing
-and visualizing data.
-
-Calling External Python Codes
------------------------------
-
-Calling external Python codes very straightforward.  For instance, if you had a
-Python code that accepted a set of structured meshes and then post-processed
-them to apply radiative feedback, one could imagine calling it directly:
-
-.. code-block:: python
-
-   import yt
-   import radtrans
-
-   ds = yt.load("DD0010/DD0010")
-   rt_grids = []
-
-   for grid in ds.index.grids:
-       rt_grid = radtrans.RegularBox(
-            grid.LeftEdge, grid.RightEdge,
-            grid["density"], grid["temperature"], grid["metallicity"])
-       rt_grids.append(rt_grid)
-       grid.clear_data()
-
-   radtrans.process(rt_grids)
-
-Or if you wanted to run a population synthesis module on a set of star
-particles (and you could fit them all into memory) it might look something like
-this:
-
-.. code-block:: python
-
-   import yt
-   import pop_synthesis
-
-   ds = yt.load("DD0010/DD0010")
-   ad = ds.all_data()
-   star_masses = ad["StarMassMsun"]
-   star_metals = ad["StarMetals"]
-
-   pop_synthesis.CalculateSED(star_masses, star_metals)
-
-If you have a code that's written in Python that you are having trouble getting
-data into from yt, please feel encouraged to email the users list and we'll
-help out.
-
-Calling Non-Python External Codes
----------------------------------
-
-Independent of its ability to process, analyze and visualize data, yt can also
-serve as a mechanism for reading and selecting simulation data.  In this way,
-it can be used to supply data to an external analysis routine written in
-Fortran, C or C++.  This document describes how to supply that data, using as
-a starting point the example of a simple code that calculates the best-fit
-axes describing a distribution of particles.  (The underlying method is left
-as an exercise for the reader; we're only currently interested in the function
-specification and structs.)
-
-If you have written a piece of code that performs some analysis function, and
-you would like to include it in the base distribution of yt, we would be happy
-to do so; drop us a line or see :ref:`contributing-code` for more information.
-
-To link Python with our external code, we will be
-using a language called `Cython <http://www.cython.org/>`_, which is
-essentially a superset of Python that compiles down to C.  It is aware of NumPy
-arrays, and it is able to massage data between the interpreted language Python
-and C, Fortran or C++.  It will be much easier to utilize routines and analysis
-code that have been separated into subroutines that accept data structures, so
-we will assume that our halo axis calculator accepts a set of structs.
-
-Our Example Code
-++++++++++++++++
-
-Here is the ``axes.h`` file in our imaginary code, which we will then wrap:
-
-.. code-block:: c
-
-   typedef struct structParticleCollection {
-        long npart;
-        double *xpos;
-        double *ypos;
-        double *zpos;
-   } ParticleCollection;
-   
-   void calculate_axes(ParticleCollection *part, 
-            double *ax1, double *ax2, double *ax3);
-
-There are several components to this analysis routine which we will have to
-wrap.
-
-#. We have to wrap the creation of an instance of ``ParticleCollection``.
-#. We have to transform a set of NumPy arrays into pointers to doubles.
-#. We have to create a set of doubles into which ``calculate_axes`` will be
-   placing the values of the axes it calculates.
-#. We have to turn the return values back into Python objects.
-
-Each of these steps can be handled in turn, and we'll be doing it using Cython
-as our interface code.
-
-Setting Up and Building Our Wrapper
-+++++++++++++++++++++++++++++++++++
-
-To get started, we'll need to create two files:
-
-.. code-block:: bash
-
-   axes_calculator.pyx
-   axes_calculator_setup.py
-
-These can go anywhere, but it might be useful to put them in their own
-directory.  The contents of ``axes_calculator.pyx`` will be left for the next
-section, but we will need to put some boilerplate code into
-``axes_calculator_setup.py``.  As a quick side note, you should call these
-whatever is most appropriate for the external code you are wrapping;
-``axes_calculator`` is probably not the best bet.
-
-Here's a rough outline of what should go in ``axes_calculator_setup.py``:
-
-.. code-block:: python
-
-   NAME = "axes_calculator"
-   EXT_SOURCES = []
-   EXT_LIBRARIES = ["axes_utils", "m"]
-   EXT_LIBRARY_DIRS = ["/home/rincewind/axes_calculator/"]
-   EXT_INCLUDE_DIRS = []
-   DEFINES = []
-
-   from distutils.core import setup
-   from distutils.extension import Extension
-   from Cython.Distutils import build_ext
-
-   ext_modules = [Extension(NAME,
-                    [NAME+".pyx"] + EXT_SOURCES,
-                    libraries = EXT_LIBRARIES,
-                    library_dirs = EXT_LIBRARY_DIRS,
-                    include_dirs = EXT_INCLUDE_DIRS,
-                    define_macros = DEFINES)
-   ]
-
-   setup(
-     name = NAME,
-     cmdclass = {'build_ext': build_ext},
-     ext_modules = ext_modules
-   )
-
-The only variables you should have to change in this are the first six, and
-possibly only the first one.  We'll go through these variables one at a time.  
-
-``NAME``
-   This is the name of our source file, minus the ``.pyx``.  We're also
-   mandating that it be the name of the module we import.  You're free to
-   modify this.
-``EXT_SOURCES``
-   Any additional sources can be listed here.  For instance, if you are only
-   linking against a single ``.c`` file, you could list it here -- if our axes
-   calculator were fully contained within a file called ``calculate_my_axes.c``
-   we could link against it using this variable, and then we would not have to
-   specify any libraries.  This is usually the simplest way to do things, and in
-   fact, yt makes use of this itself for things like HEALPix and interpolation
-   functions.
-``EXT_LIBRARIES``
-   Any libraries that will need to be linked against (like ``m``!) should be
-   listed here.  Note that these are the names of the libraries minus the
-   leading ``lib`` and without the trailing ``.so``.  So ``libm.so`` would
-   become ``m`` and ``libluggage.so`` would become ``luggage``.
-``EXT_LIBRARY_DIRS``
-   If the libraries listed in ``EXT_LIBRARIES`` reside in some other directory
-   or directories, those directories should be listed here.  For instance,
-   ``["/usr/local/lib", "/home/rincewind/luggage/"]`` .
-``EXT_INCLUDE_DIRS``
-   If any header files have been included that live in external directories,
-   those directories should be included here.
-``DEFINES``
-   Any define macros that should be passed to the C compiler should be listed
-   here; if a macro just needs to be defined, with no particular value, give
-   it the value ``None``.  For instance, if you wanted to pass ``-DTWOFLOWER``,
-   you would set this to: ``[("TWOFLOWER", None)]``.
-
-To build our extension, we would run:
-
-.. code-block:: bash
-
-   $ python2.7 axes_calculator_setup.py build_ext -i
-
-Note that since we don't yet have an ``axes_calculator.pyx``, this will fail.
-But once we have it, it ought to run.
-
-Writing and Calling our Wrapper
-+++++++++++++++++++++++++++++++
-
-Now we begin the tricky part: writing our wrapper code.  We've already
-figured out how to build it, which is halfway to being able to test that it
-works, and now we need to start writing Cython code.
-
-For a more detailed introduction to Cython, see the Cython documentation at
-http://docs.cython.org/ .  We'll cover a few of the basics of wrapping code
-here, however.
-
-To start out with, we need to open up and edit our file,
-``axes_calculator.pyx``.  Open this in your favorite version of vi (mine is
-vim) and we will get started by declaring the struct we need to pass in.  But
-first, we need to include some header information:
-
-.. code-block:: cython
-
-   import numpy as np
-   cimport numpy as np
-   cimport cython
-   from libc.stdlib cimport malloc, free
-
-These lines simply import and "Cython import" some common routines.  For more
-information about what is already available, see the Cython documentation.  For
-now, we need to start translating our data.
-
-To do so, we tell Cython both where the struct should come from, and then we
-describe the struct itself.  One fun thing to note is that if you don't need to
-set or access all the values in a struct, and it just needs to be passed around
-opaquely, you don't have to include them in the definition.  For an example of
-this, see the ``png_writer.pyx`` file in the yt repository.  Here's the syntax
-for pulling in (from a file called ``axes_calculator.h``) a struct like the one
-described above:
-
-.. code-block:: cython
-
-   cdef extern from "axes_calculator.h":
-       ctypedef struct ParticleCollection:
-           long npart
-           double *xpos
-           double *ypos
-           double *zpos
-
-So far, pretty easy!  We've basically just translated the declaration from the
-``.h`` file.  Now that we have done so, any other Cython code can create and
-manipulate these ``ParticleCollection`` structs -- which we'll do shortly.
-Next up, we need to declare the function we're going to call, which looks
-nearly exactly like the one in the ``.h`` file.  (One common problem is that
-Cython doesn't know what ``const`` means, so just remove it wherever you see
-it.)  Declare it like so:
-
-.. code-block:: cython
-
-       void calculate_axes(ParticleCollection *part,
-                double *ax1, double *ax2, double *ax3)
-
-Note that this is indented one level, to indicate that it, too, comes from
-``axes_calculator.h``.  The next step is to create a function that accepts
-arrays and converts them to the format the struct likes.  We declare our
-function just like we would a normal Python function, using ``def``.  You can
-also use ``cdef`` if you only want to call a function from within Cython.  We
-want to call it from Python, too, so we just use ``def``.  Note that we don't
-specify types for the various arguments here.  In a moment we'll refine this
-to use better argument types.
-
-.. code-block:: cython
-
-   def examine_axes(xpos, ypos, zpos):
-       cdef double ax1[3], ax2[3], ax3[3]
-       cdef ParticleCollection particles
-       cdef int i
-
-       particles.npart = len(xpos)
-       particles.xpos = <double *> malloc(particles.npart * sizeof(double))
-       particles.ypos = <double *> malloc(particles.npart * sizeof(double))
-       particles.zpos = <double *> malloc(particles.npart * sizeof(double))
-
-       for i in range(particles.npart):
-           particles.xpos[i] = xpos[i]
-           particles.ypos[i] = ypos[i]
-           particles.zpos[i] = zpos[i]
-
-       calculate_axes(&particles, ax1, ax2, ax3)
-
-       free(particles.xpos)
-       free(particles.ypos)
-       free(particles.zpos)
-
-       return ( (ax1[0], ax1[1], ax1[2]),
-                (ax2[0], ax2[1], ax2[2]),
-                (ax3[0], ax3[1], ax3[2]) )
-
-This does the rest.  Note that we've woven in C-type declarations (ax1, ax2,
-ax3) and Python access to the variables fed in.  This function will probably be
-quite slow -- because it doesn't know anything about the variables xpos, ypos,
-zpos, it won't be able to speed up access to them.  Now we will see what we can
-do by declaring them to be of array-type before we start handling them at all.
-We can do that by annotating in the function argument list.  But first, let's
-test that it works.  From the directory in which you placed these files, run:
-
-.. code-block:: bash
-
-   $ python2.7 axes_calculator_setup.py build_ext -i
-
-Now, create a sample script that feeds in some particles:
-
-.. code-block:: python
-
-    import numpy as np
-    import axes_calculator
-
-    # Any three 1D float64 arrays of positions will do for a test.
-    xpos, ypos, zpos = np.random.random((3, 1000))
-    axes_calculator.examine_axes(xpos, ypos, zpos)
-
-Most of the time in that function is spent in converting the data.  So now we
-can go back and try again, rewriting our converter function to assume that
-it is being fed arrays from NumPy:
-
-.. code-block:: cython
-
-   def examine_axes(np.ndarray[np.float64_t, ndim=1] xpos,
-                    np.ndarray[np.float64_t, ndim=1] ypos,
-                    np.ndarray[np.float64_t, ndim=1] zpos):
-       cdef double ax1[3], ax2[3], ax3[3]
-       cdef ParticleCollection particles
-       cdef int i
-
-       particles.npart = len(xpos)
-       particles.xpos = <double *> malloc(particles.npart * sizeof(double))
-       particles.ypos = <double *> malloc(particles.npart * sizeof(double))
-       particles.zpos = <double *> malloc(particles.npart * sizeof(double))
-
-       for i in range(particles.npart):
-           particles.xpos[i] = xpos[i]
-           particles.ypos[i] = ypos[i]
-           particles.zpos[i] = zpos[i]
-
-       calculate_axes(&particles, ax1, ax2, ax3)
-
-       free(particles.xpos)
-       free(particles.ypos)
-       free(particles.zpos)
-
-       return ( (ax1[0], ax1[1], ax1[2]),
-                (ax2[0], ax2[1], ax2[2]),
-                (ax3[0], ax3[1], ax3[2]) )
-
-This should be substantially faster, assuming you feed it arrays.
-
-Now, there's one last thing we can try.  If we know our function won't modify
-our arrays, and they are C-contiguous, we can simply grab pointers to the data:
-
-.. code-block:: cython
-
-   def examine_axes(np.ndarray[np.float64_t, ndim=1] xpos,
-                    np.ndarray[np.float64_t, ndim=1] ypos,
-                    np.ndarray[np.float64_t, ndim=1] zpos):
-       cdef double ax1[3], ax2[3], ax3[3]
-       cdef ParticleCollection particles
-
-       particles.npart = len(xpos)
-       particles.xpos = <double *> xpos.data
-       particles.ypos = <double *> ypos.data
-       particles.zpos = <double *> zpos.data
-
-       calculate_axes(&particles, ax1, ax2, ax3)
-
-       return ( (ax1[0], ax1[1], ax1[2]),
-                (ax2[0], ax2[1], ax2[2]),
-                (ax3[0], ax3[1], ax3[2]) )
-
-But note!  This will break or silently do the wrong thing if you feed it
-arrays that are non-contiguous.
-
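-If you need to accept arbitrary input anyway, a cheap guard is to coerce the
-arrays up front.  A sketch (``np.ascontiguousarray`` returns its argument
-unchanged when it is already contiguous with the right dtype, and copies
-otherwise):
-
-.. code-block:: python
-
-   import numpy as np
-
-   # Copy only when needed; contiguous float64 input passes straight through.
-   xpos = np.ascontiguousarray(xpos, dtype=np.float64)
-   ypos = np.ascontiguousarray(ypos, dtype=np.float64)
-   zpos = np.ascontiguousarray(zpos, dtype=np.float64)
-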
-At this point, you should have a mostly working piece of wrapper code.  And it
-was pretty easy!  Let us know if you run into any problems, or if you are
-interested in distributing your code with yt.
-
-A complete set of files is available with this documentation.  These are
-slightly different, so that the whole thing will simply compile, but they
-provide a useful example.
-
- * `axes.c <../_static/axes.c>`_
- * `axes.h <../_static/axes.h>`_
- * `axes_calculator.pyx <../_static/axes_calculator.pyx>`_
- * `axes_calculator_setup.py <../_static/axes_calculator_setup.txt>`_
-
-Exporting Data from yt
-----------------------
-
-yt is installed alongside h5py.  If you need to export your data from yt, to
-share it with people or to use it inside another code, h5py is a good way to
-do so.  You can write out complete datasets with just a few commands: import
-h5py, then save your data out to a file.
-
-.. code-block:: python
-
-   import h5py
-
-   # "some_data" is any array you want to save (e.g. an array of field values)
-   f = h5py.File("some_file.h5", "a")
-   f.create_dataset("/data", data=some_data)
-   f.close()
-
-This will create ``some_file.h5`` if necessary and add a new dataset
-(``/data``) to it.  Writing out in ASCII should be relatively straightforward.
-For instance:
-
-.. code-block:: python
-
-   f = open("my_file.txt", "w")
-   for halo in halos:
-       x, y, z = halo.center_of_mass()
-       f.write("%0.2f %0.2f %0.2f\n" % (x, y, z))
-   f.close()
-
-This example could be extended to work with any data object's fields, as well.
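-
-As a concrete sketch of that, assuming the "IsolatedGalaxy" sample dataset and
-the h5py pattern above, one could save a sphere's density field like so:
-
-.. code-block:: python
-
-   import yt
-   import h5py
-
-   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-   sp = ds.sphere("c", (10, "kpc"))
-
-   # Store the raw values; record the units as an attribute alongside them.
-   with h5py.File("sphere_density.h5", "w") as f:
-       dset = f.create_dataset("/density", data=sp["density"])
-       dset.attrs["units"] = str(sp["density"].units)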

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/source/analyzing/filtering.rst
--- a/doc/source/analyzing/filtering.rst
+++ b/doc/source/analyzing/filtering.rst
@@ -193,7 +193,15 @@
     center = [0.20, 0.50, 0.10]
 
     sp = ds.sphere(center, (10, 'Mpc'))
-    prj = yt.ProjectionPlot(ds, "x", "density", center=center, width=(50, "Mpc"), data_source=sp)
+    prj = yt.ProjectionPlot(ds, "x", "density", center=center, width=(50, "Mpc"),
+                            data_source=sp)
 
     # Mark the center with a big X
     prj.annotate_marker(center, 'x', plot_args={'s':100})
+
+    prj.show()
+
+    slc = yt.SlicePlot(ds, "x", "density", center=center, width=(50, "Mpc"),
+                       data_source=sp)
+
+    slc.show()
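
Worth noting: ``data_source`` accepts any yt data object, not only spheres.
A sketch using a boolean cut instead (``cut_region`` is standard yt API,
though support for cut regions as plot data sources may vary by plot type):

    ad = ds.all_data()
    dense = ad.cut_region(['obj["density"] > 1e-29'])
    slc = yt.SlicePlot(ds, "x", "density", data_source=dense)
    slc.show()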

diff -r 560590bee632b9eff4f205e6c81f0ebd0a612e66 -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e doc/source/analyzing/index.rst
--- a/doc/source/analyzing/index.rst
+++ b/doc/source/analyzing/index.rst
@@ -1,7 +1,15 @@
 .. _analyzing:
 
-Analyzing Data
-==============
+General Data Analysis
+=====================
+
+This documentation describes much of the yt infrastructure for manipulating
+your data to extract the relevant information.  Fields, data objects, and
+units are at the heart of how yt represents data.  Beyond this, we provide
+a full description of how to filter your datasets based on specific criteria,
+how to analyze chronological datasets from the same underlying simulation or
+source (i.e. time series analysis), and how to run yt in parallel on
+multiple processors to accomplish tasks faster.
 
 .. toctree::
    :maxdepth: 2
@@ -13,5 +21,3 @@
    generating_processed_data
    time_series_analysis
    parallel_computation
-   analysis_modules/index
-   external_analysis

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/8671b89fd291/
Changeset:   8671b89fd291
Branch:      yt
User:        MatthewTurk
Date:        2014-12-30 02:56:30+00:00
Summary:     Remove commented lines, add comments, simplify projected units.

We now carry our units through somewhat self-consistently from start to
finish.  This is done by tracking the units, rather than making assumptions
about how they are converted.  In the future, this will allow us to avoid
conversions to cm without worrying about the unitless operations occurring
inside Cython, and will also enable preferred unit systems.
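
A sketch of the mechanism: for a ``YTArray`` ``d``, ``d.uq`` is a quantity of
value 1.0 carrying ``d``'s units, so recording ``d.uq`` per field while
integrating captures the projected units without assuming any conversion:

    import yt

    # a density integrated along a path length in cm
    d = yt.YTArray([1.0, 2.0], "g/cm**3") * yt.YTQuantity(1.0, "cm")
    print(d.uq)  # 1.0 g/cm**2
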
Affected #:  1 file

diff -r 371e62cd0a5826b2557ae28d497d37d7a0164e2e -r 8671b89fd2916f56bd8f446954e8fe6ac7582159 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -236,6 +236,7 @@
         else:
             raise NotImplementedError(self.method)
         self._set_center(center)
+        self._projected_units = {}
         if data_source is None: data_source = self.ds.all_data()
         for k, v in data_source.field_parameters.items():
             if k not in self.field_parameters or \
@@ -287,7 +288,6 @@
         # We do this once
         for chunk in self.data_source.chunks([], "io", local_only = False):
             self._initialize_chunk(chunk, tree)
-        # This needs to be parallel_objects-ified
         with self.data_source._field_parameter_state(self.field_parameters):
             for chunk in parallel_objects(self.data_source.chunks(
                                           chunk_fields, "io", local_only = True)): 
@@ -327,7 +327,6 @@
                 np.divide(nvals, nwvals[:,None], nvals)
         # We now convert to half-widths and center-points
         data = {}
-        #non_nan = ~np.any(np.isnan(nvals), axis=-1)
         code_length = self.ds.domain_width.units
         data['px'] = self.ds.arr(px, code_length)
         data['py'] = self.ds.arr(py, code_length)
@@ -341,30 +340,8 @@
         for fi, field in enumerate(fields):
             finfo = self.ds._get_field_info(*field)
             mylog.debug("Setting field %s", field)
-            units = finfo.units
-            # add length units to "projected units" if non-weighted 
-            # integral projection
-            if self.weight_field is None and not self._sum_only and \
-               self.method == 'integrate':
-                # See _handle_chunk where we mandate cm
-                if units == '':
-                    input_units = "cm"
-                else:
-                    input_units = "(%s) * cm" % units
-            else:
-                input_units = units
-            # Don't forget [non_nan] somewhere here.
-            self[field] = YTArray(field_data[fi].ravel(),
-                                  input_units=input_units,
-                                  registry=self.ds.unit_registry)
-            # convert units if non-weighted integral projection
-            if self.weight_field is None and not self._sum_only and \
-               self.method == 'integrate':
-                u_obj = Unit(units, registry=self.ds.unit_registry)
-                if ((u_obj.is_code_unit or self.ds.no_cgs_equiv_length) and
-                    not u_obj.is_dimensionless) and input_units != units:
-                    final_unit = "(%s) * code_length" % units
-                    self[field].convert_to_units(final_unit)
+            input_units = self._projected_units[field].units
+            self[field] = self.ds.arr(field_data[fi].ravel(), input_units)
         for i in data.keys(): self[i] = data.pop(i)
         mylog.info("Projection completed")
 
@@ -379,7 +356,7 @@
 
     def _handle_chunk(self, chunk, fields, tree):
         if self.method == "mip" or self._sum_only:
-            dl = 1.0
+            dl = self.ds.quan(1.0, "")
         else:
             # This gets explicitly converted to cm
             ax_name = self.ds.coordinates.axis_name[self.axis]
@@ -391,12 +368,22 @@
             if not dl.units.is_dimensionless:
                 dl.convert_to_units("cm")
         v = np.empty((chunk.ires.size, len(fields)), dtype="float64")
-        for i in range(len(fields)):
-            v[:,i] = chunk[fields[i]] * dl
+        for i, field in enumerate(fields):
+            d = chunk[field] * dl
+            v[:,i] = d
+            self._projected_units[field] = d.uq
         if self.weight_field is not None:
             w = chunk[self.weight_field]
             np.multiply(v, w[:,None], v)
             np.multiply(w, dl, w)
+            for field in fields:
+                # Note that this removes the dl integration, which is
+                # *correct*, but we are not fully self-consistently carrying it
+                # all through.  So if interrupted, the process will have
+                # incorrect units assigned in the projected units.  This should
+                # not be a problem, since the weight division occurs
+                # self-consistently with unitfree arrays.
+                self._projected_units[field] /= dl.uq
         else:
             w = np.ones(chunk.ires.size, dtype="float64")
         icoords = chunk.icoords


https://bitbucket.org/yt_analysis/yt/commits/ed56d4b20a0c/
Changeset:   ed56d4b20a0c
Branch:      yt
User:        MatthewTurk
Date:        2015-01-03 19:30:57+00:00
Summary:     Merging with mainline
Affected #:  20 files

diff -r 8671b89fd2916f56bd8f446954e8fe6ac7582159 -r ed56d4b20a0cee5bc49490f64e9f69c0a2cf46d7 doc/extensions/notebook_sphinxext.py
--- a/doc/extensions/notebook_sphinxext.py
+++ b/doc/extensions/notebook_sphinxext.py
@@ -71,11 +71,16 @@
 
         skip_exceptions = 'skip_exceptions' in self.options
 
-        evaluated_text, resources = evaluate_notebook(
+        ret = evaluate_notebook(
             nb_abs_path, dest_path_eval, skip_exceptions=skip_exceptions)
 
-        evaluated_text = write_notebook_output(
-            resources, image_dir, image_rel_dir, evaluated_text)
+        try:
+            evaluated_text, resources = ret
+            evaluated_text = write_notebook_output(
+                resources, image_dir, image_rel_dir, evaluated_text)
+        except ValueError:
+            # This happens when a notebook raises an unhandled exception
+            evaluated_text = ret
 
         # Create link to notebook and script files
         link_rst = "(" + \

diff -r 8671b89fd2916f56bd8f446954e8fe6ac7582159 -r ed56d4b20a0cee5bc49490f64e9f69c0a2cf46d7 doc/source/analyzing/analysis_modules/_images/dust_continuum.png
Binary file doc/source/analyzing/analysis_modules/_images/dust_continuum.png has changed

diff -r 8671b89fd2916f56bd8f446954e8fe6ac7582159 -r ed56d4b20a0cee5bc49490f64e9f69c0a2cf46d7 doc/source/analyzing/analysis_modules/radmc3d_export.rst
--- a/doc/source/analyzing/analysis_modules/radmc3d_export.rst
+++ b/doc/source/analyzing/analysis_modules/radmc3d_export.rst
@@ -6,29 +6,54 @@
 .. sectionauthor:: Andrew Myers <atmyers2 at gmail.com>
 .. versionadded:: 2.6
 
+.. figure:: _images/31micron.png
+
+    Above: a sample image showing the continuum dust emission around a massive
+    protostar, made using RADMC-3D and plotted with pyplot.
+
 `RADMC-3D
 <http://www.ita.uni-heidelberg.de/~dullemond/software/radmc-3d/>`_ is a 
 three-dimensional Monte-Carlo radiative transfer code that is capable of 
-handling both line and continuum emission. The 
+handling both line and continuum emission. yt comes equipped with a  
 :class:`~yt.analysis_modules.radmc3d_export.RadMC3DInterface.RadMC3DWriter`
-class saves AMR data to disk in an ACSII format that RADMC-3D can read. 
+class that exports AMR data to a format that RADMC-3D can read. Currently, only
+the ASCII-style data format is supported.  
 In principle, this allows one to use RADMC-3D to make synthetic observations 
 from any simulation data format that yt recognizes.
 
 Continuum Emission
 ------------------
 
-To compute thermal emission intensities, RADMC-3D needs a file called
-"dust_density.inp" that specifies the density of dust for every cell in the AMR
-index. To generate this file, first import the RADMC-3D exporter, which 
-is not loaded into your environment by default:
+To compute thermal emission intensities, RADMC-3D needs several input files that
+describe the spatial distribution of the dust and photon sources. To create these
+files, first import the RADMC-3D exporter, which is not loaded into your environment 
+by default:
 
 .. code-block:: python
 
     import yt
-    from yt.analysis_modules.radmc3d_export.api import RadMC3DWriter
+    import numpy as np
+    from yt.analysis_modules.radmc3d_export.api import RadMC3DWriter, RadMC3DSource
 
-Then, define a field that calculates the dust density in each cell. Here, we 
+Next, load up a dataset and instantiate the :class:`~yt.analysis_modules.radmc3d_export.RadMC3DInterface.RadMC3DWriter`.
+For this example, we'll use the "StarParticles" dataset,
+available `here
+<http://yt-project.org/data/>`_. 
+
+.. code-block:: python
+
+    ds = yt.load("StarParticles/plrd01000/")
+    writer = RadMC3DWriter(ds)
+
+The first data file to create is the "amr_grid.inp" file, which describes the structure 
+of the AMR index. To create this file, simply call:
+
+.. code-block:: python
+
+    writer.write_amr_grid()
+
+Next, we must give RADMC-3D information about the dust density. To do this, we
+define a field that calculates the dust density in each cell. We 
 assume a constant dust-to-gas mass ratio of 0.01:
 
 .. code-block:: python
@@ -36,41 +61,118 @@
     dust_to_gas = 0.01
     def _DustDensity(field, data):
         return dust_to_gas * data["density"]
-    yt.add_field(("gas", "dust_density"), function=_DustDensity, units="g/cm**3")
+    ds.add_field(("gas", "dust_density"), function=_DustDensity, units="g/cm**3")
 
-Now load up a dataset and call the
-:class:`~yt.analysis_modules.radmc3d_export.RadMC3DInterface.RadMC3DWriter`:
+We save this information into a file called "dust_density.inp".
 
 .. code-block:: python
 
-    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-    writer = RadMC3DWriter(ds)
-    
-    writer.write_amr_grid()
     writer.write_dust_file(("gas", "dust_density"), "dust_density.inp")
 
-The method write_amr_grid() creates an "amr_grid.inp" file that tells RADMC-3D 
-how to interpret the rest of the data, while "dust_density.inp" contains the 
-actual data field. 
-
-We can also supply temperature information. The following code creates a 
-"dust_temperature" field that is constant at 10 K, and saves it into a file 
-called "dust_temperature.inp"
+Finally, we must give RADMC-3D information about any stellar sources that are
+present. To do this, we have provided the 
+:class:`~yt.analysis_modules.radmc3d_export.RadMC3DInterface.RadMC3DSource`
+class. For this example, we place a single source with temperature 5780 K
+at the center of the domain:
 
 .. code-block:: python
 
-    def _DustTemperature(field, data):
-        return 0. * data["temperature"] + data.ds.quan(10, 'K')
-    yt.add_field(("gas", "dust_temperature"), function=_DustTemperature, units="K")
-    
-    writer.write_dust_file(("gas", "dust_temperature"), "dust_temperature.inp")
+    radius_cm = 6.96e10
+    mass_g = 1.989e33
+    position_cm = [0.0, 0.0, 0.0]
+    temperature_K = 5780.0
+    star = RadMC3DSource(radius_cm, mass_g, position_cm, temperature_K)
 
-With the "amr_grid.inp", "dust_density.inp", and "dust_temperature.inp" files, 
-RADMC-3D has everything it needs to compute the thermal dust emission (you may 
-also have to include the location and spectra of any sources, which currently 
-must be done manually).  The result is something that looks like this:
+    sources_list = [star]
+    wavelengths_micron = np.logspace(-1.0, 4.0, 1000)
 
-.. image:: _images/31micron.png
+    writer.write_source_files(sources_list, wavelengths_micron)
+
+The last line creates the files "stars.inp" and "wavelength_micron.inp",
+which describe the locations and spectra of the stellar sources as well
+as the wavelengths RADMC-3D will use in its calculations.
+
+If everything goes correctly, after executing the above code, you should have
+the files "amr_grid.inp", "dust_density.inp", "stars.inp", and "wavelength_micron.inp"
+sitting in your working directory. RADMC-3D needs a few more configuration files to 
+compute the thermal dust emission. In particular, you need an opacity file, like the 
+"dustkappa_silicate.inp" file included in RADMC-3D, a main "radmc3d.inp" file that sets
+some runtime parameters, and a "dustopac.inp" file that describes the assumed composition of the dust.
+yt cannot make these files for you; in the example that follows, we used a 
+"radmc3d.inp" file that looked like:
+
+::
+
+    nphot = 1000000
+    nphot_scat = 1000000
+
+which basically tells RADMC-3D to use 1,000,000 photon packets instead of the default 100,000. The 
+"dustopac.inp" file looked like:
+
+::
+
+    2
+    1
+    -----------------------------
+    1
+    0
+    silicate
+    -----------------------------   
+
+To get RADMC-3D to compute the dust temperature, run the command:
+
+::
+
+   ./radmc3d mctherm
+
+in the directory that contains your "amr_grid.inp", "dust_density.inp", "stars.inp", "wavelength_micron.inp",
+"radmc3d.inp", "dustkappa_silicate.inp", and "dustopac.inp" files. If everything goes correctly, you should
+get a "dust_temperature.dat" file in your working directory. Once that file is generated, you can use
+RADMC-3D to generate SEDs, images, and so forth. For example, to create an image at 31 microns, run the command:
+
+::
+
+   ./radmc3d image lambda 31 sizeau 30000 npix 800
+
+which should create a file called "image.out". You can view this image using pyplot or whatever other
+plotting package you want. To facilitate this, we provide helper functions
+that parse the image.out file, returning a header dictionary with some useful metadata
+and an np.array containing the image values. To plot this image in pyplot, you could do something like:
+
+.. code-block:: python
+
+   import matplotlib.pyplot as plt
+   import numpy as np
+   from yt.analysis_modules.radmc3d_export.api import read_radmc3d_image
+   header, image = read_radmc3d_image("image.out")
+
+   Nx = header['Nx']
+   Ny = header['Ny']
+
+   x_hi = 0.5*header["pixel_size_cm_x"]*Nx
+   x_lo = -x_hi
+   y_hi = 0.5*header["pixel_size_cm_y"]*Ny
+   y_lo = -y_hi
+   
+   X = np.linspace(x_lo, x_hi, Nx) 
+   Y = np.linspace(y_lo, y_hi, Ny) 
+
+   plt.pcolormesh(X, Y, np.log10(image), cmap='hot')
+   cbar = plt.colorbar()
+   plt.axis((x_lo, x_hi, y_lo, y_hi))
+   ax = plt.gca()
+   ax.set_xlabel(r"$x$ (cm)")
+   ax.set_ylabel(r"$y$ (cm)")
+   cbar.set_label(r"Log Intensity (erg cm$^{-2}$ s$^{-1}$ Hz$^{-1}$ ster$^{-1}$)")
+   plt.savefig('dust_continuum.png')
+
+The resulting image should look like:
+
+.. image:: _images/dust_continuum.png
+
+This barely scratches the surface of what you can do with RADMC-3D. Our goal here is
+just to describe how to use yt to export the data it knows about (densities, stellar
+sources, etc.) into a format that RADMC-3D can recognize.
 
 Line Emission
 -------------
@@ -87,7 +189,7 @@
     from yt.analysis_modules.radmc3d_export.api import RadMC3DWriter
 
     x_co = 1.0e-4
-    mu_h = 2.34e-24
+    mu_h = yt.YTQuantity(2.34e-24, 'g')
     def _NumberDensityCO(field, data):
         return (x_co/mu_h)*data["density"]
     yt.add_field(("gas", "number_density_CO"), function=_NumberDensityCO, units="cm**-3")

diff -r 8671b89fd2916f56bd8f446954e8fe6ac7582159 -r ed56d4b20a0cee5bc49490f64e9f69c0a2cf46d7 doc/source/cookbook/index.rst
--- a/doc/source/cookbook/index.rst
+++ b/doc/source/cookbook/index.rst
@@ -44,6 +44,7 @@
    embedded_javascript_animation
    embedded_webm_animation
    gadget_notebook
+   owls_notebook
    ../analyzing/analysis_modules/sunyaev_zeldovich
    fits_radio_cubes
    fits_xray_images

diff -r 8671b89fd2916f56bd8f446954e8fe6ac7582159 -r ed56d4b20a0cee5bc49490f64e9f69c0a2cf46d7 doc/source/cookbook/owls_notebook.rst
--- /dev/null
+++ b/doc/source/cookbook/owls_notebook.rst
@@ -0,0 +1,7 @@
+.. _owls-notebook:
+
+Using yt to view and analyze Gadget-OWLS outputs
+++++++++++++++++++++++++++++++++++++++++++++++++
+
+.. notebook:: yt_gadget_owls_analysis.ipynb
+

diff -r 8671b89fd2916f56bd8f446954e8fe6ac7582159 -r ed56d4b20a0cee5bc49490f64e9f69c0a2cf46d7 doc/source/cookbook/yt_gadget_owls_analysis.ipynb
--- /dev/null
+++ b/doc/source/cookbook/yt_gadget_owls_analysis.ipynb
@@ -0,0 +1,269 @@
+{
+ "metadata": {
+  "name": ""
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "heading",
+     "level": 1,
+     "metadata": {},
+     "source": [
+      "OWLS Examples"
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 2,
+     "metadata": {},
+     "source": [
+      "Setup"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The first thing you will need to run these examples is a working installation of yt.  The author or these examples followed the instructions under \"Get yt: from source\" at http://yt-project.org/ to install an up to date development version of yt.\n",
+      "\n",
+      "Next you should set the default ``test_data_dir`` in the ``.yt/config`` file in your home directory.  Note that you may have to create the directory and file if it doesn't exist already.\n",
+      "\n",
+      "> [yt]\n",
+      "\n",
+      "> test_data_dir=/home/galtay/yt-data\n",
+      "\n",
+      "Now you should create the directory referenced above (in this case ``/home/galtay/yt-data``).  The first time you request ion fields from yt it will download a supplementary data file to this directory. \n",
+      "\n",
+      "Next we will get an example OWLS snapshot.  There is a snapshot available on the yt data site, http://yt-project.org/data/snapshot_033.tar.gz (269 MB).  Save this tar.gz file in the same directory as this notebook, then unzip and untar it.  "
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now we will tell the notebook that we want figures produced inline. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%matplotlib inline"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 2,
+     "metadata": {},
+     "source": [
+      "Loading"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import yt"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now we will load the snapshot.  See the docs (http://yt-project.org/docs/dev/examining/loading_data.html#indexing-criteria) for a description of ``n_ref`` and ``over_refine_factor``."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "fname = 'snapshot_033/snap_033.0.hdf5'\n",
+      "ds = yt.load(fname, n_ref=64, over_refine_factor=1)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Set a ``YTRegion`` that contains all the data."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ad = ds.all_data()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 2,
+     "metadata": {},
+     "source": [
+      "Inspecting "
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The dataset can tell us what fields it knows about, "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds.field_list"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds.derived_field_list"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Note that the ion fields follow the naming convention described in YTEP-0003 http://ytep.readthedocs.org/en/latest/YTEPs/YTEP-0003.html#molecular-and-atomic-species-names"
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 2,
+     "metadata": {},
+     "source": [
+      "Accessing Particle Data"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The raw particle data can be accessed using the particle types.  This corresponds directly with what is in the hdf5 snapshots. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ad['PartType0', 'Coordinates']"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ad['PartType4', 'IronFromSNIa']"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ad['PartType1', 'ParticleIDs']"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ad['PartType0', 'Hydrogen']"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 2,
+     "metadata": {},
+     "source": [
+      "Projection Plots"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The projection plots make use of derived fields that store the smoothed particle data (particles smoothed onto an oct-tree).  Below we make a projection of all hydrogen gas followed by only the neutral hydrogen gas. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pz = yt.ProjectionPlot(ds, 'z', ('gas', 'H_density'))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pz.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pz = yt.ProjectionPlot(ds, 'z', ('gas', 'H_p0_density'))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pz.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r 8671b89fd2916f56bd8f446954e8fe6ac7582159 -r ed56d4b20a0cee5bc49490f64e9f69c0a2cf46d7 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -602,7 +602,8 @@
 can apply smoothing kernels to the data to produce both quantitative analysis
 and visualization. See :ref:`loading-sph-data` for more details and
 :ref:`gadget-notebook` for a detailed example of loading, analyzing, and
-visualizing a Gadget dataset.
+visualizing a Gadget dataset.  An example which makes use of a Gadget snapshot
+from the OWLS project can be found at :ref:`owls-notebook`.  
 
 Gadget data in HDF5 format can be loaded with the ``load`` command:
 

diff -r 8671b89fd2916f56bd8f446954e8fe6ac7582159 -r ed56d4b20a0cee5bc49490f64e9f69c0a2cf46d7 yt/analysis_modules/radmc3d_export/RadMC3DImageUtilities.py
--- /dev/null
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DImageUtilities.py
@@ -0,0 +1,93 @@
+'''
+
+Functions for dealing with the image.out files created by RADMC-3D
+
+'''
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+
+def parse_radmc3d_image_header(lines):
+    '''
+
+    Parses the header lines from the image.out file.
+    Returns a dictionary containing the image parameters.
+
+    '''
+
+    header_data = {}
+
+    # an integer flag describing the image format.
+    # this function only works for images made in
+    # "observer at infinity" mode, i.e. iformat is 1
+    iformat = np.int64(lines[0].strip())
+    assert(iformat == 1)
+    header_data["iformat"] = iformat
+
+    # The number of pixels in the x and y-directions
+    Nx, Ny = [np.int64(Npix) for Npix in lines[1].strip().split()]
+    header_data["Nx"] = Nx
+    header_data["Ny"] = Ny
+
+    # You can produce images at multiple wavelengths in a single
+    # pass. This function assumes that there is only 1.
+    num_wavelengths = np.int64(lines[2].strip())
+    assert(num_wavelengths == 1)
+    header_data["num_wavelengths"] = num_wavelengths
+
+    # The size of a pixel in each direction. Note that
+    # this only makes sense if iformat is 1
+    pixel_size_cm_x, pixel_size_cm_y = \
+        [np.float64(Npix) for Npix in lines[3].strip().split()]
+    header_data["pixel_size_cm_x"] = pixel_size_cm_x
+    header_data["pixel_size_cm_y"] = pixel_size_cm_y
+
+    # The wavelength at which the image was produced.
+    # We assume there is only 1 image here.
+    wavelength_microns = np.float64(lines[4].strip())  # assume 1 wavelength
+    header_data["wavelength_microns"] = wavelength_microns
+
+    return header_data
+
+
+def read_radmc3d_image(filename):
+    '''
+
+    Loads the image.out file created by radmc-3d.
+    Returns an np.array that contains the image data
+    as well as a dictionary with some useful metadata.
+
+    '''
+
+    fileh = open(filename, 'r')
+    lines = fileh.readlines()
+    fileh.close()
+
+    # The header is 5 lines long (plus a blank separator line),
+    # as per the radmc-3d manual
+    header_lines = lines[0:6]
+    header = parse_radmc3d_image_header(header_lines)
+
+    Nx = header["Nx"]
+    Ny = header["Ny"]
+
+    # The rest of the lines are the image data, with the
+    # possible exception of a newline at the end
+    image_lines = lines[6:]
+    image = np.array([np.float64(line.strip()) for line in image_lines
+                      if not line.isspace()])
+
+    # This had better be true
+    assert(image.size == Nx*Ny)
+
+    image = image.reshape(header["Nx"], header["Ny"])
+
+    return header, image

diff -r 8671b89fd2916f56bd8f446954e8fe6ac7582159 -r ed56d4b20a0cee5bc49490f64e9f69c0a2cf46d7 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -13,18 +13,18 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import yt
 import numpy as np
 from yt.utilities.lib.write_array import \
     write_3D_array, write_3D_vector_array
 
+
 class RadMC3DLayer:
     '''
 
     This class represents an AMR "layer" of the style described in
     the radmc3d manual. Unlike yt grids, layers may not have more
     than one parent, so level L grids will need to be split up
-    if they straddle two or more level L - 1 grids. 
+    if they straddle two or more level L - 1 grids.
 
     '''
     def __init__(self, level, parent, unique_id, LE, RE, dim):
@@ -51,7 +51,7 @@
         '''
 
         Returns whether or not this layer overlaps a given grid
-        
+
         '''
         LE, RE = self.get_overlap_with(grid)
         if np.any(RE <= LE):
@@ -59,6 +59,7 @@
         else:
             return True
 
+
 class RadMC3DWriter:
     '''
 
@@ -78,14 +79,13 @@
         An int corresponding to the maximum number of levels of refinement
         to include in the output. Often, this does not need to be very large
         as information on very high levels is frequently unobservable.
-        Default = 2. 
+        Default = 2.
 
     Examples
     --------
 
     This will create a field called "DustDensity" and write it out to the
-    file "dust_density.inp" in a form readable by radmc3d. It will also write
-    a "dust_temperature.inp" file with everything set to 10.0 K: 
+    file "dust_density.inp" in a form readable by RadMC3D.
 
     >>> import yt
     >>> from yt.analysis_modules.radmc3d_export.api import RadMC3DWriter
@@ -95,21 +95,16 @@
     ...     return dust_to_gas*data["Density"]
     >>> yt.add_field("DustDensity", function=_DustDensity)
 
-    >>> def _DustTemperature(field, data):
-    ...     return 10.0*data["Ones"]
-    >>> yt.add_field("DustTemperature", function=_DustTemperature)
-    
     >>> ds = yt.load("galaxy0030/galaxy0030")
+
     >>> writer = RadMC3DWriter(ds)
-    
     >>> writer.write_amr_grid()
     >>> writer.write_dust_file("DustDensity", "dust_density.inp")
-    >>> writer.write_dust_file("DustTemperature", "dust_temperature.inp")
 
     ---
 
-    This example will create a field called "NumberDensityCO" and write it out to
-    the file "numberdens_co.inp". It will also write out information about
+    This example will create a field called "NumberDensityCO" and write it out
+    to the file "numberdens_co.inp". It will also write out information about
     the gas velocity to "gas_velocity.inp" so that this broadening may be
     included in the radiative transfer calculation by radmc3d:
 
@@ -117,34 +112,34 @@
     >>> from yt.analysis_modules.radmc3d_export.api import RadMC3DWriter
 
     >>> x_co = 1.0e-4
-    >>> mu_h = 2.34e-24
+    >>> mu_h = yt.Quantity(2.34e-24, 'g')
     >>> def _NumberDensityCO(field, data):
     ...     return (x_co/mu_h)*data["Density"]
     >>> yt.add_field("NumberDensityCO", function=_NumberDensityCO)
-    
+
     >>> ds = yt.load("galaxy0030/galaxy0030")
     >>> writer = RadMC3DWriter(ds)
-    
+
     >>> writer.write_amr_grid()
     >>> writer.write_line_file("NumberDensityCO", "numberdens_co.inp")
     >>> velocity_fields = ["velocity_x", "velocity_y", "velocity_z"]
-    >>> writer.write_line_file(velocity_fields, "gas_velocity.inp") 
+    >>> writer.write_line_file(velocity_fields, "gas_velocity.inp")
 
     '''
 
     def __init__(self, ds, max_level=2):
         self.max_level = max_level
-        self.cell_count = 0 
+        self.cell_count = 0
         self.layers = []
         self.domain_dimensions = ds.domain_dimensions
-        self.domain_left_edge  = ds.domain_left_edge
+        self.domain_left_edge = ds.domain_left_edge
         self.domain_right_edge = ds.domain_right_edge
         self.grid_filename = "amr_grid.inp"
         self.ds = ds
 
-        base_layer = RadMC3DLayer(0, None, 0, \
-                                  self.domain_left_edge, \
-                                  self.domain_right_edge, \
+        base_layer = RadMC3DLayer(0, None, 0,
+                                  self.domain_left_edge,
+                                  self.domain_right_edge,
                                   self.domain_dimensions)
 
         self.layers.append(base_layer)
@@ -156,7 +151,7 @@
                 self._add_grid_to_layers(grid)
 
     def _get_parents(self, grid):
-        parents = []  
+        parents = []
         for potential_parent in self.layers:
             if potential_parent.level == grid.Level - 1:
                 if potential_parent.overlaps(grid):
@@ -169,12 +164,12 @@
             LE, RE = parent.get_overlap_with(grid)
             N = (RE - LE) / grid.dds
             N = np.array([int(n + 0.5) for n in N])
-            new_layer = RadMC3DLayer(grid.Level, parent.id, \
-                                     len(self.layers), \
+            new_layer = RadMC3DLayer(grid.Level, parent.id,
+                                     len(self.layers),
                                      LE, RE, N)
             self.layers.append(new_layer)
             self.cell_count += np.product(N)
-            
+
     def write_amr_grid(self):
         '''
         This routine writes the "amr_grid.inp" file that describes the mesh
@@ -182,12 +177,12 @@
 
         '''
         dims = self.domain_dimensions
-        LE   = self.domain_left_edge
-        RE   = self.domain_right_edge
+        LE = self.domain_left_edge
+        RE = self.domain_right_edge
 
-        # Radmc3D wants the cell wall positions in cgs. Convert here:
-        LE_cgs = LE.in_units('cm')
-        RE_cgs = RE.in_units('cm')
+        # RadMC-3D wants the cell wall positions in cgs. Convert here:
+        LE_cgs = LE.in_units('cm').d  # don't write the units, though
+        RE_cgs = RE.in_units('cm').d
 
         # calculate cell wall positions
         xs = [str(x) for x in np.linspace(LE_cgs[0], RE_cgs[0], dims[0]+1)]
@@ -196,14 +191,14 @@
 
         # writer file header
         grid_file = open(self.grid_filename, 'w')
-        grid_file.write('1 \n') # iformat is always 1
+        grid_file.write('1 \n')  # iformat is always 1
         if self.max_level == 0:
             grid_file.write('0 \n')
         else:
-            grid_file.write('10 \n') # only layer-style AMR files are supported
-        grid_file.write('1 \n') # only cartesian coordinates are supported
-        grid_file.write('0 \n') 
-        grid_file.write('{}    {}    {} \n'.format(1, 1, 1)) # assume 3D
+            grid_file.write('10 \n')  # only layer-style files are supported
+        grid_file.write('1 \n')  # only cartesian coordinates are supported
+        grid_file.write('0 \n')
+        grid_file.write('{}    {}    {} \n'.format(1, 1, 1))  # assume 3D
         grid_file.write('{}    {}    {} \n'.format(dims[0], dims[1], dims[2]))
         if self.max_level != 0:
             s = str(self.max_level) + '    ' + str(len(self.layers)-1) + '\n'
@@ -234,9 +229,9 @@
                     if potential_parent.id == p:
                         parent_LE = potential_parent.LeftEdge
                 ind = (layer.LeftEdge - parent_LE) / (2.0*dds) + 1
-            ix  = int(ind[0]+0.5)
-            iy  = int(ind[1]+0.5)
-            iz  = int(ind[2]+0.5)
+            ix = int(ind[0]+0.5)
+            iy = int(ind[1]+0.5)
+            iz = int(ind[2]+0.5)
             nx, ny, nz = layer.ActiveDimensions / 2
             s = '{}    {}    {}    {}    {}    {}    {} \n'
             s = s.format(p, ix, iy, iz, nx, ny, nz)
@@ -284,13 +279,13 @@
             lev = layer.level
             if lev == 0:
                 LE = self.domain_left_edge
-                N  = self.domain_dimensions
+                N = self.domain_dimensions
             else:
                 LE = layer.LeftEdge
-                N  = layer.ActiveDimensions
+                N = layer.ActiveDimensions
 
             self._write_layer_data_to_file(fhandle, field, lev, LE, N)
-            
+
         fhandle.close()
 
     def write_line_file(self, field, filename):
@@ -321,11 +316,108 @@
             lev = layer.level
             if lev == 0:
                 LE = self.domain_left_edge
-                N  = self.domain_dimensions
+                N = self.domain_dimensions
             else:
                 LE = layer.LeftEdge
-                N  = layer.ActiveDimensions
+                N = layer.ActiveDimensions
 
             self._write_layer_data_to_file(fhandle, field, lev, LE, N)
 
         fhandle.close()
+
+    def write_source_files(self, sources, wavelengths):
+        '''
+
+        This function creates the stars.inp and wavelength_micron.inp
+        files that RadMC3D uses for its dust continuum calculations.
+
+        Parameters
+        ----------
+
+        sources: a list of RadMC3DSource objects
+            A list that contains all the sources you would like yt
+            to write out
+        wavelengths: np.array of float values
+            An array listing the wavelength points you would like to
+            use in the radiative transfer calculation
+
+        '''
+
+        nstars = len(sources)
+        nlam = len(wavelengths)
+
+        filename = 'stars.inp'
+        fhandle = open(filename, 'w')
+
+        # write header
+        fhandle.write('2 \n')  # a format flag that should always be 2
+        fhandle.write('%d    %d \n' % (nstars, nlam))
+
+        # write source information
+        for source in sources:
+            fhandle.write(str(source.radius) + ' ')
+            fhandle.write(str(source.mass) + ' ')
+            fhandle.write('%f %f %f' %(source.position[0], \
+                                       source.position[1], \
+                                       source.position[2]))
+            fhandle.write('\n')
+
+        # write wavelength information
+        for wavelength in wavelengths:
+            fhandle.write('%f \n' % wavelength)
+
+        # finally write blackbody temperature for each source
+        for source in sources:
+            # the negative sign is a flag used internally
+            # by RadMC3D to indicate that this is a blackbody
+            # source
+            fhandle.write('%f \n' % -source.temperature)
+
+        # done with stars.inp
+        fhandle.close()
+
+        # now do the wavelength_micron.inp file
+        filename = 'wavelength_micron.inp'
+        fhandle = open(filename, 'w')
+
+        fhandle.write('%d \n' % nlam)
+        for wavelength in wavelengths:
+            fhandle.write('%f \n' % wavelength)
+
+        # done with both
+        fhandle.close()
+
+
+class RadMC3DSource:
+    '''
+
+    A class that contains the data associated with a single RadMC3D photon source.
+    This is designed to help export data about the stars in a dataset into a format 
+    that can be read in by RadMC3D. Although RadMC3D can handle non-blackbody 
+    sources, here we assume that the source is a blackbody with a given temperature.
+
+    Parameters
+    ----------
+
+    radius: float
+        The size of the source in cm
+    mass: float 
+        The mass of the source in g
+    position: list of floats
+        The x, y, and z coordinates of the source, in cm
+    temperature: float
+        The blackbody temperature of the source, in K
+
+    '''
+
+    def __init__(self, radius, mass, position, temperature):
+        self.radius = radius
+        self.mass = mass
+        self.position = position
+        self.temperature = temperature
+
+        # some basic sanity checks
+        assert(self.radius > 0.0)
+        assert(self.mass > 0.0)
+        assert(self.temperature > 0)
+        assert(len(self.position) == 3)  # 3D only, please

diff -r 8671b89fd2916f56bd8f446954e8fe6ac7582159 -r ed56d4b20a0cee5bc49490f64e9f69c0a2cf46d7 yt/analysis_modules/radmc3d_export/api.py
--- a/yt/analysis_modules/radmc3d_export/api.py
+++ b/yt/analysis_modules/radmc3d_export/api.py
@@ -14,4 +14,8 @@
 #-----------------------------------------------------------------------------
 
 from .RadMC3DInterface import \
-    RadMC3DWriter
+    RadMC3DWriter, \
+    RadMC3DSource
+
+from .RadMC3DImageUtilities import \
+    read_radmc3d_image

diff -r 8671b89fd2916f56bd8f446954e8fe6ac7582159 -r ed56d4b20a0cee5bc49490f64e9f69c0a2cf46d7 yt/analysis_modules/radmc3d_export/tests/test_radmc3d_exporter.py
--- a/yt/analysis_modules/radmc3d_export/tests/test_radmc3d_exporter.py
+++ b/yt/analysis_modules/radmc3d_export/tests/test_radmc3d_exporter.py
@@ -13,14 +13,71 @@
 import yt
 from yt.testing import *
 from yt.analysis_modules.radmc3d_export.api import RadMC3DWriter
+from yt.utilities.answer_testing.framework import \
+    AnswerTestingTest, \
+    requires_ds
 from yt.config import ytcfg
 import tempfile
 import os
 import shutil
 
+
+class RadMC3DValuesTest(AnswerTestingTest):
+    '''
+
+    This test writes out a "dust_density.inp" file,
+    reads it back in, and compares the sum of the
+    values against the stored answer to check for degradation.
+
+    '''
+    _type_name = "RadMC3DValuesTest"
+    _attrs = ("field", )
+
+    def __init__(self, ds_fn, field, decimals=10):
+        super(RadMC3DValuesTest, self).__init__(ds_fn)
+        self.field = field
+        self.decimals = decimals
+
+    def run(self):
+
+        # Set up in a temp dir
+        tmpdir = tempfile.mkdtemp()
+        curdir = os.getcwd()
+        os.chdir(tmpdir)
+
+        # try to write the output files
+        writer = RadMC3DWriter(self.ds)
+        writer.write_amr_grid()
+        writer.write_dust_file(self.field, "dust_density.inp")
+
+        # compute the sum of the values in the resulting file
+        total = 0.0
+        with open('dust_density.inp', 'r') as f:
+            for i, line in enumerate(f):
+
+                # skip header
+                if i < 3:
+                    continue
+
+                line = line.rstrip()
+                total += np.float64(line)
+
+        # clean up
+        os.chdir(curdir)
+        shutil.rmtree(tmpdir)
+
+        return total
+
+    def compare(self, new_result, old_result):
+        err_msg = "Total value for %s not equal." % (self.field,)
+        assert_allclose(new_result, old_result, 10.**(-self.decimals),
+                        err_msg=err_msg, verbose=True)
+
+
 ISO_GAL = "IsolatedGalaxy/galaxy0030/galaxy0030"
 
-@requires_file(ISO_GAL)
+
+@requires_ds(ISO_GAL)
 def test_radmc3d_exporter_continuum():
     """
     This test is simply following the description in the docs for how to
@@ -28,29 +85,12 @@
     dust for one of our sample datasets.
     """
 
-    # Set up in a temp dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
+    ds = yt.load(ISO_GAL)
 
     # Make up a dust density field where dust density is 1% of gas density
     dust_to_gas = 0.01
     def _DustDensity(field, data):
         return dust_to_gas * data["density"]
-    
-    # Make up a dust temperature field where temp = 10K everywhere
-    def _DustTemperature(field, data):
-        return 0.*data["temperature"] + data.ds.quan(10,'K')
-    
-    ds = yt.load(ISO_GAL)
     ds.add_field(("gas", "dust_density"), function=_DustDensity, units="g/cm**3")
-    ds.add_field(("gas", "dust_temperature"), function=_DustTemperature, units="K")
-    writer = RadMC3DWriter(ds)
-    
-    writer.write_amr_grid()
-    writer.write_dust_file(("gas", "dust_density"), "dust_density.inp")
-    writer.write_dust_file(("gas", "dust_temperature"), "dust_temperature.inp")
 
-    # clean up
-    os.chdir(curdir)
-    shutil.rmtree(tmpdir)
+    yield RadMC3DValuesTest(ds, ("gas", "dust_density"))

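The yield-based rewrite hooks the exporter into yt's answer-testing
framework: run() produces a value that is stored once and later revisions
are checked against it by compare(). Roughly how that is driven (a sketch,
assuming the nose plugin flags documented for yt at this time):

    # store a reference answer:
    #   nosetests --with-answer-testing --answer-store \
    #       yt.analysis_modules.radmc3d_export.tests.test_radmc3d_exporter
    # then, on a later revision, compare against the stored answer:
    #   nosetests --with-answer-testing \
    #       yt.analysis_modules.radmc3d_export.tests.test_radmc3d_exporter
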
diff -r 8671b89fd2916f56bd8f446954e8fe6ac7582159 -r ed56d4b20a0cee5bc49490f64e9f69c0a2cf46d7 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -42,7 +42,7 @@
 from yt.utilities.minimal_representation import \
     MinimalProjectionData
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    parallel_objects, parallel_root_only 
+    parallel_objects, parallel_root_only, communication_system
 from yt.units.unit_object import Unit
 import yt.geometry.particle_deposit as particle_deposit
 from yt.utilities.grid_data_format.writer import write_to_gdf
@@ -285,9 +285,11 @@
         if self.weight_field is not None:
             chunk_fields.append(self.weight_field)
         tree = self._get_tree(len(fields))
-        # We do this once
-        for chunk in self.data_source.chunks([], "io", local_only = False):
-            self._initialize_chunk(chunk, tree)
+        # This only needs to be done if we are in parallel; otherwise, we can
+        # safely build the mesh as we go.
+        if communication_system.communicators[-1].size > 1:
+            for chunk in self.data_source.chunks([], "io", local_only = False):
+                self._initialize_chunk(chunk, tree)
         with self.data_source._field_parameter_state(self.field_parameters):
             for chunk in parallel_objects(self.data_source.chunks(
                                           chunk_fields, "io", local_only = True)): 

diff -r 8671b89fd2916f56bd8f446954e8fe6ac7582159 -r ed56d4b20a0cee5bc49490f64e9f69c0a2cf46d7 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -38,6 +38,7 @@
         ("Metals", ("code_metallicity", ["metallicity"], None)),
         ("Metallicity", ("code_metallicity", ["metallicity"], None)),
         ("Phi", ("code_length", [], None)),
+        ("StarFormationRate", ("code_mass / code_time", [], None)),
         ("FormationTime", ("code_time", ["creation_time"], None)),
         # These are metallicity fields that get discovered for FIRE simulations
         ("Metallicity_00", ("", ["metallicity"], None)),

diff -r 8671b89fd2916f56bd8f446954e8fe6ac7582159 -r ed56d4b20a0cee5bc49490f64e9f69c0a2cf46d7 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -384,7 +384,7 @@
     cfg_option = "__topcomm_parallel_rank"
     if not ytcfg.getboolean("yt","__parallel"):
         return True
-    if ytcfg.getint("yt", cfg_option) > 0: 
+    if ytcfg.getint("yt", cfg_option) > 0:
         return False
     return True
 
@@ -441,15 +441,6 @@
     print "Traceback pasted to http://paste.yt-project.org/show/%s" % (ret)
     print
 
-def traceback_writer_hook(file_suffix = ""):
-    def write_to_file(exc_type, exc, tb):
-        sys.__excepthook__(exc_type, exc, tb)
-        fn = "yt_traceback%s" % file_suffix
-        f = open(fn, "w")
-        traceback.print_exception(exc_type, exc, tb, file=f)
-        print "Wrote traceback to %s" % fn
-    return write_to_file
-
 _ss = "fURbBUUBE0cLXgETJnZgJRMXVhVGUQpQAUBuehQMUhJWRFFRAV1ERAtBXw1dAxMLXT4zXBFfABNN\nC0ZEXw1YUURHCxMXVlFERwxWCQw=\n"
 def _rdbeta(key):
     import itertools, base64

diff -r 8671b89fd2916f56bd8f446954e8fe6ac7582159 -r ed56d4b20a0cee5bc49490f64e9f69c0a2cf46d7 yt/utilities/lib/QuadTree.pyx
--- a/yt/utilities/lib/QuadTree.pyx
+++ b/yt/utilities/lib/QuadTree.pyx
@@ -16,8 +16,6 @@
 
 import numpy as np
 cimport numpy as np
-# Double up here for def'd functions
-cimport numpy as cnp
 cimport cython
 from fp_utils cimport fmax
 
@@ -61,15 +59,16 @@
     cdef int i, j, i1, j1
     cdef np.int64_t npos[2]
     cdef QuadTreeNode *node
+    cdef np.float64_t *tvals = <np.float64_t *> alloca(
+            sizeof(np.float64_t) * nvals)
+    for i in range(nvals): tvals[i] = 0.0
     for i in range(2):
         npos[0] = self.pos[0] * 2 + i
         for j in range(2):
             npos[1] = self.pos[1] * 2 + j
             # We have to be careful with allocation...
             self.children[i][j] = QTN_initialize(
-                        npos, nvals, self.val, self.weight_val)
-    for i in range(nvals): self.val[i] = 0.0
-    self.weight_val = 0.0
+                        npos, nvals, tvals, 0.0)
 
 cdef QuadTreeNode *QTN_initialize(np.int64_t pos[2], int nvals,
                         np.float64_t *val, np.float64_t weight_val):
@@ -100,9 +99,6 @@
 
 cdef class QuadTree:
     cdef int nvals
-    # Hardcode to a maximum 80 levels of refinement.
-    # TODO: Update when we get to yottascale.
-    cdef np.int64_t po2[80] 
     cdef QuadTreeNode ***root_nodes
     cdef np.int64_t top_grid_dims[2]
     cdef int merged
@@ -137,9 +133,6 @@
         self.dds[0] = (self.bounds[1] - self.bounds[0])/self.top_grid_dims[0]
         self.dds[1] = (self.bounds[3] - self.bounds[2])/self.top_grid_dims[1]
 
-        # This wouldn't be necessary if we did bitshifting...
-        for i in range(80):
-            self.po2[i] = 2**i
         self.root_nodes = <QuadTreeNode ***> \
             malloc(sizeof(QuadTreeNode **) * top_grid_dims[0])
 
@@ -252,7 +245,7 @@
     cdef int add_to_position(self,
                  int level, np.int64_t pos[2],
                  np.float64_t *val,
-                 np.float64_t weight_val, skip = 0):
+                 np.float64_t weight_val, int skip = 0):
         cdef int i, j, L
         cdef QuadTreeNode *node
         node = self.find_on_root_level(pos, level)
@@ -264,9 +257,8 @@
                 QTN_refine(node, self.nvals)
                 self.num_cells += 4
             # Maybe we should use bitwise operators?
-            fac = self.po2[level - L - 1]
-            i = (pos[0] >= fac*(2*node.pos[0]+1))
-            j = (pos[1] >= fac*(2*node.pos[1]+1))
+            i = (pos[0] >> (level - L - 1)) & 1
+            j = (pos[1] >> (level - L - 1)) & 1
             node = node.children[i][j]
         if skip == 1: return 0
         self.combine(node, val, weight_val, self.nvals)
@@ -277,10 +269,10 @@
         # We need this because the root level won't just have four children
         # So we find on the root level, then we traverse the tree.
         cdef np.int64_t i, j
-        i = <np.int64_t> (pos[0] / self.po2[level])
-        j = <np.int64_t> (pos[1] / self.po2[level])
-        if i > self.top_grid_dims[0] or i < 0 or \
-           j > self.top_grid_dims[1] or j < 0:
+        i = pos[0] >> level
+        j = pos[1] >> level
+        if i >= self.top_grid_dims[0] or i < 0 or \
+           j >= self.top_grid_dims[1] or j < 0:
             self.last_dims[0] = i
             self.last_dims[1] = j
             return NULL
@@ -293,12 +285,11 @@
             np.ndarray[np.float64_t, ndim=2] pvals,
             np.ndarray[np.float64_t, ndim=1] pweight_vals,
             int skip = 0):
-        cdef int np = pxs.shape[0]
         cdef int p
-        cdef cnp.float64_t *vals
-        cdef cnp.float64_t *data = <cnp.float64_t *> pvals.data
-        cdef cnp.int64_t pos[2]
-        for p in range(np):
+        cdef np.float64_t *vals
+        cdef np.float64_t *data = <np.float64_t *> pvals.data
+        cdef np.int64_t pos[2]
+        for p in range(pxs.shape[0]):
             vals = data + self.nvals*p
             pos[0] = pxs[p]
             pos[1] = pys[p]
@@ -314,15 +305,19 @@
             np.ndarray[np.float64_t, ndim=2] pvals,
             np.ndarray[np.float64_t, ndim=1] pweight_vals):
         cdef int ps = pxs.shape[0]
-        cdef int p
-        cdef cnp.float64_t *vals
-        cdef cnp.float64_t *data = <cnp.float64_t *> pvals.data
-        cdef cnp.int64_t pos[2]
+        cdef int p, rv
+        cdef np.float64_t *vals
+        cdef np.float64_t *data = <np.float64_t *> pvals.data
+        cdef np.int64_t pos[2]
         for p in range(ps):
             vals = data + self.nvals*p
             pos[0] = pxs[p]
             pos[1] = pys[p]
-            self.add_to_position(level[p], pos, vals, pweight_vals[p])
+            rv = self.add_to_position(level[p], pos, vals, pweight_vals[p])
+            if rv == -1:
+                raise YTIntDomainOverflow(
+                    (self.last_dims[0], self.last_dims[1]),
+                    (self.top_grid_dims[0], self.top_grid_dims[1]))
         return
 
     @cython.boundscheck(False)
@@ -333,8 +328,8 @@
             np.ndarray[np.int64_t, ndim=1] level):
         cdef int num = pxs.shape[0]
         cdef int p, rv
-        cdef cnp.float64_t *vals
-        cdef cnp.int64_t pos[2]
+        cdef np.float64_t *vals
+        cdef np.int64_t pos[2]
         for p in range(num):
             pos[0] = pxs[p]
             pos[1] = pys[p]

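The po2 table could be removed because, on a power-of-two lattice, dividing
a position by 2**level is a right shift, and comparing against
fac*(2*node.pos+1) is just reading one bit of the position. (Note too that
the root-level bounds check is tightened from > to >=, closing an
off-by-one.) A small plain-Python sketch of the child-index equivalence:

    # A node at depth L with index node_pos covers positions
    # [node_pos * 2**(level - L), (node_pos + 1) * 2**(level - L))
    # at refinement `level`.
    level, L, node_pos = 4, 2, 1
    fac = 2 ** (level - L - 1)
    lo = node_pos * 2 ** (level - L)
    hi = (node_pos + 1) * 2 ** (level - L)
    for pos in range(lo, hi):
        # old test: which side of the node's midpoint is pos on?
        old = int(pos >= fac * (2 * node_pos + 1))
        # new test: read bit (level - L - 1) of pos directly
        new = (pos >> (level - L - 1)) & 1
        assert old == new
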
diff -r 8671b89fd2916f56bd8f446954e8fe6ac7582159 -r ed56d4b20a0cee5bc49490f64e9f69c0a2cf46d7 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -24,7 +24,7 @@
 from functools import wraps
 
 from yt.funcs import \
-    ensure_list, iterable, traceback_writer_hook
+    ensure_list, iterable
 
 from yt.config import ytcfg
 import yt.utilities.logger
@@ -55,7 +55,7 @@
 
 class FilterAllMessages(logging.Filter):
     """
-    This is a simple filter for logging.Logger's that won't let any 
+    This is a simple filter for logging.Logger's that won't let any
     messages pass.
     """
     def filter(self, record):
@@ -63,12 +63,33 @@
 
 # Set up translation table and import things
 
+
+def traceback_writer_hook(file_suffix=""):
+    def write_to_file(exc_type, exc, tb):
+        sys.__excepthook__(exc_type, exc, tb)
+        fn = "yt_traceback%s" % file_suffix
+        with open(fn, "w") as fhandle:
+            traceback.print_exception(exc_type, exc, tb, file=fhandle)
+            print "Wrote traceback to %s" % fn
+        MPI.COMM_WORLD.Abort(1)
+    return write_to_file
+
+
+def default_mpi_excepthook(exception_type, exception_value, tb):
+    traceback.print_tb(tb)
+    mylog.error('%s: %s' % (exception_type.__name__, exception_value))
+    comm = yt.communication_system.communicators[-1]
+    if comm.size > 1:
+        mylog.error('Error occured on rank %d.' % comm.rank)
+    MPI.COMM_WORLD.Abort(1)
+
+
 def enable_parallelism(suppress_logging=False, communicator=None):
     """
-    This method is used inside a script to turn on MPI parallelism, via 
+    This method is used inside a script to turn on MPI parallelism, via
     mpi4py.  More information about running yt in parallel can be found
     here: http://yt-project.org/docs/3.0/analyzing/parallel_computation.html
-    
+
     Parameters
     ----------
     suppress_logging : bool
@@ -88,7 +109,7 @@
         return
     MPI = _MPI
     exe_name = os.path.basename(sys.executable)
-    
+
     # if no communicator specified, set to COMM_WORLD
     if communicator is None:
         communicator = MPI.COMM_WORLD
@@ -115,8 +136,12 @@
                                         yt.utilities.logger.ufstring))
     if len(yt.utilities.logger.ytLogger.handlers) > 0:
         yt.utilities.logger.ytLogger.handlers[0].setFormatter(f)
+
     if ytcfg.getboolean("yt", "parallel_traceback"):
         sys.excepthook = traceback_writer_hook("_%03i" % communicator.rank)
+    else:
+        sys.excepthook = default_mpi_excepthook
+
     if ytcfg.getint("yt","LogLevel") < 20:
         yt.utilities.logger.ytLogger.warning(
           "Log Level is set low -- this could affect parallel performance!")

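The net effect of the excepthook changes is that, under MPI, an uncaught
exception on any rank now calls MPI.COMM_WORLD.Abort(1) instead of leaving
the other ranks deadlocked. A minimal sketch of how this reaches a user
script (the dataset path is hypothetical):

    # run with, e.g.: mpirun -np 4 python this_script.py
    import yt
    yt.enable_parallelism()  # installs default_mpi_excepthook, or
                             # traceback_writer_hook when the config
                             # option "parallel_traceback" is set

    ds = yt.load("my_dataset")  # hypothetical path
    # an uncaught exception anywhere past this point aborts all ranks
    dd = ds.all_data()
    print dd.quantities.total_quantity("cell_volume")
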
diff -r 8671b89fd2916f56bd8f446954e8fe6ac7582159 -r ed56d4b20a0cee5bc49490f64e9f69c0a2cf46d7 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -444,7 +444,8 @@
             labels = ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels()
             labels += cbax.yaxis.get_ticklabels()
             labels += [ax.title, ax.xaxis.label, ax.yaxis.label,
-                       cbax.yaxis.label, cbax.yaxis.get_offset_text()]
+                       cbax.yaxis.label, cbax.yaxis.get_offset_text(),
+                       ax.xaxis.get_offset_text(), ax.yaxis.get_offset_text()]
             for label in labels:
                 label.set_fontproperties(self._font_properties)
                 if self._font_color is not None:

diff -r 8671b89fd2916f56bd8f446954e8fe6ac7582159 -r ed56d4b20a0cee5bc49490f64e9f69c0a2cf46d7 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -352,7 +352,7 @@
         Examples
         --------
         >>> im = cam.snapshot()
-        >>> cam.draw__coordinate_vectors(im)
+        >>> cam.draw_coordinate_vectors(im)
         >>> im.write_png('render_with_grids.png')
 
         """
@@ -383,22 +383,23 @@
 
     def draw_line(self, im, x0, x1, color=None):
         r"""Draws a line on an existing volume rendering.
-
         Given starting and ending positions x0 and x1, draws a line on 
         a volume rendering using the camera orientation.
 
         Parameters
         ----------
-        im: Numpy ndarray
+        im : ImageArray or 2D ndarray
             Existing image that has the same resolution as the Camera, 
             which will be painted by grid lines.
-        x0 : Numpy ndarray
-            Starting coordinate, in simulation coordinates
-        x1 : Numpy ndarray
-            Ending coordinate, in simulation coordinates
+        x0 : YTArray or ndarray
+            Starting coordinate.  If passed in as an ndarray,
+            assumed to be in code units.
+        x1 : YTArray or ndarray
+            Ending coordinate.  If passed in as an ndarray,
+            assumed to be in code units.
         color : array like, optional
             Color of the line (r, g, b, a). Defaults to white. 
-        
+
         Returns
         -------
         None
@@ -410,7 +411,13 @@
         >>> write_bitmap(im, 'render_with_line.png')
 
         """
-        if color is None: color = np.array([1.0,1.0,1.0,1.0])
+        if color is None:
+            color = np.array([1.0,1.0,1.0,1.0])
+
+        if not hasattr(x0, "units"):
+            x0 = self.ds.arr(x0, "code_length")
+        if not hasattr(x1, "units"):
+            x1 = self.ds.arr(x1, "code_length")
 
         dx0 = ((x0-self.origin)*self.orienter.unit_vectors[1]).sum()
         dx1 = ((x1-self.origin)*self.orienter.unit_vectors[1]).sum()
@@ -423,7 +430,8 @@
 
         # we flipped it in snapshot to get the orientation correct, so
         # flip the lines
-        lines(im, np.array([px0,px1]), np.array([py0,py1]), color=np.array([color,color]),flip=1)
+        lines(im, np.array([px0, px1]), np.array([py0, py1]),
+              np.array([color,color]), flip=1)
 
     def draw_domain(self,im,alpha=0.3):
         r"""Draws domain edges on an existing volume rendering.

diff -r 8671b89fd2916f56bd8f446954e8fe6ac7582159 -r ed56d4b20a0cee5bc49490f64e9f69c0a2cf46d7 yt/visualization/volume_rendering/tests/test_vr_cameras.py
--- a/yt/visualization/volume_rendering/tests/test_vr_cameras.py
+++ b/yt/visualization/volume_rendering/tests/test_vr_cameras.py
@@ -78,6 +78,11 @@
         cam.snapshot('camera.png')
         assert_fname('camera.png')
 
+        im = cam.snapshot()
+        im = cam.draw_domain(im)
+        cam.draw_coordinate_vectors(im)
+        cam.draw_line(im, [0,0,0], [1,1,0])
+
     def test_data_source_camera(self):
         ds = self.ds
         tf = self.setup_transfer_function('camera')


https://bitbucket.org/yt_analysis/yt/commits/821d24988cef/
Changeset:   821d24988cef
Branch:      yt
User:        xarthisius
Date:        2015-01-04 05:29:10+00:00
Summary:     Merged in MatthewTurk/yt (pull request #1307)

Explicitly use path length for projections
Affected #:  17 files

diff -r 20d4fcc40283217c698762168f98fbe9eb2e29ec -r 821d24988cef8f9163cd73f8acc11d9910367489 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -236,6 +236,7 @@
         else:
             raise NotImplementedError(self.method)
         self._set_center(center)
+        self._projected_units = {}
         if data_source is None: data_source = self.ds.all_data()
         for k, v in data_source.field_parameters.items():
             if k not in self.field_parameters or \
@@ -289,7 +290,6 @@
         if communication_system.communicators[-1].size > 1:
             for chunk in self.data_source.chunks([], "io", local_only = False):
                 self._initialize_chunk(chunk, tree)
-        # This needs to be parallel_objects-ified
         with self.data_source._field_parameter_state(self.field_parameters):
             for chunk in parallel_objects(self.data_source.chunks(
                                           chunk_fields, "io", local_only = True)): 
@@ -329,7 +329,6 @@
                 np.divide(nvals, nwvals[:,None], nvals)
         # We now convert to half-widths and center-points
         data = {}
-        #non_nan = ~np.any(np.isnan(nvals), axis=-1)
         code_length = self.ds.domain_width.units
         data['px'] = self.ds.arr(px, code_length)
         data['py'] = self.ds.arr(py, code_length)
@@ -343,30 +342,8 @@
         for fi, field in enumerate(fields):
             finfo = self.ds._get_field_info(*field)
             mylog.debug("Setting field %s", field)
-            units = finfo.units
-            # add length units to "projected units" if non-weighted 
-            # integral projection
-            if self.weight_field is None and not self._sum_only and \
-               self.method == 'integrate':
-                # See _handle_chunk where we mandate cm
-                if units == '':
-                    input_units = "cm"
-                else:
-                    input_units = "(%s) * cm" % units
-            else:
-                input_units = units
-            # Don't forget [non_nan] somewhere here.
-            self[field] = YTArray(field_data[fi].ravel(),
-                                  input_units=input_units,
-                                  registry=self.ds.unit_registry)
-            # convert units if non-weighted integral projection
-            if self.weight_field is None and not self._sum_only and \
-               self.method == 'integrate':
-                u_obj = Unit(units, registry=self.ds.unit_registry)
-                if ((u_obj.is_code_unit or self.ds.no_cgs_equiv_length) and
-                    not u_obj.is_dimensionless) and input_units != units:
-                    final_unit = "(%s) * code_length" % units
-                    self[field].convert_to_units(final_unit)
+            input_units = self._projected_units[field].units
+            self[field] = self.ds.arr(field_data[fi].ravel(), input_units)
         for i in data.keys(): self[i] = data.pop(i)
         mylog.info("Projection completed")
 
@@ -381,18 +358,34 @@
 
     def _handle_chunk(self, chunk, fields, tree):
         if self.method == "mip" or self._sum_only:
-            dl = 1.0
+            dl = self.ds.quan(1.0, "")
         else:
             # This gets explicitly converted to cm
-            dl = chunk.fwidth[:, self.axis]
-            dl.convert_to_units("cm")
+            ax_name = self.ds.coordinates.axis_name[self.axis]
+            dl = chunk["index", "path_element_%s" % (ax_name)]
+            # This is done for cases where our path element does not have a CGS
+            # equivalent.  Once "preferred units" have been implemented, this
+            # will not be necessary at all, as the final conversion will occur
+            # at the display layer.
+            if not dl.units.is_dimensionless:
+                dl.convert_to_units("cm")
         v = np.empty((chunk.ires.size, len(fields)), dtype="float64")
-        for i in range(len(fields)):
-            v[:,i] = chunk[fields[i]] * dl
+        for i, field in enumerate(fields):
+            d = chunk[field] * dl
+            v[:,i] = d
+            self._projected_units[field] = d.uq
         if self.weight_field is not None:
             w = chunk[self.weight_field]
             np.multiply(v, w[:,None], v)
             np.multiply(w, dl, w)
+            for field in fields:
+                # Note that this removes the dl integration, which is
+                # *correct*, but we are not fully self-consistently carrying it
+                # all through.  So if interrupted, the process will have
+                # incorrect units assigned in the projected units.  This should
+                # not be a problem, since the weight division occurs
+                # self-consistently with unit-free arrays.
+                self._projected_units[field] /= dl.uq
         else:
             w = np.ones(chunk.ires.size, dtype="float64")
         icoords = chunk.icoords

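To make the change above concrete: a projection is the discrete line
integral, a sum over cells of field * dl, and dl is now the geometry-aware
("index", "path_element_<axis>") field rather than the raw cell width along
the axis. The two agree in Cartesian coordinates but not in curvilinear
ones. A hedged sketch using the test helper extended later in this
changeset:

    import yt
    from yt.testing import fake_amr_ds

    # a random-data spherical dataset; bounds are r in [0, 1],
    # theta in [0, pi], phi in [0, 2*pi]
    ds = fake_amr_ds(fields=("Density",), geometry="spherical")

    # projecting along axis 2 (phi) now integrates
    #     Density * r * sin(theta) * dphi
    # via ("index", "path_element_phi") rather than Density * dphi
    prj = ds.proj("Density", 2)
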
diff -r 20d4fcc40283217c698762168f98fbe9eb2e29ec -r 821d24988cef8f9163cd73f8acc11d9910367489 yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -190,6 +190,8 @@
         self.requested_parameters.append(param)
         if param in ['bulk_velocity', 'center', 'normal']:
             return self.ds.arr(np.random.random(3) * 1e-2, self.fp_units[param])
+        elif param in ['surface_height']:
+            return self.ds.quan(0.0, 'code_length')
         elif param in ['axis']:
             return 0
         elif param.startswith("cp_"):

diff -r 20d4fcc40283217c698762168f98fbe9eb2e29ec -r 821d24988cef8f9163cd73f8acc11d9910367489 yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -32,6 +32,9 @@
             registry.add_field(("index", "d%s" % ax), function = f1,
                                display_field = False,
                                units = "code_length")
+            registry.add_field(("index", "path_element_%s" % ax), function = f1,
+                               display_field = False,
+                               units = "code_length")
             registry.add_field(("index", "%s" % ax), function = f2,
                                display_field = False,
                                units = "code_length")

diff -r 20d4fcc40283217c698762168f98fbe9eb2e29ec -r 821d24988cef8f9163cd73f8acc11d9910367489 yt/geometry/coordinates/cylindrical_coordinates.py
--- a/yt/geometry/coordinates/cylindrical_coordinates.py
+++ b/yt/geometry/coordinates/cylindrical_coordinates.py
@@ -47,7 +47,7 @@
                            display_field = False,
                            units = "code_length")
 
-        f1, f2 = _get_coord_fields(1)
+        f1, f2 = _get_coord_fields(self.axis_id['z'])
         registry.add_field(("index", "dz"), function = f1,
                            display_field = False,
                            units = "code_length")
@@ -55,7 +55,7 @@
                            display_field = False,
                            units = "code_length")
 
-        f1, f2 = _get_coord_fields(2, "")
+        f1, f2 = _get_coord_fields(self.axis_id['theta'], "")
         registry.add_field(("index", "dtheta"), function = f1,
                            display_field = False,
                            units = "")
@@ -72,6 +72,22 @@
                  function=_CylindricalVolume,
                  units = "code_length**3")
 
+        def _path_r(field, data):
+            return data["index", "dr"]
+        registry.add_field(("index", "path_element_r"),
+                 function = _path_r,
+                 units = "code_length")
+        def _path_theta(field, data):
+            # Note: this already assumes cell-centered
+            return data["index", "r"] * data["index", "dtheta"]
+        registry.add_field(("index", "path_element_theta"),
+                 function = _path_theta,
+                 units = "code_length")
+        def _path_z(field, data):
+            return data["index", "dz"]
+        registry.add_field(("index", "path_element_z"),
+                 function = _path_z,
+                 units = "code_length")
 
     def pixelize(self, dimension, data_source, field, bounds, size,
                  antialias = True, periodic = True):
@@ -101,10 +117,10 @@
         return buff
 
     def _cyl_pixelize(self, data_source, field, bounds, size, antialias):
-        buff = pixelize_cylinder(data_source['r'],
-                                 data_source['dr'],
-                                 data_source['theta'],
-                                 data_source['dtheta']/2.0, # half-widths
+        buff = pixelize_cylinder(data_source['px'],
+                                 data_source['pdx'],
+                                 data_source['py'],
+                                 data_source['pdy'],
                                  size, data_source[field], bounds)
         return buff
 

diff -r 20d4fcc40283217c698762168f98fbe9eb2e29ec -r 821d24988cef8f9163cd73f8acc11d9910367489 yt/geometry/coordinates/geographic_coordinates.py
--- a/yt/geometry/coordinates/geographic_coordinates.py
+++ b/yt/geometry/coordinates/geographic_coordinates.py
@@ -74,40 +74,64 @@
                  function=_SphericalVolume,
                  units = "code_length**3")
 
+        def _path_altitude(field, data):
+            return data["index", "daltitude"]
+        registry.add_field(("index", "path_element_altitude"),
+                 function = _path_altitude,
+                 units = "code_length")
+        def _path_latitude(field, data):
+            # We use r here explicitly
+            return data["index", "r"] * \
+                data["index", "dlatitude"] * np.pi/180.0
+        registry.add_field(("index", "path_element_latitude"),
+                 function = _path_latitude,
+                 units = "code_length")
+        def _path_longitude(field, data):
+            # We use r here explicitly
+            return data["index", "r"] \
+                    * data["index", "dlongitude"] * np.pi/180.0 \
+                    * np.sin((data["index", "latitude"] + 90.0) * np.pi/180.0)
+        registry.add_field(("index", "path_element_longitude"),
+                 function = _path_longitude,
+                 units = "code_length")
+
         # Altitude is the radius from the central zone minus the radius of the
         # surface.
         def _altitude_to_radius(field, data):
             surface_height = data.get_field_parameter("surface_height")
             if surface_height is None:
-                surface_height = getattr(data.ds, "surface_height", 0.0)
+                if hasattr(data.ds, "surface_height"):
+                    surface_height = data.ds.surface_height
+                else:
+                    surface_height = data.ds.quan(0.0, "code_length")
             return data["altitude"] + surface_height
         registry.add_field(("index", "r"),
                  function=_altitude_to_radius,
                  units = "code_length")
         registry.alias(("index", "dr"), ("index", "daltitude"))
 
-        def _longitude_to_theta(field, data):
-            # longitude runs from -180 to 180.
-            return (data["longitude"] + 180) * np.pi/180.0
+        def _latitude_to_theta(field, data):
+            # latitude runs from -90 to 90
+            return (data["latitude"] + 90) * np.pi/180.0
         registry.add_field(("index", "theta"),
-                 function = _longitude_to_theta,
+                 function = _latitude_to_theta,
                  units = "")
-        def _dlongitude_to_dtheta(field, data):
-            return data["dlongitude"] * np.pi/180.0
+        def _dlatitude_to_dtheta(field, data):
+            return data["dlatitude"] * np.pi/180.0
         registry.add_field(("index", "dtheta"),
-                 function = _dlongitude_to_dtheta,
+                 function = _dlatitude_to_dtheta,
                  units = "")
 
-        def _latitude_to_phi(field, data):
-            # latitude runs from -90 to 90
-            return (data["latitude"] + 90) * np.pi/180.0
+        def _longitude_to_phi(field, data):
+            # longitude runs from -180 to 180
+            return (data["longitude"] + 90) * np.pi/180.0
         registry.add_field(("index", "phi"),
-                 function = _latitude_to_phi,
+                 function = _longitude_to_phi,
                  units = "")
-        def _dlatitude_to_dphi(field, data):
-            return data["dlatitude"] * np.pi/180.0
+        def _dlongitude_to_dphi(field, data):
+            return data["dlongitude"] * np.pi/180.0
         registry.add_field(("index", "dphi"),
-                 function = _dlatitude_to_dphi,
+                 function = _dlongitude_to_dphi,
                  units = "")
 
     def pixelize(self, dimension, data_source, field, bounds, size,
@@ -131,20 +155,19 @@
 
     def _cyl_pixelize(self, data_source, field, bounds, size, antialias,
                       dimension):
-        if dimension == 0:
-            buff = pixelize_cylinder(data_source['r'],
-                                     data_source['dr'] / 2.0,
-                                     data_source['theta'],
-                                     data_source['dtheta'] / 2.0, # half-widths
-                                     size, data_source[field], bounds)
-        elif dimension == 1:
-            buff = pixelize_cylinder(data_source['r'],
-                                     data_source['dr'] / 2.0,
-                                     data_source['phi'],
-                                     data_source['dphi'] / 2.0, # half-widths
-                                     size, data_source[field], bounds)
-        else:
-            raise RuntimeError
+        surface_height = data_source.get_field_parameter("surface_height")
+        if surface_height is None:
+            if hasattr(data_source.ds, "surface_height"):
+                surface_height = data_source.ds.surface_height
+            else:
+                surface_height = data_source.ds.quan(0.0, "code_length")
+        r = data_source['py'] + surface_height
+        # Because of the axis-ordering, dimensions 0 and 1 both have r as py
+        # and the angular coordinate as px.
+        buff = pixelize_cylinder(r, data_source['pdy'],
+                                 data_source['px'],
+                                 data_source['pdx'], # half-widths
+                                 size, data_source[field], bounds)
         return buff
 
 

diff -r 20d4fcc40283217c698762168f98fbe9eb2e29ec -r 821d24988cef8f9163cd73f8acc11d9910367489 yt/geometry/coordinates/polar_coordinates.py
--- a/yt/geometry/coordinates/polar_coordinates.py
+++ b/yt/geometry/coordinates/polar_coordinates.py
@@ -15,86 +15,22 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
+from yt.units.yt_array import YTArray
 from .coordinate_handler import \
     CoordinateHandler, \
     _unknown_coord, \
     _get_coord_fields
+from .cylindrical_coordinates import CylindricalCoordinateHandler
+import yt.visualization._MPL as _MPL
 from yt.utilities.lib.misc_utilities import \
     pixelize_cylinder
 
-class PolarCoordinateHandler(CoordinateHandler):
+class PolarCoordinateHandler(CylindricalCoordinateHandler):
 
     def __init__(self, ds, ordering = 'rtz'):
         if ordering != 'rtz': raise NotImplementedError
         super(PolarCoordinateHandler, self).__init__(ds)
 
-    def setup_fields(self, registry):
-        # return the fields for r, z, theta
-        registry.add_field("dx", function=_unknown_coord)
-        registry.add_field("dy", function=_unknown_coord)
-        registry.add_field("x", function=_unknown_coord)
-        registry.add_field("y", function=_unknown_coord)
-
-        f1, f2 = _get_coord_fields(0)
-        registry.add_field(("index", "dr"), function = f1,
-                           display_field = False,
-                           units = "code_length")
-        registry.add_field(("index", "r"), function = f2,
-                           display_field = False,
-                           units = "code_length")
-
-        f1, f2 = _get_coord_fields(1, "")
-        registry.add_field(("index", "dtheta"), function = f1,
-                           display_field = False,
-                           units = "")
-        registry.add_field(("index", "theta"), function = f2,
-                           display_field = False,
-                           units = "")
-
-        f1, f2 = _get_coord_fields(2) 
-        registry.add_field(("index", "dz"), function = f1,
-                           display_field = False,
-                           units = "code_length")
-        registry.add_field(("index", "z"), function = f2,
-                           display_field = False,
-                           units = "code_length")
-
-
-        def _CylindricalVolume(field, data):
-            return data["dtheta"] * data["r"] * data["dr"] * data["dz"]
-        registry.add_field("CellVolume", function=_CylindricalVolume)
-
-    def pixelize(self, dimension, data_source, field, bounds, size, antialias = True):
-        ax_name = self.axis_name[dimension]
-        if ax_name in ('r', 'theta'):
-            return self._ortho_pixelize(data_source, field, bounds, size,
-                                        antialias)
-        elif ax_name == "z":
-            return self._polar_pixelize(data_source, field, bounds, size,
-                                        antialias)
-        else:
-            # Pixelizing along a cylindrical surface is a bit tricky
-            raise NotImplementedError
-
-
-    def _ortho_pixelize(self, data_source, field, bounds, size, antialias):
-        buff = _MPL.Pixelize(data_source['px'], data_source['py'],
-                             data_source['pdx'], data_source['pdy'],
-                             data_source[field], size[0], size[1],
-                             bounds, int(antialias),
-                             True, self.period).transpose()
-        return buff
-
-    def _polar_pixelize(self, data_source, field, bounds, size, antialias):
-        # Out bounds here will *always* be what plot window thinks are x0, x1,
-        # y0, y1, but which will actually be rmin, rmax, thetamin, thetamax.
-        buff = pixelize_cylinder(data_source['r'],
-                                 data_source['dr'],
-                                 data_source['theta'],
-                                 data_source['dtheta'] / 2.0, # half-widths
-                                 size, data_source[field], bounds)
-        return buff
-
     axis_name = { 0  : 'r',  1  : 'theta',  2  : 'z',
                  'r' : 'r', 'theta' : 'theta', 'z' : 'z',
                  'R' : 'r', 'Theta' : 'theta', 'Z' : 'z'}
@@ -108,23 +44,6 @@
     y_axis = { 'r' : 2, 'theta' : 2, 'z' : 1,
                 0  : 2,  1  : 2,  2  : 1}
 
-    _image_axis_name = None
-    @property
-    def image_axis_name(self):    
-        if self._image_axis_name is not None:
-            return self._image_axis_name
-        # This is the x and y axes labels that get displayed.  For
-        # non-Cartesian coordinates, we usually want to override these for
-        # Cartesian coordinates, since we transform them.
-        rv = {0: ('theta', 'z'),
-              1: ('x', 'y'),
-              2: ('r', 'z')}
-        for i in rv.keys():
-            rv[self.axis_name[i]] = rv[i]
-            rv[self.axis_name[i].upper()] = rv[i]
-        self._image_axis_name = rv
-        return rv
-
     def convert_from_cartesian(self, coord):
         return cartesian_to_cylindrical(coord)
 

diff -r 20d4fcc40283217c698762168f98fbe9eb2e29ec -r 821d24988cef8f9163cd73f8acc11d9910367489 yt/geometry/coordinates/spec_cube_coordinates.py
--- a/yt/geometry/coordinates/spec_cube_coordinates.py
+++ b/yt/geometry/coordinates/spec_cube_coordinates.py
@@ -17,6 +17,8 @@
 import numpy as np
 from .cartesian_coordinates import \
     CartesianCoordinateHandler
+from .coordinate_handler import \
+    _get_coord_fields
 
 class SpectralCubeCoordinateHandler(CartesianCoordinateHandler):
 
@@ -58,6 +60,41 @@
         self.axis_field = {}
         self.axis_field[self.ds.spec_axis] = _spec_axis
 
+    def setup_fields(self, registry):
+        if not self.ds.no_cgs_equiv_length:
+            return super(SpectralCubeCoordinateHandler,
+                    self).setup_fields(registry)
+        for axi, ax in enumerate(self.axis_name):
+            f1, f2 = _get_coord_fields(axi)
+            def _get_length_func():
+                def _length_func(field, data):
+                    # Just use axis 0
+                    rv = data.ds.arr(data.fcoords[...,0].copy(), field.units)
+                    rv[:] = 1.0
+                    return rv
+                return _length_func
+            registry.add_field(("index", "d%s" % ax), function = f1,
+                               display_field = False,
+                               units = "code_length")
+            registry.add_field(("index", "path_element_%s" % ax),
+                               function = _get_length_func(),
+                               display_field = False,
+                               units = "")
+            registry.add_field(("index", "%s" % ax), function = f2,
+                               display_field = False,
+                               units = "code_length")
+        def _cell_volume(field, data):
+            rv  = data["index", "dx"].copy(order='K')
+            rv *= data["index", "dy"]
+            rv *= data["index", "dz"]
+            return rv
+        registry.add_field(("index", "cell_volume"), function=_cell_volume,
+                           display_field=False, units = "code_length**3")
+        registry.check_derived_fields(
+            [("index", "dx"), ("index", "dy"), ("index", "dz"),
+             ("index", "x"), ("index", "y"), ("index", "z"),
+             ("index", "cell_volume")])
+
     def convert_to_cylindrical(self, coord):
         raise NotImplementedError
 

diff -r 20d4fcc40283217c698762168f98fbe9eb2e29ec -r 821d24988cef8f9163cd73f8acc11d9910367489 yt/geometry/coordinates/spherical_coordinates.py
--- a/yt/geometry/coordinates/spherical_coordinates.py
+++ b/yt/geometry/coordinates/spherical_coordinates.py
@@ -73,6 +73,26 @@
                  function=_SphericalVolume,
                  units = "code_length**3")
 
+        def _path_r(field, data):
+            return data["index", "dr"]
+        registry.add_field(("index", "path_element_r"),
+                 function = _path_r,
+                 units = "code_length")
+        def _path_theta(field, data):
+            # Note: this already assumes cell-centered
+            return data["index", "r"] * data["index", "dtheta"]
+        registry.add_field(("index", "path_element_theta"),
+                 function = _path_theta,
+                 units = "code_length")
+        def _path_phi(field, data):
+            # Note: this already assumes cell-centered
+            return data["index", "r"] \
+                    * data["index", "dphi"] \
+                    * np.sin(data["index", "theta"])
+        registry.add_field(("index", "path_element_phi"),
+                 function = _path_phi,
+                 units = "code_length")
+
     def pixelize(self, dimension, data_source, field, bounds, size,
                  antialias = True, periodic = True):
         self.period
@@ -87,31 +107,27 @@
 
     def _ortho_pixelize(self, data_source, field, bounds, size, antialias,
                         dim, periodic):
-        # We should be using fcoords
-        period = self.period[:2].copy() # dummy here
-        period[0] = self.period[self.x_axis[dim]]
-        period[1] = self.period[self.y_axis[dim]]
-        period = period.in_units("code_length").d
-        buff = _MPL.Pixelize(data_source['px'], data_source['py'],
-                             data_source['pdx'], data_source['pdy'],
-                             data_source[field], size[0], size[1],
-                             bounds, int(antialias),
-                             period, int(periodic)).transpose()
+        buff = pixelize_aitoff(data_source["py"], data_source["pdy"],
+                               data_source["px"], data_source["pdx"],
+                               size, data_source[field], None,
+                               None, theta_offset = 0,
+                               phi_offset = 0).transpose()
         return buff
 
+
     def _cyl_pixelize(self, data_source, field, bounds, size, antialias,
                       dimension):
         if dimension == 1:
-            buff = pixelize_cylinder(data_source['r'],
-                                     data_source['dr'] / 2.0,
-                                     data_source['phi'],
-                                     data_source['dphi'] / 2.0, # half-widths
+            buff = pixelize_cylinder(data_source['px'],
+                                     data_source['pdx'],
+                                     data_source['py'],
+                                     data_source['pdy'],
                                      size, data_source[field], bounds)
         elif dimension == 2:
-            buff = pixelize_cylinder(data_source['r'],
-                                     data_source['dr'] / 2.0,
-                                     data_source['theta'],
-                                     data_source['dtheta'] / 2.0, # half-widths
+            buff = pixelize_cylinder(data_source['px'],
+                                     data_source['pdx'],
+                                     data_source['py'],
+                                     data_source['pdy'],
                                      size, data_source[field], bounds)
             buff = buff.transpose()
         else:
@@ -219,3 +235,17 @@
             width = [self.ds.domain_width[0],
                      2.0*self.ds.domain_width[0]]
         return width
+
+    def _sanity_check(self):
+        """This prints out a handful of diagnostics that help verify the
+        dataset is well formed."""
+        # We just check a few things here.
+        dd = self.ds.all_data()
+        r0 = self.ds.domain_left_edge[0]
+        r1 = self.ds.domain_right_edge[0]
+        v1 = 4.0 * np.pi / 3.0 * (r1**3 - r0**3)
+        print "Total volume should be 4*pi*r**3 = %0.16e" % (v1)
+        v2 = dd.quantities.total_quantity("cell_volume")
+        print "Actual volume is                   %0.16e" % (v2)
+        print "Relative difference: %0.16e" % (np.abs(v2-v1)/(v2+v1))
+

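_sanity_check can be exercised directly; for a domain covering the full
sphere the cell volumes should sum to 4*pi/3 * (r1**3 - r0**3). A quick
check against the test helper:

    from yt.testing import fake_amr_ds

    ds = fake_amr_ds(geometry="spherical")  # full sphere, r in [0, 1]
    ds.coordinates._sanity_check()  # prints expected vs. actual volume
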
diff -r 20d4fcc40283217c698762168f98fbe9eb2e29ec -r 821d24988cef8f9163cd73f8acc11d9910367489 yt/geometry/coordinates/tests/test_cartesian_coordinates.py
--- /dev/null
+++ b/yt/geometry/coordinates/tests/test_cartesian_coordinates.py
@@ -0,0 +1,27 @@
+# Some tests for the Cartesian coordinates handler
+
+from yt.testing import *
+
+# Our canonical tests are that we can access all of our fields and we can
+# compute our volume correctly.
+
+def test_cartesian_coordinates():
+    # We're going to load up a simple AMR grid and check its volume
+    # calculations and path length calculations.
+    ds = fake_amr_ds()
+    axes = list(set(ds.coordinates.axis_name.values()))
+    axes.sort()
+    for i, axis in enumerate(axes):
+        dd = ds.all_data()
+        fi = ("index", axis)
+        fd = ("index", "d%s" % axis)
+        fp = ("index", "path_element_%s" % axis)
+        ma = np.argmax(dd[fi])
+        yield assert_equal, dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i]
+        mi = np.argmin(dd[fi])
+        yield assert_equal, dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i]
+        yield assert_equal, dd[fd].min(), ds.index.get_smallest_dx()
+        yield assert_equal, dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i]
+        yield assert_equal, dd[fd], dd[fp]
+    yield assert_equal, dd["cell_volume"].sum(dtype="float64"), \
+                        ds.domain_width.prod()

diff -r 20d4fcc40283217c698762168f98fbe9eb2e29ec -r 821d24988cef8f9163cd73f8acc11d9910367489 yt/geometry/coordinates/tests/test_cylindrical_coordinates.py
--- /dev/null
+++ b/yt/geometry/coordinates/tests/test_cylindrical_coordinates.py
@@ -0,0 +1,28 @@
+# Some tests for the Cylindrical coordinates handler
+
+from yt.testing import *
+
+# Our canonical tests are that we can access all of our fields and we can
+# compute our volume correctly.
+
+def test_cylindrical_coordinates():
+    # We're going to load up a simple AMR grid and check its volume
+    # calculations and path length calculations.
+    ds = fake_amr_ds(geometry="cylindrical")
+    axes = ["r", "z", "theta"]
+    for i, axis in enumerate(axes):
+        dd = ds.all_data()
+        fi = ("index", axis)
+        fd = ("index", "d%s" % axis)
+        fp = ("index", "path_element_%s" % axis)
+        ma = np.argmax(dd[fi])
+        yield assert_equal, dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d
+        mi = np.argmin(dd[fi])
+        yield assert_equal, dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d
+        yield assert_equal, dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i].d
+    yield assert_almost_equal, dd["cell_volume"].sum(dtype="float64"), \
+                        np.pi*ds.domain_width[0]**2 * ds.domain_width[1]
+    yield assert_equal, dd["index", "path_element_r"], dd["index", "dr"]
+    yield assert_equal, dd["index", "path_element_z"], dd["index", "dz"]
+    yield assert_equal, dd["index", "path_element_theta"], \
+                        dd["index", "r"] * dd["index", "dtheta"]

diff -r 20d4fcc40283217c698762168f98fbe9eb2e29ec -r 821d24988cef8f9163cd73f8acc11d9910367489 yt/geometry/coordinates/tests/test_geographic_coordinates.py
--- /dev/null
+++ b/yt/geometry/coordinates/tests/test_geographic_coordinates.py
@@ -0,0 +1,52 @@
+# Some tests for the geographic coordinates handler
+
+from yt.testing import *
+
+# Our canonical tests are that we can access all of our fields and we can
+# compute our volume correctly.
+
+def test_geographic_coordinates():
+    # We're going to load up a simple AMR grid and check its volume
+    # calculations and path length calculations.
+
+    # Note that we are setting it up to have an altitude of 1000 maximum, which
+    # means our volume will be that of a shell 1000 wide, starting at r of
+    # whatever our surface_height is set to.
+    ds = fake_amr_ds(geometry="geographic")
+    ds.surface_height = ds.quan(5000, "code_length")
+    axes = ["latitude", "longitude", "altitude"]
+    for i, axis in enumerate(axes):
+        dd = ds.all_data()
+        fi = ("index", axis)
+        fd = ("index", "d%s" % axis)
+        fp = ("index", "path_element_%s" % axis)
+        ma = np.argmax(dd[fi])
+        yield assert_equal, dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d
+        mi = np.argmin(dd[fi])
+        yield assert_equal, dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d
+        yield assert_equal, dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i].d
+    inner_r = ds.surface_height
+    outer_r = ds.surface_height + ds.domain_width[2]
+    yield assert_equal, dd["index","dtheta"], \
+                        dd["index","dlatitude"]*np.pi/180.0
+    yield assert_equal, dd["index","dphi"], \
+                        dd["index","dlongitude"]*np.pi/180.0
+    # Note our terrible agreement here.
+    yield assert_rel_equal, dd["cell_volume"].sum(dtype="float64"), \
+                        (4.0/3.0) * np.pi * (outer_r**3 - inner_r**3), \
+                        3
+    yield assert_equal, dd["index", "path_element_altitude"], \
+                        dd["index", "daltitude"]
+    yield assert_equal, dd["index", "path_element_altitude"], \
+                        dd["index", "dr"]
+    # Note that latitude corresponds to theta, longitude to phi
+    yield assert_equal, dd["index", "path_element_latitude"], \
+                        dd["index", "r"] * \
+                        dd["index", "dlatitude"] * np.pi/180.0
+    yield assert_equal, dd["index", "path_element_longitude"], \
+                        dd["index", "r"] * \
+                        dd["index", "dlongitude"] * np.pi/180.0 * \
+                        np.sin((dd["index", "latitude"] + 90.0) * np.pi/180.0)
+    # We also want to check that our radius is correct
+    yield assert_equal, dd["index","r"], \
+                        dd["index","altitude"] + ds.surface_height

diff -r 20d4fcc40283217c698762168f98fbe9eb2e29ec -r 821d24988cef8f9163cd73f8acc11d9910367489 yt/geometry/coordinates/tests/test_polar_coordinates.py
--- /dev/null
+++ b/yt/geometry/coordinates/tests/test_polar_coordinates.py
@@ -0,0 +1,29 @@
+# Some tests for the polar coordinates handler
+# (Pretty similar to cylindrical, but different ordering)
+
+from yt.testing import *
+
+# Our canonical tests are that we can access all of our fields and we can
+# compute our volume correctly.
+
+def test_polar_coordinates():
+    # We're going to load up a simple AMR grid and check its volume
+    # calculations and path length calculations.
+    ds = fake_amr_ds(geometry="polar")
+    axes = ["r", "theta", "z"]
+    for i, axis in enumerate(axes):
+        dd = ds.all_data()
+        fi = ("index", axis)
+        fd = ("index", "d%s" % axis)
+        fp = ("index", "path_element_%s" % axis)
+        ma = np.argmax(dd[fi])
+        yield assert_equal, dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d
+        mi = np.argmin(dd[fi])
+        yield assert_equal, dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d
+        yield assert_equal, dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i].d
+    yield assert_almost_equal, dd["cell_volume"].sum(dtype="float64"), \
+                        np.pi*ds.domain_width[0]**2 * ds.domain_width[2]
+    yield assert_equal, dd["index", "path_element_r"], dd["index", "dr"]
+    yield assert_equal, dd["index", "path_element_z"], dd["index", "dz"]
+    yield assert_equal, dd["index", "path_element_theta"], \
+                        dd["index", "r"] * dd["index", "dtheta"]

diff -r 20d4fcc40283217c698762168f98fbe9eb2e29ec -r 821d24988cef8f9163cd73f8acc11d9910367489 yt/geometry/coordinates/tests/test_spherical_coordinates.py
--- /dev/null
+++ b/yt/geometry/coordinates/tests/test_spherical_coordinates.py
@@ -0,0 +1,35 @@
+# Some tests for the spherical coordinates handler
+
+from yt.testing import *
+
+# Our canonical tests are that we can access all of our fields and we can
+# compute our volume correctly.
+
+def test_spherical_coordinates():
+    # We're going to load up a simple AMR grid and check its volume
+    # calculations and path length calculations.
+    ds = fake_amr_ds(geometry="spherical")
+    axes = ["r", "theta", "phi"]
+    for i, axis in enumerate(axes):
+        dd = ds.all_data()
+        fi = ("index", axis)
+        fd = ("index", "d%s" % axis)
+        fp = ("index", "path_element_%s" % axis)
+        ma = np.argmax(dd[fi])
+        yield assert_equal, dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d
+        mi = np.argmin(dd[fi])
+        yield assert_equal, dd[fi][mi] - dd[fd][mi] / 2.0, ds.domain_left_edge[i].d
+        yield assert_equal, dd[fd].max(), (ds.domain_width/ds.domain_dimensions)[i].d
+    # Note that we're using a lot of funny transforms to get to this, so we do
+    # not expect to get actual agreement.  This is a bit of a shame, but I
+    # don't think it is avoidable as of right now.  Real datasets will almost
+    # certainly be correct, if this is correct to 3 decimal places.
+    yield assert_almost_equal, dd["cell_volume"].sum(dtype="float64"), \
+                        (4.0/3.0) * np.pi*ds.domain_width[0]**3, \
+                        3 # Allow for GENEROUS error on the volume ...
+    yield assert_equal, dd["index", "path_element_r"], dd["index", "dr"]
+    yield assert_equal, dd["index", "path_element_theta"], \
+                        dd["index", "r"] * dd["index", "dtheta"]
+    yield assert_equal, dd["index", "path_element_phi"], \
+                        dd["index", "r"] * dd["index", "dphi"] * \
+                          np.sin(dd["index","theta"])

diff -r 20d4fcc40283217c698762168f98fbe9eb2e29ec -r 821d24988cef8f9163cd73f8acc11d9910367489 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -179,11 +179,25 @@
     ug = load_uniform_grid(data, ndims, length_unit=length_unit, nprocs=nprocs)
     return ug
 
-def fake_amr_ds(fields = ("Density",)):
+_geom_transforms = {
+    # These are the bounds we want.  Cartesian we just assume goes 0 .. 1.
+    'cartesian'  : ( (0.0, 0.0, 0.0), (1.0, 1.0, 1.0) ),
+    'spherical'  : ( (0.0, 0.0, 0.0), (1.0, np.pi, 2*np.pi) ),
+    'cylindrical': ( (0.0, 0.0, 0.0), (1.0, 1.0, 2.0*np.pi) ), # rzt
+    'polar'      : ( (0.0, 0.0, 0.0), (1.0, 2.0*np.pi, 1.0) ), # rtz
+    'geographic' : ( (-90.0, -180.0, 0.0), (90.0, 180.0, 1000.0) ), # latlonalt
+}
+
+def fake_amr_ds(fields = ("Density",), geometry = "cartesian"):
     from yt.frontends.stream.api import load_amr_grids
+    LE, RE = _geom_transforms[geometry]
+    LE = np.array(LE)
+    RE = np.array(RE)
     data = []
     for gspec in _amr_grid_index:
         level, left_edge, right_edge, dims = gspec
+        left_edge = left_edge * (RE - LE) + LE
+        right_edge = right_edge * (RE - LE) + LE
         gdata = dict(level = level,
                      left_edge = left_edge,
                      right_edge = right_edge,
@@ -191,7 +205,8 @@
         for f in fields:
             gdata[f] = np.random.random(dims)
         data.append(gdata)
-    return load_amr_grids(data, [32, 32, 32], 1.0)
+    bbox = np.array([LE, RE]).T
+    return load_amr_grids(data, [32, 32, 32], 1.0, geometry=geometry, bbox=bbox)
 
 def fake_particle_ds(
         fields = ("particle_position_x",
@@ -225,8 +240,6 @@
     ds = load_particles(data, 1.0, bbox=bbox)
     return ds
 
-
-
 def expand_keywords(keywords, full=False):
     """
     expand_keywords is a means for testing all possible keyword

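The rescaling above maps the unit-cube grid specs in _amr_grid_index onto each
geometry's bounds, so one AMR layout serves every coordinate handler.  Usage
is simply:

    from yt.testing import fake_amr_ds

    # Same grid layout, rescaled to (r, theta, phi) in (0..1, 0..pi, 0..2*pi).
    ds = fake_amr_ds(fields=("Density",), geometry="spherical")
    print(ds.domain_left_edge)   # roughly [0, 0, 0] code_length
    print(ds.domain_right_edge)  # roughly [1, pi, 2*pi] code_length
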
diff -r 20d4fcc40283217c698762168f98fbe9eb2e29ec -r 821d24988cef8f9163cd73f8acc11d9910367489 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -579,7 +579,9 @@
                     np.ndarray[np.float64_t, ndim=1] dphi,
                     buff_size,
                     np.ndarray[np.float64_t, ndim=1] field,
-                    extents, input_img = None):
+                    extents, input_img = None,
+                    np.float64_t theta_offset = 0.0,
+                    np.float64_t phi_offset = 0.0):
     # http://paulbourke.net/geometry/transformationprojection/
     # longitude is -pi to pi
     # latitude is -pi/2 to pi/2
@@ -610,9 +612,9 @@
     dx = 2.0 / (img.shape[0] - 1)
     dy = 2.0 / (img.shape[1] - 1)
     for fi in range(nf):
-        theta_p = theta[fi] - PI
+        theta_p = (theta[fi] + theta_offset) - PI
         dtheta_p = dtheta[fi]
-        phi_p = phi[fi] - PI/2.0
+        phi_p = (phi[fi] + phi_offset) - PI/2.0
         dphi_p = dphi[fi]
         # Four transformations
         aitoff_thetaphi_to_xy(theta_p - dtheta_p, phi_p - dphi_p, &x, &y)

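The new theta_offset and phi_offset arguments let callers whose angles do not
already span the Aitoff convention (longitude -pi..pi, latitude -pi/2..pi/2)
shift them before projection.  In plain Python the per-cell recentring is
equivalent to this sketch (the recenter helper is illustrative, not part of
yt):

    import numpy as np

    def recenter(theta, phi, theta_offset=0.0, phi_offset=0.0):
        # Shift each angle, then recenter on the Aitoff convention:
        # theta_p in (-pi, pi), phi_p in (-pi/2, pi/2).
        theta_p = (theta + theta_offset) - np.pi
        phi_p = (phi + phi_offset) - np.pi / 2.0
        return theta_p, phi_p
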
diff -r 20d4fcc40283217c698762168f98fbe9eb2e29ec -r 821d24988cef8f9163cd73f8acc11d9910367489 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -80,7 +80,11 @@
     104923.1
     """
     _exclude_fields = ('pz','pdz','dx','x','y','z',
-                       ('index','dx'),('index','x'),('index','y'),('index','z'))
+                       'r', 'dr', 'phi', 'dphi', 'theta', 'dtheta',
+                       ('index','dx'),('index','x'),('index','y'),('index','z'),
+                       ('index', 'r'), ('index', 'dr'),
+                       ('index', 'phi'), ('index', 'dphi'),
+                       ('index', 'theta'), ('index', 'dtheta'))
     def __init__(self, data_source, bounds, buff_size, antialias = True,
                  periodic = False):
         self.data_source = data_source

diff -r 20d4fcc40283217c698762168f98fbe9eb2e29ec -r 821d24988cef8f9163cd73f8acc11d9910367489 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -80,7 +80,7 @@
     from pyparsing import ParseFatalException
 
 def get_window_parameters(axis, center, width, ds):
-    if ds.geometry in ("cartesian", "spectral_cube", "spherical"):
+    if ds.geometry in ("cartesian", "spectral_cube"):
         width = ds.coordinates.sanitize_width(axis, width, None)
         center, display_center = ds.coordinates.sanitize_center(center, axis)
     elif ds.geometry in ("polar", "cylindrical"):
@@ -88,6 +88,14 @@
         width = [ds.domain_right_edge[0]*2.0, ds.domain_right_edge[0]*2.0]
         center = ds.arr([0.0, 0.0, 0.0], "code_length")
         display_center = center.copy()
+    elif ds.geometry == "spherical":
+        center, display_center = ds.coordinates.sanitize_center(center, axis)
+        if axis == 0:
+            # r slice: display the full latitude/longitude map
+            width = ds.arr([2*np.pi, np.pi], "code_length")
+        else:
+            width = [2.0*ds.domain_right_edge[0], 2.0*ds.domain_right_edge[0]]
+            center[2] = 0.0
     elif ds.geometry == "geographic":
         c_r = ((ds.domain_right_edge + ds.domain_left_edge)/2.0)[2]
         center = ds.arr([0.0, 0.0, c_r], "code_length")

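The spherical branch above distinguishes two cases: slicing along r unrolls
the whole sphere into a latitude/longitude map (2*pi wide, pi tall), while
slicing along theta or phi displays a disc, so the window must be a square
wide enough to hold the maximal radius.  A sketch of that logic (the
spherical_window_width name is illustrative, not a yt function):

    import numpy as np

    def spherical_window_width(ds, axis):
        if axis == 0:
            # An r slice is shown as a full longitude/latitude map.
            return ds.arr([2 * np.pi, np.pi], "code_length")
        # A theta or phi slice is shown as a disc of radius r_max, so the
        # window is a 2*r_max square centered on the origin.
        r_max = ds.domain_right_edge[0]
        return [2.0 * r_max, 2.0 * r_max]
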
Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because the notification service is enabled for the recipient of
this email.
