[yt-svn] commit/yt: 11 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Mon Oct 5 11:44:01 PDT 2015


11 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/e7578b39e645/
Changeset:   e7578b39e645
Branch:      yt
User:        ngoldbaum
Date:        2015-08-21 17:01:22+00:00
Summary:     Add pr backport script
Affected #:  1 file

diff -r a13764691cbffd5903c7d3972a18bfd04c89bc7e -r e7578b39e64500169f987d82461d7bfb13e7d8b9 scripts/pr_backport.py
--- /dev/null
+++ b/scripts/pr_backport.py
@@ -0,0 +1,268 @@
+import hglib
+import requests
+import shutil
+import tempfile
+
+from datetime import datetime
+from time import strptime, mktime
+
+MERGED_PR_ENDPOINT = ("http://bitbucket.org/api/2.0/repositories/yt_analysis/"
+                      "yt/pullrequests/?state=MERGED")
+
+YT_REPO = "https://bitbucket.org/yt_analysis/yt"
+
+
+def clone_new_repo(source=None):
+    """Clones a new copy of yt_analysis/yt and returns a path to it"""
+    path = tempfile.mkdtemp()
+    dest_repo_path = path+'/yt-backport'
+    if source is None:
+        source = YT_REPO
+    hglib.clone(source=source, dest=dest_repo_path, updaterev='yt')
+    return dest_repo_path
+
+
+def get_first_commit_after_release(repo_path):
+    """Returns the changeset info of the first commit to the yt branch that
+    wasn't included in the last tagged release.
+    """
+    with hglib.open(repo_path) as client:
+        most_recent_tag = client.log("reverse(tag())")[0]
+        tag_name = most_recent_tag[2]
+        last_before_release = client.log(
+            "last(ancestors(%s) and branch(yt))" % tag_name)
+        first_after_release = client.log(
+            "first(descendants(%s) and branch(yt) and not %s)"
+            % (last_before_release[0][1], last_before_release[0][1]))
+    return first_after_release[0]
+
+
+def get_branch_tip(repo_path, branch):
+    """Returns the SHA1 hash of the most recent commit on the given branch"""
+    with hglib.open(repo_path) as client:
+        change = client.identify(rev=branch, id=True)
+        change = change.strip('\n')
+    return change
+
+
+def get_lineage_between_release_and_tip(repo_path, first, last):
+    """Returns the lineage of changesets that were at one point the public tip"""
+    fhash = first[1]
+    with hglib.open(repo_path) as client:
+        return client.log("%s::%s and p1(%s::%s) + %s"
+                          % (fhash, last, fhash, last, last))
+
+
+def get_pull_requests_since_last_release(first):
+    """Returns a list of pull requests made since the last tagged release"""
+    r = requests.get(MERGED_PR_ENDPOINT)
+    done = False
+    merged_prs = []
+    while not done:
+        if r.status_code != 200:
+            raise RuntimeError
+        data = r.json()
+        prs = data['values']
+        for pr in prs:
+            activity = requests.get(pr['links']['activity']['href']).json()
+            merge_date = None
+            for action in activity['values']:
+                if 'update' in action and action['update']['state'] == 'MERGED':
+                    merge_date = action['update']['date']
+                    merge_date = merge_date.split('.')[0]
+                    timestamp = mktime(strptime(merge_date, "%Y-%m-%dT%H:%M:%S"))
+                    merge_date = datetime.fromtimestamp(timestamp)
+                    break
+            if merge_date is None:
+                break
+            if merge_date < first[6]:
+                break
+            merged_prs.append(pr)
+        if merge_date is not None and merge_date < first[6]:
+            done = True
+        r = requests.get(data['next'])
+    return merged_prs
+
+
+def cache_commit_data(prs):
+    """Avoid repeated calls to bitbucket API to get the list of commits per PR"""
+    commit_data = {}
+    for pr in prs:
+        data = requests.get(pr['links']['commits']['href']).json()
+        done = False
+        commits = []
+        while not done:
+            commits.extend(data['values'])
+            if 'next' not in data:
+                done = True
+            else:
+                data = requests.get(data['next']).json()
+        commit_data[pr['id']] = commits
+    return commit_data
+
+
+def find_commit_in_prs(needle, commit_data, prs):
+    """Finds the commit `needle` in the commit_data dictionary
+
+    If found, returns the pr the needle commit is in. If the commit was not
+    part of the PRs in the dictionary, returns None.
+    """
+    for pr_id in commit_data:
+        commits = commit_data[pr_id]
+        for commit in commits:
+            if commit['hash'] == needle[1]:
+                pr = [pr for pr in prs if pr['id'] == pr_id][0]
+                return pr
+    return None
+
+
+def find_merge_commit_in_prs(needle, prs):
+    """Find the merge commit `needle` in the list of `prs`
+
+    If found, returns the pr the merge commit comes from. If not found, raises a
+    RuntimeError, since all merge commits are supposed to be associated with a
+    PR.
+    """
+    for pr in prs[::-1]:
+        if pr['merge_commit'] is not None:
+            if pr['merge_commit']['hash'] == needle[1][:12]:
+                return pr
+    raise RuntimeError
+
+
+def create_commits_to_prs_mapping(lineage, prs):
+    """create a mapping from commits to the pull requests that the commit is
+    part of
+    """
+    commits_to_prs = {}
+    # make a copy of this list to avoid side effects from calling this function
+    my_prs = list(prs)
+    commit_data = cache_commit_data(my_prs)
+    for commit in lineage:
+        cset_hash = commit[1]
+        message = commit[5]
+        if message.startswith('Merged in') and '(pull request #' in message:
+            commits_to_prs[cset_hash] = find_merge_commit_in_prs(commit, my_prs)
+            # Since we know this PR won't have another commit associated with it,
+            # remove from global list to reduce number of network accesses
+            my_prs.remove(commits_to_prs[cset_hash])
+        else:
+            pr = find_commit_in_prs(commit, commit_data, my_prs)
+            commits_to_prs[cset_hash] = pr
+        if commits_to_prs[cset_hash] is None:
+            continue
+    return commits_to_prs
+
+
+def invert_commits_to_prs_mapping(commits_to_prs):
+    """invert the mapping from individual commits to pull requests"""
+    inv_map = {}
+    for k, v in commits_to_prs.iteritems():
+        # can't save v itself in inv_map since it's an unhashable dictionary
+        if v is not None:
+            created_date = v['created_on'].split('.')[0]
+            timestamp = mktime(strptime(created_date, "%Y-%m-%dT%H:%M:%S"))
+            created_date = datetime.fromtimestamp(timestamp)
+            pr_desc = (v['id'], v['title'], created_date,
+                       v['links']['html']['href'], v['description'])
+        else:
+            pr_desc = None
+        inv_map[pr_desc] = inv_map.get(pr_desc, [])
+        inv_map[pr_desc].append(k)
+    return inv_map
+
+
+def get_last_descendant(repo_path, commit):
+    """get the most recent descendant of a commit"""
+    with hglib.open(repo_path) as client:
+        com = client.log('last(%s::)' % commit)
+    return com[0][1][:12]
+
+
+def get_no_pr_commits(repo_path, inv_map):
+    """get a list of commits that aren't in any pull request"""
+    no_pr_commits = inv_map[None]
+    del inv_map[None]
+    with hglib.open(repo_path) as client:
+        # remove merge commits since they can't be grafted
+        no_pr_commits = [com for com in no_pr_commits if
+                         len(client.log('%s and merge()' % com)) == 0]
+    return no_pr_commits
+
+
+def backport_no_pr_commits(repo_path, no_pr_commits):
+    """backports commits that aren't in a pull request"""
+    for commit in no_pr_commits:
+        with hglib.open(repo_path) as client:
+            client.update('stable')
+            commit_info = client.log(commit)[0]
+            commit_info = (commit_info[1][:12], commit_info[4], commit_info[5])
+            print "Commit %s by %s\n%s" % commit_info
+        print ""
+        print "To backport issue the following command:"
+        print ""
+        print "hg graft %s\n" % commit_info[0]
+        raw_input('Press any key to continue')
+        print ""
+
+
+def backport_pr_commits(repo_path, inv_map, last_stable, prs):
+    """backports pull requests to the stable branch.
+
+    Accepts a dictionary mapping pull requests to a list of commits that
+    are in the pull request.
+    """
+    pr_list = inv_map.keys()
+    pr_list = sorted(pr_list, key=lambda x: x[2])
+    for pr_desc in pr_list:
+        print "PR #%s\nTitle: %s\nCreated on: %s\nLink: %s\n%s" % pr_desc
+        print "To backport, issue the following command(s):\n"
+        pr = [pr for pr in prs if pr['id'] == pr_desc[0]][0]
+        data = requests.get(pr['links']['commits']['href']).json()
+        commits = data['values']
+        while 'next' in data:
+            data = requests.get(data['next']).json()
+            commits.extend(data['values'])
+        commits = [com['hash'][:12] for com in commits]
+        if len(commits) > 1:
+            revset = commits[-1] + '::' + commits[0]
+            message = "Backporting PR #%s %s" % \
+                (pr['id'], pr['links']['html']['href'])
+            dest = get_last_descendant(repo_path, last_stable)
+            message = \
+                "hg rebase -r %s --keep --collapse -m \"%s\" -d %s\n" % \
+                (revset, message, dest)
+            message += "hg update stable\n"
+        else:
+            message = "hg graft %s\n" % commits[0]
+        print message
+        raw_input('Press any key to continue')
+
+
+if __name__ == "__main__":
+    print ""
+    print "Gathering PR information, this may take a minute."
+    print "Don't worry, yt loves you."
+    print ""
+    repo_path = clone_new_repo()
+    try:
+        first_dev = get_first_commit_after_release(repo_path)
+        last_dev = get_branch_tip(repo_path, 'yt')
+        last_stable = get_branch_tip(repo_path, 'stable')
+        lineage = get_lineage_between_release_and_tip(
+            repo_path, first_dev, last_dev)
+        prs = get_pull_requests_since_last_release(first_dev)
+        commits_to_prs = create_commits_to_prs_mapping(lineage, prs)
+        inv_map = invert_commits_to_prs_mapping(commits_to_prs)
+        no_pr_commits = get_no_pr_commits(repo_path, inv_map)
+        print "In another terminal window, navigate to the following path:"
+        print "%s" % repo_path
+        raw_input("Press any key to continue")
+        backport_no_pr_commits(repo_path, no_pr_commits)
+        backport_pr_commits(repo_path, inv_map, last_stable, prs)
+        raw_input(
+            "Now you need to push your backported changes. The temporary\n"
+            "repository currently being used will be deleted as soon as you\n"
+            "press any key.")
+    finally:
+        shutil.rmtree(repo_path)
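
The script walks Bitbucket's 2.0 REST API, whose responses carry their payload
under 'values' and, while more pages remain, a 'next' URL to follow; that is
the pagination pattern get_pull_requests_since_last_release and
cache_commit_data implement. A minimal standalone sketch of the same pattern
(illustrative only, not part of the changeset; Python 2 to match the script):

    import requests

    def iter_paginated(url):
        """Yield every item from a paginated Bitbucket 2.0 API endpoint."""
        while url is not None:
            resp = requests.get(url)
            if resp.status_code != 200:
                raise RuntimeError("Bitbucket API request failed: %s" % url)
            data = resp.json()
            for item in data['values']:
                yield item
            url = data.get('next')  # absent on the last page

    # e.g. every merged yt pull request, newest first:
    # for pr in iter_paginated(MERGED_PR_ENDPOINT):
    #     print pr['id'], pr['title']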


https://bitbucket.org/yt_analysis/yt/commits/9f0cdb36e784/
Changeset:   9f0cdb36e784
Branch:      yt
User:        ngoldbaum
Date:        2015-08-28 17:19:36+00:00
Summary:     Merging with mainline
Affected #:  203 files

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -38,6 +38,7 @@
 yt/utilities/lib/image_utilities.c
 yt/utilities/lib/Interpolators.c
 yt/utilities/lib/kdtree.c
+yt/utilities/lib/line_integral_convolution.c
 yt/utilities/lib/mesh_utilities.c
 yt/utilities/lib/misc_utilities.c
 yt/utilities/lib/Octree.c

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,5 +1,8 @@
 include distribute_setup.py README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt
-recursive-include yt/gui/reason/html *.html *.png *.ico *.js *.gif *.css
+include yt/visualization/mapserver/html/map_index.html
+include yt/visualization/mapserver/html/leaflet/*.css
+include yt/visualization/mapserver/html/leaflet/*.js
+include yt/visualization/mapserver/html/leaflet/images/*.png
 recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE* *.cu
 recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.inc *.html
 recursive-include doc *.h *.c *.sh *.svgz *.pdf *.svg *.pyx
@@ -10,5 +13,4 @@
 recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
 prune yt/frontends/_skeleton
 prune tests
-graft yt/gui/reason/html/resources
 exclude clean.sh .hgchurn

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -644,7 +644,6 @@
 echo '609cc82586fabecb25f25ecb410f2938e01d21cde85dd3f8824fe55c6edde9ecf3b7609195473d3fa05a16b9b121464f5414db1a0187103b78ea6edfa71684a7  Python-3.4.3.tgz' > Python-3.4.3.tgz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
-echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
 echo '4a83f9ae1855a7fad90133b327d426201c8ccfd2e7fbe9f39b2d61a2eee2f3ebe2ea02cf80f3d4e1ad659f8e790c173df8cc99b87d0b7ce63d34aa88cfdc7939  h5py-2.5.0.tar.gz' > h5py-2.5.0.tar.gz.sha512
 echo '4073fba510ccadaba41db0939f909613c9cb52ba8fb6c1062fc9118edc601394c75e102310be1af4077d07c9b327e6bbb1a6359939a7268dc140382d0c1e0199  hdf5-1.8.14.tar.gz' > hdf5-1.8.14.tar.gz.sha512
@@ -686,7 +685,6 @@
 get_ytproject $IPYTHON.tar.gz
 get_ytproject $H5PY.tar.gz
 get_ytproject $CYTHON.tar.gz
-get_ytproject reason-js-20120623.zip
 get_ytproject $NOSE.tar.gz
 get_ytproject $PYTHON_HGLIB.tar.gz
 get_ytproject $SYMPY.tar.gz

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 doc/source/_static/custom.css
--- a/doc/source/_static/custom.css
+++ b/doc/source/_static/custom.css
@@ -109,3 +109,8 @@
 .table {
     width: 50%
 }
+
+
+.navbar-form.navbar-right:last-child {
+    margin-right: -20px;
+}

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -476,6 +476,7 @@
 .. code:: python
 
    import yt
+   import numpy as np
    from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, mp
    from yt.utilities.cosmology import Cosmology
    from yt.analysis_modules.photon_simulator.api import *
@@ -548,14 +549,15 @@
                                0.01, 20.0, 20000)
    abs_model = TableAbsorbModel("tbabs_table.h5", 0.1)
 
-   thermal_model = ThermalPhotonModel(apec_model)
+   thermal_model = ThermalPhotonModel(apec_model, photons_per_chunk=40000000)
    photons = PhotonList.from_scratch(sphere, redshift, A,
                                      exp_time, thermal_model, center="c")
 
 
    events = photons.project_photons([0.0,0.0,1.0], 
                                     responses=["sim_arf.fits","sim_rmf.fits"], 
-                                    absorb_model=abs_model)
+                                    absorb_model=abs_model,
+                                    north_vector=[0.0,1.0,0.0])
 
    events.write_fits_image("img.fits", clobber=True)
 

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 doc/source/visualizing/callbacks.rst
--- a/doc/source/visualizing/callbacks.rst
+++ b/doc/source/visualizing/callbacks.rst
@@ -597,23 +597,27 @@
 
 Add a Physical Scale Bar
 ~~~~~~~~~~~~~~~~~~~~~~~~
-
 .. function:: annotate_scale(corner='lower_right', coeff=None, \
-                             unit=None, pos=None, max_frac=0.2, \
-                             min_frac=0.018, text_args=None, \
-                             inset_box_args=None)
+                             unit=None, pos=None, max_frac=0.16, \
+                             min_frac=0.015, coord_system='axis', \
+                             text_args=None, size_bar_args=None, \
+                             draw_inset_box=False, inset_box_args=None)
 
    (This is a proxy for
    :class:`~yt.visualization.plot_modifications.ScaleCallback`.)
 
     Annotates the scale of the plot at a specified location in the image
     (either in a preset corner, or by specifying (x,y) image coordinates with
-    the pos argument.  Coeff and units (e.g. 1 Mpc) refer to the distance scale
-    you desire to show on the plot.  If no coeff and units are specified,
-    an appropriate pair will be determined such that your scale bar is never
-    smaller than min_frac or greater than max_frac of your plottable axis
-    length.  For additional text and plot arguments for the text and line,
-    include them as dictionaries to pass to text_args and plot_args.
+    the pos argument.  Coeff and units (e.g. 1 Mpc or 100 kpc) refer to the
+    distance scale you desire to show on the plot.  If no coeff and units are
+    specified, an appropriate pair will be determined such that your scale bar
+    is never smaller than min_frac or greater than max_frac of your plottable
+    axis length.  Additional customization of the scale bar is possible by
+    adjusting the text_args and size_bar_args dictionaries.  The text_args
+    dictionary accepts matplotlib's font_properties arguments to override
+    the default font_properties for the current plot.  The size_bar_args
+    dictionary accepts keyword arguments for the AnchoredSizeBar class in
+    matplotlib's axes_grid toolkit.
 
 .. python-script::
 

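As a quick illustration of the signature documented above (not part of the
changeset; assumes the standard IsolatedGalaxy sample dataset is available):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    slc = yt.SlicePlot(ds, "z", "density")
    # draw a 100 kpc scale bar in the lower-right corner of the image
    slc.annotate_scale(corner="lower_right", coeff=100, unit="kpc")
    slc.save()
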
diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 doc/source/visualizing/ffmpeg_volume_rendering.py
--- a/doc/source/visualizing/ffmpeg_volume_rendering.py
+++ /dev/null
@@ -1,99 +0,0 @@
-#This is an example of how to make videos of 
-#uniform grid data using Theia and ffmpeg
-
-#The Scene object to hold the ray caster and view camera
-from yt.visualization.volume_rendering.theia.scene import TheiaScene
-
-#GPU based raycasting algorithm to use 
-from yt.visualization.volume_rendering.theia.algorithms.front_to_back import FrontToBackRaycaster
-
-#These will be used to define how to color the data
-from yt.visualization.volume_rendering.transfer_functions import ColorTransferFunction
-from yt.visualization.color_maps import *
-
-#This will be used to launch ffmpeg
-import subprocess as sp
-
-#Of course we need numpy for math magic
-import numpy as np
-
-#Opacity scaling function
-def scale_func(v, mi, ma):
-      return  np.minimum(1.0, (v-mi)/(ma-mi) + 0.0)
-
-#load the uniform grid from a numpy array file
-bolshoi = "/home/bogert/log_densities_1024.npy"
-density_grid = np.load(bolshoi)
-
-#Set the TheiaScene to use the density_grid and 
-#setup the raycaster for a resulting 1080p image
-ts = TheiaScene(volume = density_grid, raycaster = FrontToBackRaycaster(size = (1920,1080) ))
-
-#the min and max values in the data to color
-mi, ma = 0.0, 3.6
-
-#setup colortransferfunction
-bins = 5000
-tf = ColorTransferFunction( (mi, ma), bins)
-tf.map_to_colormap(0.5, ma, colormap="spring", scale_func = scale_func)
-
-#pass the transfer function to the ray caster
-ts.source.raycaster.set_transfer(tf)
-
-#Initial configuration for start of video
-#set initial opacity and brightness values
-#then zoom into the center of the data 30%
-ts.source.raycaster.set_opacity(0.03)
-ts.source.raycaster.set_brightness(2.3)
-ts.camera.zoom(30.0)
-
-#path to ffmpeg executable
-FFMPEG_BIN = "/usr/local/bin/ffmpeg"
-
-pipe = sp.Popen([ FFMPEG_BIN,
-        '-y', # (optional) overwrite the output file if it already exists
-	#This must be set to rawvideo because the image is an array
-        '-f', 'rawvideo', 
-	#This must be set to rawvideo because the image is an array
-        '-vcodec','rawvideo',
-	#The size of the image array and resulting video
-        '-s', '1920x1080', 
-	#This must be rgba to match array format (uint32)
-        '-pix_fmt', 'rgba',
-	#frame rate of video
-        '-r', '29.97', 
-        #Indicate that the input to ffmpeg comes from a pipe
-        '-i', '-', 
-        # Tells FFMPEG not to expect any audio
-        '-an', 
-        #Setup video encoder
-	#Use any encoder you life available from ffmpeg
-        '-vcodec', 'libx264', '-preset', 'ultrafast', '-qp', '0',
-        '-pix_fmt', 'yuv420p',
-        #Name of the output
-        'bolshoiplanck2.mkv' ],
-        stdin=sp.PIPE,stdout=sp.PIPE)
-		
-		
-#Now we loop and produce 500 frames
-for k in range (0,500) :
-    #update the scene resulting in a new image
-    ts.update()
-
-    #get the image array from the ray caster
-    array = ts.source.get_results()
-
-    #send the image array to ffmpeg
-    array.tofile(pipe.stdin)
-
-    #rotate the scene by 0.01 rads in x,y & z
-    ts.camera.rotateX(0.01)
-    ts.camera.rotateZ(0.01)
-    ts.camera.rotateY(0.01)
-
-    #zoom in 0.01% for a total of a 5% zoom
-    ts.camera.zoom(0.01)
-
-
-#Close the pipe to ffmpeg
-pipe.terminate()

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 doc/source/visualizing/hardware_volume_rendering.rst
--- a/doc/source/visualizing/hardware_volume_rendering.rst
+++ /dev/null
@@ -1,89 +0,0 @@
-.. _hardware_volume_rendering:
-
-Hardware Volume Rendering on NVidia Graphics cards
---------------------------------------------------
-
-Theia is a hardware volume renderer that takes advantage of NVidias CUDA language
-to peform ray casting with GPUs instead of the CPU. 
-
-Only unigrid rendering is supported, but yt provides a grid mapping function
-to get unigrid data from amr or sph formats, see :ref:`extract_frb`.
-
-System Requirements
-+++++++++++++++++++
-
-Nvidia graphics card - The memory limit of the graphics card sets the limit
-                       on the size of the data source.
-
-CUDA 5 or later and
-
-The environment variable CUDA_SAMPLES must be set pointing to
-the common/inc samples shipped with CUDA. The following shows an example
-in bash with CUDA 5.5 installed in /usr/local :
-
-    export CUDA_SAMPLES=/usr/local/cuda-5.5/samples/common/inc
-
-PyCUDA must also be installed to use Theia. 
-
-PyCUDA can be installed following these instructions :
-
-    git clone --recursive http://git.tiker.net/trees/pycuda.git
-
-    python configure.py
-    python setup.py install
-
-
-Tutorial
-++++++++
-
-Currently rendering only works on uniform grids. Here is an example
-on a 1024 cube of float32 scalars.
-
-.. code-block:: python
-
-   from yt.visualization.volume_rendering.theia.scene import TheiaScene
-   from yt.visualization.volume_rendering.algorithms.front_to_back import FrontToBackRaycaster
-   import numpy as np
-
-   #load 3D numpy array of float32
-   volume = np.load("/home/bogert/log_densities_1024.npy")
-
-   scene = TheiaScene( volume = volume, raycaster = FrontToBackRaycaster() )
-
-   scene.camera.rotateX(1.0)
-   scene.update()
-
-   surface = scene.get_results()
-   #surface now contains an image array 2x2 int32 rbga values
-
-.. _the-theiascene-interface:
-
-The TheiaScene Interface
-++++++++++++++++++++++++
-
-A TheiaScene object has been created to provide a high level entry point for
-controlling the raycaster's view onto the data. The class
-:class:`~yt.visualization.volume_rendering.theia.TheiaScene` encapsulates a
-Camera object and a TheiaSource that intern encapsulates a volume. The
-:class:`~yt.visualization.volume_rendering.theia.Camera` provides controls for
-rotating, translating, and zooming into the volume.  Using the
-:class:`~yt.visualization.volume_rendering.theia.TheiaSource` automatically
-transfers the volume to the graphic's card texture memory.
-
-Example Cookbooks
-+++++++++++++++++
-
-OpenGL Example for interactive volume rendering:
-
-.. literalinclude:: opengl_volume_rendering.py
-
-.. warning::  Frame rate will suffer significantly from stereoscopic rendering.
-              ~2x slower since the volume must be rendered twice.
-
-OpenGL Stereoscopic Example: 
-
-.. literalinclude:: opengl_stereo_volume_rendering.py
-
-Pseudo-Realtime video rendering with ffmpeg:
-
-.. literalinclude:: ffmpeg_volume_rendering.py

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 doc/source/visualizing/index.rst
--- a/doc/source/visualizing/index.rst
+++ b/doc/source/visualizing/index.rst
@@ -15,7 +15,6 @@
    callbacks
    manual_plotting
    volume_rendering
-   hardware_volume_rendering
    sketchfab
    mapserver
    streamlines

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 doc/source/visualizing/opengl_stereo_volume_rendering.py
--- a/doc/source/visualizing/opengl_stereo_volume_rendering.py
+++ /dev/null
@@ -1,370 +0,0 @@
-from OpenGL.GL import *
-from OpenGL.GLUT import *
-from OpenGL.GLU import *
-from OpenGL.GL.ARB.vertex_buffer_object import *
-
-import sys, time
-import numpy as np
-import pycuda.driver as cuda_driver
-import pycuda.gl as cuda_gl
-
-from yt.visualization.volume_rendering.theia.scene import TheiaScene
-from yt.visualization.volume_rendering.theia.algorithms.front_to_back import FrontToBackRaycaster
-from yt.visualization.volume_rendering.transfer_functions import ColorTransferFunction
-from yt.visualization.color_maps import *
-
-import numexpr as ne
-
-window = None     # Number of the glut window.
-rot_enabled = True
-
-#Theia Scene
-ts = None
-
-#RAY CASTING values
-c_tbrightness = 1.0
-c_tdensity = 0.05
-
-output_texture = None # pointer to offscreen render target
-
-leftButton = False
-middleButton = False
-rightButton = False
-
-#Screen width and height
-width = 1920
-height = 1080
-
-eyesep = 0.1
-
-(pbo, pycuda_pbo) = [None]*2
-(rpbo, rpycuda_pbo) = [None]*2
-
-#create 2 PBO for stereo scopic rendering
-def create_PBO(w, h):
-    global pbo, pycuda_pbo, rpbo, rpycuda_pbo
-    num_texels = w*h
-    array = np.zeros((num_texels, 3),np.float32)
-
-    pbo = glGenBuffers(1)
-    glBindBuffer(GL_ARRAY_BUFFER, pbo)
-    glBufferData(GL_ARRAY_BUFFER, array, GL_DYNAMIC_DRAW)
-    glBindBuffer(GL_ARRAY_BUFFER, 0)
-    pycuda_pbo = cuda_gl.RegisteredBuffer(long(pbo))
-
-    rpbo = glGenBuffers(1)
-    glBindBuffer(GL_ARRAY_BUFFER, rpbo)
-    glBufferData(GL_ARRAY_BUFFER, array, GL_DYNAMIC_DRAW)
-    glBindBuffer(GL_ARRAY_BUFFER, 0)
-    rpycuda_pbo = cuda_gl.RegisteredBuffer(long(rpbo))
-
-def destroy_PBO(self):
-    global pbo, pycuda_pbo, rpbo, rpycuda_pbo
-    glBindBuffer(GL_ARRAY_BUFFER, long(pbo))
-    glDeleteBuffers(1, long(pbo));
-    glBindBuffer(GL_ARRAY_BUFFER, 0)
-    pbo,pycuda_pbo = [None]*2
-
-    glBindBuffer(GL_ARRAY_BUFFER, long(rpbo))
-    glDeleteBuffers(1, long(rpbo));
-    glBindBuffer(GL_ARRAY_BUFFER, 0)
-    rpbo,rpycuda_pbo = [None]*2
-
-#consistent with C initPixelBuffer()
-def create_texture(w,h):
-    global output_texture
-    output_texture = glGenTextures(1)
-    glBindTexture(GL_TEXTURE_2D, output_texture)
-    # set basic parameters
-    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
-    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
-    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
-    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
-    # buffer data
-    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB,
-                 w, h, 0, GL_RGB, GL_FLOAT, None)
-
-#consistent with C initPixelBuffer()
-def destroy_texture():
-    global output_texture
-    glDeleteTextures(output_texture);
-    output_texture = None
-
-def init_gl(w = 512 , h = 512):
-    Width, Height = (w, h)
-
-    glClearColor(0.1, 0.1, 0.5, 1.0)
-    glDisable(GL_DEPTH_TEST)
-
-    #matrix functions
-    glViewport(0, 0, Width, Height)
-    glMatrixMode(GL_PROJECTION);
-    glLoadIdentity();
-
-    #matrix functions
-    gluPerspective(60.0, Width/float(Height), 0.1, 10.0)
-    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
-
-def resize(Width, Height):
-    global width, height
-    (width, height) = Width, Height
-    glViewport(0, 0, Width, Height)        # Reset The Current Viewport And Perspective Transformation
-    glMatrixMode(GL_PROJECTION)
-    glLoadIdentity()
-    gluPerspective(60.0, Width/float(Height), 0.1, 10.0)
-
-
-def do_tick():
-    global time_of_last_titleupdate, frame_counter, frames_per_second
-    if ((time.clock () * 1000.0) - time_of_last_titleupdate >= 1000.):
-        frames_per_second = frame_counter                   # Save The FPS
-        frame_counter = 0  # Reset The FPS Counter
-        szTitle = "%d FPS" % (frames_per_second )
-        glutSetWindowTitle ( szTitle )
-        time_of_last_titleupdate = time.clock () * 1000.0
-    frame_counter += 1
-
-oldMousePos = [ 0, 0 ]
-def mouseButton( button, mode, x, y ):
-	"""Callback function (mouse button pressed or released).
-
-	The current and old mouse positions are stored in
-	a	global renderParam and a global list respectively"""
-
-	global leftButton, middleButton, rightButton, oldMousePos
-
-        if button == GLUT_LEFT_BUTTON:
-	    if mode == GLUT_DOWN:
-	        leftButton = True
-            else:
-		leftButton = False
-
-        if button == GLUT_MIDDLE_BUTTON:
-	    if mode == GLUT_DOWN:
-	        middleButton = True
-            else:
-		middleButton = False
-
-        if button == GLUT_RIGHT_BUTTON:
-	    if mode == GLUT_DOWN:
-	        rightButton = True
-            else:
-		rightButton = False
-
-	oldMousePos[0], oldMousePos[1] = x, y
-	glutPostRedisplay( )
-
-def mouseMotion( x, y ):
-	"""Callback function (mouse moved while button is pressed).
-
-	The current and old mouse positions are stored in
-	a	global renderParam and a global list respectively.
-	The global translation vector is updated according to
-	the movement of the mouse pointer."""
-
-	global ts, leftButton, middleButton, rightButton, oldMousePos
-	deltaX = x - oldMousePos[ 0 ]
-	deltaY = y - oldMousePos[ 1 ]
-
-	factor = 0.001
-
-	if leftButton == True:
-            ts.camera.rotateX( - deltaY * factor)
-            ts.camera.rotateY( - deltaX * factor)
-	if middleButton == True:
-	    ts.camera.translateX( deltaX* 2.0 * factor)
-	    ts.camera.translateY( - deltaY* 2.0 * factor)
-	if rightButton == True:
-	    ts.camera.scale += deltaY * factor
-
-	oldMousePos[0], oldMousePos[1] = x, y
-	glutPostRedisplay( )
-
-def keyPressed(*args):
-    global c_tbrightness, c_tdensity, eyesep
-    # If escape is pressed, kill everything.
-    if args[0] == '\033':
-        print('Closing..')
-        destroy_PBOs()
-        destroy_texture()
-        exit()
-
-    #change the brightness of the scene
-    elif args[0] == ']':
-        c_tbrightness += 0.025
-    elif args[0] == '[':
-        c_tbrightness -= 0.025
-
-    #change the density scale
-    elif args[0] == ';':
-        c_tdensity -= 0.001
-    elif args[0] == '\'':
-        c_tdensity += 0.001 
-
-    #change the transfer scale
-    elif args[0] == '-':
-        eyesep -= 0.01
-    elif args[0] == '=':
-        eyesep += 0.01 
-
-def idle():
-    glutPostRedisplay()
-
-def display():
-    try:
-        #process left eye
-        process_image()
-        display_image()
-
-        #process right eye
-        process_image(eye = False)
-        display_image(eye = False)
-
-
-        glutSwapBuffers()
-
-    except:
-        from traceback import print_exc
-        print_exc()
-        from os import _exit
-        _exit(0)
-
-def process(eye = True):
-    global ts, pycuda_pbo, rpycuda_pbo, eyesep, c_tbrightness, c_tdensity
-    """ Use PyCuda """
-
-    ts.get_raycaster().set_opacity(c_tdensity)
-    ts.get_raycaster().set_brightness(c_tbrightness)
-
-    if (eye) :
-        ts.camera.translateX(-eyesep)
-        dest_mapping = pycuda_pbo.map()
-        (dev_ptr, size) = dest_mapping.device_ptr_and_size()
-        ts.get_raycaster().surface.device_ptr = dev_ptr
-        ts.update()
-        dest_mapping.unmap()
-        ts.camera.translateX(eyesep)
-    else :
-        ts.camera.translateX(eyesep)
-        dest_mapping = rpycuda_pbo.map()
-        (dev_ptr, size) = dest_mapping.device_ptr_and_size()
-        ts.get_raycaster().surface.device_ptr = dev_ptr
-        ts.update()
-        dest_mapping.unmap()
-        ts.camera.translateX(-eyesep)
-
-
-def process_image(eye =  True):
-    global output_texture, pbo, rpbo, width, height
-    """ copy image and process using CUDA """
-    # run the Cuda kernel
-    process(eye)
-    # download texture from PBO
-    if (eye) : 
-        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, np.uint64(pbo))
-        glBindTexture(GL_TEXTURE_2D, output_texture)
-
-        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB,
-                 width, height, 0,
-                 GL_RGB, GL_FLOAT, None)
-    else :
-        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, np.uint64(rpbo))
-        glBindTexture(GL_TEXTURE_2D, output_texture)
-
-        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB,
-                 width, height, 0,
-                 GL_RGB, GL_FLOAT, None)
-
-def display_image(eye = True):
-    global width, height
-    """ render a screen sized quad """
-    glDisable(GL_DEPTH_TEST)
-    glDisable(GL_LIGHTING)
-    glEnable(GL_TEXTURE_2D)
-    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE)
-
-    #matix functions should be moved
-    glMatrixMode(GL_PROJECTION)
-    glPushMatrix()
-    glLoadIdentity()
-    glOrtho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0)
-    glMatrixMode( GL_MODELVIEW)
-    glLoadIdentity()
-    glViewport(0, 0, width, height)
-
-    if (eye) :
-        glDrawBuffer(GL_BACK_LEFT)
-    else :
-        glDrawBuffer(GL_BACK_RIGHT)
-
-    glBegin(GL_QUADS)
-    glTexCoord2f(0.0, 0.0)
-    glVertex3f(-1.0, -1.0, 0.5)
-    glTexCoord2f(1.0, 0.0)
-    glVertex3f(1.0, -1.0, 0.5)
-    glTexCoord2f(1.0, 1.0)
-    glVertex3f(1.0, 1.0, 0.5)
-    glTexCoord2f(0.0, 1.0)
-    glVertex3f(-1.0, 1.0, 0.5)
-    glEnd()
-
-    glMatrixMode(GL_PROJECTION)
-    glPopMatrix()
-
-    glDisable(GL_TEXTURE_2D)
-    glBindTexture(GL_TEXTURE_2D, 0)
-    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0)
-    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0)
-
-
-#note we may need to init cuda_gl here and pass it to camera
-def main():
-    global window, ts, width, height
-    (width, height) = (1920, 1080)
-
-    glutInit(sys.argv)
-    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH | GLUT_STEREO)
-    glutInitWindowSize(*initial_size)
-    glutInitWindowPosition(0, 0)
-    window = glutCreateWindow("Stereo Volume Rendering")
-
-
-    glutDisplayFunc(display)
-    glutIdleFunc(idle)
-    glutReshapeFunc(resize)
-    glutMouseFunc( mouseButton )
-    glutMotionFunc( mouseMotion )
-    glutKeyboardFunc(keyPressed)
-    init_gl(width, height)
-
-    # create texture for blitting to screen
-    create_texture(width, height)
-
-    import pycuda.gl.autoinit
-    import pycuda.gl
-    cuda_gl = pycuda.gl
-
-    create_PBO(width, height)
-    # ----- Load and Set Volume Data -----
-
-    density_grid = np.load("/home/bogert/dd150_log_densities.npy")
-
-    mi, ma= 21.5, 24.5
-    bins = 5000
-    tf = ColorTransferFunction( (mi, ma), bins)
-    tf.map_to_colormap(mi, ma, colormap="algae", scale_func = scale_func)
-
-    ts = TheiaScene(volume = density_grid, raycaster = FrontToBackRaycaster(size = (width, height), tf = tf))
-
-    ts.get_raycaster().set_sample_size(0.01)
-    ts.get_raycaster().set_max_samples(5000)
-
-    glutMainLoop()
-
-def scale_func(v, mi, ma):
-    return  np.minimum(1.0, np.abs((v)-ma)/np.abs(mi-ma) + 0.0)
-
-# Print message to console, and kick off the main to get it rolling.
-if __name__ == "__main__":
-    print("Hit ESC key to quit, 'a' to toggle animation, and 'e' to toggle cuda")
-    main()

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 doc/source/visualizing/opengl_volume_rendering.py
--- a/doc/source/visualizing/opengl_volume_rendering.py
+++ /dev/null
@@ -1,322 +0,0 @@
-from OpenGL.GL import *
-from OpenGL.GLUT import *
-from OpenGL.GLU import *
-from OpenGL.GL.ARB.vertex_buffer_object import *
-
-import sys, time
-import numpy as np
-import pycuda.driver as cuda_driver
-import pycuda.gl as cuda_gl
-
-from yt.visualization.volume_rendering.theia.scene import TheiaScene
-from yt.visualization.volume_rendering.theia.algorithms.front_to_back import FrontToBackRaycaster
-from yt.visualization.volume_rendering.transfer_functions import ColorTransferFunction
-from yt.visualization.color_maps import *
-
-import numexpr as ne
-
-window = None     # Number of the glut window.
-rot_enabled = True
-
-#Theia Scene
-ts = None
-
-#RAY CASTING values
-c_tbrightness = 1.0
-c_tdensity = 0.05
-
-output_texture = None # pointer to offscreen render target
-
-leftButton = False
-middleButton = False
-rightButton = False
-
-#Screen width and height
-width = 1024
-height = 1024
-
-eyesep = 0.1
-
-(pbo, pycuda_pbo) = [None]*2
-
-def create_PBO(w, h):
-    global pbo, pycuda_pbo
-    num_texels = w*h
-    array = np.zeros((w,h,3),np.uint32)
-
-    pbo = glGenBuffers(1)
-    glBindBuffer(GL_ARRAY_BUFFER, pbo)
-    glBufferData(GL_ARRAY_BUFFER, array, GL_DYNAMIC_DRAW)
-    glBindBuffer(GL_ARRAY_BUFFER, 0)
-    pycuda_pbo = cuda_gl.RegisteredBuffer(long(pbo))
-
-def destroy_PBO(self):
-    global pbo, pycuda_pbo
-    glBindBuffer(GL_ARRAY_BUFFER, long(pbo))
-    glDeleteBuffers(1, long(pbo));
-    glBindBuffer(GL_ARRAY_BUFFER, 0)
-    pbo,pycuda_pbo = [None]*2
-
-#consistent with C initPixelBuffer()
-def create_texture(w,h):
-    global output_texture
-    output_texture = glGenTextures(1)
-    glBindTexture(GL_TEXTURE_2D, output_texture)
-    # set basic parameters
-    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
-    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
-    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
-    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
-    # buffer data
-    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA,
-                 w, h, 0, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, None)
-
-#consistent with C initPixelBuffer()
-def destroy_texture():
-    global output_texture
-    glDeleteTextures(output_texture);
-    output_texture = None
-
-def init_gl(w = 512 , h = 512):
-    Width, Height = (w, h)
-
-    glClearColor(0.1, 0.1, 0.5, 1.0)
-    glDisable(GL_DEPTH_TEST)
-
-    #matrix functions
-    glViewport(0, 0, Width, Height)
-    glMatrixMode(GL_PROJECTION);
-    glLoadIdentity();
-
-    #matrix functions
-    gluPerspective(60.0, Width/float(Height), 0.1, 10.0)
-    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
-
-def resize(Width, Height):
-    global width, height
-    (width, height) = Width, Height
-    glViewport(0, 0, Width, Height)        # Reset The Current Viewport And Perspective Transformation
-    glMatrixMode(GL_PROJECTION)
-    glLoadIdentity()
-    gluPerspective(60.0, Width/float(Height), 0.1, 10.0)
-
-
-def do_tick():
-    global time_of_last_titleupdate, frame_counter, frames_per_second
-    if ((time.clock () * 1000.0) - time_of_last_titleupdate >= 1000.):
-        frames_per_second = frame_counter                   # Save The FPS
-        frame_counter = 0  # Reset The FPS Counter
-        szTitle = "%d FPS" % (frames_per_second )
-        glutSetWindowTitle ( szTitle )
-        time_of_last_titleupdate = time.clock () * 1000.0
-    frame_counter += 1
-
-oldMousePos = [ 0, 0 ]
-def mouseButton( button, mode, x, y ):
-	"""Callback function (mouse button pressed or released).
-
-	The current and old mouse positions are stored in
-	a	global renderParam and a global list respectively"""
-
-	global leftButton, middleButton, rightButton, oldMousePos
-
-        if button == GLUT_LEFT_BUTTON:
-	    if mode == GLUT_DOWN:
-	        leftButton = True
-            else:
-		leftButton = False
-
-        if button == GLUT_MIDDLE_BUTTON:
-	    if mode == GLUT_DOWN:
-	        middleButton = True
-            else:
-		middleButton = False
-
-        if button == GLUT_RIGHT_BUTTON:
-	    if mode == GLUT_DOWN:
-	        rightButton = True
-            else:
-		rightButton = False
-
-	oldMousePos[0], oldMousePos[1] = x, y
-	glutPostRedisplay( )
-
-def mouseMotion( x, y ):
-	"""Callback function (mouse moved while button is pressed).
-
-	The current and old mouse positions are stored in
-	a	global renderParam and a global list respectively.
-	The global translation vector is updated according to
-	the movement of the mouse pointer."""
-
-	global ts, leftButton, middleButton, rightButton, oldMousePos
-	deltaX = x - oldMousePos[ 0 ]
-	deltaY = y - oldMousePos[ 1 ]
-
-	factor = 0.001
-
-	if leftButton == True:
-             ts.camera.rotateX( - deltaY * factor)
-             ts.camera.rotateY( - deltaX * factor)
-	if middleButton == True:
-	     ts.camera.translateX( deltaX* 2.0 * factor)
-	     ts.camera.translateY( - deltaY* 2.0 * factor)
-	if rightButton == True:
-	     ts.camera.scale += deltaY * factor
-
-	oldMousePos[0], oldMousePos[1] = x, y
-	glutPostRedisplay( )
-
-def keyPressed(*args):
-    global c_tbrightness, c_tdensity
-    # If escape is pressed, kill everything.
-    if args[0] == '\033':
-        print('Closing..')
-        destroy_PBOs()
-        destroy_texture()
-        exit()
-
-    #change the brightness of the scene
-    elif args[0] == ']':
-        c_tbrightness += 0.025
-    elif args[0] == '[':
-        c_tbrightness -= 0.025
-
-    #change the density scale
-    elif args[0] == ';':
-        c_tdensity -= 0.001
-    elif args[0] == '\'':
-        c_tdensity += 0.001 
-
-def idle():
-    glutPostRedisplay()
-
-def display():
-    try:
-        #process left eye
-        process_image()
-        display_image()
-
-        glutSwapBuffers()
-
-    except:
-        from traceback import print_exc
-        print_exc()
-        from os import _exit
-        _exit(0)
-
-def process(eye = True):
-    global ts, pycuda_pbo, eyesep, c_tbrightness, c_tdensity
-
-    ts.get_raycaster().set_opacity(c_tdensity)
-    ts.get_raycaster().set_brightness(c_tbrightness)
-
-    dest_mapping = pycuda_pbo.map()
-    (dev_ptr, size) = dest_mapping.device_ptr_and_size()
-    ts.get_raycaster().surface.device_ptr = dev_ptr
-    ts.update()
-   # ts.get_raycaster().cast()
-    dest_mapping.unmap()
-
-
-def process_image():
-    global output_texture, pbo, width, height
-    """ copy image and process using CUDA """
-    # run the Cuda kernel
-    process()
-    # download texture from PBO
-    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, np.uint64(pbo))
-    glBindTexture(GL_TEXTURE_2D, output_texture)
-
-    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA,
-                 width, height, 0, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, None)
-
-def display_image(eye = True):
-    global width, height
-    """ render a screen sized quad """
-    glDisable(GL_DEPTH_TEST)
-    glDisable(GL_LIGHTING)
-    glEnable(GL_TEXTURE_2D)
-    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE)
-
-    #matix functions should be moved
-    glMatrixMode(GL_PROJECTION)
-    glPushMatrix()
-    glLoadIdentity()
-    glOrtho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0)
-    glMatrixMode( GL_MODELVIEW)
-    glLoadIdentity()
-    glViewport(0, 0, width, height)
-
-    glBegin(GL_QUADS)
-    glTexCoord2f(0.0, 0.0)
-    glVertex3f(-1.0, -1.0, 0.5)
-    glTexCoord2f(1.0, 0.0)
-    glVertex3f(1.0, -1.0, 0.5)
-    glTexCoord2f(1.0, 1.0)
-    glVertex3f(1.0, 1.0, 0.5)
-    glTexCoord2f(0.0, 1.0)
-    glVertex3f(-1.0, 1.0, 0.5)
-    glEnd()
-
-    glMatrixMode(GL_PROJECTION)
-    glPopMatrix()
-
-    glDisable(GL_TEXTURE_2D)
-    glBindTexture(GL_TEXTURE_2D, 0)
-    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0)
-    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0)
-
-
-#note we may need to init cuda_gl here and pass it to camera
-def main():
-    global window, ts, width, height
-    (width, height) = (1024, 1024)
-
-    glutInit(sys.argv)
-    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH )
-    glutInitWindowSize(width, height)
-    glutInitWindowPosition(0, 0)
-    window = glutCreateWindow("Stereo Volume Rendering")
-
-
-    glutDisplayFunc(display)
-    glutIdleFunc(idle)
-    glutReshapeFunc(resize)
-    glutMouseFunc( mouseButton )
-    glutMotionFunc( mouseMotion )
-    glutKeyboardFunc(keyPressed)
-    init_gl(width, height)
-
-    # create texture for blitting to screen
-    create_texture(width, height)
-
-    import pycuda.gl.autoinit
-    import pycuda.gl
-    cuda_gl = pycuda.gl
-
-    create_PBO(width, height)
-    # ----- Load and Set Volume Data -----
-
-    density_grid = np.load("/home/bogert/dd150_log_densities.npy")
-
-    mi, ma= 21.5, 24.5
-    bins = 5000
-    tf = ColorTransferFunction( (mi, ma), bins)
-    tf.map_to_colormap(mi, ma, colormap="algae", scale_func = scale_func)
-
-    ts = TheiaScene(volume = density_grid, raycaster = FrontToBackRaycaster(size = (width, height), tf = tf))
-
-    ts.get_raycaster().set_sample_size(0.01)
-    ts.get_raycaster().set_max_samples(5000)
-    ts.update()
-
-    glutMainLoop()
-
-def scale_func(v, mi, ma):
-    return  np.minimum(1.0, np.abs((v)-ma)/np.abs(mi-ma) + 0.0)
-
-# Print message to console, and kick off the main to get it rolling.
-if __name__ == "__main__":
-    print("Hit ESC key to quit, 'a' to toggle animation, and 'e' to toggle cuda")
-    main()

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 scripts/pyro_queue.py
--- a/scripts/pyro_queue.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from yt.config import ytcfg;ytcfg["yt","__withinreason"]="True"
-import os
-import Pyro4
-import uuid
-
-from yt.mods import *
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    _get_comm
-from yt.gui.reason.pyro_queue import \
-    PyroQueueRoot, \
-    PyroQueueNonRoot
-
-comm = _get_comm(())
-my_rank = comm.comm.rank
-
-if my_rank == 0:
-    my_q = PyroQueueRoot(comm)
-    Pyro4.config.HMAC_KEY = uuid.uuid4().hex
-    key_file = 'reason.key'
-    fd = os.open(key_file, os.O_CREAT, 0600)
-    os.close(fd)
-    out_file = file(key_file, 'w')
-    out_file.write("HMAC KEY: %s\n" % Pyro4.config.HMAC_KEY)
-    out_file.close()
-    mylog.info('See %s for HMAC key.', key_file)
-    Pyro4.Daemon.serveSimple(
-        {my_q: "yt.executor"},
-        ns=False, verbose=True)
-else:
-    my_q = PyroQueueNonRoot(comm)
-    my_q.run()

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 setup.py
--- a/setup.py
+++ b/setup.py
@@ -22,31 +22,19 @@
 from distutils.core import Command
 from distutils.spawn import find_executable
 
-REASON_FILES = []
-REASON_DIRS = [
+MAPSERVER_FILES = []
+MAPSERVER_DIRS = [
     "",
-    "resources",
-    "resources/ux",
-    "resources/images",
-    "resources/css",
-    "resources/css/images",
-    "app",
-    "app/store",
-    "app/store/widgets",
-    "app/view",
-    "app/view/widgets",
-    "app/model",
-    "app/controller",
-    "app/controller/widgets",
-    "app/templates",
+    "leaflet",
+    "leaflet/images"
 ]
 
-for subdir in REASON_DIRS:
-    dir_name = os.path.join("yt", "gui", "reason", "html", subdir)
+for subdir in MAPSERVER_DIRS:
+    dir_name = os.path.join("yt", "visualization", "mapserver", "html", subdir)
     files = []
     for ext in ["js", "html", "css", "png", "ico", "gif"]:
         files += glob.glob("%s/*.%s" % (dir_name, ext))
-    REASON_FILES.append((dir_name, files))
+    MAPSERVER_FILES.append((dir_name, files))
 
 # Verify that we have Cython installed
 REQ_CYTHON = '0.22'
@@ -218,7 +206,7 @@
         license="BSD",
         configuration=configuration,
         zip_safe=False,
-        data_files=REASON_FILES,
+        data_files=MAPSERVER_FILES,
         cmdclass={'build_py': my_build_py, 'build_src': my_build_src},
     )
     return

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -21,18 +21,22 @@
 from yt.utilities.on_demand_imports import _scipy, NotAModule
 
 special = _scipy.special
+tau_factor = None
+_cs = None
+
 
 def voigt_scipy(a, u):
     x = np.asarray(u).astype(np.float64)
     y = np.asarray(a).astype(np.float64)
     return special.wofz(x + 1j * y).real
 
+
 def voigt_old(a, u):
     """
     NAME:
-        VOIGT 
+        VOIGT
     PURPOSE:
-        Implementation of Voigt function 
+        Implementation of Voigt function
     CATEGORY:
             Math
     CALLING SEQUENCE:
@@ -57,9 +61,10 @@
     OUTPUTS:
             An array of the same type as u
     RESTRICTIONS:
-            U must be an array, a should not be. Also this procedure is only valid
-            for the region a<1.0, u<4.0 or a<1.8(u+1), u>4, which should be most 
-            astrophysical conditions (see the article below for further comments
+            U must be an array, a should not be. Also this procedure is only
+            valid for the region a<1.0, u<4.0 or a<1.8(u+1), u>4, which should
+            be most astrophysical conditions (see the article below for further
+            comments)
     PROCEDURE:
             Follows procedure in Armstrong JQSRT 7, 85 (1967)
             also the same as the intrinsic in the previous version of IDL
@@ -71,17 +76,17 @@
     y = np.asarray(a).astype(np.float64)
 
     # Hummer's Chebyshev Coefficients
-    c = ( 0.1999999999972224, -0.1840000000029998,   0.1558399999965025, 
-         -0.1216640000043988,  0.0877081599940391,  -0.0585141248086907, 
-          0.0362157301623914, -0.0208497654398036,   0.0111960116346270, 
-         -0.56231896167109e-2, 0.26487634172265e-2, -0.11732670757704e-2, 
-          0.4899519978088e-3, -0.1933630801528e-3,   0.722877446788e-4, 
-         -0.256555124979e-4,   0.86620736841e-5,    -0.27876379719e-5, 
-          0.8566873627e-6,    -0.2518433784e-6,      0.709360221e-7, 
-         -0.191732257e-7,      0.49801256e-8,       -0.12447734e-8, 
-          0.2997777e-9,       -0.696450e-10,         0.156262e-10, 
-         -0.33897e-11,         0.7116e-12,          -0.1447e-12, 
-          0.285e-13,          -0.55e-14,             0.10e-14,
+    c = (0.1999999999972224, -0.1840000000029998,   0.1558399999965025,
+         -0.1216640000043988,  0.0877081599940391,  -0.0585141248086907,
+         0.0362157301623914, -0.0208497654398036,   0.0111960116346270,
+         -0.56231896167109e-2, 0.26487634172265e-2, -0.11732670757704e-2,
+         0.4899519978088e-3, -0.1933630801528e-3,   0.722877446788e-4,
+         -0.256555124979e-4,   0.86620736841e-5,    -0.27876379719e-5,
+         0.8566873627e-6,    -0.2518433784e-6,      0.709360221e-7,
+         -0.191732257e-7,      0.49801256e-8,       -0.12447734e-8,
+         0.2997777e-9,       -0.696450e-10,         0.156262e-10,
+         -0.33897e-11,         0.7116e-12,          -0.1447e-12,
+         0.285e-13,          -0.55e-14,             0.10e-14,
          -0.2e-15)
 
     y2 = y * y
@@ -108,11 +113,11 @@
         x14 = np.power(np.clip(x[q], -np.inf, 500.),  14)
         x12 = np.power(np.clip(x[q], -np.inf, 1000.), 12)
         x10 = np.power(np.clip(x[q], -np.inf, 5000.), 10)
-        x8  = np.power(np.clip(x[q], -np.inf, 50000.), 8)
-        x6  = np.power(np.clip(x[q], -np.inf, 1.e6),   6)
-        x4  = np.power(np.clip(x[q], -np.inf, 1.e9),   4)
-        x2  = np.power(np.clip(x[q], -np.inf, 1.e18),  2)
-        dno1[q] = -(0.5 / x2 + 0.75 / x4 + 1.875 / x6 + 
+        x8 = np.power(np.clip(x[q], -np.inf, 50000.), 8)
+        x6 = np.power(np.clip(x[q], -np.inf, 1.e6),   6)
+        x4 = np.power(np.clip(x[q], -np.inf, 1.e9),   4)
+        x2 = np.power(np.clip(x[q], -np.inf, 1.e18),  2)
+        dno1[q] = -(0.5 / x2 + 0.75 / x4 + 1.875 / x6 +
                     6.5625 / x8 + 29.53125 / x10 +
                     162.4218 / x12 + 1055.7421 / x14)
         dno2[q] = (1. - dno1[q]) / (2. * x[q])
@@ -130,81 +135,89 @@
                 yn = yn * y2
                 g = dn.astype(np.float64) * yn
                 funct = funct + q * g
-                if np.max(np.abs(g / funct)) <= 1.e-8: break
+                if np.max(np.abs(g / funct)) <= 1.e-8:
+                    break
 
     k1 = u1 - 1.12837917 * funct
     k1 = k1.astype(np.float64).clip(0)
     return k1
 
-def tau_profile(lamba_0, f_value, gamma, v_doppler, column_density, 
+
+def tau_profile(lambda_0, f_value, gamma, v_doppler, column_density,
                 delta_v=None, delta_lambda=None,
                 lambda_bins=None, n_lambda=12000, dlambda=0.01):
     r"""
-    Create an optical depth vs. wavelength profile for an 
+    Create an optical depth vs. wavelength profile for an
     absorption line using a voigt profile.
 
     Parameters
     ----------
-    
-    lamba_0 : float YTQuantity in length units
+
+    lambda_0 : float in angstroms
        central wavelength.
     f_value : float
        absorption line f-value.
     gamma : float
        absorption line gamma value.
-    v_doppler : float YTQuantity in velocity units
+    v_doppler : float in cm/s
        doppler b-parameter.
-    column_density : float YTQuantity in (length units)^-2
+    column_density : float in cm^-2
        column density.
-    delta_v : float YTQuantity in velocity units
-       velocity offset from lamba_0.
+    delta_v : float in cm/s
+       velocity offset from lambda_0.
        Default: None (no shift).
-    delta_lambda : float YTQuantity in length units
+    delta_lambda : float in angstroms
         wavelength offset.
         Default: None (no shift).
-    lambda_bins : YTArray in length units
-        wavelength array for line deposition.  If None, one will be 
+    lambda_bins : array in angstroms
+        wavelength array for line deposition.  If None, one will be
         created using n_lambda and dlambda.
         Default: None.
     n_lambda : int
         size of lambda bins to create if lambda_bins is None.
         Default: 12000.
-    dlambda : float 
+    dlambda : float in angstroms
         lambda bin width in angstroms if lambda_bins is None.
         Default: 0.01.
-        
+
     """
+    global tau_factor
+    if tau_factor is None:
+        tau_factor = (
+            np.sqrt(np.pi) * charge_proton_cgs ** 2 /
+            (mass_electron_cgs * speed_of_light_cgs)
+        ).in_cgs().d
 
-    ## shift lamba_0 by delta_v
+    global _cs
+    if _cs is None:
+        _cs = speed_of_light_cgs.d[()]
+
+    # shift lambda_0 by delta_v
     if delta_v is not None:
-        lam1 = lamba_0 * (1 + delta_v / speed_of_light_cgs)
+        lam1 = lambda_0 * (1 + delta_v / _cs)
     elif delta_lambda is not None:
-        lam1 = lamba_0 + delta_lambda
+        lam1 = lambda_0 + delta_lambda
     else:
-        lam1 = lamba_0
+        lam1 = lambda_0
 
-    ## conversions
-    nu1 = speed_of_light_cgs / lam1           # line freq in Hz
-    nudop = v_doppler / speed_of_light_cgs * nu1   # doppler width in Hz
+    # conversions
+    nudop = 1e8 * v_doppler / lam1   # doppler width in Hz
 
-    ## create wavelength
+    # create wavelength
     if lambda_bins is None:
         lambda_bins = lam1 + \
             np.arange(n_lambda, dtype=np.float) * dlambda - \
             n_lambda * dlambda / 2  # wavelength vector (angstroms)
-    nua = (speed_of_light_cgs / lambda_bins)  # frequency vector (Hz)
 
-    ## tau_0
-    tau_X = np.sqrt(np.pi) * charge_proton_cgs**2 / \
-      (mass_electron_cgs * speed_of_light_cgs) * \
-      column_density * f_value / v_doppler
-    tau0 = tau_X * lamba_0
+    # tau_0
+    tau_X = tau_factor * column_density * f_value / v_doppler
+    tau0 = tau_X * lambda_0 * 1e-8
 
     # dimensionless frequency offset in units of doppler freq
-    x = ((nua - nu1) / nudop).in_units("")
-    a = (gamma / (4 * np.pi * nudop)).in_units("s")  # damping parameter 
-    phi = voigt(a, x)                                # line profile
-    tauphi = (tau0 * phi).in_units("")               # profile scaled with tau0
+    x = _cs / v_doppler * (lam1 / lambda_bins - 1.0)
+    a = gamma / (4.0 * np.pi * nudop)               # damping parameter
+    phi = voigt(a, x)                               # line profile
+    tauphi = tau0 * phi              # profile scaled with tau0
 
     return (lambda_bins, tauphi)
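
The refactored routine above works entirely on plain floats and NumPy arrays.
A minimal standalone sketch of the same arithmetic, with scipy.special.wofz
standing in for yt's voigt(a, x) helper and with illustrative CGS constant
values and default bin settings (not taken from yt's own definitions), would
look roughly like this:

    import numpy as np
    from scipy.special import wofz  # Voigt profile: H(a, x) = Re[w(x + i*a)]

    # CGS constants, illustrative values
    charge_proton = 4.8032e-10   # esu
    mass_electron = 9.1094e-28   # g
    c_cgs = 2.9979e10            # cm/s

    def tau_profile_sketch(lambda_0, f_value, gamma, v_doppler, column_density,
                           n_lambda=12000, dlambda=0.01):
        """lambda_0 in angstroms, v_doppler in cm/s, column_density in cm^-2."""
        tau_factor = np.sqrt(np.pi) * charge_proton**2 / (mass_electron * c_cgs)
        lam1 = lambda_0                                 # no velocity/wavelength shift
        nudop = 1e8 * v_doppler / lam1                  # doppler width in Hz
        lambda_bins = lam1 + np.arange(n_lambda) * dlambda - n_lambda * dlambda / 2
        tau0 = tau_factor * column_density * f_value / v_doppler * lambda_0 * 1e-8
        x = c_cgs / v_doppler * (lam1 / lambda_bins - 1.0)  # dimensionless freq offset
        a = gamma / (4.0 * np.pi * nudop)                   # damping parameter
        phi = wofz(x + 1j * a).real                         # Voigt line profile
        return lambda_bins, tau0 * phi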
 

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -54,7 +54,7 @@
         self.spectrum_line_list = None
         self.lambda_bins = YTArray(np.linspace(lambda_min, lambda_max, n_lambda),
                                    "angstrom")
-        self.bin_width = YTQuantity((lambda_max - lambda_min) / 
+        self.bin_width = YTQuantity((lambda_max - lambda_min) /
                                     float(n_lambda - 1), "angstrom")
         self.line_list = []
         self.continuum_list = []
@@ -66,7 +66,7 @@
 
         Parameters
         ----------
-        
+
         label : string
            label for the line.
         field_name : string
@@ -124,15 +124,15 @@
         input_file : string
            path to input ray data.
         output_file : optional, string
-           path for output file.  File formats are chosen based on the 
-           filename extension.  ``.h5`` for hdf5, ``.fits`` for fits, 
+           path for output file.  File formats are chosen based on the
+           filename extension.  ``.h5`` for hdf5, ``.fits`` for fits,
            and everything else is ASCII.
            Default: "spectrum.h5"
         line_list_file : optional, string
-           path to file in which the list of all deposited lines 
-           will be saved.  If set to None, the line list will not 
-           be saved.  Note, when running in parallel, combining the 
-           line lists can be quite slow, so it is recommended to set 
+           path to file in which the list of all deposited lines
+           will be saved.  If set to None, the line list will not
+           be saved.  Note, when running in parallel, combining the
+           line lists can be quite slow, so it is recommended to set
            this to None when running in parallel unless you really
            want them.
            Default: "lines.txt"
@@ -141,15 +141,15 @@
            Default: True
         njobs : optional, int or "auto"
            the number of process groups into which the loop over
-           absorption lines will be divided.  If set to -1, each 
+           absorption lines will be divided.  If set to -1, each
            absorption line will be deposited by exactly one processor.
-           If njobs is set to a value less than the total number of 
-           available processors (N), then the deposition of an 
+           If njobs is set to a value less than the total number of
+           available processors (N), then the deposition of an
            individual line will be parallelized over (N / njobs)
-           processors.  If set to "auto", it will first try to 
-           parallelize over the list of lines and only parallelize 
+           processors.  If set to "auto", it will first try to
+           parallelize over the list of lines and only parallelize
            the line deposition if there are more processors than
-           lines.  This is the optimal strategy for parallelizing 
+           lines.  This is the optimal strategy for parallelizing
            spectrum generation.
            Default: "auto"
         """
@@ -176,7 +176,7 @@
         if njobs == "auto":
             comm = _get_comm(())
             njobs = min(comm.size, len(self.line_list))
-        
+
         self._add_lines_to_spectrum(field_data, use_peculiar_velocity,
                                     line_list_file is not None, njobs=njobs)
         self._add_continua_to_spectrum(field_data, use_peculiar_velocity)
@@ -273,17 +273,26 @@
                                    (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding line - %s [%f A]: " % (line['label'], line['wavelength']),
                             valid_lines.size)
+
+            # Sanitize units here
+            column_density.convert_to_units("cm ** -2")
+            lbins = self.lambda_bins.d  # Angstroms
+            lambda_0 = line['wavelength'].d  # Angstroms
+            v_doppler = thermal_b.in_cgs().d  # cm / s
+            cdens = column_density.d
+            dlambda = delta_lambda.d  # Angstroms
+            vlos = field_data['velocity_los'].in_units("km/s").d
+
             for i, lixel in parallel_objects(enumerate(valid_lines), njobs=-1):
                 my_bin_ratio = spectrum_bin_ratio
+
                 while True:
                     lambda_bins, line_tau = \
                         tau_profile(
-                            line['wavelength'], line['f_value'],
-                            line['gamma'], thermal_b[lixel].in_units("km/s"),
-                            column_density[lixel],
-                            delta_lambda=delta_lambda[lixel],
-                            lambda_bins=self.lambda_bins[left_index[lixel]:right_index[lixel]])
-                        
+                            lambda_0, line['f_value'], line['gamma'], v_doppler[lixel],
+                            cdens[lixel], delta_lambda=dlambda[lixel],
+                            lambda_bins=lbins[left_index[lixel]:right_index[lixel]])
+
                     # Widen wavelength window until optical depth reaches a max value at the ends.
                     if (line_tau[0] < max_tau and line_tau[-1] < max_tau) or \
                       (left_index[lixel] <= 0 and right_index[lixel] >= self.n_lambda):
@@ -295,16 +304,16 @@
                     right_index[lixel] = (center_bins[lixel] +
                                           my_bin_ratio *
                                           width_ratio[lixel]).astype(int).clip(0, self.n_lambda)
+
                 self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
                 if save_line_list and line['label_threshold'] is not None and \
-                        column_density[lixel] >= line['label_threshold']:
+                        cdens[lixel] >= line['label_threshold']:
                     if use_peculiar_velocity:
-                        peculiar_velocity = field_data['velocity_los'][lixel].in_units("km/s")
+                        peculiar_velocity = vlos[lixel]
                     else:
                         peculiar_velocity = 0.0
                     self.spectrum_line_list.append({'label': line['label'],
-                                                    'wavelength': (line['wavelength'] +
-                                                                   delta_lambda[lixel]),
+                                                    'wavelength': (lambda_0 + dlambda[lixel]),
                                                     'column_density': column_density[lixel],
                                                     'b_thermal': thermal_b[lixel],
                                                     'redshift': field_data['redshift'][lixel],
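
The "Sanitize units here" hunk above follows the same idea: the YTArray inputs
are converted to bare NumPy arrays once, before the per-line loop, so
tau_profile never performs unit conversions inside the hot path. A compressed
sketch of that pattern, with hypothetical input values:

    from yt.units.yt_array import YTArray

    thermal_b = YTArray([12.0, 15.0], "km/s")          # hypothetical values
    column_density = YTArray([1e13, 2e14], "cm**-2")

    v_doppler = thermal_b.in_cgs().d        # plain ndarray, cm/s
    cdens = column_density.d                # plain ndarray, cm^-2
    for i in range(cdens.size):
        # tau_profile(..., v_doppler=v_doppler[i], column_density=cdens[i], ...)
        pass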

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -138,7 +138,7 @@
 
             idxs = np.argsort(kT)
 
-            kT_bins = np.linspace(kT_min, max(my_kT_max, kT_max), num=n_kT+1)
+            kT_bins = np.linspace(kT_min, max(my_kT_max.v, kT_max), num=n_kT+1)
             dkT = kT_bins[1]-kT_bins[0]
             kT_idxs = np.digitize(kT[idxs], kT_bins)
             kT_idxs = np.minimum(np.maximum(1, kT_idxs), n_kT) - 1

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -26,8 +26,7 @@
 #-----------------------------------------------------------------------------
 from yt.extern.six import string_types
 import numpy as np
-from yt.funcs import \
-    mylog, get_pbar, iterable, ensure_list
+from yt.funcs import mylog, get_pbar, iterable, ensure_list
 from yt.utilities.physical_constants import clight
 from yt.utilities.cosmology import Cosmology
 from yt.utilities.orientation import Orientation
@@ -880,21 +879,24 @@
         f = h5py.File(h5file, "r")
 
         parameters["ExposureTime"] = YTQuantity(f["/exp_time"].value, "s")
-        parameters["Area"] = YTQuantity(f["/area"].value, "cm**2")
+        if isinstance(f["/area"].value, (string_types, bytes)):
+            parameters["Area"] = f["/area"].value.decode("utf8")
+        else:
+            parameters["Area"] = YTQuantity(f["/area"].value, "cm**2")
         parameters["Redshift"] = f["/redshift"].value
         parameters["AngularDiameterDistance"] = YTQuantity(f["/d_a"].value, "Mpc")
         if "rmf" in f:
-            parameters["RMF"] = f["/rmf"].value
+            parameters["RMF"] = f["/rmf"].value.decode("utf8")
         if "arf" in f:
-            parameters["ARF"] = f["/arf"].value
+            parameters["ARF"] = f["/arf"].value.decode("utf8")
         if "channel_type" in f:
-            parameters["ChannelType"] = f["/channel_type"].value
+            parameters["ChannelType"] = f["/channel_type"].value.decode("utf8")
         if "mission" in f:
-            parameters["Mission"] = f["/mission"].value
+            parameters["Mission"] = f["/mission"].value.decode("utf8")
         if "telescope" in f:
-            parameters["Telescope"] = f["/telescope"].value
+            parameters["Telescope"] = f["/telescope"].value.decode("utf8")
         if "instrument" in f:
-            parameters["Instrument"] = f["/instrument"].value
+            parameters["Instrument"] = f["/instrument"].value.decode("utf8")
 
         events["xpix"] = f["/xpix"][:]
         events["ypix"] = f["/ypix"][:]
@@ -924,7 +926,10 @@
         parameters = {}
 
         parameters["ExposureTime"] = YTQuantity(tblhdu.header["EXPOSURE"], "s")
-        parameters["Area"] = YTQuantity(tblhdu.header["AREA"], "cm**2")
+        if isinstance(tblhdu.header["AREA"], (string_types, bytes)):
+            parameters["Area"] = tblhdu.header["AREA"]
+        else:
+            parameters["Area"] = YTQuantity(tblhdu.header["AREA"], "cm**2")
         parameters["Redshift"] = tblhdu.header["REDSHIFT"]
         parameters["AngularDiameterDistance"] = YTQuantity(tblhdu.header["D_A"], "Mpc")
         if "RMF" in tblhdu.header:

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -30,7 +30,6 @@
     loglevel = '20',
     inline = 'False',
     numthreads = '-1',
-    __withinreason = 'False',
     __withintesting = 'False',
     __parallel = 'False',
     __global_parallel_rank = '0',

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -33,6 +33,16 @@
 
 derived_quantity_registry = {}
 
+def get_position_fields(field, data):
+    axis_names = [data.ds.coordinates.axis_name[num] for num in [0, 1, 2]]
+    if field[0] in data.ds.particle_types:
+        position_fields = [(field[0], 'particle_position_%s' % d)
+                           for d in axis_names]
+    else:
+        position_fields = axis_names
+
+    return position_fields
+
 class RegisteredDerivedQuantity(type):
     def __init__(cls, name, b, d):
         type.__init__(cls, name, b, d)
@@ -542,19 +552,17 @@
         return rv
 
     def process_chunk(self, data, field):
-        axis_names = data.ds.coordinates.axis_name
         field = data._determine_fields(field)[0]
         ma = array_like_field(data, -HUGE, field)
-        mx = array_like_field(data, -1, axis_names[0])
-        my = array_like_field(data, -1, axis_names[1])
-        mz = array_like_field(data, -1, axis_names[2])
+        position_fields = get_position_fields(field, data)
+        mx = array_like_field(data, -1, position_fields[0])
+        my = array_like_field(data, -1, position_fields[1])
+        mz = array_like_field(data, -1, position_fields[2])
         maxi = -1
         if data[field].size > 0:
             maxi = np.argmax(data[field])
             ma = data[field][maxi]
-            mx, my, mz = [data[ax][maxi] for ax in (axis_names[0],
-                                                    axis_names[1],
-                                                    axis_names[2])]
+            mx, my, mz = [data[ax][maxi] for ax in position_fields]
         return (ma, maxi, mx, my, mz)
 
     def reduce_intermediate(self, values):
@@ -590,14 +598,15 @@
     def process_chunk(self, data, field):
         field = data._determine_fields(field)[0]
         ma = array_like_field(data, HUGE, field)
-        mx = array_like_field(data, -1, "x")
-        my = array_like_field(data, -1, "y")
-        mz = array_like_field(data, -1, "z")
+        position_fields = get_position_fields(field, data)
+        mx = array_like_field(data, -1, position_fields[0])
+        my = array_like_field(data, -1, position_fields[1])
+        mz = array_like_field(data, -1, position_fields[2])
         mini = -1
         if data[field].size > 0:
             mini = np.argmin(data[field])
             ma = data[field][mini]
-            mx, my, mz = [data[ax][mini] for ax in 'xyz']
+            mx, my, mz = [data[ax][mini] for ax in position_fields]
         return (ma, mini, mx, my, mz)
 
     def reduce_intermediate(self, values):
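
The new get_position_fields helper exists so the max/min location quantities
also work for particle fields, whose positions live in fields such as
('PartType0', 'particle_position_x') rather than plain 'x'. A standalone
sketch of the same branching, detached from yt's data objects (names
illustrative):

    def position_fields_for(field_type, axis_names, particle_types):
        """Mirror of get_position_fields, for illustration only."""
        if field_type in particle_types:
            return [(field_type, "particle_position_%s" % ax) for ax in axis_names]
        return list(axis_names)

    # position_fields_for("PartType0", "xyz", {"PartType0", "io"})
    #   -> [("PartType0", "particle_position_x"), ..., ("PartType0", "particle_position_z")]
    # position_fields_for("gas", "xyz", {"PartType0", "io"})
    #   -> ["x", "y", "z"]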

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -683,6 +683,7 @@
         self.unit_registry.add("code_velocity", 1.0, dimensions.velocity)
         self.unit_registry.add("code_metallicity", 1.0,
                                dimensions.dimensionless)
+        self.unit_registry.add("a", 1.0, dimensions.dimensionless)
 
     def set_units(self):
         """
@@ -700,6 +701,7 @@
                 self.unit_registry.add(new_unit, self.unit_registry.lut[my_unit][0] /
                                        (1 + self.current_redshift),
                                        length, "\\rm{%s}/(1+z)" % my_unit)
+            self.unit_registry.modify('a', 1/(1+self.current_redshift))
 
         self.set_code_units()
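
Registering a dimensionless "a" unit and pinning it to 1/(1+z) lets comoving
quantities carry the scale factor symbolically. The underlying arithmetic is
just the following (values illustrative):

    current_redshift = 2.0                # illustrative
    a = 1.0 / (1.0 + current_redshift)    # what the registry entry for "a" is set to
    l_comoving = 10.0                     # e.g. a length in comoving Mpc (Mpccm)
    l_proper = a * l_comoving             # proper length: ~3.33 Mpc at z = 2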
 

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -108,6 +108,14 @@
             raise RuntimeError("units_override is not supported for GadgetDataset. "+
                                "Use unit_base instead.")
         super(GadgetDataset, self).__init__(filename, dataset_type)
+        if self.cosmological_simulation:
+            self.time_unit.convert_to_units('s/h')
+            self.length_unit.convert_to_units('kpccm/h')
+            self.mass_unit.convert_to_units('g/h')
+        else:
+            self.time_unit.convert_to_units('s')
+            self.length_unit.convert_to_units('kpc')
+            self.mass_unit.convert_to_units('Msun')
 
     def _setup_binary_spec(self, spec, spec_dict):
         if isinstance(spec, str):
@@ -218,12 +226,21 @@
         self.length_unit = self.quan(length_unit[0], length_unit[1])
 
         unit_base = self._unit_base or {}
+
+        if self.cosmological_simulation:
+            # see http://www.mpa-garching.mpg.de/gadget/gadget-list/0113.html
+            # for why we need to include a factor of square root of the
+            # scale factor
+            vel_units = "cm/s * sqrt(a)"
+        else:
+            vel_units = "cm/s"
+
         if "velocity" in unit_base:
             velocity_unit = unit_base["velocity"]
         elif "UnitVelocity_in_cm_per_s" in unit_base:
-            velocity_unit = (unit_base["UnitVelocity_in_cm_per_s"], "cm/s")
+            velocity_unit = (unit_base["UnitVelocity_in_cm_per_s"], vel_units)
         else:
-            velocity_unit = (1e5, "cm/s")
+            velocity_unit = (1e5, vel_units)
         velocity_unit = _fix_unit_ordering(velocity_unit)
         self.velocity_unit = self.quan(velocity_unit[0], velocity_unit[1])
 
@@ -238,10 +255,26 @@
                 mass_unit = (unit_base["UnitMass_in_g"], "g/h")
         else:
             # Sane default
-            mass_unit = (1.0, "1e10*Msun/h")
+            mass_unit = (1e10, "Msun/h")
         mass_unit = _fix_unit_ordering(mass_unit)
         self.mass_unit = self.quan(mass_unit[0], mass_unit[1])
-        self.time_unit = self.length_unit / self.velocity_unit
+        if self.cosmological_simulation:
+            # self.velocity_unit is the unit to rescale on-disk velocities, The
+            # actual internal velocity unit is really in comoving units
+            # since the time unit is derived from the internal velocity unit, we
+            # infer the internal velocity unit here and name it vel_unit
+            #
+            # see http://www.mpa-garching.mpg.de/gadget/gadget-list/0113.html
+            if 'velocity' in unit_base:
+                vel_unit = unit_base['velocity']
+            elif "UnitVelocity_in_cm_per_s" in unit_base:
+                vel_unit = (unit_base['UnitVelocity_in_cm_per_s'], 'cmcm/s')
+            else:
+                vel_unit = (1, 'kmcm/s')
+            vel_unit = self.quan(*vel_unit)
+        else:
+            vel_unit = self.velocity_unit
+        self.time_unit = self.length_unit / vel_unit
 
     @staticmethod
     def _validate_header(filename):
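
As the inline comments note (and the linked gadget-list post explains),
cosmological GADGET snapshots store velocities with a factor of sqrt(a) folded
in, which is why the on-disk unit is tagged "cm/s * sqrt(a)" while the time
unit is derived from a comoving velocity unit instead. A back-of-the-envelope
illustration of what that tag means numerically, under that convention
(values hypothetical):

    import numpy as np

    z = 1.0
    a = 1.0 / (1.0 + z)                   # scale factor, here 0.5
    u_on_disk = 100.0                     # km/s as stored in the snapshot (hypothetical)
    v_peculiar = u_on_disk * np.sqrt(a)   # ~70.7 km/s of physical peculiar velocity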

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -44,6 +44,7 @@
     RAMSESOctreeContainer
 from yt.fields.particle_fields import \
     standard_particle_fields
+from yt.arraytypes import blankRecordArray
 
 class RAMSESDomainFile(object):
     _last_mask = None
@@ -108,7 +109,7 @@
                     print("You are running with the wrong number of fields.")
                     print("If you specified these in the load command, check the array length.")
                     print("In this file there are %s hydro fields." % skipped)
-                    #print "The last set of field sizes was: %s" % skipped
+                    #print"The last set of field sizes was: %s" % skipped
                     raise
                 if hvals['file_ncache'] == 0: continue
                 assert(hvals['file_ilevel'] == level+1)
@@ -464,6 +465,59 @@
         for subset in oobjs:
             yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
 
+    def _initialize_level_stats(self):
+        levels=sum([dom.level_count for dom in self.domains])
+        desc = {'names': ['numcells','level'],
+                'formats':['Int64']*2}
+        max_level=self.dataset.min_level+self.dataset.max_level+2
+        self.level_stats = blankRecordArray(desc, max_level)
+        self.level_stats['level'] = [i for i in range(max_level)]
+        self.level_stats['numcells'] = [0 for i in range(max_level)]
+        for level in range(self.dataset.min_level+1):
+            self.level_stats[level+1]['numcells']=2**(level*self.dataset.dimensionality)
+        for level in range(self.max_level+1):
+            self.level_stats[level+self.dataset.min_level+1]['numcells'] = levels[level]
+
+    def print_stats(self):
+        
+        # This function prints information based on the fluid on the grids,
+        # and therefore does not work for DM only runs. 
+        if not self.fluid_field_list:
+            print("This function is not implemented for DM only runs")
+            return
+
+        self._initialize_level_stats()
+        """
+        Prints out (stdout) relevant information about the simulation
+        """
+        header = "%3s\t%14s\t%14s" % ("level", "# cells","# cells^3")
+        print(header)
+        print("%s" % (len(header.expandtabs())*"-"))
+        for level in range(self.dataset.min_level+self.dataset.max_level+2):
+            print("% 3i\t% 14i\t% 14i" % \
+                  (level,
+                   self.level_stats['numcells'][level],
+                   np.ceil(self.level_stats['numcells'][level]**(1./3))))
+        print("-" * 46)
+        print("   \t% 14i" % (self.level_stats['numcells'].sum()))
+        print("\n")
+
+        dx = self.get_smallest_dx()
+        try:
+            print("z = %0.8f" % (self.dataset.current_redshift))
+        except:
+            pass
+        print("t = %0.8e = %0.8e s = %0.8e years" % \
+            (self.ds.current_time.in_units("code_time"),
+             self.ds.current_time.in_units("s"),
+             self.ds.current_time.in_units("yr")))
+        print("\nSmallest Cell:")
+        u=[]
+        for item in ("Mpc", "pc", "AU", "cm"):
+            print("\tWidth: %0.3e %s" % (dx.in_units(item), item))
+
+
+
 class RAMSESDataset(Dataset):
     _index_class = RAMSESIndex
     _field_info_class = RAMSESFieldInfo

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -367,9 +367,6 @@
        "__IPYTHON__" in dir(builtins) or \
        ytcfg.getboolean("yt", "__withintesting"):
         return DummyProgressBar()
-    elif ytcfg.getboolean("yt", "__withinreason"):
-        from yt.gui.reason.extdirect_repl import ExtProgressBar
-        return ExtProgressBar(title, maxval)
     elif ytcfg.getboolean("yt", "__parallel"):
         return ParallelProgressBar(title, maxval)
     widgets = [ title,

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -1,5 +1,5 @@
 """
-Cartesian fields
+Definitions for cartesian coordinate systems
 
 
 
@@ -17,8 +17,9 @@
 import numpy as np
 from .coordinate_handler import \
     CoordinateHandler, \
-    _unknown_coord, \
-    _get_coord_fields
+    _get_coord_fields, \
+    cartesian_to_cylindrical, \
+    cylindrical_to_cartesian
 import yt.visualization._MPL as _MPL
 
 class CartesianCoordinateHandler(CoordinateHandler):

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 yt/geometry/coordinates/coordinate_handler.py
--- a/yt/geometry/coordinates/coordinate_handler.py
+++ b/yt/geometry/coordinates/coordinate_handler.py
@@ -15,23 +15,19 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-import abc
 import weakref
 from numbers import Number
 
-from yt.funcs import *
-from yt.fields.field_info_container import \
-    NullFunc, FieldInfoContainer
-from yt.utilities.io_handler import io_registry
-from yt.utilities.logger import ytLogger as mylog
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface
-from yt.utilities.lib.pixelization_routines import \
-    pixelize_cylinder
-import yt.visualization._MPL as _MPL
+from yt.extern.six import string_types
+from yt.funcs import \
+    validate_width_tuple, \
+    fix_unitary, \
+    iterable
 from yt.units.yt_array import \
     YTArray, YTQuantity
-from yt.extern.six import string_types
+from yt.utilities.exceptions import \
+    YTCoordinateNotImplemented, \
+    YTInvalidWidthError
 
 def _unknown_coord(field, data):
     raise YTCoordinateNotImplemented

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 yt/geometry/coordinates/cylindrical_coordinates.py
--- a/yt/geometry/coordinates/cylindrical_coordinates.py
+++ b/yt/geometry/coordinates/cylindrical_coordinates.py
@@ -1,5 +1,5 @@
 """
-Cylindrical fields
+Definitions for cylindrical coordinate systems
 
 
 

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 yt/geometry/coordinates/geographic_coordinates.py
--- a/yt/geometry/coordinates/geographic_coordinates.py
+++ b/yt/geometry/coordinates/geographic_coordinates.py
@@ -1,5 +1,5 @@
 """
-Geographic fields
+Definitions for geographic coordinate systems
 
 
 
@@ -19,7 +19,6 @@
     CoordinateHandler, \
     _unknown_coord, \
     _get_coord_fields
-import yt.visualization._MPL as _MPL
 from yt.utilities.lib.pixelization_routines import \
     pixelize_cylinder, pixelize_aitoff
 
@@ -197,7 +196,27 @@
         raise NotImplementedError
 
     def convert_to_cartesian(self, coord):
-        raise NotImplementedError
+        if isinstance(coord, np.ndarray) and len(coord.shape) > 1:
+            alt = self.axis_id['altitude']
+            lon = self.axis_id['longitude']
+            lat = self.axis_id['latitude']
+            r = coord[:,alt] + self.ds.surface_height
+            theta = coord[:,lon] * np.pi/180
+            phi = coord[:,lat] * np.pi/180
+            nc = np.zeros_like(coord)
+            # r, theta, phi
+            nc[:,lat] = np.cos(phi) * np.sin(theta)*r
+            nc[:,lon] = np.sin(phi) * np.sin(theta)*r
+            nc[:,alt] = np.cos(theta) * r
+        else:
+            a, b, c = coord
+            theta = b * np.pi/180
+            phi = a * np.pi/180
+            r = self.ds.surface_height + c
+            nc = (np.cos(phi) * np.sin(theta)*r,
+                  np.sin(phi) * np.sin(theta)*r,
+                  np.cos(theta) * r)
+        return nc
 
     def convert_to_cylindrical(self, coord):
         raise NotImplementedError
@@ -274,27 +293,3 @@
                               0.0 * display_center[2]]
             display_center[self.axis_id['latitude']] = c
         return center, display_center
-
-    def convert_to_cartesian(self, coord):
-        if isinstance(coord, np.ndarray) and len(coord.shape) > 1:
-            alt = self.axis_id['altitude']
-            lon = self.axis_id['longitude']
-            lat = self.axis_id['latitude']
-            r = coord[:,alt] + self.ds.surface_height
-            theta = coord[:,lon] * np.pi/180
-            phi = coord[:,lat] * np.pi/180
-            nc = np.zeros_like(coord)
-            # r, theta, phi
-            nc[:,lat] = np.cos(phi) * np.sin(theta)*r
-            nc[:,lon] = np.sin(phi) * np.sin(theta)*r
-            nc[:,alt] = np.cos(theta) * r
-        else:
-            a, b, c = coord
-            theta = b * np.pi/180
-            phi = a * np.pi/180
-            r = self.ds.surface_height + c
-            nc = (np.cos(phi) * np.sin(theta)*r,
-                  np.sin(phi) * np.sin(theta)*r,
-                  np.cos(theta) * r)
-        return nc
-
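
The conversion moved above maps (latitude, longitude, altitude) onto Cartesian
coordinates by treating longitude as the angle theta and latitude as phi, with
the radius offset by the dataset's surface_height. Written as a standalone
function that returns the same 3-tuple as the scalar branch above (angles in
degrees):

    import numpy as np

    def geographic_to_cartesian(latitude, longitude, altitude, surface_height):
        theta = np.deg2rad(longitude)
        phi = np.deg2rad(latitude)
        r = surface_height + altitude
        return (r * np.cos(phi) * np.sin(theta),
                r * np.sin(phi) * np.sin(theta),
                r * np.cos(theta))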

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 yt/geometry/coordinates/polar_coordinates.py
--- a/yt/geometry/coordinates/polar_coordinates.py
+++ b/yt/geometry/coordinates/polar_coordinates.py
@@ -1,5 +1,5 @@
 """
-Polar fields
+Definitions for polar coordinate systems
 
 
 
@@ -14,20 +14,11 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-from yt.units.yt_array import YTArray
-from .coordinate_handler import \
-    CoordinateHandler, \
-    _unknown_coord, \
-    cylindrical_to_cartesian, \
-    _get_coord_fields
 from .cylindrical_coordinates import CylindricalCoordinateHandler
-import yt.visualization._MPL as _MPL
-from yt.utilities.lib.pixelization_routines import \
-    pixelize_cylinder
+
 
 class PolarCoordinateHandler(CylindricalCoordinateHandler):
 
-  def __init__(self, ds, ordering = ('r', 'theta', 'z')):
+    def __init__(self, ds, ordering = ('r', 'theta', 'z')):
         super(PolarCoordinateHandler, self).__init__(ds, ordering)
         # No need to set labels here

diff -r e7578b39e64500169f987d82461d7bfb13e7d8b9 -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 yt/geometry/coordinates/spec_cube_coordinates.py
--- a/yt/geometry/coordinates/spec_cube_coordinates.py
+++ b/yt/geometry/coordinates/spec_cube_coordinates.py
@@ -1,5 +1,5 @@
 """
-Cartesian fields
+Definitions for spectral cube coordinate systems
 
 
 
@@ -14,7 +14,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
 from .cartesian_coordinates import \
     CartesianCoordinateHandler
 from .coordinate_handler import \
@@ -55,7 +54,7 @@
         self.axis_field[self.ds.spec_axis] = _spec_axis
 
     def setup_fields(self, registry):
-        if self.ds.no_cgs_equiv_length == False:
+        if self.ds.no_cgs_equiv_length is False:
             return super(SpectralCubeCoordinateHandler, self
                     ).setup_fields(registry)
         for axi, ax in enumerate("xyz"):

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/cef05fa29af3/
Changeset:   cef05fa29af3
Branch:      yt
User:        ngoldbaum
Date:        2015-08-28 17:26:25+00:00
Summary:     Remove unused yt_lodgeit.py script
Affected #:  1 file

diff -r 9f0cdb36e7848b8680fc7b2a6dc54d4dd0bd5223 -r cef05fa29af32c93fb886c32e3dfa13d7f8a7e84 scripts/yt_lodgeit.py
--- a/scripts/yt_lodgeit.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-    LodgeIt!
-    ~~~~~~~~
-
-    A script that pastes stuff into the yt-project pastebin on
-    paste.yt-project.org.
-
-    Modified (very, very slightly) from the original script by the authors
-    below.
-
-    .lodgeitrc / _lodgeitrc
-    -----------------------
-
-    Under UNIX create a file called ``~/.lodgeitrc``, under Windows
-    create a file ``%APPDATA%/_lodgeitrc`` to override defaults::
-
-        language=default_language
-        clipboard=true/false
-        open_browser=true/false
-        encoding=fallback_charset
-
-    :authors: 2007-2008 Georg Brandl <georg at python.org>,
-              2006 Armin Ronacher <armin.ronacher at active-4.com>,
-              2006 Matt Good <matt at matt-good.net>,
-              2005 Raphael Slinckx <raphael at slinckx.net>
-"""
-import os
-import sys
-from optparse import OptionParser
-
-
-SCRIPT_NAME = os.path.basename(sys.argv[0])
-VERSION = '0.3'
-SERVICE_URL = 'http://paste.yt-project.org/'
-SETTING_KEYS = ['author', 'title', 'language', 'private', 'clipboard',
-                'open_browser']
-
-# global server proxy
-_xmlrpc_service = None
-
-
-def fail(msg, code):
-    """Bail out with an error message."""
-    print >> sys.stderr, 'ERROR: %s' % msg
-    sys.exit(code)
-
-
-def load_default_settings():
-    """Load the defaults from the lodgeitrc file."""
-    settings = {
-        'language':     None,
-        'clipboard':    True,
-        'open_browser': False,
-        'encoding':     'iso-8859-15'
-    }
-    rcfile = None
-    if os.name == 'posix':
-        rcfile = os.path.expanduser('~/.lodgeitrc')
-    elif os.name == 'nt' and 'APPDATA' in os.environ:
-        rcfile = os.path.expandvars(r'$APPDATA\_lodgeitrc')
-    if rcfile:
-        try:
-            f = open(rcfile)
-            for line in f:
-                if line.strip()[:1] in '#;':
-                    continue
-                p = line.split('=', 1)
-                if len(p) == 2:
-                    key = p[0].strip().lower()
-                    if key in settings:
-                        if key in ('clipboard', 'open_browser'):
-                            settings[key] = p[1].strip().lower() in \
-                                            ('true', '1', 'on', 'yes')
-                        else:
-                            settings[key] = p[1].strip()
-            f.close()
-        except IOError:
-            pass
-    settings['tags'] = []
-    settings['title'] = None
-    return settings
-
-
-def make_utf8(text, encoding):
-    """Convert a text to UTF-8, brute-force."""
-    try:
-        u = unicode(text, 'utf-8')
-        uenc = 'utf-8'
-    except UnicodeError:
-        try:
-            u = unicode(text, encoding)
-            uenc = 'utf-8'
-        except UnicodeError:
-            u = unicode(text, 'iso-8859-15', 'ignore')
-            uenc = 'iso-8859-15'
-    try:
-        import chardet
-    except ImportError:
-        return u.encode('utf-8')
-    d = chardet.detect(text)
-    if d['encoding'] == uenc:
-        return u.encode('utf-8')
-    return unicode(text, d['encoding'], 'ignore').encode('utf-8')
-
-
-def get_xmlrpc_service():
-    """Create the XMLRPC server proxy and cache it."""
-    global _xmlrpc_service
-    import xmlrpclib
-    if _xmlrpc_service is None:
-        try:
-            _xmlrpc_service = xmlrpclib.ServerProxy(SERVICE_URL + 'xmlrpc/',
-                                                    allow_none=True)
-        except Exception, err:
-            fail('Could not connect to Pastebin: %s' % err, -1)
-    return _xmlrpc_service
-
-
-def copy_url(url):
-    """Copy the url into the clipboard."""
-    # try windows first
-    try:
-        import win32clipboard
-    except ImportError:
-        # then give pbcopy a try.  do that before gtk because
-        # gtk might be installed on os x but nobody is interested
-        # in the X11 clipboard there.
-        from subprocess import Popen, PIPE
-        try:
-            client = Popen(['pbcopy'], stdin=PIPE)
-        except OSError:
-            try:
-                import pygtk
-                pygtk.require('2.0')
-                import gtk
-                import gobject
-            except ImportError:
-                return
-            gtk.clipboard_get(gtk.gdk.SELECTION_CLIPBOARD).set_text(url)
-            gobject.idle_add(gtk.main_quit)
-            gtk.main()
-        else:
-            client.stdin.write(url)
-            client.stdin.close()
-            client.wait()
-    else:
-        win32clipboard.OpenClipboard()
-        win32clipboard.EmptyClipboard()
-        win32clipboard.SetClipboardText(url)
-        win32clipboard.CloseClipboard()
-
-
-def open_webbrowser(url):
-    """Open a new browser window."""
-    import webbrowser
-    webbrowser.open(url)
-
-
-def language_exists(language):
-    """Check if a language alias exists."""
-    xmlrpc = get_xmlrpc_service()
-    langs = xmlrpc.pastes.getLanguages()
-    return language in langs
-
-
-def get_mimetype(data, filename):
-    """Try to get MIME type from data."""
-    try:
-        import gnomevfs
-    except ImportError:
-        from mimetypes import guess_type
-        if filename:
-            return guess_type(filename)[0]
-    else:
-        if filename:
-            return gnomevfs.get_mime_type(os.path.abspath(filename))
-        return gnomevfs.get_mime_type_for_data(data)
-
-
-def print_languages():
-    """Print a list of all supported languages, with description."""
-    xmlrpc = get_xmlrpc_service()
-    languages = xmlrpc.pastes.getLanguages().items()
-    languages.sort(lambda a, b: cmp(a[1].lower(), b[1].lower()))
-    print 'Supported Languages:'
-    for alias, name in languages:
-        print '    %-30s%s' % (alias, name)
-
-
-def download_paste(uid):
-    """Download a paste given by ID."""
-    xmlrpc = get_xmlrpc_service()
-    paste = xmlrpc.pastes.getPaste(uid)
-    if not paste:
-        fail('Paste "%s" does not exist.' % uid, 5)
-    print paste['code'].encode('utf-8')
-
-
-def create_paste(code, language, filename, mimetype, private):
-    """Create a new paste."""
-    xmlrpc = get_xmlrpc_service()
-    rv = xmlrpc.pastes.newPaste(language, code, None, filename, mimetype,
-                                private)
-    if not rv:
-        fail('Could not create paste. Something went wrong '
-             'on the server side.', 4)
-    return rv
-
-
-def compile_paste(filenames, langopt):
-    """Create a single paste out of zero, one or multiple files."""
-    def read_file(f):
-        try:
-            return f.read()
-        finally:
-            f.close()
-    mime = ''
-    lang = langopt or ''
-    if not filenames:
-        data = read_file(sys.stdin)
-        if not langopt:
-            mime = get_mimetype(data, '') or ''
-        fname = ""
-    elif len(filenames) == 1:
-        fname = filenames[0]
-        data = read_file(open(filenames[0], 'rb'))
-        if not langopt:
-            mime = get_mimetype(data, filenames[0]) or ''
-    else:
-        result = []
-        for fname in filenames:
-            data = read_file(open(fname, 'rb'))
-            if langopt:
-                result.append('### %s [%s]\n\n' % (fname, langopt))
-            else:
-                result.append('### %s\n\n' % fname)
-            result.append(data)
-            result.append('\n\n')
-        data = ''.join(result)
-        lang = 'multi'
-    return data, lang, fname, mime
-
-
-def main():
-    """Main script entry point."""
-
-    usage = ('Usage: %%prog [options] [FILE ...]\n\n'
-             'Read the files and paste their contents to %s.\n'
-             'If no file is given, read from standard input.\n'
-             'If multiple files are given, they are put into a single paste.'
-             % SERVICE_URL)
-    parser = OptionParser(usage=usage)
-
-    settings = load_default_settings()
-
-    parser.add_option('-v', '--version', action='store_true',
-                      help='Print script version')
-    parser.add_option('-L', '--languages', action='store_true', default=False,
-                      help='Retrieve a list of supported languages')
-    parser.add_option('-l', '--language', default=settings['language'],
-                      help='Used syntax highlighter for the file')
-    parser.add_option('-e', '--encoding', default=settings['encoding'],
-                      help='Specify the encoding of a file (default is '
-                           'utf-8 or guessing if available)')
-    parser.add_option('-b', '--open-browser', dest='open_browser',
-                      action='store_true',
-                      default=settings['open_browser'],
-                      help='Open the paste in a web browser')
-    parser.add_option('-p', '--private', action='store_true', default=False,
-                      help='Paste as private')
-    parser.add_option('--no-clipboard', dest='clipboard',
-                      action='store_false',
-                      default=settings['clipboard'],
-                      help="Don't copy the url into the clipboard")
-    parser.add_option('--download', metavar='UID',
-                      help='Download a given paste')
-
-    opts, args = parser.parse_args()
-
-    # special modes of operation:
-    # - paste script version
-    if opts.version:
-        print '%s: version %s' % (SCRIPT_NAME, VERSION)
-        sys.exit()
-    # - print list of languages
-    elif opts.languages:
-        print_languages()
-        sys.exit()
-    # - download Paste
-    elif opts.download:
-        download_paste(opts.download)
-        sys.exit()
-
-    # check language if given
-    if opts.language and not language_exists(opts.language):
-        fail('Language %s is not supported.' % opts.language, 3)
-
-    # load file(s)
-    try:
-        data, language, filename, mimetype = compile_paste(args, opts.language)
-    except Exception, err:
-        fail('Error while reading the file(s): %s' % err, 2)
-    if not data:
-        fail('Aborted, no content to paste.', 4)
-
-    # create paste
-    code = make_utf8(data, opts.encoding)
-    pid = create_paste(code, language, filename, mimetype, opts.private)
-    url = '%sshow/%s/' % (SERVICE_URL, pid)
-    print url
-    if opts.open_browser:
-        open_webbrowser(url)
-    if opts.clipboard:
-        copy_url(url)
-
-
-if __name__ == '__main__':
-    sys.exit(main())


https://bitbucket.org/yt_analysis/yt/commits/332026e6bc3e/
Changeset:   332026e6bc3e
Branch:      yt
User:        ngoldbaum
Date:        2015-08-28 17:26:39+00:00
Summary:     Ensure pr_backport.py is not deployed during installation
Affected #:  2 files

diff -r cef05fa29af32c93fb886c32e3dfa13d7f8a7e84 -r 332026e6bc3e175127a1a5d27adfe367bf0a62ad MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -3,6 +3,7 @@
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js
 include yt/visualization/mapserver/html/leaflet/images/*.png
+exclude scripts/pr_backport.py
 recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE* *.cu
 recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.inc *.html
 recursive-include doc *.h *.c *.sh *.svgz *.pdf *.svg *.pyx

diff -r cef05fa29af32c93fb886c32e3dfa13d7f8a7e84 -r 332026e6bc3e175127a1a5d27adfe367bf0a62ad setup.py
--- a/setup.py
+++ b/setup.py
@@ -164,7 +164,7 @@
     config.make_config_py()
     # config.make_svn_version_py()
     config.add_subpackage('yt', 'yt')
-    config.add_scripts("scripts/*")
+    config.add_scripts("scripts/iyt")
 
     return config
 


https://bitbucket.org/yt_analysis/yt/commits/9257bbdc2f85/
Changeset:   9257bbdc2f85
Branch:      yt
User:        ngoldbaum
Date:        2015-08-28 22:55:38+00:00
Summary:     Add basic screening for PRs that have already been backported
Affected #:  1 file

diff -r 332026e6bc3e175127a1a5d27adfe367bf0a62ad -r 9257bbdc2f85d9a072ae88197cf5c71b13dc3aa5 scripts/pr_backport.py
--- a/scripts/pr_backport.py
+++ b/scripts/pr_backport.py
@@ -127,7 +127,7 @@
         if pr['merge_commit'] is not None:
             if pr['merge_commit']['hash'] == needle[1][:12]:
                 return pr
-    raise RuntimeError
+    return None
 
 
 def create_commits_to_prs_mapping(linege, prs):
@@ -142,7 +142,10 @@
         cset_hash = commit[1]
         message = commit[5]
         if message.startswith('Merged in') and '(pull request #' in message:
-            commits_to_prs[cset_hash] = find_merge_commit_in_prs(commit, my_prs)
+            pr = find_merge_commit_in_prs(commit, my_prs)
+            if pr is None:
+                continue
+            commits_to_prs[cset_hash] = pr
             # Since we know this PR won't have another commit associated with it,
             # remove from global list to reduce number of network accesses
             my_prs.remove(commits_to_prs[cset_hash])
@@ -181,8 +184,11 @@
 
 def get_no_pr_commits(repo_path, inv_map):
     """"get a list of commits that aren't in any pull request"""
-    no_pr_commits = inv_map[None]
-    del inv_map[None]
+    try:
+        no_pr_commits = inv_map[None]
+        del inv_map[None]
+    except KeyError:
+        no_pr_commits = []
     with hglib.open(repo_path) as client:
         # remove merge commits since they can't be grafted
         no_pr_commits = [com for com in no_pr_commits if
@@ -190,6 +196,35 @@
     return no_pr_commits
 
 
+def screen_already_backported(repo_path, inv_map, no_pr_commits):
+    with hglib.open(repo_path) as client:
+        most_recent_tag_name = client.log("reverse(tag())")[0][2]
+        lineage = client.log(
+            "descendants(%s) and branch(stable)" % most_recent_tag_name)
+        for commit in no_pr_commits:
+            lineage.remove(commit)
+        prs_to_screen = []
+        for pr in inv_map:
+            for commit in lineage:
+                if commit[5].startswith('Backporting PR #%s' % pr[0]):
+                    prs_to_screen.append(pr)
+        for pr in prs_to_screen:
+            del inv_map[pr]
+        return inv_map, no_pr_commits
+
+def commit_already_on_stable(repo_path, commit):
+    with hglib.open(repo_path) as client:
+        commit_info = client.log(commit)[0]
+        most_recent_tag_name = client.log("reverse(tag())")[0][2]
+        lineage = client.log(
+            "descendants(%s) and branch(stable)" % most_recent_tag_name)
+        # if there is a stable commit with the same commit message,
+        # it's been grafted
+        if any([commit_info[5] == c[5] for c in lineage]):
+            return True
+        return False
+
+
 def backport_no_pr_commits(repo_path, no_pr_commits):
     """backports commits that aren't in a pull request"""
     for commit in no_pr_commits:
@@ -205,7 +240,6 @@
         raw_input('Press any key to continue')
         print ""
 
-
 def backport_pr_commits(repo_path, inv_map, last_stable, prs):
     """backports pull requests to the stable branch.
 
@@ -215,8 +249,6 @@
     pr_list = inv_map.keys()
     pr_list = sorted(pr_list, key=lambda x: x[2])
     for pr_desc in pr_list:
-        print "PR #%s\nTitle: %s\nCreated on: %s\nLink: %s\n%s" % pr_desc
-        print "To backport, issue the following command(s):\n"
         pr = [pr for pr in prs if pr['id'] == pr_desc[0]][0]
         data = requests.get(pr['links']['commits']['href']).json()
         commits = data['values']
@@ -234,7 +266,11 @@
                 (revset, message, dest)
             message += "hg update stable\n"
         else:
+            if commit_already_on_stable(repo_path, commits[0]) is True:
+                continue
             message = "hg graft %s\n" % commits[0]
+        print "PR #%s\nTitle: %s\nCreated on: %s\nLink: %s\n%s" % pr_desc
+        print "To backport, issue the following command(s):\n"
         print message
         raw_input('Press any key to continue')
 
@@ -255,6 +291,8 @@
         commits_to_prs = create_commits_to_prs_mapping(lineage, prs)
         inv_map = invert_commits_to_prs_mapping(commits_to_prs)
         no_pr_commits = get_no_pr_commits(repo_path, inv_map)
+        inv_map, no_pr_commits = \
+            screen_already_backported(repo_path, inv_map, no_pr_commits)
         print "In another terminal window, navigate to the following path:"
         print "%s" % repo_path
         raw_input("Press any key to continue")


https://bitbucket.org/yt_analysis/yt/commits/34c12c260c5b/
Changeset:   34c12c260c5b
Branch:      yt
User:        ngoldbaum
Date:        2015-08-31 17:55:35+00:00
Summary:     Warn when rebase contains merge commits, since these can pull in unwanted changes
Affected #:  1 file

diff -r 9257bbdc2f85d9a072ae88197cf5c71b13dc3aa5 -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 scripts/pr_backport.py
--- a/scripts/pr_backport.py
+++ b/scripts/pr_backport.py
@@ -240,6 +240,7 @@
         raw_input('Press any key to continue')
         print ""
 
+
 def backport_pr_commits(repo_path, inv_map, last_stable, prs):
     """backports pull requests to the stable branch.
 
@@ -249,6 +250,8 @@
     pr_list = inv_map.keys()
     pr_list = sorted(pr_list, key=lambda x: x[2])
     for pr_desc in pr_list:
+        merge_warn = False
+        merge_commits = []
         pr = [pr for pr in prs if pr['id'] == pr_desc[0]][0]
         data = requests.get(pr['links']['commits']['href']).json()
         commits = data['values']
@@ -256,15 +259,31 @@
             data = requests.get(data['next']).json()
             commits.extend(data['values'])
         commits = [com['hash'][:12] for com in commits]
+        with hglib.open(repo_path) as client:
+            for com in commits:
+                if client.log('merge() and %s' % com) != []:
+                    merge_warn = True
+                    merge_commits.append(com)
         if len(commits) > 1:
-            revset = commits[-1] + '::' + commits[0]
+            revset = " | ".join(commits)
+            revset = '"%s"' % revset
             message = "Backporting PR #%s %s" % \
                 (pr['id'], pr['links']['html']['href'])
             dest = get_last_descendant(repo_path, last_stable)
             message = \
                 "hg rebase -r %s --keep --collapse -m \"%s\" -d %s\n" % \
                 (revset, message, dest)
-            message += "hg update stable\n"
+            message += "hg update stable\n\n"
+            if merge_warn is True:
+                if len(merge_commits) > 1:
+                    merge_commits = ", ".join(merge_commits)
+                else:
+                    merge_commits = merge_commits[0]
+                message += \
+                    "WARNING, PULL REQUEST CONTAINS MERGE COMMITS, CONSIDER\n" \
+                    "BACKPORTING BY HAND TO AVOID BACKPORTING UNWANTED CHANGES\n"
+                message += \
+                    "Merge commits are %s\n\n" % merge_commits
         else:
             if commit_already_on_stable(repo_path, commits[0]) is True:
                 continue


https://bitbucket.org/yt_analysis/yt/commits/58d619fa41d9/
Changeset:   58d619fa41d9
Branch:      yt
User:        ngoldbaum
Date:        2015-09-21 20:15:17+00:00
Summary:     Merging with mainline
Affected #:  59 files

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,11 +1,11 @@
-include distribute_setup.py README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt
+include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt
 include yt/visualization/mapserver/html/map_index.html
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js
 include yt/visualization/mapserver/html/leaflet/images/*.png
 exclude scripts/pr_backport.py
 recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE* *.cu
-recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.inc *.html
+recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.html
 recursive-include doc *.h *.c *.sh *.svgz *.pdf *.svg *.pyx
 include doc/README doc/activate doc/activate.csh doc/cheatsheet.tex
 include doc/extensions/README doc/Makefile
@@ -13,5 +13,3 @@
 prune doc/build
 recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
 prune yt/frontends/_skeleton
-prune tests
-exclude clean.sh .hgchurn

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 doc/source/analyzing/filtering.rst
--- a/doc/source/analyzing/filtering.rst
+++ b/doc/source/analyzing/filtering.rst
@@ -111,7 +111,7 @@
 
 .. code-block:: python
 
-    @yt.particle_filter(requires=["particle_type], filtered_type='all')
+    @yt.particle_filter(requires=["particle_type"], filtered_type='all')
     def stars(pfilter, data):
         filter = data[(pfilter.filtered_type, "particle_type")] == 2
         return filter

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:4d19ee42177c60fb4b39550b5acd7a0f7e97f59f5c2da3565ff42cdd580454b0"
+  "signature": "sha256:6a06d5720eb6316ac0d322ef0898ec20f33d65ea3eeeacef35ae1d869af12607"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -352,7 +352,7 @@
      "level": 3,
      "metadata": {},
      "source": [
-      "Round-Trip Conversions to and from AstroPy's Units System"
+      "Round-Trip Conversions to and from Other Unit Systems"
      ]
     },
     {
@@ -503,6 +503,58 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also do the same thing with unitful quantities from the [Pint package](http://pint.readthedocs.org), using essentially the same procedure:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from pint import UnitRegistry\n",
+      "ureg = UnitRegistry()\n",
+      "v = 1000.*ureg.km/ureg.s\n",
+      "w = yt.YTQuantity.from_pint(v)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print v, type(v)\n",
+      "print w, type(w)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ptemp = temp.to_pint()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print temp, type(temp)\n",
+      "print ptemp, type(ptemp)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -84,6 +84,9 @@
 * :func:`~yt.testing.assert_equal` can operate on arrays.
 * :func:`~yt.testing.assert_almost_equal` can operate on arrays and accepts a
   relative allowable difference.
+* :func:`~yt.testing.assert_allclose_units` raises an error if two arrays are
+  not equal up to a desired absolute or relative tolerance. This wraps numpy's
+  assert_allclose to correctly verify unit consistency as well.
 * :func:`~yt.testing.amrspace` provides the ability to create AMR grid
   structures.
 * :func:`~yt.testing.expand_keywords` provides the ability to iterate over
@@ -99,9 +102,10 @@
 #. Inside that directory, create a new python file prefixed with ``test_`` and
    including the name of the functionality.
 #. Inside that file, create one or more routines prefixed with ``test_`` that
-   accept no arguments.  These should ``yield`` a set of values of the form
-   ``function``, ``arguments``.  For example ``yield assert_equal, 1.0, 1.0``
-   would evaluate that 1.0 equaled 1.0.
+   accept no arguments.  These should ``yield`` a tuple of the form
+   ``function``, ``argument_one``, ``argument_two``, etc.  For example
+   ``yield assert_equal, 1.0, 1.0`` would be captured by nose as a test that
+   asserts that 1.0 is equal to 1.0.
 #. Use ``fake_random_ds`` to test on datasets, and be sure to test for
    several combinations of ``nproc``, so that domain decomposition can be
    tested as well.
@@ -113,6 +117,53 @@
 ``yt/data_objects/tests/test_covering_grid.py``, which covers a great deal of
 functionality.
 
+Debugging failing tests
+^^^^^^^^^^^^^^^^^^^^^^^
+
+When writing new tests, often one exposes bugs or writes a test incorrectly,
+causing an exception to be raised or a failed test. To help debug issues like
+this, ``nose`` can drop into a debugger whenever a test fails or raises an
+exception. This can be accomplished by passing ``--pdb`` and ``--pdb-failures``
+to the ``nosetests`` executable. These options will drop into the pdb debugger
+whenever an error is raised or a failure happens, respectively. Inside the
+debugger you can interactively print out variables and go up and down the call
+stack to determine the context for your failure or error.
+
+.. code-block:: bash
+
+    nosetests --pdb --pdb-failures
+
+In addition, one can debug more crudely using print statements. To do this,
+you can add print statements to the code as normal. However, the test runner
+will capture all print output by default. To ensure that output gets printed
+to your terminal while the tests are running, pass ``-s`` to the ``nosetests``
+executable.
+
+Lastly, to quickly debug a specific failing test, it is best to only run that
+one test during your testing session. This can be accomplished by explicitly
+passing the name of the test function or class to ``nosetests``, as in the
+following example:
+
+.. code-block:: bash
+
+    $ nosetests yt.visualization.tests.test_plotwindow:TestSetWidth
+
+This nosetests invocation will only run the tests defined by the
+``TestSetWidth`` class.
+
+Finally, to determine which test is failing while the tests are running, it helps
+to run the tests in "verbose" mode. This can be done by passing the ``-v`` option
+to the ``nosetests`` executable.
+
+All of the above ``nosetests`` options can be combined. So, for example to run
+the ``TestSetWidth`` tests with verbose output, letting the output of print
+statements come out on the terminal prompt, and enabling pdb debugging on errors
+or test failures, one would do:
+
+.. code-block:: bash
+
+    $ nosetests --pdb --pdb-failures -v -s yt.visualization.tests.test_plotwindow:TestSetWidth
+
 .. _answer_testing:
 
 Answer Testing
@@ -122,8 +173,8 @@
 ^^^^^^^^^^^^^^^^^^^^^^^
 
 Answer tests test **actual data**, and many operations on that data, to make
-sure that answers don't drift over time.  This is how we will be testing
-frontends, as opposed to operations, in yt.
+sure that answers don't drift over time.  This is how we test frontends, as
+opposed to operations, in yt.
 
 .. _run_answer_testing:
 
@@ -133,20 +184,104 @@
 The very first step is to make a directory and copy over the data against which
 you want to test.  Currently, we test:
 
+NMSU ART
+~~~~~~~~
+
+* ``D9p_500/10MpcBox_HartGal_csf_a0.500.d``
+
+ARTIO
+~~~~~
+
+* ``sizmbhloz-clref04SNth-rs9_a0.9011/sizmbhloz-clref04SNth-rs9_a0.9011.art``
+
+Athena
+~~~~~~
+
+* ``ShockCloud/id0/Cloud.0050.vtk``
+* ``MHDBlast/id0/Blast.0100.vtk``
+* ``RamPressureStripping/id0/rps.0062.vtk``
+* ``MHDSloshing/virgo_low_res.0054.vtk``
+
+Boxlib
+~~~~~~
+
+* ``RadAdvect/plt00000``
+* ``RadTube/plt00500``
+* ``StarParticles/plrd01000``
+
+Chombo
+~~~~~~
+
+* ``TurbBoxLowRes/data.0005.3d.hdf5``
+* ``GaussianCloud/data.0077.3d.hdf5``
+* ``IsothermalSphere/data.0000.3d.hdf5``
+* ``ZeldovichPancake/plt32.2d.hdf5``
+* ``KelvinHelmholtz/data.0004.hdf5``
+
+Enzo
+~~~~
+
 * ``DD0010/moving7_0010`` (available in ``tests/`` in the yt distribution)
 * ``IsolatedGalaxy/galaxy0030/galaxy0030``
+* ``enzo_tiny_cosmology/DD0046/DD0046``
+* ``enzo_cosmology_plus/DD0046/DD0046``
+
+FITS
+~~~~
+
+* ``radio_fits/grs-50-cube.fits``
+* ``UnigridData/velocity_field_20.fits``
+
+FLASH
+~~~~~
+
 * ``WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030``
 * ``GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300``
-* ``TurbBoxLowRes/data.0005.3d.hdf5``
-* ``GaussianCloud/data.0077.3d.hdf5``
-* ``RadAdvect/plt00000``
-* ``RadTube/plt00500``
+
+Gadget
+~~~~~~
+
+* ``IsothermalCollapse/snap_505``
+* ``IsothermalCollapse/snap_505.hdf5``
+* ``GadgetDiskGalaxy/snapshot_200.hdf5``
+
+Halo Catalog
+~~~~~~~~~~~~
+
+* ``owls_fof_halos/groups_001/group_001.0.hdf5``
+* ``owls_fof_halos/groups_008/group_008.0.hdf5``
+* ``gadget_fof_halos/groups_005/fof_subhalo_tab_005.0.hdf5``
+* ``gadget_fof_halos/groups_042/fof_subhalo_tab_042.0.hdf5``
+* ``rockstar_halos/halos_0.0.bin``
+
+MOAB
+~~~~
+
+* ``c5/c5.h5m``
+
+RAMSES
+~~~~~~
+
+* ``output_00080/info_00080.txt``
+
+Tipsy
+~~~~~
+
+* ``halo1e11_run1.00400/halo1e11_run1.00400``
+* ``agora_1e11.00400/agora_1e11.00400``
+* ``TipsyGalaxy/galaxy.00300``
+
+OWLS
+~~~~
+
+* ``snapshot_033/snap_033.0.hdf5``
 
 These datasets are available at http://yt-project.org/data/.
 
 Next, modify the file ``~/.yt/config`` to include a section ``[yt]``
 with the parameter ``test_data_dir``.  Set this to point to the
-directory with the test data you want to compare.  Here is an example
+directory containing the test data you want to use.  Here is an example
 config file:
 
 .. code-block:: none
@@ -154,47 +289,45 @@
    [yt]
    test_data_dir = /Users/tomservo/src/yt-data
 
-More data will be added over time.  To run the tests, you can import the yt
-module and invoke ``yt.run_nose()`` with a new keyword argument:
+More data will be added over time.  To run the answer tests, you must first
+generate a set of test answers locally on a "known good" revision, then update
+to the revision you want to test, and run the tests again using the locally
+stored answers.
 
-.. code-block:: python
-
-   import yt
-   yt.run_nose(run_answer_tests=True)
-
-If you have installed yt using ``python setup.py develop`` you can also
-optionally invoke nose using the ``nosetests`` command line interface:
+Let's focus on running the answer tests for a single frontend. It's possible to
+run the answer tests for **all** the frontends, but due to the large number of
+test datasets we currently use, this is not normally done except on the yt
+project's continuous integration server.
 
 .. code-block:: bash
 
    $ cd $YT_HG
-   $ nosetests --with-answer-testing
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store frontends.tipsy
 
-In either case, the current gold standard results will be downloaded from the
-rackspace cloud and compared to what is generated locally.  The results from a
-nose testing session are pretty straightforward to understand, the results for
-each test are printed directly to STDOUT. If a test passes, nose prints a
-period, F if a test fails, and E if the test encounters an exception or errors
-out for some reason.  If you want to also run tests for the 'big' datasets,
-then you can use the ``answer_big_data`` keyword argument:
-
-.. code-block:: python
-
-   import yt
-   yt.run_nose(run_answer_tests=True, answer_big_data=True)
-
-or, in the base directory of the yt mercurial repository:
+This command will create a set of local answers from the tipsy frontend tests
+and store them in ``$HOME/Documents/test`` (this can be, but does not have to
+be, the directory pointed to by the ``test_data_dir`` setting in your
+``.yt/config`` file). To run the tipsy frontend's answer tests using a different
+yt changeset, update to that changeset, recompile if necessary, and run the
+tests using the following command:
 
 .. code-block:: bash
 
-   $ nosetests --with-answer-testing --answer-big-data
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test frontends.tipsy
 
-It's also possible to only run the answer tests for one frontend.  For example,
-to run only the enzo answers tests, one can do,
+The results from a nose testing session are pretty straightforward to
+understand: the result for each test is printed directly to STDOUT.  If a test
+passes, nose prints a period; if a test fails, it prints an F; and if a test
+encounters an exception or otherwise errors out, it prints an E.  Explicit
+descriptions for each test
+are also printed if you pass ``-v`` to the ``nosetests`` executable.  If you
+want to also run tests for the 'big' datasets, then you will need to pass
+``--answer-big-data`` to ``nosetests``.  For example, to run the tests for the
+OWLS frontend, do the following:
 
 .. code-block:: bash
 
-   $ nosetests --with-answer-testing yt.frontends.enzo
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store --answer-big-data frontends.owls
+
 
 How to Write Answer Tests
 ^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -260,38 +393,21 @@
   directory.
 
 * Create a new routine that operates similarly to the routines you can see
-  in Enzo's outputs.
+  in Enzo's output tests.
 
   * This routine should test a number of different fields and data objects.
 
   * The test routine itself should be decorated with
-    ``@requires_ds(file_name)``  This decorate can accept the argument
-    ``big_data`` for if this data is too big to run all the time.
+    ``@requires_ds(path_to_test_dataset)``. This decorator can accept the
+    argument ``big_data=True`` if the test is expensive.
 
-  * There are ``small_patch_amr`` and ``big_patch_amr`` routines that
-    you can yield from to execute a bunch of standard tests.  This is where
-    you should start, and then yield additional tests that stress the
-    outputs in whatever ways are necessary to ensure functionality.
+  * There are ``small_patch_amr`` and ``big_patch_amr`` routines that you can
+    yield from to execute a bunch of standard tests. In addition, we have
+    created ``sph_answer``, which is better suited to SPH particle datasets.
+    This is where you should start, and then yield additional tests that stress
+    the outputs in whatever ways are necessary to ensure functionality (a short
+    sketch follows after this list).
 
   * **All tests should be yielded!**
 
 If you are adding to a frontend that has a few tests already, skip the first
 two steps.
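+
+As a concrete sketch of the pattern described above, an answer test routine for
+a hypothetical frontend might look like the following (the path
+``MyCode/output_0010`` and the field tuple are placeholders, not datasets we
+actually test against):
+
+.. code-block:: python
+
+   from yt.utilities.answer_testing.framework import \
+       requires_ds, small_patch_amr
+
+   my_output = "MyCode/output_0010"
+   _fields = (("gas", "density"), ("gas", "temperature"))
+
+   @requires_ds(my_output)
+   def test_my_output():
+       # Yield the standard battery of patch AMR answer tests for each field;
+       # frontend-specific tests can be yielded afterwards.
+       for test in small_patch_amr(my_output, _fields):
+           yield test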
-
-How to Upload Answers
-^^^^^^^^^^^^^^^^^^^^^
-
-To upload answers you can execute this command:
-
-.. code-block:: bash
-
-   $ nosetests --with-answer-testing frontends/enzo/ --answer-store --answer-name=whatever
-
-The current version of the gold standard can be found in the variable
-``_latest`` inside ``yt/utilities/answer_testing/framework.py``  As of
-the time of this writing, it is ``gold007``  Note that the name of the
-suite of results is now disconnected from the dataset's name, so you
-can upload multiple outputs with the same name and not collide.
-
-To upload answers, you **must** have the package boto installed, and you
-**must** have an Amazon key provided by Matt.  Contact Matt for these keys.

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1257,8 +1257,8 @@
 
 .. _specifying-cosmology-tipsy:
 
-Specifying Tipsy Cosmological Parameters
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Specifying Tipsy Cosmological Parameters and Setting Default Units
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Cosmological parameters can be specified to Tipsy to enable computation of
 default units.  The parameters recognized are of this form:
@@ -1270,5 +1270,27 @@
                            'omega_matter': 0.272,
                            'hubble_constant': 0.702}
 
-These will be used set the units, if they are specified.
+If you wish to set the default units directly, you can do so by using the
+``unit_base`` keyword in the load statement.
 
+.. code-block:: python
+
+    import yt
+    ds = yt.load(filename, unit_base={'length': (1.0, 'Mpc')})
+
+
+Loading Cosmological Simulations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you are not using a parameter file (i.e., you are not a Gasoline user), then
+you must use the ``cosmology_parameters`` keyword when loading your data set to
+indicate to yt that it is a cosmological data set. If you do not wish to set any
+non-default cosmological parameters, you may pass an empty dictionary.
+
+.. code-block:: python
+
+    import yt
+    ds = yt.load(filename, cosmology_parameters={})
+
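+For instance, a cosmological Tipsy run without a parameter file could be loaded
+by combining explicit cosmological parameters with a ``unit_base`` (the values
+below are borrowed from yt's own Tipsy answer tests; ``filename`` is a
+placeholder):
+
+.. code-block:: python
+
+    import yt
+    cosmology_parameters = {'current_redshift': 0.0,
+                            'omega_lambda': 0.728,
+                            'omega_matter': 0.272,
+                            'hubble_constant': 0.702}
+    ds = yt.load(filename,
+                 cosmology_parameters=cosmology_parameters,
+                 unit_base={'length': (1.0/60.0, 'Mpccm/h')})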
+
+

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -32,18 +32,6 @@
     match, at which point it returns an instance of the appropriate
     :class:`yt.data_objects.api.Dataset` subclass.
     """
-    if len(args) == 0:
-        try:
-            from yt.extern.six.moves import tkinter
-            import tkinter, tkFileDialog
-        except ImportError:
-            raise YTOutputNotIdentified(args, kwargs)
-        root = tkinter.Tk()
-        filename = tkFileDialog.askopenfilename(parent=root,title='Choose a file')
-        if filename != None:
-            return load(filename)
-        else:
-            raise YTOutputNotIdentified(args, kwargs)
     candidates = []
     args = [os.path.expanduser(arg) if isinstance(arg, str)
             else arg for arg in args]
@@ -100,32 +88,6 @@
         mylog.error("    Possible: %s", c)
     raise YTOutputNotIdentified(args, kwargs)
 
-def projload(ds, axis, weight_field = None):
-    # This is something of a hack, so that we can just get back a projection
-    # and not utilize any of the intermediate index objects.
-    class ProjMock(dict):
-        pass
-    import h5py
-    f = h5py.File(os.path.join(ds.fullpath, ds.parameter_filename + ".yt"))
-    b = f["/Projections/%s/" % (axis)]
-    wf = "weight_field_%s" % weight_field
-    if wf not in b: raise KeyError(wf)
-    fields = []
-    for k in b:
-        if k.startswith("weight_field"): continue
-        if k.endswith("_%s" % weight_field):
-            fields.append(k)
-    proj = ProjMock()
-    for f in ["px","py","pdx","pdy"]:
-        proj[f] = b[f][:]
-    for f in fields:
-        new_name = f[:-(len(weight_field) + 1)]
-        proj[new_name] = b[f][:]
-    proj.axis = axis
-    proj.ds = ds
-    f.close()
-    return proj
-
 def simulation(parameter_filename, simulation_type, find_outputs=False):
     """
     Loads a simulation time series object of the specified

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -136,6 +136,8 @@
         # that dx=dy=dz, at least here.  We probably do elsewhere.
         id = self.id - self._id_offset
         if self.Parent is not None:
+            if not hasattr(self.Parent, 'dds'):
+                self.Parent._setup_dx()
             self.dds = self.Parent.dds.ndarray_view() / self.ds.refine_by
         else:
             LE, RE = self.index.grid_left_edge[id,:], \

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -159,7 +159,7 @@
             indexed octree will be constructed on these particles.
         fields : list of arrays
             All the necessary fields for computing the particle operation.  For
-            instance, this might include mass, velocity, etc.  
+            instance, this might include mass, velocity, etc.
         method : string
             This is the "method name" which will be looked up in the
             `particle_deposit` namespace as `methodname_deposit`.  Current
@@ -212,7 +212,7 @@
             indexed octree will be constructed on these particles.
         fields : list of arrays
             All the necessary fields for computing the particle operation.  For
-            instance, this might include mass, velocity, etc.  
+            instance, this might include mass, velocity, etc.
         index_fields : list of arrays
             All of the fields defined on the mesh that may be used as input to
             the operation.
@@ -265,11 +265,14 @@
         op.initialize()
         mylog.debug("Smoothing %s particles into %s Octs",
             positions.shape[0], nvals[-1])
-        op.process_octree(self.oct_handler, mdom_ind, positions, 
+        # Pointer operations within 'process_octree' require arrays to be
+        # contiguous cf. https://bitbucket.org/yt_analysis/yt/issues/1079
+        fields = [np.ascontiguousarray(f, dtype="float64") for f in fields]
+        op.process_octree(self.oct_handler, mdom_ind, positions,
             self.fcoords, fields,
             self.domain_id, self._domain_offset, self.ds.periodicity,
             index_fields, particle_octree, pdom_ind, self.ds.geometry)
-        # If there are 0s in the smoothing field this will not throw an error, 
+        # If there are 0s in the smoothing field this will not throw an error,
         # but silently return nans for vals where dividing by 0
         # Same as what is currently occurring, but suppressing the div by zero
         # error.
@@ -342,7 +345,7 @@
         op.initialize()
         mylog.debug("Smoothing %s particles into %s Octs",
             positions.shape[0], nvals[-1])
-        op.process_particles(particle_octree, pdom_ind, positions, 
+        op.process_particles(particle_octree, pdom_ind, positions,
             fields, self.domain_id, self._domain_offset, self.ds.periodicity,
             self.ds.geometry)
         vals = op.finalize()
@@ -494,7 +497,7 @@
         LE -= np.abs(LE) * eps
         RE = self.max(axis=0)
         RE += np.abs(RE) * eps
-        octree = ParticleOctreeContainer(dims, LE, RE, 
+        octree = ParticleOctreeContainer(dims, LE, RE,
             over_refine = over_refine_factor)
         octree.n_ref = n_ref
         octree.add(mi)

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -691,8 +691,7 @@
 
         """
         from yt.units.dimensions import length
-        if hasattr(self, "cosmological_simulation") \
-           and getattr(self, "cosmological_simulation"):
+        if getattr(self, "cosmological_simulation", False):
             # this dataset is cosmological, so add cosmological units.
             self.unit_registry.modify("h", self.hubble_constant)
             # Comoving lengths
@@ -705,16 +704,15 @@
 
         self.set_code_units()
 
-        if hasattr(self, "cosmological_simulation") \
-           and getattr(self, "cosmological_simulation"):
+        if getattr(self, "cosmological_simulation", False):
             # this dataset is cosmological, add a cosmology object
-            setattr(self, "cosmology",
+            self.cosmology = \
                     Cosmology(hubble_constant=self.hubble_constant,
                               omega_matter=self.omega_matter,
                               omega_lambda=self.omega_lambda,
-                              unit_registry=self.unit_registry))
-            setattr(self, "critical_density",
-                    self.cosmology.critical_density(self.current_redshift))
+                              unit_registry=self.unit_registry)
+            self.critical_density = \
+                    self.cosmology.critical_density(self.current_redshift)
             self.scale_factor = 1.0 / (1.0 + self.current_redshift)
 
     def get_unit_from_registry(self, unit_str):

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -156,7 +156,7 @@
     def __iter__(self):
         # We can make this fancier, but this works
         for o in self._pre_outputs:
-            if isinstance(o, str):
+            if isinstance(o, string_types):
                 ds = load(o, **self.kwargs)
                 self._setup_function(ds)
                 yield ds
@@ -170,7 +170,7 @@
             # This will return a sliced up object!
             return DatasetSeries(self._pre_outputs[key], self.parallel)
         o = self._pre_outputs[key]
-        if isinstance(o, str):
+        if isinstance(o, string_types):
             o = load(o, **self.kwargs)
             self._setup_function(o)
         return o
@@ -248,13 +248,31 @@
 
         """
         dynamic = False
-        if self.parallel == False:
+        if self.parallel is False:
             njobs = 1
         else:
-            if self.parallel == True: njobs = -1
-            else: njobs = self.parallel
-        return parallel_objects(self, njobs=njobs, storage=storage,
-                                dynamic=dynamic)
+            if self.parallel is True:
+                njobs = -1
+            else:
+                njobs = self.parallel
+
+        for output in parallel_objects(self._pre_outputs, njobs=njobs,
+                                       storage=storage, dynamic=dynamic):
+            if storage is not None:
+                sto, output = output
+
+            if isinstance(output, string_types):
+                ds = load(output, **self.kwargs)
+                self._setup_function(ds)
+            else:
+                ds = output
+
+            if storage is not None:
+                next_ret = (sto, ds)
+            else:
+                next_ret = ds
+
+            yield next_ret
 
     def eval(self, tasks, obj=None):
         tasks = ensure_list(tasks)
@@ -323,13 +341,13 @@
 
         """
         
-        if isinstance(filenames, str):
+        if isinstance(filenames, string_types):
             filenames = get_filenames_from_glob_pattern(filenames)
 
         # This will crash with a less informative error if filenames is not
         # iterable, but the plural keyword should give users a clue...
         for fn in filenames:
-            if not isinstance(fn, str):
+            if not isinstance(fn, string_types):
                 raise YTOutputNotIdentified("DataSeries accepts a list of "
                                             "strings, but "
                                             "received {0}".format(fn))

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/fields/angular_momentum.py
--- a/yt/fields/angular_momentum.py
+++ b/yt/fields/angular_momentum.py
@@ -15,12 +15,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import types
 import numpy as np
-import inspect
-import copy
-
-from yt.units.yt_array import YTArray
 
 from .derived_field import \
     ValidateParameter
@@ -29,8 +24,8 @@
     register_field_plugin
 
 from .vector_operations import \
-     create_magnitude_field
-    
+    create_magnitude_field
+
 from yt.utilities.lib.geometry_utils import \
     obtain_rvec, obtain_rv_vec
 
@@ -78,7 +73,7 @@
 
     create_magnitude_field(registry, "specific_angular_momentum",
                            "cm**2 / s", ftype=ftype)
-    
+
     def _angular_momentum_x(field, data):
         return data[ftype, "cell_mass"] \
              * data[ftype, "specific_angular_momentum_x"]
@@ -105,4 +100,3 @@
 
     create_magnitude_field(registry, "angular_momentum",
                            "g * cm**2 / s", ftype=ftype)
-                           

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/fields/astro_fields.py
--- a/yt/fields/astro_fields.py
+++ b/yt/fields/astro_fields.py
@@ -16,8 +16,7 @@
 import numpy as np
 
 from .derived_field import \
-    ValidateParameter, \
-    ValidateSpatial
+    ValidateParameter
 from .field_exceptions import \
     NeedsParameter
 from .field_plugin_registry import \
@@ -30,7 +29,7 @@
     clight, \
     kboltz, \
     G
-    
+
 @register_field_plugin
 def setup_astro_fields(registry, ftype = "gas", slice_info = None):
     # slice_info would be the left, the right, and the factor.
@@ -45,7 +44,7 @@
         div_fac = 2.0
     else:
         sl_left, sl_right, div_fac = slice_info
-    
+
     def _dynamical_time(field, data):
         """
         sqrt(3 pi / (16 G rho))
@@ -71,7 +70,7 @@
 
     def _chandra_emissivity(field, data):
         logT0 = np.log10(data[ftype, "temperature"].to_ndarray().astype(np.float64)) - 7
-        # we get rid of the units here since this is a fit and not an 
+        # we get rid of the units here since this is a fit and not an
         # analytical expression
         return data.ds.arr(data[ftype, "number_density"].to_ndarray().astype(np.float64)**2
                            * (10**(- 0.0103 * logT0**8 + 0.0417 * logT0**7
@@ -91,7 +90,7 @@
     registry.add_field((ftype, "chandra_emissivity"),
                        function=_chandra_emissivity,
                        units="") # add correct units here
-    
+
     def _xray_emissivity(field, data):
         # old scaling coefficient was 2.168e60
         return data.ds.arr(data[ftype, "density"].to_ndarray().astype(np.float64)**2
@@ -110,7 +109,7 @@
     registry.add_field((ftype,"mazzotta_weighting"),
                        function=_mazzotta_weighting,
                        units="keV**-0.25*cm**-6")
-    
+
     def _sz_kinetic(field, data):
         scale = 0.88 * sigma_thompson / mh / clight
         vel_axis = data.get_field_parameter("axis")

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/fields/astro_simulations.py
--- a/yt/fields/astro_simulations.py
+++ b/yt/fields/astro_simulations.py
@@ -13,7 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
 from .domain_context import DomainContext
 
 # Here's how this all works:

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/fields/cosmology_fields.py
--- a/yt/fields/cosmology_fields.py
+++ b/yt/fields/cosmology_fields.py
@@ -14,21 +14,17 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
+from .derived_field import \
+    ValidateParameter
+from .field_exceptions import \
+    NeedsConfiguration, \
+    NeedsParameter
+from .field_plugin_registry import \
+    register_field_plugin
 
-from .derived_field import \
-     ValidateParameter
-from .field_exceptions import \
-     NeedsConfiguration, \
-     NeedsParameter
-from .field_plugin_registry import \
-     register_field_plugin
+from yt.utilities.physical_constants import \
+    speed_of_light_cgs
 
-from yt.utilities.cosmology import \
-     Cosmology
-from yt.utilities.physical_constants import \
-     speed_of_light_cgs
-    
 @register_field_plugin
 def setup_cosmology_fields(registry, ftype = "gas", slice_info = None):
     # slice_info would be the left, the right, and the factor.
@@ -49,7 +45,7 @@
           data[ftype, "dark_matter_density"]
 
     registry.add_field((ftype, "matter_density"),
-                       function=_matter_density, 
+                       function=_matter_density,
                        units="g/cm**3")
 
     def _matter_mass(field, data):
@@ -67,7 +63,7 @@
         co = data.ds.cosmology
         return data[ftype, "matter_density"] / \
           co.critical_density(data.ds.current_redshift)
-    
+
     registry.add_field((ftype, "overdensity"),
                        function=_overdensity,
                        units="")
@@ -116,7 +112,7 @@
                        function=_virial_radius_fraction,
                        validators=[ValidateParameter("virial_radius")],
                        units="")
-    
+
     # Weak lensing convergence.
     # Eqn 4 of Metzler, White, & Loken (2001, ApJ, 547, 560).
     # This needs to be checked for accuracy.
@@ -127,7 +123,7 @@
         co = data.ds.cosmology
         observer_redshift = data.get_field_parameter('observer_redshift')
         source_redshift = data.get_field_parameter('source_redshift')
-        
+
         # observer to lens
         dl = co.angular_diameter_distance(observer_redshift, data.ds.current_redshift)
         # observer to source
@@ -135,11 +131,11 @@
         # lens to source
         dls = co.angular_diameter_distance(data.ds.current_redshift, source_redshift)
 
-        # removed the factor of 1 / a to account for the fact that we are projecting 
+        # removed the factor of 1 / a to account for the fact that we are projecting
         # with a proper distance.
         return (1.5 * (co.hubble_constant / speed_of_light_cgs)**2 * (dl * dls / ds) * \
           data[ftype, "matter_overdensity"]).in_units("1/cm")
-       
+
     registry.add_field((ftype, "weak_lensing_convergence"),
                        function=_weak_lensing_convergence,
                        units="1/cm",

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -16,10 +16,7 @@
 
 from yt.funcs import \
     ensure_list
-from yt.units.yt_array import \
-    YTArray
 from .field_exceptions import \
-    ValidationException, \
     NeedsGridType, \
     NeedsOriginalGrid, \
     NeedsDataField, \
@@ -30,15 +27,9 @@
     FieldDetector
 from yt.units.unit_object import \
     Unit
+from yt.utilities.exceptions import \
+    YTFieldNotFound
 
-def derived_field(**kwargs):
-    def inner_decorator(function):
-        if 'name' not in kwargs:
-            kwargs['name'] = function.__name__
-        kwargs['function'] = function
-        add_field(**kwargs)
-        return function
-    return inner_decorator
 
 def TranslationFunc(field_name):
     def _TranslationFunc(field, data):
@@ -48,7 +39,7 @@
 
 def NullFunc(field, data):
     raise YTFieldNotFound(field.name)
- 
+
 class DerivedField(object):
     """
     This is the base class used to describe a cell-by-cell derived field.
@@ -178,7 +169,7 @@
 
     def __call__(self, data):
         """ Return the value of the field in a given *data* object. """
-        ii = self.check_available(data)
+        self.check_available(data)
         original_fields = data.keys() # Copy
         if self._function is NullFunc:
             raise RuntimeError(

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/fields/domain_context.py
--- a/yt/fields/domain_context.py
+++ b/yt/fields/domain_context.py
@@ -14,8 +14,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-
 domain_context_registry = {}
 
 class DomainContext(object):

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -15,16 +15,9 @@
 
 import numpy as np
 from collections import defaultdict
-from yt.units.unit_object import Unit
 from yt.units.yt_array import YTArray
 from .field_exceptions import \
-    ValidationException, \
-    NeedsGridType, \
-    NeedsOriginalGrid, \
-    NeedsDataField, \
-    NeedsProperty, \
-    NeedsParameter, \
-    FieldUnitsError
+    NeedsGridType
 
 class FieldDetector(defaultdict):
     Level = 1
@@ -87,27 +80,18 @@
         return arr.reshape(self.ActiveDimensions, order="C")
 
     def __missing__(self, item):
-        if hasattr(self.ds, "field_info"):
-            if not isinstance(item, tuple):
-                field = ("unknown", item)
-                finfo = self.ds._get_field_info(*field)
-                #mylog.debug("Guessing field %s is %s", item, finfo.name)
-            else:
-                field = item
-            finfo = self.ds._get_field_info(*field)
-            # For those cases where we are guessing the field type, we will
-            # need to re-update -- otherwise, our item will always not have the
-            # field type.  This can lead to, for instance, "unknown" particle
-            # types not getting correctly identified.
-            # Note that the *only* way this works is if we also fix our field
-            # dependencies during checking.  Bug #627 talks about this.
-            item = self.ds._last_freq
+        if not isinstance(item, tuple):
+            field = ("unknown", item)
         else:
-            FI = getattr(self.ds, "field_info", FieldInfo)
-            if item in FI:
-                finfo = FI[item]
-            else:
-                finfo = None
+            field = item
+        finfo = self.ds._get_field_info(*field)
+        # For those cases where we are guessing the field type, we will
+        # need to re-update -- otherwise, our item will always not have the
+        # field type.  This can lead to, for instance, "unknown" particle
+        # types not getting correctly identified.
+        # Note that the *only* way this works is if we also fix our field
+        # dependencies during checking.  Bug #627 talks about this.
+        item = self.ds._last_freq
         if finfo is not None and finfo._function.__name__ != 'NullFunc':
             try:
                 vv = finfo(self)
@@ -171,10 +155,7 @@
 
     def _read_data(self, field_name):
         self.requested.append(field_name)
-        if hasattr(self.ds, "field_info"):
-            finfo = self.ds._get_field_info(*field_name)
-        else:
-            finfo = FieldInfo[field_name]
+        finfo = self.ds._get_field_info(*field_name)
         if finfo.particle_type:
             self.requested.append(field_name)
             return np.ones(self.NumberOfParticles)

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/fields/field_exceptions.py
--- a/yt/fields/field_exceptions.py
+++ b/yt/fields/field_exceptions.py
@@ -13,7 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
 
 class ValidationException(Exception):
     pass

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/fields/field_functions.py
--- a/yt/fields/field_functions.py
+++ b/yt/fields/field_functions.py
@@ -32,7 +32,7 @@
         # it from a cm**2 array.
         np.subtract(data["%s%s" % (field_prefix, ax)].in_units("cm"),
                     center[i], r)
-        if data.ds.periodicity[i] == True:
+        if data.ds.periodicity[i] is True:
             np.abs(r, r)
             np.subtract(r, DW[i], rdw)
             np.abs(rdw, rdw)

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/fields/field_plugin_registry.py
--- a/yt/fields/field_plugin_registry.py
+++ b/yt/fields/field_plugin_registry.py
@@ -13,8 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-
 field_plugins = {}
 
 def register_field_plugin(func):

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/fields/fluid_fields.py
--- a/yt/fields/fluid_fields.py
+++ b/yt/fields/fluid_fields.py
@@ -15,20 +15,16 @@
 
 import numpy as np
 
-from yt.funcs import \
-    just_one
-
 from .derived_field import \
-    ValidateParameter, \
     ValidateSpatial
 
 from .field_plugin_registry import \
     register_field_plugin
 
 from .vector_operations import \
-     create_averaged_field, \
-     create_magnitude_field, \
-     create_vector_fields
+    create_averaged_field, \
+    create_magnitude_field, \
+    create_vector_fields
 
 from yt.utilities.physical_constants import \
     mh, \
@@ -37,20 +33,6 @@
 from yt.utilities.physical_ratios import \
     metallicity_sun
 
-from yt.units.yt_array import \
-    YTArray
-
-from yt.utilities.math_utils import \
-    get_sph_r_component, \
-    get_sph_theta_component, \
-    get_sph_phi_component, \
-    get_cyl_r_component, \
-    get_cyl_z_component, \
-    get_cyl_theta_component, \
-    get_cyl_r, get_cyl_theta, \
-    get_cyl_z, get_sph_r, \
-    get_sph_theta, get_sph_phi, \
-    periodic_dist, euclidean_dist
 
 @register_field_plugin
 def setup_fluid_fields(registry, ftype = "gas", slice_info = None):

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/fields/fluid_vector_fields.py
--- a/yt/fields/fluid_vector_fields.py
+++ b/yt/fields/fluid_vector_fields.py
@@ -16,10 +16,7 @@
 import numpy as np
 
 from yt.fields.derived_field import \
-    ValidateGridType, \
-    ValidateParameter, \
-    ValidateSpatial, \
-    NeedsParameter
+    ValidateSpatial
 
 from .field_plugin_registry import \
     register_field_plugin
@@ -28,8 +25,8 @@
     just_one
 
 from .vector_operations import \
-     create_magnitude_field, \
-     create_squared_field
+    create_magnitude_field, \
+    create_squared_field
 
 @register_field_plugin
 def setup_fluid_vector_fields(registry, ftype = "gas", slice_info = None):

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/fields/interpolated_fields.py
--- a/yt/fields/interpolated_fields.py
+++ b/yt/fields/interpolated_fields.py
@@ -13,8 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-
 from yt.fields.local_fields import add_field
 
 from yt.utilities.linear_interpolators import \

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/fields/local_fields.py
--- a/yt/fields/local_fields.py
+++ b/yt/fields/local_fields.py
@@ -13,8 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-
 from yt.utilities.logger import \
     ytLogger as mylog
 

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/fields/magnetic_field.py
--- a/yt/fields/magnetic_field.py
+++ b/yt/fields/magnetic_field.py
@@ -15,11 +15,6 @@
 
 import numpy as np
 
-from yt.units.yt_array import YTArray
-from yt.utilities.lib.misc_utilities import \
-    obtain_rvec, obtain_rv_vec
-from yt.utilities.math_utils import resize_vector
-from yt.utilities.cosmology import Cosmology
 from yt.fields.derived_field import \
     ValidateParameter
 
@@ -27,16 +22,8 @@
     register_field_plugin
 
 from yt.utilities.math_utils import \
-    get_sph_r_component, \
     get_sph_theta_component, \
-    get_sph_phi_component, \
-    get_cyl_r_component, \
-    get_cyl_z_component, \
-    get_cyl_theta_component, \
-    get_cyl_r, get_cyl_theta, \
-    get_cyl_z, get_sph_r, \
-    get_sph_theta, get_sph_phi, \
-    periodic_dist, euclidean_dist
+    get_sph_phi_component
 
 @register_field_plugin
 def setup_magnetic_field_fields(registry, ftype = "gas", slice_info = None):

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/fields/my_plugin_fields.py
--- a/yt/fields/my_plugin_fields.py
+++ b/yt/fields/my_plugin_fields.py
@@ -13,8 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-
 from .field_plugin_registry import \
     register_field_plugin
 

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -16,8 +16,6 @@
 
 import numpy as np
 
-from yt.funcs import *
-from yt.units.yt_array import YTArray
 from yt.fields.derived_field import \
     ValidateParameter, \
     ValidateSpatial
@@ -125,7 +123,7 @@
     def particle_density(field, data):
         pos = data[ptype, coord_name].convert_to_units("code_length")
         mass = data[ptype, mass_name].convert_to_units("code_mass")
-        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
+        d = data.deposit(pos, [mass], method = "sum")
         d = data.ds.arr(d, "code_mass")
         d /= data["index", "cell_volume"]
         return d
@@ -790,13 +788,19 @@
         kwargs = {}
         if nneighbors:
             kwargs['nneighbors'] = nneighbors
+        # This is for applying cutoffs, similar to in the SPLASH paper.
+        smooth_cutoff = data["index","cell_volume"]**(1./3)
+        smooth_cutoff.convert_to_units("code_length")
         # volume_weighted smooth operations return lists of length 1.
         rv = data.smooth(pos, [mass, hsml, dens, quan],
                          method="volume_weighted",
                          create_octree=True,
+                         index_fields=[smooth_cutoff],
                          kernel_name=kernel_name)[0]
         rv[np.isnan(rv)] = 0.0
         # Now some quick unit conversions.
+        # This should be used when seeking a non-normalized value:
+        rv /= hsml.uq**3 / hsml.uq.in_cgs().uq**3
         rv = data.apply_units(rv, field_units)
         return rv
     registry.add_field(field_name, function = _vol_weight,
@@ -827,7 +831,7 @@
         field_name = (ptype, "smoothed_density")
     else:
         field_name = (ptype, "%s_smoothed_density" % (kernel_name))
-    field_units = registry[ptype, mass_name].units
+
     def _nth_neighbor(field, data):
         pos = data[ptype, coord_name]
         pos.convert_to_units("code_length")

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/fields/setup.py
--- a/yt/fields/setup.py
+++ b/yt/fields/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -17,12 +17,10 @@
 import re
 
 from yt.utilities.physical_constants import \
-    mh, \
-    mass_sun_cgs, \
     amu_cgs
 from yt.utilities.physical_ratios import \
     primordial_H_mass_fraction
-from yt.funcs import *
+
 from yt.utilities.chemical_formulas import \
     ChemicalFormula
 from .field_plugin_registry import \

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -1,13 +1,20 @@
-from yt.testing import *
 import numpy as np
+
+from yt.testing import \
+    fake_random_ds, \
+    assert_equal, \
+    assert_array_almost_equal_nulp, \
+    assert_array_equal, \
+    assert_raises
 from yt.utilities.cosmology import \
-     Cosmology
-from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
+    Cosmology
 from yt.frontends.stream.fields import \
     StreamFieldInfo
 from yt.units.yt_array import \
-     YTArray, YTQuantity
+    YTArray, YTQuantity
+from yt.utilities.exceptions import \
+    YTFieldUnitError, \
+    YTFieldUnitParseError
 
 def setup():
     global base_ds
@@ -88,19 +95,6 @@
         return field
     return field[1]
 
-def _expand_field(field):
-    if isinstance(field, tuple):
-        return field
-    if field in KnownStreamFields:
-        fi = KnownStreamFields[field]
-        if fi.particle_type:
-            return ("all", field)
-        else:
-            return ("gas", field)
-    # Otherwise, we just guess.
-    if "particle" in field:
-        return ("all", field)
-    return ("gas", field)
 
 class TestFieldAccess(object):
     description = None

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -178,7 +178,8 @@
         """
         Returns (in code units) the smallest cell size in the simulation.
         """
-        return  1.0/(2**self.max_level)
+        return (self.dataset.domain_width /
+                (self.dataset.domain_dimensions * 2**(self.max_level))).min()
 
     def convert(self, unit):
         return self.dataset.conversion_factors[unit]

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -185,27 +185,10 @@
             for alias in aliases:
                 self.alias((ptype, alias), (ptype, f), units = output_units)
 
-        # We'll either have particle_position or particle_position_[xyz]
-        if (ptype, "particle_position") in self.field_list or \
-           (ptype, "particle_position") in self.field_aliases:
-            particle_scalar_functions(ptype,
-                   "particle_position", "particle_velocity",
-                   self)
-        else:
-            # We need to check to make sure that there's a "known field" that
-            # overlaps with one of the vector fields.  For instance, if we are
-            # in the Stream frontend, and we have a set of scalar position
-            # fields, they will overlap with -- and be overridden by -- the
-            # "known" vector field that the frontend creates.  So the easiest
-            # thing to do is to simply remove the on-disk field (which doesn't
-            # exist) and replace it with a derived field.
-            if (ptype, "particle_position") in self and \
-                 self[ptype, "particle_position"]._function == NullFunc:
-                self.pop((ptype, "particle_position"))
-            particle_vector_functions(ptype,
-                    ["particle_position_%s" % ax for ax in 'xyz'],
-                    ["particle_velocity_%s" % ax for ax in 'xyz'],
-                    self)
+        ppos_fields = ["particle_position_%s" % ax for ax in 'xyz']
+        pvel_fields = ["particle_velocity_%s" % ax for ax in 'xyz']
+        particle_vector_functions(ptype, ppos_fields, pvel_fields, self)
+
         particle_deposition_functions(ptype, "particle_position",
             "particle_mass", self)
         standard_particle_fields(self, ptype)
@@ -219,7 +202,7 @@
             self.add_output_field(field, 
                                   units = self.ds.field_units.get(field, ""),
                                   particle_type = True)
-        self.setup_smoothed_fields(ptype, 
+        self.setup_smoothed_fields(ptype,
                                    num_neighbors=num_neighbors,
                                    ftype=ftype)
 

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -77,7 +77,7 @@
 
     def _set_units(self):
         self.unit_registry = UnitRegistry()
-        self.unit_registry.lut["code_time"] = (1.0, dimensions.time)
+        self.unit_registry.add("code_time", 1.0, dimensions.time)
         if self.cosmological_simulation:
             # Instantiate EnzoCosmology object for units and time conversions.
             self.cosmology = \

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -420,7 +420,7 @@
         Generates the conversion to various physical _units based on the parameter file
         """
         default_length_units = [u for u,v in default_unit_symbol_lut.items()
-                                if str(v[-1]) == "(length)"]
+                                if str(v[1]) == "(length)"]
         more_length_units = []
         for unit in default_length_units:
             if unit in prefixable_units:

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/frontends/gadget/tests/test_outputs.py
--- a/yt/frontends/gadget/tests/test_outputs.py
+++ b/yt/frontends/gadget/tests/test_outputs.py
@@ -1,5 +1,5 @@
 """
-Gadget frontend tests using the IsothermalCollapse dataset
+Gadget frontend tests
 
 
 
@@ -14,15 +14,49 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.testing import *
+from yt.testing import requires_file
 from yt.utilities.answer_testing.framework import \
+    data_dir_load, \
     requires_ds, \
-    data_dir_load
-from yt.frontends.gadget.api import GadgetHDF5Dataset
+    sph_answer
+from yt.frontends.gadget.api import GadgetHDF5Dataset, GadgetDataset
 
-isothermal = "IsothermalCollapse/snap_505.hdf5"
-@requires_file(isothermal)
+isothermal_h5 = "IsothermalCollapse/snap_505.hdf5"
+isothermal_bin = "IsothermalCollapse/snap_505"
+gdg = "GadgetDiskGalaxy/snapshot_0200.hdf5"
+
+iso_fields = (
+    ("gas", "density"),
+    ("gas", "temperature"),
+    ('gas', 'velocity_magnitude'),
+    ("deposit", "all_density"),
+    ("deposit", "all_count"),
+    ("deposit", "all_cic"),
+    ("deposit", "PartType0_density"),
+)
+iso_kwargs = dict(bounding_box=[[-3, 3], [-3, 3], [-3, 3]])
+
+gdg_fields = iso_fields + (("deposit", "PartType4_density"), )
+gdg_kwargs = dict(bounding_box=[[-1e5, 1e5], [-1e5, 1e5], [-1e5, 1e5]])
+
+
+@requires_file(isothermal_h5)
+@requires_file(isothermal_bin)
 def test_GadgetDataset():
-    kwargs = dict(bounding_box=[[-3,3], [-3,3], [-3,3]])
-    assert isinstance(data_dir_load(isothermal, kwargs=kwargs),
+    assert isinstance(data_dir_load(isothermal_h5, kwargs=iso_kwargs),
                       GadgetHDF5Dataset)
+    assert isinstance(data_dir_load(isothermal_bin, kwargs=iso_kwargs),
+                      GadgetDataset)
+
+
+@requires_ds(isothermal_h5)
+def test_iso_collapse():
+    for test in sph_answer(isothermal_h5, 'snap_505', 2**17,
+                           iso_fields, ds_kwargs=iso_kwargs):
+        yield test
+
+@requires_ds(gdg, big_data=True)
+def test_gadget_disk_galaxy():
+    for test in sph_answer(gdg, 'snap_505', 11907080, gdg_fields,
+                           ds_kwargs=gdg_kwargs):
+        yield test

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/frontends/owls/tests/test_outputs.py
--- a/yt/frontends/owls/tests/test_outputs.py
+++ b/yt/frontends/owls/tests/test_outputs.py
@@ -14,45 +14,33 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.testing import *
+from yt.testing import \
+    requires_file
 from yt.utilities.answer_testing.framework import \
     requires_ds, \
-    small_patch_amr, \
-    big_patch_amr, \
     data_dir_load, \
-    PixelizedProjectionValuesTest, \
-    FieldValuesTest, \
-    create_obj
+    sph_answer
 from yt.frontends.owls.api import OWLSDataset
 
-_fields = (("deposit", "all_density"), ("deposit", "all_count"),
-           ("deposit", "PartType0_density"),
-           ("deposit", "PartType4_density"))
+os33 = "snapshot_033/snap_033.0.hdf5"
 
-os33 = "snapshot_033/snap_033.0.hdf5"
+_fields = (
+    ("gas", "density"),
+    ("gas", "temperature"),
+    ('gas', 'He_p0_number_density'),
+    ('gas', 'N_p1_number_density'),
+    ('gas', 'velocity_magnitude'),
+    ("deposit", "all_density"),
+    ("deposit", "all_count"),
+    ("deposit", "all_cic"),
+    ("deposit", "PartType0_density"),
+    ("deposit", "PartType4_density"))
+
+
 @requires_ds(os33, big_data=True)
 def test_snapshot_033():
-    ds = data_dir_load(os33)
-    yield assert_equal, str(ds), "snap_033"
-    dso = [ None, ("sphere", ("c", (0.1, 'unitary')))]
-    dd = ds.all_data()
-    yield assert_equal, dd["particle_position"].shape[0], 2*(128*128*128)
-    yield assert_equal, dd["particle_position"].shape[1], 3
-    tot = sum(dd[ptype,"particle_position"].shape[0]
-              for ptype in ds.particle_types if ptype != "all")
-    yield assert_equal, tot, (2*128*128*128)
-    for dobj_name in dso:
-        for field in _fields:
-            for axis in [0, 1, 2]:
-                for weight_field in [None, "density"]:
-                    yield PixelizedProjectionValuesTest(
-                        os33, axis, field, weight_field,
-                        dobj_name)
-            yield FieldValuesTest(os33, field, dobj_name)
-        dobj = create_obj(ds, dobj_name)
-        s1 = dobj["ones"].sum()
-        s2 = sum(mask.sum() for block, mask in dobj.blocks)
-        yield assert_equal, s1, s2
+    for test in sph_answer(os33, 'snap_033', 2*128**3, _fields):
+        yield test
 
 
 @requires_file(os33)

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -39,7 +39,7 @@
         ("Metals", ("code_metallicity", ["metallicity"], None)),
         ("Metallicity", ("code_metallicity", ["metallicity"], None)),
         ("Phi", ("code_length", [], None)),
-        ("StarFormationRate", ("code_mass / code_time", [], None)),
+        ("StarFormationRate", ("Msun / yr", [], None)),
         ("FormationTime", ("code_time", ["creation_time"], None)),
         # These are metallicity fields that get discovered for FIRE simulations
         ("Metallicity_00", ("", ["metallicity"], None)),

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/frontends/stream/tests/test_outputs.py
--- a/yt/frontends/stream/tests/test_outputs.py
+++ b/yt/frontends/stream/tests/test_outputs.py
@@ -19,7 +19,7 @@
 import unittest
 
 from yt.testing import assert_raises
-from yt.utilities.answer_testing.framework import data_dir_load
+from yt.convenience import load
 from yt.utilities.exceptions import YTOutputNotIdentified
 
 class TestEmptyLoad(unittest.TestCase):
@@ -40,6 +40,6 @@
         shutil.rmtree(self.tmpdir)
 
     def test_load_empty_file(self):
-        assert_raises(YTOutputNotIdentified, data_dir_load, "not_a_file")
-        assert_raises(YTOutputNotIdentified, data_dir_load, "empty_file")
-        assert_raises(YTOutputNotIdentified, data_dir_load, "empty_directory")
+        assert_raises(YTOutputNotIdentified, load, "not_a_file")
+        assert_raises(YTOutputNotIdentified, load, "empty_file")
+        assert_raises(YTOutputNotIdentified, load, "empty_directory")

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/frontends/tipsy/data_structures.py
--- a/yt/frontends/tipsy/data_structures.py
+++ b/yt/frontends/tipsy/data_structures.py
@@ -32,6 +32,7 @@
 from yt.utilities.physical_constants import \
     G, \
     cm_per_kpc
+from yt import YTQuantity
 
 from .fields import \
     TipsyFieldInfo
@@ -167,9 +168,9 @@
         self.domain_dimensions = np.ones(3, "int32") * nz
         periodic = self.parameters.get('bPeriodic', True)
         period = self.parameters.get('dPeriod', None)
-        comoving = self.parameters.get('bComove', False)
         self.periodicity = (periodic, periodic, periodic)
-        if comoving and period is None:
+        self.comoving = self.parameters.get('bComove', False)
+        if self.comoving and period is None:
             period = 1.0
         if self.bounding_box is None:
             if periodic and period is not None:
@@ -186,7 +187,9 @@
             self.domain_left_edge = bbox[:,0]
             self.domain_right_edge = bbox[:,1]
 
-        if comoving:
+        # If the cosmology parameters dictionary got set when data is
+        # loaded, we can assume it's a cosmological data set
+        if self.comoving or self._cosmology_parameters is not None:
             cosm = self._cosmology_parameters or {}
             self.scale_factor = hvals["time"]#In comoving simulations, time stores the scale factor a
             self.cosmological_simulation = 1
@@ -224,8 +227,15 @@
             self.length_unit = self.quan(lu, 'kpc')*self.scale_factor
             self.mass_unit = self.quan(mu, 'Msun')
             density_unit = self.mass_unit/ (self.length_unit/self.scale_factor)**3
-            # Gasoline's hubble constant, dHubble0, is stored units of proper code time.
-            self.hubble_constant *= np.sqrt(G.in_units('kpc**3*Msun**-1*s**-2')*density_unit).value/(3.2407793e-18)  
+
+            # If self.comoving is set, we know this is a gasoline data set,
+            # and we do the conversion on the hubble constant.
+            if self.comoving:
+                # Gasoline's hubble constant, dHubble0, is stored units of
+                # proper code time.
+                self.hubble_constant *= np.sqrt(G.in_units(
+                    'kpc**3*Msun**-1*s**-2') * density_unit).value / (
+                    3.2407793e-18)
             cosmo = Cosmology(self.hubble_constant,
                               self.omega_matter, self.omega_lambda)
             self.current_time = cosmo.hubble_time(self.current_redshift)
@@ -237,6 +247,24 @@
             density_unit = self.mass_unit / self.length_unit**3
         self.time_unit = 1.0 / np.sqrt(G * density_unit)
 
+        # If unit base is defined by the user, override all relevant units
+        if self._unit_base is not None:
+            length = self._unit_base.get('length', self.length_unit)
+            length = self.quan(*length) if isinstance(length, tuple) else self.quan(length)
+            self.length_unit = length
+
+            mass = self._unit_base.get('mass', self.mass_unit)
+            mass = self.quan(*mass) if isinstance(mass, tuple) else self.quan(mass)
+            self.mass_unit = mass
+
+            density_unit = self.mass_unit / self.length_unit**3
+            self.time_unit = 1.0 / np.sqrt(G * density_unit)
+
+            time = self._unit_base.get('time', self.time_unit)
+            time = self.quan(*time) if isinstance(time, tuple) else self.quan(time)
+            self.time_unit = time
+
+
     @staticmethod
     def _validate_header(filename):
         '''

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/frontends/tipsy/fields.py
--- a/yt/frontends/tipsy/fields.py
+++ b/yt/frontends/tipsy/fields.py
@@ -38,7 +38,8 @@
         'FeMassFrac':("FeMassFrac", ("dimensionless", ["Fe_fraction"], None)),
         'c':("c", ("code_velocity", [""], None)),
         'acc':("acc", ("code_velocity / code_time", [""], None)),
-        'accg':("accg", ("code_velocity / code_time", [""], None))}
+        'accg':("accg", ("code_velocity / code_time", [""], None)),
+        'smoothlength':('smoothlength', ("code_length", ["smoothing_length"], None))}
 
     def __init__(self, ds, field_list, slice_info = None):
         for field in field_list:
@@ -60,15 +61,19 @@
 
     def setup_gas_particle_fields(self, ptype):
 
-        def _smoothing_length(field, data):
-            # For now, we hardcode num_neighbors.  We should make this configurable
-            # in the future.
-            num_neighbors = 64
-            fn, = add_nearest_neighbor_field(ptype, "particle_position", self, num_neighbors)
-            return data[ptype, 'nearest_neighbor_distance_%d' % num_neighbors]
+        num_neighbors = 65
+        fn, = add_nearest_neighbor_field(ptype, "particle_position", self, num_neighbors)
+        def _func():
+            def _smoothing_length(field, data):
+                # For now, we hardcode num_neighbors.  We should make this configurable
+                # in the future.
+                rv = data[ptype, 'nearest_neighbor_distance_%d' % num_neighbors]
+                #np.maximum(rv, 0.5*data[ptype, "Epsilon"], rv)
+                return rv
+            return _smoothing_length
 
         self.add_field(
             (ptype, "smoothing_length"),
-            function=_smoothing_length,
+            function=_func(),
             particle_type=True,
             units="code_length")

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/frontends/tipsy/tests/test_outputs.py
--- a/yt/frontends/tipsy/tests/test_outputs.py
+++ b/yt/frontends/tipsy/tests/test_outputs.py
@@ -14,15 +14,14 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.testing import *
+from yt.testing import assert_equal, requires_file
 from yt.utilities.answer_testing.framework import \
     requires_ds, \
-    small_patch_amr, \
-    big_patch_amr, \
     data_dir_load, \
-    PixelizedProjectionValuesTest, \
+    sph_answer, \
+    create_obj, \
     FieldValuesTest, \
-    create_obj
+    PixelizedProjectionValuesTest
 from yt.frontends.tipsy.api import TipsyDataset
 
 _fields = (("deposit", "all_density"),
@@ -62,9 +61,9 @@
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2
 
-gasoline = "agora_1e11.00400/agora_1e11.00400"
-@requires_ds(gasoline, big_data = True, file_check = True)
-def test_gasoline():
+gasoline_dmonly = "agora_1e11.00400/agora_1e11.00400"
+@requires_ds(gasoline_dmonly, big_data = True, file_check = True)
+def test_gasoline_dmonly():
     cosmology_parameters = dict(current_redshift = 0.0,
                                 omega_lambda = 0.728,
                                 omega_matter = 0.272,
@@ -72,7 +71,7 @@
     kwargs = dict(cosmology_parameters = cosmology_parameters,
                   unit_base = {'length': (1.0/60.0, "Mpccm/h")},
                   n_ref = 64)
-    ds = data_dir_load(gasoline, TipsyDataset, (), kwargs)
+    ds = data_dir_load(gasoline_dmonly, TipsyDataset, (), kwargs)
     yield assert_equal, str(ds), "agora_1e11.00400"
     dso = [ None, ("sphere", ("c", (0.3, 'unitary')))]
     dd = ds.all_data()
@@ -93,7 +92,22 @@
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2
 
+tg_fields = (
+    ('gas', 'density'),
+    ('gas', 'temperature'),
+    ('gas', 'velocity_magnitude'),
+    ('gas', 'Fe_fraction'),
+    ('Stars', 'Metals'),
+)
 
+tipsy_gal = 'TipsyGalaxy/galaxy.00300'
+@requires_ds(tipsy_gal)
+def test_tipsy_galaxy():
+    for test in sph_answer(tipsy_gal, 'galaxy.00300', 315372, tg_fields):
+        yield test
+        
+@requires_file(gasoline_dmonly)
 @requires_file(pkdgrav)
 def test_TipsyDataset():
     assert isinstance(data_dir_load(pkdgrav), TipsyDataset)
+    assert isinstance(data_dir_load(gasoline_dmonly), TipsyDataset)

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -40,13 +40,14 @@
 
 cdef inline np.float64_t sph_kernel_cubic(np.float64_t x) nogil:
     cdef np.float64_t kernel
+    cdef np.float64_t C = 2.5464790894703255
     if x <= 0.5:
         kernel = 1.-6.*x*x*(1.-x)
     elif x>0.5 and x<=1.0:
         kernel = 2.*(1.-x)*(1.-x)*(1.-x)
     else:
         kernel = 0.
-    return kernel
+    return kernel * C
 
 ########################################################
 # Alternative SPH kernels for use with the Grid method #
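
The constant added to sph_kernel_cubic is the standard 3D normalization of the M4 cubic spline, 8/pi = 2.5464790894703255, which makes the kernel integrate to one over the unit sphere. A quick numerical check (standalone, assumes NumPy and SciPy):

    import numpy as np
    from scipy.integrate import quad

    C = 8.0 / np.pi   # 2.5464790894703255, the constant from the diff

    def w(q):
        # same piecewise form as sph_kernel_cubic above
        if q <= 0.5:
            return C * (1.0 - 6.0*q*q*(1.0 - q))
        elif q <= 1.0:
            return C * 2.0*(1.0 - q)**3
        return 0.0

    # 4*pi * integral_0^1 W(q) q^2 dq should equal 1 for a normalized 3D kernel
    print(quad(lambda q: 4.0*np.pi*w(q)*q*q, 0.0, 1.0)[0])   # ~1.0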

diff -r 34c12c260c5b9550a77cfdcf950931de8ba55a05 -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -55,10 +55,12 @@
                                np.int64_t *pinds, np.int64_t *pcounts,
                                np.int64_t offset, np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
-                               int *nsize)
+                               int *nsize, np.float64_t *oct_left_edges,
+                               np.float64_t *oct_dds)
     cdef int neighbor_search(self, np.float64_t pos[3], OctreeContainer octree,
                              np.int64_t **nind, int *nsize, 
-                             np.int64_t nneighbors, np.int64_t domain_id, Oct **oct = ?)
+                             np.int64_t nneighbors, np.int64_t domain_id, 
+                             Oct **oct = ?, int extra_layer = ?)
     cdef void neighbor_process_particle(self, np.float64_t cpos[3],
                                np.float64_t *ppos,
                                np.float64_t **fields, 
@@ -78,7 +80,9 @@
                             np.int64_t *pcounts,
                             np.int64_t *pinds,
                             np.float64_t *ppos,
-                            np.float64_t cpos[3])
+                            np.float64_t cpos[3],
+                            np.float64_t* oct_left_edges,
+                            np.float64_t* oct_dds)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields)

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/b26c43ce4542/
Changeset:   b26c43ce4542
Branch:      yt
User:        ngoldbaum
Date:        2015-09-22 19:54:40+00:00
Summary:     Deal with the project state after a bugfix release.

We now get the lineage of commits from the last "major" release (e.g. yt 3.2.0)
and use that to find merged PRs. Before, we were only going back to the last
release, which is incorrect. We still disregard PRs that happened before the last
minor release.

One caveat is that we're now no longer dealing with commits that were pushed
directly to the main repo and thus didn't come in as part of a pull request.
Supporting this would make the script much slower (we'd need to look at *all*
of the pull requests that came in since the last major release, not just the
ones that came in since the last bugfix release) and more complicated. Since
it's a rare event anyway, I'm just going to go ahead and punt on this.
Affected #:  1 file
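
The tag-selection logic introduced here relies on how distutils' LooseVersion tokenizes tag names: only tags whose parsed form starts with ['yt', '-'] and whose patch component is absent or zero count as major/minor releases. A small illustration of the parsing (stock distutils behavior):

    from distutils.version import LooseVersion

    print(LooseVersion('yt-3.2.1').version)  # ['yt', '-', 3, 2, 1] -> patch != 0, skipped
    print(LooseVersion('yt-3.2.0').version)  # ['yt', '-', 3, 2, 0] -> last major/minor release
    print(LooseVersion('yt-3.2').version)    # ['yt', '-', 3, 2]    -> also accepted (len == 4)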

diff -r 58d619fa41d9765b52288c7ed571a7f0c1ab4120 -r b26c43ce45428b7e9e0a1c546874cfd0192ad9b2 scripts/pr_backport.py
--- a/scripts/pr_backport.py
+++ b/scripts/pr_backport.py
@@ -4,6 +4,7 @@
 import tempfile
 
 from datetime import datetime
+from distutils.version import LooseVersion
 from time import strptime, mktime
 
 MERGED_PR_ENDPOINT = ("http://bitbucket.org/api/2.0/repositories/yt_analysis/"
@@ -18,46 +19,60 @@
     dest_repo_path = path+'/yt-backport'
     if source is None:
         source = YT_REPO
-    hglib.clone(source=source, dest=dest_repo_path, updaterev='yt')
+    hglib.clone(source=source, dest=dest_repo_path)
+    with hglib.open(dest_repo_path) as client:
+        # Changesets that are on the yt branch but aren't topological ancestors
+        # of whichever changeset the experimental bookmark is pointing at
+        client.update('heads(branch(yt) - ::bookmark(experimental))')
     return dest_repo_path
 
 
-def get_first_commit_after_release(repo_path):
+def get_first_commit_after_last_major_release(repo_path):
     """Returns the SHA1 hash of the first commit to the yt branch that wasn't
     included in the last tagged release.
     """
     with hglib.open(repo_path) as client:
-        most_recent_tag = client.log("reverse(tag())")[0]
-        tag_name = most_recent_tag[2]
+        tags = client.log("reverse(tag())")
+        tags = sorted([LooseVersion(t[2]) for t in tags])
+        for t in tags[::-1]:
+            if t.version[0:2] != ['yt', '-']:
+                continue
+            if len(t.version) == 4 or t.version[4] == 0:
+                last_major_tag = t
+                break
         last_before_release = client.log(
-            "last(ancestors(%s) and branch(yt))" % tag_name)
+            "last(ancestors(%s) and branch(yt))" % str(last_major_tag))
         first_after_release = client.log(
             "first(descendants(%s) and branch(yt) and not %s)"
             % (last_before_release[0][1], last_before_release[0][1]))
-    return first_after_release[0]
+    return str(first_after_release[0][1][:12])
 
 
-def get_branch_tip(repo_path, branch):
+def get_branch_tip(repo_path, branch, exclude=None):
     """Returns the SHA1 hash of the most recent commit on the given branch"""
+    revset = "head() and branch(%s)" % branch
+    if exclude is not None:
+        revset += "and not %s" % exclude
     with hglib.open(repo_path) as client:
-        change = client.identify(rev=branch, id=True)
-        change.strip('\n')
+        change = client.log(revset)[0][1][:12]
     return change
 
 
 def get_lineage_between_release_and_tip(repo_path, first, last):
     """Returns the lineage of changesets that were at one point the public tip"""
-    fhash = first[1]
     with hglib.open(repo_path) as client:
-        return client.log("%s::%s and p1(%s::%s) + %s"
-                          % (fhash, last, fhash, last, last))
+        lineage = client.log("'%s'::'%s' and p1('%s'::'%s') + '%s'"
+                             % (first, last, first, last, last))
+        return lineage
 
 
-def get_pull_requests_since_last_release(first):
+def get_pull_requests_since_last_release(repo_path):
     """Returns a list of pull requests made since the last tagged release"""
     r = requests.get(MERGED_PR_ENDPOINT)
     done = False
     merged_prs = []
+    with hglib.open(repo_path) as client:
+        last_tag = client.log("reverse(tag())")[0]
     while not done:
         if r.status_code != 200:
             raise RuntimeError
@@ -75,11 +90,10 @@
                     break
             if merge_date is None:
                 break
-            if merge_date < first[6]:
+            if merge_date < last_tag[6]:
+                done = True
                 break
             merged_prs.append(pr)
-        if merge_date is not None and merge_date < first[6]:
-            done = True
         r = requests.get(data['next'])
     return merged_prs
 
@@ -89,6 +103,10 @@
     commit_data = {}
     for pr in prs:
         data = requests.get(pr['links']['commits']['href']).json()
+        if data.keys() == [u'error']:
+            # this happens when commits have been stripped, e.g.
+            # https://bitbucket.org/yt_analysis/yt/pull-requests/1641
+            continue
         done = False
         commits = []
         while not done:
@@ -119,9 +137,8 @@
 def find_merge_commit_in_prs(needle, prs):
     """Find the merge commit `needle` in the list of `prs`
 
-    If found, returns the pr the merge commit comes from. If not found, raises a
-    RuntimeError, since all merge commits are supposed to be associated with a
-    PR.
+    If found, returns the pr the merge commit comes from. If not found, return
+    None
     """
     for pr in prs[::-1]:
         if pr['merge_commit'] is not None:
@@ -152,8 +169,6 @@
         else:
             pr = find_commit_in_prs(commit, commit_data, my_prs)
             commits_to_prs[cset_hash] = pr
-        if commits_to_prs[cset_hash] is None:
-            continue
     return commits_to_prs
 
 
@@ -181,28 +196,13 @@
         com = client.log('last(%s::)' % commit)
     return com[0][1][:12]
 
-
-def get_no_pr_commits(repo_path, inv_map):
-    """"get a list of commits that aren't in any pull request"""
-    try:
-        no_pr_commits = inv_map[None]
-        del inv_map[None]
-    except KeyError:
-        no_pr_commits = []
+def screen_already_backported(repo_path, inv_map):
     with hglib.open(repo_path) as client:
-        # remove merge commits since they can't be grafted
-        no_pr_commits = [com for com in no_pr_commits if
-                         len(client.log('%s and merge()' % com)) == 0]
-    return no_pr_commits
-
-
-def screen_already_backported(repo_path, inv_map, no_pr_commits):
-    with hglib.open(repo_path) as client:
-        most_recent_tag_name = client.log("reverse(tag())")[0][2]
+        tags = client.log("reverse(tag())")
+        major_tags = [t for t in tags if t[2].endswith('.0')]
+        most_recent_major_tag_name = major_tags[0][2]
         lineage = client.log(
-            "descendants(%s) and branch(stable)" % most_recent_tag_name)
-        for commit in no_pr_commits:
-            lineage.remove(commit)
+            "descendants(%s) and branch(stable)" % most_recent_major_tag_name)
         prs_to_screen = []
         for pr in inv_map:
             for commit in lineage:
@@ -210,7 +210,7 @@
                     prs_to_screen.append(pr)
         for pr in prs_to_screen:
             del inv_map[pr]
-        return inv_map, no_pr_commits
+        return inv_map
 
 def commit_already_on_stable(repo_path, commit):
     with hglib.open(repo_path) as client:
@@ -224,23 +224,6 @@
             return True
         return False
 
-
-def backport_no_pr_commits(repo_path, no_pr_commits):
-    """backports commits that aren't in a pull request"""
-    for commit in no_pr_commits:
-        with hglib.open(repo_path) as client:
-            client.update('stable')
-            commit_info = client.log(commit)[0]
-            commit_info = (commit_info[1][:12], commit_info[4], commit_info[5])
-            print "Commit %s by %s\n%s" % commit_info
-        print ""
-        print "To backport issue the following command:"
-        print ""
-        print "hg graft %s\n" % commit_info[0]
-        raw_input('Press any key to continue')
-        print ""
-
-
 def backport_pr_commits(repo_path, inv_map, last_stable, prs):
     """backports pull requests to the stable branch.
 
@@ -301,21 +284,24 @@
     print ""
     repo_path = clone_new_repo()
     try:
-        first_dev = get_first_commit_after_release(repo_path)
-        last_dev = get_branch_tip(repo_path, 'yt')
+        last_major_release = get_first_commit_after_last_major_release(repo_path)
+        last_dev = get_branch_tip(repo_path, 'yt', 'experimental')
         last_stable = get_branch_tip(repo_path, 'stable')
         lineage = get_lineage_between_release_and_tip(
-            repo_path, first_dev, last_dev)
-        prs = get_pull_requests_since_last_release(first_dev)
+            repo_path, last_major_release, last_dev)
+        prs = get_pull_requests_since_last_release(repo_path)
         commits_to_prs = create_commits_to_prs_mapping(lineage, prs)
         inv_map = invert_commits_to_prs_mapping(commits_to_prs)
-        no_pr_commits = get_no_pr_commits(repo_path, inv_map)
-        inv_map, no_pr_commits = \
-            screen_already_backported(repo_path, inv_map, no_pr_commits)
+        # for now, ignore commits that aren't part of a pull request since
+        # the last bugfix release. These are mostly commits in pull requests
+        # from before the last bugfix release but might include commits that
+        # were pushed directly to the repo.
+        del inv_map[None]
+
+        inv_map = screen_already_backported(repo_path, inv_map)
         print "In another terminal window, navigate to the following path:"
         print "%s" % repo_path
         raw_input("Press any key to continue")
-        backport_no_pr_commits(repo_path, no_pr_commits)
         backport_pr_commits(repo_path, inv_map, last_stable, prs)
         raw_input(
             "Now you need to push your backported changes. The temporary\n"


https://bitbucket.org/yt_analysis/yt/commits/accbf02446b3/
Changeset:   accbf02446b3
Branch:      yt
User:        ngoldbaum
Date:        2015-09-22 23:22:33+00:00
Summary:     Add docs on how to do a release
Affected #:  2 files

diff -r b26c43ce45428b7e9e0a1c546874cfd0192ad9b2 -r accbf02446b3b0cd91047f9681fd8a9589ad3292 doc/source/developing/index.rst
--- a/doc/source/developing/index.rst
+++ b/doc/source/developing/index.rst
@@ -21,6 +21,7 @@
    building_the_docs
    testing
    debugdrive
+   releasing
    creating_datatypes
    creating_derived_fields
    creating_derived_quantities

diff -r b26c43ce45428b7e9e0a1c546874cfd0192ad9b2 -r accbf02446b3b0cd91047f9681fd8a9589ad3292 doc/source/developing/releasing.rst
--- /dev/null
+++ b/doc/source/developing/releasing.rst
@@ -0,0 +1,206 @@
+How to Do a Release
+-------------------
+
+Periodically, the yt development community issues new releases. Since yt follows
+`semantic versioning <http://semver.org/>`_, the type of release can be read off
+from the version number used. Version numbers should follow the scheme
+``MAJOR.MINOR.PATCH``. There are three kinds of possible releases:
+
+* Bugfix releases
+
+  These releases are regularly scheduled and will optimally happen approximately
+  once a month. These releases should contain only fixes for bugs discovered in
+  earlier releases and should not contain new features of API changes. Bugfix
+  releases should increment the ``PATCH`` version number. Bugfix releases should
+  *not* be generated by merging from the ``yt`` branch, instead bugfix pull
+  reuqests should be manually backported using the PR backport script, described
+  below. Version ``3.2.2`` is a bugfix release.
+
+* Minor releases
+
+  These releases happen when new features are deemed ready to be merged into the
+  ``stable`` branch and should not happen on a regular schedule. Minor releases
+  can also include fixes for bugs if the fix is determined to be too invasive
+  for a bugfix release. Minor releases should *not* include
+  backwards-incompatible changes and should not change APIs.  If an API change
+  is deemed to be necessary, the old API should continue to function but might
+  trigger deprecation warnings. Minor releases should happen by merging the
+  ``yt`` branch into the ``stable`` branch. Minor releases should increment the
+  ``MINOR`` version number and reset the ``PATCH`` version number to zero.
+  Version ``3.3.0`` is a minor release.
+
+* Major releases
+
+  These releases happen when the development community decides to make major
+  backwards-incompatible changes. In principle a major version release could
+  include arbitrary changes to the library. Major version releases should only
+  happen after extensive discussion and vetting among the developer and user
+  community. Like minor rlease, a major release should happen by merging the
+  ``yt`` branch into the ``stable`` branch. Major releases should increment the
+  ``MAJOR`` version number and reset the ``MINOR`` and ``PATCH`` version numbers
+  to zero. If it ever happens, version ``4.0.0`` will be a major release.
+
+The job of doing a release differs depending on the kind of release. Below, we
+describe the necessary steps for each kind of release in detail.
+
+Doing a Bugfix Release
+~~~~~~~~~~~~~~~~~~~~~~
+
+As described, bugfix releases are regularly scheduled updates for minor releases
+to ensure fixes for bugs make their way out to users in a timely manner. Since
+bugfix releases should not include new features, we do not issue bugfix releases
+by simply merging from the development ``yt`` branch into the ``stable`` branch.
+Instead, we make use of the ``pr_backport.py`` script to manually cherry-pick
+bugfixes from the from ``yt`` branch onto the ``stable`` branch.
+
+The backport script issues interactive prompts to backport individual pull
+requests to the ``stable`` branch in a temporary clone of the main yt mercurial
+repository on bitbucket. The script is written this way to avoid editing
+history in a clone of the repository that a developer uses for day-to-day work
+and to avoid mixing work-in-progress changes with changes that have made their
+way to the "canonical" yt repository on bitbucket.
+
+Rather than automatically manipulating the temporary repository by scripting
+mercurial commands using ``python-hglib``, the script must be "operated" by a
+live human who is ready to think carefully about what the script is telling them
+to do. Most operations will merely require copy/pasting a suggested mercurial
+command. However, some changes will require manual backporting.
+
+To run the backport script, first open two terminal windows. The first window
+will be used to run the backport script. The second terminal will be used to
+manipulate a temporary clone of the yt mercurial repository. In the first
+window, navigate to the ``scripts`` directory at the root of the yt repository
+and run the backport script,
+
+.. code-block:: bash
+
+   $ cd $YT_HG/scripts
+   $ python pr_backport.py
+
+You will then need to wait for about a minute (depending on the speed of your
+internet connection) while the script makes a clone of the main yt repository
+and then gathers information about pull requests that have been merged since the
+last tagged release. Once this step finishes, you will be prompted to navigate
+to the temporary folder in a new separate terminal session. Do so, and then hit
+the enter key in the original terminal session.
+
+For each pull request in the set of pull requests that were merged since the
+last tagged release that were pointed at the "main" line of development
+(e.g. not the ``experimental`` bookmark), you will be prompted by the script
+with the PR number, title, description, and a suggested mercurial
+command to use to backport the pull request. If the pull request consists of a
+single changeset, you will be prompted to use ``hg graft``. If it contains more
+than one changeset, you will be prompted to use ``hg rebase``. Note that
+``rebase`` is an optional extension for mercurial that is not turned on by
+default. To enable it, add a section like the following in your ``.hgrc`` file:
+
+.. code-block:: none
+
+   [extensions]
+   rebase=
+
+Since ``rebase`` is bundled with core mercurial, you do not need to specify a
+path to the rebase extension, just say ``rebase=`` and mercurial will find the
+version of ``rebase`` bundled with mercurial. Note also that mercurial does not
+automatically update to the tip of the rebased head after executing ``hg
+rebase`` so you will need to manually issue ``hg update stable`` to move your
+working directory to the new head of the stable branch.
+
+If the pull request contains merge commits, you must take care to *not* backport
+commits that merge with the main line of development on the ``yt`` branch. Doing
+so may bring unrelated changes, including new features, into a bugfix
+release. If the pull request you'd like to backport contains merge commits, the
+backport script should warn you to be extra careful.
+
+Once you've finished backporting, the script will let you know that you are done
+and warn you to push your work. Be because the temporary repository you have
+been working with will be deleted as soon as the script exits, so take care to
+push your work on the ``stable`` branch to your fork on bitbucket. Once you've
+pushed to your fork, you will be able to issue a pull request containing the
+backported fixes just like any other yt pull request.
+
+Doing a Minor or Major Release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This is much simpler than a bugfix release.  All that needs to happen is the
+``yt`` branch must get merged into the ``stable`` branch, and any conflicts that
+happen must be resolved, almost certainly in favor of the yt branch. This can
+happen either using a merge tool like ``vimdiff`` and ``kdiff3`` or by telling
+mercurial to write merge markers. The author of this section prefers merge
+markers and suggests the following ``hgrc`` configuration options to get more
+detail during the merge:
+
+.. code-block:: none
+
+   [ui]
+   merge = internal:merge3
+   mergemarkers = detailed
+
+The first option tells mercurial to write merge markers that show the state of
+the conflicted region of the code on both sides of the merge as well as the
+"base" most recent common ancestor changeset. The second option tells mercurial
+to add extra information about the code near the merge markers.
+
+
+Incrementing Version Numbers and Tagging a Release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Before creating the tag for the release, you must increment the version numbers
+that are hard-coded in a few files in the yt source so that version metadata
+for the code is generated correctly. This includes things like ``yt.__version__``
+and the version that gets read by the Python Package Index (PyPI) infrastructure.
+
+The paths relative to the root of the repository for the three files that need
+to be edited are:
+
+* ``doc/source/conf.py``
+
+  The ``version`` and ``release`` variables need to be updated.
+
+* ``setup.py``
+
+  The ``VERSION`` variable needs to be updated
+
+* ``yt/__init__.py``
+
+  The ``__version__`` variable must be updated.
+
+Once these files have been updated, commit these updates. This is the commit we
+will tag for the release.
+
+To actually create the tag, issue the following command:
+
+.. code-block:: bash
+
+   hg tag <tag-name>
+
+Where ``<tag-name>`` follows the project's naming scheme for tags
+(e.g. ``yt-3.2.1``). Commit the tag, and you should be ready to upload the
+release to pypi.
+
+If you are doing a minor or major version number release, you will also need to
+update back to the development branch and update the development version numbers
+in the same files.
+
+
+Uploading to PyPI
+~~~~~~~~~~~~~~~~~
+
+To actually upload the release to the Python Package Index, you just need to
+issue the following command:
+
+.. code-block:: bash
+
+   python setup.py sdist upload -r https://pypi.python.org/pypi
+
+You will be prompted for your PyPI credentials and then the package should
+upload. Note that for this to complete successfully, you will need an account on
+PyPI and that account will need to be registered as an "owner" of the yt
+package. Right now there are three owners: Matt Turk, Britton Smith, and Nathan
+Goldbaum.
+
+After the release is uploaded to PyPI, you should send out an announcement
+e-mail to the yt mailing lists as well as other possibly interested mailing
+lists for all but bugfix releases. In addition, you should contact John ZuHone
+about uploading binary wheels to PyPI for Windows and OS X users and contact
+Nathan Goldbaum about getting the Anaconda packages updated.


https://bitbucket.org/yt_analysis/yt/commits/9f9b662ba955/
Changeset:   9f9b662ba955
Branch:      yt
User:        ngoldbaum
Date:        2015-09-23 04:27:25+00:00
Summary:     typo fixes and some rewording
Affected #:  1 file

diff -r accbf02446b3b0cd91047f9681fd8a9589ad3292 -r 9f9b662ba95552ba55eeea855314fd1983beb667 doc/source/developing/releasing.rst
--- a/doc/source/developing/releasing.rst
+++ b/doc/source/developing/releasing.rst
@@ -10,10 +10,10 @@
 
   These releases are regularly scheduled and will optimally happen approximately
   once a month. These releases should contain only fixes for bugs discovered in
-  earlier releases and should not contain new features of API changes. Bugfix
+  earlier releases and should not contain new features or API changes. Bugfix
   releases should increment the ``PATCH`` version number. Bugfix releases should
   *not* be generated by merging from the ``yt`` branch, instead bugfix pull
-  reuqests should be manually backported using the PR backport script, described
+  requests should be manually backported using the PR backport script, described
   below. Version ``3.2.2`` is a bugfix release.
 
 * Minor releases
@@ -35,7 +35,7 @@
   backwards-incompatible changes. In principle a major version release could
   include arbitrary changes to the library. Major version releases should only
   happen after extensive discussion and vetting among the developer and user
-  community. Like minor rlease, a major release should happen by merging the
+  community. Like minor releases, a major release should happen by merging the
   ``yt`` branch into the ``stable`` branch. Major releases should increment the
   ``MAJOR`` version number and reset the ``MINOR`` and ``PATCH`` version numbers
   to zero. If it ever happens, version ``4.0.0`` will be a major release.
@@ -46,12 +46,13 @@
 Doing a Bugfix Release
 ~~~~~~~~~~~~~~~~~~~~~~
 
-As described, bugfix releases are regularly scheduled updates for minor releases
-to ensure fixes for bugs make their way out to users in a timely manner. Since
-bugfix releases should not include new features, we do not issue bugfix releases
-by simply merging from the development ``yt`` branch into the ``stable`` branch.
-Instead, we make use of the ``pr_backport.py`` script to manually cherry-pick
-bugfixes from the from ``yt`` branch onto the ``stable`` branch.
+As described above, bugfix releases are regularly scheduled updates for minor
+releases to ensure fixes for bugs make their way out to users in a timely
+manner. Since bugfix releases should not include new features, we do not issue
+bugfix releases by simply merging from the development ``yt`` branch into the
+``stable`` branch.  Instead, we make use of the ``pr_backport.py`` script to
+manually cherry-pick bugfixes from the ``yt`` branch onto the ``stable``
+branch.
 
 The backport script issues interactive prompts to backport individual pull
 requests to the ``stable`` branch in a temporary clone of the main yt mercurial
@@ -62,7 +63,7 @@
 
 Rather than automatically manipulating the temporary repository by scripting
 mercurial commands using ``python-hglib``, the script must be "operated" by a
-live human who is ready to think carefully about what the script is telling them
+human who is ready to think carefully about what the script is telling them
 to do. Most operations will merely require copy/pasting a suggested mercurial
 command. However, some changes will require manual backporting.
 
@@ -78,11 +79,11 @@
    $ python pr_backport.py
 
 You will then need to wait for about a minute (depending on the speed of your
-internet connection) while the script makes a clone of the main yt repository
-and then gathers information about pull requests that have been merged since the
-last tagged release. Once this step finishes, you will be prompted to navigate
-to the temporary folder in a new separate terminal session. Do so, and then hit
-the enter key in the original terminal session.
+internet connection and bitbucket's servers) while the script makes a clone of
+the main yt repository and then gathers information about pull requests that
+have been merged since the last tagged release. Once this step finishes, you
+will be prompted to navigate to the temporary folder in a new separate terminal
+session. Do so, and then hit the enter key in the original terminal session.
 
 For each pull request in the set of pull requests that were merged since the
 last tagged release that were pointed at the "main" line of development
@@ -104,7 +105,8 @@
 version of ``rebase`` bundled with mercurial. Note also that mercurial does not
 automatically update to the tip of the rebased head after executing ``hg
 rebase`` so you will need to manually issue ``hg update stable`` to move your
-working directory to the new head of the stable branch.
+working directory to the new head of the stable branch. The backport script
+should prompt you with a suggestion to update as well.
 
 If the pull request contains merge commits, you must take care to *not* backport
 commits that merge with the main line of development on the ``yt`` branch. Doing
@@ -113,11 +115,11 @@
 backport script should warn you to be extra careful.
 
 Once you've finished backporting, the script will let you know that you are done
-and warn you to push your work. Be because the temporary repository you have
-been working with will be deleted as soon as the script exits, so take care to
-push your work on the ``stable`` branch to your fork on bitbucket. Once you've
-pushed to your fork, you will be able to issue a pull request containing the
-backported fixes just like any other yt pull request.
+and warn you to push your work. The temporary repository you have been working
+with will be deleted as soon as the script exits, so take care to push your work
+on the ``stable`` branch to your fork on bitbucket. Once you've pushed to your
+fork, you will be able to issue a pull request containing the backported fixes
+just like any other yt pull request.
 
 Doing a Minor or Major Release
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -126,9 +128,9 @@
 ``yt`` branch must get merged into the ``stable`` branch, and any conflicts that
 happen must be resolved, almost certainly in favor of the yt branch. This can
 happen either using a merge tool like ``vimdiff`` and ``kdiff3`` or by telling
-mercurial to write merge markers. The author of this section prefers merge
-markers and suggests the following ``hgrc`` configuration options to get more
-detail during the merge:
+mercurial to write merge markers. If you prefer merge markers, the following
+configuration options should be turned on in your ``hgrc`` to get more detail
+during the merge:
 
 .. code-block:: none
 


https://bitbucket.org/yt_analysis/yt/commits/2e98bc17afff/
Changeset:   2e98bc17afff
Branch:      yt
User:        jzuhone
Date:        2015-10-05 18:43:51+00:00
Summary:     Merged in ngoldbaum/yt (pull request #1717)

Add pr backport script
Affected #:  6 files

diff -r 2f95674671178a4868f4f6aeff56b041127d189e -r 2e98bc17afff89f7b63aa763b45a82bb8b9fac97 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -3,6 +3,7 @@
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js
 include yt/visualization/mapserver/html/leaflet/images/*.png
+exclude scripts/pr_backport.py
 recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE* *.cu
 recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.html
 recursive-include doc *.h *.c *.sh *.svgz *.pdf *.svg *.pyx

diff -r 2f95674671178a4868f4f6aeff56b041127d189e -r 2e98bc17afff89f7b63aa763b45a82bb8b9fac97 doc/source/developing/index.rst
--- a/doc/source/developing/index.rst
+++ b/doc/source/developing/index.rst
@@ -21,6 +21,7 @@
    building_the_docs
    testing
    debugdrive
+   releasing
    creating_datatypes
    creating_derived_fields
    creating_derived_quantities

diff -r 2f95674671178a4868f4f6aeff56b041127d189e -r 2e98bc17afff89f7b63aa763b45a82bb8b9fac97 doc/source/developing/releasing.rst
--- /dev/null
+++ b/doc/source/developing/releasing.rst
@@ -0,0 +1,208 @@
+How to Do a Release
+-------------------
+
+Periodically, the yt development community issues new releases. Since yt follows
+`semantic versioning <http://semver.org/>`_, the type of release can be read off
+from the version number used. Version numbers should follow the scheme
+``MAJOR.MINOR.PATCH``. There are three kinds of possible releases:
+
+* Bugfix releases
+
+  These releases are regularly scheduled and will optimally happen approximately
+  once a month. These releases should contain only fixes for bugs discovered in
+  earlier releases and should not contain new features or API changes. Bugfix
+  releases should increment the ``PATCH`` version number. Bugfix releases should
+  *not* be generated by merging from the ``yt`` branch, instead bugfix pull
+  requests should be manually backported using the PR backport script, described
+  below. Version ``3.2.2`` is a bugfix release.
+
+* Minor releases
+
+  These releases happen when new features are deemed ready to be merged into the
+  ``stable`` branch and should not happen on a regular schedule. Minor releases
+  can also include fixes for bugs if the fix is determined to be too invasive
+  for a bugfix release. Minor releases should *not* include
+  backwards-incompatible changes and should not change APIs.  If an API change
+  is deemed to be necessary, the old API should continue to function but might
+  trigger deprecation warnings. Minor releases should happen by merging the
+  ``yt`` branch into the ``stable`` branch. Minor releases should increment the
+  ``MINOR`` version number and reset the ``PATCH`` version number to zero.
+  Version ``3.3.0`` is a minor release.
+
+* Major releases
+
+  These releases happen when the development community decides to make major
+  backwards-incompatible changes. In principle a major version release could
+  include arbitrary changes to the library. Major version releases should only
+  happen after extensive discussion and vetting among the developer and user
+  community. Like minor releases, a major release should happen by merging the
+  ``yt`` branch into the ``stable`` branch. Major releases should increment the
+  ``MAJOR`` version number and reset the ``MINOR`` and ``PATCH`` version numbers
+  to zero. If it ever happens, version ``4.0.0`` will be a major release.
+
+The job of doing a release differs depending on the kind of release. Below, we
+describe the necessary steps for each kind of release in detail.
+
+Doing a Bugfix Release
+~~~~~~~~~~~~~~~~~~~~~~
+
+As described above, bugfix releases are regularly scheduled updates for minor
+releases to ensure fixes for bugs make their way out to users in a timely
+manner. Since bugfix releases should not include new features, we do not issue
+bugfix releases by simply merging from the development ``yt`` branch into the
+``stable`` branch.  Instead, we make use of the ``pr_backport.py`` script to
+manually cherry-pick bugfixes from the ``yt`` branch onto the ``stable``
+branch.
+
+The backport script issues interactive prompts to backport individual pull
+requests to the ``stable`` branch in a temporary clone of the main yt mercurial
+repository on bitbucket. The script is written this way to avoid editing
+history in a clone of the repository that a developer uses for day-to-day work
+and to avoid mixing work-in-progress changes with changes that have made their
+way to the "canonical" yt repository on bitbucket.
+
+Rather than automatically manipulating the temporary repository by scripting
+mercurial commands using ``python-hglib``, the script must be "operated" by a
+human who is ready to think carefully about what the script is telling them
+to do. Most operations will merely require copy/pasting a suggested mercurial
+command. However, some changes will require manual backporting.
+
+To run the backport script, first open two terminal windows. The first window
+will be used to run the backport script. The second terminal will be used to
+manipulate a temporary clone of the yt mercurial repository. In the first
+window, navigate to the ``scripts`` directory at the root of the yt repository
+and run the backport script,
+
+.. code-block:: bash
+
+   $ cd $YT_HG/scripts
+   $ python pr_backport.py
+
+You will then need to wait for about a minute (depending on the speed of your
+internet connection and bitbucket's servers) while the script makes a clone of
+the main yt repository and then gathers information about pull requests that
+have been merged since the last tagged release. Once this step finishes, you
+will be prompted to navigate to the temporary folder in a new separate terminal
+session. Do so, and then hit the enter key in the original terminal session.
+
+For each pull request in the set of pull requests that were merged since the
+last tagged release that were pointed at the "main" line of development
+(e.g. not the ``experimental`` bookmark), you will be prompted by the script
+with the PR number, title, description, and a suggested mercurial
+command to use to backport the pull request. If the pull request consists of a
+single changeset, you will be prompted to use ``hg graft``. If it contains more
+than one changeset, you will be prompted to use ``hg rebase``. Note that
+``rebase`` is an optional extension for mercurial that is not turned on by
+default. To enable it, add a section like the following in your ``.hgrc`` file:
+
+.. code-block:: none
+
+   [extensions]
+   rebase=
+
+Since ``rebase`` is bundled with core mercurial, you do not need to specify a
+path to the rebase extension, just say ``rebase=`` and mercurial will find the
+version of ``rebase`` bundled with mercurial. Note also that mercurial does not
+automatically update to the tip of the rebased head after executing ``hg
+rebase`` so you will need to manually issue ``hg update stable`` to move your
+working directory to the new head of the stable branch. The backport script
+should prompt you with a suggestion to update as well.
+
+If the pull request contains merge commits, you must take care to *not* backport
+commits that merge with the main line of development on the ``yt`` branch. Doing
+so may bring unrelated changes, including new features, into a bugfix
+release. If the pull request you'd like to backport contains merge commits, the
+backport script should warn you to be extra careful.
+
+Once you've finished backporting, the script will let you know that you are done
+and warn you to push your work. The temporary repository you have been working
+with will be deleted as soon as the script exits, so take care to push your work
+on the ``stable`` branch to your fork on bitbucket. Once you've pushed to your
+fork, you will be able to issue a pull request containing the backported fixes
+just like any other yt pull request.
+
+Doing a Minor or Major Release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This is much simpler than a bugfix release.  All that needs to happen is the
+``yt`` branch must get merged into the ``stable`` branch, and any conflicts that
+happen must be resolved, almost certainly in favor of the yt branch. This can
+happen either using a merge tool like ``vimdiff`` and ``kdiff3`` or by telling
+mercurial to write merge markers. If you prefer merge markers, the following
+configuration options should be turned on in your ``hgrc`` to get more detail
+during the merge:
+
+.. code-block:: none
+
+   [ui]
+   merge = internal:merge3
+   mergemarkers = detailed
+
+The first option tells mercurial to write merge markers that show the state of
+the conflicted region of the code on both sides of the merge as well as the
+"base" most recent common ancestor changeset. The second option tells mercurial
+to add extra information about the code near the merge markers.
+
+
+Incrementing Version Numbers and Tagging a Release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Before creating the tag for the release, you must increment the version numbers
+that are hard-coded in a few files in the yt source so that version metadata
+for the code is generated correctly. This includes things like ``yt.__version__``
+and the version that gets read by the Python Package Index (PyPI) infrastructure.
+
+The paths relative to the root of the repository for the three files that need
+to be edited are:
+
+* ``doc/source/conf.py``
+
+  The ``version`` and ``release`` variables need to be updated.
+
+* ``setup.py``
+
+  The ``VERSION`` variable needs to be updated
+
+* ``yt/__init__.py``
+
+  The ``__version__`` variable must be updated.
+
+Once these files have been updated, commit these updates. This is the commit we
+will tag for the release.
+
+To actually create the tag, issue the following command:
+
+.. code-block:: bash
+
+   hg tag <tag-name>
+
+Where ``<tag-name>`` follows the project's naming scheme for tags
+(e.g. ``yt-3.2.1``). Commit the tag, and you should be ready to upload the
+release to pypi.
+
+If you are doing a minor or major version number release, you will also need to
+update back to the development branch and update the development version numbers
+in the same files.
+
+
+Uploading to PyPI
+~~~~~~~~~~~~~~~~~
+
+To actually upload the release to the Python Package Index, you just need to
+issue the following command:
+
+.. code-block:: bash
+
+   python setup.py sdist upload -r https://pypi.python.org/pypi
+
+You will be prompted for your PyPI credentials and then the package should
+upload. Note that for this to complete successfully, you will need an account on
+PyPI and that account will need to be registered as an "owner" of the yt
+package. Right now there are three owners: Matt Turk, Britton Smith, and Nathan
+Goldbaum.
+
+After the release is uploaded to PyPI, you should send out an announcement
+e-mail to the yt mailing lists as well as other possibly interested mailing
+lists for all but bugfix releases. In addition, you should contact John ZuHone
+about uploading binary wheels to PyPI for Windows and OS X users and contact
+Nathan Goldbaum about getting the Anaconda packages updated.

diff -r 2f95674671178a4868f4f6aeff56b041127d189e -r 2e98bc17afff89f7b63aa763b45a82bb8b9fac97 scripts/pr_backport.py
--- /dev/null
+++ b/scripts/pr_backport.py
@@ -0,0 +1,311 @@
+import hglib
+import requests
+import shutil
+import tempfile
+
+from datetime import datetime
+from distutils.version import LooseVersion
+from time import strptime, mktime
+
+MERGED_PR_ENDPOINT = ("http://bitbucket.org/api/2.0/repositories/yt_analysis/"
+                      "yt/pullrequests/?state=MERGED")
+
+YT_REPO = "https://bitbucket.org/yt_analysis/yt"
+
+
+def clone_new_repo(source=None):
+    """Clones a new copy of yt_analysis/yt and returns a path to it"""
+    path = tempfile.mkdtemp()
+    dest_repo_path = path+'/yt-backport'
+    if source is None:
+        source = YT_REPO
+    hglib.clone(source=source, dest=dest_repo_path)
+    with hglib.open(dest_repo_path) as client:
+        # Changesets that are on the yt branch but aren't topological ancestors
+        # of whichever changeset the experimental bookmark is pointing at
+        client.update('heads(branch(yt) - ::bookmark(experimental))')
+    return dest_repo_path
+
+
+def get_first_commit_after_last_major_release(repo_path):
+    """Returns the SHA1 hash of the first commit to the yt branch that wasn't
+    included in the last tagged release.
+    """
+    with hglib.open(repo_path) as client:
+        tags = client.log("reverse(tag())")
+        tags = sorted([LooseVersion(t[2]) for t in tags])
+        for t in tags[::-1]:
+            if t.version[0:2] != ['yt', '-']:
+                continue
+            if len(t.version) == 4 or t.version[4] == 0:
+                last_major_tag = t
+                break
+        last_before_release = client.log(
+            "last(ancestors(%s) and branch(yt))" % str(last_major_tag))
+        first_after_release = client.log(
+            "first(descendants(%s) and branch(yt) and not %s)"
+            % (last_before_release[0][1], last_before_release[0][1]))
+    return str(first_after_release[0][1][:12])
+
+
+def get_branch_tip(repo_path, branch, exclude=None):
+    """Returns the SHA1 hash of the most recent commit on the given branch"""
+    revset = "head() and branch(%s)" % branch
+    if exclude is not None:
+        revset += "and not %s" % exclude
+    with hglib.open(repo_path) as client:
+        change = client.log(revset)[0][1][:12]
+    return change
+
+
+def get_lineage_between_release_and_tip(repo_path, first, last):
+    """Returns the lineage of changesets that were at one point the public tip"""
+    with hglib.open(repo_path) as client:
+        lineage = client.log("'%s'::'%s' and p1('%s'::'%s') + '%s'"
+                             % (first, last, first, last, last))
+        return lineage
+
+
+def get_pull_requests_since_last_release(repo_path):
+    """Returns a list of pull requests made since the last tagged release"""
+    r = requests.get(MERGED_PR_ENDPOINT)
+    done = False
+    merged_prs = []
+    with hglib.open(repo_path) as client:
+        last_tag = client.log("reverse(tag())")[0]
+    while not done:
+        if r.status_code != 200:
+            raise RuntimeError
+        data = r.json()
+        prs = data['values']
+        for pr in prs:
+            activity = requests.get(pr['links']['activity']['href']).json()
+            merge_date = None
+            for action in activity['values']:
+                if 'update' in action and action['update']['state'] == 'MERGED':
+                    merge_date = action['update']['date']
+                    merge_date = merge_date.split('.')[0]
+                    timestamp = mktime(strptime(merge_date, "%Y-%m-%dT%H:%M:%S"))
+                    merge_date = datetime.fromtimestamp(timestamp)
+                    break
+            if merge_date is None:
+                break
+            if merge_date < last_tag[6]:
+                done = True
+                break
+            merged_prs.append(pr)
+        r = requests.get(data['next'])
+    return merged_prs
+
+
+def cache_commit_data(prs):
+    """Avoid repeated calls to bitbucket API to get the list of commits per PR"""
+    commit_data = {}
+    for pr in prs:
+        data = requests.get(pr['links']['commits']['href']).json()
+        if data.keys() == [u'error']:
+            # this happens when commits have been stripped, e.g.
+            # https://bitbucket.org/yt_analysis/yt/pull-requests/1641
+            continue
+        done = False
+        commits = []
+        while not done:
+            commits.extend(data['values'])
+            if 'next' not in data:
+                done = True
+            else:
+                data = requests.get(data['next']).json()
+        commit_data[pr['id']] = commits
+    return commit_data
+
+
+def find_commit_in_prs(needle, commit_data, prs):
+    """Finds the commit `needle` PR in the commit_data dictionary
+
+    If found, returns the pr the needle commit is in. If the commit was not
+    part of the PRs in the dictionary, returns None.
+    """
+    for pr_id in commit_data:
+        commits = commit_data[pr_id]
+        for commit in commits:
+            if commit['hash'] == needle[1]:
+                pr = [pr for pr in prs if pr['id'] == pr_id][0]
+                return pr
+    return None
+
+
+def find_merge_commit_in_prs(needle, prs):
+    """Find the merge commit `needle` in the list of `prs`
+
+    If found, returns the pr the merge commit comes from. If not found, return
+    None
+    """
+    for pr in prs[::-1]:
+        if pr['merge_commit'] is not None:
+            if pr['merge_commit']['hash'] == needle[1][:12]:
+                return pr
+    return None
+
+
+def create_commits_to_prs_mapping(lineage, prs):
+    """create a mapping from commits to the pull requests that the commit is
+    part of
+    """
+    commits_to_prs = {}
+    # make a copy of this list to avoid side effects from calling this function
+    my_prs = list(prs)
+    commit_data = cache_commit_data(my_prs)
+    for commit in lineage:
+        cset_hash = commit[1]
+        message = commit[5]
+        if message.startswith('Merged in') and '(pull request #' in message:
+            pr = find_merge_commit_in_prs(commit, my_prs)
+            if pr is None:
+                continue
+            commits_to_prs[cset_hash] = pr
+            # Since we know this PR won't have another commit associated with it,
+            # remove from global list to reduce number of network accesses
+            my_prs.remove(commits_to_prs[cset_hash])
+        else:
+            pr = find_commit_in_prs(commit, commit_data, my_prs)
+            commits_to_prs[cset_hash] = pr
+    return commits_to_prs
+
+
+def invert_commits_to_prs_mapping(commits_to_prs):
+    """invert the mapping from individual commits to pull requests"""
+    inv_map = {}
+    for k, v in commits_to_prs.iteritems():
+        # can't save v itself in inv_map since it's an unhashable dictionary
+        if v is not None:
+            created_date = v['created_on'].split('.')[0]
+            timestamp = mktime(strptime(created_date, "%Y-%m-%dT%H:%M:%S"))
+            created_date = datetime.fromtimestamp(timestamp)
+            pr_desc = (v['id'], v['title'], created_date,
+                       v['links']['html']['href'], v['description'])
+        else:
+            pr_desc = None
+        inv_map[pr_desc] = inv_map.get(pr_desc, [])
+        inv_map[pr_desc].append(k)
+    return inv_map
+
+
+def get_last_descendant(repo_path, commit):
+    """get the most recent descendant of a commit"""
+    with hglib.open(repo_path) as client:
+        com = client.log('last(%s::)' % commit)
+    return com[0][1][:12]
+
+def screen_already_backported(repo_path, inv_map):
+    with hglib.open(repo_path) as client:
+        tags = client.log("reverse(tag())")
+        major_tags = [t for t in tags if t[2].endswith('.0')]
+        most_recent_major_tag_name = major_tags[0][2]
+        lineage = client.log(
+            "descendants(%s) and branch(stable)" % most_recent_major_tag_name)
+        prs_to_screen = []
+        for pr in inv_map:
+            for commit in lineage:
+                if commit[5].startswith('Backporting PR #%s' % pr[0]):
+                    prs_to_screen.append(pr)
+        for pr in prs_to_screen:
+            del inv_map[pr]
+        return inv_map
+
+def commit_already_on_stable(repo_path, commit):
+    with hglib.open(repo_path) as client:
+        commit_info = client.log(commit)[0]
+        most_recent_tag_name = client.log("reverse(tag())")[0][2]
+        lineage = client.log(
+            "descendants(%s) and branch(stable)" % most_recent_tag_name)
+        # if there is a stable commit with the same commit message,
+        # it's been grafted
+        if any([commit_info[5] == c[5] for c in lineage]):
+            return True
+        return False
+
+def backport_pr_commits(repo_path, inv_map, last_stable, prs):
+    """backports pull requests to the stable branch.
+
+    Accepts a dictionary mapping pull requests to a list of commits that
+    are in the pull request.
+    """
+    pr_list = inv_map.keys()
+    pr_list = sorted(pr_list, key=lambda x: x[2])
+    for pr_desc in pr_list:
+        merge_warn = False
+        merge_commits = []
+        pr = [pr for pr in prs if pr['id'] == pr_desc[0]][0]
+        data = requests.get(pr['links']['commits']['href']).json()
+        commits = data['values']
+        while 'next' in data:
+            data = requests.get(data['next']).json()
+            commits.extend(data['values'])
+        commits = [com['hash'][:12] for com in commits]
+        with hglib.open(repo_path) as client:
+            for com in commits:
+                if client.log('merge() and %s' % com) != []:
+                    merge_warn = True
+                    merge_commits.append(com)
+        if len(commits) > 1:
+            revset = " | ".join(commits)
+            revset = '"%s"' % revset
+            message = "Backporting PR #%s %s" % \
+                (pr['id'], pr['links']['html']['href'])
+            dest = get_last_descendant(repo_path, last_stable)
+            message = \
+                "hg rebase -r %s --keep --collapse -m \"%s\" -d %s\n" % \
+                (revset, message, dest)
+            message += "hg update stable\n\n"
+            if merge_warn is True:
+                if len(merge_commits) > 1:
+                    merge_commits = ", ".join(merge_commits)
+                else:
+                    merge_commits = merge_commits[0]
+                message += \
+                    "WARNING, PULL REQUEST CONTAINS MERGE COMMITS, CONSIDER\n" \
+                    "BACKPORTING BY HAND TO AVOID BACKPORTING UNWANTED CHANGES\n"
+                message += \
+                    "Merge commits are %s\n\n" % merge_commits
+        else:
+            if commit_already_on_stable(repo_path, commits[0]) is True:
+                continue
+            message = "hg graft %s\n" % commits[0]
+        print "PR #%s\nTitle: %s\nCreated on: %s\nLink: %s\n%s" % pr_desc
+        print "To backport, issue the following command(s):\n"
+        print message
+        raw_input('Press any key to continue')
+
+
+if __name__ == "__main__":
+    print ""
+    print "Gathering PR information, this may take a minute."
+    print "Don't worry, yt loves you."
+    print ""
+    repo_path = clone_new_repo()
+    try:
+        last_major_release = get_first_commit_after_last_major_release(repo_path)
+        last_dev = get_branch_tip(repo_path, 'yt', 'experimental')
+        last_stable = get_branch_tip(repo_path, 'stable')
+        lineage = get_lineage_between_release_and_tip(
+            repo_path, last_major_release, last_dev)
+        prs = get_pull_requests_since_last_release(repo_path)
+        commits_to_prs = create_commits_to_prs_mapping(lineage, prs)
+        inv_map = invert_commits_to_prs_mapping(commits_to_prs)
+        # for now, ignore commits that aren't part of a pull request since
+        # the last bugfix release. These are mostly commits in pull requests
+        # from before the last bugfix release but might include commits that
+        # were pushed directly to the repo.
+        del inv_map[None]
+
+        inv_map = screen_already_backported(repo_path, inv_map)
+        print "In another terminal window, navigate to the following path:"
+        print "%s" % repo_path
+        raw_input("Press any key to continue")
+        backport_pr_commits(repo_path, inv_map, last_stable, prs)
+        raw_input(
+            "Now you need to push your backported changes. The temporary\n"
+            "repository currently being used will be deleted as soon as you\n"
+            "press any key.")
+    finally:
+        shutil.rmtree(repo_path)
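
For context, backport_pr_commits does not run any Mercurial commands itself; it only
prints instructions for the user to execute by hand. For a pull request containing
several commits, the printed instructions look roughly like the following (the PR
number, revision hashes, destination, and URL below are hypothetical, chosen only
for illustration):

    # hypothetical revisions, PR number, and destination
    hg rebase -r "a1b2c3d4e5f6 | f6e5d4c3b2a1" --keep --collapse \
        -m "Backporting PR #1234 https://bitbucket.org/yt_analysis/yt/pull-requests/1234" \
        -d 0123456789ab
    hg update stable

Here --keep leaves the original changesets on the yt branch untouched and --collapse
squashes the rebased commits into a single changeset on top of the last descendant of
stable. For a single-commit pull request that has not already been grafted, the
suggestion is simply:

    hg graft a1b2c3d4e5f6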

diff -r 2f95674671178a4868f4f6aeff56b041127d189e -r 2e98bc17afff89f7b63aa763b45a82bb8b9fac97 scripts/yt_lodgeit.py
--- a/scripts/yt_lodgeit.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-    LodgeIt!
-    ~~~~~~~~
-
-    A script that pastes stuff into the yt-project pastebin on
-    paste.yt-project.org.
-
-    Modified (very, very slightly) from the original script by the authors
-    below.
-
-    .lodgeitrc / _lodgeitrc
-    -----------------------
-
-    Under UNIX create a file called ``~/.lodgeitrc``, under Windows
-    create a file ``%APPDATA%/_lodgeitrc`` to override defaults::
-
-        language=default_language
-        clipboard=true/false
-        open_browser=true/false
-        encoding=fallback_charset
-
-    :authors: 2007-2008 Georg Brandl <georg at python.org>,
-              2006 Armin Ronacher <armin.ronacher at active-4.com>,
-              2006 Matt Good <matt at matt-good.net>,
-              2005 Raphael Slinckx <raphael at slinckx.net>
-"""
-import os
-import sys
-from optparse import OptionParser
-
-
-SCRIPT_NAME = os.path.basename(sys.argv[0])
-VERSION = '0.3'
-SERVICE_URL = 'http://paste.yt-project.org/'
-SETTING_KEYS = ['author', 'title', 'language', 'private', 'clipboard',
-                'open_browser']
-
-# global server proxy
-_xmlrpc_service = None
-
-
-def fail(msg, code):
-    """Bail out with an error message."""
-    print >> sys.stderr, 'ERROR: %s' % msg
-    sys.exit(code)
-
-
-def load_default_settings():
-    """Load the defaults from the lodgeitrc file."""
-    settings = {
-        'language':     None,
-        'clipboard':    True,
-        'open_browser': False,
-        'encoding':     'iso-8859-15'
-    }
-    rcfile = None
-    if os.name == 'posix':
-        rcfile = os.path.expanduser('~/.lodgeitrc')
-    elif os.name == 'nt' and 'APPDATA' in os.environ:
-        rcfile = os.path.expandvars(r'$APPDATA\_lodgeitrc')
-    if rcfile:
-        try:
-            f = open(rcfile)
-            for line in f:
-                if line.strip()[:1] in '#;':
-                    continue
-                p = line.split('=', 1)
-                if len(p) == 2:
-                    key = p[0].strip().lower()
-                    if key in settings:
-                        if key in ('clipboard', 'open_browser'):
-                            settings[key] = p[1].strip().lower() in \
-                                            ('true', '1', 'on', 'yes')
-                        else:
-                            settings[key] = p[1].strip()
-            f.close()
-        except IOError:
-            pass
-    settings['tags'] = []
-    settings['title'] = None
-    return settings
-
-
-def make_utf8(text, encoding):
-    """Convert a text to UTF-8, brute-force."""
-    try:
-        u = unicode(text, 'utf-8')
-        uenc = 'utf-8'
-    except UnicodeError:
-        try:
-            u = unicode(text, encoding)
-            uenc = 'utf-8'
-        except UnicodeError:
-            u = unicode(text, 'iso-8859-15', 'ignore')
-            uenc = 'iso-8859-15'
-    try:
-        import chardet
-    except ImportError:
-        return u.encode('utf-8')
-    d = chardet.detect(text)
-    if d['encoding'] == uenc:
-        return u.encode('utf-8')
-    return unicode(text, d['encoding'], 'ignore').encode('utf-8')
-
-
-def get_xmlrpc_service():
-    """Create the XMLRPC server proxy and cache it."""
-    global _xmlrpc_service
-    import xmlrpclib
-    if _xmlrpc_service is None:
-        try:
-            _xmlrpc_service = xmlrpclib.ServerProxy(SERVICE_URL + 'xmlrpc/',
-                                                    allow_none=True)
-        except Exception, err:
-            fail('Could not connect to Pastebin: %s' % err, -1)
-    return _xmlrpc_service
-
-
-def copy_url(url):
-    """Copy the url into the clipboard."""
-    # try windows first
-    try:
-        import win32clipboard
-    except ImportError:
-        # then give pbcopy a try.  do that before gtk because
-        # gtk might be installed on os x but nobody is interested
-        # in the X11 clipboard there.
-        from subprocess import Popen, PIPE
-        try:
-            client = Popen(['pbcopy'], stdin=PIPE)
-        except OSError:
-            try:
-                import pygtk
-                pygtk.require('2.0')
-                import gtk
-                import gobject
-            except ImportError:
-                return
-            gtk.clipboard_get(gtk.gdk.SELECTION_CLIPBOARD).set_text(url)
-            gobject.idle_add(gtk.main_quit)
-            gtk.main()
-        else:
-            client.stdin.write(url)
-            client.stdin.close()
-            client.wait()
-    else:
-        win32clipboard.OpenClipboard()
-        win32clipboard.EmptyClipboard()
-        win32clipboard.SetClipboardText(url)
-        win32clipboard.CloseClipboard()
-
-
-def open_webbrowser(url):
-    """Open a new browser window."""
-    import webbrowser
-    webbrowser.open(url)
-
-
-def language_exists(language):
-    """Check if a language alias exists."""
-    xmlrpc = get_xmlrpc_service()
-    langs = xmlrpc.pastes.getLanguages()
-    return language in langs
-
-
-def get_mimetype(data, filename):
-    """Try to get MIME type from data."""
-    try:
-        import gnomevfs
-    except ImportError:
-        from mimetypes import guess_type
-        if filename:
-            return guess_type(filename)[0]
-    else:
-        if filename:
-            return gnomevfs.get_mime_type(os.path.abspath(filename))
-        return gnomevfs.get_mime_type_for_data(data)
-
-
-def print_languages():
-    """Print a list of all supported languages, with description."""
-    xmlrpc = get_xmlrpc_service()
-    languages = xmlrpc.pastes.getLanguages().items()
-    languages.sort(lambda a, b: cmp(a[1].lower(), b[1].lower()))
-    print 'Supported Languages:'
-    for alias, name in languages:
-        print '    %-30s%s' % (alias, name)
-
-
-def download_paste(uid):
-    """Download a paste given by ID."""
-    xmlrpc = get_xmlrpc_service()
-    paste = xmlrpc.pastes.getPaste(uid)
-    if not paste:
-        fail('Paste "%s" does not exist.' % uid, 5)
-    print paste['code'].encode('utf-8')
-
-
-def create_paste(code, language, filename, mimetype, private):
-    """Create a new paste."""
-    xmlrpc = get_xmlrpc_service()
-    rv = xmlrpc.pastes.newPaste(language, code, None, filename, mimetype,
-                                private)
-    if not rv:
-        fail('Could not create paste. Something went wrong '
-             'on the server side.', 4)
-    return rv
-
-
-def compile_paste(filenames, langopt):
-    """Create a single paste out of zero, one or multiple files."""
-    def read_file(f):
-        try:
-            return f.read()
-        finally:
-            f.close()
-    mime = ''
-    lang = langopt or ''
-    if not filenames:
-        data = read_file(sys.stdin)
-        if not langopt:
-            mime = get_mimetype(data, '') or ''
-        fname = ""
-    elif len(filenames) == 1:
-        fname = filenames[0]
-        data = read_file(open(filenames[0], 'rb'))
-        if not langopt:
-            mime = get_mimetype(data, filenames[0]) or ''
-    else:
-        result = []
-        for fname in filenames:
-            data = read_file(open(fname, 'rb'))
-            if langopt:
-                result.append('### %s [%s]\n\n' % (fname, langopt))
-            else:
-                result.append('### %s\n\n' % fname)
-            result.append(data)
-            result.append('\n\n')
-        data = ''.join(result)
-        lang = 'multi'
-    return data, lang, fname, mime
-
-
-def main():
-    """Main script entry point."""
-
-    usage = ('Usage: %%prog [options] [FILE ...]\n\n'
-             'Read the files and paste their contents to %s.\n'
-             'If no file is given, read from standard input.\n'
-             'If multiple files are given, they are put into a single paste.'
-             % SERVICE_URL)
-    parser = OptionParser(usage=usage)
-
-    settings = load_default_settings()
-
-    parser.add_option('-v', '--version', action='store_true',
-                      help='Print script version')
-    parser.add_option('-L', '--languages', action='store_true', default=False,
-                      help='Retrieve a list of supported languages')
-    parser.add_option('-l', '--language', default=settings['language'],
-                      help='Used syntax highlighter for the file')
-    parser.add_option('-e', '--encoding', default=settings['encoding'],
-                      help='Specify the encoding of a file (default is '
-                           'utf-8 or guessing if available)')
-    parser.add_option('-b', '--open-browser', dest='open_browser',
-                      action='store_true',
-                      default=settings['open_browser'],
-                      help='Open the paste in a web browser')
-    parser.add_option('-p', '--private', action='store_true', default=False,
-                      help='Paste as private')
-    parser.add_option('--no-clipboard', dest='clipboard',
-                      action='store_false',
-                      default=settings['clipboard'],
-                      help="Don't copy the url into the clipboard")
-    parser.add_option('--download', metavar='UID',
-                      help='Download a given paste')
-
-    opts, args = parser.parse_args()
-
-    # special modes of operation:
-    # - paste script version
-    if opts.version:
-        print '%s: version %s' % (SCRIPT_NAME, VERSION)
-        sys.exit()
-    # - print list of languages
-    elif opts.languages:
-        print_languages()
-        sys.exit()
-    # - download Paste
-    elif opts.download:
-        download_paste(opts.download)
-        sys.exit()
-
-    # check language if given
-    if opts.language and not language_exists(opts.language):
-        fail('Language %s is not supported.' % opts.language, 3)
-
-    # load file(s)
-    try:
-        data, language, filename, mimetype = compile_paste(args, opts.language)
-    except Exception, err:
-        fail('Error while reading the file(s): %s' % err, 2)
-    if not data:
-        fail('Aborted, no content to paste.', 4)
-
-    # create paste
-    code = make_utf8(data, opts.encoding)
-    pid = create_paste(code, language, filename, mimetype, opts.private)
-    url = '%sshow/%s/' % (SERVICE_URL, pid)
-    print url
-    if opts.open_browser:
-        open_webbrowser(url)
-    if opts.clipboard:
-        copy_url(url)
-
-
-if __name__ == '__main__':
-    sys.exit(main())
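
A side note on the removed script: its defaults were driven by the rc file documented
in the docstring above (~/.lodgeitrc on UNIX, %APPDATA%/_lodgeitrc on Windows). A
minimal rc file, with purely illustrative values for the keys the script recognised,
would look like:

    # illustrative values only
    language=python
    clipboard=true
    open_browser=false
    encoding=utf-8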

diff -r 2f95674671178a4868f4f6aeff56b041127d189e -r 2e98bc17afff89f7b63aa763b45a82bb8b9fac97 setup.py
--- a/setup.py
+++ b/setup.py
@@ -164,7 +164,7 @@
     config.make_config_py()
     # config.make_svn_version_py()
     config.add_subpackage('yt', 'yt')
-    config.add_scripts("scripts/*")
+    config.add_scripts("scripts/iyt")
 
     return config
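
The setup.py change above narrows script installation from everything under scripts/
to just the iyt entry point, presumably so that repository-only helpers such as
scripts/pr_backport.py are not installed alongside the package. A minimal sketch of
the same numpy.distutils pattern (only the calls visible in the diff are known to be
in the real file; the rest is boilerplate assumed for a self-contained example):

    from numpy.distutils.misc_util import Configuration

    def configuration(parent_package='', top_path=None):
        # boilerplate assumed for illustration; matches the usual
        # numpy.distutils configuration() signature
        config = Configuration(None, parent_package, top_path)
        config.add_subpackage('yt', 'yt')
        # list installable scripts explicitly instead of globbing scripts/*
        config.add_scripts("scripts/iyt")
        return config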

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this email because the commit notification service is enabled for this
repository.


