[yt-svn] commit/yt: 6 new changesets

Bitbucket commits-noreply at bitbucket.org
Wed Jul 11 16:59:53 PDT 2012


6 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/22f16945a41f/
changeset:   22f16945a41f
branch:      yt
user:        jzuhone
date:        2012-07-06 23:03:12
summary:     No need to call self._update_plots() twice
affected #:  1 file

diff -r aaea01c1f9cd1f5147d804cf3ecc8245c21255a9 -r 22f16945a41fcc0a245904ee10ac2639ac06a07c yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -517,7 +517,7 @@
 
     @invalidate_data
     def refresh(self):
-        self._setup_plots()
+        pass
 
 class PWViewer(PlotWindow):
     """A viewer for PlotWindows.


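The refresh() body above is reduced to a bare pass because the @invalidate_data decorator already marks the window stale, so the plots get rebuilt on the next access anyway. A minimal sketch of that decorator pattern (illustrative only, not yt's actual implementation; the flag names are assumed):

    from functools import wraps

    def invalidate_data(func):
        # Illustrative sketch: after the wrapped call, mark cached data and
        # plots as stale so the next lookup regenerates them.  The real yt
        # decorator lives in plot_window.py and may differ in detail.
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            result = func(self, *args, **kwargs)
            self._data_valid = False
            self._plot_valid = False
            return result
        return wrapper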

https://bitbucket.org/yt_analysis/yt/changeset/dd041de2ab1e/
changeset:   dd041de2ab1e
branch:      yt
user:        jzuhone
date:        2012-07-10 01:44:14
summary:     Merged changes from yt_analysis/yt
affected #:  12 files

diff -r 22f16945a41fcc0a245904ee10ac2639ac06a07c -r dd041de2ab1e3c745d0f389d5f98db59ed6a4a98 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -216,7 +216,7 @@
                                     lambda_bins=self.lambda_bins[left_index[lixel]:right_index[lixel]])
                     # Widen wavelength window until optical depth reaches a max value at the ends.
                     if (line_tau[0] < max_tau and line_tau[-1] < max_tau) or \
-                      (left_index[lixel] == 0 and right_index[lixel] == self.n_lambda - 1):
+                      (left_index[lixel] <= 0 and right_index[lixel] >= self.n_lambda):
                         break
                     my_bin_ratio *= 2
                     left_index[lixel]  = (center_bins[lixel] -


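The new stopping test above accepts a window that already spans the whole spectrum (left_index <= 0 and right_index >= n_lambda) instead of requiring the indices to land exactly on 0 and n_lambda - 1, so the doubling loop still terminates when the window overshoots the array bounds. A simplified, self-contained sketch of that widening loop (one-dimensional; tau stands in for the computed line profile):

    def widen_window(tau, center, max_tau, n_lambda, half_width=2):
        # Keep doubling the window around `center` until the optical depth at
        # both edges falls below max_tau, or the window covers the spectrum.
        # `tau` is any sequence of optical depths of length n_lambda.
        while True:
            left = max(center - half_width, 0)
            right = min(center + half_width, n_lambda)
            line_tau = tau[left:right]
            if (line_tau[0] < max_tau and line_tau[-1] < max_tau) or \
                    (left <= 0 and right >= n_lambda):
                return left, right
            half_width *= 2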
diff -r 22f16945a41fcc0a245904ee10ac2639ac06a07c -r dd041de2ab1e3c745d0f389d5f98db59ed6a4a98 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -53,7 +53,7 @@
     pasteboard_repo = '',
     test_storage_dir = '/does/not/exist',
     enzo_db = '',
-    hub_url = 'https://data.yt-project.org/upload',
+    hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety


diff -r 22f16945a41fcc0a245904ee10ac2639ac06a07c -r dd041de2ab1e3c745d0f389d5f98db59ed6a4a98 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -2474,6 +2474,8 @@
     def get_data(self, fields=None, in_grids=False, force_particle_read = False):
         if self._grids == None:
             self._get_list_of_grids()
+        if len(self._grids) == 0:
+            raise YTNoDataInObjectError(self)
         points = []
         if not fields:
             fields_to_get = self.fields[:]
@@ -3584,8 +3586,8 @@
         fields : array_like, optional
             A list of fields that you'd like pre-generated for your object
 
-        Example
-        -------
+        Examples
+        --------
         cube = pf.h.covering_grid(2, left_edge=[0.0, 0.0, 0.0], \
                                   right_edge=[1.0, 1.0, 1.0],
                                   dims=[128, 128, 128])


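The guard added to get_data() above fails early, with a descriptive exception, when a selection contains no grids, rather than letting a later step crash on an empty list. A rough stand-alone sketch of the idea (the real YTNoDataInObjectError presumably lives in yt's exceptions module; this stand-in is for illustration only):

    class YTNoDataInObjectError(Exception):
        # Stand-in for yt's exception class, named after the one used above.
        def __init__(self, obj):
            Exception.__init__(self, "No grids (and hence no data) intersect "
                                     "the object %r" % (obj,))

    def require_grids(obj):
        # Raise as soon as we know the object selects nothing.
        if len(obj._grids) == 0:
            raise YTNoDataInObjectError(obj)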


diff -r 22f16945a41fcc0a245904ee10ac2639ac06a07c -r dd041de2ab1e3c745d0f389d5f98db59ed6a4a98 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -107,9 +107,15 @@
             self.grid_left_edge[:,i] = DLE[i]
             self.grid_right_edge[:,i] = DRE[i]
         # We only go up to ND for 2D datasets
-        self.grid_left_edge[:,:ND] = f["/bounding box"][:,:,0]
-        self.grid_right_edge[:,:ND] = f["/bounding box"][:,:,1]
-        
+        if (f["/bounding box"][:,:,0].shape[1] == ND) :
+            #FLASH 2/3 2D data
+            self.grid_left_edge[:,:ND] = f["/bounding box"][:,:,0]
+            self.grid_right_edge[:,:ND] = f["/bounding box"][:,:,1]
+        else:
+            self.grid_left_edge[:,:] = f["/bounding box"][:,:,0]
+            self.grid_right_edge[:,:] = f["/bounding box"][:,:,1]
+            
+
         # Move this to the parameter file
         try:
             nxb = pf.parameters['nxb']


diff -r 22f16945a41fcc0a245904ee10ac2639ac06a07c -r dd041de2ab1e3c745d0f389d5f98db59ed6a4a98 yt/gui/reason/extdirect_repl.py
--- a/yt/gui/reason/extdirect_repl.py
+++ b/yt/gui/reason/extdirect_repl.py
@@ -40,9 +40,12 @@
 import base64
 import imp
 import threading
-import Pyro4
 import Queue
 import zipfile
+try:
+    import Pyro4
+except ImportError:
+    pass
 
 from yt.funcs import *
 from yt.utilities.logger import ytLogger, ufstring


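Wrapping the Pyro4 import above in try/except makes the package optional: the Reason GUI can still be imported without it, and only the code paths that actually need Pyro4 will fail. A common way to make that explicit (the flag and error message are illustrative, not from this diff):

    try:
        import Pyro4
        PYRO4_AVAILABLE = True
    except ImportError:
        PYRO4_AVAILABLE = False

    def require_pyro4():
        # Call this at the top of any function that really needs Pyro4.
        if not PYRO4_AVAILABLE:
            raise ImportError("Pyro4 is required for this feature; "
                              "install it and try again.")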
diff -r 22f16945a41fcc0a245904ee10ac2639ac06a07c -r dd041de2ab1e3c745d0f389d5f98db59ed6a4a98 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -28,6 +28,7 @@
 from yt.startup_tasks import parser, subparsers
 from yt.mods import *
 from yt.funcs import *
+from yt.utilities.minimal_representation import MinimalProjectDescription
 import argparse, os, os.path, math, sys, time, subprocess, getpass, tempfile
 import urllib, urllib2, base64
 
@@ -147,7 +148,7 @@
                    help="Number of dex above min to display"),
     width   = dict(short="-w", long="--width",
                    action="store", type=float,
-                   dest="width", default=1.0,
+                   dest="width", default=None,
                    help="Width in specified units"),
     unit    = dict(short="-u", long="--unit",
                    action="store", type=str,
@@ -767,6 +768,14 @@
 
     def __call__(self, args):
         import imp
+        api_key = ytcfg.get("yt","hub_api_key")
+        url = ytcfg.get("yt","hub_url")
+        if api_key == '':
+            print
+            print "You must create an API key before uploading."
+            print "https://data.yt-project.org/getting_started.html"
+            print
+            sys.exit(1)
         from mercurial import hg, ui, commands, error, config
         uri = "http://hub.yt-project.org/3rdparty/API/api.php"
         uu = ui.ui()
@@ -860,12 +869,14 @@
                         bb_username, bb_username, bb_repo_name)
             cedit.config.addsource(uu, repo, "default", bb_url)
             commands.push(uu, repo, bb_url)
+            # Now we reset
+            bb_url = "https://bitbucket.org/%s/%s" % (
+                        bb_username, bb_repo_name)
         if bb_url.startswith("bb://"):
             bb_username, bb_repo_name = bb_url.split("/")[-2:]
-            bb_url = "https://%s@bitbucket.org/%s/%s" % (
-                bb_username, bb_username, bb_repo_name)
+            bb_url = "https://bitbucket.org/%s/%s" % (
+                bb_username, bb_repo_name)
         # Now we can submit
-        import xml.etree.ElementTree as etree
         print
         print "Okay.  Now we're ready to submit to the Hub."
         print "Remember, you can go to the Hub at any time at"
@@ -873,58 +884,46 @@
         print
         print "(Especially if you don't have a user yet!  We can wait.)"
         print
-        hub_username = raw_input("What is your Hub username? ")
-        hub_password = getpass.getpass("What is your Hub password? ")
-        data = urllib.urlencode(dict(fn = "list",
-                                     username=hub_username,
-                                     password=hub_password))
-        req = urllib2.Request(uri, data)
-        rv = urllib2.urlopen(req).read()
-        try:
-            cats = etree.fromstring(rv)
-        except:
-            print "I think you entered your password wrong.  Please check!"
-            return
 
-        categories = {}
-
-        for cat in cats.findall("./cate"):
-            cat_id = int(cat.findall("id")[0].text)
-            cat_name = cat.findall("name")[0].text
-            categories[cat_id] = cat_name
-
-        print
-        for i, n in sorted(categories.items()):
-            print "%i. %s" % (i, n)
-        print
-        cat_id = int(raw_input("Which category number does your script fit into? "))
+        categories = {
+            1: "News",
+            2: "Documents",
+            3: "Simulation Management",
+            4: "Data Management",
+            5: "Analysis and Visualization",
+            6: "Paper Repositories",
+            7: "Astrophysical Utilities",
+            8: "yt Scripts"
+        }
+        cat_id = -1
+        while cat_id not in categories:
+            print
+            for i, n in sorted(categories.items()):
+                print "%i. %s" % (i, n)
+            print
+            cat_id = int(raw_input("Which category number does your script fit into? "))
         print
         print "What is the title of your submission? (Usually a repository name) "
         title = raw_input("Title? ")
         print
-        print "What tags should be applied to this submission?  Separate with commas."
-        print "(e.g., enzo, flash, gadget, ramses, nyx, yt, visualization, analysis,"
-        print " utility, cosmology)"
-        print
-        tags = raw_input("Tags? ")
-        print
         print "Give us a very brief summary of the project -- enough to get someone"
         print "interested enough to click the link and see what it's about.  This"
         print "should be a few sentences at most."
         print
         summary = raw_input("Summary? ")
         print
+        print "Is there a URL that you'd like to point the image to?  Just hit"
+        print "enter if no."
+        print
+        image_url = raw_input("Image URL? ").strip()
+        print
         print "Okay, we're going to submit!  Press enter to submit, Ctrl-C to back out."
         print
         loki = raw_input()
 
-        data = urllib.urlencode(dict(fn = "post",
-                                     username=hub_username, password=hub_password,
-                                     url = bb_url, category = cat_id, title = title,
-                                     content = summary, tags = tags))
-        req = urllib2.Request(uri, data)
-        rv = urllib2.urlopen(req).read()
-        print rv
+        mpd = MinimalProjectDescription(title, bb_url, summary, 
+                categories[cat_id], image_url)
+        mpd.upload()
 
 class YTInstInfoCmd(YTCommand):
     name = "instinfo"
@@ -1134,25 +1133,30 @@
         elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
         center = na.array(center)
-        pc=PlotCollection(pf, center=center)
         if args.axis == 4:
             axes = range(3)
         else:
             axes = [args.axis]
         for ax in axes:
             mylog.info("Adding plot for axis %i", ax)
-            if args.projection: pc.add_projection(args.field, ax,
-                                    weight_field=args.weight, center=center)
-            else: pc.add_slice(args.field, ax, center=center)
-            if args.grids: pc.plots[-1].modify["grids"]()
+            if args.projection:
+                plt = ProjectionPlot(pf, ax, args.field, center=center,
+                                     width=args.width,
+                                     weight_field=args.weight)
+            else:
+                plt = SlicePlot(pf, ax, args.field, center=center,
+                                width=args.width)
+            if args.grids:
+                plt.draw_grids()
             if args.time: 
                 time = pf.current_time*pf['Time']*pf['years']
-                pc.plots[-1].modify["text"]((0.2,0.8), 't = %5.2e yr'%time)
-        pc.set_width(args.width, args.unit)
-        pc.set_cmap(args.cmap)
-        if args.zlim: pc.set_zlim(*args.zlim)
-        if not os.path.isdir(args.output): os.makedirs(args.output)
-        pc.save(os.path.join(args.output,"%s" % (pf)))
+                plt.annotate_text((0.2,0.8), 't = %5.2e yr'%time)
+
+            plt.set_cmap(args.field,args.cmap)
+            if args.zlim:
+                plt.set_zlim(args.field,*args.zlim)
+            if not os.path.isdir(args.output): os.makedirs(args.output)
+            plt.save(os.path.join(args.output,"%s" % (pf)))
 
 class YTRenderCmd(YTCommand):
         

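The rewritten plot command above drops PlotCollection in favor of the new SlicePlot/ProjectionPlot window classes, one plot per axis. Roughly the same thing done by hand in a short script (the dataset path and field are placeholders; the import locations are taken from where this changeset defines the classes):

    from yt.mods import load
    from yt.visualization.plot_window import SlicePlot

    pf = load("galaxy0030/galaxy0030")   # placeholder dataset
    for ax in range(3):
        plt = SlicePlot(pf, ax, "Density", center="c", width=(20, "kpc"))
        plt.set_cmap("Density", "algae")
        plt.save("galaxy0030")           # save() appends type, axis and field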

diff -r 22f16945a41fcc0a245904ee10ac2639ac06a07c -r dd041de2ab1e3c745d0f389d5f98db59ed6a4a98 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -204,7 +204,7 @@
 
         for i in range(3):
             ci[i] = (int)((pos[i]-self.LeftEdge[i])/c.dds[i])
-            dp[i] = (pos[i] - self.LeftEdge[i])%(c.dds[i])
+            dp[i] = (pos[i] - ci[i]*c.dds[i] - self.LeftEdge[i])/c.dds[i]
 
         cdef int offset = ci[0] * (c.dims[1] + 1) * (c.dims[2] + 1) \
                           + ci[1] * (c.dims[2] + 1) + ci[2]


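The change above swaps a physical-length remainder for a normalized offset: dp is now the position within the cell as a fraction of the cell width, measured from the vertex at index ci. A worked one-dimensional example (numbers invented; the real code operates on 3-vectors inside the Cython sampler):

    pos, left_edge, dds = 0.60, 0.0, 0.25
    ci = int((pos - left_edge) / dds)              # = 2
    dp_old = (pos - left_edge) % dds               # = 0.10, a length in code units
    dp_new = (pos - ci * dds - left_edge) / dds    # = 0.40, a fraction of the cell
    assert abs(dp_old - 0.10) < 1e-9
    assert abs(dp_new - 0.40) < 1e-9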
diff -r 22f16945a41fcc0a245904ee10ac2639ac06a07c -r dd041de2ab1e3c745d0f389d5f98db59ed6a4a98 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -98,6 +98,8 @@
             gs = getattr(pobj, attr)
         else:
             gs = getattr(pobj._data_source, attr)
+        if len(gs) == 0:
+            raise YTNoDataInObjectError(pobj)
         if hasattr(gs[0], 'proc_num'):
             # This one sort of knows about MPI, but not quite
             self._objs = [g for g in gs if g.proc_num ==


diff -r 22f16945a41fcc0a245904ee10ac2639ac06a07c -r dd041de2ab1e3c745d0f389d5f98db59ed6a4a98 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -152,7 +152,7 @@
         bitmap_array = na.concatenate([bitmap_array, alpha_channel], axis=-1)
     if transpose:
         for channel in range(bitmap_array.shape[2]):
-            bitmap_array[:,:,channel] = bitmap_array[:,:,channel].transpose()
+            bitmap_array[:,:,channel] = bitmap_array[:,:,channel].T
     au.write_png(bitmap_array.copy(), filename)
     return bitmap_array
 

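For the 2-D channel slices handled above, ndarray.T and ndarray.transpose() are interchangeable; .T is just the terser spelling. For example:

    import numpy as np
    channel = np.arange(6).reshape(2, 3)
    assert (channel.T == channel.transpose()).all()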

diff -r 22f16945a41fcc0a245904ee10ac2639ac06a07c -r dd041de2ab1e3c745d0f389d5f98db59ed6a4a98 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -275,10 +275,10 @@
         for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
             pxo = px_off * dom[px_index]
             pyo = py_off * dom[py_index]
-            left_edge_px = (GLE[:,px_index]+pxo-x0)*dx
-            left_edge_py = (GLE[:,py_index]+pyo-y0)*dy
-            right_edge_px = (GRE[:,px_index]+pxo-x0)*dx
-            right_edge_py = (GRE[:,py_index]+pyo-y0)*dy
+            left_edge_px = (GLE[:,px_index]+pxo-x0)*dx + xx0
+            left_edge_py = (GLE[:,py_index]+pyo-y0)*dy + yy0
+            right_edge_px = (GRE[:,px_index]+pxo-x0)*dx + xx0
+            right_edge_py = (GRE[:,py_index]+pyo-y0)*dy + yy0
             verts = na.array(
                     [(left_edge_px, left_edge_px, right_edge_px, right_edge_px),
                      (left_edge_py, right_edge_py, right_edge_py, left_edge_py)])
@@ -635,6 +635,7 @@
         """
         self.pos = pos
         self.text = text
+        if text_args is None: text_args = {}
         self.text_args = text_args
 
     def __call__(self, plot):


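The `if text_args is None: text_args = {}` line above is the usual way to give a Python callable a fresh dict per call; a mutable `{}` default in the signature would be shared across every call. A small illustration of the difference (names are generic, not from the diff):

    def bad(args={}):           # one dict object shared by all calls
        args.setdefault("n", 0)
        args["n"] += 1
        return args["n"]

    def good(args=None):        # the pattern used in the hunk above
        if args is None:
            args = {}
        args.setdefault("n", 0)
        args["n"] += 1
        return args["n"]

    assert (bad(), bad()) == (1, 2)      # state leaks between calls
    assert (good(), good()) == (1, 1)    # each call gets its own dict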
diff -r 22f16945a41fcc0a245904ee10ac2639ac06a07c -r dd041de2ab1e3c745d0f389d5f98db59ed6a4a98 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -30,12 +30,14 @@
 from functools import wraps
 
 import numpy as na
+from .color_maps import yt_colormaps, is_colormap
 from .image_writer import \
     write_image, apply_colormap
 from .fixed_resolution import \
     FixedResolutionBuffer, \
     ObliqueFixedResolutionBuffer
-from .plot_modifications import get_smallest_appropriate_unit
+from .plot_modifications import get_smallest_appropriate_unit, \
+    GridBoundaryCallback, TextLabelCallback
 from .tick_locators import LogLocator, LinearLocator
 from yt.utilities.delaunay.triangulate import Triangulation as triang
 
@@ -72,6 +74,21 @@
 
 field_transforms = {}
 
+class IMPlot(object): pass
+
+class CallbackWrapper(object):
+    def __init__(self, window_plot, frb, field):
+        self.data = frb.data_source
+        self._axes = window_plot.axes
+        self._figure = window_plot.figure
+        if len(self._axes.images) > 0:
+            self.image = self._axes.images[0]
+        self._period = frb.pf.domain_width
+        self.pf = frb.pf
+        self.xlim = (frb.bounds[0], frb.bounds[1])
+        self.ylim = (frb.bounds[2], frb.bounds[3])
+        self._type_name = ''
+
 class FieldTransform(object):
     def __init__(self, name, func, locator):
         self.name = name
@@ -94,8 +111,6 @@
 
 def SlicePlot(pf, axis, fields, center=None, width=None, origin='center-window'):
     r"""
-    SlicePlot(pf, axis, fields, center=None, width=None, origin='center-window')
-
     Given a pf object, an axis to slice along, and a field name
     string, this will return a PWViewrMPL object containing
     the plot.
@@ -105,48 +120,38 @@
 
     Parameters
     ----------
-    pf : :class:`yt.data_objects.apy.StaticOutput`
-
+    pf : :class:`yt.data_objects.api.StaticOutput`
         This is the parameter file object corresponding to the
         simulation output to be plotted.
-
     axis : int
-
-         An int corresponding to the axis to slice along.  
-	 (0 : x, 1 : y, 2 : z)
-
+        An int corresponding to the axis to slice along.  (0=x, 1=y, 2=z)
     fields : string
-    
-          The name of the field(s) to be plotted.
-
+        The name of the field(s) to be plotted.
     center : A two or three-element vector of sequence floats, 'c', or
              'center'
+        The coordinate of the center of the image.  If left blanck,
+        the image centers on the location of the maximum density
+        cell.  If set to 'c' or 'center', the plot is centered on
+        the middle of the domain.
+    width : A tuple or a float
+        A tuple containing the width of image and the string key of
+        the unit: (width, 'unit').  If set to a float, code units
+        are assumed
+    origin : A string
+        The location of the origin of the plot coordinate system.
+        Currently, can be set to three options: 'left-domain', corresponding
+        to the bottom-left hand corner of the simulation domain, 'left-window',
+        corresponding to the bottom-left hand cordiner of the plot window, or
+        'center-window' for the center of the plot window.
 
-           The coordinate of the center of the image.  If left blanck,
-           the image centers on the location of the maximum density
-           cell.  If set to 'c' or 'center', the plot is centered on
-           the middle of the domain.
+    Examples
+    --------
 
-    width : A tuple or a float
-    
-           A tuple containing the width of image and the string key of
-           the unit: (width, 'unit').  If set to a float, code units
-           are assumed
+    This is a very simple way of creating a slice plot.
 
-    origin : A string
-
-            The location of the origin of the plot coordinate system.
-            Currently, can be set to three options:
-
-	    'left-domain':
-	         The bottom-left hand corner of the simulation domain.
-
-            'left-window':
-	         The bottom-left hand cordiner of the plot window.
-
-	    'center-window'
-	         The center of the plot window.
-
+    >>> pf = load('galaxy0030/galaxy0030')
+    >>> p = SlicePlot(pf,2,'Density','c',(20,'kpc'))
+    >>> p.save('sliceplot')
     """
     (bounds,center) = GetBoundsAndCenter(axis,center,width,pf)
     slice = pf.h.slice(axis,center[axis],fields=fields)
@@ -155,9 +160,6 @@
 def ProjectionPlot(pf, axis, fields, center=None, width=None,
                    weight_field=None, max_level=None, origin='center-window'):
     r"""
-    ProjectionPlot(pf, axis, fields, center=None, width=None, 
-                   weight_field=None, max_level=None, origin='center-window')
-
     Given a pf object, an axis to project along, and a field name
     string, this will return a PWViewrMPL object containing
     the plot.
@@ -167,55 +169,48 @@
 
     Parameters
     ----------
-    pf : :class:`yt.data_objects.apy.StaticOutput`
-
+    pf : :class:`yt.data_objects.api.StaticOutput`
         This is the parameter file object corresponding to the
         simulation output to be plotted.
-
     axis : int
-
-         An int corresponding to the axis to project along.  
-	 (0 : x, 1 : y, 2 : z)
-
+        An int corresponding to the axis to slice along.  (0=x, 1=y, 2=z)
     fields : string
-    
-          The name of the field(s) to be plotted.
-
+        The name of the field(s) to be plotted.
     center : A two or three-element vector of sequence floats, 'c', or
              'center'
+        The coordinate of the center of the image.  If left blanck,
+        the image centers on the location of the maximum density
+        cell.  If set to 'c' or 'center', the plot is centered on
+        the middle of the domain.
+    width : A tuple or a float
+        A tuple containing the width of image and the string key of
+        the unit: (width, 'unit').  If set to a float, code units
+        are assumed
+    origin : A string
+        The location of the origin of the plot coordinate system.
+        Currently, can be set to three options: 'left-domain', corresponding
+        to the bottom-left hand corner of the simulation domain, 'left-window',
+        corresponding to the bottom-left hand cordiner of the plot window, or
+        'center-window' for the center of the plot window.
+    weight_field : string
+        The name of the weighting field.  Set to None for no weight.
+    max_level: int
+        The maximum level to project to.
+    origin : A string
+        The location of the origin of the plot coordinate system.
+        Currently, can be set to three options: 'left-domain', corresponding
+        to the bottom-left hand corner of the simulation domain, 'left-window',
+        corresponding to the bottom-left hand cordiner of the plot window, or
+        'center-window' for the center of the plot window.
 
-           The coordinate of the center of the image.  If left blanck,
-           the image centers on the location of the maximum density
-           cell.  If set to 'c' or 'center', the plot is centered on
-           the middle of the domain.
+    Examples
+    --------
 
-    width : A tuple or a float
-    
-           A tuple containing the width of image and the string key of
-           the unit: (width, 'unit').  If set to a float, code units
-           are assumed
+    This is a very simple way of creating a projection plot.
 
-    weight_field : string
-
-            The name of the weighting field.  Set to None for no weight.
-
-    max_level: int
-
-            The maximum level to project to.
-            
-    origin : string
-
-            The location of the origin of the plot coordinate system.
-            Currently, can be set to three options:
-
-	    'left-domain':
-	         The bottom-left hand corner of the simulation domain.
-
-            'left-window':
-	         The bottom-left hand cordiner of the plot window.
-
-	    'center-window'
-	         The center of the plot window.
+    >>> pf = load('galaxy0030/galaxy0030')
+    >>> p = ProjectionPlot(pf,2,'Density','c',(20,'kpc'))
+    >>> p.save('sliceplot')
 
     """
     (bounds,center) = GetBoundsAndCenter(axis,center,width,pf)
@@ -224,9 +219,7 @@
 
 def OffAxisSlicePlot(pf, normal, fields, center=None, width=None, north_vector=None):
     r"""
-    SlicePlot(pf, normal, fields, center=None, width=None, north_vector=None)
-
-    Given a pf object, a normal vector defining a slicing plonae, and
+    Given a pf object, a normal vector defining a slicing plane, and
     a field name string, this will return a PWViewrMPL object
     containing the plot.
 
@@ -235,39 +228,27 @@
 
     Parameters
     ----------
-    pf : :class:`yt.data_objects.apy.StaticOutput`
-
+    pf : :class:`yt.data_objects.api.StaticOutput`
         This is the parameter file object corresponding to the
         simulation output to be plotted.
-
     normal : a sequence of floats
-
          The vector normal to the slicing plane.
-
     fields : string
-    
-          The name of the field(s) to be plotted.
-
+        The name of the field(s) to be plotted.
     center : A two or three-element vector of sequence floats, 'c', or
              'center'
-
-           The coordinate of the center of the image.  If left blanck,
-           the image centers on the location of the maximum density
-           cell.  If set to 'c' or 'center', the plot is centered on
-           the middle of the domain.
-
+        The coordinate of the center of the image.  If left blanck,
+        the image centers on the location of the maximum density
+        cell.  If set to 'c' or 'center', the plot is centered on
+        the middle of the domain.
     width : A tuple or a float
-    
-           A tuple containing the width of image and the string key of
-           the unit: (width, 'unit').  If set to a float, code units
-           are assumed
-
+        A tuple containing the width of image and the string key of
+        the unit: (width, 'unit').  If set to a float, code units
+        are assumed
     north-vector : a sequence of floats
-    
-           A vector defining the 'up' direction in the plot.  This
-           option sets the orientation of the slicing plane.  If not
-           set, an arbitrary grid-aligned north-vector is chosen.
-
+        A vector defining the 'up' direction in the plot.  This
+        option sets the orientation of the slicing plane.  If not
+        set, an arbitrary grid-aligned north-vector is chosen.
     """
     (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
     cutting = pf.h.cutting(normal,center,fields=fields,north_vector=north_vector)
@@ -277,7 +258,7 @@
 
 def GetBoundsAndCenter(axis, center, width, pf):
     if width == None:
-        width = (pf.domain_right_edge - pf.domain_left_edge)
+        width = pf.domain_width.min()
     elif iterable(width):
         w,u = width
         width = w/pf[u]
@@ -464,22 +445,27 @@
             self.ylim = bounds[2:]
             
     @invalidate_data
-    def set_width(self, new_width):
+    def set_width(self, width, unit = '1'):
         """set the width of the plot window
 
         parameters
         ----------
-        new_width : float
-            the width of the image in code units.
+        width : float
+            the width of the image.
+        unit : str
+            the unit the width has been specified in.
+            defaults to code units.
 
         """
         Wx, Wy = self.width
+        width = width / self.pf[unit]
+        
         centerx = self.xlim[0] + Wx*0.5
         centery = self.ylim[0] + Wy*0.5
-        self.xlim = (centerx - new_width/2.,
-                     centerx + new_width/2.)
-        self.ylim = (centery - new_width/2.,
-                     centery + new_width/2.)
+        self.xlim = (centerx - width/2.,
+                     centerx + width/2.)
+        self.ylim = (centery - width/2.,
+                     centery + width/2.)
 
     @invalidate_data
     def set_center(self, new_center):
@@ -495,10 +481,6 @@
         Wy = self.ylim[1] - self.ylim[0]
         return (Wx, Wy)
 
-    # @property
-    # def window(self):
-    #     return self.xlim + self.ylim
-
     @invalidate_data
     def set_antialias(self,aa):
         self.antialias = aa
@@ -517,6 +499,7 @@
 
     @invalidate_data
     def refresh(self):
+        # invalidate_data will take care of everything
         pass
 
 class PWViewer(PlotWindow):
@@ -526,8 +509,13 @@
     def __init__(self, *args,**kwargs):
         setup = kwargs.pop("setup", True)
         PlotWindow.__init__(self, *args,**kwargs)
+        self._colormaps = defaultdict(lambda: 'algae')
+        self.zmin = None
+        self.zmax = None
+        self._draw_grids = False
+        self._annotate_text = False
+        self._callbacks = []
         self._field_transform = {}
-        self._colormaps = defaultdict(lambda: 'algae')
         for field in self._frb.data.keys():
             if self.pf.field_info[field].take_log:
                 self._field_transform[field] = log_transform
@@ -565,8 +553,19 @@
         self._colormaps[field] = cmap_name
 
     @invalidate_plot
-    def set_zlim(self):
-        pass
+    def set_zlim(self, field, zmin, zmax):
+        self.zmin = zmin
+        self.zmax = zmax
+
+    @invalidate_plot
+    def draw_grids(self, alpha=1.0, min_pix=1, annotate = False, periodic = True):
+        self._draw_grids = True
+        self.grid_params = (alpha, min_pix, annotate, periodic)
+
+    @invalidate_plot
+    def annotate_text(self, position, message, data_coords = False, text_args = None):
+        self._annotate_text = True
+        self.text_params = (position, message, data_coords, text_args)
 
     def get_metadata(self, field, strip_mathml = True, return_string = True):
         fval = self._frb[field]
@@ -648,7 +647,8 @@
             extent.extend([self.xlim[i] - yc for i in (0,1)])
             extent = [el*self.pf[md['unit']] for el in extent]
 
-            self.plots[f] = WindowPlotMPL(self._frb[f], extent, self._field_transform[f])
+            self.plots[f] = WindowPlotMPL(self._frb[f], extent, self._field_transform[f], 
+                                          self._colormaps[f], zlim = (self.zmin,self.zmax))
             
             cb = matplotlib.pyplot.colorbar(self.plots[f].image,cax = self.plots[f].cax)
 
@@ -664,11 +664,39 @@
 
             cb.set_label(r'$\rm{'+f.encode('string-escape')+r'}\/\/('+md['units']+r')$')
 
+            if self._draw_grids:
+                self._callbacks.append(GridBoundaryCallback(*self.grid_params))
+
+            if self._annotate_text:
+                self._callbacks.append(TextLabelCallback(*self.text_params))
+                
+            for callback in self._callbacks:
+                cbr = CallbackWrapper(self.plots[f], self._frb, f)
+                callback(cbr)
+
         self._plot_valid = True
 
+    @invalidate_plot
+    def set_cmap(self, field, cmap):
+        if isinstance(cmap, types.StringTypes):
+            if str(cmap) in yt_colormaps:
+                cmap = yt_colormaps[str(cmap)]
+            elif hasattr(matplotlib.cm, cmap):
+                cmap = getattr(matplotlib.cm, cmap)
+        if not is_colormap(cmap) and cmap is not None:
+            raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
+        else:
+            self.cmap = cmap
+        self.plots[field].image.set_cmap(cmap)
+
     def save(self,name):
+        axis = axis_names[self.data_source.axis]
+        if 'Slice' in self.data_source.__class__.__name__:
+            type = 'Slice'
+        if 'Proj' in self.data_source.__class__.__name__:
+            type = 'Projection'
         for k,v in self.plots.iteritems():
-            n = "%s_%s" % (name, k)
+            n = "%s_%s_%s_%s" % (name, type, axis, k)
             v.save(n)
 
 class PWViewerRaw(PWViewer):
@@ -858,14 +886,7 @@
             self._field_transform[field] = linear_transform
 
 class PlotMPL(object):
-    """A base class for all yt plots made using matplotl5Bib.
-
-    YtPlot and the classes that derive from it are *by design* limited
-    and designed for rapid, production quality plot production, rather
-    than full generality. If you require more customization of the end
-    result, these objects are designed to return to you the basic data
-    so you the user can insert them into a matplotlib figure on your
-    own outside of the YtPlot class.
+    """A base class for all yt plots made using matplotlib.
 
     """
     datalabel = None
@@ -877,27 +898,19 @@
         self.cax = self.figure.add_axes((.86,.10,.04,.8))
 
     def save(self,name):
-        print "saving plot %s" % name
+        print "saving plot %s.png" % name
         self.figure.savefig('%s.png' % name)
 
 class WindowPlotMPL(PlotMPL):
-    zmin = None
-    zmax = None
-    zlabel = None
+    def __init__(self, data, extent, field_transform, cmap, size=(9,8), zlim = (None, None)):
+        PlotMPL.__init__(self, data, size)
+        self.__init_image(data, extent, field_transform, zlim, cmap)
 
-    def __init__(self, data, extent, field_transform, size=(9,8)):
-        PlotMPL.__init__(self, data, size)
-        self.__init_image(data, extent, field_transform)
-
-    def __init_image(self, data, extent, field_transform):
+    def __init_image(self, data, extent, field_transform, zlim, cmap):
         if (field_transform.name == 'log10'):
-            self.image = self.axes.imshow(data,origin='lower',extent=extent,
-                                          norm=matplotlib.colors.LogNorm())
+            norm = matplotlib.colors.LogNorm()
         elif (field_transform.name == 'linear'):
-            self.image = self.axes.imshow(data,origin='lower',extent=extent)
-
-    @invalidate_plot
-    def set_zlim(self, zmin, zmax):
-        self.zmin = zmin
-        self.zmax = zmax
-
+            norm = matplotlib.colors.Normalize()
+        self.image = self.axes.imshow(data, origin='lower', extent = extent,
+                                      norm = norm, vmin = zlim[0], vmax = zlim[1],
+                                      cmap = cmap)


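set_width() above now takes an optional unit string and converts to code units via pf[unit] before recentring the x/y limits. A toy version of just that arithmetic (the conversions dict stands in for pf[...] lookups; all values are invented):

    class WindowSketch(object):
        def __init__(self, xlim, ylim, conversions):
            self.xlim, self.ylim, self.conversions = xlim, ylim, conversions

        def set_width(self, width, unit='1'):
            width = width / self.conversions[unit]        # to code units
            cx = self.xlim[0] + (self.xlim[1] - self.xlim[0]) * 0.5
            cy = self.ylim[0] + (self.ylim[1] - self.ylim[0]) * 0.5
            self.xlim = (cx - width / 2., cx + width / 2.)
            self.ylim = (cy - width / 2., cy + width / 2.)

    w = WindowSketch((0.0, 1.0), (0.0, 1.0), {'1': 1.0, 'kpc': 1000.0})
    w.set_width(200.0, 'kpc')   # 200 kpc in a pretend 1000-kpc-per-code-unit box
    assert abs(w.xlim[0] - 0.4) < 1e-12 and abs(w.xlim[1] - 0.6) < 1e-12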

https://bitbucket.org/yt_analysis/yt/changeset/0607bf437017/
changeset:   0607bf437017
branch:      yt
user:        jzuhone
date:        2012-07-11 23:36:44
summary:     Merging
affected #:  9 files

diff -r dd041de2ab1e3c745d0f389d5f98db59ed6a4a98 -r 0607bf437017825c5a50a0792797c05cc2834b3f yt/analysis_modules/halo_finding/api.py
--- a/yt/analysis_modules/halo_finding/api.py
+++ b/yt/analysis_modules/halo_finding/api.py
@@ -44,4 +44,5 @@
     HOPHaloFinder, \
     FOFHaloFinder, \
     HaloFinder, \
-    LoadHaloes
+    LoadHaloes, \
+    LoadTextHaloes


diff -r dd041de2ab1e3c745d0f389d5f98db59ed6a4a98 -r 0607bf437017825c5a50a0792797c05cc2834b3f yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -78,7 +78,7 @@
 
     def __init__(self, halo_list, id, indices=None, size=None, CoM=None,
         max_dens_point=None, group_total_mass=None, max_radius=None,
-        bulk_vel=None, tasks=None, rms_vel=None):
+        bulk_vel=None, tasks=None, rms_vel=None, supp=None):
         self._max_dens = halo_list._max_dens
         self.id = id
         self.data = halo_list._data_source
@@ -101,6 +101,11 @@
         self.rms_vel = rms_vel
         self.bin_count = None
         self.overdensity = None
+        # A supplementary data dict.
+        if supp is None:
+            self.supp = {}
+        else:
+            self.supp = supp
 
     def center_of_mass(self):
         r"""Calculate and return the center of mass.
@@ -409,7 +414,7 @@
         period = self.pf.domain_right_edge - \
             self.pf.domain_left_edge
         cm = self.pf["cm"]
-        thissize = max(self.size, self.indices.size)
+        thissize = self.get_size()
         rho_crit = rho_crit_now * h ** 2.0 * Om_matter  # g cm^-3
         Msun2g = mass_sun_cgs
         rho_crit = rho_crit * ((1.0 + z) ** 3.0)
@@ -801,7 +806,7 @@
 
         max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
         rms_vel=None, fnames=None, mag_A=None, mag_B=None, mag_C=None,
-        e1_vec=None, tilt=None):
+        e1_vec=None, tilt=None, supp=None):
 
         self.pf = pf
         self.gridsize = (self.pf.domain_right_edge - \
@@ -827,6 +832,11 @@
         self.particle_mask = None
         self.ds_sort = None
         self.indices = na.array([])  # Never used for a LoadedHalo.
+        # A supplementary data dict.
+        if supp is None:
+            self.supp = {}
+        else:
+            self.supp = supp
 
     def __getitem__(self, key):
         # This function will try to get particle data in one of three ways,
@@ -990,6 +1000,54 @@
         r = self.maximum_radius()
         return self.pf.h.sphere(cen, r)
 
+class TextHalo(LoadedHalo):
+    def __init__(self, pf, id, size=None, CoM=None,
+
+        max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
+        rms_vel=None, fnames=None, mag_A=None, mag_B=None, mag_C=None,
+        e1_vec=None, tilt=None, supp=None):
+
+        self.pf = pf
+        self.gridsize = (self.pf.domain_right_edge - \
+            self.pf.domain_left_edge)
+        self.id = id
+        self.size = size
+        self.CoM = CoM
+        self.max_dens_point = max_dens_point
+        self.group_total_mass = group_total_mass
+        self.max_radius = max_radius
+        self.bulk_vel = bulk_vel
+        self.rms_vel = rms_vel
+        self.mag_A = mag_A
+        self.mag_B = mag_B
+        self.mag_C = mag_C
+        self.e1_vec = e1_vec
+        self.tilt = tilt
+        self.bin_count = None
+        self.overdensity = None
+        self.indices = na.array([])  # Never used for a LoadedHalo.
+        # A supplementary data dict.
+        if supp is None:
+            self.supp = {}
+        else:
+            self.supp = supp
+
+    def __getitem__(self, key):
+        # We'll just pull it from the sphere.
+        return self.get_sphere()[key]
+
+    def maximum_density(self):
+        r"""Undefined for text halos."""
+        return -1
+    
+    def maximum_density_location(self):
+        r"""Undefined, default to CoM"""
+        return self.center_of_mass()
+    
+    def get_size(self):
+        # Have to just get it from the sphere.
+        return self["particle_position_x"].size
+
 
 class HaloList(object):
 
@@ -1514,6 +1572,42 @@
         lines.close()
         return locations
 
+class TextHaloList(HaloList):
+    _name = "Text"
+
+    def __init__(self, pf, fname, columns, comment):
+        ParallelAnalysisInterface.__init__(self)
+        self.pf = pf
+        self._groups = []
+        self._retrieve_halos(fname, columns, comment)
+
+    def _retrieve_halos(self, fname, columns, comment):
+        # First get the halo particulars.
+        lines = file(fname)
+        halo = 0
+        base_set = ['x', 'y', 'z', 'r']
+        keys = columns.keys()
+        extra = (len(keys) > 4)
+        for line in lines:
+            # Skip commented lines.
+            if line[0] == comment: continue
+            line = line.split()
+            x = float(line[columns['x']])
+            y = float(line[columns['y']])
+            z = float(line[columns['z']])
+            r = float(line[columns['r']])
+            cen = na.array([x, y, z])
+            # Now we see if there's anything else.
+            if extra:
+                temp_dict = {}
+                for key in columns:
+                    if key not in base_set:
+                        val = float(line[columns[key]])
+                        temp_dict[key] = val
+            self._groups.append(TextHalo(self.pf, halo,
+                CoM = cen, max_radius = r, supp = temp_dict))
+            halo += 1
+
 
 class parallelHOPHaloList(HaloList, ParallelAnalysisInterface):
     _name = "parallelHOP"
@@ -2497,3 +2591,39 @@
         """
         self.basename = basename
         LoadedHaloList.__init__(self, pf, self.basename)
+
+class LoadTextHaloes(GenericHaloFinder, TextHaloList):
+    def __init__(self, pf, filename, columns, comment = "#"):
+        r"""Load a text file of halos.
+        
+        Like LoadHaloes, but when all that is available is a plain
+        text file. This assumes the text file has the 3-positions of halos
+        along with a radius. The halo objects created are spheres.
+
+        Parameters
+        ----------
+        fname : String
+            The name of the text file to read in.
+        
+        columns : dict
+            A dict listing the column name : column number pairs for data
+            in the text file. It is zero-based (like Python).
+            An example is {'x':0, 'y':1, 'z':2, 'r':3, 'm':4}.
+            Any column name outside of ['x', 'y', 'z', 'r'] will be attached
+            to each halo object in the supplementary dict 'supp'. See
+            example.
+        
+        comment : String
+            If the first character of a line is equal to this, the line is
+            skipped. Default = "#".
+
+        Examples
+        --------
+        >>> pf = load("data0005")
+        >>> halos = LoadTextHaloes(pf, "list.txt",
+            {'x':0, 'y':1, 'z':2, 'r':3, 'm':4},
+            comment = ";")
+        >>> halos[0].supp['m']
+            3.28392048e14
+        """
+        TextHaloList.__init__(self, pf, filename, columns, comment)


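The columns dict used by TextHaloList above maps field names to zero-based column indices; anything beyond x, y, z and r ends up in the halo's supp dict. A quick stand-alone illustration of that mapping (the line of text is invented):

    columns = {'x': 0, 'y': 1, 'z': 2, 'r': 3, 'm': 4}
    line = "0.12 0.58 0.44 0.002 3.2e14".split()
    x, y, z, r = [float(line[columns[k]]) for k in ('x', 'y', 'z', 'r')]
    supp = dict((k, float(line[columns[k]]))
                for k in columns if k not in ('x', 'y', 'z', 'r'))
    assert supp == {'m': 3.2e14}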
diff -r dd041de2ab1e3c745d0f389d5f98db59ed6a4a98 -r 0607bf437017825c5a50a0792797c05cc2834b3f yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -33,6 +33,8 @@
 
 import time
 import numpy as na
+import numpy.linalg as linalg
+import collections
 
 from yt.funcs import *
 import yt.utilities.lib as amr_utils
@@ -41,7 +43,8 @@
 
 debug = True
 
-def export_to_sunrise(pf, fn, star_particle_type, dle, dre,**kwargs):
+def export_to_sunrise(pf, fn, star_particle_type, fc, fwidth, ncells_wide=None,
+        debug=False,dd=None,**kwargs):
     r"""Convert the contents of a dataset to a FITS file format that Sunrise
     understands.
 
@@ -54,12 +57,14 @@
 
     Parameters
     ----------
-    pf : `StaticOutput`
-        The parameter file to convert.
-    fn : string
-        The filename of the output FITS file.
-    dle : The domain left edge to extract
-    dre : The domain rght edge to extract
+    pf      : `StaticOutput`
+                The parameter file to convert.
+    fn      : string
+                The filename of the output FITS file.
+    fc      : array
+                The center of the extraction region
+    fwidth  : array  
+                Ensure this radius around the center is enclosed
         Array format is (nx,ny,nz) where each element is floating point
         in unitary position units where 0 is leftmost edge and 1
         the rightmost. 
@@ -72,33 +77,123 @@
     http://sunrise.googlecode.com/ for more information.
 
     """
+
+    fc = na.array(fc)
+    fwidth = na.array(fwidth)
     
     #we must round the dle,dre to the nearest root grid cells
-    ile,ire,super_level= round_nearest_edge(pf,dle,dre)
-    super_level -= 1 #we're off by one (so we don't need a correction if we span 2 cells)
+    ile,ire,super_level,ncells_wide= \
+            round_ncells_wide(pf.domain_dimensions,fc-fwidth,fc+fwidth,nwide=ncells_wide)
+
+    assert na.all((ile-ire)==(ile-ire)[0])
+    mylog.info("rounding specified region:")
+    mylog.info("from [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(fc-fwidth)+tuple(fc+fwidth)))
+    mylog.info("to   [%07i %07i %07i]-[%07i %07i %07i]"%(tuple(ile)+tuple(ire)))
     fle,fre = ile*1.0/pf.domain_dimensions, ire*1.0/pf.domain_dimensions
-    mylog.info("rounding specified region:")
-    mylog.info("from [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(dle)+tuple(dre)))
-    mylog.info("to   [%07i %07i %07i]-[%07i %07i %07i]"%(tuple(ile)+tuple(ire)))
     mylog.info("to   [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(fle)+tuple(fre)))
 
+    #Create a list of the star particle properties in PARTICLE_DATA
+    #Include ID, parent-ID, position, velocity, creation_mass, 
+    #formation_time, mass, age_m, age_l, metallicity, L_bol
+    particle_data = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,
+                                           dd=dd,**kwargs)
 
     #Create the refinement hilbert octree in GRIDSTRUCTURE
     #For every leaf (not-refined) cell we have a column n GRIDDATA
     #Include mass_gas, mass_metals, gas_temp_m, gas_teff_m, cell_volume, SFR
     #since the octree always starts with one cell, an our 0-level mesh
-    #may have many cells, we must #create the octree region sitting 
+    #may have many cells, we must create the octree region sitting 
     #ontop of the first mesh by providing a negative level
-    output, refinement = prepare_octree(pf,ile,start_level=-super_level)
+    output, refinement,dd,nleaf = prepare_octree(pf,ile,start_level=super_level,
+            debug=debug,dd=dd,center=fc)
 
-    #Create a list of the star particle properties in PARTICLE_DATA
-    #Include ID, parent-ID, position, velocity, creation_mass, 
-    #formation_time, mass, age_m, age_l, metallicity, L_bol
-    particle_data = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,**kwargs)
+    create_fits_file(pf,fn, refinement,output,particle_data,fle,fre)
 
-    create_fits_file(pf,fn, refinement,output,particle_data,fre,fle)
+    return fle,fre,ile,ire,dd,nleaf
 
-def prepare_octree(pf,ile,start_level=0):
+def export_to_sunrise_from_halolist(pf,fni,star_particle_type,
+                                        halo_list,domains_list=None,**kwargs):
+    """
+    Using the center of mass and the virial radius
+    for a halo, calculate the regions to extract for sunrise.
+    The regions are defined on the root grid, and so individual
+    octs may span a large range encompassing many halos
+    and subhalos. Instead of repeating the oct extraction for each
+    halo, arrange halos such that we only calculate what we need to.
+
+    Parameters
+    ----------
+    pf : `StaticOutput`
+        The parameter file to convert. We use the root grid to specify the domain.
+    fni : string
+        The filename of the output FITS file, but depends on the domain. The
+        dle and dre are appended to the name.
+    particle_type : int
+        The particle index for stars
+    halo_list : list of halo objects
+        The halo list objects must have halo.CoM and halo.Rvir,
+        both of which are assumed to be in unitary length units.
+    frvir (optional) : float
+        Ensure that CoM +/- frvir*Rvir is contained within each domain
+    domains_list (optiona): dict of halos
+        Organize halos into a dict of domains. Keys are DLE/DRE tuple
+        values are a list of halos
+    """
+    dn = pf.domain_dimensions
+    if domains_list is None:
+        domains_list = domains_from_halos(pf,halo_list,**kwargs)
+    if fni.endswith('.fits'):
+        fni = fni.replace('.fits','')
+
+    ndomains_finished = 0
+    for (num_halos, domain, halos) in domains_list:
+        dle,dre = domain
+        print 'exporting: '
+        print "[%03i %03i %03i] -"%tuple(dle),
+        print "[%03i %03i %03i] "%tuple(dre),
+        print " with %i halos"%num_halos
+        dle,dre = domain
+        dle, dre = na.array(dle),na.array(dre)
+        fn = fni 
+        fn += "%03i_%03i_%03i-"%tuple(dle)
+        fn += "%03i_%03i_%03i"%tuple(dre)
+        fnf = fn + '.fits'
+        fnt = fn + '.halos'
+        if os.path.exists(fnt):
+            os.remove(fnt)
+        fh = open(fnt,'w')
+        for halo in halos:
+            fh.write("%i "%halo.ID)
+            fh.write("%6.6e "%(halo.CoM[0]*pf['kpc']))
+            fh.write("%6.6e "%(halo.CoM[1]*pf['kpc']))
+            fh.write("%6.6e "%(halo.CoM[2]*pf['kpc']))
+            fh.write("%6.6e "%(halo.Mvir))
+            fh.write("%6.6e \n"%(halo.Rvir*pf['kpc']))
+        fh.close()
+        export_to_sunrise(pf, fnf, star_particle_type, dle*1.0/dn, dre*1.0/dn)
+        ndomains_finished +=1
+
+def domains_from_halos(pf,halo_list,frvir=0.15):
+    domains = {}
+    dn = pf.domain_dimensions
+    for halo in halo_list:
+        fle, fre = halo.CoM-frvir*halo.Rvir,halo.CoM+frvir*halo.Rvir
+        dle,dre = na.floor(fle*dn), na.ceil(fre*dn)
+        dle,dre = tuple(dle.astype('int')),tuple(dre.astype('int'))
+        if (dle,dre) in domains.keys():
+            domains[(dle,dre)] += halo,
+        else:
+            domains[(dle,dre)] = [halo,]
+    #for niceness, let's process the domains in order of 
+    #the one with the most halos
+    domains_list = [(len(v),k,v) for k,v in domains.iteritems()]
+    domains_list.sort() 
+    domains_list.reverse() #we want the most populated domains first
+    domains_limits = [d[1] for d in domains_list]
+    domains_halos  = [d[2] for d in domains_list]
+    return domains_list
+
+def prepare_octree(pf,ile,start_level=0,debug=False,dd=None,center=None):
     add_fields() #add the metal mass field that sunrise wants
     fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
               "MetalMass","CellVolumeCode"]
@@ -106,7 +201,9 @@
     #gather the field data from octs
     pbar = get_pbar("Retrieving field data",len(fields))
     field_data = [] 
-    dd = pf.h.all_data()
+    if dd is None:
+        #we keep passing dd around to not regenerate the data all the time
+        dd = pf.h.all_data()
     for fi,f in enumerate(fields):
         field_data += dd[f],
         pbar.update(fi)
@@ -146,7 +243,7 @@
     pbar.finish()
     
     #create the octree grid list
-    oct_list =  amr_utils.OctreeGridList(grids)
+    #oct_list =  amr_utils.OctreeGridList(grids)
     
     #initialize arrays to be passed to the recursion algo
     o_length = na.sum(levels_all.values())
@@ -156,115 +253,102 @@
     levels   = na.zeros(r_length, dtype='int32')
     pos = position()
     hs       = hilbert_state()
-    refined[0] = 1 #introduce the first cell as divided
-    levels[0]  = start_level-1 #introduce the first cell as divided
-    pos.refined_pos += 1
+    start_time = time.time()
+    if debug:
+        if center is not None: 
+            c = center*pf['kpc']
+        else:
+            c = ile*1.0/pf.domain_dimensions*pf['kpc']
+        printing = lambda x: print_oct(x,pf['kpc'],c)
+    else:
+        printing = None
+    pbar = get_pbar("Building Hilbert DFO octree",len(refined))
     RecurseOctreeDepthFirstHilbert(
-            ile[0],ile[1],ile[2],
-            pos,0, hs, 
+            ile,
+            pos,
+            grids[0], #we always start on the root grid
+            hs, 
             output,refined,levels,
             grids,
             start_level,
-            #physical_center = (ile)*1.0/pf.domain_dimensions*pf['kpc'],
-            physical_center = ile,
-            #physical_width  = pf['kpc'])
-            physical_width  = pf.domain_dimensions)
+            debug=printing,
+            tracker=pbar)
+    pbar.finish()
     #by time we get it here the 'current' position is actually 
     #for the next spot, so we're off by 1
+    print 'took %1.2e seconds'%(time.time()-start_time)
     print 'refinement tree # of cells %i, # of leaves %i'%(pos.refined_pos,pos.output_pos) 
     output  = output[:pos.output_pos]
     refined = refined[:pos.refined_pos] 
     levels = levels[:pos.refined_pos] 
-    return output,refined
+    return output,refined,dd,pos.refined_pos
 
-def print_row(level,ple,pre,pw,pc,hs):
-    print level, 
-    print '%1.5f %1.5f %1.5f '%tuple(ple*pw-pc),
-    print '%1.5f %1.5f %1.5f '%tuple(pre*pw-pc),
-    print hs.dim, hs.sgn
+def print_oct(data,nd=None,nc=None):
+    ci = data['cell_index']
+    l  = data['level']
+    g  = data['grid']
+    fle = g.left_edges+g.dx*ci
+    fre = g.left_edges+g.dx*(ci+1)
+    if nd is not None:
+        fle *= nd
+        fre *= nd
+        if nc is not None:
+            fle -= nc
+            fre -= nc
+    txt  = '%1i '
+    txt += '%1.3f '*3+'- '
+    txt += '%1.3f '*3
+    print txt%((l,)+tuple(fle)+tuple(fre))
 
-def print_child(level,grid,i,j,k,pw,pc):
-    ple = (grid.left_edges+na.array([i,j,k])*grid.dx)*pw-pc #parent LE 
-    pre = (grid.left_edges+na.array([i+1,j+1,k+1])*grid.dx)*pw-pc #parent RE 
-    print level, 
-    print '%1.5f %1.5f %1.5f '%tuple(ple),
-    print '%1.5f %1.5f %1.5f '%tuple(pre)
+def RecurseOctreeDepthFirstHilbert(cell_index, #integer (rep as a float) on the grids[grid_index]
+                            pos, #the output hydro data position and refinement position
+                            grid,  #grid that this oct lives on (not its children)
+                            hilbert,  #the hilbert state
+                            output, #holds the hydro data
+                            refined, #holds the refinement status  of Octs, 0s and 1s
+                            levels, #For a given Oct, what is the level
+                            grids, #list of all patch grids available to us
+                            level, #starting level of the oct (not the children)
+                            debug=None,tracker=True):
+    if tracker is not None:
+        if pos.refined_pos%1000 == 500 : tracker.update(pos.refined_pos)
+    if debug is not None: 
+        debug(vars())
+    child_grid_index = grid.child_indices[cell_index[0],cell_index[1],cell_index[2]]
+    #record the refinement state
+    refined[pos.refined_pos] = child_grid_index!=-1
+    levels[pos.output_pos]  = level
+    pos.refined_pos+= 1 
+    if child_grid_index == -1 and level>=0: #never subdivide if we are on a superlevel
+        #then we have hit a leaf cell; write it out
+        for field_index in range(grid.fields.shape[0]):
+            output[pos.output_pos,field_index] = \
+                    grid.fields[field_index,cell_index[0],cell_index[1],cell_index[2]]
+        pos.output_pos+= 1 
+    else:
+        #find the grid we descend into
+        #then find the eight cells we break up into
+        subgrid = grids[child_grid_index]
+        #calculate the floating point LE of the children
+        #then translate onto the subgrid integer index 
+        parent_fle  = grid.left_edges + cell_index*grid.dx
+        subgrid_ile = na.floor((parent_fle - subgrid.left_edges)/subgrid.dx)
+        for i, (vertex,hilbert_child) in enumerate(hilbert):
+            #vertex is a combination of three 0s and 1s to 
+            #denote each of the 8 octs
+            if level < 0:
+                subgrid = grid #we don't actually descend if we're a superlevel
+                child_ile = cell_index + vertex*2**(-level)
+            else:
+                child_ile = subgrid_ile+na.array(vertex)
+                child_ile = child_ile.astype('int')
+            RecurseOctreeDepthFirstHilbert(child_ile,pos,
+                    subgrid,hilbert_child,output,refined,levels,grids,level+1,
+                    debug=debug,tracker=tracker)
 
-def RecurseOctreeDepthFirstHilbert(xi,yi,zi,
-                            curpos, gi, 
-                            hs,
-                            output,
-                            refined,
-                            levels,
-                            grids,
-                            level,
-                            physical_center=None,
-                            physical_width=None,
-                            printr=False):
-    grid = grids[gi]
-    m = 2**(-level-1) if level < 0 else 1
-    ple = grid.left_edges+na.array([xi,yi,zi])*grid.dx #parent LE
-    pre = ple+grid.dx*m
-    if printr:
-        print_row(level,ple,pre,physical_width,physical_center,hs)
 
-    #here we go over the 8 octants
-    #in general however, a mesh cell on this level
-    #may have more than 8 children on the next level
-    #so we find the int float center (cxyz) of each child cell
-    # and from that find the child cell indices
-    for iv, (vertex,hs_child) in enumerate(hs):
-        #print ' '*(level+3), level,iv, vertex,curpos.refined_pos,curpos.output_pos,
-        #negative level indicates that we need to build a super-octree
-        if level < 0: 
-            #print ' '
-            #we are not on the root grid yet, but this is 
-            #how many equivalent root grid cells we would have
-            #level -1 means our oct grid's children are the same size
-            #as the root grid (hence the -level-1)
-            dx = 2**(-level-1) #this is the child width 
-            i,j,k = xi+vertex[0]*dx,yi+vertex[1]*dx,zi+vertex[2]*dx
-            #we always refine the negative levels
-            refined[curpos.refined_pos] = 1
-            levels[curpos.refined_pos] = level
-            curpos.refined_pos += 1
-            RecurseOctreeDepthFirstHilbert(i, j, k,
-                                curpos, 0, hs_child, output, refined, levels, grids,
-                                level+1,
-                                physical_center=physical_center,
-                                physical_width=physical_width,)
-        else:
-            i,j,k = xi+vertex[0],yi+vertex[1],zi+vertex[2]
-            ci = grid.child_indices[i,j,k] #is this oct subdivided?
-            if ci == -1:
-                for fi in range(grid.fields.shape[0]):
-                    output[curpos.output_pos,fi] = grid.fields[fi,i,j,k]
-                refined[curpos.refined_pos] = 0
-                levels[curpos.refined_pos] = level
-                curpos.output_pos += 1 #position updated after write
-                curpos.refined_pos += 1
-                if printr:
-                    print_child(level+1,grid,i,j,k,physical_width,physical_center)
-            else:
-                cx = (grid.left_edges[0] + i*grid.dx[0]) #floating le of the child
-                cy = (grid.left_edges[1] + j*grid.dx[0])
-                cz = (grid.left_edges[2] + k*grid.dx[0])
-                refined[curpos.refined_pos] = 1
-                levels[curpos.refined_pos] = level
-                curpos.refined_pos += 1 #position updated after write
-                child_grid = grids[ci]
-                child_dx = child_grid.dx[0]
-                child_leftedges = child_grid.left_edges
-                child_i = int((cx - child_leftedges[0])/child_dx)
-                child_j = int((cy - child_leftedges[1])/child_dx)
-                child_k = int((cz - child_leftedges[2])/child_dx)
-                RecurseOctreeDepthFirstHilbert(child_i, child_j, child_k,
-                                    curpos, ci, hs_child, output, refined, levels, grids,
-                                    level+1,
-                                    physical_center=physical_center,
-                                    physical_width=physical_width)
 
-def create_fits_file(pf,fn, refined,output,particle_data,fre,fle):
+def create_fits_file(pf,fn, refined,output,particle_data,fle,fre):
 
     #first create the grid structure
     structure = pyfits.Column("structure", format="B", array=refined.astype("bool"))
@@ -351,25 +435,66 @@
     x+=1 
     return x
 
-def round_nearest_edge(pf,dle,dre):
+def round_ncells_wide(dds,fle,fre,nwide=None):
+    fc = (fle+fre)/2.0
+    assert na.all(fle < fc)
+    assert na.all(fre > fc)
+    ic = na.rint(fc*dds) #nearest vertex to the center
+    ile,ire = ic.astype('int'),ic.astype('int')
+    cfle,cfre = fc.copy(),fc.copy()
+    idx = na.array([0,0,0]) #placeholder so the first loop test fails
+    width = 0.0
+    if nwide is None:
+        #expand until borders are included and
+        #we have an equally-sized, non-zero box
+        idxq,out=False,True
+        while not out or not idxq:
+            cfle,cfre = fc-width, fc+width
+            ile = na.rint(cfle*dds).astype('int')
+            ire = na.rint(cfre*dds).astype('int')
+            idx = ire-ile
+            width += 0.1/dds
+            #quit when the box is equal-sized and non-zero (idxq) and contains fle/fre (out):
+            idxq = idx[0]>0 and na.all(idx==idx[0])
+            out  = na.all(fle>cfle) and na.all(fre<cfre) 
+            assert width[0] < 1.1 #can't go larger than the simulation volume
+        nwide = idx[0]
+    else:
+        #expand until the box spans nwide cells
+        while not na.all(idx==nwide):
+            assert na.any(idx<=nwide)
+            cfle,cfre = fc-width, fc+width
+            ile = na.rint(cfle*dds).astype('int')
+            ire = na.rint(cfre*dds).astype('int')
+            idx = ire-ile
+            width += 1e-2*1.0/dds
+    assert na.all(idx==nwide)
+    assert idx[0]>0
+    maxlevel = -na.rint(na.log2(nwide)).astype('int')
+    assert abs(na.log2(nwide)-na.rint(na.log2(nwide)))<1e-5 #nwide should be a power of 2
+    return ile,ire,maxlevel,nwide
+
+def round_nearest_edge(pf,fle,fre):
     dds = pf.domain_dimensions
-    ile = na.floor(dle*dds).astype('int')
-    ire = na.ceil(dre*dds).astype('int') 
+    ile = na.floor(fle*dds).astype('int')
+    ire = na.ceil(fre*dds).astype('int') 
     
     #this is the number of cells the super octree needs to expand to
     #must round to the nearest power of 2
     width = na.max(ire-ile)
     width = nearest_power(width)
     
-    maxlevel = na.rint(na.log2(width)).astype('int')
+    maxlevel = -na.rint(na.log2(width)).astype('int')
     return ile,ire,maxlevel
 
 def prepare_star_particles(pf,star_type,pos=None,vel=None, age=None,
                           creation_time=None,initial_mass=None,
                           current_mass=None,metallicity=None,
                           radius = None,
-                          fle=[0.,0.,0.],fre=[1.,1.,1.]):
-    dd = pf.h.all_data()
+                          fle=[0.,0.,0.],fre=[1.,1.,1.],
+                          dd=None):
+    if dd is None:
+        dd = pf.h.all_data()
     idx = dd["particle_type"] == star_type
     if pos is None:
         pos = na.array([dd["particle_position_%s" % ax]
@@ -393,28 +518,29 @@
     if metallicity is None:
         #this should be in dimensionless units, metals mass / particle mass
         metallicity = dd["particle_metallicity"][idx]
+        #metallicity *=0.0198
+        #print 'WARNING: multiplying metallicity by 0.0198'
     if radius is None:
         radius = initial_mass*0.0+10.0/1000.0 #10pc radius
-
-    formation_time = pf.current_time-age
+    formation_time = pf.current_time*pf['years']-age
     #create every column
     col_list = []
-    col_list.append(pyfits.Column("ID", format="I", array=na.arange(current_mass.size)))
-    col_list.append(pyfits.Column("parent_ID", format="I", array=na.arange(current_mass.size)))
+    col_list.append(pyfits.Column("ID", format="J", array=na.arange(current_mass.size).astype('int32')))
+    col_list.append(pyfits.Column("parent_ID", format="J", array=na.arange(current_mass.size).astype('int32')))
     col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc"))
     col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr"))
     col_list.append(pyfits.Column("creation_mass", format="D", array=initial_mass, unit="Msun"))
     col_list.append(pyfits.Column("formation_time", format="D", array=formation_time, unit="yr"))
     col_list.append(pyfits.Column("radius", format="D", array=radius, unit="kpc"))
     col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun"))
-    col_list.append(pyfits.Column("age_m", format="D", array=age))
-    col_list.append(pyfits.Column("age_l", format="D", array=age))
+    col_list.append(pyfits.Column("age", format="D", array=age,unit='yr'))
+    #col_list.append(pyfits.Column("age_l", format="D", array=age, unit = 'yr'))
     #For particles, Sunrise takes 
     #the dimensionless metallicity, not the mass of the metals
     col_list.append(pyfits.Column("metallicity", format="D",
         array=metallicity,unit="Msun")) 
-    col_list.append(pyfits.Column("L_bol", format="D",
-        array=na.zeros(current_mass.size)))
+    #col_list.append(pyfits.Column("L_bol", format="D",
+    #    array=na.zeros(current_mass.size)))
     
     #make the table
     cols = pyfits.ColDefs(col_list)
@@ -426,10 +552,10 @@
 def add_fields():
     """Add three Eulerian fields Sunrise uses"""
     def _MetalMass(field, data):
-        return data["Metal_Density"] * data["CellVolume"]
+        return data["Metallicity"] * data["CellMassMsun"]
         
     def _convMetalMass(data):
-        return 1.0/1.989e33
+        return 1.0
     
     add_field("MetalMass", function=_MetalMass,
               convert_function=_convMetalMass)
@@ -542,7 +668,254 @@
         j+=1
         yield vertex, self.descend(j)
 
+def generate_sunrise_cameraset_positions(pf,sim_center,cameraset=None,**kwargs):
+    if cameraset is None:
+        cameraset =cameraset_vertex 
+    campos =[]
+    names = []
+    dd = pf.h.all_data()
+    for name, (scene_pos,scene_up, scene_rot)  in cameraset.iteritems():
+        kwargs['scene_position']=scene_pos
+        kwargs['scene_up']=scene_up
+        kwargs['scene_rot']=scene_rot
+        kwargs['dd']=dd
+        line = generate_sunrise_camera_position(pf,sim_center,**kwargs)
+        campos += line,
+        names += name,
+    return names,campos     
 
+def generate_sunrise_camera_position(pf,sim_center,sim_axis_short=None,sim_axis_long=None,
+                                     sim_sphere_radius=None,sim_halo_radius=None,
+                                     scene_position=[0.0,0.0,1.0],scene_distance=None,
+                                     scene_up=[0.,0.,1.],scene_fov=None,scene_rot=True,
+                                     dd=None):
+    """Translate the simulation to center on sim_center, 
+    then rotate such that the short axis (sim_axis_short) is along the +z direction. Then we are in the 
+    'scene' basis coordinates from which scene_up and scene_position are defined.
+    Then a position vector, direction vector, up vector and angular field of view
+    are returned. The 3-vectors are in absolute physical kpc, not relative to the center.
+    The angular field of view is in radians. The 10 numbers should match the inputs to
+    camera_positions in Sunrise.
+    """
 
+    sim_center = na.array(sim_center)
+    if sim_sphere_radius is None:
+        sim_sphere_radius = 10.0/pf['kpc']
+    if sim_axis_short is None:
+        if dd is None:
+            dd = pf.h.all_data()
+        pos = na.array([dd["particle_position_%s"%i] for i in "xyz"]).T
+        idx = na.sqrt(na.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
+        mas = dd["particle_mass"]
+        pos = pos[idx]
+        mas = mas[idx]
+        mo_inertia = position_moment(pos,mas)
+        eigva, eigvc = linalg.eig(mo_inertia)
+        #order into short, long axes
+        order = eigva.real.argsort()
+        ax_short,ax_med,ax_long = [ eigvc[:,order[i]] for i in (0,1,2)]
+    else:
+        ax_short = sim_axis_short
+        ax_long  = sim_axis_long
+    if sim_halo_radius is None:
+        sim_halo_radius = 200.0/pf['kpc']
+    if scene_distance is  None:
+        scene_distance = 1e4/pf['kpc'] #this is how far the camera is from the target
+    if scene_fov is None:
+        radii = na.sqrt(na.sum((pos-sim_center)**2.0,axis=1))
+        #idx= radii < sim_halo_radius*0.10
+        #radii = radii[idx]
+        #mass  = mas[idx] #copying mass into mas
+        si = na.argsort(radii)
+        radii = radii[si]
+        mass  = mas[si]
+        idx, = na.where(na.cumsum(mass)>mass.sum()/2.0)
+        re = radii[idx[0]]
+        scene_fov = 5*re
+        scene_fov = max(scene_fov,3.0/pf['kpc']) #min size is 3kpc
+        scene_fov = min(scene_fov,20.0/pf['kpc']) #max size is 20kpc
+    #find rotation matrix
+    angles=find_half_euler_angles(ax_short,ax_long)
+    rotation  = euler_matrix(*angles)
+    irotation = numpy.linalg.inv(rotation)
+    axs = (ax_short,ax_med,ax_long)
+    ax_rs,ax_rm,ax_rl = (matmul(rotation,ax) for ax in axs)
+    axs = ([1,0,0],[0,1,0],[0,0,1])
+    ax_is,ax_im,ax_il = (matmul(irotation,ax) for ax in axs)
+    
+    #rotate the camera
+    if scene_rot :
+        irotation = na.eye(3)
+    sunrise_pos = matmul(irotation,na.array(scene_position)*scene_distance) #do NOT include sim center
+    sunrise_up  = matmul(irotation,scene_up)
+    sunrise_direction = -sunrise_pos
+    sunrise_afov = 2.0*na.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
 
+    #change to physical kpc
+    sunrise_pos *= pf['kpc']
+    sunrise_direction *= pf['kpc']
+    return sunrise_pos,sunrise_direction,sunrise_up,sunrise_afov,scene_fov
 
+def matmul(m, v):
+    """Multiply a matrix times a set of vectors, or a single vector.
+    My nPart x nDim convention leads to two transpositions, which is
+    why this is hidden away in a function.  Note that if you try to
+    use this to muliply two matricies, it will think that you're
+    trying to multiply by a set of vectors and all hell will break
+    loose."""    
+    assert type(v) is not na.matrix
+    v = na.asarray(v)
+    m, vs = [na.asmatrix(a) for a in (m, v)]
+
+    result = na.asarray(na.transpose(m * na.transpose(vs)))    
+    if len(v.shape) == 1:
+        return result[0]
+    return result
+
+
+def mag(vs):
+    """Compute the norms of a set of vectors or a single vector."""
+    vs = na.asarray(vs)
+    if len(vs.shape) == 1:
+        return na.sqrt( (vs**2).sum() )
+    return na.sqrt( (vs**2).sum(axis=1) )
+
+def mag2(vs):
+    """Compute the norms of a set of vectors or a single vector."""
+    vs = na.asarray(vs)
+    if len(vs.shape) == 1:
+        return (vs**2).sum()
+    return (vs**2).sum(axis=1)
+
+
+def position_moment(rs, ms=None, axes=None):
+    """Find second position moment tensor.
+    If axes is specified, weight by the elliptical radius (Allgood 2005)"""
+    rs = na.asarray(rs)
+    Npart, N = rs.shape
+    if ms is None: ms = na.ones(Npart)
+    else: ms = na.asarray(ms)    
+    if axes is not None:
+        axes = na.asarray(axes,dtype='float64')
+        axes = axes/axes.max()
+        norms2 = mag2(rs/axes)
+    else:
+        norms2 = na.ones(Npart)
+    M = ms.sum()
+    result = na.zeros((N,N))
+    # matrix is symmetric, so only compute half of it then fill in the
+    # other half
+    for i in range(N):
+        for j in range(i+1):
+            result[i,j] = ( rs[:,i] * rs[:,j] * ms / norms2).sum() / M
+        
+    result = result + result.transpose() - na.identity(N)*result
+    return result
+    
+
+
+def find_half_euler_angles(v,w,check=True):
+    """Find the passive euler angles that will make v lie along the z
+    axis and w lie along the x axis.  v and w are uncertain up to
+    inversions (i.e., eigenvectors), so this routine removes degeneracies
+    associated with that
+
+    (old) Calculate angles to bring a body into alignment with the
+    coordinate system.  If v1 is the SHORTEST axis and v2 is the
+    LONGEST axis, then this will return the angle (Euler angles) to
+    make the long axis line up with the x axis and the short axis line
+    up with the x (z) axis for the 2 (3) dimensional case."""
+    # Make sure the vectors are normalized and orthogonal
+    mag = lambda x: na.sqrt(na.sum(x**2.0))
+    v = v/mag(v)
+    w = w/mag(w)    
+    if check:
+        if abs((v*w).sum()) / (mag(v)*mag(w)) > 1e-5: raise ValueError
+
+    # Break eigenvector scaling degeneracy by forcing it to have a positive
+    # z component
+    if v[2] < 0: v = -v
+    phi,theta = find_euler_phi_theta(v)
+
+    # Rotate w according to phi,theta and then break inversion
+    # degeneracy by requiring that resulting vector has positive
+    # x component
+    w_prime = euler_passive(w,phi,theta,0.)
+    if w_prime[0] < 0: w_prime = -w_prime
+    # Now last Euler angle should just be this:
+    psi = na.arctan2(w_prime[1],w_prime[0])
+    return phi, theta, psi
+
+def find_euler_phi_theta(v):
+    """Find (passive) euler angles that will make v point in the z
+    direction"""
+    # Make sure the vector is normalized
+    v = v/mag(v)
+    theta = na.arccos(v[2])
+    phi = na.arctan2(v[0],-v[1])
+    return phi,theta
+
+def euler_matrix(phi, the, psi):
+    """Make an Euler transformation matrix"""
+    cpsi=na.cos(psi)
+    spsi=na.sin(psi)
+    cphi=na.cos(phi)
+    sphi=na.sin(phi)
+    cthe=na.cos(the)
+    sthe=na.sin(the)
+    m = na.mat(na.zeros((3,3)))
+    m[0,0] = cpsi*cphi - cthe*sphi*spsi
+    m[0,1] = cpsi*sphi + cthe*cphi*spsi
+    m[0,2] = spsi*sthe
+    m[1,0] = -spsi*cphi - cthe*sphi*cpsi
+    m[1,1] = -spsi*sphi + cthe*cphi*cpsi 
+    m[1,2] = cpsi*sthe
+    m[2,0] = sthe*sphi
+    m[2,1] = -sthe*cphi
+    m[2,2] = cthe
+    return m
+
+def euler_passive(v, phi, the, psi):
+    """Passive Euler transform"""
+    m = euler_matrix(phi, the, psi)
+    return matmul(m,v)
+
+
+#the format for these camerasets is name: (camera position, up vector,
+#rotate to the galaxy's up direction?)
+cameraset_compass = collections.OrderedDict([
+    ['top',([0.,0.,1.],[0.,-1.,0],True)], #up is north=+y
+    ['bottom',([0.,0.,-1.],[0.,-1.,0.],True)],#up is north=+y
+    ['north',([0.,1.,0.],[0.,0.,-1.],True)],#up is along z
+    ['south',([0.,-1.,0.],[0.,0.,-1.],True)],#up is along z
+    ['east',([1.,0.,0.],[0.,0.,-1.],True)],#up is along z
+    ['west',([-1.,0.,0.],[0.,0.,-1.],True)],#up is along z
+    ['top-north',([0.,0.7071,0.7071],[0., 0., -1.],True)],
+    ['top-south',([0.,-0.7071,0.7071],[0., 0., -1.],True)],
+    ['top-east',([ 0.7071,0.,0.7071],[0., 0., -1.],True)],
+    ['top-west',([-0.7071,0.,0.7071],[0., 0., -1.],True)]
+    ])
+
+cameraset_vertex = collections.OrderedDict([
+    ['top',([0.,0.,1.],[0.,-1.,0],True)], #up is north=+y
+    ['north',([0.,1.,0.],[0.,0.,-1.],True)],#up is along z
+    ['top-north',([0.,0.7071,0.7071],[0., 0., -1.],True)],
+    ['Z',([0.,0.,1.],[0.,-1.,0],False)], #up is north=+y
+    ['Y',([0.,1.,0.],[0.,0.,-1.],False)],#up is along z
+    ['ZY',([0.,0.7071,0.7071],[0., 0., -1.],False)]
+    ])
+
+#up is 45deg down from z, towards north
+#'bottom-north':([0.,0.7071,-0.7071],[0., 0., -1.])
+#up is -45deg down from z, towards north
+
+cameraset_ring = collections.OrderedDict()
+
+segments = 20
+for angle in na.linspace(0,360,segments):
+    theta = na.radians(angle) #na.cos/na.sin expect radians, not degrees
+    pos = [na.cos(theta),0.,na.sin(theta)]
+    vc  = [na.cos(na.pi/2.0-theta),0.,na.sin(na.pi/2.0-theta)]
+    cameraset_ring['%02i'%angle]=(pos,vc)
+            
+
+
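
For readers following the geometry above: euler_matrix/find_half_euler_angles implement a passive z-x-z rotation that sends the short axis to +z and the long axis to +/-x. Below is a minimal standalone sketch (plain numpy, outside yt; the helper names and test vectors are made up) that mirrors the matrix layout in the diff and checks that property:

    import numpy as np

    def euler_matrix_np(phi, the, psi):
        #same element layout as euler_matrix in the diff above
        cpsi, spsi = np.cos(psi), np.sin(psi)
        cphi, sphi = np.cos(phi), np.sin(phi)
        cthe, sthe = np.cos(the), np.sin(the)
        return np.array([
            [ cpsi*cphi - cthe*sphi*spsi,  cpsi*sphi + cthe*cphi*spsi,  spsi*sthe],
            [-spsi*cphi - cthe*sphi*cpsi, -spsi*sphi + cthe*cphi*cpsi,  cpsi*sthe],
            [ sthe*sphi,                  -sthe*cphi,                   cthe]])

    def half_euler_angles(v, w):
        #same steps as find_half_euler_angles: send v to +z, then w to +/-x
        v = v/np.sqrt(np.sum(v**2)); w = w/np.sqrt(np.sum(w**2))
        if v[2] < 0: v = -v
        phi, theta = np.arctan2(v[0], -v[1]), np.arccos(v[2])
        w_prime = euler_matrix_np(phi, theta, 0.0).dot(w)
        if w_prime[0] < 0: w_prime = -w_prime
        return phi, theta, np.arctan2(w_prime[1], w_prime[0])

    #made-up orthogonal "short" and "long" axes
    v = np.array([1., 1., 1.])/np.sqrt(3.)
    w = np.array([1., -1., 0.])/np.sqrt(2.)
    m = euler_matrix_np(*half_euler_angles(v, w))
    print np.round(m.dot(v), 5)  #-> [ 0.  0.  1.]
    print np.round(m.dot(w), 5)  #-> [+/-1.  0.  0.]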


diff -r dd041de2ab1e3c745d0f389d5f98db59ed6a4a98 -r 0607bf437017825c5a50a0792797c05cc2834b3f yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -356,7 +356,6 @@
         
 
         if self.pf.file_particle_data:
-            #import pdb; pdb.set_trace()
             lspecies = self.pf.parameters['lspecies']
             wspecies = self.pf.parameters['wspecies']
             Nrow     = self.pf.parameters['Nrow']
@@ -381,6 +380,7 @@
                     npb = clspecies[self.pf.only_particle_type+1]
             np = npb-npa
             self.pf.particle_position   = self.pf.particle_position[npa:npb]
+            #do NOT correct by an offset of 1.0
+            #self.pf.particle_position  -= 1.0 #fortran indices start with 1
             pbar.update(2)
             self.pf.particle_position  /= self.pf.domain_dimensions #to unitary units (comoving)
@@ -432,12 +432,6 @@
             for j,np in enumerate(nparticles):
                 mylog.debug('found %i of particle type %i'%(j,np))
             
-            if self.pf.single_particle_mass:
-                #cast all particle masses to the same mass
-                cast_type = self.pf.single_particle_type
-                
-
-            
             self.pf.particle_star_index = i
             
             do_stars = (self.pf.only_particle_type is None) or \
@@ -452,13 +446,15 @@
                     pbar = get_pbar("Stellar Ages        ",n)
                     sages  = \
                         b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
-                    sages *= 1.0e9
+                    sages *= 1.0e9 #from Gyr to yr
                     sages *= 365*24*3600 #to seconds
                     sages = self.pf.current_time-sages
                     self.pf.particle_age[-nstars:] = sages
                     pbar.finish()
                     self.pf.particle_metallicity1[-nstars:] = metallicity1
                     self.pf.particle_metallicity2[-nstars:] = metallicity2
+                    #self.pf.particle_metallicity1 *= 0.0199 
+                    #self.pf.particle_metallicity2 *= 0.0199 
                     self.pf.particle_mass_initial[-nstars:] = imass*um
                     self.pf.particle_mass[-nstars:] = mass*um
 
@@ -467,25 +463,17 @@
             pos = self.pf.particle_position
             #particle indices travel with the particle positions
             #pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
-            #if type(self.pf.grid_particles) == type(5):
-            #    max_level = min(max_level,self.pf.grid_particles)
+            if type(self.pf.grid_particles) == type(5):
+                particle_level = min(self.pf.max_level,self.pf.grid_particles)
+            else:
+                particle_level = 2
             grid_particle_count = na.zeros((len(grids),1),dtype='int64')
-            
-            #grid particles at the finest level, removing them once gridded
-            #pbar = get_pbar("Gridding Particles ",init)
-            #assignment = amr_utils.assign_particles_to_cells(
-            #        self.grid_levels.ravel().astype('int32'),
-            #        self.grid_left_edge.astype('float32'),
-            #        self.grid_right_edge.astype('float32'),
-            #        pos[:,0].astype('float32'),
-            #        pos[:,1].astype('float32'),
-            #        pos[:,2].astype('float32'))
-            #pbar.finish()
 
             pbar = get_pbar("Gridding Particles ",init)
             assignment,ilists = amr_utils.assign_particles_to_cell_lists(
                     self.grid_levels.ravel().astype('int32'),
-                    2, #only bother gridding particles to level 2
+                    na.zeros(len(pos[:,0])).astype('int32')-1,
+                    particle_level, #dont grid particles past this
                     self.grid_left_edge.astype('float32'),
                     self.grid_right_edge.astype('float32'),
                     pos[:,0].astype('float32'),
@@ -493,7 +481,6 @@
                     pos[:,2].astype('float32'))
             pbar.finish()
             
-            
             pbar = get_pbar("Filling grids ",init)
             for gidx,(g,ilist) in enumerate(zip(grids,ilists)):
                 np = len(ilist)
@@ -601,19 +588,19 @@
                  file_particle_header=None, 
                  file_particle_data=None,
                  file_star_data=None,
-                 discover_particles=False,
+                 discover_particles=True,
                  use_particles=True,
                  limit_level=None,
                  only_particle_type = None,
                  grid_particles=False,
                  single_particle_mass=False,
                  single_particle_type=0):
-        import yt.frontends.ramses._ramses_reader as _ramses_reader
         
-        
-        dirn = os.path.dirname(filename)
+        #dirn = os.path.dirname(filename)
         base = os.path.basename(filename)
         aexp = base.split('_')[2].replace('.d','')
+        if not aexp.startswith('a'):
+            aexp = '_'+aexp
         
         self.file_particle_header = file_particle_header
         self.file_particle_data = file_particle_data
@@ -625,22 +612,30 @@
         if limit_level is None:
             self.limit_level = na.inf
         else:
+            limit_level = int(limit_level)
             mylog.info("Using maximum level: %i",limit_level)
             self.limit_level = limit_level
         
+        def repu(x):
+            for i in range(5):
+                x=x.replace('__','_')
+            return x    
         if discover_particles:
             if file_particle_header is None:
                 loc = filename.replace(base,'PMcrd%s.DAT'%aexp)
+                loc = repu(loc)
                 if os.path.exists(loc):
                     self.file_particle_header = loc
                     mylog.info("Discovered particle header: %s",os.path.basename(loc))
             if file_particle_data is None:
                 loc = filename.replace(base,'PMcrs0%s.DAT'%aexp)
+                loc = repu(loc)
                 if os.path.exists(loc):
                     self.file_particle_data = loc
                     mylog.info("Discovered particle data:   %s",os.path.basename(loc))
             if file_star_data is None:
                 loc = filename.replace(base,'stars_%s.dat'%aexp)
+                loc = repu(loc)
                 if os.path.exists(loc):
                     self.file_star_data = loc
                     mylog.info("Discovered stellar data:    %s",os.path.basename(loc))
@@ -716,7 +711,7 @@
         self.conversion_factors["Mass"] = self.parameters["aM0"] * 1.98892e33
         self.conversion_factors["Density"] = self.rho0*(aexpn**-3.0)
         self.conversion_factors["GasEnergy"] = self.rho0*self.v0**2*(aexpn**-5.0)
-        self.conversion_factors["Temperature"] = tr
+        #self.conversion_factors["Temperature"] = tr 
         self.conversion_factors["Potential"] = 1.0
         self.cosmological_simulation = True
         
@@ -951,4 +946,3 @@
     def _is_valid(self, *args, **kwargs):
         return False # We make no effort to auto-detect ART data
 
-


diff -r dd041de2ab1e3c745d0f389d5f98db59ed6a4a98 -r 0607bf437017825c5a50a0792797c05cc2834b3f yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -36,6 +36,7 @@
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
     boltzmann_constant_cgs, mass_hydrogen_cgs
+import yt.utilities.lib as amr_utils
 
 KnownARTFields = FieldInfoContainer()
 add_art_field = KnownARTFields.add_field
@@ -60,6 +61,7 @@
 #Hydro Fields that are verified to be OK unit-wise:
 #Density
 #Temperature
+#metallicities
 
 #Hydro Fields that need to be tested:
 #TotalEnergy
@@ -69,9 +71,6 @@
 #GasEnergy
 #MetalDensity SNII + SNia
 #Potentials
-
-#Hydro Derived fields that are untested:
-#metallicities
 #xyzvelocity
 
 #Particle fields that are tested:
@@ -87,6 +86,8 @@
 #Particle fields that are untested:
 #NONE
 
+#Other checks:
+#CellMassMsun == Density * CellVolume
 
 def _convertDensity(data):
     return data.convert("Density")
@@ -143,13 +144,13 @@
 KnownARTFields["GasEnergy"]._convert_function=_convertGasEnergy
 
 def _convertMetalDensitySNII(data):
-    return data.convert("Density")
+    return data.convert('Density')
 KnownARTFields["MetalDensitySNII"]._units = r"\rm{g}/\rm{cm}^3"
 KnownARTFields["MetalDensitySNII"]._projected_units = r"\rm{g}/\rm{cm}^2"
 KnownARTFields["MetalDensitySNII"]._convert_function=_convertMetalDensitySNII
 
 def _convertMetalDensitySNIa(data):
-    return data.convert("Density")
+    return data.convert('Density')
 KnownARTFields["MetalDensitySNIa"]._units = r"\rm{g}/\rm{cm}^3"
 KnownARTFields["MetalDensitySNIa"]._projected_units = r"\rm{g}/\rm{cm}^2"
 KnownARTFields["MetalDensitySNIa"]._convert_function=_convertMetalDensitySNIa
@@ -169,68 +170,101 @@
 ####### Derived fields
 
 def _temperature(field, data):
-    tr  = data["GasEnergy"].astype('float64') #~1
-    d = data["Density"].astype('float64')
-    d[d==0.0] = -1.0 #replace the zeroes (that cause infs)
-    tr /= d #
-    assert na.all(na.isfinite(tr)) #diagnosing some problem...
+    cd = data.pf.conversion_factors["Density"]
+    cg = data.pf.conversion_factors["GasEnergy"]
+    ct = data.pf.tr
+    dg = data["GasEnergy"].astype('float64')
+    dd = data["Density"].astype('float64')
+    di = dd==0.0
+    #dd[di] = -1.0
+    tr = dg/dd
+    #tr[na.isnan(tr)] = 0.0
+    #if data.id==460:
+    #    import pdb;pdb.set_trace()
+    tr /= data.pf.conversion_factors["GasEnergy"]
+    tr *= data.pf.conversion_factors["Density"]
+    tr *= data.pf.tr
+    #tr[di] = -1.0 #replace the zero-density points with zero temp
+    #print tr.min()
+    #assert na.all(na.isfinite(tr))
     return tr
 def _converttemperature(data):
-    x  = data.pf.conversion_factors["Density"]
-    x /= data.pf.conversion_factors["GasEnergy"]
-    x *= data.pf.conversion_factors["Temperature"]
+    x = data.pf.conversion_factors["Temperature"]
+    x = 1.0
     return x
-add_art_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
-KnownARTFields["Temperature"]._units = r"\mathrm{K}"
-KnownARTFields["Temperature"]._projected_units = r"\mathrm{K}"
-KnownARTFields["Temperature"]._convert_function=_converttemperature
+add_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["Temperature"]._units = r"\mathrm{K}"
+ARTFieldInfo["Temperature"]._projected_units = r"\mathrm{K}"
+ARTFieldInfo["Temperature"]._convert_function=_converttemperature
 
 def _metallicity_snII(field, data):
     tr  = data["MetalDensitySNII"] / data["Density"]
     return tr
-add_art_field("Metallicity_SNII", function=_metallicity_snII, units = r"\mathrm{K}",take_log=True)
-KnownARTFields["Metallicity_SNII"]._units = r""
-KnownARTFields["Metallicity_SNII"]._projected_units = r""
+add_field("Metallicity_SNII", function=_metallicity_snII, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["Metallicity_SNII"]._units = r""
+ARTFieldInfo["Metallicity_SNII"]._projected_units = r""
 
 def _metallicity_snIa(field, data):
     tr  = data["MetalDensitySNIa"] / data["Density"]
     return tr
-add_art_field("Metallicity_SNIa", function=_metallicity_snIa, units = r"\mathrm{K}",take_log=True)
-KnownARTFields["Metallicity_SNIa"]._units = r""
-KnownARTFields["Metallicity_SNIa"]._projected_units = r""
+add_field("Metallicity_SNIa", function=_metallicity_snIa, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["Metallicity_SNIa"]._units = r""
+ARTFieldInfo["Metallicity_SNIa"]._projected_units = r""
+
+def _metallicity(field, data):
+    tr  = data["Metal_Density"] / data["Density"]
+    return tr
+add_field("Metallicity", function=_metallicity, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["Metallicity"]._units = r""
+ARTFieldInfo["Metallicity"]._projected_units = r""
 
 def _x_velocity(data):
     tr  = data["XMomentumDensity"]/data["Density"]
     return tr
-add_field("x_velocity", function=_x_velocity, units = r"\mathrm{cm/s}",take_log=False)
-ARTFieldInfo["x_velocity"]._units = r"\rm{cm}/\rm{s}"
-ARTFieldInfo["x_velocity"]._projected_units = r"\rm{cm}/\rm{s}"
+add_field("x-velocity", function=_x_velocity, units = r"\mathrm{cm/s}",take_log=False)
+ARTFieldInfo["x-velocity"]._units = r"\rm{cm}/\rm{s}"
+ARTFieldInfo["x-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
 def _y_velocity(data):
     tr  = data["YMomentumDensity"]/data["Density"]
     return tr
-add_field("y_velocity", function=_y_velocity, units = r"\mathrm{cm/s}",take_log=False)
-ARTFieldInfo["y_velocity"]._units = r"\rm{cm}/\rm{s}"
-ARTFieldInfo["y_velocity"]._projected_units = r"\rm{cm}/\rm{s}"
+add_field("y-velocity", function=_y_velocity, units = r"\mathrm{cm/s}",take_log=False)
+ARTFieldInfo["y-velocity"]._units = r"\rm{cm}/\rm{s}"
+ARTFieldInfo["y-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
 def _z_velocity(data):
     tr  = data["ZMomentumDensity"]/data["Density"]
     return tr
-add_field("z_velocity", function=_z_velocity, units = r"\mathrm{cm/s}",take_log=False)
-ARTFieldInfo["z_velocity"]._units = r"\rm{cm}/\rm{s}"
-ARTFieldInfo["z_velocity"]._projected_units = r"\rm{cm}/\rm{s}"
+add_field("z-velocity", function=_z_velocity, units = r"\mathrm{cm/s}",take_log=False)
+ARTFieldInfo["z-velocity"]._units = r"\rm{cm}/\rm{s}"
+ARTFieldInfo["z-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
 
 def _metal_density(field, data):
     tr  = data["MetalDensitySNIa"]
     tr += data["MetalDensitySNII"]
     return tr
-add_art_field("Metal_Density", function=_metal_density, units = r"\mathrm{K}",take_log=True)
-KnownARTFields["Metal_Density"]._units = r""
-KnownARTFields["Metal_Density"]._projected_units = r""
+add_field("Metal_Density", function=_metal_density, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["Metal_Density"]._units = r""
+ARTFieldInfo["Metal_Density"]._projected_units = r""
 
 
 #Particle fields
 
 #Derived particle fields
 
+def mass_dm(field, data):
+    idx = data["particle_type"]<5
+    #make a dumb assumption that the mass is evenly spread out in the grid
+    #must return an array the shape of the grid cells
+    tr  = data["Ones"] #create a grid in the right size
+    if na.sum(idx)>0:
+        tr /= na.prod(tr.shape) #divide by the volume
+        tr *= na.sum(data['particle_mass'][idx]) #Multiply by total contaiend mass
+        return tr
+    else:
+        return tr*0.0
+
+add_field("particle_cell_mass_dm", function=mass_dm,
+          validators=[ValidateSpatial(0)])
+
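
The mass_dm field above spreads the total dark-matter particle mass of a grid evenly over its cells. A minimal numpy illustration of that arithmetic (standalone sketch; the function name and inputs are made up, not yt API):

    import numpy as np

    def spread_particle_mass(cell_shape, particle_mass, particle_type):
        grid = np.ones(cell_shape)          #one entry per cell, like data["Ones"]
        idx = particle_type < 5             #same dark-matter selection as mass_dm
        if np.sum(idx) == 0:
            return grid*0.0
        grid /= np.prod(cell_shape)         #1/N_cells in every cell
        grid *= np.sum(particle_mass[idx])  #times the total contained mass
        return grid

    #e.g. four 1e8 Msun particles in a 2x2x2 grid -> 5e7 Msun per cell
    print spread_particle_mass((2,2,2), np.ones(4)*1e8, np.zeros(4))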


diff -r dd041de2ab1e3c745d0f389d5f98db59ed6a4a98 -r 0607bf437017825c5a50a0792797c05cc2834b3f yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1137,15 +1137,24 @@
             axes = range(3)
         else:
             axes = [args.axis]
+
+        unit = args.unit
+        if unit is None:
+            unit = '1'
+        width = args.width
+        if width is None:
+            width = 0.5*(pf.domain_right_edge - pf.domain_left_edge)
+        width /= pf[unit]
+
         for ax in axes:
             mylog.info("Adding plot for axis %i", ax)
             if args.projection:
                 plt = ProjectionPlot(pf, ax, args.field, center=center,
-                                     width=args.width,
+                                     width=width,
                                      weight_field=args.weight)
             else:
                 plt = SlicePlot(pf, ax, args.field, center=center,
-                                width=args.width)
+                                width=width)
             if args.grids:
                 plt.draw_grids()
             if args.time: 


diff -r dd041de2ab1e3c745d0f389d5f98db59ed6a4a98 -r 0607bf437017825c5a50a0792797c05cc2834b3f yt/utilities/lib/CICDeposit.pyx
--- a/yt/utilities/lib/CICDeposit.pyx
+++ b/yt/utilities/lib/CICDeposit.pyx
@@ -150,6 +150,7 @@
 @cython.wraparound(False)
 @cython.cdivision(True)
 def assign_particles_to_cell_lists(np.ndarray[np.int32_t, ndim=1] levels, #for cells
+                              np.ndarray[np.int32_t,ndim=1] assign,
                               np.int64_t level_max, 
                               np.ndarray[np.float32_t, ndim=2] left_edges, #many cells
                               np.ndarray[np.float32_t, ndim=2] right_edges,
@@ -162,9 +163,10 @@
     cdef long i,j,level
     cdef long npart = pos_x.shape[0]
     cdef long ncells = left_edges.shape[0] 
-    cdef np.ndarray[np.int32_t, ndim=1] assign = np.zeros(npart,dtype='int32')-1
+    #cdef np.ndarray[np.int32_t, ndim=1] assign 
+    #assign = np.zeros(npart,dtype='int32')-1
     index_lists = []
-    for level in range(level_max,0,-1):
+    for level in range(level_max,-1,-1):
         #start with the finest level
         for i in range(ncells):
             #go through every cell on the finest level first
@@ -182,4 +184,39 @@
     return assign,index_lists
 
     
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def recursive_particle_assignment(grids, grid, 
+                                  np.ndarray[np.float32_t, ndim=2] left_edges, #many cells
+                                  np.ndarray[np.float32_t, ndim=2] right_edges,
+                                  np.ndarray[np.float32_t, ndim=1] pos_x, #particle
+                                  np.ndarray[np.float32_t, ndim=1] pos_y,
+                                  np.ndarray[np.float32_t, ndim=1] pos_z):
+    #start on level zero, grid particles onto every mesh
+    #every particle we are fed, we can assume it exists on our grid
+    #must fill in the grid_particle_count array
+    #and particle_indices for every grid
+    cdef long i,j,level
+    cdef long npart = pos_x.shape[0]
+    cdef long ncells = left_edges.shape[0] 
+    cdef np.ndarray[np.int32_t, ndim=1] assigned       = np.zeros(npart,dtype='int32')
+    cdef np.ndarray[np.int32_t, ndim=1] never_assigned = np.ones(npart,dtype='int32')
+    for i in np.unique(grid.child_index_mask):
+        if i == -1: continue
+        #flag the particles that fall inside this subgrid
+        assigned = np.zeros(npart,dtype='int32')
+        for j in range(npart):
+            if (left_edges[i,0] <= pos_x[j] <= right_edges[i,0]):
+                if (left_edges[i,1] <= pos_y[j] <= right_edges[i,1]):
+                    if (left_edges[i,2] <= pos_z[j] <= right_edges[i,2]):
+                        assigned[j]=1
+                        never_assigned[j]=0
+        if np.sum(assigned)>0:
+            #descend into this subgrid with only the particles it contains
+            keep = assigned == 1
+            recursive_particle_assignment(grids,grids[i],left_edges,right_edges,
+                                          pos_x[keep],pos_y[keep],pos_z[keep])
+    #now we have assigned particles to the subgrids; what remains are particles on this grid
     
+            
+
+
+


diff -r dd041de2ab1e3c745d0f389d5f98db59ed6a4a98 -r 0607bf437017825c5a50a0792797c05cc2834b3f yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -49,20 +49,13 @@
     def __init__(self, *args, **kwargs):
         pass
 
-    def convert_to_pixels(self, plot, coord, offset = True):
-        if plot.xlim is not None:
-            x0, x1 = plot.xlim
-        else:
-            x0, x1 = plot._axes.get_xlim()
-        if plot.ylim is not None:
-            y0, y1 = plot.ylim
-        else:
-            y0, y1 = plot._axes.get_ylim()
-        l, b, width, height = mpl_get_bounds(plot._axes.bbox)
-        dx = width / (x1-x0)
-        dy = height / (y1-y0)
-        return ((coord[0] - int(offset)*x0)*dx,
-                (coord[1] - int(offset)*y0)*dy)
+    def convert_to_plot(self, plot, coord, offset = True):
+        x0, x1 = plot.xlim
+        xx0, xx1 = plot._axes.get_xlim()
+        y0, y1 = plot.ylim
+        yy0, yy1 = plot._axes.get_ylim()
+        return ((coord[0]-x0)/(x1-x0)*(xx1-xx0) + xx0,
+                (coord[1]-y0)/(y1-y0)*(yy1-yy0) + yy0)
 
 class VelocityCallback(PlotCallback):
     _type_name = "velocity"
@@ -259,10 +252,13 @@
     def __call__(self, plot):
         x0, x1 = plot.xlim
         y0, y1 = plot.ylim
+        width, height = plot.image._A.shape
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
-        dx = (xx1-xx0)/(x1-x0)
-        dy = (yy1-yy0)/(y1-y0)
+        xi = x_dict[plot.data.axis]
+        yi = y_dict[plot.data.axis]
+        dx = width / (x1-x0)
+        dy = height / (y1-y0)
         px_index = x_dict[plot.data.axis]
         py_index = y_dict[plot.data.axis]
         dom = plot.data.pf.domain_right_edge - plot.data.pf.domain_left_edge
@@ -275,21 +271,23 @@
         for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
             pxo = px_off * dom[px_index]
             pyo = py_off * dom[py_index]
-            left_edge_px = (GLE[:,px_index]+pxo-x0)*dx + xx0
-            left_edge_py = (GLE[:,py_index]+pyo-y0)*dy + yy0
-            right_edge_px = (GRE[:,px_index]+pxo-x0)*dx + xx0
-            right_edge_py = (GRE[:,py_index]+pyo-y0)*dy + yy0
+            left_edge_px = (GLE[:,px_index]+pxo-x0)*dx
+            left_edge_py = (GLE[:,py_index]+pyo-y0)*dy
+            right_edge_px = (GRE[:,px_index]+pxo-x0)*dx
+            right_edge_py = (GRE[:,py_index]+pyo-y0)*dy
             verts = na.array(
-                    [(left_edge_px, left_edge_px, right_edge_px, right_edge_px),
-                     (left_edge_py, right_edge_py, right_edge_py, left_edge_py)])
+                [(left_edge_px, left_edge_px, right_edge_px, right_edge_px),
+                 (left_edge_py, right_edge_py, right_edge_py, left_edge_py)])
             visible =  ( right_edge_px - left_edge_px > self.min_pix ) & \
                        ( right_edge_py - left_edge_py > self.min_pix )
             verts=verts.transpose()[visible,:,:]
             if verts.size == 0: continue
             edgecolors = (0.0,0.0,0.0,self.alpha)
+            verts[:,:,0]= (xx1-xx0)*(verts[:,:,0]/width) + xx0
+            verts[:,:,1]= (yy1-yy0)*(verts[:,:,1]/height) + yy0
             grid_collection = matplotlib.collections.PolyCollection(
-                    verts, facecolors="none",
-                           edgecolors=edgecolors)
+                verts, facecolors="none",
+                edgecolors=edgecolors)
             plot._axes.hold(True)
             plot._axes.add_collection(grid_collection)
             if self.annotate:
@@ -497,8 +495,8 @@
         plot._axes.lines = [l for l in plot._axes.lines if id(l) not in self._ids]
         kwargs = self.plot_args.copy()
         if self.data_coords and len(plot.image._A.shape) == 2:
-            p1 = self.convert_to_pixels(plot, self.p1)
-            p2 = self.convert_to_pixels(plot, self.p2)
+            p1 = self.convert_to_plot(plot, self.p1)
+            p2 = self.convert_to_plot(plot, self.p2)
         else:
             p1, p2 = self.p1, self.p2
             if not self.data_coords:
@@ -614,6 +612,8 @@
         units.  *plot_args* is a dict fed to matplotlib with arrow properties.
         """
         self.pos = pos
+        if not iterable(code_size):
+            code_size = (code_size, code_size)
         self.code_size = code_size
         if plot_args is None: plot_args = {}
         self.plot_args = plot_args
@@ -621,8 +621,8 @@
     def __call__(self, plot):
         from matplotlib.patches import Arrow
         # Now convert the pixels to code information
-        x, y = self.convert_to_pixels(plot, self.pos)
-        dx, dy = self.convert_to_pixels(plot, self.code_size, False)
+        x, y = self.convert_to_plot(plot, self.pos)
+        dx, dy = self.convert_to_plot(plot, self.code_size, False)
         arrow = Arrow(x, y, dx, dy, **self.plot_args)
         plot._axes.add_patch(arrow)
 
@@ -639,7 +639,12 @@
         self.text_args = text_args
 
     def __call__(self, plot):
-        x,y = self.convert_to_pixels(plot, self.pos)
+
+
+        width,height = plot.image._A.shape
+        x,y = self.convert_to_plot(plot, self.pos)
+        x,y = x/width,y/height
+
         plot._axes.text(x, y, self.text, **self.text_args)
 
 class MarkerAnnotateCallback(PlotCallback):
@@ -659,7 +664,7 @@
             pos = (self.pos[x_dict[plot.data.axis]],
                    self.pos[y_dict[plot.data.axis]])
         else: pos = self.pos
-        x,y = self.convert_to_pixels(plot, pos)
+        x,y = self.convert_to_plot(plot, pos)
         plot._axes.hold(True)
         plot._axes.plot((x,),(y,),self.marker, **self.plot_args)
         plot._axes.hold(False)
@@ -923,7 +928,7 @@
                 pos = (self.pos[x_dict[plot.data.axis]],
                        self.pos[y_dict[plot.data.axis]])
             else: pos = self.pos
-            x,y = self.convert_to_pixels(plot, pos)
+            x,y = self.convert_to_plot(plot, pos)
         else:
             x, y = self.pos
             if not self.data_coords:
@@ -983,7 +988,7 @@
             gg &= (reg["ParticleMassMsun"] >= self.minimum_mass)
             if gg.sum() == 0: return
         plot._axes.hold(True)
-        px, py = self.convert_to_pixels(plot,
+        px, py = self.convert_to_plot(plot,
                     [reg[field_x][gg][::self.stride],
                      reg[field_y][gg][::self.stride]])
         plot._axes.scatter(px, py, edgecolors='None', marker=self.marker,


diff -r dd041de2ab1e3c745d0f389d5f98db59ed6a4a98 -r 0607bf437017825c5a50a0792797c05cc2834b3f yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -27,6 +27,7 @@
 import base64
 import matplotlib.pyplot
 import cStringIO
+import types
 from functools import wraps
 
 import numpy as na
@@ -37,7 +38,8 @@
     FixedResolutionBuffer, \
     ObliqueFixedResolutionBuffer
 from .plot_modifications import get_smallest_appropriate_unit, \
-    GridBoundaryCallback, TextLabelCallback
+    callback_registry
+import plot_modifications as CallbackMod
 from .tick_locators import LogLocator, LinearLocator
 from yt.utilities.delaunay.triangulate import Triangulation as triang
 
@@ -72,12 +74,20 @@
         return rv
     return newfunc
 
+def apply_callback(f):
+    @wraps(f)
+    def newfunc(*args, **kwargs):
+        rv = f(*args, **kwargs)
+        args[0]._callbacks.append((f.__name__,(args,kwargs)))
+        return rv
+    return newfunc
+
 field_transforms = {}
 
 class IMPlot(object): pass
 
 class CallbackWrapper(object):
-    def __init__(self, window_plot, frb, field):
+    def __init__(self, viewer, window_plot, frb, field):
         self.data = frb.data_source
         self._axes = window_plot.axes
         self._figure = window_plot.figure
@@ -85,8 +95,8 @@
             self.image = self._axes.images[0]
         self._period = frb.pf.domain_width
         self.pf = frb.pf
-        self.xlim = (frb.bounds[0], frb.bounds[1])
-        self.ylim = (frb.bounds[2], frb.bounds[3])
+        self.xlim = viewer.xlim
+        self.ylim = viewer.ylim
         self._type_name = ''
 
 class FieldTransform(object):
@@ -512,8 +522,7 @@
         self._colormaps = defaultdict(lambda: 'algae')
         self.zmin = None
         self.zmax = None
-        self._draw_grids = False
-        self._annotate_text = False
+        self.setup_callbacks()
         self._callbacks = []
         self._field_transform = {}
         for field in self._frb.data.keys():
@@ -557,16 +566,17 @@
         self.zmin = zmin
         self.zmax = zmax
 
-    @invalidate_plot
-    def draw_grids(self, alpha=1.0, min_pix=1, annotate = False, periodic = True):
-        self._draw_grids = True
-        self.grid_params = (alpha, min_pix, annotate, periodic)
-
-    @invalidate_plot
-    def annotate_text(self, position, message, data_coords = False, text_args = None):
-        self._annotate_text = True
-        self.text_params = (position, message, data_coords, text_args)
-
+    def setup_callbacks(self):
+        for key in callback_registry:
+            ignored = ['PlotCallback','CoordAxesCallback','LabelCallback',
+                       'UnitBoundaryCallback']
+            if key in ignored: 
+                continue
+            cbname = callback_registry[key]._type_name
+            CallbackMaker = getattr(CallbackMod,key)
+            callback = invalidate_plot(apply_callback(getattr(CallbackMod,key)))
+            self.__dict__['annotate_'+cbname] = types.MethodType(callback,self)
+        
     def get_metadata(self, field, strip_mathml = True, return_string = True):
         fval = self._frb[field]
         mi = fval.min()
@@ -602,7 +612,8 @@
     def get_field_units(self, field, strip_mathml = True):
         ds = self._frb.data_source
         pf = self.pf
-        if ds._type_name == "slice" or "cutting":
+        if ds._type_name in ("slice", "cutting") or \
+           (ds._type_name == "proj" and ds.weight_field is not None):
             units = pf.field_info[field].get_units()
         elif ds._type_name == "proj":
             units = pf.field_info[field].get_projected_units()
@@ -644,7 +655,7 @@
                 raise RuntimeError('origin keyword: \"%(k)s\" not recognized' % {'k': self.origin})
             
             extent = [self.xlim[i] - xc for i in (0,1)]
-            extent.extend([self.xlim[i] - yc for i in (0,1)])
+            extent.extend([self.ylim[i] - yc for i in (0,1)])
             extent = [el*self.pf[md['unit']] for el in extent]
 
             self.plots[f] = WindowPlotMPL(self._frb[f], extent, self._field_transform[f], 
@@ -664,15 +675,11 @@
 
             cb.set_label(r'$\rm{'+f.encode('string-escape')+r'}\/\/('+md['units']+r')$')
 
-            if self._draw_grids:
-                self._callbacks.append(GridBoundaryCallback(*self.grid_params))
-
-            if self._annotate_text:
-                self._callbacks.append(TextLabelCallback(*self.text_params))
-                
-            for callback in self._callbacks:
-                cbr = CallbackWrapper(self.plots[f], self._frb, f)
-                callback(cbr)
+            for name,(args,kwargs) in self._callbacks:
+                cbw = CallbackWrapper(self, self.plots[f], self._frb, f)
+                CallbackMaker = getattr(CallbackMod,name)
+                callback = CallbackMaker(*args[1:],**kwargs)
+                callback(cbw)
 
         self._plot_valid = True
 
@@ -689,7 +696,9 @@
             self.cmap = cmap
         self.plots[field].image.set_cmap(cmap)
 
-    def save(self,name):
+    def save(self,name=None):
+        if name == None:
+            name = str(self.pf.parameter_filename)
         axis = axis_names[self.data_source.axis]
         if 'Slice' in self.data_source.__class__.__name__:
             type = 'Slice'



https://bitbucket.org/yt_analysis/yt/changeset/46ad823d45c0/
changeset:   46ad823d45c0
branch:      yt
user:        jzuhone
date:        2012-07-12 01:43:41
summary:     get_smallest_appropriate_unit returns None if the given width is smaller than or equal to 1 cm.

Unfortunately, FLASH *always* uses cgs units, and some of the test problems have domains with unit width. This led to some problems with creating PlotWindow objects for these datasets.

My suggested solution is to make "cm" the default if the loop runs through all the possibilities and exhausts them, since "cm" is still the "smallest appropriate unit" in that case.
affected #:  1 file
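
A standalone sketch of the loop-plus-fallback described above (the cm-per-unit factors and function name here are illustrative, not pulled from yt):

    units_in_cm = {'mpc': 3.0857e24, 'kpc': 3.0857e21, 'pc': 3.0857e18,
                   'au': 1.4960e13, 'rsun': 6.9550e10, 'cm': 1.0}

    def smallest_appropriate_unit(width_cm):
        good_u, max_nu = None, 1e30
        for unit, cm_per_unit in units_in_cm.items():
            vv = width_cm/cm_per_unit   #the width expressed in this unit
            if vv < max_nu and vv > 1.0:
                good_u, max_nu = unit, vv
        if good_u is None:
            good_u = 'cm'               #the new fallback for widths <= 1 cm
        return good_u

    print smallest_appropriate_unit(1.0)        #-> 'cm' (unit-width FLASH domain)
    print smallest_appropriate_unit(3.0857e23)  #-> 'kpc'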

diff -r 0607bf437017825c5a50a0792797c05cc2834b3f -r 46ad823d45c0cb1c6277fbc42cce8fc9df2e7c52 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -166,6 +166,7 @@
             if vv < max_nu and vv > 1.0:
                 good_u = unit
                 max_nu = v*self[unit]
+        if good_u is None : good_u = 'cm'
         return good_u
 
     def has_key(self, key):



https://bitbucket.org/yt_analysis/yt/changeset/b4dc90a73895/
changeset:   b4dc90a73895
branch:      yt
user:        jzuhone
date:        2012-07-12 01:53:35
summary:     And we need to do the same thing here too.
affected #:  1 file

diff -r 46ad823d45c0cb1c6277fbc42cce8fc9df2e7c52 -r b4dc90a73895d305a15c4fcd55b13f9659a78ac3 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -395,6 +395,7 @@
         if vv < max_nu and vv > 1.0:
             good_u = unit
             max_nu = v*pf[unit]
+    if good_u is None : good_u = 'cm'
     return good_u
 
 class UnitBoundaryCallback(PlotCallback):



https://bitbucket.org/yt_analysis/yt/changeset/057bb8b6cda1/
changeset:   057bb8b6cda1
branch:      yt
user:        jzuhone
date:        2012-07-12 01:55:56
summary:     Merging
affected #:  4 files

diff -r b4dc90a73895d305a15c4fcd55b13f9659a78ac3 -r 057bb8b6cda1902892411f3831580dff7c1e5092 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -238,17 +238,16 @@
 
 cdef class RockstarInterface
 
-cdef RockstarInterface rh
 cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
-    cdef int i, fi, npart, tnpart
-    cdef np.float64_t conv[6], left_edge[6], right_edge[3]
-    dd = rh.data_source
+    print 'reading from particle filename %s'%filename # should print ./inline.0
+    cdef np.float64_t conv[6], left_edge[6]
     cdef np.ndarray[np.int64_t, ndim=1] arri
     cdef np.ndarray[np.float64_t, ndim=1] arr
     block = int(str(filename).rsplit(".")[-1])
 
     # Now we want to grab data from only a subset of the grids.
     n = rh.block_ratio
+    dd = rh.pf.h.all_data()
     grids = np.array_split(dd._grids, NUM_BLOCKS)[block]
     tnpart = 0
     for g in grids:
@@ -257,12 +256,9 @@
     #print "Loading indices: size = ", tnpart
     conv[0] = conv[1] = conv[2] = rh.pf["mpchcm"]
     conv[3] = conv[4] = conv[5] = 1e-5
-    left_edge[0] = rh.le[0]
-    left_edge[1] = rh.le[1]
-    left_edge[2] = rh.le[2]
-    right_edge[0] = rh.re[0]
-    right_edge[1] = rh.re[1]
-    right_edge[2] = rh.re[2]
+    left_edge[0] = rh.pf.domain_left_edge[0]
+    left_edge[1] = rh.pf.domain_left_edge[1]
+    left_edge[2] = rh.pf.domain_left_edge[2]
     left_edge[3] = left_edge[4] = left_edge[5] = 0.0
     pi = 0
     for g in grids:
@@ -277,9 +273,6 @@
                       "particle_velocity_z"]:
             arr = dd._get_data_from_grid(g, field).astype("float64")
             for i in range(npart):
-                if fi<3: 
-                    if  left_edge[i] > arr[i]: continue
-                    if right_edge[i] < arr[i]: continue
                 p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
             fi += 1
         pi += npart
@@ -293,7 +286,7 @@
     cdef public object data_source
     cdef int rank
     cdef int size
-    cdef int block_ratio
+    cdef public int block_ratio
 
     def __cinit__(self, pf, data_source):
         self.pf = pf
@@ -304,8 +297,8 @@
                        int parallel = False, int num_readers = 1,
                        int num_writers = 1,
                        int writing_port = -1, int block_ratio = 1,
-                       int periodic = 1, int min_halo_size = 20,
-                       char *outbase = 'None'):
+                       int periodic = 1, int num_snaps = 1,
+                       int min_halo_size = 25, outbase = "None"):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
@@ -324,10 +317,11 @@
         FILENAME = "inline.<block>"
         FILE_FORMAT = "GENERIC"
         OUTPUT_FORMAT = "ASCII"
+        NUM_SNAPS = num_snaps
+        NUM_READERS = num_readers
         NUM_SNAPS = 1
-        NUM_READERS = num_readers
-        NUM_BLOCKS = num_readers * block_ratio
         NUM_WRITERS = num_writers
+        MIN_HALO_OUTPUT_SIZE=min_halo_size
         self.block_ratio = block_ratio
 
         h0 = self.pf.hubble_constant


diff -r b4dc90a73895d305a15c4fcd55b13f9659a78ac3 -r 057bb8b6cda1902892411f3831580dff7c1e5092 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -944,5 +944,14 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        return False # We make no effort to auto-detect ART data
+        """
+        Defined for Daniel Ceverino's file naming scheme.
+        This could differ for other formats.
+        """
+        fn = ("%s" % (os.path.basename(args[0])))
+        f = ("%s" % args[0])
+        if fn.endswith(".d") and fn.startswith('10Mpc') and\
+                os.path.exists(f): 
+                return True
+        return False
 


diff -r b4dc90a73895d305a15c4fcd55b13f9659a78ac3 -r 057bb8b6cda1902892411f3831580dff7c1e5092 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -61,6 +61,8 @@
     _type_name = "velocity"
     def __init__(self, factor=16, scale=None, scale_units=None, normalize=False):
         """
+        annotate_velocity(factor=16, scale=None, scale_units=None, normalize=False):
+        
         Adds a 'quiver' plot of velocity to the plot, skipping all but
         every *factor* datapoint. *scale* is the data units per arrow
         length unit using *scale_units* (see
@@ -93,6 +95,8 @@
     _type_name = "magnetic_field"
     def __init__(self, factor=16, scale=None, scale_units=None, normalize=False):
         """
+        annotate_magnetic_field(factor=16, scale=None, scale_units=None, normalize=False):
+
         Adds a 'quiver' plot of magnetic field to the plot, skipping all but
         every *factor* datapoint. *scale* is the data units per arrow
         length unit using *scale_units* (see
@@ -121,6 +125,8 @@
     _type_name = "quiver"
     def __init__(self, field_x, field_y, factor, scale=None, scale_units=None, normalize=False):
         """
+        annotate_quiver(field_x, field_y, factor, scale=None, scale_units=None, normalize=False):
+
         Adds a 'quiver' plot to any plot, using the *field_x* and *field_y*
         from the associated data, skipping every *factor* datapoints
         *scale* is the data units per arrow length unit using *scale_units* 
@@ -173,6 +179,9 @@
     def __init__(self, field, ncont=5, factor=4, take_log=False, clim=None,
                  plot_args = None):
         """
+        annotate_contour(field, ncont=5, factor=4, take_log=False, clim=None,
+                         plot_args = None):
+
         Add contours in *field* to the plot.  *ncont* governs the number of
         contours generated, *factor* governs the number of points used in the
         interpolation, *take_log* governs how it is contoured and *clim* gives
@@ -239,6 +248,8 @@
     _type_name = "grids"
     def __init__(self, alpha=1.0, min_pix=1, annotate=False, periodic=True):
         """
+        annotate_grids(alpha=1.0, min_pix=1, annotate=False, periodic=True)
+
         Adds grid boundaries to a plot, optionally with *alpha*-blending.
         Cutoff for display is at *min_pix* wide.
         *annotate* puts the grid id in the corner of the grid.  (Not so great in projections...)
@@ -303,6 +314,11 @@
                  start_at_xedge=False, start_at_yedge=False,
                  plot_args=None):
         """
+        annotate_streamlines(field_x, field_y, factor=6.0, nx=16, ny=16,
+                             xstart=(0,1), ystart=(0,1), nsample=256,
+                             start_at_xedge=False, start_at_yedge=False,
+                             plot_args=None)
+
         Add streamlines to any plot, using the *field_x* and *field_y*
         from the associated data, using *nx* and *ny* starting points
         that are bounded by *xstart* and *ystart*.  To begin
@@ -462,6 +478,8 @@
     _type_name = "line"
     def __init__(self, x, y, plot_args = None):
         """
+        annotate_line(x, y, plot_args = None)
+
         Overplot *x* and *y* with *plot_args* fed into the plot.
         """
         PlotCallback.__init__(self)
@@ -480,6 +498,8 @@
 
     def __init__(self, p1, p2, data_coords=False, plot_args = None):
         """
+        annotate_image_line(p1, p2, data_coords=False, plot_args = None)
+
         Plot from *p1* to *p2* (image plane coordinates)
         with *plot_args* fed into the plot.
         """
@@ -520,6 +540,8 @@
     _type_name = "cquiver"
     def __init__(self, field_x, field_y, factor):
         """
+        annotate_cquiver(field_x, field_y, factor)
+
         Get a quiver plot on top of a cutting plane, using *field_x* and
         *field_y*, skipping every *factor* datapoint in the discretization.
         """
@@ -562,6 +584,8 @@
     _type_name = "clumps"
     def __init__(self, clumps, plot_args = None):
         """
+        annotate_clumps(clumps, plot_args = None)
+
         Take a list of *clumps* and plot them as a set of contours.
         """
         self.clumps = clumps
@@ -609,6 +633,8 @@
     _type_name = "arrow"
     def __init__(self, pos, code_size, plot_args = None):
         """
+        annotate_arrow(pos, code_size, plot_args = None)
+
         This adds an arrow pointing at *pos* with size *code_size* in code
         units.  *plot_args* is a dict fed to matplotlib with arrow properties.
         """
@@ -631,6 +657,8 @@
     _type_name = "point"
     def __init__(self, pos, text, text_args = None):
         """
+        annotate_point(pos, text, text_args = None)
+
         This adds *text* at position *pos*, where *pos* is in code-space.
         *text_args* is a dict fed to the text placement code.
         """
@@ -652,6 +680,8 @@
     _type_name = "marker"
     def __init__(self, pos, marker='x', plot_args=None):
         """
+        annotate_marker(pos, marker='x', plot_args=None)
+
         Adds text *marker* at *pos* in code units.  *plot_args* is a dict
         that will be forwarded to the plot command.
         """
@@ -675,6 +705,9 @@
     def __init__(self, center, radius, circle_args = None,
                  text = None, text_args = None):
         """
+        annotate_sphere(center, radius, circle_args = None,
+                        text = None, text_args = None)
+        
         A sphere centered at *center* in code units with radius *radius* in
         code units will be created, with optional *circle_args*, *text*, and
         *text_args*.
@@ -714,6 +747,11 @@
                  font_size=8, print_halo_size=False,
                  print_halo_mass=False, width=None):
         """
+        annotate_hop_circles(hop_output, max_number=None,
+                             annotate=False, min_size=20, max_size=10000000,
+                             font_size=8, print_halo_size=False,
+                             print_halo_mass=False, width=None)
+
         Accepts a :class:`yt.HopList` *hop_output* and plots up to
         *max_number* (None for unlimited) halos as circles.
         """
@@ -767,6 +805,9 @@
     def __init__(self, hop_output, max_number, p_size=1.0,
                 min_size=20, alpha=0.2):
         """
+        annotate_hop_particles(hop_output, max_number, p_size=1.0,
+                               min_size=20, alpha=0.2)
+
         Adds particle positions for the members of each halo as identified
         by HOP. Along *axis* up to *max_number* groups in *hop_output* that are
         larger than *min_size* are plotted with *p_size* pixels per particle; 
@@ -912,6 +953,8 @@
     _type_name = "text"
     def __init__(self, pos, text, data_coords=False, text_args = None):
         """
+        annotate_text(pos, text, data_coords=False, text_args = None)
+
         Accepts a position in (0..1, 0..1) of the image, some text and
         optionally some text arguments. If data_coords is True,
         position will be in code units instead of image coordinates.
@@ -944,6 +987,10 @@
                  ptype=None, stars_only=False, dm_only=False,
                  minimum_mass=None, alpha=1.0):
         """
+        annotate_particles(width, p_size=1.0, col='k', marker='o', stride=1.0,
+                           ptype=None, stars_only=False, dm_only=False,
+                           minimum_mass=None, alpha=1.0)
+
         Adds particle positions, based on a thick slab along *axis* with a
         *width* along the line of sight.  *p_size* controls the number of
         pixels per particle, and *col* governs the color.  *ptype* will
@@ -1018,8 +1065,10 @@
 
 class TitleCallback(PlotCallback):
     _type_name = "title"
-    def __init__(self, title="Plot"):
+    def __init__(self, title):
         """
+        annotate_title(title)
+
         Accepts a *title* and adds it to the plot.
         """
         PlotCallback.__init__(self)


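Taken together, these signature lines give every callback a usable first docstring line. A rough usage sketch follows; the SlicePlot convenience wrapper and the dataset path are assumptions about this branch, not part of the diff:

    from yt.mods import load, SlicePlot

    pf = load("DD0010/moving7_0010")      # illustrative dataset path
    slc = SlicePlot(pf, "x", "Density")
    slc.annotate_velocity(factor=16)      # quiver of velocity, every 16th point
    help(slc.annotate_velocity)           # shows the signature line added above
    slc.save()

The help() call only becomes informative once each callback's __doc__ is copied onto the bound annotate_* method, which is what the plot_window.py change below does.
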
diff -r b4dc90a73895d305a15c4fcd55b13f9659a78ac3 -r 057bb8b6cda1902892411f3831580dff7c1e5092 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -575,6 +575,7 @@
             cbname = callback_registry[key]._type_name
             CallbackMaker = getattr(CallbackMod,key)
             callback = invalidate_plot(apply_callback(getattr(CallbackMod,key)))
+            callback.__doc__ = CallbackMaker.__init__.__doc__
             self.__dict__['annotate_'+cbname] = types.MethodType(callback,self)
         
     def get_metadata(self, field, strip_mathml = True, return_string = True):

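The one-line addition above is what makes those docstrings visible on the dynamically attached annotate_* methods. A minimal sketch of the pattern with toy names (not yt classes), assuming only the standard library and omitting the invalidate_plot/apply_callback wrapping:

    import types

    class _GreetCallback(object):
        _type_name = "greeting"
        def __init__(self, text):
            """annotate_greeting(text)

            Adds *text* as a toy annotation."""
            self.text = text

    def _make_annotator(cls):
        # Wrap the callback class in a plain function and copy the __init__
        # docstring onto it, as the hunk above does for the real callbacks.
        def annotator(self, *args, **kwargs):
            return cls(*args, **kwargs)
        annotator.__doc__ = cls.__init__.__doc__
        return annotator

    class _ToyPlot(object):
        def setup_callbacks(self):
            for cls in (_GreetCallback,):
                name = "annotate_" + cls._type_name
                self.__dict__[name] = types.MethodType(_make_annotator(cls), self)

    p = _ToyPlot()
    p.setup_callbacks()
    print(p.annotate_greeting.__doc__)    # the signature line now shows up in help()
    p.annotate_greeting("hello")
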
Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
it because you have the service enabled and are a recipient of this
email.


