[yt-svn] commit/yt: 87 new changesets

commits-noreply at bitbucket.org
Mon May 5 06:30:03 PDT 2014


87 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/db524a821753/
Changeset:   db524a821753
Branch:      yt-3.0
User:        samskillman
Date:        2014-04-08 19:53:49
Summary:     Quick implementation of no-copy decompose_array functions that don't require
creating the entire array in memory, or copies thereof. These are then used in the
FITS frontend.
Affected #:  2 files

diff -r bf628d9eec62eb47e086acc690d045f89b31ce4e -r db524a821753cd40aab5981f2ec6c28044f9d9fd yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -32,7 +32,7 @@
     io_registry
 from .fields import FITSFieldInfo
 from yt.utilities.decompose import \
-    decompose_array, get_psize
+    decompose_array, get_psize, decompose_array_nocopy
 
 class astropy_imports:
     _pyfits = None
@@ -124,12 +124,10 @@
             bbox = np.array([[le,re] for le, re in zip(pf.domain_left_edge,
                                                        pf.domain_right_edge)])
             psize = get_psize(np.array(pf.domain_dimensions), pf.nprocs)
-            temp_arr = np.zeros(pf.domain_dimensions)
-            gle, gre, temp_arr = decompose_array(temp_arr, psize, bbox)
+            gle, gre, shapes = decompose_array_nocopy(pf.domain_dimensions, psize, bbox)
             self.grid_left_edge = self.pf.arr(gle, "code_length")
             self.grid_right_edge = self.pf.arr(gre, "code_length")
-            self.grid_dimensions = np.array([grid.shape for grid in temp_arr], dtype="int32")
-            del temp_arr
+            self.grid_dimensions = np.array(shapes, dtype="int32")
         else:
             self.grid_left_edge[0,:] = pf.domain_left_edge
             self.grid_right_edge[0,:] = pf.domain_right_edge

diff -r bf628d9eec62eb47e086acc690d045f89b31ce4e -r db524a821753cd40aab5981f2ec6c28044f9d9fd yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -52,6 +52,27 @@
     patches = split_array(arr, psize)
     return grid_left_edges, grid_right_edges, patches
 
+def decompose_array_nocopy(shape, psize, bbox):
+    """ Calculate list of product(psize) subarrays of arr, along with their
+        left and right edges
+    """
+    grid_left_edges = np.empty([np.product(psize), 3], dtype=np.float64)
+    grid_right_edges = np.empty([np.product(psize), 3], dtype=np.float64)
+    n_d = shape
+    d_s = (bbox[:, 1] - bbox[:, 0]) / n_d
+    dist = np.mgrid[bbox[0, 0]:bbox[0, 1]:d_s[0],
+                    bbox[1, 0]:bbox[1, 1]:d_s[1],
+                    bbox[2, 0]:bbox[2, 1]:d_s[2]]
+    for i in range(3):
+        xyz = split_array(dist[i], psize)
+        for j in range(np.product(psize)):
+            grid_left_edges[j, i] = xyz[j][0, 0, 0]
+            grid_right_edges[j, i] = xyz[j][-1, -1, -1] + d_s[i]
+        del xyz
+    del dist
+    shapes = split_array_nocopy(shape, psize)
+    return grid_left_edges, grid_right_edges, shapes
+
 
 def evaluate_domain_decomposition(n_d, pieces, ldom):
     """ Evaluate longest to shortest edge ratio
@@ -141,3 +162,16 @@
                 slices.append(np.s_[lei[0]:rei[0], lei[1]:
                                     rei[1], lei[2]:rei[2]])
     return [tab[slc] for slc in slices]
+
+def split_array_nocopy(shape, psize):
+    """ Compute the shapes of px*py*pz subarrays without allocating them. """
+    n_d = np.array(shape, dtype=np.int64)
+    shapes = []
+    for i in range(psize[0]):
+        for j in range(psize[1]):
+            for k in range(psize[2]):
+                piece = np.array([i, j, k], dtype=np.int64)
+                lei = n_d * piece / psize
+                rei = n_d * (piece + np.ones(3, dtype=np.int64)) / psize
+                shapes.append(rei - lei)
+    return shapes

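For context, a minimal usage sketch of the new no-copy path, assuming the
names added in the diff above are importable; the domain shape, bbox, and
piece count here are hypothetical:

    import numpy as np
    from yt.utilities.decompose import decompose_array_nocopy, get_psize

    domain_dimensions = np.array([64, 64, 64])             # hypothetical domain
    bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])  # per-axis edges
    psize = get_psize(domain_dimensions, 8)                # split into 8 pieces
    # Edges and subgrid shapes come straight from the domain shape; unlike
    # decompose_array, no 64**3 scratch array is ever allocated.
    gle, gre, shapes = decompose_array_nocopy(domain_dimensions, psize, bbox)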

https://bitbucket.org/yt_analysis/yt/commits/09362feec07c/
Changeset:   09362feec07c
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-04 20:17:18
Summary:     Fixing up the particle trajectories analysis module for units and parallelization, and supporting trajectories that start and/or stop at arbitrary points in the simulation.
Affected #:  3 files

diff -r 918a6162882ffc2e6e16f8493e1d098119d9ed27 -r 09362feec07c80eafd8112cf1f4dffeeca75d678 doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
--- a/doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
+++ b/doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:874e85c86cd80a516bb61775b566cd46766c60bdf8f865336bf9dd3505f83821"
+  "signature": "sha256:e4b5ea69687eb79452c16385b3a6f795b4572518dfa7f9d8a8125bd75b5fea85"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -21,9 +21,11 @@
      "input": [
       "%matplotlib inline\n",
       "from yt.mods import *\n",
-      "from yt.analysis_modules.api import ParticleTrajectories\n",
+      "import glob\n",
+      "from yt.analysis_modules.particle_trajectories.api import ParticleTrajectories\n",
       "from yt.config import ytcfg\n",
-      "path = ytcfg.get(\"yt\", \"test_data_dir\")"
+      "path = ytcfg.get(\"yt\", \"test_data_dir\")\n",
+      "import matplotlib.pyplot as plt"
      ],
      "language": "python",
      "metadata": {},
@@ -75,8 +77,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load(my_fns[0])\n",
-      "dd = pf.h.all_data()\n",
+      "ds = load(my_fns[0])\n",
+      "dd = ds.all_data()\n",
       "indices = dd[\"particle_index\"].astype(\"int\")\n",
       "print indices"
      ],
@@ -130,8 +132,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.plot(trajs[\"particle_position_x\"][0], trajs[\"particle_position_y\"][0])\n",
-      "pylab.plot(trajs[\"particle_position_x\"][1], trajs[\"particle_position_y\"][1])"
+      "plt.plot(trajs[\"particle_position_x\"][0].ndarray_view(), trajs[\"particle_position_y\"][0].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_position_x\"][1].ndarray_view(), trajs[\"particle_position_y\"][1].ndarray_view())"
      ],
      "language": "python",
      "metadata": {},
@@ -148,8 +150,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.plot(trajs[\"particle_velocity_x\"][0], trajs[\"particle_velocity_y\"][0])\n",
-      "pylab.plot(trajs[\"particle_velocity_x\"][1], trajs[\"particle_velocity_y\"][1])"
+      "plt.plot(trajs[\"particle_velocity_x\"][0].ndarray_view(), trajs[\"particle_velocity_y\"][0].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_velocity_x\"][1].ndarray_view(), trajs[\"particle_velocity_y\"][1].ndarray_view())"
      ],
      "language": "python",
      "metadata": {},
@@ -166,8 +168,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"particle_velocity_x\"][1])\n",
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"particle_velocity_y\"][1])"
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_velocity_x\"][1].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_velocity_y\"][1].ndarray_view())"
      ],
      "language": "python",
      "metadata": {},
@@ -185,8 +187,8 @@
      "collapsed": false,
      "input": [
       "particle1 = trajs.trajectory_from_index(1)\n",
-      "pylab.plot(particle1[\"particle_time\"], particle1[\"particle_position_x\"])\n",
-      "pylab.plot(particle1[\"particle_time\"], particle1[\"particle_position_y\"])"
+      "plt.plot(particle1[\"particle_time\"].ndarray_view(), particle1[\"particle_position_x\"].ndarray_view())\n",
+      "plt.plot(particle1[\"particle_time\"].ndarray_view(), particle1[\"particle_position_y\"].ndarray_view())"
      ],
      "language": "python",
      "metadata": {},
@@ -203,8 +205,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
-      "slc = SlicePlot(pf, \"x\", [\"Density\",\"Dark_Matter_Density\"], center=\"max\", width=(3.0, \"mpc\"))\n",
+      "ds = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+      "slc = SlicePlot(ds, \"x\", [\"density\",\"dark_matter_density\"], center=\"max\", width=(3.0, \"Mpc\"))\n",
       "slc.show()"
      ],
      "language": "python",
@@ -222,7 +224,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "sp = pf.sphere(\"max\", (0.5, \"mpc\"))\n",
+      "sp = ds.sphere(\"max\", (0.5, \"Mpc\"))\n",
       "indices = sp[\"particle_index\"][sp[\"particle_type\"] == 1]"
      ],
      "language": "python",
@@ -240,7 +242,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "my_fns = glob.glob(path+\"/enzo_tiny_cosmology/DD*/*.index\")\n",
+      "my_fns = glob.glob(path+\"/enzo_tiny_cosmology/DD*/*.hierarchy\")\n",
       "my_fns.sort()\n",
       "trajs = ParticleTrajectories(my_fns, indices)"
      ],
@@ -263,9 +265,12 @@
       "from mpl_toolkits.mplot3d import Axes3D\n",
       "fig = plt.figure(figsize=(8.0, 8.0))\n",
       "ax = fig.add_subplot(111, projection='3d')\n",
-      "ax.plot(trajs[\"particle_position_x\"][100], trajs[\"particle_position_z\"][100], trajs[\"particle_position_z\"][100])\n",
-      "ax.plot(trajs[\"particle_position_x\"][8], trajs[\"particle_position_z\"][8], trajs[\"particle_position_z\"][8])\n",
-      "ax.plot(trajs[\"particle_position_x\"][25], trajs[\"particle_position_z\"][25], trajs[\"particle_position_z\"][25])"
+      "ax.plot(trajs[\"particle_position_x\"][100].ndarray_view(), trajs[\"particle_position_z\"][100].ndarray_view(), \n",
+      "        trajs[\"particle_position_z\"][100].ndarray_view())\n",
+      "ax.plot(trajs[\"particle_position_x\"][8].ndarray_view(), trajs[\"particle_position_z\"][8].ndarray_view(), \n",
+      "        trajs[\"particle_position_z\"][8].ndarray_view())\n",
+      "ax.plot(trajs[\"particle_position_x\"][25].ndarray_view(), trajs[\"particle_position_z\"][25].ndarray_view(), \n",
+      "        trajs[\"particle_position_z\"][25].ndarray_view())"
      ],
      "language": "python",
      "metadata": {},
@@ -282,9 +287,9 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][100])\n",
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][8])\n",
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][25])"
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_position_x\"][100].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_position_x\"][8].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_position_x\"][25].ndarray_view())"
      ],
      "language": "python",
      "metadata": {},
@@ -301,7 +306,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "trajs.add_fields([\"Density\"])"
+      "trajs.add_fields([\"density\"])"
      ],
      "language": "python",
      "metadata": {},
@@ -311,17 +316,17 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "We also could have included `\"Density\"` in our original field list. Now, plot up the gas density for each particle as a function of time:"
+      "We also could have included `\"density\"` in our original field list. Now, plot up the gas density for each particle as a function of time:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"Density\"][100])\n",
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"Density\"][8])\n",
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"Density\"][25])\n",
-      "pylab.yscale(\"log\")"
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"density\"][100].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"density\"][8].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"density\"][25].ndarray_view())\n",
+      "plt.yscale(\"log\")"
      ],
      "language": "python",
      "metadata": {},
@@ -338,29 +343,12 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "trajs.write_out(\"halo_trajectories.txt\")\n",
-      "trajs.write_out_h5(\"halo_trajectories.h5\")"
+      "trajs.write_out(\"halo_trajectories\") # This will write a separate file for each trajectory\n",
+      "trajs.write_out_h5(\"halo_trajectories.h5\") # This will write all trajectories to a single file"
      ],
      "language": "python",
      "metadata": {},
      "outputs": []
-    },
-    {
-     "cell_type": "heading",
-     "level": 2,
-     "metadata": {},
-     "source": [
-      "Important Caveats"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "* Parallelization is not yet implemented.\n",
-      "* For large datasets, constructing trajectories can be very slow. We are working on optimizing the algorithm for a future release. \n",
-      "* At the moment, trajectories are limited for particles that exist in every dataset. Therefore, for codes like FLASH that allow for particles to exit the domain (and hence the simulation) for certain types of boundary conditions, you need to insure that the particles you wish to examine exist in all datasets in the time series from the beginning to the end. If this is not the case, `ParticleTrajectories` will throw an error. This is a limitation we hope to relax in a future release. "
-     ]
     }
    ],
    "metadata": {}

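A note on the recurring .ndarray_view() calls above: trajectory fields now
come back as unit-carrying YTArrays, and .ndarray_view() exposes the
underlying NumPy data as a view (no copy) that plotting code can consume
without unit-related surprises.  A minimal sketch, assuming trajs from the
notebook cells above:

    import matplotlib.pyplot as plt

    t = trajs["particle_time"].ndarray_view()           # plain ndarray view
    x = trajs["particle_position_x"][0].ndarray_view()  # first trajectory
    plt.plot(t, x)
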
diff -r 918a6162882ffc2e6e16f8493e1d098119d9ed27 -r 09362feec07c80eafd8112cf1f4dffeeca75d678 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -13,11 +13,18 @@
 from yt.data_objects.data_containers import YTFieldData
 from yt.data_objects.time_series import DatasetSeries
 from yt.utilities.lib.CICDeposit import CICSample_3
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    communication_system, parallel_root_only
 from yt.funcs import *
+from yt.units.yt_array import array_like_field
+from yt.config import ytcfg
+from collections import OrderedDict
 
 import numpy as np
 import h5py
 
+comm = communication_system.communicators[-1]
+
 class ParticleTrajectories(object):
     r"""A collection of particle trajectories in time over a series of
     parameter files. 
@@ -47,30 +54,21 @@
     >>> fields = ["particle_position_x", "particle_position_y",
     >>>           "particle_position_z", "particle_velocity_x",
     >>>           "particle_velocity_y", "particle_velocity_z"]
-    >>> pf = load(my_fns[0])
-    >>> init_sphere = pf.sphere(pf.domain_center, (.5, "unitary"))
+    >>> ds = load(my_fns[0])
+    >>> init_sphere = ds.sphere(ds.domain_center, (.5, "unitary"))
     >>> indices = init_sphere["particle_index"].astype("int")
     >>> trajs = ParticleTrajectories(my_fns, indices, fields=fields)
     >>> for t in trajs :
     >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
-
-    Notes
-    -----
-    As of this time only particle trajectories that are complete over the
-    set of specified parameter files are supported. If any particle's history
-    ends for some reason (e.g. leaving the simulation domain or being actively
-    destroyed), the whole trajectory collection of which it is a set must end
-    at or before the particle's last timestep. This is a limitation we hope to
-    lift at some point in the future.     
     """
     def __init__(self, filenames, indices, fields=None) :
 
         indices.sort() # Just in case the caller wasn't careful
-        
         self.field_data = YTFieldData()
-        self.pfs = DatasetSeries.from_filenames(filenames)
+        self.data_series = DatasetSeries.from_filenames(filenames)
         self.masks = []
         self.sorts = []
+        self.array_indices = []
         self.indices = indices
         self.num_indices = len(indices)
         self.num_steps = len(filenames)
@@ -79,54 +77,44 @@
         # Default fields 
         
         if fields is None: fields = []
+        fields.append("particle_position_x")
+        fields.append("particle_position_y")
+        fields.append("particle_position_z")
+        fields = list(OrderedDict.fromkeys(fields))
 
-        # Must ALWAYS have these fields
-        
-        fields = fields + ["particle_position_x",
-                           "particle_position_y",
-                           "particle_position_z"]
-
-        # Set up the derived field list and the particle field list
-        # so that if the requested field is a particle field, we'll
-        # just copy the field over, but if the field is a grid field,
-        # we will first interpolate the field to the particle positions
-        # and then return the field. 
-
-        pf = self.pfs[0]
-        self.derived_field_list = pf.derived_field_list
-        self.particle_fields = [field for field in self.derived_field_list
-                                if pf.field_info[field].particle_type]
-
-        """
-        The following loops through the parameter files
-        and performs two tasks. The first is to isolate
-        the particles with the correct indices, and the
-        second is to create a sorted list of these particles.
-        We also make a list of the current time from each file. 
-        Right now, the code assumes (and checks for) the
-        particle indices existing in each dataset, a limitation I
-        would like to lift at some point since some codes
-        (e.g., FLASH) destroy particles leaving the domain.
-        """
-        
-        for pf in self.pfs:
-            dd = pf.h.all_data()
-            newtags = dd["particle_index"].astype("int")
-            if not np.all(np.in1d(indices, newtags, assume_unique=True)):
-                print "Not all requested particle ids contained in this dataset!"
-                raise IndexError
+        old_level = int(ytcfg.get("yt","loglevel"))
+        mylog.setLevel(40)
+        my_storage = {}
+        pbar = get_pbar("Constructing trajectory information", len(self.data_series))
+        for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
+            dd = ds.all_data()
+            idx_field = dd._determine_fields("particle_index")[0]
+            newtags = dd[idx_field].ndarray_view().astype("int")
             mask = np.in1d(newtags, indices, assume_unique=True)
             sorts = np.argsort(newtags[mask])
-            self.masks.append(mask)            
+            self.array_indices.append(np.where(np.in1d(indices, newtags, assume_unique=True))[0])
+            self.masks.append(mask)
             self.sorts.append(sorts)
-            self.times.append(pf.current_time)
+            sto.result_id = ds.parameter_filename
+            sto.result = ds.current_time
+            pbar.update(i)
+        pbar.finish()
 
-        self.times = np.array(self.times)
+        mylog.setLevel(old_level)
 
-        # Now instantiate the requested fields 
+        times = []
+        for fn, time in sorted(my_storage.items()):
+            times.append(time)
+
+        self.times = self.data_series[0].arr([time for time in times], times[0].units)
+
+        self.particle_fields = []
+
+        # Instantiate fields the caller requested
+
         for field in fields:
             self._get_data(field)
-            
+
     def has_key(self, key):
         return (key in self.field_data)
     
@@ -135,8 +123,7 @@
 
     def __getitem__(self, key):
         """
-        Get the field associated with key,
-        checking to make sure it is a particle field.
+        Get the field associated with key.
         """
         if key == "particle_time":
             return self.times
@@ -203,33 +190,48 @@
         with shape (num_indices, num_steps)
         """
         if not self.field_data.has_key(field):
-            particles = np.empty((0))
+            old_level = int(ytcfg.get("yt","loglevel"))
+            mylog.setLevel(40)
+            dd_first = self.data_series[0].all_data()
+            fd = dd_first._determine_fields(field)[0]
+            if field not in self.particle_fields:
+                if self.data_series[0].field_info[fd].particle_type:
+                    self.particle_fields.append(field)
+            particles = np.empty((self.num_indices,self.num_steps)) * np.nan
             step = int(0)
-            for pf, mask, sort in zip(self.pfs, self.masks, self.sorts):
+            pbar = get_pbar("Generating field %s in trajectories." % (field), self.num_steps)
+            my_storage={}
+            for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
+                mask = self.masks[i]
+                sort = self.sorts[i]
                 if field in self.particle_fields:
                     # This is easy... just get the particle fields
-                    dd = pf.h.all_data()
-                    pfield = dd[field][mask]
-                    particles = np.append(particles, pfield[sort])
+                    dd = ds.all_data()
+                    pfield = dd[fd].ndarray_view()[mask][sort]
                 else:
                     # This is hard... must loop over grids
                     pfield = np.zeros((self.num_indices))
-                    x = self["particle_position_x"][:,step]
-                    y = self["particle_position_y"][:,step]
-                    z = self["particle_position_z"][:,step]
-                    particle_grids, particle_grid_inds = pf.h.find_points(x,y,z)
+                    x = self["particle_position_x"][:,step].ndarray_view()
+                    y = self["particle_position_y"][:,step].ndarray_view()
+                    z = self["particle_position_z"][:,step].ndarray_view()
+                    particle_grids, particle_grid_inds = ds.index.find_points(x,y,z)
                     for grid in particle_grids:
-                        cube = grid.retrieve_ghost_zones(1, [field])
+                        cube = grid.retrieve_ghost_zones(1, [fd])
                         CICSample_3(x,y,z,pfield,
                                     self.num_indices,
-                                    cube[field],
+                                    cube[fd],
                                     np.array(grid.LeftEdge).astype(np.float64),
                                     np.array(grid.ActiveDimensions).astype(np.int32),
-                                    np.float64(grid['dx']))
-                    particles = np.append(particles, pfield)
+                                    grid.dds[0])
+                sto.result_id = ds.parameter_filename
+                sto.result = (self.array_indices[i], pfield)
+                pbar.update(step)
                 step += 1
-            self[field] = particles.reshape(self.num_steps,
-                                            self.num_indices).transpose()
+            pbar.finish()
+            for i, (fn, (indices, pfield)) in enumerate(sorted(my_storage.items())):
+                particles[indices,i] = pfield
+            self.field_data[field] = array_like_field(dd_first, particles, fd)
+            mylog.setLevel(old_level)
         return self.field_data[field]
 
     def trajectory_from_index(self, index):
@@ -269,6 +271,7 @@
             traj[field] = self[field][mask,:][0]
         return traj
 
+    @parallel_root_only
     def write_out(self, filename_base):
         """
         Write out particle trajectories to tab-separated ASCII files (one
@@ -299,7 +302,8 @@
             fid.writelines(outlines)
             fid.close()
             del fid
-            
+
+    @parallel_root_only
     def write_out_h5(self, filename):
         """
         Write out all the particle trajectories to a single HDF5 file

diff -r 918a6162882ffc2e6e16f8493e1d098119d9ed27 -r 09362feec07c80eafd8112cf1f4dffeeca75d678 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -60,7 +60,7 @@
     def _read_particle_coords(self, chunks, ptf):
         chunks = list(chunks)
         f_part = self._particle_handle
-        p_ind = self.pf.h._particle_indices
+        p_ind = self.pf.index._particle_indices
         px, py, pz = (self._particle_fields["particle_pos%s" % ax]
                       for ax in 'xyz')
         p_fields = f_part["/tracer particles"]
@@ -79,7 +79,7 @@
     def _read_particle_fields(self, chunks, ptf, selector):
         chunks = list(chunks)
         f_part = self._particle_handle
-        p_ind = self.pf.h._particle_indices
+        p_ind = self.pf.index._particle_indices
         px, py, pz = (self._particle_fields["particle_pos%s" % ax]
                       for ax in 'xyz')
         p_fields = f_part["/tracer particles"]

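Putting the reworked module together, a hedged end-to-end sketch based on
the docstring in the diff above; the file pattern, sphere radius, and field
choice are hypothetical.  Under MPI, the piter()-based loops distribute the
datasets across processors and the @parallel_root_only writers emit output
only once:

    import glob
    from yt.mods import load
    from yt.analysis_modules.particle_trajectories.api import ParticleTrajectories

    my_fns = glob.glob("enzo_tiny_cosmology/DD*/*.hierarchy")
    my_fns.sort()
    ds = load(my_fns[0])
    init_sphere = ds.sphere(ds.domain_center, (0.5, "unitary"))
    indices = init_sphere["particle_index"].astype("int")
    trajs = ParticleTrajectories(my_fns, indices,
                                 fields=["particle_velocity_x"])
    trajs.write_out("halo_trajectories")        # one ASCII file per trajectory
    trajs.write_out_h5("halo_trajectories.h5")  # all trajectories in one file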

https://bitbucket.org/yt_analysis/yt/commits/ed5b2b5ac369/
Changeset:   ed5b2b5ac369
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-08 14:54:36
Summary:     Merge
Affected #:  30 files

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 doc/extensions/notebook_sphinxext.py
--- a/doc/extensions/notebook_sphinxext.py
+++ b/doc/extensions/notebook_sphinxext.py
@@ -15,8 +15,13 @@
     required_arguments = 1
     optional_arguments = 1
     option_spec = {'skip_exceptions' : directives.flag}
+    final_argument_whitespace = True
 
-    def run(self):
+    def run(self): # check if there are spaces in the notebook name
+        nb_path = self.arguments[0]
+        if ' ' in nb_path: raise ValueError(
+            "Due to issues with docutils stripping spaces from links, white "
+            "space is not allowed in notebook filenames '{0}'".format(nb_path))
         # check if raw html is supported
         if not self.state.document.settings.raw_enabled:
             raise self.warning('"%s" directive disabled.' % self.name)
@@ -24,10 +29,11 @@
         # get path to notebook
         source_dir = os.path.dirname(
             os.path.abspath(self.state.document.current_source))
-        nb_basename = os.path.basename(self.arguments[0])
+        nb_filename = self.arguments[0]
+        nb_basename = os.path.basename(nb_filename)
         rst_file = self.state_machine.document.attributes['source']
         rst_dir = os.path.abspath(os.path.dirname(rst_file))
-        nb_abs_path = os.path.join(rst_dir, nb_basename)
+        nb_abs_path = os.path.abspath(os.path.join(rst_dir, nb_filename))
 
         # Move files around.
         rel_dir = os.path.relpath(rst_dir, setup.confdir)
@@ -89,7 +95,6 @@
         return [nb_node]
 
 
-
 class notebook_node(nodes.raw):
     pass
 
@@ -109,6 +114,7 @@
     # http://imgur.com/eR9bMRH
     header = header.replace('<style', '<style scoped="scoped"')
     header = header.replace('body {\n  overflow: visible;\n  padding: 8px;\n}\n', '')
+    header = header.replace("code,pre{", "code{")
 
     # Filter out styles that conflict with the sphinx theme.
     filter_strings = [
@@ -120,8 +126,16 @@
     ]
     filter_strings.extend(['h%s{' % (i+1) for i in range(6)])
 
+    line_begin_strings = [
+        'pre{',
+        'p{margin'
+        ]
+
     header_lines = filter(
         lambda x: not any([s in x for s in filter_strings]), header.split('\n'))
+    header_lines = filter(
+        lambda x: not any([x.startswith(s) for s in line_begin_strings]), header_lines)
+
     header = '\n'.join(header_lines)
 
     # concatenate raw html lines

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -59,7 +59,7 @@
 master_doc = 'index'
 
 # General information about the project.
-project = u'yt'
+project = u'The yt Project'
 copyright = u'2013, the yt Project'
 
 # The version info for the project you're documenting, acts as replacement for
@@ -119,11 +119,16 @@
 # documentation.
 html_theme_options = dict(
     bootstrap_version = "3",
-    bootswatch_theme = "readable"
+    bootswatch_theme = "readable",
+    navbar_links = [
+        ("How to get help", "help/index"),
+        ("Bootcamp notebooks", "bootcamp/index"),
+        ("Cookbook", "cookbook/index"),
+        ],
+    navbar_sidebarrel = False,
+    globaltoc_depth = 2,
 )
 
-#html_style = "agogo_yt.css"
-
 # Add any paths that contain custom themes here, relative to this directory.
 #html_theme_path = []
 

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -43,7 +43,7 @@
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v2.png", clip_ratio=6.0)
 
 # This looks better.  Now let's try turning on opacity.
@@ -56,7 +56,7 @@
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=10.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v3.png", clip_ratio=6.0)
 
 # This looks pretty good, now lets go back to the full resolution AMRKDTree

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -19,8 +19,8 @@
 # Now we set some sane min/max values between which we want to find contours.
 # This is how we tell the clump finder what to look for -- it won't look for
 # contours connected below or above these threshold values.
-c_min = 10**na.floor(na.log10(data_source[field]).min()  )
-c_max = 10**na.floor(na.log10(data_source[field]).max()+1)
+c_min = 10**np.floor(np.log10(data_source[field]).min()  )
+c_max = 10**np.floor(np.log10(data_source[field]).max()+1)
 
 # keep only clumps with at least 20 cells
 function = 'self.data[\'%s\'].size > 20' % field

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 doc/source/cookbook/multi_plot_slice_and_proj.py
--- a/doc/source/cookbook/multi_plot_slice_and_proj.py
+++ b/doc/source/cookbook/multi_plot_slice_and_proj.py
@@ -1,4 +1,5 @@
 from yt.mods import * # set up our namespace
+from yt.visualization.base_plot_types import get_multi_plot
 import matplotlib.colorbar as cb
 from matplotlib.colors import LogNorm
 
@@ -18,7 +19,7 @@
 
 slc = pf.slice(2, 0.0, fields=["density","temperature","velocity_magnitude"], 
                  center=pf.domain_center)
-proj = pf.proj(2, "density", weight_field="density", center=pf.domain_center)
+proj = pf.proj("density", 2, weight_field="density", center=pf.domain_center)
 
 slc_frb = slc.to_frb((1.0, "mpc"), 512)
 proj_frb = proj.to_frb((1.0, "mpc"), 512)

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 doc/source/cookbook/offaxis_projection.py
--- a/doc/source/cookbook/offaxis_projection.py
+++ b/doc/source/cookbook/offaxis_projection.py
@@ -31,4 +31,4 @@
 # relating to what our dataset is called.
 # We save the log of the values so that the colors do not span
 # many orders of magnitude.  Try it without and see what happens.
-write_image(na.log10(image), "%s_offaxis_projection.png" % pf)
+write_image(np.log10(image), "%s_offaxis_projection.png" % pf)

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -21,13 +21,13 @@
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5], colormap = 'RdBu_r')
 cam.snapshot("v1.png", clip_ratio=6.0)
 
-# In this case, the default alphas used (na.logspace(-3,0,Nbins)) does not
+# In this case, the default alphas used (np.logspace(-3,0,Nbins)) does not
 # accentuate the outer regions of the galaxy. Let's start by bringing up the
 # alpha values for each contour to go between 0.1 and 1.0
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=na.logspace(0,0,4), colormap = 'RdBu_r')
+        alpha=np.logspace(0,0,4), colormap = 'RdBu_r')
 cam.snapshot("v2.png", clip_ratio=6.0)
 
 # Now let's set the grey_opacity to True.  This should make the inner portions
@@ -40,14 +40,14 @@
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=10.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v4.png", clip_ratio=6.0)
 
 # Let's bump up again to see if we can obscure the inner contour.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=30.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=30.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v5.png", clip_ratio=6.0)
 
 # Now we are losing sight of everything.  Let's see if we can obscure the next
@@ -55,7 +55,7 @@
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=100.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=100.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v6.png", clip_ratio=6.0)
 
 # That is very opaque!  Now lets go back and see what it would look like with

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -12,7 +12,7 @@
 
 # Create a transfer function to map field values to colors.
 # We bump up our minimum to cut out some of the background fluid
-tf = ColorTransferFunction((na.log10(mi)+2.0, na.log10(ma)))
+tf = ColorTransferFunction((np.log10(mi)+2.0, np.log10(ma)))
 
 # Add three guassians, evenly spaced between the min and
 # max specified above with widths of 0.02 and using the

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 doc/source/cookbook/save_profiles.py
--- a/doc/source/cookbook/save_profiles.py
+++ b/doc/source/cookbook/save_profiles.py
@@ -33,7 +33,7 @@
 # separate columns into separate NumPy arrays, it is essential to set unpack=True.
 
 r, dens, std_dens, temp, std_temp = \
-	na.loadtxt("sloshing_nomag2_hdf5_plt_cnt_0150_profile.dat", unpack=True)
+	np.loadtxt("sloshing_nomag2_hdf5_plt_cnt_0150_profile.dat", unpack=True)
 
 fig1 = plt.figure()
 

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 doc/source/cookbook/simple_slice_matplotlib_example.py
--- a/doc/source/cookbook/simple_slice_matplotlib_example.py
+++ b/doc/source/cookbook/simple_slice_matplotlib_example.py
@@ -21,7 +21,7 @@
 rect = (0.2,0.2,0.2,0.2)
 new_ax = fig.add_axes(rect)
 
-n, bins, patches = new_ax.hist(na.random.randn(1000)+20, 50,
+n, bins, patches = new_ax.hist(np.random.randn(1000)+20, 50,
     facecolor='yellow', edgecolor='yellow')
 new_ax.set_xlabel('Dinosaurs per furlong')
 

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -379,7 +379,7 @@
    something_else``.  Python is more forgiving than C.
  * Avoid copying memory when possible. For example, don't do ``a =
    a.reshape(3,4)`` when ``a.shape = (3,4)`` will do, and ``a = a * 3`` should be
-   ``na.multiply(a, 3, a)``.
+   ``np.multiply(a, 3, a)``.
  * In general, avoid all double-underscore method names: ``__something`` is
    usually unnecessary.
  * Doc strings should describe input, output, behavior, and any state changes

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
--- a/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
+++ b/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
@@ -1,6 +1,7 @@
 {
  "metadata": {
-  "name": ""
+  "name": "",
+  "signature": "sha256:d75e416150ccb017cfdf89973f8d4463e780da4d9bdc9a3783001d22021d9081"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -154,7 +155,7 @@
       "Npixels = 512 \n",
       "cam = pf.h.camera(c, L, W, Npixels, tfh.tf, fields=['temperature'],\n",
       "                  north_vector=[1.,0.,0.], steady_north=True, \n",
-      "                  sub_samples=5, no_ghost=False, l_max=0)\n",
+      "                  sub_samples=5, no_ghost=False)\n",
       "\n",
       "# Here we substitute the TransferFunction we constructed earlier.\n",
       "cam.transfer_function = tfh.tf\n",

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -32,8 +32,8 @@
    data_source = pf.disk([0.5, 0.5, 0.5], [0., 0., 1.],
                            (8., 'kpc'), (1., 'kpc'))
 
-   c_min = 10**na.floor(na.log10(data_source['density']).min()  )
-   c_max = 10**na.floor(na.log10(data_source['density']).max()+1)
+   c_min = 10**np.floor(np.log10(data_source['density']).min()  )
+   c_max = 10**np.floor(np.log10(data_source['density']).max()+1)
 
    function = 'self.data[\'Density\'].size > 20'
    master_clump = Clump(data_source, None, 'density', function=function)

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -254,7 +254,7 @@
    c = [0.5, 0.5, 0.5]
    N = 512
    image = off_axis_projection(pf, c, L, W, N, "density")
-   write_image(na.log10(image), "%s_offaxis_projection.png" % pf)
+   write_image(np.log10(image), "%s_offaxis_projection.png" % pf)
 
 Here, ``W`` is the width of the projection in the x, y, *and* z
 directions.

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -793,8 +793,14 @@
 
         if (self.pf.geometry == "cylindrical" and self.axis == 1) or \
             (self.pf.geometry == "polar" and self.axis == 2):
+            if center is not None and center != (0.0, 0.0):
+                raise NotImplementedError(
+                    "Currently we only support images centered at R=0. " +
+                    "We plan to generalize this in the near future")
             from yt.visualization.fixed_resolution import CylindricalFixedResolutionBuffer
-            frb = CylindricalFixedResolutionBuffer(self, width, resolution)
+            if iterable(width): radius = max(width)
+            if iterable(resolution): resolution = max(resolution)
+            frb = CylindricalFixedResolutionBuffer(self, radius, resolution)
             return frb
 
         if center is None:

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -160,15 +160,14 @@
     coord : float
         The coordinate along the axis at which to slice.  This is in
         "domain" coordinates.
-    fields : list of strings, optional
-        If you want the object to pre-retrieve a set of fields, supply them
-        here.  This is not necessary.
     center : array_like, optional
         The 'center' supplied to fields that use it.  Note that this does
         not have to have `coord` as one value.  Strictly optional.
-    node_name: string, optional
-        The node in the .yt file to find or store this slice at.  Should
-        probably not be used.
+    pf: Dataset, optional
+        An optional dataset to use rather than self.pf
+    field_parameters : dictionary
+         A dictionary of field parameters than can be accessed by derived
+         fields.
     kwargs : dict of items
         Any additional values are passed as field parameters that can be
         accessed by generated fields.

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -161,7 +161,7 @@
         mask = self._get_selector_mask(selector)
         count = self.count(selector)
         if count == 0: return 0
-        dest[offset:offset+count] = source[mask]
+        dest[offset:offset+count] = source.flat[mask]
         return count
 
     def count(self, selector):

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -191,7 +191,7 @@
         """
         Return a data label for the given field, including units.
         """
-        name = self.name
+        name = self.name[1]
         if self.display_name is not None:
             name = self.display_name
 
@@ -202,7 +202,7 @@
         if projected:
             raise NotImplementedError
         else:
-            units = self.units
+            units = Unit(self.units)
         # Add unit label
         if not units.is_dimensionless:
             data_label += r"\/\/ (%s)" % (units)

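The switch from self.name to self.name[1] reflects that yt-3.0 field names
are (field type, field name) tuples, so only the second element belongs in
an axis label, and Unit(self.units) parses the stored unit string into a
Unit object so checks like is_dimensionless work.  A tiny sketch with a
hypothetical field:

    from yt.units.unit_object import Unit

    name = ("gas", "density")      # yt-3.0 field names are 2-tuples
    label = name[1]                # only "density" goes into the label
    units = Unit("g/cm**3")        # parsed Unit object, not a bare string
    print(units.is_dimensionless)  # -> False
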
diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -133,12 +133,6 @@
                        particle_type = True,
                        units = "")
 
-    registry.alias((ptype, "ParticleMass"), (ptype, mass_name),
-                    units = "g")
-
-    registry.alias((ptype, "ParticleMassMsun"), (ptype, mass_name),
-                    units = "Msun")
-
     def particle_mesh_ids(field, data):
         pos = data[ptype, coord_name]
         ids = np.zeros(pos.shape[0], dtype="float64") - 1

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -43,6 +43,8 @@
     gadget_header_specs, \
     gadget_field_specs, \
     gadget_ptype_specs
+from .io import \
+    IOHandlerTipsyBinary
 
 try:
     import requests
@@ -384,7 +386,9 @@
                  n_ref=64, over_refine_factor=1):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
-        success, self.endian = self._validate_header(filename)
+        if field_dtypes is None:
+            field_dtypes = {}
+        success, self.endian = self._validate_header(filename, field_dtypes)
         if not success:
             print "SOMETHING HAS GONE WRONG.  NBODIES != SUM PARTICLES."
             print "%s != (%s == %s + %s + %s)" % (
@@ -400,8 +404,6 @@
 
         # My understanding is that dtypes are set on a field by field basis,
         # not on a (particle type, field) basis
-        if field_dtypes is None:
-            field_dtypes = {}
         self._field_dtypes = field_dtypes
 
         self._unit_base = unit_base or {}
@@ -520,7 +522,7 @@
         self.time_unit = 1.0 / np.sqrt(G * density_unit)
 
     @staticmethod
-    def _validate_header(filename):
+    def _validate_header(filename, field_dtypes):
         '''
         This method automatically detects whether the tipsy file is big/little endian
         and is not corrupt/invalid.  It returns a tuple of (Valid, endianswap) where
@@ -544,11 +546,16 @@
             endianswap = ">"
             f.seek(0)
             t, n, ndim, ng, nd, ns = struct.unpack(">diiiii", f.read(28))
+        # Now we construct the sizes of each of the particles.
+        dtypes = IOHandlerTipsyBinary._compute_dtypes(field_dtypes, endianswap)
         #Catch for 4 byte padding
-        if (fs == 32+48*ng+36*nd+44*ns):
+        gas_size = dtypes["Gas"].itemsize
+        dm_size = dtypes["DarkMatter"].itemsize
+        star_size = dtypes["Stars"].itemsize
+        if (fs == 32+gas_size*ng+dm_size*nd+star_size*ns):
             f.read(4)
         #File is borked if this is true
-        elif (fs != 28+48*ng+36*nd+44*ns):
+        elif (fs != 28+gas_size*ng+dm_size*nd+star_size*ns):
             f.close()
             return False, 0
         f.close()
@@ -556,7 +563,8 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        return TipsyDataset._validate_header(args[0])[0]
+        field_dtypes = kwargs.get("field_dtypes", {})
+        return TipsyDataset._validate_header(args[0], field_dtypes)[0]
 
 class HTTPParticleFile(ParticleFile):
     pass

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -392,7 +392,7 @@
                 "DarkMatter",
                 "Stars" )
 
-    _aux_fields = []
+    _aux_fields = None
     _fields = ( ("Gas", "Mass"),
                 ("Gas", "Coordinates"),
                 ("Gas", "Velocities"),
@@ -415,6 +415,10 @@
                 ("Stars", "Phi")
               )
 
+    def __init__(self, *args, **kwargs):
+        self._aux_fields = []
+        super(IOHandlerTipsyBinary, self).__init__(*args, **kwargs)
+
     def _read_fluid_selection(self, chunks, selector, fields, size):
         raise NotImplementedError
 
@@ -623,6 +627,22 @@
         }
         return npart
 
+    @classmethod
+    def _compute_dtypes(cls, field_dtypes, endian = "<"):
+        pds = {}
+        for ptype, field in cls._fields:
+            dtbase = field_dtypes.get(field, 'f')
+            ff = "%s%s" % (endian, dtbase)
+            if field in cls._vector_fields:
+                dt = (field, [('x', ff), ('y', ff), ('z', ff)])
+            else:
+                dt = (field, ff)
+            pds.setdefault(ptype, []).append(dt)
+        pdtypes = {}
+        for ptype in pds:
+            pdtypes[ptype] = np.dtype(pds[ptype])
+        return pdtypes
+
     def _create_dtypes(self, data_file):
         # We can just look at the particle counts.
         self._header_offset = data_file.pf._header_offset
@@ -632,19 +652,14 @@
         tp = data_file.total_particles
         aux_filenames = glob.glob(data_file.filename+'.*') # Find out which auxiliaries we have
         self._aux_fields = [f[1+len(data_file.filename):] for f in aux_filenames]
+        self._pdtypes = self._compute_dtypes(data_file.pf._field_dtypes,
+                                             data_file.pf.endian)
         for ptype, field in self._fields:
-            pfields = []
-            if tp[ptype] == 0: continue
-            dtbase = data_file.pf._field_dtypes.get(field, 'f')
-            ff = "%s%s" % (data_file.pf.endian, dtbase)
-            if field in self._vector_fields:
-                dt = (field, [('x', ff), ('y', ff), ('z', ff)])
-            else:
-                dt = (field, ff)
-            pds.setdefault(ptype, []).append(dt)
+            if tp[ptype] == 0:
+                # We do not want our _pdtypes to include empty particle types.
+                self._pdtypes.pop(ptype, None)
+                continue
             field_list.append((ptype, field))
-        for ptype in pds:
-            self._pdtypes[ptype] = np.dtype(pds[ptype])
         if any(["Gas"==f[0] for f in field_list]): #Add the auxiliary fields to each ptype we have
             field_list += [("Gas",a) for a in self._aux_fields] 
         if any(["DarkMatter"==f[0] for f in field_list]):

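To make the dtype refactor concrete: _compute_dtypes builds one NumPy
structured dtype per particle type, nesting each vector field as an
(x, y, z) record.  A sketch restricted to the Gas fields visible in the
diff above (the real _fields list is longer, which is how the full Gas
record reaches the 48 bytes the old header check hard-coded):

    import numpy as np

    ff = "<f"  # little-endian float32, the default base type
    gas_dtype = np.dtype([
        ("Mass", ff),
        ("Coordinates", [("x", ff), ("y", ff), ("z", ff)]),
        ("Velocities", [("x", ff), ("y", ff), ("z", ff)]),
    ])
    print(gas_dtype.itemsize)  # 28 bytes for just these three fields
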
diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -22,6 +22,7 @@
       load_amr_grids, \
       load_particles, \
       load_hexahedral_mesh, \
+      hexahedral_connectivity, \
       load_octree, \
       refine_amr
 

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -292,12 +292,14 @@
     _field_info_class = StreamFieldInfo
     _dataset_type = 'stream'
 
-    def __init__(self, stream_handler, storage_filename = None):
+    def __init__(self, stream_handler, storage_filename = None,
+                 geometry = "cartesian"):
         #if parameter_override is None: parameter_override = {}
         #self._parameter_override = parameter_override
         #if conversion_override is None: conversion_override = {}
         #self._conversion_override = conversion_override
 
+        self.geometry = geometry
         self.stream_handler = stream_handler
         name = "InMemoryParameterFile_%s" % (uuid.uuid4().hex)
         from yt.data_objects.static_output import _cached_pfs
@@ -510,7 +512,8 @@
 
 def load_uniform_grid(data, domain_dimensions, length_unit=None, bbox=None,
                       nprocs=1, sim_time=0.0, mass_unit=None, time_unit=None,
-                      velocity_unit=None, periodicity=(True, True, True)):
+                      velocity_unit=None, periodicity=(True, True, True),
+                      geometry = "cartesian"):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -550,6 +553,8 @@
     periodicity : tuple of booleans
         Determines whether the data will be treated as periodic along
         each axis
+    geometry : string
+        "cartesian", "cylindrical" or "polar"
 
     Examples
     --------
@@ -658,7 +663,7 @@
     handler.simulation_time = sim_time
     handler.cosmology_simulation = 0
 
-    spf = StreamDataset(handler)
+    spf = StreamDataset(handler, geometry = geometry)
 
     # Now figure out where the particles go
     if number_of_particles > 0 :
@@ -678,7 +683,7 @@
 def load_amr_grids(grid_data, domain_dimensions,
                    field_units=None, bbox=None, sim_time=0.0, length_unit=None,
                    mass_unit=None, time_unit=None, velocity_unit=None,
-                   periodicity=(True, True, True)):
+                   periodicity=(True, True, True), geometry = "cartesian"):
     r"""Load a set of grids of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
     This should allow a sequence of grids of varying resolution of data to be
@@ -721,6 +726,8 @@
     periodicity : tuple of booleans
         Determines whether the data will be treated as periodic along
         each axis
+    geometry : string
+        "cartesian", "cylindrical" or "polar"
 
     Examples
     --------
@@ -817,7 +824,7 @@
     handler.simulation_time = sim_time
     handler.cosmology_simulation = 0
 
-    spf = StreamDataset(handler)
+    spf = StreamDataset(handler, geometry = geometry)
     return spf
 
 def refine_amr(base_pf, refinement_criteria, fluid_operators, max_level,
@@ -1123,7 +1130,8 @@
 def load_hexahedral_mesh(data, connectivity, coordinates,
                          length_unit = None, bbox=None, sim_time=0.0,
                          mass_unit = None, time_unit = None,
-                         velocity_unit = None, periodicity=(True, True, True)):
+                         velocity_unit = None, periodicity=(True, True, True),
+                         geometry = "cartesian"):
     r"""Load a hexahedral mesh of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -1157,6 +1165,8 @@
     periodicity : tuple of booleans
         Determines whether the data will be treated as periodic along
         each axis
+    geometry : string
+        "cartesian", "cylindrical" or "polar"
 
     """
 
@@ -1214,7 +1224,7 @@
     handler.simulation_time = sim_time
     handler.cosmology_simulation = 0
 
-    spf = StreamHexahedralDataset(handler)
+    spf = StreamHexahedralDataset(handler, geometry = geometry)
 
     return spf
 
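A quick sketch of the new geometry keyword threaded through the stream
loaders above; the data values are hypothetical, and non-cartesian support
is limited to what the coordinate-handler and plot-window changes in this
merge provide:

    import numpy as np
    from yt.frontends.stream.api import load_uniform_grid

    data = dict(density=np.random.random((16, 16, 16)))
    pf = load_uniform_grid(data, np.array([16, 16, 16]),
                           geometry="cylindrical")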

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 yt/geometry/cartesian_coordinates.py
--- a/yt/geometry/cartesian_coordinates.py
+++ b/yt/geometry/cartesian_coordinates.py
@@ -19,6 +19,7 @@
     CoordinateHandler, \
     _unknown_coord, \
     _get_coord_fields
+import yt.visualization._MPL as _MPL
 
 class CartesianCoordinateHandler(CoordinateHandler):
 
@@ -46,19 +47,27 @@
              ("index", "x"), ("index", "y"), ("index", "z"),
              ("index", "cell_volume")])
 
-    def pixelize(self, dimension, data_source, field, bounds, size, antialias = True):
+    def pixelize(self, dimension, data_source, field, bounds, size,
+                 antialias = True, periodic = True):
         if dimension < 3:
-            return self._ortho_pixelize(data_source, field, bounds, size, antialias)
+            return self._ortho_pixelize(data_source, field, bounds, size,
+                                        antialias, dimension, periodic)
         else:
-            return self._oblique_pixelize(data_source, field, bounds, size, antialias)
+            return self._oblique_pixelize(data_source, field, bounds, size,
+                                          antialias)
 
-    def _ortho_pixelize(self, data_source, field, bounds, size, antialias):
+    def _ortho_pixelize(self, data_source, field, bounds, size, antialias,
+                        dim, periodic):
         # We should be using fcoords
+        period = self.period[:2].copy() # dummy here
+        period[0] = self.period[self.x_axis[dim]]
+        period[1] = self.period[self.y_axis[dim]]
+        period = period.in_units("code_length").d
         buff = _MPL.Pixelize(data_source['px'], data_source['py'],
                              data_source['pdx'], data_source['pdy'],
                              data_source[field], size[0], size[1],
                              bounds, int(antialias),
-                             True, self.period).transpose()
+                             period, int(periodic)).transpose()
         return buff
 
     def _oblique_pixelize(self, data_source, field, bounds, size, antialias):

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 yt/geometry/cylindrical_coordinates.py
--- a/yt/geometry/cylindrical_coordinates.py
+++ b/yt/geometry/cylindrical_coordinates.py
@@ -20,6 +20,7 @@
     CoordinateHandler, \
     _unknown_coord, \
     _get_coord_fields
+import yt.visualization._MPL as _MPL
 #
 # Cylindrical fields
 #
@@ -92,9 +93,9 @@
 
     def _cyl_pixelize(self, data_source, field, bounds, size, antialias):
         buff = pixelize_cylinder(data_source['r'],
-                                 data_source['dr']/2.0,
+                                 data_source['dr'],
                                  data_source['theta'],
-                                 data_source['dtheta']/2.0,
+                                 data_source['dtheta'],
                                  size[0], data_source[field], bounds[0])
         return buff
 

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 yt/geometry/polar_coordinates.py
--- a/yt/geometry/polar_coordinates.py
+++ b/yt/geometry/polar_coordinates.py
@@ -19,6 +19,8 @@
     CoordinateHandler, \
     _unknown_coord, \
     _get_coord_fields
+from yt.utilities.lib.misc_utilities import \
+    pixelize_cylinder
 
 class PolarCoordinateHandler(CoordinateHandler):
 
@@ -68,7 +70,7 @@
             return self._ortho_pixelize(data_source, field, bounds, size,
                                         antialias)
         elif ax_name == "z":
-            return self._cyl_pixelize(data_source, field, bounds, size,
+            return self._polar_pixelize(data_source, field, bounds, size,
                                         antialias)
         else:
             # Pixelizing along a cylindrical surface is a bit tricky
@@ -84,11 +86,13 @@
         return buff
 
     def _polar_pixelize(self, data_source, field, bounds, size, antialias):
+        # Our bounds here will *always* be what the plot window thinks are x0, x1,
+        # y0, y1, but which will actually be rmin, rmax, thetamin, thetamax.
         buff = pixelize_cylinder(data_source['r'],
-                                 data_source['dr']/2.0,
+                                 data_source['dr'],
                                  data_source['theta'],
-                                 data_source['dtheta']/2.0,
-                                 size[0], data_source[field], bounds[0])
+                                 data_source['dtheta'],
+                                 size[0], data_source[field], bounds[1])
         return buff
 
     axis_name = { 0  : 'r',  1  : 'theta',  2  : 'z',

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -124,15 +124,11 @@
             if hasattr(b, "in_units"):
                 b = float(b.in_units("code_length"))
             bounds.append(b)
-        buff = _MPL.Pixelize(self.data_source['px'],
-                             self.data_source['py'],
-                             self.data_source['pdx'],
-                             self.data_source['pdy'],
-                             self.data_source[item],
-                             self.buff_size[0], self.buff_size[1],
-                             bounds, int(self.antialias),
-                             self._period, int(self.periodic),
-                             ).transpose()
+        buff = self.pf.coordinates.pixelize(self.data_source.axis,
+            self.data_source, item, bounds, self.buff_size,
+            int(self.antialias))
+        # Need to add _period and self.periodic
+        # self._period, int(self.periodic)
         ia = ImageArray(buff, input_units=self.data_source[item].units,
                         info=self._get_info(item))
         self.data[item] = ia

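The change above routes all pixelization through the dataset's coordinate
handler rather than hard-coding the Cartesian _MPL.Pixelize call, so each
geometry can supply its own pixelizer. A rough stand-in for the dispatch
pattern (the base class name follows the imports in the diffs above; the
body is an assumption, not yt's actual implementation):

    class CoordinateHandler(object):
        # Each geometry overrides this; see _ortho_pixelize, _cyl_pixelize
        # and _polar_pixelize in the diffs above.
        def pixelize(self, dimension, data_source, field, bounds, size,
                     antialias):
            raise NotImplementedError

    # FixedResolutionBuffer then only needs:
    #   buff = self.pf.coordinates.pixelize(self.data_source.axis,
    #       self.data_source, item, bounds, self.buff_size,
    #       int(self.antialias))
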
diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -1087,9 +1087,9 @@
     *width* along the line of sight.  *p_size* controls the number of
     pixels per particle, and *col* governs the color.  *ptype* will
     restrict plotted particles to only those that are of a given type.
-    *minimum_mass* will require that the particles be of a given mass,
-    calculated via ParticleMassMsun, to be plotted. *alpha* determines
-    each particle's opacity.
+    Particles with masses below *minimum_mass* will not be plotted.
+    *alpha* determines the opacity of the marker symbol used in the scatter
+    plot.
     """
     _type_name = "particles"
     region = None

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -161,8 +161,12 @@
     return center
 
 def get_window_parameters(axis, center, width, pf):
-    width = get_sanitized_width(axis, width, None, pf)
-    center = get_sanitized_center(center, pf)
+    if pf.geometry == "cartesian":
+        width = get_sanitized_width(axis, width, None, pf)
+        center = get_sanitized_center(center, pf)
+    elif pf.geometry in ("polar", "cylindrical"):
+        width = [pf.domain_right_edge[0]*2.0, pf.domain_right_edge[0]*2.0]
+        center = pf.arr([0.0, 0.0, 0.0], "code_length")
     bounds = (center[x_dict[axis]]-width[0] / 2,
               center[x_dict[axis]]+width[0] / 2,
               center[y_dict[axis]]-width[1] / 2,

diff -r 09362feec07c80eafd8112cf1f4dffeeca75d678 -r ed5b2b5ac3696a84ada3baf546800c32792ed231 yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -65,7 +65,7 @@
             in the dataset.  This can be slow for very large datasets.
         """
         if bounds is None:
-            bounds = self.pf.h.all_data().quantities['Extrema'](self.field)[0]
+            bounds = self.pf.all_data().quantities.extrema(self.field)
         self.bounds = bounds
 
         # Do some error checking.
@@ -98,7 +98,7 @@
             Sets whether the transfer function should use log or linear space.
         """
         self.log = log
-        self.pf.h
+        self.pf.index
         self.pf._get_field_info(self.field).take_log = log
 
     def build_transfer_function(self):
@@ -201,7 +201,7 @@
     def setup_profile(self, profile_field=None, profile_weight=None):
         if profile_field is None:
             profile_field = 'CellVolume'
-        prof = BinnedProfile1D(self.pf.h.all_data(), 128, self.field,
+        prof = BinnedProfile1D(self.pf.all_data(), 128, self.field,
                                self.bounds[0], self.bounds[1],
                                log_space=self.log,
                                end_collect=False)


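A hedged usage sketch of the yt-3.0 idioms these fixes move to, assuming the
yt-3.0 top-level interface (the file and field names are placeholders):
derived quantities are called as attributes, and the index is reached through
pf.index rather than the old pf.h shorthand.

    import yt  # assuming the yt-3.0 package layout

    pf = yt.load("mydata.h5")                             # placeholder file
    mi, ma = pf.all_data().quantities.extrema("density")  # attribute-style
    pf.index                                              # replaces pf.h
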
https://bitbucket.org/yt_analysis/yt/commits/70bb88b56299/
Changeset:   70bb88b56299
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-08 19:22:50
Summary:     You should always be able to replace the WCS by hand
Affected #:  1 file

diff -r ed5b2b5ac3696a84ada3baf546800c32792ed231 -r 70bb88b5629928fbfc5eb4464d62bc00be75a01d yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -118,16 +118,14 @@
                 center = [0.0]*self.dimensionality
 
         if scale is None:
-            if units == "deg" or not has_coords:
+            if (units == "deg" or not has_coords) and wcs is None:
                 mylog.error("Please specify scale=(dx,dy[,dz]) in %s." % (units))
                 raise ValueError
 
-        w = pywcs.WCS(header=self[0].header, naxis=self.dimensionality)
-        w.wcs.crpix = 0.5*(np.array(self.shape)+1)
-
-        proj_type = ["linear"]*self.dimensionality
-
         if wcs is None:
+            w = pywcs.WCS(header=self[0].header, naxis=self.dimensionality)
+            w.wcs.crpix = 0.5*(np.array(self.shape)+1)
+            proj_type = ["linear"]*self.dimensionality
             if isinstance(img_data, FixedResolutionBuffer) and units != "deg":
                 # FRBs are a special case where we have coordinate
                 # information, so we take advantage of this and


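A sketch of what the reordering above permits, following the
FITSImageBuffer(data, fields=..., wcs=...) call pattern used elsewhere in
this series; the image data and WCS values here are synthetic placeholders:

    import numpy as np
    import astropy.wcs as pywcs
    from yt.utilities.fits_image import FITSImageBuffer

    w = pywcs.WCS(naxis=2)
    w.wcs.crpix = [128.5, 128.5]
    w.wcs.cdelt = [-2.8e-4, 2.8e-4]          # degrees per pixel
    w.wcs.crval = [30.0, 45.0]
    w.wcs.ctype = ["RA---TAN", "DEC--TAN"]

    data = {"flux": np.random.random((256, 256))}
    fib = FITSImageBuffer(data, fields=["flux"], wcs=w)  # hand-built WCS kept
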
https://bitbucket.org/yt_analysis/yt/commits/2e0d50e397f9/
Changeset:   2e0d50e397f9
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-08 19:23:12
Summary:     Adding a "beam" unit for radio data. For now it will be dimensionless.
Affected #:  1 file

diff -r 70bb88b5629928fbfc5eb4464d62bc00be75a01d -r 2e0d50e397f991d2e3a55611338f9e727314008f yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -98,7 +98,8 @@
     "amu": (amu_grams, dimensions.mass),
     "me": (mass_electron_grams, dimensions.mass),
     "angstrom": (cm_per_ang, dimensions.length),
-    "Jy": (jansky_cgs, dimensions.specific_flux)
+    "Jy": (jansky_cgs, dimensions.specific_flux),
+    "beam": (1.0, dimensions.dimensionless)
 
 }
 


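A minimal sketch of what the dimensionless "beam" entry allows, using yt's
quantity container (the YTQuantity import location is assumed from the
yt-3.0 units layout): radio surface-brightness units such as Jy/beam now
parse, though "beam" carries no physical dimensions yet.

    from yt.units.yt_array import YTQuantity  # assumed yt-3.0 location

    s_b = YTQuantity(2.5e-3, "Jy/beam")
    # "beam" is dimensionless for now, so Jy/beam has the dimensions of Jy
    print(s_b.units.dimensions)
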
https://bitbucket.org/yt_analysis/yt/commits/ebdb62fccbda/
Changeset:   ebdb62fccbda
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-08 19:24:09
Summary:     Removing some performance bottlenecks related to accessing data in the FITS file, and creating a FITSXYVDataset class.
Affected #:  3 files

diff -r 2e0d50e397f991d2e3a55611338f9e727314008f -r ebdb62fccbda944e9743e53875b25e4bc337662e yt/frontends/fits/api.py
--- a/yt/frontends/fits/api.py
+++ b/yt/frontends/fits/api.py
@@ -13,10 +13,12 @@
 from .data_structures import \
       FITSGrid, \
       FITSHierarchy, \
-      FITSDataset
+      FITSDataset, \
+      FITSXYVDataset
 
 from .fields import \
       FITSFieldInfo
 
 from .io import \
-      IOHandlerFITS
+      IOHandlerFITS, \
+      IOHandlerXYVFITS

diff -r 2e0d50e397f991d2e3a55611338f9e727314008f -r ebdb62fccbda944e9743e53875b25e4bc337662e yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -13,6 +13,7 @@
 import stat
 import types
 import numpy as np
+import numpy.core.defchararray as np_char
 import weakref
 import warnings
 
@@ -77,6 +78,9 @@
 angle_units = ["deg","arcsec","arcmin","mas"]
 all_units = angle_units + mpc_conversion.keys()
 
+known_units = {"k":"K",
+               "jy":"Jy"}
+
 class FITSGrid(AMRGridPatch):
     _id_offset = 0
     def __init__(self, id, index, level):
@@ -107,11 +111,33 @@
     def _initialize_data_storage(self):
         pass
 
+    def _detect_image_units(self, fname, header):
+        try:
+            field_units = header["bunit"].lower().strip(" ")
+            # FITS units always return upper-case, so we need to get
+            # the right case by comparing against known units
+            for name in known_units:
+                if field_units.find(name) > -1:
+                    field_units = field_units.replace(name, known_units[name])
+            self.parameter_file.field_units[fname] = field_units
+        except:
+            pass
+
     def _detect_output_fields(self):
         self.field_list = []
+        self._field_map = {}
         for h in self._handle[self.parameter_file.first_image:]:
-            if h.is_image:
-                self.field_list.append(("fits", h.name.lower()))
+            if h.header["naxis"] >= 2:
+                if self.parameter_file.four_dims:
+                    for idx in range(h.header["naxis4"]):
+                        fname = h.name.lower()+"_%d" % (idx)
+                        self._field_map[fname] = idx
+                        self.field_list.append(("fits", fname))
+                        self._detect_image_units(fname, h.header)
+                else:
+                    fname = h.name.lower()
+                    self.field_list.append(("fits", fname))
+                    self._detect_image_units(fname, h.header)
 
     def _count_grids(self):
         self.num_grids = self.pf.nprocs
@@ -170,31 +196,20 @@
     _handle = None
 
     def __init__(self, filename, dataset_type='fits',
-                 primary_header = None,
-                 sky_conversion = None,
                  storage_filename = None,
                  mask_nans = True,
                  nprocs=1):
         self.fluid_types += ("fits",)
         self.mask_nans = mask_nans
         self.nprocs = nprocs
-        if isinstance(filename, ap.pyfits.HDUList):
-            self._handle = filename
-            fname = filename.filename()
-        else:
-            self._handle = ap.pyfits.open(filename)
-            fname = filename
+        self._handle = ap.pyfits.open(filename)
         for i, h in enumerate(self._handle):
-            if h.is_image and h.data is not None:
+            if h.header["naxis"] >= 2:
                 self.first_image = i
                 break
 
-        if primary_header is None:
-            self.primary_header = self._handle[self.first_image].header
-        else:
-            self.primary_header = primary_header
+        self.primary_header = self._handle[self.first_image].header
         self.shape = self._handle[self.first_image].shape
-
         self.wcs = ap.pywcs.WCS(header=self.primary_header)
 
         self.file_unit = None
@@ -205,22 +220,16 @@
                 break
         self.new_unit = None
         self.pixel_scale = 1.0
-        if self.file_unit in angle_units:
-            if sky_conversion is not None:
-                self.new_unit = sky_conversion[1]
-                self.pixel_scale = np.abs(self.wcs.wcs.cdelt[idx])*sky_conversion[0]
-        elif self.file_unit in mpc_conversion:
+        if self.file_unit in mpc_conversion:
             self.new_unit = self.file_unit
             self.pixel_scale = self.wcs.wcs.cdelt[idx]
 
         self.refine_by = 2
+        self.four_dims = False
 
-        Dataset.__init__(self, fname, dataset_type)
+        Dataset.__init__(self, filename, dataset_type)
         self.storage_filename = storage_filename
 
-        # For plotting to APLpy
-        self.hdu_list = self._handle
-
     def _set_code_unit_attributes(self):
         """
         Generates the conversion to various physical _units based on the parameter file
@@ -287,20 +296,101 @@
             if ext.upper() not in ("FITS", "FTS"):
                 return False
         try:
-            if args[0].__class__.__name__ == "HDUList":
-                for h in args[0]:
-                    if h.is_image and h.data is not None:
-                        return True
+            with warnings.catch_warnings():
+                warnings.filterwarnings('ignore', category=UserWarning, append=True)
+                fileh = ap.pyfits.open(args[0])
+            for h in fileh:
+                if h.header["naxis"] >= 2:
+                    axes_names = [h.header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
+                    a = np_char.startswith(axes_names, "RA")
+                    b = np_char.startswith(axes_names, "DEC")
+                    c = np_char.startswith(axes_names, "VEL")
+                    fileh.close()
+                    if (a+b+c).sum() != 3: return True
+            fileh.close()
         except:
             pass
+        return False
+
+#class FITSXYVHierarchy(FITSHierarchy):
+#
+#    grid = FITSGrid
+
+
+class FITSXYVDataset(FITSDataset):
+    _dataset_type = "xyv_fits"
+
+    def __init__(self, filename,
+                 dataset_type='xyv_fits',
+                 storage_filename = None,
+                 mask_nans = True,
+                 nprocs=1):
+
+        super(FITSXYVDataset, self).__init__(filename, dataset_type=dataset_type,
+                                             storage_filename=storage_filename,
+                                             mask_nans=mask_nans, nprocs=nprocs)
+        self.axes_names = [self.primary_header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
+        self.ra_axis = np.where(np_char.startswith(self.axes_names, "RA"))[0][0]
+        self.dec_axis = np.where(np_char.startswith(self.axes_names, "DEC"))[0][0]
+        self.vel_axis = np.where(np_char.startswith(self.axes_names, "VEL"))[0][0]
+
+    def _parse_parameter_file(self):
+
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        for k, v in self.primary_header.items():
+            self.parameters[k] = v
+
+        # Determine dimensionality
+
+        self.dimensionality = self.primary_header["naxis"]
+        self.geometry = "cartesian"
+        self.four_dims = False
+        if self.dimensionality == 4:
+            self.dimensionality = 3
+            self.four_dims = True
+
+        dims = self._handle[self.first_image].shape[::-1]
+        if self.four_dims: dims = dims[:3]
+
+        self.domain_dimensions = np.array(dims)
+        self.domain_left_edge = np.array([0.5]*3)
+        self.domain_right_edge = np.array([float(dim)+0.5 for dim in self.domain_dimensions])
+
+        # Get the simulation time
+        try:
+            self.current_time = self.parameters["time"]
+        except:
+            mylog.warning("Cannot find time")
+            self.current_time = 0.0
+            pass
+
+        # For now we'll ignore these
+        self.periodicity = (False,)*3
+        self.current_redshift = self.omega_lambda = self.omega_matter = \
+            self.hubble_constant = self.cosmological_simulation = 0.0
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if isinstance(args[0], types.StringTypes):
+            ext = args[0].rsplit(".", 1)[-1]
+            if ext.upper() == "GZ":
+                # We don't know for sure that there will be > 1
+                ext = args[0].rsplit(".", 1)[0].rsplit(".", 1)[-1]
+            if ext.upper() not in ("FITS", "FTS"):
+                return False
         try:
             with warnings.catch_warnings():
                 warnings.filterwarnings('ignore', category=UserWarning, append=True)
                 fileh = ap.pyfits.open(args[0])
             for h in fileh:
-                if h.is_image and h.data is not None:
+                if h.header["naxis"] >= 3:
+                    axes_names = [h.header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
+                    a = np_char.startswith(axes_names, "RA")
+                    b = np_char.startswith(axes_names, "DEC")
+                    c = np_char.startswith(axes_names, "VEL")
                     fileh.close()
-                    return True
+                    if (a+b+c).sum() == 3: return True
             fileh.close()
         except:
             pass

diff -r 2e0d50e397f991d2e3a55611338f9e727314008f -r ebdb62fccbda944e9743e53875b25e4bc337662e yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -55,10 +55,45 @@
                         nx, ny = g.ActiveDimensions[:2]
                         nz = 1
                         data = np.zeros((nx,ny,nz))
-                        data[:,:,0] = ds.data.transpose()[start[0]:end[0],start[1]:end[1]]
+                        data[:,:,0] = ds.data[start[1]:end[1],start[0]:end[0]].transpose()
                     elif self.pf.dimensionality == 3:
-                        data = ds.data.transpose()[start[0]:end[0],start[1]:end[1],start[2]:end[2]]
+                        data = ds.data[start[2]:end[2],start[1]:end[1],start[0]:end[0]].transpose()
                     if self.pf.mask_nans: data[np.isnan(data)] = 0.0
                     ind += g.select(selector, data.astype("float64"), rv[field], ind)
         return rv
 
+class IOHandlerXYVFITS(IOHandlerFITS):
+    _particle_reader = False
+    _dataset_type = "xyv_fits"
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        chunks = list(chunks)
+        if any((ftype != "xyv_fits" for ftype, fname in fields)):
+            raise NotImplementedError
+        f = self._handle
+        rv = {}
+        dt = "float64"
+        for field in fields:
+            rv[field] = np.empty(size, dtype=dt)
+        ng = sum(len(c.objs) for c in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s grids",
+                    size, [f2 for f1, f2 in fields], ng)
+        for field in fields:
+            ftype, fname = field
+            if self.pf.four_dims:
+                ds = f[fname.split("_")[0]]
+            else:
+                ds = f[fname]
+            ind = 0
+            for chunk in chunks:
+                for g in chunk.objs:
+                    start = (g.LeftEdge.ndarray_view()-0.5).astype("int")
+                    end = (g.RightEdge.ndarray_view()-0.5).astype("int")
+                    if self.pf.four_dims:
+                        idx = self.pf.index._field_map[fname]
+                        data = ds.data[idx,start[2]:end[2],start[1]:end[1],start[0]:end[0]].transpose()
+                    else:
+                        data = ds.data[start[2]:end[2],start[1]:end[1],start[0]:end[0]].transpose()
+                    if self.pf.mask_nans: data[np.isnan(data)] = 0.0
+                    ind += g.select(selector, data.astype("float64"), rv[field], ind)
+        return rv


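The _is_valid logic above boils down to a vectorized check on the CTYPEn
header keywords: an XYV cube is recognized when exactly three axis names
start with RA, DEC and VEL. A standalone sketch of that test:

    import numpy.core.defchararray as np_char

    def looks_like_xyv(axes_names):
        # Exactly one RA, one DEC and one VEL axis marks an XYV cube.
        a = np_char.startswith(axes_names, "RA")
        b = np_char.startswith(axes_names, "DEC")
        c = np_char.startswith(axes_names, "VEL")
        return (a + b + c).sum() == 3

    print(looks_like_xyv(["RA---SIN", "DEC--SIN", "VELO-LSR"]))  # True
    print(looks_like_xyv(["LINEAR", "LINEAR", "LINEAR"]))        # False
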
https://bitbucket.org/yt_analysis/yt/commits/7aeb84b77854/
Changeset:   7aeb84b77854
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-08 19:25:49
Summary:     Cleaning up WCS code. This will need a little more work.
Affected #:  1 file

diff -r ebdb62fccbda944e9743e53875b25e4bc337662e -r 7aeb84b778542472a7b41f6700ff799524c40e73 yt/frontends/fits/fields.py
--- a/yt/frontends/fits/fields.py
+++ b/yt/frontends/fits/fields.py
@@ -19,17 +19,11 @@
     known_other_fields = ()
     def _get_wcs(self, data, axis):
         if data.pf.dimensionality == 2:
-            xw, yw = data.pf.wcs.wcs_pix2world(data["x"], data["y"], 1)
-            zw = data["z"]
+            w_coords = data.pf.wcs.wcs_pix2world(data["x"], data["y"], 1)
         else:
-            xw, yw, zw = data.pf.wcs.wcs_pix2world(data["x"], data["y"],
-                                                   data["z"], 1)
-        if axis == 0:
-            return xw
-        elif axis == 1:
-            return yw
-        elif axis == 2:
-            return zw
+            w_coords = data.pf.wcs.wcs_pix2world(data["x"], data["y"],
+                                                 data["z"], 1)
+        return w_coords[axis]
     def setup_fluid_fields(self):
         def world_f(axis):
             def _world_f(field, data):


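The refactor above relies on wcs_pix2world returning one array per axis, so
the result can be indexed by axis number instead of being unpacked into
xw/yw/zw. A trivial check with a default (linear) 3-axis WCS:

    import numpy as np
    import astropy.wcs as pywcs

    w = pywcs.WCS(naxis=3)                   # identity-like linear WCS
    x = y = z = np.arange(4.0)
    w_coords = w.wcs_pix2world(x, y, z, 1)   # list of three arrays
    print(w_coords[1])                       # world coordinates along axis 1
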
https://bitbucket.org/yt_analysis/yt/commits/e8b1f98baaea/
Changeset:   e8b1f98baaea
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-08 19:26:23
Summary:     Send slices and projections to APLpy for FITS data.
Affected #:  1 file

diff -r 7aeb84b778542472a7b41f6700ff799524c40e73 -r e8b1f98baaeaef993dfb0078a62641c1eb1d8c41 yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -0,0 +1,95 @@
+"""
+FITS-specific miscellaneous functions
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import aplpy
+from yt.utilities.fits_image import FITSImageBuffer
+from yt.funcs import fix_axis
+import astropy.wcs as pywcs
+
+axis_wcs = [[1,2],[0,2],[0,1]]
+
+plot_method_list = ["recenter","show_colorscale","show_grayscale",
+                    "refresh","add_colorbar","remove_colorbar"]
+
+def plot_method(method, plots):
+    def _method(*args, **kwargs):
+        for plot in plots.values():
+            getattr(plot, method)(*args, **kwargs)
+        return
+    return _method
+
+class FITSPlot(object):
+    def __init__(self, ds, data, axis, fields, **kwargs):
+        self.ds = ds
+        self.fields = fields
+        self.plots = {}
+        w = pywcs.WCS(naxis=2)
+        w.wcs.crpix = self.ds.wcs.wcs.crpix[axis_wcs[axis]]
+        w.wcs.cdelt = self.ds.wcs.wcs.cdelt[axis_wcs[axis]]
+        w.wcs.crval = self.ds.wcs.wcs.crval[axis_wcs[axis]]
+        w.wcs.cunit = [str(self.ds.wcs.wcs.cunit[idx]) for idx in axis_wcs[axis]]
+        w.wcs.ctype = [self.ds.wcs.wcs.ctype[idx] for idx in axis_wcs[axis]]
+        self.buffer = FITSImageBuffer(data, fields=fields, wcs=w)
+        for field in self.fields:
+            self.plots[field] = aplpy.FITSFigure(self.buffer[field],
+                                                 **kwargs)
+            self.plots[field].set_auto_refresh(False)
+        self._setup_plot_methods()
+        self.set_font(family="serif", size=15)
+
+    def _setup_plot_methods(self):
+        for method in plot_method_list:
+            self.__dict__[method] = plot_method(method, self.plots)
+
+    def __getitem__(self, key):
+        return self.plots[key]
+
+    def keys(self):
+        return self.plots.keys()
+
+    def values(self):
+        return self.plots.values()
+
+    def items(self):
+        return self.plots.items()
+
+    def set_font(self, **kwargs):
+        for plot in self.keys():
+            self[plot].axis_labels.set_font(**kwargs)
+            self[plot].tick_labels.set_font(**kwargs)
+
+    def set_stretch(self, name, stretch):
+        self[name].show_colorscale(stretch=stretch)
+
+    def set_zlim(self, name, zmin, zmax):
+        self[name].show_colorscale(vmin=zmin, vmax=zmax)
+
+class FITSSlicePlot(FITSPlot):
+    def __init__(self, ds, axis, fields, coord=None, **kwargs):
+        axis = fix_axis(axis)
+        if coord is None:
+            coord = ds.domain_center.ndarray_view()[axis]
+        slc = ds.slice(axis, coord)
+        data = {}
+        for field in fields:
+            data[field] = slc[field].reshape(ds.domain_dimensions[axis_wcs[axis]]).transpose()
+        super(FITSSlicePlot, self).__init__(ds, data, axis, fields, **kwargs)
+
+class FITSProjectionPlot(FITSPlot):
+    def __init__(self, ds, axis, fields, weight_field=None, data_source=None, **kwargs):
+        axis = fix_axis(axis)
+        prj = ds.proj(fields[0], axis, weight_field=weight_field, data_source=data_source)
+        data = {}
+        for field in fields:
+            data[field] = prj[field].reshape(ds.domain_dimensions[axis_wcs[axis]]).transpose()
+        super(FITSProjectionPlot, self).__init__(ds, data, axis, fields, **kwargs)
+


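A hedged usage sketch for the new classes (ds stands for a FITS dataset
already loaded with yt, the field name is a placeholder, and aplpy must be
installed); any method in plot_method_list is forwarded to every underlying
aplpy figure:

    from yt.frontends.fits.misc import FITSSlicePlot

    slc = FITSSlicePlot(ds, "z", ["intensity"])
    slc.show_colorscale(cmap="hot")          # forwarded to all aplpy figures
    slc.set_zlim("intensity", 1.0e-3, 1.0)
    slc.set_stretch("intensity", "log")
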
https://bitbucket.org/yt_analysis/yt/commits/b6bf1c6dfcc5/
Changeset:   b6bf1c6dfcc5
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-08 19:57:53
Summary:     More FITS work
Affected #:  4 files

diff -r e8b1f98baaeaef993dfb0078a62641c1eb1d8c41 -r b6bf1c6dfcc5749062da03cd80842e81e721d612 yt/frontends/fits/api.py
--- a/yt/frontends/fits/api.py
+++ b/yt/frontends/fits/api.py
@@ -17,8 +17,9 @@
       FITSXYVDataset
 
 from .fields import \
-      FITSFieldInfo
+      FITSFieldInfo, \
+      FITSXYVFieldInfo
 
 from .io import \
       IOHandlerFITS, \
-      IOHandlerXYVFITS
+      IOHandlerFITSXYV

diff -r e8b1f98baaeaef993dfb0078a62641c1eb1d8c41 -r b6bf1c6dfcc5749062da03cd80842e81e721d612 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -31,7 +31,7 @@
     mpc_conversion, sec_conversion
 from yt.utilities.io_handler import \
     io_registry
-from .fields import FITSFieldInfo
+from .fields import FITSFieldInfo, FITSXYVFieldInfo
 from yt.utilities.decompose import \
     decompose_array, get_psize
 
@@ -132,11 +132,11 @@
                     for idx in range(h.header["naxis4"]):
                         fname = h.name.lower()+"_%d" % (idx)
                         self._field_map[fname] = idx
-                        self.field_list.append(("fits", fname))
+                        self.field_list.append((self.dataset_type, fname))
                         self._detect_image_units(fname, h.header)
                 else:
                     fname = h.name.lower()
-                    self.field_list.append(("fits", fname))
+                    self.field_list.append((self.dataset_type, fname))
                     self._detect_image_units(fname, h.header)
 
     def _count_grids(self):
@@ -202,7 +202,7 @@
         self.fluid_types += ("fits",)
         self.mask_nans = mask_nans
         self.nprocs = nprocs
-        self._handle = ap.pyfits.open(filename)
+        self._handle = ap.pyfits.open(filename, do_not_scale_image_data=True)
         for i, h in enumerate(self._handle):
             if h.header["naxis"] >= 2:
                 self.first_image = i
@@ -319,6 +319,7 @@
 
 class FITSXYVDataset(FITSDataset):
     _dataset_type = "xyv_fits"
+    _field_info_class = FITSXYVFieldInfo
 
     def __init__(self, filename,
                  dataset_type='xyv_fits',
@@ -326,6 +327,8 @@
                  mask_nans = True,
                  nprocs=1):
 
+        self.fluid_types += ("xyv_fits",)
+
         super(FITSXYVDataset, self).__init__(filename, dataset_type=dataset_type,
                                              storage_filename=storage_filename,
                                              mask_nans=mask_nans, nprocs=nprocs)
@@ -334,6 +337,15 @@
         self.dec_axis = np.where(np_char.startswith(self.axes_names, "DEC"))[0][0]
         self.vel_axis = np.where(np_char.startswith(self.axes_names, "VEL"))[0][0]
 
+        self.wcs_2d = ap.pywcs.WCS(naxis=2)
+        self.wcs_2d.wcs.crpix = self.wcs.wcs.crpix[self.ra_axis, self.dec_axis]
+        self.wcs_2d.wcs.cdelt = self.wcs.wcs.cdelt[self.ra_axis, self.dec_axis]
+        self.wcs_2d.wcs.crval = self.wcs.wcs.crval[self.ra_axis, self.dec_axis]
+        self.wcs_2d.wcs.cunit = [str(self.wcs.wcs.cunit[self.ra_axis]),
+                                 str(self.wcs.wcs.cunit[self.dec_axis])]
+        self.wcs_2d.wcs.ctype = [self.wcs.wcs.ctype[self.ra_axis],
+                                 self.wcs.wcs.ctype[self.dec_axis]]
+
     def _parse_parameter_file(self):
 
         self.unique_identifier = \

diff -r e8b1f98baaeaef993dfb0078a62641c1eb1d8c41 -r b6bf1c6dfcc5749062da03cd80842e81e721d612 yt/frontends/fits/fields.py
--- a/yt/frontends/fits/fields.py
+++ b/yt/frontends/fits/fields.py
@@ -40,5 +40,31 @@
                 if unit.lower() == "rad": unit = "radian"
                 self.add_field(("fits",name), function=world_f(i), units=unit)
 
+class FITSXYVFieldInfo(FieldInfoContainer):
+    known_other_fields = ()
+    def _get_wcs(self, data, axis):
+        if data.pf.dimensionality == 2:
+            w_coords = data.pf.wcs.wcs_pix2world(data["x"], data["y"], 1)
+        else:
+            w_coords = data.pf.wcs.wcs_pix2world(data["x"], data["y"],
+                                                 data["z"], 1)
+        return w_coords[axis]
+    def setup_fluid_fields(self):
+        def world_f(axis):
+            def _world_f(field, data):
+                return self._get_wcs(data, axis)
+            return _world_f
+        for i in range(self.pf.dimensionality):
+            if self.pf.wcs.wcs.cname[i] == '':
+                name = str(self.pf.wcs.wcs.ctype[i])
+            else:
+                name = str(self.pf.wcs.wcs.cname[i])
+            unit = str(self.pf.wcs.wcs.cunit[i])
+            if name != '' and unit != '':
+                if unit.lower() == "deg": unit = "degree"
+                if unit.lower() == "rad": unit = "radian"
+                self.add_field(("fits",name), function=world_f(i), units=unit)
 
 
+
+

diff -r e8b1f98baaeaef993dfb0078a62641c1eb1d8c41 -r b6bf1c6dfcc5749062da03cd80842e81e721d612 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -62,7 +62,7 @@
                     ind += g.select(selector, data.astype("float64"), rv[field], ind)
         return rv
 
-class IOHandlerXYVFITS(IOHandlerFITS):
+class IOHandlerFITSXYV(IOHandlerFITS):
     _particle_reader = False
     _dataset_type = "xyv_fits"
 


https://bitbucket.org/yt_analysis/yt/commits/ad1db0c752dc/
Changeset:   ad1db0c752dc
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-08 19:58:35
Summary:     Merge
Affected #:  4 files

diff -r b6bf1c6dfcc5749062da03cd80842e81e721d612 -r ad1db0c752dcea039487dda881957a5c9cc91f07 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -11,234 +11,232 @@
 # Importing relevant rockstar data types particle, fof halo, halo
 
 cdef import from "particle.h":
-	struct particle:
-		np.int64_t id
-		float pos[6]
+    struct particle:
+        np.int64_t id
+        float pos[6]
 
 cdef import from "fof.h":
-	struct fof:
-		np.int64_t num_p
-		particle *particles
+    struct fof:
+        np.int64_t num_p
+        particle *particles
 
 cdef import from "halo.h":
-	struct halo:
-		np.int64_t id
-		float pos[6], corevel[3], bulkvel[3]
-		float m, r, child_r, vmax_r, mgrav,	vmax, rvmax, rs, klypin_rs, vrms
-		float J[3], energy, spin, alt_m[4], Xoff, Voff, b_to_a, c_to_a, A[3]
-		float bullock_spin, kin_to_pot
-		np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
-		float min_pos_err, min_vel_err, min_bulkvel_err
+    struct halo:
+        np.int64_t id
+        float pos[6], corevel[3], bulkvel[3]
+        float m, r, child_r, vmax_r, mgrav,    vmax, rvmax, rs, klypin_rs, vrms
+        float J[3], energy, spin, alt_m[4], Xoff, Voff, b_to_a, c_to_a, A[3]
+        float bullock_spin, kin_to_pot
+        np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
+        float min_pos_err, min_vel_err, min_bulkvel_err
 
 # For finding sub halos import finder function and global variable
 # rockstar uses to store the results
 
 cdef import from "groupies.h":
-	void find_subs(fof *f) 
-	halo *halos
-	np.int64_t num_halos
-	void calc_mass_definition()
+    void find_subs(fof *f) nogil
+    halo *halos
+    np.int64_t num_halos
+    void calc_mass_definition() nogil
 
 # For outputing halos, rockstar style
 
 cdef import from "meta_io.h":
-	void output_halos(np.int64_t id_offset, np.int64_t snap, np.int64_t chunk, float *bounds) 
+    void output_halos(np.int64_t id_offset, np.int64_t snap, np.int64_t chunk, float *bounds) nogil
 
 # For setting up the configuration of rockstar
 
 cdef import from "config.h":
-	void setup_config()
+    void setup_config() nogil
 
 cdef import from "config_vars.h":
-	# Rockstar cleverly puts all of the config variables inside a templated
-	# definition of their vaiables.
-	char *FILE_FORMAT
-	np.float64_t PARTICLE_MASS
+    # Rockstar cleverly puts all of the config variables inside a templated
+    # definition of their variables.
+    char *FILE_FORMAT
+    np.float64_t PARTICLE_MASS
 
-	char *MASS_DEFINITION
-	np.int64_t MIN_HALO_OUTPUT_SIZE
-	np.float64_t FORCE_RES
+    char *MASS_DEFINITION
+    np.int64_t MIN_HALO_OUTPUT_SIZE
+    np.float64_t FORCE_RES
 
-	np.float64_t SCALE_NOW
-	np.float64_t h0
-	np.float64_t Ol
-	np.float64_t Om
+    np.float64_t SCALE_NOW
+    np.float64_t h0
+    np.float64_t Ol
+    np.float64_t Om
 
-	np.int64_t GADGET_ID_BYTES
-	np.float64_t GADGET_MASS_CONVERSION
-	np.float64_t GADGET_LENGTH_CONVERSION
-	np.int64_t GADGET_SKIP_NON_HALO_PARTICLES
-	np.int64_t RESCALE_PARTICLE_MASS
+    np.int64_t GADGET_ID_BYTES
+    np.float64_t GADGET_MASS_CONVERSION
+    np.float64_t GADGET_LENGTH_CONVERSION
+    np.int64_t GADGET_SKIP_NON_HALO_PARTICLES
+    np.int64_t RESCALE_PARTICLE_MASS
 
-	np.int64_t PARALLEL_IO
-	char *PARALLEL_IO_SERVER_ADDRESS
-	char *PARALLEL_IO_SERVER_PORT
-	np.int64_t PARALLEL_IO_WRITER_PORT
-	char *PARALLEL_IO_SERVER_INTERFACE
-	char *RUN_ON_SUCCESS
+    np.int64_t PARALLEL_IO
+    char *PARALLEL_IO_SERVER_ADDRESS
+    char *PARALLEL_IO_SERVER_PORT
+    np.int64_t PARALLEL_IO_WRITER_PORT
+    char *PARALLEL_IO_SERVER_INTERFACE
+    char *RUN_ON_SUCCESS
 
-	char *INBASE
-	char *FILENAME
-	np.int64_t STARTING_SNAP
-	np.int64_t NUM_SNAPS
-	np.int64_t NUM_BLOCKS
-	np.int64_t NUM_READERS
-	np.int64_t PRELOAD_PARTICLES
-	char *SNAPSHOT_NAMES
-	char *LIGHTCONE_ALT_SNAPS
-	char *BLOCK_NAMES
+    char *INBASE
+    char *FILENAME
+    np.int64_t STARTING_SNAP
+    np.int64_t NUM_SNAPS
+    np.int64_t NUM_BLOCKS
+    np.int64_t NUM_READERS
+    np.int64_t PRELOAD_PARTICLES
+    char *SNAPSHOT_NAMES
+    char *LIGHTCONE_ALT_SNAPS
+    char *BLOCK_NAMES
 
-	char *OUTBASE
-	np.float64_t OVERLAP_LENGTH
-	np.int64_t NUM_WRITERS
-	np.int64_t FORK_READERS_FROM_WRITERS
-	np.int64_t FORK_PROCESSORS_PER_MACHINE
+    char *OUTBASE
+    np.float64_t OVERLAP_LENGTH
+    np.int64_t NUM_WRITERS
+    np.int64_t FORK_READERS_FROM_WRITERS
+    np.int64_t FORK_PROCESSORS_PER_MACHINE
 
-	char *OUTPUT_FORMAT
-	np.int64_t DELETE_BINARY_OUTPUT_AFTER_FINISHED
-	np.int64_t FULL_PARTICLE_CHUNKS
-	char *BGC2_SNAPNAMES
+    char *OUTPUT_FORMAT
+    np.int64_t DELETE_BINARY_OUTPUT_AFTER_FINISHED
+    np.int64_t FULL_PARTICLE_CHUNKS
+    char *BGC2_SNAPNAMES
 
-	np.int64_t BOUND_PROPS
-	np.int64_t BOUND_OUT_TO_HALO_EDGE
-	np.int64_t DO_MERGER_TREE_ONLY
-	np.int64_t IGNORE_PARTICLE_IDS
-	np.float64_t TRIM_OVERLAP
-	np.float64_t ROUND_AFTER_TRIM
-	np.int64_t LIGHTCONE
-	np.int64_t PERIODIC
+    np.int64_t BOUND_PROPS
+    np.int64_t BOUND_OUT_TO_HALO_EDGE
+    np.int64_t DO_MERGER_TREE_ONLY
+    np.int64_t IGNORE_PARTICLE_IDS
+    np.float64_t TRIM_OVERLAP
+    np.float64_t ROUND_AFTER_TRIM
+    np.int64_t LIGHTCONE
+    np.int64_t PERIODIC
 
-	np.float64_t LIGHTCONE_ORIGIN[3]
-	np.float64_t LIGHTCONE_ALT_ORIGIN[3]
+    np.float64_t LIGHTCONE_ORIGIN[3]
+    np.float64_t LIGHTCONE_ALT_ORIGIN[3]
 
-	np.float64_t LIMIT_CENTER[3]
-	np.float64_t LIMIT_RADIUS
+    np.float64_t LIMIT_CENTER[3]
+    np.float64_t LIMIT_RADIUS
 
-	np.int64_t SWAP_ENDIANNESS
-	np.int64_t GADGET_VARIANT
+    np.int64_t SWAP_ENDIANNESS
+    np.int64_t GADGET_VARIANT
 
-	np.float64_t FOF_FRACTION
-	np.float64_t FOF_LINKING_LENGTH
-	np.float64_t INCLUDE_HOST_POTENTIAL_RATIO
-	np.float64_t DOUBLE_COUNT_SUBHALO_MASS_RATIO
-	np.int64_t TEMPORAL_HALO_FINDING
-	np.int64_t MIN_HALO_PARTICLES
-	np.float64_t UNBOUND_THRESHOLD
-	np.int64_t ALT_NFW_METRIC
+    np.float64_t FOF_FRACTION
+    np.float64_t FOF_LINKING_LENGTH
+    np.float64_t INCLUDE_HOST_POTENTIAL_RATIO
+    np.float64_t DOUBLE_COUNT_SUBHALO_MASS_RATIO
+    np.int64_t TEMPORAL_HALO_FINDING
+    np.int64_t MIN_HALO_PARTICLES
+    np.float64_t UNBOUND_THRESHOLD
+    np.int64_t ALT_NFW_METRIC
 
-	np.int64_t TOTAL_PARTICLES
-	np.float64_t BOX_SIZE
-	np.int64_t OUTPUT_HMAD
-	np.int64_t OUTPUT_PARTICLES
-	np.int64_t OUTPUT_LEVELS
-	np.float64_t DUMP_PARTICLES[3]
+    np.int64_t TOTAL_PARTICLES
+    np.float64_t BOX_SIZE
+    np.int64_t OUTPUT_HMAD
+    np.int64_t OUTPUT_PARTICLES
+    np.int64_t OUTPUT_LEVELS
+    np.float64_t DUMP_PARTICLES[3]
 
-	np.float64_t AVG_PARTICLE_SPACING
-	np.int64_t SINGLE_SNAP
+    np.float64_t AVG_PARTICLE_SPACING
+    np.int64_t SINGLE_SNAP
 
 
 
 cdef class RockstarGroupiesInterface:
-	
-	cdef public object pf
-	cdef public object fof
+    
+    cdef public object pf
+    cdef public object fof
 
-	# For future use/consistency
-	def __cinit__(self,pf):
-		self.pf = pf
+    # For future use/consistency
+    def __cinit__(self,pf):
+        self.pf = pf
 
-	def setup_rockstar(self,
-						particle_mass,
-						int periodic = 1, force_res=None,
-						int min_halo_size = 25, outbase = "None",
-						callbacks = None):
-		global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
-		global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
-		global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
-		global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
-		global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
-		
+    def setup_rockstar(self,
+                        particle_mass,
+                        int periodic = 1, force_res=None,
+                        int min_halo_size = 25, outbase = "None",
+                        callbacks = None):
+        global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
+        global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
+        global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
+        global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
+        global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
+        
 
-		if force_res is not None:
-			FORCE_RES=np.float64(force_res)
+        if force_res is not None:
+            FORCE_RES=np.float64(force_res)
 
-		OVERLAP_LENGTH = 0.0
-		
-		FILENAME = "inline.<block>"
-		FILE_FORMAT = "GENERIC"
-		OUTPUT_FORMAT = "ASCII"
-		MIN_HALO_OUTPUT_SIZE=min_halo_size
-		
-		pf = self.pf
+        OVERLAP_LENGTH = 0.0
+        
+        FILENAME = "inline.<block>"
+        FILE_FORMAT = "GENERIC"
+        OUTPUT_FORMAT = "ASCII"
+        MIN_HALO_OUTPUT_SIZE=min_halo_size
+        
+        pf = self.pf
 
-		h0 = pf.hubble_constant
-		Ol = pf.omega_lambda
-		Om = pf.omega_matter
-		
-		SCALE_NOW = 1.0/(pf.current_redshift+1.0)
-		
-		if not outbase =='None'.decode('UTF-8'):
-			#output directory. since we can't change the output filenames
-			#workaround is to make a new directory
-			OUTBASE = outbase 
+        h0 = pf.hubble_constant
+        Ol = pf.omega_lambda
+        Om = pf.omega_matter
+        
+        SCALE_NOW = 1.0/(pf.current_redshift+1.0)
+        
+        if not outbase =='None'.decode('UTF-8'):
+            #output directory. since we can't change the output filenames
+            #workaround is to make a new directory
+            OUTBASE = outbase 
 
 
-		PARTICLE_MASS = particle_mass.in_units('Msun/h')
-		PERIODIC = periodic
-		BOX_SIZE = pf.domain_width.in_units('Mpccm/h')[0]
+        PARTICLE_MASS = particle_mass.in_units('Msun/h')
+        PERIODIC = periodic
+        BOX_SIZE = pf.domain_width.in_units('Mpccm/h')[0]
 
-		# Set up the configuration options
-		setup_config()
+        # Set up the configuration options
+        setup_config()
 
-		# Needs to be called so rockstar can use the particle mass parameter
-		# to calculate virial quantities properly
-		calc_mass_definition()
+        # Needs to be called so rockstar can use the particle mass parameter
+        # to calculate virial quantities properly
+        calc_mass_definition()
 
+    def output_halos(self):
+        output_halos(0, 0, 0, NULL) 
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def make_rockstar_fof(self, np.ndarray[np.int64_t, ndim=1] pid,
+                                np.ndarray[np.float64_t, ndim=2] pos,
+                                np.ndarray[np.float64_t, ndim=2] vel,
+                                np.ndarray[np.int64_t, ndim=1] fof_tags,
+                                np.int64_t nfof,
+                                np.int64_t npart_max):
 
-	def make_rockstar_fof(self,fof_ids, pos, vel):
+        # Define fof object
 
-		# Turn positions and velocities into units we want
-		pos = pos.in_units('Mpccm/h')
-		vel = vel.in_units('km/s')
+        # Find number of particles
+        cdef np.int64_t i, j
+        cdef np.int64_t num_particles = pid.shape[0]
 
-		# Define fof object
-		cdef fof fof_obj
+        # Allocate space for correct number of particles
+        cdef particle* particles = <particle*> malloc(npart_max * sizeof(particle))
+        cdef fof fof_obj
+        fof_obj.particles = particles
 
-		# Find number of particles
-		cdef np.int64_t num_particles = len(fof_ids)
+        cdef np.int64_t last_fof_tag = 1
+        cdef np.int64_t k = 0
+        for i in range(num_particles):
+            if fof_tags[i] == 0:
+                continue
+            if fof_tags[i] != last_fof_tag:
+                last_fof_tag = fof_tags[i]
+                if k > 16:
+                    print "Finding subs", k, i
+                    fof_obj.num_p = k
+                    find_subs(&fof_obj)
+                k = 0
+            particles[k].id = pid[i]
 
-		# Allocate space for correct number of particles
-		cdef particle* particles = <particle*> malloc(num_particles * sizeof(particle))
+            # fill in locations & velocities
+            for j in range(3):
+                particles[k].pos[j] = pos[i,j]
+                particles[k].pos[j+3] = vel[i,j]
+            k += 1
+        free(particles)
 
-		# Fill in array of particles with particle that fof identified
-		# This is possibly the slowest way to code this, but for now
-		# I just want it to work
-		for i,id in enumerate(fof_ids):
-			particles[i].id = id
 
-			# fill in locations & velocities
-			for j in range(3):
-				particles[i].pos[j] = pos[id][j]
-				particles[i].pos[j+3] = vel[id][j]
 
-
-		# Assign pointer to particles into FOF object 
-		fof_obj.particles = particles
-
-		# Assign number of particles into FOF object
-		fof_obj.num_p = num_particles
-
-		# Make pointer to fof object
-		cdef fof* fof_pointer = & fof_obj
-
-		# Find the sub halos using rockstar by passing a pointer to the fof object
-		find_subs( fof_pointer)
-
-		# Output the halos, rockstar style
-		output_halos(0, 0, 0, NULL) 
-
-		free(particles)
-
-
-

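A pure-Python sketch of the streaming pattern the rewritten
make_rockstar_fof uses: particles arrive sorted by FOF tag, tag 0 means
unassigned, and a completed group is flushed whenever the tag changes (like
the Cython version above, the final group is left unflushed here):

    def flush_groups(fof_tags, min_size=16):
        sizes, last_tag, k = [], 1, 0
        for tag in fof_tags:
            if tag == 0:                # skip particles outside any group
                continue
            if tag != last_tag:         # tag changed: previous group done
                last_tag = tag
                if k > min_size:
                    sizes.append(k)     # stands in for find_subs(&fof_obj)
                k = 0
            k += 1
        return sizes

    print(flush_groups([1] * 20 + [0] * 5 + [2] * 30 + [3] * 4))  # [20, 30]
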
diff -r b6bf1c6dfcc5749062da03cd80842e81e721d612 -r ad1db0c752dcea039487dda881957a5c9cc91f07 yt/analysis_modules/halo_finding/rockstar/setup.py
--- a/yt/analysis_modules/halo_finding/rockstar/setup.py
+++ b/yt/analysis_modules/halo_finding/rockstar/setup.py
@@ -24,5 +24,12 @@
                          include_dirs=[rd,
                                        os.path.join(rd, "io"),
                                        os.path.join(rd, "util")])
+    config.add_extension("rockstar_groupies",
+                         "yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx",
+                         library_dirs=[rd],
+                         libraries=["rockstar"],
+                         include_dirs=[rd,
+                                       os.path.join(rd, "io"),
+                                       os.path.join(rd, "util")])
     return config
 

diff -r b6bf1c6dfcc5749062da03cd80842e81e721d612 -r ad1db0c752dcea039487dda881957a5c9cc91f07 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -33,7 +33,7 @@
     io_registry
 from .fields import FITSFieldInfo, FITSXYVFieldInfo
 from yt.utilities.decompose import \
-    decompose_array, get_psize
+    decompose_array, get_psize, decompose_array_nocopy
 
 class astropy_imports:
     _pyfits = None
@@ -150,12 +150,10 @@
             bbox = np.array([[le,re] for le, re in zip(pf.domain_left_edge,
                                                        pf.domain_right_edge)])
             psize = get_psize(np.array(pf.domain_dimensions), pf.nprocs)
-            temp_arr = np.zeros(pf.domain_dimensions)
-            gle, gre, temp_arr = decompose_array(temp_arr, psize, bbox)
+            gle, gre, shapes = decompose_array_nocopy(pf.domain_dimensions, psize, bbox)
             self.grid_left_edge = self.pf.arr(gle, "code_length")
             self.grid_right_edge = self.pf.arr(gre, "code_length")
-            self.grid_dimensions = np.array([grid.shape for grid in temp_arr], dtype="int32")
-            del temp_arr
+            self.grid_dimensions = np.array([shape for shape in shapes], dtype="int32")
         else:
             self.grid_left_edge[0,:] = pf.domain_left_edge
             self.grid_right_edge[0,:] = pf.domain_right_edge

diff -r b6bf1c6dfcc5749062da03cd80842e81e721d612 -r ad1db0c752dcea039487dda881957a5c9cc91f07 yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -52,6 +52,27 @@
     patches = split_array(arr, psize)
     return grid_left_edges, grid_right_edges, patches
 
+def decompose_array_nocopy(shape, psize, bbox):
+    """ Calculate list of product(psize) subarrays of arr, along with their
+        left and right edges
+    """
+    grid_left_edges = np.empty([np.product(psize), 3], dtype=np.float64)
+    grid_right_edges = np.empty([np.product(psize), 3], dtype=np.float64)
+    n_d = shape
+    d_s = (bbox[:, 1] - bbox[:, 0]) / n_d
+    dist = np.mgrid[bbox[0, 0]:bbox[0, 1]:d_s[0],
+                    bbox[1, 0]:bbox[1, 1]:d_s[1],
+                    bbox[2, 0]:bbox[2, 1]:d_s[2]]
+    for i in range(3):
+        xyz = split_array(dist[i], psize)
+        for j in range(np.product(psize)):
+            grid_left_edges[j, i] = xyz[j][0, 0, 0]
+            grid_right_edges[j, i] = xyz[j][-1, -1, -1] + d_s[i]
+        del xyz
+    del dist
+    shapes = split_array(shape, psize)
+    return grid_left_edges, grid_right_edges, shapes
+
 
 def evaluate_domain_decomposition(n_d, pieces, ldom):
     """ Evaluate longest to shortest edge ratio
@@ -141,3 +162,15 @@
                 slices.append(np.s_[lei[0]:rei[0], lei[1]:
                                     rei[1], lei[2]:rei[2]])
     return [tab[slc] for slc in slices]
+
+def split_array_nocopy(shape, psize):
+    """ Split array into px*py*pz subarrays. """
+    n_d = np.array(shape, dtype=np.int64)
+    shapes = []
+    for i in range(psize[0]):
+        for j in range(psize[1]):
+            for k in range(psize[2]):
+                piece = np.array([i, j, k], dtype=np.int64)
+                lei = n_d * piece / psize
+                rei = n_d * (piece + np.ones(3, dtype=np.int64)) / psize
+                shapes.append(rei-lei)
+    return shapes 


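A hedged usage sketch of the no-copy decomposition above, mirroring the
FITSHierarchy call: the field data array itself is never allocated, only
grid edges and sub-shapes are computed.

    import numpy as np
    from yt.utilities.decompose import decompose_array_nocopy, get_psize

    shape = np.array([256, 256, 256])
    bbox = np.array([[0.0, 1.0]] * 3)
    psize = get_psize(shape, 8)       # factor the domain into 8 pieces
    gle, gre, shapes = decompose_array_nocopy(shape, psize, bbox)
    # gle and gre are (8, 3) arrays of edges; shapes holds 8 sub-shapes
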
https://bitbucket.org/yt_analysis/yt/commits/8946c33b97ef/
Changeset:   8946c33b97ef
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-08 20:03:29
Summary:     Index fix
Affected #:  1 file

diff -r ad1db0c752dcea039487dda881957a5c9cc91f07 -r 8946c33b97ef6e59f5face9bf14c78d94082c3e6 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -336,9 +336,9 @@
         self.vel_axis = np.where(np_char.startswith(self.axes_names, "VEL"))[0][0]
 
         self.wcs_2d = ap.pywcs.WCS(naxis=2)
-        self.wcs_2d.wcs.crpix = self.wcs.wcs.crpix[self.ra_axis, self.dec_axis]
-        self.wcs_2d.wcs.cdelt = self.wcs.wcs.cdelt[self.ra_axis, self.dec_axis]
-        self.wcs_2d.wcs.crval = self.wcs.wcs.crval[self.ra_axis, self.dec_axis]
+        self.wcs_2d.wcs.crpix = self.wcs.wcs.crpix[[self.ra_axis, self.dec_axis]]
+        self.wcs_2d.wcs.cdelt = self.wcs.wcs.cdelt[[self.ra_axis, self.dec_axis]]
+        self.wcs_2d.wcs.crval = self.wcs.wcs.crval[[self.ra_axis, self.dec_axis]]
         self.wcs_2d.wcs.cunit = [str(self.wcs.wcs.cunit[self.ra_axis]),
                                  str(self.wcs.wcs.cunit[self.dec_axis])]
         self.wcs_2d.wcs.ctype = [self.wcs.wcs.ctype[self.ra_axis],


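The fix above swaps scalar indexing for fancy indexing: on a 1-D array,
arr[i, j] is a malformed 2-D index, while arr[[i, j]] selects elements i and
j. A quick illustration:

    import numpy as np

    crpix = np.array([10.0, 20.0, 30.0])
    ra_axis, dec_axis = 0, 1
    print(crpix[[ra_axis, dec_axis]])   # array([ 10.,  20.])
    # crpix[ra_axis, dec_axis] raises IndexError: too many indices
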
https://bitbucket.org/yt_analysis/yt/commits/e81fe6d9db92/
Changeset:   e81fe6d9db92
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-08 20:16:45
Summary:     memmap=True!
Affected #:  1 file

diff -r 8946c33b97ef6e59f5face9bf14c78d94082c3e6 -r e81fe6d9db9286ea091e2f6c45287b969496e4a6 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -200,7 +200,7 @@
         self.fluid_types += ("fits",)
         self.mask_nans = mask_nans
         self.nprocs = nprocs
-        self._handle = ap.pyfits.open(filename, do_not_scale_image_data=True)
+        self._handle = ap.pyfits.open(filename, memmap=True, do_not_scale_image_data=True)
         for i, h in enumerate(self._handle):
             if h.header["naxis"] >= 2:
                 self.first_image = i


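A minimal sketch of what memmap=True changes (the filename is a
placeholder): astropy maps image HDUs from disk lazily, so opening a
multi-GB cube is cheap and only the slabs actually accessed are read.

    import astropy.io.fits as pyfits

    f = pyfits.open("big_cube.fits", memmap=True,
                    do_not_scale_image_data=True)
    slab = f[0].data[0:16, :, :]   # only this slab is read from disk
    f.close()
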
https://bitbucket.org/yt_analysis/yt/commits/7797791a8102/
Changeset:   7797791a8102
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-08 22:08:37
Summary:     Refactoring _is_valid and _parse_parameter_file to avoid code duplication
Affected #:  2 files

diff -r e81fe6d9db9286ea091e2f6c45287b969496e4a6 -r 7797791a8102216c8f30723984bd03ca9a43f4f6 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -81,6 +81,22 @@
 known_units = {"k":"K",
                "jy":"Jy"}
 
+def fits_file_validator(ds, *args, **kwargs):
+    ext = args[0].rsplit(".", 1)[-1]
+    if ext.upper() == "GZ":
+        # We don't know for sure that there will be > 1
+        ext = args[0].rsplit(".", 1)[0].rsplit(".", 1)[-1]
+    if ext.upper() not in ("FITS", "FTS"):
+        return False
+    try:
+        with warnings.catch_warnings():
+            warnings.filterwarnings('ignore', category=UserWarning, append=True)
+            fileh = ap.pyfits.open(args[0])
+        if ds._check_axes(fileh): return True
+    except:
+        pass
+    return False
+
 class FITSGrid(AMRGridPatch):
     _id_offset = 0
     def __init__(self, id, index, level):
@@ -195,11 +211,9 @@
 
     def __init__(self, filename, dataset_type='fits',
                  storage_filename = None,
-                 mask_nans = True,
-                 nprocs=1):
+                 mask_nans = True):
         self.fluid_types += ("fits",)
         self.mask_nans = mask_nans
-        self.nprocs = nprocs
         self._handle = ap.pyfits.open(filename, memmap=True, do_not_scale_image_data=True)
         for i, h in enumerate(self._handle):
             if h.header["naxis"] >= 2:
@@ -281,39 +295,28 @@
         self.current_redshift = self.omega_lambda = self.omega_matter = \
             self.hubble_constant = self.cosmological_simulation = 0.0
 
+        self.nprocs = np.around(np.prod(self.domain_dimensions) /
+                                32**self.dimensionality).astype("int")
+
     def __del__(self):
         self._handle.close()
 
     @classmethod
-    def _is_valid(self, *args, **kwargs):
-        if isinstance(args[0], types.StringTypes):
-            ext = args[0].rsplit(".", 1)[-1]
-            if ext.upper() == "GZ":
-                # We don't know for sure that there will be > 1
-                ext = args[0].rsplit(".", 1)[0].rsplit(".", 1)[-1]
-            if ext.upper() not in ("FITS", "FTS"):
-                return False
-        try:
-            with warnings.catch_warnings():
-                warnings.filterwarnings('ignore', category=UserWarning, append=True)
-                fileh = ap.pyfits.open(args[0])
-            for h in fileh:
-                if h.header["naxis"] >= 2:
-                    axes_names = [h.header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
-                    a = np_char.startswith(axes_names, "RA")
-                    b = np_char.startswith(axes_names, "DEC")
-                    c = np_char.startswith(axes_names, "VEL")
-                    fileh.close()
-                    if (a+b+c).sum() != 3: return True
-            fileh.close()
-        except:
-            pass
+    def _check_axes(cls, handle):
+        for h in handle:
+            if h.header["naxis"] >= 2:
+                axes_names = [h.header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
+                a = np_char.startswith(axes_names, "RA")
+                b = np_char.startswith(axes_names, "DEC")
+                c = np_char.startswith(axes_names, "VEL")
+                if (a+b+c).sum() != 3:
+                    handle.close()
+                    return True
         return False
 
-#class FITSXYVHierarchy(FITSHierarchy):
-#
-#    grid = FITSGrid
-
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        return fits_file_validator(cls, *args, **kwargs)
 
 class FITSXYVDataset(FITSDataset):
     _dataset_type = "xyv_fits"
@@ -322,14 +325,13 @@
     def __init__(self, filename,
                  dataset_type='xyv_fits',
                  storage_filename = None,
-                 mask_nans = True,
-                 nprocs=1):
+                 mask_nans = True):
 
         self.fluid_types += ("xyv_fits",)
 
         super(FITSXYVDataset, self).__init__(filename, dataset_type=dataset_type,
                                              storage_filename=storage_filename,
-                                             mask_nans=mask_nans, nprocs=nprocs)
+                                             mask_nans=mask_nans)
         self.axes_names = [self.primary_header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
         self.ra_axis = np.where(np_char.startswith(self.axes_names, "RA"))[0][0]
         self.dec_axis = np.where(np_char.startswith(self.axes_names, "DEC"))[0][0]
@@ -346,64 +348,31 @@
 
     def _parse_parameter_file(self):
 
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        for k, v in self.primary_header.items():
-            self.parameters[k] = v
+        super(FITSXYVDataset, self)._parse_parameter_file()
 
-        # Determine dimensionality
-
-        self.dimensionality = self.primary_header["naxis"]
-        self.geometry = "cartesian"
-        self.four_dims = False
         if self.dimensionality == 4:
             self.dimensionality = 3
             self.four_dims = True
+            self.domain_dimensions = self.domain_dimensions[:3]
+            self.domain_left_edge = self.domain_left_edge[:3]
+            self.domain_right_edge = self.domain_right_edge[:3]
 
-        dims = self._handle[self.first_image].shape[::-1]
-        if self.four_dims: dims = dims[:3]
-
-        self.domain_dimensions = np.array(dims)
-        self.domain_left_edge = np.array([0.5]*3)
-        self.domain_right_edge = np.array([float(dim)+0.5 for dim in self.domain_dimensions])
-
-        # Get the simulation time
-        try:
-            self.current_time = self.parameters["time"]
-        except:
-            mylog.warning("Cannot find time")
-            self.current_time = 0.0
-            pass
-
-        # For now we'll ignore these
-        self.periodicity = (False,)*3
-        self.current_redshift = self.omega_lambda = self.omega_matter = \
-            self.hubble_constant = self.cosmological_simulation = 0.0
+        self.nprocs = np.around(np.prod(self.domain_dimensions) /
+                                32**self.dimensionality).astype("int")
 
     @classmethod
-    def _is_valid(self, *args, **kwargs):
-        if isinstance(args[0], types.StringTypes):
-            ext = args[0].rsplit(".", 1)[-1]
-            if ext.upper() == "GZ":
-                # We don't know for sure that there will be > 1
-                ext = args[0].rsplit(".", 1)[0].rsplit(".", 1)[-1]
-            if ext.upper() not in ("FITS", "FTS"):
-                return False
-        try:
-            with warnings.catch_warnings():
-                warnings.filterwarnings('ignore', category=UserWarning, append=True)
-                fileh = ap.pyfits.open(args[0])
-            for h in fileh:
-                if h.header["naxis"] >= 3:
-                    axes_names = [h.header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
-                    a = np_char.startswith(axes_names, "RA")
-                    b = np_char.startswith(axes_names, "DEC")
-                    c = np_char.startswith(axes_names, "VEL")
-                    fileh.close()
-                    if (a+b+c).sum() == 3: return True
-            fileh.close()
-        except:
-            pass
+    def _check_axes(cls, handle):
+        for h in handle:
+            if h.header["naxis"] >= 3:
+                axes_names = [h.header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
+                a = np_char.startswith(axes_names, "RA")
+                b = np_char.startswith(axes_names, "DEC")
+                c = np_char.startswith(axes_names, "VEL")
+                if (a+b+c).sum() == 3:
+                    handle.close()
+                    return True
         return False
 
-
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        return fits_file_validator(cls, *args, **kwargs)

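A note on the heuristic above: nprocs is chosen so that each decomposed
piece holds roughly 32**dimensionality cells.  A quick, hypothetical
illustration of the arithmetic (not part of the changeset):

    import numpy as np

    # A hypothetical 256^3 cube in 3 dimensions: the heuristic yields
    # 512 pieces of roughly 32^3 cells each.
    domain_dimensions = np.array([256, 256, 256])
    dimensionality = 3
    nprocs = int(np.around(np.prod(domain_dimensions) / 32**dimensionality))
    print(nprocs)  # -> 512
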
diff -r e81fe6d9db9286ea091e2f6c45287b969496e4a6 -r 7797791a8102216c8f30723984bd03ca9a43f4f6 yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -64,13 +64,13 @@
                     bbox[1, 0]:bbox[1, 1]:d_s[1],
                     bbox[2, 0]:bbox[2, 1]:d_s[2]]
     for i in range(3):
-        xyz = split_array(dist[i], psize)
+        xyz = split_array_nocopy(dist[i], psize)
         for j in range(np.product(psize)):
             grid_left_edges[j, i] = xyz[j][0, 0, 0]
             grid_right_edges[j, i] = xyz[j][-1, -1, -1] + d_s[i]
         del xyz
     del dist
-    shapes = split_array(shape, psize)
+    shapes = split_array_nocopy(shape, psize)
     return grid_left_edges, grid_right_edges, shapes
 
 

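For reference, the no-copy split used above boils down to integer
arithmetic on the global shape; a minimal sketch of the idea (not the
yt implementation, and the function name here is invented):

    import numpy as np

    def split_shapes(shape, psize):
        # Shapes of the px*py*pz pieces of an (nx, ny, nz) domain,
        # computed without ever allocating the full array.
        shape = np.asarray(shape, dtype=np.int64)
        psize = np.asarray(psize, dtype=np.int64)
        shapes = []
        for i in range(psize[0]):
            for j in range(psize[1]):
                for k in range(psize[2]):
                    idx = np.array([i, j, k])
                    le = shape * idx // psize        # left cell index
                    re = shape * (idx + 1) // psize  # right cell index (exclusive)
                    shapes.append(tuple(re - le))
        return shapes

    print(split_shapes((10, 10, 10), (2, 1, 1)))  # -> [(5, 10, 10), (5, 10, 10)]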

https://bitbucket.org/yt_analysis/yt/commits/a66311d5d35c/
Changeset:   a66311d5d35c
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-08 22:26:02
Summary:     Merge
Affected #:  634 files

diff -r 0d705d2ae8eb306c867e664f65d83e91f980bb0a -r a66311d5d35c46a628684ac62c7a35698374c977 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -36,6 +36,7 @@
 yt/utilities/lib/mesh_utilities.c
 yt/utilities/lib/misc_utilities.c
 yt/utilities/lib/Octree.c
+yt/utilities/lib/origami.c
 yt/utilities/lib/png_writer.c
 yt/utilities/lib/PointsInVolume.c
 yt/utilities/lib/QuadTree.c
@@ -52,3 +53,7 @@
 *.so
 .idea/*
 tests/results/*
+doc/build/*
+doc/source/reference/api/generated/*
+doc/_temp/*
+doc/source/bootcamp/.ipynb_checkpoints/

diff -r 0d705d2ae8eb306c867e664f65d83e91f980bb0a -r a66311d5d35c46a628684ac62c7a35698374c977 CITATION
--- a/CITATION
+++ b/CITATION
@@ -29,3 +29,28 @@
    adsurl = {http://adsabs.harvard.edu/abs/2011ApJS..192....9T},
   adsnote = {Provided by the SAO/NASA Astrophysics Data System}
 }
+
+yt can also make use of functionality from other packages.  If you utilize
+ORIGAMI, we ask that you please cite the ORIGAMI paper:
+
+ at ARTICLE{2012ApJ...754..126F,
+   author = {{Falck}, B.~L. and {Neyrinck}, M.~C. and {Szalay}, A.~S.},
+    title = "{ORIGAMI: Delineating Halos Using Phase-space Folds}",
+  journal = {\apj},
+archivePrefix = "arXiv",
+   eprint = {1201.2353},
+ primaryClass = "astro-ph.CO",
+ keywords = {dark matter, galaxies: halos, large-scale structure of universe, methods: numerical},
+     year = 2012,
+    month = aug,
+   volume = 754,
+      eid = {126},
+    pages = {126},
+      doi = {10.1088/0004-637X/754/2/126},
+   adsurl = {http://adsabs.harvard.edu/abs/2012ApJ...754..126F},
+  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+}
+
+The main homepage for ORIGAMI can be found here:
+
+http://icg.port.ac.uk/~falckb/origami.html

diff -r 0d705d2ae8eb306c867e664f65d83e91f980bb0a -r a66311d5d35c46a628684ac62c7a35698374c977 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,10 @@
 include distribute_setup.py README* CREDITS COPYING.txt CITATION
-recursive-include yt/gui/reason/html *.html *.png *.ico *.js
-recursive-include yt *.pyx *.pxd *.h README*
+recursive-include yt/gui/reason/html *.html *.png *.ico *.js *.gif *.css
+recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE*
+recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.inc *.html
+recursive-include doc *.h *.c *.sh *.svgz *.pdf *.svg *.pyx
+include doc/README doc/activate doc/activate.csh doc/cheatsheet.tex
+include doc/extensions/README doc/Makefile
+prune doc/source/reference/api/generated
+prune doc/build/
 recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE

diff -r 0d705d2ae8eb306c867e664f65d83e91f980bb0a -r a66311d5d35c46a628684ac62c7a35698374c977 doc/Makefile
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,140 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = build
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html        to make standalone HTML files"
+	@echo "  dirhtml     to make HTML files named index.html in directories"
+	@echo "  singlehtml  to make a single large HTML file"
+	@echo "  pickle      to make pickle files"
+	@echo "  json        to make JSON files"
+	@echo "  htmlhelp    to make HTML files and a HTML help project"
+	@echo "  qthelp      to make HTML files and a qthelp project"
+	@echo "  devhelp     to make HTML files and a Devhelp project"
+	@echo "  epub        to make an epub"
+	@echo "  latex       to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf    to make LaTeX files and run them through pdflatex"
+	@echo "  text        to make text files"
+	@echo "  man         to make manual pages"
+	@echo "  changes     to make an overview of all changed/added/deprecated items"
+	@echo "  linkcheck   to check all external links for integrity"
+	@echo "  doctest     to run all doctests embedded in the documentation (if enabled)"
+	@echo "  clean 	     to remove the build directory"
+	@echo "  fullclean   to remove the build directory and autogenerated api docs"
+	@echo "  recipeclean to remove files produced by running the cookbook scripts"
+
+clean:
+	-rm -rf $(BUILDDIR)/*
+
+fullclean:
+	-rm -rf $(BUILDDIR)/*
+	-rm -rf source/reference/api/generated
+
+recipeclean:
+	-rm -rf _temp/*.done source/cookbook/_static/*
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/yt.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/yt.qhc"
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/yt"
+	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/yt"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make' in that directory to run these through (pdf)latex" \
+	      "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through pdflatex..."
+	make -C $(BUILDDIR)/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+	@echo
+	@echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+	@echo
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."

diff -r 0d705d2ae8eb306c867e664f65d83e91f980bb0a -r a66311d5d35c46a628684ac62c7a35698374c977 doc/README
--- a/doc/README
+++ b/doc/README
@@ -1,14 +1,10 @@
-This directory contains the compiled yt documentation.  Development of the
-documentation happens in a mercurial repository, yt-doc, available at:
-
-http://hg.yt-project.org/yt-doc/
-
-It's written to be used with Sphinx, a tool designed for writing Python
-documentation.  Sphinx is available at this URL:
+This directory contains the uncompiled yt documentation.  It's written to be
+used with Sphinx, a tool designed for writing Python documentation.  Sphinx is
+available at this URL:
 
 http://sphinx.pocoo.org/
 
-All of the pre-built HTML files, accessible with any web browser, are available
-in the build/ directory, as well as at:
+Because the documentation requires a number of dependencies, we provide
+pre-built versions online, accessible here:
 
-http://yt-project.org/doc/index.html
+http://yt-project.org/docs/

diff -r 0d705d2ae8eb306c867e664f65d83e91f980bb0a -r a66311d5d35c46a628684ac62c7a35698374c977 doc/cheatsheet.tex
--- /dev/null
+++ b/doc/cheatsheet.tex
@@ -0,0 +1,379 @@
+\documentclass[10pt,landscape]{article}
+\usepackage{multicol}
+\usepackage{calc}
+\usepackage{ifthen}
+\usepackage[landscape]{geometry}
+\usepackage[colorlinks = true, linkcolor=blue, citecolor=blue, urlcolor=blue]{hyperref}
+
+% To make this come out properly in landscape mode, do one of the following
+% 1.
+%  pdflatex latexsheet.tex
+%
+% 2.
+%  latex latexsheet.tex
+%  dvips -P pdf  -t landscape latexsheet.dvi
+%  ps2pdf latexsheet.ps
+
+
+% If you're reading this, be prepared for confusion.  Making this was
+% a learning experience for me, and it shows.  Much of the placement
+% was hacked in; if you make it better, let me know...
+
+
+% 2008-04
+% Changed page margin code to use the geometry package. Also added code for
+% conditional page margins, depending on paper size. Thanks to Uwe Ziegenhagen
+% for the suggestions.
+
+% 2006-08
+% Made changes based on suggestions from Gene Cooperman. <gene at ccs.neu.edu>
+
+% 2012-11 - Stephen Skory
+% Converted the latex cheat sheet to a yt cheat sheet, taken from
+% http://www.stdout.org/~winston/latex/
+
+
+% This sets page margins to .5 inch if using letter paper, and to 1cm
+% if using A4 paper. (This probably isn't strictly necessary.)
+% If using another size paper, use default 1cm margins.
+\ifthenelse{\lengthtest { \paperwidth = 11in}}
+	{ \geometry{top=.5in,left=.5in,right=.5in,bottom=0.85in} }
+	{\ifthenelse{ \lengthtest{ \paperwidth = 297mm}}
+		{\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} }
+		{\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} }
+	}
+
+% Turn off header and footer
+\pagestyle{empty}
+ 
+
+% Redefine section commands to use less space
+\makeatletter
+\renewcommand{\section}{\@startsection{section}{1}{0mm}%
+                                {-1ex plus -.5ex minus -.2ex}%
+                                {0.5ex plus .2ex}%
+                                {\normalfont\large\bfseries}}
+\renewcommand{\subsection}{\@startsection{subsection}{2}{0mm}%
+                                {-1explus -.5ex minus -.2ex}%
+                                {0.5ex plus .2ex}%
+                                {\normalfont\normalsize\bfseries}}
+\renewcommand{\subsubsection}{\@startsection{subsubsection}{3}{0mm}%
+                                {-1ex plus -.5ex minus -.2ex}%
+                                {1ex plus .2ex}%
+                                {\normalfont\small\bfseries}}
+\makeatother
+
+% Define BibTeX command
+\def\BibTeX{{\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08em
+    T\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}}
+
+% Don't print section numbers
+\setcounter{secnumdepth}{0}
+
+
+\setlength{\parindent}{0pt}
+\setlength{\parskip}{0pt plus 0.5ex}
+
+
+% -----------------------------------------------------------------------
+
+\begin{document}
+
+\raggedright
+\fontsize{3mm}{3mm}\selectfont
+\begin{multicols}{3}
+
+
+% multicol parameters
+% These lengths are set only within the two main columns
+%\setlength{\columnseprule}{0.25pt}
+\setlength{\premulticols}{1pt}
+\setlength{\postmulticols}{1pt}
+\setlength{\multicolsep}{1pt}
+\setlength{\columnsep}{2pt}
+
+\begin{center}
+     \Large{\textbf{yt Cheat Sheet}} \\
+\end{center}
+
+\subsection{General Info}
+For everything yt please see \url{http://yt-project.org}.
+Documentation \url{http://yt-project.org/doc/index.html}.
+Need help? Start here \url{http://yt-project.org/doc/help/} and then
+try the IRC chat room \url{http://yt-project.org/irc.html},
+or the mailing list \url{http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org}.
+{\bf Installing yt:} The easiest way to install yt is to use the installation script
+found on the yt homepage or the docs linked above.
+
+\subsection{Command Line yt}
+yt, and its convenience functions, are launched from a command line prompt.
+Many commands have flags to control behavior.
+Commands can be followed by
+{\bf {-}{-}help} (e.g. {\bf yt render {-}{-}help}) for detailed help for that command
+including a list of the available flags.
+
+\texttt{iyt}\textemdash\ Load yt and IPython. \\
+\texttt{yt load} {\it dataset}   \textemdash\ Load a single dataset.  \\
+\texttt{yt help} \textemdash\ Print yt help information. \\
+\texttt{yt stats} {\it dataset} \textemdash\ Print stats of a dataset. \\
+\texttt{yt update} \textemdash\ Update yt to the most recent version.\\
+\texttt{yt update --all} \textemdash\ Update yt and its dependencies to the most recent version. \\
+\texttt{yt instinfo} \textemdash\ yt installation information. \\
+\texttt{yt notebook} \textemdash\ Run the IPython notebook server. \\
+\texttt{yt serve} ({\it dataset}) \textemdash\  Run yt-specific web GUI ({\it dataset} is optional).\\
+\texttt{yt upload\_image} {\it image.png} \textemdash\ Upload PNG image to imgur.com. \\
+\texttt{yt upload\_notebook} {\it notebook.nb} \textemdash\ Upload IPython notebook to hub.yt-project.org.\\
+\texttt{yt plot} {\it dataset} \textemdash\ Create a set of images.\\
+\texttt{yt render} {\it dataset} \textemdash\ Create a simple
+ volume rendering. \\
+\texttt{yt mapserver} {\it dataset} \textemdash\ View a plot/projection in a Gmaps-like
+ interface. \\
+\texttt{yt pastebin} {\it text.out} \textemdash\ Post text to the pastebin at
+ paste.yt-project.org. \\ 
+\texttt{yt pastebin\_grab} {\it identifier} \textemdash\ Print content of pastebin to
+ STDOUT. \\
+ \texttt{yt hub\_register} \textemdash\ Register with
+hub.yt-project.org. \\
+\texttt{yt hub\_submit} \textemdash\ Submit hg repo to
+hub.yt-project.org. \\
+\texttt{yt bootstrap\_dev} \textemdash\ Bootstrap a yt 
+development environment. \\
+\texttt{yt bugreport} \textemdash\ Report a yt bug. \\
+\texttt{yt hop} {\it dataset} \textemdash\  Run hop on a dataset. \\
+\texttt{yt rpdb} \textemdash\ Connect to a running rpdb
+ session.
+
+\subsection{yt Imports}
+In order to use yt, Python must load the relevant yt modules into memory.
+The import commands are entered in the Python/IPython shell or
+used as part of a script.
+\newlength{\MyLen}
+\settowidth{\MyLen}{\texttt{letterpaper}/\texttt{a4paper} \ }
+\texttt{from yt.mods import \textasteriskcentered}  \textemdash\ 
+Load base yt  modules. \\
+\texttt{from yt.config import ytcfg}  \textemdash\ 
+Used to set yt configuration options.
+ If used, must be called before importing any other module.\\
+\texttt{from yt.analysis\_modules.api import \textasteriskcentered}   \textemdash\ 
+Load all yt analysis modules. \\
+\texttt{from yt.analysis\_modules.\emph{halo\_finding}.api import \textasteriskcentered}  \textemdash\ 
+Load halo finding modules. Other modules
+are loaded in a similar way by swapping the 
+{\em emphasized} text.
+See the \textbf{Analysis Modules} section for a listing and short descriptions of each.
+
+\subsection{Numpy Arrays}
+Simulation data in yt is returned in Numpy arrays. The Numpy package provides a wealth of built-in
+functions that operate on Numpy arrays. Here is a very brief list of some useful ones.
+Please see \url{http://docs.scipy.org/doc/numpy/reference/} for the full
+numpy documentation.\\
+\settowidth{\MyLen}{\texttt{multicol} }
+\texttt{v = a.max(), a.min()} \textemdash\ Return maximum, minimum of \texttt{a}. \\
+\texttt{index = a.argmax(), a.argmin()} \textemdash\ Return index of max, 
+min value of \texttt{a}.\\
+\texttt{v = a[}{\it index}\texttt{]} \textemdash\ Select a single value from \texttt{a} at location {\it index}.\\
+\texttt{b = a[}{\it i:j}\texttt{]} \textemdash\ Select the slice of values from \texttt{a} between
+locations {\it i} and {\it j-1}, saved to a new Numpy array \texttt{b} of length {\it j-i}. \\
+\texttt{sel = (a > const)}  \textemdash\ Create a new boolean Numpy array \texttt{sel}, of the same shape as \texttt{a},
+that marks where \texttt{a > const} holds. Other operators (e.g. \textless, !=, \%) work as well.\\
+\texttt{b = a[sel]} \textemdash\ Create a new Numpy array \texttt{b} made up of elements from \texttt{a} that correspond to elements of \texttt{sel}
+that are {\it True}. In the above example \texttt{b} would be all elements of \texttt{a} that are greater than \texttt{const}.\\
+\texttt{a.dump({\it filename.dat})} \textemdash\ Save \texttt{a} to the binary file {\it filename.dat}.\\
+\texttt{a = np.load({\it filename.dat})} \textemdash\ Load the contents of {\it filename.dat} into \texttt{a}.
+
+\subsection{IPython Tips}
+\settowidth{\MyLen}{\texttt{multicol} }
+These tips work if IPython has been loaded, typically either by invoking
+\texttt{iyt} or \texttt{yt load} on the command line, or using the IPython notebook (\texttt{yt notebook}).
+\texttt{Tab complete} \textemdash\ IPython will attempt to auto-complete a
+variable or function name when the \texttt{Tab} key is pressed, e.g. {\it HaloFi}\textendash\texttt{Tab} would auto-complete
+to {\it HaloFinder}. This also works with imports, e.g. {\it from numpy.random.}\textendash\texttt{Tab}
+would give you a list of random functions (note the trailing period before hitting \texttt{Tab}).\\
+\texttt{?, ??} \textemdash\ Appending one or two question marks at the end of any object gives you
+detailed information about it, e.g. {\it variable\_name}?.\\
+Below a few IPython ``magics'' are listed, which are IPython-specific shortcut commands.\\
+\texttt{\%paste} \textemdash\ Paste content from the system clipboard into the IPython shell.\\
+\texttt{\%hist} \textemdash\ Print recent command history.\\
+\texttt{\%quickref} \textemdash\ Print IPython quick reference.\\
+\texttt{\%pdb} \textemdash\ Automatically enter the Python debugger at an exception.\\
+\texttt{\%time, \%timeit} \textemdash\ Find running time of expressions for benchmarking.\\
+\texttt{\%lsmagic} \textemdash\ List all available IPython magics. Hint: \texttt{?} works with magics.\\
+
+
+Please see \url{http://ipython.org/documentation.html} for the full
+IPython documentation.
+
+\subsection{Load and Access Data}
+The first step in using yt is to reference a simulation snapshot.
+After that, simulation data is generally accessed in yt using {\it Data Containers} which are Python objects
+that define a region of simulation space from which data should be selected.
+\settowidth{\MyLen}{\texttt{multicol} }
+\texttt{pf = load(}{\it dataset}\texttt{)} \textemdash\   Reference a single snapshot.\\
+\texttt{dd = pf.h.all\_data()} \textemdash\ Select the entire volume.\\
+\texttt{a = dd[}{\it field\_name}\texttt{]} \textemdash\ Saves the contents of {\it field\_name} into the
+numpy array \texttt{a}. Similarly for other data containers.\\
+\texttt{pf.h.field\_list} \textemdash\ A list of available fields in the snapshot. \\
+\texttt{pf.h.derived\_field\_list} \textemdash\ A list of available derived fields
+in the snapshot. \\
+\texttt{val, loc = pf.h.find\_max("Density")} \textemdash\ Find the \texttt{val}ue of
+the maximum of the field \texttt{Density} and its \texttt{loc}ation. \\
+\texttt{sp = pf.sphere(}{\it cen}\texttt{,}{\it radius}\texttt{)} \textemdash\   Create a spherical data 
+container. {\it cen} may be a coordinate, or ``max'' which 
+centers on the max density point. {\it radius} may be a float in 
+code units or a tuple of ({\it length, unit}).\\
+
+\texttt{re = pf.region({\it cen}, {\it left edge}, {\it right edge})} \textemdash\ Create a
+rectilinear data container. {\it cen} is required but not used.
+{\it left} and {\it right edge} are coordinate values that define the region.
+
+\texttt{di = pf.disk({\it cen}, {\it normal}, {\it radius}, {\it height})} \textemdash\ 
+Create a cylindrical data container centered at {\it cen} along the 
+direction set by {\it normal}, with total length
+ 2$\times${\it height} and with radius {\it radius}. \\
+ 
+ \texttt{bl = pf.boolean({\it constructor})} \textemdash\ Create a boolean data
+ container. {\it constructor} is a list of pre-defined non-boolean 
+ data containers with nested boolean logic using the
+ ``AND'', ``NOT'', or ``OR'' operators. E.g. {\it constructor=}
+ {\it [sp, ``NOT'', (di, ``OR'', re)]} gives a volume defined
+ by {\it sp} minus the patches covered by {\it di} and {\it re}.\\
+ 
+\texttt{pf.h.save\_object(sp, {\it ``sp\_for\_later''})} \textemdash\ Save an object (\texttt{sp}) for later use.\\
+\texttt{sp = pf.h.load\_object({\it ``sp\_for\_later''})} \textemdash\ Recover a saved object.\\
+
+
+\subsection{Defining New Fields \& Quantities}
+\texttt{yt} supports on-disk fields, fields generated on-demand, and fields defined in-memory. Quantities reduce a field (e.g. "Density") defined over an object (e.g. "sphere") to a single value (e.g. "Mass"). \\
+\texttt{def \_MetalMassMsun({\it field},{\it data})}\\
+\texttt{\hspace{4 mm} return data["Metallicity"]*data["CellMassMsun"]}\\
+\texttt{add\_field("MetalMassMsun",function=\_MetalMassMsun)}\\
+Define a new quantity; note the first function operates on grids and data objects and the second on the results of the first. \\
+\texttt{def \_TotalMass(data): }\\
+\texttt{\hspace{4 mm} baryon\_mass = data["CellMassMsun"].sum()}\\
+\texttt{\hspace{4 mm} particle\_mass = data["ParticleMassMsun"].sum()}\\
+\texttt{\hspace{4 mm} return baryon\_mass, particle\_mass}\\
+\texttt{def \_combTotalMass(data, baryon\_mass, particle\_mass):}\\
+\texttt{\hspace{4 mm} return baryon\_mass.sum() + particle\_mass.sum()}\\
+\texttt{add\_quantity("TotalMass", function=\_TotalMass,}\\
+\texttt{\hspace{4 mm} combine\_function=\_combTotalMass, n\_ret = 2)}\\
+
+
+
+\subsection{Slices and Projections}
+\settowidth{\MyLen}{\texttt{multicol} }
+\texttt{slc = SlicePlot(pf, {\it axis}, {\it field}, {\it center=}, {\it width=}, {\it weight\_field=}, {\it additional parameters})} \textemdash\ Make a slice plot
+perpendicular to {\it axis} of {\it field} weighted by {\it weight\_field} at (code-units) {\it center} with 
+{\it width} in code units or a (value, unit) tuple. Hint: try {\it SlicePlot?} in IPython to see additional parameters.\\
+\texttt{slc.save({\it file\_prefix})} \textemdash\ Save the slice to a png with name prefix {\it file\_prefix}.
+\texttt{.save()} works similarly for the commands below.\\
+
+\texttt{prj = ProjectionPlot(pf, {\it axis}, {\it field}, {\it addit. params})} \textemdash\ Make a projection. \\
+\texttt{prj = OffAxisSlicePlot(pf, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash\ Make an off-axis slice. Note this takes an array of fields. \\
+\texttt{prj = OffAxisProjectionPlot(pf, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash\ Make an off-axis projection. Note this takes an array of fields. \\
+
+\subsection{Plot Annotations}
+\settowidth{\MyLen}{\texttt{multicol} }
+Plot callbacks are functions itemized in a registry that is attached to every plot object. They can be accessed and called like \texttt{prj.annotate\_velocity(factor=16, normalize=False)}. Most callbacks also accept a {\it plot\_args} dict that is fed to the matplotlib annotator. \\
+\texttt{velocity({\it factor=},{\it scale=},{\it scale\_units=}, {\it normalize=})} \textemdash\ Uses field "x-velocity" to draw quivers\\
+\texttt{magnetic\_field({\it factor=},{\it scale=},{\it scale\_units=}, {\it normalize=})} \textemdash\ Uses field "Bx" to draw quivers\\
+\texttt{quiver({\it field\_x},{\it field\_y},{\it factor=},{\it scale=},{\it scale\_units=}, {\it normalize=})} \\
+\texttt{contour({\it field=},{\it ncont=},{\it factor=},{\it clim=},{\it take\_log=}, {\it additional parameters})} \textemdash\ Plot {\it ncont} contours of {\it field}, optionally using {\it take\_log}, upper and lower {\it c}ontour {\it lim}its, and {\it factor} points in the interpolation.\\
+\texttt{grids({\it alpha=}, {\it draw\_ids=}, {\it periodic=}, {\it min\_level=}, {\it max\_level=})} \textemdash Add grid boundaries. \\
+\texttt{streamlines({\it field\_x},{\it field\_y},{\it factor=},{\it density=})}\\
+\texttt{clumps({\it clumplist})} \textemdash\ Generate {\it clumplist} using the clump finder and plot. \\
+\texttt{arrow({\it pos}, {\it code\_size})} \textemdash\ Add an arrow at a {\it pos}ition. \\
+\texttt{point({\it pos}, {\it text})} \textemdash\ Add text at a {\it pos}ition. \\
+\texttt{marker({\it pos}, {\it marker=})} \textemdash\ Add a matplotlib-defined marker at a {\it pos}ition. \\
+\texttt{sphere({\it center}, {\it radius}, {\it text=})} \textemdash\ Draw a circle and append {\it text}.\\
+\texttt{hop\_circles({\it hop\_output}, {\it max\_number=}, {\it annotate=}, {\it min\_size=}, {\it max\_size=}, {\it font\_size=}, {\it print\_halo\_size=}, {\it fixed\_radius=}, {\it min\_mass=}, {\it print\_halo\_mass=}, {\it width=})} \textemdash\ Draw a halo, printing its ID and mass, clipping halos depending on the number of particles ({\it size}), and optionally fixing the drawn circle radius to be constant for all halos.\\
+\texttt{hop\_particles({\it hop\_output},{\it max\_number=},{\it p\_size=},\\
+{\it min\_size},{\it alpha=})} \textemdash\ Draw particle positions for member halos with a certain number of pixels per particle.\\
+\texttt{particles({\it width},{\it p\_size=},{\it col=}, {\it marker=}, {\it stride=}, {\it ptype=}, {\it stars\_only=}, {\it dm\_only=}, {\it minimum\_mass=}, {\it alpha=})}  \textemdash\  Draw particles of {\it p\_size} pixels in a slab of {\it width} with {\it col}or using a matplotlib {\it marker} plotting only every {\it stride} number of particles.\\
+\texttt{title({\it text})}\\
+
+\subsection{The $\sim$/.yt/ Directory}
+\settowidth{\MyLen}{\texttt{multicol} }
+yt will automatically check for configuration files in a special directory in the user's home directory (\texttt{\$HOME/.yt/}).
+
+The \texttt{config} file \textemdash\ Settings that control runtime behavior. \\
+The \texttt{my\_plugins.py} file \textemdash\ Add functions, derived fields, constants, or other commonly-used Python code to yt.
+
+
+
+
+\subsection{Analysis Modules}
+\settowidth{\MyLen}{\texttt{multicol}}
+The import name for each module is listed at the end of each description (see \textbf{yt Imports}).
+
+\texttt{Absorption Spectrum} \textemdash\ (\texttt{absorption\_spectrum}). \\
+\texttt{Clump Finder} \textemdash\ Find clumps defined by density thresholds (\texttt{level\_sets}). \\
+\texttt{Coordinate Transformation} \textemdash\ (\texttt{coordinate\_transformation}). \\
+\texttt{Halo Finding} \textemdash\ Locate halos of dark matter particles (\texttt{halo\_finding}). \\
+\texttt{Halo Mass Function} \textemdash\ Find halo mass functions from data and from theory (\texttt{halo\_mass\_function}). \\
+\texttt{Halo Profiling} \textemdash\ Profile and project multiple halos (\texttt{halo\_profiler}). \\
+\texttt{Halo Merger Tree} \textemdash\ Create a database of halo mergers (\texttt{halo\_merger\_tree}). \\
+\texttt{Light Cone Generator} \textemdash\ Stitch datasets together to perform analysis over cosmological volumes. \\
+\texttt{Light Ray Generator} \textemdash\ Analyze the path of light rays.\\
+\texttt{Radial Column Density} \textemdash\ Calculate column densities around a point (\texttt{radial\_column\_density}). \\
+\texttt{Rockstar Halo Finding} \textemdash\ Locate halos of dark matter using the Rockstar halo finder (\texttt{halo\_finding.rockstar}). \\
+\texttt{Star Particle Analysis} \textemdash\ Analyze star formation history and assemble spectra (\texttt{star\_analysis}). \\
+\texttt{Sunrise Exporter} \textemdash\ Export data to the sunrise visualization format (\texttt{sunrise\_export}). \\
+\texttt{Two Point Functions} \textemdash\ Two point correlations (\texttt{two\_point\_functions}). \\
+
+
+\subsection{Parallel Analysis}
+\settowidth{\MyLen}{\texttt{multicol}}
+Nearly all of yt is parallelized using MPI.
+The {\it mpi4py} package must be installed for parallelism in yt.
+To install it, {\it pip install mpi4py} on the command line usually works.
+Execute Python in parallel similarly to this:\\
+{\it mpirun -n 12 python script.py --parallel}\\
+This command may differ for each system on which you use yt;
+please consult the system documentation for details on how to run parallel applications.
+
+\texttt{from yt.pmods import *} \textemdash\ Load yt faster when in parallel.
+This replaces the usual \texttt{from yt.mods import *}.\\
+\texttt{parallel\_objects()} \textemdash\ A way to parallelize analysis over objects
+(such as halos or clumps).\\
+
+
+\subsection{Pre-Installed Versions}
+\settowidth{\MyLen}{\texttt{multicol}}
+yt is pre-installed on several supercomputer systems.
+
+\textbf{NICS Kraken} \textemdash\ {\it module load yt} \\
+
+
+\subsection{Mercurial}
+\settowidth{\MyLen}{\texttt{multicol}}
+Please see \url{http://mercurial.selenic.com/} for the full Mercurial documentation.
+
+\texttt{hg clone https://bitbucket.org/yt\_analysis/yt} \textemdash\ Clone a copy of yt. \\
+\texttt{hg status} \textemdash\ Files changed in working directory.\\
+\texttt{hg diff} \textemdash\ Print diff of all changed files in working directory. \\
+\texttt{hg diff -r{\it RevX} -r{\it RevY}} \textemdash\ Print diff of all changes between revision {\it RevX} and {\it RevY}.\\
+\texttt{hg log} \textemdash\ History of changes.\\
+\texttt{hg cat -r{\it RevX file}} \textemdash\ Print the contents of {\it file} from revision {\it RevX}.\\
+\texttt{hg heads} \textemdash\ Print all the current heads. \\
+\texttt{hg revert -r{\it RevX file}} \textemdash\ Revert {\it file} to revision {\it RevX}. The changed on-disk version is
+moved to {\it file.orig}. \\
+\texttt{hg commit} \textemdash\ Commit changes to repository. \\
+\texttt{hg push} \textemdash\ Push changes to default remote repository. \\
+\texttt{hg pull} \textemdash\ Pull changes from default remote repository. \\
+\texttt{hg serve} \textemdash\ Launch a webserver on the local machine to examine the repository in a web browser. \\
+
+\subsection{FAQ}
+\settowidth{\MyLen}{\texttt{multicol}}
+
+\texttt{pf.field\_info[`field'].take\_log = False} \textemdash\ When plotting \texttt{field}, do not take log.
+You must access \texttt{pf.h} before this command. \\
+
+
+%\rule{0.3\linewidth}{0.25pt}
+%\scriptsize
+
+% Can put some final stuff here like copyright etc...
+
+\end{multicols}
+
+\end{document}

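For convenience, the cheat sheet's derived-field snippet written out as
a runnable script against the yt 2.x API it documents (the dataset path
is hypothetical):

    from yt.mods import *

    # Metal mass in solar masses, computed cell by cell.
    def _MetalMassMsun(field, data):
        return data["Metallicity"] * data["CellMassMsun"]

    add_field("MetalMassMsun", function=_MetalMassMsun)

    pf = load("MyDataset/data0001")  # hypothetical dataset
    dd = pf.h.all_data()
    print(dd["MetalMassMsun"].sum())
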
diff -r 0d705d2ae8eb306c867e664f65d83e91f980bb0a -r a66311d5d35c46a628684ac62c7a35698374c977 doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -34,11 +34,11 @@
  * Do not import "*" from anything other than "yt.funcs".
  * Internally, only import from source files directly -- instead of:
 
-   from yt.visualization.api import PlotCollection
+   from yt.visualization.api import ProjectionPlot
 
    do:
 
-   from yt.visualization.plot_collection import PlotCollection
+   from yt.visualization.plot_window import ProjectionPlot
 
  * Numpy is to be imported as "np", after a long time of using "na".
  * Do not use too many keyword arguments.  If you have a lot of keyword
@@ -60,7 +60,7 @@
  * Avoid Enzo-isms.  This includes but is not limited to:
    * Hard-coding parameter names that are the same as those in Enzo.  The
      following translation table should be of some help.  Note that the
-     parameters are now properties on a StaticOutput subclass: you access them
+     parameters are now properties on a Dataset subclass: you access them
      like pf.refine_by .
      * RefineBy => refine_by
      * TopGridRank => dimensionality

diff -r 0d705d2ae8eb306c867e664f65d83e91f980bb0a -r a66311d5d35c46a628684ac62c7a35698374c977 doc/docstring_idioms.txt
--- a/doc/docstring_idioms.txt
+++ b/doc/docstring_idioms.txt
@@ -43,7 +43,7 @@
 To indicate the return type of a given object, you can reference it using this
 construction:
 
-    This function returns a :class:`PlotCollection`.
+    This function returns a :class:`ProjectionPlot`.
 
 To reference a function, you can use:
 
@@ -51,4 +51,4 @@
 
 To reference a method, you can use:
 
-    To add a projection, use :meth:`PlotCollection.add_projection`.
+    To add a projection, use :meth:`ProjectionPlot.set_width`.

diff -r 0d705d2ae8eb306c867e664f65d83e91f980bb0a -r a66311d5d35c46a628684ac62c7a35698374c977 doc/extensions/README
--- /dev/null
+++ b/doc/extensions/README
@@ -0,0 +1,4 @@
+This includes a version of the Numpy Documentation extension that has been
+slightly modified to emit extra TOC tree items.
+
+-- Matt Turk, March 25, 2011

diff -r 0d705d2ae8eb306c867e664f65d83e91f980bb0a -r a66311d5d35c46a628684ac62c7a35698374c977 doc/extensions/notebook_sphinxext.py
--- /dev/null
+++ b/doc/extensions/notebook_sphinxext.py
@@ -0,0 +1,188 @@
+import os, shutil, string, glob, re
+from sphinx.util.compat import Directive
+from docutils import nodes
+from docutils.parsers.rst import directives
+from IPython.nbconvert import html, python
+from IPython.nbformat.current import read, write
+from runipy.notebook_runner import NotebookRunner, NotebookError
+
+class NotebookDirective(Directive):
+    """Insert an evaluated notebook into a document
+
+    This uses runipy and nbconvert to transform a path to an unevaluated notebook
+    into html suitable for embedding in a Sphinx document.
+    """
+    required_arguments = 1
+    optional_arguments = 1
+    option_spec = {'skip_exceptions' : directives.flag}
+    final_argument_whitespace = True
+
+    def run(self): # check if there are spaces in the notebook name
+        nb_path = self.arguments[0]
+        if ' ' in nb_path: raise ValueError(
+            "Due to issues with docutils stripping spaces from links, white "
+            "space is not allowed in notebook filenames '{0}'".format(nb_path))
+        # check if raw html is supported
+        if not self.state.document.settings.raw_enabled:
+            raise self.warning('"%s" directive disabled.' % self.name)
+
+        # get path to notebook
+        source_dir = os.path.dirname(
+            os.path.abspath(self.state.document.current_source))
+        nb_filename = self.arguments[0]
+        nb_basename = os.path.basename(nb_filename)
+        rst_file = self.state_machine.document.attributes['source']
+        rst_dir = os.path.abspath(os.path.dirname(rst_file))
+        nb_abs_path = os.path.abspath(os.path.join(rst_dir, nb_filename))
+
+        # Move files around.
+        rel_dir = os.path.relpath(rst_dir, setup.confdir)
+        rel_path = os.path.join(rel_dir, nb_basename)
+        dest_dir = os.path.join(setup.app.builder.outdir, rel_dir)
+        dest_path = os.path.join(dest_dir, nb_basename)
+
+        if not os.path.exists(dest_dir):
+            os.makedirs(dest_dir)
+
+        # Copy unevaluated script
+        try:
+            shutil.copyfile(nb_abs_path, dest_path)
+        except IOError:
+            raise RuntimeError("Unable to copy notebook to build destination.")
+
+        dest_path_eval = string.replace(dest_path, '.ipynb', '_evaluated.ipynb')
+        dest_path_script = string.replace(dest_path, '.ipynb', '.py')
+        rel_path_eval = string.replace(nb_basename, '.ipynb', '_evaluated.ipynb')
+        rel_path_script = string.replace(nb_basename, '.ipynb', '.py')
+
+        # Create python script version
+        unevaluated_text = nb_to_html(nb_abs_path)
+        script_text = nb_to_python(nb_abs_path)
+        f = open(dest_path_script, 'w')
+        f.write(script_text.encode('utf8'))
+        f.close()
+
+        skip_exceptions = 'skip_exceptions' in self.options
+
+        evaluated_text = evaluate_notebook(nb_abs_path, dest_path_eval,
+                                           skip_exceptions=skip_exceptions)
+
+        # Create link to notebook and script files
+        link_rst = "(" + \
+                   formatted_link(nb_basename) + "; " + \
+                   formatted_link(rel_path_eval) + "; " + \
+                   formatted_link(rel_path_script) + \
+                   ")"
+
+        self.state_machine.insert_input([link_rst], rst_file)
+
+        # create notebook node
+        attributes = {'format': 'html', 'source': 'nb_path'}
+        nb_node = notebook_node('', evaluated_text, **attributes)
+        (nb_node.source, nb_node.line) = \
+            self.state_machine.get_source_and_line(self.lineno)
+
+        # add dependency
+        self.state.document.settings.record_dependencies.add(nb_abs_path)
+
+        # clean up image/data files left behind by evaluating the notebook.
+        png_files = glob.glob("*.png")
+        fits_files = glob.glob("*.fits")
+        h5_files = glob.glob("*.h5")
+        for file in png_files + fits_files + h5_files:
+            os.remove(file)
+
+        return [nb_node]
+
+
+class notebook_node(nodes.raw):
+    pass
+
+def nb_to_python(nb_path):
+    """convert notebook to python script"""
+    exporter = python.PythonExporter()
+    output, resources = exporter.from_filename(nb_path)
+    return output
+
+def nb_to_html(nb_path):
+    """convert notebook to html"""
+    exporter = html.HTMLExporter(template_file='full')
+    output, resources = exporter.from_filename(nb_path)
+    header = output.split('<head>', 1)[1].split('</head>',1)[0]
+    body = output.split('<body>', 1)[1].split('</body>',1)[0]
+
+    # http://imgur.com/eR9bMRH
+    header = header.replace('<style', '<style scoped="scoped"')
+    header = header.replace('body {\n  overflow: visible;\n  padding: 8px;\n}\n', '')
+    header = header.replace("code,pre{", "code{")
+
+    # Filter out styles that conflict with the sphinx theme.
+    filter_strings = [
+        'navbar',
+        'body{',
+        'alert{',
+        'uneditable-input{',
+        'collapse{',
+    ]
+    filter_strings.extend(['h%s{' % (i+1) for i in range(6)])
+
+    line_begin_strings = [
+        'pre{',
+        'p{margin'
+        ]
+
+    header_lines = filter(
+        lambda x: not any([s in x for s in filter_strings]), header.split('\n'))
+    header_lines = filter(
+        lambda x: not any([x.startswith(s) for s in line_begin_strings]), header_lines)
+
+    header = '\n'.join(header_lines)
+
+    # concatenate raw html lines
+    lines = ['<div class="ipynotebook">']
+    lines.append(header)
+    lines.append(body)
+    lines.append('</div>')
+    return '\n'.join(lines)
+
+def evaluate_notebook(nb_path, dest_path=None, skip_exceptions=False):
+    # Create evaluated version and save it to the dest path.
+    # Note: the runner is created with pylab=False, so notebooks must
+    # produce figures through their own imports and plotting calls.
+    notebook = read(open(nb_path), 'json')
+    nb_runner = NotebookRunner(notebook, pylab=False)
+    try:
+        nb_runner.run_notebook(skip_exceptions=skip_exceptions)
+    except NotebookError as e:
+        print ''
+        print e
+        # Return the traceback, filtering out ANSI color codes.
+        # http://stackoverflow.com/questions/13506033/filtering-out-ansi-escape-sequences
+        return 'Notebook conversion failed with the following traceback: \n%s' % \
+            re.sub(r'\\033[\[\]]([0-9]{1,2}([;@][0-9]{0,2})*)*[mKP]?', '', str(e))
+    if dest_path is None:
+        dest_path = 'temp_evaluated.ipynb'
+    write(nb_runner.nb, open(dest_path, 'w'), 'json')
+    ret = nb_to_html(dest_path)
+    if dest_path == 'temp_evaluated.ipynb':
+        os.remove(dest_path)
+    return ret
+
+def formatted_link(path):
+    return "`%s <%s>`__" % (os.path.basename(path), path)
+
+def visit_notebook_node(self, node):
+    self.visit_raw(node)
+
+def depart_notebook_node(self, node):
+    self.depart_raw(node)
+
+def setup(app):
+    setup.app = app
+    setup.config = app.config
+    setup.confdir = app.confdir
+
+    app.add_node(notebook_node,
+                 html=(visit_notebook_node, depart_notebook_node))
+
+    app.add_directive('notebook', NotebookDirective)

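The helpers above can also be driven outside of Sphinx.  A sketch,
assuming Sphinx, runipy, and the IPython 1.x notebook tooling are
installed, and with hypothetical file names:

    from notebook_sphinxext import evaluate_notebook

    # Run every cell of a notebook and get back embeddable HTML;
    # skip_exceptions=True keeps going past failing cells.
    body = evaluate_notebook("example.ipynb",
                             dest_path="example_evaluated.ipynb",
                             skip_exceptions=True)
    print(len(body))
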
diff -r 0d705d2ae8eb306c867e664f65d83e91f980bb0a -r a66311d5d35c46a628684ac62c7a35698374c977 doc/extensions/notebookcell_sphinxext.py
--- /dev/null
+++ b/doc/extensions/notebookcell_sphinxext.py
@@ -0,0 +1,67 @@
+import os, shutil, string, glob, io
+from sphinx.util.compat import Directive
+from docutils.parsers.rst import directives
+from IPython.nbconvert import html, python
+from IPython.nbformat import current
+from runipy.notebook_runner import NotebookRunner
+from jinja2 import FileSystemLoader
+from notebook_sphinxext import \
+    notebook_node, nb_to_html, nb_to_python, \
+    visit_notebook_node, depart_notebook_node, \
+    evaluate_notebook
+
+class NotebookCellDirective(Directive):
+    """Insert an evaluated notebook cell into a document
+
+    This uses runipy and nbconvert to transform an inline python
+    script into html suitable for embedding in a Sphinx document.
+    """
+    required_arguments = 0
+    optional_arguments = 1
+    has_content = True
+    option_spec = {'skip_exceptions' : directives.flag}
+
+    def run(self):
+        # check if raw html is supported
+        if not self.state.document.settings.raw_enabled:
+            raise self.warning('"%s" directive disabled.' % self.name)
+
+        # Construct notebook from cell content
+        content = "\n".join(self.content)
+        with open("temp.py", "w") as f:
+            f.write(content)
+
+        convert_to_ipynb('temp.py', 'temp.ipynb')
+
+        skip_exceptions = 'skip_exceptions' in self.options
+
+        evaluated_text = evaluate_notebook('temp.ipynb', skip_exceptions=skip_exceptions)
+
+        # create notebook node
+        attributes = {'format': 'html', 'source': 'nb_path'}
+        nb_node = notebook_node('', evaluated_text, **attributes)
+        (nb_node.source, nb_node.line) = \
+            self.state_machine.get_source_and_line(self.lineno)
+
+        # clean up
+        files = glob.glob("*.png") + ['temp.py', 'temp.ipynb']
+        for file in files:
+            os.remove(file)
+
+        return [nb_node]
+
+def setup(app):
+    setup.app = app
+    setup.config = app.config
+    setup.confdir = app.confdir
+
+    app.add_node(notebook_node,
+                 html=(visit_notebook_node, depart_notebook_node))
+
+    app.add_directive('notebook-cell', NotebookCellDirective)
+
+def convert_to_ipynb(py_file, ipynb_file):
+    with io.open(py_file, 'r', encoding='utf-8') as f:
+        notebook = current.reads(f.read(), format='py')
+    with io.open(ipynb_file, 'w', encoding='utf-8') as f:
+        current.write(notebook, f, format='ipynb')

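convert_to_ipynb is the piece that turns the directive's inline script
into a notebook.  Standalone, under the same dependency assumptions as
above (file names are hypothetical):

    from notebookcell_sphinxext import convert_to_ipynb

    # Write a one-line script and convert it to a single-cell notebook.
    with open("temp.py", "w") as f:
        f.write("print 'hello from a notebook cell'\n")
    convert_to_ipynb("temp.py", "temp.ipynb")
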
diff -r 0d705d2ae8eb306c867e664f65d83e91f980bb0a -r a66311d5d35c46a628684ac62c7a35698374c977 doc/extensions/numpydocmod/__init__.py
--- /dev/null
+++ b/doc/extensions/numpydocmod/__init__.py
@@ -0,0 +1,1 @@
+from numpydoc import setup

diff -r 0d705d2ae8eb306c867e664f65d83e91f980bb0a -r a66311d5d35c46a628684ac62c7a35698374c977 doc/extensions/numpydocmod/comment_eater.py
--- /dev/null
+++ b/doc/extensions/numpydocmod/comment_eater.py
@@ -0,0 +1,158 @@
+from cStringIO import StringIO
+import compiler
+import inspect
+import textwrap
+import tokenize
+
+from compiler_unparse import unparse
+
+
+class Comment(object):
+    """ A comment block.
+    """
+    is_comment = True
+    def __init__(self, start_lineno, end_lineno, text):
+        # int : The first line number in the block. 1-indexed.
+        self.start_lineno = start_lineno
+        # int : The last line number. Inclusive!
+        self.end_lineno = end_lineno
+        # str : The text block including '#' character but not any leading spaces.
+        self.text = text
+
+    def add(self, string, start, end, line):
+        """ Add a new comment line.
+        """
+        self.start_lineno = min(self.start_lineno, start[0])
+        self.end_lineno = max(self.end_lineno, end[0])
+        self.text += string
+
+    def __repr__(self):
+        return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
+            self.end_lineno, self.text)
+
+
+class NonComment(object):
+    """ A non-comment block of code.
+    """
+    is_comment = False
+    def __init__(self, start_lineno, end_lineno):
+        self.start_lineno = start_lineno
+        self.end_lineno = end_lineno
+
+    def add(self, string, start, end, line):
+        """ Add lines to the block.
+        """
+        if string.strip():
+            # Only add if not entirely whitespace.
+            self.start_lineno = min(self.start_lineno, start[0])
+            self.end_lineno = max(self.end_lineno, end[0])
+
+    def __repr__(self):
+        return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
+            self.end_lineno)
+
+
+class CommentBlocker(object):
+    """ Pull out contiguous comment blocks.
+    """
+    def __init__(self):
+        # Start with a dummy.
+        self.current_block = NonComment(0, 0)
+
+        # All of the blocks seen so far.
+        self.blocks = []
+
+        # The index mapping lines of code to their associated comment blocks.
+        self.index = {}
+
+    def process_file(self, file):
+        """ Process a file object.
+        """
+        for token in tokenize.generate_tokens(file.next):
+            self.process_token(*token)
+        self.make_index()
+
+    def process_token(self, kind, string, start, end, line):
+        """ Process a single token.
+        """
+        if self.current_block.is_comment:
+            if kind == tokenize.COMMENT:
+                self.current_block.add(string, start, end, line)
+            else:
+                self.new_noncomment(start[0], end[0])
+        else:
+            if kind == tokenize.COMMENT:
+                self.new_comment(string, start, end, line)
+            else:
+                self.current_block.add(string, start, end, line)
+
+    def new_noncomment(self, start_lineno, end_lineno):
+        """ We are transitioning from a comment to a noncomment.
+        """
+        block = NonComment(start_lineno, end_lineno)
+        self.blocks.append(block)
+        self.current_block = block
+
+    def new_comment(self, string, start, end, line):
+        """ Possibly add a new comment.
+        
+        Only adds a new comment if this comment is the only thing on the line.
+        Otherwise, it extends the noncomment block.
+        """
+        prefix = line[:start[1]]
+        if prefix.strip():
+            # Oops! Trailing comment, not a comment block.
+            self.current_block.add(string, start, end, line)
+        else:
+            # A comment block.
+            block = Comment(start[0], end[0], string)
+            self.blocks.append(block)
+            self.current_block = block
+
+    def make_index(self):
+        """ Make the index mapping lines of actual code to their associated
+        prefix comments.
+        """
+        for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
+            if not block.is_comment:
+                self.index[block.start_lineno] = prev
+
+    def search_for_comment(self, lineno, default=None):
+        """ Find the comment block just before the given line number.
+
+        Returns None (or the specified default) if there is no such block.
+        """
+        if not self.index:
+            self.make_index()
+        block = self.index.get(lineno, None)
+        text = getattr(block, 'text', default)
+        return text
+
+
+def strip_comment_marker(text):
+    """ Strip # markers at the front of a block of comment text.
+    """
+    lines = []
+    for line in text.splitlines():
+        lines.append(line.lstrip('#'))
+    text = textwrap.dedent('\n'.join(lines))
+    return text
+
+
+def get_class_traits(klass):
+    """ Yield all of the documentation for trait definitions on a class object.
+    """
+    # FIXME: gracefully handle errors here or in the caller?
+    source = inspect.getsource(klass)
+    cb = CommentBlocker()
+    cb.process_file(StringIO(source))
+    mod_ast = compiler.parse(source)
+    class_ast = mod_ast.node.nodes[0]
+    for node in class_ast.code.nodes:
+        # FIXME: handle other kinds of assignments?
+        if isinstance(node, compiler.ast.Assign):
+            name = node.nodes[0].name
+            rhs = unparse(node.expr).strip()
+            doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
+            yield name, rhs, doc
+

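strip_comment_marker above is easy to sanity-check in isolation.  A
hypothetical quick example (assumes the numpydocmod directory is on
sys.path; note the module is Python 2 only, since it relies on the
long-removed compiler package):

    from comment_eater import strip_comment_marker

    print(strip_comment_marker("# one\n#    two"))
    # -> "one\n   two": the '#' markers are stripped and the common
    # one-space indent is dedented.
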
diff -r 0d705d2ae8eb306c867e664f65d83e91f980bb0a -r a66311d5d35c46a628684ac62c7a35698374c977 doc/extensions/numpydocmod/compiler_unparse.py
--- /dev/null
+++ b/doc/extensions/numpydocmod/compiler_unparse.py
@@ -0,0 +1,860 @@
+""" Turn compiler.ast structures back into executable python code.
+
+    The unparse method takes a compiler.ast tree and transforms it back into
+    valid python code.  It is incomplete and currently only works for
+    import statements, function calls, function definitions, assignments, and
+    basic expressions.
+
+    Inspired by python-2.5-svn/Demo/parser/unparse.py
+
+    fixme: We may want to move to using _ast trees because the compiler for
+           them is about 6 times faster than compiler.compile.
+"""
+
+import sys
+import cStringIO
+from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
+
+def unparse(ast, single_line_functions=False):
+    s = cStringIO.StringIO()
+    UnparseCompilerAst(ast, s, single_line_functions)
+    return s.getvalue().lstrip()
+
+op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
+                  'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
+
+class UnparseCompilerAst:
+    """ Methods in this class recursively traverse an AST and
+        output source code for the abstract syntax; original formatting
+        is disregarded.
+    """
+
+    #########################################################################
+    # object interface.
+    #########################################################################
+
+    def __init__(self, tree, file = sys.stdout, single_line_functions=False):
+        """ Unparser(tree, file=sys.stdout) -> None.
+
+            Print the source for tree to file.
+        """
+        self.f = file
+        self._single_func = single_line_functions
+        self._do_indent = True
+        self._indent = 0
+        self._dispatch(tree)
+        self._write("\n")
+        self.f.flush()
+
+    #########################################################################
+    # Unparser private interface.
+    #########################################################################
+
+    ### format, output, and dispatch methods ################################
+
+    def _fill(self, text = ""):
+        "Indent a piece of text, according to the current indentation level"
+        if self._do_indent:
+            self._write("\n"+"    "*self._indent + text)
+        else:
+            self._write(text)
+
+    def _write(self, text):
+        "Append a piece of text to the current line."
+        self.f.write(text)
+
+    def _enter(self):
+        "Print ':', and increase the indentation."
+        self._write(": ")
+        self._indent += 1
+
+    def _leave(self):
+        "Decrease the indentation level."
+        self._indent -= 1
+
+    def _dispatch(self, tree):
+        "_dispatcher function, _dispatching tree type T to method _T."
+        if isinstance(tree, list):
+            for t in tree:
+                self._dispatch(t)
+            return
+        meth = getattr(self, "_"+tree.__class__.__name__)
+        if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
+            return
+        meth(tree)
+
+
+    #########################################################################
+    # compiler.ast unparsing methods.
+    #
+    # There should be one method per concrete grammar type. They are
+    # organized in alphabetical order.
+    #########################################################################
+
+    def _Add(self, t):
+        self.__binary_op(t, '+')
+
+    def _And(self, t):
+        self._write(" (")
+        for i, node in enumerate(t.nodes):
+            self._dispatch(node)
+            if i != len(t.nodes)-1:
+                self._write(") and (")
+        self._write(")")
+               
+    def _AssAttr(self, t):
+        """ Handle assigning an attribute of an object
+        """
+        self._dispatch(t.expr)
+        self._write('.'+t.attrname)
+ 
+    def _Assign(self, t):
+        """ Expression Assignment such as "a = 1".
+
+            This only handles assignment in expressions.  Keyword assignment
+            is handled separately.
+        """
+        self._fill()
+        for target in t.nodes:
+            self._dispatch(target)
+            self._write(" = ")
+        self._dispatch(t.expr)
+        if not self._do_indent:
+            self._write('; ')
+
+    def _AssName(self, t):
+        """ Name on left hand side of expression.
+
+            Treat just like a name on the right side of an expression.
+        """
+        self._Name(t)
+
+    def _AssTuple(self, t):
+        """ Tuple on left hand side of an expression.
+        """
+
+        # _write each elements, separated by a comma.
+        for element in t.nodes[:-1]:
+            self._dispatch(element)
+            self._write(", ")
+
+        # Handle the last one without writing comma
+        last_element = t.nodes[-1]
+        self._dispatch(last_element)
+
+    def _AugAssign(self, t):
+        """ +=,-=,*=,/=,**=, etc. operations
+        """
+        
+        self._fill()
+        self._dispatch(t.node)
+        self._write(' '+t.op+' ')
+        self._dispatch(t.expr)
+        if not self._do_indent:
+            self._write(';')
+            
+    def _Bitand(self, t):
+        """ Bit and operation.
+        """
+        
+        for i, node in enumerate(t.nodes):
+            self._write("(")
+            self._dispatch(node)
+            self._write(")")
+            if i != len(t.nodes)-1:
+                self._write(" & ")
+                
+    def _Bitor(self, t):
+        """ Bit or operation
+        """
+        
+        for i, node in enumerate(t.nodes):
+            self._write("(")
+            self._dispatch(node)
+            self._write(")")
+            if i != len(t.nodes)-1:
+                self._write(" | ")
+                
+    def _CallFunc(self, t):
+        """ Function call.
+        """
+        self._dispatch(t.node)
+        self._write("(")
+        comma = False
+        for e in t.args:
+            if comma: self._write(", ")
+            else: comma = True
+            self._dispatch(e)
+        if t.star_args:
+            if comma: self._write(", ")
+            else: comma = True
+            self._write("*")
+            self._dispatch(t.star_args)
+        if t.dstar_args:
+            if comma: self._write(", ")
+            else: comma = True
+            self._write("**")
+            self._dispatch(t.dstar_args)
+        self._write(")")
+
+    def _Compare(self, t):
+        self._dispatch(t.expr)
+        for op, expr in t.ops:
+            self._write(" " + op + " ")
+            self._dispatch(expr)
+
+    def _Const(self, t):
+        """ A constant value such as an integer value, 3, or a string, "hello".
+        """
+        self._dispatch(t.value)
+
+    def _Decorators(self, t):
+        """ Handle function decorators (eg. @has_units)
+        """
+        for node in t.nodes:
+            self._dispatch(node)
+
+    def _Dict(self, t):
+        self._write("{")
+        for  i, (k, v) in enumerate(t.items):
+            self._dispatch(k)
+            self._write(": ")
+            self._dispatch(v)
+            if i < len(t.items)-1:
+                self._write(", ")
+        self._write("}")
+
+    def _Discard(self, t):
+        """ Node for when return value is ignored such as in "foo(a)".
+        """
+        self._fill()
+        self._dispatch(t.expr)
+
+    def _Div(self, t):
+        self.__binary_op(t, '/')
+
+    def _Ellipsis(self, t):
+        self._write("...")
+
+    def _From(self, t):
+        """ Handle "from xyz import foo, bar as baz".
+        """
+        # fixme: Are From and ImportFrom handled differently?
+        self._fill("from ")
+        self._write(t.modname)
+        self._write(" import ")
+        for i, (name,asname) in enumerate(t.names):
+            if i != 0:
+                self._write(", ")
+            self._write(name)
+            if asname is not None:
+                self._write(" as "+asname)
+                
+    def _Function(self, t):
+        """ Handle function definitions
+        """
+        if t.decorators is not None:
+            self._fill("@")
+            self._dispatch(t.decorators)
+        self._fill("def "+t.name + "(")
+        defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
+        for i, arg in enumerate(zip(t.argnames, defaults)):
+            self._write(arg[0])
+            if arg[1] is not None:
+                self._write('=')
+                self._dispatch(arg[1])
+            if i < len(t.argnames)-1:
+                self._write(', ')
+        self._write(")")
+        if self._single_func:
+            self._do_indent = False
+        self._enter()
+        self._dispatch(t.code)
+        self._leave()
+        self._do_indent = True
+
+    def _Getattr(self, t):
+        """ Handle getting an attribute of an object
+        """
+        if isinstance(t.expr, (Div, Mul, Sub, Add)):
+            self._write('(')
+            self._dispatch(t.expr)
+            self._write(')')
+        else:
+            self._dispatch(t.expr)
+            
+        self._write('.'+t.attrname)
+        
+    def _If(self, t):
+        self._fill()
+        
+        for i, (compare,code) in enumerate(t.tests):
+            if i == 0:
+                self._write("if ")
+            else:
+                self._write("elif ")
+            self._dispatch(compare)
+            self._enter()
+            self._fill()
+            self._dispatch(code)
+            self._leave()
+            self._write("\n")
+
+        if t.else_ is not None:
+            self._write("else")
+            self._enter()
+            self._fill()
+            self._dispatch(t.else_)
+            self._leave()
+            self._write("\n")
+            
+    def _IfExp(self, t):
+        self._dispatch(t.then)
+        self._write(" if ")
+        self._dispatch(t.test)
+
+        if t.else_ is not None:
+            self._write(" else (")
+            self._dispatch(t.else_)
+            self._write(")")
+
+    def _Import(self, t):
+        """ Handle "import xyz.foo".
+        """
+        self._fill("import ")
+        
+        for i, (name,asname) in enumerate(t.names):
+            if i != 0:
+                self._write(", ")
+            self._write(name)
+            if asname is not None:
+                self._write(" as "+asname)
+
+    def _Keyword(self, t):
+        """ Keyword value assignment within function calls and definitions.
+        """
+        self._write(t.name)
+        self._write("=")
+        self._dispatch(t.expr)
+        
+    def _List(self, t):
+        self._write("[")
+        for  i,node in enumerate(t.nodes):
+            self._dispatch(node)
+            if i < len(t.nodes)-1:
+                self._write(", ")
+        self._write("]")
+
+    def _Module(self, t):
+        if t.doc is not None:
+            self._dispatch(t.doc)
+        self._dispatch(t.node)
+
+    def _Mul(self, t):
+        self.__binary_op(t, '*')
+
+    def _Name(self, t):
+        self._write(t.name)
+
+    def _NoneType(self, t):
+        self._write("None")
+        
+    def _Not(self, t):
+        self._write('not (')
+        self._dispatch(t.expr)
+        self._write(')')
+        
+    def _Or(self, t):
+        self._write(" (")
+        for i, node in enumerate(t.nodes):
+            self._dispatch(node)
+            if i != len(t.nodes)-1:
+                self._write(") or (")
+        self._write(")")
+                
+    def _Pass(self, t):
+        self._write("pass\n")
+
+    def _Printnl(self, t):
+        self._fill("print ")
+        if t.dest:
+            self._write(">> ")
+            self._dispatch(t.dest)
+            self._write(", ")
+        comma = False
+        for node in t.nodes:
+            if comma: self._write(', ')
+            else: comma = True
+            self._dispatch(node)
+
+    def _Power(self, t):
+        self.__binary_op(t, '**')
+
+    def _Return(self, t):
+        self._fill("return ")
+        if t.value:
+            if isinstance(t.value, Tuple):
+                text = ', '.join([ name.name for name in t.value.asList() ])
+                self._write(text)
+            else:
+                self._dispatch(t.value)
+            if not self._do_indent:
+                self._write('; ')
+
+    def _Slice(self, t):
+        self._dispatch(t.expr)
+        self._write("[")
+        if t.lower:
+            self._dispatch(t.lower)
+        self._write(":")
+        if t.upper:
+            self._dispatch(t.upper)
+        #if t.step:
+        #    self._write(":")
+        #    self._dispatch(t.step)
+        self._write("]")
+
+    def _Sliceobj(self, t):
+        for i, node in enumerate(t.nodes):
+            if i != 0:
+                self._write(":")
+            if not (isinstance(node, Const) and node.value is None):
+                self._dispatch(node)
+
+    def _Stmt(self, tree):
+        for node in tree.nodes:
+            self._dispatch(node)
+
+    def _Sub(self, t):
+        self.__binary_op(t, '-')
+
+    def _Subscript(self, t):
+        self._dispatch(t.expr)
+        self._write("[")
+        for i, value in enumerate(t.subs):
+            if i != 0:
+                self._write(",")
+            self._dispatch(value)
+        self._write("]")
+
+    def _TryExcept(self, t):
+        self._fill("try")
+        self._enter()
+        self._dispatch(t.body)
+        self._leave()
+
+        for handler in t.handlers:
+            self._fill('except ')
+            self._dispatch(handler[0])
+            if handler[1] is not None:
+                self._write(', ')
+                self._dispatch(handler[1])
+            self._enter()
+            self._dispatch(handler[2])
+            self._leave()
+            
+        if t.else_:
+            self._fill("else")
+            self._enter()
+            self._dispatch(t.else_)
+            self._leave()
+
+    def _Tuple(self, t):
+
+        if not t.nodes:
+            # Empty tuple.
+            self._write("()")
+        else:
+            self._write("(")
+
+            # _write each element, separated by a comma.
+            for element in t.nodes[:-1]:
+                self._dispatch(element)
+                self._write(", ")
+
+            # Handle the last one without writing comma
+            last_element = t.nodes[-1]
+            self._dispatch(last_element)
+
+            self._write(")")
+            
+    def _UnaryAdd(self, t):
+        self._write("+")
+        self._dispatch(t.expr)
+        
+    def _UnarySub(self, t):
+        self._write("-")
+        self._dispatch(t.expr)        
+
+    def _With(self, t):
+        self._fill('with ')
+        self._dispatch(t.expr)
+        if t.vars:
+            self._write(' as ')
+            self._dispatch(t.vars.name)
+        self._enter()
+        self._dispatch(t.body)
+        self._leave()
+        self._write('\n')
+        
+    def _int(self, t):
+        self._write(repr(t))
+
+    def __binary_op(self, t, symbol):
+        # Check if parentheses are needed on the left side and then dispatch
+        has_paren = False
+        left_class = str(t.left.__class__)
+        if (left_class in op_precedence.keys() and
+            op_precedence[left_class] < op_precedence[str(t.__class__)]):
+            has_paren = True
+        if has_paren:
+            self._write('(')
+        self._dispatch(t.left)
+        if has_paren:
+            self._write(')')
+        # Write the appropriate symbol for operator
+        self._write(symbol)
+        # Check if parentheses are needed on the right side and then dispatch
+        has_paren = False
+        right_class = str(t.right.__class__)
+        if (right_class in op_precedence.keys() and
+            op_precedence[right_class] < op_precedence[str(t.__class__)]):
+            has_paren = True
+        if has_paren:
+            self._write('(')
+        self._dispatch(t.right)
+        if has_paren:
+            self._write(')')
+
+    def _float(self, t):
+        # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
+        # We prefer str here.
+        self._write(str(t))
+
+    def _str(self, t):
+        self._write(repr(t))
+        
+    def _tuple(self, t):
+        self._write(str(t))
+
+    #########################################################################
+    # These are the methods from the _ast module's unparser.
+    #
+    # As our needs to handle more advanced code increase, we may want to
+    # modify some of the methods below so that they work for compiler.ast.
+    #########################################################################
+
+#    # stmt
+#    def _Expr(self, tree):
+#        self._fill()
+#        self._dispatch(tree.value)
+#
+#    def _Import(self, t):
+#        self._fill("import ")
+#        first = True
+#        for a in t.names:
+#            if first:
+#                first = False
+#            else:
+#                self._write(", ")
+#            self._write(a.name)
+#            if a.asname:
+#                self._write(" as "+a.asname)
+#
+##    def _ImportFrom(self, t):
+##        self._fill("from ")
+##        self._write(t.module)
+##        self._write(" import ")
+##        for i, a in enumerate(t.names):
+##            if i == 0:
+##                self._write(", ")
+##            self._write(a.name)
+##            if a.asname:
+##                self._write(" as "+a.asname)
+##        # XXX(jpe) what is level for?
+##
+#
+#    def _Break(self, t):
+#        self._fill("break")
+#
+#    def _Continue(self, t):
+#        self._fill("continue")
+#
+#    def _Delete(self, t):
+#        self._fill("del ")
+#        self._dispatch(t.targets)
+#
+#    def _Assert(self, t):
+#        self._fill("assert ")
+#        self._dispatch(t.test)
+#        if t.msg:
+#            self._write(", ")
+#            self._dispatch(t.msg)
+#
+#    def _Exec(self, t):
+#        self._fill("exec ")
+#        self._dispatch(t.body)
+#        if t.globals:
+#            self._write(" in ")
+#            self._dispatch(t.globals)
+#        if t.locals:
+#            self._write(", ")
+#            self._dispatch(t.locals)
+#
+#    def _Print(self, t):
+#        self._fill("print ")
+#        do_comma = False
+#        if t.dest:
+#            self._write(">>")
+#            self._dispatch(t.dest)
+#            do_comma = True
+#        for e in t.values:
+#            if do_comma:self._write(", ")
+#            else:do_comma=True
+#            self._dispatch(e)
+#        if not t.nl:
+#            self._write(",")
+#
+#    def _Global(self, t):
+#        self._fill("global")
+#        for i, n in enumerate(t.names):
+#            if i != 0:
+#                self._write(",")
+#            self._write(" " + n)
+#
+#    def _Yield(self, t):
+#        self._fill("yield")
+#        if t.value:
+#            self._write(" (")
+#            self._dispatch(t.value)
+#            self._write(")")
+#
+#    def _Raise(self, t):
+#        self._fill('raise ')
+#        if t.type:
+#            self._dispatch(t.type)
+#        if t.inst:
+#            self._write(", ")
+#            self._dispatch(t.inst)
+#        if t.tback:
+#            self._write(", ")
+#            self._dispatch(t.tback)
+#
+#
+#    def _TryFinally(self, t):
+#        self._fill("try")
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#
+#        self._fill("finally")
+#        self._enter()
+#        self._dispatch(t.finalbody)
+#        self._leave()
+#
+#    def _excepthandler(self, t):
+#        self._fill("except ")
+#        if t.type:
+#            self._dispatch(t.type)
+#        if t.name:
+#            self._write(", ")
+#            self._dispatch(t.name)
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#
+#    def _ClassDef(self, t):
+#        self._write("\n")
+#        self._fill("class "+t.name)
+#        if t.bases:
+#            self._write("(")
+#            for a in t.bases:
+#                self._dispatch(a)
+#                self._write(", ")
+#            self._write(")")
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#
+#    def _FunctionDef(self, t):
+#        self._write("\n")
+#        for deco in t.decorators:
+#            self._fill("@")
+#            self._dispatch(deco)
+#        self._fill("def "+t.name + "(")
+#        self._dispatch(t.args)
+#        self._write(")")
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#
+#    def _For(self, t):
+#        self._fill("for ")
+#        self._dispatch(t.target)
+#        self._write(" in ")
+#        self._dispatch(t.iter)
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#        if t.orelse:
+#            self._fill("else")
+#            self._enter()
+#            self._dispatch(t.orelse)
+#            self._leave
+#
+#    def _While(self, t):
+#        self._fill("while ")
+#        self._dispatch(t.test)
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#        if t.orelse:
+#            self._fill("else")
+#            self._enter()
+#            self._dispatch(t.orelse)
+#            self._leave
+#
+#    # expr
+#    def _Str(self, tree):
+#        self._write(repr(tree.s))
+##
+#    def _Repr(self, t):
+#        self._write("`")
+#        self._dispatch(t.value)
+#        self._write("`")
+#
+#    def _Num(self, t):
+#        self._write(repr(t.n))
+#
+#    def _ListComp(self, t):
+#        self._write("[")
+#        self._dispatch(t.elt)
+#        for gen in t.generators:
+#            self._dispatch(gen)
+#        self._write("]")
+#
+#    def _GeneratorExp(self, t):
+#        self._write("(")
+#        self._dispatch(t.elt)
+#        for gen in t.generators:
+#            self._dispatch(gen)
+#        self._write(")")
+#
+#    def _comprehension(self, t):
+#        self._write(" for ")
+#        self._dispatch(t.target)
+#        self._write(" in ")
+#        self._dispatch(t.iter)
+#        for if_clause in t.ifs:
+#            self._write(" if ")
+#            self._dispatch(if_clause)
+#
+#    def _IfExp(self, t):
+#        self._dispatch(t.body)
+#        self._write(" if ")
+#        self._dispatch(t.test)
+#        if t.orelse:
+#            self._write(" else ")
+#            self._dispatch(t.orelse)
+#
+#    unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
+#    def _UnaryOp(self, t):
+#        self._write(self.unop[t.op.__class__.__name__])
+#        self._write("(")
+#        self._dispatch(t.operand)
+#        self._write(")")
+#
+#    binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
+#                    "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
+#                    "FloorDiv":"//", "Pow": "**"}
+#    def _BinOp(self, t):
+#        self._write("(")
+#        self._dispatch(t.left)
+#        self._write(")" + self.binop[t.op.__class__.__name__] + "(")
+#        self._dispatch(t.right)
+#        self._write(")")
+#
+#    boolops = {_ast.And: 'and', _ast.Or: 'or'}
+#    def _BoolOp(self, t):
+#        self._write("(")
+#        self._dispatch(t.values[0])
+#        for v in t.values[1:]:
+#            self._write(" %s " % self.boolops[t.op.__class__])
+#            self._dispatch(v)
+#        self._write(")")
+#
+#    def _Attribute(self,t):
+#        self._dispatch(t.value)
+#        self._write(".")
+#        self._write(t.attr)
+#
+##    def _Call(self, t):
+##        self._dispatch(t.func)
+##        self._write("(")
+##        comma = False
+##        for e in t.args:
+##            if comma: self._write(", ")
+##            else: comma = True
+##            self._dispatch(e)
+##        for e in t.keywords:
+##            if comma: self._write(", ")
+##            else: comma = True
+##            self._dispatch(e)
+##        if t.starargs:
+##            if comma: self._write(", ")
+##            else: comma = True
+##            self._write("*")
+##            self._dispatch(t.starargs)
+##        if t.kwargs:
+##            if comma: self._write(", ")
+##            else: comma = True
+##            self._write("**")
+##            self._dispatch(t.kwargs)
+##        self._write(")")
+#
+#    # slice
+#    def _Index(self, t):
+#        self._dispatch(t.value)
+#
+#    def _ExtSlice(self, t):
+#        for i, d in enumerate(t.dims):
+#            if i != 0:
+#                self._write(': ')
+#            self._dispatch(d)
+#
+#    # others
+#    def _arguments(self, t):
+#        first = True
+#        nonDef = len(t.args)-len(t.defaults)
+#        for a in t.args[0:nonDef]:
+#            if first:first = False
+#            else: self._write(", ")
+#            self._dispatch(a)
+#        for a,d in zip(t.args[nonDef:], t.defaults):
+#            if first:first = False
+#            else: self._write(", ")
+#            self._dispatch(a),
+#            self._write("=")
+#            self._dispatch(d)
+#        if t.vararg:
+#            if first:first = False
+#            else: self._write(", ")
+#            self._write("*"+t.vararg)
+#        if t.kwarg:
+#            if first:first = False
+#            else: self._write(", ")
+#            self._write("**"+t.kwarg)
+#
+##    def _keyword(self, t):
+##        self._write(t.arg)
+##        self._write("=")
+##        self._dispatch(t.value)
+#
+#    def _Lambda(self, t):
+#        self._write("lambda ")
+#        self._dispatch(t.args)
+#        self._write(": ")
+#        self._dispatch(t.body)
+
+
+

This diff is so big that we needed to truncate the remainder.
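
The unparser above routes every node through a single _dispatch method that
looks up a handler named after the node's class, so supporting a new grammar
node only requires defining one more _ClassName method. A minimal,
self-contained sketch of that pattern (MiniUnparser and its handlers are
invented for illustration, not part of the commit):

class MiniUnparser:
    def __init__(self):
        self._parts = []

    def _write(self, text):
        self._parts.append(text)

    def _dispatch(self, tree):
        # Lists of nodes are handled element by element.
        if isinstance(tree, list):
            for t in tree:
                self._dispatch(t)
            return
        # A node of class Foo is routed to the method named _Foo.
        getattr(self, "_" + tree.__class__.__name__)(tree)

    def _int(self, t):
        self._write(repr(t))

    def _str(self, t):
        self._write(repr(t))

u = MiniUnparser()
u._dispatch([1, "two", 3])
print("".join(u._parts))  # prints: 1'two'3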

https://bitbucket.org/yt_analysis/yt/commits/c18d3ac987e2/
Changeset:   c18d3ac987e2
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-08 22:38:01
Summary:     Correcting some mistakes made in the merge
Affected #:  4 files

diff -r a66311d5d35c46a628684ac62c7a35698374c977 -r c18d3ac987e21e9f73a4c482ea5c42e31fa4f599 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5159,5 +5159,4 @@
 a71dffe4bc813fdadc506ccad9efb632e23dc843 yt-3.0a1
 954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
 f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3
-079e456c38a87676472a458210077e2be325dc85 last_gplv3
-f327552a6ede406b82711fb800ebcd5fe692d1cb yt-3.0a4
+079e456c38a87676472a458210077e2be325dc85 last_gplv3
\ No newline at end of file

diff -r a66311d5d35c46a628684ac62c7a35698374c977 -r c18d3ac987e21e9f73a4c482ea5c42e31fa4f599 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -427,13 +427,11 @@
         self.left_edge = self.pf.arr(left_edge, 'code_length')
         self.level = level
 
-        self.ActiveDimensions = np.array(dims, dtype='int32')
         rdx = self.pf.domain_dimensions*self.pf.relative_refinement(0, level)
-        # issue 602
-        rdx[np.where(self.ActiveDimensions - 2 * num_ghost_zones <= 1)] = 1
         rdx[np.where(np.array(dims) - 2 * num_ghost_zones <= 1)] = 1   # issue 602
         self.base_dds = self.pf.domain_width / self.pf.domain_dimensions
         self.dds = self.pf.domain_width / rdx.astype("float64")
+        self.ActiveDimensions = np.array(dims, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
         self._num_ghost_zones = num_ghost_zones
         self._use_pbar = use_pbar

diff -r a66311d5d35c46a628684ac62c7a35698374c977 -r c18d3ac987e21e9f73a4c482ea5c42e31fa4f599 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -27,7 +27,6 @@
     new_bin_profile3d
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_objects
-from yt.utilities.exceptions import YTEmptyProfileData
 
 def preserve_source_parameters(func):
     def save_state(*args, **kwargs):

diff -r a66311d5d35c46a628684ac62c7a35698374c977 -r c18d3ac987e21e9f73a4c482ea5c42e31fa4f599 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -14,7 +14,6 @@
 #-----------------------------------------------------------------------------
 
 
-import __builtin__
 import base64
 import types
 
@@ -25,27 +24,15 @@
 import cStringIO
 import __builtin__
 
-
-from ._mpl_imports import FigureCanvasAgg
-from .plot_window import WindowPlotMPL
 from .base_plot_types import ImagePlotMPL
 from .plot_container import \
     ImagePlotContainer, \
     log_transform, linear_transform
 from yt.data_objects.profiles import \
-     create_profile
-from yt.utilities.exceptions import \
-     YTNotInsideNotebook
-from yt.data_objects.profiles import \
-    BinnedProfile1D, \
-    BinnedProfile2D
     create_profile
 from yt.utilities.logger import ytLogger as mylog
 import _mpl_imports as mpl
 from yt.funcs import \
-     ensure_list, \
-     get_image_suffix, \
-     get_ipython_api_version
     ensure_list, \
     get_image_suffix, \
     get_ipython_api_version
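
The construction_data_containers hunk above reorders the covering-grid setup
so that the "issue 602" clamp reads the requested dims directly. A standalone
sketch of what that clamp does (the numbers here are invented):

import numpy as np

dims = np.array([64, 64, 1])        # a requested region that is flat in z
num_ghost_zones = 0
rdx = np.array([128, 128, 128])     # domain_dimensions * refinement factor
# Where the effective dimension collapses to <= 1 cell, force the divisor
# to 1 so the resulting dds stays finite along that axis.
rdx[np.where(dims - 2 * num_ghost_zones <= 1)] = 1
print(rdx)  # [128 128   1]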


https://bitbucket.org/yt_analysis/yt/commits/c37b2b85eeab/
Changeset:   c37b2b85eeab
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-08 22:40:25
Summary:     Fixing merge still
Affected #:  1 file

diff -r c18d3ac987e21e9f73a4c482ea5c42e31fa4f599 -r c37b2b85eeabcfa7cd9156835637a68c1606f7fd yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -24,6 +24,7 @@
 import cStringIO
 import __builtin__
 
+
 from .base_plot_types import ImagePlotMPL
 from .plot_container import \
     ImagePlotContainer, \


https://bitbucket.org/yt_analysis/yt/commits/a5e45da7793a/
Changeset:   a5e45da7793a
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-08 23:08:25
Summary:     Putting a ceiling on the number of grids to avoid recursion issues
Affected #:  1 file

diff -r c37b2b85eeabcfa7cd9156835637a68c1606f7fd -r a5e45da7793aa8a194e738ad6cc1c9f512f047cb yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -297,6 +297,7 @@
 
         self.nprocs = np.around(np.prod(self.domain_dimensions) /
                                 32**self.dimensionality).astype("int")
+        self.nprocs = min(self.nprocs, 2500)
 
     def __del__(self):
         self._handle.close()
@@ -359,6 +360,7 @@
 
         self.nprocs = np.around(np.prod(self.domain_dimensions) /
                                 32**self.dimensionality).astype("int")
+        self.nprocs = max(self.nprocs, 2500)
 
     @classmethod
     def _check_axes(cls, handle):
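
Note that the first hunk above caps nprocs with min() while the second writes
max(), which defeats the ceiling; the next changeset ("Missed this") corrects
it to min(). The heuristic targets roughly one grid per 32**3-cell block,
capped at 2500 so the decomposition does not recurse too deeply. Illustrative
arithmetic (the cube size is invented):

import numpy as np

domain_dimensions = np.array([1024, 1024, 1024])
dimensionality = 3

# Roughly one grid per 32**3 cells...
nprocs = np.around(np.prod(domain_dimensions) /
                   32**dimensionality).astype("int")
print(nprocs)             # 32768
# ...but never more than 2500 grids in total.
print(min(nprocs, 2500))  # 2500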


https://bitbucket.org/yt_analysis/yt/commits/97f427e6b342/
Changeset:   97f427e6b342
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-08 23:10:36
Summary:     Missed this
Affected #:  1 file

diff -r a5e45da7793aa8a194e738ad6cc1c9f512f047cb -r 97f427e6b3423d968f1450b02aa44276b81892da yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -360,7 +360,7 @@
 
         self.nprocs = np.around(np.prod(self.domain_dimensions) /
                                 32**self.dimensionality).astype("int")
-        self.nprocs = max(self.nprocs, 2500)
+        self.nprocs = min(self.nprocs, 2500)
 
     @classmethod
     def _check_axes(cls, handle):


https://bitbucket.org/yt_analysis/yt/commits/06fc62324f5c/
Changeset:   06fc62324f5c
Branch:      yt-3.0
User:        samskillman
Date:        2014-04-08 21:54:29
Summary:     Decompose is now both functional and memory-conservative. Need to refactor
so that it's not a bunch of copied code. FITS XYV now decomposes i/o really
nicely.
Affected #:  2 files

diff -r e81fe6d9db9286ea091e2f6c45287b969496e4a6 -r 06fc62324f5c5f1679070e0b4439bb29563d25ba yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -187,6 +187,19 @@
     def _setup_data_io(self):
         self.io = io_registry[self.dataset_type](self.parameter_file)
 
+    def _chunk_io(self, dobj, cache = True, local_only = False):
+        # local_only is only useful for inline datasets and requires
+        # implementation by subclasses.
+        gfiles = defaultdict(list)
+        gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for g in gobjs:
+            gfiles[g.id].append(g)
+        for fn in sorted(gfiles):
+            gs = gfiles[fn]
+            yield YTDataChunk(dobj, "io", gs, self._count_selection(dobj, gs),
+                              cache = cache)
+
+
 class FITSDataset(Dataset):
     _index_class = FITSHierarchy
     _field_info_class = FITSFieldInfo

diff -r e81fe6d9db9286ea091e2f6c45287b969496e4a6 -r 06fc62324f5c5f1679070e0b4439bb29563d25ba yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -60,17 +60,9 @@
     grid_right_edges = np.empty([np.product(psize), 3], dtype=np.float64)
     n_d = shape
     d_s = (bbox[:, 1] - bbox[:, 0]) / n_d
-    dist = np.mgrid[bbox[0, 0]:bbox[0, 1]:d_s[0],
-                    bbox[1, 0]:bbox[1, 1]:d_s[1],
-                    bbox[2, 0]:bbox[2, 1]:d_s[2]]
-    for i in range(3):
-        xyz = split_array(dist[i], psize)
-        for j in range(np.product(psize)):
-            grid_left_edges[j, i] = xyz[j][0, 0, 0]
-            grid_right_edges[j, i] = xyz[j][-1, -1, -1] + d_s[i]
-        del xyz
-    del dist
-    shapes = split_array(shape, psize)
+    grid_left_edges, grid_right_edges = \
+            split_array_alt(bbox[:, 0], bbox[:, 1], shape, psize)
+    shapes = split_array_nocopy(shape, psize)
     return grid_left_edges, grid_right_edges, shapes
 
 
@@ -163,6 +155,26 @@
                                     rei[1], lei[2]:rei[2]])
     return [tab[slc] for slc in slices]
 
+def split_array_alt(gle, gre, shape, psize):
+    """ Split array into px*py*pz subarrays. """
+    n_d = np.array(shape, dtype=np.int64)
+    dds = (gre-gle)/shape
+    left_edges = []
+    right_edges = []
+    for i in range(psize[0]):
+        for j in range(psize[1]):
+            for k in range(psize[2]):
+                piece = np.array((i, j, k), dtype=np.int64)
+                lei = n_d * piece / psize
+                rei = n_d * (piece + np.ones(3, dtype=np.int64)) / psize
+                lle = gle + lei*dds
+                lre = gle + rei*dds
+                left_edges.append(lle)
+                right_edges.append(lre)
+
+    return left_edges, right_edges 
+
+
 def split_array_nocopy(shape, psize):
     """ Split array into px*py*pz subarrays. """
     n_d = np.array(shape, dtype=np.int64)
@@ -170,6 +182,7 @@
     for i in range(psize[0]):
         for j in range(psize[1]):
             for k in range(psize[2]):
+                piece = np.array((i, j, k), dtype=np.int64)
                 lei = n_d * piece / psize
                 rei = n_d * (piece + np.ones(3, dtype=np.int64)) / psize
                 shapes.append(rei-lei)
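
The refactor above replaces the np.mgrid-based edge computation with pure
arithmetic on integer piece indices: only the per-piece edge coordinates are
ever materialized, never a coordinate array the size of the full domain. A
self-contained sketch of the idea (function and variable names are
illustrative, and integer division is written explicitly here):

import numpy as np

def split_edges(gle, gre, shape, psize):
    """Left/right edges of the px*py*pz subgrids of a domain."""
    n_d = np.array(shape, dtype=np.int64)
    dds = (gre - gle) / shape  # cell width along each axis
    left_edges, right_edges = [], []
    for i in range(psize[0]):
        for j in range(psize[1]):
            for k in range(psize[2]):
                piece = np.array((i, j, k), dtype=np.int64)
                lei = n_d * piece // psize        # first cell index
                rei = n_d * (piece + 1) // psize  # one-past-last cell index
                left_edges.append(gle + lei * dds)
                right_edges.append(gle + rei * dds)
    return left_edges, right_edges

le, re = split_edges(np.zeros(3), np.ones(3), (64, 64, 64),
                     np.array([2, 2, 1]))
print(le[0], re[0])  # first piece spans [0, 0, 0] to [0.5, 0.5, 1.0]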


https://bitbucket.org/yt_analysis/yt/commits/e8e9b8b8184e/
Changeset:   e8e9b8b8184e
Branch:      yt-3.0
User:        samskillman
Date:        2014-04-08 23:15:24
Summary:     Merging in fits optimization in decompose.py
Affected #:  2 files

diff -r 97f427e6b3423d968f1450b02aa44276b81892da -r e8e9b8b8184ebf77b7723b93e9c90f803210dc50 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -203,6 +203,19 @@
     def _setup_data_io(self):
         self.io = io_registry[self.dataset_type](self.parameter_file)
 
+    def _chunk_io(self, dobj, cache = True, local_only = False):
+        # local_only is only useful for inline datasets and requires
+        # implementation by subclasses.
+        gfiles = defaultdict(list)
+        gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for g in gobjs:
+            gfiles[g.id].append(g)
+        for fn in sorted(gfiles):
+            gs = gfiles[fn]
+            yield YTDataChunk(dobj, "io", gs, self._count_selection(dobj, gs),
+                              cache = cache)
+
+
 class FITSDataset(Dataset):
     _index_class = FITSHierarchy
     _field_info_class = FITSFieldInfo

diff -r 97f427e6b3423d968f1450b02aa44276b81892da -r e8e9b8b8184ebf77b7723b93e9c90f803210dc50 yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -60,16 +60,8 @@
     grid_right_edges = np.empty([np.product(psize), 3], dtype=np.float64)
     n_d = shape
     d_s = (bbox[:, 1] - bbox[:, 0]) / n_d
-    dist = np.mgrid[bbox[0, 0]:bbox[0, 1]:d_s[0],
-                    bbox[1, 0]:bbox[1, 1]:d_s[1],
-                    bbox[2, 0]:bbox[2, 1]:d_s[2]]
-    for i in range(3):
-        xyz = split_array_nocopy(dist[i], psize)
-        for j in range(np.product(psize)):
-            grid_left_edges[j, i] = xyz[j][0, 0, 0]
-            grid_right_edges[j, i] = xyz[j][-1, -1, -1] + d_s[i]
-        del xyz
-    del dist
+    grid_left_edges, grid_right_edges = \
+            split_array_alt(bbox[:, 0], bbox[:, 1], shape, psize)
     shapes = split_array_nocopy(shape, psize)
     return grid_left_edges, grid_right_edges, shapes
 
@@ -163,6 +155,26 @@
                                     rei[1], lei[2]:rei[2]])
     return [tab[slc] for slc in slices]
 
+def split_array_alt(gle, gre, shape, psize):
+    """ Split array into px*py*pz subarrays. """
+    n_d = np.array(shape, dtype=np.int64)
+    dds = (gre-gle)/shape
+    left_edges = []
+    right_edges = []
+    for i in range(psize[0]):
+        for j in range(psize[1]):
+            for k in range(psize[2]):
+                piece = np.array((i, j, k), dtype=np.int64)
+                lei = n_d * piece / psize
+                rei = n_d * (piece + np.ones(3, dtype=np.int64)) / psize
+                lle = gle + lei*dds
+                lre = gle + rei*dds
+                left_edges.append(lle)
+                right_edges.append(lre)
+
+    return left_edges, right_edges 
+
+
 def split_array_nocopy(shape, psize):
     """ Split array into px*py*pz subarrays. """
     n_d = np.array(shape, dtype=np.int64)
@@ -170,6 +182,7 @@
     for i in range(psize[0]):
         for j in range(psize[1]):
             for k in range(psize[2]):
+                piece = np.array((i, j, k), dtype=np.int64)
                 lei = n_d * piece / psize
                 rei = n_d * (piece + np.ones(3, dtype=np.int64)) / psize
                 shapes.append(rei-lei)


https://bitbucket.org/yt_analysis/yt/commits/ba0ffebc6eba/
Changeset:   ba0ffebc6eba
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-09 00:22:40
Summary:     Allowing for line-of-sight axes other than velocity; getting units correct for world coordinates. Sped up world coordinates for XYV datasets.
Affected #:  2 files

diff -r e8e9b8b8184ebf77b7723b93e9c90f803210dc50 -r ba0ffebc6ebad15e7a0ff51002bd859d13a69b50 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -323,7 +323,9 @@
                 a = np_char.startswith(axes_names, "RA")
                 b = np_char.startswith(axes_names, "DEC")
                 c = np_char.startswith(axes_names, "VEL")
-                if (a+b+c).sum() != 3:
+                d = np_char.startswith(axes_names, "FREQ")
+                e = np_char.startswith(axes_names, "ENER")
+                if (a+b+c+d+e).sum() != 3:
                     handle.close()
                     return True
         return False
@@ -349,7 +351,10 @@
         self.axes_names = [self.primary_header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
         self.ra_axis = np.where(np_char.startswith(self.axes_names, "RA"))[0][0]
         self.dec_axis = np.where(np_char.startswith(self.axes_names, "DEC"))[0][0]
-        self.vel_axis = np.where(np_char.startswith(self.axes_names, "VEL"))[0][0]
+        self.vel_axis = np_char.startswith(self.axes_names, "VEL")
+        self.vel_axis += np_char.startswith(self.axes_names, "FREQ")
+        self.vel_axis += np_char.startswith(self.axes_names, "ENER")
+        self.vel_axis = np.where(self.vel_axis)[0][0]
 
         self.wcs_2d = ap.pywcs.WCS(naxis=2)
         self.wcs_2d.wcs.crpix = self.wcs.wcs.crpix[[self.ra_axis, self.dec_axis]]
@@ -360,6 +365,13 @@
         self.wcs_2d.wcs.ctype = [self.wcs.wcs.ctype[self.ra_axis],
                                  self.wcs.wcs.ctype[self.dec_axis]]
 
+        self.wcs_1d = ap.pywcs.WCS(naxis=1)
+        self.wcs_1d.wcs.crpix = [self.wcs.wcs.crpix[self.vel_axis]]
+        self.wcs_1d.wcs.cdelt = [self.wcs.wcs.cdelt[self.vel_axis]]
+        self.wcs_1d.wcs.crval = [self.wcs.wcs.crval[self.vel_axis]]
+        self.wcs_1d.wcs.cunit = [str(self.wcs.wcs.cunit[self.vel_axis])]
+        self.wcs_1d.wcs.ctype = [self.wcs.wcs.ctype[self.vel_axis]]
+
     def _parse_parameter_file(self):
 
         super(FITSXYVDataset, self)._parse_parameter_file()
@@ -383,7 +395,9 @@
                 a = np_char.startswith(axes_names, "RA")
                 b = np_char.startswith(axes_names, "DEC")
                 c = np_char.startswith(axes_names, "VEL")
-                if (a+b+c).sum() == 3:
+                d = np_char.startswith(axes_names, "FREQ")
+                e = np_char.startswith(axes_names, "ENER")
+                if (a+b+c+d+e).sum() == 3:
                     handle.close()
                     return True
         return False

diff -r e8e9b8b8184ebf77b7723b93e9c90f803210dc50 -r ba0ffebc6ebad15e7a0ff51002bd859d13a69b50 yt/frontends/fits/fields.py
--- a/yt/frontends/fits/fields.py
+++ b/yt/frontends/fits/fields.py
@@ -25,9 +25,9 @@
                                                  data["z"], 1)
         return w_coords[axis]
     def setup_fluid_fields(self):
-        def world_f(axis):
+        def world_f(axis, unit):
             def _world_f(field, data):
-                return self._get_wcs(data, axis)
+                return data.pf.arr(self._get_wcs(data, axis), unit)
             return _world_f
         for i in range(self.pf.dimensionality):
             if self.pf.wcs.wcs.cname[i] == '':
@@ -38,33 +38,31 @@
             if name != '' and unit != '':
                 if unit.lower() == "deg": unit = "degree"
                 if unit.lower() == "rad": unit = "radian"
-                self.add_field(("fits",name), function=world_f(i), units=unit)
+                self.add_field(("fits",name), function=world_f(i, unit), units=unit)
 
 class FITSXYVFieldInfo(FieldInfoContainer):
     known_other_fields = ()
     def _get_wcs(self, data, axis):
-        if data.pf.dimensionality == 2:
-            w_coords = data.pf.wcs.wcs_pix2world(data["x"], data["y"], 1)
-        else:
-            w_coords = data.pf.wcs.wcs_pix2world(data["x"], data["y"],
-                                                 data["z"], 1)
+        w_coords = data.pf.wcs_2d.wcs_pix2world(data["x"], data["y"], 1)
         return w_coords[axis]
     def setup_fluid_fields(self):
-        def world_f(axis):
+        def world_f(axis, unit):
             def _world_f(field, data):
-                return self._get_wcs(data, axis)
+                return data.pf.arr(self._get_wcs(data, axis), unit)
             return _world_f
-        for i in range(self.pf.dimensionality):
-            if self.pf.wcs.wcs.cname[i] == '':
-                name = str(self.pf.wcs.wcs.ctype[i])
-            else:
-                name = str(self.pf.wcs.wcs.cname[i])
-            unit = str(self.pf.wcs.wcs.cunit[i])
-            if name != '' and unit != '':
-                if unit.lower() == "deg": unit = "degree"
-                if unit.lower() == "rad": unit = "radian"
-                self.add_field(("fits",name), function=world_f(i), units=unit)
+        for i, axis in enumerate([self.pf.ra_axis, self.pf.dec_axis]):
+            name = str(self.pf.wcs_2d.wcs.ctype[i])
+            unit = str(self.pf.wcs_2d.wcs.cunit[i])
+            if unit.lower() == "deg": unit = "degree"
+            if unit.lower() == "rad": unit = "radian"
+            self.add_field(("xyv_fits",name), function=world_f(axis, unit), units=unit)
+        def _vel_los(field, data):
+            return data.pf.arr(data.pf.wcs_1d.wcs_pix2world(data["z"], 1)[0],
+                               str(data.pf.wcs_1d.wcs.cunit[0]))
+        name = str(self.pf.wcs_1d.wcs.ctype[0])
+        unit = str(self.pf.wcs_1d.wcs.cunit[0])
+        self.add_field(("xyv_fits",name),
+                       function=_vel_los,
+                       units=unit)
 
 
-
-
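
The wcs_2d/wcs_1d split above is what buys the speedup: world coordinates for
the sky axes only need a 2-axis transform, and the line-of-sight coordinate
only a 1-axis one, instead of pushing every pixel through the full 3-axis WCS.
A sketch of that split as a standalone helper (assuming astropy is available;
the function name is invented):

from astropy.wcs import WCS

def split_cube_wcs(wcs3d, ra_axis, dec_axis, vel_axis):
    """Build a celestial 2D WCS and a spectral 1D WCS from a cube WCS."""
    wcs_2d = WCS(naxis=2)
    wcs_2d.wcs.crpix = wcs3d.wcs.crpix[[ra_axis, dec_axis]]
    wcs_2d.wcs.cdelt = wcs3d.wcs.cdelt[[ra_axis, dec_axis]]
    wcs_2d.wcs.crval = wcs3d.wcs.crval[[ra_axis, dec_axis]]
    wcs_2d.wcs.cunit = [str(wcs3d.wcs.cunit[ra_axis]),
                        str(wcs3d.wcs.cunit[dec_axis])]
    wcs_2d.wcs.ctype = [wcs3d.wcs.ctype[ra_axis],
                        wcs3d.wcs.ctype[dec_axis]]

    wcs_1d = WCS(naxis=1)
    wcs_1d.wcs.crpix = [wcs3d.wcs.crpix[vel_axis]]
    wcs_1d.wcs.cdelt = [wcs3d.wcs.cdelt[vel_axis]]
    wcs_1d.wcs.crval = [wcs3d.wcs.crval[vel_axis]]
    wcs_1d.wcs.cunit = [str(wcs3d.wcs.cunit[vel_axis])]
    wcs_1d.wcs.ctype = [wcs3d.wcs.ctype[vel_axis]]
    return wcs_2d, wcs_1d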


https://bitbucket.org/yt_analysis/yt/commits/453f87adb736/
Changeset:   453f87adb736
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-09 00:26:11
Summary:     Allowing nprocs to be set manually.
Affected #:  1 file

diff -r ba0ffebc6ebad15e7a0ff51002bd859d13a69b50 -r 453f87adb73624ded64dd108db2549826c5fd300 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -223,10 +223,12 @@
     _handle = None
 
     def __init__(self, filename, dataset_type='fits',
+                 nprocs = None,
                  storage_filename = None,
                  mask_nans = True):
         self.fluid_types += ("fits",)
         self.mask_nans = mask_nans
+        self.nprocs = nprocs
         self._handle = ap.pyfits.open(filename, memmap=True, do_not_scale_image_data=True)
         for i, h in enumerate(self._handle):
             if h.header["naxis"] >= 2:
@@ -308,9 +310,10 @@
         self.current_redshift = self.omega_lambda = self.omega_matter = \
             self.hubble_constant = self.cosmological_simulation = 0.0
 
-        self.nprocs = np.around(np.prod(self.domain_dimensions) /
-                                32**self.dimensionality).astype("int")
-        self.nprocs = min(self.nprocs, 2500)
+        if self.nprocs is None:
+            self.nprocs = np.around(np.prod(self.domain_dimensions) /
+                                    32**self.dimensionality).astype("int")
+            self.nprocs = min(self.nprocs, 2500)
 
     def __del__(self):
         self._handle.close()
@@ -340,12 +343,14 @@
 
     def __init__(self, filename,
                  dataset_type='xyv_fits',
+                 nprocs = None,
                  storage_filename = None,
                  mask_nans = True):
 
         self.fluid_types += ("xyv_fits",)
 
         super(FITSXYVDataset, self).__init__(filename, dataset_type=dataset_type,
+                                             nprocs=nprocs,
                                              storage_filename=storage_filename,
                                              mask_nans=mask_nans)
         self.axes_names = [self.primary_header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
@@ -383,9 +388,10 @@
             self.domain_left_edge = self.domain_left_edge[:3]
             self.domain_right_edge = self.domain_right_edge[:3]
 
-        self.nprocs = np.around(np.prod(self.domain_dimensions) /
-                                32**self.dimensionality).astype("int")
-        self.nprocs = min(self.nprocs, 2500)
+        if self.nprocs is None:
+            self.nprocs = np.around(np.prod(self.domain_dimensions) /
+                                    32**self.dimensionality).astype("int")
+            self.nprocs = min(self.nprocs, 2500)
 
     @classmethod
     def _check_axes(cls, handle):
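
With nprocs now a constructor argument, the heuristic above only runs when the
caller passes nothing. A hypothetical override ("events.fits" is a placeholder
filename, not from the commit):

from yt.frontends.fits.data_structures import FITSDataset

# Force 64 virtual grids instead of the prod(dims)/32**ndim heuristic.
pf = FITSDataset("events.fits", nprocs=64)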


https://bitbucket.org/yt_analysis/yt/commits/a447a93b2535/
Changeset:   a447a93b2535
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-09 14:34:34
Summary:     More refinement of the FITS frontend.
Affected #:  3 files

diff -r 453f87adb73624ded64dd108db2549826c5fd300 -r a447a93b2535f3168339b3f4c52f6b0bdd721796 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -215,7 +215,6 @@
             yield YTDataChunk(dobj, "io", gs, self._count_selection(dobj, gs),
                               cache = cache)
 
-
 class FITSDataset(Dataset):
     _index_class = FITSHierarchy
     _field_info_class = FITSFieldInfo
@@ -225,7 +224,7 @@
     def __init__(self, filename, dataset_type='fits',
                  nprocs = None,
                  storage_filename = None,
-                 mask_nans = True):
+                 mask_nans = False):
         self.fluid_types += ("fits",)
         self.mask_nans = mask_nans
         self.nprocs = nprocs
@@ -345,7 +344,7 @@
                  dataset_type='xyv_fits',
                  nprocs = None,
                  storage_filename = None,
-                 mask_nans = True):
+                 mask_nans = False):
 
         self.fluid_types += ("xyv_fits",)
 

diff -r 453f87adb73624ded64dd108db2549826c5fd300 -r a447a93b2535f3168339b3f4c52f6b0bdd721796 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -66,6 +66,9 @@
     _particle_reader = False
     _dataset_type = "xyv_fits"
 
+    def __init__(self, pf):
+        super(IOHandlerFITSXYV,self).__init__(pf)
+
     def _read_fluid_selection(self, chunks, selector, fields, size):
         chunks = list(chunks)
         if any((ftype != "xyv_fits" for ftype, fname in fields)):

diff -r 453f87adb73624ded64dd108db2549826c5fd300 -r a447a93b2535f3168339b3f4c52f6b0bdd721796 yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -10,15 +10,19 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import __builtin__
 import aplpy
 from yt.utilities.fits_image import FITSImageBuffer
-from yt.funcs import fix_axis
+from yt.funcs import fix_axis, ensure_list
 import astropy.wcs as pywcs
+from yt.utilities.exceptions import \
+    YTNotInsideNotebook
+import matplotlib.pyplot as plt
 
 axis_wcs = [[1,2],[0,2],[0,1]]
 
-plot_method_list = ["recenter","show_colorscale","show_grayscale",
-                    "refresh","add_colorbar","remove_colorbar"]
+plot_method_list = ["recenter","refresh","add_colorbar",
+                    "remove_colorbar"]
 
 def plot_method(method, plots):
     def _method(*args, **kwargs):
@@ -40,11 +44,13 @@
         w.wcs.ctype = [self.ds.wcs.wcs.ctype[idx] for idx in axis_wcs[axis]]
         self.buffer = FITSImageBuffer(data, fields=fields, wcs=w)
         for field in self.fields:
-            self.plots[field] = aplpy.FITSFigure(self.buffer[field],
-                                                 **kwargs)
+            self.plots[field] = aplpy.FITSFigure(self.buffer[field], **kwargs)
             self.plots[field].set_auto_refresh(False)
         self._setup_plot_methods()
         self.set_font(family="serif", size=15)
+        for v in self.values():
+            v.show_colorscale()
+        plt.close("all")
 
     def _setup_plot_methods(self):
         for method in plot_method_list:
@@ -67,29 +73,52 @@
             self[plot].axis_labels.set_font(**kwargs)
             self[plot].tick_labels.set_font(**kwargs)
 
-    def set_stretch(self, name, stretch):
-        self[name].show_colorscale(stretch=stretch)
+    def show(self):
+        r"""This will send any existing plots to the IPython notebook.
+        function name.
 
-    def set_zlim(self, name, zmin, zmax):
-        self[name].show_colorscale(vmin=zmin, vmax=zmax)
+        If yt is being run from within an IPython session, and it is able to
+        determine this, this function will send any existing plots to the
+        notebook for display.
+
+        If yt can't determine if it's inside an IPython session, it will raise
+        YTNotInsideNotebook.
+
+        Examples
+        --------
+
+        >>> from yt.mods import SlicePlot
+        >>> slc = SlicePlot(pf, "x", ["Density", "VelocityMagnitude"])
+        >>> slc.show()
+
+        """
+        if "__IPYTHON__" in dir(__builtin__):
+            from IPython.display import display
+            for k, v in sorted(self.plots.iteritems()):
+                display(v._figure)
+        else:
+            raise YTNotInsideNotebook
 
 class FITSSlicePlot(FITSPlot):
-    def __init__(self, ds, axis, fields, coord=None, **kwargs):
+    def __init__(self, ds, axis, fields, coord=None, field_parameters=None, **kwargs):
+        fields = ensure_list(fields)
         axis = fix_axis(axis)
         if coord is None:
             coord = ds.domain_center.ndarray_view()[axis]
-        slc = ds.slice(axis, coord)
+        slc = ds.slice(axis, coord, field_parameters=field_parameters)
         data = {}
         for field in fields:
-            data[field] = slc[field].reshape(ds.domain_dimensions[axis_wcs[axis]]).transpose()
+            data[field] = slc.to_frb((1.0,"unitary"), ds.domain_dimensions[axis_wcs[axis]])[field]
         super(FITSSlicePlot, self).__init__(ds, data, axis, fields, **kwargs)
 
 class FITSProjectionPlot(FITSPlot):
-    def __init__(self, ds, axis, fields, weight_field=None, data_source=None, **kwargs):
+    def __init__(self, ds, axis, fields, weight_field=None, data_source=None,
+                 field_parameters=None, **kwargs):
+        fields = ensure_list(fields)
         axis = fix_axis(axis)
         prj = ds.proj(fields[0], axis, weight_field=weight_field, data_source=data_source)
         data = {}
         for field in fields:
-            data[field] = prj[field].reshape(ds.domain_dimensions[axis_wcs[axis]]).transpose()
+            data[field] = prj.to_frb((1.0,"unitary"), ds.domain_dimensions[axis_wcs[axis]])[field]
         super(FITSProjectionPlot, self).__init__(ds, data, axis, fields, **kwargs)
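
A hypothetical session driving the classes above ("cube.fits" and "intensity"
are placeholder names, not from the commit):

from yt.mods import load
from yt.frontends.fits.misc import FITSSlicePlot

pf = load("cube.fits")
slc = FITSSlicePlot(pf, "z", "intensity")  # fields pass through ensure_list
slc.add_colorbar()   # forwarded to each aplpy.FITSFigure via plot_method_list
slc.set_font(family="serif", size=12)
slc.show()           # raises YTNotInsideNotebook outside an IPython notebook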
 


https://bitbucket.org/yt_analysis/yt/commits/8e57daf2d595/
Changeset:   8e57daf2d595
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-09 17:02:39
Summary:     Set log to False for the output fields
Affected #:  1 file

diff -r a447a93b2535f3168339b3f4c52f6b0bdd721796 -r 8e57daf2d5958dec16d2c3d02fa98defebc37da6 yt/frontends/fits/fields.py
--- a/yt/frontends/fits/fields.py
+++ b/yt/frontends/fits/fields.py
@@ -42,6 +42,12 @@
 
 class FITSXYVFieldInfo(FieldInfoContainer):
     known_other_fields = ()
+
+    def __init__(self, pf, field_list, slice_info=None):
+        super(FITSXYVFieldInfo, self).__init__(pf, field_list, slice_info=slice_info)
+        for field in pf.field_list:
+            self[field].take_log = False
+            
     def _get_wcs(self, data, axis):
         w_coords = data.pf.wcs_2d.wcs_pix2world(data["x"], data["y"], 1)
         return w_coords[axis]
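
Forcing take_log = False matters because XYV world coordinates (RA, Dec,
velocity) are routinely negative, so a log-scaled default display would be
undefined. Illustrative values:

import numpy as np

vel = np.array([-2.0e5, 0.0, 2.0e5])  # line-of-sight velocities; sign matters
print(np.log10(vel))  # nan / -inf: why a linear scale is the right default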


https://bitbucket.org/yt_analysis/yt/commits/075aff2c6356/
Changeset:   075aff2c6356
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-09 20:55:34
Summary:     More FITS
Affected #:  3 files

diff -r 8e57daf2d5958dec16d2c3d02fa98defebc37da6 -r 075aff2c63565d8bf2709d263b731f4439e80415 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -81,6 +81,8 @@
 known_units = {"k":"K",
                "jy":"Jy"}
 
+axes_prefixes = ["RA","DEC","V","ENER","FREQ"]
+
 def fits_file_validator(ds, *args, **kwargs):
     ext = args[0].rsplit(".", 1)[-1]
     if ext.upper() == "GZ":
@@ -141,19 +143,40 @@
 
     def _detect_output_fields(self):
         self.field_list = []
-        self._field_map = {}
-        for h in self._handle[self.parameter_file.first_image:]:
-            if h.header["naxis"] >= 2:
-                if self.parameter_file.four_dims:
-                    for idx in range(h.header["naxis4"]):
-                        fname = h.name.lower()+"_%d" % (idx)
-                        self._field_map[fname] = idx
+        self._axis_map = {}
+        self._file_map = {}
+        self._ext_map = {}
+        for i, fits_file in enumerate(self.parameter_file._fits_files):
+            for j, h in enumerate(fits_file):
+                if h.header["naxis"] >= 2:
+                    try:
+                        fname = h.header["btype"].lower()
+                    except:
+                        fname = h.name.lower()
+                    if self.parameter_file.four_dims:
+                        for idx in range(h.header["naxis4"]):
+                            if h.header["naxis4"] > 1:
+                                fname += "_stokes_%d" % (idx)
+                            if self.pf.num_files > 1:
+                                try:
+                                    fname += "_%5.3fGHz" % (h.header["restfreq"]/1.0e9)
+                                except:
+                                    fname += "_%5.3fGHz" % (h.header["restfrq"]/1.0e9)
+                                else:
+                                    fname += "_field_%d" % (i)
+                            self._axis_map[fname] = idx
+                            self._file_map[fname] = fits_file
+                            self._ext_map[fname] = j
+                            self.field_list.append((self.dataset_type, fname))
+                            mylog.info("Adding field %s to the list of fields." % (fname))
+                            self._detect_image_units(fname, h.header)
+                    else:
+                        if self.pf.num_files > 1:
+                            fname += "file_%d" % (i)
+                        self._file_map[fname] = fits_file
                         self.field_list.append((self.dataset_type, fname))
+                        mylog.info("Adding field %s to the list of fields." % (fname))
                         self._detect_image_units(fname, h.header)
-                else:
-                    fname = h.name.lower()
-                    self.field_list.append((self.dataset_type, fname))
-                    self._detect_image_units(fname, h.header)
 
     def _count_grids(self):
         self.num_grids = self.pf.nprocs
@@ -221,18 +244,29 @@
     _dataset_type = "fits"
     _handle = None
 
-    def __init__(self, filename, dataset_type='fits',
+    def __init__(self, filename,
+                 dataset_type='fits',
+                 slave_files = [],
                  nprocs = None,
                  storage_filename = None,
                  mask_nans = False):
+        self.filenames = [filename] + slave_files
+        self.num_files = len(self.filenames)
         self.fluid_types += ("fits",)
         self.mask_nans = mask_nans
         self.nprocs = nprocs
-        self._handle = ap.pyfits.open(filename, memmap=True, do_not_scale_image_data=True)
-        for i, h in enumerate(self._handle):
-            if h.header["naxis"] >= 2:
-                self.first_image = i
-                break
+        self._handle = ap.pyfits.open(self.filenames[0], memmap=True, do_not_scale_image_data=True)
+        self._fits_files = [self._handle]
+        if self.num_files > 1:
+            for fits_file in slave_files:
+                self._fits_files.append(ap.pyfits.open(fits_file,
+                                                       memmap=True,
+                                                       do_not_scale_image_data=True))
+        self.first_image = 0
+        #for i, h in enumerate(self._handle):
+        #    if h.header["naxis"] >= 2:
+        #        self.first_image = i
+        #        break
 
         self.primary_header = self._handle[self.first_image].header
         self.shape = self._handle[self.first_image].shape
@@ -322,12 +356,10 @@
         for h in handle:
             if h.header["naxis"] >= 2:
                 axes_names = [h.header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
-                a = np_char.startswith(axes_names, "RA")
-                b = np_char.startswith(axes_names, "DEC")
-                c = np_char.startswith(axes_names, "VEL")
-                d = np_char.startswith(axes_names, "FREQ")
-                e = np_char.startswith(axes_names, "ENER")
-                if (a+b+c+d+e).sum() != 3:
+                x = np.zeros((3), dtype="bool")
+                for ap in axes_prefixes:
+                    x += np_char.startswith(axes_names, ap)
+                if x.sum() != 3:
                     handle.close()
                     return True
         return False
@@ -342,6 +374,7 @@
 
     def __init__(self, filename,
                  dataset_type='xyv_fits',
+                 slave_files = [],
                  nprocs = None,
                  storage_filename = None,
                  mask_nans = False):
@@ -349,16 +382,18 @@
         self.fluid_types += ("xyv_fits",)
 
         super(FITSXYVDataset, self).__init__(filename, dataset_type=dataset_type,
+                                             slave_files=slave_files,
                                              nprocs=nprocs,
                                              storage_filename=storage_filename,
                                              mask_nans=mask_nans)
         self.axes_names = [self.primary_header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
         self.ra_axis = np.where(np_char.startswith(self.axes_names, "RA"))[0][0]
         self.dec_axis = np.where(np_char.startswith(self.axes_names, "DEC"))[0][0]
-        self.vel_axis = np_char.startswith(self.axes_names, "VEL")
+        self.vel_axis = np_char.startswith(self.axes_names, "V")
         self.vel_axis += np_char.startswith(self.axes_names, "FREQ")
         self.vel_axis += np_char.startswith(self.axes_names, "ENER")
         self.vel_axis = np.where(self.vel_axis)[0][0]
+        self.vel_name = self.axes_names[self.vel_axis].lower()
 
         self.wcs_2d = ap.pywcs.WCS(naxis=2)
         self.wcs_2d.wcs.crpix = self.wcs.wcs.crpix[[self.ra_axis, self.dec_axis]]
@@ -397,12 +432,10 @@
         for h in handle:
             if h.header["naxis"] >= 3:
                 axes_names = [h.header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
-                a = np_char.startswith(axes_names, "RA")
-                b = np_char.startswith(axes_names, "DEC")
-                c = np_char.startswith(axes_names, "VEL")
-                d = np_char.startswith(axes_names, "FREQ")
-                e = np_char.startswith(axes_names, "ENER")
-                if (a+b+c+d+e).sum() == 3:
+                x = np.zeros((3), dtype="bool")
+                for ap in axes_prefixes:
+                    x += np_char.startswith(axes_names, ap)
+                if x.sum() == 3:
                     handle.close()
                     return True
         return False

diff -r 8e57daf2d5958dec16d2c3d02fa98defebc37da6 -r 075aff2c63565d8bf2709d263b731f4439e80415 yt/frontends/fits/fields.py
--- a/yt/frontends/fits/fields.py
+++ b/yt/frontends/fits/fields.py
@@ -47,7 +47,7 @@
         super(FITSXYVFieldInfo, self).__init__(pf, field_list, slice_info=slice_info)
         for field in pf.field_list:
             self[field].take_log = False
-            
+
     def _get_wcs(self, data, axis):
         w_coords = data.pf.wcs_2d.wcs_pix2world(data["x"], data["y"], 1)
         return w_coords[axis]
@@ -57,7 +57,7 @@
                 return data.pf.arr(self._get_wcs(data, axis), unit)
             return _world_f
         for i, axis in enumerate([self.pf.ra_axis, self.pf.dec_axis]):
-            name = str(self.pf.wcs_2d.wcs.ctype[i])
+            name = ["ra","dec"][i]
             unit = str(self.pf.wcs_2d.wcs.cunit[i])
             if unit.lower() == "deg": unit = "degree"
             if unit.lower() == "rad": unit = "radian"
@@ -65,9 +65,8 @@
         def _vel_los(field, data):
             return data.pf.arr(data.pf.wcs_1d.wcs_pix2world(data["z"], 1)[0],
                                str(data.pf.wcs_1d.wcs.cunit[0]))
-        name = str(self.pf.wcs_1d.wcs.ctype[0])
         unit = str(self.pf.wcs_1d.wcs.cunit[0])
-        self.add_field(("xyv_fits",name),
+        self.add_field(("xyv_fits",self.pf.vel_name),
                        function=_vel_los,
                        units=unit)
 

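The world_f factory above exists because closures capture loop variables by reference; defining the callback directly inside the loop would bind every field to the last axis. A minimal illustration of that design choice:

def make_getter(axis):
    def getter(coords):
        return coords[axis]  # 'axis' is frozen when make_getter is called
    return getter

getters = [make_getter(ax) for ax in (0, 1)]
print(getters[0]((10, 20)), getters[1]((10, 20)))  # 10 20

# Without the factory, both callbacks share one 'ax', which ends at 1:
bad = [lambda coords: coords[ax] for ax in (0, 1)]
print(bad[0]((10, 20)), bad[1]((10, 20)))  # 20 20
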
diff -r 8e57daf2d5958dec16d2c3d02fa98defebc37da6 -r 075aff2c63565d8bf2709d263b731f4439e80415 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -35,7 +35,6 @@
         chunks = list(chunks)
         if any((ftype != "fits" for ftype, fname in fields)):
             raise NotImplementedError
-        f = self._handle
         rv = {}
         dt = "float64"
         for field in fields:
@@ -45,7 +44,8 @@
                     size, [f2 for f1, f2 in fields], ng)
         for field in fields:
             ftype, fname = field
-            ds = f[fname]
+            f = self.pf.index._file_map[fname]
+            ds = f[self.pf.index._ext_map[fname]]
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:
@@ -73,7 +73,6 @@
         chunks = list(chunks)
         if any((ftype != "xyv_fits" for ftype, fname in fields)):
             raise NotImplementedError
-        f = self._handle
         rv = {}
         dt = "float64"
         for field in fields:
@@ -83,17 +82,15 @@
                     size, [f2 for f1, f2 in fields], ng)
         for field in fields:
             ftype, fname = field
-            if self.pf.four_dims:
-                ds = f[fname.split("_")[0]]
-            else:
-                ds = f[fname]
+            f = self.pf.index._file_map[fname]
+            ds = f[self.pf.index._ext_map[fname]]
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:
                     start = (g.LeftEdge.ndarray_view()-0.5).astype("int")
                     end = (g.RightEdge.ndarray_view()-0.5).astype("int")
                     if self.pf.four_dims:
-                        idx = self.pf.index._field_map[fname]
+                        idx = self.pf.index._axis_map[fname]
                         data = ds.data[idx,start[2]:end[2],start[1]:end[1],start[0]:end[0]].transpose()
                     else:
                         data = ds.data[start[2]:end[2],start[1]:end[1],start[0]:end[0]].transpose()

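The read path above turns grid edges into integer slice bounds on the (z, y, x)-ordered FITS array, then transposes the result into yt's (x, y, z) ordering. A toy sketch with illustrative edges:

import numpy as np

cube = np.arange(4 * 5 * 6).reshape(4, 5, 6)  # stand-in for ds.data, (z, y, x) order
left_edge = np.array([0.5, 1.5, 0.5])         # hypothetical g.LeftEdge
right_edge = np.array([3.5, 4.5, 2.5])        # hypothetical g.RightEdge

start = (left_edge - 0.5).astype("int")       # [0, 1, 0]
end = (right_edge - 0.5).astype("int")        # [3, 4, 2]
data = cube[start[2]:end[2], start[1]:end[1], start[0]:end[0]].transpose()
print(data.shape)  # (3, 3, 2), now in (x, y, z) order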

https://bitbucket.org/yt_analysis/yt/commits/c8155685f7fd/
Changeset:   c8155685f7fd
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-10 04:07:04
Summary:     Adding a pixel length unit for FITS files
Affected #:  1 file

diff -r 075aff2c63565d8bf2709d263b731f4439e80415 -r c8155685f7fd890a9ece614d1b05fbea2dd2729e yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -99,8 +99,8 @@
     "me": (mass_electron_grams, dimensions.mass),
     "angstrom": (cm_per_ang, dimensions.length),
     "Jy": (jansky_cgs, dimensions.specific_flux),
-    "beam": (1.0, dimensions.dimensionless)
-
+    "beam": (1.0, dimensions.dimensionless),
+    "pixel":  (1.0, dimensions.length)
 }
 
 # Add LaTeX representations for units with trivial representations.

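This table entry makes "pixel" a recognized length unit; a later commit in this series removes it from the global table and instead registers it per-dataset, scaled to the pixel size. A sketch of that per-dataset pattern, using the registry call that appears in the next changeset (treat the names as illustrative rather than a stable API):

from yt.units.dimensions import length

def register_pixel_unit(ds):
    # 1 pixel == the dataset's length_unit (the FITS pixel scale), in cgs
    ds.unit_registry.add("pixel", float(ds.length_unit.in_cgs().value), length)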

https://bitbucket.org/yt_analysis/yt/commits/59bfd88c3b12/
Changeset:   59bfd88c3b12
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-10 06:05:34
Summary:     Massive refactoring of the FITS frontend. Everything has been subsumed into one FITSDataset class, which checks for files of the form (RA,Dec,?) and sets up the world coordinates accordingly. The FITS plotting classes have been made subclasses of their PlotWindow counterparts, but they need more work.
Affected #:  6 files

diff -r c8155685f7fd890a9ece614d1b05fbea2dd2729e -r 59bfd88c3b12fbf15c739dc6ca45dcf11030d7be yt/frontends/fits/api.py
--- a/yt/frontends/fits/api.py
+++ b/yt/frontends/fits/api.py
@@ -13,13 +13,16 @@
 from .data_structures import \
       FITSGrid, \
       FITSHierarchy, \
-      FITSDataset, \
-      FITSXYVDataset
+      FITSDataset
 
 from .fields import \
-      FITSFieldInfo, \
-      FITSXYVFieldInfo
+      FITSFieldInfo
 
 from .io import \
-      IOHandlerFITS, \
-      IOHandlerFITSXYV
+      IOHandlerFITS
+
+from .misc import \
+      FITSOffAxisSlicePlot, \
+      FITSSlicePlot, \
+      FITSProjectionPlot, \
+      FITSOffAxisProjectionPlot

diff -r c8155685f7fd890a9ece614d1b05fbea2dd2729e -r 59bfd88c3b12fbf15c739dc6ca45dcf11030d7be yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -17,7 +17,6 @@
 import weakref
 import warnings
 
-from yt.config import ytcfg
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
     AMRGridPatch
@@ -28,10 +27,10 @@
 from yt.data_objects.static_output import \
     Dataset
 from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
+    mpc_conversion
 from yt.utilities.io_handler import \
     io_registry
-from .fields import FITSFieldInfo, FITSXYVFieldInfo
+from .fields import FITSFieldInfo
 from yt.utilities.decompose import \
     decompose_array, get_psize, decompose_array_nocopy
 
@@ -75,30 +74,11 @@
 
 ap = astropy_imports()
 
-angle_units = ["deg","arcsec","arcmin","mas"]
-all_units = angle_units + mpc_conversion.keys()
-
 known_units = {"k":"K",
                "jy":"Jy"}
 
 axes_prefixes = ["RA","DEC","V","ENER","FREQ"]
 
-def fits_file_validator(ds, *args, **kwargs):
-    ext = args[0].rsplit(".", 1)[-1]
-    if ext.upper() == "GZ":
-        # We don't know for sure that there will be > 1
-        ext = args[0].rsplit(".", 1)[0].rsplit(".", 1)[-1]
-    if ext.upper() not in ("FITS", "FTS"):
-        return False
-    try:
-        with warnings.catch_warnings():
-            warnings.filterwarnings('ignore', category=UserWarning, append=True)
-            fileh = ap.pyfits.open(args[0])
-        if ds._check_axes(fileh): return True
-    except:
-        pass
-    return False
-
 class FITSGrid(AMRGridPatch):
     _id_offset = 0
     def __init__(self, id, index, level):
@@ -133,7 +113,8 @@
         try:
             field_units = header["bunit"].lower().strip(" ")
             # FITS units always return upper-case, so we need to get
-            # the right case by comparing against known units
+            # the right case by comparing against known units. This
+            # only really works for common units.
             for name in known_units:
                 if field_units.find(name) > -1:
                     field_units = field_units.replace(name, known_units[name])
@@ -146,34 +127,31 @@
         self._axis_map = {}
         self._file_map = {}
         self._ext_map = {}
+        # We create a field from each slice on the 4th axis
+        if self.parameter_file.naxis == 4:
+            naxis4 = self.parameter_file.primary_header["naxis4"]
+        else:
+            naxis4 = 1
         for i, fits_file in enumerate(self.parameter_file._fits_files):
             for j, h in enumerate(fits_file):
-                if h.header["naxis"] >= 2:
+                if self.parameter_file.naxis >= 2:
                     try:
                         fname = h.header["btype"].lower()
                     except:
                         fname = h.name.lower()
-                    if self.parameter_file.four_dims:
-                        for idx in range(h.header["naxis4"]):
-                            if h.header["naxis4"] > 1:
-                                fname += "_stokes_%d" % (idx)
-                            if self.pf.num_files > 1:
-                                try:
-                                    fname += "_%5.3fGHz" % (h.header["restfreq"]/1.0e9)
-                                except:
-                                    fname += "_%5.3fGHz" % (h.header["restfrq"]/1.0e9)
-                                else:
-                                    fname += "_field_%d" % (i)
-                            self._axis_map[fname] = idx
-                            self._file_map[fname] = fits_file
-                            self._ext_map[fname] = j
-                            self.field_list.append((self.dataset_type, fname))
-                            mylog.info("Adding field %s to the list of fields." % (fname))
-                            self._detect_image_units(fname, h.header)
-                    else:
+                    for k in xrange(naxis4):
+                        if naxis4 > 1:
+                            fname += "_%s_%d" % (h.header["CTYPE4"], k+1)
                         if self.pf.num_files > 1:
-                            fname += "file_%d" % (i)
+                            try:
+                                fname += "_%5.3fGHz" % (h.header["restfreq"]/1.0e9)
+                            except:
+                                fname += "_%5.3fGHz" % (h.header["restfrq"]/1.0e9)
+                            else:
+                                fname += "_field_%d" % (i)
+                        self._axis_map[fname] = k
                         self._file_map[fname] = fits_file
+                        self._ext_map[fname] = j
                         self.field_list.append((self.dataset_type, fname))
                         mylog.info("Adding field %s to the list of fields." % (fname))
                         self._detect_image_units(fname, h.header)
@@ -185,6 +163,7 @@
         f = self._handle # shortcut
         pf = self.parameter_file # shortcut
 
+        # If nprocs > 1, decompose the domain into virtual grids
         if pf.nprocs > 1:
             bbox = np.array([[le,re] for le, re in zip(pf.domain_left_edge,
                                                        pf.domain_right_edge)])
@@ -245,36 +224,40 @@
     _handle = None
 
     def __init__(self, filename,
-                 dataset_type='fits',
+                 dataset_type = 'fits',
                  slave_files = [],
                  nprocs = None,
                  storage_filename = None,
-                 mask_nans = False):
+                 mask_nans = False,
+                 suppress_astropy_warnings = True):
+        if suppress_astropy_warnings:
+            ap.log.disable_warnings_logging()
+            warnings.filterwarnings('ignore', module="astropy", append=True)
         self.filenames = [filename] + slave_files
         self.num_files = len(self.filenames)
         self.fluid_types += ("fits",)
         self.mask_nans = mask_nans
         self.nprocs = nprocs
-        self._handle = ap.pyfits.open(self.filenames[0], memmap=True, do_not_scale_image_data=True)
+        self._handle = ap.pyfits.open(self.filenames[0],
+                                      memmap=True,
+                                      do_not_scale_image_data=True)
         self._fits_files = [self._handle]
         if self.num_files > 1:
             for fits_file in slave_files:
                 self._fits_files.append(ap.pyfits.open(fits_file,
                                                        memmap=True,
                                                        do_not_scale_image_data=True))
-        self.first_image = 0
-        #for i, h in enumerate(self._handle):
-        #    if h.header["naxis"] >= 2:
-        #        self.first_image = i
-        #        break
-
+        self.first_image = 0 # Assumed for now
         self.primary_header = self._handle[self.first_image].header
         self.shape = self._handle[self.first_image].shape
         self.wcs = ap.pywcs.WCS(header=self.primary_header)
-
+        self.axis_names = {}
+        self.naxis = self.primary_header["naxis"]
+        for i, ax in enumerate("xyz"[:self.naxis]):
+            self.axis_names[self.primary_header["CTYPE%d" % (i+1)]] = ax
         self.file_unit = None
         for i, unit in enumerate(self.wcs.wcs.cunit):
-            if unit in all_units:
+            if unit in mpc_conversion.keys():
                 self.file_unit = unit.name
                 idx = i
                 break
@@ -285,7 +268,6 @@
             self.pixel_scale = self.wcs.wcs.cdelt[idx]
 
         self.refine_by = 2
-        self.four_dims = False
 
         Dataset.__init__(self, filename, dataset_type)
         self.storage_filename = storage_filename
@@ -294,6 +276,7 @@
         """
         Generates the conversion to various physical units based on the parameter file
         """
+        from yt.units.dimensions import length
         if self.new_unit is not None:
             length_factor = self.pixel_scale
             length_unit = str(self.new_unit)
@@ -305,6 +288,9 @@
         self.mass_unit = self.quan(1.0, "g")
         self.time_unit = self.quan(1.0, "s")
         self.velocity_unit = self.quan(1.0, "cm/s")
+        self.unit_registry.add("pixel",
+                               float(self.length_unit.in_cgs().value),
+                               length)
 
     def _parse_parameter_file(self):
         self.unique_identifier = \
@@ -317,7 +303,11 @@
         self.dimensionality = self.primary_header["naxis"]
         self.geometry = "cartesian"
 
-        dims = self._handle[self.first_image].shape[::-1]
+        # Sometimes a FITS file has a 4D datacube, in which case
+        # we take the 4th axis and assume it consists of different fields.
+        if self.dimensionality == 4: self.dimensionality = 3
+
+        dims = self._handle[self.first_image].shape[::-1][:self.dimensionality]
         self.domain_dimensions = np.array(dims)
         if self.dimensionality == 2:
             self.domain_dimensions = np.append(self.domain_dimensions,
@@ -343,103 +333,79 @@
         self.current_redshift = self.omega_lambda = self.omega_matter = \
             self.hubble_constant = self.cosmological_simulation = 0.0
 
+        # If nprocs is None, do some automatic decomposition of the domain
         if self.nprocs is None:
             self.nprocs = np.around(np.prod(self.domain_dimensions) /
                                     32**self.dimensionality).astype("int")
             self.nprocs = min(self.nprocs, 2500)
 
+        # Check to see if this data is in (RA,Dec,?) format
+        self.xyv_data = False
+        x = np.zeros((self.dimensionality), dtype="bool")
+        for ap in axes_prefixes:
+            x += np_char.startswith(self.axis_names.keys()[:self.dimensionality], ap)
+        if x.sum() == self.dimensionality: self._setup_xyv()
+
+    def _setup_xyv(self):
+
+        self.xyv_data = True
+
+        end = min(self.dimensionality+1,4)
+        ctypes = np.array([self.primary_header["CTYPE%d" % (i)] for i in xrange(1,end)])
+        self.ra_axis = np.where(np_char.startswith(ctypes, "RA"))[0][0]
+        self.dec_axis = np.where(np_char.startswith(ctypes, "DEC"))[0][0]
+
+        if self.wcs.naxis > 2:
+
+            self.vel_axis = np_char.startswith(ctypes, "V")
+            self.vel_axis += np_char.startswith(ctypes, "FREQ")
+            self.vel_axis += np_char.startswith(ctypes, "ENER")
+            self.vel_axis = np.where(self.vel_axis)[0][0]
+            self.vel_name = ctypes[self.vel_axis].lower()
+
+            self.wcs_2d = ap.pywcs.WCS(naxis=2)
+            self.wcs_2d.wcs.crpix = self.wcs.wcs.crpix[[self.ra_axis, self.dec_axis]]
+            self.wcs_2d.wcs.cdelt = self.wcs.wcs.cdelt[[self.ra_axis, self.dec_axis]]
+            self.wcs_2d.wcs.crval = self.wcs.wcs.crval[[self.ra_axis, self.dec_axis]]
+            self.wcs_2d.wcs.cunit = [str(self.wcs.wcs.cunit[self.ra_axis]),
+                                     str(self.wcs.wcs.cunit[self.dec_axis])]
+            self.wcs_2d.wcs.ctype = [self.wcs.wcs.ctype[self.ra_axis],
+                                     self.wcs.wcs.ctype[self.dec_axis]]
+
+            self.wcs_1d = ap.pywcs.WCS(naxis=1)
+            self.wcs_1d.wcs.crpix = [self.wcs.wcs.crpix[self.vel_axis]]
+            self.wcs_1d.wcs.cdelt = [self.wcs.wcs.cdelt[self.vel_axis]]
+            self.wcs_1d.wcs.crval = [self.wcs.wcs.crval[self.vel_axis]]
+            self.wcs_1d.wcs.cunit = [str(self.wcs.wcs.cunit[self.vel_axis])]
+            self.wcs_1d.wcs.ctype = [self.wcs.wcs.ctype[self.vel_axis]]
+
+        else:
+
+            self.wcs_2d = self.wcs
+            self.wcs_1d = None
+            self.vel_axis = 2
+            self.vel_name = "z"
+
     def __del__(self):
+        for file in self._fits_files:
+            file.close()
         self._handle.close()
 
     @classmethod
-    def _check_axes(cls, handle):
-        for h in handle:
-            if h.header["naxis"] >= 2:
-                axes_names = [h.header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
-                x = np.zeros((3), dtype="bool")
-                for ap in axes_prefixes:
-                    x += np_char.startswith(axes_names, ap)
-                if x.sum() != 3:
-                    handle.close()
-                    return True
+    def _is_valid(cls, *args, **kwargs):
+        ext = args[0].rsplit(".", 1)[-1]
+        if ext.upper() == "GZ":
+            # We don't know for sure that there will be > 1
+            ext = args[0].rsplit(".", 1)[0].rsplit(".", 1)[-1]
+        if ext.upper() not in ("FITS", "FTS"):
+            return False
+        try:
+            with warnings.catch_warnings():
+                warnings.filterwarnings('ignore', category=UserWarning, append=True)
+                fileh = ap.pyfits.open(args[0])
+            valid = fileh[0].header["naxis"] >= 2
+            fileh.close()
+            return valid
+        except:
+            pass
         return False
-
-    @classmethod
-    def _is_valid(cls, *args, **kwargs):
-        return fits_file_validator(cls, *args, **kwargs)
-
-class FITSXYVDataset(FITSDataset):
-    _dataset_type = "xyv_fits"
-    _field_info_class = FITSXYVFieldInfo
-
-    def __init__(self, filename,
-                 dataset_type='xyv_fits',
-                 slave_files = [],
-                 nprocs = None,
-                 storage_filename = None,
-                 mask_nans = False):
-
-        self.fluid_types += ("xyv_fits",)
-
-        super(FITSXYVDataset, self).__init__(filename, dataset_type=dataset_type,
-                                             slave_files=slave_files,
-                                             nprocs=nprocs,
-                                             storage_filename=storage_filename,
-                                             mask_nans=mask_nans)
-        self.axes_names = [self.primary_header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
-        self.ra_axis = np.where(np_char.startswith(self.axes_names, "RA"))[0][0]
-        self.dec_axis = np.where(np_char.startswith(self.axes_names, "DEC"))[0][0]
-        self.vel_axis = np_char.startswith(self.axes_names, "V")
-        self.vel_axis += np_char.startswith(self.axes_names, "FREQ")
-        self.vel_axis += np_char.startswith(self.axes_names, "ENER")
-        self.vel_axis = np.where(self.vel_axis)[0][0]
-        self.vel_name = self.axes_names[self.vel_axis].lower()
-
-        self.wcs_2d = ap.pywcs.WCS(naxis=2)
-        self.wcs_2d.wcs.crpix = self.wcs.wcs.crpix[[self.ra_axis, self.dec_axis]]
-        self.wcs_2d.wcs.cdelt = self.wcs.wcs.cdelt[[self.ra_axis, self.dec_axis]]
-        self.wcs_2d.wcs.crval = self.wcs.wcs.crval[[self.ra_axis, self.dec_axis]]
-        self.wcs_2d.wcs.cunit = [str(self.wcs.wcs.cunit[self.ra_axis]),
-                                 str(self.wcs.wcs.cunit[self.dec_axis])]
-        self.wcs_2d.wcs.ctype = [self.wcs.wcs.ctype[self.ra_axis],
-                                 self.wcs.wcs.ctype[self.dec_axis]]
-
-        self.wcs_1d = ap.pywcs.WCS(naxis=1)
-        self.wcs_1d.wcs.crpix = [self.wcs.wcs.crpix[self.vel_axis]]
-        self.wcs_1d.wcs.cdelt = [self.wcs.wcs.cdelt[self.vel_axis]]
-        self.wcs_1d.wcs.crval = [self.wcs.wcs.crval[self.vel_axis]]
-        self.wcs_1d.wcs.cunit = [str(self.wcs.wcs.cunit[self.vel_axis])]
-        self.wcs_1d.wcs.ctype = [self.wcs.wcs.ctype[self.vel_axis]]
-
-    def _parse_parameter_file(self):
-
-        super(FITSXYVDataset, self)._parse_parameter_file()
-
-        if self.dimensionality == 4:
-            self.dimensionality = 3
-            self.four_dims = True
-            self.domain_dimensions = self.domain_dimensions[:3]
-            self.domain_left_edge = self.domain_left_edge[:3]
-            self.domain_right_edge = self.domain_right_edge[:3]
-
-        if self.nprocs is None:
-            self.nprocs = np.around(np.prod(self.domain_dimensions) /
-                                    32**self.dimensionality).astype("int")
-            self.nprocs = min(self.nprocs, 2500)
-
-    @classmethod
-    def _check_axes(cls, handle):
-        for h in handle:
-            if h.header["naxis"] >= 3:
-                axes_names = [h.header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
-                x = np.zeros((3), dtype="bool")
-                for ap in axes_prefixes:
-                    x += np_char.startswith(axes_names, ap)
-                if x.sum() == 3:
-                    handle.close()
-                    return True
-        return False
-
-    @classmethod
-    def _is_valid(cls, *args, **kwargs):
-        return fits_file_validator(cls, *args, **kwargs)

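The relocated _is_valid keeps the extension test formerly in fits_file_validator: accept *.fits or *.fts, optionally behind a trailing .gz. As a standalone sketch:

def looks_like_fits(path):
    ext = path.rsplit(".", 1)[-1]
    if ext.upper() == "GZ":
        # Strip the compression suffix, then re-check the real extension.
        ext = path.rsplit(".", 1)[0].rsplit(".", 1)[-1]
    return ext.upper() in ("FITS", "FTS")

print(looks_like_fits("m33_hi.fits"))     # True
print(looks_like_fits("m33_hi.fits.gz"))  # True
print(looks_like_fits("m33_hi.hdf5"))     # False
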
diff -r c8155685f7fd890a9ece614d1b05fbea2dd2729e -r 59bfd88c3b12fbf15c739dc6ca45dcf11030d7be yt/frontends/fits/fields.py
--- a/yt/frontends/fits/fields.py
+++ b/yt/frontends/fits/fields.py
@@ -17,18 +17,55 @@
 
 class FITSFieldInfo(FieldInfoContainer):
     known_other_fields = ()
+
+    def __init__(self, pf, field_list, slice_info=None):
+        super(FITSFieldInfo, self).__init__(pf, field_list, slice_info=slice_info)
+        for field in pf.field_list:
+            self[field].take_log = False
+
     def _get_wcs(self, data, axis):
-        if data.pf.dimensionality == 2:
-            w_coords = data.pf.wcs.wcs_pix2world(data["x"], data["y"], 1)
-        else:
-            w_coords = data.pf.wcs.wcs_pix2world(data["x"], data["y"],
-                                                 data["z"], 1)
+        w_coords = data.pf.wcs.wcs_pix2world(data["x"], data["y"],
+                                             data["z"], 1)
         return w_coords[axis]
+
+    def _get_2d_wcs(self, data, axis):
+        w_coords = data.pf.wcs_2d.wcs_pix2world(data["x"], data["y"], 1)
+        return w_coords[axis]
+
+    def _vel_los(field, data):
+        return data.pf.arr(data.pf.wcs_1d.wcs_pix2world(data["z"], 1)[0],
+                           str(data.pf.wcs_1d.wcs.cunit[0]))
+
+    def _setup_xyv_fields(self):
+        def world_f(axis, unit):
+            def _world_f(field, data):
+                return data.pf.arr(self._get_2d_wcs(data, axis), unit)
+            return _world_f
+        for i, axis in enumerate([self.pf.ra_axis, self.pf.dec_axis]):
+            name = ["ra","dec"][i]
+            unit = str(self.pf.wcs_2d.wcs.cunit[i])
+            if unit.lower() == "deg": unit = "degree"
+            if unit.lower() == "rad": unit = "radian"
+            self.add_field(("fits",name), function=world_f(axis, unit), units=unit)
+        def _vel_los(field, data):
+            return data.pf.arr(data.pf.wcs_1d.wcs_pix2world(data["z"], 1)[0],
+                               str(data.pf.wcs_1d.wcs.cunit[0]))
+        if self.pf.dimensionality == 3:
+            unit = str(self.pf.wcs_1d.wcs.cunit[0])
+            self.add_field(("fits",self.pf.vel_name),
+                           function=_vel_los, units=unit)
+
     def setup_fluid_fields(self):
+
+        if self.pf.xyv_data:
+            self._setup_xyv_fields()
+            return
+
         def world_f(axis, unit):
             def _world_f(field, data):
                 return data.pf.arr(self._get_wcs(data, axis), unit)
             return _world_f
+
         for i in range(self.pf.dimensionality):
             if self.pf.wcs.wcs.cname[i] == '':
                 name = str(self.pf.wcs.wcs.ctype[i])
@@ -39,35 +76,3 @@
                 if unit.lower() == "deg": unit = "degree"
                 if unit.lower() == "rad": unit = "radian"
                 self.add_field(("fits",name), function=world_f(i, unit), units=unit)
-
-class FITSXYVFieldInfo(FieldInfoContainer):
-    known_other_fields = ()
-
-    def __init__(self, pf, field_list, slice_info=None):
-        super(FITSXYVFieldInfo, self).__init__(pf, field_list, slice_info=slice_info)
-        for field in pf.field_list:
-            self[field].take_log = False
-
-    def _get_wcs(self, data, axis):
-        w_coords = data.pf.wcs_2d.wcs_pix2world(data["x"], data["y"], 1)
-        return w_coords[axis]
-    def setup_fluid_fields(self):
-        def world_f(axis, unit):
-            def _world_f(field, data):
-                return data.pf.arr(self._get_wcs(data, axis), unit)
-            return _world_f
-        for i, axis in enumerate([self.pf.ra_axis, self.pf.dec_axis]):
-            name = ["ra","dec"][i]
-            unit = str(self.pf.wcs_2d.wcs.cunit[i])
-            if unit.lower() == "deg": unit = "degree"
-            if unit.lower() == "rad": unit = "radian"
-            self.add_field(("xyv_fits",name), function=world_f(axis, unit), units=unit)
-        def _vel_los(field, data):
-            return data.pf.arr(data.pf.wcs_1d.wcs_pix2world(data["z"], 1)[0],
-                               str(data.pf.wcs_1d.wcs.cunit[0]))
-        unit = str(self.pf.wcs_1d.wcs.cunit[0])
-        self.add_field(("xyv_fits",self.pf.vel_name),
-                       function=_vel_los,
-                       units=unit)
-
-

diff -r c8155685f7fd890a9ece614d1b05fbea2dd2729e -r 59bfd88c3b12fbf15c739dc6ca45dcf11030d7be yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -12,8 +12,6 @@
 
 import numpy as np
 
-from yt.utilities.math_utils import prec_accum
-
 from yt.utilities.io_handler import \
     BaseIOHandler
 from yt.utilities.logger import ytLogger as mylog
@@ -56,40 +54,7 @@
                         nz = 1
                         data = np.zeros((nx,ny,nz))
                         data[:,:,0] = ds.data[start[1]:end[1],start[0]:end[0]].transpose()
-                    elif self.pf.dimensionality == 3:
-                        data = ds.data[start[2]:end[2],start[1]:end[1],start[0]:end[0]].transpose()
-                    if self.pf.mask_nans: data[np.isnan(data)] = 0.0
-                    ind += g.select(selector, data.astype("float64"), rv[field], ind)
-        return rv
-
-class IOHandlerFITSXYV(IOHandlerFITS):
-    _particle_reader = False
-    _dataset_type = "xyv_fits"
-
-    def __init__(self, pf):
-        super(IOHandlerFITSXYV,self).__init__(pf)
-
-    def _read_fluid_selection(self, chunks, selector, fields, size):
-        chunks = list(chunks)
-        if any((ftype != "xyv_fits" for ftype, fname in fields)):
-            raise NotImplementedError
-        rv = {}
-        dt = "float64"
-        for field in fields:
-            rv[field] = np.empty(size, dtype=dt)
-        ng = sum(len(c.objs) for c in chunks)
-        mylog.debug("Reading %s cells of %s fields in %s grids",
-                    size, [f2 for f1, f2 in fields], ng)
-        for field in fields:
-            ftype, fname = field
-            f = self.pf.index._file_map[fname]
-            ds = f[self.pf.index._ext_map[fname]]
-            ind = 0
-            for chunk in chunks:
-                for g in chunk.objs:
-                    start = (g.LeftEdge.ndarray_view()-0.5).astype("int")
-                    end = (g.RightEdge.ndarray_view()-0.5).astype("int")
-                    if self.pf.four_dims:
+                    elif self.pf.naxis == 4:
                         idx = self.pf.index._axis_map[fname]
                         data = ds.data[idx,start[2]:end[2],start[1]:end[1],start[0]:end[0]].transpose()
                     else:

diff -r c8155685f7fd890a9ece614d1b05fbea2dd2729e -r 59bfd88c3b12fbf15c739dc6ca45dcf11030d7be yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -10,115 +10,119 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import __builtin__
-import aplpy
-from yt.utilities.fits_image import FITSImageBuffer
-from yt.funcs import fix_axis, ensure_list
-import astropy.wcs as pywcs
-from yt.utilities.exceptions import \
-    YTNotInsideNotebook
-import matplotlib.pyplot as plt
+import numpy as np
+from yt.funcs import fix_axis, ensure_list, iterable
+from yt.visualization.plot_window import AxisAlignedSlicePlot, \
+    OffAxisSlicePlot, ProjectionPlot, OffAxisProjectionPlot
 
-axis_wcs = [[1,2],[0,2],[0,1]]
+def force_aspect(ax,aspect=1):
+    im = ax.get_images()
+    extent = im[0].get_extent()
+    ax.set_aspect(abs((extent[1]-extent[0])/(extent[3]-extent[2]))/aspect)
 
-plot_method_list = ["recenter","refresh","add_colorbar",
-                    "remove_colorbar"]
+def set_onaxis_wcs(pw):
+    return
+    if pw.axis == pw.ds.ra_axis:
+        xname = "Dec"
+        yname = pw.ds.vel_name
+        xunit = str(pw.ds.wcs_2d.wcs.cunit[1])
+        yunit = str(pw.ds.wcs_1d.wcs.cunit[0])
+    elif pw.axis == pw.ds.dec_axis:
+        xname = "RA"
+        yname = pw.ds.vel_name
+        xunit = str(pw.ds.wcs_2d.wcs.cunit[0])
+        yunit = str(pw.ds.wcs_1d.wcs.cunit[0])
+    elif pw.axis == pw.ds.vel_axis:
+        xname = "RA"
+        yname = "Dec"
+        xunit = str(pw.ds.wcs_2d.wcs.cunit[0])
+        yunit = str(pw.ds.wcs_2d.wcs.cunit[1])
 
-def plot_method(method, plots):
-    def _method(*args, **kwargs):
-        for plot in plots.values():
-            getattr(plot, method)(*args, **kwargs)
-        return
-    return _method
+    for k,v in pw.plots.iteritems():
+        v.axes.set_xlabel(r"%s (%s)" % (xname, xunit))
+        v.axes.set_ylabel(r"%s (%s)" % (yname, yunit))
+        v.axes.set_aspect('auto')
 
-class FITSPlot(object):
-    def __init__(self, ds, data, axis, fields, **kwargs):
+class FITSSlicePlot(AxisAlignedSlicePlot):
+
+    def __init__(self, ds, axis, fields, set_wcs=True, **kwargs):
+
+        if isinstance(axis, basestring):
+            if axis in ds.axis_names:
+                axis = ds.axis_names[axis]
+        self.axis = fix_axis(axis)
         self.ds = ds
-        self.fields = fields
-        self.plots = {}
-        w = pywcs.WCS(naxis=2)
-        w.wcs.crpix = self.ds.wcs.wcs.crpix[axis_wcs[axis]]
-        w.wcs.cdelt = self.ds.wcs.wcs.cdelt[axis_wcs[axis]]
-        w.wcs.crval = self.ds.wcs.wcs.crval[axis_wcs[axis]]
-        w.wcs.cunit = [str(self.ds.wcs.wcs.cunit[idx]) for idx in axis_wcs[axis]]
-        w.wcs.ctype = [self.ds.wcs.wcs.ctype[idx] for idx in axis_wcs[axis]]
-        self.buffer = FITSImageBuffer(data, fields=fields, wcs=w)
-        for field in self.fields:
-            self.plots[field] = aplpy.FITSFigure(self.buffer[field], **kwargs)
-            self.plots[field].set_auto_refresh(False)
-        self._setup_plot_methods()
-        self.set_font(family="serif", size=15)
-        for v in self.values():
-            v.show_colorscale()
-        plt.close("all")
+        self.set_wcs = set_wcs
+        super(FITSSlicePlot, self).__init__(ds, axis, fields, origin="native", **kwargs)
+        self.set_axes_unit("pixel")
 
-    def _setup_plot_methods(self):
-        for method in plot_method_list:
-            self.__dict__[method] = plot_method(method, self.plots)
-
-    def __getitem__(self, key):
-        return self.plots[key]
-
-    def keys(self):
-        return self.plots.keys()
-
-    def values(self):
-        return self.plots.values()
-
-    def items(self):
-        return self.plots.items()
-
-    def set_font(self, **kwargs):
-        for plot in self.keys():
-            self[plot].axis_labels.set_font(**kwargs)
-            self[plot].tick_labels.set_font(**kwargs)
+    def _set_wcs(self):
+        if self.set_wcs:
+            set_onaxis_wcs(self)
 
     def show(self):
-        r"""This will send any existing plots to the IPython notebook.
-        function name.
+        self._set_wcs()
+        super(FITSSlicePlot, self).show()
 
-        If yt is being run from within an IPython session, and it is able to
-        determine this, this function will send any existing plots to the
-        notebook for display.
+    def save(self, *args, **kwargs):
+        self._set_wcs()
+        super(FITSSlicePlot, self).save(*args, **kwargs)
 
-        If yt can't determine if it's inside an IPython session, it will raise
-        YTNotInsideNotebook.
+class FITSOffAxisSlicePlot(OffAxisSlicePlot):
 
-        Examples
-        --------
+    def __init__(self, ds, normal, fields, set_wcs=True, **kwargs):
 
-        >>> from yt.mods import SlicePlot
-        >>> slc = SlicePlot(pf, "x", ["Density", "VelocityMagnitude"])
-        >>> slc.show()
+        self.ds = ds
+        my_normal = normal
+        if ds.xyv_data:
+            if len(normal) > 2:
+                raise NotImplementedError("Normal vector must be in two dimensions for this dataset!")
+            my_normal = np.zeros((3))
+            my_normal[ds.ra_axis] = normal[0]
+            my_normal[ds.dec_axis] = normal[1]
 
-        """
-        if "__IPYTHON__" in dir(__builtin__):
-            from IPython.display import display
-            for k, v in sorted(self.plots.iteritems()):
-                display(v._figure)
-        else:
-            raise YTNotInsideNotebook
+        super(FITSOffAxisSlicePlot, self).__init__(ds, my_normal, fields, **kwargs)
+        self.set_axes_unit("pixel")
 
-class FITSSlicePlot(FITSPlot):
-    def __init__(self, ds, axis, fields, coord=None, field_parameters=None, **kwargs):
-        fields = ensure_list(fields)
-        axis = fix_axis(axis)
-        if coord is None:
-            coord = ds.domain_center.ndarray_view()[axis]
-        slc = ds.slice(axis, coord, field_parameters=field_parameters)
-        data = {}
-        for field in fields:
-            data[field] = slc.to_frb((1.0,"unitary"), ds.domain_dimensions[axis_wcs[axis]])[field]
-        super(FITSSlicePlot, self).__init__(ds, data, axis, fields, **kwargs)
+class FITSProjectionPlot(ProjectionPlot):
 
-class FITSProjectionPlot(FITSPlot):
-    def __init__(self, ds, axis, fields, weight_field=None, data_source=None,
-                 field_parameters=None, **kwargs):
-        fields = ensure_list(fields)
-        axis = fix_axis(axis)
-        prj = ds.proj(fields[0], axis, weight_field=weight_field, data_source=data_source)
-        data = {}
-        for field in fields:
-            data[field] = prj.to_frb((1.0,"unitary"), ds.domain_dimensions[axis_wcs[axis]])[field]
-        super(FITSProjectionPlot, self).__init__(ds, data, axis, fields, **kwargs)
+    def __init__(self, ds, axis, fields, set_wcs=True, **kwargs):
 
+        self.ds = ds
+        if isinstance(axis, basestring):
+            if axis in ds.axis_names:
+                axis = ds.axis_names[axis]
+        self.axis = fix_axis(axis)
+        self.set_wcs = set_wcs
+
+        super(FITSProjectionPlot, self).__init__(ds, axis, fields, origin="native", **kwargs)
+        self.set_axes_unit("pixel")
+
+    def _set_wcs(self):
+        if self.set_wcs:
+            set_onaxis_wcs(self)
+
+    def show(self):
+        self._set_wcs()
+        super(FITSProjectionPlot, self).show()
+
+    def save(self, *args, **kwargs):
+        self._set_wcs()
+        super(FITSProjectionPlot, self).save(*args, **kwargs)
+
+class FITSOffAxisProjectionPlot(OffAxisProjectionPlot):
+
+    def __init__(self, ds, normal, fields, set_wcs=True, **kwargs):
+
+        self.ds = ds
+        my_normal = normal
+        if ds.xyv_data:
+            if len(normal) > 2:
+                raise ValueError("Normal vector must be in two dimensions for this dataset!")
+            my_normal = np.zeros((3))
+            my_normal[ds.ra_axis] = normal[0]
+            my_normal[ds.dec_axis] = normal[1]
+
+        super(FITSOffAxisProjectionPlot, self).__init__(ds, my_normal, fields, axes_unit="pixel", **kwargs)
+
+

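The off-axis classes above accept a 2-component normal in the (RA, Dec) plane and embed it into a 3-vector at whatever positions those axes occupy in the cube. The embedding in isolation (axis indices and normal are illustrative):

import numpy as np

ra_axis, dec_axis = 0, 1   # hypothetical ordering read from the FITS headers
normal = (0.5, 1.0)        # user-supplied 2D normal

my_normal = np.zeros(3)
my_normal[ra_axis] = normal[0]
my_normal[dec_axis] = normal[1]
print(my_normal)           # [ 0.5  1.   0. ]
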
diff -r c8155685f7fd890a9ece614d1b05fbea2dd2729e -r 59bfd88c3b12fbf15c739dc6ca45dcf11030d7be yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -100,7 +100,6 @@
     "angstrom": (cm_per_ang, dimensions.length),
     "Jy": (jansky_cgs, dimensions.specific_flux),
     "beam": (1.0, dimensions.dimensionless),
-    "pixel":  (1.0, dimensions.length)
 }
 
 # Add LaTeX representations for units with trivial representations.


https://bitbucket.org/yt_analysis/yt/commits/459ad3a80753/
Changeset:   459ad3a80753
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-10 06:07:15
Summary:     Merge
Affected #:  10 files

diff -r 59bfd88c3b12fbf15c739dc6ca45dcf11030d7be -r 459ad3a80753bb3354c0602f940a1d748f9860d4 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -50,6 +50,8 @@
     PolarCoordinateHandler
 from yt.geometry.cylindrical_coordinates import \
     CylindricalCoordinateHandler
+from yt.geometry.spherical_coordinates import \
+    SphericalCoordinateHandler
 
 # We want to support the movie format in the future.
 # When such a thing comes to pass, I'll move all the stuff that is contant up
@@ -365,6 +367,8 @@
             self.coordinates = CylindricalCoordinateHandler(self)
         elif self.geometry == "polar":
             self.coordinates = PolarCoordinateHandler(self)
+        elif self.geometry == "spherical":
+            self.coordinates = SphericalCoordinateHandler(self)
         else:
             raise YTGeometryNotSupported(self.geometry)
 

diff -r 59bfd88c3b12fbf15c739dc6ca45dcf11030d7be -r 459ad3a80753bb3354c0602f940a1d748f9860d4 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -163,7 +163,8 @@
     _field_info_class = GDFFieldInfo
 
     def __init__(self, filename, dataset_type='grid_data_format',
-                 storage_filename=None):
+                 storage_filename=None, geometry = 'cartesian'):
+        self.geometry = geometry
         self.fluid_types += ("gdf",)
         Dataset.__init__(self, filename, dataset_type)
         self.storage_filename = storage_filename

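With the new keyword, the geometry string is set before Dataset.__init__ runs, so the base class can pick the matching coordinate handler. A hypothetical load of a spherical-coordinates GDF file (file name illustrative; import path as of this branch):

from yt.frontends.gdf.api import GDFDataset

ds = GDFDataset("sedov_spherical.gdf", geometry="spherical")
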
diff -r 59bfd88c3b12fbf15c739dc6ca45dcf11030d7be -r 459ad3a80753bb3354c0602f940a1d748f9860d4 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -381,8 +381,8 @@
     def __init__(self, filename, dataset_type="tipsy",
                  field_dtypes=None,
                  unit_base=None,
+                 parameter_file=None,
                  cosmology_parameters=None,
-                 parameter_file=None,
                  n_ref=64, over_refine_factor=1):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
@@ -480,15 +480,15 @@
             self.domain_left_edge = None
             self.domain_right_edge = None
         if self.parameters.get('bComove', False):
+            cosm = self._cosmology_parameters or {}
+            self.scale_factor = hvals["time"]#In comoving simulations, time stores the scale factor a
             self.cosmological_simulation = 1
-            cosm = self._cosmology_parameters or {}
-            dcosm = dict(current_redshift=0.0,
-                         omega_lambda=0.0,
-                         omega_matter=0.0,
-                         hubble_constant=1.0)
-            for param in ['current_redshift', 'omega_lambda',
-                          'omega_matter', 'hubble_constant']:
-                pval = cosm.get(param, dcosm[param])
+            dcosm = dict(current_redshift=(1.0/self.scale_factor)-1.0,
+                         omega_lambda=self.parameters.get('dLambda', cosm.get('omega_lambda',0.0)),
+                         omega_matter=self.parameters.get('dOmega0', cosm.get('omega_matter',0.0)),
+                         hubble_constant=self.parameters.get('dHubble0', cosm.get('hubble_constant',1.0)))
+            for param in dcosm.keys():
+                pval = dcosm[param]
                 setattr(self, param, pval)
         else:
             self.cosmological_simulation = 0.0
@@ -501,18 +501,18 @@
         f.close()
 
     def _set_code_unit_attributes(self):
-        # Set a sane default for cosmological simulations.
-        if self._unit_base is None and self.cosmological_simulation == 1:
-            mylog.info("Assuming length units are in Mpc/h (comoving)")
-            self._unit_base.update(dict(length = (1.0, "Mpccm/h")))
         if self.cosmological_simulation:
-            length_units = self._unit_base['length']
-            DW = self.quan(1./length_units[1], length_units[0])
-            cosmo = Cosmology(self.hubble_constant * 100.0,
+            mu = self.parameters.get('dMsolUnit', 1.)
+            lu = self.parameters.get('dKpcUnit', 1000.)
+            # In cosmological runs, lengths are stored as length*scale_factor
+            self.length_unit = self.quan(lu, 'kpc')*self.scale_factor
+            self.mass_unit = self.quan(mu, 'Msun')
+            density_unit = self.mass_unit/ (self.length_unit/self.scale_factor)**3
+            # Gasoline's hubble constant, dHubble0, is stored in units of proper code time.
+            self.hubble_constant *= np.sqrt(G.in_units('kpc**3*Msun**-1*s**-2')*density_unit).value/(3.2407793e-18)  
+            cosmo = Cosmology(self.hubble_constant,
                               self.omega_matter, self.omega_lambda)
-            self.length_unit = DW
-            density_unit = cosmo.critical_density(self.current_redshift)
-            self.mass_unit = density_unit * self.length_unit ** 3
+            self.current_time = cosmo.hubble_time(self.current_redshift)
         else:
             mu = self.parameters.get('dMsolUnit', 1.0)
             self.mass_unit = self.quan(mu, 'Msun')

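The magic constant in the hubble_constant conversion above is just 100 km/s/Mpc expressed in cgs, so the division leaves the value in the conventional little-h units. A quick check of that arithmetic:

km, Mpc = 1.0e5, 3.0856776e24    # both in cm
print(100.0 * km / Mpc)          # ~3.2407793e-18 (s**-1)
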
diff -r 59bfd88c3b12fbf15c739dc6ca45dcf11030d7be -r 459ad3a80753bb3354c0602f940a1d748f9860d4 yt/frontends/sph/owls_ion_tables.py
--- a/yt/frontends/sph/owls_ion_tables.py
+++ b/yt/frontends/sph/owls_ion_tables.py
@@ -162,8 +162,7 @@
         T  = np.array( T )
 
         if nH.size != T.size:
-            print ' array size mismatch !!! '
-            sys.exit(1)
+            raise ValueError(' owls_ion_tables: array size mismatch !!! ')
         
         # field discovery will have nH.size == 1 and T.size == 1
         # in that case we simply return 1.0
@@ -175,38 +174,25 @@
 
         # find inH and fnH
         #-----------------------------------------------------
-        inH = np.int32( ( nH - self.nH[0] ) / self.DELTA_nH )
-        fnH = ( nH - self.nH[inH] ) / self.dnH[inH]
-
-        indx = np.where( inH < 0 )[0]
-        if len(indx) > 0:
-            inH[indx] = 0
-            fnH[indx] = 0.0
-
-        indx = np.where( inH >= len(nH) )[0]
-        if len(indx) > 0:
-            inH[indx] = len(nH)-2
-            fnH[indx] = 1.0
+        x_nH = ( nH - self.nH[0] ) / self.DELTA_nH
+        x_nH_clip = np.clip( x_nH, 0.0, self.nH.size-1.001 )
+        fnH,inH = np.modf( x_nH_clip )
+        inH = inH.astype( np.int32 )
 
 
         # find iT and fT
         #-----------------------------------------------------
-        iT = np.int32( ( T - self.T[0] ) / self.DELTA_T )
-        fT = ( T - self.T[iT] ) / self.dT[iT]
+        x_T = ( T - self.T[0] ) / self.DELTA_T
+        x_T_clip = np.clip( x_T, 0.0, self.T.size-1.001 )
+        fT,iT = np.modf( x_T_clip )
+        iT = iT.astype( np.int32 )
+        
 
-        indx = np.where( iT < 0 )[0]
-        if len(indx) > 0:
-            iT[indx] = 0
-            fT[indx] = 0.0
-
-        indx = np.where( iT >= len(T) )[0]
-        if len(indx) > 0:
-            iT[indx] = len(T)-2
-            fT[indx] = 1.0
-
-
+        # short names for previously calculated iz and fz
+        #-----------------------------------------------------
         iz = self.iz
         fz = self.fz
+
                    
         # calculate interpolated value
         # use tri-linear interpolation on the log values

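The rewritten index search above replaces the branchy where() fix-ups with one vectorized clamp: convert a value to grid units, clip it into the valid interpolation range, and split it into an integer bin plus a fractional offset with np.modf. The same pattern on a toy table:

import numpy as np

table_x = np.linspace(0.0, 10.0, 11)         # hypothetical uniform grid
delta = table_x[1] - table_x[0]

v = np.array([-3.0, 2.5, 9.99, 42.0])        # includes out-of-range values
x = (v - table_x[0]) / delta                 # position in grid units
x = np.clip(x, 0.0, table_x.size - 1.001)    # clamp so idx stays <= size - 2
frac, idx = np.modf(x)                       # fractional and integer parts
idx = idx.astype(np.int32)
print(idx)   # [0 2 9 9]: clamped values pinned to the table edges
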
diff -r 59bfd88c3b12fbf15c739dc6ca45dcf11030d7be -r 459ad3a80753bb3354c0602f940a1d748f9860d4 yt/geometry/cartesian_coordinates.py
--- a/yt/geometry/cartesian_coordinates.py
+++ b/yt/geometry/cartesian_coordinates.py
@@ -62,7 +62,8 @@
         period = self.period[:2].copy() # dummy here
         period[0] = self.period[self.x_axis[dim]]
         period[1] = self.period[self.y_axis[dim]]
-        period = period.in_units("code_length").d
+        if hasattr(period, 'in_units'):
+            period = period.in_units("code_length").d
         buff = _MPL.Pixelize(data_source['px'], data_source['py'],
                              data_source['pdx'], data_source['pdy'],
                              data_source[field], size[0], size[1],

diff -r 59bfd88c3b12fbf15c739dc6ca45dcf11030d7be -r 459ad3a80753bb3354c0602f940a1d748f9860d4 yt/geometry/cylindrical_coordinates.py
--- a/yt/geometry/cylindrical_coordinates.py
+++ b/yt/geometry/cylindrical_coordinates.py
@@ -21,6 +21,8 @@
     _unknown_coord, \
     _get_coord_fields
 import yt.visualization._MPL as _MPL
+from yt.utilities.lib.misc_utilities import \
+    pixelize_cylinder
 #
 # Cylindrical fields
 #
@@ -71,11 +73,12 @@
                  units = "code_length**3")
 
 
-    def pixelize(self, dimension, data_source, field, bounds, size, antialias = True):
+    def pixelize(self, dimension, data_source, field, bounds, size,
+                 antialias = True, periodic = True):
         ax_name = self.axis_name[dimension]
         if ax_name in ('r', 'theta'):
             return self._ortho_pixelize(data_source, field, bounds, size,
-                                        antialias)
+                                        antialias, dimension, periodic)
         elif ax_name == "z":
             return self._cyl_pixelize(data_source, field, bounds, size,
                                         antialias)
@@ -83,20 +86,26 @@
             # Pixelizing along a cylindrical surface is a bit tricky
             raise NotImplementedError
 
-    def _ortho_pixelize(self, data_source, field, bounds, size, antialias):
+    def _ortho_pixelize(self, data_source, field, bounds, size, antialias,
+                        dim, periodic):
+        period = self.period[:2].copy() # dummy here
+        period[0] = self.period[self.x_axis[dim]]
+        period[1] = self.period[self.y_axis[dim]]
+        if hasattr(period, 'in_units'):
+            period = period.in_units("code_length").d
         buff = _MPL.Pixelize(data_source['px'], data_source['py'],
                              data_source['pdx'], data_source['pdy'],
                              data_source[field], size[0], size[1],
                              bounds, int(antialias),
-                             True, self.period).transpose()
+                             period, int(periodic)).transpose()
         return buff
 
     def _cyl_pixelize(self, data_source, field, bounds, size, antialias):
         buff = pixelize_cylinder(data_source['r'],
                                  data_source['dr'],
                                  data_source['theta'],
-                                 data_source['dtheta'],
-                                 size[0], data_source[field], bounds[0])
+                                 data_source['dtheta']/2.0, # half-widths
+                                 size, data_source[field], bounds)
         return buff
 
     axis_name = { 0  : 'r',  1  : 'z',  2  : 'theta',

diff -r 59bfd88c3b12fbf15c739dc6ca45dcf11030d7be -r 459ad3a80753bb3354c0602f940a1d748f9860d4 yt/geometry/polar_coordinates.py
--- a/yt/geometry/polar_coordinates.py
+++ b/yt/geometry/polar_coordinates.py
@@ -91,8 +91,8 @@
         buff = pixelize_cylinder(data_source['r'],
                                  data_source['dr'],
                                  data_source['theta'],
-                                 data_source['dtheta'],
-                                 size[0], data_source[field], bounds[1])
+                                 data_source['dtheta'] / 2.0, # half-widths
+                                 size, data_source[field], bounds)
         return buff
 
     axis_name = { 0  : 'r',  1  : 'theta',  2  : 'z',

diff -r 59bfd88c3b12fbf15c739dc6ca45dcf11030d7be -r 459ad3a80753bb3354c0602f940a1d748f9860d4 yt/geometry/spherical_coordinates.py
--- /dev/null
+++ b/yt/geometry/spherical_coordinates.py
@@ -0,0 +1,162 @@
+"""
+Spherical fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from .coordinate_handler import \
+    CoordinateHandler, \
+    _unknown_coord, \
+    _get_coord_fields
+import yt.visualization._MPL as _MPL
+from yt.utilities.lib.misc_utilities import \
+    pixelize_cylinder, pixelize_aitoff
+
+class SphericalCoordinateHandler(CoordinateHandler):
+
+    def __init__(self, pf, ordering = 'rtp'):
+        if ordering != 'rtp': raise NotImplementedError
+        super(SphericalCoordinateHandler, self).__init__(pf)
+
+    def setup_fields(self, registry):
+        # return the fields for r, z, theta
+        registry.add_field(("index", "dx"), function=_unknown_coord)
+        registry.add_field(("index", "dy"), function=_unknown_coord)
+        registry.add_field(("index", "dz"), function=_unknown_coord)
+        registry.add_field(("index", "x"), function=_unknown_coord)
+        registry.add_field(("index", "y"), function=_unknown_coord)
+        registry.add_field(("index", "z"), function=_unknown_coord)
+        f1, f2 = _get_coord_fields(0)
+        registry.add_field(("index", "dr"), function = f1,
+                           display_field = False,
+                           units = "code_length")
+        registry.add_field(("index", "r"), function = f2,
+                           display_field = False,
+                           units = "code_length")
+
+        f1, f2 = _get_coord_fields(1, "")
+        registry.add_field(("index", "dtheta"), function = f1,
+                           display_field = False,
+                           units = "")
+        registry.add_field(("index", "theta"), function = f2,
+                           display_field = False,
+                           units = "")
+
+        f1, f2 = _get_coord_fields(2, "")
+        registry.add_field(("index", "dphi"), function = f1,
+                           display_field = False,
+                           units = "")
+        registry.add_field(("index", "phi"), function = f2,
+                           display_field = False,
+                           units = "")
+
+        def _SphericalVolume(field, data):
+            # r**2 sin theta dr dtheta dphi
+            vol = data["index", "r"]**2.0
+            vol *= data["index", "dr"]
+            vol *= np.sin(data["index", "theta"])
+            vol *= data["index", "dtheta"]
+            vol *= data["index", "dphi"]
+            return vol
+        registry.add_field(("index", "cell_volume"),
+                 function=_SphericalVolume,
+                 units = "code_length**3")
+
+    def pixelize(self, dimension, data_source, field, bounds, size,
+                 antialias = True, periodic = True):
+        if dimension == 0:
+            return self._ortho_pixelize(data_source, field, bounds, size,
+                                        antialias, dimension, periodic)
+        elif dimension in (1, 2):
+            return self._cyl_pixelize(data_source, field, bounds, size,
+                                          antialias, dimension)
+        else:
+            raise NotImplementedError
+
+    def _ortho_pixelize(self, data_source, field, bounds, size, antialias,
+                        dim, periodic):
+        # We should be using fcoords
+        period = self.period[:2].copy() # dummy here
+        period[0] = self.period[self.x_axis[dim]]
+        period[1] = self.period[self.y_axis[dim]]
+        period = period.in_units("code_length").d
+        buff = _MPL.Pixelize(data_source['px'], data_source['py'],
+                             data_source['pdx'], data_source['pdy'],
+                             data_source[field], size[0], size[1],
+                             bounds, int(antialias),
+                             period, int(periodic)).transpose()
+        return buff
+
+    def _cyl_pixelize(self, data_source, field, bounds, size, antialias,
+                      dimension):
+        if dimension == 1:
+            buff = pixelize_cylinder(data_source['r'],
+                                     data_source['dr'],
+                                     data_source['phi'],
+                                     data_source['dphi'] / 2.0, # half-widths
+                                     size, data_source[field], bounds)
+        elif dimension == 2:
+            buff = pixelize_cylinder(data_source['r'],
+                                     data_source['dr'],
+                                     data_source['theta'],
+                                     data_source['dtheta'] / 2.0, # half-widths
+                                     size, data_source[field], bounds)
+            buff = pixelize_cylinder(data_source['r'],
+                                     data_source['dr'],
+                                     2.0*np.pi - data_source['theta'],
+                                     data_source['dtheta'] / 2.0, # half-widths
+                                     size, data_source[field], bounds,
+                                     input_img = buff)
+        else:
+            raise RuntimeError
+        return buff
+
+
+    def convert_from_cartesian(self, coord):
+        raise NotImplementedError
+
+    def convert_to_cartesian(self, coord):
+        raise NotImplementedError
+
+    def convert_to_cylindrical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_cylindrical(self, coord):
+        raise NotImplementedError
+
+    def convert_to_spherical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_spherical(self, coord):
+        raise NotImplementedError
+
+    # Despite being mutables, we uses these here to be clear about how these
+    # are generated and to ensure that they are not re-generated unnecessarily
+    axis_name = { 0  : 'r',  1  : 'theta',  2  : 'phi',
+                 'r' : 'r', 'theta' : 'theta', 'phi' : 'phi',
+                 'R' : 'r', 'Theta' : 'theta', 'Phi' : 'phi'}
+
+    axis_id = { 'r' : 0, 'theta' : 1, 'phi' : 2,
+                 0  : 0,  1  : 1,  2  : 2}
+
+    x_axis = { 'r' : 1, 'theta' : 0, 'phi' : 0,
+                0  : 1,  1  : 0,  2  : 0}
+
+    y_axis = { 'r' : 2, 'theta' : 2, 'phi' : 1,
+                0  : 2,  1  : 2,  2  : 1}
+
+    @property
+    def period(self):
+        return self.pf.domain_width
+

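The cell_volume field integrates r**2 sin(theta) dr dtheta dphi. Summing that expression over a uniform grid covering the unit sphere should converge to 4*pi/3, which makes a quick sanity check of the formula:

import numpy as np

nr, nt, nphi = 64, 64, 64
r = (np.arange(nr) + 0.5) / nr               # cell-centered r in [0, 1]
theta = (np.arange(nt) + 0.5) * np.pi / nt   # cell-centered theta in [0, pi]
dr, dtheta, dphi = 1.0 / nr, np.pi / nt, 2.0 * np.pi / nphi

R, T = np.meshgrid(r, theta, indexing="ij")
vol = (R ** 2 * np.sin(T) * dr * dtheta * dphi).sum() * nphi
print(vol, 4.0 * np.pi / 3.0)                # ~4.1889 vs 4.18879
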
diff -r 59bfd88c3b12fbf15c739dc6ca45dcf11030d7be -r 459ad3a80753bb3354c0602f940a1d748f9860d4 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -477,23 +477,27 @@
                       np.ndarray[np.float64_t, ndim=1] dradius,
                       np.ndarray[np.float64_t, ndim=1] theta,
                       np.ndarray[np.float64_t, ndim=1] dtheta,
-                      int buff_size,
+                      buff_size,
                       np.ndarray[np.float64_t, ndim=1] field,
-                      np.float64_t rmax=-1.0) :
+                      extents, input_img = None):
 
     cdef np.ndarray[np.float64_t, ndim=2] img
     cdef np.float64_t x, y, dx, dy, r0, theta0
+    cdef np.float64_t rmax, x0, y0, x1, y1
     cdef np.float64_t r_i, theta_i, dr_i, dtheta_i, dthetamin
     cdef int i, pi, pj
     
-    if rmax < 0.0 :
-        imax = radius.argmax()
-        rmax = radius[imax] + dradius[imax]
+    imax = radius.argmax()
+    rmax = radius[imax] + dradius[imax]
           
-    img = np.zeros((buff_size, buff_size))
-    extents = [-rmax, rmax] * 2
-    dx = (extents[1] - extents[0]) / img.shape[0]
-    dy = (extents[3] - extents[2]) / img.shape[1]
+    if input_img is None:
+        img = np.zeros((buff_size[0], buff_size[1]))
+        img[:] = np.nan
+    else:
+        img = input_img
+    x0, x1, y0, y1 = extents
+    dx = (x1 - x0) / img.shape[0]
+    dy = (y1 - y0) / img.shape[1]
       
     dthetamin = dx / rmax
       
@@ -513,14 +517,73 @@
                     continue
                 x = r_i * math.cos(theta_i)
                 y = r_i * math.sin(theta_i)
-                pi = <int>((x + rmax)/dx)
-                pj = <int>((y + rmax)/dy)
-                img[pi, pj] = field[i]
+                pi = <int>((x - x0)/dx)
+                pj = <int>((y - y0)/dy)
+                if pi >= 0 and pi < img.shape[0] and \
+                   pj >= 0 and pj < img.shape[1]:
+                    if img[pi, pj] != img[pi, pj]:
+                        img[pi, pj] = 0.0
+                    img[pi, pj] = field[i]
                 r_i += 0.5*dx 
             theta_i += dthetamin
 
     return img
 
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def pixelize_aitoff(np.ndarray[np.float64_t, ndim=1] theta,
+                    np.ndarray[np.float64_t, ndim=1] dtheta,
+                    np.ndarray[np.float64_t, ndim=1] phi,
+                    np.ndarray[np.float64_t, ndim=1] dphi,
+                    buff_size,
+                    np.ndarray[np.float64_t, ndim=1] field,
+                    extents, input_img = None):
+    
+    cdef np.ndarray[np.float64_t, ndim=2] img
+    cdef int i, j, nf, fi
+    cdef np.float64_t x, y, z, zb
+    cdef np.float64_t dx, dy, inside
+    cdef np.float64_t theta1, dtheta1, phi1, dphi1
+    cdef np.float64_t theta0, phi0
+    cdef np.float64_t PI = np.pi
+    cdef np.float64_t s2 = math.sqrt(2.0)
+    nf = field.shape[0]
+    
+    if input_img is None:
+        img = np.zeros((buff_size[0], buff_size[1]))
+        img[:] = np.nan
+    else:
+        img = input_img
+    dx = 2.0 / (img.shape[0] - 1)
+    dy = 2.0 / (img.shape[1] - 1)
+    for i in range(img.shape[0]):
+        x = (-1.0 + i*dx)*s2*2.0
+        for j in range(img.shape[1]):
+            y = (-1.0 + j * dy)*s2
+            zb = (x*x/8.0 + y*y/2.0 - 1.0)
+            if zb > 0: continue
+            z = (1.0 - (x/4.0)**2.0 - (y/2.0)**2.0)
+            z = z**0.5
+            # Longitude
+            phi0 = (2.0*math.atan(z*x/(2.0 * (2.0*z*z-1.0))) + PI)
+            # Latitude
+            # We shift it into co-latitude
+            theta0 = (math.asin(z*y) + PI/2.0)
+            # Now we just need to figure out which pixel contributes.
+            # We do not have a fast search.
+            for fi in range(nf):
+                theta1 = theta[fi]
+                dtheta1 = dtheta[fi]
+                if not (theta1 - dtheta1 <= theta0 <= theta1 + dtheta1):
+                    continue
+                phi1 = phi[fi]
+                dphi1 = dphi[fi]
+                if not (phi1 - dphi1 <= phi0 <= phi1 + dphi1):
+                    continue
+                img[i, j] = field[fi]
+    return img
+
 #@cython.cdivision(True)
 #@cython.boundscheck(False)
 #@cython.wraparound(False)

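For context, a rough pure-NumPy transcription of the per-pixel inverse Aitoff
mapping in pixelize_aitoff above (the function name is illustrative, not from
the yt source):

    import numpy as np

    def aitoff_to_sphere(x, y):
        # x spans [-2*sqrt(2), 2*sqrt(2)], y spans [-sqrt(2), sqrt(2)]
        zb = x*x/8.0 + y*y/2.0 - 1.0
        if zb > 0.0:
            return None  # pixel lies outside the projection ellipse
        z = np.sqrt(1.0 - (x/4.0)**2 - (y/2.0)**2)
        phi = 2.0*np.arctan(z*x/(2.0*(2.0*z*z - 1.0))) + np.pi  # longitude
        theta = np.arcsin(z*y) + np.pi/2.0  # latitude shifted to co-latitude
        return theta, phi

The nested loop then does a linear scan over the (theta, phi) cells to find
the one containing each pixel; as the comment in the diff says, there is no
fast search yet.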
diff -r 59bfd88c3b12fbf15c739dc6ca45dcf11030d7be -r 459ad3a80753bb3354c0602f940a1d748f9860d4 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -165,8 +165,20 @@
         width = get_sanitized_width(axis, width, None, pf)
         center = get_sanitized_center(center, pf)
     elif pf.geometry in ("polar", "cylindrical"):
+        # Set our default width to be the full domain
         width = [pf.domain_right_edge[0]*2.0, pf.domain_right_edge[0]*2.0]
         center = pf.arr([0.0, 0.0, 0.0], "code_length")
+    elif pf.geometry == "spherical":
+        if axis == 0:
+            width = pf.domain_width[1], pf.domain_width[2]
+            center = 0.5*(pf.domain_left_edge +
+                pf.domain_right_edge).in_units("code_length")
+        else:
+            # Our default width here is the full domain
+            width = [pf.domain_right_edge[0]*2.0, pf.domain_right_edge[0]*2.0]
+            center = pf.arr([0.0, 0.0, 0.0], "code_length")
+    else:
+        raise NotImplementedError
     bounds = (center[x_dict[axis]]-width[0] / 2,
               center[x_dict[axis]]+width[0] / 2,
               center[y_dict[axis]]-width[1] / 2,

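As a concrete check of the bounds arithmetic above (indices simplified; the
real code routes through x_dict/y_dict): for a spherical dataset with
domain_right_edge[0] = 1.0 code_length, a slice gets width (2.0, 2.0) centered
on the origin, so:

    center = [0.0, 0.0, 0.0]
    width = [2.0, 2.0]
    bounds = (center[0] - width[0]/2, center[0] + width[0]/2,
              center[1] - width[1]/2, center[1] + width[1]/2)
    print bounds  # (-1.0, 1.0, -1.0, 1.0): the full r-extent in both directions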

https://bitbucket.org/yt_analysis/yt/commits/a6a66c929668/
Changeset:   a6a66c929668
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-10 06:16:19
Summary:     This wasn't needed
Affected #:  1 file

diff -r 459ad3a80753bb3354c0602f940a1d748f9860d4 -r a6a66c929668480a93c70f02dc1ec1e259ed09bc yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -231,7 +231,6 @@
                  mask_nans = False,
                  suppress_astropy_warnings = True):
         if suppress_astropy_warnings:
-            ap.log.disable_warnings_logging()
             warnings.filterwarnings('ignore', module="astropy", append=True)
         self.filenames = [filename] + slave_files
         self.num_files = len(self.filenames)


https://bitbucket.org/yt_analysis/yt/commits/37187068e11e/
Changeset:   37187068e11e
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-10 06:34:19
Summary:     Fixing the way the pixel length is set up. Fixed a typo in unit_registry.
Affected #:  3 files

diff -r a6a66c929668480a93c70f02dc1ec1e259ed09bc -r 37187068e11e26c342ab883ce06f50aae7f481dd yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -287,9 +287,7 @@
         self.mass_unit = self.quan(1.0, "g")
         self.time_unit = self.quan(1.0, "s")
         self.velocity_unit = self.quan(1.0, "cm/s")
-        self.unit_registry.add("pixel",
-                               float(self.length_unit.in_cgs().value),
-                               length)
+        self.unit_registry.modify("pixel", self.length_unit),
 
     def _parse_parameter_file(self):
         self.unique_identifier = \

diff -r a6a66c929668480a93c70f02dc1ec1e259ed09bc -r 37187068e11e26c342ab883ce06f50aae7f481dd yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -100,6 +100,7 @@
     "angstrom": (cm_per_ang, dimensions.length),
     "Jy": (jansky_cgs, dimensions.specific_flux),
     "beam": (1.0, dimensions.dimensionless),
+    "pixel": (1.0, dimensions.length),
 }
 
 # Add LaTeX representations for units with trivial representations.

diff -r a6a66c929668480a93c70f02dc1ec1e259ed09bc -r 37187068e11e26c342ab883ce06f50aae7f481dd yt/units/unit_registry.py
--- a/yt/units/unit_registry.py
+++ b/yt/units/unit_registry.py
@@ -82,7 +82,7 @@
         """
         if symbol not in self.lut:
             raise SymbolNotFoundError(
-                "Tried to remove the symbol '%s', but it does not exist" \
+                "Tried to modify the symbol '%s', but it does not exist" \
                 "in this registry." % symbol)
 
         if hasattr(cgs_value, "in_cgs"):

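The reason for the new "pixel" entry in unit_lookup_table.py: modify() raises
if the symbol was never added, so the LUT seeds it at a placeholder 1.0 cm and
the FITS frontend then overrides it per dataset. A toy stand-in for the
registry (hypothetical class; the real API is UnitRegistry.modify):

    class MiniRegistry(object):
        def __init__(self):
            # seeded like the LUT entry above: 1.0 cm, dimensions of length
            self.lut = {"pixel": (1.0, "length")}

        def modify(self, symbol, cgs_value):
            if symbol not in self.lut:
                raise KeyError("Tried to modify the symbol '%s', but it does "
                               "not exist in this registry." % symbol)
            self.lut[symbol] = (float(cgs_value), self.lut[symbol][1])

    reg = MiniRegistry()
    reg.modify("pixel", 3.0857e21)  # e.g. one pixel spans a kpc, in cm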

https://bitbucket.org/yt_analysis/yt/commits/d51eb563aa82/
Changeset:   d51eb563aa82
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-10 17:56:29
Summary:     Setting a reasonable value for the maximum number of grids--some decompositions don't work with the older, higher value.
Affected #:  1 file

diff -r 37187068e11e26c342ab883ce06f50aae7f481dd -r d51eb563aa82e5a87c2073d67c3d5645e3e38d37 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -334,7 +334,7 @@
         if self.nprocs is None:
             self.nprocs = np.around(np.prod(self.domain_dimensions) /
                                     32**self.dimensionality).astype("int")
-            self.nprocs = min(self.nprocs, 2500)
+            self.nprocs = min(self.nprocs, 512)
 
         # Check to see if this data is in (RA,Dec,?) format
         self.xyv_data = False

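To see where the new cap bites (worked example, not from the source): the
heuristic aims for roughly 32^3-cell virtual grids, so a 256^3 cube already
lands exactly at the limit:

    import numpy as np
    domain_dimensions = np.array([256, 256, 256])
    nprocs = np.around(np.prod(domain_dimensions) / 32**3).astype("int")
    print nprocs          # 512 -> exactly at the cap
    print min(4096, 512)  # a 512^3 cube would want 4096 grids, clamped to 512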

https://bitbucket.org/yt_analysis/yt/commits/bf9d49680fe0/
Changeset:   bf9d49680fe0
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-10 17:56:55
Summary:     A bit more work on getting WCS coordinates on plot window plots.
Affected #:  1 file

diff -r d51eb563aa82e5a87c2073d67c3d5645e3e38d37 -r bf9d49680fe0cfcaa985b4a790ca3fd6bf1f5a2c yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -20,8 +20,27 @@
     extent = im[0].get_extent()
     ax.set_aspect(abs((extent[1]-extent[0])/(extent[3]-extent[2]))/aspect)
 
+def convert_ticks(ticks, to_hours=False):
+    deg_ticks = ticks.astype("int")
+    min_ticks = ((ticks - deg_ticks)*60.).astype("int")
+    sec_ticks = ((((ticks - deg_ticks)*60.)-min_ticks)*60.).astype("int")
+    deg_string = "d"
+    if to_hours:
+        deg_ticks = (deg_ticks*24./360.).astype("int")
+        deg_string = "h"
+    return ["%02d%s%02dm%02ds" % (dt, deg_string, mt, st)
+            for dt, mt, st in zip(deg_ticks, min_ticks, sec_ticks)]
+
 def set_onaxis_wcs(pw):
-    return
+
+    ax = pw.plots.values()[0].axes
+    xpix = ax.get_xticks()
+    ypix = ax.get_xticks()
+    ra_ticks, dummy = pw.ds.wcs_2d.wcs_pix2world(xpix, ypix[0]*np.ones((len(xpix))), 1)
+    dummy, dec_ticks = pw.ds.wcs_2d.wcs_pix2world(xpix[0]*np.ones((len(ypix))), ypix, 1)
+    if pw.ds.dimensionality == 3:
+        vlim = pw.ds.wcs_1d.wcs_pix2world([pw.xlim[0], pw.xlim[1]], 1)[0]
+
     if pw.axis == pw.ds.ra_axis:
         xname = "Dec"
         yname = pw.ds.vel_name
@@ -41,11 +60,19 @@
     for k,v in pw.plots.iteritems():
         v.axes.set_xlabel(r"%s (%s)" % (xname, xunit))
         v.axes.set_ylabel(r"%s (%s)" % (yname, yunit))
-        v.axes.set_aspect('auto')
+        if xname == "Dec":
+            v.axes.xaxis.set_ticklabels(convert_ticks(dec_ticks), size=14)
+        if yname == "Dec":
+            v.axes.yaxis.set_ticklabels(convert_ticks(dec_ticks), size=14)
+        if xname == "RA":
+            v.axes.xaxis.set_ticklabels(convert_ticks(ra_ticks, to_hours=True), size=14)
+        if yname == pw.ds.vel_name:
+            extent = (pw.xlim[0].value, pw.xlim[1].value, vlim[0], vlim[1])
+            v.image.set_extent(extent)
 
 class FITSSlicePlot(AxisAlignedSlicePlot):
 
-    def __init__(self, ds, axis, fields, set_wcs=True, **kwargs):
+    def __init__(self, ds, axis, fields, set_wcs=False, **kwargs):
 
         if isinstance(axis, basestring):
             if axis in ds.axis_names:
@@ -64,13 +91,13 @@
         self._set_wcs()
         super(FITSSlicePlot, self).show()
 
-    def save(self, *args, **kwargs):
+    def save_wcs(self, *args, **kwargs):
         self._set_wcs()
         super(FITSSlicePlot, self).save(*args, **kwargs)
 
 class FITSOffAxisSlicePlot(OffAxisSlicePlot):
 
-    def __init__(self, ds, normal, fields, set_wcs=True, **kwargs):
+    def __init__(self, ds, normal, fields, set_wcs=False, **kwargs):
 
         self.ds = ds
         my_normal = normal
@@ -86,7 +113,7 @@
 
 class FITSProjectionPlot(ProjectionPlot):
 
-    def __init__(self, ds, axis, fields, set_wcs=True, **kwargs):
+    def __init__(self, ds, axis, fields, set_wcs=False, **kwargs):
 
         self.ds = ds
         if isinstance(axis, basestring):
@@ -112,7 +139,7 @@
 
 class FITSOffAxisProjectionPlot(OffAxisProjectionPlot):
 
-    def __init__(self, ds, normal, fields, set_wcs=True, **kwargs):
+    def __init__(self, ds, normal, fields, set_wcs=False, **kwargs):
 
         self.ds = ds
         my_normal = normal

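A worked run of convert_ticks from the diff above (tick values invented): a
declination tick at 30.2625 degrees splits as 30d, 0.2625*60 = 15.75 -> 15m,
0.75*60 = 45s:

    import numpy as np
    ticks = np.array([30.2625])
    deg = ticks.astype("int")                              # [30]
    mnt = ((ticks - deg)*60.).astype("int")                # [15]
    sec = ((((ticks - deg)*60.) - mnt)*60.).astype("int")  # [45]
    print ["%02dd%02dm%02ds" % t for t in zip(deg, mnt, sec)]  # ['30d15m45s']

With to_hours=True the truncated degrees are rescaled by 24/360 first, so an
RA tick at 180 degrees formats as 12h instead.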

https://bitbucket.org/yt_analysis/yt/commits/68ca7c6668fb/
Changeset:   68ca7c6668fb
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-10 19:43:36
Summary:     This shouldn't be a restriction
Affected #:  1 file

diff -r bf9d49680fe0cfcaa985b4a790ca3fd6bf1f5a2c -r 68ca7c6668fbe373f21f16c66dd59fb253ad03bd yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -100,15 +100,9 @@
     def __init__(self, ds, normal, fields, set_wcs=False, **kwargs):
 
         self.ds = ds
-        my_normal = normal
-        if ds.xyv_data:
-            if len(normal) > 2:
-                raise NotImplementedError("Normal vector must be in two dimensions for this dataset!")
-            my_normal = np.zeros((3))
-            my_normal[ds.ra_axis] = normal[0]
-            my_normal[ds.dec_axis] = normal[1]
-
-        super(FITSOffAxisSlicePlot, self).__init__(ds, my_normal, fields, **kwargs)
+        self.set_wcs = set_wcs
+        
+        super(FITSOffAxisSlicePlot, self).__init__(ds, normal, fields, **kwargs)
         self.set_axes_unit("pixel")
 
 class FITSProjectionPlot(ProjectionPlot):
@@ -142,14 +136,7 @@
     def __init__(self, ds, normal, fields, set_wcs=False, **kwargs):
 
         self.ds = ds
-        my_normal = normal
-        if ds.xyv_data:
-            if len(normal) > 2:
-                raise ValueError("Normal vector must be in two dimensions for this dataset!")
-            my_normal = np.zeros((3))
-            my_normal[ds.ra_axis] = normal[0]
-            my_normal[ds.dec_axis] = normal[1]
+        self.set_wcs = set_wcs
+        super(FITSOffAxisProjectionPlot, self).__init__(ds, normal, fields, axes_unit="pixel", **kwargs)
 
-        super(FITSOffAxisProjectionPlot, self).__init__(ds, my_normal, fields, axes_unit="pixel", **kwargs)
 
-


https://bitbucket.org/yt_analysis/yt/commits/57d2946c6dbe/
Changeset:   57d2946c6dbe
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-10 19:44:29
Summary:     A more robust way to determine unit names. May not handle names with prefixes very well.
Affected #:  1 file

diff -r 68ca7c6668fbe373f21f16c66dd59fb253ad03bd -r 57d2946c6dbe72053e28977cf1173b9f04443ce0 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -16,6 +16,7 @@
 import numpy.core.defchararray as np_char
 import weakref
 import warnings
+import re
 
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
@@ -33,6 +34,7 @@
 from .fields import FITSFieldInfo
 from yt.utilities.decompose import \
     decompose_array, get_psize, decompose_array_nocopy
+from yt.units.unit_lookup_table import default_unit_symbol_lut, prefixable_units
 
 class astropy_imports:
     _pyfits = None
@@ -74,10 +76,12 @@
 
 ap = astropy_imports()
 
-known_units = {"k":"K",
-               "jy":"Jy"}
+known_units = dict([(unit.lower(),unit) for unit in default_unit_symbol_lut])
+axes_prefixes = ["RA","DEC","V","ENER","FREQ"]
 
-axes_prefixes = ["RA","DEC","V","ENER","FREQ"]
+delimiters = ["*", "/", "-", "^"]
+delimiters += [str(i) for i in xrange(10)]
+regex_pattern = '|'.join(map(re.escape, delimiters))
 
 class FITSGrid(AMRGridPatch):
     _id_offset = 0
@@ -109,15 +113,16 @@
     def _initialize_data_storage(self):
         pass
 
-    def _detect_image_units(self, fname, header):
+    def _determine_image_units(self, fname, header):
         try:
             field_units = header["bunit"].lower().strip(" ")
             # FITS units always return upper-case, so we need to get
             # the right case by comparing against known units. This
             # only really works for common units.
-            for name in known_units:
-                if field_units.find(name) > -1:
-                    field_units = field_units.replace(name, known_units[name])
+            units = re.split(regex_pattern, field_units)
+            for unit in units:
+                if unit in known_units:
+                    field_units = field_units.replace(unit, known_units[unit])
             self.parameter_file.field_units[fname] = field_units
         except:
             pass
@@ -154,7 +159,7 @@
                         self._ext_map[fname] = j
                         self.field_list.append((self.dataset_type, fname))
                         mylog.info("Adding field %s to the list of fields." % (fname))
-                        self._detect_image_units(fname, h.header)
+                        self._determine_image_units(fname, h.header)
 
     def _count_grids(self):
         self.num_grids = self.pf.nprocs

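An illustrative run of the new delimiter split (standalone, mirroring the
module-level pattern above):

    import re
    delimiters = ["*", "/", "-", "^"] + [str(i) for i in xrange(10)]
    pattern = '|'.join(map(re.escape, delimiters))
    print re.split(pattern, "jy/beam")     # ['jy', 'beam']
    print re.split(pattern, "erg/cm^2/s")  # ['erg', 'cm', '', '', 's']

Each lower-cased token is then looked up in known_units to restore the
canonical case ('jy' -> 'Jy'), which is why prefixed unit names may still slip
through, as the summary warns.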

https://bitbucket.org/yt_analysis/yt/commits/764d4582c715/
Changeset:   764d4582c715
Branch:      yt-3.0
User:        samskillman
Date:        2014-04-10 06:07:03
Summary:     Really hacky way to think of lines as different offsets into the primary field. Needs cleanup and an API. This is just proof of concept (it totally works!).
Affected #:  2 files

diff -r 8e57daf2d5958dec16d2c3d02fa98defebc37da6 -r 764d4582c715f6be4c96c23f9efc4fb68841239a yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -154,6 +154,12 @@
                     fname = h.name.lower()
                     self.field_list.append((self.dataset_type, fname))
                     self._detect_image_units(fname, h.header)
+        self.field_list.append((self.dataset_type, "CO"))
+        self.field_list.append((self.dataset_type, "HCN"))
+        self._field_map["CO"] = 0
+        self._field_map["HCN"] = 0
+        for line in ["CO", "HCN"]:
+            self.parameter_file.field_units[line] = "Jy/beam"
 
     def _count_grids(self):
         self.num_grids = self.pf.nprocs
@@ -224,7 +230,13 @@
     def __init__(self, filename, dataset_type='fits',
                  nprocs = None,
                  storage_filename = None,
-                 mask_nans = False):
+                 mask_nans = False,
+                 folded_axis=None,
+                 folded_width=None
+                 ):
+        self.folded_axis = folded_axis
+        self.folded_width = folded_width
+        self.folded_offsets = {}
         self.fluid_types += ("fits",)
         self.mask_nans = mask_nans
         self.nprocs = nprocs
@@ -342,16 +354,23 @@
 
     def __init__(self, filename,
                  dataset_type='xyv_fits',
-                 nprocs = None,
-                 storage_filename = None,
-                 mask_nans = False):
+                 nprocs=None,
+                 storage_filename=None,
+                 mask_nans=False,
+                 folded_axis=None,
+                 folded_width=None
+                 ):
 
         self.fluid_types += ("xyv_fits",)
 
         super(FITSXYVDataset, self).__init__(filename, dataset_type=dataset_type,
                                              nprocs=nprocs,
                                              storage_filename=storage_filename,
-                                             mask_nans=mask_nans)
+                                             mask_nans=mask_nans,
+                                             folded_axis=folded_axis,
+                                             folded_width=folded_width)
+        self.folded_axis = folded_axis
+        self.folded_width = folded_width
         self.axes_names = [self.primary_header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
         self.ra_axis = np.where(np_char.startswith(self.axes_names, "RA"))[0][0]
         self.dec_axis = np.where(np_char.startswith(self.axes_names, "DEC"))[0][0]
@@ -387,6 +406,13 @@
             self.domain_left_edge = self.domain_left_edge[:3]
             self.domain_right_edge = self.domain_right_edge[:3]
 
+        if self.folded_axis is not None:
+            ax = self.folded_axis
+            ratio = self.folded_width/self.domain_dimensions[ax]
+            self.domain_dimensions[ax] = int(self.folded_width)
+            self.domain_left_edge[ax] = -self.folded_width/2.
+            self.domain_right_edge[ax] = self.folded_width/2.
+
         if self.nprocs is None:
             self.nprocs = np.around(np.prod(self.domain_dimensions) /
                                     32**self.dimensionality).astype("int")

diff -r 8e57daf2d5958dec16d2c3d02fa98defebc37da6 -r 764d4582c715f6be4c96c23f9efc4fb68841239a yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -26,7 +26,11 @@
         super(IOHandlerFITS, self).__init__(pf)
         self.pf = pf
         self._handle = pf._handle
-        
+        self.folded = False
+        if self.pf.folded_axis is not None:
+            self.folded = True
+        self.pixel_offset = 0
+
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
         pass
@@ -49,8 +53,18 @@
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:
-                    start = (g.LeftEdge.ndarray_view()-0.5).astype("int")
-                    end = (g.RightEdge.ndarray_view()-0.5).astype("int")
+                    centering = np.array([0.5]*3)
+                    if self.folded:
+                        centering[-1] = 0.0
+                    start = (g.LeftEdge.ndarray_view()-centering).astype("int")
+                    end = (g.RightEdge.ndarray_view()-centering).astype("int")
+                    if self.folded:
+                        my_off = self.pf.folded_offsets.get(fname, 0)\
+                            + self.pf.folded_width/2
+                        print "My offset1: ", my_off
+                        start[-1] += my_off
+                        end[-1] += my_off
+                        mylog.debug("Reading from " + str(start) + str(end))
                     if self.pf.dimensionality == 2:
                         nx, ny = g.ActiveDimensions[:2]
                         nz = 1
@@ -84,14 +98,25 @@
         for field in fields:
             ftype, fname = field
             if self.pf.four_dims:
-                ds = f[fname.split("_")[0]]
+                #ds = f[fname.split("_")[0]]
+                ds = f['primary']
             else:
-                ds = f[fname]
+                ds = f['primary']
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:
-                    start = (g.LeftEdge.ndarray_view()-0.5).astype("int")
-                    end = (g.RightEdge.ndarray_view()-0.5).astype("int")
+                    centering = np.array([0.5]*3)
+                    if self.folded:
+                        centering[-1] = 0.0
+                    start = (g.LeftEdge.ndarray_view()-centering).astype("int")
+                    end = (g.RightEdge.ndarray_view()-centering).astype("int")
+                    if self.folded:
+                        my_off = self.pf.folded_offsets.get(fname, 0)\
+                            + self.pf.folded_width/2
+                        print "My offset2: ", my_off
+                        start[-1] += my_off
+                        end[-1] += my_off
+                        mylog.debug("Reading from " + str(start) + str(end))
                     if self.pf.four_dims:
                         idx = self.pf.index._field_map[fname]
                         data = ds.data[idx,start[2]:end[2],start[1]:end[1],start[0]:end[0]].transpose()

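Rough arithmetic behind the folded reads above (numbers invented): the folded
domain spans [-folded_width/2, folded_width/2] around zero, so adding the
per-line offset (from folded_offsets here; replaced by the line database in
the next changeset) plus half the width maps a domain channel back onto the
file's channel axis:

    folded_width = 100
    line_offset = 450     # channel offset of the line's window in the file
    domain_channel = -50  # left edge of the folded window
    file_channel = domain_channel + line_offset + folded_width/2
    print file_channel    # 450: reads start at the line's window in the file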

https://bitbucket.org/yt_analysis/yt/commits/21ff66175bc6/
Changeset:   21ff66175bc6
Branch:      yt-3.0
User:        samskillman
Date:        2014-04-10 18:30:12
Summary:     Specify fields in a line_database.
Affected #:  2 files

diff -r 764d4582c715f6be4c96c23f9efc4fb68841239a -r 21ff66175bc694f23beb9a2582ca4d8d80bbbd08 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -154,12 +154,12 @@
                     fname = h.name.lower()
                     self.field_list.append((self.dataset_type, fname))
                     self._detect_image_units(fname, h.header)
-        self.field_list.append((self.dataset_type, "CO"))
-        self.field_list.append((self.dataset_type, "HCN"))
-        self._field_map["CO"] = 0
-        self._field_map["HCN"] = 0
-        for line in ["CO", "HCN"]:
-            self.parameter_file.field_units[line] = "Jy/beam"
+        line_db = self.parameter_file.line_database
+        for k, v in line_db.iteritems():
+            print "Adding line: ", k, v
+            self.field_list.append((self.dataset_type, k))
+            self._field_map[k] = 0
+            self.parameter_file.field_units[k] = "Jy/beam"
 
     def _count_grids(self):
         self.num_grids = self.pf.nprocs
@@ -232,11 +232,14 @@
                  storage_filename = None,
                  mask_nans = False,
                  folded_axis=None,
-                 folded_width=None
+                 folded_width=None,
+                 line_database=None
                  ):
         self.folded_axis = folded_axis
         self.folded_width = folded_width
-        self.folded_offsets = {}
+        if line_database is None:
+            line_database = {}
+        self.line_database = line_database
         self.fluid_types += ("fits",)
         self.mask_nans = mask_nans
         self.nprocs = nprocs
@@ -358,7 +361,8 @@
                  storage_filename=None,
                  mask_nans=False,
                  folded_axis=None,
-                 folded_width=None
+                 folded_width=None,
+                 line_database=None
                  ):
 
         self.fluid_types += ("xyv_fits",)
@@ -368,9 +372,8 @@
                                              storage_filename=storage_filename,
                                              mask_nans=mask_nans,
                                              folded_axis=folded_axis,
-                                             folded_width=folded_width)
-        self.folded_axis = folded_axis
-        self.folded_width = folded_width
+                                             folded_width=folded_width,
+                                             line_database=line_database)
         self.axes_names = [self.primary_header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
         self.ra_axis = np.where(np_char.startswith(self.axes_names, "RA"))[0][0]
         self.dec_axis = np.where(np_char.startswith(self.axes_names, "DEC"))[0][0]

diff -r 764d4582c715f6be4c96c23f9efc4fb68841239a -r 21ff66175bc694f23beb9a2582ca4d8d80bbbd08 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -29,7 +29,6 @@
         self.folded = False
         if self.pf.folded_axis is not None:
             self.folded = True
-        self.pixel_offset = 0
 
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
@@ -59,9 +58,9 @@
                     start = (g.LeftEdge.ndarray_view()-centering).astype("int")
                     end = (g.RightEdge.ndarray_view()-centering).astype("int")
                     if self.folded:
-                        my_off = self.pf.folded_offsets.get(fname, 0)\
+                        my_off = self.pf.line_database.get(fname, 0)\
                             + self.pf.folded_width/2
-                        print "My offset1: ", my_off
+
                         start[-1] += my_off
                         end[-1] += my_off
                         mylog.debug("Reading from " + str(start) + str(end))
@@ -111,9 +110,8 @@
                     start = (g.LeftEdge.ndarray_view()-centering).astype("int")
                     end = (g.RightEdge.ndarray_view()-centering).astype("int")
                     if self.folded:
-                        my_off = self.pf.folded_offsets.get(fname, 0)\
+                        my_off = self.pf.line_database.get(fname, 0)\
                             + self.pf.folded_width/2
-                        print "My offset2: ", my_off
                         start[-1] += my_off
                         end[-1] += my_off
                         mylog.debug("Reading from " + str(start) + str(end))

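Hypothetical usage of the new hook (file name and offsets invented; the
keyword arguments match the constructor in this diff):

    line_db = {"CO": 0, "HCN": 240}  # per-line channel offsets into the cube
    pf = FITSDataset("cube.fits",
                     folded_axis=2, folded_width=100,
                     line_database=line_db)
    # each key becomes a field that reads the primary field's data shifted
    # by its offset, with units hard-wired to Jy/beam for now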

https://bitbucket.org/yt_analysis/yt/commits/9a3d686de3d4/
Changeset:   9a3d686de3d4
Branch:      yt-3.0
User:        samskillman
Date:        2014-04-10 19:58:03
Summary:     Merging in line folding
Affected #:  17 files

diff -r 21ff66175bc694f23beb9a2582ca4d8d80bbbd08 -r 9a3d686de3d45af36dc4f6f924c9aebd35752284 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -50,6 +50,8 @@
     PolarCoordinateHandler
 from yt.geometry.cylindrical_coordinates import \
     CylindricalCoordinateHandler
+from yt.geometry.spherical_coordinates import \
+    SphericalCoordinateHandler
 
 # We want to support the movie format in the future.
 # When such a thing comes to pass, I'll move all the stuff that is contant up
@@ -365,6 +367,8 @@
             self.coordinates = CylindricalCoordinateHandler(self)
         elif self.geometry == "polar":
             self.coordinates = PolarCoordinateHandler(self)
+        elif self.geometry == "spherical":
+            self.coordinates = SphericalCoordinateHandler(self)
         else:
             raise YTGeometryNotSupported(self.geometry)
 

diff -r 21ff66175bc694f23beb9a2582ca4d8d80bbbd08 -r 9a3d686de3d45af36dc4f6f924c9aebd35752284 yt/frontends/fits/api.py
--- a/yt/frontends/fits/api.py
+++ b/yt/frontends/fits/api.py
@@ -13,13 +13,16 @@
 from .data_structures import \
       FITSGrid, \
       FITSHierarchy, \
-      FITSDataset, \
-      FITSXYVDataset
+      FITSDataset
 
 from .fields import \
-      FITSFieldInfo, \
-      FITSXYVFieldInfo
+      FITSFieldInfo
 
 from .io import \
-      IOHandlerFITS, \
-      IOHandlerFITSXYV
+      IOHandlerFITS
+
+from .misc import \
+      FITSOffAxisSlicePlot, \
+      FITSSlicePlot, \
+      FITSProjectionPlot, \
+      FITSOffAxisProjectionPlot

diff -r 21ff66175bc694f23beb9a2582ca4d8d80bbbd08 -r 9a3d686de3d45af36dc4f6f924c9aebd35752284 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -17,7 +17,6 @@
 import weakref
 import warnings
 
-from yt.config import ytcfg
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
     AMRGridPatch
@@ -28,10 +27,10 @@
 from yt.data_objects.static_output import \
     Dataset
 from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
+    mpc_conversion
 from yt.utilities.io_handler import \
     io_registry
-from .fields import FITSFieldInfo, FITSXYVFieldInfo
+from .fields import FITSFieldInfo
 from yt.utilities.decompose import \
     decompose_array, get_psize, decompose_array_nocopy
 
@@ -75,27 +74,10 @@
 
 ap = astropy_imports()
 
-angle_units = ["deg","arcsec","arcmin","mas"]
-all_units = angle_units + mpc_conversion.keys()
-
 known_units = {"k":"K",
                "jy":"Jy"}
 
-def fits_file_validator(ds, *args, **kwargs):
-    ext = args[0].rsplit(".", 1)[-1]
-    if ext.upper() == "GZ":
-        # We don't know for sure that there will be > 1
-        ext = args[0].rsplit(".", 1)[0].rsplit(".", 1)[-1]
-    if ext.upper() not in ("FITS", "FTS"):
-        return False
-    try:
-        with warnings.catch_warnings():
-            warnings.filterwarnings('ignore', category=UserWarning, append=True)
-            fileh = ap.pyfits.open(args[0])
-        if ds._check_axes(fileh): return True
-    except:
-        pass
-    return False
+axes_prefixes = ["RA","DEC","V","ENER","FREQ"]
 
 class FITSGrid(AMRGridPatch):
     _id_offset = 0
@@ -131,7 +113,8 @@
         try:
             field_units = header["bunit"].lower().strip(" ")
             # FITS units always return upper-case, so we need to get
-            # the right case by comparing against known units
+            # the right case by comparing against known units. This
+            # only really works for common units.
             for name in known_units:
                 if field_units.find(name) > -1:
                     field_units = field_units.replace(name, known_units[name])
@@ -141,24 +124,46 @@
 
     def _detect_output_fields(self):
         self.field_list = []
-        self._field_map = {}
-        for h in self._handle[self.parameter_file.first_image:]:
-            if h.header["naxis"] >= 2:
-                if self.parameter_file.four_dims:
-                    for idx in range(h.header["naxis4"]):
-                        fname = h.name.lower()+"_%d" % (idx)
-                        self._field_map[fname] = idx
+        self._axis_map = {}
+        self._file_map = {}
+        self._ext_map = {}
+        # We create a field from each slice on the 4th axis
+        if self.parameter_file.naxis == 4:
+            naxis4 = self.parameter_file.primary_header["naxis4"]
+        else:
+            naxis4 = 1
+        for i, fits_file in enumerate(self.parameter_file._fits_files):
+            for j, h in enumerate(fits_file):
+                if self.parameter_file.naxis >= 2:
+                    try:
+                        fname = h.header["btype"].lower()
+                    except:
+                        fname = h.name.lower()
+                    for k in xrange(naxis4):
+                        if naxis4 > 1:
+                            fname += "_%s_%d" % (h.header["CTYPE4"], k+1)
+                        if self.pf.num_files > 1:
+                            try:
+                                fname += "_%5.3fGHz" % (h.header["restfreq"]/1.0e9)
+                            except:
+                                fname += "_%5.3fGHz" % (h.header["restfrq"]/1.0e9)
+                            else:
+                                fname += "_field_%d" % (i)
+                        self._axis_map[fname] = k
+                        self._file_map[fname] = fits_file
+                        self._ext_map[fname] = j
                         self.field_list.append((self.dataset_type, fname))
+                        mylog.info("Adding field %s to the list of fields." % (fname))
                         self._detect_image_units(fname, h.header)
-                else:
-                    fname = h.name.lower()
-                    self.field_list.append((self.dataset_type, fname))
-                    self._detect_image_units(fname, h.header)
+
         line_db = self.parameter_file.line_database
+        primary_fname = self.field_list[0][1]
         for k, v in line_db.iteritems():
             print "Adding line: ", k, v
             self.field_list.append((self.dataset_type, k))
-            self._field_map[k] = 0
+            self._ext_map[k] = self._ext_map[primary_fname]
+            self._axis_map[k] = self._axis_map[primary_fname]
+            self._file_map[k] = self._file_map[primary_fname]
             self.parameter_file.field_units[k] = "Jy/beam"
 
     def _count_grids(self):
@@ -168,6 +173,7 @@
         f = self._handle # shortcut
         pf = self.parameter_file # shortcut
 
+        # If nprocs > 1, decompose the domain into virtual grids
         if pf.nprocs > 1:
             bbox = np.array([[le,re] for le, re in zip(pf.domain_left_edge,
                                                        pf.domain_right_edge)])
@@ -227,35 +233,50 @@
     _dataset_type = "fits"
     _handle = None
 
-    def __init__(self, filename, dataset_type='fits',
+    def __init__(self, filename,
+                 dataset_type = 'fits',
+                 slave_files = [],
                  nprocs = None,
                  storage_filename = None,
                  mask_nans = False,
                  folded_axis=None,
                  folded_width=None,
-                 line_database=None
+                 line_database=None,
+                 suppress_astropy_warnings = True
                  ):
         self.folded_axis = folded_axis
         self.folded_width = folded_width
         if line_database is None:
             line_database = {}
         self.line_database = line_database
+
+        if suppress_astropy_warnings:
+            warnings.filterwarnings('ignore', module="astropy", append=True)
+        self.filenames = [filename] + slave_files
+        self.num_files = len(self.filenames)
         self.fluid_types += ("fits",)
         self.mask_nans = mask_nans
         self.nprocs = nprocs
-        self._handle = ap.pyfits.open(filename, memmap=True, do_not_scale_image_data=True)
-        for i, h in enumerate(self._handle):
-            if h.header["naxis"] >= 2:
-                self.first_image = i
-                break
-
+        self._handle = ap.pyfits.open(self.filenames[0],
+                                      memmap=True,
+                                      do_not_scale_image_data=True)
+        self._fits_files = [self._handle]
+        if self.num_files > 1:
+            for fits_file in slave_files:
+                self._fits_files.append(ap.pyfits.open(fits_file,
+                                                       memmap=True,
+                                                       do_not_scale_image_data=True))
+        self.first_image = 0 # Assumed for now
         self.primary_header = self._handle[self.first_image].header
         self.shape = self._handle[self.first_image].shape
         self.wcs = ap.pywcs.WCS(header=self.primary_header)
-
+        self.axis_names = {}
+        self.naxis = self.primary_header["naxis"]
+        for i, ax in enumerate("xyz"[:self.naxis]):
+            self.axis_names[self.primary_header["CTYPE%d" % (i+1)]] = ax
         self.file_unit = None
         for i, unit in enumerate(self.wcs.wcs.cunit):
-            if unit in all_units:
+            if unit in mpc_conversion.keys():
                 self.file_unit = unit.name
                 idx = i
                 break
@@ -266,7 +287,6 @@
             self.pixel_scale = self.wcs.wcs.cdelt[idx]
 
         self.refine_by = 2
-        self.four_dims = False
 
         Dataset.__init__(self, filename, dataset_type)
         self.storage_filename = storage_filename
@@ -275,6 +295,7 @@
         """
         Generates the conversion to various physical _units based on the parameter file
         """
+        from yt.units.dimensions import length
         if self.new_unit is not None:
             length_factor = self.pixel_scale
             length_unit = str(self.new_unit)
@@ -286,6 +307,7 @@
         self.mass_unit = self.quan(1.0, "g")
         self.time_unit = self.quan(1.0, "s")
         self.velocity_unit = self.quan(1.0, "cm/s")
+        self.unit_registry.modify("pixel", self.length_unit),
 
     def _parse_parameter_file(self):
         self.unique_identifier = \
@@ -298,7 +320,11 @@
         self.dimensionality = self.primary_header["naxis"]
         self.geometry = "cartesian"
 
-        dims = self._handle[self.first_image].shape[::-1]
+        # Sometimes a FITS file has a 4D datacube, in which case
+        # we take the 4th axis and assume it consists of different fields.
+        if self.dimensionality == 4: self.dimensionality = 3
+
+        dims = self._handle[self.first_image].shape[::-1][:self.dimensionality]
         self.domain_dimensions = np.array(dims)
         if self.dimensionality == 2:
             self.domain_dimensions = np.append(self.domain_dimensions,
@@ -307,6 +333,11 @@
         self.domain_left_edge = np.array([0.5]*3)
         self.domain_right_edge = np.array([float(dim)+0.5 for dim in self.domain_dimensions])
 
+        if self.folded_axis is not None:
+            self.domain_left_edge[self.folded_axis] = -self.folded_width/2.
+            self.domain_right_edge[self.folded_axis] = self.folded_width/2.
+            self.domain_dimensions[self.folded_axis] = int(self.folded_width)
+
         if self.dimensionality == 2:
             self.domain_left_edge[-1] = 0.5
             self.domain_right_edge[-1] = 1.5
@@ -324,118 +355,79 @@
         self.current_redshift = self.omega_lambda = self.omega_matter = \
             self.hubble_constant = self.cosmological_simulation = 0.0
 
+        # If nprocs is None, do some automatic decomposition of the domain
         if self.nprocs is None:
             self.nprocs = np.around(np.prod(self.domain_dimensions) /
                                     32**self.dimensionality).astype("int")
-            self.nprocs = min(self.nprocs, 2500)
+            self.nprocs = min(self.nprocs, 512)
+
+        # Check to see if this data is in (RA,Dec,?) format
+        self.xyv_data = False
+        x = np.zeros((self.dimensionality), dtype="bool")
+        for ap in axes_prefixes:
+            x += np_char.startswith(self.axis_names.keys()[:self.dimensionality], ap)
+        if x.sum() == self.dimensionality: self._setup_xyv()
+
+    def _setup_xyv(self):
+
+        self.xyv_data = True
+
+        end = min(self.dimensionality+1,4)
+        ctypes = np.array([self.primary_header["CTYPE%d" % (i)] for i in xrange(1,end)])
+        self.ra_axis = np.where(np_char.startswith(ctypes, "RA"))[0][0]
+        self.dec_axis = np.where(np_char.startswith(ctypes, "DEC"))[0][0]
+
+        if self.wcs.naxis > 2:
+
+            self.vel_axis = np_char.startswith(ctypes, "V")
+            self.vel_axis += np_char.startswith(ctypes, "FREQ")
+            self.vel_axis += np_char.startswith(ctypes, "ENER")
+            self.vel_axis = np.where(self.vel_axis)[0][0]
+            self.vel_name = ctypes[self.vel_axis].lower()
+
+            self.wcs_2d = ap.pywcs.WCS(naxis=2)
+            self.wcs_2d.wcs.crpix = self.wcs.wcs.crpix[[self.ra_axis, self.dec_axis]]
+            self.wcs_2d.wcs.cdelt = self.wcs.wcs.cdelt[[self.ra_axis, self.dec_axis]]
+            self.wcs_2d.wcs.crval = self.wcs.wcs.crval[[self.ra_axis, self.dec_axis]]
+            self.wcs_2d.wcs.cunit = [str(self.wcs.wcs.cunit[self.ra_axis]),
+                                     str(self.wcs.wcs.cunit[self.dec_axis])]
+            self.wcs_2d.wcs.ctype = [self.wcs.wcs.ctype[self.ra_axis],
+                                     self.wcs.wcs.ctype[self.dec_axis]]
+
+            self.wcs_1d = ap.pywcs.WCS(naxis=1)
+            self.wcs_1d.wcs.crpix = [self.wcs.wcs.crpix[self.vel_axis]]
+            self.wcs_1d.wcs.cdelt = [self.wcs.wcs.cdelt[self.vel_axis]]
+            self.wcs_1d.wcs.crval = [self.wcs.wcs.crval[self.vel_axis]]
+            self.wcs_1d.wcs.cunit = [str(self.wcs.wcs.cunit[self.vel_axis])]
+            self.wcs_1d.wcs.ctype = [self.wcs.wcs.ctype[self.vel_axis]]
+
+        else:
+
+            self.wcs_2d = self.wcs
+            self.wcs_1d = None
+            self.vel_axis = 2
+            self.vel_name = "z"
 
     def __del__(self):
+        for file in self._fits_files:
+            file.close()
         self._handle.close()
 
     @classmethod
-    def _check_axes(cls, handle):
-        for h in handle:
-            if h.header["naxis"] >= 2:
-                axes_names = [h.header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
-                a = np_char.startswith(axes_names, "RA")
-                b = np_char.startswith(axes_names, "DEC")
-                c = np_char.startswith(axes_names, "VEL")
-                d = np_char.startswith(axes_names, "FREQ")
-                e = np_char.startswith(axes_names, "ENER")
-                if (a+b+c+d+e).sum() != 3:
-                    handle.close()
-                    return True
+    def _is_valid(cls, *args, **kwargs):
+        ext = args[0].rsplit(".", 1)[-1]
+        if ext.upper() == "GZ":
+            # We don't know for sure that there will be > 1
+            ext = args[0].rsplit(".", 1)[0].rsplit(".", 1)[-1]
+        if ext.upper() not in ("FITS", "FTS"):
+            return False
+        try:
+            with warnings.catch_warnings():
+                warnings.filterwarnings('ignore', category=UserWarning, append=True)
+                fileh = ap.pyfits.open(args[0])
+            valid = fileh[0].header["naxis"] >= 2
+            fileh.close()
+            return valid
+        except:
+            pass
         return False
-
-    @classmethod
-    def _is_valid(cls, *args, **kwargs):
-        return fits_file_validator(cls, *args, **kwargs)
-
-class FITSXYVDataset(FITSDataset):
-    _dataset_type = "xyv_fits"
-    _field_info_class = FITSXYVFieldInfo
-
-    def __init__(self, filename,
-                 dataset_type='xyv_fits',
-                 nprocs=None,
-                 storage_filename=None,
-                 mask_nans=False,
-                 folded_axis=None,
-                 folded_width=None,
-                 line_database=None
-                 ):
-
-        self.fluid_types += ("xyv_fits",)
-
-        super(FITSXYVDataset, self).__init__(filename, dataset_type=dataset_type,
-                                             nprocs=nprocs,
-                                             storage_filename=storage_filename,
-                                             mask_nans=mask_nans,
-                                             folded_axis=folded_axis,
-                                             folded_width=folded_width,
-                                             line_database=line_database)
-        self.axes_names = [self.primary_header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
-        self.ra_axis = np.where(np_char.startswith(self.axes_names, "RA"))[0][0]
-        self.dec_axis = np.where(np_char.startswith(self.axes_names, "DEC"))[0][0]
-        self.vel_axis = np_char.startswith(self.axes_names, "VEL")
-        self.vel_axis += np_char.startswith(self.axes_names, "FREQ")
-        self.vel_axis += np_char.startswith(self.axes_names, "ENER")
-        self.vel_axis = np.where(self.vel_axis)[0][0]
-
-        self.wcs_2d = ap.pywcs.WCS(naxis=2)
-        self.wcs_2d.wcs.crpix = self.wcs.wcs.crpix[[self.ra_axis, self.dec_axis]]
-        self.wcs_2d.wcs.cdelt = self.wcs.wcs.cdelt[[self.ra_axis, self.dec_axis]]
-        self.wcs_2d.wcs.crval = self.wcs.wcs.crval[[self.ra_axis, self.dec_axis]]
-        self.wcs_2d.wcs.cunit = [str(self.wcs.wcs.cunit[self.ra_axis]),
-                                 str(self.wcs.wcs.cunit[self.dec_axis])]
-        self.wcs_2d.wcs.ctype = [self.wcs.wcs.ctype[self.ra_axis],
-                                 self.wcs.wcs.ctype[self.dec_axis]]
-
-        self.wcs_1d = ap.pywcs.WCS(naxis=1)
-        self.wcs_1d.wcs.crpix = [self.wcs.wcs.crpix[self.vel_axis]]
-        self.wcs_1d.wcs.cdelt = [self.wcs.wcs.cdelt[self.vel_axis]]
-        self.wcs_1d.wcs.crval = [self.wcs.wcs.crval[self.vel_axis]]
-        self.wcs_1d.wcs.cunit = [str(self.wcs.wcs.cunit[self.vel_axis])]
-        self.wcs_1d.wcs.ctype = [self.wcs.wcs.ctype[self.vel_axis]]
-
-    def _parse_parameter_file(self):
-
-        super(FITSXYVDataset, self)._parse_parameter_file()
-
-        if self.dimensionality == 4:
-            self.dimensionality = 3
-            self.four_dims = True
-            self.domain_dimensions = self.domain_dimensions[:3]
-            self.domain_left_edge = self.domain_left_edge[:3]
-            self.domain_right_edge = self.domain_right_edge[:3]
-
-        if self.folded_axis is not None:
-            ax = self.folded_axis
-            ratio = self.folded_width/self.domain_dimensions[ax]
-            self.domain_dimensions[ax] = int(self.folded_width)
-            self.domain_left_edge[ax] = -self.folded_width/2.
-            self.domain_right_edge[ax] = self.folded_width/2.
-
-        if self.nprocs is None:
-            self.nprocs = np.around(np.prod(self.domain_dimensions) /
-                                    32**self.dimensionality).astype("int")
-            self.nprocs = min(self.nprocs, 2500)
-
-    @classmethod
-    def _check_axes(cls, handle):
-        for h in handle:
-            if h.header["naxis"] >= 3:
-                axes_names = [h.header["CTYPE%d" % (ax)] for ax in xrange(1,4)]
-                a = np_char.startswith(axes_names, "RA")
-                b = np_char.startswith(axes_names, "DEC")
-                c = np_char.startswith(axes_names, "VEL")
-                d = np_char.startswith(axes_names, "FREQ")
-                e = np_char.startswith(axes_names, "ENER")
-                if (a+b+c+d+e).sum() == 3:
-                    handle.close()
-                    return True
-        return False
-
-    @classmethod
-    def _is_valid(cls, *args, **kwargs):
-        return fits_file_validator(cls, *args, **kwargs)

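To make the (RA,Dec,?) check above concrete, here is how it behaves on a
typical radio cube (standalone sketch; CTYPE values invented):

    import numpy as np
    import numpy.core.defchararray as np_char

    axes_prefixes = ["RA", "DEC", "V", "ENER", "FREQ"]
    ctypes = np.array(["RA---SIN", "DEC--SIN", "VELO-LSR"])
    x = np.zeros((3,), dtype="bool")
    for prefix in axes_prefixes:
        x += np_char.startswith(ctypes, prefix)  # boolean OR per axis
    print x.sum() == 3  # True: every axis matched, so _setup_xyv() runs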
diff -r 21ff66175bc694f23beb9a2582ca4d8d80bbbd08 -r 9a3d686de3d45af36dc4f6f924c9aebd35752284 yt/frontends/fits/fields.py
--- a/yt/frontends/fits/fields.py
+++ b/yt/frontends/fits/fields.py
@@ -17,18 +17,55 @@
 
 class FITSFieldInfo(FieldInfoContainer):
     known_other_fields = ()
+
+    def __init__(self, pf, field_list, slice_info=None):
+        super(FITSFieldInfo, self).__init__(pf, field_list, slice_info=slice_info)
+        for field in pf.field_list:
+            self[field].take_log = False
+
     def _get_wcs(self, data, axis):
-        if data.pf.dimensionality == 2:
-            w_coords = data.pf.wcs.wcs_pix2world(data["x"], data["y"], 1)
-        else:
-            w_coords = data.pf.wcs.wcs_pix2world(data["x"], data["y"],
-                                                 data["z"], 1)
+        w_coords = data.pf.wcs.wcs_pix2world(data["x"], data["y"],
+                                             data["z"], 1)
         return w_coords[axis]
+
+    def _get_2d_wcs(self, data, axis):
+        w_coords = data.pf.wcs_2d.wcs_pix2world(data["x"], data["y"], 1)
+        return w_coords[axis]
+
+    def _vel_los(field, data):
+        return data.pf.arr(data.pf.wcs_1d.wcs_pix2world(data["z"], 1)[0],
+                           str(data.pf.wcs_1d.wcs.cunit[0]))
+
+    def _setup_xyv_fields(self):
+        def world_f(axis, unit):
+            def _world_f(field, data):
+                return data.pf.arr(self._get_2d_wcs(data, axis), unit)
+            return _world_f
+        for i, axis in enumerate([self.pf.ra_axis, self.pf.dec_axis]):
+            name = ["ra","dec"][i]
+            unit = str(self.pf.wcs_2d.wcs.cunit[i])
+            if unit.lower() == "deg": unit = "degree"
+            if unit.lower() == "rad": unit = "radian"
+            self.add_field(("fits",name), function=world_f(axis, unit), units=unit)
+        def _vel_los(field, data):
+            return data.pf.arr(data.pf.wcs_1d.wcs_pix2world(data["z"], 1)[0],
+                               str(data.pf.wcs_1d.wcs.cunit[0]))
+        if self.pf.dimensionality == 3:
+            unit = str(self.pf.wcs_1d.wcs.cunit[0])
+            self.add_field(("fits",self.pf.vel_name),
+                           function=_vel_los, units=unit)
+
     def setup_fluid_fields(self):
+
+        if self.pf.xyv_data:
+            self._setup_xyv_fields()
+            return
+
         def world_f(axis, unit):
             def _world_f(field, data):
                 return data.pf.arr(self._get_wcs(data, axis), unit)
             return _world_f
+
         for i in range(self.pf.dimensionality):
             if self.pf.wcs.wcs.cname[i] == '':
                 name = str(self.pf.wcs.wcs.ctype[i])
@@ -39,36 +76,3 @@
                 if unit.lower() == "deg": unit = "degree"
                 if unit.lower() == "rad": unit = "radian"
                 self.add_field(("fits",name), function=world_f(i, unit), units=unit)
-
-class FITSXYVFieldInfo(FieldInfoContainer):
-    known_other_fields = ()
-
-    def __init__(self, pf, field_list, slice_info=None):
-        super(FITSXYVFieldInfo, self).__init__(pf, field_list, slice_info=slice_info)
-        for field in pf.field_list:
-            self[field].take_log = False
-            
-    def _get_wcs(self, data, axis):
-        w_coords = data.pf.wcs_2d.wcs_pix2world(data["x"], data["y"], 1)
-        return w_coords[axis]
-    def setup_fluid_fields(self):
-        def world_f(axis, unit):
-            def _world_f(field, data):
-                return data.pf.arr(self._get_wcs(data, axis), unit)
-            return _world_f
-        for i, axis in enumerate([self.pf.ra_axis, self.pf.dec_axis]):
-            name = str(self.pf.wcs_2d.wcs.ctype[i])
-            unit = str(self.pf.wcs_2d.wcs.cunit[i])
-            if unit.lower() == "deg": unit = "degree"
-            if unit.lower() == "rad": unit = "radian"
-            self.add_field(("xyv_fits",name), function=world_f(axis, unit), units=unit)
-        def _vel_los(field, data):
-            return data.pf.arr(data.pf.wcs_1d.wcs_pix2world(data["z"], 1)[0],
-                               str(data.pf.wcs_1d.wcs.cunit[0]))
-        name = str(self.pf.wcs_1d.wcs.ctype[0])
-        unit = str(self.pf.wcs_1d.wcs.cunit[0])
-        self.add_field(("xyv_fits",name),
-                       function=_vel_los,
-                       units=unit)
-
-

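A side note on the world_f factory kept in setup_fluid_fields above: defining
the inner function directly inside the loop would close over the loop
variables by reference, so every field would see the last axis/unit. The
factory captures them per call (minimal standalone sketch, WCS lookup stubbed
out):

    def world_f(axis, unit):
        def _world_f(field, data):
            return (axis, unit)  # stand-in for the real WCS lookup
        return _world_f

    fields = [world_f(i, u) for i, u in enumerate(["degree", "degree", "m/s"])]
    print fields[0](None, None)  # (0, 'degree'): each closure keeps its own pair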
diff -r 21ff66175bc694f23beb9a2582ca4d8d80bbbd08 -r 9a3d686de3d45af36dc4f6f924c9aebd35752284 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -12,8 +12,6 @@
 
 import numpy as np
 
-from yt.utilities.math_utils import prec_accum
-
 from yt.utilities.io_handler import \
     BaseIOHandler
 from yt.utilities.logger import ytLogger as mylog
@@ -38,7 +36,6 @@
         chunks = list(chunks)
         if any((ftype != "fits" for ftype, fname in fields)):
             raise NotImplementedError
-        f = self._handle
         rv = {}
         dt = "float64"
         for field in fields:
@@ -48,7 +45,12 @@
                     size, [f2 for f1, f2 in fields], ng)
         for field in fields:
             ftype, fname = field
-            ds = f[fname]
+            tmp_fname = fname
+            if fname in self.pf.line_database:
+                fname = self.pf.field_list[0][1]
+            f = self.pf.index._file_map[fname]
+            ds = f[self.pf.index._ext_map[fname]]
+            fname = tmp_fname
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:
@@ -69,54 +71,8 @@
                         nz = 1
                         data = np.zeros((nx,ny,nz))
                         data[:,:,0] = ds.data[start[1]:end[1],start[0]:end[0]].transpose()
-                    elif self.pf.dimensionality == 3:
-                        data = ds.data[start[2]:end[2],start[1]:end[1],start[0]:end[0]].transpose()
-                    if self.pf.mask_nans: data[np.isnan(data)] = 0.0
-                    ind += g.select(selector, data.astype("float64"), rv[field], ind)
-        return rv
-
-class IOHandlerFITSXYV(IOHandlerFITS):
-    _particle_reader = False
-    _dataset_type = "xyv_fits"
-
-    def __init__(self, pf):
-        super(IOHandlerFITSXYV,self).__init__(pf)
-
-    def _read_fluid_selection(self, chunks, selector, fields, size):
-        chunks = list(chunks)
-        if any((ftype != "xyv_fits" for ftype, fname in fields)):
-            raise NotImplementedError
-        f = self._handle
-        rv = {}
-        dt = "float64"
-        for field in fields:
-            rv[field] = np.empty(size, dtype=dt)
-        ng = sum(len(c.objs) for c in chunks)
-        mylog.debug("Reading %s cells of %s fields in %s grids",
-                    size, [f2 for f1, f2 in fields], ng)
-        for field in fields:
-            ftype, fname = field
-            if self.pf.four_dims:
-                #ds = f[fname.split("_")[0]]
-                ds = f['primary']
-            else:
-                ds = f['primary']
-            ind = 0
-            for chunk in chunks:
-                for g in chunk.objs:
-                    centering = np.array([0.5]*3)
-                    if self.folded:
-                        centering[-1] = 0.0
-                    start = (g.LeftEdge.ndarray_view()-centering).astype("int")
-                    end = (g.RightEdge.ndarray_view()-centering).astype("int")
-                    if self.folded:
-                        my_off = self.pf.line_database.get(fname, 0)\
-                            + self.pf.folded_width/2
-                        start[-1] += my_off
-                        end[-1] += my_off
-                        mylog.debug("Reading from " + str(start) + str(end))
-                    if self.pf.four_dims:
-                        idx = self.pf.index._field_map[fname]
+                    elif self.pf.naxis == 4:
+                        idx = self.pf.index._axis_map[fname]
                         data = ds.data[idx,start[2]:end[2],start[1]:end[1],start[0]:end[0]].transpose()
                     else:
                         data = ds.data[start[2]:end[2],start[1]:end[1],start[0]:end[0]].transpose()

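The temporary-name shuffle in _read_fluid_selection above amounts to this
(sketch; pf stands for the dataset, as in the diff): a line field borrows the
primary field's file and extension maps, so the read hits the same HDU and
only the channel offset differs:

    fname = "CO"                          # a line_database key
    read_name = fname
    if fname in pf.line_database:
        read_name = pf.field_list[0][1]   # fall back to the primary field
    f = pf.index._file_map[read_name]     # astropy HDUList for that field
    ds = f[pf.index._ext_map[read_name]]  # the HDU actually read from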
diff -r 21ff66175bc694f23beb9a2582ca4d8d80bbbd08 -r 9a3d686de3d45af36dc4f6f924c9aebd35752284 yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -10,115 +10,146 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import __builtin__
-import aplpy
-from yt.utilities.fits_image import FITSImageBuffer
-from yt.funcs import fix_axis, ensure_list
-import astropy.wcs as pywcs
-from yt.utilities.exceptions import \
-    YTNotInsideNotebook
-import matplotlib.pyplot as plt
+import numpy as np
+from yt.funcs import fix_axis, ensure_list, iterable
+from yt.visualization.plot_window import AxisAlignedSlicePlot, \
+    OffAxisSlicePlot, ProjectionPlot, OffAxisProjectionPlot
 
-axis_wcs = [[1,2],[0,2],[0,1]]
+def force_aspect(ax,aspect=1):
+    im = ax.get_images()
+    extent = im[0].get_extent()
+    ax.set_aspect(abs((extent[1]-extent[0])/(extent[3]-extent[2]))/aspect)
 
-plot_method_list = ["recenter","refresh","add_colorbar",
-                    "remove_colorbar"]
+def convert_ticks(ticks, to_hours=False):
+    deg_ticks = ticks.astype("int")
+    min_ticks = ((ticks - deg_ticks)*60.).astype("int")
+    sec_ticks = ((((ticks - deg_ticks)*60.)-min_ticks)*60.).astype("int")
+    deg_string = "d"
+    if to_hours:
+        deg_ticks = (deg_ticks*24./360.).astype("int")
+        deg_string = "h"
+    return ["%02d%s%02dm%02ds" % (dt, deg_string, mt, st)
+            for dt, mt, st in zip(deg_ticks, min_ticks, sec_ticks)]
 
-def plot_method(method, plots):
-    def _method(*args, **kwargs):
-        for plot in plots.values():
-            getattr(plot, method)(*args, **kwargs)
-        return
-    return _method
+def set_onaxis_wcs(pw):
 
-class FITSPlot(object):
-    def __init__(self, ds, data, axis, fields, **kwargs):
+    ax = pw.plots.values()[0].axes
+    xpix = ax.get_xticks()
+    ypix = ax.get_yticks()
+    ra_ticks, dummy = pw.ds.wcs_2d.wcs_pix2world(xpix, ypix[0]*np.ones((len(xpix))), 1)
+    dummy, dec_ticks = pw.ds.wcs_2d.wcs_pix2world(xpix[0]*np.ones((len(ypix))), ypix, 1)
+    if pw.ds.dimensionality == 3:
+        vlim = pw.ds.wcs_1d.wcs_pix2world([pw.xlim[0], pw.xlim[1]], 1)[0]
+
+    if pw.axis == pw.ds.ra_axis:
+        xname = "Dec"
+        yname = pw.ds.vel_name
+        xunit = str(pw.ds.wcs_2d.wcs.cunit[1])
+        yunit = str(pw.ds.wcs_1d.wcs.cunit[0])
+    elif pw.axis == pw.ds.dec_axis:
+        xname = "RA"
+        yname = pw.ds.vel_name
+        xunit = str(pw.ds.wcs_2d.wcs.cunit[0])
+        yunit = str(pw.ds.wcs_1d.wcs.cunit[0])
+    elif pw.axis == pw.ds.vel_axis:
+        xname = "RA"
+        yname = "Dec"
+        xunit = str(pw.ds.wcs_2d.wcs.cunit[0])
+        yunit = str(pw.ds.wcs_2d.wcs.cunit[1])
+
+    for k,v in pw.plots.iteritems():
+        v.axes.set_xlabel(r"%s (%s)" % (xname, xunit))
+        v.axes.set_ylabel(r"%s (%s)" % (yname, yunit))
+        if xname == "Dec":
+            v.axes.xaxis.set_ticklabels(convert_ticks(dec_ticks), size=14)
+        if yname == "Dec":
+            v.axes.yaxis.set_ticklabels(convert_ticks(dec_ticks), size=14)
+        if xname == "RA":
+            v.axes.xaxis.set_ticklabels(convert_ticks(ra_ticks, to_hours=True), size=14)
+        if yname == pw.ds.vel_name:
+            extent = (pw.xlim[0].value, pw.xlim[1].value, vlim[0], vlim[1])
+            v.image.set_extent(extent)
+
+class FITSSlicePlot(AxisAlignedSlicePlot):
+
+    def __init__(self, ds, axis, fields, set_wcs=False, **kwargs):
+
+        if isinstance(axis, basestring):
+            if axis in ds.axis_names:
+                axis = ds.axis_names[axis]
+        self.axis = fix_axis(axis)
         self.ds = ds
-        self.fields = fields
-        self.plots = {}
-        w = pywcs.WCS(naxis=2)
-        w.wcs.crpix = self.ds.wcs.wcs.crpix[axis_wcs[axis]]
-        w.wcs.cdelt = self.ds.wcs.wcs.cdelt[axis_wcs[axis]]
-        w.wcs.crval = self.ds.wcs.wcs.crval[axis_wcs[axis]]
-        w.wcs.cunit = [str(self.ds.wcs.wcs.cunit[idx]) for idx in axis_wcs[axis]]
-        w.wcs.ctype = [self.ds.wcs.wcs.ctype[idx] for idx in axis_wcs[axis]]
-        self.buffer = FITSImageBuffer(data, fields=fields, wcs=w)
-        for field in self.fields:
-            self.plots[field] = aplpy.FITSFigure(self.buffer[field], **kwargs)
-            self.plots[field].set_auto_refresh(False)
-        self._setup_plot_methods()
-        self.set_font(family="serif", size=15)
-        for v in self.values():
-            v.show_colorscale()
-        plt.close("all")
+        self.set_wcs = set_wcs
+        super(FITSSlicePlot, self).__init__(ds, axis, fields, origin="native", **kwargs)
+        self.set_axes_unit("pixel")
 
-    def _setup_plot_methods(self):
-        for method in plot_method_list:
-            self.__dict__[method] = plot_method(method, self.plots)
-
-    def __getitem__(self, key):
-        return self.plots[key]
-
-    def keys(self):
-        return self.plots.keys()
-
-    def values(self):
-        return self.plots.values()
-
-    def items(self):
-        return self.plots.items()
-
-    def set_font(self, **kwargs):
-        for plot in self.keys():
-            self[plot].axis_labels.set_font(**kwargs)
-            self[plot].tick_labels.set_font(**kwargs)
+    def _set_wcs(self):
+        if self.set_wcs:
+            set_onaxis_wcs(self)
 
     def show(self):
-        r"""This will send any existing plots to the IPython notebook.
-        function name.
+        self._set_wcs()
+        super(FITSSlicePlot, self).show()
 
-        If yt is being run from within an IPython session, and it is able to
-        determine this, this function will send any existing plots to the
-        notebook for display.
+    def save_wcs(self, *args, **kwargs):
+        self._set_wcs()
+        super(FITSSlicePlot, self).save(*args, **kwargs)
 
-        If yt can't determine if it's inside an IPython session, it will raise
-        YTNotInsideNotebook.
+class FITSOffAxisSlicePlot(OffAxisSlicePlot):
 
-        Examples
-        --------
+    def __init__(self, ds, normal, fields, set_wcs=False, **kwargs):
 
-        >>> from yt.mods import SlicePlot
-        >>> slc = SlicePlot(pf, "x", ["Density", "VelocityMagnitude"])
-        >>> slc.show()
+        self.ds = ds
+        my_normal = normal
+        if ds.xyv_data:
+            if len(normal) > 2:
+                raise NotImplementedError("Normal vector must be in two dimensions for this dataset!")
+            my_normal = np.zeros((3))
+            my_normal[ds.ra_axis] = normal[0]
+            my_normal[ds.dec_axis] = normal[1]
 
-        """
-        if "__IPYTHON__" in dir(__builtin__):
-            from IPython.display import display
-            for k, v in sorted(self.plots.iteritems()):
-                display(v._figure)
-        else:
-            raise YTNotInsideNotebook
+        super(FITSOffAxisSlicePlot, self).__init__(ds, my_normal, fields, **kwargs)
+        self.set_axes_unit("pixel")
 
-class FITSSlicePlot(FITSPlot):
-    def __init__(self, ds, axis, fields, coord=None, field_parameters=None, **kwargs):
-        fields = ensure_list(fields)
-        axis = fix_axis(axis)
-        if coord is None:
-            coord = ds.domain_center.ndarray_view()[axis]
-        slc = ds.slice(axis, coord, field_parameters=field_parameters)
-        data = {}
-        for field in fields:
-            data[field] = slc.to_frb((1.0,"unitary"), ds.domain_dimensions[axis_wcs[axis]])[field]
-        super(FITSSlicePlot, self).__init__(ds, data, axis, fields, **kwargs)
+class FITSProjectionPlot(ProjectionPlot):
 
-class FITSProjectionPlot(FITSPlot):
-    def __init__(self, ds, axis, fields, weight_field=None, data_source=None,
-                 field_parameters=None, **kwargs):
-        fields = ensure_list(fields)
-        axis = fix_axis(axis)
-        prj = ds.proj(fields[0], axis, weight_field=weight_field, data_source=data_source)
-        data = {}
-        for field in fields:
-            data[field] = prj.to_frb((1.0,"unitary"), ds.domain_dimensions[axis_wcs[axis]])[field]
-        super(FITSProjectionPlot, self).__init__(ds, data, axis, fields, **kwargs)
+    def __init__(self, ds, axis, fields, set_wcs=False, **kwargs):
 
+        self.ds = ds
+        if isinstance(axis, basestring):
+            if axis in ds.axis_names:
+                axis = ds.axis_names[axis]
+        self.axis = fix_axis(axis)
+        self.set_wcs = set_wcs
+
+        super(FITSProjectionPlot, self).__init__(ds, axis, fields, origin="native", **kwargs)
+        self.set_axes_unit("pixel")
+
+    def _set_wcs(self):
+        if self.set_wcs:
+            set_onaxis_wcs(self)
+
+    def show(self):
+        self._set_wcs()
+        super(FITSProjectionPlot, self).show()
+
+    def save(self, *args, **kwargs):
+        self._set_wcs()
+        super(FITSProjectionPlot, self).save(*args, **kwargs)
+
+class FITSOffAxisProjectionPlot(OffAxisProjectionPlot):
+
+    def __init__(self, ds, normal, fields, set_wcs=False, **kwargs):
+
+        self.ds = ds
+        my_normal = normal
+        if ds.xyv_data:
+            if len(normal) > 2:
+                raise ValueError("Normal vector must be in two dimensions for this dataset!")
+            my_normal = np.zeros((3))
+            my_normal[ds.ra_axis] = normal[0]
+            my_normal[ds.dec_axis] = normal[1]
+
+        super(FITSOffAxisProjectionPlot, self).__init__(ds, my_normal, fields, axes_unit="pixel", **kwargs)
+
+

diff -r 21ff66175bc694f23beb9a2582ca4d8d80bbbd08 -r 9a3d686de3d45af36dc4f6f924c9aebd35752284 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -163,7 +163,8 @@
     _field_info_class = GDFFieldInfo
 
     def __init__(self, filename, dataset_type='grid_data_format',
-                 storage_filename=None):
+                 storage_filename=None, geometry = 'cartesian'):
+        self.geometry = geometry
         self.fluid_types += ("gdf",)
         Dataset.__init__(self, filename, dataset_type)
         self.storage_filename = storage_filename

diff -r 21ff66175bc694f23beb9a2582ca4d8d80bbbd08 -r 9a3d686de3d45af36dc4f6f924c9aebd35752284 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -381,8 +381,8 @@
     def __init__(self, filename, dataset_type="tipsy",
                  field_dtypes=None,
                  unit_base=None,
+                 parameter_file=None,
                  cosmology_parameters=None,
-                 parameter_file=None,
                  n_ref=64, over_refine_factor=1):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
@@ -480,15 +480,15 @@
             self.domain_left_edge = None
             self.domain_right_edge = None
         if self.parameters.get('bComove', False):
+            cosm = self._cosmology_parameters or {}
+            self.scale_factor = hvals["time"]#In comoving simulations, time stores the scale factor a
             self.cosmological_simulation = 1
-            cosm = self._cosmology_parameters or {}
-            dcosm = dict(current_redshift=0.0,
-                         omega_lambda=0.0,
-                         omega_matter=0.0,
-                         hubble_constant=1.0)
-            for param in ['current_redshift', 'omega_lambda',
-                          'omega_matter', 'hubble_constant']:
-                pval = cosm.get(param, dcosm[param])
+            dcosm = dict(current_redshift=(1.0/self.scale_factor)-1.0,
+                         omega_lambda=self.parameters.get('dLambda', cosm.get('omega_lambda',0.0)),
+                         omega_matter=self.parameters.get('dOmega0', cosm.get('omega_matter',0.0)),
+                         hubble_constant=self.parameters.get('dHubble0', cosm.get('hubble_constant',1.0)))
+            for param in dcosm.keys():
+                pval = dcosm[param]
                 setattr(self, param, pval)
         else:
             self.cosmological_simulation = 0.0
@@ -501,18 +501,18 @@
         f.close()
 
     def _set_code_unit_attributes(self):
-        # Set a sane default for cosmological simulations.
-        if self._unit_base is None and self.cosmological_simulation == 1:
-            mylog.info("Assuming length units are in Mpc/h (comoving)")
-            self._unit_base.update(dict(length = (1.0, "Mpccm/h")))
         if self.cosmological_simulation:
-            length_units = self._unit_base['length']
-            DW = self.quan(1./length_units[1], length_units[0])
-            cosmo = Cosmology(self.hubble_constant * 100.0,
+            mu = self.parameters.get('dMsolUnit', 1.)
+            lu = self.parameters.get('dKpcUnit', 1000.)
+            # In cosmological runs, lengths are stored as length*scale_factor
+            self.length_unit = self.quan(lu, 'kpc')*self.scale_factor
+            self.mass_unit = self.quan(mu, 'Msun')
+            density_unit = self.mass_unit/ (self.length_unit/self.scale_factor)**3
+            # Gasoline's hubble constant, dHubble0, is stored in units of proper code time.
+            self.hubble_constant *= np.sqrt(G.in_units('kpc**3*Msun**-1*s**-2')*density_unit).value/(3.2407793e-18)  
+            cosmo = Cosmology(self.hubble_constant,
                               self.omega_matter, self.omega_lambda)
-            self.length_unit = DW
-            density_unit = cosmo.critical_density(self.current_redshift)
-            self.mass_unit = density_unit * self.length_unit ** 3
+            self.current_time = cosmo.hubble_time(self.current_redshift)
         else:
             mu = self.parameters.get('dMsolUnit', 1.0)
             self.mass_unit = self.quan(mu, 'Msun')

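The hard-coded 3.2407793e-18 in the hubble_constant conversion above is 100 km/s/Mpc expressed in 1/s. A quick standalone sanity check of that normalization, assuming only the standard Mpc-to-km conversion:

    km_per_mpc = 3.0856776e19   # kilometers per megaparsec
    print(100.0 / km_per_mpc)   # ~3.2407793e-18, matching the constant above
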
diff -r 21ff66175bc694f23beb9a2582ca4d8d80bbbd08 -r 9a3d686de3d45af36dc4f6f924c9aebd35752284 yt/frontends/sph/owls_ion_tables.py
--- a/yt/frontends/sph/owls_ion_tables.py
+++ b/yt/frontends/sph/owls_ion_tables.py
@@ -162,8 +162,7 @@
         T  = np.array( T )
 
         if nH.size != T.size:
-            print ' array size mismatch !!! '
-            sys.exit(1)
+            raise ValueError("owls_ion_tables: array size mismatch!")
         
         # field discovery will have nH.size == 1 and T.size == 1
         # in that case we simply return 1.0
@@ -175,38 +174,25 @@
 
         # find inH and fnH
         #-----------------------------------------------------
-        inH = np.int32( ( nH - self.nH[0] ) / self.DELTA_nH )
-        fnH = ( nH - self.nH[inH] ) / self.dnH[inH]
-
-        indx = np.where( inH < 0 )[0]
-        if len(indx) > 0:
-            inH[indx] = 0
-            fnH[indx] = 0.0
-
-        indx = np.where( inH >= len(nH) )[0]
-        if len(indx) > 0:
-            inH[indx] = len(nH)-2
-            fnH[indx] = 1.0
+        x_nH = ( nH - self.nH[0] ) / self.DELTA_nH
+        x_nH_clip = np.clip( x_nH, 0.0, self.nH.size-1.001 )
+        fnH,inH = np.modf( x_nH_clip )
+        inH = inH.astype( np.int32 )
 
 
         # find iT and fT
         #-----------------------------------------------------
-        iT = np.int32( ( T - self.T[0] ) / self.DELTA_T )
-        fT = ( T - self.T[iT] ) / self.dT[iT]
+        x_T = ( T - self.T[0] ) / self.DELTA_T
+        x_T_clip = np.clip( x_T, 0.0, self.T.size-1.001 )
+        fT,iT = np.modf( x_T_clip )
+        iT = iT.astype( np.int32 )
+        
 
-        indx = np.where( iT < 0 )[0]
-        if len(indx) > 0:
-            iT[indx] = 0
-            fT[indx] = 0.0
-
-        indx = np.where( iT >= len(T) )[0]
-        if len(indx) > 0:
-            iT[indx] = len(T)-2
-            fT[indx] = 1.0
-
-
+        # short names for previously calculated iz and fz
+        #-----------------------------------------------------
         iz = self.iz
         fz = self.fz
+
                    
         # calculate interpolated value
         # use tri-linear interpolation on the log values

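The rewrite above replaces the explicit boundary branches with a clip/modf idiom: clamp the fractional table coordinate, then split it into an integer bin index and an interpolation weight. A minimal standalone NumPy sketch of the same idiom (table values hypothetical):

    import numpy as np

    grid = np.linspace(0.0, 10.0, 11)            # uniform table, spacing 1.0
    delta = grid[1] - grid[0]
    query = np.array([-2.0, 3.4, 42.0])          # below, inside, above range

    x = (query - grid[0]) / delta
    x = np.clip(x, 0.0, grid.size - 1.001)       # keeps the index <= size - 2
    frac, idx = np.modf(x)                       # fractional and integer parts
    idx = idx.astype(np.int32)
    print(idx, frac)                             # [0 3 9] [0.    0.4   0.999]
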
diff -r 21ff66175bc694f23beb9a2582ca4d8d80bbbd08 -r 9a3d686de3d45af36dc4f6f924c9aebd35752284 yt/geometry/cartesian_coordinates.py
--- a/yt/geometry/cartesian_coordinates.py
+++ b/yt/geometry/cartesian_coordinates.py
@@ -62,7 +62,8 @@
         period = self.period[:2].copy() # dummy here
         period[0] = self.period[self.x_axis[dim]]
         period[1] = self.period[self.y_axis[dim]]
-        period = period.in_units("code_length").d
+        if hasattr(period, 'in_units'):
+            period = period.in_units("code_length").d
         buff = _MPL.Pixelize(data_source['px'], data_source['py'],
                              data_source['pdx'], data_source['pdy'],
                              data_source[field], size[0], size[1],

diff -r 21ff66175bc694f23beb9a2582ca4d8d80bbbd08 -r 9a3d686de3d45af36dc4f6f924c9aebd35752284 yt/geometry/cylindrical_coordinates.py
--- a/yt/geometry/cylindrical_coordinates.py
+++ b/yt/geometry/cylindrical_coordinates.py
@@ -21,6 +21,8 @@
     _unknown_coord, \
     _get_coord_fields
 import yt.visualization._MPL as _MPL
+from yt.utilities.lib.misc_utilities import \
+    pixelize_cylinder
 #
 # Cylindrical fields
 #
@@ -71,11 +73,12 @@
                  units = "code_length**3")
 
 
-    def pixelize(self, dimension, data_source, field, bounds, size, antialias = True):
+    def pixelize(self, dimension, data_source, field, bounds, size,
+                 antialias = True, periodic = True):
         ax_name = self.axis_name[dimension]
         if ax_name in ('r', 'theta'):
             return self._ortho_pixelize(data_source, field, bounds, size,
-                                        antialias)
+                                        antialias, dimension, periodic)
         elif ax_name == "z":
             return self._cyl_pixelize(data_source, field, bounds, size,
                                         antialias)
@@ -83,20 +86,26 @@
             # Pixelizing along a cylindrical surface is a bit tricky
             raise NotImplementedError
 
-    def _ortho_pixelize(self, data_source, field, bounds, size, antialias):
+    def _ortho_pixelize(self, data_source, field, bounds, size, antialias,
+                        dim, periodic):
+        period = self.period[:2].copy() # dummy here
+        period[0] = self.period[self.x_axis[dim]]
+        period[1] = self.period[self.y_axis[dim]]
+        if hasattr(period, 'in_units'):
+            period = period.in_units("code_length").d
         buff = _MPL.Pixelize(data_source['px'], data_source['py'],
                              data_source['pdx'], data_source['pdy'],
                              data_source[field], size[0], size[1],
                              bounds, int(antialias),
-                             True, self.period).transpose()
+                             period, int(periodic)).transpose()
         return buff
 
     def _cyl_pixelize(self, data_source, field, bounds, size, antialias):
         buff = pixelize_cylinder(data_source['r'],
                                  data_source['dr'],
                                  data_source['theta'],
-                                 data_source['dtheta'],
-                                 size[0], data_source[field], bounds[0])
+                                 data_source['dtheta']/2.0, # half-widths
+                                 size, data_source[field], bounds)
         return buff
 
     axis_name = { 0  : 'r',  1  : 'z',  2  : 'theta',

diff -r 21ff66175bc694f23beb9a2582ca4d8d80bbbd08 -r 9a3d686de3d45af36dc4f6f924c9aebd35752284 yt/geometry/polar_coordinates.py
--- a/yt/geometry/polar_coordinates.py
+++ b/yt/geometry/polar_coordinates.py
@@ -91,8 +91,8 @@
         buff = pixelize_cylinder(data_source['r'],
                                  data_source['dr'],
                                  data_source['theta'],
-                                 data_source['dtheta'],
-                                 size[0], data_source[field], bounds[1])
+                                 data_source['dtheta'] / 2.0, # half-widths
+                                 size, data_source[field], bounds)
         return buff
 
     axis_name = { 0  : 'r',  1  : 'theta',  2  : 'z',

diff -r 21ff66175bc694f23beb9a2582ca4d8d80bbbd08 -r 9a3d686de3d45af36dc4f6f924c9aebd35752284 yt/geometry/spherical_coordinates.py
--- /dev/null
+++ b/yt/geometry/spherical_coordinates.py
@@ -0,0 +1,162 @@
+"""
+Spherical fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from .coordinate_handler import \
+    CoordinateHandler, \
+    _unknown_coord, \
+    _get_coord_fields
+import yt.visualization._MPL as _MPL
+from yt.utilities.lib.misc_utilities import \
+    pixelize_cylinder, pixelize_aitoff
+
+class SphericalCoordinateHandler(CoordinateHandler):
+
+    def __init__(self, pf, ordering = 'rtp'):
+        if ordering != 'rtp': raise NotImplementedError
+        super(SphericalCoordinateHandler, self).__init__(pf)
+
+    def setup_fields(self, registry):
+        # return the fields for r, z, theta
+        registry.add_field(("index", "dx"), function=_unknown_coord)
+        registry.add_field(("index", "dy"), function=_unknown_coord)
+        registry.add_field(("index", "dz"), function=_unknown_coord)
+        registry.add_field(("index", "x"), function=_unknown_coord)
+        registry.add_field(("index", "y"), function=_unknown_coord)
+        registry.add_field(("index", "z"), function=_unknown_coord)
+        f1, f2 = _get_coord_fields(0)
+        registry.add_field(("index", "dr"), function = f1,
+                           display_field = False,
+                           units = "code_length")
+        registry.add_field(("index", "r"), function = f2,
+                           display_field = False,
+                           units = "code_length")
+
+        f1, f2 = _get_coord_fields(1, "")
+        registry.add_field(("index", "dtheta"), function = f1,
+                           display_field = False,
+                           units = "")
+        registry.add_field(("index", "theta"), function = f2,
+                           display_field = False,
+                           units = "")
+
+        f1, f2 = _get_coord_fields(2, "")
+        registry.add_field(("index", "dphi"), function = f1,
+                           display_field = False,
+                           units = "")
+        registry.add_field(("index", "phi"), function = f2,
+                           display_field = False,
+                           units = "")
+
+        def _SphericalVolume(field, data):
+            # r**2 sin theta dr dtheta dphi
+            vol = data["index", "r"]**2.0
+            vol *= data["index", "dr"]
+            vol *= np.sin(data["index", "theta"])
+            vol *= data["index", "dtheta"]
+            vol *= data["index", "dphi"]
+            return vol
+        registry.add_field(("index", "cell_volume"),
+                 function=_SphericalVolume,
+                 units = "code_length**3")
+
+    def pixelize(self, dimension, data_source, field, bounds, size,
+                 antialias = True, periodic = True):
+        if dimension == 0:
+            return self._ortho_pixelize(data_source, field, bounds, size,
+                                        antialias, dimension, periodic)
+        elif dimension in (1, 2):
+            return self._cyl_pixelize(data_source, field, bounds, size,
+                                          antialias, dimension)
+        else:
+            raise NotImplementedError
+
+    def _ortho_pixelize(self, data_source, field, bounds, size, antialias,
+                        dim, periodic):
+        # We should be using fcoords
+        period = self.period[:2].copy() # dummy here
+        period[0] = self.period[self.x_axis[dim]]
+        period[1] = self.period[self.y_axis[dim]]
+        period = period.in_units("code_length").d
+        buff = _MPL.Pixelize(data_source['px'], data_source['py'],
+                             data_source['pdx'], data_source['pdy'],
+                             data_source[field], size[0], size[1],
+                             bounds, int(antialias),
+                             period, int(periodic)).transpose()
+        return buff
+
+    def _cyl_pixelize(self, data_source, field, bounds, size, antialias,
+                      dimension):
+        if dimension == 1:
+            buff = pixelize_cylinder(data_source['r'],
+                                     data_source['dr'],
+                                     data_source['phi'],
+                                     data_source['dphi'] / 2.0, # half-widths
+                                     size, data_source[field], bounds)
+        elif dimension == 2:
+            buff = pixelize_cylinder(data_source['r'],
+                                     data_source['dr'],
+                                     data_source['theta'],
+                                     data_source['dtheta'] / 2.0, # half-widths
+                                     size, data_source[field], bounds)
+            buff = pixelize_cylinder(data_source['r'],
+                                     data_source['dr'],
+                                     2.0*np.pi - data_source['theta'],
+                                     data_source['dtheta'] / 2.0, # half-widths
+                                     size, data_source[field], bounds,
+                                     input_img = buff)
+        else:
+            raise RuntimeError
+        return buff
+
+
+    def convert_from_cartesian(self, coord):
+        raise NotImplementedError
+
+    def convert_to_cartesian(self, coord):
+        raise NotImplementedError
+
+    def convert_to_cylindrical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_cylindrical(self, coord):
+        raise NotImplementedError
+
+    def convert_to_spherical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_spherical(self, coord):
+        raise NotImplementedError
+
+    # Despite being mutable, we use these here to be clear about how they
+    # are generated and to ensure that they are not re-generated unnecessarily.
+    axis_name = { 0  : 'r',  1  : 'theta',  2  : 'phi',
+                 'r' : 'r', 'theta' : 'theta', 'phi' : 'phi',
+                 'R' : 'r', 'Theta' : 'theta', 'Phi' : 'phi'}
+
+    axis_id = { 'r' : 0, 'theta' : 1, 'phi' : 2,
+                 0  : 0,  1  : 1,  2  : 2}
+
+    x_axis = { 'r' : 1, 'theta' : 0, 'phi' : 0,
+                0  : 1,  1  : 0,  2  : 0}
+
+    y_axis = { 'r' : 2, 'theta' : 2, 'phi' : 1,
+                0  : 2,  1  : 2,  2  : 1}
+
+    @property
+    def period(self):
+        return self.pf.domain_width
+

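As a quick check of the volume element in _SphericalVolume above: summing r**2 sin(theta) dr dtheta dphi over a unit sphere should recover 4*pi/3. A standalone sketch (grid resolution arbitrary):

    import numpy as np

    nr, nt, nphi = 64, 64, 64
    dr, dth, dphi = 1.0 / nr, np.pi / nt, 2.0 * np.pi / nphi
    r = (np.arange(nr) + 0.5) * dr               # cell-centered radii
    theta = (np.arange(nt) + 0.5) * dth          # cell-centered colatitudes
    R, T = np.meshgrid(r, theta, indexing="ij")
    total = (R**2 * np.sin(T) * dr * dth * dphi).sum() * nphi
    print(total, 4.0 * np.pi / 3.0)              # both ~4.18879
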
diff -r 21ff66175bc694f23beb9a2582ca4d8d80bbbd08 -r 9a3d686de3d45af36dc4f6f924c9aebd35752284 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -99,8 +99,8 @@
     "me": (mass_electron_grams, dimensions.mass),
     "angstrom": (cm_per_ang, dimensions.length),
     "Jy": (jansky_cgs, dimensions.specific_flux),
-    "beam": (1.0, dimensions.dimensionless)
-
+    "beam": (1.0, dimensions.dimensionless),
+    "pixel": (1.0, dimensions.length),
 }
 
 # Add LaTeX representations for units with trivial representations.

diff -r 21ff66175bc694f23beb9a2582ca4d8d80bbbd08 -r 9a3d686de3d45af36dc4f6f924c9aebd35752284 yt/units/unit_registry.py
--- a/yt/units/unit_registry.py
+++ b/yt/units/unit_registry.py
@@ -82,7 +82,7 @@
         """
         if symbol not in self.lut:
             raise SymbolNotFoundError(
-                "Tried to remove the symbol '%s', but it does not exist" \
+                "Tried to modify the symbol '%s', but it does not exist" \
                 "in this registry." % symbol)
 
         if hasattr(cgs_value, "in_cgs"):

diff -r 21ff66175bc694f23beb9a2582ca4d8d80bbbd08 -r 9a3d686de3d45af36dc4f6f924c9aebd35752284 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -477,23 +477,27 @@
                       np.ndarray[np.float64_t, ndim=1] dradius,
                       np.ndarray[np.float64_t, ndim=1] theta,
                       np.ndarray[np.float64_t, ndim=1] dtheta,
-                      int buff_size,
+                      buff_size,
                       np.ndarray[np.float64_t, ndim=1] field,
-                      np.float64_t rmax=-1.0) :
+                      extents, input_img = None):
 
     cdef np.ndarray[np.float64_t, ndim=2] img
     cdef np.float64_t x, y, dx, dy, r0, theta0
+    cdef np.float64_t rmax, x0, y0, x1, y1
     cdef np.float64_t r_i, theta_i, dr_i, dtheta_i, dthetamin
     cdef int i, pi, pj
     
-    if rmax < 0.0 :
-        imax = radius.argmax()
-        rmax = radius[imax] + dradius[imax]
+    imax = radius.argmax()
+    rmax = radius[imax] + dradius[imax]
           
-    img = np.zeros((buff_size, buff_size))
-    extents = [-rmax, rmax] * 2
-    dx = (extents[1] - extents[0]) / img.shape[0]
-    dy = (extents[3] - extents[2]) / img.shape[1]
+    if input_img is None:
+        img = np.zeros((buff_size[0], buff_size[1]))
+        img[:] = np.nan
+    else:
+        img = input_img
+    x0, x1, y0, y1 = extents
+    dx = (x1 - x0) / img.shape[0]
+    dy = (y1 - y0) / img.shape[1]
       
     dthetamin = dx / rmax
       
@@ -513,14 +517,73 @@
                     continue
                 x = r_i * math.cos(theta_i)
                 y = r_i * math.sin(theta_i)
-                pi = <int>((x + rmax)/dx)
-                pj = <int>((y + rmax)/dy)
-                img[pi, pj] = field[i]
+                pi = <int>((x - x0)/dx)
+                pj = <int>((y - y0)/dy)
+                if pi >= 0 and pi < img.shape[0] and \
+                   pj >= 0 and pj < img.shape[1]:
+                    if img[pi, pj] != img[pi, pj]:
+                        img[pi, pj] = 0.0
+                    img[pi, pj] = field[i]
                 r_i += 0.5*dx 
             theta_i += dthetamin
 
     return img
 
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def pixelize_aitoff(np.ndarray[np.float64_t, ndim=1] theta,
+                    np.ndarray[np.float64_t, ndim=1] dtheta,
+                    np.ndarray[np.float64_t, ndim=1] phi,
+                    np.ndarray[np.float64_t, ndim=1] dphi,
+                    buff_size,
+                    np.ndarray[np.float64_t, ndim=1] field,
+                    extents, input_img = None):
+    
+    cdef np.ndarray[np.float64_t, ndim=2] img
+    cdef int i, j, nf, fi
+    cdef np.float64_t x, y, z, zb
+    cdef np.float64_t dx, dy, inside
+    cdef np.float64_t theta1, dtheta1, phi1, dphi1
+    cdef np.float64_t theta0, phi0
+    cdef np.float64_t PI = np.pi
+    cdef np.float64_t s2 = math.sqrt(2.0)
+    nf = field.shape[0]
+    
+    if input_img is None:
+        img = np.zeros((buff_size[0], buff_size[1]))
+        img[:] = np.nan
+    else:
+        img = input_img
+    dx = 2.0 / (img.shape[0] - 1)
+    dy = 2.0 / (img.shape[1] - 1)
+    for i in range(img.shape[0]):
+        x = (-1.0 + i*dx)*s2*2.0
+        for j in range(img.shape[1]):
+            y = (-1.0 + j * dy)*s2
+            zb = (x*x/8.0 + y*y/2.0 - 1.0)
+            if zb > 0: continue
+            z = (1.0 - (x/4.0)**2.0 - (y/2.0)**2.0)
+            z = z**0.5
+            # Longitude
+            phi0 = (2.0*math.atan(z*x/(2.0 * (2.0*z*z-1.0))) + PI)
+            # Latitude
+            # We shift it into co-latitude
+            theta0 = (math.asin(z*y) + PI/2.0)
+            # Now we just need to figure out which pixel contributes.
+            # We do not have a fast search.
+            for fi in range(nf):
+                theta1 = theta[fi]
+                dtheta1 = dtheta[fi]
+                if not (theta1 - dtheta1 <= theta0 <= theta1 + dtheta1):
+                    continue
+                phi1 = phi[fi]
+                dphi1 = dphi[fi]
+                if not (phi1 - dphi1 <= phi0 <= phi1 + dphi1):
+                    continue
+                img[i, j] = field[fi]
+    return img
+
 #@cython.cdivision(True)
 #@cython.boundscheck(False)
 #@cython.wraparound(False)

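The heart of pixelize_aitoff above is the inverse Aitoff mapping from buffer coordinates back to (co-latitude, longitude). A pure-Python sketch of that mapping for a single pixel (the function name and nx/ny arguments are illustrative, not part of the changeset):

    import numpy as np

    def aitoff_pixel_to_sphere(i, j, nx, ny):
        dx, dy = 2.0 / (nx - 1), 2.0 / (ny - 1)
        x = (-1.0 + i * dx) * np.sqrt(2.0) * 2.0
        y = (-1.0 + j * dy) * np.sqrt(2.0)
        if x * x / 8.0 + y * y / 2.0 - 1.0 > 0:
            return None                          # outside the projection ellipse
        z = np.sqrt(1.0 - (x / 4.0)**2 - (y / 2.0)**2)
        phi = 2.0 * np.arctan(z * x / (2.0 * (2.0 * z * z - 1.0))) + np.pi
        theta = np.arcsin(z * y) + np.pi / 2.0   # shifted into co-latitude
        return theta, phi
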
diff -r 21ff66175bc694f23beb9a2582ca4d8d80bbbd08 -r 9a3d686de3d45af36dc4f6f924c9aebd35752284 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -165,8 +165,20 @@
         width = get_sanitized_width(axis, width, None, pf)
         center = get_sanitized_center(center, pf)
     elif pf.geometry in ("polar", "cylindrical"):
+        # Set our default width to be the full domain
         width = [pf.domain_right_edge[0]*2.0, pf.domain_right_edge[0]*2.0]
         center = pf.arr([0.0, 0.0, 0.0], "code_length")
+    elif pf.geometry == "spherical":
+        if axis == 0:
+            width = pf.domain_width[1], pf.domain_width[2]
+            center = 0.5*(pf.domain_left_edge +
+                pf.domain_right_edge).in_units("code_length")
+        else:
+            # Our default width here is the full domain
+            width = [pf.domain_right_edge[0]*2.0, pf.domain_right_edge[0]*2.0]
+            center = pf.arr([0.0, 0.0, 0.0], "code_length")
+    else:
+        raise NotImplementedError
     bounds = (center[x_dict[axis]]-width[0] / 2,
               center[x_dict[axis]]+width[0] / 2,
               center[y_dict[axis]]-width[1] / 2,


https://bitbucket.org/yt_analysis/yt/commits/be03f167a680/
Changeset:   be03f167a680
Branch:      yt-3.0
User:        samskillman
Date:        2014-04-10 20:02:25
Summary:     Few bits of documentation, and print -> mylog
Affected #:  1 file

diff -r 9a3d686de3d45af36dc4f6f924c9aebd35752284 -r be03f167a68040fd922f5f0e792172164d76124b yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -156,10 +156,12 @@
                         mylog.info("Adding field %s to the list of fields." % (fname))
                         self._detect_image_units(fname, h.header)
 
+        # For line fields, we still read the primary field. Not sure how to
+        # extend this yet; for now, we pick off the first field from the field list.
         line_db = self.parameter_file.line_database
         primary_fname = self.field_list[0][1]
         for k, v in line_db.iteritems():
-            print "Adding line: ", k, v
+            mylog.info("Adding line field: %s at offset %i" % (k, v))
             self.field_list.append((self.dataset_type, k))
             self._ext_map[k] = self._ext_map[primary_fname]
             self._axis_map[k] = self._axis_map[primary_fname]


https://bitbucket.org/yt_analysis/yt/commits/aa4ef7ef1e63/
Changeset:   aa4ef7ef1e63
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-10 20:07:57
Summary:     Merge
Affected #:  2 files

diff -r 57d2946c6dbe72053e28977cf1173b9f04443ce0 -r aa4ef7ef1e63223da0a4b8a47d0bba70bfb71cc4 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -161,6 +161,18 @@
                         mylog.info("Adding field %s to the list of fields." % (fname))
                         self._determine_image_units(fname, h.header)
 
+        # For line fields, we still read the primary field. Not sure how to
+        # extend this yet; for now, we pick off the first field from the field list.
+        line_db = self.parameter_file.line_database
+        primary_fname = self.field_list[0][1]
+        for k, v in line_db.iteritems():
+            mylog.info("Adding line field: %s at offset %i" % (k, v))
+            self.field_list.append((self.dataset_type, k))
+            self._ext_map[k] = self._ext_map[primary_fname]
+            self._axis_map[k] = self._axis_map[primary_fname]
+            self._file_map[k] = self._file_map[primary_fname]
+            self.parameter_file.field_units[k] = "Jy/beam"
+
     def _count_grids(self):
         self.num_grids = self.pf.nprocs
 
@@ -234,7 +246,17 @@
                  nprocs = None,
                  storage_filename = None,
                  mask_nans = False,
-                 suppress_astropy_warnings = True):
+                 folded_axis=None,
+                 folded_width=None,
+                 line_database=None,
+                 suppress_astropy_warnings = True
+                 ):
+        self.folded_axis = folded_axis
+        self.folded_width = folded_width
+        if line_database is None:
+            line_database = {}
+        self.line_database = line_database
+
         if suppress_astropy_warnings:
             warnings.filterwarnings('ignore', module="astropy", append=True)
         self.filenames = [filename] + slave_files
@@ -318,6 +340,11 @@
         self.domain_left_edge = np.array([0.5]*3)
         self.domain_right_edge = np.array([float(dim)+0.5 for dim in self.domain_dimensions])
 
+        if self.folded_axis is not None:
+            self.domain_left_edge[self.folded_axis] = -self.folded_width/2.
+            self.domain_right_edge[self.folded_axis] = self.folded_width/2.
+            self.domain_dimensions[self.folded_axis] = int(self.folded_width)
+
         if self.dimensionality == 2:
             self.domain_left_edge[-1] = 0.5
             self.domain_right_edge[-1] = 1.5

diff -r 57d2946c6dbe72053e28977cf1173b9f04443ce0 -r aa4ef7ef1e63223da0a4b8a47d0bba70bfb71cc4 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -24,7 +24,10 @@
         super(IOHandlerFITS, self).__init__(pf)
         self.pf = pf
         self._handle = pf._handle
-        
+        self.folded = False
+        if self.pf.folded_axis is not None:
+            self.folded = True
+
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
         pass
@@ -42,13 +45,27 @@
                     size, [f2 for f1, f2 in fields], ng)
         for field in fields:
             ftype, fname = field
+            tmp_fname = fname
+            if fname in self.pf.line_database:
+                fname = self.pf.field_list[0][1]
             f = self.pf.index._file_map[fname]
             ds = f[self.pf.index._ext_map[fname]]
+            fname = tmp_fname
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:
-                    start = (g.LeftEdge.ndarray_view()-0.5).astype("int")
-                    end = (g.RightEdge.ndarray_view()-0.5).astype("int")
+                    centering = np.array([0.5]*3)
+                    if self.folded:
+                        centering[-1] = 0.0
+                    start = (g.LeftEdge.ndarray_view()-centering).astype("int")
+                    end = (g.RightEdge.ndarray_view()-centering).astype("int")
+                    if self.folded:
+                        my_off = self.pf.line_database.get(fname, 0)\
+                            + self.pf.folded_width/2
+
+                        start[-1] += my_off
+                        end[-1] += my_off
+                        mylog.debug("Reading from " + str(start) + str(end))
                     if self.pf.dimensionality == 2:
                         nx, ny = g.ActiveDimensions[:2]
                         nz = 1

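The folded-axis bookkeeping above shifts a grid's local indices onto the full spectral axis: edges along the folded axis run over [-folded_width/2, folded_width/2), and each field reads a folded_width-wide window of the big cube starting at its line offset. A toy sketch of the arithmetic (all values hypothetical):

    folded_width = 20
    line_offset = 130                    # hypothetical line_database entry
    start, end = -10, 10                 # grid edges along the folded axis
    my_off = line_offset + folded_width // 2
    print(start + my_off, end + my_off)  # 130 150: the window actually read
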

https://bitbucket.org/yt_analysis/yt/commits/63c7975b6bf1/
Changeset:   63c7975b6bf1
Branch:      yt-3.0
User:        samskillman
Date:        2014-04-10 20:07:52
Summary:     Units are not handled correctly for lines.
Affected #:  1 file

diff -r be03f167a68040fd922f5f0e792172164d76124b -r 63c7975b6bf190645a5ef0b3788b855056b15f40 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -166,7 +166,7 @@
             self._ext_map[k] = self._ext_map[primary_fname]
             self._axis_map[k] = self._axis_map[primary_fname]
             self._file_map[k] = self._file_map[primary_fname]
-            self.parameter_file.field_units[k] = "Jy/beam"
+            self.parameter_file.field_units[k] = self.parameter_file.field_units[primary_fname]
 
     def _count_grids(self):
         self.num_grids = self.pf.nprocs


https://bitbucket.org/yt_analysis/yt/commits/119582a9ec52/
Changeset:   119582a9ec52
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-10 20:11:14
Summary:     Merge
Affected #:  2 files

diff -r 63c7975b6bf190645a5ef0b3788b855056b15f40 -r 119582a9ec52129d76a28f4b2716aff4f9661d6d yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -16,6 +16,7 @@
 import numpy.core.defchararray as np_char
 import weakref
 import warnings
+import re
 
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
@@ -33,6 +34,7 @@
 from .fields import FITSFieldInfo
 from yt.utilities.decompose import \
     decompose_array, get_psize, decompose_array_nocopy
+from yt.units.unit_lookup_table import default_unit_symbol_lut, prefixable_units
 
 class astropy_imports:
     _pyfits = None
@@ -74,10 +76,12 @@
 
 ap = astropy_imports()
 
-known_units = {"k":"K",
-               "jy":"Jy"}
+known_units = dict([(unit.lower(),unit) for unit in default_unit_symbol_lut])
+axes_prefixes = ["RA","DEC","V","ENER","FREQ"]
 
-axes_prefixes = ["RA","DEC","V","ENER","FREQ"]
+delimiters = ["*", "/", "-", "^"]
+delimiters += [str(i) for i in xrange(10)]
+regex_pattern = '|'.join(map(re.escape, delimiters))
 
 class FITSGrid(AMRGridPatch):
     _id_offset = 0
@@ -109,15 +113,16 @@
     def _initialize_data_storage(self):
         pass
 
-    def _detect_image_units(self, fname, header):
+    def _determine_image_units(self, fname, header):
         try:
             field_units = header["bunit"].lower().strip(" ")
             # FITS units always return upper-case, so we need to get
             # the right case by comparing against known units. This
             # only really works for common units.
-            for name in known_units:
-                if field_units.find(name) > -1:
-                    field_units = field_units.replace(name, known_units[name])
+            units = re.split(regex_pattern, field_units)
+            for unit in units:
+                if unit in known_units:
+                    field_units = field_units.replace(unit, known_units[unit])
             self.parameter_file.field_units[fname] = field_units
         except:
             pass
@@ -154,7 +159,7 @@
                         self._ext_map[fname] = j
                         self.field_list.append((self.dataset_type, fname))
                         mylog.info("Adding field %s to the list of fields." % (fname))
-                        self._detect_image_units(fname, h.header)
+                        self._determine_image_units(fname, h.header)
 
+        # For line fields, we still read the primary field. Not sure how to
+        # extend this yet; for now, we pick off the first field from the field list.

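The regex-based unit handling introduced above splits a FITS BUNIT string on operators and digits, then restores the canonical case of each recognized symbol. A standalone sketch with a toy lookup table standing in for default_unit_symbol_lut:

    import re

    delimiters = ["*", "/", "-", "^"] + [str(i) for i in range(10)]
    pattern = '|'.join(map(re.escape, delimiters))
    known_units = {"jy": "Jy", "k": "K"}      # toy subset of the real table

    field_units = "jy/beam"
    for unit in re.split(pattern, field_units):
        if unit in known_units:
            field_units = field_units.replace(unit, known_units[unit])
    print(field_units)                        # -> Jy/beam
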
diff -r 63c7975b6bf190645a5ef0b3788b855056b15f40 -r 119582a9ec52129d76a28f4b2716aff4f9661d6d yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -100,15 +100,9 @@
     def __init__(self, ds, normal, fields, set_wcs=False, **kwargs):
 
         self.ds = ds
-        my_normal = normal
-        if ds.xyv_data:
-            if len(normal) > 2:
-                raise NotImplementedError("Normal vector must be in two dimensions for this dataset!")
-            my_normal = np.zeros((3))
-            my_normal[ds.ra_axis] = normal[0]
-            my_normal[ds.dec_axis] = normal[1]
-
-        super(FITSOffAxisSlicePlot, self).__init__(ds, my_normal, fields, **kwargs)
+        self.set_wcs = set_wcs
+        
+        super(FITSOffAxisSlicePlot, self).__init__(ds, normal, fields, **kwargs)
         self.set_axes_unit("pixel")
 
 class FITSProjectionPlot(ProjectionPlot):
@@ -142,14 +136,7 @@
     def __init__(self, ds, normal, fields, set_wcs=False, **kwargs):
 
         self.ds = ds
-        my_normal = normal
-        if ds.xyv_data:
-            if len(normal) > 2:
-                raise ValueError("Normal vector must be in two dimensions for this dataset!")
-            my_normal = np.zeros((3))
-            my_normal[ds.ra_axis] = normal[0]
-            my_normal[ds.dec_axis] = normal[1]
+        self.set_wcs = set_wcs
+        super(FITSOffAxisProjectionPlot, self).__init__(ds, normal, fields, axes_unit="pixel", **kwargs)
 
-        super(FITSOffAxisProjectionPlot, self).__init__(ds, my_normal, fields, axes_unit="pixel", **kwargs)
 
-


https://bitbucket.org/yt_analysis/yt/commits/7d54cfd44432/
Changeset:   7d54cfd44432
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-11 20:43:46
Summary:     An XYVCube class and analysis module for creating FITS data cubes in X, Y, and line-of-sight velocity from off-axis projections.
Affected #:  6 files

diff -r 119582a9ec52129d76a28f4b2716aff4f9661d6d -r 7d54cfd44432f266476ad3f7c0af2ec3c9b54028 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -120,3 +120,6 @@
      TableAbsorbModel, \
      PhotonModel, \
      ThermalPhotonModel
+
+from .xyv_cube.api import \
+    XYVCube

diff -r 119582a9ec52129d76a28f4b2716aff4f9661d6d -r 7d54cfd44432f266476ad3f7c0af2ec3c9b54028 yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -20,5 +20,8 @@
     config.add_subpackage("star_analysis")
     config.add_subpackage("two_point_functions")
     config.add_subpackage("radmc3d_export")
-    config.add_subpackage("sunyaev_zeldovich")    
+    config.add_subpackage("sunyaev_zeldovich")
+    config.add_subpackage("particle_trajectories")
+    config.add_subpackage("photon_simulator")
+    config.add_subpackage("xyv_cube")
     return config

diff -r 119582a9ec52129d76a28f4b2716aff4f9661d6d -r 7d54cfd44432f266476ad3f7c0af2ec3c9b54028 yt/analysis_modules/xyv_cube/api.py
--- /dev/null
+++ b/yt/analysis_modules/xyv_cube/api.py
@@ -0,0 +1,12 @@
+"""
+API for xyv_cube
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from xyv_cube import XYVCube

diff -r 119582a9ec52129d76a28f4b2716aff4f9661d6d -r 7d54cfd44432f266476ad3f7c0af2ec3c9b54028 yt/analysis_modules/xyv_cube/setup.py
--- /dev/null
+++ b/yt/analysis_modules/xyv_cube/setup.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('xyv_cube', parent_package, top_path)
+    #config.add_subpackage("tests")
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 119582a9ec52129d76a28f4b2716aff4f9661d6d -r 7d54cfd44432f266476ad3f7c0af2ec3c9b54028 yt/analysis_modules/xyv_cube/xyv_cube.py
--- /dev/null
+++ b/yt/analysis_modules/xyv_cube/xyv_cube.py
@@ -0,0 +1,158 @@
+"""
+Generating XYV FITS cubes
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.frontends.fits.data_structures import ap
+from yt.utilities.orientation import Orientation
+from yt.utilities.fits_image import FITSImageBuffer
+from yt.visualization.volume_rendering.camera import off_axis_projection
+from yt.funcs import get_pbar
+
+def create_intensity(vmin, vmax, ifield):
+    def _intensity(field, data):
+        idxs = np.logical_and(data["v_los"] >= vmin, data["v_los"] < vmax)
+        f = np.zeros(data[ifield].shape)
+        f[idxs] = data[ifield][idxs]
+        return f
+    return _intensity
+
+def create_vlos(z_hat):
+    def _v_los(field, data):
+        vz = data["velocity_x"]*z_hat[0] + \
+             data["velocity_y"]*z_hat[1] + \
+             data["velocity_z"]*z_hat[2]
+        return vz
+    return _v_los
+
+class XYVCube(object):
+    def __init__(self, ds, normal, field, width=(1.0,"unitary"),
+                 dims=(100,100,100), velocity_bounds=None):
+        r""" Initialize a XYVCube object.
+
+        Parameters
+        ----------
+        ds : dataset
+            The dataset.
+        normal : array_like
+            The normal vector along which to make the projections.
+        field : string
+            The field to project.
+        width : float or tuple, optional
+            The width of the projection in length units. Specify a float
+            for code_length units or a tuple (value, units).
+        dims : tuple, optional
+            A 3-tuple of dimensions (nx,ny,nv) for the cube.
+        velocity_bounds : tuple, optional
+            A 3-tuple of (vmin, vmax, units) for the velocity bounds to
+            integrate over. If None, the largest velocity magnitude in the
+            dataset is used, i.e. velocity_bounds = (-v.max(), v.max()).
+
+        Examples
+        --------
+        >>> i = 60*np.pi/180.
+        >>> L = [0.0,np.sin(i),np.cos(i)]
+        >>> cube = XYVCube(ds, L, "density", width=(10.,"kpc"),
+        ...                velocity_bounds=(-5.,4.,"km/s"))
+        """
+        self.ds = ds
+        self.field = field
+        self.width = width
+
+        self.nx = dims[0]
+        self.ny = dims[1]
+        self.nv = dims[2]
+
+        orient = Orientation(normal)
+
+        dd = ds.all_data()
+
+        fd = dd._determine_fields(field)[0]
+
+        self.field_units = ds.field_info[fd].units
+
+        if velocity_bounds is None:
+            vmin, vmax = dd.quantities["Extrema"]("velocity_magnitude")
+            self.v_bnd = -vmax, vmax
+        else:
+            self.v_bnd = (ds.arr(velocity_bounds[0], velocity_bounds[2]),
+                     ds.arr(velocity_bounds[1], velocity_bounds[2]))
+
+        vbins = np.linspace(self.v_bnd[0], self.v_bnd[1], num=self.nv+1)
+
+        vel_slices = []
+        pbar = get_pbar("Generating cube.", self.nv)
+        for i in xrange(self.nv):
+            v1 = vbins[i]
+            v2 = vbins[i+1]
+            _intensity = create_intensity(v1, v2, field)
+            _vlos = create_vlos(orient.unit_vectors[2])
+            ds.field_info.add_field(("gas","intensity"),
+                                    function=_intensity, units=self.field_units)
+            ds.field_info.add_field(("gas","v_los"),
+                                    function=_vlos, units="cm/s")
+            prj = off_axis_projection(ds, ds.domain_center, normal, width,
+                                      (self.nx, self.ny), "intensity")
+            vel_slices.append(prj)
+            pbar.update(i)
+
+        pbar.finish()
+        self.cube = ds.arr(np.array(vel_slices).transpose(1,2,0), self.field_units)
+
+    def write_fits(self, filename, clobber=True, length_unit=(10.0, "kpc"),
+                   velocity_unit="m/s", sky_center=(30.,45.)):
+        r""" Write the XYVCube to a FITS file.
+
+        Parameters
+        ----------
+        filename : string
+            The name of the file to write.
+        clobber : boolean
+            Whether or not to clobber an existing file with the same name.
+        length_unit : tuple, optional
+            The length that corresponds to the width of the projection in
+            (value, unit) form. Accepts a length unit or 'deg'.
+        velocity_unit : string, optional
+            The units for the velocity axis.
+        sky_center : tuple, optional
+            The (RA, Dec) coordinate in degrees of the central pixel if
+            *length_unit* is 'deg'.
+
+        Examples
+        --------
+        >>> cube.write_fits("my_cube.fits", clobber=False, length_unit=(5,"deg"),
+        ...                 velocity_unit="km/s")
+        """
+        if length_unit == "deg":
+            center = sky_center
+            types = ["RA---SIN","DEC--SIN"]
+        else:
+            center = [0.0,0.0]
+            types = ["LINEAR","LINEAR"]
+
+        v_center = 0.5*(self.v_bnd[0]+self.v_bnd[1]).in_units(velocity_unit).value
+
+        dx = length_unit[0]/self.nx
+        dy = length_unit[0]/self.ny
+        dv = (self.v_bnd[1]-self.v_bnd[0]).in_units(velocity_unit).value/self.nv
+
+        w = ap.pywcs.WCS(naxis=3)
+        w.wcs.crpix = [0.5*(self.nx+1), 0.5*(self.ny+1), 0.5*(self.nv+1)]
+        w.wcs.cdelt = [dx,dy,dv]
+        w.wcs.crval = [center[0], center[1], v_center]
+        w.wcs.cunit = [length_unit[1],length_unit[1],velocity_unit]
+        w.wcs.ctype = [types[0],types[1],"VELO-LSR"]
+
+        fib = FITSImageBuffer(self.cube, fields=self.field, wcs=w)
+        fib[0].header["bunit"] = self.field_units
+        fib[0].header["btype"] = self.field
+
+        fib.writeto(filename, clobber=clobber)

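The create_intensity closure above restricts the projected emission to a single velocity channel per pass. A simplified standalone sketch of the same masking idea (array names hypothetical):

    import numpy as np

    def make_channel_filter(vmin, vmax):
        # Zero out emission outside the [vmin, vmax) velocity channel.
        def _filter(v_los, emission):
            out = np.zeros_like(emission)
            mask = (v_los >= vmin) & (v_los < vmax)
            out[mask] = emission[mask]
            return out
        return _filter

    chan = make_channel_filter(-1.0, 1.0)
    print(chan(np.array([-2.0, 0.5, 3.0]), np.ones(3)))   # [0. 1. 0.]
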

https://bitbucket.org/yt_analysis/yt/commits/9cfea7c13f6f/
Changeset:   9cfea7c13f6f
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-12 16:24:18
Summary:     This was a failed experiment that probably should have never been attempted.
Affected #:  1 file

diff -r 7d54cfd44432f266476ad3f7c0af2ec3c9b54028 -r 9cfea7c13f6f1c6baac252a4edf06a82ace515f4 yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -9,134 +9,4 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
-
-import numpy as np
-from yt.funcs import fix_axis, ensure_list, iterable
-from yt.visualization.plot_window import AxisAlignedSlicePlot, \
-    OffAxisSlicePlot, ProjectionPlot, OffAxisProjectionPlot
-
-def force_aspect(ax,aspect=1):
-    im = ax.get_images()
-    extent = im[0].get_extent()
-    ax.set_aspect(abs((extent[1]-extent[0])/(extent[3]-extent[2]))/aspect)
-
-def convert_ticks(ticks, to_hours=False):
-    deg_ticks = ticks.astype("int")
-    min_ticks = ((ticks - deg_ticks)*60.).astype("int")
-    sec_ticks = ((((ticks - deg_ticks)*60.)-min_ticks)*60.).astype("int")
-    deg_string = "d"
-    if to_hours:
-        deg_ticks = (deg_ticks*24./360.).astype("int")
-        deg_string = "h"
-    return ["%02d%s%02dm%02ds" % (dt, deg_string, mt, st)
-            for dt, mt, st in zip(deg_ticks, min_ticks, sec_ticks)]
-
-def set_onaxis_wcs(pw):
-
-    ax = pw.plots.values()[0].axes
-    xpix = ax.get_xticks()
-    ypix = ax.get_yticks()
-    ra_ticks, dummy = pw.ds.wcs_2d.wcs_pix2world(xpix, ypix[0]*np.ones((len(xpix))), 1)
-    dummy, dec_ticks = pw.ds.wcs_2d.wcs_pix2world(xpix[0]*np.ones((len(ypix))), ypix, 1)
-    if pw.ds.dimensionality == 3:
-        vlim = pw.ds.wcs_1d.wcs_pix2world([pw.xlim[0], pw.xlim[1]], 1)[0]
-
-    if pw.axis == pw.ds.ra_axis:
-        xname = "Dec"
-        yname = pw.ds.vel_name
-        xunit = str(pw.ds.wcs_2d.wcs.cunit[1])
-        yunit = str(pw.ds.wcs_1d.wcs.cunit[0])
-    elif pw.axis == pw.ds.dec_axis:
-        xname = "RA"
-        yname = pw.ds.vel_name
-        xunit = str(pw.ds.wcs_2d.wcs.cunit[0])
-        yunit = str(pw.ds.wcs_1d.wcs.cunit[0])
-    elif pw.axis == pw.ds.vel_axis:
-        xname = "RA"
-        yname = "Dec"
-        xunit = str(pw.ds.wcs_2d.wcs.cunit[0])
-        yunit = str(pw.ds.wcs_2d.wcs.cunit[1])
-
-    for k,v in pw.plots.iteritems():
-        v.axes.set_xlabel(r"%s (%s)" % (xname, xunit))
-        v.axes.set_ylabel(r"%s (%s)" % (yname, yunit))
-        if xname == "Dec":
-            v.axes.xaxis.set_ticklabels(convert_ticks(dec_ticks), size=14)
-        if yname == "Dec":
-            v.axes.yaxis.set_ticklabels(convert_ticks(dec_ticks), size=14)
-        if xname == "RA":
-            v.axes.xaxis.set_ticklabels(convert_ticks(ra_ticks, to_hours=True), size=14)
-        if yname == pw.ds.vel_name:
-            extent = (pw.xlim[0].value, pw.xlim[1].value, vlim[0], vlim[1])
-            v.image.set_extent(extent)
-
-class FITSSlicePlot(AxisAlignedSlicePlot):
-
-    def __init__(self, ds, axis, fields, set_wcs=False, **kwargs):
-
-        if isinstance(axis, basestring):
-            if axis in ds.axis_names:
-                axis = ds.axis_names[axis]
-        self.axis = fix_axis(axis)
-        self.ds = ds
-        self.set_wcs = set_wcs
-        super(FITSSlicePlot, self).__init__(ds, axis, fields, origin="native", **kwargs)
-        self.set_axes_unit("pixel")
-
-    def _set_wcs(self):
-        if self.set_wcs:
-            set_onaxis_wcs(self)
-
-    def show(self):
-        self._set_wcs()
-        super(FITSSlicePlot, self).show()
-
-    def save_wcs(self, *args, **kwargs):
-        self._set_wcs()
-        super(FITSSlicePlot, self).save(*args, **kwargs)
-
-class FITSOffAxisSlicePlot(OffAxisSlicePlot):
-
-    def __init__(self, ds, normal, fields, set_wcs=False, **kwargs):
-
-        self.ds = ds
-        self.set_wcs = set_wcs
-        
-        super(FITSOffAxisSlicePlot, self).__init__(ds, normal, fields, **kwargs)
-        self.set_axes_unit("pixel")
-
-class FITSProjectionPlot(ProjectionPlot):
-
-    def __init__(self, ds, axis, fields, set_wcs=False, **kwargs):
-
-        self.ds = ds
-        if isinstance(axis, basestring):
-            if axis in ds.axis_names:
-                axis = ds.axis_names[axis]
-        self.axis = fix_axis(axis)
-        self.set_wcs = set_wcs
-
-        super(FITSProjectionPlot, self).__init__(ds, axis, fields, origin="native", **kwargs)
-        self.set_axes_unit("pixel")
-
-    def _set_wcs(self):
-        if self.set_wcs:
-            set_onaxis_wcs(self)
-
-    def show(self):
-        self._set_wcs()
-        super(FITSProjectionPlot, self).show()
-
-    def save(self, *args, **kwargs):
-        self._set_wcs()
-        super(FITSProjectionPlot, self).save(*args, **kwargs)
-
-class FITSOffAxisProjectionPlot(OffAxisProjectionPlot):
-
-    def __init__(self, ds, normal, fields, set_wcs=False, **kwargs):
-
-        self.ds = ds
-        self.set_wcs = set_wcs
-        super(FITSOffAxisProjectionPlot, self).__init__(ds, normal, fields, axes_unit="pixel", **kwargs)
-
-
+#-----------------------------------------------------------------------------
\ No newline at end of file
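
The convert_ticks helper removed above turns decimal-degree tick values into sexagesimal labels (degrees or hours, minutes, seconds). For reference, a standalone sketch of the same idea, with hypothetical tick values:

    import numpy as np

    def convert_ticks(ticks, to_hours=False):
        # Split decimal degrees into integer degrees, minutes, and seconds.
        deg = ticks.astype("int")
        minutes = ((ticks - deg) * 60.).astype("int")
        seconds = ((((ticks - deg) * 60.) - minutes) * 60.).astype("int")
        unit = "d"
        if to_hours:
            # 360 degrees of right ascension correspond to 24 hours.
            deg = (deg * 24. / 360.).astype("int")
            unit = "h"
        return ["%02d%s%02dm%02ds" % (d, unit, m, s)
                for d, m, s in zip(deg, minutes, seconds)]

    print(convert_ticks(np.array([30.0, 30.25]), to_hours=True))
    # ['02h00m00s', '02h15m00s']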


https://bitbucket.org/yt_analysis/yt/commits/2febc5316631/
Changeset:   2febc5316631
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-12 16:26:28
Summary:     Cleaning up
Affected #:  2 files

diff -r 9cfea7c13f6f1c6baac252a4edf06a82ace515f4 -r 2febc5316631c232f075a60c05118311c4e2cbbf yt/frontends/fits/api.py
--- a/yt/frontends/fits/api.py
+++ b/yt/frontends/fits/api.py
@@ -19,10 +19,4 @@
       FITSFieldInfo
 
 from .io import \
-      IOHandlerFITS
-
-from .misc import \
-      FITSOffAxisSlicePlot, \
-      FITSSlicePlot, \
-      FITSProjectionPlot, \
-      FITSOffAxisProjectionPlot
+      IOHandlerFITS
\ No newline at end of file

diff -r 9cfea7c13f6f1c6baac252a4edf06a82ace515f4 -r 2febc5316631c232f075a60c05118311c4e2cbbf yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -9,4 +9,4 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
-#-----------------------------------------------------------------------------
\ No newline at end of file
+


https://bitbucket.org/yt_analysis/yt/commits/9ed3b62b454a/
Changeset:   9ed3b62b454a
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-12 16:30:55
Summary:     Cleaning up FITSImageBuffer
Affected #:  1 file

diff -r 2febc5316631c232f075a60c05118311c4e2cbbf -r 9ed3b62b454a66ef2765196c6903edb1ddd744da yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -87,16 +87,20 @@
             mylog.error("Please specify one or more fields to write.")
             raise KeyError
 
-        first = False
-    
+        first = True
+
         for key in fields:
             if key not in exclude_fields:
                 mylog.info("Making a FITS image of field %s" % (key))
                 if first:
                     hdu = pyfits.PrimaryHDU(np.array(img_data[key]))
-                    hdu.name = key
+                    first = False
                 else:
-                    hdu = pyfits.ImageHDU(np.array(img_data[key]), name=key)
+                    hdu = pyfits.ImageHDU(np.array(img_data[key]))
+                hdu.name = key
+                hdu.header["btype"] = key
+                if hasattr(img_data[key], "units"):
+                    hdu.header["bunit"] = str(img_data[key].units)
                 self.append(hdu)
 
         self.dimensionality = len(self[0].data.shape)
@@ -192,21 +196,9 @@
     def items(self):
         return [(k, self[k]) for k in self.keys()]
 
-    def __add__(self, other):
-        if len(set(self.keys()).intersection(set(other.keys()))) > 0:
-            mylog.error("There are duplicate extension names! Don't know which ones you want to keep!")
-            raise KeyError
-        new_buffer = {}
-        for im1 in self:
-            new_buffer[im1.name] = im1.data
-        for im2 in other:
-            new_buffer[im2.name] = im2.data
-        new_wcs = self.wcs
-        return FITSImageBuffer(new_buffer, wcs=new_wcs)
-
     def writeto(self, fileobj, **kwargs):
         pyfits.HDUList(self).writeto(fileobj, **kwargs)
-        
+
     @property
     def shape(self):
         if self.dimensionality == 2:
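
The reworked loop above names every HDU, not just the primary one, and records the field name and units in the header. A minimal sketch of that bookkeeping, assuming astropy's pyfits-compatible API and a hypothetical img_data dict (a yt image array would additionally carry a .units attribute):

    import numpy as np
    from astropy.io import fits as pyfits

    img_data = {"density": np.ones((8, 8)),
                "temperature": np.full((8, 8), 1.0e7)}

    hdus = []
    first = True
    for key in img_data:
        if first:
            hdu = pyfits.PrimaryHDU(np.array(img_data[key]))
            first = False
        else:
            hdu = pyfits.ImageHDU(np.array(img_data[key]))
        hdu.name = key                     # extension name
        hdu.header["btype"] = key          # which field this image holds
        if hasattr(img_data[key], "units"):
            hdu.header["bunit"] = str(img_data[key].units)
        hdus.append(hdu)

    # "clobber" was the era's keyword; newer astropy calls it "overwrite".
    pyfits.HDUList(hdus).writeto("fields.fits", clobber=True)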


https://bitbucket.org/yt_analysis/yt/commits/adfba562d35c/
Changeset:   adfba562d35c
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-12 16:31:20
Summary:     Check for unit vector field parameters
Affected #:  1 file

diff -r 9ed3b62b454a66ef2765196c6903edb1ddd744da -r adfba562d35c361b8756f4f380504bfb645e5081 yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -173,6 +173,9 @@
         'cp_x_vec': '',
         'cp_y_vec': '',
         'cp_z_vec': '',
+        'x_hat': '',
+        'y_hat': '',
+        'z_hat': '',
         }
 
     def get_field_parameter(self, param, default = None):
@@ -186,6 +189,11 @@
             rv = YTArray((0.0, 0.0, 0.0), self.fp_units[param])
             rv['xyz'.index(ax)] = 1.0
             return rv
+        elif param.endswith("_hat"):
+            ax = param[0]
+            rv = YTArray((0.0, 0.0, 0.0), self.fp_units[param])
+            rv['xyz'.index(ax)] = 1.0
+            return rv
         elif param == "fof_groups":
             return None
         elif param == "mu":
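
The new elif branch treats any field parameter ending in "_hat" like the cp_*_vec parameters: the leading character names the axis and the detector hands back the corresponding unit vector. A plain-NumPy sketch of the dispatch (yt returns a YTArray with units instead):

    import numpy as np

    def get_field_parameter(param):
        # Mirrors the "_hat" branch added above, minus unit handling.
        if param.endswith("_hat"):
            ax = param[0]                # "x_hat" -> "x"
            rv = np.zeros(3)
            rv["xyz".index(ax)] = 1.0
            return rv
        raise KeyError(param)

    print(get_field_parameter("y_hat"))  # [ 0.  1.  0.]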


https://bitbucket.org/yt_analysis/yt/commits/4088a57d27ff/
Changeset:   4088a57d27ff
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-12 16:31:36
Summary:     This sky coordinate needs to be reversed
Affected #:  1 file

diff -r adfba562d35c361b8756f4f380504bfb645e5081 -r 4088a57d27ffb5602068c3191f1f3fb5c044fc76 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -319,7 +319,8 @@
             center = sky_center
             units = "deg"
             deltas *= sky_scale
-            
+            deltas[0] *= -1.
+
         fib = FITSImageBuffer(self.data, fields=self.data.keys(),
                               center=center, units=units,
                               scale=deltas)
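
The sign flip reflects the usual FITS sky convention: right ascension increases to the east, which is to the left on the image, so the RA pixel scale (CDELT1) must be negative. Schematically, with hypothetical numbers:

    import numpy as np

    deltas = np.array([1.0, 1.0])  # pixel scales in code units (hypothetical)
    sky_scale = 2.8e-4             # degrees per code unit (hypothetical)
    deltas = deltas * sky_scale
    deltas[0] *= -1.0              # RA axis: east-positive => CDELT1 < 0
    print(deltas)                  # [-0.00028  0.00028]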


https://bitbucket.org/yt_analysis/yt/commits/ed47f2c59511/
Changeset:   ed47f2c59511
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-12 16:33:08
Summary:     Refinements to get the axes order correct
Affected #:  1 file

diff -r 4088a57d27ffb5602068c3191f1f3fb5c044fc76 -r ed47f2c5951184bf0cf5f391aa2adab2befb1591 yt/analysis_modules/xyv_cube/xyv_cube.py
--- a/yt/analysis_modules/xyv_cube/xyv_cube.py
+++ b/yt/analysis_modules/xyv_cube/xyv_cube.py
@@ -30,7 +30,7 @@
         vz = data["velocity_x"]*z_hat[0] + \
              data["velocity_y"]*z_hat[1] + \
              data["velocity_z"]*z_hat[2]
-        return vz
+        return -vz
     return _v_los
 
 class XYVCube(object):
@@ -71,7 +71,13 @@
         self.ny = dims[1]
         self.nv = dims[2]
 
-        orient = Orientation(normal)
+        normal = np.array(normal)
+        normal /= np.sqrt(np.dot(normal, normal))
+        vecs = np.identity(3)
+        t = np.cross(normal, vecs).sum(axis=1)
+        ax = t.argmax()
+        north = np.cross(normal, vecs[ax,:]).ravel()
+        orient = Orientation(normal, north_vector=north)
 
         dd = ds.all_data()
 
@@ -88,7 +94,7 @@
 
         vbins = np.linspace(self.v_bnd[0], self.v_bnd[1], num=self.nv+1)
 
-        vel_slices = []
+        self.data = ds.arr(np.zeros((self.nx,self.ny,self.nv)), self.field_units)
         pbar = get_pbar("Generating cube.", self.nv)
         for i in xrange(self.nv):
             v1 = vbins[i]
@@ -101,11 +107,10 @@
                                     function=_vlos, units="cm/s")
             prj = off_axis_projection(ds, ds.domain_center, normal, width,
                                       (self.nx, self.ny), "intensity")
-            vel_slices.append(prj)
+            self.data[:,:,i] = prj[:,:]
             pbar.update(i)
 
         pbar.finish()
-        self.cube = ds.arr(np.array(vel_slices).transpose(1,2,0), self.field_units)
 
     def write_fits(self, filename, clobber=True, length_unit=(10.0, "kpc"),
                    velocity_unit="m/s", sky_center=(30.,45.)):
@@ -131,7 +136,7 @@
         >>> cube.write_fits("my_cube.fits", clobber=False, length_unit=(5,"deg"),
         ...                 velocity_unit="km/s")
         """
-        if length_unit == "deg":
+        if length_unit[1] == "deg":
             center = sky_center
             types = ["RA---SIN","DEC--SIN"]
         else:
@@ -144,6 +149,9 @@
         dy = length_unit[0]/self.ny
         dv = (self.v_bnd[1]-self.v_bnd[0]).in_units(velocity_unit).value/self.nv
 
+        if length_unit[1] == "deg":
+            dx *= -1.
+
         w = ap.pywcs.WCS(naxis=3)
         w.wcs.crpix = [0.5*(self.nx+1), 0.5*(self.ny+1), 0.5*(self.nv+1)]
         w.wcs.cdelt = [dx,dy,dv]
@@ -151,7 +159,7 @@
         w.wcs.cunit = [length_unit[1],length_unit[1],velocity_unit]
         w.wcs.ctype = [types[0],types[1],"VELO-LSR"]
 
-        fib = FITSImageBuffer(self.cube, fields=self.field, wcs=w)
+        fib = FITSImageBuffer(self.data.transpose(), fields=self.field, wcs=w)
         fib[0].header["bunit"] = self.field_units
         fib[0].header["btype"] = self.field
 


https://bitbucket.org/yt_analysis/yt/commits/d12aa79d7872/
Changeset:   d12aa79d7872
Branch:      yt-3.0
User:        samskillman
Date:        2014-04-11 20:52:12
Summary:     Make sure we don't get out-of-bounds errors when shifting the windows around. Ideally this would pad with zeros instead, but I'm not sure.
Affected #:  2 files

diff -r 63c7975b6bf190645a5ef0b3788b855056b15f40 -r d12aa79d7872806e128a6e8e10e035d35c1ae78a yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -248,6 +248,7 @@
                  ):
         self.folded_axis = folded_axis
         self.folded_width = folded_width
+        self._unfolded_domain_dimensions = None
         if line_database is None:
             line_database = {}
         self.line_database = line_database
@@ -338,6 +339,7 @@
         if self.folded_axis is not None:
             self.domain_left_edge[self.folded_axis] = -self.folded_width/2.
             self.domain_right_edge[self.folded_axis] = self.folded_width/2.
+            self._unfolded_domain_dimensions = self.domain_dimensions.copy()
             self.domain_dimensions[self.folded_axis] = int(self.folded_width)
 
         if self.dimensionality == 2:

diff -r 63c7975b6bf190645a5ef0b3788b855056b15f40 -r d12aa79d7872806e128a6e8e10e035d35c1ae78a yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -56,15 +56,21 @@
                 for g in chunk.objs:
                     centering = np.array([0.5]*3)
                     if self.folded:
-                        centering[-1] = 0.0
+                        centering[-1] = self.pf.domain_left_edge[2]
                     start = (g.LeftEdge.ndarray_view()-centering).astype("int")
                     end = (g.RightEdge.ndarray_view()-centering).astype("int")
                     if self.folded:
-                        my_off = self.pf.line_database.get(fname, 0)\
-                            + self.pf.folded_width/2
+                        my_off = \
+                            self.pf.line_database.get(fname,
+                                                      self.pf.folded_width/2)\
+                            - self.pf.folded_width/2
+                        my_off = max(my_off, 0)
+                        my_off = min(my_off,
+                                     self.pf._unfolded_domain_dimensions[
+                                         self.pf.folded_axis]-1)
 
-                        start[-1] += my_off
-                        end[-1] += my_off
+                        start[-1] = start[-1] + my_off
+                        end[-1] = end[-1] + my_off
                         mylog.debug("Reading from " + str(start) + str(end))
                     if self.pf.dimensionality == 2:
                         nx, ny = g.ActiveDimensions[:2]
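
The offset arithmetic above reduces to looking up the line center, shifting it to the start of the folded window, and clamping to the unfolded domain so the read never leaves the array. With hypothetical numbers:

    # A line centered at channel 240 of a 256-channel cube, folded to 64 channels.
    line_center = 240        # from the line database (hypothetical)
    folded_width = 64
    unfolded_nz = 256        # unfolded size along the folded axis

    my_off = line_center - folded_width // 2  # start of the folded window
    my_off = max(my_off, 0)                   # clamp at the low end
    my_off = min(my_off, unfolded_nz - 1)     # clamp at the high end
    print(my_off)                             # 208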


https://bitbucket.org/yt_analysis/yt/commits/c4622c39de1f/
Changeset:   c4622c39de1f
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-12 18:37:17
Summary:     Merge
Affected #:  2 files

diff -r ed47f2c5951184bf0cf5f391aa2adab2befb1591 -r c4622c39de1ff67939818ef246687650c5765eaa yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -253,6 +253,7 @@
                  ):
         self.folded_axis = folded_axis
         self.folded_width = folded_width
+        self._unfolded_domain_dimensions = None
         if line_database is None:
             line_database = {}
         self.line_database = line_database
@@ -343,6 +344,7 @@
         if self.folded_axis is not None:
             self.domain_left_edge[self.folded_axis] = -self.folded_width/2.
             self.domain_right_edge[self.folded_axis] = self.folded_width/2.
+            self._unfolded_domain_dimensions = self.domain_dimensions.copy()
             self.domain_dimensions[self.folded_axis] = int(self.folded_width)
 
         if self.dimensionality == 2:

diff -r ed47f2c5951184bf0cf5f391aa2adab2befb1591 -r c4622c39de1ff67939818ef246687650c5765eaa yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -56,15 +56,21 @@
                 for g in chunk.objs:
                     centering = np.array([0.5]*3)
                     if self.folded:
-                        centering[-1] = 0.0
+                        centering[-1] = self.pf.domain_left_edge[2]
                     start = (g.LeftEdge.ndarray_view()-centering).astype("int")
                     end = (g.RightEdge.ndarray_view()-centering).astype("int")
                     if self.folded:
-                        my_off = self.pf.line_database.get(fname, 0)\
-                            + self.pf.folded_width/2
+                        my_off = \
+                            self.pf.line_database.get(fname,
+                                                      self.pf.folded_width/2)\
+                            - self.pf.folded_width/2
+                        my_off = max(my_off, 0)
+                        my_off = min(my_off,
+                                     self.pf._unfolded_domain_dimensions[
+                                         self.pf.folded_axis]-1)
 
-                        start[-1] += my_off
-                        end[-1] += my_off
+                        start[-1] = start[-1] + my_off
+                        end[-1] = end[-1] + my_off
                         mylog.debug("Reading from " + str(start) + str(end))
                     if self.pf.dimensionality == 2:
                         nx, ny = g.ActiveDimensions[:2]


https://bitbucket.org/yt_analysis/yt/commits/13bf37546b51/
Changeset:   13bf37546b51
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-12 18:56:16
Summary:     Unnecessary file now
Affected #:  1 file

diff -r c4622c39de1ff67939818ef246687650c5765eaa -r 13bf37546b518d7221c2ae298864ecfadca1e3c8 yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""
-FITS-specific miscellaneous functions
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-


https://bitbucket.org/yt_analysis/yt/commits/1894bcc0abe5/
Changeset:   1894bcc0abe5
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-12 19:13:50
Summary:     Implementing a "no_cgs_equiv_length" attribute for datasets, indicating that there is no verified cgs length equivalent. It is False by default; only the Athena and FITS frontends contain logic that can set it to True. In PlotWindow plots, if this attribute is True, the axes default to units of "code_length". It is still the case that 1 code_length = 1 cm, and the axes units can always be changed afterward (though doing so may not be physically meaningful).
Affected #:  4 files

diff -r 13bf37546b518d7221c2ae298864ecfadca1e3c8 -r 1894bcc0abe5859cebdb424e45626a9caf09812e yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -187,6 +187,7 @@
             pass
         self.print_key_parameters()
 
+        self.no_cgs_equiv_length = False
         self.set_units()
         self._set_derived_attrs()
         self._setup_classes()

diff -r 13bf37546b518d7221c2ae298864ecfadca1e3c8 -r 1894bcc0abe5859cebdb424e45626a9caf09812e yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -374,6 +374,7 @@
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
             val = self.specified_parameters.get("%s_unit" % unit, None)
             if val is None:
+                self.no_cgs_equiv_length = True
                 mylog.warning("No %s conversion to cgs provided.  " +
                               "Assuming 1.0 = 1.0 %s", unit, cgs)
                 val = 1.0

diff -r 13bf37546b518d7221c2ae298864ecfadca1e3c8 -r 1894bcc0abe5859cebdb424e45626a9caf09812e yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -308,6 +308,7 @@
             length_factor = self.pixel_scale
             length_unit = str(self.new_unit)
         else:
+            self.no_cgs_equiv_length = True
             mylog.warning("No length conversion provided. Assuming 1 = 1 cm.")
             length_factor = 1.0
             length_unit = "cm"

diff -r 13bf37546b518d7221c2ae298864ecfadca1e3c8 -r 1894bcc0abe5859cebdb424e45626a9caf09812e yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -204,6 +204,8 @@
     r"""
     Infers the axes unit names from the input width specification
     """
+    if pf.no_cgs_equiv_length:
+        return ("code_length",)*2
     if iterable(width):
         if isinstance(width[1], basestring):
             axes_unit = (width[1], width[1])
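
The effect of the guard is easiest to see in isolation: a dataset flagged as having no trustworthy cgs length conversion short-circuits the unit inference and keeps both axes in code_length. A toy sketch (FakeDataset is hypothetical):

    class FakeDataset(object):
        # Real frontends (Athena, FITS) set this during unit setup.
        no_cgs_equiv_length = True

    def infer_axes_units(pf):
        # Mirrors the guard added to plot_window.py above.
        if pf.no_cgs_equiv_length:
            return ("code_length",) * 2
        return ("cm", "cm")  # stand-in for the real width-based inference

    print(infer_axes_units(FakeDataset()))  # ('code_length', 'code_length')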


https://bitbucket.org/yt_analysis/yt/commits/a248869f48c7/
Changeset:   a248869f48c7
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-12 20:21:22
Summary:     Remove pixel units--we're sticking with code_length
Affected #:  2 files

diff -r 1894bcc0abe5859cebdb424e45626a9caf09812e -r a248869f48c734c4ebd870405da3137cc076ef6a yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -316,7 +316,6 @@
         self.mass_unit = self.quan(1.0, "g")
         self.time_unit = self.quan(1.0, "s")
         self.velocity_unit = self.quan(1.0, "cm/s")
-        self.unit_registry.modify("pixel", self.length_unit),
 
     def _parse_parameter_file(self):
         self.unique_identifier = \

diff -r 1894bcc0abe5859cebdb424e45626a9caf09812e -r a248869f48c734c4ebd870405da3137cc076ef6a yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -100,7 +100,6 @@
     "angstrom": (cm_per_ang, dimensions.length),
     "Jy": (jansky_cgs, dimensions.specific_flux),
     "beam": (1.0, dimensions.dimensionless),
-    "pixel": (1.0, dimensions.length),
 }
 
 # Add LaTeX representations for units with trivial representations.


https://bitbucket.org/yt_analysis/yt/commits/5b93dbd600b0/
Changeset:   5b93dbd600b0
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-13 05:47:29
Summary:     Bug fix
Affected #:  1 file

diff -r a248869f48c734c4ebd870405da3137cc076ef6a -r 5b93dbd600b07b26727984c2b8c3b22aa490da44 yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -66,7 +66,7 @@
         >>> f_deg.writeto("temp.fits")
         """
         
-        super(pyfits.HDUList, self).__init__()
+        super(FITSImageBuffer, self).__init__()
 
         if isinstance(fields, basestring): fields = [fields]
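
The one-line fix is a classic super() pitfall: super(pyfits.HDUList, self) starts the method lookup *after* HDUList in the MRO, so HDUList.__init__ never runs and the list machinery is never set up. A minimal illustration with stand-in classes:

    class Base(object):
        def __init__(self):
            self.items = []  # state every instance needs

    class Child(Base):
        def __init__(self):
            # Wrong: super(Base, self).__init__() would skip Base.__init__.
            # Right: name the *current* class so lookup starts below it.
            super(Child, self).__init__()

    print(Child().items)  # [] -- proof that Base.__init__ actually ran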
             


https://bitbucket.org/yt_analysis/yt/commits/79fdcecab09e/
Changeset:   79fdcecab09e
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-13 05:47:49
Summary:     Writing slices and projections to FITS files
Affected #:  1 file

diff -r 5b93dbd600b07b26727984c2b8c3b22aa490da44 -r 79fdcecab09e508682164449abf60de1a7495226 yt/frontends/fits/misc.py
--- /dev/null
+++ b/yt/frontends/fits/misc.py
@@ -0,0 +1,62 @@
+"""
+FITS-specific miscellaneous functions
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import ap
+from yt.funcs import ensure_list, fix_axis
+pyfits = ap.pyfits
+pywcs = ap.pywcs
+
+axis_wcs = [[1,2,0],[0,2,1],[0,1,2]]
+
+class FITSFile(pyfits.HDUList):
+    def __init__(self, ds, data_source, fields, axis):
+        super(FITSFile, self).__init__()
+        self.ds = ds
+        self.fields = fields
+        self.axis = axis
+        ndims = ds.dimensionality
+        dims = ds.domain_dimensions
+        nx, ny = dims[axis_wcs[axis][0]], dims[axis_wcs[axis][1]]
+        self._frb = data_source.to_frb((1.0,"unitary"), (nx,ny))
+        w = pywcs.WCS(naxis=ndims)
+        w.wcs.crpix = [self.ds.wcs.wcs.crpix[idx] for idx in axis_wcs[axis][:ndims]]
+        w.wcs.cdelt = [self.ds.wcs.wcs.cdelt[idx] for idx in axis_wcs[axis][:ndims]]
+        w.wcs.crval = [self.ds.wcs.wcs.crval[idx] for idx in axis_wcs[axis][:ndims]]
+        w.wcs.cunit = [str(self.ds.wcs.wcs.cunit[idx]) for idx in axis_wcs[axis][:ndims]]
+        w.wcs.ctype = [self.ds.wcs.wcs.ctype[idx] for idx in axis_wcs[axis][:ndims]]
+        im = self._frb[fields[0]].ndarray_view()
+        if ndims == 3: im = im.reshape(1,nx,ny)
+        self.append(pyfits.PrimaryHDU(im, header=w.to_header()))
+        if len(fields) > 1:
+            for field in fields[1:]:
+                im = self._frb[field].ndarray_view()
+                if ndims == 3: im = im.reshape(1,nx,ny)
+                self.append(pyfits.ImageHDU(im, header=w.to_header()))
+
+    def writeto(self, fileobj, **kwargs):
+        pyfits.HDUList(self).writeto(fileobj, **kwargs)
+
+class FITSSlice(FITSFile):
+    def __init__(self, ds, axis, fields, coord, **kwargs):
+        fields = ensure_list(fields)
+        axis = fix_axis(axis)
+        slc = ds.slice(axis, coord, **kwargs)
+        super(FITSSlice, self).__init__(ds, slc, fields, axis)
+
+class FITSProjection(FITSFile):
+    def __init__(self, ds, axis, fields, **kwargs):
+        fields = ensure_list(fields)
+        axis = fix_axis(axis)
+        prj = ds.proj(fields[0], axis, **kwargs)
+        super(FITSProjection, self).__init__(ds, prj, fields, axis)
+
+


https://bitbucket.org/yt_analysis/yt/commits/b8e830e75407/
Changeset:   b8e830e75407
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-13 15:56:00
Summary:     To avoid code duplication, this now subclasses FITSImageBuffer
Affected #:  1 file

diff -r 79fdcecab09e508682164449abf60de1a7495226 -r b8e830e75407fa59a97683f1aa226e2a4802c46e yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -12,38 +12,33 @@
 
 from .data_structures import ap
 from yt.funcs import ensure_list, fix_axis
+from yt.utilities.fits_image import FITSImageBuffer
 pyfits = ap.pyfits
 pywcs = ap.pywcs
 
 axis_wcs = [[1,2,0],[0,2,1],[0,1,2]]
 
-class FITSFile(pyfits.HDUList):
+class FITSFile(FITSImageBuffer):
     def __init__(self, ds, data_source, fields, axis):
-        super(FITSFile, self).__init__()
         self.ds = ds
         self.fields = fields
         self.axis = axis
         ndims = ds.dimensionality
         dims = ds.domain_dimensions
         nx, ny = dims[axis_wcs[axis][0]], dims[axis_wcs[axis][1]]
-        self._frb = data_source.to_frb((1.0,"unitary"), (nx,ny))
+        frb = data_source.to_frb((1.0,"unitary"), (nx,ny))
         w = pywcs.WCS(naxis=ndims)
         w.wcs.crpix = [self.ds.wcs.wcs.crpix[idx] for idx in axis_wcs[axis][:ndims]]
         w.wcs.cdelt = [self.ds.wcs.wcs.cdelt[idx] for idx in axis_wcs[axis][:ndims]]
         w.wcs.crval = [self.ds.wcs.wcs.crval[idx] for idx in axis_wcs[axis][:ndims]]
         w.wcs.cunit = [str(self.ds.wcs.wcs.cunit[idx]) for idx in axis_wcs[axis][:ndims]]
         w.wcs.ctype = [self.ds.wcs.wcs.ctype[idx] for idx in axis_wcs[axis][:ndims]]
-        im = self._frb[fields[0]].ndarray_view()
-        if ndims == 3: im = im.reshape(1,nx,ny)
-        self.append(pyfits.PrimaryHDU(im, header=w.to_header()))
-        if len(fields) > 1:
-            for field in fields[1:]:
-                im = self._frb[field].ndarray_view()
-                if ndims == 3: im = im.reshape(1,nx,ny)
-                self.append(pyfits.ImageHDU(im, header=w.to_header()))
-
-    def writeto(self, fileobj, **kwargs):
-        pyfits.HDUList(self).writeto(fileobj, **kwargs)
+        buffer = {}
+        for field in fields:
+            im = frb[field]
+            if ndims == 3: im = im.reshape(1,nx,ny)
+            buffer[field] = im
+        super(FITSFile, self).__init__(buffer, fields=fields, wcs=w)
 
 class FITSSlice(FITSFile):
     def __init__(self, ds, axis, fields, coord, **kwargs):


https://bitbucket.org/yt_analysis/yt/commits/7abb5b4d28a1/
Changeset:   7abb5b4d28a1
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-13 17:21:59
Summary:     Some docstrings
Affected #:  1 file

diff -r b8e830e75407fa59a97683f1aa226e2a4802c46e -r 7abb5b4d28a1c2cbdc1dd8c908e7673a9b2e24c2 yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -41,6 +41,21 @@
         super(FITSFile, self).__init__(buffer, fields=fields, wcs=w)
 
 class FITSSlice(FITSFile):
+    r"""
+    Write an on-axis slice of a FITS cube to disk.
+
+    Parameters
+    ----------
+    ds : FITSDataset
+        The FITS dataset object.
+    axis : character or integer
+        The axis of the slice. One of "x","y","z", or
+        0,1,2.
+    fields : string or list of strings
+        The fields to slice
+    coord : float
+        The coordinate in pixel units (code length) of the slice.
+    """
     def __init__(self, ds, axis, fields, coord, **kwargs):
         fields = ensure_list(fields)
         axis = fix_axis(axis)
@@ -48,6 +63,19 @@
         super(FITSSlice, self).__init__(ds, slc, fields, axis)
 
 class FITSProjection(FITSFile):
+    r"""
+    Write an on-axis projection of a FITS cube to disk.
+
+    Parameters
+    ----------
+    ds : FITSDataset
+        The FITS dataset object.
+    axis : character or integer
+        The axis along which to project. One of "x","y","z", or
+        0,1,2.
+    fields : string or list of strings
+        The fields to project
+    """
     def __init__(self, ds, axis, fields, **kwargs):
         fields = ensure_list(fields)
         axis = fix_axis(axis)


https://bitbucket.org/yt_analysis/yt/commits/5fd4458c1efa/
Changeset:   5fd4458c1efa
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-13 18:04:16
Summary:     Fixing .hgtags newline
Affected #:  1 file

diff -r 7abb5b4d28a1c2cbdc1dd8c908e7673a9b2e24c2 -r 5fd4458c1efa89815bd2b174409a721b0a286254 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5159,4 +5159,4 @@
 a71dffe4bc813fdadc506ccad9efb632e23dc843 yt-3.0a1
 954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
 f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3
-079e456c38a87676472a458210077e2be325dc85 last_gplv3
\ No newline at end of file
+079e456c38a87676472a458210077e2be325dc85 last_gplv3


https://bitbucket.org/yt_analysis/yt/commits/9f3e3bf21b95/
Changeset:   9f3e3bf21b95
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-14 04:55:42
Summary:     Grab scaling information from FITS files. This will almost never be used, but we should check for it.
Affected #:  2 files

diff -r 5fd4458c1efa89815bd2b174409a721b0a286254 -r 9f3e3bf21b95b4f9e5c3c3d45f46a9324b0a6247 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -132,6 +132,7 @@
         self._axis_map = {}
         self._file_map = {}
         self._ext_map = {}
+        self._scale_map {}
         # We create a field from each slice on the 4th axis
         if self.parameter_file.naxis == 4:
             naxis4 = self.parameter_file.primary_header["naxis4"]
@@ -157,6 +158,11 @@
                         self._axis_map[fname] = k
                         self._file_map[fname] = fits_file
                         self._ext_map[fname] = j
+                        self._scale_map[fname] = [0.0,1.0]
+                        if "bzero" in h.header:
+                            self._scale_map[fname][0] = h.header["bzero"]
+                        if "bscale" in h.header:
+                            self._scale_map[fname][1] = h.header["bscale"]
                         self.field_list.append((self.dataset_type, fname))
                         mylog.info("Adding field %s to the list of fields." % (fname))
                         self._determine_image_units(fname, h.header)
@@ -303,7 +309,6 @@
         """
         Generates the conversion to various physical _units based on the parameter file
         """
-        from yt.units.dimensions import length
         if self.new_unit is not None:
             length_factor = self.pixel_scale
             length_unit = str(self.new_unit)

diff -r 5fd4458c1efa89815bd2b174409a721b0a286254 -r 9f3e3bf21b95b4f9e5c3c3d45f46a9324b0a6247 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -50,6 +50,7 @@
                 fname = self.pf.field_list[0][1]
             f = self.pf.index._file_map[fname]
             ds = f[self.pf.index._ext_map[fname]]
+            bzero, bscale = self._scale_map[fname]
             fname = tmp_fname
             ind = 0
             for chunk in chunks:
@@ -83,5 +84,6 @@
                     else:
                         data = ds.data[start[2]:end[2],start[1]:end[1],start[0]:end[0]].transpose()
                     if self.pf.mask_nans: data[np.isnan(data)] = 0.0
+                    data = bzero + bscale*data
                     ind += g.select(selector, data.astype("float64"), rv[field], ind)
         return rv
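
BZERO and BSCALE implement the standard FITS linear scaling, physical = BZERO + BSCALE * stored. Because the files are opened with do_not_scale_image_data=True, the frontend applies the transform by hand, as the io.py hunk does. The arithmetic with hypothetical header values:

    import numpy as np

    raw = np.array([0, 100, 200], dtype=np.int16)  # stored pixel values
    bzero, bscale = 32768.0, 0.5                   # hypothetical header keywords

    physical = bzero + bscale * raw                # conversion applied above
    print(physical)                                # [ 32768.   32818.   32868.]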


https://bitbucket.org/yt_analysis/yt/commits/72c9d17ec11f/
Changeset:   72c9d17ec11f
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-14 14:11:56
Summary:     Oops
Affected #:  1 file

diff -r 9f3e3bf21b95b4f9e5c3c3d45f46a9324b0a6247 -r 72c9d17ec11f7ee9a1f02d04b0eada2e1b121c3d yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -132,7 +132,7 @@
         self._axis_map = {}
         self._file_map = {}
         self._ext_map = {}
-        self._scale_map {}
+        self._scale_map = {}
         # We create a field from each slice on the 4th axis
         if self.parameter_file.naxis == 4:
             naxis4 = self.parameter_file.primary_header["naxis4"]


https://bitbucket.org/yt_analysis/yt/commits/ba22d466f3a3/
Changeset:   ba22d466f3a3
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-14 20:49:16
Summary:     Refactored decompose_array. All tests pass. Included the ignore_blank parameter in the call to pyfits.open as an optimization.
Affected #:  4 files

diff -r 72c9d17ec11f7ee9a1f02d04b0eada2e1b121c3d -r ba22d466f3a38f82df2ece5353a3a1664065ec0d yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -33,8 +33,8 @@
     io_registry
 from .fields import FITSFieldInfo
 from yt.utilities.decompose import \
-    decompose_array, get_psize, decompose_array_nocopy
-from yt.units.unit_lookup_table import default_unit_symbol_lut, prefixable_units
+    decompose_array, get_psize
+from yt.units.unit_lookup_table import default_unit_symbol_lut
 
 class astropy_imports:
     _pyfits = None
@@ -191,7 +191,7 @@
             bbox = np.array([[le,re] for le, re in zip(pf.domain_left_edge,
                                                        pf.domain_right_edge)])
             psize = get_psize(np.array(pf.domain_dimensions), pf.nprocs)
-            gle, gre, shapes = decompose_array_nocopy(pf.domain_dimensions, psize, bbox)
+            gle, gre, shapes, slices = decompose_array(pf.domain_dimensions, psize, bbox)
             self.grid_left_edge = self.pf.arr(gle, "code_length")
             self.grid_right_edge = self.pf.arr(gre, "code_length")
             self.grid_dimensions = np.array([shape for shape in shapes], dtype="int32")
@@ -273,7 +273,8 @@
         self.nprocs = nprocs
         self._handle = ap.pyfits.open(self.filenames[0],
                                       memmap=True,
-                                      do_not_scale_image_data=True)
+                                      do_not_scale_image_data=True,
+                                      ignore_blank=True)
         self._fits_files = [self._handle]
         if self.num_files > 1:
             for fits_file in slave_files:

diff -r 72c9d17ec11f7ee9a1f02d04b0eada2e1b121c3d -r ba22d466f3a38f82df2ece5353a3a1664065ec0d yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -614,10 +614,11 @@
         new_data = {}
         for key in data.keys():
             psize = get_psize(np.array(data[key].shape), nprocs)
-            grid_left_edges, grid_right_edges, temp[key] = \
-                             decompose_array(data[key], psize, bbox)
-            grid_dimensions = np.array([grid.shape for grid in temp[key]],
+            grid_left_edges, grid_right_edges, shapes, slices = \
+                             decompose_array(data[key].shape, psize, bbox)
+            grid_dimensions = np.array([shape for shape in shapes],
                                        dtype="int32")
+            temp[key] = [data[key][slice] for slice in slices]
         for gid in range(nprocs):
             new_data[gid] = {}
             for key in temp.keys():

diff -r 72c9d17ec11f7ee9a1f02d04b0eada2e1b121c3d -r ba22d466f3a38f82df2ece5353a3a1664065ec0d yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -30,29 +30,7 @@
     if max_prime > 1:
         yield max_prime
 
-
-def decompose_array(arr, psize, bbox):
-    """ Calculate list of product(psize) subarrays of arr, along with their
-        left and right edges
-    """
-    grid_left_edges = np.empty([np.product(psize), 3], dtype=np.float64)
-    grid_right_edges = np.empty([np.product(psize), 3], dtype=np.float64)
-    n_d = arr.shape
-    d_s = (bbox[:, 1] - bbox[:, 0]) / n_d
-    dist = np.mgrid[bbox[0, 0]:bbox[0, 1]:d_s[0],
-                    bbox[1, 0]:bbox[1, 1]:d_s[1],
-                    bbox[2, 0]:bbox[2, 1]:d_s[2]]
-    for i in range(3):
-        xyz = split_array(dist[i], psize)
-        for j in range(np.product(psize)):
-            grid_left_edges[j, i] = xyz[j][0, 0, 0]
-            grid_right_edges[j, i] = xyz[j][-1, -1, -1] + d_s[i]
-        del xyz
-    del dist
-    patches = split_array(arr, psize)
-    return grid_left_edges, grid_right_edges, patches
-
-def decompose_array_nocopy(shape, psize, bbox):
+def decompose_array(shape, psize, bbox):
     """ Calculate list of product(psize) subarrays of arr, along with their
         left and right edges
     """
@@ -60,10 +38,9 @@
     grid_right_edges = np.empty([np.product(psize), 3], dtype=np.float64)
     n_d = shape
     d_s = (bbox[:, 1] - bbox[:, 0]) / n_d
-    grid_left_edges, grid_right_edges = \
-            split_array_alt(bbox[:, 0], bbox[:, 1], shape, psize)
-    shapes = split_array_nocopy(shape, psize)
-    return grid_left_edges, grid_right_edges, shapes
+    grid_left_edges, grid_right_edges, shapes, slices = \
+            split_array(bbox[:, 0], bbox[:, 1], shape, psize)
+    return grid_left_edges, grid_right_edges, shapes, slices
 
 
 def evaluate_domain_decomposition(n_d, pieces, ldom):
@@ -140,27 +117,14 @@
 
     return p_size
 
-
-def split_array(tab, psize):
-    """ Split array into px*py*pz subarrays. """
-    n_d = np.array(tab.shape, dtype=np.int64)
-    slices = []
-    for i in range(psize[0]):
-        for j in range(psize[1]):
-            for k in range(psize[2]):
-                piece = np.array((i, j, k), dtype=np.int64)
-                lei = n_d * piece / psize
-                rei = n_d * (piece + np.ones(3, dtype=np.int64)) / psize
-                slices.append(np.s_[lei[0]:rei[0], lei[1]:
-                                    rei[1], lei[2]:rei[2]])
-    return [tab[slc] for slc in slices]
-
-def split_array_alt(gle, gre, shape, psize):
+def split_array(gle, gre, shape, psize):
     """ Split array into px*py*pz subarrays. """
     n_d = np.array(shape, dtype=np.int64)
     dds = (gre-gle)/shape
     left_edges = []
     right_edges = []
+    shapes = []
+    slices = []
     for i in range(psize[0]):
         for j in range(psize[1]):
             for k in range(psize[2]):
@@ -171,19 +135,8 @@
                 lre = gle + rei*dds
                 left_edges.append(lle)
                 right_edges.append(lre)
+                shapes.append(rei-lei)
+                slices.append(np.s_[lei[0]:rei[0], lei[1]:
+                                    rei[1], lei[2]:rei[2]])
 
-    return left_edges, right_edges 
-
-
-def split_array_nocopy(shape, psize):
-    """ Split array into px*py*pz subarrays. """
-    n_d = np.array(shape, dtype=np.int64)
-    shapes = []
-    for i in range(psize[0]):
-        for j in range(psize[1]):
-            for k in range(psize[2]):
-                piece = np.array((i, j, k), dtype=np.int64)
-                lei = n_d * piece / psize
-                rei = n_d * (piece + np.ones(3, dtype=np.int64)) / psize
-                shapes.append(rei-lei)
-    return shapes 
+    return left_edges, right_edges, shapes, slices

diff -r 72c9d17ec11f7ee9a1f02d04b0eada2e1b121c3d -r ba22d466f3a38f82df2ece5353a3a1664065ec0d yt/utilities/tests/test_decompose.py
--- a/yt/utilities/tests/test_decompose.py
+++ b/yt/utilities/tests/test_decompose.py
@@ -39,8 +39,10 @@
 def test_decomposition_2d():
     array = np.ones((7, 5, 1))
     bbox = np.array([[-0.7, 0.0], [1.5, 2.0], [0.0, 0.7]])
-    ledge, redge, data = dec.decompose_array(array, np.array([2, 3, 1]), bbox)
+    ledge, redge, shapes, slices = dec.decompose_array(array.shape,
+                                                       np.array([2, 3, 1]), bbox)
 
+    data = [array[slice] for slice in slices]
     assert_array_equal(data[1].shape, np.array([3, 2, 1]))
 
     gold_le = np.array([
@@ -62,7 +64,10 @@
     array = np.ones((33, 35, 37))
     bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
 
-    ledge, redge, data = dec.decompose_array(array, np.array([3, 2, 2]), bbox)
+    ledge, redge, shapes, slices = dec.decompose_array(array.shape,
+                                                       np.array([3, 2, 2]), bbox)
+    data = [array[slice] for slice in slices]
+
     assert_array_equal(data[0].shape, np.array([11, 17, 18]))
 
     gold_le = np.array(
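
With this refactor decompose_array never touches the data: it returns edges, shapes, and slice objects, and callers slice the original array only if they actually need the patches. A usage sketch against the new signature:

    import numpy as np
    from yt.utilities.decompose import decompose_array, get_psize

    shape = np.array([32, 32, 32])
    bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
    psize = get_psize(shape, 4)  # decompose into 4 pieces

    gle, gre, shapes, slices = decompose_array(shape, psize, bbox)

    # Materialize patches only when the data itself is needed:
    data = np.random.random(shape)
    patches = [data[slc] for slc in slices]
    print(len(patches), shapes[0])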


https://bitbucket.org/yt_analysis/yt/commits/d05200032ecb/
Changeset:   d05200032ecb
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-14 22:10:16
Summary:     Turns out this was never used
Affected #:  1 file

diff -r ba22d466f3a38f82df2ece5353a3a1664065ec0d -r d05200032ecbb7e88c7e686520250916bc96c7ea yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -14,7 +14,7 @@
 from yt.data_objects.time_series import DatasetSeries
 from yt.utilities.lib.CICDeposit import CICSample_3
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    communication_system, parallel_root_only
+    parallel_root_only
 from yt.funcs import *
 from yt.units.yt_array import array_like_field
 from yt.config import ytcfg
@@ -23,8 +23,6 @@
 import numpy as np
 import h5py
 
-comm = communication_system.communicators[-1]
-
 class ParticleTrajectories(object):
     r"""A collection of particle trajectories in time over a series of
     parameter files. 


https://bitbucket.org/yt_analysis/yt/commits/af0ed629b326/
Changeset:   af0ed629b326
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-14 22:48:47
Summary:     Modifying code as per suggestions
Affected #:  2 files

diff -r d05200032ecbb7e88c7e686520250916bc96c7ea -r af0ed629b326e045fe2c3e1ba18fe9d3296d6ac3 yt/analysis_modules/xyv_cube/xyv_cube.py
--- a/yt/analysis_modules/xyv_cube/xyv_cube.py
+++ b/yt/analysis_modules/xyv_cube/xyv_cube.py
@@ -83,10 +83,10 @@
 
         fd = dd._determine_fields(field)[0]
 
-        self.field_units = ds.field_info[fd].units
+        self.field_units = ds._get_field_info(fd).units
 
         if velocity_bounds is None:
-            vmin, vmax = dd.quantities["Extrema"]("velocity_magnitude")
+            vmin, vmax = dd.quantities.extrema("velocity_magnitude")
             self.v_bnd = -vmax, vmax
         else:
             self.v_bnd = (ds.arr(velocity_bounds[0], velocity_bounds[2]),

diff -r d05200032ecbb7e88c7e686520250916bc96c7ea -r af0ed629b326e045fe2c3e1ba18fe9d3296d6ac3 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -81,7 +81,7 @@
 
 delimiters = ["*", "/", "-", "^"]
 delimiters += [str(i) for i in xrange(10)]
-regex_pattern = '|'.join(map(re.escape, delimiters))
+regex_pattern = '|'.join(re.escape(_) for _ in delimiters)
 
 class FITSGrid(AMRGridPatch):
     _id_offset = 0
@@ -125,7 +125,7 @@
                     field_units = field_units.replace(unit, known_units[unit])
             self.parameter_file.field_units[fname] = field_units
         except:
-            pass
+            self.parameter_file.field_units[fname] = "dimensionless"
 
     def _detect_output_fields(self):
         self.field_list = []
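
The regex built from the delimiter list splits a FITS BUNIT-style string into candidate unit tokens, which are then checked against the known-units table (with the new "dimensionless" fallback when nothing parses). The pattern in isolation:

    import re

    delimiters = ["*", "/", "-", "^"] + [str(i) for i in range(10)]
    regex_pattern = '|'.join(re.escape(d) for d in delimiters)

    # Hypothetical BUNIT string; empty tokens come from adjacent delimiters.
    print(re.split(regex_pattern, "erg/cm^2/s"))  # ['erg', 'cm', '', '', 's']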


https://bitbucket.org/yt_analysis/yt/commits/67402286e23f/
Changeset:   67402286e23f
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-15 05:30:47
Summary:     Moved FITSSlice and FITSProjection to yt.utilities.fits_image, and made it possible to create them from any dataset.
Affected #:  2 files

diff -r af0ed629b326e045fe2c3e1ba18fe9d3296d6ac3 -r 67402286e23f65493338974440376c04fe2f5351 yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ /dev/null
@@ -1,85 +0,0 @@
-"""
-FITS-specific miscellaneous functions
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2014, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .data_structures import ap
-from yt.funcs import ensure_list, fix_axis
-from yt.utilities.fits_image import FITSImageBuffer
-pyfits = ap.pyfits
-pywcs = ap.pywcs
-
-axis_wcs = [[1,2,0],[0,2,1],[0,1,2]]
-
-class FITSFile(FITSImageBuffer):
-    def __init__(self, ds, data_source, fields, axis):
-        self.ds = ds
-        self.fields = fields
-        self.axis = axis
-        ndims = ds.dimensionality
-        dims = ds.domain_dimensions
-        nx, ny = dims[axis_wcs[axis][0]], dims[axis_wcs[axis][1]]
-        frb = data_source.to_frb((1.0,"unitary"), (nx,ny))
-        w = pywcs.WCS(naxis=ndims)
-        w.wcs.crpix = [self.ds.wcs.wcs.crpix[idx] for idx in axis_wcs[axis][:ndims]]
-        w.wcs.cdelt = [self.ds.wcs.wcs.cdelt[idx] for idx in axis_wcs[axis][:ndims]]
-        w.wcs.crval = [self.ds.wcs.wcs.crval[idx] for idx in axis_wcs[axis][:ndims]]
-        w.wcs.cunit = [str(self.ds.wcs.wcs.cunit[idx]) for idx in axis_wcs[axis][:ndims]]
-        w.wcs.ctype = [self.ds.wcs.wcs.ctype[idx] for idx in axis_wcs[axis][:ndims]]
-        buffer = {}
-        for field in fields:
-            im = frb[field]
-            if ndims == 3: im = im.reshape(1,nx,ny)
-            buffer[field] = im
-        super(FITSFile, self).__init__(buffer, fields=fields, wcs=w)
-
-class FITSSlice(FITSFile):
-    r"""
-    Write an on-axis slice of a FITS cube to disk.
-
-    Parameters
-    ----------
-    ds : FITSDataset
-        The FITS dataset object.
-    axis : character or integer
-        The axis of the slice. One of "x","y","z", or
-        0,1,2.
-    fields : string or list of strings
-        The fields to slice
-    coord : float
-        The coordinate in pixel units (code length) of the slice.
-    """
-    def __init__(self, ds, axis, fields, coord, **kwargs):
-        fields = ensure_list(fields)
-        axis = fix_axis(axis)
-        slc = ds.slice(axis, coord, **kwargs)
-        super(FITSSlice, self).__init__(ds, slc, fields, axis)
-
-class FITSProjection(FITSFile):
-    r"""
-    Write an on-axis projection of a FITS cube to disk.
-
-    Parameters
-    ----------
-    ds : FITSDataset
-        The FITS dataset object.
-    axis : character or integer
-        The axis along which to project. One of "x","y","z", or
-        0,1,2.
-    fields : string or list of strings
-        The fields to project
-    """
-    def __init__(self, ds, axis, fields, **kwargs):
-        fields = ensure_list(fields)
-        axis = fix_axis(axis)
-        prj = ds.proj(fields[0], axis, **kwargs)
-        super(FITSProjection, self).__init__(ds, prj, fields, axis)
-
-

diff -r af0ed629b326e045fe2c3e1ba18fe9d3296d6ac3 -r 67402286e23f65493338974440376c04fe2f5351 yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -11,7 +11,7 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-from yt.funcs import mylog, iterable
+from yt.funcs import mylog, iterable, fix_axis, ensure_list
 from yt.visualization.fixed_resolution import FixedResolutionBuffer
 from yt.data_objects.construction_data_containers import YTCoveringGridBase
 from yt.frontends.fits.data_structures import ap
@@ -24,11 +24,13 @@
                  center=None, scale=None, wcs=None):
         r""" Initialize a FITSImageBuffer object.
 
-        FITSImageBuffer contains a list of FITS ImageHDU instances, and optionally includes
-        WCS information. It inherits from HDUList, so operations such as `writeto` are
-        enabled. Images can be constructed from ImageArrays, NumPy arrays, dicts of such
-        arrays, FixedResolutionBuffers, and YTCoveringGrids. The latter
-        two are the most powerful because WCS information can be constructed from their coordinates.
+        FITSImageBuffer contains a list of FITS ImageHDU instances, and
+        optionally includes WCS information. It inherits from HDUList, so
+        operations such as `writeto` are enabled. Images can be constructed
+        from ImageArrays, NumPy arrays, dicts of such arrays,
+        FixedResolutionBuffers, and YTCoveringGrids. The latter two are the
+        most powerful because WCS information can be constructed from their
+        coordinates.
 
         Parameters
         ----------
@@ -36,9 +38,9 @@
             ImageArray, an numpy.ndarray, or dict of such arrays
             The data to be made into a FITS image or images.
         fields : single string or list of strings, optional
-            The field names for the data. If *fields* is none and *data* has keys,
-            it will use these for the fields. If *data* is just a single array one field name
-            must be specified.
+            The field names for the data. If *fields* is none and *data* has
+            keys, it will use these for the fields. If *data* is just a
+            single array one field name must be specified.
         units : string
             The units of the WCS coordinates, default "cm". 
         center : array_like, optional
@@ -233,13 +235,90 @@
 
     def to_aplpy(self, **kwargs):
         """
-        Use APLpy (http://aplpy.github.io) for plotting. Returns an `aplpy.FITSFigure`
-        instance. All keyword arguments are passed to the
+        Use APLpy (http://aplpy.github.io) for plotting. Returns an
+        `aplpy.FITSFigure` instance. All keyword arguments are passed to the
         `aplpy.FITSFigure` constructor.
         """
         import aplpy
         return aplpy.FITSFigure(self, **kwargs)
 
+axis_wcs = [[1,2],[0,2],[0,1]]
+
+def construct_image(data_source):
+    ds = data_source.pf
+    axis = data_source.axis
+    if hasattr(ds, "wcs"):
+        # This is a FITS dataset
+        nx, ny = ds.domain_dimensions[axis_wcs[axis]]
+        crpix = [ds.wcs.wcs.crpix[idx] for idx in axis_wcs[axis]]
+        cdelt = [ds.wcs.wcs.cdelt[idx] for idx in axis_wcs[axis]]
+        crval = [ds.wcs.wcs.crval[idx] for idx in axis_wcs[axis]]
+        cunit = [str(ds.wcs.wcs.cunit[idx]) for idx in axis_wcs[axis]]
+        ctype = [ds.wcs.wcs.ctype[idx] for idx in axis_wcs[axis]]
+    else:
+        # This is some other kind of dataset
+        unit = ds.get_smallest_appropriate_unit(ds.domain_width.max())
+        dx = ds.index.get_smallest_dx()
+        nx, ny = (ds.domain_width[axis_wcs[axis]]/dx).ndarray_view().astype("int")
+        crpix = [0.5*(nx+1), 0.5*(ny+1)]
+        cdelt = [dx.in_units(unit)]*2
+        crval = [ds.domain_center[idx].in_units(unit) for idx in axis_wcs[axis]]
+        cunit = [unit]*2
+        ctype = ["LINEAR"]*2
+    frb = data_source.to_frb((1.0,"unitary"), (nx,ny))
+    w = pywcs.WCS(naxis=2)
+    w.wcs.crpix = crpix
+    w.wcs.cdelt = cdelt
+    w.wcs.crval = crval
+    w.wcs.cunit = cunit
+    w.wcs.ctype = ctype
+    return w, frb
+
+class FITSSlice(FITSImageBuffer):
+    r"""
+    Generate a FITSImageBuffer of an on-axis slice.
+
+    Parameters
+    ----------
+    ds : FITSDataset
+        The FITS dataset object.
+    axis : character or integer
+        The axis of the slice. One of "x","y","z", or 0,1,2.
+    fields : string or list of strings
+        The fields to slice
+    coord : float
+        The coordinate in pixel units (code length) of the slice along *axis*.
+    """
+    def __init__(self, ds, axis, fields, coord, **kwargs):
+        fields = ensure_list(fields)
+        axis = fix_axis(axis)
+        slc = ds.slice(axis, coord, **kwargs)
+        w, frb = construct_image(slc)
+        super(FITSSlice, self).__init__(frb, fields=fields, wcs=w)
+
+class FITSProjection(FITSImageBuffer):
+    r"""
+    Generate a FITSImageBuffer of an on-axis projection.
+
+    Parameters
+    ----------
+    ds : FITSDataset
+        The FITS dataset object.
+    axis : character or integer
+        The axis along which to project. One of "x","y","z", or 0,1,2.
+    fields : string or list of strings
+        The fields to project
+    weight_field : string
+        The field used to weight the projection.
+    """
+    def __init__(self, ds, axis, fields, weight_field=None, **kwargs):
+        fields = ensure_list(fields)
+        axis = fix_axis(axis)
+        prj = ds.proj(fields[0], axis, weight_field=weight_field, **kwargs)
+        w, frb = construct_image(prj)
+        super(FITSProjection, self).__init__(frb, fields=fields, wcs=w)
+
+
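
After the move, FITSSlice and FITSProjection no longer require a FITS dataset: construct_image falls back to a linear WCS built from the domain for anything without a wcs attribute. A hedged usage sketch in the yt-3.0 style of the era (the dataset path is hypothetical):

    from yt.mods import *
    from yt.utilities.fits_image import FITSSlice

    ds = load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")  # hypothetical
    slc = FITSSlice(ds, "z", "density", 0.0)  # axis, field, slice coordinate
    slc.writeto("density_slice.fits", clobber=True)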
 
         
 


https://bitbucket.org/yt_analysis/yt/commits/f7e49d290fdf/
Changeset:   f7e49d290fdf
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-15 07:13:28
Summary:     Merge
Affected #:  46 files

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5160,3 +5160,4 @@
 954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
 f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3
 079e456c38a87676472a458210077e2be325dc85 last_gplv3
+f327552a6ede406b82711fb800ebcd5fe692d1cb yt-3.0a4

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:52f186664831f5290b31ec433114927b9771e224bd79d0c82dd3d9a8d9c09bf6"
+  "signature": "sha256:5d881061b9e82bd9df5d3598983c8ddc5fbec35e3bf7ae4524430dc558e27489"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -307,7 +307,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "For convenience, `unit_quantity` is also available via `uq` and `unit_array` is available via `ua`:"
+      "For convenience, `unit_quantity` is also available via `uq` and `unit_array` is available via `ua`.  You can use these arrays to create dummy arrays with the same units as another array - this is sometimes easier than manually creating a new array or quantity."
      ]
     },
     {
@@ -402,11 +402,13 @@
       "\n",
       "print a/b\n",
       "print (a/b).in_cgs()\n",
+      "print (a/b).in_mks()\n",
       "print (a/b).in_units('km/s')\n",
       "print ''\n",
       "\n",
       "print a*b\n",
-      "print (a*b).in_cgs()"
+      "print (a*b).in_cgs()\n",
+      "print (a*b).in_mks()"
      ],
      "language": "python",
      "metadata": {},
@@ -433,7 +435,10 @@
       "from yt.utilities.physical_constants import G, kboltz\n",
       "\n",
       "print \"Newton's constant: \", G\n",
-      "print \"Boltzmann constant: \", kboltz"
+      "print \"Newton's constant in MKS: \", G.in_mks(), \"\\n\"\n",
+      "\n",
+      "print \"Boltzmann constant: \", kboltz\n",
+      "print \"Boltzmann constant in MKS: \", kboltz.in_mks()"
      ],
      "language": "python",
      "metadata": {},

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
--- a/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
+++ b/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:8e1a5db9e3869bcf761ff39c5a95d21458b7c4205f00da3d3f973d398422a466"
+  "signature": "sha256:9e7ac626b3609cf5f3fb2d4ebc6e027ed923ab1c22f0acc212e42fc7535e3205"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -73,6 +73,7 @@
       "mass = dd['cell_mass']\n",
       "\n",
       "print \"Cell Masses in CGS: \\n\", mass, \"\\n\"\n",
+      "print \"Cell Masses in MKS: \\n\", mass.in_mks(), \"\\n\"\n",
       "print \"Cell Masses in Solar Masses: \\n\", mass.in_units('Msun'), \"\\n\"\n",
       "print \"Cell Masses in code units: \\n\", mass.in_units('code_mass'), \"\\n\""
      ],
@@ -87,6 +88,7 @@
       "dx = dd['dx']\n",
       "print \"Cell dx in code units: \\n\", dx, \"\\n\"\n",
       "print \"Cell dx in centimeters: \\n\", dx.in_cgs(), \"\\n\"\n",
+      "print \"Cell dx in meters: \\n\", dx.in_units('m'), \"\\n\"\n",
       "print \"Cell dx in megaparsecs: \\n\", dx.in_units('Mpc'), \"\\n\""
      ],
      "language": "python",
@@ -109,8 +111,10 @@
       "\n",
       "* `in_units`\n",
       "* `in_cgs`\n",
+      "* `in_mks`\n",
       "* `convert_to_units`\n",
-      "* `convert_to_cgs`"
+      "* `convert_to_cgs`\n",
+      "* `convert_to_mks`"
      ]
     },
     {
@@ -134,15 +138,16 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "The second, `in_cgs`, returns a copy of the array converted into the base units of yt's CGS unit system:"
+      "`in_cgs` and `in_mks` return a copy of the array converted CGS and MKS units, respectively:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "print (dd['pressure']/dd['density'])\n",
-      "print (dd['pressure']/dd['density']).in_cgs()"
+      "print (dd['pressure'])\n",
+      "print (dd['pressure']).in_cgs()\n",
+      "print (dd['pressure']).in_mks()"
      ],
      "language": "python",
      "metadata": {},

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
--- a/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
+++ b/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:a07224c25b1d938bc1014b6d9d09c1a2392912f21b821b07615e65302677ef9b"
+  "signature": "sha256:242d7005d45a82744713bfe6389e49d47f39b524d1e7fcbf5ceb2e65dc473e68"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -20,77 +20,6 @@
      "level": 3,
      "metadata": {},
      "source": [
-      "The unit registry"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "When a dataset is loaded, we attempt to detect and assign conversion factors from the internal simulation coordinate system and the physical CGS system.  These conversion factors are stored in a `unit_registry` along with conversion factors to the other known unit symbols:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.mods import *\n",
-      "\n",
-      "ds = load('Enzo_64/DD0043/data0043')\n",
-      "\n",
-      "ds.unit_registry"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.unit_registry.lut"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "\n",
-      "It is not necessary to specify a unit registry when creating a new `YTArray` or `YTQuantity` since `yt` ships with a default unit registry:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.units.unit_object import default_unit_registry as reg\n",
-      "\n",
-      "unit_names = reg.lut.keys()\n",
-      "unit_names.sort()\n",
-      "\n",
-      "# Print out the first 10 unit names\n",
-      "for i in range(10):\n",
-      "    print unit_names[i], reg.lut[unit_names[i]]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Each entry in the lookup table is the string name of a base unit and a tuple containing the CGS conversion factor and dimensions of the unit symbol."
-     ]
-    },
-    {
-     "cell_type": "heading",
-     "level": 3,
-     "metadata": {},
-     "source": [
       "Code units"
      ]
     },
@@ -98,25 +27,6 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Some of the most interesting unit symbols are the ones for \"code\" units:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "code_unit_names = [un for un in unit_names if 'code_' in un]\n",
-      "\n",
-      "print code_unit_names"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
       "Let's take a look at a cosmological enzo dataset to play with converting between physical units and code units:"
      ]
     },
@@ -132,13 +42,22 @@
      "outputs": []
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The conversion factors between Enzo's internal unit system and the physical CGS system are stored in the dataset's `unit_registry` object.  Code units have names like `code_length` and `code_time`. Let's take a look at the names of all of the code units, along with their CGS conversion factors for this cosmological enzo dataset:"
+     ]
+    },
+    {
      "cell_type": "code",
      "collapsed": false,
      "input": [
       "reg = ds.unit_registry\n",
       "\n",
-      "for un in code_unit_names:\n",
-      "    print un, reg.lut[un]"
+      "for un in reg.keys():\n",
+      "    if un.startswith('code_'):\n",
+      "        fmt_tup = (un, reg.lut[un][0], reg.lut[un][1])\n",
+      "        print \"Unit name:      {:<15}\\nCGS conversion: {:<15}\\nDimensions:     {:<15}\\n\".format(*fmt_tup)"
      ],
      "language": "python",
      "metadata": {},
@@ -295,6 +214,95 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "The unit registry"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "When you create a `YTArray` without referring to a unit registry, `yt` uses the default unit registry, which does not include code units or comoving units."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "a = YTQuantity(3, 'cm')\n",
+      "\n",
+      "print a.units.registry.keys()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "When a dataset is loaded, `yt` infers conversion factors from the internal simulation unit system to the CGS unit system.  These conversion factors are stored in a `unit_registry` along with conversion factors to the other known unit symbols.  For the cosmological Enzo dataset we loaded earlier, we can see there are a number of additional unit symbols not defined in the default unit lookup table:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print sorted([k for k in ds.unit_registry.keys() if k not in a.units.registry.keys()])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Since code units do not appear in the default unit symbol lookup table, one must explicitly refer to a unit registry when creating a `YTArray` to be able to convert to the unit system of a simulation."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To make this as clean as possible, there are array and quantity-creating convenience functions attached to the `Dataset` object:\n",
+      "\n",
+      "* `ds.arr()`\n",
+      "* `ds.quan()`\n",
+      "\n",
+      "These functions make it straightforward to create arrays and quantities that can be converted to code units or comoving units.  For example:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "a = ds.quan(3, 'code_length')\n",
+      "\n",
+      "print a\n",
+      "print a.in_cgs()\n",
+      "print a.in_units('Mpccm/h')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "b = ds.arr([3, 4, 5], 'Mpccm/h')\n",
+      "print b\n",
+      "print b.in_cgs()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}

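A condensed sketch of the registry behavior the reorganized notebook describes, reusing the cosmological Enzo dataset the notebook loads (which must be available locally):

    from yt.mods import *
    from yt.units.yt_array import YTQuantity

    ds = load('Enzo_64/DD0043/data0043')

    # Arrays created through the dataset carry its unit registry, so
    # code units and comoving units are available ...
    x = ds.quan(3.0, 'code_length')
    print x.in_units('Mpccm/h')

    # ... while a bare YTQuantity only sees the default registry and
    # knows nothing about this simulation's code units.
    y = YTQuantity(3.0, 'cm')
    print 'code_length' in y.units.registry.keys()   # False
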
diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -86,12 +86,12 @@
     data["velocity_y"] = (np.zeros(ddims), "cm/s")
     data["velocity_z"] = (velz, "cm/s")
 
-    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
+    L = 2 * R * cm_per_kpc
+    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]]) * L
 
-    L = 2*R*cm_per_kpc
     dl = L/nz
 
-    pf = load_uniform_grid(data, ddims, length_unit=L, bbox=bbox)
+    pf = load_uniform_grid(data, ddims, length_unit='cm', bbox=bbox)
     pf.h
 
     return pf

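The fix above moves the physical box size into `bbox` (in centimeters) and tells `load_uniform_grid` that code lengths are centimeters, rather than scaling a unitless unit cube through `length_unit`. A hedged sketch of the pattern, with illustrative stand-ins for the test's `R` and grid size, and assuming `cm_per_kpc` is importable from `yt.utilities.physical_constants` like the `cm_per_mpc` imports in other frontends:

    import numpy as np
    from yt.frontends.stream.api import load_uniform_grid
    from yt.utilities.physical_constants import cm_per_kpc

    R, nz = 100.0, 64                       # illustrative values
    ddims = (nz, nz, nz)
    data = {"density": (np.ones(ddims), "g/cm**3")}

    L = 2 * R * cm_per_kpc                  # box size in cm
    bbox = np.array([[-0.5, 0.5]] * 3) * L  # physical bounds, in cm
    pf = load_uniform_grid(data, ddims, length_unit='cm', bbox=bbox)
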
diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -336,7 +336,8 @@
                                   input_units=input_units,
                                   registry=self.pf.unit_registry)
             if self.weight_field is None:
-                if Unit(units).is_code_unit and input_units != units:
+                u_obj = Unit(units, registry=self.pf.unit_registry)
+                if u_obj.is_code_unit and input_units != units:
                     if units is '':
                         final_unit = "code_length"
                     else:
@@ -427,11 +428,11 @@
         self.left_edge = self.pf.arr(left_edge, 'code_length')
         self.level = level
 
+        self.ActiveDimensions = np.array(dims, dtype='int32')
         rdx = self.pf.domain_dimensions*self.pf.relative_refinement(0, level)
         rdx[np.where(np.array(dims) - 2 * num_ghost_zones <= 1)] = 1   # issue 602
         self.base_dds = self.pf.domain_width / self.pf.domain_dimensions
         self.dds = self.pf.domain_width / rdx.astype("float64")
-        self.ActiveDimensions = np.array(dims, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
         self._num_ghost_zones = num_ghost_zones
         self._use_pbar = use_pbar

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -27,6 +27,7 @@
     new_bin_profile3d
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_objects
+from yt.utilities.exceptions import YTEmptyProfileData
 
 def preserve_source_parameters(func):
     def save_state(*args, **kwargs):
@@ -782,11 +783,13 @@
            The name of the new unit.
         """
         if field in self.field_units:
-            self.field_units[field] = Unit(new_unit)
+            self.field_units[field] = \
+                Unit(new_unit, registry=self.pf.unit_registry)
         else:
             fd = self.field_map[field]
             if fd in self.field_units:
-                self.field_units[fd] = Unit(new_unit)
+                self.field_units[fd] = \
+                    Unit(new_unit, registry=self.pf.unit_registry)
             else:
                 raise KeyError("%s not in profile!" % (field))
 

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -458,12 +458,12 @@
         """
         if iterable(width):
             assert_valid_width_tuple(width)
-            width = YTQuantity(width[0], width[1])
+            width = self.pf.quan(width[0], width[1])
         if height is None:
             height = width
         elif iterable(height):
             assert_valid_width_tuple(height)
-            height = YTQuantity(height[0], height[1])
+            height = self.pf.quan(height[0], height[1])
         if not iterable(resolution):
             resolution = (resolution, resolution)
         from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -112,6 +112,7 @@
     _index_class = None
     field_units = None
     derived_field_list = requires_index("derived_field_list")
+    _instantiated = False
 
     class __metaclass__(type):
         def __init__(cls, name, b, d):
@@ -147,8 +148,7 @@
         """
         # We return early and do NOT initialize a second time if this file has
         # already been initialized.
-        if self.known_filters is not None:
-            return
+        if self._instantiated: return
         self.dataset_type = dataset_type
         self.file_style = file_style
         self.conversion_factors = {}
@@ -173,9 +173,11 @@
         self._instantiated = time.time()
 
         self.min_level = 0
+        self.no_cgs_equiv_length = False
 
         self._create_unit_registry()
         self._parse_parameter_file()
+        self.set_units()
         self._setup_coordinate_handler()
 
         # Because we need an instantiated class to check the pf's existence in
@@ -187,8 +189,6 @@
             pass
         self.print_key_parameters()
 
-        self.no_cgs_equiv_length = False
-        self.set_units()
         self._set_derived_attrs()
         self._setup_classes()
 
@@ -201,10 +201,6 @@
             self.domain_width = self.domain_right_edge - self.domain_left_edge
         if not isinstance(self.current_time, YTQuantity):
             self.current_time = self.quan(self.current_time, "code_time")
-        # need to do this if current_time was set before units were set
-        elif self.current_time.units.registry.lut["code_time"] != \
-          self.unit_registry.lut["code_time"]:
-            self.current_time.units.registry = self.unit_registry
         for attr in ("center", "width", "left_edge", "right_edge"):
             n = "domain_%s" % attr
             v = getattr(self, n)
@@ -569,12 +565,12 @@
     def _create_unit_registry(self):
         self.unit_registry = UnitRegistry()
         import yt.units.dimensions as dimensions
-        self.unit_registry.lut["code_length"] = (1.0, dimensions.length)
-        self.unit_registry.lut["code_mass"] = (1.0, dimensions.mass)
-        self.unit_registry.lut["code_time"] = (1.0, dimensions.time)
-        self.unit_registry.lut["code_magnetic"] = (1.0, dimensions.magnetic_field)
-        self.unit_registry.lut["code_temperature"] = (1.0, dimensions.temperature)
-        self.unit_registry.lut["code_velocity"] = (1.0, dimensions.velocity)
+        self.unit_registry.add("code_length", 1.0, dimensions.length)
+        self.unit_registry.add("code_mass", 1.0, dimensions.mass)
+        self.unit_registry.add("code_time", 1.0, dimensions.time)
+        self.unit_registry.add("code_magnetic", 1.0, dimensions.magnetic_field)
+        self.unit_registry.add("code_temperature", 1.0, dimensions.temperature)
+        self.unit_registry.add("code_velocity", 1.0, dimensions.velocity)
 
     def set_units(self):
         """
@@ -633,7 +629,7 @@
             DW = np.zeros(3)
         else:
             DW = self.arr(self.domain_right_edge - self.domain_left_edge, "code_length")
-        self.unit_registry.modify("unitary", DW.max())
+        self.unit_registry.add("unitary", float(DW.max()), DW.units.dimensions)
 
     _arr = None
     @property

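`UnitRegistry.add` replaces the direct writes into the `lut` dict; a minimal sketch of the call signature as used above:

    from yt.units.unit_registry import UnitRegistry
    import yt.units.dimensions as dimensions

    reg = UnitRegistry()
    # add(symbol, base_value, dimensions) registers the symbol along
    # with its CGS conversion factor and dimensionality.
    reg.add('code_length', 1.0, dimensions.length)
    print reg.lut['code_length']
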
diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -56,7 +56,8 @@
                             proj_field
                     field_unit = Unit(fi.units)
                     if wf is not None:
-                        yield assert_equal, frb[proj_field].units, Unit(field_unit)
+                        yield assert_equal, frb[proj_field].units, \
+                            Unit(field_unit, registry=pf.unit_registry)
                     else:
                         if frb[proj_field].units.is_code_unit:
                             proj_unit = "code_length"
@@ -64,7 +65,8 @@
                             proj_unit = "cm"
                         if field_unit != '' and field_unit != Unit():
                             proj_unit = "({0}) * {1}".format(field_unit, proj_unit)
-                        yield assert_equal, frb[proj_field].units, Unit(proj_unit)
+                        yield assert_equal, frb[proj_field].units, \
+                            Unit(proj_unit, registry=pf.unit_registry)
                     yield assert_equal, frb[proj_field].info['xlim'], \
                             frb.bounds[:2]
                     yield assert_equal, frb[proj_field].info['ylim'], \

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -181,12 +181,12 @@
     def get_field_parameter(self, param, default = None):
         self.requested_parameters.append(param)
         if param in ['bulk_velocity', 'center', 'normal']:
-            return YTArray(np.random.random(3) * 1e-2, self.fp_units[param])
+            return self.pf.arr(np.random.random(3) * 1e-2, self.fp_units[param])
         elif param in ['axis']:
             return 0
         elif param.startswith("cp_"):
             ax = param[3]
-            rv = YTArray((0.0, 0.0, 0.0), self.fp_units[param])
+            rv = self.pf.arr((0.0, 0.0, 0.0), self.fp_units[param])
             rv['xyz'.index(ax)] = 1.0
             return rv
         elif param.endswith("_hat"):
@@ -205,7 +205,7 @@
     id = 1
 
     def apply_units(self, arr, units):
-        return YTArray(arr, input_units = units)
+        return self.pf.arr(arr, input_units = units)
 
     def has_field_parameter(self, param):
         return True
@@ -219,7 +219,7 @@
             fc.shape = (self.nd*self.nd*self.nd, 3)
         else:
             fc = fc.transpose()
-        return YTArray(fc, input_units = "code_length")
+        return self.pf.arr(fc, input_units = "code_length")
 
     @property
     def icoords(self):
@@ -244,5 +244,5 @@
         fw = np.ones((self.nd**3, 3), dtype="float64") / self.nd
         if not self.flat:
             fw.shape = (self.nd, self.nd, self.nd, 3)
-        return YTArray(fw, input_units = "code_length")
+        return self.pf.arr(fw, input_units = "code_length")
 

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -61,6 +61,7 @@
         self.field_list = field_list
         self.slice_info = slice_info
         self.field_aliases = {}
+        self.species_names = []
         self.setup_fluid_aliases()
 
     def setup_fluid_fields(self):
@@ -161,6 +162,8 @@
         :class:`~yt.data_objects.api.DerivedField`.
 
         """
+        override = kwargs.pop("force_override", False)
+        if not override and name in self: return
         if function is None:
             def create_function(function):
                 self[name] = DerivedField(name, function, **kwargs)

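With the new guard, `add_field` silently keeps an existing definition unless the caller passes `force_override=True`. A hedged sketch, assuming `registry` is a `FieldInfoContainer` instance and with a hypothetical field function:

    def _my_density(field, data):
        # hypothetical replacement definition
        return data['gas', 'density']

    # A name that is already registered is left untouched ...
    registry.add_field(('gas', 'density'), function=_my_density,
                       units='g/cm**3')
    # ... unless the override is forced explicitly.
    registry.add_field(('gas', 'density'), function=_my_density,
                       units='g/cm**3', force_override=True)
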
diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/fields/fluid_fields.py
--- a/yt/fields/fluid_fields.py
+++ b/yt/fields/fluid_fields.py
@@ -162,12 +162,22 @@
     registry.add_field((ftype, "metal_mass"),
                        function=_metal_mass,
                        units="g")
+
+    def _number_density(field, data):
+        field_data = np.zeros_like(data["gas", "%s_number_density" % \
+                                        data.pf.field_info.species_names[0]])
+        for species in data.pf.field_info.species_names:
+            field_data += data["gas", "%s_number_density" % species]
+        return field_data
+    registry.add_field((ftype, "number_density"),
+                       function = _number_density,
+                       units="cm**-3")
     
     def _mean_molecular_weight(field, data):
         return (data[ftype, "density"] / (mh * data[ftype, "number_density"]))
     registry.add_field((ftype, "mean_molecular_weight"),
               function=_mean_molecular_weight,
-              units=r"")
+              units="")
 
     setup_gradient_fields(registry, (ftype, "pressure"), "dyne/cm**2",
                           slice_info)

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/fields/interpolated_fields.py
--- /dev/null
+++ b/yt/fields/interpolated_fields.py
@@ -0,0 +1,49 @@
+"""
+Fields from interpolating data tables.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.fields.local_fields import add_field
+
+from yt.utilities.linear_interpolators import \
+    UnilinearFieldInterpolator, \
+    BilinearFieldInterpolator, \
+    TrilinearFieldInterpolator
+
+_int_class = {1: UnilinearFieldInterpolator,
+              2: BilinearFieldInterpolator,
+              3: TrilinearFieldInterpolator}
+
+def add_interpolated_field(name, units, table_data, axes_data, axes_fields,
+                           ftype="gas", particle_type=False, validators=None,
+                           truncate=True):
+    
+    if len(table_data.shape) not in _int_class:
+        raise RuntimeError("Interpolated field can only be created from 1d, 2d, or 3d data.")
+
+    if len(axes_fields) != len(axes_data) or len(axes_fields) != len(table_data.shape):
+        raise RuntimeError("Data dimension mismatch: data is %d, %d axes data provided, and %d axes fields provided." %
+                           (len(table_data.shape), len(axes_data), len(axes_fields)))
+
+    int_class = _int_class[len(table_data.shape)]
+    my_interpolator = int_class(table_data, axes_data, axes_fields, truncate=truncate)
+
+    def _interpolated_field(field, data):
+        return my_interpolator(data)
+    add_field((ftype, name), 
+              function=_interpolated_field,
+              units=units,
+              validators=validators, particle_type=particle_type)
+              

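A hedged usage sketch for the new helper. The field name and table values are hypothetical, and passing per-axis bin arrays assumes the `*FieldInterpolator` classes accept array-valued boundaries, which is what the length check above implies:

    import numpy as np
    from yt.fields.interpolated_fields import add_interpolated_field

    # Hypothetical 2D lookup table indexed by density and temperature.
    table_data = np.random.random((64, 32))
    axes_data = (np.linspace(1e-30, 1e-20, 64),   # density axis bins
                 np.linspace(1e4, 1e8, 32))       # temperature axis bins
    axes_fields = ['density', 'temperature']

    add_interpolated_field('ion_fraction', '', table_data,
                           axes_data, axes_fields, ftype='gas')
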
diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -14,15 +14,22 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
+import re
 
 from yt.utilities.physical_constants import \
     mh, \
     mass_sun_cgs, \
     amu_cgs
+from yt.utilities.physical_ratios import \
+    primordial_H_mass_fraction
 from yt.funcs import *
 from yt.utilities.chemical_formulas import \
     ChemicalFormula
 
+_primordial_mass_fraction = \
+  {"H": primordial_H_mass_fraction,
+   "He" : (1 - primordial_H_mass_fraction)}
+    
 # See YTEP-0003 for details, but we want to ensure these fields are all
 # populated:
 #
@@ -50,7 +57,7 @@
     weight *= amu_cgs
     def _number_density(field, data):
         return data[ftype, "%s_density" % species] \
-             / amu_cgs
+             / weight
     return _number_density
 
 def _create_density_func(ftype, species):
@@ -102,3 +109,53 @@
                        function = _create_number_density_func(ftype, species),
                        particle_type = particle_type,
                        units = "cm**-3")
+
+def add_nuclei_density_fields(registry, ftype,
+                              particle_type = False):
+    elements = _get_all_elements(registry.species_names)
+    for element in elements:
+        registry.add_field((ftype, "%s_nuclei_density" % element),
+                           function = _nuclei_density,
+                           particle_type = particle_type,
+                           units = "cm**-3")
+    if len(elements) == 0:
+        for element in ["H", "He"]:
+            registry.add_field((ftype, "%s_nuclei_density" % element),
+                               function = _default_nuclei_density,
+                               particle_type = particle_type,
+                               units = "cm**-3")
+
+def _default_nuclei_density(field, data):
+    element = field.name[1][:field.name[1].find("_")]
+    return data["gas", "density"] * _primordial_mass_fraction[element] / \
+      ChemicalFormula(element).weight / amu_cgs
+        
+def _nuclei_density(field, data):
+    element = field.name[1][:field.name[1].find("_")]
+    field_data = np.zeros_like(data["gas", "%s_number_density" % 
+                                    data.pf.field_info.species_names[0]])
+    for species in data.pf.field_info.species_names:
+        nucleus = species
+        if "_" in species:
+            nucleus = species[:species.find("_")]
+        num = _get_element_multiple(nucleus, element)
+        field_data += num * data["gas", "%s_number_density" % species]
+    return field_data
+
+def _get_all_elements(species_list):
+    elements = []
+    for species in species_list:
+        for item in re.findall('[A-Z][a-z]?|[0-9]+', species):
+            if not item.isdigit() and item not in elements \
+              and item != "El":
+                elements.append(item)
+    return elements
+    
+def _get_element_multiple(compound, element):
+    my_split = re.findall('[A-Z][a-z]?|[0-9]+', compound)
+    if element not in my_split:
+        return 0
+    loc = my_split.index(element)
+    if loc == len(my_split) - 1 or not my_split[loc + 1].isdigit():
+        return 1
+    return int(my_split[loc + 1])

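The regex `[A-Z][a-z]?|[0-9]+` tokenizes a chemical formula into element symbols and digit runs, with `El` (electrons) excluded from the element list. A few illustrative calls to the helpers above (importing private names purely for demonstration):

    from yt.fields.species_fields import \
        _get_all_elements, _get_element_multiple

    print _get_all_elements(['H2O', 'CO2', 'El'])   # ['H', 'O', 'C']
    print _get_element_multiple('CO2', 'O')         # 2
    print _get_element_multiple('CO2', 'C')         # 1
    print _get_element_multiple('H2O', 'Fe')        # 0
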
diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -385,6 +385,9 @@
         self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
                                   (self.time_unit**2 * self.length_unit))
         self.magnetic_unit.convert_to_units("gauss")
+
+    def set_code_units(self):
+        super(AthenaDataset, self).set_code_units()
         self.unit_registry.modify("code_magnetic", self.magnetic_unit)
 
     def _parse_parameter_file(self):

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -32,9 +32,6 @@
     mpc_conversion, sec_conversion
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
-from yt.units.yt_array import \
-    YTArray, \
-    YTQuantity
 from yt.utilities.lib.misc_utilities import \
     get_box_grids_level
 from yt.geometry.selection_routines import \
@@ -45,7 +42,9 @@
     cm_per_mpc
 
 from .fields import \
-    BoxlibFieldInfo
+    BoxlibFieldInfo, \
+    MaestroFieldInfo
+
 from .io import IOHandlerBoxlib
 # This is what we use to find scientific notation that might include d's
 # instead of e's.
@@ -457,21 +456,7 @@
             elif param == "castro.use_comoving":
                 vals = self.cosmological_simulation = int(vals)
             else:
-                # Now we guess some things about the parameter and its type
-                v = vals.split()[0] # Just in case there are multiple; we'll go
-                                    # back afterward to using vals.
-                try:
-                    float(v.upper().replace("D","E"))
-                except:
-                    pcast = str
-                else:
-                    syms = (".", "D+", "D-", "E+", "E-")
-                    if any(sym in v.upper() for sym in syms for v in vals.split()):
-                        pcast = float
-                    else:
-                        pcast = int
-                vals = [pcast(v) for v in vals.split()]
-                if len(vals) == 1: vals = vals[0]
+                vals = _guess_pcast(vals)
             self.parameters[param] = vals
 
         if getattr(self, "cosmological_simulation", 0) == 1:
@@ -600,10 +585,10 @@
             self._setup2d()
 
     def _set_code_unit_attributes(self):
-        self.length_unit = YTQuantity(1.0, "cm")
-        self.mass_unit = YTQuantity(1.0, "g")
-        self.time_unit = YTQuantity(1.0, "s")
-        self.velocity_unit = YTQuantity(1.0, "cm/s")
+        self.length_unit = self.quan(1.0, "cm")
+        self.mass_unit = self.quan(1.0, "g")
+        self.time_unit = self.quan(1.0, "s")
+        self.velocity_unit = self.quan(1.0, "cm/s")
 
     def _setup1d(self):
 #        self._index_class = BoxlibHierarchy1D
@@ -759,6 +744,8 @@
 
 class MaestroDataset(BoxlibDataset):
 
+    _field_info_class = MaestroFieldInfo
+
     @classmethod
     def _is_valid(cls, *args, **kwargs):
         # fill our args
@@ -775,6 +762,33 @@
         if any("maestro" in line.lower() for line in lines): return True
         return False
 
+    def _parse_parameter_file(self):
+        super(MaestroDataset, self)._parse_parameter_file()
+        jobinfo_filename = os.path.join(self.output_dir, "job_info")
+        line = ""
+        with open(jobinfo_filename, "r") as f:
+            while not line.startswith(" [*] indicates overridden default"):
+                line = f.next()
+            for line in f:
+                p, v = (_.strip() for _ in line[4:].split("="))
+                if len(v) == 0:
+                    self.parameters[p] = ""
+                else:
+                    self.parameters[p] = _guess_pcast(v)
+
+        # set the periodicity based on the integer BC runtime parameters
+        periodicity = [True, True, True]
+        if not self.parameters['bcx_lo'] == -1:
+            periodicity[0] = False
+
+        if not self.parameters['bcy_lo'] == -1:
+            periodicity[1] = False
+
+        if not self.parameters['bcz_lo'] == -1:
+            periodicity[2] = False
+
+        self.periodicity = ensure_tuple(periodicity)
+
 
 class NyxHierarchy(BoxlibHierarchy):
 
@@ -866,8 +880,27 @@
             self.particle_types_raw = self.particle_types
 
     def _set_code_unit_attributes(self):
-        self.mass_unit = YTQuantity(1.0, "Msun")
-        self.time_unit = YTQuantity(1.0 / 3.08568025e19, "s")
-        self.length_unit = YTQuantity(1.0 / (1 + self.current_redshift),
-                                      "mpc")
+        self.mass_unit = self.quan(1.0, "Msun")
+        self.time_unit = self.quan(1.0 / 3.08568025e19, "s")
+        self.length_unit = self.quan(1.0 / (1 + self.current_redshift), "Mpc")
         self.velocity_unit = self.length_unit / self.time_unit
+
+def _guess_pcast(vals):
+    # Now we guess some things about the parameter and its type
+    v = vals.split()[0] # Just in case there are multiple; we'll go
+                        # back afterward to using vals.
+    try:
+        float(v.upper().replace("D","E"))
+    except:
+        pcast = str
+        if v in ("F", "T"):
+            pcast = bool
+    else:
+        syms = (".", "D+", "D-", "E+", "E-")
+        if any(sym in v.upper() for sym in syms for v in vals.split()):
+            pcast = float
+        else:
+            pcast = int
+    vals = [pcast(v) for v in vals.split()]
+    if len(vals) == 1: vals = vals[0]
+    return vals

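The extracted `_guess_pcast` infers a Python type from the first whitespace-separated token of a BoxLib parameter value. A few illustrative calls, assuming D-style exponents have already been normalized to E-style upstream (the frontend's regex comment suggests this; `float()` itself cannot parse a `d` exponent):

    from yt.frontends.boxlib.data_structures import _guess_pcast

    print _guess_pcast('1.0e-3')                 # 0.001 (has '.' or exponent)
    print _guess_pcast('128')                    # 128 (plain integer)
    print _guess_pcast('T'), _guess_pcast('F')   # True False (Fortran logicals)
    print _guess_pcast('0.5 0.5 0.5')            # [0.5, 0.5, 0.5]
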
diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -14,11 +14,16 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
+import string
 
 from yt.utilities.physical_constants import \
     mh, boltzmann_constant_cgs, amu_cgs
 from yt.fields.field_info_container import \
     FieldInfoContainer
+from yt.fields.species_fields import \
+    add_species_field_by_fraction
+from yt.utilities.chemical_formulas import \
+    ChemicalFormula
 
 rho_units = "code_mass / code_length**3"
 mom_units = "code_mass / (code_time * code_length**2)"
@@ -108,3 +113,90 @@
             self.add_field(("gas", "velocity_%s" % ax),
                            function = _get_vel(ax),
                            units = "cm/s")
+
+class MaestroFieldInfo(FieldInfoContainer):
+
+    known_other_fields = (
+        ("density", ("g/cm**3", ["density"], None)),
+        ("x_vel", ("cm/s", ["velocity_x"], None)),
+        ("y_vel", ("cm/s", ["velocity_y"], None)),
+        ("z_vel", ("cm/s", ["velocity_z"], None)),
+        ("magvel", ("cm/s", ["velocity_magnitude"], None)),
+        ("tfromp", ("K", [], None)),
+        ("tfromh", ("K", [], None)),
+        ("Machnumber", ("", ["mach_number"], None)),
+        ("S", ("1/s", [], None)),
+        ("ad_excess", ("", [], "Adiabatic Excess")),
+        ("deltaT", ("", [], None)),
+        ("deltagamma", ("", [], None)),
+        ("deltap", ("", [], None)),
+        ("divw0", ("1/s", [], None)),
+        # Specific entropy
+        ("entropy", ("erg/(g*K)", ["entropy"], None)),
+        ("entropypert", ("", [], None)),
+        ("enucdot", ("ergs/(g*s)", [], None)),
+        ("gpi_x", ("dyne/cm**3", [], None)), # Perturbational pressure grad
+        ("gpi_y", ("dyne/cm**3", [], None)),
+        ("gpi_z", ("dyne/cm**3", [], None)),
+        ("h", ("erg/g", [], "Specific Enthalpy")),
+        ("h0", ("erg/g", [], "Base State Specific Enthalpy")),
+        # Momentum cannot be computed because we need to include base and
+        # full state.
+        ("momentum", ("g*cm/s", ["momentum_magnitude"], None)),
+        ("p0", ("erg/cm**3", [], "p_0")),
+        ("p0pluspi", ("erg/cm**3", [], "p_0 + \pi")),
+        ("pi", ("erg/cm**3", [], None)),
+        ("pioverp0", ("", [], "\pi/p_0")),
+        # Base state density
+        ("rho0", ("g/cm**3", [], "\\rho_0")),
+        ("rhoh", ("erg/cm**3", ["enthalpy_density"], "(\\rho h)")),
+        # Base state enthalpy density
+        ("rhoh0", ("erg/cm**3", [], "(\\rho h)_0")),
+        ("rhohpert", ("erg/cm**3", [], "(\\rho h)^\prime")),
+        ("rhopert", ("g/cm**3", [], "\\rho^\prime")),
+        ("soundspeed", ("cm/s", ["sound_speed"], None)),
+        ("sponge", ("", [], None)),
+        ("tpert", ("K", [], None)),
+        # Again, base state -- so we can't compute ourselves.
+        ("vort", ("1/s", ["vorticity_magnitude"], None)),
+        # Base state
+        ("w0_x", ("cm/s", [], None)),
+        ("w0_y", ("cm/s", [], None)),
+        ("w0_z", ("cm/s", [], None)),
+    )
+
+    def setup_fluid_fields(self):
+        # Add omegadots, units of 1/s
+        if self.pf.parameters["use_tfromp"]:
+            self.alias(("gas", "temperature"), ("boxlib", "tfromp"),
+                       units = "K")
+        else:
+            self.alias(("gas", "temperature"), ("boxlib", "tfromh"),
+                       units = "K")
+
+        for _, field in self.pf.field_list:
+            if field.startswith("X("):
+                # We have a fraction
+                nice_name = field[2:-1]
+                self.alias(("gas", "%s_fraction" % nice_name), ("boxlib", field),
+                           units = "")
+                def _create_density_func(field_name):
+                    def _func(field, data):
+                        return data[field_name] * data["gas", "density"]
+                    return _func
+                func = _create_density_func(("gas", "%s_fraction" % nice_name))
+                self.add_field(name = ("gas", "%s_density" % nice_name),
+                               function = func,
+                               units = "g/cm**3")
+                # We know this will either have one letter, or two.
+                if field[3] in string.letters:
+                    element, weight = field[2:4], field[4:-1]
+                else:
+                    element, weight = field[2:3], field[3:-1]
+                weight = int(weight)
+                # Here we can, later, add number density.
+            if field.startswith("omegadot("):
+                nice_name = field[9:-1]
+                self.add_output_field(("boxlib", field), units = "1/s")
+                self.alias(("gas", "%s_creation_rate" % nice_name),
+                           ("boxlib", field), units = "1/s")

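The `_create_density_func` factory in the Maestro field setup above exists to bind `field_name` when each closure is created; closing over the loop variable directly would leave every generated `_func` reading the last species processed. A minimal standalone illustration of the difference:

    names = ['He4', 'C12', 'O16']

    # Late binding: each closure reads `name` when called, after the
    # loop has finished, so all three report 'O16'.
    bad = [lambda: name for name in names]

    # Factory binding: `n` is fixed per closure at creation time.
    def make(n):
        return lambda: n
    good = [make(name) for name in names]

    print [f() for f in bad]    # ['O16', 'O16', 'O16']
    print [f() for f in good]   # ['He4', 'C12', 'O16']
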
diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -43,9 +43,6 @@
      mpc_conversion, sec_conversion
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      parallel_root_only
-from yt.units.yt_array import \
-    YTArray, \
-    YTQuantity
 from yt.utilities.lib.misc_utilities import \
     get_box_grids_level
 from yt.utilities.io_handler import \
@@ -70,7 +67,7 @@
         level.
 
         """
-        if self.start_index != None:
+        if self.start_index is not None:
             return self.start_index
         if self.Parent == []:
             iLE = self.LeftEdge - self.pf.domain_left_edge
@@ -84,8 +81,7 @@
 
     def _setup_dx(self):
         # has already been read in and stored in index
-        self.dds = self.index.dds_list[self.Level]
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+        self.dds = self.pf.arr(self.index.dds_list[self.Level], "code_length")
 
 class ChomboHierarchy(GridIndex):
 
@@ -225,10 +221,10 @@
         self._handle.close()
 
     def _set_code_unit_attributes(self):
-        self.length_unit = YTQuantity(1.0, "cm")
-        self.mass_unit = YTQuantity(1.0, "g")
-        self.time_unit = YTQuantity(1.0, "s")
-        self.velocity_unit = YTQuantity(1.0, "cm/s")
+        self.length_unit = self.quan(1.0, "cm")
+        self.mass_unit = self.quan(1.0, "g")
+        self.time_unit = self.quan(1.0, "s")
+        self.velocity_unit = self.quan(1.0, "cm/s")
 
     def _localize(self, f, default):
         if f is None:

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -25,7 +25,7 @@
     _temperature
 
 rho_units = "code_mass / code_length**3"
-mom_units = "code_mass * code_length / code_time"
+mom_units = "code_mass / (code_time * code_length**2)"
 eden_units = "code_mass / (code_time**2 * code_length)" # erg / cm^3
 
 # We duplicate everything here from Boxlib, because we want to be able to
@@ -69,7 +69,8 @@
     def setup_fluid_fields(self):
         def _get_vel(axis):
             def velocity(field, data):
-                return data["%smom" % ax]/data["density"]
+                return data["momentum_%s" % ax]/data["density"]
+            return velocity
         for ax in 'xyz':
             self.add_field("velocity_%s" % ax, function = _get_vel(ax),
                            units = "cm/s")

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -21,7 +21,7 @@
     data_dir_load
 from yt.frontends.chombo.api import ChomboDataset
 
-_fields = ("density", "velocity_magnitude", "velocity_divergence",
+_fields = ("density", "velocity_magnitude", #"velocity_divergence",
            "magnetic_field_x")
 
 gc = "GaussianCloud/data.0077.3d.hdf5"

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -868,7 +868,7 @@
         self.unit_registry.modify("code_time", self.time_unit)
         self.unit_registry.modify("code_velocity", self.velocity_unit)
         DW = self.arr(self.domain_right_edge - self.domain_left_edge, "code_length")
-        self.unit_registry.modify("unitary", DW.max())
+        self.unit_registry.add("unitary", float(DW.max()), DW.units.dimensions)
 
     def cosmology_get_units(self):
         """

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -21,6 +21,7 @@
 from yt.units.yt_array import \
     YTArray
 from yt.fields.species_fields import \
+    add_nuclei_density_fields, \
     add_species_field_by_density
 from yt.utilities.physical_constants import \
     mh, me, mp, \
@@ -42,7 +43,7 @@
     'HM'      : 'H_m1',
     'DI'      : 'D',
     'DII'     : 'D_p1',
-    'HD'      : 'HD',
+    'HDI'     : 'HD',
     'Electron': 'El'
 }
 
@@ -119,8 +120,10 @@
                            take_log=True,
                            units="code_mass/code_length**3")
         yt_name = known_species_names[species]
-        self.alias(("gas", "%s_density" % yt_name),
-                   ("enzo", "%s_Density" % species))
+        # don't alias electron density since mass is wrong
+        if species != "Electron":
+            self.alias(("gas", "%s_density" % yt_name),
+                       ("enzo", "%s_Density" % species))
         add_species_field_by_density(self, "gas", yt_name)
 
     def setup_species_fields(self):
@@ -135,7 +138,8 @@
                        units = "g/cm**3")
         for sp in species_names:
             self.add_species_field(sp)
-
+            self.species_names.append(known_species_names[sp])
+        add_nuclei_density_fields(self, "gas")
 
     def setup_fluid_fields(self):
         # Now we conditionally load a few other things.

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -34,7 +34,6 @@
     io_registry
 from yt.utilities.physical_constants import cm_per_mpc
 from .fields import FLASHFieldInfo
-from yt.units.yt_array import YTQuantity
 
 class FLASHGrid(AMRGridPatch):
     _id_offset = 1
@@ -225,20 +224,19 @@
         else:
             length_factor = 1.0
             temperature_factor = 1.0
-        self.magnetic_unit = YTQuantity(b_factor, "gauss")
-        self.length_unit = YTQuantity(length_factor, "cm")
-        self.mass_unit = YTQuantity(1.0, "g")
-        self.time_unit = YTQuantity(1.0, "s")
-        self.velocity_unit = YTQuantity(1.0, "cm/s")
-        self.temperature_unit = YTQuantity(temperature_factor, "K")
+        self.magnetic_unit = self.quan(b_factor, "gauss")
+        self.length_unit = self.quan(length_factor, "cm")
+        self.mass_unit = self.quan(1.0, "g")
+        self.time_unit = self.quan(1.0, "s")
+        self.velocity_unit = self.quan(1.0, "cm/s")
+        self.temperature_unit = self.quan(temperature_factor, "K")
         # Still need to deal with:
         #self.conversion_factors['temp'] = (1.0 + self.current_redshift)**-2.0
 
     def set_code_units(self):
         super(FLASHDataset, self).set_code_units()
-        from yt.units.dimensions import dimensionless
         self.unit_registry.modify("code_temperature",
-            self.temperature_unit.value)
+                                  self.temperature_unit.value)
 
     def _find_parameter(self, ptype, pname, scalar = False):
         nn = "/%s %s" % (ptype,

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -43,8 +43,6 @@
 erg_units = "code_mass * (code_length/code_time)**2"
 rho_units = "code_mass / code_length**3"
 
-Na_code = Na.in_units("1/code_mass")
-
 class FLASHFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("velx", ("code_length/code_time", ["velocity_x"], None)),
@@ -145,10 +143,12 @@
                            units="erg/g")
         ## Derived FLASH Fields
         def _nele(field, data):
+            Na_code = data.pf.quan(Na, '1/code_mass')
             return data["flash","dens"]*data["flash","ye"]*Na_code
         self.add_field(('flash','nele'), function=_nele, units="code_length**-3")
         self.add_field(('flash','edens'), function=_nele, units="code_length**-3")
         def _nion(field, data):
+            Na_code = data.pf.quan(Na, '1/code_mass')
             return data["flash","dens"]*data["flash","sumy"]*Na_code
         self.add_field(('flash','nion'), function=_nion, units="code_length**-3")
         def _abar(field, data):

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/frontends/halo_catalogs/setup.py
--- a/yt/frontends/halo_catalogs/setup.py
+++ b/yt/frontends/halo_catalogs/setup.py
@@ -1,13 +1,10 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
-import glob
+from numpy.distutils.misc_util import Configuration
+
 
 def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
     config = Configuration('halo_catalogs', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
+    config.add_subpackage("halo_catalog")
+    config.add_subpackage("rockstar")
+    config.make_config_py()
     return config

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/frontends/moab/tests/test_c5.py
--- a/yt/frontends/moab/tests/test_c5.py
+++ b/yt/frontends/moab/tests/test_c5.py
@@ -24,7 +24,7 @@
     FieldValuesTest
 from yt.frontends.moab.api import MoabHex8Dataset
 
-_fields = (("gas", "flux"),
+_fields = (("moab", "flux"),
           )
 
 c5 = "c5/c5.h5m"
@@ -39,12 +39,14 @@
     yield assert_almost_equal, pf.index.get_smallest_dx(), 0.00411522633744843, 10
     yield assert_equal, dd["x"].shape[0], 63*63*63
     yield assert_almost_equal, \
-        dd["cell_volume"].in_units("code_length**3").sum(dtype="float64"), \
+        dd["cell_volume"].in_units("code_length**3").sum(dtype="float64").d, \
         1.0, 10
     for offset_1 in [1e-9, 1e-4, 0.1]:
         for offset_2 in [1e-9, 1e-4, 0.1]:
-            ray = pf.ray(pf.domain_left_edge + offset_1,
-                           pf.domain_right_edge - offset_2)
+            DLE = pf.domain_left_edge
+            DRE = pf.domain_right_edge
+            ray = pf.ray(DLE + offset_1 * DLE.uq,
+                         DRE - offset_2 * DRE.uq)
             yield assert_almost_equal, ray["dts"].sum(dtype="float64"), 1.0, 8
     for i, p1 in enumerate(np.random.random((5, 3))):
         for j, p2 in enumerate(np.random.random((5, 3))):

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -29,7 +29,6 @@
 from yt.data_objects.octree_subset import \
     OctreeSubset
 
-from yt.units.yt_array import YTQuantity
 from .definitions import ramses_header, field_aliases
 from yt.utilities.lib.misc_utilities import \
     get_box_grids_level
@@ -351,7 +350,7 @@
         for domain in self.domains:
             pfl.update(set(domain.particle_field_offsets.keys()))
         self.particle_field_list = list(pfl)
-        self.field_list = [("gas", f) for f in self.fluid_field_list] \
+        self.field_list = [("ramses", f) for f in self.fluid_field_list] \
                         + self.particle_field_list
 
     def _setup_auto_fields(self):
@@ -373,8 +372,16 @@
         if hydro_fn:
             # Read the number of hydro  variables
             f = open(hydro_fn, "rb")
-            fpu.skip(f, 1)
-            nvar = fpu.read_vector(f, "i")[0]
+            hydro_header = ( ('ncpu', 1, 'i'),
+                             ('nvar', 1, 'i'),
+                             ('ndim', 1, 'i'),
+                             ('nlevelmax', 1, 'i'),
+                             ('nboundary', 1, 'i'),
+                             ('gamma', 1, 'd')
+                            )
+            hvals = fpu.read_attrs(f, hydro_header)
+            self.pf.gamma = hvals['gamma']
+            nvar = hvals['nvar']
         # OK, we got NVAR, now set up the arrays depending on what NVAR is
         # Allow some wiggle room for users to add too many variables
         if nvar < 5:
@@ -440,6 +447,7 @@
 class RAMSESDataset(Dataset):
     _index_class = RAMSESIndex
     _field_info_class = RAMSESFieldInfo
+    gamma = 1.4 # This will get replaced on hydro_fn open
     
     def __init__(self, filename, dataset_type='ramses',
                  fields = None, storage_filename = None):
@@ -450,6 +458,7 @@
         fields: An array of hydro variable fields in order of position in the hydro_XXXXX.outYYYYY file
                 If set to None, will try a default set of fields
         '''
+        self.fluid_types += ("ramses",)
         self._fields_in_file = fields
         Dataset.__init__(self, filename, dataset_type)
         self.storage_filename = storage_filename
@@ -472,10 +481,10 @@
 
         magnetic_unit = np.sqrt(4*np.pi * mass_unit /
                                 (time_unit**2 * length_unit))
-        self.magnetic_unit = YTQuantity(magnetic_unit, "gauss")
-        self.length_unit = YTQuantity(length_unit, "cm")
-        self.mass_unit = YTQuantity(mass_unit, "g")
-        self.time_unit = YTQuantity(time_unit, "s")
+        self.magnetic_unit = self.quan(magnetic_unit, "gauss")
+        self.length_unit = self.quan(length_unit, "cm")
+        self.mass_unit = self.quan(mass_unit, "g")
+        self.time_unit = self.quan(time_unit, "s")
         self.velocity_unit = self.length_unit / self.time_unit
 
     def _parse_parameter_file(self):

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/frontends/ramses/definitions.py
--- a/yt/frontends/ramses/definitions.py
+++ b/yt/frontends/ramses/definitions.py
@@ -29,16 +29,6 @@
               )
     yield header
     # TODO: REMOVE
-    '''
-    hydro_header = ( ('ncpu', 1, 'i'),
-                     ('nvar', 1, 'i'),
-                     ('ndim', 1, 'i'),
-                     ('nlevelmax', 1, 'i'),
-                     ('nboundary', 1, 'i'),
-                     ('gamma', 1, 'd')
-                    )
-    yield hydro_header
-    '''
     noutput, iout, ifout = hvals['nout']
     next_set = ( ('tout', noutput, 'd'),
                  ('aout', noutput, 'd'),

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -70,7 +70,7 @@
         ("x-velocity", (vel_units, ["velocity_x"], None)),
         ("y-velocity", (vel_units, ["velocity_y"], None)),
         ("z-velocity", (vel_units, ["velocity_z"], None)),
-        ("Pressure", ("code_mass / (code_length * code_time**2)", [], None)),
+        ("Pressure", ("code_mass / (code_length * code_time**2)", ["pressure"], None)),
         ("Metallicity", ("", ["metallicity"], None)),
     )
     known_particle_fields = (
@@ -105,8 +105,8 @@
         def _create_field(name, interp_object):
             def _func(field, data):
                 shape = data["Temperature"].shape
-                d = {'lognH': np.log10(_X*data["Density"]/mh).ravel(),
-                     'logT' : np.log10(data["Temperature"]).ravel()}
+                d = {'lognH': np.log10(_X*data["density"]/mh).ravel(),
+                     'logT' : np.log10(data["temperature"]).ravel()}
                 rv = 10**interp_object(d).reshape(shape)
                 return rv
             self.add_field(name = name, function=_func,

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -95,6 +95,7 @@
                  header_spec = "default",
                  field_spec = "default",
                  ptype_spec = "default"):
+        if self._instantiated: return
         self._header_spec = self._setup_binary_spec(
             header_spec, gadget_header_specs)
         self._field_spec = self._setup_binary_spec(
@@ -388,7 +389,7 @@
         self.over_refine_factor = over_refine_factor
         if field_dtypes is None:
             field_dtypes = {}
-        success, self.endian = self._validate_header(filename, field_dtypes)
+        success, self.endian = self._validate_header(filename)
         if not success:
             print "SOMETHING HAS GONE WRONG.  NBODIES != SUM PARTICLES."
             print "%s != (%s == %s + %s + %s)" % (
@@ -522,7 +523,7 @@
         self.time_unit = 1.0 / np.sqrt(G * density_unit)
 
     @staticmethod
-    def _validate_header(filename, field_dtypes):
+    def _validate_header(filename):
         '''
         This method automatically detects whether the tipsy file is big/little endian
         and is not corrupt/invalid.  It returns a tuple of (Valid, endianswap) where
@@ -534,10 +535,11 @@
         except:
             return False, 1
         try:
-            fs = len(f.read())
+            f.seek(0, os.SEEK_END)
+            fs = f.tell()
+            f.seek(0, os.SEEK_SET)
         except IOError:
             return False, 1
-        f.seek(0)
         #Read in the header
         t, n, ndim, ng, nd, ns = struct.unpack("<diiiii", f.read(28))
         endianswap = "<"
@@ -546,16 +548,13 @@
             endianswap = ">"
             f.seek(0)
             t, n, ndim, ng, nd, ns = struct.unpack(">diiiii", f.read(28))
-        # Now we construct the sizes of each of the particles.
-        dtypes = IOHandlerTipsyBinary._compute_dtypes(field_dtypes, endianswap)
-        #Catch for 4 byte padding
-        gas_size = dtypes["Gas"].itemsize
-        dm_size = dtypes["DarkMatter"].itemsize
-        star_size = dtypes["Stars"].itemsize
-        if (fs == 32+gas_size*ng+dm_size*nd+star_size*ns):
-            f.read(4)
-        #File is borked if this is true
-        elif (fs != 28+gas_size*ng+dm_size*nd+star_size*ns):
+        # File is borked if this is true.  The header is 28 bytes, and may
+        # be followed by a 4-byte pad.  Next comes gas particles, which use
+        # 48 bytes, followed by 36 bytes per dark matter particle, and 44 bytes
+        # per star particle.  If positions are stored as doubles, each of these
+        # sizes is increased by 12 bytes.
+        if (fs != 28+48*ng+36*nd+44*ns and fs != 28+60*ng+48*nd+56*ns and
+                fs != 32+48*ng+36*nd+44*ns and fs != 32+60*ng+48*nd+56*ns):
             f.close()
             return False, 0
         f.close()
@@ -563,8 +562,7 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        field_dtypes = kwargs.get("field_dtypes", {})
-        return TipsyDataset._validate_header(args[0], field_dtypes)[0]
+        return TipsyDataset._validate_header(args[0])[0]
 
 class HTTPParticleFile(ParticleFile):
     pass

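The rewritten Tipsy validation enumerates the four legal file sizes directly instead of constructing dtypes: per the comment, a 28-byte header with an optional 4-byte pad, plus 48/36/44 bytes per gas/dark-matter/star particle, each record 12 bytes larger when positions are stored as doubles. A small sketch of the same arithmetic:

    def tipsy_expected_sizes(ng, nd, ns):
        # Per-particle record sizes: gas 48, dark matter 36, stars 44
        # bytes; +12 bytes each when positions are stored as doubles.
        sizes = []
        for extra in (0, 12):                # float vs. double positions
            body = (48 + extra)*ng + (36 + extra)*nd + (44 + extra)*ns
            for header in (28, 32):          # bare header vs. 4-byte pad
                sizes.append(header + body)
        return sorted(sizes)

    print tipsy_expected_sizes(ng=10, nd=5, ns=2)
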
diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -563,10 +563,12 @@
                     for ax in 'xyz':
                         mi = pp["Coordinates"][ax].min()
                         ma = pp["Coordinates"][ax].max()
-                        outlier = YTArray(np.max(np.abs((mi,ma))), 'code_length')
+                        outlier = self.arr(np.max(np.abs((mi,ma))), 'code_length')
                         if outlier > pf.domain_right_edge or -outlier < pf.domain_left_edge:
-                            pf.domain_left_edge = -1.01*outlier # scale these up so the domain is slightly
-                            pf.domain_right_edge = 1.01*outlier # larger than the most distant particle position
+                            # scale these up so the domain is slightly
+                            # larger than the most distant particle position
+                            pf.domain_left_edge = -1.01*outlier
+                            pf.domain_right_edge = 1.01*outlier
                     ind += c
         pf.domain_left_edge = np.ones(3)*pf.domain_left_edge
         pf.domain_right_edge = np.ones(3)*pf.domain_right_edge

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -9,6 +9,8 @@
 from yt.utilities.lib.geometry_utils import get_morton_indices
 from yt.frontends.stream.api import load_particles
 from yt.geometry.selection_routines import RegionSelector, AlwaysSelector
+from yt.units.unit_registry import UnitRegistry
+import yt.units.dimensions as dimensions
 import yt.data_objects.api
 import time, os
 
@@ -127,22 +129,26 @@
     domain_left_edge = None
     domain_right_edge = None
     domain_width = None
+    unit_registry = UnitRegistry()
+    unit_registry.add('code_length', 1.0, dimensions.length)
     periodicity = (False, False, False)
 
 class FakeRegion:
     def __init__(self, nfiles):
         self.pf = FakePF()
-        self.pf.domain_left_edge = YTArray([0.0, 0.0, 0.0], "code_length")
-        self.pf.domain_right_edge = YTArray([nfiles, nfiles, nfiles], "code_length")
+        self.pf.domain_left_edge = YTArray([0.0, 0.0, 0.0], "code_length",
+                                           registry=self.pf.unit_registry)
+        self.pf.domain_right_edge = YTArray([nfiles, nfiles, nfiles], "code_length",
+                                            registry=self.pf.unit_registry)
         self.pf.domain_width = self.pf.domain_right_edge - \
                                self.pf.domain_left_edge
         self.nfiles = nfiles
 
     def set_edges(self, file_id):
         self.left_edge = YTArray([file_id + 0.1, 0.0, 0.0],
-                                  "code_length")
+                                 'code_length', registry=self.pf.unit_registry)
         self.right_edge = YTArray([file_id+1 - 0.1, self.nfiles, self.nfiles],
-                                  "code_length")
+                                  'code_length', registry=self.pf.unit_registry)
 
 def test_particle_regions():
     np.random.seed(int(0x4d3d3d3))

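The pattern the test now follows, in isolation: code units live in an
explicit UnitRegistry rather than the global lookup table, so arrays
must be told which registry to use.

from yt.units.unit_registry import UnitRegistry
from yt.units.yt_array import YTArray
import yt.units.dimensions as dimensions

# Register 'code_length' (with a cgs value of 1.0) and build an array
# against that registry; without it, 'code_length' is no longer known.
reg = UnitRegistry()
reg.add('code_length', 1.0, dimensions.length)
edge = YTArray([1.0, 1.0, 1.0], 'code_length', registry=reg)
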
diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/units/tests/test_units.py
--- a/yt/units/tests/test_units.py
+++ b/yt/units/tests/test_units.py
@@ -21,6 +21,7 @@
     assert_allclose, assert_raises
 from nose.tools import assert_true
 from sympy import Symbol
+from yt.testing import fake_random_pf
 
 # dimensions
 from yt.units.dimensions import \
@@ -420,11 +421,12 @@
         Msun_cgs / Mpc_cgs**3, 1e-12
 
 def test_is_code_unit():
-    u1 = Unit('code_mass')
-    u2 = Unit('code_mass/code_length')
-    u3 = Unit('code_velocity*code_mass**2')
-    u4 = Unit('code_time*code_mass**0.5')
-    u5 = Unit('code_mass*g')
+    pf = fake_random_pf(64, nprocs=1)
+    u1 = Unit('code_mass', registry=pf.unit_registry)
+    u2 = Unit('code_mass/code_length', registry=pf.unit_registry)
+    u3 = Unit('code_velocity*code_mass**2', registry=pf.unit_registry)
+    u4 = Unit('code_time*code_mass**0.5', registry=pf.unit_registry)
+    u5 = Unit('code_mass*g', registry=pf.unit_registry)
     u6 = Unit('g/cm**3')
 
     yield assert_true, u1.is_code_unit

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -325,23 +325,44 @@
 
     yield assert_equal, km_in_cm, km
     yield assert_equal, km_in_cm.in_cgs(), 1e5
+    yield assert_equal, km_in_cm.in_mks(), 1e3
     yield assert_equal, km_in_cm.units, cm_unit
 
     km.convert_to_units('cm')
 
     yield assert_equal, km, YTQuantity(1, 'km')
     yield assert_equal, km.in_cgs(), 1e5
+    yield assert_equal, km.in_mks(), 1e3
     yield assert_equal, km.units, cm_unit
 
     km.convert_to_units('kpc')
 
     yield assert_array_almost_equal_nulp, km, YTQuantity(1, 'km')
     yield assert_array_almost_equal_nulp, km.in_cgs(), YTQuantity(1e5, 'cm')
+    yield assert_array_almost_equal_nulp, km.in_mks(), YTQuantity(1e3, 'm')
     yield assert_equal, km.units, kpc_unit
 
     yield assert_isinstance, km.to_ndarray(), np.ndarray
     yield assert_isinstance, km.ndarray_view(), np.ndarray
 
+    dyne = YTQuantity(1.0, 'dyne')
+
+    yield assert_equal, dyne.in_cgs(), dyne
+    yield assert_equal, dyne.in_cgs(), 1.0
+    yield assert_equal, dyne.in_mks(), dyne
+    yield assert_equal, dyne.in_mks(), 1e-5
+    yield assert_equal, str(dyne.in_mks().units), 'kg*m/s**2'
+    yield assert_equal, str(dyne.in_cgs().units), 'cm*g/s**2'
+
+    em3 = YTQuantity(1.0, 'erg/m**3')
+
+    yield assert_equal, em3.in_cgs(), em3
+    yield assert_equal, em3.in_cgs(), 1e-6
+    yield assert_equal, em3.in_mks(), em3
+    yield assert_equal, em3.in_mks(), 1e-7
+    yield assert_equal, str(em3.in_mks().units), 'kg/(m*s**2)'
+    yield assert_equal, str(em3.in_cgs().units), 'g/(cm*s**2)'
+
 
 def test_yt_array_yt_quantity_ops():
     """

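For reference, the expected mks values in the new assertions follow
directly from the base-unit definitions: 1 dyne = 1 g*cm/s**2 =
(1e-3 kg)*(1e-2 m)/s**2 = 1e-5 kg*m/s**2, and 1 erg/m**3 =
(1 g*cm**2/s**2)/(1e6 cm**3) = 1e-6 g/(cm*s**2) in cgs, or
1e-7 kg/(m*s**2) in mks, since 1 erg = 1e-7 J.
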
diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -32,17 +32,6 @@
     "K":  (1.0, dimensions.temperature),
     "radian": (1.0, dimensions.angle),
 
-    # "code" units, default to CGS conversion.
-    # These default values are overridden in the code frontends
-    "code_length" : (1.0, dimensions.length),
-    "unitary"   : (1.0, dimensions.length),
-    "code_mass" : (1.0, dimensions.mass),
-    "code_time" : (1.0, dimensions.time),
-    "code_velocity" : (1.0, dimensions.velocity),
-    "code_magnetic" : (1.0, dimensions.magnetic_field),
-    "code_temperature" : (1.0, dimensions.temperature),
-    "code_metallicity" : (1.0, dimensions.dimensionless),
-
     # other cgs
     "dyne": (1.0, dimensions.force),
     "erg":  (1.0, dimensions.energy),
@@ -172,3 +161,11 @@
     dimensions.temperature:'K',
     dimensions.angle:'radian',
 }
+
+mks_base_units = {
+    dimensions.mass:'kg',
+    dimensions.length:'m',
+    dimensions.time:'s',
+    dimensions.temperature:'K',
+    dimensions.angle:'radian',
+}

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -24,7 +24,8 @@
 from yt.units import dimensions as dimensions_mod
 from yt.units.unit_lookup_table import \
     latex_symbol_lut, unit_prefixes, \
-    prefixable_units, cgs_base_units
+    prefixable_units, cgs_base_units, \
+    mks_base_units
 from yt.units.unit_registry import UnitRegistry
 
 import copy
@@ -136,7 +137,7 @@
         """
         # Simplest case. If user passes a Unit object, just use the expr.
         unit_key = None
-        if isinstance(unit_expr, str):
+        if isinstance(unit_expr, basestring):
             if registry and unit_expr in registry.unit_objs:
                 return registry.unit_objs[unit_expr]
             else:
@@ -317,23 +318,36 @@
                 return False
         return True
 
-    # @todo: might be a simpler/smarter sympy way to do this...
+    def _get_system_unit_string(self, base_units):
+        # The dimensions of a unit object are the product of the base dimensions.
+        # Use sympy to factor the dimensions into the given base unit symbols.
+        units = []
+        my_dims = self.dimensions.expand()
+        for dim in base_units:
+            unit_string = base_units[dim]
+            power_string = "**(%s)" % my_dims.as_coeff_exponent(dim)[1]
+            units.append("".join([unit_string, power_string]))
+        return " * ".join(units)
+
+
     def get_cgs_equivalent(self):
         """
         Create and return dimensionally-equivalent cgs units.
 
         """
-        # The dimensions of a unit object is the product of the base dimensions.
-        # Use sympy to factor the dimensions into base CGS unit symbols.
-        cgs_units = []
-        my_dims = self.dimensions.expand()
-        for dim in cgs_base_units:
-            unit_string = cgs_base_units[dim]
-            power_string = "**(%s)" % my_dims.as_coeff_exponent(dim)[1]
-            cgs_units.append("".join([unit_string, power_string]))
-        cgs_units_string = " * ".join(cgs_units)
+        units_string = self._get_system_unit_string(cgs_base_units)
+        return Unit(units_string, cgs_value=1.0,
+                    dimensions=self.dimensions, registry=self.registry)
 
-        return Unit(cgs_units_string, cgs_value=1.0,
+    def get_mks_equivalent(self):
+        """
+        Create and return dimensionally-equivalent mks units.
+
+        """
+        units_string = self._get_system_unit_string(mks_base_units)
+        cgs_value = (get_conversion_factor(self, self.get_cgs_equivalent()) /
+                     get_conversion_factor(self, units_string))
+        return Unit(units_string, cgs_value=cgs_value,
                     dimensions=self.dimensions, registry=self.registry)
 
     def get_conversion_factor(self, other_units):

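The refactor above makes the cgs and mks equivalents share one code
path: _get_system_unit_string reads each base dimension's exponent off
the expanded dimension expression and joins the corresponding base
symbols. A short usage sketch (the symbol ordering in the returned unit
may differ):

from yt.units.unit_object import Unit

u = Unit('dyne')
cgs = u.get_cgs_equivalent()  # dimensionally g*cm/s**2
mks = u.get_mks_equivalent()  # dimensionally kg*m/s**2; its cgs_value
                              # is 1e5, since 1 N = 1e5 dyne
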
diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -14,7 +14,6 @@
 #-----------------------------------------------------------------------------
 
 import copy
-
 import numpy as np
 
 from functools import wraps
@@ -29,7 +28,7 @@
     isfinite, isinf, isnan, signbit, copysign, nextafter, modf, frexp, \
     floor, ceil, trunc, fmax, fmin
 
-from yt.units.unit_object import Unit
+from yt.units.unit_object import Unit, UnitParseError
 from yt.units.unit_registry import UnitRegistry
 from yt.units.dimensions import dimensionless
 from yt.utilities.exceptions import \
@@ -241,9 +240,19 @@
     def __new__(cls, input_array, input_units=None, registry=None, dtype=np.float64):
         if input_array is NotImplemented:
             return input_array
+        if registry is None and isinstance(input_units, basestring):
+            if input_units.startswith('code_'):
+                raise UnitParseError(
+                    "Code units used without referring to a dataset. \n"
+                    "Perhaps you meant to do something like this instead: \n"
+                    "ds.arr(%s, \"%s\")" % (input_array, input_units)
+                    )
         if isinstance(input_array, YTArray):
             if input_units is None:
-                pass
+                if registry is None:
+                    pass
+                else:
+                    input_array.units.registry = registry
             elif isinstance(input_units, Unit):
                 input_array.units = input_units
             else:
@@ -342,6 +351,13 @@
         """
         return self.convert_to_units(self.units.get_cgs_equivalent())
 
+    def convert_to_mks(self):
+        """
+        Convert the array and units to the equivalent mks units.
+
+        """
+        return self.convert_to_units(self.units.get_mks_equivalent())
+
     def in_units(self, units):
         """
         Creates a copy of this array with the data in the supplied units, and
@@ -372,11 +388,23 @@
 
         Returns
         -------
-        Quantity object with data converted to cgs and cgs units.
+        Quantity object with data converted to cgs units.
 
         """
         return self.in_units(self.units.get_cgs_equivalent())
 
+    def in_mks(self):
+        """
+        Creates a copy of this array with the data in the equivalent mks units,
+        and returns it.
+
+        Returns
+        -------
+        Quantity object with data converted to mks units.
+
+        """
+        return self.in_units(self.units.get_mks_equivalent())
+
     def ndarray_view(self):
         """
         Returns a view into the array, but as an ndarray rather than ytarray.

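A quick sketch of the new API surface, using physical units (which need
no registry):

from yt.units.yt_array import YTQuantity

q = YTQuantity(1.0, 'dyne')
q_mks = q.in_mks()    # returns a copy in kg*m/s**2 (value 1e-5)
q.convert_to_mks()    # converts q itself in place

# Code units, by contrast, now require a registry: YTArray(1.0,
# 'code_length') with no registry raises UnitParseError and suggests
# ds.arr(1.0, "code_length") instead.
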
diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/utilities/physical_ratios.py
--- a/yt/utilities/physical_ratios.py
+++ b/yt/utilities/physical_ratios.py
@@ -94,6 +94,7 @@
 jansky_cgs = 1.0e-23
 # Cosmological constants
 rho_crit_g_cm3_h2 = 1.8788e-29
+primordial_H_mass_fraction = 0.76
 
 # Misc. Approximations
 mass_mean_atomic_cosmology = 1.22

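One common use of the new constant, as a sketch (the hydrogen mass is
hardcoded here only to keep the snippet self-contained):

from yt.utilities.physical_ratios import primordial_H_mass_fraction

m_H = 1.6737e-24  # hydrogen mass in grams

def n_H(rho):
    # Hydrogen number density from a mass density in g/cm**3.
    return primordial_H_mass_fraction * rho / m_H
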
diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/utilities/tests/test_selectors.py
--- a/yt/utilities/tests/test_selectors.py
+++ b/yt/utilities/tests/test_selectors.py
@@ -24,7 +24,7 @@
         data = pf.sphere(center, 0.25)
        # WARNING: this value has not been externally verified
         dd = pf.h.all_data()
-        dd.set_field_parameter("center", YTArray(center, 'code_length'))
+        dd.set_field_parameter("center", pf.arr(center, 'code_length'))
         n_outside = (dd["radius"] >= 0.25).sum()
         assert_equal(data["radius"].size + n_outside, dd["radius"].size)
 
@@ -55,7 +55,7 @@
         data.get_data()
 
         dd = pf.h.all_data()
-        dd.set_field_parameter("center", YTArray(center, "code_length"))
+        dd.set_field_parameter("center", pf.arr(center, "code_length"))
         n_outside = (dd["radius"] >= ratios[0]).sum()
         assert_equal(data["radius"].size + n_outside, dd["radius"].size)
 

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -17,6 +17,7 @@
 import numpy as np
 
 from yt.funcs import *
+from yt.utilities.exceptions import YTNotInsideNotebook
 import _colormap_data as cmd
 import yt.utilities.lib.image_utilities as au
 import yt.utilities.png_writer as pw

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -91,7 +91,7 @@
         return (pf.quan(width[0], 'code_length'),
                 pf.quan(width[1], 'code_length'))
     elif isinstance(width[0], YTQuantity) and isinstance(width[1], YTQuantity):
-        return width
+        return (pf.quan(width[0]), pf.quan(width[1]))
     else:
         assert_valid_width_tuple(width)
         # If width and unit are both valid width tuples, we
@@ -150,7 +150,7 @@
         else:
             raise RuntimeError('center keyword \"%s\" not recognized' % center)
     elif isinstance(center, YTArray):
-        pass
+        return pf.arr(center)
     elif iterable(center):
         if iterable(center[0]) and isinstance(center[1], basestring):
             center = pf.arr(center[0], center[1])
@@ -793,7 +793,8 @@
             colorbar_label = image.info['label']
 
             # Determine the units of the data
-            units = Unit(self._frb[f].units).latex_representation()
+            units = Unit(self._frb[f].units, registry=self.pf.unit_registry)
+            units = units.latex_representation()
 
             if units is None or units == '':
                 pass

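Both plot_window changes above exist to attach the dataset's unit
registry to user input; pf.arr and pf.quan are the registry-aware
constructors. For example, with a test dataset:

from yt.testing import fake_random_pf

pf = fake_random_pf(16)
# These carry pf's registry, so 'code_*' units resolve against the
# dataset; a bare YTArray/YTQuantity would not know them.
w = pf.quan(0.3, 'code_length')
c = pf.arr([0.5, 0.5, 0.5], 'code_length')
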
diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -14,6 +14,7 @@
 #-----------------------------------------------------------------------------
 
 
+import __builtin__
 import base64
 import types
 
@@ -30,13 +31,15 @@
     ImagePlotContainer, \
     log_transform, linear_transform
 from yt.data_objects.profiles import \
-    create_profile
+     create_profile
+from yt.utilities.exceptions import \
+     YTNotInsideNotebook
 from yt.utilities.logger import ytLogger as mylog
 import _mpl_imports as mpl
 from yt.funcs import \
-    ensure_list, \
-    get_image_suffix, \
-    get_ipython_api_version
+     ensure_list, \
+     get_image_suffix, \
+     get_ipython_api_version
 
 def get_canvas(name):
     suffix = get_image_suffix(name)

diff -r 67402286e23f65493338974440376c04fe2f5351 -r f7e49d290fdf8399da06801159dc8a6578040ef7 yt/visualization/tests/test_callbacks.py
--- a/yt/visualization/tests/test_callbacks.py
+++ b/yt/visualization/tests/test_callbacks.py
@@ -74,7 +74,7 @@
         p = SlicePlot(pf, "x", "density")
         p.annotate_velocity(factor=8, scale=0.5, scale_units="inches",
                             normalize = True)
-        p.save()
+        p.save(prefix)
 
 def test_magnetic_callback():
     with _cleanup_fname() as prefix:
@@ -91,7 +91,7 @@
         p = SlicePlot(pf, "x", "density")
         p.annotate_magnetic_field(factor=8, scale=0.5,
             scale_units="inches", normalize = True)
-        p.save()
+        p.save(prefix)
 
 def test_quiver_callback():
     with _cleanup_fname() as prefix:
@@ -113,7 +113,7 @@
             scale_units="inches", normalize = True,
             bv_x = 0.5 * u.cm / u.s,
             bv_y = 0.5 * u.cm / u.s)
-        p.save()
+        p.save(prefix)
 
 def test_contour_callback():
     with _cleanup_fname() as prefix:
@@ -134,7 +134,7 @@
             take_log=False, clim=(0.4, 0.6),
             plot_args={'lw':2.0}, label=True,
             label_args={'text-size':'x-large'})
-        p.save()
+        p.save(prefix)
 
         p = SlicePlot(pf, "x", "density")
         s2 = pf.slice(0, 0.2)
@@ -143,7 +143,7 @@
             plot_args={'lw':2.0}, label=True,
             label_args={'text-size':'x-large'},
             data_source=s2)
-        p.save()
+        p.save(prefix)
 
 def test_grids_callback():
     with _cleanup_fname() as prefix:

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/b3e3a273a30c/
Changeset:   b3e3a273a30c
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-15 07:15:55
Summary:     Added a check to make sure that the fields in the files all have the same dimensions. Removed some cruft that is no longer necessary.
Affected #:  1 file

diff -r f7e49d290fdf8399da06801159dc8a6578040ef7 -r b3e3a273a30cc11d6ebed59968c4f03074fb6ed5 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -127,7 +127,19 @@
         except:
             self.parameter_file.field_units[fname] = "dimensionless"
 
+    def _ensure_same_dims(self, hdu):
+        ds = self.parameter_file
+        conditions = [hdu.header["naxis"] != ds.primary_header["naxis"]]
+        for i in xrange(ds.naxis):
+            nax = "naxis%d" % (i+1)
+            conditions.append(hdu.header[nax] != ds.primary_header[nax])
+        if np.any(conditions):
+            return False
+        else:
+            return True
+
     def _detect_output_fields(self):
+        ds = self.parameter_file
         self.field_list = []
         self._axis_map = {}
         self._file_map = {}
@@ -139,33 +151,38 @@
         else:
             naxis4 = 1
         for i, fits_file in enumerate(self.parameter_file._fits_files):
-            for j, h in enumerate(fits_file):
-                if self.parameter_file.naxis >= 2:
+            for j, hdu in enumerate(fits_file):
+                if self._ensure_same_dims(hdu):
                     try:
-                        fname = h.header["btype"].lower()
+                        fname = hdu.header["btype"].lower()
                     except:
-                        fname = h.name.lower()
+                        fname = hdu.name.lower()
                     for k in xrange(naxis4):
                         if naxis4 > 1:
-                            fname += "_%s_%d" % (h.header["CTYPE4"], k+1)
+                            fname += "_%s_%d" % (hdu.header["CTYPE4"], k+1)
                         if self.pf.num_files > 1:
                             try:
-                                fname += "_%5.3fGHz" % (h.header["restfreq"]/1.0e9)
+                                fname += "_%5.3fGHz" % (hdu.header["restfreq"]/1.0e9)
                             except:
-                                fname += "_%5.3fGHz" % (h.header["restfrq"]/1.0e9)
+                                fname += "_%5.3fGHz" % (hdu.header["restfrq"]/1.0e9)
                             else:
                                 fname += "_field_%d" % (i)
                         self._axis_map[fname] = k
                         self._file_map[fname] = fits_file
                         self._ext_map[fname] = j
                         self._scale_map[fname] = [0.0,1.0]
-                        if "bzero" in h.header:
-                            self._scale_map[fname][0] = h.header["bzero"]
-                        if "bscale" in h.header:
-                            self._scale_map[fname][1] = h.header["bscale"]
+                        if "bzero" in hdu.header:
+                            self._scale_map[fname][0] = hdu.header["bzero"]
+                        if "bscale" in hdu.header:
+                            self._scale_map[fname][1] = hdu.header["bscale"]
                         self.field_list.append((self.dataset_type, fname))
                         mylog.info("Adding field %s to the list of fields." % (fname))
-                        self._determine_image_units(fname, h.header)
+                        self._determine_image_units(fname, hdu.header)
+                else:
+                    mylog.warning("Image block %s does not have " % (hdu.name.lower()) +
+                                  "the same dimensions as the primary and will not be " +
+                                  "available as a field.")
+
 
        # For line fields, we still read the primary field. Not sure how to extend this.
         # For now, we pick off the first field from the field list.
@@ -255,8 +272,7 @@
                  folded_axis=None,
                  folded_width=None,
                  line_database=None,
-                 suppress_astropy_warnings = True
-                 ):
+                 suppress_astropy_warnings = True):
         self.folded_axis = folded_axis
         self.folded_width = folded_width
         self._unfolded_domain_dimensions = None
@@ -280,27 +296,16 @@
             for fits_file in slave_files:
                 self._fits_files.append(ap.pyfits.open(fits_file,
                                                        memmap=True,
-                                                       do_not_scale_image_data=True))
+                                                       do_not_scale_image_data=True,
+                                                       ignore_blank=True))
+
         self.first_image = 0 # Assumed for now
         self.primary_header = self._handle[self.first_image].header
-        self.shape = self._handle[self.first_image].shape
         self.wcs = ap.pywcs.WCS(header=self.primary_header)
         self.axis_names = {}
         self.naxis = self.primary_header["naxis"]
         for i, ax in enumerate("xyz"[:self.naxis]):
-            self.axis_names[self.primary_header["CTYPE%d" % (i+1)]] = ax
-        self.file_unit = None
-        for i, unit in enumerate(self.wcs.wcs.cunit):
-            if unit in mpc_conversion.keys():
-                self.file_unit = unit.name
-                idx = i
-                break
-        self.new_unit = None
-        self.pixel_scale = 1.0
-        if self.file_unit in mpc_conversion:
-            self.new_unit = self.file_unit
-            self.pixel_scale = self.wcs.wcs.cdelt[idx]
-
+            self.axis_names[self.primary_header["ctype%d" % (i+1)]] = ax
         self.refine_by = 2
 
         Dataset.__init__(self, filename, dataset_type)
@@ -310,14 +315,20 @@
         """
        Generates the conversions to various physical units based on the parameter file
         """
-        if self.new_unit is not None:
-            length_factor = self.pixel_scale
-            length_unit = str(self.new_unit)
-        else:
+        file_unit = None
+        for i, unit in enumerate(self.wcs.wcs.cunit):
+            if unit in mpc_conversion.keys():
+                file_unit = unit.name
+                idx = i
+                break
+        if file_unit is None:
             self.no_cgs_equiv_length = True
             mylog.warning("No length conversion provided. Assuming 1 = 1 cm.")
             length_factor = 1.0
             length_unit = "cm"
+        else:
+            length_factor = self.wcs.wcs.cdelt[idx]
+            length_unit = str(file_unit)
         self.length_unit = self.quan(length_factor,length_unit)
         self.mass_unit = self.quan(1.0, "g")
         self.time_unit = self.quan(1.0, "s")
@@ -326,20 +337,18 @@
     def _parse_parameter_file(self):
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        for k, v in self.primary_header.items():
-            self.parameters[k] = v
 
         # Determine dimensionality
 
-        self.dimensionality = self.primary_header["naxis"]
+        self.dimensionality = self.naxis
         self.geometry = "cartesian"
 
         # Sometimes a FITS file has a 4D datacube, in which case
         # we take the 4th axis and assume it consists of different fields.
         if self.dimensionality == 4: self.dimensionality = 3
 
-        dims = self._handle[self.first_image].shape[::-1][:self.dimensionality]
-        self.domain_dimensions = np.array(dims)
+        dims = [self.primary_header["naxis%d" % (i+1)] for i in xrange(self.naxis)]
+        self.domain_dimensions = np.array(dims)[:self.dimensionality]
         if self.dimensionality == 2:
             self.domain_dimensions = np.append(self.domain_dimensions,
                                                [int(1)])


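Condensed, the _ensure_same_dims check added in this changeset reads:
an image block is only exposed as a field when its naxis and every
naxisN match the primary header. A standalone sketch (same_dims is an
illustrative name; header and primary are header-like mappings):

def same_dims(header, primary, naxis):
    if header["naxis"] != primary["naxis"]:
        return False
    return all(header["naxis%d" % (i + 1)] == primary["naxis%d" % (i + 1)]
               for i in range(naxis))
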
https://bitbucket.org/yt_analysis/yt/commits/22b9340109c7/
Changeset:   22b9340109c7
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-15 18:50:21
Summary:     Bugfix
Affected #:  1 file

diff -r b3e3a273a30cc11d6ebed59968c4f03074fb6ed5 -r 22b9340109c768c6650d52133da0c906d6c85c43 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -387,7 +387,7 @@
         self.magnetic_unit.convert_to_units("gauss")
 
     def set_code_units(self):
-        super(self, AthenaDataset).set_code_units()
+        super(AthenaDataset, self).set_code_units()
         self.unit_registry.modify("code_magnetic", self.magnetic_unit)
 
     def _parse_parameter_file(self):


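The one-line fix above corrects the argument order of super(): in
Python 2 the bound form is super(Class, instance), not the reverse. A
minimal illustration (class names here are placeholders):

class Base(object):
    def set_code_units(self):
        pass

class AthenaLike(Base):
    def set_code_units(self):
        # super(AthenaLike, self) is correct; the reversed
        # super(self, AthenaLike) raises TypeError at call time.
        super(AthenaLike, self).set_code_units()
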
https://bitbucket.org/yt_analysis/yt/commits/90367454b20e/
Changeset:   90367454b20e
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-15 23:45:25
Summary:     Only mark this for no specified length units
Affected #:  1 file

diff -r 22b9340109c768c6650d52133da0c906d6c85c43 -r 90367454b20e2e5c426192c7edd86ae919504b14 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -374,7 +374,7 @@
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
             val = self.specified_parameters.get("%s_unit" % unit, None)
             if val is None:
-                self.no_cgs_equiv_length = True
+                if unit == "length": self.no_cgs_equiv_length = True
                 mylog.warning("No %s conversion to cgs provided.  " +
                               "Assuming 1.0 = 1.0 %s", unit, cgs)
                 val = 1.0


https://bitbucket.org/yt_analysis/yt/commits/24e77256e562/
Changeset:   24e77256e562
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-16 11:38:39
Summary:     Bugfix
Affected #:  1 file

diff -r 90367454b20e2e5c426192c7edd86ae919504b14 -r 24e77256e5629419ccfe9307423a0a71fa2c8728 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -50,7 +50,7 @@
                 fname = self.pf.field_list[0][1]
             f = self.pf.index._file_map[fname]
             ds = f[self.pf.index._ext_map[fname]]
-            bzero, bscale = self._scale_map[fname]
+            bzero, bscale = self.pf.index._scale_map[fname]
             fname = tmp_fname
             ind = 0
             for chunk in chunks:


https://bitbucket.org/yt_analysis/yt/commits/cd946b5b14aa/
Changeset:   cd946b5b14aa
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-16 11:40:48
Summary:     Merge
Affected #:  13 files

diff -r 24e77256e5629419ccfe9307423a0a71fa2c8728 -r cd946b5b14aa2404ecf7b0e2ef780312efadfff7 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -12,7 +12,7 @@
 computation engine, Matplotlib for some visualization tasks and Mercurial for
 version control.  Because installation of all of these interlocking parts can 
 be time-consuming, yt provides an installation script which downloads and builds
-a fully-isolated Python + Numpy + Matplotlib + HDF5 + Mercurial installation.  
+a fully-isolated Python + NumPy + Matplotlib + HDF5 + Mercurial installation.  
 yt supports Linux and OSX deployment, with the possibility of deployment on 
 other Unix-like systems (XSEDE resources, clusters, etc.).  Windows is not 
 supported.
@@ -86,16 +86,41 @@
 Alternative Installation Methods
 --------------------------------
 
-If you want to forego the use of the install script, you need to make sure 
-you have yt's dependencies installed on your system.  These include: a C compiler, 
-``HDF5``, ``Freetype``, ``libpng``, ``python``, ``cython``, ``numpy``, and 
-``matplotlib``.  From here, you can use ``pip`` (which comes with ``Python``)
-to install yt as:
+If you want to forego the use of the install script, you need to make sure you
+have yt's dependencies installed on your system.  These include: a C compiler,
+``HDF5``, ``Freetype``, ``libpng``, ``python``, ``cython``, ``NumPy``, and
+``matplotlib``.  From here, you can use ``pip`` (which comes with ``Python``) to
+install yt as:
 
 .. code-block:: bash
 
   $ pip install yt
 
+The source code for yt may be found at the Bitbucket project site and can also
+be used for installation. If you prefer to install from source rather than rely
+on the install script, you will need ``mercurial`` to clone the official repo:
+
+.. code-block:: bash
+
+  $ hg clone https://bitbucket.org/yt_analysis/yt
+  $ cd yt
+  $ hg update yt
+  $ python setup.py install --user
+
+This will install yt into ``$HOME/.local/lib64/python2.7/site-packages``.
+Please refer to the ``setuptools`` documentation for additional options.
+
+Provided that the required dependencies are in a predictable location, yt should
+be able to find them automatically. However, you can manually specify the
+prefixes used for the installation of ``HDF5``, ``Freetype`` and ``libpng`` by
+creating ``hdf5.cfg``, ``freetype.cfg`` or ``png.cfg``, or by setting the
+``HDF5_DIR``, ``FTYPE_DIR`` or ``PNG_DIR`` environment variables, respectively,
+e.g.
+
+.. code-block:: bash
+
+  $ echo '/usr/local' > hdf5.cfg
+  $ export FTYPE_DIR=/opt/freetype
+
 If you choose this installation method, you do not need to run the activation
 script as it is unnecessary.
 

diff -r 24e77256e5629419ccfe9307423a0a71fa2c8728 -r cd946b5b14aa2404ecf7b0e2ef780312efadfff7 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -709,8 +709,6 @@
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ObjectIterator
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelAnalysisInterface
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelObjectIterator
-   ~yt.analysis_modules.hierarchy_subset.hierarchy_subset.ConstructedRootGrid
-   ~yt.analysis_modules.hierarchy_subset.hierarchy_subset.ExtractedHierarchy
 
 
 Testing Infrastructure

diff -r 24e77256e5629419ccfe9307423a0a71fa2c8728 -r cd946b5b14aa2404ecf7b0e2ef780312efadfff7 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -63,12 +63,6 @@
     HaloProfiler, \
     FakeProfile
 
-from .hierarchy_subset.api import \
-    ConstructedRootGrid, \
-    AMRExtractedGridProxy, \
-    ExtractedHierarchy, \
-    ExtractedParameterFile
-
 from .level_sets.api import \
     identify_contours, \
     Clump, \

diff -r 24e77256e5629419ccfe9307423a0a71fa2c8728 -r cd946b5b14aa2404ecf7b0e2ef780312efadfff7 yt/analysis_modules/hierarchy_subset/api.py
--- a/yt/analysis_modules/hierarchy_subset/api.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-API for hierarchy_subset
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .hierarchy_subset import \
-    ConstructedRootGrid, \
-    AMRExtractedGridProxy, \
-    ExtractedHierarchy, \
-    ExtractedParameterFile

diff -r 24e77256e5629419ccfe9307423a0a71fa2c8728 -r cd946b5b14aa2404ecf7b0e2ef780312efadfff7 yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
--- a/yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
+++ /dev/null
@@ -1,339 +0,0 @@
-"""
-A means of extracting a subset of the index
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import h5py, os.path
-import numpy as np
-
-from yt.funcs import *
-from yt.data_objects.data_containers import YTFieldData
-from yt.data_objects.grid_patch import \
-    AMRGridPatch
-from yt.data_objects.static_output import \
-    Dataset
-from yt.geometry.grid_geometry_handler import \
-    GridIndex
-
-class DummyHierarchy(object):
-    pass
-
-class ConstructedRootGrid(AMRGridPatch):
-    __slots__ = ['base_grid', 'id', 'base_pf']
-    _id_offset = 1
-    def __init__(self, base_pf, pf, index, level, left_edge, right_edge):
-        """
-        This is a fake root grid, constructed by creating a
-        :class:`yt.data_objects.api.CoveringGridBase` at a given *level* between
-        *left_edge* and *right_edge*.
-        """
-        self.pf = pf
-        self.base_pf = base_pf
-        self.field_parameters = {}
-        self.NumberOfParticles = 0
-        self.id = 1
-        self.index = index
-        self._child_mask = self._child_indices = self._child_index_mask = None
-        self.Level = level
-        self.LeftEdge = left_edge
-        self.RightEdge = right_edge
-        self.start_index = np.min([grid.get_global_startindex() for grid in
-                             base_pf.h.select_grids(level)], axis=0).astype('int64')
-        self.dds = base_pf.h.select_grids(level)[0].dds.copy()
-        dims = (self.RightEdge-self.LeftEdge)/self.dds
-        self.ActiveDimensions = dims
-        print "Constructing base grid of size %s" % (self.ActiveDimensions)
-        self.base_grid = base_pf.smoothed_covering_grid(level, self.LeftEdge,
-                        self.RightEdge, dims=dims)
-        self.base_grid.Level = self.base_grid.level
-        self.field_data = YTFieldData()
-        #self._calculate_child_masks()
-        self.Parent = None
-        self.Children = []
-
-    def get_vertex_centered_data(self, field, smoothed=True):
-        vc = self.base_pf.smoothed_covering_grid(self.base_grid.Level,
-                self.base_grid.LeftEdge - self.base_grid.dds*0.5,
-                self.base_grid.RightEdge + self.base_grid.dds*0.5,
-                dims = self.ActiveDimensions + 1)
-        return vc[field]
-
-class AMRExtractedGridProxy(AMRGridPatch):
-    __slots__ = ['base_grid']
-    _id_offset = 1
-    def __init__(self, grid_id, base_grid, index):
-        # We make a little birdhouse in our soul for the base_grid
-        # (they're the only bee in our bonnet!)
-        self.base_grid = base_grid
-        AMRGridPatch.__init__(self, grid_id, filename = None, index=index)
-        self.Parent = None
-        self.Children = []
-        self.Level = -1
-
-    def get_vertex_centered_data(self, *args, **kwargs):
-        return self.base_grid.get_vertex_centered_data(*args, **kwargs)
-
-class OldExtractedHierarchy(object):
-
-    def __init__(self, pf, min_level, max_level = -1, offset = None,
-                 always_copy=False):
-        """
-        This is a class that extracts a index from another index,
-        filling in regions as necessary.  It accepts a parameter file (*pf*), a
-        *min_level*, a *max_level*, and alternately an *offset*.  This class is
-        typically or exclusively used to extract for the purposes of visualization.
-        """
-        self.pf = pf
-        self.always_copy = always_copy
-        self.min_level = min_level
-        self.int_offset = np.min([grid.get_global_startindex() for grid in
-                             pf.h.select_grids(min_level)], axis=0).astype('float64')
-        min_left = np.min([grid.LeftEdge for grid in
-                           pf.h.select_grids(min_level)], axis=0).astype('float64')
-        max_right = np.max([grid.RightEdge for grid in 
-                                   pf.h.select_grids(min_level)], axis=0).astype('float64')
-        if offset is None: offset = (max_right + min_left)/2.0
-        self.left_edge_offset = offset
-        self.mult_factor = 2**min_level
-        self.min_left_edge = self._convert_coords(min_left)
-        self.max_right_edge = self._convert_coords(max_right)
-        if max_level == -1: max_level = pf.h.max_level
-        self.max_level = min(max_level, pf.h.max_level)
-        self.final_level = self.max_level - self.min_level
-        if len(self.pf.h.select_grids(self.min_level)) > 0:
-            self._base_grid = ConstructedRootGrid(self.pf, self.min_level,
-                               min_left, max_right)
-        else: self._base_grid = None
-        
-    def select_level(self, level):
-        if level == 0 and self._base_grid is not None:
-            return [self._base_grid]
-        return self.pf.h.select_grids(self.min_level + level)
-
-    def export_output(self, afile, n, field):
-        # I prefer dict access, but tables doesn't.
-        # But h5py does!
-        time_node = afile.create_group("/time-%s" % n)
-        time_node.attrs['time'] = self.pf.current_time
-        time_node.attrs['numLevels'] = self.pf.h.max_level+1-self.min_level
-        # Can take a while, so let's get a progressbar
-        self._export_all_levels(afile, time_node, field)
-
-    def _export_all_levels(self, afile, time_node, field):
-        pbar = yt.funcs.get_pbar("Exporting levels", self.final_level+1)
-        for i,grid_set in enumerate(self.get_levels()):
-            pbar.update(i)
-            self.export_level(afile, time_node, i, field, grid_set)
-        pbar.finish()
-
-    def export_level(self, afile, time_node, level, field, grids = None):
-        level_node = afile.create_group("%s/level-%s" % (time_node,level))
-        # Grid objects on this level...
-        if grids is None: grids = self.pf.h.select_grids(level+self.min_level)
-        level_node.attrs['delta'] = grids[0].dds*self.mult_factor
-        level_node.attrs['relativeRefinementFactor'] = np.array([2]*3, dtype='int32')
-        level_node.attrs['numGrids'] = len(grids)
-        for i,g in enumerate(grids):
-            self.export_grid(afile, level_node, g, i, field)
-
-    def _convert_grid(self, grid):
-        int_origin = (grid.get_global_startindex() \
-                    - self.int_offset*2**(grid.Level-self.min_level)).astype('int64')
-        level_int_origin = (grid.LeftEdge - self.left_edge_offset)/grid.dds
-        origin = self._convert_coords(grid.LeftEdge)
-        dds = grid.dds * self.mult_factor
-        return int_origin, level_int_origin, origin, dds
-
-    def export_grid(self, afile, level_node, grid, i, field):
-        grid_node = afile.create_group("%s/grid-%s" % (level_node,i))
-        int_origin, lint, origin, dds = self._convert_grid(grid)
-        grid_node.attrs['integerOrigin'] = int_origin
-        grid_node.attrs['origin'] = origin
-        grid_node.attrs['ghostzoneFlags'] = np.zeros(6, dtype='int32')
-        grid_node.attrs['numGhostzones'] = np.zeros(3, dtype='int32')
-        grid_node.attrs['dims'] = grid.ActiveDimensions[::-1].astype('int32')
-        if not self.always_copy and self.pf.h.dataset_type == 6 \
-           and field in self.pf.field_list:
-            if grid.index.dataset_type == -1: # constructed grid
-                # if we can get conversion in amira we won't need to do this
-                ff = grid[field].astype('float32')
-                ff /= self.pf.conversion_factors.get(field, 1.0)
-                afile.create_dataset("%s/grid-data" % grid_node, data=ff.swapaxes(0,2))
-            else:
-                tfn = os.path.abspath(afile.filename)
-                gfn = os.path.abspath(grid.filename)
-                fpn = os.path.commonprefix([tfn, grid.filename])
-                fn = grid.filename[len(os.path.commonprefix([tfn, grid.filename])):]
-                grid_node.attrs['referenceFileName'] = fn
-                grid_node.attrs['referenceDataPath'] = \
-                    "/Grid%08i/%s" % (grid.id, field)
-        else:
-            # Export our array
-            afile.create_dataset("%s/grid-data" % grid_node, 
-                                 data = grid[field].astype('float32').swapaxes(0,2))
-
-    def _convert_coords(self, val):
-        return (val - self.left_edge_offset)*self.mult_factor
-
-class ExtractedHierarchy(GridIndex):
-
-    grid = AMRExtractedGridProxy
-
-    def __init__(self, pf, dataset_type):
-        # First we set up our translation between original and extracted
-        self.dataset_type = dataset_type
-        self.min_level = pf.min_level
-        self.int_offset = np.min([grid.get_global_startindex() for grid in
-                           pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
-        min_left = np.min([grid.LeftEdge for grid in
-                           pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
-        max_right = np.max([grid.RightEdge for grid in 
-                           pf.base_pf.h.select_grids(pf.min_level)], axis=0).astype('float64')
-        level_dx = pf.base_pf.h.select_grids(pf.min_level)[0].dds[0]
-        dims = ((max_right-min_left)/level_dx)
-        max_right += (dims.max() - dims) * level_dx
-        offset = pf.offset
-        if offset is None: offset = min_left
-        self.left_edge_offset = offset
-        pf.offset = offset
-        self.mult_factor = 2**pf.min_level
-        self.min_left_edge = self._convert_coords(min_left)
-        self.max_right_edge = self._convert_coords(max_right)
-        self.min_left, self.max_right = min_left, max_right
-        max_level = pf.max_level
-        if max_level == -1: max_level = pf.base_pf.h.max_level
-        self.max_level = min(max_level, pf.base_pf.h.max_level)
-        self.final_level = self.max_level - self.min_level
-
-        # Now we utilize the existing machinery for generating the appropriate
-        # arrays of grids, etc etc.
-        self.base_pf = pf.base_pf
-        GridIndex.__init__(self, pf, dataset_type)
-
-        # Now a few cleanups
-        self.pf.override["DomainRightEdge"] = self.max_right_edge
-        self.pf.override["DomainLeftEdge"] = self.min_left_edge
-        for u,v in self.base_pf.units.items():
-            self.pf.override[u] = v / self.mult_factor
-        self.pf.override['unitary'] = 1.0 / (self.pf.domain_right_edge -
-                                             self.pf.domain_left_edge).max()
-
-    def _count_grids(self):
-        self.num_grids = 1 + sum( ( # 1 is the base grid
-            len(self.base_pf.h.select_grids(level)) 
-                for level in range(self.min_level+1, self.max_level)) )
-
-    def _parse_index(self):
-        # Here we need to set up the grid info, which for the Enzo index
-        # is done like:
-        # self.grid_dimensions.flat[:] = ei
-        # self.grid_dimensions -= np.array(si, self.float_type)
-        # self.grid_dimensions += 1
-        # self.grid_left_edge.flat[:] = LE
-        # self.grid_right_edge.flat[:] = RE
-        # self.grid_particle_count.flat[:] = np
-        # self.grids = np.array(self.grids, dtype='object')
-        #
-        # For now, we make the presupposition that all of our grids are
-        # strictly nested and we are not doing any cuts.  However, we do
-        # construct a root grid!
-        root_level_grids = self.base_pf.h.select_grids(self.min_level)
-        base_grid = ConstructedRootGrid(self.base_pf, self.pf, self,
-                        self.min_level, self.min_left, self.max_right)
-        self._fill_grid_arrays(base_grid, 0)
-        grids = [base_grid]
-        # We need to ensure we have the correct parentage relationships
-        # However, we want the parent/child to be to the new proxy grids
-        # so we need to map between the old ids and the new ids
-        self.id_map = {}
-        grid_id = 2 # id 0 is the base grid
-        for level in range(self.min_level+1, self.max_level):
-            for grid in self.base_pf.h.select_grids(level):
-                # This next little bit will have to be changed if we ever move to
-                # not-strictly-nested AMR hierarchies
-                parent = self.id_map.get(grid.Parent.id, base_grid)
-                grids.append(self.grid(grid_id, grid, self))
-                parent.Children.append(grids[-1])
-                grids[-1].Parent = parent
-                self.id_map[grid.id] = grids[-1]
-                # Now we fill in our arrays of values -- note that we
-                # are filling in values from the base grids, not the newly
-                # extracted grids.  We will perform bulk changes after we
-                # finish.
-                self._fill_grid_arrays(grid, grid_id-1)
-                grid_id += 1
-
-        self.grid_left_edge = self._convert_coords(self.grid_left_edge)
-        self.grid_right_edge = self._convert_coords(self.grid_right_edge)
-        self.grids = np.array(grids, dtype='object')
-
-    def _fill_grid_arrays(self, grid, i):
-        # This just fills in the grid arrays for a single grid --
-        # note that we presuppose here that we are being handed a grid object
-        # that has these defined; this means we are being handed the *base*
-        # grid, not the newly extracted one
-        self.grid_dimensions[i,:] = grid.ActiveDimensions
-        self.grid_left_edge[i,:] = grid.LeftEdge
-        self.grid_right_edge[i,:] = grid.RightEdge
-        self.grid_particle_count[i] = grid.NumberOfParticles
-
-    def _populate_grid_objects(self):
-        for grid in self.grids:
-            grid.Level = grid.base_grid.Level - self.pf.min_level
-            grid._prepare_grid()
-            grid._setup_dx()
-            grid.start_index = None
-        self.max_level -= self.pf.min_level
-        print "New max level:", self.max_level
-
-    def _convert_coords(self, val):
-        return (val - self.left_edge_offset)*self.mult_factor
-
-    def _detect_output_fields(self):
-        self.field_list = self.base_pf.field_list[:]
-
-    def _setup_unknown_fields(self):
-        pass # Done in the base_h
-
-    def _setup_derived_fields(self):
-        self.derived_field_list = self.base_pf.derived_field_list[:]
-
-    def _initialize_data_storage(self):
-        self._data_file = None
-
-class ExtractedParameterFile(Dataset):
-    _index_class = ExtractedHierarchy
-    dataset_type = "extracted"
-    
-    def __init__(self, base_pf, min_level, max_level = -1, offset = None):
-        self.base_pf = base_pf
-        self.min_level = min_level
-        self.max_level = max_level
-        self.offset = offset
-        self.override = {}
-
-    def __repr__(self):
-        return "extracted_%s" % self.base_pf
-
-    def __getattr__(self, name):
-        # This won't get called if 'name' is found already
-        # and we'd like it to raise AttributeError if it's not anywhere
-        if name in ['h', 'index']:
-            return Dataset._get_index(self)
-        return getattr(self.base_pf, name)
-
-    def __getitem__(self, key):
-        if key not in self.override:
-            return self.base_pf[key]
-        return self.override[key]
-

diff -r 24e77256e5629419ccfe9307423a0a71fa2c8728 -r cd946b5b14aa2404ecf7b0e2ef780312efadfff7 yt/analysis_modules/hierarchy_subset/setup.py
--- a/yt/analysis_modules/hierarchy_subset/setup.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('hierarchy_subset', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r 24e77256e5629419ccfe9307423a0a71fa2c8728 -r cd946b5b14aa2404ecf7b0e2ef780312efadfff7 yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -13,7 +13,6 @@
     config.add_subpackage("halo_mass_function")
     config.add_subpackage("halo_merger_tree")
     config.add_subpackage("halo_profiler")
-    config.add_subpackage("hierarchy_subset")
     config.add_subpackage("level_sets")
     config.add_subpackage("radial_column_density")
     config.add_subpackage("spectral_integrator")

diff -r 24e77256e5629419ccfe9307423a0a71fa2c8728 -r cd946b5b14aa2404ecf7b0e2ef780312efadfff7 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -19,7 +19,6 @@
 #-----------------------------------------------------------------------------
 
 from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
-from yt.utilities.fits_image import FITSImageBuffer
 from yt.fields.local_fields import add_field, derived_field
 from yt.data_objects.image_array import ImageArray
 from yt.funcs import fix_axis, mylog, iterable, get_pbar
@@ -321,6 +320,7 @@
             deltas *= sky_scale
             deltas[0] *= -1.
 
+        from yt.utilities.fits_image import FITSImageBuffer
         fib = FITSImageBuffer(self.data, fields=self.data.keys(),
                               center=center, units=units,
                               scale=deltas)

diff -r 24e77256e5629419ccfe9307423a0a71fa2c8728 -r cd946b5b14aa2404ecf7b0e2ef780312efadfff7 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -391,12 +391,14 @@
         Dataset.__init__(self, output_dir, dataset_type)
 
         # These are still used in a few places.
-        self.parameters["HydroMethod"] = 'boxlib'
+        if not "HydroMethod" in self.parameters.keys():
+            self.parameters["HydroMethod"] = 'boxlib'
         self.parameters["Time"] = 1. # default unit is 1...
         self.parameters["EOSType"] = -1 # default
         self.parameters["gamma"] = self.parameters.get(
             "materials.gamma", 1.6667)
 
+
     def _localize_check(self, fn):
         # If the file exists, use it.  If not, set it to None.
         root_dir = os.path.dirname(self.output_dir)
@@ -768,13 +770,21 @@
         line = ""
         with open(jobinfo_filename, "r") as f:
             while not line.startswith(" [*] indicates overridden default"):
+                # get the code git hashes
+                if "git hash" in line:
+                    # line format: codename git hash:  the-hash
+                    fields = line.split(":")
+                    self.parameters[fields[0]] = fields[1].strip()
                 line = f.next()
+            # get the runtime parameters
             for line in f:
                 p, v = (_.strip() for _ in line[4:].split("="))
                 if len(v) == 0:
                     self.parameters[p] = ""
                 else:
                     self.parameters[p] = _guess_pcast(v)
+            # hydro method is set by the base class -- override it here
+            self.parameters["HydroMethod"] = "Maestro"
 
         # set the periodicity based on the integer BC runtime parameters
         periodicity = [True, True, True]

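The job_info parsing added above pulls two things out of the header
block: per-code git hashes (from lines of the form
"codename git hash:  the-hash") and the runtime parameters that follow.
A standalone sketch of the hash extraction (parse_git_hashes is an
illustrative name, and unlike the hunk above it strips the key):

def parse_git_hashes(lines):
    params = {}
    for line in lines:
        if "git hash" in line:
            # line format: "codename git hash:  the-hash"
            key, _, value = line.partition(":")
            params[key.strip()] = value.strip()
    return params
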
diff -r 24e77256e5629419ccfe9307423a0a71fa2c8728 -r cd946b5b14aa2404ecf7b0e2ef780312efadfff7 yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -118,10 +118,10 @@
 
     known_other_fields = (
         ("density", ("g/cm**3", ["density"], None)),
-        ("x_vel", ("cm/s", ["velocity_x"], None)),
-        ("y_vel", ("cm/s", ["velocity_y"], None)),
-        ("z_vel", ("cm/s", ["velocity_z"], None)),
-        ("magvel", ("cm/s", ["velocity_magnitude"], None)),
+        ("x_vel", ("cm/s", ["velocity_x"], r"\tilde{u}")),
+        ("y_vel", ("cm/s", ["velocity_y"], r"\tilde{v}")),
+        ("z_vel", ("cm/s", ["velocity_z"], r"\tilde{w}")),
+        ("magvel", ("cm/s", ["velocity_magnitude"], r"|\tilde{U} + w_0 e_r|")),
         ("tfromp", ("K", [], None)),
         ("tfromh", ("K", [], None)),
         ("Machnumber", ("", ["mach_number"], None)),

diff -r 24e77256e5629419ccfe9307423a0a71fa2c8728 -r cd946b5b14aa2404ecf7b0e2ef780312efadfff7 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -69,6 +69,12 @@
     def __repr__(self):
         return "RAMSESDomainFile: %i" % self.domain_id
 
+    def _is_hydro(self):
+        '''
+        Does the output include hydro?
+        '''
+        return os.path.exists(self.hydro_fn)
+
     @property
     def level_count(self):
         if self._level_count is not None: return self._level_count
@@ -112,6 +118,9 @@
         return self._hydro_offset
 
     def _read_hydro_header(self):
+        # If no hydro file is found, return
+        if not self._is_hydro():
+            return
         if self.nvar > 0: return self.nvar
        # Read the number of hydro variables
         f = open(self.hydro_fn, "rb")
@@ -359,6 +368,8 @@
         '''
         # TODO: SUPPORT RT - THIS REQUIRES IMPLEMENTING A NEW FILE READER!
         # Find nvar
+        
+
         # TODO: copy/pasted from DomainFile; needs refactoring!
         num = os.path.basename(self._pf.parameter_filename).split("."
                 )[0].split("_")[1]
@@ -369,19 +380,21 @@
             num, testdomain)
         hydro_fn = basename % "hydro"
         # Do we have a hydro file?
-        if hydro_fn:
-            # Read the number of hydro  variables
-            f = open(hydro_fn, "rb")
-            hydro_header = ( ('ncpu', 1, 'i'),
-                             ('nvar', 1, 'i'),
-                             ('ndim', 1, 'i'),
-                             ('nlevelmax', 1, 'i'),
-                             ('nboundary', 1, 'i'),
-                             ('gamma', 1, 'd')
-                            )
-            hvals = fpu.read_attrs(f, hydro_header)
-            self.pf.gamma = hvals['gamma']
-            nvar = hvals['nvar']
+        if not os.path.exists(hydro_fn):
+            self.fluid_field_list = []
+            return
+        # Read the number of hydro variables
+        f = open(hydro_fn, "rb")
+        hydro_header = ( ('ncpu', 1, 'i'),
+                         ('nvar', 1, 'i'),
+                         ('ndim', 1, 'i'),
+                         ('nlevelmax', 1, 'i'),
+                         ('nboundary', 1, 'i'),
+                         ('gamma', 1, 'd')
+                         )
+        hvals = fpu.read_attrs(f, hydro_header)
+        self.pf.gamma = hvals['gamma']
+        nvar = hvals['nvar']
         # OK, we got NVAR, now set up the arrays depending on what NVAR is
         # Allow some wiggle room for users to add too many variables
         if nvar < 5:

diff -r 24e77256e5629419ccfe9307423a0a71fa2c8728 -r cd946b5b14aa2404ecf7b0e2ef780312efadfff7 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -523,66 +523,6 @@
     if good_u is None : good_u = 'cm'
     return good_u
 
-class UnitBoundaryCallback(PlotCallback):
-    """
-    Add on a plot indicating where *factor*s of *unit* are shown.
-    Optionally *text_annotate* on the *text_which*-indexed box on display.
-    """
-    _type_name = "units"
-    def __init__(self, unit = "au", factor=4, text_annotate=True, text_which=-2):
-        PlotCallback.__init__(self)
-        self.unit = unit
-        self.factor = factor
-        self.text_annotate = text_annotate
-        self.text_which = -2
-
-    def __call__(self, plot):
-        x0, x1 = plot.xlim
-        y0, y1 = plot.ylim
-        l, b, width, height = mpl_get_bounds(plot._axes.bbox)
-        xi = x_dict[plot.data.axis]
-        yi = y_dict[plot.data.axis]
-        dx = plot.image._A.shape[0] / (x1-x0)
-        dy = plot.image._A.shape[1] / (y1-y0)
-        center = plot.data.center
-        min_dx = plot.data['pdx'].min()
-        max_dx = plot.data['pdx'].max()
-        w_min_x = 250.0 * min_dx
-        w_max_x = 1.0 / self.factor
-        min_exp_x = np.ceil(np.log10(w_min_x*plot.data.pf[self.unit])
-                           /np.log10(self.factor))
-        max_exp_x = np.floor(np.log10(w_max_x*plot.data.pf[self.unit])
-                            /np.log10(self.factor))
-        n_x = max_exp_x - min_exp_x + 1
-        widths = np.logspace(min_exp_x, max_exp_x, num = n_x, base=self.factor)
-        widths /= plot.data.pf[self.unit]
-        left_edge_px = (center[xi] - widths/2.0 - x0)*dx
-        left_edge_py = (center[yi] - widths/2.0 - y0)*dy
-        right_edge_px = (center[xi] + widths/2.0 - x0)*dx
-        right_edge_py = (center[yi] + widths/2.0 - y0)*dy
-        verts = np.array(
-                [(left_edge_px, left_edge_px, right_edge_px, right_edge_px),
-                 (left_edge_py, right_edge_py, right_edge_py, left_edge_py)])
-        visible =  ( right_edge_px - left_edge_px > 25 ) & \
-                   ( right_edge_px - left_edge_px > 25 ) & \
-                   ( (right_edge_px < width) & (left_edge_px > 0) ) & \
-                   ( (right_edge_py < height) & (left_edge_py > 0) )
-        verts=verts.transpose()[visible,:,:]
-        grid_collection = matplotlib.collections.PolyCollection(
-                verts, facecolors="none",
-                       edgecolors=(0.0,0.0,0.0,1.0),
-                       linewidths=2.5)
-        plot._axes.hold(True)
-        plot._axes.add_collection(grid_collection)
-        if self.text_annotate:
-            ti = max(self.text_which, -1*len(widths[visible]))
-            if ti < len(widths[visible]): 
-                w = widths[visible][ti]
-                good_u = get_smallest_appropriate_unit(w, plot.data.pf)
-                w *= plot.data.pf[good_u]
-                plot._axes.annotate("%0.3e %s" % (w,good_u), verts[ti,1,:]+5)
-        plot._axes.hold(False)
-
 class LinePlotCallback(PlotCallback):
     """
     annotate_line(x, y, plot_args = None)
@@ -980,73 +920,6 @@
             plot._axes.hold(False)
 
 
-class CoordAxesCallback(PlotCallback):
-    """
-    Creates x and y axes for a VMPlot. In the future, it will
-    attempt to guess the proper units to use.
-    """
-    _type_name = "coord_axes"
-    def __init__(self,unit=None,coords=False):
-        PlotCallback.__init__(self)
-        self.unit = unit
-        self.coords = coords
-
-    def __call__(self,plot):
-        # 1. find out what the domain is
-        # 2. pick a unit for it
-        # 3. run self._axes.set_xlabel & self._axes.set_ylabel to actually lay things down.
-        # 4. adjust extent information to make sure labels are visable.
-
-        # put the plot into data coordinates
-        nx,ny = plot.image._A.shape
-        dx = (plot.xlim[1] - plot.xlim[0])/nx
-        dy = (plot.ylim[1] - plot.ylim[0])/ny
-
-        unit_conversion = plot.pf[plot.im["Unit"]]
-        aspect = (plot.xlim[1]-plot.xlim[0])/(plot.ylim[1]-plot.ylim[0])
-
-        print "aspect ratio = ", aspect
-
-        # if coords is False, label axes relative to the center of the
-        # display. if coords is True, label axes with the absolute
-        # coordinates of the region.
-        xcenter = 0.
-        ycenter = 0.
-        if not self.coords:
-            center = plot.data.center
-            if plot.data.axis == 0:
-                xcenter = center[1]
-                ycenter = center[2]
-            elif plot.data.axis == 1:
-                xcenter = center[0]
-                ycenter = center[2]
-            else:
-                xcenter = center[0]
-                ycenter = center[1]
-
-
-            xformat_function = lambda a,b: '%7.1e' %((a*dx + plot.xlim[0] - xcenter)*unit_conversion)
-            yformat_function = lambda a,b: '%7.1e' %((a*dy + plot.ylim[0] - ycenter)*unit_conversion)
-        else:
-            xformat_function = lambda a,b: '%7.1e' %((a*dx + plot.xlim[0])*unit_conversion)
-            yformat_function = lambda a,b: '%7.1e' %((a*dy + plot.ylim[0])*unit_conversion)
-            
-        xticker = matplotlib.ticker.FuncFormatter(xformat_function)
-        yticker = matplotlib.ticker.FuncFormatter(yformat_function)
-        plot._axes.xaxis.set_major_formatter(xticker)
-        plot._axes.yaxis.set_major_formatter(yticker)
-        
-        xlabel = '%s (%s)' % (axis_labels[plot.data.axis][0],plot.im["Unit"])
-        ylabel = '%s (%s)' % (axis_labels[plot.data.axis][1],plot.im["Unit"])
-        xticksize = nx/4.
-        yticksize = ny/4.
-        plot._axes.xaxis.set_major_locator(matplotlib.ticker.FixedLocator([i*xticksize for i in range(0,5)]))
-        plot._axes.yaxis.set_major_locator(matplotlib.ticker.FixedLocator([i*yticksize for i in range(0,5)]))
-        
-        plot._axes.set_xlabel(xlabel,visible=True)
-        plot._axes.set_ylabel(ylabel,visible=True)
-        plot._figure.subplots_adjust(left=0.1,right=0.8)
-
 class TextLabelCallback(PlotCallback):
     """
     annotate_text(pos, text, data_coords=False, text_args = None)


https://bitbucket.org/yt_analysis/yt/commits/a6bd3cac6e9f/
Changeset:   a6bd3cac6e9f
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-16 21:36:42
Summary:     First pass at updating the docs for FITS data
Affected #:  1 file

diff -r cd946b5b14aa2404ecf7b0e2ef780312efadfff7 -r a6bd3cac6e9f65e75444cbbfca039b515180d72f doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -459,6 +459,132 @@
 
    pf = load("/u/cmoody3/data/art_snapshots/SFG1/10MpcBox_csf512_a0.460.d")
 
+.. _loading-fits-data:
+
+FITS Data
+---------
+
+FITS data is *mostly* supported and cared for by John ZuHone. In order to
+read FITS data, `AstroPy <http://www.astropy.org>`_ must be installed. FITS
+data cubes can be loaded by yt in the same way as other datasets. yt can
+read FITS image files that have the following (case-insensitive) suffixes:
+
+* fits
+* fts
+* fits.gz
+* fts.gz
+
+.. note::
+
+  AstroPy is necessary due to the requirements of both FITS file reading and
+  WCS coordinates. Since new releases of
+  `PyFITS <http://www.stsci.edu/institute/software_hardware/pyfits>`_ are to
+  be discontinued, individual installations of this package and the
+  `PyWCS <http://stsdas.stsci.edu/astrolib/pywcs/>`_ package are not supported.
+
+Though a FITS dataset consists of a single data cube within the FITS file,
+upon being loaded into yt it is automatically decomposed into grids:
+
+.. code-block:: python
+
+  from yt.mods import *
+  ds = load("m33_hi.fits")
+  ds.print_stats()
+
+.. parsed-literal::
+
+  level	  # grids	    # cells	   # cells^3
+  ----------------------------------------------
+    0	     512	  981940800	         994
+  ----------------------------------------------
+             512	  981940800
+
+yt will generate its own domain decomposition, but the number of grids can be
+set manually by passing the ``nprocs`` parameter to the ``load`` call:
+
+.. code-block:: python
+
+  ds = load("m33_hi.fits", nprocs=1024)
+
+
+FITS Coordinates
+++++++++++++++++
+
+For FITS datasets, the unit of ``code_length`` is always the width of one
+pixel. yt will attempt to use the WCS information in the FITS header to
+construct information about the coordinate system. It provides support for
+the following dataset types:
+
+1. Rectilinear 2D/3D images with length units (e.g., Mpc, AU,
+   etc.) defined in the ``CUNITx`` keywords
+2. 2D images in the equatorial coordinate system (RA/Dec in the ``CTYPEx``
+   keywords)
+3. 3D images with equatorial coordinates and a third axis for another
+   quantity, such as velocity, frequency, wavelength, etc.
+4. 4D images, where the slices along the 4th axis are interpreted as
+   different fields.
+
+If your data is of the first case, yt will determine the length units based
+on the information in the header. If your data is of the second or third
+case, no length units will be assigned, but the world coordinate information
+about the axes will be stored in separate fields. If your data is of the fourth
+type, the coordinates of the first three axes will be determined according to
+cases 1-3.
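+
+For example (a sketch, assuming a hypothetical file whose header sets
+``CUNIT1 = 'kpc'``, i.e. the first case above), the pixel width can be
+checked directly:
+
+.. code-block:: python
+
+  ds = load("cube_with_kpc_units.fits")  # hypothetical file
+  print ds.length_unit.in_units("kpc")   # the width of one pixel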
+
+Fields in FITS Datasets
++++++++++++++++++++++++
+
+Multiple fields can be included in a FITS dataset in several different ways.
+The first way, and the simplest, is if more than one image HDU is
+contained within the same file. The field names will be determined by the
+value of ``BTYPE`` in the header, and the field units will be determined by
+the value of ``BUNIT``. The second way is if a dataset has a fourth axis,
+with each slice along this axis corresponding to a different field. In this
+case, the field names will be determined by the value of the ``CTYPE4`` keyword
+and the index of the slice. So, for example, if ``BTYPE`` = ``"intensity"`` and
+``CTYPE4`` = ``"stokes"``, then the fields will be named
+``"intensity_stokes_1"``, ``"intensity_stokes_2"``, and so on.
+
+Additionally, fields corresponding to the WCS coordinates will be generated.
+They will be given the same names as the corresponding ``CTYPEx`` keywords,
+with the exception of any names that start with ``"RA"`` or ``"DEC"``,
+which will have field names shortened to simply ``"ra"`` and ``"dec"``. When
+queried, these fields will be generated from the pixel coordinates in the
+file using the WCS transformations provided by AstroPy.
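+
+As a brief sketch (assuming the ``m33_hi.fits`` cube above carries RA/Dec
+axes), these fields can be queried like any other:
+
+.. code-block:: python
+
+  ds = load("m33_hi.fits")
+  dd = ds.all_data()
+  # generated on demand from the pixel coordinates via the WCS
+  print dd["ra"].min(), dd["ra"].max()
+  print dd["dec"].min(), dd["dec"].max()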
+
+Additional Options
+++++++++++++++++++
+
+FITS data may include ``NaNs``. If you wish to mask this data out,
+you may supply a ``nan_mask`` parameter to ``load``, which may either be a
+single floating-point number (applies to all fields) or a Python dictionary
+containing different mask values for different fields:
+
+.. code-block:: python
+
+  # passing a single float
+  ds = load("m33_hi.fits", nan_mask=0.0)
+
+  # passing a dict
+  ds = load("m33_hi.fits", nan_mask={"intensity":-1.0,"temperature":0.0})
+
+Generally, AstroPy may generate a lot of warnings about individual FITS
+files, so yt suppresses them by default. If you want to see these
+warnings, set ``suppress_astropy_warnings = False`` in the call to ``load``.
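+
+For example, to re-enable the warnings for the dataset used above:
+
+.. code-block:: python
+
+  ds = load("m33_hi.fits", suppress_astropy_warnings=False)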
+
+Limitations
++++++++++++
+
+* Each FITS image from a single dataset, whether from one file or from one of
+  multiple files, must have the same dimensions and WCS information as the
+  first image in the primary file. If this is not the case,
+  yt will raise a warning and will not load this field.
+* yt will load data without WCS information and/or some missing header
+  keywords, but the resulting field information will necessarily be incomplete.
+  For example, field names may not be descriptive, and units will not be
+  correct. To get the full use out of yt for FITS files,
+  make sure that the header keywords mentioned above have sensible values.
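+
+One quick way to check those keywords (a sketch using AstroPy directly,
+independent of yt) is to print the header of the primary HDU:
+
+.. code-block:: python
+
+  from astropy.io import fits
+  f = fits.open("m33_hi.fits")
+  print repr(f[0].header)
+  f.close()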
+
 .. _loading-moab-data:
 
 MOAB Data


https://bitbucket.org/yt_analysis/yt/commits/e99f2ded848e/
Changeset:   e99f2ded848e
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-17 04:06:55
Summary:     Merge
Affected #:  34 files

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -7,4 +7,9 @@
 include doc/extensions/README doc/Makefile
 prune doc/source/reference/api/generated
 prune doc/build/
+recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
+prune yt/frontends/_skeleton
+prune tests
+graft yt/gui/reason/html/resources
+exclude clean.sh .hgchurn
 recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -4,10 +4,9 @@
 from yt.analysis_modules.absorption_spectrum.absorption_line \
         import voigt
 
-
 def generate_total_fit(x, fluxData, orderFits, speciesDicts, 
-        minError=1E-5, complexLim=.999,
-        fitLim=.99, minLength=3, 
+        minError=1E-4, complexLim=.995,
+        fitLim=.97, minLength=3, 
         maxLength=1000, splitLim=.99,
         output_file=None):
 
@@ -90,6 +89,7 @@
     fluxData[0]=1
     fluxData[-1]=1
 
+
     #Find all regions where lines/groups of lines are present
     cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
             complexLim=complexLim, minLength=minLength,
@@ -111,6 +111,7 @@
             yDatBounded=fluxData[b[1]:b[2]]
             yFitBounded=yFit[b[1]:b[2]]
 
+
             #Find init redshift
             z=(xBounded[yDatBounded.argmin()]-initWl)/initWl
 
@@ -121,24 +122,33 @@
 
             #Fit Using complex tools
             newLinesP,flag=_complex_fit(xBounded,yDatBounded,yFitBounded,
-                    z,fitLim,minError*(b[2]-b[1]),speciesDict)
+                    z,fitLim,minError,speciesDict)
+
+            #If flagged as a bad fit, species is lyman alpha,
+            #   and it may be a saturated line, use special tools
+            if flag and species=='lya' and min(yDatBounded)<.1:
+               newLinesP=_large_flag_fit(xBounded,yDatBounded,
+                        yFitBounded,z,speciesDict,
+                        minSize,minError)
+
+            if na.size(newLinesP)> 0:
+
+                #Check for numerical instabilities ("explosions")
+                newLinesP = _check_numerical_instability(x, newLinesP, speciesDict,b)
+
 
             #Check existence of partner lines if applicable
             if len(speciesDict['wavelength']) != 1:
                 newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData, 
-                        b, minError*(b[2]-b[1]),
-                        x0, xRes, speciesDict)
+                        b, minError, x0, xRes, speciesDict)
 
-            #If flagged as a bad fit, species is lyman alpha,
-            #   and it may be a saturated line, use special tools
-            if flag and species=='lya' and min(yDatBounded)<.1:
-                newLinesP=_large_flag_fit(xBounded,yDatBounded,
-                        yFitBounded,z,speciesDict,
-                        minSize,minError*(b[2]-b[1]))
+
+
 
             #Adjust total current fit
             yFit=yFit*_gen_flux_lines(x,newLinesP,speciesDict)
 
+
             #Add new group to all fitted lines
             if na.size(newLinesP)>0:
                 speciesLines['N']=na.append(speciesLines['N'],newLinesP[:,0])
@@ -149,6 +159,7 @@
 
         allSpeciesLines[species]=speciesLines
 
+
     if output_file:
         _output_fit(allSpeciesLines, output_file)
 
@@ -205,10 +216,12 @@
     #Setup initial line guesses
     if initP==None: #Regular fit
         initP = [0,0,0] 
-        if min(yDat)<.5: #Large lines get larger initial guess 
-            initP[0] = 10**16
+        if min(yDat)<.01: #Large lines get larger initial guess 
+            initP[0] = speciesDict['init_N']*10**2
+        elif min(yDat)<.5:
+            initP[0] = speciesDict['init_N']*10**1
         elif min(yDat)>.9: #Small lines get smaller initial guess
-            initP[0] = 10**12.5
+            initP[0] = speciesDict['init_N']*10**-1
         else:
             initP[0] = speciesDict['init_N']
         initP[1] = speciesDict['init_b']
@@ -225,9 +238,16 @@
         return [],False
     
     #Values to proceed through first run
-    errSq,prevErrSq=1,1000
+    errSq,prevErrSq,prevLinesP=1,10*len(x),[]
 
+    if errBound == None:
+        errBound = len(yDat)*(max(1-yDat)*1E-2)**2
+    else:
+        errBound = errBound*len(yDat)
+
+    flag = False
     while True:
+
         #Initial parameter guess from joining parameters from all lines
         #   in lines into a single array
         initP = linesP.flatten()
@@ -237,6 +257,7 @@
                 args=(x,yDat,yFit,speciesDict),
                 epsfcn=1E-10,maxfev=1000)
 
+
         #Set results of optimization
         linesP = na.reshape(fitP,(-1,3))
 
@@ -247,17 +268,23 @@
         #Sum to get idea of goodness of fit
         errSq=sum(dif**2)
 
+        if any(linesP[:,1]==speciesDict['init_b']):
+            # linesP = prevLinesP
+            flag = True
+            break
+            
         #If good enough, break
-        if errSq < errBound: 
+        if errSq < errBound:
             break
 
         #If last fit was worse, reject the last line and revert to last fit
-        if errSq > prevErrSq*10:
+        if errSq > prevErrSq*10:
             #If its still pretty damn bad, cut losses and try flag fit tools
             if prevErrSq >1E2*errBound and speciesDict['name']=='HI lya':
                 return [],True
             else:
-                yNewFit=_gen_flux_lines(x,prevLinesP,speciesDict)
+                linesP = prevLinesP
                 break
 
         #If too many lines 
@@ -266,21 +293,26 @@
             if errSq >1E2*errBound and speciesDict['name']=='HI lya':
                 return [],True
             else:
-                break 
+                flag = True
+                break
 
         #Store previous data in case reject next fit
         prevErrSq = errSq
         prevLinesP = linesP
 
-
         #Set up initial condition for new line
         newP = [0,0,0] 
-        if min(dif)<.1:
-            newP[0]=10**12
-        elif min(dif)>.9:
-            newP[0]=10**16
+
+        yAdjusted = 1+yFit*yNewFit-yDat
+ 
+        if min(yAdjusted)<.01: #Large lines get larger initial guess 
+            newP[0] = speciesDict['init_N']*10**2
+        elif min(yAdjusted)<.5:
+            newP[0] = speciesDict['init_N']*10**1
+        elif min(yAdjusted)>.9: #Small lines get smaller initial guess
+            newP[0] = speciesDict['init_N']*10**-1
         else:
-            newP[0]=10**14
+            newP[0] = speciesDict['init_N']
         newP[1] = speciesDict['init_b']
         newP[2]=(x[dif.argmax()]-wl0)/wl0
         linesP=na.append(linesP,[newP],axis=0)
@@ -290,12 +322,12 @@
     #   acceptable range, as given in dict ref
     remove=[]
     for i,p in enumerate(linesP):
-        check=_check_params(na.array([p]),speciesDict)
+        check=_check_params(na.array([p]),speciesDict,x)
         if check: 
             remove.append(i)
     linesP = na.delete(linesP,remove,axis=0)
 
-    return linesP,False
+    return linesP,flag
 
 def _large_flag_fit(x, yDat, yFit, initz, speciesDict, minSize, errBound):
     """
@@ -489,6 +521,9 @@
     #List of lines to remove
     removeLines=[]
 
+    #Set error
+
+
     #Iterate through all sets of line parameters
     for i,p in enumerate(linesP):
 
@@ -501,16 +536,23 @@
             lb = _get_bounds(p[2],b,wl,x0,xRes)
             xb,yb=x[lb[0]:lb[1]],y[lb[0]:lb[1]]
 
+            if errBound == None:
+                errBound = 10*len(yb)*(max(1-yb)*1E-2)**2
+            else:
+                errBound = 10*errBound*len(yb)
+
             #Generate a fit and find the difference to data
             yFitb=_gen_flux_lines(xb,na.array([p]),speciesDict)
             dif =yb-yFitb
 
+
+
             #Only counts as an error if line is too big ---------------<
             dif = [k for k in dif if k>0]
             err = sum(dif)
 
             #If the fit is too bad then add the line to list of removed lines
-            if err > errBound*1E2:
+            if err > errBound:
                 removeLines.append(i)
                 break
 
@@ -640,21 +682,13 @@
         #Check if the region needs to be divided
         if b[2]-b[1]>maxLength:
 
-            #Find the minimum absorption in the middle two quartiles of
-            #   the large complex
-            q=(b[2]-b[1])/4
-            cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+            split = _split_region(yDat,b,splitLim)
 
-            #Only break it up if the minimum absorption is actually low enough
-            if yDat[cut]>splitLim:
-
-                #Get the new two peaks
-                b1Peak = yDat[b[1]:cut].argmin()+b[1]
-                b2Peak = yDat[cut:b[2]].argmin()+cut
+            if split:
 
                 #add the two regions separately
-                cBounds.insert(i+1,[b1Peak,b[1],cut])
-                cBounds.insert(i+2,[b2Peak,cut,b[2]])
+                cBounds.insert(i+1,split[0])
+                cBounds.insert(i+2,split[1])
 
                 #Remove the original region
                 cBounds.pop(i)
@@ -663,7 +697,33 @@
 
     return cBounds
 
-def _gen_flux_lines(x, linesP, speciesDict):
+
+def _split_region(yDat,b,splitLim):
+    #Find the minimum absorption in the middle two quartiles of
+    #   the large complex
+
+    q=(b[2]-b[1])/4
+    cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+
+    #Only break it up if the minimum absorption is actually low enough
+    if yDat[cut]>splitLim:
+
+        #Get the new two peaks
+        b1Peak = yDat[b[1]:cut].argmin()+b[1]
+        b2Peak = yDat[cut:b[2]].argmin()+cut
+
+        region_1 = [b1Peak,b[1],cut]
+        region_2 = [b2Peak,cut,b[2]]
+
+        return [region_1,region_2]
+
+    else:
+
+        return []
+
+
+
+def _gen_flux_lines(x, linesP, speciesDict,firstLine=False):
     """
     Calculates the normalized flux for a region of wavelength space
     generated by a set of absorption lines.
@@ -692,6 +752,9 @@
             g=speciesDict['Gamma'][i]
             wl=speciesDict['wavelength'][i]
             y = y+ _gen_tau(x,p,f,g,wl)
+            if firstLine: 
+                break
+
     flux = na.exp(-y)
     return flux
 
@@ -744,21 +807,25 @@
         the difference between the fit generated by the parameters
         given in pTotal multiplied by the previous fit and the desired
         flux profile, w/ first index modified appropriately for bad 
-        parameter choices
+        parameter choices and additional penalty for fitting with a lower
+        flux than observed.
     """
 
     pTotal.shape = (-1,3)
     yNewFit = _gen_flux_lines(x,pTotal,speciesDict)
 
     error = yDat-yFit*yNewFit
-    error[0] = _check_params(pTotal,speciesDict)
+    error_plus = (yDat-yFit*yNewFit).clip(min=0)
+
+    error = error+error_plus
+    error[0] = _check_params(pTotal,speciesDict,x)
 
     return error
 
-def _check_params(p, speciesDict):
+def _check_params(p, speciesDict,xb):
     """
     Check to see if any of the parameters in p fall outside the range 
-        given in speciesDict.
+        given in speciesDict or on the boundaries
 
     Parameters
     ----------
@@ -767,6 +834,8 @@
     speciesDict : dictionary
         dictionary with properties giving the max and min
         values appropriate for each parameter N,b, and z.
+    xb : (N) ndarray
+        wavelength array [nm]
 
     Returns
     -------
@@ -774,16 +843,137 @@
         0 if all values are fine
         999 if any values fall outside acceptable range
     """
+
+    minz = (xb[0])/speciesDict['wavelength'][0]-1
+    maxz = (xb[-1])/speciesDict['wavelength'][0]-1
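+    # i.e. the redshift range that keeps the line inside the fitted window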
+
     check = 0
-    if any(p[:,0] > speciesDict['maxN']) or\
-          any(p[:,0] < speciesDict['minN']) or\
-          any(p[:,1] > speciesDict['maxb']) or\
-          any(p[:,1] < speciesDict['minb']) or\
-          any(p[:,2] > speciesDict['maxz']) or\
-          any(p[:,2] < speciesDict['minz']):
+    if any(p[:,0] >= speciesDict['maxN']) or\
+          any(p[:,0] <= speciesDict['minN']) or\
+          any(p[:,1] >= speciesDict['maxb']) or\
+          any(p[:,1] <= speciesDict['minb']) or\
+          any(p[:,2] >= maxz) or\
+          any(p[:,2] <= minz):
               check = 999
+              
     return check
 
+def _check_optimization_init(p,speciesDict,initz,xb,yDat,yFit,minSize,errorBound):
+
+    """
+    Check to see if any of the parameters in p are the
+    same as the initial parameters and, if so, attempt to
+    split the region and refit it.
+
+    Parameters
+    ----------
+    p : (3,) ndarray
+        array with form [[N1, b1, z1], ...] 
+    speciesDict : dictionary
+        dictionary with properties giving the max and min
+        values appropriate for each parameter N,b, and z.
+    xb : (N) ndarray
+        wavelength array [nm]
+    """
+
+    # Check if anything is a default parameter
+    if any(p[:,0] == speciesDict['init_N']) or\
+          any(p[:,0] == speciesDict['init_N']*10) or\
+          any(p[:,0] == speciesDict['init_N']*100) or\
+          any(p[:,0] == speciesDict['init_N']*.1) or\
+          any(p[:,1] == speciesDict['init_b']) or\
+          any(p[:,1] == speciesDict['maxb']):
+
+            # These are the initial bounds
+            init_bounds = [yDat.argmin(),0,len(xb)-1]
+
+            # Gratuitous limit for splitting region
+            newSplitLim = 1 - (1-min(yDat))*.5
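+            # i.e. halfway between the deepest absorption and the continuum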
+
+            # Attempt to split region
+            split = _split_region(yDat,init_bounds,newSplitLim)
+            
+            # If we can't split it, just reject it. It's unphysical
+            # to just keep the default parameters and we're out of
+            # options at this point
+            if not split:
+                return []
+
+            # Else set up the bounds for each region and fit separately
+            b1,b2 = split[0][2], split[1][1]
+
+            p1,flag = _complex_fit(xb[:b1], yDat[:b1], yFit[:b1],
+                            initz, minSize, errorBound, speciesDict)
+
+            p2,flag = _complex_fit(xb[b2:], yDat[b2:], yFit[b2:],
+                            initz, minSize, errorBound, speciesDict)
+
+            # Make the final line parameters. It's annoying because
+            # one or both regions may have fit to nothing
+            if na.size(p1)> 0 and na.size(p2)>0:
+                p = na.r_[p1,p2]
+            elif na.size(p1) > 0:
+                p = p1
+            else:
+                p = p2
+
+    return p
+
+
+def _check_numerical_instability(x, p, speciesDict,b):
+
+    """
+    Check to see if any of the parameters in p are causing
+    unstable numerical effects outside the region of fit
+
+    Parameters
+    ----------
+    p : (3,) ndarray
+        array with form [[N1, b1, z1], ...] 
+    speciesDict : dictionary
+        dictionary with properties giving the max and min
+        values appropriate for each parameter N,b, and z.
+    x : (N) ndarray
+        wavelength array [nm]
+    b : (3) list
+        list of integers indicating bounds of region fit in x
+    """
+
+    remove_lines = []
+
+
+    for i,line in enumerate(p):
+
+        # First check whether the line is at risk of instability
+        if line[1]<5 or line[0] < 1E12:
+
+
+            # get all flux that isn't part of fit plus a little wiggle room
+            # max and min to prevent boundary errors
+
+            flux = _gen_flux_lines(x,[line],speciesDict,firstLine=True)
+            flux = na.r_[flux[:max(b[1]-10,0)], flux[min(b[2]+10,len(x)):]]
+
+            #Find regions that are absorbing outside the region we fit
+            flux_dif = 1 - flux
+            absorbing_coefficient = max(abs(flux_dif))
+
+
+            #Really there shouldn't be any absorption outside
+            #the region we fit, but we'll give some leeway.
+            #For high resolution spectra the tiny bits on the edges
+            #can give a non-negligible amount of flux. Plus the errors
+            #we are looking for are HUGE.
+            if absorbing_coefficient > .1:
+
+                # we just set it to no fit because we've tried
+                # everything else at this point. This region just sucks :(
+                remove_lines.append(i)
+    
+    if remove_lines:
+        p = na.delete(p, remove_lines, axis=0)
+
+    return p
 
 def _output_fit(lineDic, file_name = 'spectrum_fit.h5'):
     """
@@ -815,4 +1005,5 @@
         f.create_dataset("{0}/z".format(ion),data=params['z'])
         f.create_dataset("{0}/complex".format(ion),data=params['group#'])
     print 'Writing spectrum fit to {0}'.format(file_name)
+    f.close()
 

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -129,7 +129,7 @@
         """
         if self.CoM is not None:
             return self.CoM
-        pm = self["ParticleMassMsun"]
+        pm = self["particle_mass"].in_units('Msun')
         c = {}
         # We shift into a box where the origin is the left edge
         c[0] = self["particle_position_x"] - self.pf.domain_left_edge[0]
@@ -199,7 +199,7 @@
         """
         if self.group_total_mass is not None:
             return self.group_total_mass
-        return self["ParticleMassMsun"].sum()
+        return self["particle_mass"].in_units('Msun').sum()
 
     def bulk_velocity(self):
         r"""Returns the mass-weighted average velocity in cm/s.
@@ -213,7 +213,7 @@
         """
         if self.bulk_vel is not None:
             return self.bulk_vel
-        pm = self["ParticleMassMsun"]
+        pm = self["particle_mass"].in_units('Msun')
         vx = (self["particle_velocity_x"] * pm).sum()
         vy = (self["particle_velocity_y"] * pm).sum()
         vz = (self["particle_velocity_z"] * pm).sum()
@@ -234,7 +234,7 @@
         if self.rms_vel is not None:
             return self.rms_vel
         bv = self.bulk_velocity()
-        pm = self["ParticleMassMsun"]
+        pm = self["particle_mass"].in_units('Msun')
         sm = pm.sum()
         vx = (self["particle_velocity_x"] - bv[0]) * pm / sm
         vy = (self["particle_velocity_y"] - bv[1]) * pm / sm
@@ -331,7 +331,7 @@
         handle.create_group("/%s" % gn)
         for field in ["particle_position_%s" % ax for ax in 'xyz'] \
                    + ["particle_velocity_%s" % ax for ax in 'xyz'] \
-                   + ["particle_index"] + ["ParticleMassMsun"]:
+                   + ["particle_index"] + ["particle_mass"].in_units('Msun'):
             handle.create_dataset("/%s/%s" % (gn, field), data=self[field])
         if 'creation_time' in self.data.pf.field_list:
             handle.create_dataset("/%s/creation_time" % gn,
@@ -464,7 +464,7 @@
         if self["particle_position_x"].size > 1:
             for index in np.unique(inds):
                 self.mass_bins[index] += \
-                np.sum(self["ParticleMassMsun"][inds == index])
+                np.sum(self["particle_mass"][inds == index]).in_units('Msun')
         # Now forward sum the masses in the bins.
         for i in xrange(self.bin_count):
             self.mass_bins[i + 1] += self.mass_bins[i]
@@ -750,7 +750,7 @@
             inds = np.digitize(dist, self.radial_bins) - 1
             for index in np.unique(inds):
                 self.mass_bins[index] += \
-                    np.sum(self["ParticleMassMsun"][inds == index])
+                    np.sum(self["particle_mass"][inds == index]).in_units('Msun')
             # Now forward sum the masses in the bins.
             for i in xrange(self.bin_count):
                 self.mass_bins[i + 1] += self.mass_bins[i]
@@ -1356,7 +1356,7 @@
     _name = "HOP"
     _halo_class = HOPHalo
     _fields = ["particle_position_%s" % ax for ax in 'xyz'] + \
-              ["ParticleMassMsun"]
+              ["particle_mass"]
 
     def __init__(self, data_source, threshold=160.0, dm_only=True):
         self.threshold = threshold
@@ -1368,7 +1368,7 @@
             RunHOP(self.particle_fields["particle_position_x"] / self.period[0],
                 self.particle_fields["particle_position_y"] / self.period[1],
                 self.particle_fields["particle_position_z"] / self.period[2],
-                self.particle_fields["ParticleMassMsun"],
+                self.particle_fields["particle_mass"].in_units('Msun'),
                 self.threshold)
         self.particle_fields["densities"] = self.densities
         self.particle_fields["tags"] = self.tags
@@ -1555,7 +1555,7 @@
     _name = "parallelHOP"
     _halo_class = parallelHOPHalo
     _fields = ["particle_position_%s" % ax for ax in 'xyz'] + \
-              ["ParticleMassMsun", "particle_index"]
+              ["particle_mass", "particle_index"]
 
     def __init__(self, data_source, padding, num_neighbors, bounds, total_mass,
         period, threshold=160.0, dm_only=True, rearrange=True, premerge=True,
@@ -1589,8 +1589,8 @@
 
         self.comm.mpi_exit_test(exit)
         # Try to do this in a memory conservative way.
-        np.divide(self.particle_fields['ParticleMassMsun'], self.total_mass,
-            self.particle_fields['ParticleMassMsun'])
+        np.divide(self.particle_fields['particle_mass'].in_units('Msun'), self.total_mass,
+            self.particle_fields['particle_mass'])
         np.divide(self.particle_fields["particle_position_x"],
             self.old_period[0], self.particle_fields["particle_position_x"])
         np.divide(self.particle_fields["particle_position_y"],
@@ -2190,7 +2190,7 @@
         # Now we get the full box mass after we have the final composition of
         # subvolumes.
         if total_mass is None:
-            total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(),
+            total_mass = self.comm.mpi_allreduce((self._data_source["particle_mass"].in_units('Msun').astype('float64')).sum(),
                                                  op='sum')
         if not self._distributed:
             self.padding = (np.zeros(3, dtype='float64'),
@@ -2386,9 +2386,9 @@
             if dm_only:
                 select = self._get_dm_indices()
                 total_mass = \
-                    self.comm.mpi_allreduce((self._data_source['all', "ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
+                    self.comm.mpi_allreduce((self._data_source['all', "particle_mass"][select].in_units('Msun')).sum(dtype='float64'), op='sum')
             else:
-                total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("ParticleMassMsun")[0], op='sum')
+                total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("particle_mass")[0].in_units('Msun'), op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles
         # are all on different processors, we should instead construct an
         # object representing the entire domain and sum it "lazily" with
@@ -2409,10 +2409,10 @@
             sub_mass = total_mass
         elif dm_only:
             select = self._get_dm_indices()
-            sub_mass = self._data_source["ParticleMassMsun"][select].sum(dtype='float64')
+            sub_mass = self._data_source["particle_mass"][select].in_units('Msun').sum(dtype='float64')
         else:
             sub_mass = \
-                self._data_source.quantities["TotalQuantity"]("ParticleMassMsun")[0]
+                self._data_source.quantities["TotalQuantity"]("particle_mass")[0].in_units('Msun')
         HOPHaloList.__init__(self, self._data_source,
             threshold * total_mass / sub_mass, dm_only)
         self._parse_halolist(total_mass / sub_mass)

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/analysis_modules/halo_finding/setup.py
--- a/yt/analysis_modules/halo_finding/setup.py
+++ b/yt/analysis_modules/halo_finding/setup.py
@@ -1,9 +1,7 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
 import os.path
 
+
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('halo_finding', parent_package, top_path)
@@ -12,6 +10,5 @@
     config.add_subpackage("parallel_hop")
     if os.path.exists("rockstar.cfg"):
         config.add_subpackage("rockstar")
-    config.make_config_py() # installs __config__.py
-    #config.make_svn_version_py()
+    config.make_config_py()  # installs __config__.py
     return config

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -27,6 +27,7 @@
      cm_per_km, erg_per_keV
 from yt.utilities.cosmology import Cosmology
 from yt.utilities.orientation import Orientation
+from yt.utilities.definitions import mpc_conversion
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only, get_mpi_type, \
      op_names, parallel_capable
@@ -424,7 +425,8 @@
     def project_photons(self, L, area_new=None, exp_time_new=None, 
                         redshift_new=None, dist_new=None,
                         absorb_model=None, psf_sigma=None,
-                        sky_center=None, responses=None):
+                        sky_center=None, responses=None,
+                        convolve_energies=False):
         r"""
         Projects photons onto an image plane given a line of sight.
 
@@ -452,8 +454,10 @@
         sky_center : array_like, optional
             Center RA, Dec of the events in degrees.
         responses : list of strings, optional
-            The names of the ARF and RMF files to convolve the photons with.
-
+            The names of the ARF and/or RMF files to convolve the photons with.
+        convolve_energies : boolean, optional
+            If this is set, the photon energies will be convolved with the RMF.
+            
         Examples
         --------
         >>> L = np.array([0.1,-0.2,0.3])
@@ -495,8 +499,10 @@
         parameters = {}
         
         if responses is not None:
+            responses = ensure_list(responses)
             parameters["ARF"] = responses[0]
-            parameters["RMF"] = responses[1]
+            if len(responses) == 2:
+                parameters["RMF"] = responses[1]
             area_new = parameters["ARF"]
             
         if (exp_time_new is None and area_new is None and
@@ -518,8 +524,13 @@
                 elo = f["SPECRESP"].data.field("ENERG_LO")
                 ehi = f["SPECRESP"].data.field("ENERG_HI")
                 eff_area = np.nan_to_num(f["SPECRESP"].data.field("SPECRESP"))
-                weights = self._normalize_arf(parameters["RMF"])
-                eff_area *= weights
+                if "RMF" in parameters:
+                    weights = self._normalize_arf(parameters["RMF"])
+                    eff_area *= weights
+                else:
+                    mylog.warning("You specified an ARF but not an RMF. This is ok if the "+
+                                  "responses are normalized properly. If not, you may "+
+                                  "get inconsistent results.")
                 f.close()
                 Aratio = eff_area.max()/self.parameters["FiducialArea"]
             else:
@@ -618,7 +629,7 @@
             
         if comm.rank == 0: mylog.info("Total number of observed photons: %d" % (num_events))
 
-        if responses is not None:
+        if "RMF" in parameters and convolve_energies:
             events, info = self._convolve_with_rmf(parameters["RMF"], events)
             for k, v in info.items(): parameters[k] = v
                 

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -16,6 +16,12 @@
 from yt.funcs import *
 from yt import units
 import h5py
+
+try:
+    import xspec
+except ImportError:
+    pass
+
 try:
     import xspec
     from scipy.integrate import cumtrapz

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -182,10 +182,14 @@
         LE   = self.domain_left_edge
         RE   = self.domain_right_edge
 
+        # Radmc3D wants the cell wall positions in cgs. Convert here:
+        LE_cgs = LE * self.pf.units['cm']
+        RE_cgs = RE * self.pf.units['cm']
+
         # calculate cell wall positions
-        xs = [str(x) for x in np.linspace(LE[0], RE[0], dims[0]+1)]
-        ys = [str(y) for y in np.linspace(LE[1], RE[1], dims[1]+1)]
-        zs = [str(z) for z in np.linspace(LE[2], RE[2], dims[2]+1)]
+        xs = [str(x) for x in np.linspace(LE_cgs[0], RE_cgs[0], dims[0]+1)]
+        ys = [str(y) for y in np.linspace(LE_cgs[1], RE_cgs[1], dims[1]+1)]
+        zs = [str(z) for z in np.linspace(LE_cgs[2], RE_cgs[2], dims[2]+1)]
 
         # writer file header
         grid_file = open(self.grid_filename, 'w')

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -1,11 +1,9 @@
 #!/usr/bin/env python
-import setuptools
 
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('analysis_modules', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
     config.add_subpackage("absorption_spectrum")
     config.add_subpackage("coordinate_transformation")
     config.add_subpackage("cosmological_observation")
@@ -14,11 +12,14 @@
     config.add_subpackage("halo_merger_tree")
     config.add_subpackage("halo_profiler")
     config.add_subpackage("level_sets")
+    config.add_subpackage("particle_trajectories")
+    config.add_subpackage("photon_simulator")
     config.add_subpackage("radial_column_density")
     config.add_subpackage("spectral_integrator")
     config.add_subpackage("star_analysis")
     config.add_subpackage("two_point_functions")
     config.add_subpackage("radmc3d_export")
+    config.add_subpackage("sunrise_export")
     config.add_subpackage("sunyaev_zeldovich")
     config.add_subpackage("particle_trajectories")
     config.add_subpackage("photon_simulator")

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -44,7 +44,7 @@
     star_creation_time : Ordered array or list of floats
         The creation time for the stars in code units.
     volume : Float
-        The volume of the region for the specified list of stars.
+        The comoving volume of the region for the specified list of stars.
     bins : Integer
         The number of time bins used for binning the stars. Default = 300.
     
@@ -72,7 +72,7 @@
                 If data_source is not provided, all of these parameters need to be set:
                 star_mass (array, Msun),
                 star_creation_time (array, code units),
-                volume (float, Mpc**3).
+                volume (float, cMpc**3).
                 """)
                 return None
             self.mode = 'provided'
@@ -130,15 +130,15 @@
         """
         if self.mode == 'data_source':
             try:
-                vol = self._data_source.volume('mpc')
+                vol = self._data_source.volume('mpccm')
             except AttributeError:
                 # If we're here, this is probably a HOPHalo object, and we
                 # can get the volume this way.
                 ds = self._data_source.get_sphere()
-                vol = ds.volume('mpc')
+                vol = ds.volume('mpccm')
         elif self.mode == 'provided':
-            vol = self.volume
-        tc = self._pf["Time"] #time to seconds?
+            vol = self.volume  # provided directly, as a float in comoving Mpc**3
+        tc = self._pf["Time"]
         self.time = []
         self.lookback_time = []
         self.redshift = []
@@ -153,6 +153,7 @@
             self.redshift.append(self.cosm.z_from_t(time * tc))
             self.Msol_yr.append(self.mass_bins[i] / \
                 (self.time_bins_dt[i] * tc / YEAR))
+            # vol is now in comoving Mpc**3 (mpccm), as used in the literature
             self.Msol_yr_vol.append(self.mass_bins[i] / \
                 (self.time_bins_dt[i] * tc / YEAR) / vol)
             self.Msol.append(self.mass_bins[i])

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -52,6 +52,8 @@
     CylindricalCoordinateHandler
 from yt.geometry.spherical_coordinates import \
     SphericalCoordinateHandler
+from yt.geometry.geographic_coordinates import \
+    GeographicCoordinateHandler
 
 # We want to support the movie format in the future.
 # When such a thing comes to pass, I'll move all the stuff that is contant up
@@ -291,7 +293,7 @@
             self.create_field_info()
             np.seterr(**oldsettings)
         return self._instantiated_index
-    
+
     _index_proxy = None
     @property
     def h(self):
@@ -366,6 +368,8 @@
             self.coordinates = PolarCoordinateHandler(self)
         elif self.geometry == "spherical":
             self.coordinates = SphericalCoordinateHandler(self)
+        elif self.geometry == "geographic":
+            self.coordinates = GeographicCoordinateHandler(self)
         else:
             raise YTGeometryNotSupported(self.geometry)
 
@@ -528,7 +532,7 @@
         source = self.all_data()
         max_val, maxi, mx, my, mz = \
             source.quantities["MaxLocation"](field)
-        mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f", 
+        mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f",
               max_val, mx, my, mz)
         return max_val, np.array([mx, my, mz], dtype="float64")
 
@@ -629,7 +633,8 @@
             DW = np.zeros(3)
         else:
             DW = self.arr(self.domain_right_edge - self.domain_left_edge, "code_length")
-        self.unit_registry.add("unitary", float(DW.max()), DW.units.dimensions)
+        self.unit_registry.add("unitary", float(DW.max() * DW.units.cgs_value),
+                               DW.units.dimensions)
 
     _arr = None
     @property

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -43,7 +43,8 @@
 
 from .fields import \
     BoxlibFieldInfo, \
-    MaestroFieldInfo
+    MaestroFieldInfo, \
+    CastroFieldInfo
 
 from .io import IOHandlerBoxlib
 # This is what we use to find scientific notation that might include d's
@@ -603,7 +604,8 @@
         tmp.extend((1,1))
         self.domain_dimensions = np.array(tmp)
         tmp = list(self.periodicity)
-        tmp[1:] = False
+        tmp[1] = False
+        tmp[2] = False
         self.periodicity = ensure_tuple(tmp)
         
     def _setup2d(self):
@@ -728,6 +730,8 @@
 
 class CastroDataset(BoxlibDataset):
 
+    _field_info_class = CastroFieldInfo
+
     @classmethod
     def _is_valid(cls, *args, **kwargs):
         # fill our args

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -20,10 +20,6 @@
     mh, boltzmann_constant_cgs, amu_cgs
 from yt.fields.field_info_container import \
     FieldInfoContainer
-from yt.fields.species_fields import \
-    add_species_field_by_fraction
-from yt.utilities.chemical_formulas import \
-    ChemicalFormula
 
 rho_units = "code_mass / code_length**3"
 mom_units = "code_mass / (code_time * code_length**2)"
@@ -114,6 +110,64 @@
                            function = _get_vel(ax),
                            units = "cm/s")
 
+class CastroFieldInfo(FieldInfoContainer):
+
+    known_other_fields = (
+        ("density", ("g/cm**3", ["density"], r"\rho")),
+        ("xmom", ("g*cm/s", ["momentum_x"], r"\rho u")),
+        ("ymom", ("g*cm/s", ["momentum_y"], r"\rho v")),
+        ("zmom", ("g*cm/s", ["momentum_z"], r"\rho w")),
+        # velocity components are not always present
+        ("x_velocity", ("cm/s", ["velocity_x"], r"u")),
+        ("y_velocity", ("cm/s", ["velocity_y"], r"v")),
+        ("z_velocity", ("cm/s", ["velocity_z"], r"w")),
+        ("rho_E", ("erg/cm**3", ["energy_density"], r"\rho E")),
+        # internal energy density (not just thermal)
+        ("rho_e", ("erg/cm**3", [], r"\rho e")),
+        ("Temp", ("K", ["temperature"], r"T")),
+        ("grav_x", ("cm/s**2", [], r"g\cdot e_x")),
+        ("grav_y", ("cm/s**2", [], r"g\cdot e_y")),
+        ("grav_z", ("cm/s**2", [], r"g\cdot e_z")),
+        ("pressure", ("dyne/cm**2", [], r"p")),
+        ("kineng", ("erg/cm**3", [], r"\frac{1}{2}\rho|U|**2")),
+        ("soundspeed", ("cm/s", ["sound_speed"], None)),
+        ("Machnumber", ("", ["mach_number"], None)),
+        ("entropy", ("erg/(g*K)", ["entropy"], r"s")),
+        ("magvort", ("1/s", ["vorticity_magnitude"], r"|\nabla \times U|")),
+        ("divu", ("1/s", [], r"\nabla \cdot U")),
+        ("eint_E", ("erg/g", [], r"e(E,U)")),
+        ("eint_e", ("erg/g", [], r"e")),
+        ("magvel", ("cm/s", ["velocity_magnitude"], r"|U|")),
+        ("radvel", ("cm/s", [], r"U\cdot e_r")),
+        ("magmom", ("g*cm/s", ["momentum_magnitude"], r"|\rho U|")),
+        ("maggrav", ("cm/s**2", [], r"|g|")),
+        ("phiGrav", ("erg/g", [], r"|\Phi|")),
+    )
+
+    def setup_fluid_fields(self):
+        # add X's
+        for _, field in self.pf.field_list:
+            if field.startswith("X("):
+                # We have a fraction
+                nice_name = field[2:-1]
+                self.alias(("gas", "%s_fraction" % nice_name), ("boxlib", field),
+                           units = "")
+                def _create_density_func(field_name):
+                    def _func(field, data):
+                        return data[field_name] * data["gas", "density"]
+                    return _func
+                func = _create_density_func(("gas", "%s_fraction" % nice_name))
+                self.add_field(name = ("gas", "%s_density" % nice_name),
+                               function = func,
+                               units = "g/cm**3")
+                # We know this will either have one letter, or two.
+                if field[3] in string.letters:
+                    element, weight = field[2:4], field[4:-1]
+                else:
+                    element, weight = field[2:3], field[3:-1]
+                weight = int(weight)
+                # Here we can, later, add number density.
+
 class MaestroFieldInfo(FieldInfoContainer):
 
     known_other_fields = (
@@ -166,7 +220,7 @@
     )
 
     def setup_fluid_fields(self):
-        # Add omegadots, units of 1/s
+        # pick the correct temperature field
         if self.pf.parameters["use_tfromp"]:
             self.alias(("gas", "temperature"), ("boxlib", "tfromp"),
                        units = "K")
@@ -174,6 +228,7 @@
             self.alias(("gas", "temperature"), ("boxlib", "tfromh"),
                        units = "K")
 
+        # Add X's and omegadots, units of 1/s
         for _, field in self.pf.field_list:
             if field.startswith("X("):
                 # We have a fraction

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -196,7 +196,7 @@
     _preload_implemented = True
 
     def __init__(self, pf, dataset_type):
-        
+
         self.dataset_type = dataset_type
         if pf.file_style != None:
             self._bn = pf.file_style
@@ -868,7 +868,8 @@
         self.unit_registry.modify("code_time", self.time_unit)
         self.unit_registry.modify("code_velocity", self.velocity_unit)
         DW = self.arr(self.domain_right_edge - self.domain_left_edge, "code_length")
-        self.unit_registry.add("unitary", float(DW.max()), DW.units.dimensions)
+        self.unit_registry.add("unitary", float(DW.max() * DW.units.cgs_value),
+                               DW.units.dimensions)
 
     def cosmology_get_units(self):
         """
@@ -984,8 +985,8 @@
     size = os.stat(f.name).st_size
     fullblocks, lastblock = divmod(size, blocksize)
 
-    # The first(end of file) block will be short, since this leaves 
-    # the rest aligned on a blocksize boundary.  This may be more 
+    # The first(end of file) block will be short, since this leaves
+    # the rest aligned on a blocksize boundary.  This may be more
     # efficient than having the last (first in file) block be short
     f.seek(-lastblock,2)
     yield f.read(lastblock)

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -343,7 +343,7 @@
         self.domain_right_edge = np.array(
             [self.parameters["%smax" % ax] for ax in 'xyz']).astype("float64")
         if self.dimensionality < 3:
-            for d in (dimensionality)+range(3-dimensionality):
+            for d in [dimensionality]+range(3-dimensionality):
                 if self.domain_left_edge[d] == self.domain_right_edge[d]:
                     mylog.warning('Identical domain left edge and right edges '
                                   'along dummy dimension (%i), attempting to read anyway' % d)

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/geometry/geographic_coordinates.py
--- /dev/null
+++ b/yt/geometry/geographic_coordinates.py
@@ -0,0 +1,194 @@
+"""
+Geographic fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from .coordinate_handler import \
+    CoordinateHandler, \
+    _unknown_coord, \
+    _get_coord_fields
+import yt.visualization._MPL as _MPL
+from yt.utilities.lib.misc_utilities import \
+    pixelize_cylinder, pixelize_aitoff
+
+class GeographicCoordinateHandler(CoordinateHandler):
+
+    def __init__(self, pf, ordering = 'latlonalt'):
+        if ordering != 'latlonalt': raise NotImplementedError
+        super(GeographicCoordinateHandler, self).__init__(pf)
+
+    def setup_fields(self, registry):
+        # return the fields for latitude, longitude, altitude
+        registry.add_field(("index", "dx"), function=_unknown_coord)
+        registry.add_field(("index", "dy"), function=_unknown_coord)
+        registry.add_field(("index", "dz"), function=_unknown_coord)
+        registry.add_field(("index", "x"), function=_unknown_coord)
+        registry.add_field(("index", "y"), function=_unknown_coord)
+        registry.add_field(("index", "z"), function=_unknown_coord)
+        f1, f2 = _get_coord_fields(0, "")
+        registry.add_field(("index", "dlatitude"), function = f1,
+                           display_field = False,
+                           units = "")
+        registry.add_field(("index", "latitude"), function = f2,
+                           display_field = False,
+                           units = "")
+
+        f1, f2 = _get_coord_fields(1, "")
+        registry.add_field(("index", "dlongitude"), function = f1,
+                           display_field = False,
+                           units = "")
+        registry.add_field(("index", "longitude"), function = f2,
+                           display_field = False,
+                           units = "")
+
+        f1, f2 = _get_coord_fields(2)
+        registry.add_field(("index", "daltitude"), function = f1,
+                           display_field = False,
+                           units = "code_length")
+        registry.add_field(("index", "altitude"), function = f2,
+                           display_field = False,
+                           units = "code_length")
+
+        def _SphericalVolume(field, data):
+            # r**2 sin theta dr dtheta dphi
+            # We can use the transformed coordinates here.
+            vol = data["index", "r"]**2.0
+            vol *= data["index", "dr"]
+            vol *= np.sin(data["index", "theta"])
+            vol *= data["index", "dtheta"]
+            vol *= data["index", "dphi"]
+            return vol
+        registry.add_field(("index", "cell_volume"),
+                 function=_SphericalVolume,
+                 units = "code_length**3")
+
+        # Altitude is the radius from the central zone minus the radius of the
+        # surface.
+        def _altitude_to_radius(field, data):
+            surface_height = data.get_field_parameter("surface_height")
+            if surface_height is None:
+                surface_height = getattr(data.pf, "surface_height", 0.0)
+            return data["altitude"] + surface_height
+        registry.add_field(("index", "r"),
+                 function=_altitude_to_radius,
+                 units = "code_length")
+        registry.alias(("index", "dr"), ("index", "daltitude"))
+
+        def _longitude_to_theta(field, data):
+            # longitude runs from -180 to 180.
+            return (data["longitude"] + 180) * np.pi/180.0
+        registry.add_field(("index", "theta"),
+                 function = _longitude_to_theta,
+                 units = "")
+        def _dlongitude_to_dtheta(field, data):
+            return data["dlongitude"] * np.pi/180.0
+        registry.add_field(("index", "dtheta"),
+                 function = _dlongitude_to_dtheta,
+                 units = "")
+
+        def _latitude_to_phi(field, data):
+            # latitude runs from -90 to 90
+            return (data["latitude"] + 90) * np.pi/180.0
+        registry.add_field(("index", "phi"),
+                 function = _latitude_to_phi,
+                 units = "")
+        def _dlatitude_to_dphi(field, data):
+            return data["dlatitude"] * np.pi/180.0
+        registry.add_field(("index", "dphi"),
+                 function = _dlatitude_to_dphi,
+                 units = "")
+
+    def pixelize(self, dimension, data_source, field, bounds, size,
+                 antialias = True, periodic = True):
+        if dimension in (0, 1):
+            return self._cyl_pixelize(data_source, field, bounds, size,
+                                          antialias, dimension)
+        elif dimension == 2:
+            return self._ortho_pixelize(data_source, field, bounds, size,
+                                        antialias, dimension, periodic)
+        else:
+            raise NotImplementedError
+
+    def _ortho_pixelize(self, data_source, field, bounds, size, antialias,
+                        dim, periodic):
+        buff = pixelize_aitoff(data_source["theta"], data_source["dtheta"]/2.0,
+                               data_source["phi"], data_source["dphi"]/2.0,
+                               size, data_source[field], None,
+                               None).transpose()
+        return buff
+
+    def _cyl_pixelize(self, data_source, field, bounds, size, antialias,
+                      dimension):
+        if dimension == 0:
+            buff = pixelize_cylinder(data_source['r'],
+                                     data_source['dr'] / 2.0,
+                                     data_source['theta'],
+                                     data_source['dtheta'] / 2.0, # half-widths
+                                     size, data_source[field], bounds)
+        elif dimension == 1:
+            buff = pixelize_cylinder(data_source['r'],
+                                     data_source['dr'] / 2.0,
+                                     data_source['phi'],
+                                     data_source['dphi'] / 2.0, # half-widths
+                                     size, data_source[field], bounds)
+        else:
+            raise RuntimeError
+        return buff
+
+
+    def convert_from_cartesian(self, coord):
+        raise NotImplementedError
+
+    def convert_to_cartesian(self, coord):
+        raise NotImplementedError
+
+    def convert_to_cylindrical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_cylindrical(self, coord):
+        raise NotImplementedError
+
+    def convert_to_spherical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_spherical(self, coord):
+        raise NotImplementedError
+
+    # Despite being mutable, we use these here to be clear about how they
+    # are generated and to ensure that they are not re-generated unnecessarily.
+    axis_name = { 0  : 'latitude',  1  : 'longitude',  2  : 'altitude',
+                 'latitude' : 'latitude',
+                 'longitude' : 'longitude', 
+                 'altitude' : 'altitude',
+                 'Latitude' : 'latitude',
+                 'Longitude' : 'longitude', 
+                 'Altitude' : 'altitude',
+                 'lat' : 'latitude',
+                 'lon' : 'longitude', 
+                 'alt' : 'altitude' }
+
+    axis_id = { 'latitude' : 0, 'longitude' : 1, 'altitude' : 2,
+                 0  : 0,  1  : 1,  2  : 2}
+
+    x_axis = { 'latitude' : 1, 'longitude' : 0, 'altitude' : 0,
+                0  : 1,  1  : 0,  2  : 0}
+
+    y_axis = { 'latitude' : 2, 'longitude' : 2, 'altitude' : 1,
+                0  : 2,  1  : 2,  2  : 1}
+
+    @property
+    def period(self):
+        return self.pf.domain_width
+
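
For reference, the degree-to-radian mappings registered above reduce to a few
lines of NumPy. A minimal standalone sketch of the same conversions (the sample
values are illustrative, not tied to any dataset):

    import numpy as np

    # longitude runs from -180 to 180; latitude runs from -90 to 90
    longitude = np.array([-180.0, 0.0, 180.0])
    latitude = np.array([-90.0, 0.0, 90.0])

    theta = (longitude + 180.0) * np.pi / 180.0   # maps onto [0, 2*pi]
    phi = (latitude + 90.0) * np.pi / 180.0       # maps onto [0, pi]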

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/geometry/object_finding_mixin.py
--- a/yt/geometry/object_finding_mixin.py
+++ b/yt/geometry/object_finding_mixin.py
@@ -15,6 +15,7 @@
 
 import numpy as np
 
+from yt.config import ytcfg
 from yt.funcs import *
 from yt.utilities.lib.misc_utilities import \
     get_box_grids_level, \
@@ -56,7 +57,19 @@
 
     def find_max_cell_location(self, field, finest_levels = 3):
         if finest_levels is not False:
-            gi = (self.grid_levels >= self.max_level - finest_levels).ravel()
+            # This prevents bad values for the case that the number of grids to
+            # search is smaller than the number of processors being applied to
+            # the task, by widening the level range until enough grids qualify.
+            nproc = ytcfg.getint("yt", "__topcomm_parallel_size")
+            while 1:
+                gi = (self.grid_levels >= self.max_level - finest_levels).ravel()
+                if gi.sum() >= nproc:
+                    break
+                elif finest_levels >= self.max_level:
+                    raise YTTooParallel
+                else:
+                    finest_levels += 1
+                
             source = self.grid_collection([0.0]*3, self.grids[gi])
         else:
             source = self.all_data()
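
The widening loop above can be read in isolation; a minimal sketch with a plain
NumPy array standing in for self.grid_levels and a hard-coded count standing in
for the __topcomm_parallel_size lookup:

    import numpy as np

    grid_levels = np.array([0, 1, 1, 2, 3, 3])  # illustrative level list
    max_level = 3
    nproc = 4           # stands in for ytcfg's parallel size
    finest_levels = 1

    # Widen the searched level range until at least nproc grids qualify,
    # so no processor is left without a grid to search.
    while True:
        gi = (grid_levels >= max_level - finest_levels)
        if gi.sum() >= nproc:
            break
        elif finest_levels >= max_level:
            # stands in for raising YTTooParallel
            raise RuntimeError("You've used too many processors for this dataset.")
        else:
            finest_levels += 1

    print(gi.nonzero()[0])  # -> [1 2 3 4 5]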

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/geometry/spherical_coordinates.py
--- a/yt/geometry/spherical_coordinates.py
+++ b/yt/geometry/spherical_coordinates.py
@@ -75,6 +75,7 @@
 
     def pixelize(self, dimension, data_source, field, bounds, size,
                  antialias = True, periodic = True):
+        self.period
         if dimension == 0:
             return self._ortho_pixelize(data_source, field, bounds, size,
                                         antialias, dimension, periodic)
@@ -102,22 +103,16 @@
                       dimension):
         if dimension == 1:
             buff = pixelize_cylinder(data_source['r'],
-                                     data_source['dr'],
+                                     data_source['dr'] / 2.0,
                                      data_source['phi'],
                                      data_source['dphi'] / 2.0, # half-widths
                                      size, data_source[field], bounds)
         elif dimension == 2:
             buff = pixelize_cylinder(data_source['r'],
-                                     data_source['dr'],
+                                     data_source['dr'] / 2.0,
                                      data_source['theta'],
                                      data_source['dtheta'] / 2.0, # half-widths
                                      size, data_source[field], bounds)
-            buff = pixelize_cylinder(data_source['r'],
-                                     data_source['dr'],
-                                     2.0*np.pi - data_source['theta'],
-                                     data_source['dtheta'] / 2.0, # half-widths
-                                     size, data_source[field], bounds,
-                                     input_img = buff)
         else:
             raise RuntimeError
         return buff

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1037,7 +1037,7 @@
         mpd.upload()
 
 class YTInstInfoCmd(YTCommand):
-    name = "instinfo"
+    name = ["instinfo", "version"]
     args = (
             dict(short="-u", long="--update-source", action="store_true",
                  default = False,
@@ -1055,6 +1055,7 @@
 
     def __call__(self, opts):
         import pkg_resources
+        import yt
         yt_provider = pkg_resources.get_provider("yt")
         path = os.path.dirname(yt_provider.module_path)
         print
@@ -1071,10 +1072,11 @@
         vstring = get_yt_version()
         if vstring is not None:
             print
-            print "The current version of the code is:"
+            print "The current version and changeset for the code is:"
             print
             print "---"
-            print vstring.strip()
+            print "Version = %s" % yt.__version__
+            print "Changeset = %s" % vstring.strip()
             print "---"
             print
             if "site-packages" not in path:
@@ -1605,6 +1607,7 @@
 
     def __call__(self, opts):
         import pkg_resources
+        import yt
         yt_provider = pkg_resources.get_provider("yt")
         path = os.path.dirname(yt_provider.module_path)
         print
@@ -1622,10 +1625,11 @@
         if "site-packages" not in path:
             vstring = get_hg_version(path)
             print
-            print "The current version of the code is:"
+            print "The current version and changeset for the code is:"
             print
             print "---"
-            print vstring.strip()
+            print "Version = %s" % yt.__version__
+            print "Changeset = %s" % vstring.strip()
             print "---"
             print
             print "This installation CAN be automatically updated."

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -352,6 +352,10 @@
 class YTEmptyProfileData(Exception):
     pass
 
+class YTTooParallel(YTException):
+    def __str__(self):
+        return "You've used too many processors for this dataset."
+
 class YTDuplicateFieldInProfile(Exception):
     def __init__(self, field, new_spec, old_spec):
         self.field = field

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/utilities/lib/field_interpolation_tables.pxd
--- a/yt/utilities/lib/field_interpolation_tables.pxd
+++ b/yt/utilities/lib/field_interpolation_tables.pxd
@@ -33,6 +33,7 @@
 
 cdef extern from "math.h": 
     double expf(double x) nogil 
+    int isnormal(double x) nogil
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -58,6 +59,7 @@
     cdef np.float64_t bv, dy, dd, tf, rv
     cdef int bin_id
     if dvs[fit.field_id] >= fit.bounds[1] or dvs[fit.field_id] <= fit.bounds[0]: return 0.0
+    if not isnormal(dvs[fit.field_id]): return 0.0
     bin_id = <int> ((dvs[fit.field_id] - fit.bounds[0]) * fit.idbin)
     bin_id = iclip(bin_id, 0, fit.nbins-2)
     dd = dvs[fit.field_id] - (fit.bounds[0] + bin_id * fit.dbin) # x - x0
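
The new isnormal() guard rejects NaN, infinite, and other non-normal sample
values before the bin lookup. A rough NumPy analogue of the two early-return
checks (np.isfinite is close to isnormal, though it also admits zeros and
subnormals):

    import numpy as np

    values = np.array([1.0, np.nan, np.inf, 0.5])
    bounds = (0.0, 2.0)

    # Keep only samples that are finite and strictly inside the bounds.
    ok = np.isfinite(values) & (values > bounds[0]) & (values < bounds[1])
    print(values[ok])  # -> [1.  0.5]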

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -18,6 +18,7 @@
 cimport numpy as np
 cimport cython
 cimport libc.math as math
+from fp_utils cimport fmin, fmax
 
 cdef extern from "stdlib.h":
     # NOTE that size_t might not be int
@@ -529,6 +530,12 @@
 
     return img
 
+cdef void aitoff_thetaphi_to_xy(np.float64_t theta, np.float64_t phi,
+                                np.float64_t *x, np.float64_t *y):
+    cdef np.float64_t z = math.sqrt(1 + math.cos(phi) * math.cos(theta / 2.0))
+    x[0] = math.cos(phi) * math.sin(theta / 2.0) / z
+    y[0] = math.sin(phi) / z
+
 @cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -539,15 +546,21 @@
                     buff_size,
                     np.ndarray[np.float64_t, ndim=1] field,
                     extents, input_img = None):
-    
+    # http://paulbourke.net/geometry/transformationprojection/
+    # longitude is -pi to pi
+    # latitude is -pi/2 to pi/2
+    # z^2 = 1 + cos(latitude) cos(longitude/2)
+    # x = cos(latitude) sin(longitude/2) / z
+    # y = sin(latitude) / z
     cdef np.ndarray[np.float64_t, ndim=2] img
     cdef int i, j, nf, fi
     cdef np.float64_t x, y, z, zb
     cdef np.float64_t dx, dy, inside
     cdef np.float64_t theta1, dtheta1, phi1, dphi1
-    cdef np.float64_t theta0, phi0
+    cdef np.float64_t theta0, phi0, theta_p, dtheta_p, phi_p, dphi_p
     cdef np.float64_t PI = np.pi
     cdef np.float64_t s2 = math.sqrt(2.0)
+    cdef np.float64_t xmax, ymax, xmin, ymin
     nf = field.shape[0]
     
     if input_img is None:
@@ -555,31 +568,66 @@
         img[:] = np.nan
     else:
         img = input_img
+    # Okay, here's our strategy.  We compute the bounds in x and y, which will
+    # be a rectangle, and then for each x, y position we check to see if it's
+    # within our theta.  This will cost *more* computations of the
+    # (x,y)->(theta,phi) calculation, but because we no longer have to search
+    # through the theta, phi arrays, it should be faster.
     dx = 2.0 / (img.shape[0] - 1)
     dy = 2.0 / (img.shape[1] - 1)
-    for i in range(img.shape[0]):
-        x = (-1.0 + i*dx)*s2*2.0
-        for j in range(img.shape[1]):
-            y = (-1.0 + j * dy)*s2
-            zb = (x*x/8.0 + y*y/2.0 - 1.0)
-            if zb > 0: continue
-            z = (1.0 - (x/4.0)**2.0 - (y/2.0)**2.0)
-            z = z**0.5
-            # Longitude
-            phi0 = (2.0*math.atan(z*x/(2.0 * (2.0*z*z-1.0))) + PI)
-            # Latitude
-            # We shift it into co-latitude
-            theta0 = (math.asin(z*y) + PI/2.0)
-            # Now we just need to figure out which pixel contributes.
-            # We do not have a fast search.
-            for fi in range(nf):
-                theta1 = theta[fi]
-                dtheta1 = dtheta[fi]
-                if not (theta1 - dtheta1 <= theta0 <= theta1 + dtheta1):
+    for fi in range(nf):
+        theta_p = theta[fi] - PI
+        dtheta_p = dtheta[fi]
+        phi_p = phi[fi] - PI/2.0
+        dphi_p = dphi[fi]
+        # Four transformations
+        aitoff_thetaphi_to_xy(theta_p - dtheta_p, phi_p - dphi_p, &x, &y)
+        xmin = x
+        xmax = x
+        ymin = y
+        ymax = y
+        aitoff_thetaphi_to_xy(theta_p - dtheta_p, phi_p + dphi_p, &x, &y)
+        xmin = fmin(xmin, x)
+        xmax = fmax(xmax, x)
+        ymin = fmin(ymin, y)
+        ymax = fmax(ymax, y)
+        aitoff_thetaphi_to_xy(theta_p + dtheta_p, phi_p - dphi_p, &x, &y)
+        xmin = fmin(xmin, x)
+        xmax = fmax(xmax, x)
+        ymin = fmin(ymin, y)
+        ymax = fmax(ymax, y)
+        aitoff_thetaphi_to_xy(theta_p + dtheta_p, phi_p + dphi_p, &x, &y)
+        xmin = fmin(xmin, x)
+        xmax = fmax(xmax, x)
+        ymin = fmin(ymin, y)
+        ymax = fmax(ymax, y)
+        # Now we have the (projected rectangular) bounds.
+        xmin = (xmin + 1) # Get this into normalized image coords
+        xmax = (xmax + 1) # Get this into normalized image coords
+        ymin = (ymin + 1) # Get this into normalized image coords
+        ymax = (ymax + 1) # Get this into normalized image coords
+        x0 = <int> (xmin / dx)
+        x1 = <int> (xmax / dx) + 1
+        y0 = <int> (ymin / dy)
+        y1 = <int> (ymax / dy) + 1
+        for i in range(x0, x1):
+            x = (-1.0 + i*dx)*s2*2.0
+            for j in range(y0, y1):
+                y = (-1.0 + j * dy)*s2
+                zb = (x*x/8.0 + y*y/2.0 - 1.0)
+                if zb > 0: continue
+                z = (1.0 - (x/4.0)**2.0 - (y/2.0)**2.0)
+                z = z**0.5
+                # Longitude
+                theta0 = 2.0*math.atan(z*x/(2.0 * (2.0*z*z-1.0)))
+                # Latitude
+                # (no shift needed here: phi_p above is already in latitude)
+                phi0 = math.asin(z*y)
+                # Now we just need to figure out which pixel contributes.
+                # We do not have a fast search.
+                if not (theta_p - dtheta_p <= theta0 <= theta_p + dtheta_p):
                     continue
-                phi1 = phi[fi]
-                dphi1 = dphi[fi]
-                if not (phi1 - dphi1 <= phi0 <= phi1 + dphi1):
+                if not (phi_p - dphi_p <= phi0 <= phi_p + dphi_p):
                     continue
                 img[i, j] = field[fi]
     return img
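
The bounding-box strategy above leans on the forward transform quoted in the
comments. A standalone NumPy version of aitoff_thetaphi_to_xy, useful for
checking the algebra outside of Cython:

    import numpy as np

    def aitoff_thetaphi_to_xy(theta, phi):
        # theta is longitude in [-pi, pi], phi is latitude in [-pi/2, pi/2];
        # returns the normalized (x, y) position on the projected ellipse.
        z = np.sqrt(1.0 + np.cos(phi) * np.cos(theta / 2.0))
        x = np.cos(phi) * np.sin(theta / 2.0) / z
        y = np.sin(phi) / z
        return x, y

    print(aitoff_thetaphi_to_xy(0.0, 0.0))  # the map center lands at (0.0, 0.0)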

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -15,6 +15,7 @@
 import pyx
 import numpy as np
 from matplotlib import cm
+import matplotlib.pyplot as plt
 from _mpl_imports import FigureCanvasAgg
 
 from yt.utilities.logger import ytLogger as mylog
@@ -27,6 +28,7 @@
 from .profile_plotter import PhasePlot
 from .plot_modifications import get_smallest_appropriate_unit
 
+
 class DualEPS(object):
     def __init__(self, figsize=(12,12)):
         r"""Initializes the DualEPS class to which we can progressively add layers
@@ -335,6 +337,26 @@
                     _ylabel = plot[k].axes.get_ylabel()
             if tickcolor == None:
                 _tickcolor = None
+        elif isinstance(plot, np.ndarray):
+            ax = plt.gca()
+            _xrange = ax.get_xlim()
+            _yrange = ax.get_ylim()
+            _xlog=False
+            _ylog=False
+            if bare_axes:
+                _xlabel = ""
+                _ylabel = ""
+            else:
+                if xlabel != None:
+                    _xlabel = xlabel
+                else:
+                    _xlabel = ax.get_xlabel()
+                if ylabel != None:
+                    _ylabel = ylabel
+                else:
+                    _ylabel = ax.get_ylabel()
+            if tickcolor == None:
+                _tickcolor = None
         else:
             _xrange = plot._axes.get_xlim()
             _yrange = plot._axes.get_ylim()
@@ -447,6 +469,13 @@
             # hack to account for non-square display ratios (not sure why)
             if isinstance(plot, PlotWindow):
                 shift = 12.0 / 340
+        elif isinstance(plot, np.ndarray):
+            fig = plt.figure()
+            iplot = plt.figimage(plot)
+            _p1 =  iplot.figure
+            _p1.set_size_inches(self.figsize[0], self.figsize[1])
+            ax = plt.gca();
+            _p1.add_axes(ax)
         else:
             raise RuntimeError("Unknown plot type")
 
@@ -527,6 +556,12 @@
 
         # Scale the colorbar
         shift = (0.5*(1.0-shrink[0])*size[0], 0.5*(1.0-shrink[1])*size[1])
+        # To facilitate stretching rather than shrinking.
+        # If stretched in both directions (rarely sensible), y dominates.
+        if(shrink[0] > 1.0):
+            shift = (0.05*self.figsize[0], 0.5*(1.0-shrink[1])*size[1])
+        if(shrink[1] > 1.0):
+            shift = (0.5*(1.0-shrink[0])*size[0], 0.05*self.figsize[1])
         size = (size[0] * shrink[0], size[1] * shrink[1])
         origin = (origin[0] + shift[0], origin[1] + shift[1])
 
@@ -681,6 +716,59 @@
 
 #=============================================================================
 
+    def arrow(self, size=0.2, label="", loc=(0.05,0.08), labelloc="top",
+              color=pyx.color.cmyk.white,
+              linewidth=pyx.style.linewidth.normal):
+        r"""Draws an arrow in the current figure
+
+        Parameters
+        ----------
+        size : float
+            Length of arrow (base to tip) in units of the figure size.
+        label : string
+            Annotation label of the arrow.
+        loc : tuple of floats
+            Location of the left hand side of the arrow in units of
+            the figure size.
+        labelloc : string
+            Location of the label with respect to the line.  Can be
+            "top" or "bottom"
+        color : `pyx.color.*.*`
+            Color of the arrow.  Example: pyx.color.cmyk.white
+        linewidth : `pyx.style.linewidth.*`
+            Width of the arrow.  Example: pyx.style.linewidth.normal
+
+        Examples
+        --------
+        >>> d = DualEPS()
+        >>> d.axis_box(xrange=(0,100), yrange=(1e-3,1), ylog=True)
+        >>> d.insert_image("arrow_image.jpg")
+        >>> d.arrow(size=0.2, label="Black Hole!", loc=(0.05, 0.1))
+        >>> d.save_fig()
+        """
+        line = pyx.path.line(self.figsize[0]*loc[0],
+                             self.figsize[1]*loc[1],
+                             self.figsize[0]*(loc[0]+size),
+                             self.figsize[1]*loc[1])
+        self.canvas.stroke(line, [linewidth, color, pyx.deco.earrow()])
+       
+
+        if labelloc == "bottom":
+            yoff = -0.1*size
+            valign = pyx.text.valign.top
+        else:
+            yoff = +0.1*size
+            valign = pyx.text.valign.bottom
+        if label != "":
+            self.canvas.text(self.figsize[0]*(loc[0]+0.5*size),
+                             self.figsize[1]*(loc[1]+yoff), label,
+                             [color, valign, pyx.text.halign.center])
+
+        
+
+
+#=============================================================================
+
     def scale_line(self, size=0.2, label="", loc=(0.05,0.08), labelloc="top",
                    color=pyx.color.cmyk.white,
                    linewidth=pyx.style.linewidth.normal):
@@ -711,6 +799,7 @@
         >>> d.scale_line(size=0.2, label="1 kpc", loc=(0.05, 0.1))
         >>> d.save_fig()
         """
+        
         line = pyx.path.line(self.figsize[0]*loc[0],
                              self.figsize[1]*loc[1],
                              self.figsize[0]*(loc[0]+size),
@@ -781,7 +870,7 @@
         
 #=============================================================================
 
-    def save_fig(self, filename="test", format="eps"):
+    def save_fig(self, filename="test", format="eps", resolution=250):
         r"""Saves current figure to a file.
 
         Parameters
@@ -801,6 +890,10 @@
             self.canvas.writeEPSfile(filename)
         elif format == "pdf":
             self.canvas.writePDFfile(filename)
+        elif format == "png":
+             self.canvas.writeGSfile(filename+".png", "png16m", resolution=resolution)
+        elif format == "jpg":
+             self.canvas.writeGSfile(filename+".jpeg", "jpeg", resolution=resolution)
         else:
             raise RuntimeError("format %s unknown." % (format))
             
@@ -924,7 +1017,8 @@
     d = DualEPS(figsize=figsize)
     count = 0
     for j in range(nrow):
-        ypos = j*(figsize[1] + margins[1])
+        invj = nrow - j - 1
+        ypos = invj*(figsize[1] + margins[1])
         for i in range(ncol):
             xpos = i*(figsize[0] + margins[0])
             index = j*ncol + i
@@ -990,7 +1084,8 @@
             100.0 * d.canvas.bbox().bottom().t,
             100.0 * d.canvas.bbox().top().t - d.figsize[1])
     for j in range(nrow):
-        ypos0 = j*(figsize[1] + margins[1])
+        invj = nrow - j - 1
+        ypos0 = invj*(figsize[1] + margins[1])
         for i in range(ncol):
             xpos0 = i*(figsize[0] + margins[0])
             index = j*ncol + i
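
With the new format options, a DualEPS canvas can be rasterized directly
through ghostscript. A short usage sketch, reusing the objects from the
arrow() docstring example above:

    from yt.visualization.eps_writer import DualEPS

    d = DualEPS()
    d.axis_box(xrange=(0, 100), yrange=(1e-3, 1), ylog=True)
    d.insert_image("arrow_image.jpg")
    d.arrow(size=0.2, label="Black Hole!", loc=(0.05, 0.1))
    # "png" and "jpg" now route through canvas.writeGSfile at the given dpi.
    d.save_fig("annotated", format="png", resolution=250)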

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -7,6 +7,7 @@
 from functools import wraps
 from matplotlib.font_manager import FontProperties
 
+from ._mpl_imports import FigureCanvasAgg
 from .tick_locators import LogLocator, LinearLocator
 from .color_maps import yt_colormaps, is_colormap
 from .plot_modifications import \

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -177,6 +177,16 @@
             # Our default width here is the full domain
             width = [pf.domain_right_edge[0]*2.0, pf.domain_right_edge[0]*2.0]
             center = pf.arr([0.0, 0.0, 0.0], "code_length")
+    elif pf.geometry == "geographic":
+        c_r = ((pf.domain_right_edge + pf.domain_left_edge)/2.0)[2]
+        center = pf.arr([0.0, 0.0, c_r], "code_length")
+        if axis == 2:
+            # slice perpendicular to altitude: full lat/lon extent
+            width = pf.arr([360, 180], "code_length")
+        else:
+            width = [2.0*(pf.domain_right_edge[2] + pf.surface_height),
+                     2.0*(pf.domain_right_edge[2] + pf.surface_height)]
+            center[2] = 0.0
     else:
         raise NotImplementedError
     bounds = (center[x_dict[axis]]-width[0] / 2,
@@ -1304,8 +1314,6 @@
         self.set_axes_unit(axes_unit)
 
     def _recreate_frb(self):
-        if self._frb is not None:
-            raise NotImplementedError
         super(OffAxisProjectionPlot, self)._recreate_frb()
 
 _metadata_template = """

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -718,7 +718,6 @@
                 label.set_fontproperties(fp)
                 if self._font_color is not None:
                     label.set_color(self._font_color)
-
         self._plot_valid = True
 
     def save(self, name=None, mpl_kwargs=None):

diff -r a6bd3cac6e9f65e75444cbbfca039b515180d72f -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -620,7 +620,7 @@
         --------
 
         >>> tf = ColorTransferFunction( (-10.0, -5.0) )
-        >>> tf.sample_colormap(-7.0, 0.01, 'algae')
+        >>> tf.sample_colormap(-7.0, 0.01, colormap='algae')
         """
         if col_bounds is None:
             rel = (v - self.x_bounds[0])/(self.x_bounds[1] - self.x_bounds[0])


https://bitbucket.org/yt_analysis/yt/commits/e7d3db2db14d/
Changeset:   e7d3db2db14d
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-17 04:33:09
Summary:     Megaparsec, not milliparsec.
Affected #:  1 file

diff -r e99f2ded848e5dd7dbc0be0a9e5229e42f206131 -r e7d3db2db14d92291ec2afaf76874210bf2a1911 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -263,7 +263,7 @@
     def get_smallest_appropriate_unit(self, v):
         max_nu = 1e30
         good_u = None
-        for unit in ['mpc', 'kpc', 'pc', 'au', 'rsun', 'km', 'cm']:
+        for unit in ['Mpc', 'kpc', 'pc', 'au', 'rsun', 'km', 'cm']:
             vv = v * self.length_unit.in_units(unit)
             if vv < max_nu and vv > 1.0:
                 good_u = unit
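
With the corrected list running from the largest unit to the smallest, the loop
effectively keeps the largest unit in which the converted value still exceeds 1.
A self-contained sketch of that selection logic (conversion factors approximate,
standing in for yt's unit machinery):

    # Centimeters per unit, largest unit first (matching the fixed list).
    units_in_cm = [("Mpc", 3.0857e24), ("kpc", 3.0857e21), ("pc", 3.0857e18),
                   ("au", 1.496e13), ("rsun", 6.955e10),
                   ("km", 1.0e5), ("cm", 1.0)]

    def smallest_appropriate_unit(length_cm):
        # Return the largest unit in which the length is still > 1.
        for unit, cm in units_in_cm:
            v = length_cm / cm
            if v > 1.0:
                return unit, v
        return "cm", length_cm

    print(smallest_appropriate_unit(4.5e22))  # -> ('kpc', ~14.6)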


https://bitbucket.org/yt_analysis/yt/commits/c0111a9ba0e0/
Changeset:   c0111a9ba0e0
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-18 04:15:03
Summary:     Make the coordinate input more flexible.
Affected #:  1 file

diff -r e7d3db2db14d92291ec2afaf76874210bf2a1911 -r c0111a9ba0e0b5bfd05233d1e224b8e37dc02bbf yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -15,6 +15,8 @@
 from yt.visualization.fixed_resolution import FixedResolutionBuffer
 from yt.data_objects.construction_data_containers import YTCoveringGridBase
 from yt.frontends.fits.data_structures import ap
+from yt.units.yt_array import YTQuantity
+
 pyfits = ap.pyfits
 pywcs = ap.pywcs
 
@@ -286,12 +288,18 @@
         The axis of the slice. One of "x","y","z", or 0,1,2.
     fields : string or list of strings
         The fields to slice
-    coord : float
-        The coordinate in pixel units (code length) of the slice along *axis*.
+    coord : float, tuple, or YTQuantity
+        The coordinate of the slice along *axis*. Can be a (value,
+        unit) tuple, a YTQuantity, or a float. If a float, it will be
+        interpreted as in units of code_length.
     """
     def __init__(self, ds, axis, fields, coord, **kwargs):
         fields = ensure_list(fields)
         axis = fix_axis(axis)
+        if isinstance(coord, tuple):
+            coord = ds.quan(coord[0], coord[1]).in_units("code_length").value
+        elif isinstance(coord, YTQuantity):
+            coord = coord.in_units("code_length").value
         slc = ds.slice(axis, coord, **kwargs)
         w, frb = construct_image(slc)
         super(FITSSlice, self).__init__(frb, fields=fields, wcs=w)
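
With this change the slice coordinate can be passed in any of the three
documented forms. A usage sketch (the dataset path is illustrative, borrowed
from the notebook added below):

    import yt
    from yt.utilities.fits_image import FITSSlice

    ds = yt.load("MHDSloshing/virgo_low_res.0054.vtk")

    # All three forms are converted to code_length internally:
    slc1 = FITSSlice(ds, "z", ["temperature"], 0.0)                  # float
    slc2 = FITSSlice(ds, "z", ["temperature"], (100.0, "kpc"))       # (value, unit)
    slc3 = FITSSlice(ds, "z", ["temperature"], ds.quan(0.1, "Mpc"))  # YTQuantity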


https://bitbucket.org/yt_analysis/yt/commits/e3961c36042b/
Changeset:   e3961c36042b
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-18 04:21:44
Summary:     A notebook doc for FITSImageBuffer and its subclasses
Affected #:  3 files

diff -r c0111a9ba0e0b5bfd05233d1e224b8e37dc02bbf -r e3961c36042b028a64deecbf0952908e54ee8a86 doc/source/visualizing/FITSImageBuffer.ipynb
--- /dev/null
+++ b/doc/source/visualizing/FITSImageBuffer.ipynb
@@ -0,0 +1,213 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:407a58e66a9cebe0495eb01f918d4f929609608d9d32642fe9c7a18abf19fd7f"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "yt has capabilities for writing 2D and 3D uniformly gridded data generated from datasets to FITS files. This is via the `FITSImageBuffer` class, which has subclasses `FITSSlice` and `FITSProjection` to write slices and projections directly to FITS. We'll test this out on an Athena dataset."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%matplotlib inline\n",
+      "import yt\n",
+      "from yt.utilities.fits_image import FITSImageBuffer, FITSSlice, FITSProjection"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", parameters={\"length_unit\":(1.0,\"Mpc\"),\n",
+      "                                                               \"mass_unit\":(1.0e14,\"Msun\"),\n",
+      "                                                               \"time_unit\":(1.0,\"Myr\")})"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To demonstrate a useful example of creating a FITS file, let's first make a `ProjectionPlot`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"temperature\"], weight_field=\"density\", width=(500.,\"kpc\"))\n",
+      "prj.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Suppose that we wanted to write this projection to a FITS file for analysis and visualization in other programs, such as ds9. We can do that using `FITSProjection`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits = FITSProjection(ds, \"z\", [\"temperature\"], weight_field=\"density\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "which took the same parameters as `ProjectionPlot` except the width, because `FITSProjection` and `FITSSlice` always make slices and projections of the width of the domain size, at the finest resolution available in the simulation, in a unit determined to be appropriate for the physical size of the dataset. `prj_fits` is a full-fledged FITS file in memory, specifically an [AstroPy `HDUList`](http://astropy.readthedocs.org/en/latest/io/fits/api/hdulists.html) object. This means that we can use all of the methods inherited from `HDUList`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits.info()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "`info` shows us the contents of the virtual FITS file. We can also look at the header for the `\"temperature\"` image, like so:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits[\"temperature\"].header"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "where we can see that the temperature units are in Kelvin and the cell widths are in kiloparsecs. The projection can be written to disk using the `writeto` method:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits.writeto(\"sloshing.fits\", clobber=True)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Since yt can read FITS image files, it can be loaded up just like any other dataset:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds2 = yt.load(\"sloshing.fits\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "and we can make a `SlicePlot` of the 2D image, which shows the same data as the previous image:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc2 = yt.SlicePlot(ds2, \"z\", [\"temperature\"], width=(500.,\"kpc\"))\n",
+      "slc2.set_log(\"temperature\", True)\n",
+      "slc2.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If you want more fine-grained control over what goes into the FITS file, you can call `FITSImageBuffer` directly, with various kinds of inputs. For example, you could use a `FixedResolutionBuffer`, and specify you want the units in parsecs instead:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc3 = ds.slice(0, 0.0)\n",
+      "frb = slc3.to_frb((500.,\"kpc\"), 800)\n",
+      "fib = FITSImageBuffer(frb, fields=[\"density\",\"temperature\"], units=\"pc\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally, a 3D FITS cube can be created from a covering grid:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "cvg = ds.covering_grid(ds.index.max_level, [-0.5,-0.5,-0.5], [64, 64, 64], fields=[\"density\",\"temperature\"])\n",
+      "fib = FITSImageBuffer(cvg, fields=[\"density\",\"temperature\"], units=\"Mpc\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r c0111a9ba0e0b5bfd05233d1e224b8e37dc02bbf -r e3961c36042b028a64deecbf0952908e54ee8a86 doc/source/visualizing/index.rst
--- a/doc/source/visualizing/index.rst
+++ b/doc/source/visualizing/index.rst
@@ -14,3 +14,4 @@
    mapserver
    streamlines
    colormaps/index
+   writing_fits_images

diff -r c0111a9ba0e0b5bfd05233d1e224b8e37dc02bbf -r e3961c36042b028a64deecbf0952908e54ee8a86 doc/source/visualizing/writing_fits_images.rst
--- /dev/null
+++ b/doc/source/visualizing/writing_fits_images.rst
@@ -0,0 +1,6 @@
+.. _writing_fits_images:
+
+Writing FITS Images
+==========================
+
+.. notebook:: FITSImageBuffer.ipynb
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/3e163655332a/
Changeset:   3e163655332a
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-18 14:42:30
Summary:     Merge
Affected #:  1 file

diff -r e3961c36042b028a64deecbf0952908e54ee8a86 -r 3e163655332a14669a1f1c00193c1627c46e06a9 doc/source/analyzing/analysis_modules/halo_analysis.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis.rst
@@ -6,6 +6,7 @@
 
 .. toctree::
    :maxdepth: 1
+
    halo_catalogs
    halo_finding
    halo_mass_function


https://bitbucket.org/yt_analysis/yt/commits/3604cfde8862/
Changeset:   3604cfde8862
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-18 15:43:38
Summary:     Allow for different nan masks for different fields.
Affected #:  2 files

diff -r 3e163655332a14669a1f1c00193c1627c46e06a9 -r 3604cfde886268f612ca4a24baa7279fed1c6085 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -268,10 +268,10 @@
                  slave_files = [],
                  nprocs = None,
                  storage_filename = None,
-                 mask_nans = False,
-                 folded_axis=None,
-                 folded_width=None,
-                 line_database=None,
+                 nan_mask = None,
+                 folded_axis = None,
+                 folded_width = None,
+                 line_database = None,
                  suppress_astropy_warnings = True):
         self.folded_axis = folded_axis
         self.folded_width = folded_width
@@ -285,7 +285,12 @@
         self.filenames = [filename] + slave_files
         self.num_files = len(self.filenames)
         self.fluid_types += ("fits",)
-        self.mask_nans = mask_nans
+        if nan_mask is None:
+            self.nan_mask = {}
+        elif isinstance(nan_mask, float):
+            self.nan_mask = {"all":nan_mask}
+        else:
+            self.nan_mask = nan_mask
         self.nprocs = nprocs
         self._handle = ap.pyfits.open(self.filenames[0],
                                       memmap=True,

diff -r 3e163655332a14669a1f1c00193c1627c46e06a9 -r 3604cfde886268f612ca4a24baa7279fed1c6085 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -83,7 +83,10 @@
                         data = ds.data[idx,start[2]:end[2],start[1]:end[1],start[0]:end[0]].transpose()
                     else:
                         data = ds.data[start[2]:end[2],start[1]:end[1],start[0]:end[0]].transpose()
-                    if self.pf.mask_nans: data[np.isnan(data)] = 0.0
+                    if fname in self.pf.nan_mask:
+                        data[np.isnan(data)] = self.pf.nan_mask[fname]
+                    elif "all" in self.pf.nan_mask:
+                        data[np.isnan(data)] = self.pf.nan_mask["all"]
                     data = bzero + bscale*data
                     ind += g.select(selector, data.astype("float64"), rv[field], ind)
         return rv
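
The new nan_mask argument accepts either a single float, applied to every
field, or a dict keyed by field name, with "all" acting as the fallback key
checked above. A usage sketch with an illustrative file name:

    import yt

    # Replace NaNs in every field with zero:
    ds = yt.load("sloshing.fits", nan_mask=0.0)

    # Per-field masks, with "all" as the fallback for unlisted fields:
    ds = yt.load("sloshing.fits", nan_mask={"temperature": 0.0, "all": -1.0})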


https://bitbucket.org/yt_analysis/yt/commits/83a5858e91dc/
Changeset:   83a5858e91dc
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-18 16:24:07
Summary:     First stab at getting X-ray event data to work with the FITS frontend
Affected #:  1 file

diff -r 3604cfde886268f612ca4a24baa7279fed1c6085 -r 83a5858e91dc89a2fc500f735e9341043b562d5d yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -302,13 +302,46 @@
                                                        do_not_scale_image_data=True,
                                                        ignore_blank=True))
 
-        self.first_image = 0 # Assumed for now
+        if self._handle[1].name == "EVENTS":
+            self.first_image = 1
+        else:
+            self.first_image = 0
         self.primary_header = self._handle[self.first_image].header
-        self.wcs = ap.pywcs.WCS(header=self.primary_header)
-        self.axis_names = {}
-        self.naxis = self.primary_header["naxis"]
-        for i, ax in enumerate("xyz"[:self.naxis]):
-            self.axis_names[self.primary_header["ctype%d" % (i+1)]] = ax
+        if self.first_image == 0:
+            self.wcs = ap.pywcs.WCS(header=self.primary_header)
+            self.axis_names = {}
+            self.naxis = self.primary_header["naxis"]
+            for i, ax in enumerate("xyz"[:self.naxis]):
+                self.axis_names[self.primary_header["ctype%d" % (i+1)]] = ax
+            self.dims = [self.primary_header["naxis%d" % (i+1)]
+                         for i in xrange(self.naxis)]
+
+        else:
+            self.naxis = 2
+            self.wcs = ap.pywcs.WCS(naxis=2)
+            self.axis_names = {}
+            events_info = {}
+            for k,v in self.primary_header.items():
+                if v in ["X","Y"]:
+                    num = k.strip("TTYPE")
+                    events_info[v] = (self.primary_header["TLMIN"+num],
+                                      self.primary_header["TLMAX"+num],
+                                      self.primary_header["TCTYP"+num],
+                                      self.primary_header["TCRVL"+num],
+                                      self.primary_header["TCDLT"+num],
+                                      self.primary_header["TCRPX"+num])
+                elif v == "ENERGY":
+                    num = k.strip("TTYPE")
+                    events_info[v] = self.primary_header["TUNIT"+num]
+            for ax in ["x","y"]:
+                self.axis_names[events_info[ax.upper()][2]] = ax
+            self.wcs.wcs.cdelt = [events_info["x"][4],events_info["y"][4]]
+            self.wcs.wcs.crpix = [events_info["x"][5],events_info["y"][5]]
+            self.wcs.wcs.ctype = [events_info["x"][2],events_info["y"][2]]
+            self.wcs.wcs.cunit = ["deg","deg"]
+            self.wcs.wcs.crval = [events_info["x"][3],events_info["y"][3]]
+            self.dims = [events_info["x"][1]-events_info["x"][0],
+                         events_info["y"][1]-events_info["y"][0]]
         self.refine_by = 2
 
         Dataset.__init__(self, filename, dataset_type)
@@ -350,8 +383,7 @@
         # we take the 4th axis and assume it consists of different fields.
         if self.dimensionality == 4: self.dimensionality = 3
 
-        dims = [self.primary_header["naxis%d" % (i+1)] for i in xrange(self.naxis)]
-        self.domain_dimensions = np.array(dims)[:self.dimensionality]
+        self.domain_dimensions = np.array(self.dims)[:self.dimensionality]
         if self.dimensionality == 2:
             self.domain_dimensions = np.append(self.domain_dimensions,
                                                [int(1)])
@@ -391,8 +423,9 @@
         # Check to see if this data is in (RA,Dec,?) format
         self.xyv_data = False
         x = np.zeros((self.dimensionality), dtype="bool")
-        for ap in axes_prefixes:
-            x += np_char.startswith(self.axis_names.keys()[:self.dimensionality], ap)
+        for apx in axes_prefixes:
+            x += np_char.startswith(self.axis_names.keys()[:self.dimensionality],
+                                    apx)
         if x.sum() == self.dimensionality: self._setup_xyv()
 
     def _setup_xyv(self):
@@ -453,6 +486,8 @@
                 warnings.filterwarnings('ignore', category=UserWarning, append=True)
                 fileh = ap.pyfits.open(args[0])
             valid = fileh[0].header["naxis"] >= 2
+            if len(fileh) > 1 and fileh[1].name == "EVENTS":
+                valid = fileh[1].header["naxis"] >= 2
             fileh.close()
             return valid
         except:


https://bitbucket.org/yt_analysis/yt/commits/0aa8680ae63f/
Changeset:   0aa8680ae63f
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-18 16:26:22
Summary:     Getting width units right in off-axis SZ projections
Affected #:  1 file

diff -r 3604cfde886268f612ca4a24baa7279fed1c6085 -r 0aa8680ae63f09dd05b5e973a061ba22d7046815 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -19,10 +19,8 @@
 #-----------------------------------------------------------------------------
 
 from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
-from yt.fields.local_fields import add_field, derived_field
-from yt.data_objects.image_array import ImageArray
+from yt.units.yt_array import YTQuantity
 from yt.funcs import fix_axis, mylog, iterable, get_pbar
-from yt.utilities.definitions import inv_axis_names
 from yt.visualization.volume_rendering.camera import off_axis_projection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only
@@ -197,7 +195,9 @@
         >>> szprj.off_axis(L, center="c", width=(2.0, "Mpc"))
         """
         if iterable(width):
-            w = width[0]/self.pf.units[width[1]]
+            w = self.pf.quan(width[0], width[1]).in_units("code_length").value
+        elif isinstance(width, YTQuantity):
+            w = width.in_units("code_length").value
         else:
             w = width
         if center == "c":


https://bitbucket.org/yt_analysis/yt/commits/aa8d05986e49/
Changeset:   aa8d05986e49
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-18 22:19:41
Summary:     This is ugly, but I don't know of a better way to make sure this field gets recreated with the new parameters.
Affected #:  2 files

diff -r 0aa8680ae63f09dd05b5e973a061ba22d7046815 -r aa8d05986e49688eeb7d0adda2cd34952672ea21 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -147,7 +147,6 @@
         beta_par = generate_beta_par(L)
         self.pf.field_info.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
         proj = self.pf.h.proj("density", axis, center=ctr, data_source=source)
-        proj.set_field_parameter("axis", axis)
         frb = proj.to_frb(width, nx)
         dens = frb["density"]
         Te = frb["t_sz"]/dens
@@ -172,6 +171,8 @@
                                 np.array(omega1), np.array(sigma1),
                                 np.array(kappa1), np.array(bperp2))
 
+        self.pf.field_info.pop(("gas","beta_par"))
+
     def off_axis(self, L, center="c", width=(1, "unitary"), nx=800, source=None):
         r""" Make an off-axis projection of the SZ signal.
 
@@ -240,6 +241,8 @@
                                 np.array(omega1), np.array(sigma1),
                                 np.array(kappa1), np.array(bperp2))
 
+        self.pf.field_info.pop(("gas","beta_par"))
+
     def _compute_intensity(self, tau, Te, bpar, omega1, sigma1, kappa1, bperp2):
 
         # Bad hack, but we get NaNs if we don't do something like this

diff -r 0aa8680ae63f09dd05b5e973a061ba22d7046815 -r aa8d05986e49688eeb7d0adda2cd34952672ea21 yt/analysis_modules/xyv_cube/xyv_cube.py
--- a/yt/analysis_modules/xyv_cube/xyv_cube.py
+++ b/yt/analysis_modules/xyv_cube/xyv_cube.py
@@ -89,25 +89,26 @@
             vmin, vmax = dd.quantities.extrema("velocity_magnitude")
             self.v_bnd = -vmax, vmax
         else:
-            self.v_bnd = (ds.arr(velocity_bounds[0], velocity_bounds[2]),
-                     ds.arr(velocity_bounds[1], velocity_bounds[2]))
+            self.v_bnd = (ds.quan(velocity_bounds[0], velocity_bounds[2]),
+                     ds.quan(velocity_bounds[1], velocity_bounds[2]))
 
         vbins = np.linspace(self.v_bnd[0], self.v_bnd[1], num=self.nv+1)
 
+        _vlos = create_vlos(orient.unit_vectors[2])
+        ds.field_info.add_field(("gas","v_los"), function=_vlos, units="cm/s")
+
         self.data = ds.arr(np.zeros((self.nx,self.ny,self.nv)), self.field_units)
         pbar = get_pbar("Generating cube.", self.nv)
         for i in xrange(self.nv):
             v1 = vbins[i]
             v2 = vbins[i+1]
             _intensity = create_intensity(v1, v2, field)
-            _vlos = create_vlos(orient.unit_vectors[2])
             ds.field_info.add_field(("gas","intensity"),
                                     function=_intensity, units=self.field_units)
-            ds.field_info.add_field(("gas","v_los"),
-                                    function=_vlos, units="cm/s")
             prj = off_axis_projection(ds, ds.domain_center, normal, width,
                                       (self.nx, self.ny), "intensity")
             self.data[:,:,i] = prj[:,:]
+            ds.field_info.pop(("gas","intensity"))
             pbar.update(i)
 
         pbar.finish()
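
The register-use-pop pattern introduced in this changeset can be wrapped up
generically, so a later call always re-registers the field with its new
parameters. A minimal sketch (the context-manager helper is hypothetical, not
part of yt):

    from contextlib import contextmanager

    @contextmanager
    def temporary_field(ds, name, function, units):
        # Register the derived field for the duration of the with-block,
        # then pop it so the next call can rebuild it with new parameters.
        ds.field_info.add_field(name, function=function, units=units)
        try:
            yield
        finally:
            ds.field_info.pop(name)

    # Hypothetical usage mirroring the loop above:
    # with temporary_field(ds, ("gas", "intensity"), _intensity, field_units):
    #     prj = off_axis_projection(ds, ds.domain_center, normal, width,
    #                               (nx, ny), "intensity")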


https://bitbucket.org/yt_analysis/yt/commits/90a3ee9b9ee0/
Changeset:   90a3ee9b9ee0
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-18 22:55:24
Summary:     This name is more in line with standard terminology
Affected #:  12 files

diff -r aa8d05986e49688eeb7d0adda2cd34952672ea21 -r 90a3ee9b9ee0b9c7de5b91082602fbfae1f09f7c yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -115,5 +115,5 @@
      PhotonModel, \
      ThermalPhotonModel
 
-from .xyv_cube.api import \
-    XYVCube
+from .ppv_cube.api import \
+    PPVCube

diff -r aa8d05986e49688eeb7d0adda2cd34952672ea21 -r 90a3ee9b9ee0b9c7de5b91082602fbfae1f09f7c yt/analysis_modules/ppv_cube/api.py
--- /dev/null
+++ b/yt/analysis_modules/ppv_cube/api.py
@@ -0,0 +1,12 @@
+"""
+API for ppv_cube
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from ppv_cube import PPVCube

diff -r aa8d05986e49688eeb7d0adda2cd34952672ea21 -r 90a3ee9b9ee0b9c7de5b91082602fbfae1f09f7c yt/analysis_modules/ppv_cube/setup.py
--- /dev/null
+++ b/yt/analysis_modules/ppv_cube/setup.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('ppv_cube', parent_package, top_path)
+    #config.add_subpackage("tests")
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r aa8d05986e49688eeb7d0adda2cd34952672ea21 -r 90a3ee9b9ee0b9c7de5b91082602fbfae1f09f7c yt/analysis_modules/ppv_cube/xyv_cube.py
--- /dev/null
+++ b/yt/analysis_modules/ppv_cube/xyv_cube.py
@@ -0,0 +1,167 @@
+"""
+Generating PPV FITS cubes
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.frontends.fits.data_structures import ap
+from yt.utilities.orientation import Orientation
+from yt.utilities.fits_image import FITSImageBuffer
+from yt.visualization.volume_rendering.camera import off_axis_projection
+from yt.funcs import get_pbar
+
+def create_intensity(vmin, vmax, ifield):
+    def _intensity(field, data):
+        idxs = np.logical_and(data["v_los"] >= vmin, data["v_los"] < vmax)
+        f = np.zeros(data[ifield].shape)
+        f[idxs] = data[ifield][idxs]
+        return f
+    return _intensity
+
+def create_vlos(z_hat):
+    def _v_los(field, data):
+        vz = data["velocity_x"]*z_hat[0] + \
+             data["velocity_y"]*z_hat[1] + \
+             data["velocity_z"]*z_hat[2]
+        return -vz
+    return _v_los
+
+class PPVCube(object):
+    def __init__(self, ds, normal, field, width=(1.0,"unitary"),
+                 dims=(100,100,100), velocity_bounds=None):
+        r""" Initialize a PPVCube object.
+
+        Parameters
+        ----------
+        ds : dataset
+            The dataset.
+        normal : array_like
+            The normal vector along which to make the projections.
+        field : string
+            The field to project.
+        width : float or tuple, optional
+            The width of the projection in length units. Specify a float
+            for code_length units or a tuple (value, units).
+        dims : tuple, optional
+            A 3-tuple of dimensions (nx,ny,nv) for the cube.
+        velocity_bounds : tuple, optional
+            A 3-tuple of (vmin, vmax, units) for the velocity bounds to
+            integrate over. If None, the largest velocity of the
+            dataset will be used, i.e. velocity_bounds = (-v.max(), v.max())
+
+        Examples
+        --------
+        >>> i = 60*np.pi/180.
+        >>> L = [0.0,np.sin(i),np.cos(i)]
+        >>> cube = PPVCube(ds, L, "density", width=(10.,"kpc"),
+        ...                velocity_bounds=(-5.,4.,"km/s"))
+        """
+        self.ds = ds
+        self.field = field
+        self.width = width
+
+        self.nx = dims[0]
+        self.ny = dims[1]
+        self.nv = dims[2]
+
+        normal = np.array(normal)
+        normal /= np.sqrt(np.dot(normal, normal))
+        vecs = np.identity(3)
+        t = np.cross(normal, vecs).sum(axis=1)
+        ax = t.argmax()
+        north = np.cross(normal, vecs[ax,:]).ravel()
+        orient = Orientation(normal, north_vector=north)
+
+        dd = ds.all_data()
+
+        fd = dd._determine_fields(field)[0]
+
+        self.field_units = ds._get_field_info(fd).units
+
+        if velocity_bounds is None:
+            vmin, vmax = dd.quantities.extrema("velocity_magnitude")
+            self.v_bnd = -vmax, vmax
+        else:
+            self.v_bnd = (ds.quan(velocity_bounds[0], velocity_bounds[2]),
+                     ds.quan(velocity_bounds[1], velocity_bounds[2]))
+
+        vbins = np.linspace(self.v_bnd[0], self.v_bnd[1], num=self.nv+1)
+
+        _vlos = create_vlos(orient.unit_vectors[2])
+        ds.field_info.add_field(("gas","v_los"), function=_vlos, units="cm/s")
+
+        self.data = ds.arr(np.zeros((self.nx,self.ny,self.nv)), self.field_units)
+        pbar = get_pbar("Generating cube.", self.nv)
+        for i in xrange(self.nv):
+            v1 = vbins[i]
+            v2 = vbins[i+1]
+            _intensity = create_intensity(v1, v2, field)
+            ds.field_info.add_field(("gas","intensity"),
+                                    function=_intensity, units=self.field_units)
+            prj = off_axis_projection(ds, ds.domain_center, normal, width,
+                                      (self.nx, self.ny), "intensity")
+            self.data[:,:,i] = prj[:,:]
+            ds.field_info.pop(("gas","intensity"))
+            pbar.update(i)
+
+        pbar.finish()
+
+    def write_fits(self, filename, clobber=True, length_unit=(10.0, "kpc"),
+                   velocity_unit="m/s", sky_center=(30.,45.)):
+        r""" Write the PPVCube to a FITS file.
+
+        Parameters
+        ----------
+        filename : string
+            The name of the file to write.
+        clobber : boolean
+            Whether or not to clobber an existing file with the same name.
+        length_unit : tuple, optional
+            The length that corresponds to the width of the projection in
+            (value, unit) form. Accepts a length unit or 'deg'.
+        velocity_unit : string, optional
+            The units for the velocity axis.
+        sky_center : tuple, optional
+            The (RA, Dec) coordinate in degrees of the central pixel if
+            *length_unit* is 'deg'.
+
+        Examples
+        --------
+        >>> cube.write_fits("my_cube.fits", clobber=False, length_unit=(5,"deg"),
+        ...                 velocity_unit="km/s")
+        """
+        if length_unit[1] == "deg":
+            center = sky_center
+            types = ["RA---SIN","DEC--SIN"]
+        else:
+            center = [0.0,0.0]
+            types = ["LINEAR","LINEAR"]
+
+        v_center = 0.5*(self.v_bnd[0]+self.v_bnd[1]).in_units(velocity_unit).value
+
+        dx = length_unit[0]/self.nx
+        dy = length_unit[0]/self.ny
+        dv = (self.v_bnd[1]-self.v_bnd[0]).in_units(velocity_unit).value/self.nv
+
+        if length_unit[1] == "deg":
+            dx *= -1.
+
+        w = ap.pywcs.WCS(naxis=3)
+        w.wcs.crpix = [0.5*(self.nx+1), 0.5*(self.ny+1), 0.5*(self.nv+1)]
+        w.wcs.cdelt = [dx,dy,dv]
+        w.wcs.crval = [center[0], center[1], v_center]
+        w.wcs.cunit = [length_unit[1],length_unit[1],velocity_unit]
+        w.wcs.ctype = [types[0],types[1],"VELO-LSR"]
+
+        fib = FITSImageBuffer(self.data.transpose(), fields=self.field, wcs=w)
+        fib[0].header["bunit"] = self.field_units
+        fib[0].header["btype"] = self.field
+
+        fib.writeto(filename, clobber=clobber)
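
Note on the channel loop above: create_intensity builds, for each velocity channel, a field that keeps the projected quantity only where the line-of-sight velocity falls in [v1, v2). A minimal plain-NumPy sketch of that masking, with made-up sample values:

    import numpy as np

    v_los = np.array([-3.0, -1.0, 0.5, 2.0, 4.0])  # line-of-sight velocity per cell
    dens = np.array([1.0, 2.0, 3.0, 4.0, 5.0])     # the field being projected
    v1, v2 = -1.5, 2.5                              # one velocity channel
    mask = np.logical_and(v_los >= v1, v_los < v2)  # same test as _intensity above
    channel = np.zeros_like(dens)
    channel[mask] = dens[mask]                      # -> [0., 2., 3., 4., 0.]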

diff -r aa8d05986e49688eeb7d0adda2cd34952672ea21 -r 90a3ee9b9ee0b9c7de5b91082602fbfae1f09f7c yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -23,5 +23,5 @@
     config.add_subpackage("sunyaev_zeldovich")
     config.add_subpackage("particle_trajectories")
     config.add_subpackage("photon_simulator")
-    config.add_subpackage("xyv_cube")
+    config.add_subpackage("ppv_cube")
     return config

diff -r aa8d05986e49688eeb7d0adda2cd34952672ea21 -r 90a3ee9b9ee0b9c7de5b91082602fbfae1f09f7c yt/analysis_modules/xyv_cube/api.py
--- a/yt/analysis_modules/xyv_cube/api.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""
-API for xyv_cube
-"""
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from xyv_cube import XYVCube

diff -r aa8d05986e49688eeb7d0adda2cd34952672ea21 -r 90a3ee9b9ee0b9c7de5b91082602fbfae1f09f7c yt/analysis_modules/xyv_cube/setup.py
--- a/yt/analysis_modules/xyv_cube/setup.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('xyv_cube', parent_package, top_path)
-    #config.add_subpackage("tests")
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r aa8d05986e49688eeb7d0adda2cd34952672ea21 -r 90a3ee9b9ee0b9c7de5b91082602fbfae1f09f7c yt/analysis_modules/xyv_cube/xyv_cube.py
--- a/yt/analysis_modules/xyv_cube/xyv_cube.py
+++ /dev/null
@@ -1,167 +0,0 @@
-"""
-Generating XYV FITS cubes
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-from yt.frontends.fits.data_structures import ap
-from yt.utilities.orientation import Orientation
-from yt.utilities.fits_image import FITSImageBuffer
-from yt.visualization.volume_rendering.camera import off_axis_projection
-from yt.funcs import get_pbar
-
-def create_intensity(vmin, vmax, ifield):
-    def _intensity(field, data):
-        idxs = np.logical_and(data["v_los"] >= vmin, data["v_los"] < vmax)
-        f = np.zeros(data[ifield].shape)
-        f[idxs] = data[ifield][idxs]
-        return f
-    return _intensity
-
-def create_vlos(z_hat):
-    def _v_los(field, data):
-        vz = data["velocity_x"]*z_hat[0] + \
-             data["velocity_y"]*z_hat[1] + \
-             data["velocity_z"]*z_hat[2]
-        return -vz
-    return _v_los
-
-class XYVCube(object):
-    def __init__(self, ds, normal, field, width=(1.0,"unitary"),
-                 dims=(100,100,100), velocity_bounds=None):
-        r""" Initialize a XYVCube object.
-
-        Parameters
-        ----------
-        ds : dataset
-            The dataset.
-        normal : array_like
-            The normal vector along which to make the projections.
-        field : string
-            The field to project.
-        width : float or tuple, optional
-            The width of the projection in length units. Specify a float
-            for code_length units or a tuple (value, units).
-        dims : tuple, optional
-            A 3-tuple of dimensions (nx,ny,nv) for the cube.
-        velocity_bounds : tuple, optional
-            A 3-tuple of (vmin, vmax, units) for the velocity bounds to
-            integrate over. If None, the largest velocity magnitude in the
-            dataset will be used, i.e. velocity_bounds = (-v.max(), v.max())
-
-        Examples
-        --------
-        >>> i = 60*np.pi/180.
-        >>> L = [0.0,np.sin(i),np.cos(i)]
-        >>> cube = XYVCube(ds, L, "density", width=(10.,"kpc"),
-        ...                velocity_bounds=(-5.,4.,"km/s"))
-        """
-        self.ds = ds
-        self.field = field
-        self.width = width
-
-        self.nx = dims[0]
-        self.ny = dims[1]
-        self.nv = dims[2]
-
-        normal = np.array(normal)
-        normal /= np.sqrt(np.dot(normal, normal))
-        vecs = np.identity(3)
-        t = np.cross(normal, vecs).sum(axis=1)
-        ax = t.argmax()
-        north = np.cross(normal, vecs[ax,:]).ravel()
-        orient = Orientation(normal, north_vector=north)
-
-        dd = ds.all_data()
-
-        fd = dd._determine_fields(field)[0]
-
-        self.field_units = ds._get_field_info(fd).units
-
-        if velocity_bounds is None:
-            vmin, vmax = dd.quantities.extrema("velocity_magnitude")
-            self.v_bnd = -vmax, vmax
-        else:
-            self.v_bnd = (ds.quan(velocity_bounds[0], velocity_bounds[2]),
-                     ds.quan(velocity_bounds[1], velocity_bounds[2]))
-
-        vbins = np.linspace(self.v_bnd[0], self.v_bnd[1], num=self.nv+1)
-
-        _vlos = create_vlos(orient.unit_vectors[2])
-        ds.field_info.add_field(("gas","v_los"), function=_vlos, units="cm/s")
-
-        self.data = ds.arr(np.zeros((self.nx,self.ny,self.nv)), self.field_units)
-        pbar = get_pbar("Generating cube.", self.nv)
-        for i in xrange(self.nv):
-            v1 = vbins[i]
-            v2 = vbins[i+1]
-            _intensity = create_intensity(v1, v2, field)
-            ds.field_info.add_field(("gas","intensity"),
-                                    function=_intensity, units=self.field_units)
-            prj = off_axis_projection(ds, ds.domain_center, normal, width,
-                                      (self.nx, self.ny), "intensity")
-            self.data[:,:,i] = prj[:,:]
-            ds.field_info.pop(("gas","intensity"))
-            pbar.update(i)
-
-        pbar.finish()
-
-    def write_fits(self, filename, clobber=True, length_unit=(10.0, "kpc"),
-                   velocity_unit="m/s", sky_center=(30.,45.)):
-        r""" Write the XYVCube to a FITS file.
-
-        Parameters
-        ----------
-        filename : string
-            The name of the file to write.
-        clobber : boolean
-            Whether or not to clobber an existing file with the same name.
-        length_unit : tuple, optional
-            The length that corresponds to the width of the projection in
-            (value, unit) form. Accepts a length unit or 'deg'.
-        velocity_unit : string, optional
-            The units for the velocity axis.
-        sky_center : tuple, optional
-            The (RA, Dec) coordinate in degrees of the central pixel if
-            *length_unit* is 'deg'.
-
-        Examples
-        --------
-        >>> cube.write_fits("my_cube.fits", clobber=False, length_unit=(5,"deg"),
-        ...                 velocity_unit="km/s")
-        """
-        if length_unit[1] == "deg":
-            center = sky_center
-            types = ["RA---SIN","DEC--SIN"]
-        else:
-            center = [0.0,0.0]
-            types = ["LINEAR","LINEAR"]
-
-        v_center = 0.5*(self.v_bnd[0]+self.v_bnd[1]).in_units(velocity_unit).value
-
-        dx = length_unit[0]/self.nx
-        dy = length_unit[0]/self.ny
-        dv = (self.v_bnd[1]-self.v_bnd[0]).in_units(velocity_unit).value/self.nv
-
-        if length_unit[1] == "deg":
-            dx *= -1.
-
-        w = ap.pywcs.WCS(naxis=3)
-        w.wcs.crpix = [0.5*(self.nx+1), 0.5*(self.ny+1), 0.5*(self.nv+1)]
-        w.wcs.cdelt = [dx,dy,dv]
-        w.wcs.crval = [center[0], center[1], v_center]
-        w.wcs.cunit = [length_unit[1],length_unit[1],velocity_unit]
-        w.wcs.ctype = [types[0],types[1],"VELO-LSR"]
-
-        fib = FITSImageBuffer(self.data.transpose(), fields=self.field, wcs=w)
-        fib[0].header["bunit"] = self.field_units
-        fib[0].header["btype"] = self.field
-
-        fib.writeto(filename, clobber=clobber)

diff -r aa8d05986e49688eeb7d0adda2cd34952672ea21 -r 90a3ee9b9ee0b9c7de5b91082602fbfae1f09f7c yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -389,15 +389,15 @@
             self.nprocs = min(self.nprocs, 512)
 
         # Check to see if this data is in (RA,Dec,?) format
-        self.xyv_data = False
+        self.ppv_data = False
         x = np.zeros((self.dimensionality), dtype="bool")
         for ap in axes_prefixes:
             x += np_char.startswith(self.axis_names.keys()[:self.dimensionality], ap)
-        if x.sum() == self.dimensionality: self._setup_xyv()
+        if x.sum() == self.dimensionality: self._setup_ppv()
 
-    def _setup_xyv(self):
+    def _setup_ppv(self):
 
-        self.xyv_data = True
+        self.ppv_data = True
 
         end = min(self.dimensionality+1,4)
         ctypes = np.array([self.primary_header["CTYPE%d" % (i)] for i in xrange(1,end)])
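
The check above treats a dataset as PPV when every axis name starts with one of the recognized prefixes. A minimal sketch of that test in plain NumPy (the axis names and prefixes here are made-up stand-ins for self.axis_names and axes_prefixes):

    import numpy as np

    axis_names = ["RA---SIN", "DEC--SIN", "VELO-LSR"]  # hypothetical CTYPE values
    prefixes = ["RA", "DEC", "VELO"]
    x = np.zeros(len(axis_names), dtype="bool")
    for p in prefixes:
        x += np.char.startswith(axis_names, p)         # accumulate matches per axis
    print(x.sum() == len(axis_names))                  # True -> _setup_ppv() would run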

diff -r aa8d05986e49688eeb7d0adda2cd34952672ea21 -r 90a3ee9b9ee0b9c7de5b91082602fbfae1f09f7c yt/frontends/fits/fields.py
--- a/yt/frontends/fits/fields.py
+++ b/yt/frontends/fits/fields.py
@@ -36,7 +36,7 @@
         return data.pf.arr(data.pf.wcs_1d.wcs_pix2world(data["z"], 1)[0],
                            str(data.pf.wcs_1d.wcs.cunit[0]))
 
-    def _setup_xyv_fields(self):
+    def _setup_ppv_fields(self):
         def world_f(axis, unit):
             def _world_f(field, data):
                 return data.pf.arr(self._get_2d_wcs(data, axis), unit)
@@ -57,8 +57,8 @@
 
     def setup_fluid_fields(self):
 
-        if self.pf.xyv_data:
-            self._setup_xyv_fields()
+        if self.pf.ppv_data:
+            self._setup_ppv_fields()
             return
 
         def world_f(axis, unit):


https://bitbucket.org/yt_analysis/yt/commits/fb2c43b53447/
Changeset:   fb2c43b53447
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-18 23:38:31
Summary:     Missed this
Affected #:  5 files

diff -r 90a3ee9b9ee0b9c7de5b91082602fbfae1f09f7c -r fb2c43b534470f51deabea9997b864640f8e88c5 yt/analysis_modules/ppv_cube/ppv_cube.py
--- /dev/null
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -0,0 +1,167 @@
+"""
+Generating PPV FITS cubes
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.frontends.fits.data_structures import ap
+from yt.utilities.orientation import Orientation
+from yt.utilities.fits_image import FITSImageBuffer
+from yt.visualization.volume_rendering.camera import off_axis_projection
+from yt.funcs import get_pbar
+
+def create_intensity(vmin, vmax, ifield):
+    def _intensity(field, data):
+        idxs = np.logical_and(data["v_los"] >= vmin, data["v_los"] < vmax)
+        f = np.zeros(data[ifield].shape)
+        f[idxs] = data[ifield][idxs]
+        return f
+    return _intensity
+
+def create_vlos(z_hat):
+    def _v_los(field, data):
+        vz = data["velocity_x"]*z_hat[0] + \
+             data["velocity_y"]*z_hat[1] + \
+             data["velocity_z"]*z_hat[2]
+        return -vz
+    return _v_los
+
+class PPVCube(object):
+    def __init__(self, ds, normal, field, width=(1.0,"unitary"),
+                 dims=(100,100,100), velocity_bounds=None):
+        r""" Initialize a PPVCube object.
+
+        Parameters
+        ----------
+        ds : dataset
+            The dataset.
+        normal : array_like
+            The normal vector along which to make the projections.
+        field : string
+            The field to project.
+        width : float or tuple, optional
+            The width of the projection in length units. Specify a float
+            for code_length units or a tuple (value, units).
+        dims : tuple, optional
+            A 3-tuple of dimensions (nx,ny,nv) for the cube.
+        velocity_bounds : tuple, optional
+            A 3-tuple of (vmin, vmax, units) for the velocity bounds to
+            integrate over. If None, the largest velocity magnitude in the
+            dataset will be used, i.e. velocity_bounds = (-v.max(), v.max())
+
+        Examples
+        --------
+        >>> i = 60*np.pi/180.
+        >>> L = [0.0,np.sin(i),np.cos(i)]
+        >>> cube = PPVCube(ds, L, "density", width=(10.,"kpc"),
+        ...                velocity_bounds=(-5.,4.,"km/s"))
+        """
+        self.ds = ds
+        self.field = field
+        self.width = width
+
+        self.nx = dims[0]
+        self.ny = dims[1]
+        self.nv = dims[2]
+
+        normal = np.array(normal)
+        normal /= np.sqrt(np.dot(normal, normal))
+        vecs = np.identity(3)
+        t = np.cross(normal, vecs).sum(axis=1)
+        ax = t.argmax()
+        north = np.cross(normal, vecs[ax,:]).ravel()
+        orient = Orientation(normal, north_vector=north)
+
+        dd = ds.all_data()
+
+        fd = dd._determine_fields(field)[0]
+
+        self.field_units = ds._get_field_info(fd).units
+
+        if velocity_bounds is None:
+            vmin, vmax = dd.quantities.extrema("velocity_magnitude")
+            self.v_bnd = -vmax, vmax
+        else:
+            self.v_bnd = (ds.quan(velocity_bounds[0], velocity_bounds[2]),
+                     ds.quan(velocity_bounds[1], velocity_bounds[2]))
+
+        vbins = np.linspace(self.v_bnd[0], self.v_bnd[1], num=self.nv+1)
+
+        _vlos = create_vlos(orient.unit_vectors[2])
+        ds.field_info.add_field(("gas","v_los"), function=_vlos, units="cm/s")
+
+        self.data = ds.arr(np.zeros((self.nx,self.ny,self.nv)), self.field_units)
+        pbar = get_pbar("Generating cube.", self.nv)
+        for i in xrange(self.nv):
+            v1 = vbins[i]
+            v2 = vbins[i+1]
+            _intensity = create_intensity(v1, v2, field)
+            ds.field_info.add_field(("gas","intensity"),
+                                    function=_intensity, units=self.field_units)
+            prj = off_axis_projection(ds, ds.domain_center, normal, width,
+                                      (self.nx, self.ny), "intensity")
+            self.data[:,:,i] = prj[:,:]
+            ds.field_info.pop(("gas","intensity"))
+            pbar.update(i)
+
+        pbar.finish()
+
+    def write_fits(self, filename, clobber=True, length_unit=(10.0, "kpc"),
+                   velocity_unit="m/s", sky_center=(30.,45.)):
+        r""" Write the PPVCube to a FITS file.
+
+        Parameters
+        ----------
+        filename : string
+            The name of the file to write.
+        clobber : boolean
+            Whether or not to clobber an existing file with the same name.
+        length_unit : tuple, optional
+            The length that corresponds to the width of the projection in
+            (value, unit) form. Accepts a length unit or 'deg'.
+        velocity_unit : string, optional
+            The units for the velocity axis.
+        sky_center : tuple, optional
+            The (RA, Dec) coordinate in degrees of the central pixel if
+            *length_unit* is 'deg'.
+
+        Examples
+        --------
+        >>> cube.write_fits("my_cube.fits", clobber=False, length_unit=(5,"deg"),
+        ...                 velocity_unit="km/s")
+        """
+        if length_unit[1] == "deg":
+            center = sky_center
+            types = ["RA---SIN","DEC--SIN"]
+        else:
+            center = [0.0,0.0]
+            types = ["LINEAR","LINEAR"]
+
+        v_center = 0.5*(self.v_bnd[0]+self.v_bnd[1]).in_units(velocity_unit).value
+
+        dx = length_unit[0]/self.nx
+        dy = length_unit[0]/self.ny
+        dv = (self.v_bnd[1]-self.v_bnd[0]).in_units(velocity_unit).value/self.nv
+
+        if length_unit[1] == "deg":
+            dx *= -1.
+
+        w = ap.pywcs.WCS(naxis=3)
+        w.wcs.crpix = [0.5*(self.nx+1), 0.5*(self.ny+1), 0.5*(self.nv+1)]
+        w.wcs.cdelt = [dx,dy,dv]
+        w.wcs.crval = [center[0], center[1], v_center]
+        w.wcs.cunit = [length_unit[1],length_unit[1],velocity_unit]
+        w.wcs.ctype = [types[0],types[1],"VELO-LSR"]
+
+        fib = FITSImageBuffer(self.data.transpose(), fields=self.field, wcs=w)
+        fib[0].header["bunit"] = self.field_units
+        fib[0].header["btype"] = self.field
+
+        fib.writeto(filename, clobber=clobber)
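
A note on the orientation setup in __init__ above: the north vector comes from crossing the normalized normal with one coordinate axis picked by a simple argmax heuristic, which always yields a vector perpendicular to the sight line. A standalone check of that construction:

    import numpy as np

    normal = np.array([0.0, np.sin(np.pi / 3), np.cos(np.pi / 3)])
    normal /= np.sqrt(np.dot(normal, normal))
    vecs = np.identity(3)
    t = np.cross(normal, vecs).sum(axis=1)  # one cross product per coordinate axis
    ax = t.argmax()
    north = np.cross(normal, vecs[ax, :]).ravel()
    print(np.dot(north, normal))            # ~0: north is perpendicular to normal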

diff -r 90a3ee9b9ee0b9c7de5b91082602fbfae1f09f7c -r fb2c43b534470f51deabea9997b864640f8e88c5 yt/analysis_modules/ppv_cube/xyv_cube.py
--- a/yt/analysis_modules/ppv_cube/xyv_cube.py
+++ /dev/null
@@ -1,167 +0,0 @@
-"""
-Generating PPV FITS cubes
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-from yt.frontends.fits.data_structures import ap
-from yt.utilities.orientation import Orientation
-from yt.utilities.fits_image import FITSImageBuffer
-from yt.visualization.volume_rendering.camera import off_axis_projection
-from yt.funcs import get_pbar
-
-def create_intensity(vmin, vmax, ifield):
-    def _intensity(field, data):
-        idxs = np.logical_and(data["v_los"] >= vmin, data["v_los"] < vmax)
-        f = np.zeros(data[ifield].shape)
-        f[idxs] = data[ifield][idxs]
-        return f
-    return _intensity
-
-def create_vlos(z_hat):
-    def _v_los(field, data):
-        vz = data["velocity_x"]*z_hat[0] + \
-             data["velocity_y"]*z_hat[1] + \
-             data["velocity_z"]*z_hat[2]
-        return -vz
-    return _v_los
-
-class PPVCube(object):
-    def __init__(self, ds, normal, field, width=(1.0,"unitary"),
-                 dims=(100,100,100), velocity_bounds=None):
-        r""" Initialize a PPVCube object.
-
-        Parameters
-        ----------
-        ds : dataset
-            The dataset.
-        normal : array_like
-            The normal vector along which to make the projections.
-        field : string
-            The field to project.
-        width : float or tuple, optional
-            The width of the projection in length units. Specify a float
-            for code_length units or a tuple (value, units).
-        dims : tuple, optional
-            A 3-tuple of dimensions (nx,ny,nv) for the cube.
-        velocity_bounds : tuple, optional
-            A 3-tuple of (vmin, vmax, units) for the velocity bounds to
-            integrate over. If None, the largest velocity magnitude in the
-            dataset will be used, i.e. velocity_bounds = (-v.max(), v.max())
-
-        Examples
-        --------
-        >>> i = 60*np.pi/180.
-        >>> L = [0.0,np.sin(i),np.cos(i)]
-        >>> cube = PPVCube(ds, L, "density", width=(10.,"kpc"),
-        ...                velocity_bounds=(-5.,4.,"km/s"))
-        """
-        self.ds = ds
-        self.field = field
-        self.width = width
-
-        self.nx = dims[0]
-        self.ny = dims[1]
-        self.nv = dims[2]
-
-        normal = np.array(normal)
-        normal /= np.sqrt(np.dot(normal, normal))
-        vecs = np.identity(3)
-        t = np.cross(normal, vecs).sum(axis=1)
-        ax = t.argmax()
-        north = np.cross(normal, vecs[ax,:]).ravel()
-        orient = Orientation(normal, north_vector=north)
-
-        dd = ds.all_data()
-
-        fd = dd._determine_fields(field)[0]
-
-        self.field_units = ds._get_field_info(fd).units
-
-        if velocity_bounds is None:
-            vmin, vmax = dd.quantities.extrema("velocity_magnitude")
-            self.v_bnd = -vmax, vmax
-        else:
-            self.v_bnd = (ds.quan(velocity_bounds[0], velocity_bounds[2]),
-                     ds.quan(velocity_bounds[1], velocity_bounds[2]))
-
-        vbins = np.linspace(self.v_bnd[0], self.v_bnd[1], num=self.nv+1)
-
-        _vlos = create_vlos(orient.unit_vectors[2])
-        ds.field_info.add_field(("gas","v_los"), function=_vlos, units="cm/s")
-
-        self.data = ds.arr(np.zeros((self.nx,self.ny,self.nv)), self.field_units)
-        pbar = get_pbar("Generating cube.", self.nv)
-        for i in xrange(self.nv):
-            v1 = vbins[i]
-            v2 = vbins[i+1]
-            _intensity = create_intensity(v1, v2, field)
-            ds.field_info.add_field(("gas","intensity"),
-                                    function=_intensity, units=self.field_units)
-            prj = off_axis_projection(ds, ds.domain_center, normal, width,
-                                      (self.nx, self.ny), "intensity")
-            self.data[:,:,i] = prj[:,:]
-            ds.field_info.pop(("gas","intensity"))
-            pbar.update(i)
-
-        pbar.finish()
-
-    def write_fits(self, filename, clobber=True, length_unit=(10.0, "kpc"),
-                   velocity_unit="m/s", sky_center=(30.,45.)):
-        r""" Write the PPVCube to a FITS file.
-
-        Parameters
-        ----------
-        filename : string
-            The name of the file to write.
-        clobber : boolean
-            Whether or not to clobber an existing file with the same name.
-        length_unit : tuple, optional
-            The length that corresponds to the width of the projection in
-            (value, unit) form. Accepts a length unit or 'deg'.
-        velocity_unit : string, optional
-            The units for the velocity axis.
-        sky_center : tuple, optional
-            The (RA, Dec) coordinate in degrees of the central pixel if
-            *length_unit* is 'deg'.
-
-        Examples
-        --------
-        >>> cube.write_fits("my_cube.fits", clobber=False, length_unit=(5,"deg"),
-        ...                 velocity_unit="km/s")
-        """
-        if length_unit[1] == "deg":
-            center = sky_center
-            types = ["RA---SIN","DEC--SIN"]
-        else:
-            center = [0.0,0.0]
-            types = ["LINEAR","LINEAR"]
-
-        v_center = 0.5*(self.v_bnd[0]+self.v_bnd[1]).in_units(velocity_unit).value
-
-        dx = length_unit[0]/self.nx
-        dy = length_unit[0]/self.ny
-        dv = (self.v_bnd[1]-self.v_bnd[0]).in_units(velocity_unit).value/self.nv
-
-        if length_unit[1] == "deg":
-            dx *= -1.
-
-        w = ap.pywcs.WCS(naxis=3)
-        w.wcs.crpix = [0.5*(self.nx+1), 0.5*(self.ny+1), 0.5*(self.nv+1)]
-        w.wcs.cdelt = [dx,dy,dv]
-        w.wcs.crval = [center[0], center[1], v_center]
-        w.wcs.cunit = [length_unit[1],length_unit[1],velocity_unit]
-        w.wcs.ctype = [types[0],types[1],"VELO-LSR"]
-
-        fib = FITSImageBuffer(self.data.transpose(), fields=self.field, wcs=w)
-        fib[0].header["bunit"] = self.field_units
-        fib[0].header["btype"] = self.field
-
-        fib.writeto(filename, clobber=clobber)
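
For reference, the nv velocity channels used by this module are the intervals between the nv+1 evenly spaced edges returned by np.linspace; channel i covers [vbins[i], vbins[i+1]). A tiny worked example with illustrative numbers:

    import numpy as np

    nv = 4
    vbins = np.linspace(-2.0, 2.0, num=nv + 1)  # edges: [-2., -1., 0., 1., 2.]
    centers = 0.5 * (vbins[:-1] + vbins[1:])    # [-1.5, -0.5, 0.5, 1.5]
    widths = np.diff(vbins)                     # every channel is 1.0 wide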


https://bitbucket.org/yt_analysis/yt/commits/61793d6c5e9b/
Changeset:   61793d6c5e9b
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-18 23:38:52
Summary:     An extra cell we didn't need
Affected #:  5 files

diff -r fb2c43b534470f51deabea9997b864640f8e88c5 -r 61793d6c5e9bb8648c1857860f660a1cba966a38 doc/source/visualizing/FITSImageBuffer.ipynb
--- a/doc/source/visualizing/FITSImageBuffer.ipynb
+++ b/doc/source/visualizing/FITSImageBuffer.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:407a58e66a9cebe0495eb01f918d4f929609608d9d32642fe9c7a18abf19fd7f"
+  "signature": "sha256:872f7525edd3c1ee09c67f6ecdd8552218df05ebe5ab73bcab55654edf0ac2bb"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -197,14 +197,6 @@
      "language": "python",
      "metadata": {},
      "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
     }
    ],
    "metadata": {}


https://bitbucket.org/yt_analysis/yt/commits/5af3d60c3dfb/
Changeset:   5af3d60c3dfb
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-18 23:45:34
Summary:     PPVCube notebook doc.
Affected #:  7 files

diff -r 61793d6c5e9bb8648c1857860f660a1cba966a38 -r 5af3d60c3dfb782f2da22b9b0d00453017f419b5 doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -0,0 +1,229 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:20464f4ff5125b848cb3934f78e207b648090473be98ab1315da63d3d290a760"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Detailed spectra of astrophysical objects sometimes allow for determinations of how much of the gas is moving with a certain velocity along the line of sight, thanks to Doppler shifting of spectral lines. This enables \"data cubes\" to be created in RA, Dec, and line-of-sight velocity space. In yt, we can use the `PPVCube` analysis module to project fields along a given line of sight traveling at different line-of-sight velocities, to \"mock-up\" what would be seen in observations."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To demonstrate this functionality, we'll create a simple unigrid dataset from scratch of a rotating disk galaxy. We create a thin disk in the x-y midplane of the domain of three cells in height in either direction, and a radius of 10 kpc. The density and azimuthal velocity profiles of the disk as a function of radius will be given by the following functions:"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Density: $\\rho(r) \\propto r^{\\alpha}$"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Velocity: $v_{\\theta}(r) \\propto \\frac{r}{1+(r/r_0)^{\\beta}}$"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "where for simplicity we won't worry about the normalizations of these profiles. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%matplotlib inline\n",
+      "from yt.mods import *\n",
+      "from yt.analysis_modules.api import PPVCube"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "data = {}\n",
+      "nx,ny,nz = (256,256,256)\n",
+      "R = 10. # kpc\n",
+      "r_0 = 3. # kpc\n",
+      "beta = 1.4\n",
+      "alpha = -1.\n",
+      "x, y = np.mgrid[-R:R:nx*1j,-R:R:ny*1j] # cartesian coordinates\n",
+      "r = np.sqrt(x*x+y*y) # polar coordinates\n",
+      "theta = np.arctan2(y, x) # polar coordinates\n",
+      "dens = np.zeros((nx,ny,nz))\n",
+      "dens[:,:,nz/2-3:nz/2+3] = (r**alpha).reshape(nx,ny,1) # the density profile of the disk\n",
+      "vel_theta = r/(1.+(r/r_0)**beta) # the azimuthal velocity profile of the disk\n",
+      "velx = np.zeros((nx,ny,nz))\n",
+      "vely = np.zeros((nx,ny,nz))\n",
+      "velx[:,:,nz/2-3:nz/2+3] = (-vel_theta*np.sin(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
+      "vely[:,:,nz/2-3:nz/2+3] = (vel_theta*np.cos(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
+      "data[\"density\"] = (dens,\"g/cm**3\")\n",
+      "data[\"velocity_x\"] = (velx, \"km/s\")\n",
+      "data[\"velocity_y\"] = (vely, \"km/s\")\n",
+      "data[\"velocity_z\"] = (np.zeros((nx,ny,nz)), \"km/s\") # zero velocity in the z-direction\n",
+      "bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])\n",
+      "ds = load_uniform_grid(data, (nx,ny,nz), length_unit=(2*R,\"kpc\"), nprocs=1, bbox=bbox)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To get a sense of what the data looks like, we'll take a slice through the middle of the disk:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = SlicePlot(ds, \"z\", [\"density\",\"velocity_x\",\"velocity_y\",\"velocity_magnitude\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc.set_log(\"velocity_x\", False)\n",
+      "slc.set_log(\"velocity_y\", False)\n",
+      "slc.set_log(\"velocity_magnitude\", False)\n",
+      "slc.set_unit(\"velocity_magnitude\", \"km/s\")\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Which shows a rotating disk with a specific density and velocity profile. Now, suppose we wanted to look at this disk galaxy from a certain orientation angle, and simulate a 3D FITS data cube where we can see the gas that is emitting at different velocities along the line of sight. We can do this using the `PPVCube` class. First, let's assume we rotate our viewing angle 60 degrees from face-on, from along the z-axis into the y-axis. We'll create a normal vector:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "i = 60.*np.pi/180.\n",
+      "L = [0.0,np.sin(i),np.sin(i)]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Next, we need to specify a field that will serve as the \"intensity\" of the emission that we see. For simplicity, we'll simply choose the gas density as this field, though it could be any field (including derived fields) in principle. We also need to specify the dimensions of the data cube, and optionally we may choose the bounds in line-of-sight velocity that the data will be binned into. Otherwise, the bounds will simply be set to the negative and positive of the largest speed in the dataset."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "cube = PPVCube(ds, L, \"density\", dims=(200,100,50), velocity_bounds=(-0.5,0.5,\"km/s\"))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Following this, we can now write this cube to a FITS file:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "cube.write_fits(\"cube.fits\", clobber=True, length_unit=(5.0,\"deg\"), velocity_unit=\"km/s\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now, we'll look at the FITS dataset in yt and look at different slices along the velocity axis, which is the \"z\" axis:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pf = load(\"cube.fits\")\n",
+      "slc = SlicePlot(pf, \"z\", [\"density\"], center=\"c\") # sliced at the center of the domain\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# To figure out what the domain center and width is in pixel (code length) units:\n",
+      "print pf.domain_center\n",
+      "print pf.domain_width"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = SlicePlot(pf, \"z\", [\"density\"], center=[100.5,50.5,10.0]) # \"z\" slice is in pixel (code length) units\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = SlicePlot(pf, \"z\", [\"density\"], center=[100.5,50.5,40.0])\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file
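
A quick sanity check (plain NumPy) of the viewing normal used in the notebook: for an inclination i measured from the z-axis toward the y-axis, L = [0, sin(i), cos(i)] is a unit vector tilted by exactly i:

    import numpy as np

    i = 60. * np.pi / 180.
    L = np.array([0.0, np.sin(i), np.cos(i)])
    print(np.linalg.norm(L))            # 1.0
    print(np.degrees(np.arccos(L[2])))  # 60.0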

diff -r 61793d6c5e9bb8648c1857860f660a1cba966a38 -r 5af3d60c3dfb782f2da22b9b0d00453017f419b5 doc/source/analyzing/analysis_modules/ppv_cubes.rst
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/ppv_cubes.rst
@@ -0,0 +1,4 @@
+Creating Position-Position-Velocity FITS Cubes
+-------------------------------------------------
+
+.. notebook:: PPVCube.ipynb

diff -r 61793d6c5e9bb8648c1857860f660a1cba966a38 -r 5af3d60c3dfb782f2da22b9b0d00453017f419b5 doc/source/analyzing/analysis_modules/synthetic_observation.rst
--- a/doc/source/analyzing/analysis_modules/synthetic_observation.rst
+++ b/doc/source/analyzing/analysis_modules/synthetic_observation.rst
@@ -18,3 +18,4 @@
    sunyaev_zeldovich
    radial_column_density
    photon_simulator
+   ppv_cubes


https://bitbucket.org/yt_analysis/yt/commits/d1046b287d10/
Changeset:   d1046b287d10
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-19 00:25:38
Summary:     Missed this
Affected #:  5 files

diff -r 5af3d60c3dfb782f2da22b9b0d00453017f419b5 -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 yt/analysis_modules/ppv_cube/api.py
--- a/yt/analysis_modules/ppv_cube/api.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""
-API for ppv_cube
-"""
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from ppv_cube import PPVCube

diff -r 5af3d60c3dfb782f2da22b9b0d00453017f419b5 -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ /dev/null
@@ -1,167 +0,0 @@
-"""
-Generating PPV FITS cubes
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-from yt.frontends.fits.data_structures import ap
-from yt.utilities.orientation import Orientation
-from yt.utilities.fits_image import FITSImageBuffer
-from yt.visualization.volume_rendering.camera import off_axis_projection
-from yt.funcs import get_pbar
-
-def create_intensity(vmin, vmax, ifield):
-    def _intensity(field, data):
-        idxs = np.logical_and(data["v_los"] >= vmin, data["v_los"] < vmax)
-        f = np.zeros(data[ifield].shape)
-        f[idxs] = data[ifield][idxs]
-        return f
-    return _intensity
-
-def create_vlos(z_hat):
-    def _v_los(field, data):
-        vz = data["velocity_x"]*z_hat[0] + \
-             data["velocity_y"]*z_hat[1] + \
-             data["velocity_z"]*z_hat[2]
-        return -vz
-    return _v_los
-
-class PPVCube(object):
-    def __init__(self, ds, normal, field, width=(1.0,"unitary"),
-                 dims=(100,100,100), velocity_bounds=None):
-        r""" Initialize a PPVCube object.
-
-        Parameters
-        ----------
-        ds : dataset
-            The dataset.
-        normal : array_like
-            The normal vector along which to make the projections.
-        field : string
-            The field to project.
-        width : float or tuple, optional
-            The width of the projection in length units. Specify a float
-            for code_length units or a tuple (value, units).
-        dims : tuple, optional
-            A 3-tuple of dimensions (nx,ny,nv) for the cube.
-        velocity_bounds : tuple, optional
-            A 3-tuple of (vmin, vmax, units) for the velocity bounds to
-            integrate over. If None, the largest velocity magnitude in the
-            dataset will be used, i.e. velocity_bounds = (-v.max(), v.max())
-
-        Examples
-        --------
-        >>> i = 60*np.pi/180.
-        >>> L = [0.0,np.sin(i),np.cos(i)]
-        >>> cube = PPVCube(ds, L, "density", width=(10.,"kpc"),
-        ...                velocity_bounds=(-5.,4.,"km/s"))
-        """
-        self.ds = ds
-        self.field = field
-        self.width = width
-
-        self.nx = dims[0]
-        self.ny = dims[1]
-        self.nv = dims[2]
-
-        normal = np.array(normal)
-        normal /= np.sqrt(np.dot(normal, normal))
-        vecs = np.identity(3)
-        t = np.cross(normal, vecs).sum(axis=1)
-        ax = t.argmax()
-        north = np.cross(normal, vecs[ax,:]).ravel()
-        orient = Orientation(normal, north_vector=north)
-
-        dd = ds.all_data()
-
-        fd = dd._determine_fields(field)[0]
-
-        self.field_units = ds._get_field_info(fd).units
-
-        if velocity_bounds is None:
-            vmin, vmax = dd.quantities.extrema("velocity_magnitude")
-            self.v_bnd = -vmax, vmax
-        else:
-            self.v_bnd = (ds.quan(velocity_bounds[0], velocity_bounds[2]),
-                     ds.quan(velocity_bounds[1], velocity_bounds[2]))
-
-        vbins = np.linspace(self.v_bnd[0], self.v_bnd[1], num=self.nv+1)
-
-        _vlos = create_vlos(orient.unit_vectors[2])
-        ds.field_info.add_field(("gas","v_los"), function=_vlos, units="cm/s")
-
-        self.data = ds.arr(np.zeros((self.nx,self.ny,self.nv)), self.field_units)
-        pbar = get_pbar("Generating cube.", self.nv)
-        for i in xrange(self.nv):
-            v1 = vbins[i]
-            v2 = vbins[i+1]
-            _intensity = create_intensity(v1, v2, field)
-            ds.field_info.add_field(("gas","intensity"),
-                                    function=_intensity, units=self.field_units)
-            prj = off_axis_projection(ds, ds.domain_center, normal, width,
-                                      (self.nx, self.ny), "intensity")
-            self.data[:,:,i] = prj[:,:]
-            ds.field_info.pop(("gas","intensity"))
-            pbar.update(i)
-
-        pbar.finish()
-
-    def write_fits(self, filename, clobber=True, length_unit=(10.0, "kpc"),
-                   velocity_unit="m/s", sky_center=(30.,45.)):
-        r""" Write the PPVCube to a FITS file.
-
-        Parameters
-        ----------
-        filename : string
-            The name of the file to write.
-        clobber : boolean
-            Whether or not to clobber an existing file with the same name.
-        length_unit : tuple, optional
-            The length that corresponds to the width of the projection in
-            (value, unit) form. Accepts a length unit or 'deg'.
-        velocity_unit : string, optional
-            The units for the velocity axis.
-        sky_center : tuple, optional
-            The (RA, Dec) coordinate in degrees of the central pixel if
-            *length_unit* is 'deg'.
-
-        Examples
-        --------
-        >>> cube.write_fits("my_cube.fits", clobber=False, length_unit=(5,"deg"),
-        ...                 velocity_unit="km/s")
-        """
-        if length_unit[1] == "deg":
-            center = sky_center
-            types = ["RA---SIN","DEC--SIN"]
-        else:
-            center = [0.0,0.0]
-            types = ["LINEAR","LINEAR"]
-
-        v_center = 0.5*(self.v_bnd[0]+self.v_bnd[1]).in_units(velocity_unit).value
-
-        dx = length_unit[0]/self.nx
-        dy = length_unit[0]/self.ny
-        dv = (self.v_bnd[1]-self.v_bnd[0]).in_units(velocity_unit).value/self.nv
-
-        if length_unit[1] == "deg":
-            dx *= -1.
-
-        w = ap.pywcs.WCS(naxis=3)
-        w.wcs.crpix = [0.5*(self.nx+1), 0.5*(self.ny+1), 0.5*(self.nv+1)]
-        w.wcs.cdelt = [dx,dy,dv]
-        w.wcs.crval = [center[0], center[1], v_center]
-        w.wcs.cunit = [length_unit[1],length_unit[1],velocity_unit]
-        w.wcs.ctype = [types[0],types[1],"VELO-LSR"]
-
-        fib = FITSImageBuffer(self.data.transpose(), fields=self.field, wcs=w)
-        fib[0].header["bunit"] = self.field_units
-        fib[0].header["btype"] = self.field
-
-        fib.writeto(filename, clobber=clobber)

diff -r 5af3d60c3dfb782f2da22b9b0d00453017f419b5 -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 yt/analysis_modules/ppv_cube/setup.py
--- a/yt/analysis_modules/ppv_cube/setup.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('ppv_cube', parent_package, top_path)
-    #config.add_subpackage("tests")
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r 5af3d60c3dfb782f2da22b9b0d00453017f419b5 -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -289,6 +289,8 @@
             self.nan_mask = {}
         elif isinstance(nan_mask, float):
             self.nan_mask = {"all":nan_mask}
+        elif isinstance(nan_mask, dict):
+            self.nan_mask = nan_mask
         self.nprocs = nprocs
         self._handle = ap.pyfits.open(self.filenames[0],
                                       memmap=True,


https://bitbucket.org/yt_analysis/yt/commits/f4117d6bf558/
Changeset:   f4117d6bf558
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-20 07:14:02
Summary:     Merge
Affected #:  17 files

diff -r 83a5858e91dc89a2fc500f735e9341043b562d5d -r f4117d6bf5583a881a16d4a5bc222c895e3a8f66 doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -0,0 +1,229 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:20464f4ff5125b848cb3934f78e207b648090473be98ab1315da63d3d290a760"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Detailed spectra of astrophysical objects sometimes allow for determinations of how much of the gas is moving with a certain velocity along the line of sight, thanks to Doppler shifting of spectral lines. This enables \"data cubes\" to be created in RA, Dec, and line-of-sight velocity space. In yt, we can use the `PPVCube` analysis module to project fields along a given line of sight traveling at different line-of-sight velocities, to \"mock-up\" what would be seen in observations."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To demonstrate this functionality, we'll create a simple unigrid dataset from scratch of a rotating disk galaxy. We create a thin disk in the x-y midplane of the domain of three cells in height in either direction, and a radius of 10 kpc. The density and azimuthal velocity profiles of the disk as a function of radius will be given by the following functions:"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Density: $\\rho(r) \\propto r^{\\alpha}$"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Velocity: $v_{\\theta}(r) \\propto \\frac{r}{1+(r/r_0)^{\\beta}}$"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "where for simplicity we won't worry about the normalizations of these profiles. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%matplotlib inline\n",
+      "from yt.mods import *\n",
+      "from yt.analysis_modules.api import PPVCube"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "data = {}\n",
+      "nx,ny,nz = (256,256,256)\n",
+      "R = 10. # kpc\n",
+      "r_0 = 3. # kpc\n",
+      "beta = 1.4\n",
+      "alpha = -1.\n",
+      "x, y = np.mgrid[-R:R:nx*1j,-R:R:ny*1j] # cartesian coordinates\n",
+      "r = np.sqrt(x*x+y*y) # polar coordinates\n",
+      "theta = np.arctan2(y, x) # polar coordinates\n",
+      "dens = np.zeros((nx,ny,nz))\n",
+      "dens[:,:,nz/2-3:nz/2+3] = (r**alpha).reshape(nx,ny,1) # the density profile of the disk\n",
+      "vel_theta = r/(1.+(r/r_0)**beta) # the azimuthal velocity profile of the disk\n",
+      "velx = np.zeros((nx,ny,nz))\n",
+      "vely = np.zeros((nx,ny,nz))\n",
+      "velx[:,:,nz/2-3:nz/2+3] = (-vel_theta*np.sin(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
+      "vely[:,:,nz/2-3:nz/2+3] = (vel_theta*np.cos(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
+      "data[\"density\"] = (dens,\"g/cm**3\")\n",
+      "data[\"velocity_x\"] = (velx, \"km/s\")\n",
+      "data[\"velocity_y\"] = (vely, \"km/s\")\n",
+      "data[\"velocity_z\"] = (np.zeros((nx,ny,nz)), \"km/s\") # zero velocity in the z-direction\n",
+      "bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])\n",
+      "ds = load_uniform_grid(data, (nx,ny,nz), length_unit=(2*R,\"kpc\"), nprocs=1, bbox=bbox)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To get a sense of what the data looks like, we'll take a slice through the middle of the disk:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = SlicePlot(ds, \"z\", [\"density\",\"velocity_x\",\"velocity_y\",\"velocity_magnitude\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc.set_log(\"velocity_x\", False)\n",
+      "slc.set_log(\"velocity_y\", False)\n",
+      "slc.set_log(\"velocity_magnitude\", False)\n",
+      "slc.set_unit(\"velocity_magnitude\", \"km/s\")\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Which shows a rotating disk with a specific density and velocity profile. Now, suppose we wanted to look at this disk galaxy from a certain orientation angle, and simulate a 3D FITS data cube where we can see the gas that is emitting at different velocities along the line of sight. We can do this using the `PPVCube` class. First, let's assume we rotate our viewing angle 60 degrees from face-on, from along the z-axis into the y-axis. We'll create a normal vector:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "i = 60.*np.pi/180.\n",
+      "L = [0.0,np.sin(i),np.sin(i)]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Next, we need to specify a field that will serve as the \"intensity\" of the emission that we see. For simplicity, we'll simply choose the gas density as this field, though it could be any field (including derived fields) in principle. We also need to specify the dimensions of the data cube, and optionally we may choose the bounds in line-of-sight velocity that the data will be binned into. Otherwise, the bounds will simply be set to the negative and positive of the largest speed in the dataset."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "cube = PPVCube(ds, L, \"density\", dims=(200,100,50), velocity_bounds=(-0.5,0.5,\"km/s\"))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Following this, we can now write this cube to a FITS file:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "cube.write_fits(\"cube.fits\", clobber=True, length_unit=(5.0,\"deg\"), velocity_unit=\"km/s\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now, we'll look at the FITS dataset in yt and look at different slices along the velocity axis, which is the \"z\" axis:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pf = load(\"cube.fits\")\n",
+      "slc = SlicePlot(pf, \"z\", [\"density\"], center=\"c\") # sliced at the center of the domain\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# To figure out what the domain center and width is in pixel (code length) units:\n",
+      "print pf.domain_center\n",
+      "print pf.domain_width"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = SlicePlot(pf, \"z\", [\"density\"], center=[100.5,50.5,10.0]) # \"z\" slice is in pixel (code length) units\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = SlicePlot(pf, \"z\", [\"density\"], center=[100.5,50.5,40.0])\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r 83a5858e91dc89a2fc500f735e9341043b562d5d -r f4117d6bf5583a881a16d4a5bc222c895e3a8f66 doc/source/analyzing/analysis_modules/ppv_cubes.rst
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/ppv_cubes.rst
@@ -0,0 +1,4 @@
+Creating Position-Position-Velocity FITS Cubes
+-------------------------------------------------
+
+.. notebook:: PPVCube.ipynb

diff -r 83a5858e91dc89a2fc500f735e9341043b562d5d -r f4117d6bf5583a881a16d4a5bc222c895e3a8f66 doc/source/analyzing/analysis_modules/synthetic_observation.rst
--- a/doc/source/analyzing/analysis_modules/synthetic_observation.rst
+++ b/doc/source/analyzing/analysis_modules/synthetic_observation.rst
@@ -18,3 +18,4 @@
    sunyaev_zeldovich
    radial_column_density
    photon_simulator
+   ppv_cubes

diff -r 83a5858e91dc89a2fc500f735e9341043b562d5d -r f4117d6bf5583a881a16d4a5bc222c895e3a8f66 doc/source/visualizing/FITSImageBuffer.ipynb
--- a/doc/source/visualizing/FITSImageBuffer.ipynb
+++ b/doc/source/visualizing/FITSImageBuffer.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:407a58e66a9cebe0495eb01f918d4f929609608d9d32642fe9c7a18abf19fd7f"
+  "signature": "sha256:872f7525edd3c1ee09c67f6ecdd8552218df05ebe5ab73bcab55654edf0ac2bb"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -197,14 +197,6 @@
      "language": "python",
      "metadata": {},
      "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
     }
    ],
    "metadata": {}

diff -r 83a5858e91dc89a2fc500f735e9341043b562d5d -r f4117d6bf5583a881a16d4a5bc222c895e3a8f66 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -115,5 +115,5 @@
      PhotonModel, \
      ThermalPhotonModel
 
-from .xyv_cube.api import \
-    XYVCube
+from .ppv_cube.api import \
+    PPVCube

diff -r 83a5858e91dc89a2fc500f735e9341043b562d5d -r f4117d6bf5583a881a16d4a5bc222c895e3a8f66 yt/analysis_modules/ppv_cube/api.py
--- /dev/null
+++ b/yt/analysis_modules/ppv_cube/api.py
@@ -0,0 +1,12 @@
+"""
+API for ppv_cube
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from ppv_cube import PPVCube

diff -r 83a5858e91dc89a2fc500f735e9341043b562d5d -r f4117d6bf5583a881a16d4a5bc222c895e3a8f66 yt/analysis_modules/ppv_cube/ppv_cube.py
--- /dev/null
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -0,0 +1,167 @@
+"""
+Generating PPV FITS cubes
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.frontends.fits.data_structures import ap
+from yt.utilities.orientation import Orientation
+from yt.utilities.fits_image import FITSImageBuffer
+from yt.visualization.volume_rendering.camera import off_axis_projection
+from yt.funcs import get_pbar
+
+def create_intensity(vmin, vmax, ifield):
+    def _intensity(field, data):
+        idxs = np.logical_and(data["v_los"] >= vmin, data["v_los"] < vmax)
+        f = np.zeros(data[ifield].shape)
+        f[idxs] = data[ifield][idxs]
+        return f
+    return _intensity
+
+def create_vlos(z_hat):
+    def _v_los(field, data):
+        vz = data["velocity_x"]*z_hat[0] + \
+             data["velocity_y"]*z_hat[1] + \
+             data["velocity_z"]*z_hat[2]
+        return -vz
+    return _v_los
+
+class PPVCube(object):
+    def __init__(self, ds, normal, field, width=(1.0,"unitary"),
+                 dims=(100,100,100), velocity_bounds=None):
+        r""" Initialize a PPVCube object.
+
+        Parameters
+        ----------
+        ds : dataset
+            The dataset.
+        normal : array_like
+            The normal vector along which to make the projections.
+        field : string
+            The field to project.
+        width : float or tuple, optional
+            The width of the projection in length units. Specify a float
+            for code_length units or a tuple (value, units).
+        dims : tuple, optional
+            A 3-tuple of dimensions (nx,ny,nv) for the cube.
+        velocity_bounds : tuple, optional
+            A 3-tuple of (vmin, vmax, units) for the velocity bounds to
+            integrate over. If None, the largest velocity magnitude in the
+            dataset will be used, i.e. velocity_bounds = (-v.max(), v.max())
+
+        Examples
+        --------
+        >>> i = 60*np.pi/180.
+        >>> L = [0.0,np.sin(i),np.cos(i)]
+        >>> cube = PPVCube(ds, L, "density", width=(10.,"kpc"),
+        ...                velocity_bounds=(-5.,4.,"km/s"))
+        """
+        self.ds = ds
+        self.field = field
+        self.width = width
+
+        self.nx = dims[0]
+        self.ny = dims[1]
+        self.nv = dims[2]
+
+        normal = np.array(normal)
+        normal /= np.sqrt(np.dot(normal, normal))
+        vecs = np.identity(3)
+        t = np.cross(normal, vecs).sum(axis=1)
+        ax = t.argmax()
+        north = np.cross(normal, vecs[ax,:]).ravel()
+        orient = Orientation(normal, north_vector=north)
+
+        dd = ds.all_data()
+
+        fd = dd._determine_fields(field)[0]
+
+        self.field_units = ds._get_field_info(fd).units
+
+        if velocity_bounds is None:
+            vmin, vmax = dd.quantities.extrema("velocity_magnitude")
+            self.v_bnd = -vmax, vmax
+        else:
+            self.v_bnd = (ds.quan(velocity_bounds[0], velocity_bounds[2]),
+                     ds.quan(velocity_bounds[1], velocity_bounds[2]))
+
+        vbins = np.linspace(self.v_bnd[0], self.v_bnd[1], num=self.nv+1)
+
+        _vlos = create_vlos(orient.unit_vectors[2])
+        ds.field_info.add_field(("gas","v_los"), function=_vlos, units="cm/s")
+
+        self.data = ds.arr(np.zeros((self.nx,self.ny,self.nv)), self.field_units)
+        pbar = get_pbar("Generating cube.", self.nv)
+        for i in xrange(self.nv):
+            v1 = vbins[i]
+            v2 = vbins[i+1]
+            _intensity = create_intensity(v1, v2, field)
+            ds.field_info.add_field(("gas","intensity"),
+                                    function=_intensity, units=self.field_units)
+            prj = off_axis_projection(ds, ds.domain_center, normal, width,
+                                      (self.nx, self.ny), "intensity")
+            self.data[:,:,i] = prj[:,:]
+            ds.field_info.pop(("gas","intensity"))
+            pbar.update(i)
+
+        pbar.finish()
+
+    def write_fits(self, filename, clobber=True, length_unit=(10.0, "kpc"),
+                   velocity_unit="m/s", sky_center=(30.,45.)):
+        r""" Write the PPVCube to a FITS file.
+
+        Parameters
+        ----------
+        filename : string
+            The name of the file to write.
+        clobber : boolean
+            Whether or not to clobber an existing file with the same name.
+        length_unit : tuple, optional
+            The length corresponding to the width of the projection, in
+            (value, unit) form. The unit may be any length unit or 'deg'.
+        velocity_unit : string, optional
+            The units for the velocity axis.
+        sky_center : tuple, optional
+            The (RA, Dec) coordinate in degrees of the central pixel if
+            *length_unit* is 'deg'.
+
+        Examples
+        --------
+        >>> cube.write_fits("my_cube.fits", clobber=False, length_unit=(5,"deg"),
+        ...                 velocity_unit="km/s")
+        """
+        if length_unit[1] == "deg":
+            center = sky_center
+            types = ["RA---SIN","DEC--SIN"]
+        else:
+            center = [0.0,0.0]
+            types = ["LINEAR","LINEAR"]
+
+        v_center = 0.5*(self.v_bnd[0]+self.v_bnd[1]).in_units(velocity_unit).value
+
+        dx = length_unit[0]/self.nx
+        dy = length_unit[0]/self.ny
+        dv = (self.v_bnd[1]-self.v_bnd[0]).in_units(velocity_unit).value/self.nv
+
+        if length_unit[1] == "deg":
+            dx *= -1.
+
+        w = ap.pywcs.WCS(naxis=3)
+        w.wcs.crpix = [0.5*(self.nx+1), 0.5*(self.ny+1), 0.5*(self.nv+1)]
+        w.wcs.cdelt = [dx,dy,dv]
+        w.wcs.crval = [center[0], center[1], v_center]
+        w.wcs.cunit = [length_unit[1],length_unit[1],velocity_unit]
+        w.wcs.ctype = [types[0],types[1],"VELO-LSR"]
+
+        fib = FITSImageBuffer(self.data.transpose(), fields=self.field, wcs=w)
+        fib[0].header["bunit"] = self.field_units
+        fib[0].header["btype"] = self.field
+
+        fib.writeto(filename, clobber=clobber)
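
For reference, a minimal end-to-end sketch of the new module (illustrative
only: the dataset name is hypothetical, and the import path simply points
at the file added above):

    import yt
    import numpy as np
    from yt.analysis_modules.ppv_cube.ppv_cube import PPVCube

    ds = yt.load("sloshing_nomag2_hdf5_plt_cnt_0100")  # any dataset with gas velocity fields
    # Line of sight inclined 60 degrees from the z-axis
    i = 60. * np.pi / 180.
    L = [0.0, np.sin(i), np.cos(i)]
    cube = PPVCube(ds, L, "density", width=(10., "kpc"),
                   dims=(128, 128, 64), velocity_bounds=(-500., 500., "km/s"))
    # Write with sky coordinates so the cube opens cleanly in FITS viewers
    cube.write_fits("cube.fits", clobber=True, length_unit=(1.0, "deg"),
                    velocity_unit="km/s", sky_center=(30., 45.))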

diff -r 83a5858e91dc89a2fc500f735e9341043b562d5d -r f4117d6bf5583a881a16d4a5bc222c895e3a8f66 yt/analysis_modules/ppv_cube/setup.py
--- /dev/null
+++ b/yt/analysis_modules/ppv_cube/setup.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('ppv_cube', parent_package, top_path)
+    #config.add_subpackage("tests")
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 83a5858e91dc89a2fc500f735e9341043b562d5d -r f4117d6bf5583a881a16d4a5bc222c895e3a8f66 yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -23,5 +23,5 @@
     config.add_subpackage("sunyaev_zeldovich")
     config.add_subpackage("particle_trajectories")
     config.add_subpackage("photon_simulator")
-    config.add_subpackage("xyv_cube")
+    config.add_subpackage("ppv_cube")
     return config

diff -r 83a5858e91dc89a2fc500f735e9341043b562d5d -r f4117d6bf5583a881a16d4a5bc222c895e3a8f66 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -19,10 +19,8 @@
 #-----------------------------------------------------------------------------
 
 from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
-from yt.fields.local_fields import add_field, derived_field
-from yt.data_objects.image_array import ImageArray
+from yt.units.yt_array import YTQuantity
 from yt.funcs import fix_axis, mylog, iterable, get_pbar
-from yt.utilities.definitions import inv_axis_names
 from yt.visualization.volume_rendering.camera import off_axis_projection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only
@@ -149,7 +147,6 @@
         beta_par = generate_beta_par(L)
         self.pf.field_info.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
         proj = self.pf.h.proj("density", axis, center=ctr, data_source=source)
-        proj.set_field_parameter("axis", axis)
         frb = proj.to_frb(width, nx)
         dens = frb["density"]
         Te = frb["t_sz"]/dens
@@ -174,6 +171,8 @@
                                 np.array(omega1), np.array(sigma1),
                                 np.array(kappa1), np.array(bperp2))
 
+        self.pf.field_info.pop(("gas","beta_par"))
+
     def off_axis(self, L, center="c", width=(1, "unitary"), nx=800, source=None):
         r""" Make an off-axis projection of the SZ signal.
 
@@ -197,7 +196,9 @@
         >>> szprj.off_axis(L, center="c", width=(2.0, "Mpc"))
         """
         if iterable(width):
-            w = width[0]/self.pf.units[width[1]]
+            w = self.pf.quan(width[0], width[1]).in_units("code_length").value
+        elif isinstance(width, YTQuantity):
+            w = width.in_units("code_length").value
         else:
             w = width
         if center == "c":
@@ -240,6 +241,8 @@
                                 np.array(omega1), np.array(sigma1),
                                 np.array(kappa1), np.array(bperp2))
 
+        self.pf.field_info.pop(("gas","beta_par"))
+
     def _compute_intensity(self, tau, Te, bpar, omega1, sigma1, kappa1, bperp2):
 
         # Bad hack, but we get NaNs if we don't do something like this
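
With the change above, off_axis now accepts the width in three equivalent
forms (a sketch; szprj is an existing SZProjection and pf its dataset):

    szprj.off_axis(L, width=(2.0, "Mpc"))         # a (value, unit) tuple
    szprj.off_axis(L, width=pf.quan(2.0, "Mpc"))  # a YTQuantity
    szprj.off_axis(L, width=0.25)                 # a bare float, taken as code_length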

diff -r 83a5858e91dc89a2fc500f735e9341043b562d5d -r f4117d6bf5583a881a16d4a5bc222c895e3a8f66 yt/analysis_modules/xyv_cube/api.py
--- a/yt/analysis_modules/xyv_cube/api.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""
-API for xyv_cube
-"""
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from xyv_cube import XYVCube

diff -r 83a5858e91dc89a2fc500f735e9341043b562d5d -r f4117d6bf5583a881a16d4a5bc222c895e3a8f66 yt/analysis_modules/xyv_cube/setup.py
--- a/yt/analysis_modules/xyv_cube/setup.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('xyv_cube', parent_package, top_path)
-    #config.add_subpackage("tests")
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r 83a5858e91dc89a2fc500f735e9341043b562d5d -r f4117d6bf5583a881a16d4a5bc222c895e3a8f66 yt/analysis_modules/xyv_cube/xyv_cube.py
--- a/yt/analysis_modules/xyv_cube/xyv_cube.py
+++ /dev/null
@@ -1,166 +0,0 @@
-"""
-Generating XYV FITS cubes
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-from yt.frontends.fits.data_structures import ap
-from yt.utilities.orientation import Orientation
-from yt.utilities.fits_image import FITSImageBuffer
-from yt.visualization.volume_rendering.camera import off_axis_projection
-from yt.funcs import get_pbar
-
-def create_intensity(vmin, vmax, ifield):
-    def _intensity(field, data):
-        idxs = np.logical_and(data["v_los"] >= vmin, data["v_los"] < vmax)
-        f = np.zeros(data[ifield].shape)
-        f[idxs] = data[ifield][idxs]
-        return f
-    return _intensity
-
-def create_vlos(z_hat):
-    def _v_los(field, data):
-        vz = data["velocity_x"]*z_hat[0] + \
-             data["velocity_y"]*z_hat[1] + \
-             data["velocity_z"]*z_hat[2]
-        return -vz
-    return _v_los
-
-class XYVCube(object):
-    def __init__(self, ds, normal, field, width=(1.0,"unitary"),
-                 dims=(100,100,100), velocity_bounds=None):
-        r""" Initialize a XYVCube object.
-
-        Parameters
-        ----------
-        ds : dataset
-            The dataset.
-        normal : array_like
-            The normal vector along with to make the projections.
-        field : string
-            The field to project.
-        width : float or tuple, optional
-            The width of the projection in length units. Specify a float
-            for code_length units or a tuple (value, units).
-        dims : tuple, optional
-            A 3-tuple of dimensions (nx,ny,nv) for the cube.
-        velocity_bounds : tuple, optional
-            A 3-tuple of (vmin, vmax, units) for the velocity bounds to
-            integrate over. If None, the largest velocity of the
-            dataset will be used, e.g. velocity_bounds = (-v.max(), v.max())
-
-        Examples
-        --------
-        >>> i = 60*np.pi/180.
-        >>> L = [0.0,np.sin(i),np.cos(i)]
-        >>> cube = XYVCube(ds, L, "density", width=(10.,"kpc"),
-        ...                velocity_bounds=(-5.,4.,"km/s"))
-        """
-        self.ds = ds
-        self.field = field
-        self.width = width
-
-        self.nx = dims[0]
-        self.ny = dims[1]
-        self.nv = dims[2]
-
-        normal = np.array(normal)
-        normal /= np.sqrt(np.dot(normal, normal))
-        vecs = np.identity(3)
-        t = np.cross(normal, vecs).sum(axis=1)
-        ax = t.argmax()
-        north = np.cross(normal, vecs[ax,:]).ravel()
-        orient = Orientation(normal, north_vector=north)
-
-        dd = ds.all_data()
-
-        fd = dd._determine_fields(field)[0]
-
-        self.field_units = ds._get_field_info(fd).units
-
-        if velocity_bounds is None:
-            vmin, vmax = dd.quantities.extrema("velocity_magnitude")
-            self.v_bnd = -vmax, vmax
-        else:
-            self.v_bnd = (ds.arr(velocity_bounds[0], velocity_bounds[2]),
-                     ds.arr(velocity_bounds[1], velocity_bounds[2]))
-
-        vbins = np.linspace(self.v_bnd[0], self.v_bnd[1], num=self.nv+1)
-
-        self.data = ds.arr(np.zeros((self.nx,self.ny,self.nv)), self.field_units)
-        pbar = get_pbar("Generating cube.", self.nv)
-        for i in xrange(self.nv):
-            v1 = vbins[i]
-            v2 = vbins[i+1]
-            _intensity = create_intensity(v1, v2, field)
-            _vlos = create_vlos(orient.unit_vectors[2])
-            ds.field_info.add_field(("gas","intensity"),
-                                    function=_intensity, units=self.field_units)
-            ds.field_info.add_field(("gas","v_los"),
-                                    function=_vlos, units="cm/s")
-            prj = off_axis_projection(ds, ds.domain_center, normal, width,
-                                      (self.nx, self.ny), "intensity")
-            self.data[:,:,i] = prj[:,:]
-            pbar.update(i)
-
-        pbar.finish()
-
-    def write_fits(self, filename, clobber=True, length_unit=(10.0, "kpc"),
-                   velocity_unit="m/s", sky_center=(30.,45.)):
-        r""" Write the XYVCube to a FITS file.
-
-        Parameters
-        ----------
-        filename : string
-            The name of the file to write.
-        clobber : boolean
-            Whether or not to clobber an existing file with the same name.
-        length_unit : tuple, optional
-            The length that corresponds to the width of the projection in
-            (value, unit) form. Accepts a length unit or 'deg'.
-        velocity_unit : string, optional
-            The units for the velocity axis.
-        sky_center : tuple, optional
-            The (RA, Dec) coordinate in degrees of the central pixel if
-            *length_unit* is 'deg'.
-
-        Examples
-        --------
-        >>> cube.write_fits("my_cube.fits", clobber=False, length_unit=(5,"deg"),
-        ...                 velocity_unit="km/s")
-        """
-        if length_unit[1] == "deg":
-            center = sky_center
-            types = ["RA---SIN","DEC--SIN"]
-        else:
-            center = [0.0,0.0]
-            types = ["LINEAR","LINEAR"]
-
-        v_center = 0.5*(self.v_bnd[0]+self.v_bnd[1]).in_units(velocity_unit).value
-
-        dx = length_unit[0]/self.nx
-        dy = length_unit[0]/self.ny
-        dv = (self.v_bnd[1]-self.v_bnd[0]).in_units(velocity_unit).value/self.nv
-
-        if length_unit[1] == "deg":
-            dx *= -1.
-
-        w = ap.pywcs.WCS(naxis=3)
-        w.wcs.crpix = [0.5*(self.nx+1), 0.5*(self.ny+1), 0.5*(self.nv+1)]
-        w.wcs.cdelt = [dx,dy,dv]
-        w.wcs.crval = [center[0], center[1], v_center]
-        w.wcs.cunit = [length_unit[1],length_unit[1],velocity_unit]
-        w.wcs.ctype = [types[0],types[1],"VELO-LSR"]
-
-        fib = FITSImageBuffer(self.data.transpose(), fields=self.field, wcs=w)
-        fib[0].header["bunit"] = self.field_units
-        fib[0].header["btype"] = self.field
-
-        fib.writeto(filename, clobber=clobber)

diff -r 83a5858e91dc89a2fc500f735e9341043b562d5d -r f4117d6bf5583a881a16d4a5bc222c895e3a8f66 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -289,6 +289,8 @@
             self.nan_mask = {}
         elif isinstance(nan_mask, float):
             self.nan_mask = {"all":nan_mask}
+        elif isinstance(nan_mask, dict):
+            self.nan_mask = nan_mask
         self.nprocs = nprocs
         self._handle = ap.pyfits.open(self.filenames[0],
                                       memmap=True,
@@ -421,16 +423,15 @@
             self.nprocs = min(self.nprocs, 512)
 
         # Check to see if this data is in (RA,Dec,?) format
-        self.xyv_data = False
+        self.ppv_data = False
         x = np.zeros((self.dimensionality), dtype="bool")
-        for apx in axes_prefixes:
-            x += np_char.startswith(self.axis_names.keys()[:self
-                                    .dimensionality], apx)
-        if x.sum() == self.dimensionality: self._setup_xyv()
+        for apx in axes_prefixes:
+            x += np_char.startswith(self.axis_names.keys()[:self.dimensionality], apx)
+        if x.sum() == self.dimensionality: self._setup_ppv()
 
-    def _setup_xyv(self):
+    def _setup_ppv(self):
 
-        self.xyv_data = True
+        self.ppv_data = True
 
         end = min(self.dimensionality+1,4)
         ctypes = np.array([self.primary_header["CTYPE%d" % (i)] for i in xrange(1,end)])
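
With the new dict branch for nan_mask, NaN values can be replaced per field
at load time; a hedged sketch (the file name is hypothetical and the import
path is assumed from the frontend layout):

    from yt.frontends.fits.api import FITSDataset

    # Replace NaNs in every field with zero:
    ds = FITSDataset("m33_hi.fits", nan_mask=0.0)
    # Or give individual fields their own fill values:
    ds = FITSDataset("m33_hi.fits", nan_mask={"intensity": 0.0})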

diff -r 83a5858e91dc89a2fc500f735e9341043b562d5d -r f4117d6bf5583a881a16d4a5bc222c895e3a8f66 yt/frontends/fits/fields.py
--- a/yt/frontends/fits/fields.py
+++ b/yt/frontends/fits/fields.py
@@ -36,7 +36,7 @@
         return data.pf.arr(data.pf.wcs_1d.wcs_pix2world(data["z"], 1)[0],
                            str(data.pf.wcs_1d.wcs.cunit[0]))
 
-    def _setup_xyv_fields(self):
+    def _setup_ppv_fields(self):
         def world_f(axis, unit):
             def _world_f(field, data):
                 return data.pf.arr(self._get_2d_wcs(data, axis), unit)
@@ -57,8 +57,8 @@
 
     def setup_fluid_fields(self):
 
-        if self.pf.xyv_data:
-            self._setup_xyv_fields()
+        if self.pf.ppv_data:
+            self._setup_ppv_fields()
             return
 
         def world_f(axis, unit):


https://bitbucket.org/yt_analysis/yt/commits/c8fe35134dfc/
Changeset:   c8fe35134dfc
Branch:      yt-3.0
User:        jzuhone
Date:        2014-04-20 07:24:25
Summary:     Merge
Affected #:  30 files

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -154,6 +154,9 @@
 from yt.convenience import \
     load, simulation
 
+from yt.testing import \
+    run_nose
+
 # Import some helpful math utilities
 from yt.utilities.math_utils import \
     ortho_find, quartiles, periodic_position
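
This makes the test suite runnable straight from an interactive session:

    import yt
    yt.run_nose()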

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/analysis_modules/halo_analysis/api.py
--- a/yt/analysis_modules/halo_analysis/api.py
+++ b/yt/analysis_modules/halo_analysis/api.py
@@ -20,6 +20,9 @@
 from .halo_callbacks import \
      add_callback
 
+from .halo_finding_methods import \
+     add_finding_method
+
 from .halo_filters import \
      add_filter
      

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/analysis_modules/halo_analysis/finding_methods.py
--- a/yt/analysis_modules/halo_analysis/finding_methods.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-Halo Finding methods
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .operator_registry import \
-    hf_registry
-
-class HaloFindingMethod(object):
-    pass

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/analysis_modules/halo_analysis/halo_catalog.py
--- a/yt/analysis_modules/halo_analysis/halo_catalog.py
+++ b/yt/analysis_modules/halo_analysis/halo_catalog.py
@@ -30,20 +30,9 @@
 from .operator_registry import \
      callback_registry, \
      filter_registry, \
-     hf_registry, \
+     finding_method_registry, \
      quantity_registry
 
-from yt.analysis_modules.halo_finding.halo_objects import \
-    FOFHaloFinder, HOPHaloFinder
-from yt.frontends.halo_catalogs.halo_catalog.data_structures import \
-    HaloCatalogDataset
-from yt.frontends.stream.data_structures import \
-    load_particles
-from yt.frontends.halo_catalogs.rockstar.data_structures import \
-    RockstarDataset
-from yt.analysis_modules.halo_finding.rockstar.api import \
-    RockstarHaloFinder
-
 class HaloCatalog(ParallelAnalysisInterface):
     r"""Create a HaloCatalog: an object that allows for the creation and association
     of data with a set of halo objects.
@@ -103,7 +92,7 @@
 
     See Also
     --------
-    add_callback, add_filter, add_quantity
+    add_callback, add_filter, add_finding_method, add_quantity
     
     """
     
@@ -113,7 +102,6 @@
         ParallelAnalysisInterface.__init__(self)
         self.halos_pf = halos_pf
         self.data_pf = data_pf
-        self.finder_method = finder_method
         self.output_dir = ensure_dir(output_dir)
         if os.path.basename(self.output_dir) != ".":
             self.output_prefix = os.path.basename(self.output_dir)
@@ -133,6 +121,10 @@
                 data_source = data_pf.h.all_data()
         self.data_source = data_source
 
+        if finder_method is not None:
+            finder_method = finding_method_registry.find(finder_method)
+        self.finder_method = finder_method            
+        
         # all of the analysis actions to be performed: callbacks, filters, and quantities
         self.actions = []
         # fields to be written to the halo catalog
@@ -358,16 +350,14 @@
 
         if self.halos_pf is None:
             # Find the halos and make a dataset of them
-            particles_pf = self.find_halos()
+            self.halos_pf = self.finder_method(self.data_pf)
 
             # Assign pf and data sources appropriately
-            self.halos_pf = particles_pf
-            self.data_source = particles_pf.all_data()
+            self.data_source = self.halos_pf.all_data()
 
             # Add all of the default quantities that all halos must have
             self.add_default_quantities('all')
 
-
         my_index = np.argsort(self.data_source["particle_identifier"])
         for i in parallel_objects(my_index, njobs=njobs, dynamic=dynamic):
             new_halo = Halo(self)
@@ -400,80 +390,6 @@
         if save_catalog:
             self.save_catalog()
 
-    def find_halos(self):
-
-        finder_method = (self.finder_method).lower()
-
-        if finder_method == "hop":
-            halo_list = HOPHaloFinder(self.data_pf)
-            halos_pf = self._parse_old_halo_list(halo_list)
-
-        elif finder_method == "fof":
-            halo_list = FOFHaloFinder(self.data_pf)
-            halos_pf = self._parse_old_halo_list(halo_list)
-            
-        elif finder_method == 'rockstar':
-            rh = RockstarHaloFinder(self.data_pf, 
-                outbase='{0}/rockstar_halos'.format(self.output_prefix))
-            rh.run()
-            halos_pf = RockstarDataset('{0}/rockstar_halos/halos_0.0.bin'.format(self.output_prefix))
-            halos_pf.create_field_info()
-        else:
-            raise RuntimeError("finder_method must be 'fof', 'hop', or 'rockstar'")
-
-        for attr in ["current_redshift", "current_time",
-                     "domain_dimensions",
-                     "cosmological_simulation", "omega_lambda",
-                     "omega_matter", "hubble_constant"]:
-            attr_val = getattr(self.data_pf, attr)
-            setattr(halos_pf, attr, attr_val)
-        halos_pf.current_time = halos_pf.current_time.in_cgs()
-
-        return halos_pf
-
-    def _parse_old_halo_list(self, halo_list):
-
-
-        data_pf = self.data_pf
-        num_halos = len(halo_list)
-
-        # Set up fields that we want to pull from identified halos and their units
-        new_fields = ['particle_identifier', 'particle_mass', 'particle_position_x', 
-            'particle_position_y','particle_position_z',
-            'virial_radius']
-        new_units = [ '', 'g', 'cm', 'cm','cm','cm']
-
-        # Set up a dictionary based on those fields 
-        # with empty arrays where we will fill in their values
-        halo_properties = { f : (np.zeros(num_halos),unit) \
-            for f, unit in zip(new_fields,new_units)}
-
-        # Iterate through the halos pulling out their positions and virial quantities
-        # and filling in the properties dictionary
-        for i,halo in enumerate(halo_list):
-            halo_properties['particle_identifier'][0][i] = i
-            halo_properties['particle_mass'][0][i] = halo.virial_mass().in_cgs()
-            halo_properties['virial_radius'][0][i] = halo.virial_radius().in_cgs()
-
-            com = halo.center_of_mass().in_cgs()
-            halo_properties['particle_position_x'][0][i] = com[0]
-            halo_properties['particle_position_y'][0][i] = com[1]
-            halo_properties['particle_position_z'][0][i] = com[2]
-
-        # Define a bounding box based on original data pf
-        bbox = np.array([data_pf.domain_left_edge.in_cgs(),
-                data_pf.domain_right_edge.in_cgs()]).T
-
-        # Create a pf with the halos as particles
-        particle_pf = load_particles(halo_properties, 
-                bbox=bbox, length_unit = 1, mass_unit=1)
-
-        # Create the field info dictionary so we can reference those fields
-        particle_pf.create_field_info()
-
-        return particle_pf
-
-
     def save_catalog(self):
         "Write out hdf5 file with all halo quantities."
 
@@ -513,4 +429,3 @@
         self.add_quantity("particle_position_z", field_type=field_type)
         self.add_quantity("virial_radius", field_type=field_type)
 
-

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/analysis_modules/halo_analysis/halo_filters.py
--- a/yt/analysis_modules/halo_analysis/halo_filters.py
+++ b/yt/analysis_modules/halo_analysis/halo_filters.py
@@ -13,6 +13,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import numpy as np
+
+from yt.utilities.spatial import KDTree
+
 from .halo_callbacks import HaloCallback
 from .operator_registry import filter_registry
 
@@ -58,3 +62,44 @@
     return eval("%s %s %s" % (h_value, operator, value))
 
 add_filter("quantity_value", quantity_value)
+
+def _not_subhalo(halo, field_type="halos"):
+    """
+    Only return true if this halo is not a subhalo.
+    
+    This is used for halo finders such as Rockstar that output parent
+    and subhalos together.
+    """
+
+    if not hasattr(halo.halo_catalog, "parent_dict"):
+        halo.halo_catalog.parent_dict = \
+          create_parent_dict(halo.halo_catalog.data_source, ptype=field_type)
+    return halo.halo_catalog.parent_dict[int(halo.quantities["particle_identifier"])] == -1
+add_filter("not_subhalo", _not_subhalo)
+
+def create_parent_dict(data_source, ptype="halos"):
+    """
+    Create a dictionary of halo parents to allow for filtering of subhalos.
+
+    For a pair of halos whose distance is smaller than the radius of at least 
+    one of the halos, the parent is defined as the halo with the larger radius.
+    Parent halos (halos with no parents of their own) have parent index values of -1.
+    """
+    pos = np.rollaxis(
+        np.array([data_source[ptype, "particle_position_x"].in_units("Mpc"),
+                  data_source[ptype, "particle_position_y"].in_units("Mpc"),
+                  data_source[ptype, "particle_position_z"].in_units("Mpc")]), 1)
+    rad = data_source[ptype, "virial_radius"].in_units("Mpc").to_ndarray()
+    ids = data_source[ptype, "particle_identifier"].to_ndarray().astype("int")
+    parents = -1 * np.ones_like(ids, dtype="int")
+    my_tree = KDTree(pos)
+
+    for i in xrange(ids.size):
+        neighbors = np.array(
+            my_tree.query_ball_point(pos[i], rad[i], p=2))
+        if neighbors.size > 1:
+            parents[neighbors] = ids[neighbors[np.argmax(rad[neighbors])]]
+
+    parents[ids == parents] = -1
+    parent_dict = dict(zip(ids, parents))
+    return parent_dict
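
The parent assignment reduces to one rule: among all halos overlapping a
given halo's radius, the one with the largest radius is the parent. A
self-contained numpy sketch of the same rule on three synthetic halos
(illustration only; the real code uses a KDTree for the neighbor query):

    import numpy as np

    # Three halos on a line; halo 1 sits inside halo 0's radius
    pos = np.array([[0.0, 0.0, 0.0], [0.5, 0.0, 0.0], [5.0, 0.0, 0.0]])
    rad = np.array([1.0, 0.2, 0.3])
    ids = np.arange(3)
    parents = -1 * np.ones_like(ids)
    for i in range(ids.size):
        # neighbors within rad[i] of halo i (the KDTree query above)
        d = np.sqrt(((pos - pos[i]) ** 2).sum(axis=1))
        neighbors = np.where(d < rad[i])[0]
        if neighbors.size > 1:
            parents[neighbors] = ids[neighbors[np.argmax(rad[neighbors])]]
    parents[ids == parents] = -1
    # parents -> [-1, 0, -1]: halo 1 is a subhalo of halo 0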

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/analysis_modules/halo_analysis/halo_finding_methods.py
--- /dev/null
+++ b/yt/analysis_modules/halo_analysis/halo_finding_methods.py
@@ -0,0 +1,132 @@
+"""
+Halo Finding methods
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.analysis_modules.halo_finding.halo_objects import \
+    FOFHaloFinder, HOPHaloFinder
+from yt.frontends.halo_catalogs.halo_catalog.data_structures import \
+    HaloCatalogDataset
+from yt.frontends.stream.data_structures import \
+    load_particles
+
+from .operator_registry import \
+    finding_method_registry
+
+def add_finding_method(name, function):
+    finding_method_registry[name] = HaloFindingMethod(function)
+    
+class HaloFindingMethod(object):
+    r"""
+    A halo finding method is a callback that performs halo finding on a 
+    dataset and returns a new dataset that is the loaded halo finder output.
+    """
+    def __init__(self, function, args=None, kwargs=None):
+        self.function = function
+        self.args = args
+        if self.args is None: self.args = []
+        self.kwargs = kwargs
+        if self.kwargs is None: self.kwargs = {}
+
+    def __call__(self, ds):
+        return self.function(ds, *self.args, **self.kwargs)
+
+def _hop_method(pf):
+    r"""
+    Run the Hop halo finding method.
+    """
+    
+    halo_list = HOPHaloFinder(pf)
+    halos_pf = _parse_old_halo_list(pf, halo_list)
+    return halos_pf
+add_finding_method("hop", _hop_method)
+
+def _fof_method(pf):
+    r"""
+    Run the FoF halo finding method.
+    """
+
+    halo_list = FOFHaloFinder(pf)
+    halos_pf = _parse_old_halo_list(pf, halo_list)
+    return halos_pf
+add_finding_method("fof", _fof_method)
+
+def _rockstar_method(pf):
+    r"""
+    Run the Rockstar halo finding method.
+    """
+
+    from yt.frontends.halo_catalogs.rockstar.data_structures import \
+     RockstarDataset
+    from yt.analysis_modules.halo_finding.rockstar.api import \
+     RockstarHaloFinder
+    
+    rh = RockstarHaloFinder(pf)
+    rh.run()
+    halos_pf = RockstarDataset("rockstar_halos/halos_0.0.bin")
+    halos_pf.create_field_info()
+    return halos_pf
+add_finding_method("rockstar", _rockstar_method)
+
+def _parse_old_halo_list(data_pf, halo_list):
+    r"""
+    Convert the halo list into a loaded dataset.
+    """
+
+    num_halos = len(halo_list)
+
+    # Set up fields that we want to pull from identified halos and their units
+    new_fields = ['particle_identifier', 'particle_mass', 'particle_position_x', 
+        'particle_position_y','particle_position_z',
+        'virial_radius']
+    new_units = [ '', 'g', 'cm', 'cm','cm','cm']
+
+    # Set up a dictionary based on those fields 
+    # with empty arrays where we will fill in their values
+    halo_properties = { f : (np.zeros(num_halos),unit) \
+        for f, unit in zip(new_fields,new_units)}
+
+    # Iterate through the halos pulling out their positions and virial quantities
+    # and filling in the properties dictionary
+    for i,halo in enumerate(halo_list):
+        halo_properties['particle_identifier'][0][i] = i
+        halo_properties['particle_mass'][0][i] = halo.virial_mass().in_cgs()
+        halo_properties['virial_radius'][0][i] = halo.virial_radius().in_cgs()
+
+        com = halo.center_of_mass().in_cgs()
+        halo_properties['particle_position_x'][0][i] = com[0]
+        halo_properties['particle_position_y'][0][i] = com[1]
+        halo_properties['particle_position_z'][0][i] = com[2]
+
+    # Define a bounding box based on original data pf
+    bbox = np.array([data_pf.domain_left_edge.in_cgs(),
+            data_pf.domain_right_edge.in_cgs()]).T
+
+    # Create a pf with the halos as particles
+    particle_pf = load_particles(halo_properties, 
+            bbox=bbox, length_unit = 1, mass_unit=1)
+
+    # Create the field info dictionary so we can reference those fields
+    particle_pf.create_field_info()
+
+    for attr in ["current_redshift", "current_time",
+                 "domain_dimensions",
+                 "cosmological_simulation", "omega_lambda",
+                 "omega_matter", "hubble_constant"]:
+        attr_val = getattr(data_pf, attr)
+        setattr(particle_pf, attr, attr_val)
+    particle_pf.current_time = particle_pf.current_time.in_cgs()
+    
+    return particle_pf
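
Because the registry is keyed by name, custom finders can be registered
alongside the built-in ones. A hedged sketch (pf is an already-loaded
dataset; the link value and the api exports are assumptions):

    from yt.analysis_modules.halo_analysis.api import \
        HaloCatalog, add_finding_method
    from yt.analysis_modules.halo_analysis.halo_finding_methods import \
        _parse_old_halo_list
    from yt.analysis_modules.halo_finding.halo_objects import FOFHaloFinder

    def _my_fof(pf):
        # Any callable taking a dataset and returning a halos dataset works
        halo_list = FOFHaloFinder(pf, link=0.15)
        return _parse_old_halo_list(pf, halo_list)

    add_finding_method("my_fof", _my_fof)
    hc = HaloCatalog(data_pf=pf, finder_method="my_fof")
    hc.create()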

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/analysis_modules/halo_analysis/operator_registry.py
--- a/yt/analysis_modules/halo_analysis/operator_registry.py
+++ b/yt/analysis_modules/halo_analysis/operator_registry.py
@@ -27,5 +27,5 @@
 
 callback_registry = OperatorRegistry()
 filter_registry = OperatorRegistry()
-hf_registry = OperatorRegistry()
+finding_method_registry = OperatorRegistry()
 quantity_registry = OperatorRegistry()

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -110,7 +110,9 @@
         if self._name == "RockstarHalo":
             ds = self.pf.sphere(self.CoM, self._radjust * self.max_radius)
         elif self._name == "LoadedHalo":
-            ds = self.pf.sphere(self.CoM, self._radjust * self.max_radius)
+            ds = self.pf.sphere(self.CoM, np.maximum(
+                self._radjust * self.pf.quan(self.max_radius, 'code_length'),
+                self.pf.index.get_smallest_dx()))
         sp_pid = ds['particle_index']
         self._ds_sort = sp_pid.argsort()
         sp_pid = sp_pid[self._ds_sort]
@@ -217,7 +219,7 @@
         vx = (self["particle_velocity_x"] * pm).sum()
         vy = (self["particle_velocity_y"] * pm).sum()
         vz = (self["particle_velocity_z"] * pm).sum()
-        return np.array([vx, vy, vz]) / pm.sum()
+        return self.pf.arr([vx, vy, vz], vx.units) / pm.sum()
 
     def rms_velocity(self):
         r"""Returns the mass-weighted RMS velocity for the halo
@@ -331,9 +333,11 @@
         handle.create_group("/%s" % gn)
         for field in ["particle_position_%s" % ax for ax in 'xyz'] \
                    + ["particle_velocity_%s" % ax for ax in 'xyz'] \
-                   + ["particle_index"] + ["particle_mass"].in_units('Msun'):
+                   + ["particle_index"]:
             handle.create_dataset("/%s/%s" % (gn, field), data=self[field])
-        if 'creation_time' in self.data.pf.field_list:
+        handle.create_dataset("/%s/particle_mass" % gn,
+                              data=self["particle_mass"].in_units('Msun'))
+        if ('io','creation_time') in self.data.pf.field_list:
             handle.create_dataset("/%s/creation_time" % gn,
                 data=self['creation_time'])
         n = handle["/%s" % gn]
@@ -848,6 +852,7 @@
         self._saved_fields = {}
         self._ds_sort = None
         self._particle_mask = None
+        self._pid_sort = None
 
 
     def __getitem__(self, key):
@@ -865,14 +870,28 @@
             self.size, key)
         if field_data is not None:
             if key == 'particle_index':
-                field_data = field_data[field_data.argsort()]
+                # This is an index for turning data sorted by particle index
+                # into the same order as the fields on disk
+                self._pid_sort = field_data.argsort().argsort()
+            # Convert to a YTArray using the units of the data on disk
+            if key == 'particle_mass':
+                field_data = self.pf.arr(field_data, 'Msun')
+            else:
+                field_data = self.pf.arr(field_data,
+                    self.pf._get_field_info('unknown', key).units)
             self._saved_fields[key] = field_data
             return self._saved_fields[key]
         # We won't store this field below in saved_fields because
         # that would mean keeping two copies of it, one in the yt
         # machinery and one here.
-        ds = self.pf.sphere(self.CoM, 1.05 * self.max_radius)
-        return np.take(ds[key][self._ds_sort], self.particle_mask)
+        ds = self.pf.sphere(self.CoM, np.maximum(
+            self._radjust * self.pf.quan(self.max_radius, 'code_length'),
+            self.pf.index.get_smallest_dx()))
+        # If particle_mask hasn't been accessed yet, _ds_sort won't have the
+        # proper values set, so touch the property to build it
+        if self._particle_mask is None:
+            self.particle_mask
+        return ds[key][self._ds_sort][self.particle_mask][self._pid_sort]
 
     def _get_particle_data(self, halo, fnames, size, field):
         # Given a list of file names, a halo, its size, and the desired field,
@@ -1087,10 +1106,10 @@
         gc.collect()
 
     def _get_dm_indices(self):
-        if 'creation_time' in self._data_source.index.field_list:
+        if ('io','creation_time') in self._data_source.index.field_list:
             mylog.debug("Differentiating based on creation time")
             return (self._data_source["creation_time"] <= 0)
-        elif 'particle_type' in self._data_source.index.field_list:
+        elif ('io','particle_type') in self._data_source.index.field_list:
             mylog.debug("Differentiating based on particle type")
             return (self._data_source["particle_type"] == 1)
         else:
@@ -2141,7 +2160,7 @@
         elif fancy_padding and self._distributed:
             LE_padding = np.empty(3, dtype='float64')
             RE_padding = np.empty(3, dtype='float64')
-            avg_spacing = (float(vol) / data.size) ** (1. / 3.)
+            avg_spacing = (vol / data.size) ** (1. / 3.)
             base_padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
                 avg_spacing
             for dim in xrange(3):
@@ -2388,7 +2407,7 @@
                 total_mass = \
                     self.comm.mpi_allreduce((self._data_source['all', "particle_mass"][select].in_units('Msun')).sum(dtype='float64'), op='sum')
             else:
-                total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("particle_mass")[0].in_units('Msun'), op='sum')
+                total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("particle_mass").in_units('Msun'), op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles
         # are all on different processors, we should instead construct an
         # object representing the entire domain and sum it "lazily" with
@@ -2412,7 +2431,7 @@
             sub_mass = self._data_source["particle_mass"][select].in_units('Msun').sum(dtype='float64')
         else:
             sub_mass = \
-                self._data_source.quantities["TotalQuantity"]("particle_mass")[0].in_units('Msun')
+                self._data_source.quantities["TotalQuantity"]("particle_mass").in_units('Msun')
         HOPHaloList.__init__(self, self._data_source,
             threshold * total_mass / sub_mass, dm_only)
         self._parse_halolist(total_mass / sub_mass)

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -53,7 +53,7 @@
         self.zpos = particle_fields.pop("particle_position_z")
         self.real_size = len(self.xpos)
         self.index = particle_fields.pop("particle_index")
-        self.mass = particle_fields.pop("ParticleMassMsun")
+        self.mass = particle_fields.pop("particle_mass")
         self.padded_particles = []
         self.nMerge = 4
         self.tree = tree

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -478,9 +478,14 @@
         return tuple(self.ActiveDimensions.tolist())
 
     def _setup_data_source(self):
-        self._data_source = self.pf.region(self.center,
-            self.left_edge - self.base_dds,
-            self.right_edge + self.base_dds)
+        LE = self.left_edge - self.base_dds
+        RE = self.right_edge + self.base_dds
+        if not all(self.pf.periodicity):
+            for i in range(3):
+                if self.pf.periodicity[i]: continue
+                LE[i] = max(LE[i], self.pf.domain_left_edge[i])
+                RE[i] = min(RE[i], self.pf.domain_right_edge[i])
+        self._data_source = self.pf.region(self.center, LE, RE)
         self._data_source.min_level = 0
         self._data_source.max_level = self.level
         self._pdata_source = self.pf.region(self.center,
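
The clamping above is an elementwise min/max against the domain, applied
only to non-periodic axes; for a unit domain that is periodic in x only
(illustrative numbers):

    import numpy as np

    periodicity = (True, False, False)
    domain_left_edge, domain_right_edge = np.zeros(3), np.ones(3)
    LE = np.array([-0.05, -0.05, -0.05])  # left_edge - base_dds
    RE = np.array([1.05, 1.05, 1.05])     # right_edge + base_dds
    for i in range(3):
        if periodicity[i]:
            continue  # a periodic axis may extend past the boundary
        LE[i] = max(LE[i], domain_left_edge[i])
        RE[i] = min(RE[i], domain_right_edge[i])
    # LE -> [-0.05, 0., 0.], RE -> [1.05, 1., 1.]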

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -625,7 +625,7 @@
         self.unit_registry.modify("code_length", self.length_unit)
         self.unit_registry.modify("code_mass", self.mass_unit)
         self.unit_registry.modify("code_time", self.time_unit)
-        vel_unit = getattr(self, "code_velocity",
+        vel_unit = getattr(self, "velocity_unit",
                     self.length_unit / self.time_unit)
         self.unit_registry.modify("code_velocity", vel_unit)
         # domain_width does not yet exist

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -25,11 +25,13 @@
 from yt.funcs import *
 from yt.utilities.chemical_formulas import \
     ChemicalFormula
+from .field_plugin_registry import \
+    register_field_plugin
 
 _primordial_mass_fraction = \
   {"H": primordial_H_mass_fraction,
    "He" : (1 - primordial_H_mass_fraction)}
-    
+
 # See YTEP-0003 for details, but we want to ensure these fields are all
 # populated:
 #
@@ -159,3 +161,20 @@
     if loc == len(my_split) - 1 or not my_split[loc + 1].isdigit():
         return 1
     return int(my_split[loc + 1])
+
+@register_field_plugin
+def setup_species_fields(registry, ftype = "gas", slice_info = None):
+    # We have to check what type of field this is -- if it's particles, then we
+    # set particle_type to True.
+    particle_type = ftype not in registry.pf.fluid_types
+    for species in registry.species_names:
+        # These are all the species we should be looking for fractions or
+        # densities of.
+        if (ftype, "%s_density" % species) in registry:
+            func = add_species_field_by_density
+        elif (ftype, "%s_fraction" % species) in registry:
+            func = add_species_field_by_fraction
+        else:
+            # Skip it
+            continue
+        func(registry, ftype, species, particle_type)

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/frontends/_skeleton/api.py
--- a/yt/frontends/_skeleton/api.py
+++ b/yt/frontends/_skeleton/api.py
@@ -20,7 +20,7 @@
 
 from .fields import \
       SkeletonFieldInfo, \
-      add_flash_field
+      add_skeleton_field
 
 from .io import \
       IOHandlerSkeleton

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/frontends/_skeleton/data_structures.py
--- a/yt/frontends/_skeleton/data_structures.py
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -13,36 +13,28 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
-import stat
 import numpy as np
-import weakref
 
-from yt.funcs import *
 from yt.data_objects.grid_patch import \
     AMRGridPatch
-from yt.data_objects.index import \
-    AMRHierarchy
+from yt.geometry.grid_geometry_handler import \
+    GridIndex
 from yt.data_objects.static_output import \
     Dataset
-from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
-from yt.utilities.io_handler import \
-    io_registry
-from yt.utilities.physical_constants import cm_per_mpc
-from .fields import SkeletonFieldInfo, add_flash_field, KnownSkeletonFields
-from yt.fields.field_info_container import \
-    FieldInfoContainer, NullFunc, ValidateDataField, TranslationFunc
+from yt.utilities.lib.misc_utilities import \
+    get_box_grids_level
 
 class SkeletonGrid(AMRGridPatch):
     _id_offset = 0
-    #__slots__ = ["_level_id", "stop_index"]
-    def __init__(self, id, index, level):
-        AMRGridPatch.__init__(self, id, filename = index.index_filename,
-                              index = index)
-        self.Parent = None
+    def __init__(self, id, index, level, start, dimensions):
+        AMRGridPatch.__init__(self, id, filename=index.index_filename,
+                              index=index)
+        self.Parent = []
         self.Children = []
         self.Level = level
+        self.start_index = start.copy()
+        self.stop_index = self.start_index + dimensions
+        self.ActiveDimensions = dimensions.copy()
 
     def __repr__(self):
         return "SkeletonGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
@@ -50,7 +44,6 @@
-class SkeletonHierarchy(AMRHierarchy):
+class SkeletonHierarchy(GridIndex):
 
     grid = SkeletonGrid
-    float_type = np.float64
     
     def __init__(self, pf, dataset_type='skeleton'):
         self.dataset_type = dataset_type
@@ -66,6 +59,10 @@
     def _detect_output_fields(self):
         # This needs to set a self.field_list that contains all the available,
         # on-disk fields.
+        # NOTE: Each should be a tuple, where the first element is the on-disk
+        # fluid type or particle type.  Convention suggests that the on-disk
+        # fluid type is usually the dataset_type and the on-disk particle type
+        # (for a single population of particles) is "io".
         pass
     
     def _count_grids(self):
@@ -96,30 +93,33 @@
 
 class SkeletonDataset(Dataset):
     _index_class = SkeletonHierarchy
-    _fieldinfo_fallback = SkeletonFieldInfo
-    _fieldinfo_known = KnownSkeletonFields
-    _handle = None
+    _field_info_class = SkeletonFieldInfo
     
-    def __init__(self, filename, dataset_type='skeleton',
-                 storage_filename = None,
-                 conversion_override = None):
-
-        if conversion_override is None: conversion_override = {}
-        self._conversion_override = conversion_override
-
+    def __init__(self, filename, dataset_type='skeleton', storage_filename=None):
+        self.fluid_types += ('skeleton',)
         Dataset.__init__(self, filename, dataset_type)
         self.storage_filename = storage_filename
 
-    def _set_units(self):
-        # This needs to set up the dictionaries that convert from code units to
-        # CGS.  The needed items are listed in the second entry:
-        #   self.time_units         <= sec_conversion
-        #   self.conversion_factors <= mpc_conversion
-        #   self.units              <= On-disk fields
+    def _set_code_unit_attributes(self):
+        # This is where quantities are created that represent the various
+        # on-disk units.  These are the currently available quantities which
+        # should be set, along with examples of how to set them to standard
+        # values.
+        #
+        # self.length_unit = self.quan(1.0, "cm")
+        # self.mass_unit = self.quan(1.0, "g")
+        # self.time_unit = self.quan(1.0, "s")
+        #
+        # These can also be set:
+        # self.velocity_unit = self.quan(1.0, "cm/s")
+        # self.magnetic_unit = self.quan(1.0, "gauss")
         pass
 
     def _parse_parameter_file(self):
-        # This needs to set up the following items:
+        # This needs to set up the following items.  Note that these are all
+        # assumed to be in code units; domain_left_edge and domain_right_edge
+        # will be updated to be in code units at a later time.  This includes
+        # the cosmological parameters.
         #
         #   self.unique_identifier
         #   self.parameters             <= full of code-specific items of use

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/frontends/_skeleton/definitions.py
--- a/yt/frontends/_skeleton/definitions.py
+++ b/yt/frontends/_skeleton/definitions.py
@@ -0,0 +1,1 @@
+# This file is often empty.  It can hold definitions related to a frontend.

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/frontends/_skeleton/fields.py
--- a/yt/frontends/_skeleton/fields.py
+++ b/yt/frontends/_skeleton/fields.py
@@ -13,79 +13,35 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import numpy as np
+from yt.funcs import mylog
 from yt.fields.field_info_container import \
-    FieldInfoContainer, \
-    NullFunc, \
-    TranslationFunc, \
-    FieldInfo, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType
-from yt.utilities.physical_constants import \
-    kboltz
+    FieldInfoContainer
 
-# The first field container is where any fields that exist on disk go, along
-# with their conversion factors, display names, etc.
+# We need to specify which fields we might have in our dataset.  The field info
+# container subclass here will define which fields it knows about.  There are
+# optionally methods on it that get called which can be subclassed.
 
-KnownSkeletonFields = FieldInfoContainer()
-add_skeleton_field = KnownSkeletonFields.add_field
+class SkeletonFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+        # Each entry here is of the form
+        # ( "name", ("units", ["fields", "to", "alias"], # "display_name")),
+    )
 
-SkeletonFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_field = SkeletonFieldInfo.add_field
+    known_particle_fields = (
+        # Identical form to above
+        # ( "name", ("units", ["fields", "to", "alias"], # "display_name")),
+    )
 
-# Often, we want to translate between fields on disk and fields in yt.  This
-# construct shows how to do that.  Note that we use TranslationFunc.
+    def __init__(self, pf):
+        super(SkeletonFieldInfo, self).__init__(pf)
+        # If you want, you can check self.field_list
 
-translation_dict = {"x-velocity": "velx",
-                    "y-velocity": "vely",
-                    "z-velocity": "velz",
-                    "Density": "dens",
-                    "Temperature": "temp",
-                    "Pressure" : "pres", 
-                    "Grav_Potential" : "gpot",
-                    "particle_position_x" : "particle_posx",
-                    "particle_position_y" : "particle_posy",
-                    "particle_position_z" : "particle_posz",
-                    "particle_velocity_x" : "particle_velx",
-                    "particle_velocity_y" : "particle_vely",
-                    "particle_velocity_z" : "particle_velz",
-                    "particle_index" : "particle_tag",
-                    "Electron_Fraction" : "elec",
-                    "HI_Fraction" : "h   ",
-                    "HD_Fraction" : "hd  ",
-                    "HeI_Fraction": "hel ",
-                    "HeII_Fraction": "hep ",
-                    "HeIII_Fraction": "hepp",
-                    "HM_Fraction": "hmin",
-                    "HII_Fraction": "hp  ",
-                    "H2I_Fraction": "htwo",
-                    "H2II_Fraction": "htwp",
-                    "DI_Fraction": "deut",
-                    "DII_Fraction": "dplu",
-                    "ParticleMass": "particle_mass",
-                    "Flame_Fraction": "flam"}
+    def setup_fluid_fields(self):
+        # Here we do anything that might need info about the parameter file.
+        # You can use self.alias, self.add_output_field and self.add_field .
+        pass
 
-for f,v in translation_dict.items():
-    if v not in KnownSkeletonFields:
-        pfield = v.startswith("particle")
-        add_skeleton_field(v, function=NullFunc, take_log=False,
-                  validators = [ValidateDataField(v)],
-                  particle_type = pfield)
-    if f.endswith("_Fraction") :
-        dname = "%s\/Fraction" % f.split("_")[0]
-    else :
-        dname = f                    
-    ff = KnownSkeletonFields[v]
-    pfield = f.startswith("particle")
-    add_field(f, TranslationFunc(v),
-              take_log=KnownSkeletonFields[v].take_log,
-              units = ff.units, display_name=dname,
-              particle_type = pfield)
-
-# Here's an example of adding a new field:
-
-add_skeleton_field("dens", function=NullFunc, take_log=True,
-                convert_function=_get_convert("dens"),
-                units=r"g / cm**3")
+    def setup_particle_fields(self, ptype):
+        # This will get called for every particle type.
+        pass
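
For a hypothetical frontend, a filled-in known_other_fields entry (field
names and units invented for illustration):

    known_other_fields = (
        ("dens", ("g/cm**3", ["density"], None)),
        ("velx", ("cm/s", ["velocity_x"], r"$v_x$")),
    )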

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/frontends/_skeleton/io.py
--- a/yt/frontends/_skeleton/io.py
+++ b/yt/frontends/_skeleton/io.py
@@ -23,12 +23,31 @@
     _particle_reader = False
     _dataset_type = "skeleton"
 
-    def _read_data(self, grid, field):
-        # This must return the array, of size/shape grid.ActiveDimensions, that
-        # corresponds to 'field'.
+    def _read_particle_coords(self, chunks, ptf):
+        # This needs to *yield* a series of tuples of (ptype, (x, y, z)).
+        # chunks is a list of chunks, and ptf is a dict where the keys are
+        # ptypes and the values are lists of fields.
         pass
 
-    def _read_data_slice(self, grid, field, axis, coord):
-        # If this is not implemented, the IO handler will just slice a
-        # _read_data item.
+    def _read_particle_fields(self, chunks, ptf, selector):
+        # This gets called after the arrays have been allocated.  It needs to
+        # yield ((ptype, field), data) where data is the masked results of
+        # reading ptype, field and applying the selector to the data read in.
+        # Selector objects have a .select_points(x,y,z) that returns a mask, so
+        # you need to do your masking here.
         pass
+
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        # This needs to allocate a set of arrays inside a dictionary, where the
+        # keys are the (ftype, fname) tuples and the values are arrays that
+        # have been masked using whatever selector method is appropriate.  The
+        # dict gets returned at the end and it should be flat, with selected
+        # data.  Note that if you're reading grid data, you might need to
+        # special-case a grid selector object.
+        pass
+
+    def _read_chunk_data(self, chunk, fields):
+        # This reads the data from a single chunk, and is only used for
+        # caching.
+        pass

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -243,12 +243,17 @@
                 self.add_field(name = ("gas", "%s_density" % nice_name),
                                function = func,
                                units = "g/cm**3")
-                # We know this will either have one letter, or two.
-                if field[3] in string.letters:
-                    element, weight = field[2:4], field[4:-1]
-                else:
-                    element, weight = field[2:3], field[3:-1]
-                weight = int(weight)
+                # Most of the time our species will be of the form
+                # element name + atomic weight (e.g. C12), but
+                # sometimes we make up descriptive names (e.g. ash)
+                if any(char.isdigit() for char in field):
+                    # We know this will either have one letter, or two.
+                    if field[3] in string.letters:
+                        element, weight = field[2:4], field[4:-1]
+                    else:
+                        element, weight = field[2:3], field[3:-1]
+                    weight = int(weight)
+
                 # Here we can, later, add number density.
             if field.startswith("omegadot("):
                 nice_name = field[9:-1]

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -124,7 +124,6 @@
         if species != "Electron":
             self.alias(("gas", "%s_density" % yt_name),
                        ("enzo", "%s_Density" % species))
-        add_species_field_by_density(self, "gas", yt_name)
 
     def setup_species_fields(self):
         species_names = [fn.rsplit("_Density")[0] for ft, fn in 

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -24,6 +24,32 @@
 _fields = ("temperature", "density", "velocity_magnitude",
            "velocity_divergence")
 
+def check_color_conservation(pf):
+    species_names = pf.field_info.species_names
+    dd = pf.all_data()
+    dens_yt = dd["density"].copy()
+    # Enumerate our species here
+    for s in sorted(species_names):
+        if s == "El": continue
+        dens_yt -= dd["%s_density" % s]
+    dens_yt -= dd["metal_density"]
+    delta_yt = np.abs(dens_yt / dd["density"])
+
+    # Now we compare color conservation to Enzo's color conservation
+    dd = pf.all_data()
+    dens_enzo = dd["Density"].copy()
+    for f in sorted(pf.field_list):
+        if not f[1].endswith("_Density") or \
+               f[1].startswith("Dark_Matter_")  or \
+               f[1].startswith("Electron_") or \
+               f[1].startswith("SFR_") or \
+               f[1].startswith("Forming_Stellar_") or \
+               f[1].startswith("Star_Particle_"):
+            continue
+        dens_enzo -= dd[f]
+    delta_enzo = np.abs(dens_enzo / dd["Density"])
+    return assert_almost_equal, delta_yt, delta_enzo
+
 m7 = "DD0010/moving7_0010"
 @requires_pf(m7)
 def test_moving7():
@@ -37,7 +63,15 @@
 @requires_pf(g30, big_data=True)
 def test_galaxy0030():
     pf = data_dir_load(g30)
+    yield check_color_conservation(pf)
     yield assert_equal, str(pf), "galaxy0030"
     for test in big_patch_amr(g30, _fields):
         test_galaxy0030.__name__ = test.description
         yield test
+
+ecp = "enzo_cosmology_plus/DD0046/DD0046"
+@requires_pf(ecp, big_data=True)
+def test_ecp():
+    pf = data_dir_load(ecp)
+    # Now we test our species fields
+    yield check_color_conservation(pf)
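
Written out, the quantity check_color_conservation compares is the residual

    \delta = \left| \frac{\rho - \sum_s \rho_s - \rho_Z}{\rho} \right|

with rho the total gas density, rho_s the individual species ("color") densities, and rho_Z the metal density. The test computes this once from yt's aliased fields and once from Enzo's native *_Density fields, then asserts the two residuals agree to within assert_almost_equal's tolerance.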

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -31,7 +31,8 @@
 from yt.utilities.physical_constants import mh
 from yt.fields.species_fields import \
     add_species_field_by_fraction, \
-    add_species_field_by_density
+    add_species_field_by_density, \
+    setup_species_fields
 
 from yt.fields.particle_fields import \
     add_volume_weighted_smoothed_field
@@ -63,9 +64,30 @@
         ("Metals", ("code_metallicity", ["metallicity"], None)),
         ("Phi", ("code_length", [], None)),
         ("FormationTime", ("code_time", ["creation_time"], None)),
+        # These are metallicity fields that get discovered for FIRE simulations
+        ("Metallicity_00", ("", ["metallicity"], None)),
+        ("Metallicity_01", ("", ["He_fraction"], None)),
+        ("Metallicity_02", ("", ["C_fraction"], None)),
+        ("Metallicity_03", ("", ["N_fraction"], None)),
+        ("Metallicity_04", ("", ["O_fraction"], None)),
+        ("Metallicity_05", ("", ["Ne_fraction"], None)),
+        ("Metallicity_06", ("", ["Mg_fraction"], None)),
+        ("Metallicity_07", ("", ["Si_fraction"], None)),
+        ("Metallicity_08", ("", ["S_fraction"], None)),
+        ("Metallicity_09", ("", ["Ca_fraction"], None)),
+        ("Metallicity_10", ("", ["Fe_fraction"], None)),
     )
 
+    def __init__(self, *args, **kwargs):
+        super(SPHFieldInfo, self).__init__(*args, **kwargs)
+        # Special case for FIRE
+        if ("PartType0", "Metallicity_00") in self.field_list:
+            self.species_names += ["He", "C", "N", "O", "Ne", "Mg", "Si", "S",
+                "Ca", "Fe"]
 
+    def setup_particle_fields(self, ptype, *args, **kwargs):
+        super(SPHFieldInfo, self).setup_particle_fields(ptype, *args, **kwargs)
+        setup_species_fields(self, ptype)
 
 class TipsyFieldInfo(SPHFieldInfo):
 

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -113,7 +113,9 @@
                     elif field in self._element_names:
                         rfield = 'ElementAbundance/' + field
                         data = g[rfield][:][mask,...]
-
+                    elif field.startswith("Metallicity_"):
+                        col = int(field.rsplit("_", 1)[-1])
+                        data = g["Metallicity"][:,col][mask]
                     else:
                         data = g[field][:][mask,...]
 
@@ -190,6 +192,10 @@
                     for j in gp.keys():
                         kk = j
                         fields.append((ptype, str(kk)))
+                elif k == 'Metallicity' and len(g[k].shape) > 1:
+                    # Vector of metallicity
+                    for i in range(g[k].shape[1]):
+                        fields.append((ptype, "Metallicity_%02i" % i))
                 else:
                     kk = k
                     if not hasattr(g[kk], "shape"): continue
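
Taken together, the fields.py and io.py hunks above discover FIRE's two-dimensional Metallicity dataset as one Metallicity_NN field per column, then read a single column back out. A sketch of that round trip with h5py, against an invented snapshot layout:

    import h5py
    import numpy as np

    with h5py.File("snapshot_000.hdf5", "r") as f:  # hypothetical file name
        g = f["PartType0"]
        # Discovery: one field name per column of the 2-D dataset.
        n_cols = g["Metallicity"].shape[1]
        fields = [("PartType0", "Metallicity_%02i" % i) for i in range(n_cols)]
        # Read-back: recover the column index from the field name.
        col = int("Metallicity_05".rsplit("_", 1)[-1])
        mask = np.ones(g["Metallicity"].shape[0], dtype=bool)  # stand-in selector mask
        data = g["Metallicity"][:, col][mask]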

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -94,6 +94,7 @@
 # Add LaTeX representations for units with trivial representations.
 latex_symbol_lut = {
     "unitary" : "",
+    "dimensionless" : "",
     "code_length" : "\\rm{code}\/\\rm{length}",
     "code_time" : "\\rm{code}\/\\rm{time}",
     "code_mass" : "\\rm{code}\/\\rm{mass}",

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/units/unit_registry.py
--- a/yt/units/unit_registry.py
+++ b/yt/units/unit_registry.py
@@ -47,20 +47,18 @@
 
         # Validate
         if not isinstance(cgs_value, float):
-            raise UnitParseError("cgs_value must be a float, got a %s." \
+            raise UnitParseError("cgs_value must be a float, got a %s."
                                  % type(cgs_value))
-        
+
         validate_dimensions(dimensions)
 
         # Add to symbol lut
         if tex_repr is None:
-            latex_symbol_lut[symbol] = "\\rm{" + symbol + "}"
-        else:
-            latex_symbol_lut[symbol] = tex_repr
+            tex_repr = "\\rm{" + symbol + "}"
+        latex_symbol_lut.setdefault(symbol, tex_repr)
 
         # Add to lut
-        if tex_repr is None: tex_repr = symbol
-        self.lut.update( {symbol: (cgs_value, dimensions)} )
+        self.lut.update({symbol: (cgs_value, dimensions)})
 
     def remove(self, symbol):
         """

diff -r d1046b287d10a028e36a7b1aaf0cfc9f6bc59c63 -r c8fe35134dfc5306be995a30b9451efdf2195085 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -251,10 +251,15 @@
         # Left blank to be overridden in subclasses
         pass
 
-    def _switch_pf(self, new_pf):
+    def _switch_pf(self, new_pf, data_source=None):
         ds = self.data_source
         name = ds._type_name
         kwargs = dict((n, getattr(ds, n)) for n in ds._con_args)
+        if data_source is not None:
+            if name != "proj":
+                raise RuntimeError("The data_source keyword argument "
+                                   "is only defined for projections.")
+            kwargs['data_source'] = data_source
         new_ds = getattr(new_pf, name)(**kwargs)
         self.pf = new_pf
         self.data_source = new_ds
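
Hypothetical usage of the new keyword (datasets and names invented; _switch_pf is the internal hook patched above): a projection can be moved to a new output while swapping in a restricted data source, whereas any other plot type now fails loudly:

    prj = ProjectionPlot(pf_old, "x", "density")
    sp = pf_new.sphere("c", (100.0, "kpc"))
    prj._switch_pf(pf_new, data_source=sp)   # fine: ds._type_name == "proj"

    slc = SlicePlot(pf_old, "x", "density")
    slc._switch_pf(pf_new, data_source=sp)   # RuntimeError: projections only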

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list