[yt-svn] commit/yt: 19 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Tue May 20 05:06:56 PDT 2014


19 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/3fceec20bf2c/
Changeset:   3fceec20bf2c
Branch:      yt-3.0
User:        jzuhone
Date:        2014-05-12 02:29:09
Summary:     This is causing failures on Windows.
Affected #:  1 file

diff -r b785710c410756663601d72535060577d2df5895 -r 3fceec20bf2ce03a9771e97e69d3087a204f7eda yt/frontends/halo_catalogs/setup.py
--- a/yt/frontends/halo_catalogs/setup.py
+++ b/yt/frontends/halo_catalogs/setup.py
@@ -6,5 +6,6 @@
     config = Configuration('halo_catalogs', parent_package, top_path)
     config.add_subpackage("halo_catalog")
     config.add_subpackage("rockstar")
+    config.add_subpackage("owls_subfind")    
     config.make_config_py()
     return config


https://bitbucket.org/yt_analysis/yt/commits/4c7e5f8bc9c0/
Changeset:   4c7e5f8bc9c0
Branch:      yt-3.0
User:        jzuhone
Date:        2014-05-12 02:40:28
Summary:     Merge
Affected #:  1 file

diff -r 788d2640d9f4ba392b0fb20d95a1da3085192355 -r 4c7e5f8bc9c00dadd1fd191f93f8800132278a97 yt/frontends/halo_catalogs/setup.py
--- a/yt/frontends/halo_catalogs/setup.py
+++ b/yt/frontends/halo_catalogs/setup.py
@@ -7,5 +7,6 @@
     config.add_subpackage("halo_catalog")
     config.add_subpackage("owls_subfind")
     config.add_subpackage("rockstar")
+    config.add_subpackage("owls_subfind")    
     config.make_config_py()
     return config


https://bitbucket.org/yt_analysis/yt/commits/e5526e861aac/
Changeset:   e5526e861aac
Branch:      yt-3.0
User:        jzuhone
Date:        2014-05-12 02:45:12
Summary:     Look before you leap
Affected #:  1 file

diff -r 4c7e5f8bc9c00dadd1fd191f93f8800132278a97 -r e5526e861aacb758f382e2c9c5c6a34c441876f8 yt/frontends/halo_catalogs/setup.py
--- a/yt/frontends/halo_catalogs/setup.py
+++ b/yt/frontends/halo_catalogs/setup.py
@@ -7,6 +7,5 @@
     config.add_subpackage("halo_catalog")
     config.add_subpackage("owls_subfind")
     config.add_subpackage("rockstar")
-    config.add_subpackage("owls_subfind")    
     config.make_config_py()
     return config


https://bitbucket.org/yt_analysis/yt/commits/d145b3a5f0c4/
Changeset:   d145b3a5f0c4
Branch:      yt-3.0
User:        jzuhone
Date:        2014-05-12 04:29:11
Summary:     Updated photon simulator docs
Affected #:  1 file

diff -r e5526e861aacb758f382e2c9c5c6a34c441876f8 -r d145b3a5f0c414197bf6e5074afca5cfe7888682 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -36,20 +36,20 @@
 .. code:: python
 
     from yt.mods import *
-    from yt.analysis_modules.api import *
+    from yt.analysis_modules.photon_simulator.api import *
     from yt.utilities.cosmology import Cosmology
 
 We're going to load up an Athena dataset of a galaxy cluster core:
 
 .. code:: python
 
-    pf = load("MHDSloshing/virgo_low_res.0054.vtk", 
-              parameters={"TimeUnits":3.1557e13,
-                          "LengthUnits":3.0856e24,
-                          "DensityUnits":6.770424595218825e-27})
+    pf = load("MHDSloshing/virgo_low_res.0054.vtk",
+              parameters={"time_unit":(1.0,"Myr"),
+                          "length_unit":(1.0,"Mpc"),
+                          "mass_unit":(1.0e14,"Msun")}) 
 
 First, to get a sense of what the resulting image will look like, let's
-make a new ``yt`` field called ``"DensitySquared"``, since the X-ray
+make a new ``yt`` field called ``"density_squared"``, since the X-ray
 emission is proportional to :math:`\rho^2`, and a weak function of
 temperature and metallicity.
 
@@ -57,14 +57,14 @@
 
     def _density_squared(field, data):
         return data["density"]**2
-    add_field("DensitySquared", function=_density_squared)
+    add_field("density_squared", function=_density_squared)
 
 Then we'll project this field along the z-axis.
 
 .. code:: python
 
-    prj = ProjectionPlot(pf, "z", ["DensitySquared"], width=(500., "kpc"))
-    prj.set_cmap("DensitySquared", "gray_r")
+    prj = ProjectionPlot(ds, "z", ["density_squared"], width=(500., "kpc"))
+    prj.set_cmap("density_squared", "gray_r")
     prj.show()
 
 .. image:: _images/dsquared.png
@@ -89,7 +89,7 @@
 
 .. code:: python
 
-    sp = pf.sphere("c", (250., "kpc"))
+    sp = ds.sphere("c", (250., "kpc"))
 
 This will serve as our ``data_source`` that we will use later. Next, we
 need to create the ``SpectralModel`` instance that will determine how
@@ -258,11 +258,6 @@
     events = photons.project_photons(L, exp_time_new=2.0e5, redshift_new=0.07, absorb_model=abs_model,
                                      sky_center=(187.5,12.333), responses=[ARF,RMF])
 
-.. parsed-literal::
-
-    WARNING:yt:This routine has not been tested to work with all RMFs. YMMV.
-
-
 Also, the optional keyword ``psf_sigma`` specifies a Gaussian standard
 deviation to scatter the photon sky positions around with, providing a
 crude representation of a PSF.
@@ -282,17 +277,17 @@
 
 .. code:: python
 
-    {'eobs': array([  0.32086522,   0.32271389,   0.32562708, ...,   8.90600621,
-             9.73534237,  10.21614256]), 
-     'xsky': array([ 187.5177707 ,  187.4887825 ,  187.50733609, ...,  187.5059345 ,
-            187.49897546,  187.47307048]), 
-     'ysky': array([ 12.33519996,  12.3544496 ,  12.32750903, ...,  12.34907707,
-            12.33327653,  12.32955225]), 
-     'ypix': array([ 133.85374195,  180.68583074,  115.14110561, ...,  167.61447493,
-            129.17278711,  120.11508562]), 
+    {'eobs': YTArray([  0.32086522,   0.32271389,   0.32562708, ...,   8.90600621,
+             9.73534237,  10.21614256]) keV, 
+     'xsky': YTArray([ 187.5177707 ,  187.4887825 ,  187.50733609, ...,  187.5059345 ,
+            187.49897546,  187.47307048]) degree, 
+     'ysky': YTArray([ 12.33519996,  12.3544496 ,  12.32750903, ...,  12.34907707,
+            12.33327653,  12.32955225]) degree, 
+     'ypix': YTArray([ 133.85374195,  180.68583074,  115.14110561, ...,  167.61447493,
+            129.17278711,  120.11508562]) (dimensionless), 
      'PI': array([ 27,  15,  25, ..., 609, 611, 672]), 
-     'xpix': array([  86.26331108,  155.15934197,  111.06337043, ...,  114.39586907,
-            130.93509652,  192.50639633])}
+     'xpix': YTArray([  86.26331108,  155.15934197,  111.06337043, ...,  114.39586907,
+            130.93509652,  192.50639633]) (dimensionless)}
 
 
 We can bin up the events into an image and save it to a FITS file. The
@@ -436,7 +431,7 @@
 
    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
 
-   pf = load_uniform_grid(data, ddims, 2*R*cm_per_kpc, bbox=bbox)
+   ds = load_uniform_grid(data, ddims, 2*R*cm_per_kpc, bbox=bbox)
 
 where for simplicity we have set the velocities to zero, though we
 could have created a realistic velocity field as well. Now, we
@@ -445,7 +440,7 @@
 
 .. code:: python
 
-   sphere = pf.sphere(pf.domain_center, 1.0/pf["mpc"])
+   sphere = ds.sphere(pf.domain_center, (1.0,"Mpc"))
        
    A = 6000.
    exp_time = 2.0e5


https://bitbucket.org/yt_analysis/yt/commits/8fb9452ba170/
Changeset:   8fb9452ba170
Branch:      yt-3.0
User:        jzuhone
Date:        2014-05-12 07:53:06
Summary:     Updating installation and development docs to reflect Anaconda option and support for Windows
Affected #:  2 files

diff -r d145b3a5f0c414197bf6e5074afca5cfe7888682 -r 8fb9452ba170a78947917fdb2ac27f31ea8a0af9 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -148,19 +148,31 @@
 
 .. code-block:: bash
 
-   python2.7 setup.py develop
+  $ python2.7 setup.py develop
 
 If you have previously "installed" via ``setup.py install`` you have to
 re-install:
 
 .. code-block:: bash
 
-   python2.7 setup.py install
+  $ python2.7 setup.py install
 
-Only one of these two options is needed.  yt may require you to specify the
-location to libpng and hdf5.  This can be done through files named ``png.cfg``
-and ``hdf5.cfg``.  If you are using the installation script, these will already
-exist.
+Only one of these two options is needed.
+
+If you plan to develop yt on Windows, we recommend using the `MinGW <http://www.mingw.org/>`_ gcc
+compiler that can be installed using the `Anaconda Python
+Distribution <https://store.continuum.io/cshop/anaconda/>`_. Also, the syntax for the
+setup command is slightly different; you must type:
+
+.. code-block:: bash
+
+  $ python2.7 setup.py build --compiler=mingw32 develop
+
+or
+
+.. code-block:: bash
+
+  $ python2.7 setup.py build --compiler=mingw32 install
 
 Making and Sharing Changes
 ++++++++++++++++++++++++++

diff -r d145b3a5f0c414197bf6e5074afca5cfe7888682 -r 8fb9452ba170a78947917fdb2ac27f31ea8a0af9 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -14,8 +14,7 @@
 be time-consuming, yt provides an installation script which downloads and builds
 a fully-isolated Python + NumPy + Matplotlib + HDF5 + Mercurial installation.  
 yt supports Linux and OSX deployment, with the possibility of deployment on 
-other Unix-like systems (XSEDE resources, clusters, etc.).  Windows is not 
-supported.
+other Unix-like systems (XSEDE resources, clusters, etc.).
 
 Since the install is fully-isolated, if you get tired of having yt on your 
 system, you can just delete its directory, and yt and all of its dependencies
@@ -83,14 +82,71 @@
 will also need to set ``LD_LIBRARY_PATH`` and ``PYTHONPATH`` to contain 
 ``$YT_DEST/lib`` and ``$YT_DEST/python2.7/site-packages``, respectively.
 
+.. _testing-installation:
+
+Testing Your Installation
+-------------------------
+
+To test to make sure everything is installed properly, try running yt at
+the command line:
+
+.. code-block:: bash
+
+  $ yt --help
+
+If this works, you should get a list of the various command-line options for
+yt, which means you have successfully installed yt.  Congratulations!
+
+If you get an error, follow the instructions it gives you to debug the problem.
+Do not hesitate to :ref:`contact us <asking-for-help>` so we can help you
+figure it out.
+
+.. _updating-yt:
+
+Updating yt and its dependencies
+--------------------------------
+
+With many active developers, code development sometimes occurs at a furious
+pace in yt.  To make sure you're using the latest version of the code, run
+this command at a command-line:
+
+.. code-block:: bash
+
+  $ yt update
+
+Additionally, if you want to make sure you have the latest dependencies
+associated with yt and update the codebase simultaneously, type this:
+
+.. code-block:: bash
+
+  $ yt update --all
+
+.. _removing-yt:
+
+Removing yt and its dependencies
+--------------------------------
+
+Because yt and its dependencies are installed in an isolated directory when
+you use the script installer, you can easily remove yt and all of its
+dependencies cleanly.  Simply remove the install directory and its
+subdirectories and you're done.  If you *really* had problems with the
+code, this is a last defense for solving: remove and then fully
+:ref:`re-install <installing-yt>` from the install script again.
+
+.. _alternative-installation:
+
 Alternative Installation Methods
 --------------------------------
 
+.. _pip-installation:
+
+Installing yt Using pip or from Source
+++++++++++++++++++++++++++++++++++++++
+
 If you want to forego the use of the install script, you need to make sure you
 have yt's dependencies installed on your system.  These include: a C compiler,
-``HDF5``, ``Freetype``, ``libpng``, ``python``, ``cython``, ``NumPy``, and
-``matplotlib``.  From here, you can use ``pip`` (which comes with ``Python``) to
-install yt as:
+``HDF5``, ``python``, ``cython``, ``NumPy``, ``matplotlib``, and ``h5py``. From here,
+you can use ``pip`` (which comes with ``Python``) to install yt as:
 
 .. code-block:: bash
 
@@ -110,67 +166,37 @@
 It will install yt into ``$HOME/.local/lib64/python2.7/site-packages``. 
 Please refer to ``setuptools`` documentation for the additional options.
 
-Provided that the required dependencies are in a predictable location, yt should
-be able to find them automatically. However, you can manually specify prefix used
-for installation of ``HDF5``, ``Freetype`` and ``libpng`` by using ``hdf5.cfg``,
-``freetype.cfg``, ``png.cfg`` or setting ``HDF5_DIR``, ``FTYPE_DIR``, ``PNG_DIR``
-environmental variables respectively, e.g.
+If you choose this installation method, you do not need to run the activation
+script as it is unnecessary.
+
+.. _anaconda-installation:
+
+Installing yt Using Anaconda
+++++++++++++++++++++++++++++
+
+Perhaps the quickest way to get yt up and running is to install it using the `Anaconda Python
+Distribution <https://store.continuum.io/cshop/anaconda/>`_, which will provide you with a
+easy-to-use environment for installing Python packages. To install a bare-bones Python
+installation with yt, first visit http://repo.continuum.io/miniconda/ and download a recent
+version of the ``Miniconda-x.y.z`` script (corresponding to Python 2.7) for your platform and
+system architecture. Next, run the script, e.g.:
 
 .. code-block:: bash
 
-  $ echo '/usr/local' > hdf5.cfg
-  $ export FTYPE_DIR=/opt/freetype
+  $ bash Miniconda-3.3.0-Linux-x86_64.sh
 
-If you choose this installation method, you do not need to run the activation
-script as it is unnecessary.
-
-.. _testing-installation:
-
-Testing Your Installation
--------------------------
-
-To test to make sure everything is installed properly, try running yt at 
-the command line:
+Make sure that the Anaconda ``bin`` directory is in your path, and then issue:
 
 .. code-block:: bash
 
-  $ yt --help
+  $ conda install yt
 
-If this works, you should get a list of the various command-line options for
-yt, which means you have successfully installed yt.  Congratulations!  
+which will install yt along with all of its dependencies.
 
-If you get an error, follow the instructions it gives you to debug the problem.  
-Do not hesitate to :ref:`contact us <asking-for-help>` so we can help you 
-figure it out.
+.. _windows-installation:
 
-.. _updating-yt:
+Installing yt on Windows
+++++++++++++++++++++++++
 
-Updating yt and its dependencies
---------------------------------
-
-With many active developers, code development sometimes occurs at a furious 
-pace in yt.  To make sure you're using the latest version of the code, run
-this command at a command-line:
-
-.. code-block:: bash
-
-  $ yt update
-
-Additionally, if you want to make sure you have the latest dependencies 
-associated with yt and update the codebase simultaneously, type this:
-
-.. code-block:: bash
-
-  $ yt update --all
-
-.. _removing-yt:
-
-Removing yt and its dependencies
---------------------------------
-
-Because yt and its dependencies are installed in an isolated directory when
-you use the script installer, you can easily remove yt and all of its 
-dependencies cleanly.  Simply remove the install directory and its 
-subdirectories and you're done.  If you *really* had problems with the
-code, this is a last defense for solving: remove and then fully
-:ref:`re-install <installing-yt>` from the install script again.
+Installation on Microsoft Windows is only supported for Windows XP Service Pack 3 and
+higher (both 32-bit and 64-bit) using Anaconda.
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/5e8a1819b079/
Changeset:   5e8a1819b079
Branch:      yt-3.0
User:        jzuhone
Date:        2014-05-13 06:44:09
Summary:     A more sane way to handle duplicate field names that doesn't automatically error out
Affected #:  1 file

diff -r 8fb9452ba170a78947917fdb2ac27f31ea8a0af9 -r 5e8a1819b0796916815d3245c3bfa278d061b21d yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -136,6 +136,7 @@
         self._file_map = {}
         self._ext_map = {}
         self._scale_map = {}
+        dup_field_index = {}
         # Since FITS header keywords are case-insensitive, we only pick a subset of
         # prefixes, ones that we expect to end up in headers.
         known_units = dict([(unit.lower(),unit) for unit in self.pf.unit_registry.lut])
@@ -162,13 +163,19 @@
                         if fname is None: fname = "image_%d" % (j)
                     if self.pf.num_files > 1 and fname.startswith("image"):
                         fname += "_file_%d" % (i)
+                    if ("fits", fname) in self.field_list:
+                        if fname in dup_field_index:
+                            dup_field_index[fname] += 1
+                        else:
+                            dup_field_index[fname] = 1
+                        mylog.warning("This field has the same name as a previously loaded " +
+                                      "field. Changing the name from %s to %s_%d. To avoid " %
+                                      (fname, fname, dup_field_index[fname]) +
+                                      " this, change one of the BTYPE header keywords.")
+                        fname += "_%d" % (dup_field_index[fname])
                     for k in xrange(naxis4):
                         if naxis4 > 1:
                             fname += "_%s_%d" % (hdu.header["CTYPE4"], k+1)
-                        if fname in self.field_list:
-                            mylog.error("You have two fields with the same name. Change one of " +
-                                        "the names in the BTYPE header keyword to distinguish " +
-                                        "them.")
                         self._axis_map[fname] = k
                         self._file_map[fname] = fits_file
                         self._ext_map[fname] = j


https://bitbucket.org/yt_analysis/yt/commits/b960bc492dbb/
Changeset:   b960bc492dbb
Branch:      yt-3.0
User:        jzuhone
Date:        2014-05-13 23:32:14
Summary:     Merge
Affected #:  8 files

diff -r 5e8a1819b0796916815d3245c3bfa278d061b21d -r b960bc492dbbe88905b491058ee427211dcb342a doc/source/cookbook/aligned_cutting_plane.py
--- a/doc/source/cookbook/aligned_cutting_plane.py
+++ b/doc/source/cookbook/aligned_cutting_plane.py
@@ -3,10 +3,10 @@
 # Load the dataset.
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
-# Create a 1 kpc radius sphere, centered on the max density.  Note that this
-# sphere is very small compared to the size of our final plot, and it has a
-# non-axially aligned L vector.
-sp = ds.sphere("center", (15.0, "kpc"))
+# Create a 1 kpc radius sphere, centered on the maximum gas density.  Note
+# that this sphere is very small compared to the size of our final plot,
+# and it has a non-axially aligned L vector.
+sp = ds.sphere("m", (1.0, "kpc"))
 
 # Get the angular momentum vector for the sphere.
 L = sp.quantities.angular_momentum_vector()
@@ -14,5 +14,5 @@
 print "Angular momentum vector: {0}".format(L)
 
 # Create an OffAxisSlicePlot on the object with the L vector as its normal
-p = yt.OffAxisSlicePlot(ds, L, "density", sp.center, (25, "kpc"))
+p = yt.OffAxisSlicePlot(ds, L, "density", sp.center, (15, "kpc"))
 p.save()

diff -r 5e8a1819b0796916815d3245c3bfa278d061b21d -r b960bc492dbbe88905b491058ee427211dcb342a yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -139,12 +139,14 @@
             return
         elif isinstance(center, (types.ListType, types.TupleType, np.ndarray)):
             center = self.pf.arr(center, 'code_length')
-        elif center in ("c", "center"):
-            center = self.pf.domain_center
-        elif center == ("max"): # is this dangerous for race conditions?
-            center = self.pf.h.find_max("density")[1]
-        elif center.startswith("max_"):
-            center = self.pf.h.find_max(center[4:])[1]
+        elif isinstance(center, basestring):
+            if center.lower() in ("c", "center"):
+                center = self.pf.domain_center
+             # is this dangerous for race conditions?
+            elif center.lower() in ("max", "m"):
+                center = self.pf.h.find_max(("gas", "density"))[1]
+            elif center.startswith("max_"):
+                center = self.pf.h.find_max(center[4:])[1]
         else:
             center = np.array(center, dtype='float64')
         self.center = self.pf.arr(center, 'code_length')

diff -r 5e8a1819b0796916815d3245c3bfa278d061b21d -r b960bc492dbbe88905b491058ee427211dcb342a yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -447,12 +447,12 @@
         >>> write_image(np.log10(frb["Density"]), 'density_1pc.png')
         """
         if iterable(width):
-            assert_valid_width_tuple(width)
+            validate_width_tuple(width)
             width = self.pf.quan(width[0], width[1])
         if height is None:
             height = width
         elif iterable(height):
-            assert_valid_width_tuple(height)
+            validate_width_tuple(height)
             height = self.pf.quan(height[0], height[1])
         if not iterable(resolution):
             resolution = (resolution, resolution)

diff -r 5e8a1819b0796916815d3245c3bfa278d061b21d -r b960bc492dbbe88905b491058ee427211dcb342a yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -206,6 +206,7 @@
         else:
             self.data_software = "unknown"
         sp = self._handle["/simulation_parameters"].attrs
+        self.parameters.update(sp)
         self.domain_left_edge = sp["domain_left_edge"][:]
         self.domain_right_edge = sp["domain_right_edge"][:]
         self.domain_dimensions = sp["domain_dimensions"][:]

diff -r 5e8a1819b0796916815d3245c3bfa278d061b21d -r b960bc492dbbe88905b491058ee427211dcb342a yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -660,17 +660,14 @@
     if not os.path.exists(path):
         only_on_root(os.makedirs, path)
     return path
-        
-def assert_valid_width_tuple(width):
-    try:
-        assert iterable(width) and len(width) == 2, \
-            "width (%s) is not a two element tuple" % width
-        valid = isinstance(width[0], numeric_type) and isinstance(width[1], str)
+
+def validate_width_tuple(width):
+    if not iterable(width) or len(width) != 2:
+        raise YTInvalidWidthError("width (%s) is not a two element tuple" % width)
+    if not isinstance(width[0], numeric_type) and isinstance(width[1], basestring):
         msg = "width (%s) is invalid. " % str(width)
         msg += "Valid widths look like this: (12, 'au')"
-        assert valid, msg
-    except AssertionError, e:
-        raise YTInvalidWidthError(e)
+        raise YTInvalidWidthError(msg)
 
 def camelcase_to_underscore(name):
     s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)

diff -r 5e8a1819b0796916815d3245c3bfa278d061b21d -r b960bc492dbbe88905b491058ee427211dcb342a yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -1,3 +1,17 @@
+"""
+A base class for "image" plots with colorbars.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 import __builtin__
 import base64
 import numpy as np

diff -r 5e8a1819b0796916815d3245c3bfa278d061b21d -r b960bc492dbbe88905b491058ee427211dcb342a yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -17,8 +17,6 @@
 import matplotlib
 import types
 import sys
-import os
-from yt.extern.six.moves import builtins, StringIO
 import warnings
 
 from matplotlib.delaunay.triangulate import Triangulation as triang
@@ -39,12 +37,19 @@
     ImagePlotContainer, log_transform, linear_transform, \
     invalidate_data, invalidate_plot, apply_callback
 
+from yt.data_objects.time_series import \
+    DatasetSeries
+from yt.extern.six.moves import \
+    StringIO
 from yt.funcs import \
     mylog, iterable, ensure_list, \
-    fix_axis, assert_valid_width_tuple
-from yt.units.unit_object import Unit
+    fix_axis, validate_width_tuple
+from yt.units.unit_object import \
+    Unit
 from yt.units.unit_registry import \
-     UnitParseError
+    UnitParseError
+from yt.units.yt_array import \
+    YTArray, YTQuantity
 from yt.utilities.png_writer import \
     write_png_to_string
 from yt.utilities.definitions import \
@@ -57,10 +62,6 @@
     YTCannotParseUnitDisplayName, \
     YTUnitConversionError
 
-from yt.data_objects.time_series import \
-    DatasetSeries
-from yt.units.yt_array import YTArray, YTQuantity
-
 # Some magic for dealing with pyparsing being included or not
 # included in matplotlib (not in gentoo, yes in everything else)
 # Also accounting for the fact that in 1.2.0, pyparsing got renamed.
@@ -82,18 +83,10 @@
     else:
         return u
 
-def assert_valid_width_tuple(width):
-    if not iterable(width) or len(width) != 2:
-        raise YTInvalidWidthError("width (%s) is not a two element tuple" % width)
-    if not isinstance(width[0], Number) and isinstance(width[1], basestring):
-        msg = "width (%s) is invalid. " % str(width)
-        msg += "Valid widths look like this: (12, 'au')"
-        raise YTInvalidWidthError(msg)
-
 def validate_iterable_width(width, pf, unit=None):
     if isinstance(width[0], tuple) and isinstance(width[1], tuple):
-        assert_valid_width_tuple(width[0])
-        assert_valid_width_tuple(width[1])
+        validate_width_tuple(width[0])
+        validate_width_tuple(width[1])
         return (pf.quan(width[0][0], fix_unitary(width[0][1])),
                 pf.quan(width[1][0], fix_unitary(width[1][1])))
     elif isinstance(width[0], Number) and isinstance(width[1], Number):
@@ -102,11 +95,11 @@
     elif isinstance(width[0], YTQuantity) and isinstance(width[1], YTQuantity):
         return (pf.quan(width[0]), pf.quan(width[1]))
     else:
-        assert_valid_width_tuple(width)
+        validate_width_tuple(width)
         # If width and unit are both valid width tuples, we
         # assume width controls x and unit controls y
         try:
-            assert_valid_width_tuple(unit)
+            validate_width_tuple(unit)
             return (pf.quan(width[0], fix_unitary(width[1])),
                     pf.quan(unit[0], fix_unitary(unit[1])))
         except YTInvalidWidthError:
@@ -137,7 +130,7 @@
         raise YTInvalidWidthError(width)
     if depth is not None:
         if iterable(depth):
-            assert_valid_width_tuple(depth)
+            validate_width_tuple(depth)
             depth = (pf.quan(depth[0], fix_unitary(depth[1])), )
         elif isinstance(depth, Number):
             depth = (pf.quan(depth, 'code_length',
@@ -180,8 +173,8 @@
     elif pf.geometry == "spherical":
         if axis == 0:
             width = pf.domain_width[1], pf.domain_width[2]
-            center = 0.5*(pf.domain_left_edge +
-                pf.domain_right_edge).in_units("code_length")
+            center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
+            center.convert_to_units("code_length")
         else:
             # Our default width here is the full domain
             width = [pf.domain_right_edge[0]*2.0, pf.domain_right_edge[0]*2.0]
@@ -217,7 +210,8 @@
         mat = np.transpose(np.column_stack((perp1,perp2,normal)))
         center = np.dot(mat,center)
 
-    bounds = tuple( ( (2*(i%2))-1)*width[i//2]/2 for i in range(len(width)*2))
+    w = tuple(el.in_units('unitary') for el in width)
+    bounds = tuple(((2*(i % 2))-1)*w[i//2]/2 for i in range(len(w)*2))
 
     return (bounds, center)
 
@@ -343,10 +337,9 @@
             bounds = self.xlim+self.ylim
         if self._frb_generator is ObliqueFixedResolutionBuffer:
             bounds = np.array(bounds)
-        self.frb = self._frb_generator(self.data_source,
-                                        bounds, self.buff_size,
-                                        self.antialias,
-                                        periodic=self._periodic)
+
+        self.frb = self._frb_generator(self.data_source, bounds, self.buff_size,
+                                       self.antialias, periodic=self._periodic)
         if old_fields is None:
             self.frb._get_data_source_fields()
         else:
@@ -400,8 +393,7 @@
         if len(deltas) != 2:
             raise RuntimeError(
                 "The pan function accepts a two-element sequence.\n"
-                "Received %s." % (deltas, )
-                )
+                "Received %s." % (deltas, ))
         if isinstance(deltas[0], Number) and isinstance(deltas[1], Number):
             deltas = (self.pf.quan(deltas[0], 'code_length'),
                       self.pf.quan(deltas[1], 'code_length'))
@@ -413,8 +405,7 @@
         else:
             raise RuntimeError(
                 "The arguments of the pan function must be a sequence of floats,\n"
-                "quantities, or (float, unit) tuples. Received %s." % (deltas, )
-                )
+                "quantities, or (float, unit) tuples. Received %s." % (deltas, ))
         self.xlim = (self.xlim[0] + deltas[0], self.xlim[1] + deltas[0])
         self.ylim = (self.ylim[0] + deltas[1], self.ylim[1] + deltas[1])
         return self
@@ -480,10 +471,10 @@
             self.ylim = tuple(bounds[2:4])
             if len(bounds) == 6:
                 self.zlim = tuple(bounds[4:6])
-        mylog.info("xlim = %f %f" %self.xlim)
-        mylog.info("ylim = %f %f" %self.ylim)
+        mylog.info("xlim = %f %f" % self.xlim)
+        mylog.info("ylim = %f %f" % self.ylim)
         if hasattr(self,'zlim'):
-            mylog.info("zlim = %f %f" %self.zlim)
+            mylog.info("zlim = %f %f" % self.zlim)
 
     @invalidate_data
     def set_width(self, width, unit = None):
@@ -634,12 +625,11 @@
         Examples
         --------
 
-        >>> p = ProjectionPlot(pf, "y", "density")
-        >>> p.show()
+        >>> from yt import load
+        >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+        >>> p = ProjectionPlot(ds, "y", "Density")
         >>> p.set_axes_unit("kpc")
-        >>> p.show()
-        >>> p.set_axes_unit(None)
-        >>> p.show()
+
         """
         # blind except because it could be in conversion_factors or units
         if unit_name is not None:
@@ -694,8 +684,8 @@
             xllim, xrlim = self.xlim
             yllim, yrlim = self.ylim
         elif origin[2] == 'domain':
-            xax = pf.coordinates.x_axis[axis_index]
-            yax = pf.coordinates.y_axis[axis_index]
+            xax = self.pf.coordinates.x_axis[axis_index]
+            yax = self.pf.coordinates.y_axis[axis_index]
             xllim = self.pf.domain_left_edge[xax]
             xrlim = self.pf.domain_right_edge[xax]
             yllim = self.pf.domain_left_edge[yax]
@@ -706,8 +696,8 @@
         else:
             mylog.warn("origin = {0}".format(origin))
             msg = \
-              ('origin keyword "{0}" not recognized, must declare "domain" '
-               'or "center" as the last term in origin.').format(self.origin)
+                ('origin keyword "{0}" not recognized, must declare "domain" '
+                 'or "center" as the last term in origin.').format(self.origin)
             raise RuntimeError(msg)
 
         if origin[0] == 'lower':
@@ -756,7 +746,8 @@
             # This will likely be replaced at some point by the coordinate handler
             # setting plot aspect.
             if self.aspect is None:
-                self.aspect = np.float64(self.pf.quan(1.0, unit_y)/(self.pf.quan(1.0, unit_x)))
+                self.aspect = np.float64(self.pf.quan(1.0, unit_y) /
+                                         self.pf.quan(1.0, unit_x))
 
             extentx = [(self.xlim[i] - xc).in_units(unit_x) for i in (0, 1)]
             extenty = [(self.ylim[i] - yc).in_units(unit_y) for i in (0, 1)]
@@ -771,11 +762,10 @@
             image = self.frb[f]
 
             if image.max() == image.min():
-              if self._field_transform[f] == log_transform:
-                mylog.warning("Plot image for field %s has zero dynamic " \
-                              "range. Min = Max = %d." % \
-                              (f, image.max()))
-                mylog.warning("Switching to linear colorbar scaling.")
+                if self._field_transform[f] == log_transform:
+                    mylog.warning("Plot image for field %s has zero dynamic "
+                                  "range. Min = Max = %d." % (f, image.max()))
+                    mylog.warning("Switching to linear colorbar scaling.")
                 self._field_transform[f] = linear_transform
 
             fp = self._font_properties
@@ -893,10 +883,9 @@
             if self._font_color is not None:
                 ax = self.plots[f].axes
                 cbax = self.plots[f].cb.ax
-                labels = \
-                  ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels() + \
-                  cbax.yaxis.get_ticklabels() + \
-                  [ax.xaxis.label, ax.yaxis.label, cbax.yaxis.label]
+                labels = ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels()
+                labels += cbax.yaxis.get_ticklabels()
+                labels += [ax.xaxis.label, ax.yaxis.label, cbax.yaxis.label]
                 for label in labels:
                     label.set_color(self._font_color)
 
@@ -1013,8 +1002,9 @@
 
     This will save an image the the file 'sliceplot_Density
 
-    >>> pf = load('galaxy0030/galaxy0030')
-    >>> p = SlicePlot(pf,2,'Density','c',(20,'kpc'))
+    >>> from yt import load
+    >>> ds = load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    >>> p = SlicePlot(ds, 2, 'density', 'c', (20, 'kpc'))
     >>> p.save('sliceplot')
 
     """
@@ -1138,11 +1128,12 @@
     Examples
     --------
 
-    This is a very simple way of creating a projection plot.
+    Create a projection plot with a width of 20 kiloparsecs centered on the
+    center of the simulation box:
 
-    >>> pf = load('galaxy0030/galaxy0030')
-    >>> p = ProjectionPlot(pf,2,'Density','c',(20,'kpc'))
-    >>> p.save('sliceplot')
+    >>> from yt import load
+    >>> ds = load('IsolateGalaxygalaxy0030/galaxy0030')
+    >>> p = ProjectionPlot(ds, "z", "density", width=(20, "kpc"))
 
     """
     _plot_type = 'Projection'
@@ -1159,8 +1150,8 @@
         (bounds, center) = get_window_parameters(axis, center, width, pf)
         if field_parameters is None: field_parameters = {}
         proj = pf.proj(fields, axis, weight_field=weight_field,
-                         center=center, data_source=data_source,
-                         field_parameters = field_parameters, style = proj_style)
+                       center=center, data_source=data_source,
+                       field_parameters = field_parameters, style = proj_style)
         PWViewerMPL.__init__(self, proj, bounds, fields=fields, origin=origin,
                              fontsize=fontsize, window_size=window_size, aspect=aspect)
         if axes_unit is None:
@@ -1409,14 +1400,14 @@
         else:
             fields = self.frb.data.keys()
             addl_keys = {}
-        if self._colorbar_valid == False:
+        if self._colorbar_valid is False:
             addl_keys['colorbar_image'] = self._get_cbar_image()
             self._colorbar_valid = True
         min_zoom = 200*self.pf.index.get_smallest_dx() * self.pf['unitary']
         for field in fields:
             to_plot = apply_colormap(self.frb[field],
-                func = self._field_transform[field],
-                cmap_name = self._colormaps[field])
+                                     func = self._field_transform[field],
+                                     cmap_name = self._colormaps[field])
             pngs = self._apply_modifications(to_plot)
             img_data = base64.b64encode(pngs)
             # We scale the width between 200*min_dx and 1.0
@@ -1482,7 +1473,7 @@
         nx = self.frb.buff_size[0]/skip
         ny = self.frb.buff_size[1]/skip
         new_frb = FixedResolutionBuffer(self.frb.data_source,
-                        self.frb.bounds, (nx,ny))
+                                        self.frb.bounds, (nx,ny))
 
         axis = self.frb.data_source.axis
         xax = self.frb.data_source.pf.coordinates.x_axis[axis]
@@ -1543,17 +1534,16 @@
         self.set_center((new_x, new_y))
 
     def get_field_units(self, field, strip_mathml = True):
-        ds = self.frb.data_source
-        pf = self.pf
+        source = self.data_source
         field = self._check_field(field)
-        finfo = self.data_source.pf._get_field_info(*field)
-        if ds._type_name in ("slice", "cutting"):
+        finfo = source.pf._get_field_info(*field)
+        if source._type_name in ("slice", "cutting"):
             units = finfo.get_units()
-        elif ds._type_name == "proj" and (ds.weight_field is not None or 
-                                        ds.proj_style == "mip"):
-            units = finfo.get_units()
-        elif ds._type_name == "proj":
-            units = finfo.get_projected_units()
+        elif source._type_name == "proj":
+            if source.weight_field is not None or source.proj_style in ("mip", "sum"):
+                units = finfo.get_units()
+            else:
+                units = finfo.get_projected_units()
         else:
             units = ""
         if strip_mathml:
@@ -1686,7 +1676,7 @@
     axis : int or one of 'x', 'y', 'z'
          An int corresponding to the axis to slice along (0=x, 1=y, 2=z)
          or the axis name itself.  If specified, this will replace normal.
-         
+
     The following are nominally keyword arguments passed onto the respective
     slice plot objects generated by this function.
 
@@ -1772,10 +1762,12 @@
     Examples
     --------
 
-    >>> slc = SlicePlot(pf, "x", "Density", center=[0.2,0.3,0.4])
-    >>> slc = SlicePlot(pf, 2, "Temperature")
-    >>> slc = SlicePlot(pf, [0.4,0.2,-0.1], "Pressure",
-                        north_vector=[0.2,-0.3,0.1])
+    >>> from yt import load
+    >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    >>> slc = SlicePlot(ds, "x", "density", center=[0.2,0.3,0.4])
+    >>>
+    >>> slc = SlicePlot(ds, [0.4, 0.2, -0.1], "pressure",
+    ...                 north_vector=[0.2,-0.3,0.1])
 
     """
     # Make sure we are passed a normal
@@ -1797,23 +1789,23 @@
         else:
             normal = np.array(normal)
             np.divide(normal, np.dot(normal,normal), normal)
-        
+
     # by now the normal should be properly set to get either a On/Off Axis plot
     if iterable(normal) and not isinstance(normal, basestring):
         # OffAxisSlicePlot has hardcoded origin; remove it if in kwargs
-        if 'origin' in kwargs: 
+        if 'origin' in kwargs:
             msg = "Ignoring 'origin' keyword as it is ill-defined for " \
                   "an OffAxisSlicePlot object."
             mylog.warn(msg)
             del kwargs['origin']
-        
+
         return OffAxisSlicePlot(pf, normal, fields, *args, **kwargs)
     else:
         # north_vector not used in AxisAlignedSlicePlots; remove it if in kwargs
-        if 'north_vector' in kwargs: 
+        if 'north_vector' in kwargs:
             msg = "Ignoring 'north_vector' keyword as it is ill-defined for " \
                   "an AxisAlignedSlicePlot object."
             mylog.warn(msg)
             del kwargs['north_vector']
-        
+
         return AxisAlignedSlicePlot(pf, normal, fields, *args, **kwargs)

diff -r 5e8a1819b0796916815d3245c3bfa278d061b21d -r b960bc492dbbe88905b491058ee427211dcb342a yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -16,6 +16,7 @@
 
 import __builtin__
 import base64
+import os
 import types
 
 from functools import wraps
@@ -230,7 +231,8 @@
             The output file keyword.
         
         """
-        if not self._plot_valid: self._setup_plots()
+        if not self._plot_valid:
+            self._setup_plots()
         unique = set(self.figures.values())
         if len(unique) < len(self.figures):
             figiter = izip(xrange(len(unique)), sorted(unique))
@@ -677,9 +679,11 @@
             cax = None
             draw_colorbar = True
             draw_axes = True
+            zlim = (None, None)
             if f in self.plots:
                 draw_colorbar = self.plots[f]._draw_colorbar
                 draw_axes = self.plots[f]._draw_axes
+                zlim = (self.plots[f].zmin, self.plots[f].zmax)
                 if self.plots[f].figure is not None:
                     fig = self.plots[f].figure
                     axes = self.plots[f].axes
@@ -688,13 +692,14 @@
             x_scale, y_scale, z_scale = self._get_field_log(f, self.profile)
             x_title, y_title, z_title = self._get_field_title(f, self.profile)
 
-            if z_scale == 'log':
-                zmin = data[data > 0.0].min()
-                self._field_transform[f] = log_transform
-            else:
-                zmin = data.min()
-                self._field_transform[f] = linear_transform
-            zlim = [zmin, data.max()]
+            if zlim == (None, None):
+                if z_scale == 'log':
+                    zmin = data[data > 0.0].min()
+                    self._field_transform[f] = log_transform
+                else:
+                    zmin = data.min()
+                    self._field_transform[f] = linear_transform
+                zlim = [zmin, data.max()]
 
             fp = self._font_properties
             f = self.profile.data_source._determine_fields(f)[0]
@@ -740,9 +745,11 @@
         >>> plot.save(mpl_kwargs={'bbox_inches':'tight'})
         
         """
-
-        if not self._plot_valid: self._setup_plots()
-        if mpl_kwargs is None: mpl_kwargs = {}
+        names = []
+        if not self._plot_valid:
+            self._setup_plots()
+        if mpl_kwargs is None:
+            mpl_kwargs = {}
         xfn = self.profile.x_field
         yfn = self.profile.y_field
         if isinstance(xfn, types.TupleType):
@@ -751,17 +758,28 @@
             yfn = yfn[1]
         for f in self.profile.field_data:
             _f = f
-            if isinstance(f, types.TupleType): _f = _f[1]
+            if isinstance(f, types.TupleType):
+                _f = _f[1]
             middle = "2d-Profile_%s_%s_%s" % (xfn, yfn, _f)
+            if name[-1] == os.sep and not os.path.isdir(name):
+                os.mkdir(name)
             if name is None:
                 prefix = self.profile.pf
-                name = "%s.png" % prefix
+            elif os.path.isdir(name) and name != str(self.profile.pf):
+                prefix = name + (os.sep if name[-1] != os.sep else '')
+                prefix += str(self.profile.pf)
+            else:
+                prefix = name
             suffix = get_image_suffix(name)
-            prefix = name[:name.rfind(suffix)]
+            if suffix != '':
+                for k, v in self.plots.iteritems():
+                    names.append(v.save(name, mpl_kwargs))
+                return names
+
             fn = "%s_%s%s" % (prefix, middle, suffix)
-            if not suffix:
-                suffix = ".png"
+            names.append(fn)
             self.plots[f].save(fn, mpl_kwargs)
+        return names
 
     @invalidate_plot
     def set_title(self, field, title):


https://bitbucket.org/yt_analysis/yt/commits/bbbffb1c6ce6/
Changeset:   bbbffb1c6ce6
Branch:      yt-3.0
User:        jzuhone
Date:        2014-05-14 15:49:45
Summary:     First pass at restoring pixel widths to the spectral axis
Affected #:  6 files

diff -r b960bc492dbbe88905b491058ee427211dcb342a -r bbbffb1c6ce6faba5609f02ef04473a4a53fd38e doc/source/cookbook/fits_radio_cubes.ipynb
--- a/doc/source/cookbook/fits_radio_cubes.ipynb
+++ b/doc/source/cookbook/fits_radio_cubes.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:dbc41f6f836cdeb88a549d85e389d6e4e43d163d8c4c267baea8cce0ebdbf441"
+  "signature": "sha256:fd81595bf17660628bae6ef6838a903226fba834e785f811f7f97c88092c37b0"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -109,14 +109,14 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "We can also take slices of this dataset at a few different values along the \"z\" axis (corresponding to the velocity), so let's try a few. First, we'll check what the value along the velocity axis at the domain center is, as well as the range of possible values. This is the third value of each array. "
+      "We can also take slices of this dataset at a few different values along the \"z\" axis (corresponding to the velocity), so let's try a few. To pick specific velocity values for slices, we will need to use the dataset's WCS information to determine which pixel values correspond to these slices. First, we'll pick the reference RA and Dec values from the WCS:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "print ds.domain_left_edge[2], ds.domain_center[2], ds.domain_right_edge[2]"
+      "ra, dec = ds.wcs.wcs.crval[:2]"
      ],
      "language": "python",
      "metadata": {},
@@ -126,15 +126,50 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Now, we'll choose a few values for the velocity within this range:"
+      "Second, we'll find the slice pixel using `wcs_world2pix`, were we pick a new velocity value:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "new_center = ds.domain_center \n",
-      "new_center[2] = -250000.\n",
+      "new_center = ds.domain_center\n",
+      "new_center[2] = ds.wcs.wcs_world2pix([[ra,dec,-250000.]], 1)[0][2]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now we can use this new center to create a new slice:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can do this a few more times for different values of the velocity:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "new_center[2] = ds.wcs.wcs_world2pix([[ra,dec,-100000.]], 1)[0][2]\n",
       "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
       "slc.show()"
      ],
@@ -146,21 +181,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "new_center = ds.domain_center \n",
-      "new_center[2] = -100000.\n",
-      "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
-      "slc.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "new_center = ds.domain_center \n",
-      "new_center[2] = -150000.\n",
+      "new_center[2] = ds.wcs.wcs_world2pix([[ra,dec,-150000.]], 1)[0][2]\n",
       "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
       "slc.show()"
      ],
@@ -179,14 +200,14 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "We can also make a projection of all the emission along the line of sight:"
+      "We can also make a projection of all the emission along the line of sight. Since we're not doing an integration along a path length, we needed to specify `proj_style = \"sum\"`:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "prj = yt.ProjectionPlot(ds, \"z\", [\"intensity\"], origin=\"native\", proj_style=\"sum\")\n",
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"intensity\"], proj_style=\"sum\", origin=\"native\")\n",
       "prj.show()"
      ],
      "language": "python",
@@ -197,13 +218,6 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Since we're not doing an integration along a path length, we needed to specify `proj_style = \"sum\"`. "
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
       "We can also look at the slices perpendicular to the other axes, which will show us the structure along the velocity axis:"
      ]
     },

diff -r b960bc492dbbe88905b491058ee427211dcb342a -r bbbffb1c6ce6faba5609f02ef04473a4a53fd38e yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -217,7 +217,7 @@
         # If nprocs > 1, decompose the domain into virtual grids
         if self.num_grids > 1:
             if self.pf.z_axis_decomp:
-                dz = (pf.domain_width/pf.domain_dimensions)[2]
+                dz = pf.quan(1.0, "code_length")
                 self.grid_dimensions[:,2] = np.around(float(pf.domain_dimensions[2])/
                                                             self.num_grids).astype("int")
                 self.grid_dimensions[-1,2] += (pf.domain_dimensions[2] % self.num_grids)
@@ -406,13 +406,23 @@
             self.events_data = False
             self.first_image = 0
             self.primary_header = self._handle[self.first_image].header
-            self.wcs = _astropy.pywcs.WCS(header=self.primary_header)
             self.naxis = self.primary_header["naxis"]
             self.axis_names = [self.primary_header["ctype%d" % (i+1)]
                                for i in xrange(self.naxis)]
             self.dims = [self.primary_header["naxis%d" % (i+1)]
                          for i in xrange(self.naxis)]
 
+            wcs = _astropy.pywcs.WCS(header=self.primary_header)
+            if self.naxis == 4:
+                self.wcs = _astropy.pywcs.WCS(naxis=3)
+                self.wcs.wcs.crpix = wcs.wcs.crpix[:3]
+                self.wcs.wcs.cdelt = wcs.wcs.cdelt[:3]
+                self.wcs.wcs.crval = wcs.wcs.crval[:3]
+                self.wcs.wcs.cunit = [str(unit) for unit in wcs.wcs.cunit][:3]
+                self.wcs.wcs.ctype = [type for type in wcs.wcs.ctype][:3]
+            else:
+                self.wcs = wcs
+
         self.refine_by = 2
 
         Dataset.__init__(self, fn, dataset_type)
@@ -578,29 +588,28 @@
             self.wcs_2d.wcs.ctype = [self.wcs.wcs.ctype[self.lon_axis],
                                      self.wcs.wcs.ctype[self.lat_axis]]
 
-            x0 = self.wcs.wcs.crpix[self.vel_axis]
-            dz = self.wcs.wcs.cdelt[self.vel_axis]
-            z0 = self.wcs.wcs.crval[self.vel_axis]
+            self._p0 = self.wcs.wcs.crpix[self.vel_axis]
+            self._dz = self.wcs.wcs.cdelt[self.vel_axis]
+            self._z0 = self.wcs.wcs.crval[self.vel_axis]
             self.vel_unit = str(self.wcs.wcs.cunit[self.vel_axis])
 
-            if dz < 0.0:
-                self.reversed = True
-                le = self.dims[self.vel_axis]+0.5
-                re = 0.5
-            else:
-                le = 0.5
-                re = self.dims[self.vel_axis]+0.5
-            self.domain_left_edge[self.vel_axis] = (le-x0)*dz + z0
-            self.domain_right_edge[self.vel_axis] = (re-x0)*dz + z0
-            if self.reversed: dz *= -1
-
             if self.line_width is not None:
+                if self._dz < 0.0:
+                    self.reversed = True
+                    le = self.dims[self.vel_axis]+0.5
+                else:
+                    le = 0.5
                 self.line_width = self.line_width.in_units(self.vel_unit)
-                self.freq_begin = self.domain_left_edge[self.vel_axis]
-                nz = np.rint(self.line_width.value/dz).astype("int")
-                self.line_width = dz*nz
-                self.domain_left_edge[self.vel_axis] = -self.line_width/2.
-                self.domain_right_edge[self.vel_axis] = self.line_width/2.
+                self.freq_begin = (le-self._p0)*self._dz + self._z0
+                # We now reset these so that they are consistent
+                # with the new setup
+                self._dz = np.abs(self._dz)
+                self._p0 = 0.0
+                self._z0 = 0.0
+                nz = np.rint(self.line_width.value/self._dz).astype("int")
+                self.line_width = self._dz*nz
+                self.domain_left_edge[self.vel_axis] = -0.5*float(nz)
+                self.domain_right_edge[self.vel_axis] = 0.5*float(nz)
                 self.domain_dimensions[self.vel_axis] = nz
 
         else:

diff -r b960bc492dbbe88905b491058ee427211dcb342a -r bbbffb1c6ce6faba5609f02ef04473a4a53fd38e yt/frontends/fits/fields.py
--- a/yt/frontends/fits/fields.py
+++ b/yt/frontends/fits/fields.py
@@ -44,7 +44,8 @@
         if self.pf.dimensionality == 3:
             def _vel_los(field, data):
                 axis = "xyz"[data.pf.vel_axis]
-                return data.pf.arr(data[axis].ndarray_view(),data.pf.vel_unit)
+                vz = (data[axis].ndarray_view()-self.pf._p0)*self.pf._dz + self.pf._z0
+                return data.pf.arr(vz, data.pf.vel_unit)
             self.add_field(("fits",self.pf.vel_name), function=_vel_los,
                            units=self.pf.vel_unit)
 

diff -r b960bc492dbbe88905b491058ee427211dcb342a -r bbbffb1c6ce6faba5609f02ef04473a4a53fd38e yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -26,8 +26,10 @@
         self._handle = pf._handle
         if self.pf.line_width is not None:
             self.line_db = self.pf.line_database
+            self.dz = self.pf.line_width/self.domain_dimensions[self.pf.vel_axis]
         else:
             self.line_db = None
+            self.dz = 1.
 
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
@@ -92,7 +94,7 @@
                     if self.line_db is not None and fname in self.line_db:
                         my_off = self.line_db.get(fname).in_units(self.pf.vel_unit).value
                         my_off = my_off - 0.5*self.pf.line_width
-                        my_off = int((my_off-self.pf.freq_begin)/dx[self.pf.vel_axis].value)
+                        my_off = int((my_off-self.pf.freq_begin)/self.dz)
                         my_off = max(my_off, 0)
                         my_off = min(my_off, self.pf.dims[self.pf.vel_axis]-1)
                         start[self.pf.vel_axis] += my_off

diff -r b960bc492dbbe88905b491058ee427211dcb342a -r bbbffb1c6ce6faba5609f02ef04473a4a53fd38e yt/geometry/ppv_coordinates.py
--- a/yt/geometry/ppv_coordinates.py
+++ b/yt/geometry/ppv_coordinates.py
@@ -45,6 +45,14 @@
         self.default_unit_label[pf.lat_axis] = "pixel"
         self.default_unit_label[pf.vel_axis] = pf.vel_unit
 
+        def _vel_axis(ax, x, y):
+            p = (x,y)[ax]
+            return [(pp.value-self.pf._p0)*self.pf._dz+self.pf._z0
+                    for pp in p]
+
+        self.axis_field = {}
+        self.axis_field[self.pf.vel_axis] = _vel_axis
+
     def convert_to_cylindrical(self, coord):
         raise NotImplementedError
 

diff -r b960bc492dbbe88905b491058ee427211dcb342a -r bbbffb1c6ce6faba5609f02ef04473a4a53fd38e yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -734,6 +734,8 @@
             axis_index = self.data_source.axis
 
             xc, yc = self._setup_origin()
+            xax = self.pf.coordinates.x_axis[axis_index]
+            yax = self.pf.coordinates.y_axis[axis_index]
 
             if self._axes_unit_names is None:
                 unit = get_smallest_appropriate_unit(
@@ -830,8 +832,6 @@
                           r'$\rm{Image\/y'+axes_unit_labels[1]+'}$']
             else:
                 axis_names = self.pf.coordinates.axis_name
-                xax = self.pf.coordinates.x_axis[axis_index]
-                yax = self.pf.coordinates.y_axis[axis_index]
                 if hasattr(self.pf.coordinates, "axis_default_unit_label"):
                     axes_unit_labels = [self.pf.coordinates.axis_default_unit_name[xax],
                                         self.pf.coordinates.axis_default_unit_name[yax]]
@@ -841,6 +841,17 @@
             self.plots[f].axes.set_xlabel(labels[0],fontproperties=fp)
             self.plots[f].axes.set_ylabel(labels[1],fontproperties=fp)
 
+            if hasattr(self.pf.coordinates, "axis_field"):
+                if xax in self.pf.coordinates.axis_field:
+                    xmin, xmax = self.pf.coordinates.axis_field[xax](0, self.xlim, self.ylim)
+                else:
+                    xmin, xmax = [float(x) for x in extentx]
+                if yax in self.pf.coordinates.axis_field:
+                    ymin, ymax = self.pf.coordinates.axis_field[yax](1, self.xlim, self.ylim)
+                else:
+                    ymin, ymax = [float(y) for y in extenty]
+                self.plots[f].image.set_extent((xmin,xmax,ymin,ymax))
+
             for label in (self.plots[f].axes.get_xticklabels() +
                           self.plots[f].axes.get_yticklabels() +
                           [self.plots[f].axes.xaxis.get_offset_text(),


https://bitbucket.org/yt_analysis/yt/commits/44cefa9de45d/
Changeset:   44cefa9de45d
Branch:      yt-3.0
User:        jzuhone
Date:        2014-05-14 16:40:19
Summary:     Controlling the aspect of the spectral axis with spectral_factor
Affected #:  1 file

diff -r bbbffb1c6ce6faba5609f02ef04473a4a53fd38e -r 44cefa9de45d7e862b4b057648bd46aa2c1424a1 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -217,7 +217,7 @@
         # If nprocs > 1, decompose the domain into virtual grids
         if self.num_grids > 1:
             if self.pf.z_axis_decomp:
-                dz = pf.quan(1.0, "code_length")
+                dz = pf.quan(1.0, "code_length")*pf.spectral_factor
                 self.grid_dimensions[:,2] = np.around(float(pf.domain_dimensions[2])/
                                                             self.num_grids).astype("int")
                 self.grid_dimensions[-1,2] += (pf.domain_dimensions[2] % self.num_grids)
@@ -310,6 +310,7 @@
                  nprocs = None,
                  storage_filename = None,
                  nan_mask = None,
+                 spectral_factor = 1.0,
                  z_axis_decomp = False,
                  line_database = None,
                  line_width = None,
@@ -322,10 +323,13 @@
         self.specified_parameters = parameters
 
         self.z_axis_decomp = z_axis_decomp
+        self.spectral_factor = spectral_factor
 
         if line_width is not None:
             self.line_width = YTQuantity(line_width[0], line_width[1])
             self.line_units = line_width[1]
+            mylog.info("For line folding, spectral_factor = 1.0")
+            self.spectral_factor = 1.0
         else:
             self.line_width = None
 
@@ -363,8 +367,8 @@
                 else:
                     fn = os.path.join(ytcfg.get("yt","test_data_dir"),fits_file)
                 f = _astropy.pyfits.open(fn, memmap=True,
-                                   do_not_scale_image_data=True,
-                                   ignore_blank=True)
+                                         do_not_scale_image_data=True,
+                                         ignore_blank=True)
                 self._fits_files.append(f)
 
         if len(self._handle) > 1 and self._handle[1].name == "EVENTS":
@@ -611,6 +615,10 @@
                 self.domain_left_edge[self.vel_axis] = -0.5*float(nz)
                 self.domain_right_edge[self.vel_axis] = 0.5*float(nz)
                 self.domain_dimensions[self.vel_axis] = nz
+            else:
+                Dz = self.domain_right_edge[self.vel_axis]-self.domain_left_edge[self.vel_axis]
+                self.domain_right_edge[self.vel_axis] = self.domain_left_edge[self.vel_axis] + \
+                                                        self.spectral_factor*Dz
 
         else:
 


https://bitbucket.org/yt_analysis/yt/commits/4614f5901e02/
Changeset:   4614f5901e02
Branch:      yt-3.0
User:        jzuhone
Date:        2014-05-15 16:38:05
Summary:     Further implementation of making the spectral axis in pixel units, and including a spectral_factor.

Also, changed the naming conventions to reflect the more general name "spectral" rather than "velocity" for the spectral axis.
Affected #:  9 files

diff -r 44cefa9de45d7e862b4b057648bd46aa2c1424a1 -r 4614f5901e02e71b17bd61e028814494bc969427 doc/source/cookbook/fits_radio_cubes.ipynb
--- a/doc/source/cookbook/fits_radio_cubes.ipynb
+++ b/doc/source/cookbook/fits_radio_cubes.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:fd81595bf17660628bae6ef6838a903226fba834e785f811f7f97c88092c37b0"
+  "signature": "sha256:7048fd20080135148a69c33e08b33d551b43b0462d1fe5298bddeac93f8fa050"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -23,7 +23,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "This notebook demonstrates some of the capabilties of `yt` on some FITS \"position-position-velocity\" cubes of radio data. "
+      "This notebook demonstrates some of the capabilties of `yt` on some FITS \"position-position-spectrum\" cubes of radio data. "
      ]
     },
     {
@@ -109,32 +109,16 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "We can also take slices of this dataset at a few different values along the \"z\" axis (corresponding to the velocity), so let's try a few. To pick specific velocity values for slices, we will need to use the dataset's WCS information to determine which pixel values correspond to these slices. First, we'll pick the reference RA and Dec values from the WCS:"
+      "We can also take slices of this dataset at a few different values along the \"z\" axis (corresponding to the velocity), so let's try a few. To pick specific velocity values for slices, we will need to use the dataset's `spec2pixel` method to determine which pixels to slice on:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ra, dec = ds.wcs.wcs.crval[:2]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Second, we'll find the slice pixel using `wcs_world2pix`, were we pick a new velocity value:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
+      "import astropy.units as u\n",
       "new_center = ds.domain_center\n",
-      "new_center[2] = ds.wcs.wcs_world2pix([[ra,dec,-250000.]], 1)[0][2]"
+      "new_center[2] = ds.spec2pixel(-250000.*u.m/u.s)"
      ],
      "language": "python",
      "metadata": {},
@@ -169,7 +153,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "new_center[2] = ds.wcs.wcs_world2pix([[ra,dec,-100000.]], 1)[0][2]\n",
+      "new_center[2] = ds.spec2pixel(-100000.*u.m/u.s)\n",
+      "print new_center[2]\n",
       "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
       "slc.show()"
      ],
@@ -181,7 +166,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "new_center[2] = ds.wcs.wcs_world2pix([[ra,dec,-150000.]], 1)[0][2]\n",
+      "new_center[2] = ds.spec2pixel(-150000.*u.m/u.s)\n",
       "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
       "slc.show()"
      ],
@@ -225,8 +210,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "slc = yt.SlicePlot(ds, \"x\", [\"intensity\"], origin=\"native\", \n",
-      "                   aspect=\"auto\", window_size=(8.0,8.0))\n",
+      "slc = yt.SlicePlot(ds, \"x\", [\"intensity\"], origin=\"native\")\n",
       "slc.show()"
      ],
      "language": "python",
@@ -237,8 +221,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "slc = yt.SlicePlot(ds, \"y\", [\"intensity\"], origin=\"native\", \n",
-      "                   aspect=\"auto\", window_size=(8.0,8.0))\n",
+      "slc = yt.SlicePlot(ds, \"y\", [\"intensity\"], origin=\"native\")\n",
       "slc.show()"
      ],
      "language": "python",
@@ -476,6 +459,34 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "(ds.domain_right_edge[2].v-ds._p0)*ds._dz + ds._z0"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds._z0"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}

diff -r 44cefa9de45d7e862b4b057648bd46aa2c1424a1 -r 4614f5901e02e71b17bd61e028814494bc969427 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -55,8 +55,8 @@
     SphericalCoordinateHandler
 from yt.geometry.geographic_coordinates import \
     GeographicCoordinateHandler
-from yt.geometry.ppv_coordinates import \
-    PPVCoordinateHandler
+from yt.geometry.pps_coordinates import \
+    PPSCoordinateHandler
 
 # We want to support the movie format in the future.
 # When such a thing comes to pass, I'll move all the stuff that is constant up
@@ -361,8 +361,8 @@
             self.coordinates = SphericalCoordinateHandler(self)
         elif self.geometry == "geographic":
             self.coordinates = GeographicCoordinateHandler(self)
-        elif self.geometry == "ppv":
-            self.coordinates = PPVCoordinateHandler(self)
+        elif self.geometry == "pps":
+            self.coordinates = PPSCoordinateHandler(self)
         else:
             raise YTGeometryNotSupported(self.geometry)
 

diff -r 44cefa9de45d7e862b4b057648bd46aa2c1424a1 -r 4614f5901e02e71b17bd61e028814494bc969427 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -44,11 +44,15 @@
 
 lon_prefixes = ["X","RA","GLON"]
 lat_prefixes = ["Y","DEC","GLAT"]
-vel_prefixes = ["V","ENER","FREQ","WAV"]
 delimiters = ["*", "/", "-", "^"]
 delimiters += [str(i) for i in xrange(10)]
 regex_pattern = '|'.join(re.escape(_) for _ in delimiters)
 
+spec_names = {"V":"Velocity",
+              "FREQ":"Frequency",
+              "ENER":"Energy",
+              "WAV":"Wavelength"}
+
 field_from_unit = {"Jy":"intensity",
                    "K":"temperature"}
 
@@ -234,7 +238,7 @@
                 dims = np.array(pf.domain_dimensions)
                 # If we are creating a dataset of lines, only decompose along the position axes
                 if len(pf.line_database) > 0:
-                    dims[pf.vel_axis] = 1
+                    dims[pf.spec_axis] = 1
                 psize = get_psize(dims, self.num_grids)
                 gle, gre, shapes, slices = decompose_array(dims, psize, bbox)
                 self.grid_left_edge = self.pf.arr(gle, "code_length")
@@ -242,9 +246,9 @@
                 self.grid_dimensions = np.array([shape for shape in shapes], dtype="int32")
                 # If we are creating a dataset of lines, only decompose along the position axes
                 if len(pf.line_database) > 0:
-                    self.grid_left_edge[:,pf.vel_axis] = pf.domain_left_edge[pf.vel_axis]
-                    self.grid_right_edge[:,pf.vel_axis] = pf.domain_right_edge[pf.vel_axis]
-                    self.grid_dimensions[:,pf.vel_axis] = pf.domain_dimensions[pf.vel_axis]
+                    self.grid_left_edge[:,pf.spec_axis] = pf.domain_left_edge[pf.spec_axis]
+                    self.grid_right_edge[:,pf.spec_axis] = pf.domain_right_edge[pf.spec_axis]
+                    self.grid_dimensions[:,pf.spec_axis] = pf.domain_dimensions[pf.spec_axis]
         else:
             self.grid_left_edge[0,:] = pf.domain_left_edge
             self.grid_right_edge[0,:] = pf.domain_right_edge
@@ -310,7 +314,7 @@
                  nprocs = None,
                  storage_filename = None,
                  nan_mask = None,
-                 spectral_factor = 1.0,
+                 spectral_factor = None,
                  z_axis_decomp = False,
                  line_database = None,
                  line_width = None,
@@ -462,11 +466,12 @@
         self.time_unit = self.quan(1.0, "s")
         self.velocity_unit = self.quan(1.0, "cm/s")
         if "beam_size" in self.specified_parameters:
+            beam_size = self.specified_parameters["beam_size"]
             beam_size = self.quan(beam_size[0], beam_size[1]).in_cgs().value
         else:
             beam_size = 1.0
         self.unit_registry.add("beam",beam_size,dimensions=dimensions.solid_angle)
-        if self.ppv_data:
+        if self.pps_data:
             units = self.wcs_2d.wcs.cunit[0]
             if units == "deg": units = "degree"
             if units == "rad": units = "radian"
@@ -541,17 +546,17 @@
         self.reversed = False
 
         # Check to see if this data is in some kind of (Lat,Lon,Vel) format
-        self.ppv_data = False
+        self.pps_data = False
         x = 0
-        for p in lon_prefixes+lat_prefixes+vel_prefixes:
+        for p in lon_prefixes+lat_prefixes+spec_names.keys():
             y = np_char.startswith(self.axis_names[:self.dimensionality], p)
             x += y.sum()
-        if x == self.dimensionality: self._setup_ppv()
+        if x == self.dimensionality: self._setup_pps()
 
-    def _setup_ppv(self):
+    def _setup_pps(self):
 
-        self.ppv_data = True
-        self.geometry = "ppv"
+        self.pps_data = True
+        self.geometry = "pps"
 
         end = min(self.dimensionality+1,4)
         if self.events_data:
@@ -577,11 +582,11 @@
 
         if self.wcs.naxis > 2:
 
-            self.vel_axis = np.zeros((end-1), dtype="bool")
-            for p in vel_prefixes:
-                self.vel_axis += np_char.startswith(ctypes, p)
-            self.vel_axis = np.where(self.vel_axis)[0][0]
-            self.vel_name = ctypes[self.vel_axis].split("-")[0].lower()
+            self.spec_axis = np.zeros((end-1), dtype="bool")
+            for p in spec_names.keys():
+                self.spec_axis += np_char.startswith(ctypes, p)
+            self.spec_axis = np.where(self.spec_axis)[0][0]
+            self.spec_name = spec_names[ctypes[self.spec_axis].split("-")[0][0]]
 
             self.wcs_2d = _astropy.pywcs.WCS(naxis=2)
             self.wcs_2d.wcs.crpix = self.wcs.wcs.crpix[[self.lon_axis, self.lat_axis]]
@@ -592,18 +597,18 @@
             self.wcs_2d.wcs.ctype = [self.wcs.wcs.ctype[self.lon_axis],
                                      self.wcs.wcs.ctype[self.lat_axis]]
 
-            self._p0 = self.wcs.wcs.crpix[self.vel_axis]
-            self._dz = self.wcs.wcs.cdelt[self.vel_axis]
-            self._z0 = self.wcs.wcs.crval[self.vel_axis]
-            self.vel_unit = str(self.wcs.wcs.cunit[self.vel_axis])
+            self._p0 = self.wcs.wcs.crpix[self.spec_axis]
+            self._dz = self.wcs.wcs.cdelt[self.spec_axis]
+            self._z0 = self.wcs.wcs.crval[self.spec_axis]
+            self.spec_unit = str(self.wcs.wcs.cunit[self.spec_axis])
 
             if self.line_width is not None:
                 if self._dz < 0.0:
                     self.reversed = True
-                    le = self.dims[self.vel_axis]+0.5
+                    le = self.dims[self.spec_axis]+0.5
                 else:
                     le = 0.5
-                self.line_width = self.line_width.in_units(self.vel_unit)
+                self.line_width = self.line_width.in_units(self.spec_unit)
                 self.freq_begin = (le-self._p0)*self._dz + self._z0
                 # We now reset these so that they are consistent
                 # with the new setup
@@ -612,24 +617,40 @@
                 self._z0 = 0.0
                 nz = np.rint(self.line_width.value/self._dz).astype("int")
                 self.line_width = self._dz*nz
-                self.domain_left_edge[self.vel_axis] = -0.5*float(nz)
-                self.domain_right_edge[self.vel_axis] = 0.5*float(nz)
-                self.domain_dimensions[self.vel_axis] = nz
+                self.domain_left_edge[self.spec_axis] = -0.5*float(nz)
+                self.domain_right_edge[self.spec_axis] = 0.5*float(nz)
+                self.domain_dimensions[self.spec_axis] = nz
             else:
-                Dz = self.domain_right_edge[self.vel_axis]-self.domain_left_edge[self.vel_axis]
-                self.domain_right_edge[self.vel_axis] = self.domain_left_edge[self.vel_axis] + \
+                if self.spectral_factor is None:
+                    self.spectral_factor = float(max(self.domain_dimensions[[self.lon_axis,
+                                                                             self.lat_axis]]))
+                    self.spectral_factor /= self.domain_dimensions[self.spec_axis]
+                    mylog.info("Setting the spectral factor to %f" % (self.spectral_factor))
+                Dz = self.domain_right_edge[self.spec_axis]-self.domain_left_edge[self.spec_axis]
+                self.domain_right_edge[self.spec_axis] = self.domain_left_edge[self.spec_axis] + \
                                                         self.spectral_factor*Dz
-
+                self._dz /= self.spectral_factor
+                self._p0 = (self._p0-0.5)*self.spectral_factor + 0.5
         else:
 
             self.wcs_2d = self.wcs
-            self.vel_axis = 2
-            self.vel_name = "z"
-            self.vel_unit = "code length"
+            self.spec_axis = 2
+            self.spec_name = "z"
+            self.spec_unit = "code length"
+
+    def spec2pixel(self, spec_value):
+        sv = self.arr(spec_value).in_units(self.spec_unit)
+        return self.arr((sv.v-self._z0)/self._dz+self._p0,
+                        "code_length")
+
+    def pixel2spec(self, pixel_value):
+        pv = self.arr(pixel_value, "code_length")
+        return self.arr((pv.v-self._p0)*self._dz+self._z0,
+                        self.spec_unit)
 
     def __del__(self):
-        for file in self._fits_files:
-            file.close()
+        for f in self._fits_files:
+            f.close()
             del file
         self._handle.close()
         del self._handle

diff -r 44cefa9de45d7e862b4b057648bd46aa2c1424a1 -r 4614f5901e02e71b17bd61e028814494bc969427 yt/frontends/fits/fields.py
--- a/yt/frontends/fits/fields.py
+++ b/yt/frontends/fits/fields.py
@@ -23,7 +23,7 @@
         for field in pf.field_list:
             if field[0] == "fits": self[field].take_log = False
 
-    def _setup_ppv_fields(self):
+    def _setup_pps_fields(self):
 
         def _get_2d_wcs(data, axis):
             w_coords = data.pf.wcs_2d.wcs_pix2world(data["x"], data["y"], 1)
@@ -42,18 +42,18 @@
             self.add_field(("fits",name), function=world_f(axis, unit), units=unit)
 
         if self.pf.dimensionality == 3:
-            def _vel_los(field, data):
-                axis = "xyz"[data.pf.vel_axis]
-                vz = (data[axis].ndarray_view()-self.pf._p0)*self.pf._dz + self.pf._z0
-                return data.pf.arr(vz, data.pf.vel_unit)
-            self.add_field(("fits",self.pf.vel_name), function=_vel_los,
-                           units=self.pf.vel_unit)
+            def _spec(field, data):
+                axis = "xyz"[data.pf.spec_axis]
+                sp = (data[axis].ndarray_view()-self.pf._p0)*self.pf._dz + self.pf._z0
+                return data.pf.arr(sp, data.pf.spec_unit)
+            self.add_field(("fits","spectral"), function=_spec,
+                           units=self.pf.spec_unit, display_name=self.pf.spec_name)
 
     def setup_fluid_fields(self):
 
-        if self.pf.ppv_data:
+        if self.pf.pps_data:
             def _pixel(field, data):
                 return data.pf.arr(data["ones"], "pixel")
             self.add_field(("fits","pixel"), function=_pixel, units="pixel")
-            self._setup_ppv_fields()
+            self._setup_pps_fields()
             return

diff -r 44cefa9de45d7e862b4b057648bd46aa2c1424a1 -r 4614f5901e02e71b17bd61e028814494bc969427 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -26,7 +26,7 @@
         self._handle = pf._handle
         if self.pf.line_width is not None:
             self.line_db = self.pf.line_database
-            self.dz = self.pf.line_width/self.domain_dimensions[self.pf.vel_axis]
+            self.dz = self.pf.line_width/self.domain_dimensions[self.pf.spec_axis]
         else:
             self.line_db = None
             self.dz = 1.
@@ -92,19 +92,19 @@
                     start = ((g.LeftEdge-self.pf.domain_left_edge)/dx).to_ndarray().astype("int")
                     end = start + g.ActiveDimensions
                     if self.line_db is not None and fname in self.line_db:
-                        my_off = self.line_db.get(fname).in_units(self.pf.vel_unit).value
+                        my_off = self.line_db.get(fname).in_units(self.pf.spec_unit).value
                         my_off = my_off - 0.5*self.pf.line_width
                         my_off = int((my_off-self.pf.freq_begin)/self.dz)
                         my_off = max(my_off, 0)
-                        my_off = min(my_off, self.pf.dims[self.pf.vel_axis]-1)
-                        start[self.pf.vel_axis] += my_off
-                        end[self.pf.vel_axis] += my_off
+                        my_off = min(my_off, self.pf.dims[self.pf.spec_axis]-1)
+                        start[self.pf.spec_axis] += my_off
+                        end[self.pf.spec_axis] += my_off
                         mylog.debug("Reading from " + str(start) + str(end))
                     slices = [slice(start[i],end[i]) for i in xrange(3)]
                     if self.pf.reversed:
-                        new_start = self.pf.dims[self.pf.vel_axis]-1-start[self.pf.vel_axis]
-                        new_end = max(self.pf.dims[self.pf.vel_axis]-1-end[self.pf.vel_axis],0)
-                        slices[self.pf.vel_axis] = slice(new_start,new_end,-1)
+                        new_start = self.pf.dims[self.pf.spec_axis]-1-start[self.pf.spec_axis]
+                        new_end = max(self.pf.dims[self.pf.spec_axis]-1-end[self.pf.spec_axis],0)
+                        slices[self.pf.spec_axis] = slice(new_start,new_end,-1)
                     if self.pf.dimensionality == 2:
                         nx, ny = g.ActiveDimensions[:2]
                         nz = 1
@@ -116,8 +116,8 @@
                     else:
                         data = ds.data[slices[2],slices[1],slices[0]].transpose()
                     if self.line_db is not None:
-                        nz1 = data.shape[self.pf.vel_axis]
-                        nz2 = g.ActiveDimensions[self.pf.vel_axis]
+                        nz1 = data.shape[self.pf.spec_axis]
+                        nz2 = g.ActiveDimensions[self.pf.spec_axis]
                         if nz1 != nz2:
                             old_data = data.copy()
                             data = np.zeros(g.ActiveDimensions)

diff -r 44cefa9de45d7e862b4b057648bd46aa2c1424a1 -r 4614f5901e02e71b17bd61e028814494bc969427 yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -14,7 +14,6 @@
 from yt.fields.derived_field import ValidateSpatial
 from yt.utilities.on_demand_imports import _astropy
 from yt.funcs import mylog, get_image_suffix
-from yt.visualization._mpl_imports import FigureCanvasAgg
 import os
 
 def _make_counts(emin, emax):
@@ -130,26 +129,17 @@
             raise NotImplementedError("WCS axes are not implemented for oblique plots.")
         if not hasattr(pw.pf, "wcs_2d"):
             raise NotImplementedError("WCS axes are not implemented for this dataset.")
-        if pw.data_source.axis != pw.pf.vel_axis:
+        if pw.data_source.axis != pw.pf.spec_axis:
             raise NotImplementedError("WCS axes are not implemented for this axis.")
-        self.pf = pw.pf
+        self.plots = {}
         self.pw = pw
-        self.plots = {}
-        self.wcs_axes = []
         for f in pw.plots:
             rect = pw.plots[f]._get_best_layout()[1]
             fig = pw.plots[f].figure
-            ax = WCSAxes(fig, rect, wcs=pw.pf.wcs_2d, frameon=False)
-            fig.add_axes(ax)
-            self.wcs_axes.append(ax)
-        self._setup_plots()
-
-    def _setup_plots(self):
-        pw = self.pw
-        for f, ax in zip(pw.plots, self.wcs_axes):
-            wcs = ax.wcs.wcs
-            pw.plots[f].axes.get_xaxis().set_visible(False)
-            pw.plots[f].axes.get_yaxis().set_visible(False)
+            ax = fig.axes[0]
+            wcs_ax = WCSAxes(fig, rect, wcs=pw.pf.wcs_2d, frameon=False)
+            fig.add_axes(wcs_ax)
+            wcs = pw.pf.wcs_2d.wcs
             xax = pw.pf.coordinates.x_axis[pw.data_source.axis]
             yax = pw.pf.coordinates.y_axis[pw.data_source.axis]
             xlabel = "%s (%s)" % (wcs.ctype[xax].split("-")[0],
@@ -157,18 +147,18 @@
             ylabel = "%s (%s)" % (wcs.ctype[yax].split("-")[0],
                                   wcs.cunit[yax])
             fp = pw._font_properties
-            ax.coords[0].set_axislabel(xlabel, fontproperties=fp)
-            ax.coords[1].set_axislabel(ylabel, fontproperties=fp)
-            ax.set_xlim(pw.xlim[0].value, pw.xlim[1].value)
-            ax.set_ylim(pw.ylim[0].value, pw.ylim[1].value)
-            ax.coords[0].ticklabels.set_fontproperties(fp)
-            ax.coords[1].ticklabels.set_fontproperties(fp)
-            self.plots[f] = pw.plots[f]
-        self.pw = pw
-        self.pf = self.pw.pf
-
-    def refresh(self):
-        self._setup_plots(self)
+            wcs_ax.coords[0].set_axislabel(xlabel, fontproperties=fp)
+            wcs_ax.coords[1].set_axislabel(ylabel, fontproperties=fp)
+            wcs_ax.coords[0].ticklabels.set_fontproperties(fp)
+            wcs_ax.coords[1].ticklabels.set_fontproperties(fp)
+            ax.xaxis.set_visible(False)
+            ax.yaxis.set_visible(False)
+            wcs_ax.set_xlim(pw.xlim[0].value, pw.xlim[1].value)
+            wcs_ax.set_ylim(pw.ylim[0].value, pw.ylim[1].value)
+            wcs_ax.coords.frame._update_cache = []
+            ax.xaxis.set_visible(False)
+            ax.yaxis.set_visible(False)
+            self.plots[f] = fig
 
     def keys(self):
         return self.plots.keys()

diff -r 44cefa9de45d7e862b4b057648bd46aa2c1424a1 -r 4614f5901e02e71b17bd61e028814494bc969427 yt/geometry/pps_coordinates.py
--- /dev/null
+++ b/yt/geometry/pps_coordinates.py
@@ -0,0 +1,65 @@
+"""
+Cartesian fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from .cartesian_coordinates import \
+    CartesianCoordinateHandler
+
+class PPSCoordinateHandler(CartesianCoordinateHandler):
+
+    def __init__(self, pf):
+        super(PPSCoordinateHandler, self).__init__(pf)
+
+        self.axis_name = {}
+        self.axis_id = {}
+
+        for axis, axis_name in zip([pf.lon_axis, pf.lat_axis, pf.spec_axis],
+                                   ["Image\ x", "Image\ y", pf.spec_name]):
+            lower_ax = "xyz"[axis]
+            upper_ax = lower_ax.upper()
+
+            self.axis_name[axis] = axis_name
+            self.axis_name[lower_ax] = axis_name
+            self.axis_name[upper_ax] = axis_name
+            self.axis_name[axis_name] = axis_name
+
+            self.axis_id[lower_ax] = axis
+            self.axis_id[axis] = axis
+            self.axis_id[axis_name] = axis
+
+        self.default_unit_label = {}
+        self.default_unit_label[pf.lon_axis] = "pixel"
+        self.default_unit_label[pf.lat_axis] = "pixel"
+        self.default_unit_label[pf.spec_axis] = pf.spec_unit
+
+        def _spec_axis(ax, x, y):
+            p = (x,y)[ax]
+            return [self.pf.pixel2spec(pp).v for pp in p]
+
+        self.axis_field = {}
+        self.axis_field[self.pf.spec_axis] = _spec_axis
+
+    def convert_to_cylindrical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_cylindrical(self, coord):
+        raise NotImplementedError
+
+    x_axis = { 'x' : 1, 'y' : 0, 'z' : 0,
+                0  : 1,  1  : 0,  2  : 0}
+
+    y_axis = { 'x' : 2, 'y' : 2, 'z' : 1,
+                0  : 2,  1  : 2,  2  : 1}

diff -r 44cefa9de45d7e862b4b057648bd46aa2c1424a1 -r 4614f5901e02e71b17bd61e028814494bc969427 yt/geometry/ppv_coordinates.py
--- a/yt/geometry/ppv_coordinates.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""
-Cartesian fields
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-from .cartesian_coordinates import \
-    CartesianCoordinateHandler
-
-class PPVCoordinateHandler(CartesianCoordinateHandler):
-
-    def __init__(self, pf):
-        super(PPVCoordinateHandler, self).__init__(pf)
-
-        self.axis_name = {}
-        self.axis_id = {}
-
-        for axis, axis_name in zip([pf.lon_axis, pf.lat_axis, pf.vel_axis],
-                                   ["Image\ x", "Image\ y", pf.vel_name]):
-            lower_ax = "xyz"[axis]
-            upper_ax = lower_ax.upper()
-
-            self.axis_name[axis] = axis_name
-            self.axis_name[lower_ax] = axis_name
-            self.axis_name[upper_ax] = axis_name
-            self.axis_name[axis_name] = axis_name
-
-            self.axis_id[lower_ax] = axis
-            self.axis_id[axis] = axis
-            self.axis_id[axis_name] = axis
-
-        self.default_unit_label = {}
-        self.default_unit_label[pf.lon_axis] = "pixel"
-        self.default_unit_label[pf.lat_axis] = "pixel"
-        self.default_unit_label[pf.vel_axis] = pf.vel_unit
-
-        def _vel_axis(ax, x, y):
-            p = (x,y)[ax]
-            return [(pp.value-self.pf._p0)*self.pf._dz+self.pf._z0
-                    for pp in p]
-
-        self.axis_field = {}
-        self.axis_field[self.pf.vel_axis] = _vel_axis
-
-    def convert_to_cylindrical(self, coord):
-        raise NotImplementedError
-
-    def convert_from_cylindrical(self, coord):
-        raise NotImplementedError
-
-    x_axis = { 'x' : 1, 'y' : 0, 'z' : 0,
-                0  : 1,  1  : 0,  2  : 0}
-
-    y_axis = { 'x' : 2, 'y' : 2, 'z' : 1,
-                0  : 2,  1  : 2,  2  : 1}

diff -r 44cefa9de45d7e862b4b057648bd46aa2c1424a1 -r 4614f5901e02e71b17bd61e028814494bc969427 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -163,7 +163,7 @@
     return center
 
 def get_window_parameters(axis, center, width, pf):
-    if pf.geometry == "cartesian" or pf.geometry == "ppv":
+    if pf.geometry == "cartesian" or pf.geometry == "pps":
         width = get_sanitized_width(axis, width, None, pf)
         center = get_sanitized_center(center, pf)
     elif pf.geometry in ("polar", "cylindrical"):
@@ -744,7 +744,7 @@
             else:
                 (unit_x, unit_y) = self._axes_unit_names
 
-            # For some plots we may set aspect by hand, such as for PPV data.
+            # For some plots we may set aspect by hand, such as for PPS data.
             # This will likely be replaced at some point by the coordinate handler
             # setting plot aspect.
             if self.aspect is None:
@@ -851,7 +851,7 @@
                 else:
                     ymin, ymax = [float(y) for y in extenty]
                 self.plots[f].image.set_extent((xmin,xmax,ymin,ymax))
-
+                self.plots[f].axes.set_aspect("auto")
             for label in (self.plots[f].axes.get_xticklabels() +
                           self.plots[f].axes.get_yticklabels() +
                           [self.plots[f].axes.xaxis.get_offset_text(),


https://bitbucket.org/yt_analysis/yt/commits/38a1a502d5df/
Changeset:   38a1a502d5df
Branch:      yt-3.0
User:        jzuhone
Date:        2014-05-15 16:38:56
Summary:     Some stray cells
Affected #:  1 file

diff -r 4614f5901e02e71b17bd61e028814494bc969427 -r 38a1a502d5df896c78d68af1c6eef0935353e5db doc/source/cookbook/fits_radio_cubes.ipynb
--- a/doc/source/cookbook/fits_radio_cubes.ipynb
+++ b/doc/source/cookbook/fits_radio_cubes.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:7048fd20080135148a69c33e08b33d551b43b0462d1fe5298bddeac93f8fa050"
+  "signature": "sha256:af10462a2a656015309ffc74e415bade3910ba7c7ccca5e15cfa98eca7ccadf4"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -82,7 +82,7 @@
      "input": [
       "from yt.frontends.fits.misc import PlotWindowWCS\n",
       "wcs_slc = PlotWindowWCS(slc)\n",
-      "wcs_slc.show()"
+      "wcs_slc[\"intensity\"]"
      ],
      "language": "python",
      "metadata": {},
@@ -459,34 +459,6 @@
      "language": "python",
      "metadata": {},
      "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "(ds.domain_right_edge[2].v-ds._p0)*ds._dz + ds._z0"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds._z0"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
     }
    ],
    "metadata": {}


https://bitbucket.org/yt_analysis/yt/commits/a2f87c1d4a0a/
Changeset:   a2f87c1d4a0a
Branch:      yt-3.0
User:        jzuhone
Date:        2014-05-15 20:52:14
Summary:     Merge
Affected #:  3 files

diff -r 38a1a502d5df896c78d68af1c6eef0935353e5db -r a2f87c1d4a0af44e6eaea8fb3c07ae33d8187989 yt/utilities/lib/ragged_arrays.pyx
--- /dev/null
+++ b/yt/utilities/lib/ragged_arrays.pyx
@@ -0,0 +1,97 @@
+"""
+Some simple operations for operating on ragged arrays
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+cimport numpy as np
+cimport cython
+
+cdef fused numpy_dt:
+    np.float32_t
+    np.float64_t
+    np.int32_t
+    np.int64_t
+
+cdef numpy_dt r_min(numpy_dt a, numpy_dt b):
+    if a < b: return a
+    return b
+
+cdef numpy_dt r_max(numpy_dt a, numpy_dt b):
+    if a > b: return a
+    return b
+
+cdef numpy_dt r_add(numpy_dt a, numpy_dt b):
+    return a + b
+
+cdef numpy_dt r_subtract(numpy_dt a, numpy_dt b):
+    return a - b
+
+cdef numpy_dt r_multiply(numpy_dt a, numpy_dt b):
+    return a * b
+
+ at cython.cdivision(True)
+cdef numpy_dt r_divide(numpy_dt a, numpy_dt b):
+    return a / b
+
+def index_unop(np.ndarray[numpy_dt, ndim=1] values,
+              np.ndarray[np.int64_t, ndim=1] indices,
+              np.ndarray[np.int64_t, ndim=1] sizes,
+              operation):
+    cdef numpy_dt mi, ma
+    if numpy_dt == np.float32_t:
+        dt = "float32"
+        mi = np.finfo(dt).min
+        ma = np.finfo(dt).max
+    elif numpy_dt == np.float64_t:
+        dt = "float64"
+        mi = np.finfo(dt).min
+        ma = np.finfo(dt).max
+    elif numpy_dt == np.int32_t:
+        dt = "int32"
+        mi = np.iinfo(dt).min
+        ma = np.iinfo(dt).max
+    elif numpy_dt == np.int64_t:
+        dt = "int64"
+        mi = np.iinfo(dt).min
+        ma = np.iinfo(dt).max
+    cdef np.ndarray[numpy_dt] out_values = np.zeros(sizes.size, dtype=dt)
+    cdef numpy_dt (*func)(numpy_dt a, numpy_dt b)
+    # Now we figure out our function.  At present, we only allow addition and
+    # multiplication, because they are commutative and easy to bootstrap.
+    cdef numpy_dt ival, val
+    if operation == "sum":
+        ival = 0
+        func = r_add
+    elif operation == "prod":
+        ival = 1
+        func = r_multiply
+    elif operation == "max":
+        ival = mi
+        func = r_max
+    elif operation == "min":
+        ival = ma
+        func = r_min
+    else:
+        raise NotImplementedError
+    cdef np.int64_t i, j, ind_ind, ind_arr
+    ind_ind = 0
+    for i in range(sizes.size):
+        # Each entry in sizes is the size of the array
+        val = ival
+        for j in range(sizes[i]):
+            ind_arr = indices[ind_ind]
+            val = func(val, values[ind_arr])
+            ind_ind += 1
+        out_values[i] = val
+    return out_values

diff -r 38a1a502d5df896c78d68af1c6eef0935353e5db -r a2f87c1d4a0af44e6eaea8fb3c07ae33d8187989 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -139,6 +139,8 @@
           )
     config.add_extension("write_array",
                          ["yt/utilities/lib/write_array.pyx"])
+    config.add_extension("ragged_arrays",
+                         ["yt/utilities/lib/ragged_arrays.pyx"])
     config.add_extension("GridTree", 
     ["yt/utilities/lib/GridTree.pyx"],
         libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])

diff -r 38a1a502d5df896c78d68af1c6eef0935353e5db -r a2f87c1d4a0af44e6eaea8fb3c07ae33d8187989 yt/utilities/lib/tests/test_ragged_arrays.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_ragged_arrays.py
@@ -0,0 +1,36 @@
+from yt.testing import *
+import numpy as np
+from yt.utilities.lib.ragged_arrays import index_unop
+
+operations = ((np.sum, "sum"),
+              (np.prod, "prod"),
+              (np.max, "max"),
+              (np.min, "min"))
+dtypes = ((-1e8, 1e8, "float32"),
+          (-1e8, 1e8, "float64"),
+          (-10000, 10000, "int32"),
+          (-100000000, 100000000, "int64"))
+
+def test_index_unop():
+    np.random.seed(0x4d3d3d3)
+    indices = np.arange(1000)
+    np.random.shuffle(indices)
+    sizes = np.array([
+        200, 50, 50, 100, 32, 32, 32, 32, 32, 64, 376], dtype="int64")
+    for mi, ma, dtype in dtypes:
+        for op, operation in operations:
+            # Create a random set of values
+            values = np.random.random(1000)
+            if operation != "prod":
+                values = values * ma + (ma - mi)
+            if operation == "prod" and dtype.startswith("int"):
+                values = values.astype(dtype)
+                values[values != 0] = 1
+                values[values == 0] = -1
+            values = values.astype(dtype)
+            out_values = index_unop(values, indices, sizes, operation)
+            i = 0
+            for j, v in enumerate(sizes):
+                arr = values[indices[i:i+v]]
+                yield assert_equal, op(arr), out_values[j]
+                i += v


https://bitbucket.org/yt_analysis/yt/commits/e475a643c855/
Changeset:   e475a643c855
Branch:      yt-3.0
User:        jzuhone
Date:        2014-05-16 01:41:35
Summary:     This logic only makes sense for non-oblique plots
Affected #:  1 file

diff -r a2f87c1d4a0af44e6eaea8fb3c07ae33d8187989 -r e475a643c8559a2987c8bac3255078b64323553b yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -734,8 +734,6 @@
             axis_index = self.data_source.axis
 
             xc, yc = self._setup_origin()
-            xax = self.pf.coordinates.x_axis[axis_index]
-            yax = self.pf.coordinates.y_axis[axis_index]
 
             if self._axes_unit_names is None:
                 unit = get_smallest_appropriate_unit(
@@ -832,26 +830,32 @@
                           r'$\rm{Image\/y'+axes_unit_labels[1]+'}$']
             else:
                 axis_names = self.pf.coordinates.axis_name
+                xax = self.pf.coordinates.x_axis[axis_index]
+                yax = self.pf.coordinates.y_axis[axis_index]
+
                 if hasattr(self.pf.coordinates, "axis_default_unit_label"):
                     axes_unit_labels = [self.pf.coordinates.axis_default_unit_name[xax],
                                         self.pf.coordinates.axis_default_unit_name[yax]]
                 labels = [r'$\rm{'+axis_names[xax]+axes_unit_labels[0] + r'}$',
                           r'$\rm{'+axis_names[yax]+axes_unit_labels[1] + r'}$']
 
+                if hasattr(self.pf.coordinates, "axis_field"):
+                    if xax in self.pf.coordinates.axis_field:
+                        xmin, xmax = self.pf.coordinates.axis_field[xax](0,
+                                                                         self.xlim, self.ylim)
+                    else:
+                        xmin, xmax = [float(x) for x in extentx]
+                    if yax in self.pf.coordinates.axis_field:
+                        ymin, ymax = self.pf.coordinates.axis_field[yax](1,
+                                                                         self.xlim, self.ylim)
+                    else:
+                        ymin, ymax = [float(y) for y in extenty]
+                    self.plots[f].image.set_extent((xmin,xmax,ymin,ymax))
+                    self.plots[f].axes.set_aspect("auto")
+
             self.plots[f].axes.set_xlabel(labels[0],fontproperties=fp)
             self.plots[f].axes.set_ylabel(labels[1],fontproperties=fp)
 
-            if hasattr(self.pf.coordinates, "axis_field"):
-                if xax in self.pf.coordinates.axis_field:
-                    xmin, xmax = self.pf.coordinates.axis_field[xax](0, self.xlim, self.ylim)
-                else:
-                    xmin, xmax = [float(x) for x in extentx]
-                if yax in self.pf.coordinates.axis_field:
-                    ymin, ymax = self.pf.coordinates.axis_field[yax](1, self.xlim, self.ylim)
-                else:
-                    ymin, ymax = [float(y) for y in extenty]
-                self.plots[f].image.set_extent((xmin,xmax,ymin,ymax))
-                self.plots[f].axes.set_aspect("auto")
             for label in (self.plots[f].axes.get_xticklabels() +
                           self.plots[f].axes.get_yticklabels() +
                           [self.plots[f].axes.xaxis.get_offset_text(),


https://bitbucket.org/yt_analysis/yt/commits/dbc69e197550/
Changeset:   dbc69e197550
Branch:      yt-3.0
User:        jzuhone
Date:        2014-05-16 01:42:54
Summary:     Making this an option, but not the default
Affected #:  1 file

diff -r e475a643c8559a2987c8bac3255078b64323553b -r dbc69e197550f91cefe14c9fce207dc8bc19c446 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -314,7 +314,7 @@
                  nprocs = None,
                  storage_filename = None,
                  nan_mask = None,
-                 spectral_factor = None,
+                 spectral_factor = 1.0,
                  z_axis_decomp = False,
                  line_database = None,
                  line_width = None,
@@ -621,7 +621,7 @@
                 self.domain_right_edge[self.spec_axis] = 0.5*float(nz)
                 self.domain_dimensions[self.spec_axis] = nz
             else:
-                if self.spectral_factor is None:
+                if self.spectral_factor == "auto":
                     self.spectral_factor = float(max(self.domain_dimensions[[self.lon_axis,
                                                                              self.lat_axis]]))
                     self.spectral_factor /= self.domain_dimensions[self.spec_axis]


https://bitbucket.org/yt_analysis/yt/commits/68f525eb7848/
Changeset:   68f525eb7848
Branch:      yt-3.0
User:        jzuhone
Date:        2014-05-16 16:47:57
Summary:     "pps" -> "spectral_cube" and similar
Affected #:  6 files

diff -r dbc69e197550f91cefe14c9fce207dc8bc19c446 -r 68f525eb7848514fd88311daec347477aadc7837 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -55,8 +55,8 @@
     SphericalCoordinateHandler
 from yt.geometry.geographic_coordinates import \
     GeographicCoordinateHandler
-from yt.geometry.pps_coordinates import \
-    PPSCoordinateHandler
+from yt.geometry.spec_cube_coordinates import \
+    SpectralCubeCoordinateHandler
 
 # We want to support the movie format in the future.
 # When such a thing comes to pass, I'll move all the stuff that is contant up
@@ -361,8 +361,8 @@
             self.coordinates = SphericalCoordinateHandler(self)
         elif self.geometry == "geographic":
             self.coordinates = GeographicCoordinateHandler(self)
-        elif self.geometry == "pps":
-            self.coordinates = PPSCoordinateHandler(self)
+        elif self.geometry == "spectral_cube":
+            self.coordinates = SpectralCubeCoordinateHandler(self)
         else:
             raise YTGeometryNotSupported(self.geometry)
 

diff -r dbc69e197550f91cefe14c9fce207dc8bc19c446 -r 68f525eb7848514fd88311daec347477aadc7837 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -471,7 +471,7 @@
         else:
             beam_size = 1.0
         self.unit_registry.add("beam",beam_size,dimensions=dimensions.solid_angle)
-        if self.pps_data:
+        if self.spec_cube:
             units = self.wcs_2d.wcs.cunit[0]
             if units == "deg": units = "degree"
             if units == "rad": units = "radian"
@@ -546,17 +546,17 @@
         self.reversed = False
 
         # Check to see if this data is in some kind of (Lat,Lon,Vel) format
-        self.pps_data = False
+        self.spec_cube = False
         x = 0
         for p in lon_prefixes+lat_prefixes+spec_names.keys():
             y = np_char.startswith(self.axis_names[:self.dimensionality], p)
             x += y.sum()
-        if x == self.dimensionality: self._setup_pps()
+        if x == self.dimensionality: self._setup_spec_cube()
 
-    def _setup_pps(self):
+    def _setup_spec_cube(self):
 
-        self.pps_data = True
-        self.geometry = "pps"
+        self.spec_cube = True
+        self.geometry = "spectral_cube"
 
         end = min(self.dimensionality+1,4)
         if self.events_data:

diff -r dbc69e197550f91cefe14c9fce207dc8bc19c446 -r 68f525eb7848514fd88311daec347477aadc7837 yt/frontends/fits/fields.py
--- a/yt/frontends/fits/fields.py
+++ b/yt/frontends/fits/fields.py
@@ -23,7 +23,7 @@
         for field in pf.field_list:
             if field[0] == "fits": self[field].take_log = False
 
-    def _setup_pps_fields(self):
+    def _setup_spec_cube_fields(self):
 
         def _get_2d_wcs(data, axis):
             w_coords = data.pf.wcs_2d.wcs_pix2world(data["x"], data["y"], 1)
@@ -51,9 +51,9 @@
 
     def setup_fluid_fields(self):
 
-        if self.pf.pps_data:
+        if self.pf.spec_cube:
             def _pixel(field, data):
                 return data.pf.arr(data["ones"], "pixel")
             self.add_field(("fits","pixel"), function=_pixel, units="pixel")
-            self._setup_pps_fields()
+            self._setup_spec_cube_fields()
             return

diff -r dbc69e197550f91cefe14c9fce207dc8bc19c446 -r 68f525eb7848514fd88311daec347477aadc7837 yt/geometry/pps_coordinates.py
--- a/yt/geometry/pps_coordinates.py
+++ /dev/null
@@ -1,65 +0,0 @@
-"""
-Cartesian fields
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-from .cartesian_coordinates import \
-    CartesianCoordinateHandler
-
-class PPSCoordinateHandler(CartesianCoordinateHandler):
-
-    def __init__(self, pf):
-        super(PPSCoordinateHandler, self).__init__(pf)
-
-        self.axis_name = {}
-        self.axis_id = {}
-
-        for axis, axis_name in zip([pf.lon_axis, pf.lat_axis, pf.spec_axis],
-                                   ["Image\ x", "Image\ y", pf.spec_name]):
-            lower_ax = "xyz"[axis]
-            upper_ax = lower_ax.upper()
-
-            self.axis_name[axis] = axis_name
-            self.axis_name[lower_ax] = axis_name
-            self.axis_name[upper_ax] = axis_name
-            self.axis_name[axis_name] = axis_name
-
-            self.axis_id[lower_ax] = axis
-            self.axis_id[axis] = axis
-            self.axis_id[axis_name] = axis
-
-        self.default_unit_label = {}
-        self.default_unit_label[pf.lon_axis] = "pixel"
-        self.default_unit_label[pf.lat_axis] = "pixel"
-        self.default_unit_label[pf.spec_axis] = pf.spec_unit
-
-        def _spec_axis(ax, x, y):
-            p = (x,y)[ax]
-            return [self.pf.pixel2spec(pp).v for pp in p]
-
-        self.axis_field = {}
-        self.axis_field[self.pf.spec_axis] = _spec_axis
-
-    def convert_to_cylindrical(self, coord):
-        raise NotImplementedError
-
-    def convert_from_cylindrical(self, coord):
-        raise NotImplementedError
-
-    x_axis = { 'x' : 1, 'y' : 0, 'z' : 0,
-                0  : 1,  1  : 0,  2  : 0}
-
-    y_axis = { 'x' : 2, 'y' : 2, 'z' : 1,
-                0  : 2,  1  : 2,  2  : 1}

diff -r dbc69e197550f91cefe14c9fce207dc8bc19c446 -r 68f525eb7848514fd88311daec347477aadc7837 yt/geometry/spec_cube_coordinates.py
--- /dev/null
+++ b/yt/geometry/spec_cube_coordinates.py
@@ -0,0 +1,65 @@
+"""
+Cartesian fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from .cartesian_coordinates import \
+    CartesianCoordinateHandler
+
+class SpectralCubeCoordinateHandler(CartesianCoordinateHandler):
+
+    def __init__(self, pf):
+        super(SpectralCubeCoordinateHandler, self).__init__(pf)
+
+        self.axis_name = {}
+        self.axis_id = {}
+
+        for axis, axis_name in zip([pf.lon_axis, pf.lat_axis, pf.spec_axis],
+                                   ["Image\ x", "Image\ y", pf.spec_name]):
+            lower_ax = "xyz"[axis]
+            upper_ax = lower_ax.upper()
+
+            self.axis_name[axis] = axis_name
+            self.axis_name[lower_ax] = axis_name
+            self.axis_name[upper_ax] = axis_name
+            self.axis_name[axis_name] = axis_name
+
+            self.axis_id[lower_ax] = axis
+            self.axis_id[axis] = axis
+            self.axis_id[axis_name] = axis
+
+        self.default_unit_label = {}
+        self.default_unit_label[pf.lon_axis] = "pixel"
+        self.default_unit_label[pf.lat_axis] = "pixel"
+        self.default_unit_label[pf.spec_axis] = pf.spec_unit
+
+        def _spec_axis(ax, x, y):
+            p = (x,y)[ax]
+            return [self.pf.pixel2spec(pp).v for pp in p]
+
+        self.axis_field = {}
+        self.axis_field[self.pf.spec_axis] = _spec_axis
+
+    def convert_to_cylindrical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_cylindrical(self, coord):
+        raise NotImplementedError
+
+    x_axis = { 'x' : 1, 'y' : 0, 'z' : 0,
+                0  : 1,  1  : 0,  2  : 0}
+
+    y_axis = { 'x' : 2, 'y' : 2, 'z' : 1,
+                0  : 2,  1  : 2,  2  : 1}

diff -r dbc69e197550f91cefe14c9fce207dc8bc19c446 -r 68f525eb7848514fd88311daec347477aadc7837 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -163,7 +163,7 @@
     return center
 
 def get_window_parameters(axis, center, width, pf):
-    if pf.geometry == "cartesian" or pf.geometry == "pps":
+    if pf.geometry == "cartesian" or pf.geometry == "spectral_cube":
         width = get_sanitized_width(axis, width, None, pf)
         center = get_sanitized_center(center, pf)
     elif pf.geometry in ("polar", "cylindrical"):
@@ -742,7 +742,7 @@
             else:
                 (unit_x, unit_y) = self._axes_unit_names
 
-            # For some plots we may set aspect by hand, such as for PPS data.
+            # For some plots we may set aspect by hand, such as for spectral cube data.
             # This will likely be replaced at some point by the coordinate handler
             # setting plot aspect.
             if self.aspect is None:


https://bitbucket.org/yt_analysis/yt/commits/957a84036116/
Changeset:   957a84036116
Branch:      yt-3.0
User:        jzuhone
Date:        2014-05-16 18:09:29
Summary:     Detailing options to pass to load for FITS data
Affected #:  1 file

diff -r 68f525eb7848514fd88311daec347477aadc7837 -r 957a84036116743f067aa55fd799f68078a3751d doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -676,8 +676,14 @@
 Additional Options
 ++++++++++++++++++
 
+The following are additional options that may be passed to the ``load`` command when analyzing
+FITS data:
+
+``nan_mask``
+~~~~~~~~~~~~
+
 FITS image data may include ``NaNs``. If you wish to mask this data out,
-you may supply a ``nan_mask`` parameter to ``load``, which may either be a
+you may supply a ``nan_mask`` parameter, which may either be a
 single floating-point number (applies to all fields) or a Python dictionary
 containing different mask values for different fields:
 
@@ -689,9 +695,27 @@
    # passing a dict
    ds = load("m33_hi.fits", nan_mask={"intensity":-1.0,"temperature":0.0})
 
+``suppress_astropy_warnings``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
 Generally, AstroPy may generate a lot of warnings about individual FITS
 files, many of which you may want to ignore. If you want to see these
-warnings, set ``suppress_astropy_warnings = False`` in the call to ``load``.
+warnings, set ``suppress_astropy_warnings = False``.
+
+``z_axis_decomp``
+~~~~~~~~~~~~~~~~~
+
+For some applications, decomposing 3D FITS data into grids that span the x-y plane with short
+strides along the z-axis may result in a significant improvement in I/O speed. To enable this feature, set ``z_axis_decomp=True``.
+
+``spectral_factor``
+~~~~~~~~~~~~~~~~~~~
+
+Often, the aspect ratio of 3D spectral cubes can be far from unity. Because yt sets the pixel
+scale as the ``code_length``, certain visualizations (such as volume renderings) may look extended
+or distended in ways that are undesirable. To adjust the width in ``code_length`` of the spectral
+axis, set ``spectral_factor`` equal to a constant which gives the desired scaling,
+or set it to ``"auto"`` to make the width the same as the largest axis in the sky plane.
 
 Miscellaneous Tools for Use with FITS Data
 ++++++++++++++++++++++++++++++++++++++++++
@@ -703,7 +727,6 @@
 
   from yt.frontends.fits.misc import setup_counts_fields, PlotWindowWCS, ds9_region
 
-
 ``setup_counts_fields``
 ~~~~~~~~~~~~~~~~~~~~~~~
 


https://bitbucket.org/yt_analysis/yt/commits/6d9a0731ec13/
Changeset:   6d9a0731ec13
Branch:      yt-3.0
User:        jzuhone
Date:        2014-05-18 19:56:20
Summary:     Some more documentation regarding installation and development options
Affected #:  2 files

diff -r 957a84036116743f067aa55fd799f68078a3751d -r 6d9a0731ec1319951116621170692e97bc603ff5 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -111,6 +111,8 @@
 out with them.  In :ref:`code-style-guide` there is a list of handy tips for
 how to structure and write your code.
 
+.. _mercurial-with-yt:
+
 How to Use Mercurial with yt
 ++++++++++++++++++++++++++++
 
@@ -135,6 +137,8 @@
   * If you run into any troubles, stop by IRC (see :ref:`irc`) or the mailing
     list.
 
+.. _building-yt:
+
 Building yt
 +++++++++++
 

diff -r 957a84036116743f067aa55fd799f68078a3751d -r 6d9a0731ec1319951116621170692e97bc603ff5 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -101,6 +101,8 @@
 Do not hesitate to :ref:`contact us <asking-for-help>` so we can help you
 figure it out.
 
+If you like, this might be a good time :ref:`to run the test suite <testing>`.
+
 .. _updating-yt:
 
 Updating yt and its dependencies
@@ -199,4 +201,14 @@
 ++++++++++++++++++++++++
 
 Installation on Microsoft Windows is only supported for Windows XP Service Pack 3 and
-higher (both 32-bit and 64-bit) using Anaconda.
\ No newline at end of file
+higher (both 32-bit and 64-bit) using Anaconda.
+
+Keeping yt Updated via Mercurial
+++++++++++++++++++++++++++++++++
+
+If you want to maintain your yt installation via updates straight from the Bitbucket repository,
+or if you want to do some development on your own, we suggest you check out some of the
+:ref:`development docs <contributing-code>`, especially the sections on :ref:`Mercurial
+<mercurial-with-yt>`
+and :ref:`building yt from source <building-yt>`.
+


https://bitbucket.org/yt_analysis/yt/commits/5c2d8110d05a/
Changeset:   5c2d8110d05a
Branch:      yt-3.0
User:        jzuhone
Date:        2014-05-20 03:53:29
Summary:     Fixing issue with unintentional side effect of importing from the analysis modules API.
Affected #:  2 files

diff -r 6d9a0731ec1319951116621170692e97bc603ff5 -r 5c2d8110d05a0b7efc6e7194302ac0f2939e30eb doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -209,6 +209,5 @@
 If you want to maintain your yt installation via updates straight from the Bitbucket repository,
 or if you want to do some development on your own, we suggest you check out some of the
 :ref:`development docs <contributing-code>`, especially the sections on :ref:`Mercurial
-<mercurial-with-yt>`
-and :ref:`building yt from source <building-yt>`.
+<mercurial-with-yt>` and :ref:`building yt from source <building-yt>`.
 

diff -r 6d9a0731ec1319951116621170692e97bc603ff5 -r 5c2d8110d05a0b7efc6e7194302ac0f2939e30eb yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -20,7 +20,12 @@
 pyfits = _astropy.pyfits
 pywcs = _astropy.pywcs
 
-class FITSImageBuffer(pyfits.HDUList):
+if pyfits is None:
+    HDUList = object
+else:
+    HDUList = pyfits.HDUList
+
+class FITSImageBuffer(HDUList):
 
     def __init__(self, data, fields=None, units="cm",
                  center=None, scale=None, wcs=None):


https://bitbucket.org/yt_analysis/yt/commits/8ce1487c06c6/
Changeset:   8ce1487c06c6
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-20 14:06:48
Summary:     Merged in jzuhone/yt-3.x/yt-3.0 (pull request #905)

FITS frontend refactor, and some docs
Affected #:  15 files

diff -r 78d4ad6d25422503bf323322ab2ff8609c8da6ae -r 8ce1487c06c6efc8041f9c8bd4e6f80b5782a08d doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -36,20 +36,20 @@
 .. code:: python
 
     from yt.mods import *
-    from yt.analysis_modules.api import *
+    from yt.analysis_modules.photon_simulator.api import *
     from yt.utilities.cosmology import Cosmology
 
 We're going to load up an Athena dataset of a galaxy cluster core:
 
 .. code:: python
 
-    pf = load("MHDSloshing/virgo_low_res.0054.vtk", 
-              parameters={"TimeUnits":3.1557e13,
-                          "LengthUnits":3.0856e24,
-                          "DensityUnits":6.770424595218825e-27})
+    pf = load("MHDSloshing/virgo_low_res.0054.vtk",
+              parameters={"time_unit":(1.0,"Myr"),
+                          "length_unit":(1.0,"Mpc"),
+                          "mass_unit":(1.0e14,"Msun")}) 
 
 First, to get a sense of what the resulting image will look like, let's
-make a new ``yt`` field called ``"DensitySquared"``, since the X-ray
+make a new ``yt`` field called ``"density_squared"``, since the X-ray
 emission is proportional to :math:`\rho^2`, and a weak function of
 temperature and metallicity.
 
@@ -57,14 +57,14 @@
 
     def _density_squared(field, data):
         return data["density"]**2
-    add_field("DensitySquared", function=_density_squared)
+    add_field("density_squared", function=_density_squared)
 
 Then we'll project this field along the z-axis.
 
 .. code:: python
 
-    prj = ProjectionPlot(pf, "z", ["DensitySquared"], width=(500., "kpc"))
-    prj.set_cmap("DensitySquared", "gray_r")
+    prj = ProjectionPlot(ds, "z", ["density_squared"], width=(500., "kpc"))
+    prj.set_cmap("density_squared", "gray_r")
     prj.show()
 
 .. image:: _images/dsquared.png
@@ -89,7 +89,7 @@
 
 .. code:: python
 
-    sp = pf.sphere("c", (250., "kpc"))
+    sp = ds.sphere("c", (250., "kpc"))
 
 This will serve as our ``data_source`` that we will use later. Next, we
 need to create the ``SpectralModel`` instance that will determine how
@@ -258,11 +258,6 @@
     events = photons.project_photons(L, exp_time_new=2.0e5, redshift_new=0.07, absorb_model=abs_model,
                                      sky_center=(187.5,12.333), responses=[ARF,RMF])
 
-.. parsed-literal::
-
-    WARNING:yt:This routine has not been tested to work with all RMFs. YMMV.
-
-
 Also, the optional keyword ``psf_sigma`` specifies a Gaussian standard
 deviation to scatter the photon sky positions around with, providing a
 crude representation of a PSF.
@@ -282,17 +277,17 @@
 
 .. code:: python
 
-    {'eobs': array([  0.32086522,   0.32271389,   0.32562708, ...,   8.90600621,
-             9.73534237,  10.21614256]), 
-     'xsky': array([ 187.5177707 ,  187.4887825 ,  187.50733609, ...,  187.5059345 ,
-            187.49897546,  187.47307048]), 
-     'ysky': array([ 12.33519996,  12.3544496 ,  12.32750903, ...,  12.34907707,
-            12.33327653,  12.32955225]), 
-     'ypix': array([ 133.85374195,  180.68583074,  115.14110561, ...,  167.61447493,
-            129.17278711,  120.11508562]), 
+    {'eobs': YTArray([  0.32086522,   0.32271389,   0.32562708, ...,   8.90600621,
+             9.73534237,  10.21614256]) keV, 
+     'xsky': YTArray([ 187.5177707 ,  187.4887825 ,  187.50733609, ...,  187.5059345 ,
+            187.49897546,  187.47307048]) degree, 
+     'ysky': YTArray([ 12.33519996,  12.3544496 ,  12.32750903, ...,  12.34907707,
+            12.33327653,  12.32955225]) degree, 
+     'ypix': YTArray([ 133.85374195,  180.68583074,  115.14110561, ...,  167.61447493,
+            129.17278711,  120.11508562]) (dimensionless), 
      'PI': array([ 27,  15,  25, ..., 609, 611, 672]), 
-     'xpix': array([  86.26331108,  155.15934197,  111.06337043, ...,  114.39586907,
-            130.93509652,  192.50639633])}
+     'xpix': YTArray([  86.26331108,  155.15934197,  111.06337043, ...,  114.39586907,
+            130.93509652,  192.50639633]) (dimensionless)}
 
 
 We can bin up the events into an image and save it to a FITS file. The
@@ -436,7 +431,7 @@
 
    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
 
-   pf = load_uniform_grid(data, ddims, 2*R*cm_per_kpc, bbox=bbox)
+   ds = load_uniform_grid(data, ddims, 2*R*cm_per_kpc, bbox=bbox)
 
 where for simplicity we have set the velocities to zero, though we
 could have created a realistic velocity field as well. Now, we
@@ -445,7 +440,7 @@
 
 .. code:: python
 
-   sphere = pf.sphere(pf.domain_center, 1.0/pf["mpc"])
+   sphere = ds.sphere(pf.domain_center, (1.0,"Mpc"))
        
    A = 6000.
    exp_time = 2.0e5

diff -r 78d4ad6d25422503bf323322ab2ff8609c8da6ae -r 8ce1487c06c6efc8041f9c8bd4e6f80b5782a08d doc/source/cookbook/fits_radio_cubes.ipynb
--- a/doc/source/cookbook/fits_radio_cubes.ipynb
+++ b/doc/source/cookbook/fits_radio_cubes.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:dbc41f6f836cdeb88a549d85e389d6e4e43d163d8c4c267baea8cce0ebdbf441"
+  "signature": "sha256:af10462a2a656015309ffc74e415bade3910ba7c7ccca5e15cfa98eca7ccadf4"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -23,7 +23,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "This notebook demonstrates some of the capabilties of `yt` on some FITS \"position-position-velocity\" cubes of radio data. "
+      "This notebook demonstrates some of the capabilities of `yt` on some FITS \"position-position-spectrum\" cubes of radio data. "
      ]
     },
     {
@@ -82,7 +82,7 @@
      "input": [
       "from yt.frontends.fits.misc import PlotWindowWCS\n",
       "wcs_slc = PlotWindowWCS(slc)\n",
-      "wcs_slc.show()"
+      "wcs_slc[\"intensity\"]"
      ],
      "language": "python",
      "metadata": {},
@@ -109,14 +109,16 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "We can also take slices of this dataset at a few different values along the \"z\" axis (corresponding to the velocity), so let's try a few. First, we'll check what the value along the velocity axis at the domain center is, as well as the range of possible values. This is the third value of each array. "
+      "We can also take slices of this dataset at a few different values along the \"z\" axis (corresponding to the velocity), so let's try a few. To pick specific velocity values for slices, we will need to use the dataset's `spec2pixel` method to determine which pixels to slice on:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "print ds.domain_left_edge[2], ds.domain_center[2], ds.domain_right_edge[2]"
+      "import astropy.units as u\n",
+      "new_center = ds.domain_center\n",
+      "new_center[2] = ds.spec2pixel(-250000.*u.m/u.s)"
      ],
      "language": "python",
      "metadata": {},
@@ -126,15 +128,33 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Now, we'll choose a few values for the velocity within this range:"
+      "Now we can use this new center to create a new slice:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "new_center = ds.domain_center \n",
-      "new_center[2] = -250000.\n",
+      "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can do this a few more times for different values of the velocity:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "new_center[2] = ds.spec2pixel(-100000.*u.m/u.s)\n",
+      "print new_center[2]\n",
       "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
       "slc.show()"
      ],
@@ -146,21 +166,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "new_center = ds.domain_center \n",
-      "new_center[2] = -100000.\n",
-      "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
-      "slc.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "new_center = ds.domain_center \n",
-      "new_center[2] = -150000.\n",
+      "new_center[2] = ds.spec2pixel(-150000.*u.m/u.s)\n",
       "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
       "slc.show()"
      ],
@@ -179,14 +185,14 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "We can also make a projection of all the emission along the line of sight:"
+      "We can also make a projection of all the emission along the line of sight. Since we're not doing an integration along a path length, we needed to specify `proj_style = \"sum\"`:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "prj = yt.ProjectionPlot(ds, \"z\", [\"intensity\"], origin=\"native\", proj_style=\"sum\")\n",
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"intensity\"], proj_style=\"sum\", origin=\"native\")\n",
       "prj.show()"
      ],
      "language": "python",
@@ -197,13 +203,6 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Since we're not doing an integration along a path length, we needed to specify `proj_style = \"sum\"`. "
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
       "We can also look at the slices perpendicular to the other axes, which will show us the structure along the velocity axis:"
      ]
     },
@@ -211,8 +210,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "slc = yt.SlicePlot(ds, \"x\", [\"intensity\"], origin=\"native\", \n",
-      "                   aspect=\"auto\", window_size=(8.0,8.0))\n",
+      "slc = yt.SlicePlot(ds, \"x\", [\"intensity\"], origin=\"native\")\n",
       "slc.show()"
      ],
      "language": "python",
@@ -223,8 +221,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "slc = yt.SlicePlot(ds, \"y\", [\"intensity\"], origin=\"native\", \n",
-      "                   aspect=\"auto\", window_size=(8.0,8.0))\n",
+      "slc = yt.SlicePlot(ds, \"y\", [\"intensity\"], origin=\"native\")\n",
       "slc.show()"
      ],
      "language": "python",

diff -r 78d4ad6d25422503bf323322ab2ff8609c8da6ae -r 8ce1487c06c6efc8041f9c8bd4e6f80b5782a08d doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -111,6 +111,8 @@
 out with them.  In :ref:`code-style-guide` there is a list of handy tips for
 how to structure and write your code.
 
+.. _mercurial-with-yt:
+
 How to Use Mercurial with yt
 ++++++++++++++++++++++++++++
 
@@ -135,6 +137,8 @@
   * If you run into any troubles, stop by IRC (see :ref:`irc`) or the mailing
     list.
 
+.. _building-yt:
+
 Building yt
 +++++++++++
 
@@ -148,19 +152,31 @@
 
 .. code-block:: bash
 
-   python2.7 setup.py develop
+  $ python2.7 setup.py develop
 
 If you have previously "installed" via ``setup.py install`` you have to
 re-install:
 
 .. code-block:: bash
 
-   python2.7 setup.py install
+  $ python2.7 setup.py install
 
-Only one of these two options is needed.  yt may require you to specify the
-location to libpng and hdf5.  This can be done through files named ``png.cfg``
-and ``hdf5.cfg``.  If you are using the installation script, these will already
-exist.
+Only one of these two options is needed.
+
+If you plan to develop yt on Windows, we recommend using the `MinGW <http://www.mingw.org/>`_ gcc
+compiler that can be installed using the `Anaconda Python
+Distribution <https://store.continuum.io/cshop/anaconda/>`_. Also, the syntax for the
+setup command is slightly different; you must type:
+
+.. code-block:: bash
+
+  $ python2.7 setup.py build --compiler=mingw32 develop
+
+or
+
+.. code-block:: bash
+
+  $ python2.7 setup.py build --compiler=mingw32 install
 
 Making and Sharing Changes
 ++++++++++++++++++++++++++

diff -r 78d4ad6d25422503bf323322ab2ff8609c8da6ae -r 8ce1487c06c6efc8041f9c8bd4e6f80b5782a08d doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -676,8 +676,14 @@
 Additional Options
 ++++++++++++++++++
 
+The following are additional options that may be passed to the ``load`` command when analyzing
+FITS data:
+
+``nan_mask``
+~~~~~~~~~~~~
+
 FITS image data may include ``NaNs``. If you wish to mask this data out,
-you may supply a ``nan_mask`` parameter to ``load``, which may either be a
+you may supply a ``nan_mask`` parameter, which may either be a
 single floating-point number (applies to all fields) or a Python dictionary
 containing different mask values for different fields:
 
@@ -689,9 +695,27 @@
    # passing a dict
    ds = load("m33_hi.fits", nan_mask={"intensity":-1.0,"temperature":0.0})
 
+``suppress_astropy_warnings``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
 Generally, AstroPy may generate a lot of warnings about individual FITS
 files, many of which you may want to ignore. If you want to see these
-warnings, set ``suppress_astropy_warnings = False`` in the call to ``load``.
+warnings, set ``suppress_astropy_warnings = False``.
+
+``z_axis_decomp``
+~~~~~~~~~~~~~~~~~
+
+For some applications, decomposing 3D FITS data into grids that span the x-y plane with short
+strides along the z-axis may result in a significant improvement in I/O speed. To enable this feature, set ``z_axis_decomp=True``.
+
+``spectral_factor``
+~~~~~~~~~~~~~~~~~~~
+
+Often, the aspect ratio of 3D spectral cubes can be far from unity. Because yt sets the pixel
+scale as the ``code_length``, certain visualizations (such as volume renderings) may look extended
+or distended in ways that are undesirable. To adjust the width in ``code_length`` of the spectral
+axis, set ``spectral_factor`` equal to a constant which gives the desired scaling,
+or set it to ``"auto"`` to make the width the same as the largest axis in the sky plane.
 
 Miscellaneous Tools for Use with FITS Data
 ++++++++++++++++++++++++++++++++++++++++++
@@ -703,7 +727,6 @@
 
   from yt.frontends.fits.misc import setup_counts_fields, PlotWindowWCS, ds9_region
 
-
 ``setup_counts_fields``
 ~~~~~~~~~~~~~~~~~~~~~~~
 

diff -r 78d4ad6d25422503bf323322ab2ff8609c8da6ae -r 8ce1487c06c6efc8041f9c8bd4e6f80b5782a08d doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -14,8 +14,7 @@
 be time-consuming, yt provides an installation script which downloads and builds
 a fully-isolated Python + NumPy + Matplotlib + HDF5 + Mercurial installation.  
 yt supports Linux and OSX deployment, with the possibility of deployment on 
-other Unix-like systems (XSEDE resources, clusters, etc.).  Windows is not 
-supported.
+other Unix-like systems (XSEDE resources, clusters, etc.).
 
 Since the install is fully-isolated, if you get tired of having yt on your 
 system, you can just delete its directory, and yt and all of its dependencies
@@ -83,14 +82,73 @@
 will also need to set ``LD_LIBRARY_PATH`` and ``PYTHONPATH`` to contain 
 ``$YT_DEST/lib`` and ``$YT_DEST/python2.7/site-packages``, respectively.
 
+.. _testing-installation:
+
+Testing Your Installation
+-------------------------
+
+To test to make sure everything is installed properly, try running yt at
+the command line:
+
+.. code-block:: bash
+
+  $ yt --help
+
+If this works, you should get a list of the various command-line options for
+yt, which means you have successfully installed yt.  Congratulations!
+
+If you get an error, follow the instructions it gives you to debug the problem.
+Do not hesitate to :ref:`contact us <asking-for-help>` so we can help you
+figure it out.
+
+If you like, this might be a good time :ref:`to run the test suite <testing>`.
+
+.. _updating-yt:
+
+Updating yt and its dependencies
+--------------------------------
+
+With many active developers, code development sometimes occurs at a furious
+pace in yt.  To make sure you're using the latest version of the code, run
+this command at a command-line:
+
+.. code-block:: bash
+
+  $ yt update
+
+Additionally, if you want to make sure you have the latest dependencies
+associated with yt and update the codebase simultaneously, type this:
+
+.. code-block:: bash
+
+  $ yt update --all
+
+.. _removing-yt:
+
+Removing yt and its dependencies
+--------------------------------
+
+Because yt and its dependencies are installed in an isolated directory when
+you use the script installer, you can easily remove yt and all of its
+dependencies cleanly.  Simply remove the install directory and its
+subdirectories and you're done.  If you *really* had problems with the
+code, this is a last defense for solving: remove and then fully
+:ref:`re-install <installing-yt>` from the install script again.
+
+.. _alternative-installation:
+
 Alternative Installation Methods
 --------------------------------
 
+.. _pip-installation:
+
+Installing yt Using pip or from Source
+++++++++++++++++++++++++++++++++++++++
+
 If you want to forego the use of the install script, you need to make sure you
 have yt's dependencies installed on your system.  These include: a C compiler,
-``HDF5``, ``Freetype``, ``libpng``, ``python``, ``cython``, ``NumPy``, and
-``matplotlib``.  From here, you can use ``pip`` (which comes with ``Python``) to
-install yt as:
+``HDF5``, ``python``, ``cython``, ``NumPy``, ``matplotlib``, and ``h5py``. From here,
+you can use ``pip`` (which comes with ``Python``) to install yt as:
 
 .. code-block:: bash
 
@@ -110,67 +168,46 @@
 It will install yt into ``$HOME/.local/lib64/python2.7/site-packages``. 
 Please refer to ``setuptools`` documentation for the additional options.
 
-Provided that the required dependencies are in a predictable location, yt should
-be able to find them automatically. However, you can manually specify prefix used
-for installation of ``HDF5``, ``Freetype`` and ``libpng`` by using ``hdf5.cfg``,
-``freetype.cfg``, ``png.cfg`` or setting ``HDF5_DIR``, ``FTYPE_DIR``, ``PNG_DIR``
-environmental variables respectively, e.g.
+If you choose this installation method, you do not need to run the activation
+script as it is unnecessary.
+
+.. _anaconda-installation:
+
+Installing yt Using Anaconda
+++++++++++++++++++++++++++++
+
+Perhaps the quickest way to get yt up and running is to install it using the `Anaconda Python
+Distribution <https://store.continuum.io/cshop/anaconda/>`_, which will provide you with an
+easy-to-use environment for installing Python packages. To install a bare-bones Python
+installation with yt, first visit http://repo.continuum.io/miniconda/ and download a recent
+version of the ``Miniconda-x.y.z`` script (corresponding to Python 2.7) for your platform and
+system architecture. Next, run the script, e.g.:
 
 .. code-block:: bash
 
-  $ echo '/usr/local' > hdf5.cfg
-  $ export FTYPE_DIR=/opt/freetype
+  $ bash Miniconda-3.3.0-Linux-x86_64.sh
 
-If you choose this installation method, you do not need to run the activation
-script as it is unnecessary.
-
-.. _testing-installation:
-
-Testing Your Installation
--------------------------
-
-To test to make sure everything is installed properly, try running yt at 
-the command line:
+Make sure that the Anaconda ``bin`` directory is in your path, and then issue:
 
 .. code-block:: bash
 
-  $ yt --help
+  $ conda install yt
 
-If this works, you should get a list of the various command-line options for
-yt, which means you have successfully installed yt.  Congratulations!  
+which will install yt along with all of its dependencies.
 
-If you get an error, follow the instructions it gives you to debug the problem.  
-Do not hesitate to :ref:`contact us <asking-for-help>` so we can help you 
-figure it out.
+.. _windows-installation:
 
-.. _updating-yt:
+Installing yt on Windows
+++++++++++++++++++++++++
 
-Updating yt and its dependencies
---------------------------------
+Installation on Microsoft Windows is only supported for Windows XP Service Pack 3 and
+higher (both 32-bit and 64-bit) using Anaconda.
 
-With many active developers, code development sometimes occurs at a furious 
-pace in yt.  To make sure you're using the latest version of the code, run
-this command at a command-line:
+Keeping yt Updated via Mercurial
+++++++++++++++++++++++++++++++++
 
-.. code-block:: bash
+If you want to maintain your yt installation via updates straight from the Bitbucket repository,
+or if you want to do some development on your own, we suggest you check out some of the
+:ref:`development docs <contributing-code>`, especially the sections on :ref:`Mercurial
+<mercurial-with-yt>` and :ref:`building yt from source <building-yt>`.
 
-  $ yt update
-
-Additionally, if you want to make sure you have the latest dependencies 
-associated with yt and update the codebase simultaneously, type this:
-
-.. code-block:: bash
-
-  $ yt update --all
-
-.. _removing-yt:
-
-Removing yt and its dependencies
---------------------------------
-
-Because yt and its dependencies are installed in an isolated directory when
-you use the script installer, you can easily remove yt and all of its 
-dependencies cleanly.  Simply remove the install directory and its 
-subdirectories and you're done.  If you *really* had problems with the
-code, this is a last defense for solving: remove and then fully
-:ref:`re-install <installing-yt>` from the install script again.

diff -r 78d4ad6d25422503bf323322ab2ff8609c8da6ae -r 8ce1487c06c6efc8041f9c8bd4e6f80b5782a08d yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -55,8 +55,8 @@
     SphericalCoordinateHandler
 from yt.geometry.geographic_coordinates import \
     GeographicCoordinateHandler
-from yt.geometry.ppv_coordinates import \
-    PPVCoordinateHandler
+from yt.geometry.spec_cube_coordinates import \
+    SpectralCubeCoordinateHandler
 
 # We want to support the movie format in the future.
 # When such a thing comes to pass, I'll move all the stuff that is contant up
@@ -361,8 +361,8 @@
             self.coordinates = SphericalCoordinateHandler(self)
         elif self.geometry == "geographic":
             self.coordinates = GeographicCoordinateHandler(self)
-        elif self.geometry == "ppv":
-            self.coordinates = PPVCoordinateHandler(self)
+        elif self.geometry == "spectral_cube":
+            self.coordinates = SpectralCubeCoordinateHandler(self)
         else:
             raise YTGeometryNotSupported(self.geometry)
 

diff -r 78d4ad6d25422503bf323322ab2ff8609c8da6ae -r 8ce1487c06c6efc8041f9c8bd4e6f80b5782a08d yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -44,11 +44,15 @@
 
 lon_prefixes = ["X","RA","GLON"]
 lat_prefixes = ["Y","DEC","GLAT"]
-vel_prefixes = ["V","ENER","FREQ","WAV"]
 delimiters = ["*", "/", "-", "^"]
 delimiters += [str(i) for i in xrange(10)]
 regex_pattern = '|'.join(re.escape(_) for _ in delimiters)
 
+spec_names = {"V":"Velocity",
+              "FREQ":"Frequency",
+              "ENER":"Energy",
+              "WAV":"Wavelength"}
+
 field_from_unit = {"Jy":"intensity",
                    "K":"temperature"}
 
@@ -136,6 +140,7 @@
         self._file_map = {}
         self._ext_map = {}
         self._scale_map = {}
+        dup_field_index = {}
         # Since FITS header keywords are case-insensitive, we only pick a subset of
         # prefixes, ones that we expect to end up in headers.
         known_units = dict([(unit.lower(),unit) for unit in self.pf.unit_registry.lut])
@@ -162,13 +167,19 @@
                         if fname is None: fname = "image_%d" % (j)
                     if self.pf.num_files > 1 and fname.startswith("image"):
                         fname += "_file_%d" % (i)
+                    if ("fits", fname) in self.field_list:
+                        if fname in dup_field_index:
+                            dup_field_index[fname] += 1
+                        else:
+                            dup_field_index[fname] = 1
+                        mylog.warning("This field has the same name as a previously loaded " +
+                                      "field. Changing the name from %s to %s_%d. To avoid " %
+                                      (fname, fname, dup_field_index[fname]) +
+                                      " this, change one of the BTYPE header keywords.")
+                        fname += "_%d" % (dup_field_index[fname])
                     for k in xrange(naxis4):
                         if naxis4 > 1:
                             fname += "_%s_%d" % (hdu.header["CTYPE4"], k+1)
-                        if fname in self.field_list:
-                            mylog.error("You have two fields with the same name. Change one of " +
-                                        "the names in the BTYPE header keyword to distinguish " +
-                                        "them.")
                         self._axis_map[fname] = k
                         self._file_map[fname] = fits_file
                         self._ext_map[fname] = j
@@ -210,7 +221,7 @@
         # If nprocs > 1, decompose the domain into virtual grids
         if self.num_grids > 1:
             if self.pf.z_axis_decomp:
-                dz = (pf.domain_width/pf.domain_dimensions)[2]
+                dz = pf.quan(1.0, "code_length")*pf.spectral_factor
                 self.grid_dimensions[:,2] = np.around(float(pf.domain_dimensions[2])/
                                                             self.num_grids).astype("int")
                 self.grid_dimensions[-1,2] += (pf.domain_dimensions[2] % self.num_grids)
@@ -227,7 +238,7 @@
                 dims = np.array(pf.domain_dimensions)
                 # If we are creating a dataset of lines, only decompose along the position axes
                 if len(pf.line_database) > 0:
-                    dims[pf.vel_axis] = 1
+                    dims[pf.spec_axis] = 1
                 psize = get_psize(dims, self.num_grids)
                 gle, gre, shapes, slices = decompose_array(dims, psize, bbox)
                 self.grid_left_edge = self.pf.arr(gle, "code_length")
@@ -235,9 +246,9 @@
                 self.grid_dimensions = np.array([shape for shape in shapes], dtype="int32")
                 # If we are creating a dataset of lines, only decompose along the position axes
                 if len(pf.line_database) > 0:
-                    self.grid_left_edge[:,pf.vel_axis] = pf.domain_left_edge[pf.vel_axis]
-                    self.grid_right_edge[:,pf.vel_axis] = pf.domain_right_edge[pf.vel_axis]
-                    self.grid_dimensions[:,pf.vel_axis] = pf.domain_dimensions[pf.vel_axis]
+                    self.grid_left_edge[:,pf.spec_axis] = pf.domain_left_edge[pf.spec_axis]
+                    self.grid_right_edge[:,pf.spec_axis] = pf.domain_right_edge[pf.spec_axis]
+                    self.grid_dimensions[:,pf.spec_axis] = pf.domain_dimensions[pf.spec_axis]
         else:
             self.grid_left_edge[0,:] = pf.domain_left_edge
             self.grid_right_edge[0,:] = pf.domain_right_edge
@@ -303,6 +314,7 @@
                  nprocs = None,
                  storage_filename = None,
                  nan_mask = None,
+                 spectral_factor = 1.0,
                  z_axis_decomp = False,
                  line_database = None,
                  line_width = None,
@@ -315,10 +327,13 @@
         self.specified_parameters = parameters
 
         self.z_axis_decomp = z_axis_decomp
+        self.spectral_factor = spectral_factor
 
         if line_width is not None:
             self.line_width = YTQuantity(line_width[0], line_width[1])
             self.line_units = line_width[1]
+            mylog.info("For line folding, spectral_factor = 1.0")
+            self.spectral_factor = 1.0
         else:
             self.line_width = None
 
@@ -356,8 +371,8 @@
                 else:
                     fn = os.path.join(ytcfg.get("yt","test_data_dir"),fits_file)
                 f = _astropy.pyfits.open(fn, memmap=True,
-                                   do_not_scale_image_data=True,
-                                   ignore_blank=True)
+                                         do_not_scale_image_data=True,
+                                         ignore_blank=True)
                 self._fits_files.append(f)
 
         if len(self._handle) > 1 and self._handle[1].name == "EVENTS":
@@ -399,13 +414,23 @@
             self.events_data = False
             self.first_image = 0
             self.primary_header = self._handle[self.first_image].header
-            self.wcs = _astropy.pywcs.WCS(header=self.primary_header)
             self.naxis = self.primary_header["naxis"]
             self.axis_names = [self.primary_header["ctype%d" % (i+1)]
                                for i in xrange(self.naxis)]
             self.dims = [self.primary_header["naxis%d" % (i+1)]
                          for i in xrange(self.naxis)]
 
+            wcs = _astropy.pywcs.WCS(header=self.primary_header)
+            if self.naxis == 4:
+                self.wcs = _astropy.pywcs.WCS(naxis=3)
+                self.wcs.wcs.crpix = wcs.wcs.crpix[:3]
+                self.wcs.wcs.cdelt = wcs.wcs.cdelt[:3]
+                self.wcs.wcs.crval = wcs.wcs.crval[:3]
+                self.wcs.wcs.cunit = [str(unit) for unit in wcs.wcs.cunit][:3]
+                self.wcs.wcs.ctype = [type for type in wcs.wcs.ctype][:3]
+            else:
+                self.wcs = wcs
+
         self.refine_by = 2
 
         Dataset.__init__(self, fn, dataset_type)
@@ -441,11 +466,12 @@
         self.time_unit = self.quan(1.0, "s")
         self.velocity_unit = self.quan(1.0, "cm/s")
         if "beam_size" in self.specified_parameters:
+            beam_size = self.specified_parameters["beam_size"]
             beam_size = self.quan(beam_size[0], beam_size[1]).in_cgs().value
         else:
             beam_size = 1.0
         self.unit_registry.add("beam",beam_size,dimensions=dimensions.solid_angle)
-        if self.ppv_data:
+        if self.spec_cube:
             units = self.wcs_2d.wcs.cunit[0]
             if units == "deg": units = "degree"
             if units == "rad": units = "radian"
@@ -520,17 +546,17 @@
         self.reversed = False
 
         # Check to see if this data is in some kind of (Lat,Lon,Vel) format
-        self.ppv_data = False
+        self.spec_cube = False
         x = 0
-        for p in lon_prefixes+lat_prefixes+vel_prefixes:
+        for p in lon_prefixes+lat_prefixes+spec_names.keys():
             y = np_char.startswith(self.axis_names[:self.dimensionality], p)
             x += y.sum()
-        if x == self.dimensionality: self._setup_ppv()
+        if x == self.dimensionality: self._setup_spec_cube()
 
-    def _setup_ppv(self):
+    def _setup_spec_cube(self):
 
-        self.ppv_data = True
-        self.geometry = "ppv"
+        self.spec_cube = True
+        self.geometry = "spectral_cube"
 
         end = min(self.dimensionality+1,4)
         if self.events_data:
@@ -556,11 +582,11 @@
 
         if self.wcs.naxis > 2:
 
-            self.vel_axis = np.zeros((end-1), dtype="bool")
-            for p in vel_prefixes:
-                self.vel_axis += np_char.startswith(ctypes, p)
-            self.vel_axis = np.where(self.vel_axis)[0][0]
-            self.vel_name = ctypes[self.vel_axis].split("-")[0].lower()
+            self.spec_axis = np.zeros((end-1), dtype="bool")
+            for p in spec_names.keys():
+                self.spec_axis += np_char.startswith(ctypes, p)
+            self.spec_axis = np.where(self.spec_axis)[0][0]
+            self.spec_name = spec_names[ctypes[self.spec_axis].split("-")[0][0]]
 
             self.wcs_2d = _astropy.pywcs.WCS(naxis=2)
             self.wcs_2d.wcs.crpix = self.wcs.wcs.crpix[[self.lon_axis, self.lat_axis]]
@@ -571,41 +597,60 @@
             self.wcs_2d.wcs.ctype = [self.wcs.wcs.ctype[self.lon_axis],
                                      self.wcs.wcs.ctype[self.lat_axis]]
 
-            x0 = self.wcs.wcs.crpix[self.vel_axis]
-            dz = self.wcs.wcs.cdelt[self.vel_axis]
-            z0 = self.wcs.wcs.crval[self.vel_axis]
-            self.vel_unit = str(self.wcs.wcs.cunit[self.vel_axis])
-
-            if dz < 0.0:
-                self.reversed = True
-                le = self.dims[self.vel_axis]+0.5
-                re = 0.5
-            else:
-                le = 0.5
-                re = self.dims[self.vel_axis]+0.5
-            self.domain_left_edge[self.vel_axis] = (le-x0)*dz + z0
-            self.domain_right_edge[self.vel_axis] = (re-x0)*dz + z0
-            if self.reversed: dz *= -1
+            self._p0 = self.wcs.wcs.crpix[self.spec_axis]
+            self._dz = self.wcs.wcs.cdelt[self.spec_axis]
+            self._z0 = self.wcs.wcs.crval[self.spec_axis]
+            self.spec_unit = str(self.wcs.wcs.cunit[self.spec_axis])
 
             if self.line_width is not None:
-                self.line_width = self.line_width.in_units(self.vel_unit)
-                self.freq_begin = self.domain_left_edge[self.vel_axis]
-                nz = np.rint(self.line_width.value/dz).astype("int")
-                self.line_width = dz*nz
-                self.domain_left_edge[self.vel_axis] = -self.line_width/2.
-                self.domain_right_edge[self.vel_axis] = self.line_width/2.
-                self.domain_dimensions[self.vel_axis] = nz
-
+                if self._dz < 0.0:
+                    self.reversed = True
+                    le = self.dims[self.spec_axis]+0.5
+                else:
+                    le = 0.5
+                self.line_width = self.line_width.in_units(self.spec_unit)
+                self.freq_begin = (le-self._p0)*self._dz + self._z0
+                # We now reset these so that they are consistent
+                # with the new setup
+                self._dz = np.abs(self._dz)
+                self._p0 = 0.0
+                self._z0 = 0.0
+                nz = np.rint(self.line_width.value/self._dz).astype("int")
+                self.line_width = self._dz*nz
+                self.domain_left_edge[self.spec_axis] = -0.5*float(nz)
+                self.domain_right_edge[self.spec_axis] = 0.5*float(nz)
+                self.domain_dimensions[self.spec_axis] = nz
+            else:
+                if self.spectral_factor == "auto":
+                    self.spectral_factor = float(max(self.domain_dimensions[[self.lon_axis,
+                                                                             self.lat_axis]]))
+                    self.spectral_factor /= self.domain_dimensions[self.spec_axis]
+                    mylog.info("Setting the spectral factor to %f" % (self.spectral_factor))
+                Dz = self.domain_right_edge[self.spec_axis]-self.domain_left_edge[self.spec_axis]
+                self.domain_right_edge[self.spec_axis] = self.domain_left_edge[self.spec_axis] + \
+                                                        self.spectral_factor*Dz
+                self._dz /= self.spectral_factor
+                self._p0 = (self._p0-0.5)*self.spectral_factor + 0.5
         else:
 
             self.wcs_2d = self.wcs
-            self.vel_axis = 2
-            self.vel_name = "z"
-            self.vel_unit = "code length"
+            self.spec_axis = 2
+            self.spec_name = "z"
+            self.spec_unit = "code length"
+
+    def spec2pixel(self, spec_value):
+        sv = self.arr(spec_value).in_units(self.spec_unit)
+        return self.arr((sv.v-self._z0)/self._dz+self._p0,
+                        "code_length")
+
+    def pixel2spec(self, pixel_value):
+        pv = self.arr(pixel_value, "code_length")
+        return self.arr((pv.v-self._p0)*self._dz+self._z0,
+                        self.spec_unit)
 
     def __del__(self):
-        for file in self._fits_files:
-            file.close()
+        for f in self._fits_files:
+            f.close()
             del file
         self._handle.close()
         del self._handle

diff -r 78d4ad6d25422503bf323322ab2ff8609c8da6ae -r 8ce1487c06c6efc8041f9c8bd4e6f80b5782a08d yt/frontends/fits/fields.py
--- a/yt/frontends/fits/fields.py
+++ b/yt/frontends/fits/fields.py
@@ -23,7 +23,7 @@
         for field in pf.field_list:
             if field[0] == "fits": self[field].take_log = False
 
-    def _setup_ppv_fields(self):
+    def _setup_spec_cube_fields(self):
 
         def _get_2d_wcs(data, axis):
             w_coords = data.pf.wcs_2d.wcs_pix2world(data["x"], data["y"], 1)
@@ -42,17 +42,18 @@
             self.add_field(("fits",name), function=world_f(axis, unit), units=unit)
 
         if self.pf.dimensionality == 3:
-            def _vel_los(field, data):
-                axis = "xyz"[data.pf.vel_axis]
-                return data.pf.arr(data[axis].ndarray_view(),data.pf.vel_unit)
-            self.add_field(("fits",self.pf.vel_name), function=_vel_los,
-                           units=self.pf.vel_unit)
+            def _spec(field, data):
+                axis = "xyz"[data.pf.spec_axis]
+                sp = (data[axis].ndarray_view()-self.pf._p0)*self.pf._dz + self.pf._z0
+                return data.pf.arr(sp, data.pf.spec_unit)
+            self.add_field(("fits","spectral"), function=_spec,
+                           units=self.pf.spec_unit, display_name=self.pf.spec_name)
 
     def setup_fluid_fields(self):
 
-        if self.pf.ppv_data:
+        if self.pf.spec_cube:
             def _pixel(field, data):
                 return data.pf.arr(data["ones"], "pixel")
             self.add_field(("fits","pixel"), function=_pixel, units="pixel")
-            self._setup_ppv_fields()
+            self._setup_spec_cube_fields()
             return

diff -r 78d4ad6d25422503bf323322ab2ff8609c8da6ae -r 8ce1487c06c6efc8041f9c8bd4e6f80b5782a08d yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -26,8 +26,10 @@
         self._handle = pf._handle
         if self.pf.line_width is not None:
             self.line_db = self.pf.line_database
+            self.dz = self.pf.line_width/self.domain_dimensions[self.pf.spec_axis]
         else:
             self.line_db = None
+            self.dz = 1.
 
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
@@ -90,19 +92,19 @@
                     start = ((g.LeftEdge-self.pf.domain_left_edge)/dx).to_ndarray().astype("int")
                     end = start + g.ActiveDimensions
                     if self.line_db is not None and fname in self.line_db:
-                        my_off = self.line_db.get(fname).in_units(self.pf.vel_unit).value
+                        my_off = self.line_db.get(fname).in_units(self.pf.spec_unit).value
                         my_off = my_off - 0.5*self.pf.line_width
-                        my_off = int((my_off-self.pf.freq_begin)/dx[self.pf.vel_axis].value)
+                        my_off = int((my_off-self.pf.freq_begin)/self.dz)
                         my_off = max(my_off, 0)
-                        my_off = min(my_off, self.pf.dims[self.pf.vel_axis]-1)
-                        start[self.pf.vel_axis] += my_off
-                        end[self.pf.vel_axis] += my_off
+                        my_off = min(my_off, self.pf.dims[self.pf.spec_axis]-1)
+                        start[self.pf.spec_axis] += my_off
+                        end[self.pf.spec_axis] += my_off
                         mylog.debug("Reading from " + str(start) + str(end))
                     slices = [slice(start[i],end[i]) for i in xrange(3)]
                     if self.pf.reversed:
-                        new_start = self.pf.dims[self.pf.vel_axis]-1-start[self.pf.vel_axis]
-                        new_end = max(self.pf.dims[self.pf.vel_axis]-1-end[self.pf.vel_axis],0)
-                        slices[self.pf.vel_axis] = slice(new_start,new_end,-1)
+                        new_start = self.pf.dims[self.pf.spec_axis]-1-start[self.pf.spec_axis]
+                        new_end = max(self.pf.dims[self.pf.spec_axis]-1-end[self.pf.spec_axis],0)
+                        slices[self.pf.spec_axis] = slice(new_start,new_end,-1)
                     if self.pf.dimensionality == 2:
                         nx, ny = g.ActiveDimensions[:2]
                         nz = 1
@@ -114,8 +116,8 @@
                     else:
                         data = ds.data[slices[2],slices[1],slices[0]].transpose()
                     if self.line_db is not None:
-                        nz1 = data.shape[self.pf.vel_axis]
-                        nz2 = g.ActiveDimensions[self.pf.vel_axis]
+                        nz1 = data.shape[self.pf.spec_axis]
+                        nz2 = g.ActiveDimensions[self.pf.spec_axis]
                         if nz1 != nz2:
                             old_data = data.copy()
                             data = np.zeros(g.ActiveDimensions)

diff -r 78d4ad6d25422503bf323322ab2ff8609c8da6ae -r 8ce1487c06c6efc8041f9c8bd4e6f80b5782a08d yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -14,7 +14,6 @@
 from yt.fields.derived_field import ValidateSpatial
 from yt.utilities.on_demand_imports import _astropy
 from yt.funcs import mylog, get_image_suffix
-from yt.visualization._mpl_imports import FigureCanvasAgg
 import os
 
 def _make_counts(emin, emax):
@@ -130,26 +129,17 @@
             raise NotImplementedError("WCS axes are not implemented for oblique plots.")
         if not hasattr(pw.pf, "wcs_2d"):
             raise NotImplementedError("WCS axes are not implemented for this dataset.")
-        if pw.data_source.axis != pw.pf.vel_axis:
+        if pw.data_source.axis != pw.pf.spec_axis:
             raise NotImplementedError("WCS axes are not implemented for this axis.")
-        self.pf = pw.pf
+        self.plots = {}
         self.pw = pw
-        self.plots = {}
-        self.wcs_axes = []
         for f in pw.plots:
             rect = pw.plots[f]._get_best_layout()[1]
             fig = pw.plots[f].figure
-            ax = WCSAxes(fig, rect, wcs=pw.pf.wcs_2d, frameon=False)
-            fig.add_axes(ax)
-            self.wcs_axes.append(ax)
-        self._setup_plots()
-
-    def _setup_plots(self):
-        pw = self.pw
-        for f, ax in zip(pw.plots, self.wcs_axes):
-            wcs = ax.wcs.wcs
-            pw.plots[f].axes.get_xaxis().set_visible(False)
-            pw.plots[f].axes.get_yaxis().set_visible(False)
+            ax = fig.axes[0]
+            wcs_ax = WCSAxes(fig, rect, wcs=pw.pf.wcs_2d, frameon=False)
+            fig.add_axes(wcs_ax)
+            wcs = pw.pf.wcs_2d.wcs
             xax = pw.pf.coordinates.x_axis[pw.data_source.axis]
             yax = pw.pf.coordinates.y_axis[pw.data_source.axis]
             xlabel = "%s (%s)" % (wcs.ctype[xax].split("-")[0],
@@ -157,18 +147,18 @@
             ylabel = "%s (%s)" % (wcs.ctype[yax].split("-")[0],
                                   wcs.cunit[yax])
             fp = pw._font_properties
-            ax.coords[0].set_axislabel(xlabel, fontproperties=fp)
-            ax.coords[1].set_axislabel(ylabel, fontproperties=fp)
-            ax.set_xlim(pw.xlim[0].value, pw.xlim[1].value)
-            ax.set_ylim(pw.ylim[0].value, pw.ylim[1].value)
-            ax.coords[0].ticklabels.set_fontproperties(fp)
-            ax.coords[1].ticklabels.set_fontproperties(fp)
-            self.plots[f] = pw.plots[f]
-        self.pw = pw
-        self.pf = self.pw.pf
-
-    def refresh(self):
-        self._setup_plots(self)
+            wcs_ax.coords[0].set_axislabel(xlabel, fontproperties=fp)
+            wcs_ax.coords[1].set_axislabel(ylabel, fontproperties=fp)
+            wcs_ax.coords[0].ticklabels.set_fontproperties(fp)
+            wcs_ax.coords[1].ticklabels.set_fontproperties(fp)
+            ax.xaxis.set_visible(False)
+            ax.yaxis.set_visible(False)
+            wcs_ax.set_xlim(pw.xlim[0].value, pw.xlim[1].value)
+            wcs_ax.set_ylim(pw.ylim[0].value, pw.ylim[1].value)
+            wcs_ax.coords.frame._update_cache = []
+            ax.xaxis.set_visible(False)
+            ax.yaxis.set_visible(False)
+            self.plots[f] = fig
 
     def keys(self):
         return self.plots.keys()

diff -r 78d4ad6d25422503bf323322ab2ff8609c8da6ae -r 8ce1487c06c6efc8041f9c8bd4e6f80b5782a08d yt/geometry/ppv_coordinates.py
--- a/yt/geometry/ppv_coordinates.py
+++ /dev/null
@@ -1,58 +0,0 @@
-"""
-Cartesian fields
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-from .cartesian_coordinates import \
-    CartesianCoordinateHandler
-
-class PPVCoordinateHandler(CartesianCoordinateHandler):
-
-    def __init__(self, pf):
-        super(PPVCoordinateHandler, self).__init__(pf)
-
-        self.axis_name = {}
-        self.axis_id = {}
-
-        for axis, axis_name in zip([pf.lon_axis, pf.lat_axis, pf.vel_axis],
-                                   ["Image\ x", "Image\ y", pf.vel_name]):
-            lower_ax = "xyz"[axis]
-            upper_ax = lower_ax.upper()
-
-            self.axis_name[axis] = axis_name
-            self.axis_name[lower_ax] = axis_name
-            self.axis_name[upper_ax] = axis_name
-            self.axis_name[axis_name] = axis_name
-
-            self.axis_id[lower_ax] = axis
-            self.axis_id[axis] = axis
-            self.axis_id[axis_name] = axis
-
-        self.default_unit_label = {}
-        self.default_unit_label[pf.lon_axis] = "pixel"
-        self.default_unit_label[pf.lat_axis] = "pixel"
-        self.default_unit_label[pf.vel_axis] = pf.vel_unit
-
-    def convert_to_cylindrical(self, coord):
-        raise NotImplementedError
-
-    def convert_from_cylindrical(self, coord):
-        raise NotImplementedError
-
-    x_axis = { 'x' : 1, 'y' : 0, 'z' : 0,
-                0  : 1,  1  : 0,  2  : 0}
-
-    y_axis = { 'x' : 2, 'y' : 2, 'z' : 1,
-                0  : 2,  1  : 2,  2  : 1}

diff -r 78d4ad6d25422503bf323322ab2ff8609c8da6ae -r 8ce1487c06c6efc8041f9c8bd4e6f80b5782a08d yt/geometry/spec_cube_coordinates.py
--- /dev/null
+++ b/yt/geometry/spec_cube_coordinates.py
@@ -0,0 +1,65 @@
+"""
+Cartesian fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from .cartesian_coordinates import \
+    CartesianCoordinateHandler
+
+class SpectralCubeCoordinateHandler(CartesianCoordinateHandler):
+
+    def __init__(self, pf):
+        super(SpectralCubeCoordinateHandler, self).__init__(pf)
+
+        self.axis_name = {}
+        self.axis_id = {}
+
+        for axis, axis_name in zip([pf.lon_axis, pf.lat_axis, pf.spec_axis],
+                                   ["Image\ x", "Image\ y", pf.spec_name]):
+            lower_ax = "xyz"[axis]
+            upper_ax = lower_ax.upper()
+
+            self.axis_name[axis] = axis_name
+            self.axis_name[lower_ax] = axis_name
+            self.axis_name[upper_ax] = axis_name
+            self.axis_name[axis_name] = axis_name
+
+            self.axis_id[lower_ax] = axis
+            self.axis_id[axis] = axis
+            self.axis_id[axis_name] = axis
+
+        self.default_unit_label = {}
+        self.default_unit_label[pf.lon_axis] = "pixel"
+        self.default_unit_label[pf.lat_axis] = "pixel"
+        self.default_unit_label[pf.spec_axis] = pf.spec_unit
+
+        def _spec_axis(ax, x, y):
+            p = (x,y)[ax]
+            return [self.pf.pixel2spec(pp).v for pp in p]
+
+        self.axis_field = {}
+        self.axis_field[self.pf.spec_axis] = _spec_axis
+
+    def convert_to_cylindrical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_cylindrical(self, coord):
+        raise NotImplementedError
+
+    x_axis = { 'x' : 1, 'y' : 0, 'z' : 0,
+                0  : 1,  1  : 0,  2  : 0}
+
+    y_axis = { 'x' : 2, 'y' : 2, 'z' : 1,
+                0  : 2,  1  : 2,  2  : 1}

diff -r 78d4ad6d25422503bf323322ab2ff8609c8da6ae -r 8ce1487c06c6efc8041f9c8bd4e6f80b5782a08d yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -20,7 +20,12 @@
 pyfits = _astropy.pyfits
 pywcs = _astropy.pywcs
 
-class FITSImageBuffer(pyfits.HDUList):
+if pyfits is None:
+    HDUList = object
+else:
+    HDUList = pyfits.HDUList
+
+class FITSImageBuffer(HDUList):
 
     def __init__(self, data, fields=None, units="cm",
                  center=None, scale=None, wcs=None):

diff -r 78d4ad6d25422503bf323322ab2ff8609c8da6ae -r 8ce1487c06c6efc8041f9c8bd4e6f80b5782a08d yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -163,7 +163,7 @@
     return center
 
 def get_window_parameters(axis, center, width, pf):
-    if pf.geometry == "cartesian" or pf.geometry == "ppv":
+    if pf.geometry == "cartesian" or pf.geometry == "spectral_cube":
         width = get_sanitized_width(axis, width, None, pf)
         center = get_sanitized_center(center, pf)
     elif pf.geometry in ("polar", "cylindrical"):
@@ -742,7 +742,7 @@
             else:
                 (unit_x, unit_y) = self._axes_unit_names
 
-            # For some plots we may set aspect by hand, such as for PPV data.
+            # For some plots we may set aspect by hand, such as for spectral cube data.
             # This will likely be replaced at some point by the coordinate handler
             # setting plot aspect.
             if self.aspect is None:
@@ -832,12 +832,27 @@
                 axis_names = self.pf.coordinates.axis_name
                 xax = self.pf.coordinates.x_axis[axis_index]
                 yax = self.pf.coordinates.y_axis[axis_index]
+
                 if hasattr(self.pf.coordinates, "axis_default_unit_label"):
                     axes_unit_labels = [self.pf.coordinates.axis_default_unit_name[xax],
                                         self.pf.coordinates.axis_default_unit_name[yax]]
                 labels = [r'$\rm{'+axis_names[xax]+axes_unit_labels[0] + r'}$',
                           r'$\rm{'+axis_names[yax]+axes_unit_labels[1] + r'}$']
 
+                if hasattr(self.pf.coordinates, "axis_field"):
+                    if xax in self.pf.coordinates.axis_field:
+                        xmin, xmax = self.pf.coordinates.axis_field[xax](0,
+                                                                         self.xlim, self.ylim)
+                    else:
+                        xmin, xmax = [float(x) for x in extentx]
+                    if yax in self.pf.coordinates.axis_field:
+                        ymin, ymax = self.pf.coordinates.axis_field[yax](1,
+                                                                         self.xlim, self.ylim)
+                    else:
+                        ymin, ymax = [float(y) for y in extenty]
+                    self.plots[f].image.set_extent((xmin,xmax,ymin,ymax))
+                    self.plots[f].axes.set_aspect("auto")
+
             self.plots[f].axes.set_xlabel(labels[0],fontproperties=fp)
             self.plots[f].axes.set_ylabel(labels[1],fontproperties=fp)

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list