[yt-svn] commit/yt: 11 new changesets

commits-noreply at bitbucket.org
Fri May 29 12:20:34 PDT 2015


11 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/2ed2bb41be42/
Changeset:   2ed2bb41be42
Branch:      yt
User:        jzuhone
Date:        2015-05-13 22:35:24+00:00
Summary:     Remove line database stuff from the FITS frontend and fix a couple of issues
Affected #:  2 files

diff -r 4b4b61882407d5a5c909647b24a0147f67eee5ac -r 2ed2bb41be42cbde0c8fde6d553ced22f7161da7 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -165,7 +165,7 @@
                     units = self._determine_image_units(hdu.header, known_units)
                     try:
                         # Grab field name from btype
-                        fname = hdu.header["btype"].lower()
+                        fname = hdu.header["btype"]
                     except KeyError:
                         # Try to guess the name from the units
                         fname = self._guess_name_from_units(units)
@@ -205,18 +205,6 @@
                                   "the same dimensions as the primary and will not be " +
                                   "available as a field.")
 
-        # For line fields, we still read the primary field. Not sure how to extend this
-        # For now, we pick off the first field from the field list.
-        line_db = self.dataset.line_database
-        primary_fname = self.field_list[0][1]
-        for k, v in iteritems(line_db):
-            mylog.info("Adding line field: %s at frequency %g GHz" % (k, v))
-            self.field_list.append((self.dataset_type, k))
-            self._ext_map[k] = self._ext_map[primary_fname]
-            self._axis_map[k] = self._axis_map[primary_fname]
-            self._file_map[k] = self._file_map[primary_fname]
-            self.dataset.field_units[k] = self.dataset.field_units[primary_fname]
-
     def _count_grids(self):
         self.num_grids = self.ds.parameters["nprocs"]
 
@@ -242,19 +230,11 @@
                 bbox = np.array([[le,re] for le, re in zip(ds.domain_left_edge,
                                                            ds.domain_right_edge)])
                 dims = np.array(ds.domain_dimensions)
-                # If we are creating a dataset of lines, only decompose along the position axes
-                if len(ds.line_database) > 0:
-                    dims[ds.spec_axis] = 1
                 psize = get_psize(dims, self.num_grids)
                 gle, gre, shapes, slices = decompose_array(dims, psize, bbox)
                 self.grid_left_edge = self.ds.arr(gle, "code_length")
                 self.grid_right_edge = self.ds.arr(gre, "code_length")
                 self.grid_dimensions = np.array([shape for shape in shapes], dtype="int32")
-                # If we are creating a dataset of lines, only decompose along the position axes
-                if len(ds.line_database) > 0:
-                    self.grid_left_edge[:,ds.spec_axis] = ds.domain_left_edge[ds.spec_axis]
-                    self.grid_right_edge[:,ds.spec_axis] = ds.domain_right_edge[ds.spec_axis]
-                    self.grid_dimensions[:,ds.spec_axis] = ds.domain_dimensions[ds.spec_axis]
         else:
             self.grid_left_edge[0,:] = ds.domain_left_edge
             self.grid_right_edge[0,:] = ds.domain_right_edge
@@ -322,8 +302,6 @@
                  nan_mask=None,
                  spectral_factor=1.0,
                  z_axis_decomp=False,
-                 line_database=None,
-                 line_width=None,
                  suppress_astropy_warnings=True,
                  parameters=None,
                  units_override=None):
@@ -336,19 +314,6 @@
         self.z_axis_decomp = z_axis_decomp
         self.spectral_factor = spectral_factor
 
-        if line_width is not None:
-            self.line_width = YTQuantity(line_width[0], line_width[1])
-            self.line_units = line_width[1]
-            mylog.info("For line folding, spectral_factor = 1.0")
-            self.spectral_factor = 1.0
-        else:
-            self.line_width = None
-
-        self.line_database = {}
-        if line_database is not None:
-            for k in line_database:
-                self.line_database[k] = YTQuantity(line_database[k], self.line_units)
-
         if suppress_astropy_warnings:
             warnings.filterwarnings('ignore', module="astropy", append=True)
         auxiliary_files = ensure_list(auxiliary_files)
@@ -540,20 +505,14 @@
 
         # If nprocs is None, do some automatic decomposition of the domain
         if self.specified_parameters["nprocs"] is None:
-            if len(self.line_database) > 0:
-                dims = 2
-            else:
-                dims = self.dimensionality
             if self.z_axis_decomp:
                 nprocs = np.around(self.domain_dimensions[2]/8).astype("int")
             else:
-                nprocs = np.around(np.prod(self.domain_dimensions)/32**dims).astype("int")
+                nprocs = np.around(np.prod(self.domain_dimensions)/32**self.dimensionality).astype("int")
             self.parameters["nprocs"] = max(min(nprocs, 512), 1)
         else:
             self.parameters["nprocs"] = self.specified_parameters["nprocs"]
 
-        self.reversed = False
-
         # Check to see if this data is in some kind of (Lat,Lon,Vel) format
         self.spec_cube = False
         x = 0
@@ -618,41 +577,23 @@
             self._z0 = self.wcs.wcs.crval[self.spec_axis]
             self.spec_unit = str(self.wcs.wcs.cunit[self.spec_axis])
 
-            if self.line_width is not None:
-                if self._dz < 0.0:
-                    self.reversed = True
-                    le = self.dims[self.spec_axis]+0.5
-                else:
-                    le = 0.5
-                self.line_width = self.line_width.in_units(self.spec_unit)
-                self.freq_begin = (le-self._p0)*self._dz + self._z0
-                # We now reset these so that they are consistent
-                # with the new setup
-                self._dz = np.abs(self._dz)
-                self._p0 = 0.0
-                self._z0 = 0.0
-                nz = np.rint(self.line_width.value/self._dz).astype("int")
-                self.line_width = self._dz*nz
-                self.domain_left_edge[self.spec_axis] = -0.5*float(nz)
-                self.domain_right_edge[self.spec_axis] = 0.5*float(nz)
-                self.domain_dimensions[self.spec_axis] = nz
-            else:
-                if self.spectral_factor == "auto":
-                    self.spectral_factor = float(max(self.domain_dimensions[[self.lon_axis,
-                                                                             self.lat_axis]]))
-                    self.spectral_factor /= self.domain_dimensions[self.spec_axis]
-                    mylog.info("Setting the spectral factor to %f" % (self.spectral_factor))
-                Dz = self.domain_right_edge[self.spec_axis]-self.domain_left_edge[self.spec_axis]
-                self.domain_right_edge[self.spec_axis] = self.domain_left_edge[self.spec_axis] + \
-                                                        self.spectral_factor*Dz
-                self._dz /= self.spectral_factor
-                self._p0 = (self._p0-0.5)*self.spectral_factor + 0.5
+            if self.spectral_factor == "auto":
+                self.spectral_factor = float(max(self.domain_dimensions[[self.lon_axis,
+                                                                         self.lat_axis]]))
+                self.spectral_factor /= self.domain_dimensions[self.spec_axis]
+                mylog.info("Setting the spectral factor to %f" % (self.spectral_factor))
+            Dz = self.domain_right_edge[self.spec_axis]-self.domain_left_edge[self.spec_axis]
+            self.domain_right_edge[self.spec_axis] = self.domain_left_edge[self.spec_axis] + \
+                                                     self.spectral_factor*Dz
+            self._dz /= self.spectral_factor
+            self._p0 = (self._p0-0.5)*self.spectral_factor + 0.5
+            
         else:
 
             self.wcs_2d = self.wcs
             self.spec_axis = 2
             self.spec_name = "z"
-            self.spec_unit = "code length"
+            self.spec_unit = "code_length"
 
     def spec2pixel(self, spec_value):
         sv = self.arr(spec_value).in_units(self.spec_unit)

diff -r 4b4b61882407d5a5c909647b24a0147f67eee5ac -r 2ed2bb41be42cbde0c8fde6d553ced22f7161da7 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -24,12 +24,6 @@
         super(IOHandlerFITS, self).__init__(ds)
         self.ds = ds
         self._handle = ds._handle
-        if self.ds.line_width is not None:
-            self.line_db = self.ds.line_database
-            self.dz = self.ds.line_width/self.domain_dimensions[self.ds.spec_axis]
-        else:
-            self.line_db = None
-            self.dz = 1.
 
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
@@ -79,32 +73,15 @@
         dx = self.ds.domain_width/self.ds.domain_dimensions
         for field in fields:
             ftype, fname = field
-            tmp_fname = fname
-            if fname in self.ds.line_database:
-                fname = self.ds.field_list[0][1]
             f = self.ds.index._file_map[fname]
             ds = f[self.ds.index._ext_map[fname]]
             bzero, bscale = self.ds.index._scale_map[fname]
-            fname = tmp_fname
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:
                     start = ((g.LeftEdge-self.ds.domain_left_edge)/dx).to_ndarray().astype("int")
                     end = start + g.ActiveDimensions
-                    if self.line_db is not None and fname in self.line_db:
-                        my_off = self.line_db.get(fname).in_units(self.ds.spec_unit).value
-                        my_off = my_off - 0.5*self.ds.line_width
-                        my_off = int((my_off-self.ds.freq_begin)/self.dz)
-                        my_off = max(my_off, 0)
-                        my_off = min(my_off, self.ds.dims[self.ds.spec_axis]-1)
-                        start[self.ds.spec_axis] += my_off
-                        end[self.ds.spec_axis] += my_off
-                        mylog.debug("Reading from " + str(start) + str(end))
-                    slices = [slice(start[i],end[i]) for i in range(3)]
-                    if self.ds.reversed:
-                        new_start = self.ds.dims[self.ds.spec_axis]-1-start[self.ds.spec_axis]
-                        new_end = max(self.ds.dims[self.ds.spec_axis]-1-end[self.ds.spec_axis],0)
-                        slices[self.ds.spec_axis] = slice(new_start,new_end,-1)
+                    slices = [slice(start[i],end[i]) for i in xrange(3)]
                     if self.ds.dimensionality == 2:
                         nx, ny = g.ActiveDimensions[:2]
                         nz = 1
@@ -115,13 +92,6 @@
                         data = ds.data[idx,slices[2],slices[1],slices[0]].transpose()
                     else:
                         data = ds.data[slices[2],slices[1],slices[0]].transpose()
-                    if self.line_db is not None:
-                        nz1 = data.shape[self.ds.spec_axis]
-                        nz2 = g.ActiveDimensions[self.ds.spec_axis]
-                        if nz1 != nz2:
-                            old_data = data.copy()
-                            data = np.zeros(g.ActiveDimensions)
-                            data[:,:,nz2-nz1:] = old_data
                     if fname in self.ds.nan_mask:
                         data[np.isnan(data)] = self.ds.nan_mask[fname]
                     elif "all" in self.ds.nan_mask:


https://bitbucket.org/yt_analysis/yt/commits/173906d69172/
Changeset:   173906d69172
Branch:      yt
User:        jzuhone
Date:        2015-05-13 22:40:48+00:00
Summary:     Python 3 fix
Affected #:  1 file

diff -r 2ed2bb41be42cbde0c8fde6d553ced22f7161da7 -r 173906d69172d2060a1fe8059d1e590928771416 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -81,7 +81,7 @@
                 for g in chunk.objs:
                     start = ((g.LeftEdge-self.ds.domain_left_edge)/dx).to_ndarray().astype("int")
                     end = start + g.ActiveDimensions
-                    slices = [slice(start[i],end[i]) for i in xrange(3)]
+                    slices = [slice(start[i],end[i]) for i in range(3)]
                     if self.ds.dimensionality == 2:
                         nx, ny = g.ActiveDimensions[:2]
                         nz = 1
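
For illustration (not part of this changeset): xrange() was removed in Python 3, while range()
exists in both Python 2 and 3, so the list comprehension above is the portable spelling. A
standalone sketch with made-up grid bounds:

    import numpy as np

    # Illustrative values; in the real code start/end come from the grid object.
    start = np.array([0, 0, 0])
    end = np.array([16, 16, 8])
    slices = [slice(start[i], end[i]) for i in range(3)]
    print(slices)  # [slice(0, 16, None), slice(0, 16, None), slice(0, 8, None)]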


https://bitbucket.org/yt_analysis/yt/commits/5faa5a816939/
Changeset:   5faa5a816939
Branch:      yt
User:        jzuhone
Date:        2015-05-18 15:24:49+00:00
Summary:     Add a function to replace the line fields functionality that was stripped out of the FITS frontend.
Affected #:  1 file

diff -r 173906d69172d2060a1fe8059d1e590928771416 -r 5faa5a816939b90bb770ad6f1ccb1d2f8ee3171a yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -17,6 +17,8 @@
 from yt.utilities.on_demand_imports import _astropy
 from yt.funcs import mylog, get_image_suffix
 from yt.visualization._mpl_imports import FigureCanvasAgg
+from yt.units.yt_array import YTQuantity
+from yt.utilities.fits_image import FITSImageBuffer
 
 import os
 
@@ -68,6 +70,66 @@
                      validators = [ValidateSpatial()],
                      display_name="Counts (%s-%s keV)" % (emin, emax))
 
+def create_line_fields_dataset(filename, line_centers, spectral_width,
+                               output_filename=None, clobber=False):
+    r"""
+    Given a dictionary of spectral line centers and a width in
+    spectral units, extract data from a spectral cube at these line
+    centers and write a new FITS file containing the different lines
+    as separate FITS images, which can be read into yt as different
+    fields. 
+
+    Requires the SpectralCube (http://spectral-cube.readthedocs.org) 
+    library.
+
+    Parameters
+    ----------
+    filename : string
+        The spectral cube FITS file to extract the line fields from.
+    line_centers : dict of (float, string) tuples or YTQuantities
+        The centers of particular lines, where the keys are the names 
+        of the lines and the values are (float, string) tuples or 
+        YTQuantities, specifying a value for each center and its unit. 
+    spectral_width : YTQuantity or (float, string) tuple
+        The width along the spectral axis to extract around the line.
+    output_filename : string, optional
+        The name of the new file to write. If not specified a new 
+        filename will be constructed from the input one.
+    clobber : boolean, optional
+        Whether or not to overwrite an existing file. Default False.
+    """
+    from spectral_cube import SpectralCube
+    cube = SpectralCube.read(filename)
+    if not isinstance(spectral_width, YTQuantity):
+        line_width = YTQuantity(spectral_width[0], spectral_width[1])
+    line_data = {}
+    for k, v in line_centers.items():
+        if not isinstance(v, YTQuantity):
+            line_center = YTQuantity(v[0], v[1])
+        else:
+            line_center = v
+        mylog.info("Adding line field: %s at frequency %g %s" %
+                   (k, line_center.v, line_center.units))
+        line_lo = (line_center-0.5*line_width).to_astropy()
+        line_hi = (line_center+0.5*line_width).to_astropy()
+        subcube = cube.spectral_slab(line_lo, line_hi)
+        line_data[k] = subcube[:,:,:]
+    width = cube.header["naxis3"]*cube.header["cdelt3"]
+    w = subcube.wcs.copy()
+    w.wcs.crpix[-1] = 0.5
+    w.wcs.crval[-1] = -0.5*width
+    fid = FITSImageBuffer(line_data, wcs=w)
+    if output_filename is None:
+        if filename.lower().endswith(".fits.gz"):
+            new_fn = filename[:-8]
+        elif filename.lower().endswith(".fits"):
+            new_fn = filename[:-5]
+        new_fn += "_lines.fits"
+    else:
+        new_fn = output_filename
+    fid.writeto(new_fn, clobber=clobber)
+    return new_fn
+
 def ds9_region(ds, reg, obj=None, field_parameters=None):
     r"""
     Create a data container from a ds9 region file. Requires the pyregion


https://bitbucket.org/yt_analysis/yt/commits/624a50b0906d/
Changeset:   624a50b0906d
Branch:      yt
User:        jzuhone
Date:        2015-05-18 15:31:24+00:00
Summary:     Adding an example
Affected #:  1 file

diff -r 5faa5a816939b90bb770ad6f1ccb1d2f8ee3171a -r 624a50b0906dc1eb1cf1e3d42432bd8bc04d379a yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -77,9 +77,9 @@
     spectral units, extract data from a spectral cube at these line
     centers and write a new FITS file containing the different lines
     as separate FITS images, which can be read into yt as different
-    fields. 
+    fields.
 
-    Requires the SpectralCube (http://spectral-cube.readthedocs.org) 
+    Requires the SpectralCube (http://spectral-cube.readthedocs.org)
     library.
 
     Parameters
@@ -87,16 +87,27 @@
     filename : string
         The spectral cube FITS file to extract the line fields from.
     line_centers : dict of (float, string) tuples or YTQuantities
-        The centers of particular lines, where the keys are the names 
-        of the lines and the values are (float, string) tuples or 
-        YTQuantities, specifying a value for each center and its unit. 
+        The centers of particular lines, where the keys are the names
+        of the lines and the values are (float, string) tuples or
+        YTQuantities, specifying a value for each center and its unit.
     spectral_width : YTQuantity or (float, string) tuple
         The width along the spectral axis to extract around the line.
     output_filename : string, optional
-        The name of the new file to write. If not specified a new 
+        The name of the new file to write. If not specified a new
         filename will be constructed from the input one.
     clobber : boolean, optional
         Whether or not to overwrite an existing file. Default False.
+
+    Examples
+    --------
+    >>> line_centers = {'13CN': (218.03117, 'GHz'),
+    ...                 'CH3CH2CHO': (218.284256, 'GHz'),
+    ...                 'CH3NH2': (218.40956, 'GHz')}
+    >>> spectral_width = (0.05, "GHz")
+    >>> output_fn = create_line_fields_dataset("intensity_cube.fits", 
+    ...                                        line_centers, spectral_width, 
+    ...                                        output_filename="lines.fits",
+    ...                                        clobber=True)
     """
     from spectral_cube import SpectralCube
     cube = SpectralCube.read(filename)


https://bitbucket.org/yt_analysis/yt/commits/dd6cb65ffcdf/
Changeset:   dd6cb65ffcdf
Branch:      yt
User:        jzuhone
Date:        2015-05-18 16:36:57+00:00
Summary:     Adding documentation, renaming the convention to spectral "slabs" because it's more generic
Affected #:  2 files

diff -r 624a50b0906dc1eb1cf1e3d42432bd8bc04d379a -r dd6cb65ffcdf46186459c5b3b64539b66f3ae0ed doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -570,6 +570,35 @@
 ``WCSAxes`` is still in an experimental state, but as its functionality improves it will be
 utilized more here.
 
+``create_spectral_slabs``
+"""""""""""""""""""""""""
+
+.. note::
+
+  The following functionality requires the `spectral-cube <http://spectral-cube.readthedocs.org>`_
+  library to be installed. 
+  
+If you have a spectral intensity dataset of some sort, and would like to extract emission in 
+particular slabs along the spectral axis of a certain width, ``create_spectral_slabs`` can be
+used to generate a new file with these slabs as different fields. In thise example, we use it
+to extract individual lines from an intensity cube:
+
+.. code-block:: python
+
+  slab_centers = {'13CN': (218.03117, 'GHz'),
+                  'CH3CH2CHO': (218.284256, 'GHz'),
+                  'CH3NH2': (218.40956, 'GHz')}
+  slab_width = (0.05, "GHz")
+  output_fn = create_spectral_slabs("intensity_cube.fits",
+                                    slab_centers, slab_width, 
+                                    output_filename="lines.fits",
+                                    sclobber=True)
+
+This dataset can then be loaded up in yt, and the different slabs will be different fields, 
+with the field names taken from the keys in ``slab_centers``. The WCS coordinates on the 
+spectral axis are reset so that the center of the domain along this axis is zero, and the 
+left and right edges of the domain along this axis are :math:`\pm```0.5*slab_width``.
+
 Examples of Using FITS Data
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 

diff -r 624a50b0906dc1eb1cf1e3d42432bd8bc04d379a -r dd6cb65ffcdf46186459c5b3b64539b66f3ae0ed yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -17,7 +17,7 @@
 from yt.utilities.on_demand_imports import _astropy
 from yt.funcs import mylog, get_image_suffix
 from yt.visualization._mpl_imports import FigureCanvasAgg
-from yt.units.yt_array import YTQuantity
+from yt.units.yt_array import YTQuantity, YTArray
 from yt.utilities.fits_image import FITSImageBuffer
 
 import os
@@ -70,14 +70,15 @@
                      validators = [ValidateSpatial()],
                      display_name="Counts (%s-%s keV)" % (emin, emax))
 
-def create_line_fields_dataset(filename, line_centers, spectral_width,
-                               output_filename=None, clobber=False):
+def create_spectral_slabs(filename, slab_centers, slab_width,
+                          output_filename=None, clobber=False):
     r"""
-    Given a dictionary of spectral line centers and a width in
-    spectral units, extract data from a spectral cube at these line
-    centers and write a new FITS file containing the different lines
+    Given a dictionary of spectral slab centers and a width in
+    spectral units, extract data from a spectral cube at these slab
+    centers and write a new FITS file containing the different slabs
     as separate FITS images, which can be read into yt as different
-    fields.
+    fields. Useful for extracting individual lines from a spectral 
+    cube and separating them out as different fields. 
 
     Requires the SpectralCube (http://spectral-cube.readthedocs.org)
     library.
@@ -85,13 +86,13 @@
     Parameters
     ----------
     filename : string
-        The spectral cube FITS file to extract the line fields from.
-    line_centers : dict of (float, string) tuples or YTQuantities
-        The centers of particular lines, where the keys are the names
-        of the lines and the values are (float, string) tuples or
+        The spectral cube FITS file to extract the data from.
+    slab_centers : dict of (float, string) tuples or YTQuantities
+        The centers of the slabs, where the keys are the names
+        of the new fields and the values are (float, string) tuples or
         YTQuantities, specifying a value for each center and its unit.
-    spectral_width : YTQuantity or (float, string) tuple
-        The width along the spectral axis to extract around the line.
+    slab_width : YTQuantity or (float, string) tuple
+        The width of the slab along the spectral axis.
     output_filename : string, optional
         The name of the new file to write. If not specified a new
         filename will be constructed from the input one.
@@ -100,42 +101,43 @@
 
     Examples
     --------
-    >>> line_centers = {'13CN': (218.03117, 'GHz'),
+    >>> slab_centers = {'13CN': (218.03117, 'GHz'),
     ...                 'CH3CH2CHO': (218.284256, 'GHz'),
     ...                 'CH3NH2': (218.40956, 'GHz')}
-    >>> spectral_width = (0.05, "GHz")
-    >>> output_fn = create_line_fields_dataset("intensity_cube.fits", 
-    ...                                        line_centers, spectral_width, 
-    ...                                        output_filename="lines.fits",
-    ...                                        clobber=True)
+    >>> slab_width = (0.05, "GHz")
+    >>> output_fn = create_spectral_slabs("intensity_cube.fits", 
+    ...                                   slab_centers, slab_width, 
+    ...                                   output_filename="lines.fits",
+    ...                                   clobber=True)
     """
     from spectral_cube import SpectralCube
     cube = SpectralCube.read(filename)
-    if not isinstance(spectral_width, YTQuantity):
-        line_width = YTQuantity(spectral_width[0], spectral_width[1])
-    line_data = {}
-    for k, v in line_centers.items():
+    if not isinstance(slab_width, YTQuantity):
+        slab_width = YTQuantity(slab_width[0], slab_width[1])
+    slab_data = {}
+    field_units = cube.header.get("bunit", "dimensionless")
+    for k, v in slab_centers.items():
         if not isinstance(v, YTQuantity):
-            line_center = YTQuantity(v[0], v[1])
+            slab_center = YTQuantity(v[0], v[1])
         else:
-            line_center = v
-        mylog.info("Adding line field: %s at frequency %g %s" %
-                   (k, line_center.v, line_center.units))
-        line_lo = (line_center-0.5*line_width).to_astropy()
-        line_hi = (line_center+0.5*line_width).to_astropy()
-        subcube = cube.spectral_slab(line_lo, line_hi)
-        line_data[k] = subcube[:,:,:]
-    width = cube.header["naxis3"]*cube.header["cdelt3"]
+            slab_center = v
+        mylog.info("Adding slab field %s at %g %s" %
+                   (k, slab_center.v, slab_center.units))
+        slab_lo = (slab_center-0.5*slab_width).to_astropy()
+        slab_hi = (slab_center+0.5*slab_width).to_astropy()
+        subcube = cube.spectral_slab(slab_lo, slab_hi)
+        slab_data[k] = YTArray(subcube[:,:,:], field_units)
+    width = subcube.header["naxis3"]*cube.header["cdelt3"]
     w = subcube.wcs.copy()
     w.wcs.crpix[-1] = 0.5
     w.wcs.crval[-1] = -0.5*width
-    fid = FITSImageBuffer(line_data, wcs=w)
+    fid = FITSImageBuffer(slab_data, wcs=w)
     if output_filename is None:
         if filename.lower().endswith(".fits.gz"):
             new_fn = filename[:-8]
         elif filename.lower().endswith(".fits"):
             new_fn = filename[:-5]
-        new_fn += "_lines.fits"
+        new_fn += "_slabs.fits"
     else:
         new_fn = output_filename
     fid.writeto(new_fn, clobber=clobber)


https://bitbucket.org/yt_analysis/yt/commits/955ef46851f1/
Changeset:   955ef46851f1
Branch:      yt
User:        jzuhone
Date:        2015-05-18 18:05:34+00:00
Summary:     Small docs fix
Affected #:  1 file

diff -r dd6cb65ffcdf46186459c5b3b64539b66f3ae0ed -r 955ef46851f12f18fc2c4ab78a5d1a543a0987ed doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -597,7 +597,7 @@
 This dataset can then be loaded up in yt, and the different slabs will be different fields, 
 with the field names taken from the keys in ``slab_centers``. The WCS coordinates on the 
 spectral axis are reset so that the center of the domain along this axis is zero, and the 
-left and right edges of the domain along this axis are :math:`\pm```0.5*slab_width``.
+left and right edges of the domain along this axis are :math:`\pm` ``0.5*slab_width``.
 
 Examples of Using FITS Data
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^


https://bitbucket.org/yt_analysis/yt/commits/4862720ce973/
Changeset:   4862720ce973
Branch:      yt
User:        jzuhone
Date:        2015-05-18 18:14:24+00:00
Summary:     These aren't accurate anymore, so remove them
Affected #:  1 file

diff -r 955ef46851f12f18fc2c4ab78a5d1a543a0987ed -r 4862720ce973147a97d3716d861ea4f6c75d8865 yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -132,6 +132,9 @@
     w.wcs.crpix[-1] = 0.5
     w.wcs.crval[-1] = -0.5*width
     fid = FITSImageBuffer(slab_data, wcs=w)
+    for hdu in fid:
+        hdu.header.pop("RESTFREQ", None)
+        hdu.header.pop("RESTFRQ", None)
     if output_filename is None:
         if filename.lower().endswith(".fits.gz"):
             new_fn = filename[:-8]


https://bitbucket.org/yt_analysis/yt/commits/3678a87c7c39/
Changeset:   3678a87c7c39
Branch:      yt
User:        jzuhone
Date:        2015-05-19 20:51:39+00:00
Summary:     Simplifying this logic
Affected #:  1 file

diff -r 4862720ce973147a97d3716d861ea4f6c75d8865 -r 3678a87c7c39f3d7d4c34761d36e8d0b2b1c90b0 yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -136,11 +136,7 @@
         hdu.header.pop("RESTFREQ", None)
         hdu.header.pop("RESTFRQ", None)
     if output_filename is None:
-        if filename.lower().endswith(".fits.gz"):
-            new_fn = filename[:-8]
-        elif filename.lower().endswith(".fits"):
-            new_fn = filename[:-5]
-        new_fn += "_slabs.fits"
+        new_fn = filename.rsplit(".",2)[0] + "_slabs.fits"
     else:
         new_fn = output_filename
     fid.writeto(new_fn, clobber=clobber)
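
For illustration (not part of this changeset): the simplified default-filename logic above covers
both of the extensions the removed branches handled. A quick check of its behavior with
hypothetical filenames:

    # Both ".fits" and ".fits.gz" inputs map to the same default output name.
    for fn in ["intensity_cube.fits", "intensity_cube.fits.gz"]:
        print(fn.rsplit(".", 2)[0] + "_slabs.fits")
    # prints "intensity_cube_slabs.fits" twice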


https://bitbucket.org/yt_analysis/yt/commits/4e09bade9211/
Changeset:   4e09bade9211
Branch:      yt
User:        jzuhone
Date:        2015-05-20 17:29:23+00:00
Summary:     Return a dataset instead, don't write anything.
Affected #:  2 files

diff -r 3678a87c7c39f3d7d4c34761d36e8d0b2b1c90b0 -r 4e09bade92112a4325b20409508cde0ad3d6d5bb doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -469,6 +469,8 @@
   first image in the primary file. If this is not the case,
   yt will raise a warning and will not load this field.
 
+.. _additional_fits_options:
+
 Additional Options
 ^^^^^^^^^^^^^^^^^^
 
@@ -580,7 +582,7 @@
   
 If you have a spectral intensity dataset of some sort, and would like to extract emission in 
 particular slabs along the spectral axis of a certain width, ``create_spectral_slabs`` can be
-used to generate a new file with these slabs as different fields. In thise example, we use it
+used to generate a dataset with these slabs as different fields. In this example, we use it
 to extract individual lines from an intensity cube:
 
 .. code-block:: python
@@ -589,14 +591,14 @@
                   'CH3CH2CHO': (218.284256, 'GHz'),
                   'CH3NH2': (218.40956, 'GHz')}
   slab_width = (0.05, "GHz")
-  output_fn = create_spectral_slabs("intensity_cube.fits",
-                                    slab_centers, slab_width, 
-                                    output_filename="lines.fits",
-                                    sclobber=True)
+  ds = create_spectral_slabs("intensity_cube.fits",
+                                    slab_centers, slab_width,
+                                    nan_mask=0.0)
 
-This dataset can then be loaded up in yt, and the different slabs will be different fields, 
-with the field names taken from the keys in ``slab_centers``. The WCS coordinates on the 
-spectral axis are reset so that the center of the domain along this axis is zero, and the 
+All keyword arguments to `create_spectral_slabs` are passed on to `load` when creating the dataset
+(see :ref:`additional_fits_options` above). In the returned dataset, the different slabs will be
+different fields, with the field names taken from the keys in ``slab_centers``. The WCS coordinates 
+on the spectral axis are reset so that the center of the domain along this axis is zero, and the 
 left and right edges of the domain along this axis are :math:`\pm` ``0.5*slab_width``.
 
 Examples of Using FITS Data

diff -r 3678a87c7c39f3d7d4c34761d36e8d0b2b1c90b0 -r 4e09bade92112a4325b20409508cde0ad3d6d5bb yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -71,18 +71,19 @@
                      display_name="Counts (%s-%s keV)" % (emin, emax))
 
 def create_spectral_slabs(filename, slab_centers, slab_width,
-                          output_filename=None, clobber=False):
+                          **kwargs):
     r"""
     Given a dictionary of spectral slab centers and a width in
     spectral units, extract data from a spectral cube at these slab
-    centers and write a new FITS file containing the different slabs
-    as separate FITS images, which can be read into yt as different
-    fields. Useful for extracting individual lines from a spectral 
-    cube and separating them out as different fields. 
+    centers and return a `FITSDataset` instance containing the different 
+    slabs as separate yt fields. Useful for extracting individual 
+    lines from a spectral cube and separating them out as different fields. 
 
     Requires the SpectralCube (http://spectral-cube.readthedocs.org)
     library.
 
+    All keyword arguments will be passed on to the `FITSDataset` constructor.
+
     Parameters
     ----------
     filename : string
@@ -93,11 +94,6 @@
         YTQuantities, specifying a value for each center and its unit.
     slab_width : YTQuantity or (float, string) tuple
         The width of the slab along the spectral axis.
-    output_filename : string, optional
-        The name of the new file to write. If not specified a new
-        filename will be constructed from the input one.
-    clobber : boolean, optional
-        Whether or not to overwrite an existing file. Default False.
 
     Examples
     --------
@@ -105,12 +101,12 @@
     ...                 'CH3CH2CHO': (218.284256, 'GHz'),
     ...                 'CH3NH2': (218.40956, 'GHz')}
     >>> slab_width = (0.05, "GHz")
-    >>> output_fn = create_spectral_slabs("intensity_cube.fits", 
-    ...                                   slab_centers, slab_width, 
-    ...                                   output_filename="lines.fits",
-    ...                                   clobber=True)
+    >>> ds = create_spectral_slabs("intensity_cube.fits", 
+    ...                            slab_centers, slab_width,
+    ...                            nan_mask=0.0)
     """
     from spectral_cube import SpectralCube
+    from yt.frontends.fits.api import FITSDataset
     cube = SpectralCube.read(filename)
     if not isinstance(slab_width, YTQuantity):
         slab_width = YTQuantity(slab_width[0], slab_width[1])
@@ -126,7 +122,7 @@
         slab_lo = (slab_center-0.5*slab_width).to_astropy()
         slab_hi = (slab_center+0.5*slab_width).to_astropy()
         subcube = cube.spectral_slab(slab_lo, slab_hi)
-        slab_data[k] = YTArray(subcube[:,:,:], field_units)
+        slab_data[k] = YTArray(subcube.filled_data[:,:,:], field_units)
     width = subcube.header["naxis3"]*cube.header["cdelt3"]
     w = subcube.wcs.copy()
     w.wcs.crpix[-1] = 0.5
@@ -135,12 +131,8 @@
     for hdu in fid:
         hdu.header.pop("RESTFREQ", None)
         hdu.header.pop("RESTFRQ", None)
-    if output_filename is None:
-        new_fn = filename.rsplit(".",2)[0] + "_slabs.fits"
-    else:
-        new_fn = output_filename
-    fid.writeto(new_fn, clobber=clobber)
-    return new_fn
+    ds = FITSDataset(fid, **kwargs)
+    return ds
 
 def ds9_region(ds, reg, obj=None, field_parameters=None):
     r"""


https://bitbucket.org/yt_analysis/yt/commits/ed58bda6ac61/
Changeset:   ed58bda6ac61
Branch:      yt
User:        jzuhone
Date:        2015-05-20 17:44:33+00:00
Summary:     Clean up this logic a bit, and make sure we can read in HDUList instances as well.
Affected #:  2 files

diff -r 4e09bade92112a4325b20409508cde0ad3d6d5bb -r ed58bda6ac6139245a956f805badfcb53e79ee09 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -326,13 +326,13 @@
             self.nan_mask = {"all":nan_mask}
         elif isinstance(nan_mask, dict):
             self.nan_mask = nan_mask
-        if isinstance(self.filenames[0], _astropy.pyfits.hdu.image._ImageBaseHDU):
-            self._handle = FITSFileHandler(self.filenames[0])
-            fn = "InMemoryFITSImage_%s" % (uuid.uuid4().hex)
+        self._handle = FITSFileHandler(self.filenames[0])
+        if (isinstance(self.filenames[0], _astropy.pyfits.hdu.image._ImageBaseHDU) or
+            isinstance(self.filenames[0], _astropy.pyfits.HDUList)):
+            fn = "InMemoryFITSFile_%s" % uuid.uuid4().hex
         else:
-            self._handle = FITSFileHandler(self.filenames[0])
             fn = self.filenames[0]
-        self._handle._fits_files = [self._handle]
+        self._handle._fits_files.append(self._handle)
         if self.num_files > 1:
             for fits_file in auxiliary_files:
                 if isinstance(fits_file, _astropy.pyfits.hdu.image._ImageBaseHDU):

diff -r 4e09bade92112a4325b20409508cde0ad3d6d5bb -r ed58bda6ac6139245a956f805badfcb53e79ee09 yt/utilities/file_handler.py
--- a/yt/utilities/file_handler.py
+++ b/yt/utilities/file_handler.py
@@ -46,12 +46,15 @@
 class FITSFileHandler(HDF5FileHandler):
     def __init__(self, filename):
         from yt.utilities.on_demand_imports import _astropy
-        if isinstance(filename, _astropy.pyfits.PrimaryHDU):
+        if isinstance(filename, _astropy.pyfits.hdu.image._ImageBaseHDU):
             self.handle = _astropy.pyfits.HDUList(filename)
+        elif isinstance(filename, _astropy.pyfits.HDUList):
+            self.handle = filename
         else:
             self.handle = _astropy.pyfits.open(
                 filename, memmap=True, do_not_scale_image_data=True,
                 ignore_blank=True)
+        self._fits_files = []
 
     def __del__(self):
         for f in self._fits_files:
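
For illustration (not part of this changeset): with FITSFileHandler now accepting HDUList
instances, an in-memory image can be handed to the frontend without touching disk. A minimal
hedged sketch (synthetic data; a real cube would normally carry proper WCS keywords):

    import numpy as np
    from astropy.io import fits
    from yt.frontends.fits.api import FITSDataset

    data = np.random.random((64, 64))
    hdu = fits.PrimaryHDU(data)
    hdu.header["btype"] = "intensity"   # read by the frontend as the field name
    ds = FITSDataset(fits.HDUList([hdu]))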


https://bitbucket.org/yt_analysis/yt/commits/d6983e51e584/
Changeset:   d6983e51e584
Branch:      yt
User:        atmyers
Date:        2015-05-29 19:20:26+00:00
Summary:     Merged in jzuhone/yt-3.x (pull request #1588)

Removing line database stuff from the FITS frontend
Affected #:  5 files

diff -r 718444346962f551ca51fec7a6ef877b6badb473 -r d6983e51e584eb0e996dfd2a4475ff2a8e0a6014 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -469,6 +469,8 @@
   first image in the primary file. If this is not the case,
   yt will raise a warning and will not load this field.
 
+.. _additional_fits_options:
+
 Additional Options
 ^^^^^^^^^^^^^^^^^^
 
@@ -570,6 +572,35 @@
 ``WCSAxes`` is still in an experimental state, but as its functionality improves it will be
 utilized more here.
 
+``create_spectral_slabs``
+"""""""""""""""""""""""""
+
+.. note::
+
+  The following functionality requires the `spectral-cube <http://spectral-cube.readthedocs.org>`_
+  library to be installed. 
+  
+If you have a spectral intensity dataset of some sort, and would like to extract emission in 
+particular slabs along the spectral axis of a certain width, ``create_spectral_slabs`` can be
+used to generate a dataset with these slabs as different fields. In this example, we use it
+to extract individual lines from an intensity cube:
+
+.. code-block:: python
+
+  slab_centers = {'13CN': (218.03117, 'GHz'),
+                  'CH3CH2CHO': (218.284256, 'GHz'),
+                  'CH3NH2': (218.40956, 'GHz')}
+  slab_width = (0.05, "GHz")
+  ds = create_spectral_slabs("intensity_cube.fits",
+                                    slab_centers, slab_width,
+                                    nan_mask=0.0)
+
+All keyword arguments to `create_spectral_slabs` are passed on to `load` when creating the dataset
+(see :ref:`additional_fits_options` above). In the returned dataset, the different slabs will be
+different fields, with the field names taken from the keys in ``slab_centers``. The WCS coordinates 
+on the spectral axis are reset so that the center of the domain along this axis is zero, and the 
+left and right edges of the domain along this axis are :math:`\pm` ``0.5*slab_width``.
+
 Examples of Using FITS Data
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 

diff -r 718444346962f551ca51fec7a6ef877b6badb473 -r d6983e51e584eb0e996dfd2a4475ff2a8e0a6014 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -165,7 +165,7 @@
                     units = self._determine_image_units(hdu.header, known_units)
                     try:
                         # Grab field name from btype
-                        fname = hdu.header["btype"].lower()
+                        fname = hdu.header["btype"]
                     except KeyError:
                         # Try to guess the name from the units
                         fname = self._guess_name_from_units(units)
@@ -205,18 +205,6 @@
                                   "the same dimensions as the primary and will not be " +
                                   "available as a field.")
 
-        # For line fields, we still read the primary field. Not sure how to extend this
-        # For now, we pick off the first field from the field list.
-        line_db = self.dataset.line_database
-        primary_fname = self.field_list[0][1]
-        for k, v in iteritems(line_db):
-            mylog.info("Adding line field: %s at frequency %g GHz" % (k, v))
-            self.field_list.append((self.dataset_type, k))
-            self._ext_map[k] = self._ext_map[primary_fname]
-            self._axis_map[k] = self._axis_map[primary_fname]
-            self._file_map[k] = self._file_map[primary_fname]
-            self.dataset.field_units[k] = self.dataset.field_units[primary_fname]
-
     def _count_grids(self):
         self.num_grids = self.ds.parameters["nprocs"]
 
@@ -242,19 +230,11 @@
                 bbox = np.array([[le,re] for le, re in zip(ds.domain_left_edge,
                                                            ds.domain_right_edge)])
                 dims = np.array(ds.domain_dimensions)
-                # If we are creating a dataset of lines, only decompose along the position axes
-                if len(ds.line_database) > 0:
-                    dims[ds.spec_axis] = 1
                 psize = get_psize(dims, self.num_grids)
                 gle, gre, shapes, slices = decompose_array(dims, psize, bbox)
                 self.grid_left_edge = self.ds.arr(gle, "code_length")
                 self.grid_right_edge = self.ds.arr(gre, "code_length")
                 self.grid_dimensions = np.array([shape for shape in shapes], dtype="int32")
-                # If we are creating a dataset of lines, only decompose along the position axes
-                if len(ds.line_database) > 0:
-                    self.grid_left_edge[:,ds.spec_axis] = ds.domain_left_edge[ds.spec_axis]
-                    self.grid_right_edge[:,ds.spec_axis] = ds.domain_right_edge[ds.spec_axis]
-                    self.grid_dimensions[:,ds.spec_axis] = ds.domain_dimensions[ds.spec_axis]
         else:
             self.grid_left_edge[0,:] = ds.domain_left_edge
             self.grid_right_edge[0,:] = ds.domain_right_edge
@@ -322,8 +302,6 @@
                  nan_mask=None,
                  spectral_factor=1.0,
                  z_axis_decomp=False,
-                 line_database=None,
-                 line_width=None,
                  suppress_astropy_warnings=True,
                  parameters=None,
                  units_override=None):
@@ -336,19 +314,6 @@
         self.z_axis_decomp = z_axis_decomp
         self.spectral_factor = spectral_factor
 
-        if line_width is not None:
-            self.line_width = YTQuantity(line_width[0], line_width[1])
-            self.line_units = line_width[1]
-            mylog.info("For line folding, spectral_factor = 1.0")
-            self.spectral_factor = 1.0
-        else:
-            self.line_width = None
-
-        self.line_database = {}
-        if line_database is not None:
-            for k in line_database:
-                self.line_database[k] = YTQuantity(line_database[k], self.line_units)
-
         if suppress_astropy_warnings:
             warnings.filterwarnings('ignore', module="astropy", append=True)
         auxiliary_files = ensure_list(auxiliary_files)
@@ -361,13 +326,13 @@
             self.nan_mask = {"all":nan_mask}
         elif isinstance(nan_mask, dict):
             self.nan_mask = nan_mask
-        if isinstance(self.filenames[0], _astropy.pyfits.hdu.image._ImageBaseHDU):
-            self._handle = FITSFileHandler(self.filenames[0])
-            fn = "InMemoryFITSImage_%s" % (uuid.uuid4().hex)
+        self._handle = FITSFileHandler(self.filenames[0])
+        if (isinstance(self.filenames[0], _astropy.pyfits.hdu.image._ImageBaseHDU) or
+            isinstance(self.filenames[0], _astropy.pyfits.HDUList)):
+            fn = "InMemoryFITSFile_%s" % uuid.uuid4().hex
         else:
-            self._handle = FITSFileHandler(self.filenames[0])
             fn = self.filenames[0]
-        self._handle._fits_files = [self._handle]
+        self._handle._fits_files.append(self._handle)
         if self.num_files > 1:
             for fits_file in auxiliary_files:
                 if isinstance(fits_file, _astropy.pyfits.hdu.image._ImageBaseHDU):
@@ -540,20 +505,14 @@
 
         # If nprocs is None, do some automatic decomposition of the domain
         if self.specified_parameters["nprocs"] is None:
-            if len(self.line_database) > 0:
-                dims = 2
-            else:
-                dims = self.dimensionality
             if self.z_axis_decomp:
                 nprocs = np.around(self.domain_dimensions[2]/8).astype("int")
             else:
-                nprocs = np.around(np.prod(self.domain_dimensions)/32**dims).astype("int")
+                nprocs = np.around(np.prod(self.domain_dimensions)/32**self.dimensionality).astype("int")
             self.parameters["nprocs"] = max(min(nprocs, 512), 1)
         else:
             self.parameters["nprocs"] = self.specified_parameters["nprocs"]
 
-        self.reversed = False
-
         # Check to see if this data is in some kind of (Lat,Lon,Vel) format
         self.spec_cube = False
         x = 0
@@ -618,41 +577,23 @@
             self._z0 = self.wcs.wcs.crval[self.spec_axis]
             self.spec_unit = str(self.wcs.wcs.cunit[self.spec_axis])
 
-            if self.line_width is not None:
-                if self._dz < 0.0:
-                    self.reversed = True
-                    le = self.dims[self.spec_axis]+0.5
-                else:
-                    le = 0.5
-                self.line_width = self.line_width.in_units(self.spec_unit)
-                self.freq_begin = (le-self._p0)*self._dz + self._z0
-                # We now reset these so that they are consistent
-                # with the new setup
-                self._dz = np.abs(self._dz)
-                self._p0 = 0.0
-                self._z0 = 0.0
-                nz = np.rint(self.line_width.value/self._dz).astype("int")
-                self.line_width = self._dz*nz
-                self.domain_left_edge[self.spec_axis] = -0.5*float(nz)
-                self.domain_right_edge[self.spec_axis] = 0.5*float(nz)
-                self.domain_dimensions[self.spec_axis] = nz
-            else:
-                if self.spectral_factor == "auto":
-                    self.spectral_factor = float(max(self.domain_dimensions[[self.lon_axis,
-                                                                             self.lat_axis]]))
-                    self.spectral_factor /= self.domain_dimensions[self.spec_axis]
-                    mylog.info("Setting the spectral factor to %f" % (self.spectral_factor))
-                Dz = self.domain_right_edge[self.spec_axis]-self.domain_left_edge[self.spec_axis]
-                self.domain_right_edge[self.spec_axis] = self.domain_left_edge[self.spec_axis] + \
-                                                        self.spectral_factor*Dz
-                self._dz /= self.spectral_factor
-                self._p0 = (self._p0-0.5)*self.spectral_factor + 0.5
+            if self.spectral_factor == "auto":
+                self.spectral_factor = float(max(self.domain_dimensions[[self.lon_axis,
+                                                                         self.lat_axis]]))
+                self.spectral_factor /= self.domain_dimensions[self.spec_axis]
+                mylog.info("Setting the spectral factor to %f" % (self.spectral_factor))
+            Dz = self.domain_right_edge[self.spec_axis]-self.domain_left_edge[self.spec_axis]
+            self.domain_right_edge[self.spec_axis] = self.domain_left_edge[self.spec_axis] + \
+                                                     self.spectral_factor*Dz
+            self._dz /= self.spectral_factor
+            self._p0 = (self._p0-0.5)*self.spectral_factor + 0.5
+            
         else:
 
             self.wcs_2d = self.wcs
             self.spec_axis = 2
             self.spec_name = "z"
-            self.spec_unit = "code length"
+            self.spec_unit = "code_length"
 
     def spec2pixel(self, spec_value):
         sv = self.arr(spec_value).in_units(self.spec_unit)

diff -r 718444346962f551ca51fec7a6ef877b6badb473 -r d6983e51e584eb0e996dfd2a4475ff2a8e0a6014 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -24,12 +24,6 @@
         super(IOHandlerFITS, self).__init__(ds)
         self.ds = ds
         self._handle = ds._handle
-        if self.ds.line_width is not None:
-            self.line_db = self.ds.line_database
-            self.dz = self.ds.line_width/self.domain_dimensions[self.ds.spec_axis]
-        else:
-            self.line_db = None
-            self.dz = 1.
 
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
@@ -79,32 +73,15 @@
         dx = self.ds.domain_width/self.ds.domain_dimensions
         for field in fields:
             ftype, fname = field
-            tmp_fname = fname
-            if fname in self.ds.line_database:
-                fname = self.ds.field_list[0][1]
             f = self.ds.index._file_map[fname]
             ds = f[self.ds.index._ext_map[fname]]
             bzero, bscale = self.ds.index._scale_map[fname]
-            fname = tmp_fname
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:
                     start = ((g.LeftEdge-self.ds.domain_left_edge)/dx).to_ndarray().astype("int")
                     end = start + g.ActiveDimensions
-                    if self.line_db is not None and fname in self.line_db:
-                        my_off = self.line_db.get(fname).in_units(self.ds.spec_unit).value
-                        my_off = my_off - 0.5*self.ds.line_width
-                        my_off = int((my_off-self.ds.freq_begin)/self.dz)
-                        my_off = max(my_off, 0)
-                        my_off = min(my_off, self.ds.dims[self.ds.spec_axis]-1)
-                        start[self.ds.spec_axis] += my_off
-                        end[self.ds.spec_axis] += my_off
-                        mylog.debug("Reading from " + str(start) + str(end))
                     slices = [slice(start[i],end[i]) for i in range(3)]
-                    if self.ds.reversed:
-                        new_start = self.ds.dims[self.ds.spec_axis]-1-start[self.ds.spec_axis]
-                        new_end = max(self.ds.dims[self.ds.spec_axis]-1-end[self.ds.spec_axis],0)
-                        slices[self.ds.spec_axis] = slice(new_start,new_end,-1)
                     if self.ds.dimensionality == 2:
                         nx, ny = g.ActiveDimensions[:2]
                         nz = 1
@@ -115,13 +92,6 @@
                         data = ds.data[idx,slices[2],slices[1],slices[0]].transpose()
                     else:
                         data = ds.data[slices[2],slices[1],slices[0]].transpose()
-                    if self.line_db is not None:
-                        nz1 = data.shape[self.ds.spec_axis]
-                        nz2 = g.ActiveDimensions[self.ds.spec_axis]
-                        if nz1 != nz2:
-                            old_data = data.copy()
-                            data = np.zeros(g.ActiveDimensions)
-                            data[:,:,nz2-nz1:] = old_data
                     if fname in self.ds.nan_mask:
                         data[np.isnan(data)] = self.ds.nan_mask[fname]
                     elif "all" in self.ds.nan_mask:

diff -r 718444346962f551ca51fec7a6ef877b6badb473 -r d6983e51e584eb0e996dfd2a4475ff2a8e0a6014 yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -17,6 +17,8 @@
 from yt.utilities.on_demand_imports import _astropy
 from yt.funcs import mylog, get_image_suffix
 from yt.visualization._mpl_imports import FigureCanvasAgg
+from yt.units.yt_array import YTQuantity, YTArray
+from yt.utilities.fits_image import FITSImageBuffer
 
 import os
 
@@ -68,6 +70,70 @@
                      validators = [ValidateSpatial()],
                      display_name="Counts (%s-%s keV)" % (emin, emax))
 
+def create_spectral_slabs(filename, slab_centers, slab_width,
+                          **kwargs):
+    r"""
+    Given a dictionary of spectral slab centers and a width in
+    spectral units, extract data from a spectral cube at these slab
+    centers and return a `FITSDataset` instance containing the different 
+    slabs as separate yt fields. Useful for extracting individual 
+    lines from a spectral cube and separating them out as different fields. 
+
+    Requires the SpectralCube (http://spectral-cube.readthedocs.org)
+    library.
+
+    All keyword arguments will be passed on to the `FITSDataset` constructor.
+
+    Parameters
+    ----------
+    filename : string
+        The spectral cube FITS file to extract the data from.
+    slab_centers : dict of (float, string) tuples or YTQuantities
+        The centers of the slabs, where the keys are the names
+        of the new fields and the values are (float, string) tuples or
+        YTQuantities, specifying a value for each center and its unit.
+    slab_width : YTQuantity or (float, string) tuple
+        The width of the slab along the spectral axis.
+
+    Examples
+    --------
+    >>> slab_centers = {'13CN': (218.03117, 'GHz'),
+    ...                 'CH3CH2CHO': (218.284256, 'GHz'),
+    ...                 'CH3NH2': (218.40956, 'GHz')}
+    >>> slab_width = (0.05, "GHz")
+    >>> ds = create_spectral_slabs("intensity_cube.fits", 
+    ...                            slab_centers, slab_width,
+    ...                            nan_mask=0.0)
+    """
+    from spectral_cube import SpectralCube
+    from yt.frontends.fits.api import FITSDataset
+    cube = SpectralCube.read(filename)
+    if not isinstance(slab_width, YTQuantity):
+        slab_width = YTQuantity(slab_width[0], slab_width[1])
+    slab_data = {}
+    field_units = cube.header.get("bunit", "dimensionless")
+    for k, v in slab_centers.items():
+        if not isinstance(v, YTQuantity):
+            slab_center = YTQuantity(v[0], v[1])
+        else:
+            slab_center = v
+        mylog.info("Adding slab field %s at %g %s" %
+                   (k, slab_center.v, slab_center.units))
+        slab_lo = (slab_center-0.5*slab_width).to_astropy()
+        slab_hi = (slab_center+0.5*slab_width).to_astropy()
+        subcube = cube.spectral_slab(slab_lo, slab_hi)
+        slab_data[k] = YTArray(subcube.filled_data[:,:,:], field_units)
+    width = subcube.header["naxis3"]*cube.header["cdelt3"]
+    w = subcube.wcs.copy()
+    w.wcs.crpix[-1] = 0.5
+    w.wcs.crval[-1] = -0.5*width
+    fid = FITSImageBuffer(slab_data, wcs=w)
+    for hdu in fid:
+        hdu.header.pop("RESTFREQ", None)
+        hdu.header.pop("RESTFRQ", None)
+    ds = FITSDataset(fid, **kwargs)
+    return ds
+
 def ds9_region(ds, reg, obj=None, field_parameters=None):
     r"""
     Create a data container from a ds9 region file. Requires the pyregion

diff -r 718444346962f551ca51fec7a6ef877b6badb473 -r d6983e51e584eb0e996dfd2a4475ff2a8e0a6014 yt/utilities/file_handler.py
--- a/yt/utilities/file_handler.py
+++ b/yt/utilities/file_handler.py
@@ -54,12 +54,15 @@
 class FITSFileHandler(HDF5FileHandler):
     def __init__(self, filename):
         from yt.utilities.on_demand_imports import _astropy
-        if isinstance(filename, _astropy.pyfits.PrimaryHDU):
+        if isinstance(filename, _astropy.pyfits.hdu.image._ImageBaseHDU):
             self.handle = _astropy.pyfits.HDUList(filename)
+        elif isinstance(filename, _astropy.pyfits.HDUList):
+            self.handle = filename
         else:
             self.handle = _astropy.pyfits.open(
                 filename, memmap=True, do_not_scale_image_data=True,
                 ignore_blank=True)
+        self._fits_files = []
 
     def __del__(self):
         for f in self._fits_files:

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


