[yt-svn] commit/yt: 13 new changesets

commits-noreply at bitbucket.org
Thu Jul 9 16:38:11 PDT 2015


13 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/1c956287e419/
Changeset:   1c956287e419
Branch:      yt
User:        jzuhone
Date:        2015-05-25 15:20:09+00:00
Summary:     Merging
Affected #:  8 files

diff -r c12349973350b9c9b2f4c60af9a7bc6b90ea42e2 -r 1c956287e41941aab03b0e41da0f3494a922754a yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -89,6 +89,8 @@
         dims : integer, optional
             The spatial resolution of the cube. Implies nx = ny, i.e. the
             aspect ratio of the PPVCube's spatial dimensions is 1.
+        thermal_broad : boolean, optional
+            Whether or not to broaden the line using the gas temperature. Default: False.
         atomic_weight : float, optional
             Set this value to the atomic weight of the particle that is emitting the line
             if *thermal_broad* is True. Defaults to 56 (Fe).
@@ -152,9 +154,7 @@
                                "methods are supported in PPVCube.")
 
         dd = ds.all_data()
-
         fd = dd._determine_fields(field)[0]
-
         self.field_units = ds._get_field_info(fd).units
 
         self.vbins = ds.arr(np.linspace(velocity_bounds[0],
@@ -214,16 +214,6 @@
         self.ds.field_info.pop(("gas","intensity"))
         self.ds.field_info.pop(("gas","v_los"))
 
-    def create_intensity(self):
-        def _intensity(field, data):
-            v = self.current_v-data["v_los"].v
-            T = data["temperature"].v
-            w = ppv_utils.compute_weight(self.thermal_broad, self.dv_cgs,
-                                         self.particle_mass, v.flatten(), T.flatten())
-            w[np.isnan(w)] = 0.0
-            return data[self.field]*w.reshape(v.shape)
-        return _intensity
-
     def transform_spectral_axis(self, rest_value, units):
         """
         Change the units of the spectral axis to some equivalent unit, such
@@ -259,17 +249,18 @@
         self.dv = self.vbins[1]-self.vbins[0]
 
     @parallel_root_only
-    def write_fits(self, filename, clobber=True, length_unit=None,
+    def write_fits(self, filename, clobber=False, length_unit=None,
                    sky_scale=None, sky_center=None):
         r""" Write the PPVCube to a FITS file.
 
         Parameters
         ----------
         filename : string
-            The name of the file to write.
-        clobber : boolean
-            Whether or not to clobber an existing file with the same name.
-        length_unit : string
+            The name of the file to write to. 
+        clobber : boolean, optional
+            Whether to overwrite a file with the same name that already 
+            exists. Default False.
+        length_unit : string, optional
             The units to convert the coordinates to in the file.
         sky_scale : tuple, optional
             Conversion between an angle unit and a length unit, if sky
@@ -280,7 +271,8 @@
 
         Examples
         --------
-        >>> cube.write_fits("my_cube.fits", clobber=False, sky_scale=(1.0,"arcsec/kpc"))
+        >>> cube.write_fits("my_cube.fits", clobber=False, 
+        ...                 sky_scale=(1.0,"arcsec/kpc"), sky_center=(30.,45.))
         """
         vunit = fits_info[self.axis_type][0]
         vtype = fits_info[self.axis_type][1]
@@ -303,13 +295,11 @@
         w.wcs.cunit = [units,units,vunit]
         w.wcs.ctype = ["LINEAR","LINEAR",vtype]
 
+        fib = FITSImageData(self.data.transpose(), fields=self.field, wcs=w)
+        fib.update_all_headers("bunit", re.sub('()', '', str(self.proj_units)))
+        fib.update_all_headers("btype", self.field)
         if sky_scale is not None and sky_center is not None:
-            w = create_sky_wcs(w, sky_center, sky_scale)
-
-        fib = FITSImageBuffer(self.data.transpose(), fields=self.field, wcs=w)
-        fib[0].header["bunit"] = re.sub('()', '', str(self.proj_units))
-        fib[0].header["btype"] = self.field
-
+            fib.create_sky_wcs(sky_center, sky_scale)
         fib.writeto(filename, clobber=clobber)
 
     def __repr__(self):
@@ -320,3 +310,13 @@
 
     def __getitem__(self, item):
         return self.data[item]
+
+    def _create_intensity(self):
+        def _intensity(field, data):
+            v = self.current_v-data["v_los"].v
+            T = data["temperature"].v
+            w = ppv_utils.compute_weight(self.thermal_broad, self.dv_cgs,
+                                         self.particle_mass, v.flatten(), T.flatten())
+            w[np.isnan(w)] = 0.0
+            return data[self.field]*w.reshape(v.shape)
+        return _intensity

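The write_fits change above flips the clobber default to False, so
overwriting an existing file must now be requested explicitly. A minimal
sketch of the updated call, assuming an existing PPVCube instance named
cube:

>>> # clobber now defaults to False; pass clobber=True to overwrite
>>> cube.write_fits("my_cube.fits", clobber=True,
...                 sky_scale=(1.0, "arcsec/kpc"), sky_center=(30., 45.))
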
diff -r c12349973350b9c9b2f4c60af9a7bc6b90ea42e2 -r 1c956287e41941aab03b0e41da0f3494a922754a yt/analysis_modules/ppv_cube/tests/test_ppv.py
--- a/yt/analysis_modules/ppv_cube/tests/test_ppv.py
+++ b/yt/analysis_modules/ppv_cube/tests/test_ppv.py
@@ -1,5 +1,5 @@
 """
-Unit test the sunyaev_zeldovich analysis module.
+Unit test the PPVCube analysis module.
 """
 
 #-----------------------------------------------------------------------------

diff -r c12349973350b9c9b2f4c60af9a7bc6b90ea42e2 -r 1c956287e41941aab03b0e41da0f3494a922754a yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -316,7 +316,7 @@
         >>> sky_center = (30., 45., "deg")
         >>> szprj.write_fits("SZbullet.fits", sky_center=sky_center, sky_scale=sky_scale)
         """
-        from yt.utilities.fits_image import FITSImageBuffer, create_sky_wcs
+        from yt.utilities.fits_image import FITSImageData
 
         dx = self.dx.in_units("kpc")
         dy = dx
@@ -328,10 +328,9 @@
         w.wcs.cunit = ["kpc"]*2
         w.wcs.ctype = ["LINEAR"]*2
 
+        fib = FITSImageData(self.data, fields=self.data.keys(), wcs=w)
         if sky_scale is not None and sky_center is not None:
-            w = create_sky_wcs(w, sky_center, sky_scale)
-
-        fib = FITSImageBuffer(self.data, fields=self.data.keys(), wcs=w)
+            fib.create_sky_wcs(sky_center, sky_scale)
         fib.writeto(filename, clobber=clobber)
         
     @parallel_root_only

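The writer above no longer builds a celestial WCS through the module-level
create_sky_wcs function; that conversion is now a method on the image
container itself. A hedged sketch of the new pattern, reusing the szprj
object and the Cartesian WCS w from the hunk above:

>>> fib = FITSImageData(szprj.data, fields=szprj.data.keys(), wcs=w)
>>> fib.create_sky_wcs((30., 45.), (1.0, "arcsec/kpc"))  # center (deg), scale
>>> fib.writeto("SZbullet.fits", clobber=True)
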
diff -r c12349973350b9c9b2f4c60af9a7bc6b90ea42e2 -r 1c956287e41941aab03b0e41da0f3494a922754a yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -1,5 +1,5 @@
 """
-FITSImageBuffer Class
+FITSImageData Class
 """
 
 #-----------------------------------------------------------------------------
@@ -16,28 +16,26 @@
 from yt.data_objects.construction_data_containers import YTCoveringGridBase
 from yt.utilities.on_demand_imports import _astropy, NotAModule
 from yt.units.yt_array import YTQuantity, YTArray
+from yt.units import dimensions
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_root_only
+from yt.visualization.volume_rendering.camera import off_axis_projection
 import re
 
 pyfits = _astropy.pyfits
 pywcs = _astropy.pywcs
 
-if isinstance(pyfits, NotAModule):
-    HDUList = object
-else:
-    HDUList = pyfits.HDUList
+class FITSImageData(object):
 
-class FITSImageBuffer(HDUList):
+    def __init__(self, data, fields=None, units=None, width=None, wcs=None):
+        r""" Initialize a FITSImageData object.
 
-    def __init__(self, data, fields=None, units=None, pixel_scale=None, wcs=None):
-        r""" Initialize a FITSImageBuffer object.
-
-        FITSImageBuffer contains a list of FITS ImageHDU instances, and
-        optionally includes WCS information. It inherits from HDUList, so
-        operations such as `writeto` are enabled. Images can be constructed
-        from ImageArrays, NumPy arrays, dicts of such arrays,
-        FixedResolutionBuffers, and YTCoveringGrids. The latter two are the
-        most powerful because WCS information can be constructed from their
-        coordinates.
+        FITSImageData contains a collection of FITS ImageHDU instances and
+        WCS information, along with units for each of the images. FITSImageData
+        instances can be constructed from ImageArrays, NumPy arrays, dicts 
+        of such arrays, FixedResolutionBuffers, and YTCoveringGrids. The latter 
+        two are the most powerful because WCS information can be constructed 
+        automatically from their coordinates.
 
         Parameters
         ----------
@@ -50,10 +48,10 @@
             single array one field name must be specified.
         units : string
             The units of the WCS coordinates. Defaults to "cm".
-        pixel_scale : float
-            The scale of the pixel, in *units*. Either a single float or
-            iterable of floats. Only used if this information is not already
-            provided by *data*.
+        width : float or YTQuantity
+            The width of the image. Either a single value or iterable of values.
+            If a float, assumed to be in *units*. Only used if this information 
+            is not already provided by *data*.
         wcs : `astropy.wcs.WCS` instance, optional
             Supply an AstroPy WCS instance. Will override automatic WCS
             creation from FixedResolutionBuffers and YTCoveringGrids.
@@ -66,7 +64,7 @@
         >>> prj = ds.proj(2, "kT", weight_field="density")
         >>> frb = prj.to_frb((0.5, "Mpc"), 800)
         >>> # This example just uses the FRB and puts the coords in kpc.
-        >>> f_kpc = FITSImageBuffer(frb, fields="kT", units="kpc")
+        >>> f_kpc = FITSImageData(frb, fields="kT", units="kpc")
         >>> # This example specifies a specific WCS.
         >>> from astropy.wcs import WCS
         >>> w = WCS(naxis=self.dimensionality)
@@ -77,46 +75,47 @@
         >>> w.wcs.ctype = ["RA---TAN","DEC--TAN"]
         >>> scale = 1./3600. # One arcsec per pixel
         >>> w.wcs.cdelt = [-scale, scale]
-        >>> f_deg = FITSImageBuffer(frb, fields="kT", wcs=w)
+        >>> f_deg = FITSImageData(frb, fields="kT", wcs=w)
         >>> f_deg.writeto("temp.fits")
         """
 
-        if units is None: units = "cm"
-        if pixel_scale is None: pixel_scale = 1.0
+        if units is None: 
+            units = "cm"
+        if width is None: 
+            width = 1.0
 
-        super(FITSImageBuffer, self).__init__()
+        exclude_fields = ['x','y','z','px','py','pz',
+                          'pdx','pdy','pdz','weight_field']
 
-        if isinstance(fields, string_types):
+        self.hdulist = pyfits.HDUList()
+
+        if isinstance(fields, string_types): 
             fields = [fields]
 
-        exclude_fields = ['x', 'y', 'z', 'px', 'py', 'pz',
-                          'pdx', 'pdy', 'pdz', 'weight_field']
-
         if hasattr(data, 'keys'):
             img_data = data
-        else:
-            img_data = {}
             if fields is None:
-                mylog.error("Please specify a field name for this array.")
-                raise KeyError("Please specify a field name for this array.")
-            img_data[fields[0]] = data
+                fields = list(img_data.keys())
+        elif isinstance(data, np.ndarray):
+            if fields is None:
+                mylog.warning("No field name given for this array. Calling it 'image_data'.")
+                fn = 'image_data'
+                fields = [fn]
+            else:
+                fn = fields[0]
+            img_data = {fn: data}
 
-        if fields is None: fields = img_data.keys()
-        if len(fields) == 0:
-            mylog.error("Please specify one or more fields to write.")
-            raise KeyError("Please specify one or more fields to write.")
+        self.fields = fields
 
         first = True
-
         self.field_units = {}
-
         for key in fields:
             if key not in exclude_fields:
                 if hasattr(img_data[key], "units"):
                     self.field_units[key] = str(img_data[key].units)
                 else:
                     self.field_units[key] = "dimensionless"
-                mylog.info("Making a FITS image of field %s" % (key))
+                mylog.info("Making a FITS image of field %s" % key)
                 if first:
                     hdu = pyfits.PrimaryHDU(np.array(img_data[key]))
                     first = False
@@ -126,37 +125,35 @@
                 hdu.header["btype"] = key
                 if hasattr(img_data[key], "units"):
                     hdu.header["bunit"] = re.sub('()', '', str(img_data[key].units))
-                self.append(hdu)
+                self.hdulist.append(hdu)
 
-        self.dimensionality = len(self[0].data.shape)
-        
-        if self.dimensionality == 2:
-            self.nx, self.ny = self[0].data.shape
-        elif self.dimensionality == 3:
-            self.nx, self.ny, self.nz = self[0].data.shape
+        self.shape = self.hdulist[0].shape
+        self.dimensionality = len(self.shape)
 
         if wcs is None:
-            w = pywcs.WCS(header=self[0].header, naxis=self.dimensionality)
+            w = pywcs.WCS(header=self.hdulist[0].header, naxis=self.dimensionality)
             if isinstance(img_data, FixedResolutionBuffer):
                 # FRBs are a special case where we have coordinate
                 # information, so we take advantage of this and
                 # construct the WCS object
-                dx = (img_data.bounds[1]-img_data.bounds[0]).in_units(units)/self.nx
-                dy = (img_data.bounds[3]-img_data.bounds[2]).in_units(units)/self.ny
-                xctr = 0.5*(img_data.bounds[1]+img_data.bounds[0]).in_units(units)
-                yctr = 0.5*(img_data.bounds[3]+img_data.bounds[2]).in_units(units)
+                dx = (img_data.bounds[1]-img_data.bounds[0]).in_units(units).v/self.shape[0]
+                dy = (img_data.bounds[3]-img_data.bounds[2]).in_units(units).v/self.shape[1]
+                xctr = 0.5*(img_data.bounds[1]+img_data.bounds[0]).in_units(units).v
+                yctr = 0.5*(img_data.bounds[3]+img_data.bounds[2]).in_units(units).v
                 center = [xctr, yctr]
                 cdelt = [dx,dy]
             elif isinstance(img_data, YTCoveringGridBase):
                 cdelt = img_data.dds.in_units(units).v
-                center = 0.5*(img_data.left_edge+img_data.right_edge).in_units(units)
+                center = 0.5*(img_data.left_edge+img_data.right_edge).in_units(units).v
             else:
                 # If img_data is just an array, we assume the center is the origin
-                # and use *pixel_scale* to determine the cell widths
-                if iterable(pixel_scale):
-                    cdelt = pixel_scale
+                # and use the image width to determine the cell widths
+                if not iterable(width):
+                    width = [width]*self.dimensionality
+                if isinstance(width[0], YTQuantity):
+                    cdelt = [wh.in_units(units).v/n for wh, n in zip(width, self.shape)]
                 else:
-                    cdelt = [pixel_scale]*self.dimensionality
+                    cdelt = [wh/n for wh, n in zip(width, self.shape)]
                 center = [0.0]*self.dimensionality
             w.wcs.crpix = 0.5*(np.array(self.shape)+1)
             w.wcs.crval = center
@@ -174,57 +171,104 @@
         """
         self.wcs = wcs
         h = self.wcs.to_header()
-        for img in self:
+        for img in self.hdulist:
             for k, v in h.items():
                 img.header[k] = v
 
-    def update_all_headers(self, key, value):
+    def update_header(self, field, key, value):
         """
-        Update the FITS headers for all images with the
-        same *key*, *value* pair.
+        Update the FITS header for *field* with a
+        *key*, *value* pair. If *field* == "all", all 
+        headers will be updated.
         """
-        for img in self: img.header[key] = value
+        if field == "all":
+            for img in self.hdulist:
+                img.header[key] = value
+        else:
+            if field not in self.keys():
+                raise KeyError("%s not an image!" % field)
+            idx = self.fields.index(field)
+            self.hdulist[idx].header[key] = value
 
     def keys(self):
-        return [f.name.lower() for f in self]
+        return self.fields
+
+    def __getitem__(self, field):
+        if field not in self.keys():
+            raise KeyError("%s not an image!" % field)
+        idx = self.fields.index(field)
+        return YTArray(self.hdulist[idx].data, self.field_units[field])
 
     def has_key(self, key):
-        return key in self.keys()
+        return key in self.fields
 
     def values(self):
-        return [self[k] for k in self.keys()]
+        return [self[k] for k in self.fields]
 
     def items(self):
-        return [(k, self[k]) for k in self.keys()]
+        return [(k, self[k]) for k in self.fields]
 
-    def writeto(self, fileobj, **kwargs):
-        pyfits.HDUList(self).writeto(fileobj, **kwargs)
+    def get_header(self, field):
+        """
+        Get the FITS header for a specific field.
 
-    @property
-    def shape(self):
-        if self.dimensionality == 2:
-            return self.nx, self.ny
-        elif self.dimensionality == 3:
-            return self.nx, self.ny, self.nz
+        Parameters
+        ----------
+        field : string
+            The field for which to get the corresponding header. 
+        """
+        if field not in self.keys():
+            raise KeyError("%s not an image!" % field)
+        idx = self.fields.index(field)
+        return self.hdulist[idx].header
+
+    @parallel_root_only
+    def writeto(self, fileobj, fields=None, clobber=False, **kwargs):
+        r"""
+        Write all of the fields or a subset of them to a FITS file. 
+
+        Parameters
+        ----------
+        fileobj : string
+            The name of the file to write to. 
+        fields : list of strings, optional
+            The fields to write to the file. If not specified
+            all of the fields in the buffer will be written.
+        clobber : boolean, optional
+            Whether or not to overwrite a previously existing file.
+            Default: False
+        All other keyword arguments are passed to the `writeto`
+        method of `astropy.io.fits.HDUList`.
+        """
+        if fields is None:
+            hdus = self.hdulist
+        else:
+            hdus = pyfits.HDUList()
+            for field in fields:
+                hdus.append(self.hdulist[field])
+        hdus.writeto(fileobj, clobber=clobber, **kwargs)
+
+    def info(self):
+        """
+        Display information about the underlying FITS file. 
+        """
+        self.hdulist.info()
 
     def to_glue(self, label="yt", data_collection=None):
         """
-        Takes the data in the FITSImageBuffer and exports it to
-        Glue (http://www.glueviz.org) for interactive
-        analysis. Optionally add a *label*. If you are already within
-        the Glue environment, you can pass a *data_collection* object,
-        otherwise Glue will be started.
+        Takes the data in the FITSImageData instance and exports it to
+        Glue (http://www.glueviz.org) for interactive analysis. Optionally 
+        add a *label*. If you are already within the Glue environment, you 
+        can pass a *data_collection* object, otherwise Glue will be started.
         """
         from glue.core import DataCollection, Data
         from glue.core.coordinates import coordinates_from_header
         from glue.qt.glue_application import GlueApplication
 
-        field_dict = dict((key,self[key].data) for key in self.keys())
-        
         image = Data(label=label)
         image.coords = coordinates_from_header(self.wcs.to_header())
-        for k,v in field_dict.items():
-            image.add_component(v, k)
+        for k,v in self.items():
+            image.add_component(v.v, k)
         if data_collection is None:
             dc = DataCollection([image])
             app = GlueApplication(dc)
@@ -239,67 +283,132 @@
         `aplpy.FITSFigure` constructor.
         """
         import aplpy
-        return aplpy.FITSFigure(self, **kwargs)
-
-    def get_data(self, field):
-        return YTArray(self[field].data, self.field_units[field])
+        return aplpy.FITSFigure(self.hdulist, **kwargs)
 
     def set_unit(self, field, units):
         """
         Set the units of *field* to *units*.
         """
-        new_data = YTArray(self[field].data, self.field_units[field]).in_units(units)
-        self[field].data = new_data.v
-        self[field].header["bunit"] = units
+        if field not in self.keys():
+            raise KeyError("%s not an image!" % field)
+        new_data = self[field].in_units(units)
+        idx = self.fields.index(field)
+        self.hdulist[idx].data = new_data.v
+        self.hdulist[idx].header["bunit"] = units
         self.field_units[field] = units
 
-axis_wcs = [[1,2],[0,2],[0,1]]
+    def pop(self, key):
+        """
+        Remove a field with name *key*
+        and return it as a new FITSImageData 
+        instance.
+        """
+        if key not in self.keys():
+            raise KeyError("%s not an image!" % key)
+        data = self[key]
+        idx = self.fields.index(key)
+        self.hdulist.pop(idx)
+        self.field_units.pop(key)
+        self.fields.remove(key)
+        return FITSImageData(data, fields=key, wcs=self.wcs)
 
-def create_sky_wcs(old_wcs, sky_center, sky_scale,
-                   ctype=["RA---TAN","DEC--TAN"], crota=None):
-    """
-    Takes an astropy.wcs.WCS instance created in yt from a
-    simulation that has a Cartesian coordinate system and
-    converts it to one in a celestial coordinate system.
+    @classmethod
+    def from_file(cls, filename):
+        """
+        Generate a FITSImageData instance from one previously written to 
+        disk.
 
-    Parameters
-    ----------
-    old_wcs : astropy.wcs.WCS
-        The original WCS to be converted.
-    sky_center : tuple
-        Reference coordinates of the WCS in degrees.
-    sky_scale : tuple
-        Conversion between an angle unit and a length unit,
-        e.g. (3.0, "arcsec/kpc")
-    ctype : list of strings, optional
-        The type of the coordinate system to create.
-    crota : list of floats, optional
-        Rotation angles between cartesian coordinates and
-        the celestial coordinates.
-    """
-    naxis = old_wcs.naxis
-    crval = [sky_center[0], sky_center[1]]
-    scaleq = YTQuantity(sky_scale[0],sky_scale[1])
-    deltas = old_wcs.wcs.cdelt
-    units = [str(unit) for unit in old_wcs.wcs.cunit]
-    new_dx = (YTQuantity(-deltas[0], units[0])*scaleq).in_units("deg")
-    new_dy = (YTQuantity(deltas[1], units[1])*scaleq).in_units("deg")
-    new_wcs = pywcs.WCS(naxis=naxis)
-    cdelt = [new_dx.v, new_dy.v]
-    cunit = ["deg"]*2
-    if naxis == 3:
-        crval.append(old_wcs.wcs.crval[2])
-        cdelt.append(old_wcs.wcs.cdelt[2])
-        ctype.append(old_wcs.wcs.ctype[2])
-        cunit.append(old_wcs.wcs.cunit[2])
-    new_wcs.wcs.crpix = old_wcs.wcs.crpix
-    new_wcs.wcs.cdelt = cdelt
-    new_wcs.wcs.crval = crval
-    new_wcs.wcs.cunit = cunit
-    new_wcs.wcs.ctype = ctype
-    if crota is not None:
-        new_wcs.wcs.crota = crota
-    return new_wcs
+        Parameters
+        ----------
+        filename : string
+            The name of the file to open.
+        """
+        f = pyfits.open(filename)
+        data = {}
+        for hdu in f:
+            data[hdu.header["btype"]] = YTArray(hdu.data, hdu.header["bunit"])
+        f.close()
+        return cls(data, wcs=pywcs.WCS(header=hdu.header))
+
+    @classmethod
+    def from_images(cls, image_list):
+        """
+        Generate a new FITSImageData instance from a list of FITSImageData 
+        instances.
+
+        Parameters
+        ----------
+        image_list : list of FITSImageData instances
+            The images to be combined.
+        """
+        w = image_list[0].wcs
+        img_shape = image_list[0].shape
+        data = {}
+        for image in image_list:
+            assert_same_wcs(w, image.wcs)
+            if img_shape != image.shape:
+                raise RuntimeError("Images do not have the same shape!")
+            for k,v in image.items():
+                data[k] = v
+        return cls(data, wcs=w)
+
+    def create_sky_wcs(self, sky_center, sky_scale,
+                       ctype=["RA---TAN","DEC--TAN"], 
+                       crota=None, cd=None, pc=None):
+        """
+        Takes a Cartesian WCS and converts it to one in a 
+        celestial coordinate system.
+
+        Parameters
+        ----------
+        sky_center : iterable of floats
+            Reference coordinates of the WCS in degrees.
+        sky_scale : tuple or YTQuantity
+            Conversion between an angle unit and a length unit,
+            e.g. (3.0, "arcsec/kpc")
+        ctype : list of strings, optional
+            The type of the coordinate system to create.
+        crota : 2-element ndarray, optional
+            Rotation angles between cartesian coordinates and
+            the celestial coordinates.
+        cd : 2x2-element ndarray, optional
+            Dimensioned coordinate transformation matrix.
+        pc : 2x2-element ndarray, optional
+            Coordinate transformation matrix.
+        """
+        old_wcs = self.wcs
+        naxis = old_wcs.naxis
+        crval = [sky_center[0], sky_center[1]]
+        if isinstance(sky_scale, YTQuantity):
+            scaleq = sky_scale
+        else:
+            scaleq = YTQuantity(sky_scale[0],sky_scale[1])
+        if scaleq.units.dimensions != dimensions.angle/dimensions.length:
+            raise RuntimeError("sky_scale %s not in correct dimensions of angle/length!" % sky_scale)
+        deltas = old_wcs.wcs.cdelt
+        units = [str(unit) for unit in old_wcs.wcs.cunit]
+        new_dx = (YTQuantity(-deltas[0], units[0])*scaleq).in_units("deg")
+        new_dy = (YTQuantity(deltas[1], units[1])*scaleq).in_units("deg")
+        new_wcs = pywcs.WCS(naxis=naxis)
+        cdelt = [new_dx.v, new_dy.v]
+        cunit = ["deg"]*2
+        if naxis == 3:
+            crval.append(old_wcs.wcs.crval[2])
+            cdelt.append(old_wcs.wcs.cdelt[2])
+            ctype.append(old_wcs.wcs.ctype[2])
+            cunit.append(old_wcs.wcs.cunit[2])
+        new_wcs.wcs.crpix = old_wcs.wcs.crpix
+        new_wcs.wcs.cdelt = cdelt
+        new_wcs.wcs.crval = crval
+        new_wcs.wcs.cunit = cunit
+        new_wcs.wcs.ctype = ctype
+        if crota is not None:
+            new_wcs.wcs.crota = crota
+        if cd is not None:
+            new_wcs.wcs.cd = cd
+        if pc is not None:
+            new_wcs.wcs.pc = pc
+        self.set_wcs(new_wcs)
 
 def sanitize_fits_unit(unit):
     if unit == "Mpc":
@@ -309,11 +418,9 @@
         unit = "AU"
     return unit
 
-def construct_image(data_source, center=None, width=None, image_res=None):
-    ds = data_source.ds
-    axis = data_source.axis
-    if center is None or width is None:
-        center = ds.domain_center[axis_wcs[axis]]
+axis_wcs = [[1,2],[0,2],[0,1]]
+
+def construct_image(ds, axis, data_source, center, width=None, image_res=None):
     if width is None:
         width = ds.domain_width[axis_wcs[axis]]
         unit = ds.get_smallest_appropriate_unit(width[0])
@@ -323,28 +430,28 @@
         width = ds.coordinates.sanitize_width(axis, width, None)
         unit = str(width[0].units)
     if image_res is None:
-        dd = ds.all_data()
-        dx, dy = [dd.quantities.extrema("d%s" % "xyz"[idx])[0]
-                  for idx in axis_wcs[axis]]
-        nx = int((width[0]/dx).in_units("dimensionless"))
-        ny = int((width[1]/dy).in_units("dimensionless"))
+        ddims = ds.domain_dimensions*2**ds.index.max_level
+        if iterable(axis):
+            nx = ddims.max()
+            ny = ddims.max()
+        else:
+            nx, ny = [ddims[idx] for idx in axis_wcs[axis]]
     else:
         if iterable(image_res):
             nx, ny = image_res
         else:
             nx, ny = image_res, image_res
-        dx, dy = width[0]/nx, width[1]/ny
+    dx, dy = width[0]/nx, width[1]/ny
     crpix = [0.5*(nx+1), 0.5*(ny+1)]
-    if hasattr(ds, "wcs"):
+    if hasattr(ds, "wcs") and not iterable(axis):
         # This is a FITS dataset, so we use it to construct the WCS
         cunit = [str(ds.wcs.wcs.cunit[idx]) for idx in axis_wcs[axis]]
         ctype = [ds.wcs.wcs.ctype[idx] for idx in axis_wcs[axis]]
         cdelt = [ds.wcs.wcs.cdelt[idx] for idx in axis_wcs[axis]]
         ctr_pix = center.in_units("code_length")[:ds.dimensionality].v
-        crval = ds.wcs.wcs_pix2world(ctr_pix.reshape(1,ds.dimensionality))[0]
+        crval = ds.wcs.wcs_pix2world(ctr_pix.reshape(1, ds.dimensionality))[0]
         crval = [crval[idx] for idx in axis_wcs[axis]]
     else:
-        # This is some other kind of dataset                                                                      
         if unit == "unitary":
             unit = ds.get_smallest_appropriate_unit(ds.domain_width.max())
         elif unit == "code_length":
@@ -353,8 +460,17 @@
         cunit = [unit]*2
         ctype = ["LINEAR"]*2
         cdelt = [dx.in_units(unit)]*2
-        crval = [center[idx].in_units(unit) for idx in axis_wcs[axis]]
-    frb = data_source.to_frb(width[0], (nx,ny), center=center, height=width[1])
+        if iterable(axis):
+            crval = center.in_units(unit)
+        else:
+            crval = [center[idx].in_units(unit) for idx in axis_wcs[axis]]
+    if hasattr(data_source, 'to_frb'):
+        if iterable(axis):
+            frb = data_source.to_frb(width[0], (nx, ny), height=width[1])
+        else:
+            frb = data_source.to_frb(width[0], (nx, ny), center=center, height=width[1])
+    else:
+        frb = None
     w = pywcs.WCS(naxis=2)
     w.wcs.crpix = crpix
     w.wcs.cdelt = cdelt
@@ -363,14 +479,42 @@
     w.wcs.ctype = ctype
     return w, frb
 
-class FITSSlice(FITSImageBuffer):
+def assert_same_wcs(wcs1, wcs2):
+    from numpy.testing import assert_allclose
+    assert wcs1.naxis == wcs2.naxis
+    for i in range(wcs1.naxis):
+        assert wcs1.wcs.cunit[i] == wcs2.wcs.cunit[i]
+        assert wcs1.wcs.ctype[i] == wcs2.wcs.ctype[i]
+    assert_allclose(wcs1.wcs.crpix, wcs2.wcs.crpix)
+    assert_allclose(wcs1.wcs.cdelt, wcs2.wcs.cdelt)
+    assert_allclose(wcs1.wcs.crval, wcs2.wcs.crval)
+    crota1 = getattr(wcs1.wcs, "crota", None)
+    crota2 = getattr(wcs2.wcs, "crota", None)
+    if crota1 is None or crota2 is None:
+        assert crota1 == crota2
+    else:
+        assert_allclose(wcs1.wcs.crota, wcs2.wcs.crota)
+    cd1 = getattr(wcs1.wcs, "cd", None)
+    cd2 = getattr(wcs2.wcs, "cd", None)
+    if cd1 is None or cd2 is None:
+        assert cd1 == cd2
+    else:
+        assert_allclose(wcs1.wcs.cd, wcs2.wcs.cd)
+    pc1 = getattr(wcs1.wcs, "pc", None)
+    pc2 = getattr(wcs2.wcs, "pc", None)
+    if pc1 is None or pc2 is None:
+        assert pc1 == pc2
+    else:
+        assert_allclose(wcs1.wcs.pc, wcs2.wcs.pc)
+
+class FITSSlice(FITSImageData):
     r"""
-    Generate a FITSImageBuffer of an on-axis slice.
+    Generate a FITSImageData of an on-axis slice.
 
     Parameters
     ----------
-    ds : FITSDataset
-        The FITS dataset object.
+    ds : :class:`yt.data_objects.api.Dataset`
+        The dataset object.
     axis : character or integer
         The axis of the slice. One of "x","y","z", or 0,1,2.
     fields : string or list of strings
@@ -414,20 +558,18 @@
         axis = fix_axis(axis, ds)
         center, dcenter = ds.coordinates.sanitize_center(center, axis)
         slc = ds.slice(axis, center[axis], **kwargs)
-        w, frb = construct_image(slc, center=dcenter, width=width,
-                                 image_res=image_res)
+        w, frb = construct_image(ds, axis, slc, dcenter, width=width, image_res=image_res)
         super(FITSSlice, self).__init__(frb, fields=fields, wcs=w)
-        for i, field in enumerate(fields):
-            self[i].header["bunit"] = str(frb[field].units)
 
-class FITSProjection(FITSImageBuffer):
+
+class FITSProjection(FITSImageData):
     r"""
-    Generate a FITSImageBuffer of an on-axis projection.
+    Generate a FITSImageData of an on-axis projection.
 
     Parameters
     ----------
-    ds : FITSDataset
-        The FITS dataset object.
+    ds : :class:`yt.data_objects.api.Dataset`
+        The dataset object.
     axis : character or integer
         The axis along which to project. One of "x","y","z", or 0,1,2.
     fields : string or list of strings
@@ -474,8 +616,155 @@
         axis = fix_axis(axis, ds)
         center, dcenter = ds.coordinates.sanitize_center(center, axis)
         prj = ds.proj(fields[0], axis, weight_field=weight_field, **kwargs)
-        w, frb = construct_image(prj, center=dcenter, width=width,
-                                 image_res=image_res)
+        w, frb = construct_image(ds, axis, prj, dcenter, width=width, image_res=image_res)
         super(FITSProjection, self).__init__(frb, fields=fields, wcs=w)
-        for i, field in enumerate(fields):
-            self[i].header["bunit"] = str(frb[field].units)
+
+class FITSOffAxisSlice(FITSImageData):
+    r"""
+    Generate a FITSImageData of an off-axis slice.
+
+    Parameters
+    ----------
+    ds : :class:`yt.data_objects.api.Dataset`
+        The dataset object.
+    normal : a sequence of floats
+        The vector normal to the projection plane.
+    fields : string or list of strings
+        The fields to slice
+    center : A sequence of floats, a string, or a tuple.
+        The coordinate of the center of the image. If set to 'c', 'center' or
+        left blank, the plot is centered on the middle of the domain. If set to
+        'max' or 'm', the center will be located at the maximum of the
+        ('gas', 'density') field. Centering on the max or min of a specific
+        field is supported by providing a tuple such as ("min","temperature") or
+        ("max","dark_matter_density"). Units can be specified by passing in *center*
+        as a tuple containing a coordinate and string unit name or by passing
+        in a YTArray. If a list or unitless array is supplied, code units are
+        assumed.
+    width : tuple or a float.
+        Width can have four different formats to support windows with variable
+        x and y widths.  They are:
+
+        ==================================     =======================
+        format                                 example
+        ==================================     =======================
+        (float, string)                        (10,'kpc')
+        ((float, string), (float, string))     ((10,'kpc'),(15,'kpc'))
+        float                                  0.2
+        (float, float)                         (0.2, 0.3)
+        ==================================     =======================
+
+        For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
+        wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
+        window that is 10 kiloparsecs wide along the x axis and 15
+        kiloparsecs wide along the y axis.  In the other two examples, code
+        units are assumed, for example (0.2, 0.3) requests a plot that has an
+        x width of 0.2 and a y width of 0.3 in code units.  If units are
+        provided the resulting plot axis labels will use the supplied units.
+    image_res : an int or 2-tuple of ints
+        Specify the resolution of the resulting image.
+    north_vector : a sequence of floats
+        A vector defining the 'up' direction in the plot.  This
+        option sets the orientation of the slicing plane.  If not
+        set, an arbitrary grid-aligned north-vector is chosen.
+    """
+    def __init__(self, ds, normal, fields, center='c', width=None, image_res=512, 
+                 north_vector=None):
+        fields = ensure_list(fields)
+        center, dcenter = ds.coordinates.sanitize_center(center, 4)
+        cut = ds.cutting(normal, center, north_vector=north_vector)
+        center = ds.arr([0.0] * 2, 'code_length')
+        w, frb = construct_image(ds, normal, cut, center, width=width, image_res=image_res)
+        super(FITSOffAxisSlice, self).__init__(frb, fields=fields, wcs=w)
+
+
+class FITSOffAxisProjection(FITSImageData):
+    r"""
+    Generate a FITSImageData of an off-axis projection.
+
+    Parameters
+    ----------
+    ds : :class:`yt.data_objects.api.Dataset`
+        This is the dataset object corresponding to the
+        simulation output to be plotted.
+    normal : a sequence of floats
+        The vector normal to the projection plane.
+    fields : string, list of strings
+        The name of the field(s) to be plotted.
+    center : A sequence of floats, a string, or a tuple.
+         The coordinate of the center of the image. If set to 'c', 'center' or
+         left blank, the plot is centered on the middle of the domain. If set to
+         'max' or 'm', the center will be located at the maximum of the
+         ('gas', 'density') field. Centering on the max or min of a specific
+         field is supported by providing a tuple such as ("min","temperature") or
+         ("max","dark_matter_density"). Units can be specified by passing in *center*
+         as a tuple containing a coordinate and string unit name or by passing
+         in a YTArray. If a list or unitless array is supplied, code units are
+         assumed.
+    width : tuple or a float.
+         Width can have four different formats to support windows with variable
+         x and y widths.  They are:
+
+         ==================================     =======================
+         format                                 example
+         ==================================     =======================
+         (float, string)                        (10,'kpc')
+         ((float, string), (float, string))     ((10,'kpc'),(15,'kpc'))
+         float                                  0.2
+         (float, float)                         (0.2, 0.3)
+         ==================================     =======================
+
+         For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
+         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
+         window that is 10 kiloparsecs wide along the x axis and 15
+         kiloparsecs wide along the y axis.  In the other two examples, code
+         units are assumed, for example (0.2, 0.3) requests a plot that has an
+         x width of 0.2 and a y width of 0.3 in code units.  If units are
+         provided the resulting plot axis labels will use the supplied units.
+    depth : A tuple or a float
+         A tuple containing the depth to project through and the string
+         key of the unit: (width, 'unit').  If set to a float, code units
+         are assumed
+    weight_field : string
+         The name of the weighting field.  Set to None for no weight.
+    image_res : an int or 2-tuple of ints
+        Specify the resolution of the resulting image. 
+    depth_res : an int 
+        Specify the resolution of the depth of the projection.
+    north_vector : a sequence of floats
+         A vector defining the 'up' direction in the plot.  This
+         option sets the orientation of the slicing plane.  If not
+         set, an arbitrary grid-aligned north-vector is chosen.
+    method : string
+         The method of projection.  Valid methods are:
+
+         "integrate" with no weight_field specified : integrate the requested
+         field along the line of sight.
+
+         "integrate" with a weight_field specified : weight the requested
+         field by the weighting field and integrate along the line of sight.
+
+         "sum" : This method is the same as integrate, except that it does not
+         multiply by a path length when performing the integration, and is
+         just a straight summation of the field along the given axis. WARNING:
+         This should only be used for uniform resolution grid datasets, as other
+         datasets may result in unphysical images.
+    """
+    def __init__(self, ds, normal, fields, center='c', width=(1.0, 'unitary'), 
+                 weight_field=None, image_res=512, depth_res=256, 
+                 north_vector=None, depth=(1.0,"unitary"), no_ghost=False, method='integrate'):
+        fields = ensure_list(fields)
+        center, dcenter = ds.coordinates.sanitize_center(center, 4)
+        buf = {}
+        width = ds.coordinates.sanitize_width(normal, width, depth)
+        wd = tuple(el.in_units('code_length').v for el in width)
+        if not iterable(image_res):
+            image_res = (image_res, image_res)
+        res = (image_res[0], image_res[1], depth_res)
+        for field in fields:
+            buf[field] = off_axis_projection(ds, center, normal, wd, res, field, 
+                                             no_ghost=no_ghost, north_vector=north_vector, 
+                                             method=method, weight=weight_field).swapaxes(0, 1)
+        center = ds.arr([0.0] * 2, 'code_length')
+        w, not_an_frb = construct_image(ds, normal, buf, center, width=width, image_res=image_res)
+        super(FITSOffAxisProjection, self).__init__(buf, fields=fields, wcs=w)

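Taken together, the refactor above replaces FITSImageBuffer (an HDUList
subclass) with FITSImageData, which owns its hdulist and returns unit-aware
YTArrays on item access. A minimal round-trip sketch of the new interface,
assuming a unit-tagged array; the field and file names are illustrative:

>>> import numpy as np
>>> from yt.units.yt_array import YTArray
>>> from yt.utilities.fits_image import FITSImageData, assert_same_wcs
>>> dens = YTArray(np.random.random((128, 128)), "g/cm**3")
>>> # *width* (in *units*, default "cm") replaces the old *pixel_scale*
>>> fid = FITSImageData(dens, fields="density", width=100.0)
>>> fid.update_header("all", "time", 0.1)  # replaces update_all_headers
>>> fid.writeto("dens.fits", clobber=True)
>>> new_fid = FITSImageData.from_file("dens.fits")
>>> assert_same_wcs(fid.wcs, new_fid.wcs)
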
diff -r c12349973350b9c9b2f4c60af9a7bc6b90ea42e2 -r 1c956287e41941aab03b0e41da0f3494a922754a yt/utilities/tests/test_fits_image.py
--- /dev/null
+++ b/yt/utilities/tests/test_fits_image.py
@@ -0,0 +1,128 @@
+"""
+Unit test FITS image creation in yt.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import tempfile
+import os
+import numpy as np
+import shutil
+from yt.testing import fake_random_ds
+from yt.convenience import load
+from numpy.testing import \
+    assert_equal
+from yt.utilities.fits_image import \
+    FITSImageData, FITSProjection, \
+    FITSSlice, FITSOffAxisSlice, \
+    FITSOffAxisProjection, \
+    assert_same_wcs
+from yt.visualization.volume_rendering.camera import \
+    off_axis_projection
+
+def test_fits_image():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    fields = ("density", "temperature")
+    units = ('g/cm**3', 'K',)
+    ds = fake_random_ds(64, fields=fields, units=units, nprocs=16,
+                        length_unit=100.0)
+
+    prj = ds.proj("density", 2)
+    prj_frb = prj.to_frb((0.5, "unitary"), 128)
+
+    fid1 = FITSImageData(prj_frb, fields=["density","temperature"], units="cm")
+    fits_prj = FITSProjection(ds, "z", ["density","temperature"], image_res=128,
+                              width=(0.5,"unitary"))
+
+    yield assert_equal, fid1["density"], fits_prj["density"]
+    yield assert_equal, fid1["temperature"], fits_prj["temperature"]
+
+    fid1.writeto("fid1.fits", clobber=True)
+    new_fid1 = FITSImageData.from_file("fid1.fits")
+
+    yield assert_equal, fid1["density"], new_fid1["density"]
+    yield assert_equal, fid1["temperature"], new_fid1["temperature"]
+
+    ds2 = load("fid1.fits")
+    ds2.index
+
+    assert ("fits","density") in ds2.field_list
+    assert ("fits","temperature") in ds2.field_list
+
+    dw_cm = ds2.domain_width.in_units("cm")
+
+    assert dw_cm[0].v == 50.
+    assert dw_cm[1].v == 50.
+
+    slc = ds.slice(2, 0.5)
+    slc_frb = slc.to_frb((0.5, "unitary"), 128)
+
+    fid2 = FITSImageData(slc_frb, fields=["density","temperature"], units="cm")
+    fits_slc = FITSSlice(ds, "z", ["density","temperature"], image_res=128,
+                         width=(0.5,"unitary"))
+
+    yield assert_equal, fid2["density"], fits_slc["density"]
+    yield assert_equal, fid2["temperature"], fits_slc["temperature"]
+
+    dens_img = fid2.pop("density")
+    temp_img = fid2.pop("temperature")
+
+    # This already has some assertions in it, so we don't need to do anything
+    # with it other than just make one.
+    fid_comb = FITSImageData.from_images([dens_img, temp_img])
+
+    cut = ds.cutting([0.1, 0.2, -0.9], [0.5, 0.42, 0.6])
+    cut_frb = cut.to_frb((0.5, "unitary"), 128)
+
+    fid3 = FITSImageData(cut_frb, fields=["density","temperature"], units="cm")
+    fits_cut = FITSOffAxisSlice(ds, [0.1, 0.2, -0.9], ["density","temperature"], 
+                                image_res=128, center=[0.5, 0.42, 0.6],
+                                width=(0.5,"unitary"))
+
+    yield assert_equal, fid3["density"], fits_cut["density"]
+    yield assert_equal, fid3["temperature"], fits_cut["temperature"]
+
+    fid3.create_sky_wcs([30.,45.], (1.0,"arcsec/kpc"))
+    fid3.writeto("fid3.fits", clobber=True)
+    new_fid3 = FITSImageData.from_file("fid3.fits")
+    assert_same_wcs(fid3.wcs, new_fid3.wcs)
+    assert new_fid3.wcs.wcs.cunit[0] == "deg"
+    assert new_fid3.wcs.wcs.cunit[1] == "deg"
+    assert new_fid3.wcs.wcs.ctype[0] == "RA---TAN"
+    assert new_fid3.wcs.wcs.ctype[1] == "DEC--TAN"
+
+    buf = off_axis_projection(ds, ds.domain_center, [0.1, 0.2, -0.9], 
+                              0.5, 128, "density").swapaxes(0, 1)
+    fid4 = FITSImageData(buf, fields="density", width=100.0)
+    fits_oap = FITSOffAxisProjection(ds, [0.1, 0.2, -0.9], "density", 
+                                     width=(0.5,"unitary"), image_res=128, 
+                                     depth_res=128, depth=(0.5,"unitary"))
+
+    yield assert_equal, fid4["density"], fits_oap["density"]
+
+    cvg = ds.covering_grid(ds.index.max_level, [0.25,0.25,0.25], 
+                           [32, 32, 32], fields=["density","temperature"])
+    fid5 = FITSImageData(cvg, fields=["density","temperature"])
+    assert fid5.dimensionality == 3
+
+    fid5.update_header("density", "time", 0.1)
+    fid5.update_header("all", "units", "cgs")
+    
+    assert fid5.get_header("density")["time"] == 0.1
+    assert fid5.get_header("temperature")["units"] == "cgs"
+    assert fid5.get_header("density")["units"] == "cgs"
+    
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)

diff -r c12349973350b9c9b2f4c60af9a7bc6b90ea42e2 -r 1c956287e41941aab03b0e41da0f3494a922754a yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -318,14 +318,14 @@
             requested.
         """
 
-        from yt.utilities.fits_image import FITSImageBuffer
+        from yt.utilities.fits_image import FITSImageData
 
         extra_fields = ['x','y','z','px','py','pz','pdx','pdy','pdz','weight_field']
         if fields is None: 
             fields = [field[-1] for field in self.data_source.field_data
                       if field not in extra_fields]
 
-        fib = FITSImageBuffer(self, fields=fields, units=units)
+        fib = FITSImageData(self, fields=fields, units=units)
         if other_keys is not None:
             for k,v in other_keys.items():
                 fib.update_all_headers(k,v)
@@ -410,7 +410,7 @@
 
     def __getitem__(self, item):
         if item in self.data: return self.data[item]
-        mylog.info("Making a fixed resolutuion buffer of (%s) %d by %d" % \
+        mylog.info("Making a fixed resolution buffer of (%s) %d by %d" % \
             (item, self.buff_size[0], self.buff_size[1]))
         dd = self.data_source
         width = self.ds.arr((self.bounds[1] - self.bounds[0],

diff -r c12349973350b9c9b2f4c60af9a7bc6b90ea42e2 -r 1c956287e41941aab03b0e41da0f3494a922754a yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -2252,7 +2252,6 @@
             def temp_weightfield(a, b):
                 tr = b[f].astype("float64") * b[w]
                 return b.apply_units(tr, a.units)
-                return tr
             return temp_weightfield
         ds.field_info.add_field(weightfield,
             function=_make_wf(field, weight))

diff -r c12349973350b9c9b2f4c60af9a7bc6b90ea42e2 -r 1c956287e41941aab03b0e41da0f3494a922754a yt/visualization/volume_rendering/image_handling.py
--- a/yt/visualization/volume_rendering/image_handling.py
+++ b/yt/visualization/volume_rendering/image_handling.py
@@ -33,14 +33,14 @@
         f.create_dataset("A", data=image[:,:,3])
         f.close()
     if fits:
-        from yt.utilities.fits_image import FITSImageBuffer
+        from yt.utilities.fits_image import FITSImageData
         data = {}
         data["r"] = image[:,:,0]
         data["g"] = image[:,:,1]
         data["b"] = image[:,:,2]
         data["a"] = image[:,:,3]
         nx, ny = data["r"].shape
-        fib = FITSImageBuffer(data)
+        fib = FITSImageData(data)
         fib.writeto('%s.fits'%fn,clobber=True)
 
 def import_rgba(name, h5=True):

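For reference, the RGBA export path above now routes through the same class.
A hedged usage sketch, assuming a rendered (N, M, 4) image array named
image and the h5/fits keywords shown in the hunk:

>>> from yt.visualization.volume_rendering.image_handling import export_rgba
>>> export_rgba(image, "rendering", h5=False, fits=True)  # writes rendering.fits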

https://bitbucket.org/yt_analysis/yt/commits/bcbac756c303/
Changeset:   bcbac756c303
Branch:      yt
User:        jzuhone
Date:        2015-06-18 13:32:04+00:00
Summary:     Change the signature of the internal method prepare_spectrum to take a redshift parameter; this works around some odd XSPEC behavior when preparing spectral models
Affected #:  1 file

diff -r 1c956287e41941aab03b0e41da0f3494a922754a -r bcbac756c30377f8134581e8208aac96e649059b yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -32,17 +32,17 @@
         self.ebins = np.linspace(self.emin, self.emax, nchan+1)
         self.de = np.diff(self.ebins)
         self.emid = 0.5*(self.ebins[1:]+self.ebins[:-1])
-        
-    def prepare(self):
+
+    def prepare_spectrum(self):
         pass
-    
+
     def get_spectrum(self):
         pass
-                                                        
+
 class XSpecThermalModel(SpectralModel):
     r"""
     Initialize a thermal gas emission model from PyXspec.
-    
+
     Parameters
     ----------
 
@@ -54,19 +54,24 @@
         The maximum energy for the spectral model.
     nchan : integer
         The number of channels in the spectral model.
+    settings : dictionary, optional
+        A dictionary of key, value pairs (must both be strings)
+        that can be used to set various XSPEC settings via Xset.addModelString.
 
     Examples
     --------
     >>> mekal_model = XSpecThermalModel("mekal", 0.05, 50.0, 1000)
     """
-    def __init__(self, model_name, emin, emax, nchan, thermal_broad=False):
+    def __init__(self, model_name, emin, emax, nchan, 
+                 thermal_broad=False, settings=None):
         self.model_name = model_name
         self.thermal_broad = thermal_broad
-        SpectralModel.__init__(self, emin, emax, nchan)
-        
-    def prepare(self):
+        self.settings = settings
+        super(XSpecThermalModel, self).__init__(emin, emax, nchan)
+
+    def prepare_spectrum(self, zobs):
         """
-        Prepare the thermal model for execution.
+        Prepare the thermal model for execution given a redshift *zobs* for the spectrum.
         """
         import xspec
         xspec.Xset.chatter = 0
@@ -79,9 +84,12 @@
         else:
             self.norm = 1.0e-14
         self.thermal_comp.norm = 1.0
-        self.thermal_comp.Redshift = 0.0
+        self.thermal_comp.Redshift = zobs
         if self.thermal_broad:
             xspec.Xset.addModelString("APECTHERMAL","yes")
+        if self.settings is not None:
+            for k,v in self.settings.items():
+                xspec.Xset.addModelString(k,v)
 
     def get_spectrum(self, kT):
         """
@@ -89,18 +97,20 @@
         """
         self.thermal_comp.kT = kT
         self.thermal_comp.Abundanc = 0.0
-        cosmic_spec = self.norm*np.array(self.model.values(0))
+        cosmic_spec = np.array(self.model.values(0))
         if self.model_name == "bremss":
             metal_spec = np.zeros(self.nchan)
         else:
             self.thermal_comp.Abundanc = 1.0
-            metal_spec = self.norm*np.array(self.model.values(0)) - cosmic_spec
+            metal_spec = np.array(self.model.values(0)) - cosmic_spec
+        cosmic_spec *= self.norm
+        metal_spec *= self.norm
         return YTArray(cosmic_spec, "cm**3/s"), YTArray(metal_spec, "cm**3/s")
-        
+
 class XSpecAbsorbModel(SpectralModel):
     r"""
     Initialize an absorption model from PyXspec.
-    
+
     Parameters
     ----------
 
@@ -119,14 +129,16 @@
     --------
     >>> abs_model = XSpecAbsorbModel("wabs", 0.1)
     """
-    def __init__(self, model_name, nH, emin=0.01, emax=50.0, nchan=100000):
+    def __init__(self, model_name, nH, emin=0.01, emax=50.0, 
+                 nchan=100000, settings=None):
         self.model_name = model_name
         self.nH = nH
-        SpectralModel.__init__(self, emin, emax, nchan)
-        
-    def prepare(self):
+        self.settings = settings
+        super(XSpecAbsorbModel, self).__init__(emin, emax, nchan)
+
+    def prepare_spectrum(self):
         """
-        Prepare the absorption model for execution.
+        Prepare the absorption model for execution.
         """
         import xspec
         xspec.Xset.chatter = 0
@@ -135,6 +147,9 @@
         self.model = xspec.Model(self.model_name+"*powerlaw")
         self.model.powerlaw.norm = self.nchan/(self.emax.value-self.emin.value)
         self.model.powerlaw.PhoIndex = 0.0
+        if self.settings is not None:
+            for k,v in self.settings.items():
+                xspec.Xset.addModelString(k,v)
 
     def get_spectrum(self):
         """
@@ -150,7 +165,7 @@
     available at http://www.atomdb.org. This code borrows heavily from Python
     routines used to read the APEC tables developed by Adam Foster at the
     CfA (afoster at cfa.harvard.edu). 
-    
+
     Parameters
     ----------
 
@@ -183,7 +198,7 @@
                                      self.apec_prefix+"_coco.fits")
         self.linefile = os.path.join(self.apec_root,
                                      self.apec_prefix+"_line.fits")
-        SpectralModel.__init__(self, emin, emax, nchan)
+        super(TableApecModel, self).__init__(emin, emax, nchan)  
         self.wvbins = hc/self.ebins[::-1].d
         # H, He, and trace elements
         self.cosmic_elem = [1,2,3,4,5,9,11,15,17,19,21,22,23,24,25,27,29,30]
@@ -196,8 +211,8 @@
                            32.0650,35.4530,39.9480,39.0983,40.0780,
                            44.9559,47.8670,50.9415,51.9961,54.9380,
                            55.8450,58.9332,58.6934,63.5460,65.3800])
-        
-    def prepare(self):
+
+    def prepare_spectrum(self, zobs):
         """
         Prepare the thermal model for execution.
         """
@@ -216,17 +231,18 @@
         self.dTvals = np.diff(self.Tvals)
         self.minlam = self.wvbins.min()
         self.maxlam = self.wvbins.max()
-    
+        self.scale_factor = 1.0/(1.+zobs)
+
     def _make_spectrum(self, element, tindex):
-        
+
         tmpspec = np.zeros(self.nchan)
-        
+
         i = np.where((self.line_handle[tindex].data.field('element') == element) &
                      (self.line_handle[tindex].data.field('lambda') > self.minlam) &
                      (self.line_handle[tindex].data.field('lambda') < self.maxlam))[0]
 
         vec = np.zeros(self.nchan)
-        E0 = hc/self.line_handle[tindex].data.field('lambda')[i]
+        E0 = hc/self.line_handle[tindex].data.field('lambda')[i]*self.scale_factor
         amp = self.line_handle[tindex].data.field('epsilon')[i]
         ebins = self.ebins.d
         if self.thermal_broad:
@@ -246,19 +262,19 @@
             return tmpspec
         else:
             ind = ind[0]
-                                                    
+
         n_cont = self.coco_handle[tindex].data.field('N_Cont')[ind]
-        e_cont = self.coco_handle[tindex].data.field('E_Cont')[ind][:n_cont]
+        e_cont = self.coco_handle[tindex].data.field('E_Cont')[ind][:n_cont]*self.scale_factor
         continuum = self.coco_handle[tindex].data.field('Continuum')[ind][:n_cont]
 
         tmpspec += np.interp(self.emid.d, e_cont, continuum)*self.de.d
-        
+
         n_pseudo = self.coco_handle[tindex].data.field('N_Pseudo')[ind]
-        e_pseudo = self.coco_handle[tindex].data.field('E_Pseudo')[ind][:n_pseudo]
+        e_pseudo = self.coco_handle[tindex].data.field('E_Pseudo')[ind][:n_pseudo]*self.scale_factor
         pseudo = self.coco_handle[tindex].data.field('Pseudo')[ind][:n_pseudo]
-        
+
         tmpspec += np.interp(self.emid.d, e_pseudo, pseudo)*self.de.d
-        
+
         return tmpspec
 
     def get_spectrum(self, kT):
@@ -288,7 +304,7 @@
 class TableAbsorbModel(SpectralModel):
     r"""
     Initialize an absorption model from a table stored in an HDF5 file.
-    
+
     Parameters
     ----------
 
@@ -299,7 +315,7 @@
 
     Examples
     --------
-    >>> abs_model = XSpecAbsorbModel("wabs", 0.1)
+    >>> abs_model = XSpecAbsorbModel("abs_table.h5", 0.1)
     """
 
     def __init__(self, filename, nH):
@@ -312,15 +328,15 @@
         self.sigma = YTArray(f["cross_section"][:], "cm**2")
         nchan = self.sigma.shape[0]
         f.close()
-        SpectralModel.__init__(self, emin, emax, nchan)
+        super(TableAbsorbModel, self).__init__(emin, emax, nchan)
         self.nH = YTQuantity(nH*1.0e22, "cm**-2")
-        
-    def prepare(self):
+
+    def prepare_spectrum(self):
         """
         Prepare the absorption model for execution.
         """
         pass
-        
+
     def get_spectrum(self):
         """
         Get the absorption spectrum.


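The substantive change in the hunks above is that ``prepare_spectrum`` now takes the observed redshift and stores ``scale_factor = 1/(1+zobs)``, which is applied to the tabulated line and continuum energies so the spectrum is built directly in the observer frame. A minimal sketch of the idea, with hypothetical rest-frame line energies and amplitudes standing in for the AtomDB tables (loosely what ``_make_spectrum`` does in the unbroadened case):

    import numpy as np

    # Hypothetical rest-frame line energies (keV) and emissivities; the real
    # code reads these from the AtomDB *_line.fits tables.
    e_rest = np.array([1.86, 6.70, 6.97])
    amp = np.array([0.5, 1.0, 0.3])

    zobs = 0.05
    scale_factor = 1.0/(1.0 + zobs)

    # Shift the line energies into the observer frame, then deposit them
    # onto the channel grid.
    e_obs = e_rest*scale_factor
    ebins = np.linspace(0.1, 10.0, 2001)   # 2000 channels from 0.1-10 keV
    spec, _ = np.histogram(e_obs, bins=ebins, weights=amp)
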
https://bitbucket.org/yt_analysis/yt/commits/34fee6f93b39/
Changeset:   34fee6f93b39
Branch:      yt
User:        jzuhone
Date:        2015-06-18 13:37:02+00:00
Summary:     1) Fix a bug in photon_simulator where in a corner case the angular diameter distance units were not set correctly.
2) Allow the "normal" parameter to be "x", "y", or "z" to project along an axis.
3) Bin counts into a channel spectrum, even if no response was provided.
4) General code cleanup.
Affected #:  1 file

diff -r bcbac756c30377f8134581e8208aac96e649059b -r 34fee6f93b398f0a901e4cdf9a3f464cee444cdd yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -25,6 +25,7 @@
 from yt.utilities.physical_constants import clight
 from yt.utilities.cosmology import Cosmology
 from yt.utilities.orientation import Orientation
+from yt.utilities.fits_image import assert_same_wcs
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only, get_mpi_type, \
      parallel_capable
@@ -34,6 +35,10 @@
 
 comm = communication_system.communicators[-1]
 
+axes_lookup = {"x":("y","z"),
+               "y":("z","x"),
+               "z":("x","y")}
+
 def parse_value(value, default_units):
     if isinstance(value, YTQuantity):
         return value.in_units(default_units)
@@ -50,10 +55,10 @@
         self.cosmo = cosmo
         self.p_bins = p_bins
         self.num_cells = len(photons["x"])
-        
+
     def keys(self):
         return self.photons.keys()
-    
+
     def items(self):
         ret = []
         for k, v in self.photons.items():
@@ -62,7 +67,7 @@
             else:
                 ret.append((k,v))
         return ret
-    
+
     def values(self):
         ret = []
         for k, v in self.photons.items():
@@ -71,7 +76,7 @@
             else:
                 ret.append(v)
         return ret
-                                
+
     def __getitem__(self, key):
         if key == "Energy":
             return [self.photons["Energy"][self.p_bins[i]:self.p_bins[i+1]]
@@ -127,9 +132,9 @@
 
         p_bins = np.cumsum(photons["NumberOfPhotons"])
         p_bins = np.insert(p_bins, 0, [np.uint64(0)])
-        
+
         photons["Energy"] = YTArray(f["/energy"][start_e:end_e], "keV")
-        
+
         f.close()
 
         cosmo = Cosmology(hubble_constant=parameters["HubbleConstant"],
@@ -137,7 +142,7 @@
                           omega_lambda=parameters["OmegaLambda"])
 
         return cls(photons, parameters, cosmo, p_bins)
-    
+
     @classmethod
     def from_scratch(cls, data_source, redshift, area,
                      exp_time, photon_model, parameters=None,
@@ -277,9 +282,9 @@
             D_A = parse_value(dist, "Mpc")
             redshift = 0.0
 
-        if center == "c":
+        if center == "center" or center == "c":
             parameters["center"] = ds.domain_center
-        elif center == "max":
+        elif center == "max" or center == "m":
             parameters["center"] = ds.find_max("density")[-1]
         elif iterable(center):
             if isinstance(center, YTArray):
@@ -288,7 +293,7 @@
                 parameters["center"] = ds.arr(center, "code_length")
         elif center is None:
             parameters["center"] = data_source.get_field_parameter("center")
-            
+
         parameters["FiducialExposureTime"] = parse_value(exp_time, "s")
         parameters["FiducialArea"] = parse_value(area, "cm**2")
         parameters["FiducialRedshift"] = redshift
@@ -308,32 +313,32 @@
             dimension = max(dimension, int(width/delta_min))
         parameters["Dimension"] = 2*dimension
         parameters["Width"] = 2.*width.in_units("kpc")
-                
+
         photons = photon_model(data_source, parameters)
 
         mylog.info("Finished generating photons.")
 
         p_bins = np.cumsum(photons["NumberOfPhotons"])
         p_bins = np.insert(p_bins, 0, [np.uint64(0)])
-                        
+
         return cls(photons, parameters, cosmo, p_bins)
-        
+
     def write_h5_file(self, photonfile):
         """
         Write the photons to the HDF5 file *photonfile*.
         """
 
         if parallel_capable:
-            
+
             mpi_long = get_mpi_type("int64")
             mpi_double = get_mpi_type("float64")
-        
+
             local_num_cells = len(self.photons["x"])
             sizes_c = comm.comm.gather(local_num_cells, root=0)
-            
-            local_num_photons = self.photons["NumberOfPhotons"].sum()
+
+            local_num_photons = np.sum(self.photons["NumberOfPhotons"])
             sizes_p = comm.comm.gather(local_num_photons, root=0)
-            
+
             if comm.rank == 0:
                 num_cells = sum(sizes_c)
                 num_photons = sum(sizes_p)        
@@ -362,24 +367,24 @@
                 dx = np.empty([])
                 n_ph = np.empty([])
                 e = np.empty([])
-                                                
-            comm.comm.Gatherv([self.photons["x"].ndarray_view(), local_num_cells, mpi_double],
+
+            comm.comm.Gatherv([self.photons["x"].d, local_num_cells, mpi_double],
                               [x, (sizes_c, disps_c), mpi_double], root=0)
-            comm.comm.Gatherv([self.photons["y"].ndarray_view(), local_num_cells, mpi_double],
+            comm.comm.Gatherv([self.photons["y"].d, local_num_cells, mpi_double],
                               [y, (sizes_c, disps_c), mpi_double], root=0)
-            comm.comm.Gatherv([self.photons["z"].ndarray_view(), local_num_cells, mpi_double],
+            comm.comm.Gatherv([self.photons["z"].d, local_num_cells, mpi_double],
                               [z, (sizes_c, disps_c), mpi_double], root=0)
-            comm.comm.Gatherv([self.photons["vx"].ndarray_view(), local_num_cells, mpi_double],
+            comm.comm.Gatherv([self.photons["vx"].d, local_num_cells, mpi_double],
                               [vx, (sizes_c, disps_c), mpi_double], root=0)
-            comm.comm.Gatherv([self.photons["vy"].ndarray_view(), local_num_cells, mpi_double],
+            comm.comm.Gatherv([self.photons["vy"].d, local_num_cells, mpi_double],
                               [vy, (sizes_c, disps_c), mpi_double], root=0)
-            comm.comm.Gatherv([self.photons["vz"].ndarray_view(), local_num_cells, mpi_double],
+            comm.comm.Gatherv([self.photons["vz"].d, local_num_cells, mpi_double],
                               [vz, (sizes_c, disps_c), mpi_double], root=0)
-            comm.comm.Gatherv([self.photons["dx"].ndarray_view(), local_num_cells, mpi_double],
+            comm.comm.Gatherv([self.photons["dx"].d, local_num_cells, mpi_double],
                               [dx, (sizes_c, disps_c), mpi_double], root=0)
             comm.comm.Gatherv([self.photons["NumberOfPhotons"], local_num_cells, mpi_long],
                               [n_ph, (sizes_c, disps_c), mpi_long], root=0)
-            comm.comm.Gatherv([self.photons["Energy"].ndarray_view(), local_num_photons, mpi_double],
+            comm.comm.Gatherv([self.photons["Energy"].d, local_num_photons, mpi_double],
                               [e, (sizes_p, disps_p), mpi_double], root=0) 
 
         else:
@@ -393,13 +398,13 @@
             dx = self.photons["dx"].d
             n_ph = self.photons["NumberOfPhotons"]
             e = self.photons["Energy"].d
-                                                
+
         if comm.rank == 0:
-            
+
             f = h5py.File(photonfile, "w")
 
             # Scalars
-       
+
             f.create_dataset("fid_area", data=float(self.parameters["FiducialArea"]))
             f.create_dataset("fid_exp_time", data=float(self.parameters["FiducialExposureTime"]))
             f.create_dataset("fid_redshift", data=self.parameters["FiducialRedshift"])
@@ -409,7 +414,7 @@
             f.create_dataset("fid_d_a", data=float(self.parameters["FiducialAngularDiameterDistance"]))
             f.create_dataset("dimension", data=self.parameters["Dimension"])
             f.create_dataset("width", data=float(self.parameters["Width"]))
-                        
+
             # Arrays
 
             f.create_dataset("x", data=x)
@@ -426,19 +431,22 @@
 
         comm.barrier()
 
-    def project_photons(self, L, area_new=None, exp_time_new=None, 
+    def project_photons(self, normal, area_new=None, exp_time_new=None, 
                         redshift_new=None, dist_new=None,
                         absorb_model=None, psf_sigma=None,
                         sky_center=None, responses=None,
-                        convolve_energies=False, no_shifting=False):
+                        convolve_energies=False, no_shifting=False,
+                        north_vector=None):
         r"""
         Projects photons onto an image plane given a line of sight.
 
         Parameters
         ----------
 
-        L : array_like
-            Normal vector to the plane of projection.
+        normal : character or array_like
+            Normal vector to the plane of projection. If "x", "y", or "z",
+            the projection is assumed to be along that axis (and will probably
+            be faster). Otherwise, an off-axis normal vector, e.g., [1.0, 2.0, -3.0].
         area_new : float, optional
             New value for the effective area of the detector. If *responses*
             are specified the value of this keyword is ignored.
@@ -463,7 +471,11 @@
             If this is set, the photon energies will be convolved with the RMF.
         no_shifting : boolean, optional
             If set, the photon energies will not be Doppler shifted.
-            
+        north_vector : a sequence of floats
+            A vector defining the 'up' direction. This option sets the orientation of 
+            the plane of projection. If not set, an arbitrary grid-aligned north_vector 
+            is chosen. Ignored in the case where a particular axis (e.g., "x", "y", or
+            "z") is explicitly specified.
         Examples
         --------
         >>> L = np.array([0.1,-0.2,0.3])
@@ -475,7 +487,7 @@
         if redshift_new is not None and dist_new is not None:
             mylog.error("You may specify a new redshift or distance, "+
                         "but not both!")
-        
+
         if sky_center is None:
             sky_center = YTArray([30.,45.], "degree")
         else:
@@ -486,37 +498,36 @@
         if psf_sigma is not None:
              psf_sigma = parse_value(psf_sigma, "degree")
 
-        L = np.array(L)
-        L /= np.sqrt(np.dot(L, L))
-        vecs = np.identity(3)
-        t = np.cross(L, vecs).sum(axis=1)
-        ax = t.argmax()
-        north = np.cross(L, vecs[ax,:]).ravel()
-        orient = Orientation(L, north_vector=north)
-        
-        x_hat = orient.unit_vectors[0]
-        y_hat = orient.unit_vectors[1]
-        z_hat = orient.unit_vectors[2]
+        if not isinstance(normal, string_types):
+            L = np.array(normal)
+            orient = Orientation(L, north_vector=north_vector)
+            x_hat = orient.unit_vectors[0]
+            y_hat = orient.unit_vectors[1]
+            z_hat = orient.unit_vectors[2]
 
         n_ph = self.photons["NumberOfPhotons"]
         n_ph_tot = n_ph.sum()
-        
+
         eff_area = None
 
         parameters = {}
-        
+
         if responses is not None:
             responses = ensure_list(responses)
             parameters["ARF"] = responses[0]
             if len(responses) == 2:
                 parameters["RMF"] = responses[1]
             area_new = parameters["ARF"]
-            
+
+        zobs0 = self.parameters["FiducialRedshift"]
+        D_A0 = self.parameters["FiducialAngularDiameterDistance"]
+        scale_factor = 1.0
+
         if (exp_time_new is None and area_new is None and
             redshift_new is None and dist_new is None):
             my_n_obs = n_ph_tot
-            zobs = self.parameters["FiducialRedshift"]
-            D_A = self.parameters["FiducialAngularDiameterDistance"]
+            zobs = zobs0
+            D_A = D_A0
         else:
             if exp_time_new is None:
                 Tratio = 1.
@@ -545,8 +556,8 @@
                 Aratio = parse_value(area_new, "cm**2")/self.parameters["FiducialArea"]
             if redshift_new is None and dist_new is None:
                 Dratio = 1.
-                zobs = self.parameters["FiducialRedshift"]
-                D_A = self.parameters["FiducialAngularDiameterDistance"]
+                zobs = zobs0
+                D_A = D_A0
             else:
                 if redshift_new is None:
                     zobs = 0.0
@@ -554,28 +565,17 @@
                 else:
                     zobs = redshift_new
                     D_A = self.cosmo.angular_diameter_distance(0.0,zobs).in_units("Mpc")
-                fid_D_A = self.parameters["FiducialAngularDiameterDistance"]
-                Dratio = fid_D_A*fid_D_A*(1.+self.parameters["FiducialRedshift"]**3) / \
+                    scale_factor = (1.+zobs0)/(1.+zobs)
+                Dratio = D_A0*D_A0*(1.+zobs0)**3 / \
                          (D_A*D_A*(1.+zobs)**3)
             fak = Aratio*Tratio*Dratio
             if fak > 1:
-                raise ValueError("Spectrum scaling factor = %g, cannot be greater than unity." % (fak))
+                raise ValueError("Spectrum scaling factor = %g, cannot be greater than unity." % fak)
             my_n_obs = np.uint64(n_ph_tot*fak)
 
         n_obs_all = comm.mpi_allreduce(my_n_obs)
         if comm.rank == 0:
             mylog.info("Total number of photons to use: %d" % (n_obs_all))
-        
-        x = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-        y = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-        z = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-
-        if not no_shifting:
-            vz = self.photons["vx"]*z_hat[0] + \
-                 self.photons["vy"]*z_hat[1] + \
-                 self.photons["vz"]*z_hat[2]
-            shift = -vz.in_cgs()/clight
-            shift = np.sqrt((1.-shift)/(1.+shift))
 
         if my_n_obs == n_ph_tot:
             idxs = np.arange(my_n_obs,dtype='uint64')
@@ -584,32 +584,57 @@
         obs_cells = np.searchsorted(self.p_bins, idxs, side='right')-1
         delta = dx[obs_cells]
 
-        x *= delta
-        y *= delta
-        z *= delta
-        x += self.photons["x"][obs_cells]
-        y += self.photons["y"][obs_cells]
-        z += self.photons["z"][obs_cells]
+        if isinstance(normal, string_types):
+
+            xsky = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            ysky = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            xsky *= delta
+            ysky *= delta
+            xsky += self.photons[axes_lookup[normal][0]][obs_cells]
+            ysky += self.photons[axes_lookup[normal][1]][obs_cells]
+
+            if not no_shifting:
+                vz = self.photons["v%s" % normal]
+
+        else:
+            x = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            y = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            z = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+
+            if not no_shifting:
+                vz = self.photons["vx"]*z_hat[0] + \
+                     self.photons["vy"]*z_hat[1] + \
+                     self.photons["vz"]*z_hat[2]
+
+            x *= delta
+            y *= delta
+            z *= delta
+            x += self.photons["x"][obs_cells]
+            y += self.photons["y"][obs_cells]
+            z += self.photons["z"][obs_cells]
+
+            xsky = x*x_hat[0] + y*x_hat[1] + z*x_hat[2]
+            ysky = x*y_hat[0] + y*y_hat[1] + z*y_hat[2]
+
         if no_shifting:
             eobs = self.photons["Energy"][idxs]
         else:
+            shift = -vz.in_cgs()/clight
+            shift = np.sqrt((1.-shift)/(1.+shift))
             eobs = self.photons["Energy"][idxs]*shift[obs_cells]
+        eobs *= scale_factor
 
-        xsky = x*x_hat[0] + y*x_hat[1] + z*x_hat[2]
-        ysky = x*y_hat[0] + y*y_hat[1] + z*y_hat[2]
-        eobs /= (1.+zobs)
-        
         if absorb_model is None:
             not_abs = np.ones(eobs.shape, dtype='bool')
         else:
             mylog.info("Absorbing.")
-            absorb_model.prepare()
+            absorb_model.prepare_spectrum()
             emid = absorb_model.emid
             aspec = absorb_model.get_spectrum()
             absorb = np.interp(eobs, emid, aspec, left=0.0, right=0.0)
             randvec = aspec.max()*np.random.random(eobs.shape)
             not_abs = randvec < absorb
-        
+
         if eff_area is None:
             detected = np.ones(eobs.shape, dtype='bool')
         else:
@@ -618,14 +643,14 @@
             earea = np.interp(eobs, earf, eff_area, left=0.0, right=0.0)
             randvec = eff_area.max()*np.random.random(eobs.shape)
             detected = randvec < earea
-        
+
         detected = np.logical_and(not_abs, detected)
-                    
+
         events = {}
 
         dx_min = self.parameters["Width"]/self.parameters["Dimension"]
         dtheta = YTQuantity(np.rad2deg(dx_min/D_A), "degree")
-        
+
         events["xpix"] = xsky[detected]/dx_min.v + 0.5*(nx+1)
         events["ypix"] = ysky[detected]/dx_min.v + 0.5*(nx+1)
         events["eobs"] = eobs[detected]
@@ -635,15 +660,15 @@
         if psf_sigma is not None:
             events["xpix"] += np.random.normal(sigma=psf_sigma/dtheta)
             events["ypix"] += np.random.normal(sigma=psf_sigma/dtheta)
-        
+
         num_events = len(events["xpix"])
-            
+
         if comm.rank == 0: mylog.info("Total number of observed photons: %d" % num_events)
 
         if "RMF" in parameters and convolve_energies:
             events, info = self._convolve_with_rmf(parameters["RMF"], events)
             for k, v in info.items(): parameters[k] = v
-                
+
         if exp_time_new is None:
             parameters["ExposureTime"] = self.parameters["FiducialExposureTime"]
         else:
@@ -657,7 +682,7 @@
         parameters["sky_center"] = sky_center
         parameters["pix_center"] = np.array([0.5*(nx+1)]*2)
         parameters["dtheta"] = dtheta
-        
+
         return EventList(events, parameters)
 
     def _normalize_arf(self, respfile):
@@ -688,7 +713,7 @@
 
         eidxs = np.argsort(events["eobs"])
 
-        phEE = events["eobs"][eidxs].ndarray_view()
+        phEE = events["eobs"][eidxs].d
         phXX = events["xpix"][eidxs]
         phYY = events["ypix"][eidxs]
 
@@ -758,7 +783,7 @@
         self.num_events = events["xpix"].shape[0]
         self.wcs = _astropy.pywcs.WCS(naxis=2)
         self.wcs.wcs.crpix = parameters["pix_center"]
-        self.wcs.wcs.crval = parameters["sky_center"].ndarray_view()
+        self.wcs.wcs.crval = parameters["sky_center"].d
         self.wcs.wcs.cdelt = [-parameters["dtheta"].value, parameters["dtheta"].value]
         self.wcs.wcs.ctype = ["RA---TAN","DEC--TAN"]
         self.wcs.wcs.cunit = ["deg"]*2
@@ -785,8 +810,9 @@
         return self.events.__repr__()
 
     def __add__(self, other):
-        keys1 = self.parameters.keys()
-        keys2 = other.parameters.keys()
+        assert_same_wcs(self.wcs, other.wcs)
+        keys1 = list(self.parameters.keys())
+        keys2 = list(other.parameters.keys())
         keys1.sort()
         keys2.sort()
         if keys1 != keys2:
@@ -809,9 +835,9 @@
         return EventList(events, self.parameters)
 
     def filter_events(self, region):
-        """                                                                                                                                 
-        Filter events using a ds9 region. Requires the pyregion package.                                                                    
-        Returns a new EventList.                                                                                                            
+        """
+        Filter events using a ds9 region. Requires the pyregion package.
+        Returns a new EventList.
         """
         import pyregion
         import os
@@ -836,7 +862,7 @@
         """
         events = {}
         parameters = {}
-        
+
         f = h5py.File(h5file, "r")
 
         parameters["ExposureTime"] = YTQuantity(f["/exp_time"].value, "s")
@@ -923,7 +949,7 @@
         cols = []
 
         col1 = pyfits.Column(name='ENERGY', format='E', unit='eV',
-                             array=self.events["eobs"].in_units("eV").ndarray_view())
+                             array=self.events["eobs"].in_units("eV").d)
         col2 = pyfits.Column(name='X', format='D', unit='pixel',
                              array=self.events["xpix"])
         col3 = pyfits.Column(name='Y', format='D', unit='pixel',
@@ -942,7 +968,7 @@
              cols.append(col4)
 
         coldefs = pyfits.ColDefs(cols)
-        tbhdu = pyfits.new_table(coldefs)
+        tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
         tbhdu.update_ext_name("EVENTS")
 
         tbhdu.header["MTYPE1"] = "sky"
@@ -1006,29 +1032,29 @@
         """
         pyfits = _astropy.pyfits
         if isinstance(self.parameters["Area"], string_types):
-             mylog.error("Writing SIMPUT files is only supported if you didn't convolve with an ARF.")
-             raise TypeError("Writing SIMPUT files is only supported if you didn't convolve with an ARF.")
+             mylog.error("Writing SIMPUT files is only supported if you didn't convolve with responses.")
+             raise TypeError("Writing SIMPUT files is only supported if you didn't convolve with responses.")
 
         if emin is None:
-            emin = self.events["eobs"].min().value*0.95
+            emin = self.events["eobs"].min().value
         if emax is None:
-            emax = self.events["eobs"].max().value*1.05
+            emax = self.events["eobs"].max().value
 
-        idxs = np.logical_and(self.events["eobs"].ndarray_view() >= emin,
-                              self.events["eobs"].ndarray_view() <= emax)
+        idxs = np.logical_and(self.events["eobs"].d >= emin,
+                              self.events["eobs"].d <= emax)
         flux = np.sum(self.events["eobs"][idxs].in_units("erg")) / \
                self.parameters["ExposureTime"]/self.parameters["Area"]
 
         col1 = pyfits.Column(name='ENERGY', format='E',
-                             array=self["eobs"].ndarray_view())
+                             array=self["eobs"].d)
         col2 = pyfits.Column(name='DEC', format='D',
-                             array=self["ysky"].ndarray_view())
+                             array=self["ysky"].d)
         col3 = pyfits.Column(name='RA', format='D',
-                             array=self["xsky"].ndarray_view())
+                             array=self["xsky"].d)
 
         coldefs = pyfits.ColDefs([col1, col2, col3])
 
-        tbhdu = pyfits.new_table(coldefs)
+        tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
         tbhdu.update_ext_name("PHLIST")
 
         tbhdu.header["HDUCLASS"] = "HEASARC/SIMPUT"
@@ -1056,7 +1082,7 @@
 
         coldefs = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8])
 
-        wrhdu = pyfits.new_table(coldefs)
+        wrhdu = pyfits.BinTableHDU.from_columns(coldefs)
         wrhdu.update_ext_name("SRC_CAT")
 
         wrhdu.header["HDUCLASS"] = "HEASARC"
@@ -1104,14 +1130,14 @@
 
         f.create_dataset("/xpix", data=self.events["xpix"])
         f.create_dataset("/ypix", data=self.events["ypix"])
-        f.create_dataset("/xsky", data=self.events["xsky"].ndarray_view())
-        f.create_dataset("/ysky", data=self.events["ysky"].ndarray_view())
-        f.create_dataset("/eobs", data=self.events["eobs"].ndarray_view())
+        f.create_dataset("/xsky", data=self.events["xsky"].d)
+        f.create_dataset("/ysky", data=self.events["ysky"].d)
+        f.create_dataset("/eobs", data=self.events["eobs"].d)
         if "PI" in self.events:
             f.create_dataset("/pi", data=self.events["PI"])
         if "PHA" in self.events:
-            f.create_dataset("/pha", data=self.events["PHA"])
-        f.create_dataset("/sky_center", data=self.parameters["sky_center"].ndarray_view())
+            f.create_dataset("/pha", data=self.events["PHA"])                  
+        f.create_dataset("/sky_center", data=self.parameters["sky_center"].d)
         f.create_dataset("/pix_center", data=self.parameters["pix_center"])
         f.create_dataset("/dtheta", data=float(self.parameters["dtheta"]))
 
@@ -1138,11 +1164,11 @@
         if emin is None:
             mask_emin = np.ones((self.num_events), dtype='bool')
         else:
-            mask_emin = self.events["eobs"].ndarray_view() > emin
+            mask_emin = self.events["eobs"].d > emin
         if emax is None:
             mask_emax = np.ones((self.num_events), dtype='bool')
         else:
-            mask_emax = self.events["eobs"].ndarray_view() < emax
+            mask_emax = self.events["eobs"].d < emax
 
         mask = np.logical_and(mask_emin, mask_emax)
 
@@ -1175,38 +1201,30 @@
         hdu.writeto(imagefile, clobber=clobber)
 
     @parallel_root_only
-    def write_spectrum(self, specfile, energy_bins=False, emin=0.1,
+    def write_spectrum(self, specfile, bin_type="energy", emin=0.1,
                        emax=10.0, nchan=2000, clobber=False):
         r"""
         Bin event energies into a spectrum and write it to a FITS binary table. Can bin
-        on energy or channel, the latter only if the photons were convolved with an
-        RMF. In that case, the spectral binning will be determined by the RMF binning.
+        on energy or channel. If binning on channel and an RMF was used, the
+        spectral binning will be determined by the RMF binning.
 
         Parameters
         ----------
 
         specfile : string
             The name of the FITS file to be written.
-        energy_bins : boolean, optional
-            Bin on energy or channel. 
+        bin_type : string, optional
+            Bin on "energy" or "channel". If an RMF is detected, channel information will be
+            imported from it. 
         emin : float, optional
-            The minimum energy of the spectral bins in keV. Only used if binning on energy.
+            The minimum energy of the spectral bins in keV. Only used if binning without an RMF.
         emax : float, optional
-            The maximum energy of the spectral bins in keV. Only used if binning on energy.
+            The maximum energy of the spectral bins in keV. Only used if binning without an RMF.
         nchan : integer, optional
-            The number of channels. Only used if binning on energy.
+            The number of channels. Only used if binning without an RMF.
         """
         pyfits = _astropy.pyfits
-        if energy_bins:
-            spectype = "energy"
-            espec = self.events["eobs"].d
-            range = (emin, emax)
-            spec, ee = np.histogram(espec, bins=nchan, range=range)
-            bins = 0.5*(ee[1:]+ee[:-1])
-        else:
-            if "ChannelType" not in self.parameters:
-                mylog.error("These events were not convolved with an RMF. Set energy_bins=True.")
-                raise KeyError
+        if bin_type == "channel" and "ChannelType" in self.parameters:
             spectype = self.parameters["ChannelType"]
             espec = self.events[spectype]
             f = pyfits.open(self.parameters["RMF"])
@@ -1228,7 +1246,17 @@
             if cmin == 1: minlength += 1
             spec = np.bincount(self.events[spectype],minlength=minlength)
             if cmin == 1: spec = spec[1:]
-            bins = np.arange(nchan).astype("int32")+cmin
+            bins = (np.arange(nchan)+cmin).astype("int32")
+        else:    
+            espec = self.events["eobs"].d
+            erange = (emin, emax)
+            spec, ee = np.histogram(espec, bins=nchan, range=erange)
+            if bin_type == "energy":
+                bins = 0.5*(ee[1:]+ee[:-1])
+                spectype = "energy"
+            else:
+                bins = (np.arange(nchan)+1).astype("int32")
+                spectype = "pi"
 
         col1 = pyfits.Column(name='CHANNEL', format='1J', array=bins)
         col2 = pyfits.Column(name=spectype.upper(), format='1D', array=bins.astype("float64"))
@@ -1240,46 +1268,45 @@
         tbhdu = pyfits.new_table(coldefs)
         tbhdu.update_ext_name("SPECTRUM")
 
-        if not energy_bins:
-            tbhdu.header["DETCHANS"] = spec.shape[0]
-            tbhdu.header["TOTCTS"] = spec.sum()
-            tbhdu.header["EXPOSURE"] = float(self.parameters["ExposureTime"])
-            tbhdu.header["LIVETIME"] = float(self.parameters["ExposureTime"])
-            tbhdu.header["CONTENT"] = spectype
-            tbhdu.header["HDUCLASS"] = "OGIP"
-            tbhdu.header["HDUCLAS1"] = "SPECTRUM"
-            tbhdu.header["HDUCLAS2"] = "TOTAL"
-            tbhdu.header["HDUCLAS3"] = "TYPE:I"
-            tbhdu.header["HDUCLAS4"] = "COUNT"
-            tbhdu.header["HDUVERS"] = "1.1.0"
-            tbhdu.header["HDUVERS1"] = "1.1.0"
-            tbhdu.header["CHANTYPE"] = spectype
-            tbhdu.header["BACKFILE"] = "none"
-            tbhdu.header["CORRFILE"] = "none"
-            tbhdu.header["POISSERR"] = True
-            if "RMF" in self.parameters:
-                 tbhdu.header["RESPFILE"] = self.parameters["RMF"]
-            else:
-                 tbhdu.header["RESPFILE"] = "none"
-            if "ARF" in self.parameters:
-                tbhdu.header["ANCRFILE"] = self.parameters["ARF"]
-            else:        
-                tbhdu.header["ANCRFILE"] = "none"
-            if "Mission" in self.parameters:
-                tbhdu.header["MISSION"] = self.parameters["Mission"]
-            else:
-                tbhdu.header["MISSION"] = "none"
-            if "Telescope" in self.parameters:
-                tbhdu.header["TELESCOP"] = self.parameters["Telescope"]
-            else:
-                tbhdu.header["TELESCOP"] = "none"
-            if "Instrument" in self.parameters:
-                tbhdu.header["INSTRUME"] = self.parameters["Instrument"]
-            else:
-                tbhdu.header["INSTRUME"] = "none"
-            tbhdu.header["AREASCAL"] = 1.0
-            tbhdu.header["CORRSCAL"] = 0.0
-            tbhdu.header["BACKSCAL"] = 1.0
+        tbhdu.header["DETCHANS"] = spec.shape[0]
+        tbhdu.header["TOTCTS"] = spec.sum()
+        tbhdu.header["EXPOSURE"] = float(self.parameters["ExposureTime"])
+        tbhdu.header["LIVETIME"] = float(self.parameters["ExposureTime"])
+        tbhdu.header["CONTENT"] = spectype
+        tbhdu.header["HDUCLASS"] = "OGIP"
+        tbhdu.header["HDUCLAS1"] = "SPECTRUM"
+        tbhdu.header["HDUCLAS2"] = "TOTAL"
+        tbhdu.header["HDUCLAS3"] = "TYPE:I"
+        tbhdu.header["HDUCLAS4"] = "COUNT"
+        tbhdu.header["HDUVERS"] = "1.1.0"
+        tbhdu.header["HDUVERS1"] = "1.1.0"
+        tbhdu.header["CHANTYPE"] = spectype
+        tbhdu.header["BACKFILE"] = "none"
+        tbhdu.header["CORRFILE"] = "none"
+        tbhdu.header["POISSERR"] = True
+        if "RMF" in self.parameters:
+            tbhdu.header["RESPFILE"] = self.parameters["RMF"]
+        else:
+            tbhdu.header["RESPFILE"] = "none"
+        if "ARF" in self.parameters:
+            tbhdu.header["ANCRFILE"] = self.parameters["ARF"]
+        else:        
+            tbhdu.header["ANCRFILE"] = "none"
+        if "Mission" in self.parameters:
+            tbhdu.header["MISSION"] = self.parameters["Mission"]
+        else:
+            tbhdu.header["MISSION"] = "none"
+        if "Telescope" in self.parameters:
+            tbhdu.header["TELESCOP"] = self.parameters["Telescope"]
+        else:
+            tbhdu.header["TELESCOP"] = "none"
+        if "Instrument" in self.parameters:
+            tbhdu.header["INSTRUME"] = self.parameters["Instrument"]
+        else:
+            tbhdu.header["INSTRUME"] = "none"
+        tbhdu.header["AREASCAL"] = 1.0
+        tbhdu.header["CORRSCAL"] = 0.0
+        tbhdu.header["BACKSCAL"] = 1.0
 
         hdulist = pyfits.HDUList([pyfits.PrimaryHDU(), tbhdu])
 

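The heart of this changeset is the on-axis fast path in ``project_photons``: when ``normal`` is "x", "y", or "z", the sky coordinates come straight from the two perpendicular axes via ``axes_lookup`` and the Doppler shift uses the matching velocity component, bypassing the ``Orientation`` machinery entirely. A rough standalone sketch of that logic (the photon arrays here are invented, not a real ``PhotonList``):

    import numpy as np

    axes_lookup = {"x": ("y", "z"), "y": ("z", "x"), "z": ("x", "y")}
    clight = 2.99792458e10  # speed of light in cm/s

    # Invented photon cell positions (kpc) and velocities (cm/s)
    photons = {"x": np.array([1.0, 2.0]), "y": np.array([0.0, 1.0]),
               "z": np.array([5.0, 4.0]),
               "vx": np.zeros(2), "vy": np.zeros(2),
               "vz": np.array([1.0e7, -2.0e7])}

    normal = "z"
    # On-axis projection: the sky plane is spanned by the other two axes.
    xsky = photons[axes_lookup[normal][0]]
    ysky = photons[axes_lookup[normal][1]]

    # Relativistic Doppler factor from the velocity component along the axis.
    beta = -photons["v%s" % normal]/clight
    shift = np.sqrt((1.0 - beta)/(1.0 + beta))
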

https://bitbucket.org/yt_analysis/yt/commits/5a7876356148/
Changeset:   5a7876356148
Branch:      yt
User:        jzuhone
Date:        2015-06-18 13:39:35+00:00
Summary:     1) Some pretty substantial speedups and simplification of the code for the ThermalPhotonModel.
2) Add a "method" parameter which allows one to choose between different techniques for generating photon energies from the spectrum.
Affected #:  1 file

diff -r 34fee6f93b398f0a901e4cdf9a3f464cee444cdd -r 5a78763561483541a32fbe10a5e811f03922c00c yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -25,7 +25,7 @@
 from yt.extern.six import string_types
 import numpy as np
 from yt.funcs import *
-from yt.utilities.physical_constants import mp, kboltz
+from yt.utilities.physical_constants import mp
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      parallel_objects
 from yt.units.yt_array import uconcatenate
@@ -34,6 +34,12 @@
 kT_min = 8.08e-2
 kT_max = 50.
 
+photon_units = {"Energy":"keV",
+                "dx":"kpc"}
+for ax in "xyz":
+    photon_units[ax] = "kpc"
+    photon_units["v"+ax] = "km/s"
+
 class PhotonModel(object):
 
     def __init__(self):
@@ -46,7 +52,7 @@
 class ThermalPhotonModel(PhotonModel):
     r"""
     Initialize a ThermalPhotonModel from a thermal spectrum. 
-    
+
     Parameters
     ----------
 
@@ -61,30 +67,37 @@
     photons_per_chunk : integer
         The maximum number of photons that are allocated per chunk. Increase or decrease
         as needed.
+    method : integer, optional
+        The method used to generate the photon energies from the spectrum:
+        1: Invert the cumulative distribution function of the spectrum.
+        2: Acceptance-rejection method using the spectrum. 
+        Method 1 should be sufficient for most cases. 
     """
-    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3, photons_per_chunk=10000000):
+    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3, 
+                 photons_per_chunk=10000000, method=1):
         self.X_H = X_H
         self.Zmet = Zmet
         self.spectral_model = spectral_model
         self.photons_per_chunk = photons_per_chunk
+        self.method = method
 
     def __call__(self, data_source, parameters):
-        
+
         ds = data_source.ds
 
         exp_time = parameters["FiducialExposureTime"]
         area = parameters["FiducialArea"]
         redshift = parameters["FiducialRedshift"]
         D_A = parameters["FiducialAngularDiameterDistance"].in_cgs()
-        dist_fac = 1.0/(4.*np.pi*D_A.value*D_A.value*(1.+redshift)**3)
+        dist_fac = 1.0/(4.*np.pi*D_A.value*D_A.value*(1.+redshift)**2)
         src_ctr = parameters["center"]
 
-        vol_scale = 1.0/np.prod(ds.domain_width.in_cgs().to_ndarray())
-
         my_kT_min, my_kT_max = data_source.quantities.extrema("kT")
 
-        self.spectral_model.prepare()
-        energy = self.spectral_model.ebins
+        self.spectral_model.prepare_spectrum(redshift)
+        emid = self.spectral_model.emid
+        ebins = self.spectral_model.ebins
+        nchan = len(emid)
 
         citer = data_source.chunks([], "io")
 
@@ -99,7 +112,13 @@
         photons["Energy"] = []
         photons["NumberOfPhotons"] = []
 
-        spectral_norm = area.v*exp_time.v*dist_fac/vol_scale
+        spectral_norm = area.v*exp_time.v*dist_fac
+
+        tot_num_cells = data_source.ires.shape[0]
+
+        pbar = get_pbar("Generating photons ", tot_num_cells)
+
+        cell_counter = 0
 
         for chunk in parallel_objects(citer):
 
@@ -114,7 +133,7 @@
             if isinstance(self.Zmet, string_types):
                 metalZ = chunk[self.Zmet].v
             else:
-                metalZ = self.Zmet
+                metalZ = self.Zmet*np.ones(num_cells)
 
             idxs = np.argsort(kT)
 
@@ -133,7 +152,7 @@
                 n += bcount
             kT_idxs = np.unique(kT_idxs)
 
-            cell_em = EM[idxs]*vol_scale
+            cell_em = EM[idxs]*spectral_norm
 
             number_of_photons = np.zeros(num_cells, dtype="uint64")
             energies = np.zeros(self.photons_per_chunk)
@@ -141,8 +160,6 @@
             start_e = 0
             end_e = 0
 
-            pbar = get_pbar("Generating photons for chunk ", num_cells)
-
             for ibegin, iend, ikT in zip(bcell, ecell, kT_idxs):
 
                 kT = kT_bins[ikT] + 0.5*dkT
@@ -151,58 +168,57 @@
 
                 cem = cell_em[ibegin:iend]
 
-                em_sum_c = cem.sum()
-                if isinstance(self.Zmet, string_types):
-                    em_sum_m = (metalZ[ibegin:iend]*cem).sum()
-                else:
-                    em_sum_m = metalZ*em_sum_c
-
                 cspec, mspec = self.spectral_model.get_spectrum(kT)
 
-                cumspec_c = np.cumsum(cspec.d)
-                tot_ph_c = cumspec_c[-1]*spectral_norm*em_sum_c
-                cumspec_c /= cumspec_c[-1]
-                cumspec_c = np.insert(cumspec_c, 0, 0.0)
-
-                cumspec_m = np.cumsum(mspec.d)
-                tot_ph_m = cumspec_m[-1]*spectral_norm*em_sum_m
-                cumspec_m /= cumspec_m[-1]
-                cumspec_m = np.insert(cumspec_m, 0, 0.0)
+                tot_ph_c = cspec.d.sum()
+                tot_ph_m = mspec.d.sum()
 
                 u = np.random.random(size=n_current)
 
-                cell_norm_c = tot_ph_c*cem/em_sum_c
-                cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u)
-            
-                if isinstance(self.Zmet, string_types):
-                    cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cem/em_sum_m
-                else:
-                    cell_norm_m = tot_ph_m*metalZ*cem/em_sum_m
-                cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u)
-            
-                number_of_photons[ibegin:iend] = cell_n_c + cell_n_m
+                cell_norm_c = tot_ph_c*cem
+                cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cem
+                cell_norm = np.modf(cell_norm_c + cell_norm_m)
+                cell_n = np.uint64(cell_norm[1]) + np.uint64(cell_norm[0] >= u)
 
-                end_e += int((cell_n_c+cell_n_m).sum())
+                number_of_photons[ibegin:iend] = cell_n
+
+                end_e += int(cell_n.sum())
 
                 if end_e > self.photons_per_chunk:
                     raise RuntimeError("Number of photons generated for this chunk "+
                                        "exceeds photons_per_chunk (%d)! " % self.photons_per_chunk +
                                        "Increase photons_per_chunk!")
 
-                energies[start_e:end_e] = _generate_energies(cell_n_c, cell_n_m, cumspec_c, cumspec_m, energy)
-            
+                if self.method == 1:
+                    cumspec_c = np.cumsum(cspec.d)
+                    cumspec_m = np.cumsum(mspec.d)
+                    cumspec_c = np.insert(cumspec_c, 0, 0.0)
+                    cumspec_m = np.insert(cumspec_m, 0, 0.0)
+
+                ei = start_e
+                for cn, Z in zip(number_of_photons[ibegin:iend], metalZ[ibegin:iend]):
+                    if cn > 0:
+                        if self.method == 1:
+                            cumspec = cumspec_c + Z*cumspec_m
+                            cumspec /= cumspec[-1]
+                            randvec = np.random.uniform(size=cn)
+                            randvec.sort()
+                            cell_e = np.interp(randvec, cumspec, ebins)
+                        elif self.method == 2:
+                            tot_spec = cspec.d+Z*mspec.d
+                            tot_spec /= tot_spec.sum()
+                            eidxs = np.random.choice(nchan, size=cn, p=tot_spec)
+                            cell_e = emid[eidxs]
+                        energies[ei:ei+cn] = cell_e
+                        cell_counter += 1
+                    pbar.update(cell_counter)
+                    ei += cn
+
                 start_e = end_e
 
-                pbar.update(iend)
-
-            pbar.finish()
-
             active_cells = number_of_photons > 0
             idxs = idxs[active_cells]
 
-            mylog.info("Number of photons generated for this chunk: %d" % int(number_of_photons.sum()))
-            mylog.info("Number of cells with photons: %d" % int(active_cells.sum()))
-
             photons["NumberOfPhotons"].append(number_of_photons[active_cells])
             photons["Energy"].append(ds.arr(energies[:end_e].copy(), "keV"))
             photons["x"].append((chunk["x"][idxs]-src_ctr[0]).in_units("kpc"))
@@ -213,23 +229,17 @@
             photons["vz"].append(chunk["velocity_z"][idxs].in_units("km/s"))
             photons["dx"].append(chunk["dx"][idxs].in_units("kpc"))
 
+        pbar.finish()
+
         for key in photons:
             if len(photons[key]) > 0:
                 photons[key] = uconcatenate(photons[key])
+            elif key == "NumberOfPhotons":
+                photons[key] = np.array([])
+            else:
+                photons[key] = YTArray([], photon_units[key])
+
+        mylog.info("Number of photons generated: %d" % int(np.sum(photons["NumberOfPhotons"])))
+        mylog.info("Number of cells with photons: %d" % len(photons["x"]))
 
         return photons
-
-def _generate_energies(cell_n_c, cell_n_m, counts_c, counts_m, energy):
-    energies = np.array([])
-    for cn_c, cn_m in zip(cell_n_c, cell_n_m):
-        if cn_c > 0:
-            randvec_c = np.random.uniform(size=cn_c)
-            randvec_c.sort()
-            cell_e_c = np.interp(randvec_c, counts_c, energy)
-            energies = np.append(energies, cell_e_c)
-        if cn_m > 0: 
-            randvec_m = np.random.uniform(size=cn_m)
-            randvec_m.sort()
-            cell_e_m = np.interp(randvec_m, counts_m, energy)
-            energies = np.append(energies, cell_e_m)
-    return energies

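The inner loop above supports both sampling strategies, and they are easy to illustrate in isolation: ``method=1`` inverts the cumulative distribution of the binned spectrum, while ``method=2`` draws bin indices with probability proportional to the spectrum via ``np.random.choice`` (what the docstring calls the acceptance-rejection method). A sketch with a toy spectrum (the exponential shape is invented for illustration):

    import numpy as np

    ebins = np.linspace(0.1, 10.0, 201)       # 200 channels (keV)
    emid = 0.5*(ebins[1:] + ebins[:-1])
    spec = np.exp(-emid)                      # toy spectrum: photons per bin
    n_photons = 1000

    # method=1: invert the CDF and interpolate back onto the bin edges
    cumspec = np.insert(np.cumsum(spec), 0, 0.0)
    cumspec /= cumspec[-1]
    randvec = np.random.uniform(size=n_photons)
    randvec.sort()
    energies_1 = np.interp(randvec, cumspec, ebins)

    # method=2: draw bin indices with probability proportional to the spectrum
    prob = spec/spec.sum()
    idxs = np.random.choice(len(emid), size=n_photons, p=prob)
    energies_2 = emid[idxs]
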

https://bitbucket.org/yt_analysis/yt/commits/36e31b962fd8/
Changeset:   36e31b962fd8
Branch:      yt
User:        jzuhone
Date:        2015-06-18 13:39:56+00:00
Summary:     Update documentation to reflect new options for photon simulator.
Affected #:  1 file
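
As a quick end-to-end illustration of the workflow the updated documentation describes (the dataset, AtomDB path, and absorption table below are placeholders, and the call pattern is a sketch of the documented API rather than a tested script):

    import yt
    from yt.analysis_modules.photon_simulator.api import \
        TableApecModel, TableAbsorbModel, ThermalPhotonModel, PhotonList

    ds = yt.load("my_dataset")                      # placeholder dataset
    sphere = ds.sphere("c", (250.0, "kpc"))

    apec_model = TableApecModel("/path/to/atomdb", 0.01, 10.0, 2000)  # placeholder path
    thermal_model = ThermalPhotonModel(apec_model, X_H=0.75, Zmet=0.3, method=1)

    # from_scratch(data_source, redshift, area, exp_time, photon_model, ...)
    photons = PhotonList.from_scratch(sphere, 0.05, 6000.0, 1.0e5,
                                      thermal_model, center="c")

    abs_model = TableAbsorbModel("tbabs_table.h5", 0.1)  # placeholder table
    events = photons.project_photons("z", exp_time_new=2.0e5, redshift_new=0.07,
                                     absorb_model=abs_model,
                                     sky_center=(187.5, 12.333))
    events.write_spectrum("my_spec.fits", bin_type="energy", emin=0.1,
                          emax=10.0, nchan=2000, clobber=True)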

diff -r 5a78763561483541a32fbe10a5e811f03922c00c -r 36e31b962fd85a0a33853bc706d36ce7c40b13d4 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -139,6 +139,12 @@
 the optional keyword ``thermal_broad`` is set to ``True``, the spectral
 lines will be thermally broadened.
 
+.. note:: 
+
+   ``SpectralModel`` objects based on XSPEC models (both the thermal 
+   emission and Galactic absorption models mentioned below) only work 
+   in Python 2.7, since currently PyXspec only works with Python 2.x. 
+   
 Now that we have our ``SpectralModel`` that gives us a spectrum, we need
 to connect this model to a ``PhotonModel`` that will connect the field
 data in the ``data_source`` to the spectral model to actually generate
@@ -148,7 +154,8 @@
 .. code:: python
 
     thermal_model = ThermalPhotonModel(apec_model, X_H=0.75, Zmet=0.3,
-                                       photons_per_chunk=100000000)
+                                       photons_per_chunk=100000000,
+                                       method=1)
 
 Here we pass in the ``SpectralModel`` and can optionally set values for
 the hydrogen mass fraction ``X_H`` and metallicity ``Zmet``. If
@@ -165,6 +172,18 @@
 this parameter needs to be set higher, or if you are looking to decrease memory
 usage, you might set this parameter lower.
 
+The ``method`` keyword argument is also optional, and determines how the individual
+photon energies are generated from the spectrum. It may be set to one of two values:
+
+* ``method=1``: Construct the cumulative distribution function of the spectrum and invert
+  it, using uniformly drawn random numbers to determine the photon energies (fast, but relies
+  on construction of the CDF and interpolation between the points, so for some spectra it
+  may not be accurate enough). 
+* ``method=2``: Generate the photon energies from the spectrum using an acceptance-rejection
+  technique (accurate, but likely to be slow). 
+
+``method=1`` (the default) should be sufficient for most cases. 
+
 Next, we need to specify "fiducial" values for the telescope collecting
 area, exposure time, and cosmological redshift. Remember, the initial
 photon generation will act as a source for Monte-Carlo sampling for more
@@ -191,12 +210,27 @@
 By default, the angular diameter distance to the object is determined
 from the ``cosmology`` and the cosmological ``redshift``. If a
 ``Cosmology`` instance is not provided, one will be made from the
-default cosmological parameters. If your source is local to the galaxy,
-you can set its distance directly, using a tuple, e.g.
-``dist=(30, "kpc")``. In this case, the ``redshift`` and ``cosmology``
-will be ignored. Finally, if the photon generating function accepts any
-parameters, they can be passed to ``from_scratch`` via a ``parameters``
-dictionary.
+default cosmological parameters. The ``center`` keyword argument specifies
+the center of the photon distribution, and the photon positions will be
+recentered with this value as the origin. This argument accepts the following
+values:
+
+* A NumPy array or list corresponding to the coordinates of the center in
+  units of code length. 
+* A ``YTArray`` corresponding to the coordinates of the center in some
+  length units. 
+* ``"center"`` or ``"c"`` corresponds to the domain center. 
+* ``"max"`` or ``"m"`` corresponds to the location of the maximum gas density. 
+
+If ``center`` is not specified, ``from_scratch`` will attempt to use the 
+``"center"`` field parameter of the ``data_source``. 
+
+``from_scratch`` takes a few other optional keyword arguments. If your 
+source is local to the galaxy, you can set its distance directly, using 
+a tuple, e.g. ``dist=(30, "kpc")``. In this case, the ``redshift`` and 
+``cosmology`` will be ignored. Finally, if the photon generating 
+function accepts any parameters, they can be passed to ``from_scratch`` 
+via a ``parameters`` dictionary.
 
 At this point, the ``photons`` are distributed in the three-dimensional
 space of the ``data_source``, with energies in the rest frame of the
@@ -265,7 +299,7 @@
     abs_model = TableAbsorbModel("tbabs_table.h5", 0.1)
 
 Now we're ready to project the photons. First, we choose a line-of-sight
-vector ``L``. Second, we'll adjust the exposure time and the redshift.
+vector ``normal``. Second, we'll adjust the exposure time and the redshift.
 Third, we'll pass in the absorption ``SpectrumModel``. Fourth, we'll
 specify a ``sky_center`` in RA,DEC on the sky in degrees.
 
@@ -274,26 +308,40 @@
 course far short of a full simulation of a telescope ray-trace, but it's
 a quick-and-dirty way to get something close to the real thing. We'll
 discuss how to get your simulated events into a format suitable for
-reading by telescope simulation codes later.
+reading by telescope simulation codes later. If you just want to convolve 
+the photons with an ARF, you may specify that as the only response, but some
+ARFs are unnormalized and still require the RMF for normalization. Check with
+the documentation associated with these files for details. If we are using the
+RMF to convolve energies, we must set ``convolve_energies=True``. 
 
 .. code:: python
 
     ARF = "chandra_ACIS-S3_onaxis_arf.fits"
     RMF = "chandra_ACIS-S3_onaxis_rmf.fits"
-    L = [0.0,0.0,1.0]
-    events = photons.project_photons(L, exp_time_new=2.0e5, redshift_new=0.07, absorb_model=abs_model,
-                                     sky_center=(187.5,12.333), responses=[ARF,RMF])
+    normal = [0.0,0.0,1.0]
+    events = photons.project_photons(normal, exp_time_new=2.0e5, redshift_new=0.07, dist_new=None, 
+                                     absorb_model=abs_model, sky_center=(187.5,12.333), responses=[ARF,RMF], 
+                                     convolve_energies=True, no_shifting=False, north_vector=None,
+                                     psf_sigma=None)
 
-Also, the optional keyword ``psf_sigma`` specifies a Gaussian standard
-deviation to scatter the photon sky positions around with, providing a
-crude representation of a PSF.
+In this case, we chose a three-vector ``normal`` to specify an arbitrary 
+line-of-sight, but ``"x"``, ``"y"``, or ``"z"`` could also be chosen to 
+project along one of those axes. 
 
-.. warning::
+``project_photons`` takes several other optional keyword arguments. 
 
-   The binned images that result, even if you convolve with responses,
-   are still of the same resolution as the finest cell size of the
-   simulation dataset. If you want a more accurate simulation of a
-   particular X-ray telescope, you should check out `Storing events for future use and for reading-in by telescope simulators`_.
+* ``no_shifting`` (default ``False``) controls whether or not Doppler 
+  shifting of photon energies is turned on. 
+* ``dist_new`` is a (value, unit) tuple that is used to set a new
+  angular diameter distance by hand instead of having it determined
+  by the cosmology and the value of the redshift. Should only be used
+  for simulations of nearby objects. 
+* For off-axis ``normal`` vectors, the ``north_vector`` argument can
+  be used to control what vector corresponds to the "up" direction in
+  the resulting event list.
+* ``psf_sigma`` may be specified to provide a crude representation of
+  a PSF, and corresponds to the standard deviation (in degrees) of a
+  Gaussian PSF model.
 
 Let's just take a quick look at the raw events object:
 
@@ -343,19 +391,27 @@
 
 Which is starting to look like a real observation!
 
+.. warning::
+
+   The binned images that result, even if you convolve with responses,
+   are still of the same resolution as the finest cell size of the
+   simulation dataset. If you want a more accurate simulation of a
+   particular X-ray telescope, you should check out `Storing events for future use and for reading-in by telescope simulators`_.
+
 We can also bin up the spectrum into energy bins, and write it to a FITS
 table file. This is an example where we've binned up the spectrum
 according to the unconvolved photon energy:
 
 .. code:: python
 
-    events.write_spectrum("virgo_spec.fits", energy_bins=True, emin=0.1, emax=10.0, nchan=2000, clobber=True)
+    events.write_spectrum("virgo_spec.fits", bin_type="energy", emin=0.1, emax=10.0, nchan=2000, clobber=True)
 
-If we don't set ``energy_bins=True``, and we have convolved our events
+We can also set ``bin_type="channel"``. If we have convolved our events
 with response files, then any other keywords will be ignored and it will
 try to make a spectrum from the channel information that is contained
-within the RMF, suitable for analyzing in XSPEC. For now, we'll stick
-with the energy spectrum, and plot it up:
+within the RMF. Otherwise, the channels will be determined from the ``emin``, 
+``emax``, and ``nchan`` keywords, and will be numbered from 1 to ``nchan``. 
+For now, we'll stick with the energy spectrum, and plot it up:
 
 .. code:: python
 


https://bitbucket.org/yt_analysis/yt/commits/f45c19ad2d3d/
Changeset:   f45c19ad2d3d
Branch:      yt
User:        jzuhone
Date:        2015-06-18 13:40:44+00:00
Summary:     Merging
Affected #:  39 files

diff -r 36e31b962fd85a0a33853bc706d36ce7c40b13d4 -r f45c19ad2d3d334712e860a63b0854ef5dc3bb54 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -62,3 +62,4 @@
 doc/source/reference/api/generated/*
 doc/_temp/*
 doc/source/bootcamp/.ipynb_checkpoints/
+dist

diff -r 36e31b962fd85a0a33853bc706d36ce7c40b13d4 -r f45c19ad2d3d334712e860a63b0854ef5dc3bb54 .python-version
--- /dev/null
+++ b/.python-version
@@ -0,0 +1,1 @@
+2.7.9

diff -r 36e31b962fd85a0a33853bc706d36ce7c40b13d4 -r f45c19ad2d3d334712e860a63b0854ef5dc3bb54 README
--- a/README
+++ b/README
@@ -20,4 +20,4 @@
 For more information on installation, what to do if you run into problems, or 
 ways to help development, please visit our website.
 
-Enjoy!
+Enjoy!
\ No newline at end of file

diff -r 36e31b962fd85a0a33853bc706d36ce7c40b13d4 -r f45c19ad2d3d334712e860a63b0854ef5dc3bb54 distribute_setup.py
--- a/distribute_setup.py
+++ /dev/null
@@ -1,541 +0,0 @@
-#!python
-"""Bootstrap distribute installation
-
-If you want to use setuptools in your package's setup.py, just include this
-file in the same directory with it, and add this to the top of your setup.py::
-
-    from distribute_setup import use_setuptools
-    use_setuptools()
-
-If you want to require a specific version of setuptools, set a download
-mirror, or use an alternate download directory, you can do so by supplying
-the appropriate options to ``use_setuptools()``.
-
-This file can also be run as a script to install or upgrade setuptools.
-"""
-import os
-import shutil
-import sys
-import time
-import fnmatch
-import tempfile
-import tarfile
-import optparse
-
-from distutils import log
-
-try:
-    from site import USER_SITE
-except ImportError:
-    USER_SITE = None
-
-try:
-    import subprocess
-
-    def _python_cmd(*args):
-        args = (sys.executable,) + args
-        return subprocess.call(args) == 0
-
-except ImportError:
-    # will be used for python 2.3
-    def _python_cmd(*args):
-        args = (sys.executable,) + args
-        # quoting arguments if windows
-        if sys.platform == 'win32':
-            def quote(arg):
-                if ' ' in arg:
-                    return '"%s"' % arg
-                return arg
-            args = [quote(arg) for arg in args]
-        return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
-
-DEFAULT_VERSION = "0.6.32"
-DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
-SETUPTOOLS_FAKED_VERSION = "0.6c11"
-
-SETUPTOOLS_PKG_INFO = """\
-Metadata-Version: 1.0
-Name: setuptools
-Version: %s
-Summary: xxxx
-Home-page: xxx
-Author: xxx
-Author-email: xxx
-License: xxx
-Description: xxx
-""" % SETUPTOOLS_FAKED_VERSION
-
-
-def _install(tarball, install_args=()):
-    # extracting the tarball
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        tar = tarfile.open(tarball)
-        _extractall(tar)
-        tar.close()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-
-        # installing
-        log.warn('Installing Distribute')
-        if not _python_cmd('setup.py', 'install', *install_args):
-            log.warn('Something went wrong during the installation.')
-            log.warn('See the error message above.')
-            # exitcode will be 2
-            return 2
-    finally:
-        os.chdir(old_wd)
-        shutil.rmtree(tmpdir)
-
-
-def _build_egg(egg, tarball, to_dir):
-    # extracting the tarball
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        tar = tarfile.open(tarball)
-        _extractall(tar)
-        tar.close()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-
-        # building an egg
-        log.warn('Building a Distribute egg in %s', to_dir)
-        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
-
-    finally:
-        os.chdir(old_wd)
-        shutil.rmtree(tmpdir)
-    # returning the result
-    log.warn(egg)
-    if not os.path.exists(egg):
-        raise IOError('Could not build the egg.')
-
-
-def _do_download(version, download_base, to_dir, download_delay):
-    egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
-                       % (version, sys.version_info[0], sys.version_info[1]))
-    if not os.path.exists(egg):
-        tarball = download_setuptools(version, download_base,
-                                      to_dir, download_delay)
-        _build_egg(egg, tarball, to_dir)
-    sys.path.insert(0, egg)
-    import setuptools
-    setuptools.bootstrap_install_from = egg
-
-
-def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-                   to_dir=os.curdir, download_delay=15, no_fake=True):
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    was_imported = 'pkg_resources' in sys.modules or \
-        'setuptools' in sys.modules
-    try:
-        try:
-            import pkg_resources
-            if not hasattr(pkg_resources, '_distribute'):
-                if not no_fake:
-                    _fake_setuptools()
-                raise ImportError
-        except ImportError:
-            return _do_download(version, download_base, to_dir, download_delay)
-        try:
-            pkg_resources.require("distribute>=" + version)
-            return
-        except pkg_resources.VersionConflict:
-            e = sys.exc_info()[1]
-            if was_imported:
-                sys.stderr.write(
-                "The required version of distribute (>=%s) is not available,\n"
-                "and can't be installed while this script is running. Please\n"
-                "install a more recent version first, using\n"
-                "'easy_install -U distribute'."
-                "\n\n(Currently using %r)\n" % (version, e.args[0]))
-                sys.exit(2)
-            else:
-                del pkg_resources, sys.modules['pkg_resources']    # reload ok
-                return _do_download(version, download_base, to_dir,
-                                    download_delay)
-        except pkg_resources.DistributionNotFound:
-            return _do_download(version, download_base, to_dir,
-                                download_delay)
-    finally:
-        if not no_fake:
-            _create_fake_setuptools_pkg_info(to_dir)
-
-
-def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-                        to_dir=os.curdir, delay=15):
-    """Download distribute from a specified location and return its filename
-
-    `version` should be a valid distribute version number that is available
-    as an egg for download under the `download_base` URL (which should end
-    with a '/'). `to_dir` is the directory where the egg will be downloaded.
-    `delay` is the number of seconds to pause before an actual download
-    attempt.
-    """
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    try:
-        from urllib.request import urlopen
-    except ImportError:
-        from urllib2 import urlopen
-    tgz_name = "distribute-%s.tar.gz" % version
-    url = download_base + tgz_name
-    saveto = os.path.join(to_dir, tgz_name)
-    src = dst = None
-    if not os.path.exists(saveto):  # Avoid repeated downloads
-        try:
-            log.warn("Downloading %s", url)
-            src = urlopen(url)
-            # Read/write all in one block, so we don't create a corrupt file
-            # if the download is interrupted.
-            data = src.read()
-            dst = open(saveto, "wb")
-            dst.write(data)
-        finally:
-            if src:
-                src.close()
-            if dst:
-                dst.close()
-    return os.path.realpath(saveto)
-
-
-def _no_sandbox(function):
-    def __no_sandbox(*args, **kw):
-        try:
-            from setuptools.sandbox import DirectorySandbox
-            if not hasattr(DirectorySandbox, '_old'):
-                def violation(*args):
-                    pass
-                DirectorySandbox._old = DirectorySandbox._violation
-                DirectorySandbox._violation = violation
-                patched = True
-            else:
-                patched = False
-        except ImportError:
-            patched = False
-
-        try:
-            return function(*args, **kw)
-        finally:
-            if patched:
-                DirectorySandbox._violation = DirectorySandbox._old
-                del DirectorySandbox._old
-
-    return __no_sandbox
-
-
-def _patch_file(path, content):
-    """Will backup the file then patch it"""
-    existing_content = open(path).read()
-    if existing_content == content:
-        # already patched
-        log.warn('Already patched.')
-        return False
-    log.warn('Patching...')
-    _rename_path(path)
-    f = open(path, 'w')
-    try:
-        f.write(content)
-    finally:
-        f.close()
-    return True
-
-_patch_file = _no_sandbox(_patch_file)
-
-
-def _same_content(path, content):
-    return open(path).read() == content
-
-
-def _rename_path(path):
-    new_name = path + '.OLD.%s' % time.time()
-    log.warn('Renaming %s to %s', path, new_name)
-    os.rename(path, new_name)
-    return new_name
-
-
-def _remove_flat_installation(placeholder):
-    if not os.path.isdir(placeholder):
-        log.warn('Unknown installation at %s', placeholder)
-        return False
-    found = False
-    for file in os.listdir(placeholder):
-        if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
-            found = True
-            break
-    if not found:
-        log.warn('Could not locate setuptools*.egg-info')
-        return
-
-    log.warn('Moving elements out of the way...')
-    pkg_info = os.path.join(placeholder, file)
-    if os.path.isdir(pkg_info):
-        patched = _patch_egg_dir(pkg_info)
-    else:
-        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
-
-    if not patched:
-        log.warn('%s already patched.', pkg_info)
-        return False
-    # now let's move the files out of the way
-    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
-        element = os.path.join(placeholder, element)
-        if os.path.exists(element):
-            _rename_path(element)
-        else:
-            log.warn('Could not find the %s element of the '
-                     'Setuptools distribution', element)
-    return True
-
-_remove_flat_installation = _no_sandbox(_remove_flat_installation)
-
-
-def _after_install(dist):
-    log.warn('After install bootstrap.')
-    placeholder = dist.get_command_obj('install').install_purelib
-    _create_fake_setuptools_pkg_info(placeholder)
-
-
-def _create_fake_setuptools_pkg_info(placeholder):
-    if not placeholder or not os.path.exists(placeholder):
-        log.warn('Could not find the install location')
-        return
-    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
-    setuptools_file = 'setuptools-%s-py%s.egg-info' % \
-            (SETUPTOOLS_FAKED_VERSION, pyver)
-    pkg_info = os.path.join(placeholder, setuptools_file)
-    if os.path.exists(pkg_info):
-        log.warn('%s already exists', pkg_info)
-        return
-
-    log.warn('Creating %s', pkg_info)
-    try:
-        f = open(pkg_info, 'w')
-    except EnvironmentError:
-        log.warn("Don't have permissions to write %s, skipping", pkg_info)
-        return
-    try:
-        f.write(SETUPTOOLS_PKG_INFO)
-    finally:
-        f.close()
-
-    pth_file = os.path.join(placeholder, 'setuptools.pth')
-    log.warn('Creating %s', pth_file)
-    f = open(pth_file, 'w')
-    try:
-        f.write(os.path.join(os.curdir, setuptools_file))
-    finally:
-        f.close()
-
-_create_fake_setuptools_pkg_info = _no_sandbox(
-    _create_fake_setuptools_pkg_info
-)
-
-
-def _patch_egg_dir(path):
-    # let's check if it's already patched
-    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
-    if os.path.exists(pkg_info):
-        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
-            log.warn('%s already patched.', pkg_info)
-            return False
-    _rename_path(path)
-    os.mkdir(path)
-    os.mkdir(os.path.join(path, 'EGG-INFO'))
-    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
-    f = open(pkg_info, 'w')
-    try:
-        f.write(SETUPTOOLS_PKG_INFO)
-    finally:
-        f.close()
-    return True
-
-_patch_egg_dir = _no_sandbox(_patch_egg_dir)
-
-
-def _before_install():
-    log.warn('Before install bootstrap.')
-    _fake_setuptools()
-
-
-def _under_prefix(location):
-    if 'install' not in sys.argv:
-        return True
-    args = sys.argv[sys.argv.index('install') + 1:]
-    for index, arg in enumerate(args):
-        for option in ('--root', '--prefix'):
-            if arg.startswith('%s=' % option):
-                top_dir = arg.split('root=')[-1]
-                return location.startswith(top_dir)
-            elif arg == option:
-                if len(args) > index:
-                    top_dir = args[index + 1]
-                    return location.startswith(top_dir)
-        if arg == '--user' and USER_SITE is not None:
-            return location.startswith(USER_SITE)
-    return True
-
-
-def _fake_setuptools():
-    log.warn('Scanning installed packages')
-    try:
-        import pkg_resources
-    except ImportError:
-        # we're cool
-        log.warn('Setuptools or Distribute does not seem to be installed.')
-        return
-    ws = pkg_resources.working_set
-    try:
-        setuptools_dist = ws.find(
-            pkg_resources.Requirement.parse('setuptools', replacement=False)
-            )
-    except TypeError:
-        # old distribute API
-        setuptools_dist = ws.find(
-            pkg_resources.Requirement.parse('setuptools')
-        )
-
-    if setuptools_dist is None:
-        log.warn('No setuptools distribution found')
-        return
-    # detecting if it was already faked
-    setuptools_location = setuptools_dist.location
-    log.warn('Setuptools installation detected at %s', setuptools_location)
-
-    # if --root or --prefix was provided, and if
-    # setuptools is not located in them, we don't patch it
-    if not _under_prefix(setuptools_location):
-        log.warn('Not patching, --root or --prefix is installing Distribute'
-                 ' in another location')
-        return
-
-    # let's see if it's an egg
-    if not setuptools_location.endswith('.egg'):
-        log.warn('Non-egg installation')
-        res = _remove_flat_installation(setuptools_location)
-        if not res:
-            return
-    else:
-        log.warn('Egg installation')
-        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
-        if (os.path.exists(pkg_info) and
-            _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
-            log.warn('Already patched.')
-            return
-        log.warn('Patching...')
-        # let's create a fake egg replacing setuptools one
-        res = _patch_egg_dir(setuptools_location)
-        if not res:
-            return
-    log.warn('Patching complete.')
-    _relaunch()
-
-
-def _relaunch():
-    log.warn('Relaunching...')
-    # we have to relaunch the process
-    # pip marker to avoid a relaunch bug
-    _cmd1 = ['-c', 'install', '--single-version-externally-managed']
-    _cmd2 = ['-c', 'install', '--record']
-    if sys.argv[:3] == _cmd1 or sys.argv[:3] == _cmd2:
-        sys.argv[0] = 'setup.py'
-    args = [sys.executable] + sys.argv
-    sys.exit(subprocess.call(args))
-
-
-def _extractall(self, path=".", members=None):
-    """Extract all members from the archive to the current working
-       directory and set owner, modification time and permissions on
-       directories afterwards. `path' specifies a different directory
-       to extract to. `members' is optional and must be a subset of the
-       list returned by getmembers().
-    """
-    import copy
-    import operator
-    from tarfile import ExtractError
-    directories = []
-
-    if members is None:
-        members = self
-
-    for tarinfo in members:
-        if tarinfo.isdir():
-            # Extract directories with a safe mode.
-            directories.append(tarinfo)
-            tarinfo = copy.copy(tarinfo)
-            tarinfo.mode = 448  # decimal for oct 0700
-        self.extract(tarinfo, path)
-
-    # Reverse sort directories.
-    if sys.version_info < (2, 4):
-        def sorter(dir1, dir2):
-            return cmp(dir1.name, dir2.name)
-        directories.sort(sorter)
-        directories.reverse()
-    else:
-        directories.sort(key=operator.attrgetter('name'), reverse=True)
-
-    # Set correct owner, mtime and filemode on directories.
-    for tarinfo in directories:
-        dirpath = os.path.join(path, tarinfo.name)
-        try:
-            self.chown(tarinfo, dirpath)
-            self.utime(tarinfo, dirpath)
-            self.chmod(tarinfo, dirpath)
-        except ExtractError:
-            e = sys.exc_info()[1]
-            if self.errorlevel > 1:
-                raise
-            else:
-                self._dbg(1, "tarfile: %s" % e)
-
-
-def _build_install_args(options):
-    """
-    Build the arguments to 'python setup.py install' on the distribute package
-    """
-    install_args = []
-    if options.user_install:
-        if sys.version_info < (2, 6):
-            log.warn("--user requires Python 2.6 or later")
-            raise SystemExit(1)
-        install_args.append('--user')
-    return install_args
-
-def _parse_args():
-    """
-    Parse the command line for options
-    """
-    parser = optparse.OptionParser()
-    parser.add_option(
-        '--user', dest='user_install', action='store_true', default=False,
-        help='install in user site package (requires Python 2.6 or later)')
-    parser.add_option(
-        '--download-base', dest='download_base', metavar="URL",
-        default=DEFAULT_URL,
-        help='alternative URL from where to download the distribute package')
-    options, args = parser.parse_args()
-    # positional arguments are ignored
-    return options
-
-def main(version=DEFAULT_VERSION):
-    """Install or upgrade setuptools and EasyInstall"""
-    options = _parse_args()
-    tarball = download_setuptools(download_base=options.download_base)
-    return _install(tarball, _build_install_args(options))
-
-if __name__ == '__main__':
-    sys.exit(main())

diff -r 36e31b962fd85a0a33853bc706d36ce7c40b13d4 -r f45c19ad2d3d334712e860a63b0854ef5dc3bb54 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -1,18 +1,14 @@
 #
 # Hi there!  Welcome to the yt installation script.
 #
+# First things first, if you experience problems, please visit the Help 
+# section at http://yt-project.org.
+#
 # This script is designed to create a fully isolated Python installation
 # with the dependencies you need to run yt.
 #
-# There are a few options, but you only need to set *one* of them.  And
-# that's the next one, DEST_DIR.  But, if you want to use an existing HDF5
-# installation you can set HDF5_DIR, or if you want to use some other
-# subversion checkout of yt, you can set YT_DIR, too.  (It'll already
-# check the current directory and one up.
-#
-# If you experience problems, please visit the Help section at 
-# http://yt-project.org.
-#
+# There are a few options, but you only need to set *one* of them, which is 
+# the next one, DEST_DIR:
 
 DEST_SUFFIX="yt-`uname -m`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
@@ -23,16 +19,25 @@
     DEST_DIR=${YT_DEST}
 fi
 
+# What follows are some other options that you may or may not need to change.
+
 # Here's where you put the HDF5 path if you like; otherwise it'll download it
 # and install it on its own
 #HDF5_DIR=
 
+# If you've got yt some other place, set this to point to it. The script will
+# already check the current directory and the one above it in the tree.
+YT_DIR=""
+
 # If you need to supply arguments to the NumPy or SciPy build, supply them here
 # This one turns on gfortran manually:
 #NUMPY_ARGS="--fcompiler=gnu95"
 # If you absolutely can't get the fortran to work, try this:
 #NUMPY_ARGS="--fcompiler=fake"
 
+INST_PY3=0      # Install Python 3 along with Python 2. If this is turned
+                # on, all Python packages (including yt) will be installed
+                # in Python 3 (except Mercurial, which requires Python 2).
 INST_HG=1       # Install Mercurial or not?  If hg is not already
                 # installed, yt cannot be installed.
 INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with
@@ -50,9 +55,6 @@
 INST_ROCKSTAR=0 # Install the Rockstar halo finder?
 INST_SCIPY=0    # Install scipy?
 
-# If you've got yt some other place, set this to point to it.
-YT_DIR=""
-
 # If you need to pass anything to matplotlib, do so here.
 MPL_SUPP_LDFLAGS=""
 MPL_SUPP_CFLAGS=""
@@ -111,6 +113,7 @@
     echo INST_SQLITE3=${INST_SQLITE3} >> ${CONFIG_FILE}
     echo INST_PYX=${INST_PYX} >> ${CONFIG_FILE}
     echo INST_0MQ=${INST_0MQ} >> ${CONFIG_FILE}
+    echo INST_PY3=${INST_PY3} >> ${CONFIG_FILE}
     echo INST_ROCKSTAR=${INST_ROCKSTAR} >> ${CONFIG_FILE}
     echo INST_SCIPY=${INST_SCIPY} >> ${CONFIG_FILE}
     echo YT_DIR=${YT_DIR} >> ${CONFIG_FILE}
@@ -415,6 +418,10 @@
 get_willwont ${INST_SQLITE3}
 echo "be installing SQLite3"
 
+printf "%-15s = %s so I " "INST_PY3" "${INST_PY3}"
+get_willwont ${INST_PY3}
+echo "be installing Python 3"
+
 printf "%-15s = %s so I " "INST_HG" "${INST_HG}"
 get_willwont ${INST_HG}
 echo "be installing Mercurial"
@@ -487,6 +494,13 @@
     exit 1
 }
 
+if [ $INST_PY3 -eq 1 ]
+then
+	 PYTHON_EXEC='python3.4'
+else 
+	 PYTHON_EXEC='python2.7'
+fi
+
 function do_setup_py
 {
     [ -e $1/done ] && return
@@ -501,21 +515,27 @@
     [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
     touch $LIB/extracted
     BUILD_ARGS=""
+    if [[ $LIB =~ .*mercurial.* ]] 
+    then
+        PYEXE="python2.7"
+    else
+        PYEXE=${PYTHON_EXEC}
+    fi
     case $LIB in
         *h5py*)
             pushd $LIB &> /dev/null
-            ( ${DEST_DIR}/bin/python2.7 setup.py configure --hdf5=${HDF5_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
+            ( ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py configure --hdf5=${HDF5_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
             popd &> /dev/null
             ;;
         *numpy*)
-            if [ -e ${DEST_DIR}/lib/python2.7/site-packages/numpy/__init__.py ]
+            if [ -e ${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages/numpy/__init__.py ]
             then
-                VER=$(${DEST_DIR}/bin/python -c 'from distutils.version import StrictVersion as SV; \
+                VER=$(${DEST_DIR}/bin/${PYTHON_EXEC} -c 'from distutils.version import StrictVersion as SV; \
                                                  import numpy; print SV(numpy.__version__) < SV("1.8.0")')
                 if [ $VER == "True" ]
                 then
                     echo "Removing previous NumPy instance (see issue #889)"
-                    rm -rf ${DEST_DIR}/lib/python2.7/site-packages/{numpy*,*.pth}
+                    rm -rf ${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages/{numpy*,*.pth}
                 fi
             fi
             ;;
@@ -523,8 +543,8 @@
             ;;
     esac
     cd $LIB
-    ( ${DEST_DIR}/bin/python2.7 setup.py build ${BUILD_ARGS} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-    ( ${DEST_DIR}/bin/python2.7 setup.py install    2>&1 ) 1>> ${LOG_FILE} || do_exit
+    ( ${DEST_DIR}/bin/${PYEXE} setup.py build ${BUILD_ARGS} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
+    ( ${DEST_DIR}/bin/${PYEXE} setup.py install    2>&1 ) 1>> ${LOG_FILE} || do_exit
     touch done
     cd ..
 }
@@ -592,14 +612,15 @@
 # Set paths to what they should be when yt is activated.
 export PATH=${DEST_DIR}/bin:$PATH
 export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
-export PYTHONPATH=${DEST_DIR}/lib/python2.7/site-packages
+export PYTHONPATH=${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages
 
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
+PYTHON2='Python-2.7.9'
+PYTHON3='Python-3.4.3'
 CYTHON='Cython-0.22'
 PYX='PyX-0.12.1'
-PYTHON='Python-2.7.9'
 BZLIB='bzip2-1.0.6'
 FREETYPE_VER='freetype-2.4.12' 
 H5PY='h5py-2.5.0'
@@ -620,11 +641,13 @@
 TORNADO='tornado-4.0.2'
 ZEROMQ='zeromq-4.0.5'
 ZLIB='zlib-1.2.8'
+SETUPTOOLS='setuptools-16.0'
 
 # Now we dump all our SHA512 files out.
 echo '856220fa579e272ac38dcef091760f527431ff3b98df9af6e68416fcf77d9659ac5abe5c7dee41331f359614637a4ff452033085335ee499830ed126ab584267  Cython-0.22.tar.gz' > Cython-0.22.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
 echo 'a42f28ed8e49f04cf89e2ea7434c5ecbc264e7188dcb79ab97f745adf664dd9ab57f9a913543731635f90859536244ac37dca9adf0fc2aa1b215ba884839d160  Python-2.7.9.tgz' > Python-2.7.9.tgz.sha512
+echo '609cc82586fabecb25f25ecb410f2938e01d21cde85dd3f8824fe55c6edde9ecf3b7609195473d3fa05a16b9b121464f5414db1a0187103b78ea6edfa71684a7  Python-3.4.3.tgz' > Python-3.4.3.tgz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
@@ -646,6 +669,7 @@
 echo '93591068dc63af8d50a7925d528bc0cccdd705232c529b6162619fe28dddaf115e8a460b1842877d35160bd7ed480c1bd0bdbec57d1f359085bd1814e0c1c242  tornado-4.0.2.tar.gz' > tornado-4.0.2.tar.gz.sha512
 echo '0d928ed688ed940d460fa8f8d574a9819dccc4e030d735a8c7db71b59287ee50fa741a08249e356c78356b03c2174f2f2699f05aa7dc3d380ed47d8d7bab5408  zeromq-4.0.5.tar.gz' > zeromq-4.0.5.tar.gz.sha512
 echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
+echo '38a89aad89dc9aa682dbfbca623e2f69511f5e20d4a3526c01aabbc7e93ae78f20aac566676b431e111540b41540a1c4f644ce4174e7ecf052318612075e02dc  setuptools-16.0.tar.gz' > setuptools-16.0.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_ytproject $ZLIB.tar.gz
@@ -660,10 +684,11 @@
 [ $INST_SCIPY -eq 1 ] && get_ytproject $SCIPY.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject $LAPACK.tar.gz
-get_ytproject $PYTHON.tgz
+[ $INST_HG -eq 1 ] && get_ytproject $MERCURIAL.tar.gz
+[ $INST_PY3 -eq 1 ] && get_ytproject $PYTHON3.tgz
+get_ytproject $PYTHON2.tgz
 get_ytproject $NUMPY.tar.gz
 get_ytproject $MATPLOTLIB.tar.gz
-get_ytproject $MERCURIAL.tar.gz
 get_ytproject $IPYTHON.tar.gz
 get_ytproject $H5PY.tar.gz
 get_ytproject $CYTHON.tar.gz
@@ -671,6 +696,7 @@
 get_ytproject $NOSE.tar.gz
 get_ytproject $PYTHON_HGLIB.tar.gz
 get_ytproject $SYMPY.tar.gz
+get_ytproject $SETUPTOOLS.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e $BZLIB/done ]
@@ -787,11 +813,11 @@
     fi
 fi
 
-if [ ! -e $PYTHON/done ]
+if [ ! -e $PYTHON2/done ]
 then
-    echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e $PYTHON ] && tar xfz $PYTHON.tgz
-    cd $PYTHON
+    echo "Installing Python 2. This may take a while, but don't worry. yt loves you."
+    [ ! -e $PYTHON2 ] && tar xfz $PYTHON2.tgz
+    cd $PYTHON2
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -802,7 +828,30 @@
     cd ..
 fi
 
-export PYTHONPATH=${DEST_DIR}/lib/python2.7/site-packages/
+if [ $INST_PY3 -eq 1 ]
+then
+    if [ ! -e $PYTHON3/done ]
+    then
+        echo "Installing Python 3. Because two Pythons are better than one."
+        [ ! -e $PYTHON3 ] && tar xfz $PYTHON3.tgz
+        cd $PYTHON3
+        ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
+
+        ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( ln -sf ${DEST_DIR}/bin/python3.4 ${DEST_DIR}/bin/pyyt 2>&1 ) 1>> ${LOG_FILE}
+        ( ln -sf ${DEST_DIR}/bin/python3.4 ${DEST_DIR}/bin/python 2>&1 ) 1>> ${LOG_FILE}
+        ( ln -sf ${DEST_DIR}/bin/python3-config ${DEST_DIR}/bin/python-config 2>&1 ) 1>> ${LOG_FILE}
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
+        touch done
+        cd ..
+    fi
+fi
+
+export PYTHONPATH=${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages/
+
+# Install setuptools
+do_setup_py $SETUPTOOLS
 
 if [ $INST_HG -eq 1 ]
 then
@@ -847,12 +896,10 @@
 
 # This fixes problems with gfortran linking.
 unset LDFLAGS
-
-echo "Installing distribute"
-( ${DEST_DIR}/bin/python2.7 ${YT_DIR}/distribute_setup.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
-
+ 
 echo "Installing pip"
-( ${DEST_DIR}/bin/easy_install-2.7 pip 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( ${GETFILE} https://bootstrap.pypa.io/get-pip.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( ${DEST_DIR}/bin/${PYTHON_EXEC} get-pip.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
 if [ $INST_SCIPY -eq 0 ]
 then
@@ -986,13 +1033,14 @@
 
 echo "Installing yt"
 [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
-( export PATH=$DEST_DIR/bin:$PATH ; ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( export PATH=$DEST_DIR/bin:$PATH ; ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
 touch done
 cd $MY_PWD
 
-if !( ( ${DEST_DIR}/bin/python2.7 -c "import readline" 2>&1 )>> ${LOG_FILE})
+if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import readline" 2>&1 )>> ${LOG_FILE}) || \
+	[[ "${MYOS##Darwin}" != "${MYOS}" && $INST_PY3 -eq 1 ]] 
 then
-    if !( ( ${DEST_DIR}/bin/python2.7 -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
+    if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
     then
         echo "Installing pure-python readline"
         ( ${DEST_DIR}/bin/pip install gnureadline 2>&1 ) 1>> ${LOG_FILE}

diff -r 36e31b962fd85a0a33853bc706d36ce7c40b13d4 -r f45c19ad2d3d334712e860a63b0854ef5dc3bb54 doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -116,7 +116,7 @@
   the width of the smallest grid element in the simulation from the
   last data snapshot (i.e. the one where time has evolved the
   longest) in the time series:
-  ``ds_last.index.get_smallest_dx() * ds_last['mpch']``.
+  ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``.
 * ``total_particles``, if supplied, this is a pre-calculated
   total number of dark matter
   particles present in the simulation. For example, this is useful

diff -r 36e31b962fd85a0a33853bc706d36ce7c40b13d4 -r f45c19ad2d3d334712e860a63b0854ef5dc3bb54 doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -47,10 +47,30 @@
    frb = FixedResolutionBuffer(sl, (0.3, 0.5, 0.6, 0.8), (512, 512))
    my_image = frb["density"]
 
-This resultant array can be saved out to disk or visualized using a
-hand-constructed Matplotlib image, for instance using
+This image may then be used in a hand-constructed Matplotlib figure, for instance using
 :func:`~matplotlib.pyplot.imshow`.
 
+The buffer arrays can be saved out to disk in either HDF5 or FITS format:
+ 
+.. code-block:: python
+
+   frb.export_hdf5("my_images.h5", fields=["density","temperature"])
+   frb.export_fits("my_images.fits", fields=["density","temperature"],
+                   clobber=True, units="kpc")
+
+In the FITS case, there is an option for setting the ``units`` of the coordinate system in
+the file. If you want to overwrite a file with the same name, set ``clobber=True``. 
+
+The :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer` can even be exported
+as a 2D dataset itself, which may be operated on in the same way as any other dataset in yt:
+
+.. code-block:: python
+
+   ds_frb = frb.export_dataset(fields=["density","temperature"], nprocs=8)
+   sp = ds_frb.sphere("c", (100.,"kpc"))
+
+where the ``nprocs`` parameter can be used to decompose the image into ``nprocs`` grids.
+
 .. _generating-profiles-and-histograms:
 
 Profiles and Histograms

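A condensed, hedged sketch of the buffer-export workflow documented above (the
dataset path, the slice construction, and the final print are illustrative;
the export calls themselves are taken from the docs hunk):

    import yt
    from yt.visualization.fixed_resolution import FixedResolutionBuffer

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # illustrative path
    sl = ds.slice(2, 0.5)
    frb = FixedResolutionBuffer(sl, (0.3, 0.5, 0.6, 0.8), (512, 512))
    # Save the buffer to FITS, overwriting any existing file of the same name
    frb.export_fits("my_images.fits", fields=["density", "temperature"],
                    clobber=True, units="kpc")
    # Re-import the buffer as a standalone 2D dataset, decomposed into 8 grids
    ds_frb = frb.export_dataset(fields=["density", "temperature"], nprocs=8)
    sp = ds_frb.sphere("c", (100., "kpc"))
    print(sp["density"].max())  # behaves like any other yt data object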
diff -r 36e31b962fd85a0a33853bc706d36ce7c40b13d4 -r f45c19ad2d3d334712e860a63b0854ef5dc3bb54 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -469,6 +469,8 @@
   first image in the primary file. If this is not the case,
   yt will raise a warning and will not load this field.
 
+.. _additional_fits_options:
+
 Additional Options
 ^^^^^^^^^^^^^^^^^^
 
@@ -570,6 +572,35 @@
 ``WCSAxes`` is still in an experimental state, but as its functionality improves it will be
 utilized more here.
 
+``create_spectral_slabs``
+"""""""""""""""""""""""""
+
+.. note::
+
+  The following functionality requires the `spectral-cube <http://spectral-cube.readthedocs.org>`_
+  library to be installed. 
+  
+If you have a spectral intensity dataset and would like to extract emission in slabs of a
+certain width along the spectral axis, ``create_spectral_slabs`` can be used to generate a
+dataset with these slabs as separate fields. In this example, we use it to extract individual
+lines from an intensity cube:
+
+.. code-block:: python
+
+  slab_centers = {'13CN': (218.03117, 'GHz'),
+                  'CH3CH2CHO': (218.284256, 'GHz'),
+                  'CH3NH2': (218.40956, 'GHz')}
+  slab_width = (0.05, "GHz")
+  ds = create_spectral_slabs("intensity_cube.fits",
+                             slab_centers, slab_width,
+                             nan_mask=0.0)
+
+All keyword arguments to ``create_spectral_slabs`` are passed on to ``load`` when creating the dataset
+(see :ref:`additional_fits_options` above). In the returned dataset, the different slabs will be
+different fields, with the field names taken from the keys in ``slab_centers``. The WCS coordinates 
+on the spectral axis are reset so that the center of the domain along this axis is zero, and the 
+left and right edges of the domain along this axis are :math:`\pm` ``0.5*slab_width``.
+
 Examples of Using FITS Data
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 

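A short follow-on sketch (the import location and the plotting step are
assumptions, not part of the docs hunk; the slab fields are named after the
``slab_centers`` keys, per the text above):

    import yt
    from yt.frontends.fits.misc import create_spectral_slabs  # assumed path

    slab_centers = {'13CN': (218.03117, 'GHz'),
                    'CH3CH2CHO': (218.284256, 'GHz'),
                    'CH3NH2': (218.40956, 'GHz')}
    ds = create_spectral_slabs("intensity_cube.fits", slab_centers,
                               (0.05, "GHz"), nan_mask=0.0)
    print(ds.field_list)                 # one field per slab center
    slc = yt.SlicePlot(ds, "z", "13CN")  # emission from a single line
    slc.save()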
diff -r 36e31b962fd85a0a33853bc706d36ce7c40b13d4 -r f45c19ad2d3d334712e860a63b0854ef5dc3bb54 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -213,10 +213,31 @@
 ++++++++++++++++++++++++++++++++++++++
 
 To install yt from source, you must make sure you have yt's dependencies
-installed on your system.  These include: a C compiler, ``HDF5``, ``python``,
-``Cython``, ``NumPy``, ``matplotlib``, ``sympy``, and ``h5py``. From here, you
-can use ``pip`` (which comes with ``Python``) to install the latest stable
-version of yt:
+installed on your system. 
+
+If you use a Linux OS, use your distro's package manager to install these yt
+dependencies on your system:
+
+- ``HDF5``
+- ``zeromq``
+- ``sqlite`` 
+- ``mercurial``
+
+Then install the required Python packages with ``pip``:
+
+.. code-block:: bash
+
+  $ pip install -r requirements.txt
+
+If you're using IPython notebooks, you can install its dependencies
+with ``pip`` as well:
+
+.. code-block:: bash
+
+  $ pip install -r optional-requirements.txt
+
+From here, you can use ``pip`` (which comes with ``Python``) to install the latest
+stable version of yt:
 
 .. code-block:: bash
 

diff -r 36e31b962fd85a0a33853bc706d36ce7c40b13d4 -r f45c19ad2d3d334712e860a63b0854ef5dc3bb54 doc/source/visualizing/FITSImageBuffer.ipynb
--- a/doc/source/visualizing/FITSImageBuffer.ipynb
+++ /dev/null
@@ -1,205 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:872f7525edd3c1ee09c67f6ecdd8552218df05ebe5ab73bcab55654edf0ac2bb"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt has capabilities for writing 2D and 3D uniformly gridded data generated from datasets to FITS files. This is via the `FITSImageBuffer` class, which has subclasses `FITSSlice` and `FITSProjection` to write slices and projections directly to FITS. We'll test this out on an Athena dataset."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "%matplotlib inline\n",
-      "import yt\n",
-      "from yt.utilities.fits_image import FITSImageBuffer, FITSSlice, FITSProjection"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", parameters={\"length_unit\":(1.0,\"Mpc\"),\n",
-      "                                                               \"mass_unit\":(1.0e14,\"Msun\"),\n",
-      "                                                               \"time_unit\":(1.0,\"Myr\")})"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "To demonstrate a useful example of creating a FITS file, let's first make a `ProjectionPlot`:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prj = yt.ProjectionPlot(ds, \"z\", [\"temperature\"], weight_field=\"density\", width=(500.,\"kpc\"))\n",
-      "prj.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Suppose that we wanted to write this projection to a FITS file for analysis and visualization in other programs, such as ds9. We can do that using `FITSProjection`:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prj_fits = FITSProjection(ds, \"z\", [\"temperature\"], weight_field=\"density\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "which took the same parameters as `ProjectionPlot` except the width, because `FITSProjection` and `FITSSlice` always make slices and projections of the width of the domain size, at the finest resolution available in the simulation, in a unit determined to be appropriate for the physical size of the dataset. `prj_fits` is a full-fledged FITS file in memory, specifically an [AstroPy `HDUList`](http://astropy.readthedocs.org/en/latest/io/fits/api/hdulists.html) object. This means that we can use all of the methods inherited from `HDUList`:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prj_fits.info()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "`info` shows us the contents of the virtual FITS file. We can also look at the header for the `\"temperature\"` image, like so:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prj_fits[\"temperature\"].header"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "where we can see that the temperature units are in Kelvin and the cell widths are in kiloparsecs. The projection can be written to disk using the `writeto` method:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prj_fits.writeto(\"sloshing.fits\", clobber=True)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Since yt can read FITS image files, it can be loaded up just like any other dataset:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds2 = yt.load(\"sloshing.fits\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "and we can make a `SlicePlot` of the 2D image, which shows the same data as the previous image:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "slc2 = yt.SlicePlot(ds2, \"z\", [\"temperature\"], width=(500.,\"kpc\"))\n",
-      "slc2.set_log(\"temperature\", True)\n",
-      "slc2.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If you want more fine-grained control over what goes into the FITS file, you can call `FITSImageBuffer` directly, with various kinds of inputs. For example, you could use a `FixedResolutionBuffer`, and specify you want the units in parsecs instead:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "slc3 = ds.slice(0, 0.0)\n",
-      "frb = slc3.to_frb((500.,\"kpc\"), 800)\n",
-      "fib = FITSImageBuffer(frb, fields=[\"density\",\"temperature\"], units=\"pc\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Finally, a 3D FITS cube can be created from a covering grid:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cvg = ds.covering_grid(ds.index.max_level, [-0.5,-0.5,-0.5], [64, 64, 64], fields=[\"density\",\"temperature\"])\n",
-      "fib = FITSImageBuffer(cvg, fields=[\"density\",\"temperature\"], units=\"Mpc\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 36e31b962fd85a0a33853bc706d36ce7c40b13d4 -r f45c19ad2d3d334712e860a63b0854ef5dc3bb54 doc/source/visualizing/FITSImageData.ipynb
--- /dev/null
+++ b/doc/source/visualizing/FITSImageData.ipynb
@@ -0,0 +1,409 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:c7de5ef190feaa2289595aec7eaa05db02fd535e408e0d04aa54088b0bd3ebae"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "yt has capabilities for writing 2D and 3D uniformly gridded data generated from datasets to FITS files. This is via the `FITSImageData` class. We'll test these capabilities out on an Athena dataset."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import yt\n",
+      "from yt.utilities.fits_image import FITSImageData, FITSProjection"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", parameters={\"length_unit\":(1.0,\"Mpc\"),\n",
+      "                                                               \"mass_unit\":(1.0e14,\"Msun\"),\n",
+      "                                                               \"time_unit\":(1.0,\"Myr\")})"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 2,
+     "metadata": {},
+     "source": [
+      "Creating FITS images from Slices and Projections"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "There are several ways to make a `FITSImageData` instance. The most intuitive ways are to use the `FITSSlice`, `FITSProjection`, `FITSOffAxisSlice`, and `FITSOffAxisProjection` classes to write slices and projections directly to FITS. To demonstrate a useful example of creating a FITS file, let's first make a `ProjectionPlot`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"temperature\"], weight_field=\"density\", width=(500.,\"kpc\"))\n",
+      "prj.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Suppose that we wanted to write this projection to a FITS file for analysis and visualization in other programs, such as ds9. We can do that using `FITSProjection`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits = FITSProjection(ds, \"z\", [\"temperature\"], weight_field=\"density\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "which took the same parameters as `ProjectionPlot` except the width, because `FITSProjection` and `FITSSlice` always make slices and projections of the width of the domain size, at the finest resolution available in the simulation, in a unit determined to be appropriate for the physical size of the dataset."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Because `FITSImageData` inherits from the [AstroPy `HDUList`](http://astropy.readthedocs.org/en/latest/io/fits/api/hdulists.html) class, we can call its methods. For example, `info` shows us the contents of the virtual FITS file:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits.info()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also look at the header for a particular field:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits[\"temperature\"].header"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "where we can see that the temperature units are in Kelvin and the cell widths are in kiloparsecs. If we want the raw image data with units, we can call `get_data`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits.get_data(\"temperature\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can use the `set_unit` method to change the units of a particular field:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits.set_unit(\"temperature\",\"R\")\n",
+      "prj_fits.get_data(\"temperature\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The image can be written to disk using the `writeto` method:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits.writeto(\"sloshing.fits\", clobber=True)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Since yt can read FITS image files, it can be loaded up just like any other dataset:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds2 = yt.load(\"sloshing.fits\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "and we can make a `SlicePlot` of the 2D image, which shows the same data as the previous image:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc2 = yt.SlicePlot(ds2, \"z\", [\"temperature\"], width=(500.,\"kpc\"))\n",
+      "slc2.set_log(\"temperature\", True)\n",
+      "slc2.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 2,
+     "metadata": {},
+     "source": [
+      "Using `FITSImageData` directly"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If you want more fine-grained control over what goes into the FITS file, you can call `FITSImageData` directly, with various kinds of inputs. For example, you could use a `FixedResolutionBuffer`, and specify you want the units in parsecs instead:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc3 = ds.slice(0, 0.0)\n",
+      "frb = slc3.to_frb((500.,\"kpc\"), 800)\n",
+      "fid_frb = FITSImageData(frb, fields=[\"density\",\"temperature\"], units=\"pc\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "A 3D FITS cube can also be created from a covering grid:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "cvg = ds.covering_grid(ds.index.max_level, [-0.5,-0.5,-0.5], [64, 64, 64], fields=[\"density\",\"temperature\"])\n",
+      "fid_cvg = FITSImageData(cvg, fields=[\"density\",\"temperature\"], units=\"Mpc\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 2,
+     "metadata": {},
+     "source": [
+      "Other `FITSImageData` Methods"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "A `FITSImageData` instance can be generated from one previously written to disk using the `from_file` classmethod:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "fid = FITSImageData.from_file(\"sloshing.fits\")\n",
+      "fid.info()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Multiple `FITSImageData` can be combined to create a new one, provided that the coordinate information is the same:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits2 = FITSProjection(ds, \"z\", [\"density\"])\n",
+      "prj_fits3 = FITSImageData.from_images([prj_fits, prj_fits2])\n",
+      "prj_fits3.info()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Alternatively, individual fields can be popped as well:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "dens_fits = prj_fits3.pop(\"density\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "dens_fits.info()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits3.info()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "So far, the FITS images we have shown have linear spatial coordinates. One may want to take a projection of an object and make a crude mock observation out of it, with celestial coordinates. For this, we can use the `create_sky_wcs` method. Specify a center (RA, Dec) coordinate in degrees, as well as a linear scale in terms of angle per distance:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "sky_center = [30.,45.] # in degrees\n",
+      "sky_scale = (2.5, \"arcsec/kpc\") # could also use a YTQuantity\n",
+      "prj_fits.create_sky_wcs(sky_center, sky_scale, ctype=[\"RA---TAN\",\"DEC--TAN\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "By the default, a tangent RA/Dec projection is used, but one could also use another projection using the `ctype` keyword. We can now look at the header and see it has the appropriate WCS:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits[\"temperature\"].header"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally, we can add header keywords to a single field or for all fields in the FITS image using `update_header`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "fid_frb.update_header(\"all\", \"time\", 0.1) # Update all the fields\n",
+      "fid_frb.update_header(\"temperature\", \"scale\", \"Rankine\") # Update just one field"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print fid_frb[\"density\"].header[\"time\"]\n",
+      "print fid_frb[\"temperature\"].header[\"scale\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

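Taken together, the new class supports a round trip like the following (a
condensed sketch of the notebook cells above, with the dataset parameters
copied from the notebook):

    import yt
    from yt.utilities.fits_image import FITSImageData, FITSProjection

    ds = yt.load("MHDSloshing/virgo_low_res.0054.vtk",
                 parameters={"length_unit": (1.0, "Mpc"),
                             "mass_unit": (1.0e14, "Msun"),
                             "time_unit": (1.0, "Myr")})
    prj_fits = FITSProjection(ds, "z", ["temperature"], weight_field="density")
    prj_fits.set_unit("temperature", "R")             # change image units
    prj_fits.create_sky_wcs([30., 45.], (2.5, "arcsec/kpc"))  # celestial WCS
    prj_fits.writeto("sloshing.fits", clobber=True)   # write to disk
    fid = FITSImageData.from_file("sloshing.fits")    # round-trip from disk
    fid.info()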
diff -r 36e31b962fd85a0a33853bc706d36ce7c40b13d4 -r f45c19ad2d3d334712e860a63b0854ef5dc3bb54 doc/source/visualizing/sketchfab.rst
--- a/doc/source/visualizing/sketchfab.rst
+++ b/doc/source/visualizing/sketchfab.rst
@@ -56,7 +56,7 @@
 
    import yt
    ds = yt.load("/data/workshop2012/IsolatedGalaxy/galaxy0030/galaxy0030")
-   sphere = ds.sphere("max", (1.0, "mpc"))
+   sphere = ds.sphere("max", (1.0, "Mpc"))
    surface = ds.surface(sphere, "density", 1e-27)
 
 This object, ``surface``, can be queried for values on the surface.  For
@@ -172,7 +172,7 @@
    trans = [1.0, 0.5]
    filename = './surfaces'
 
-   sphere = ds.sphere("max", (1.0, "mpc"))
+   sphere = ds.sphere("max", (1.0, "Mpc"))
    for i,r in enumerate(rho):
        surf = ds.surface(sphere, 'density', r)
        surf.export_obj(filename, transparency = trans[i], color_field='temperature', plot_index = i)
@@ -248,7 +248,7 @@
        return (data['density']*data['density']*np.sqrt(data['temperature']))
    add_field("emissivity", function=_Emissivity, units=r"g*K/cm**6")
 
-   sphere = ds.sphere("max", (1.0, "mpc"))
+   sphere = ds.sphere("max", (1.0, "Mpc"))
    for i,r in enumerate(rho):
        surf = ds.surface(sphere, 'density', r)
        surf.export_obj(filename, transparency = trans[i],

diff -r 36e31b962fd85a0a33853bc706d36ce7c40b13d4 -r f45c19ad2d3d334712e860a63b0854ef5dc3bb54 doc/source/visualizing/writing_fits_images.rst
--- a/doc/source/visualizing/writing_fits_images.rst
+++ b/doc/source/visualizing/writing_fits_images.rst
@@ -3,4 +3,4 @@
 Writing FITS Images
 ==========================
 
-.. notebook:: FITSImageBuffer.ipynb
\ No newline at end of file
+.. notebook:: FITSImageData.ipynb
\ No newline at end of file

diff -r 36e31b962fd85a0a33853bc706d36ce7c40b13d4 -r f45c19ad2d3d334712e860a63b0854ef5dc3bb54 optional-requirements.txt
--- /dev/null
+++ b/optional-requirements.txt
@@ -0,0 +1,1 @@
+ipython[notebook]
\ No newline at end of file

diff -r 36e31b962fd85a0a33853bc706d36ce7c40b13d4 -r f45c19ad2d3d334712e860a63b0854ef5dc3bb54 requirements.txt
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,6 @@
+numpy==1.9.2 
+matplotlib==1.4.3 
+Cython==0.22 
+h5py==2.5.0 
+nose==1.3.6 
+sympy==0.7.6 

diff -r 36e31b962fd85a0a33853bc706d36ce7c40b13d4 -r f45c19ad2d3d334712e860a63b0854ef5dc3bb54 setup.py
--- a/setup.py
+++ b/setup.py
@@ -13,11 +13,6 @@
     sys.exit(1)
 
 import setuptools
-from distutils.version import StrictVersion
-if StrictVersion(setuptools.__version__) < StrictVersion('0.7.0'):
-    import distribute_setup
-    distribute_setup.use_setuptools()
-
 from distutils.command.build_py import build_py
 from numpy.distutils.misc_util import appendpath
 from numpy.distutils.command import install_data as np_install_data

diff -r 36e31b962fd85a0a33853bc706d36ce7c40b13d4 -r f45c19ad2d3d334712e860a63b0854ef5dc3bb54 yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -16,12 +16,9 @@
 import numpy as np
 from yt.utilities.physical_constants import \
     charge_proton_cgs, \
-    cm_per_km, \
-    km_per_cm, \
     mass_electron_cgs, \
     speed_of_light_cgs
 
-
 def voigt(a,u):
     """
     NAME:
@@ -140,67 +137,75 @@
     k1 = k1.astype(np.float64).clip(0)
     return k1
 
-def tau_profile(lam0, fval, gamma, vkms, column_density, 
-                deltav=None, delta_lambda=None,
+def tau_profile(lambda_0, f_value, gamma, v_doppler, column_density,
+                delta_v=None, delta_lambda=None,
                 lambda_bins=None, n_lambda=12000, dlambda=0.01):
-    """
+    r"""
     Create an optical depth vs. wavelength profile for an 
     absorption line using a voigt profile.
-    :param lam0 (float): central wavelength (angstroms).
-    :param fval (float): f-value.
-    :param gamma (float): gamma value.
-    :param vkms (float): doppler b-parameter.
-    :param column_density (float): column density (cm^-2).
-    :param deltav (float): velocity offset from lam0 (km/s).
-    Default: None (no shift).
-    :param delta_lambda (float): wavelength offset in angstroms.
-    Default: None (no shift).
-    :param lambda_bins (array): array of wavelengths in angstroms.
-    Default: None
-    :param n_lambda (float): size of lambda bins to create 
-    array if lambda_bins is None.  Default: 12000
-    :param dlambda (float): lambda bin width if lambda_bins is 
-    None. Default: 0.01
+
+    Parameters
+    ----------
+    
+    lambda_0 : float YTQuantity in length units
+       central wavelength.
+    f_value : float
+       absorption line f-value.
+    gamma : float
+       absorption line gamma value.
+    v_doppler : float YTQuantity in velocity units
+       doppler b-parameter.
+    column_density : float YTQuantity in (length units)^-2
+       column density.
+    delta_v : float YTQuantity in velocity units
+       velocity offset from lambda_0.
+       Default: None (no shift).
+    delta_lambda : float YTQuantity in length units
+        wavelength offset.
+        Default: None (no shift).
+    lambda_bins : YTArray in length units
+        wavelength array for line deposition.  If None, one will be 
+        created using n_lambda and dlambda.
+        Default: None.
+    n_lambda : int
+        size of lambda bins to create if lambda_bins is None.
+        Default: 12000.
+    dlambda : float 
+        lambda bin width in angstroms if lambda_bins is None.
+        Default: 0.01.
+        
     """
 
-    ## constants
-    me = mass_electron_cgs              # grams mass electron 
-    e = charge_proton_cgs               # esu 
-    c = speed_of_light_cgs * km_per_cm  # km/s
-    ccgs = speed_of_light_cgs           # cm/s 
-
-    ## shift lam0 by deltav
-    if deltav is not None:
-        lam1 = lam0 * (1 + deltav / c)
+    ## shift lambda_0 by delta_v
+    if delta_v is not None:
+        lam1 = lamba_0 * (1 + delta_v / speed_of_light_cgs)
     elif delta_lambda is not None:
-        lam1 = lam0 + delta_lambda
+        lam1 = lamba_0 + delta_lambda
     else:
-        lam1 = lam0
+        lam1 = lamba_0
 
     ## conversions
-    vdop = vkms * cm_per_km           # in cm/s
-    lam0cgs = lam0 / 1.e8             # rest wavelength in cm
-    lam1cgs = lam1 / 1.e8             # line wavelength in cm
-    nu1 = ccgs / lam1cgs              # line freq in Hz
-    nudop = vdop / ccgs * nu1         # doppler width in Hz
-    lamdop = vdop / ccgs * lam1       # doppler width in Ang
+    nu1 = speed_of_light_cgs / lam1           # line freq in Hz
+    nudop = v_doppler / speed_of_light_cgs * nu1   # doppler width in Hz
+    lamdop = v_doppler / speed_of_light_cgs * lam1 # doppler width in Ang
 
     ## create wavelength
     if lambda_bins is None:
         lambda_bins = lam1 + \
             np.arange(n_lambda, dtype=np.float) * dlambda - \
-            n_lambda * dlambda / 2    # wavelength vector (angstroms)
-    nua = ccgs / (lambda_bins / 1.e8) # frequency vector (Hz)
+            n_lambda * dlambda / 2  # wavelength vector (angstroms)
+    nua = (speed_of_light_cgs / lambda_bins)  # frequency vector (Hz)
 
     ## tau_0
-    tau_X = np.sqrt(np.pi) * e**2 / (me * ccgs) * \
-        column_density * fval / vdop
-    tau0 = tau_X * lam0cgs
+    tau_X = np.sqrt(np.pi) * charge_proton_cgs**2 / \
+      (mass_electron_cgs * speed_of_light_cgs) * \
+      column_density * f_value / v_doppler
+    tau0 = tau_X * lamba_0
 
     # dimensionless frequency offset in units of doppler freq
-    x = (nua - nu1) / nudop
-    a = gamma / (4 * np.pi * nudop)   # damping parameter 
-    phi = voigt(a, x)                 # profile
-    tauphi = tau0 * phi               # profile scaled with tau0
+    x = ((nua - nu1) / nudop).in_units("")
+    a = (gamma / (4 * np.pi * nudop)).in_units("s")  # damping parameter 
+    phi = voigt(a, x)                                # line profile
+    tauphi = (tau0 * phi).in_units("")               # profile scaled with tau0
 
     return (lambda_bins, tauphi)
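
For reference, a minimal sketch of calling the refactored ``tau_profile`` with
unitful inputs, matching the call pattern used by ``AbsorptionSpectrum`` in this
changeset. The Lyman-alpha numbers below are illustrative assumptions, not part
of the commit:

    import numpy as np
    from yt.units.yt_array import YTArray, YTQuantity
    from yt.analysis_modules.absorption_spectrum.absorption_line import tau_profile

    # Illustrative H I Lyman-alpha line parameters
    lambda_0 = YTQuantity(1215.67, "angstrom")  # central wavelength
    f_value = 0.4164                            # oscillator strength
    gamma = 6.265e8                             # damping constant (implicitly 1/s)
    b_param = YTQuantity(30.0, "km/s")          # Doppler b-parameter
    N_col = YTQuantity(1.0e14, "cm**-2")        # column density

    # Deposit the line on an explicit wavelength grid
    lbins = YTArray(np.linspace(1210.0, 1220.0, 1000), "angstrom")
    lambda_bins, tau = tau_profile(lambda_0, f_value, gamma, b_param, N_col,
                                   delta_v=YTQuantity(100.0, "km/s"),
                                   lambda_bins=lbins)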

diff -r 36e31b962fd85a0a33853bc706d36ce7c40b13d4 -r f45c19ad2d3d334712e860a63b0854ef5dc3bb54 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -4,7 +4,7 @@
 
 
 """
-from __future__ import print_function
+
 from __future__ import absolute_import
 
 #-----------------------------------------------------------------------------
@@ -21,13 +21,12 @@
 from .absorption_line import tau_profile
 
 from yt.funcs import get_pbar, mylog
-from yt.units.yt_array import YTArray
+from yt.units.yt_array import YTArray, YTQuantity
 from yt.utilities.physical_constants import \
-    amu_cgs, boltzmann_constant_cgs, \
-    speed_of_light_cgs, km_per_cm
+    boltzmann_constant_cgs, \
+    speed_of_light_cgs
 from yt.utilities.on_demand_imports import _astropy
 
-speed_of_light_kms = speed_of_light_cgs * km_per_cm
 pyfits = _astropy.pyfits
 
 class AbsorptionSpectrum(object):
@@ -49,8 +48,10 @@
         self.tau_field = None
         self.flux_field = None
         self.spectrum_line_list = None
-        self.lambda_bins = np.linspace(lambda_min, lambda_max, n_lambda)
-        self.bin_width = (lambda_max - lambda_min) / float(n_lambda - 1)
+        self.lambda_bins = YTArray(np.linspace(lambda_min, lambda_max, n_lambda),
+                                   "angstrom")
+        self.bin_width = YTQuantity((lambda_max - lambda_min) / 
+                                    float(n_lambda - 1), "angstrom")
         self.line_list = []
         self.continuum_list = []
 
@@ -76,8 +77,10 @@
            mass of atom in amu.
         """
         self.line_list.append({'label': label, 'field_name': field_name,
-                               'wavelength': wavelength, 'f_value': f_value,
-                               'gamma': gamma, 'atomic_mass': atomic_mass,
+                               'wavelength': YTQuantity(wavelength, "angstrom"),
+                               'f_value': f_value,
+                               'gamma': gamma,
+                               'atomic_mass': YTQuantity(atomic_mass, "amu"),
                                'label_threshold': label_threshold})
 
     def add_continuum(self, label, field_name, wavelength,
@@ -209,16 +212,15 @@
                 # include factor of (1 + z) because our velocity is in proper frame.
                 delta_lambda += line['wavelength'] * (1 + field_data['redshift']) * \
                     field_data['velocity_los'] / speed_of_light_cgs
-            thermal_b = km_per_cm * np.sqrt((2 * boltzmann_constant_cgs *
-                                             field_data['temperature']) /
-                                            (amu_cgs * line['atomic_mass']))
-            thermal_b.convert_to_cgs()
+            thermal_b = np.sqrt((2 * boltzmann_constant_cgs *
+                                 field_data['temperature']) /
+                                line['atomic_mass'])
             center_bins = np.digitize((delta_lambda + line['wavelength']),
                                       self.lambda_bins)
 
             # ratio of line width to bin width
             width_ratio = ((line['wavelength'] + delta_lambda) * \
-                thermal_b / speed_of_light_kms / self.bin_width).value
+                           thermal_b / speed_of_light_cgs / self.bin_width).in_units("").d
 
             if (width_ratio < 1.0).any():
                 mylog.warn(("%d out of %d line components are unresolved, " +
@@ -240,11 +242,13 @@
                 my_bin_ratio = spectrum_bin_ratio
                 while True:
                     lambda_bins, line_tau = \
-                        tau_profile(line['wavelength'], line['f_value'],
-                                    line['gamma'], thermal_b[lixel],
-                                    column_density[lixel],
-                                    delta_lambda=delta_lambda[lixel],
-                                    lambda_bins=self.lambda_bins[left_index[lixel]:right_index[lixel]])
+                        tau_profile(
+                            line['wavelength'], line['f_value'],
+                            line['gamma'], thermal_b[lixel].in_units("km/s"),
+                            column_density[lixel],
+                            delta_lambda=delta_lambda[lixel],
+                            lambda_bins=self.lambda_bins[left_index[lixel]:right_index[lixel]])
+                        
                     # Widen wavelength window until optical depth reaches a max value at the ends.
                     if (line_tau[0] < max_tau and line_tau[-1] < max_tau) or \
                       (left_index[lixel] <= 0 and right_index[lixel] >= self.n_lambda):
@@ -260,7 +264,7 @@
                 if line['label_threshold'] is not None and \
                         column_density[lixel] >= line['label_threshold']:
                     if use_peculiar_velocity:
-                        peculiar_velocity = km_per_cm * field_data['velocity_los'][lixel]
+                        peculiar_velocity = field_data['velocity_los'][lixel].in_units("km/s")
                     else:
                         peculiar_velocity = 0.0
                     self.spectrum_line_list.append({'label': line['label'],
@@ -271,7 +275,7 @@
                                                     'redshift': field_data['redshift'][lixel],
                                                     'v_pec': peculiar_velocity})
                     pbar.update(i)
-            pbar.finish()
+                pbar.finish()
 
             del column_density, delta_lambda, thermal_b, \
                 center_bins, width_ratio, left_index, right_index
@@ -280,7 +284,7 @@
         """
         Write out list of spectral lines.
         """
-        print("Writing spectral line list: %s." % filename)
+        mylog.info("Writing spectral line list: %s." % filename)
         self.spectrum_line_list.sort(key=lambda obj: obj['wavelength'])
         f = open(filename, 'w')
         f.write('#%-14s %-14s %-12s %-12s %-12s %-12s\n' %
@@ -295,7 +299,7 @@
         """
         Write spectrum to an ascii file.
         """
-        print("Writing spectrum to ascii file: %s." % filename)
+        mylog.info("Writing spectrum to ascii file: %s." % filename)
         f = open(filename, 'w')
         f.write("# wavelength[A] tau flux\n")
         for i in range(self.lambda_bins.size):
@@ -307,7 +311,7 @@
         """
         Write spectrum to a fits file.
         """
-        print("Writing spectrum to fits file: %s." % filename)
+        mylog.info("Writing spectrum to fits file: %s." % filename)
         col1 = pyfits.Column(name='wavelength', format='E', array=self.lambda_bins)
         col2 = pyfits.Column(name='flux', format='E', array=self.flux_field)
         cols = pyfits.ColDefs([col1, col2])
@@ -319,7 +323,7 @@
         Write spectrum to an hdf5 file.
 
         """
-        print("Writing spectrum to hdf5 file: %s." % filename)
+        mylog.info("Writing spectrum to hdf5 file: %s." % filename)
         output = h5py.File(filename, 'w')
         output.create_dataset('wavelength', data=self.lambda_bins)
         output.create_dataset('tau', data=self.tau_field)

This diff is so big that we needed to truncate the remainder.
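
The thermal broadening change above now computes the Doppler b-parameter
directly in yt units via b = sqrt(2*k_B*T/m). A standalone sketch of the same
formula, with illustrative numbers not taken from this changeset:

    import numpy as np
    from yt.units.yt_array import YTQuantity
    from yt.utilities.physical_constants import boltzmann_constant_cgs

    T = YTQuantity(1.0e4, "K")       # illustrative gas temperature
    m = YTQuantity(1.00794, "amu")   # atomic mass of hydrogen

    # yt carries the units through the arithmetic; convert at the end
    thermal_b = np.sqrt(2 * boltzmann_constant_cgs * T / m)
    print(thermal_b.in_units("km/s"))  # about 12.8 km/s for H I at 10^4 K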

https://bitbucket.org/yt_analysis/yt/commits/e3d0784b7991/
Changeset:   e3d0784b7991
Branch:      yt
User:        jzuhone
Date:        2015-06-18 15:12:48+00:00
Summary:     Merge
Affected #:  4 files

diff -r dc2467c4eae70b9f185adf2cba8f61b95f65fea0 -r e3d0784b79910a6d6af653572f4ab8bb644aacf0 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -139,6 +139,12 @@
 the optional keyword ``thermal_broad`` is set to ``True``, the spectral
 lines will be thermally broadened.
 
+.. note:: 
+
+   ``SpectralModel`` objects based on XSPEC models (both the thermal 
+   emission and Galactic absorption models mentioned below) only work 
+   in Python 2.7, since currently PyXspec only works with Python 2.x. 
+   
 Now that we have our ``SpectralModel`` that gives us a spectrum, we need
 to connect this model to a ``PhotonModel`` that will connect the field
 data in the ``data_source`` to the spectral model to actually generate
@@ -148,7 +154,8 @@
 .. code:: python
 
     thermal_model = ThermalPhotonModel(apec_model, X_H=0.75, Zmet=0.3,
-                                       photons_per_chunk=100000000)
+                                       photons_per_chunk=100000000,
+                                       method=1)
 
 Where we pass in the ``SpectralModel``, and can optionally set values for
 the hydrogen mass fraction ``X_H`` and metallicity ``Z_met``. If
@@ -165,6 +172,18 @@
 this parameter needs to be set higher, or if you are looking to decrease memory
 usage, you might set this parameter lower.
 
+The ``method`` keyword argument is also optional, and determines how the individual
+photon energies are generated from the spectrum. It may be set to one of two values:
+
+* ``method=1``: Construct the cumulative distribution function of the spectrum and invert
+  it, using uniformly drawn random numbers to determine the photon energies (fast, but relies
+  on construction of the CDF and interpolation between the points, so for some spectra it
+  may not be accurate enough). 
+* ``method=2``: Generate the photon energies from the spectrum using an acceptance-rejection
+  technique (accurate, but likely to be slow). 
+
+``method=1`` (the default) should be sufficient for most cases. 
+
 Next, we need to specify "fiducial" values for the telescope collecting
 area, exposure time, and cosmological redshift. Remember, the initial
 photon generation will act as a source for Monte-Carlo sampling for more
@@ -191,12 +210,27 @@
 By default, the angular diameter distance to the object is determined
 from the ``cosmology`` and the cosmological ``redshift``. If a
 ``Cosmology`` instance is not provided, one will be made from the
-default cosmological parameters. If your source is local to the galaxy,
-you can set its distance directly, using a tuple, e.g.
-``dist=(30, "kpc")``. In this case, the ``redshift`` and ``cosmology``
-will be ignored. Finally, if the photon generating function accepts any
-parameters, they can be passed to ``from_scratch`` via a ``parameters``
-dictionary.
+default cosmological parameters. The ``center`` keyword argument specifies
+the center of the photon distribution, and the photon positions will be 
+rescaled with this value as the origin. This argument accepts the following
+values:
+
+* A NumPy array or list corresponding to the coordinates of the center in
+  units of code length. 
+* A ``YTArray`` corresponding to the coordinates of the center in some
+  length units. 
+* ``"center"`` or ``"c"`` corresponds to the domain center. 
+* ``"max"`` or ``"m"`` corresponds to the location of the maximum gas density. 
+
+If ``center`` is not specified, ``from_scratch`` will attempt to use the 
+``"center"`` field parameter of the ``data_source``. 
+
+``from_scratch`` takes a few other optional keyword arguments. If your 
+source is local to the galaxy, you can set its distance directly, using 
+a tuple, e.g. ``dist=(30, "kpc")``. In this case, the ``redshift`` and 
+``cosmology`` will be ignored. Finally, if the photon generating 
+function accepts any parameters, they can be passed to ``from_scratch`` 
+via a ``parameters`` dictionary.
 
 At this point, the ``photons`` are distributed in the three-dimensional
 space of the ``data_source``, with energies in the rest frame of the
@@ -265,7 +299,7 @@
     abs_model = TableAbsorbModel("tbabs_table.h5", 0.1)
 
 Now we're ready to project the photons. First, we choose a line-of-sight
-vector ``L``. Second, we'll adjust the exposure time and the redshift.
+vector ``normal``. Second, we'll adjust the exposure time and the redshift.
 Third, we'll pass in the absorption ``SpectrumModel``. Fourth, we'll
 specify a ``sky_center`` in RA,DEC on the sky in degrees.
 
@@ -274,26 +308,40 @@
 course far short of a full simulation of a telescope ray-trace, but it's
 a quick-and-dirty way to get something close to the real thing. We'll
 discuss how to get your simulated events into a format suitable for
-reading by telescope simulation codes later.
+reading by telescope simulation codes later. If you just want to convolve 
+the photons with an ARF, you may specify that as the only response, but some
+ARFs are unnormalized and still require the RMF for normalization. Check with
+the documentation associated with these files for details. If we are using the
+RMF to convolve energies, we must set ``convolve_energies=True``. 
 
 .. code:: python
 
     ARF = "chandra_ACIS-S3_onaxis_arf.fits"
     RMF = "chandra_ACIS-S3_onaxis_rmf.fits"
-    L = [0.0,0.0,1.0]
-    events = photons.project_photons(L, exp_time_new=2.0e5, redshift_new=0.07, absorb_model=abs_model,
-                                     sky_center=(187.5,12.333), responses=[ARF,RMF])
+    normal = [0.0,0.0,1.0]
+    events = photons.project_photons(normal, exp_time_new=2.0e5, redshift_new=0.07, dist_new=None, 
+                                     absorb_model=abs_model, sky_center=(187.5,12.333), responses=[ARF,RMF], 
+                                     convolve_energies=True, no_shifting=False, north_vector=None,
+                                     psf_sigma=None)
 
-Also, the optional keyword ``psf_sigma`` specifies a Gaussian standard
-deviation to scatter the photon sky positions around with, providing a
-crude representation of a PSF.
+In this case, we chose a three-vector ``normal`` to specify an arbitrary 
+line-of-sight, but ``"x"``, ``"y"``, or ``"z"`` could also be chosen to 
+project along one of those axes. 
 
-.. warning::
+``project_photons`` takes several other optional keyword arguments. 
 
-   The binned images that result, even if you convolve with responses,
-   are still of the same resolution as the finest cell size of the
-   simulation dataset. If you want a more accurate simulation of a
-   particular X-ray telescope, you should check out `Storing events for future use and for reading-in by telescope simulators`_.
+* ``no_shifting`` (default ``False``) controls whether or not Doppler 
+  shifting of photon energies is turned on. 
+* ``dist_new`` is a (value, unit) tuple that is used to set a new
+  angular diameter distance by hand instead of having it determined
+  by the cosmology and the value of the redshift. Should only be used
+  for simulations of nearby objects. 
+* For off-axis ``normal`` vectors,  the ``north_vector`` argument can 
+  be used to control what vector corresponds to the "up" direction in 
+  the resulting event list. 
+* ``psf_sigma`` may be specified to provide a crude representation of 
+  a PSF, and corresponds to the standard deviation (in degrees) of a 
+  Gaussian PSF model. 
 
 Let's just take a quick look at the raw events object:
 
@@ -343,19 +391,27 @@
 
 Which is starting to look like a real observation!
 
+.. warning::
+
+   The binned images that result, even if you convolve with responses,
+   are still of the same resolution as the finest cell size of the
+   simulation dataset. If you want a more accurate simulation of a
+   particular X-ray telescope, you should check out `Storing events for future use and for reading-in by telescope simulators`_.
+
 We can also bin up the spectrum into energy bins, and write it to a FITS
 table file. This is an example where we've binned up the spectrum
 according to the unconvolved photon energy:
 
 .. code:: python
 
-    events.write_spectrum("virgo_spec.fits", energy_bins=True, emin=0.1, emax=10.0, nchan=2000, clobber=True)
+    events.write_spectrum("virgo_spec.fits", bin_type="energy", emin=0.1, emax=10.0, nchan=2000, clobber=True)
 
-If we don't set ``energy_bins=True``, and we have convolved our events
+We can also set ``bin_type="channel"``. If we have convolved our events
 with response files, then any other keywords will be ignored and it will
 try to make a spectrum from the channel information that is contained
-within the RMF, suitable for analyzing in XSPEC. For now, we'll stick
-with the energy spectrum, and plot it up:
+within the RMF. Otherwise, the channels will be determined from the ``emin``, 
+``emax``, and ``nchan`` keywords, and will be numbered from 1 to ``nchan``. 
+For now, we'll stick with the energy spectrum, and plot it up:
 
 .. code:: python
 

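To make the two ``method`` options documented above concrete, here is a
minimal NumPy-only sketch of both sampling strategies, using a made-up
spectrum and mirroring the two branches in the ``photon_models.py`` diff below:

    import numpy as np

    # Made-up binned spectrum: relative photon rate per energy bin
    ebins = np.linspace(0.1, 10.0, 1001)    # bin edges in keV
    emid = 0.5 * (ebins[1:] + ebins[:-1])   # bin centers in keV
    spec = np.exp(-emid / 2.0)              # illustrative falling spectrum

    n_photons = 10000

    # method=1: invert the cumulative distribution function of the spectrum
    cumspec = np.insert(np.cumsum(spec), 0, 0.0)
    cumspec /= cumspec[-1]
    randvec = np.sort(np.random.uniform(size=n_photons))
    energies_cdf = np.interp(randvec, cumspec, ebins)

    # method=2: draw bin indices weighted by the spectrum
    # (the diff's np.random.choice branch)
    prob = spec / spec.sum()
    eidxs = np.random.choice(len(emid), size=n_photons, p=prob)
    energies_choice = emid[eidxs]
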
diff -r dc2467c4eae70b9f185adf2cba8f61b95f65fea0 -r e3d0784b79910a6d6af653572f4ab8bb644aacf0 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -25,7 +25,7 @@
 from yt.extern.six import string_types
 import numpy as np
 from yt.funcs import *
-from yt.utilities.physical_constants import mp, kboltz
+from yt.utilities.physical_constants import mp
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      parallel_objects
 from yt.units.yt_array import uconcatenate
@@ -34,6 +34,12 @@
 kT_min = 8.08e-2
 kT_max = 50.
 
+photon_units = {"Energy":"keV",
+                "dx":"kpc"}
+for ax in "xyz":
+    photon_units[ax] = "kpc"
+    photon_units["v"+ax] = "km/s"
+
 class PhotonModel(object):
 
     def __init__(self):
@@ -46,7 +52,7 @@
 class ThermalPhotonModel(PhotonModel):
     r"""
     Initialize a ThermalPhotonModel from a thermal spectrum. 
-    
+
     Parameters
     ----------
 
@@ -61,30 +67,37 @@
     photons_per_chunk : integer
         The maximum number of photons that are allocated per chunk. Increase or decrease
         as needed.
+    method : integer, optional
+        The method used to generate the photon energies from the spectrum:
+        1: Invert the cumulative distribution function of the spectrum.
+        2: Acceptance-rejection method using the spectrum. 
+        Method 1 should be sufficient for most cases. 
     """
-    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3, photons_per_chunk=10000000):
+    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3, 
+                 photons_per_chunk=10000000, method=1):
         self.X_H = X_H
         self.Zmet = Zmet
         self.spectral_model = spectral_model
         self.photons_per_chunk = photons_per_chunk
+        self.method = method
 
     def __call__(self, data_source, parameters):
-        
+
         ds = data_source.ds
 
         exp_time = parameters["FiducialExposureTime"]
         area = parameters["FiducialArea"]
         redshift = parameters["FiducialRedshift"]
         D_A = parameters["FiducialAngularDiameterDistance"].in_cgs()
-        dist_fac = 1.0/(4.*np.pi*D_A.value*D_A.value*(1.+redshift)**3)
+        dist_fac = 1.0/(4.*np.pi*D_A.value*D_A.value*(1.+redshift)**2)
         src_ctr = parameters["center"]
 
-        vol_scale = 1.0/np.prod(ds.domain_width.in_cgs().to_ndarray())
-
         my_kT_min, my_kT_max = data_source.quantities.extrema("kT")
 
-        self.spectral_model.prepare()
-        energy = self.spectral_model.ebins
+        self.spectral_model.prepare_spectrum(redshift)
+        emid = self.spectral_model.emid
+        ebins = self.spectral_model.ebins
+        nchan = len(emid)
 
         citer = data_source.chunks([], "io")
 
@@ -99,7 +112,13 @@
         photons["Energy"] = []
         photons["NumberOfPhotons"] = []
 
-        spectral_norm = area.v*exp_time.v*dist_fac/vol_scale
+        spectral_norm = area.v*exp_time.v*dist_fac
+
+        tot_num_cells = data_source.ires.shape[0]
+
+        pbar = get_pbar("Generating photons ", tot_num_cells)
+
+        cell_counter = 0
 
         for chunk in parallel_objects(citer):
 
@@ -114,7 +133,7 @@
             if isinstance(self.Zmet, string_types):
                 metalZ = chunk[self.Zmet].v
             else:
-                metalZ = self.Zmet
+                metalZ = self.Zmet*np.ones(num_cells)
 
             idxs = np.argsort(kT)
 
@@ -133,7 +152,7 @@
                 n += bcount
             kT_idxs = np.unique(kT_idxs)
 
-            cell_em = EM[idxs]*vol_scale
+            cell_em = EM[idxs]*spectral_norm
 
             number_of_photons = np.zeros(num_cells, dtype="uint64")
             energies = np.zeros(self.photons_per_chunk)
@@ -141,8 +160,6 @@
             start_e = 0
             end_e = 0
 
-            pbar = get_pbar("Generating photons for chunk ", num_cells)
-
             for ibegin, iend, ikT in zip(bcell, ecell, kT_idxs):
 
                 kT = kT_bins[ikT] + 0.5*dkT
@@ -151,58 +168,57 @@
 
                 cem = cell_em[ibegin:iend]
 
-                em_sum_c = cem.sum()
-                if isinstance(self.Zmet, string_types):
-                    em_sum_m = (metalZ[ibegin:iend]*cem).sum()
-                else:
-                    em_sum_m = metalZ*em_sum_c
-
                 cspec, mspec = self.spectral_model.get_spectrum(kT)
 
-                cumspec_c = np.cumsum(cspec.d)
-                tot_ph_c = cumspec_c[-1]*spectral_norm*em_sum_c
-                cumspec_c /= cumspec_c[-1]
-                cumspec_c = np.insert(cumspec_c, 0, 0.0)
-
-                cumspec_m = np.cumsum(mspec.d)
-                tot_ph_m = cumspec_m[-1]*spectral_norm*em_sum_m
-                cumspec_m /= cumspec_m[-1]
-                cumspec_m = np.insert(cumspec_m, 0, 0.0)
+                tot_ph_c = cspec.d.sum()
+                tot_ph_m = mspec.d.sum()
 
                 u = np.random.random(size=n_current)
 
-                cell_norm_c = tot_ph_c*cem/em_sum_c
-                cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u)
-            
-                if isinstance(self.Zmet, string_types):
-                    cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cem/em_sum_m
-                else:
-                    cell_norm_m = tot_ph_m*metalZ*cem/em_sum_m
-                cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u)
-            
-                number_of_photons[ibegin:iend] = cell_n_c + cell_n_m
+                cell_norm_c = tot_ph_c*cem
+                cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cem
+                cell_norm = np.modf(cell_norm_c + cell_norm_m)
+                cell_n = np.uint64(cell_norm[1]) + np.uint64(cell_norm[0] >= u)
 
-                end_e += int((cell_n_c+cell_n_m).sum())
+                number_of_photons[ibegin:iend] = cell_n
+
+                end_e += int(cell_n.sum())
 
                 if end_e > self.photons_per_chunk:
                     raise RuntimeError("Number of photons generated for this chunk "+
                                        "exceeds photons_per_chunk (%d)! " % self.photons_per_chunk +
                                        "Increase photons_per_chunk!")
 
-                energies[start_e:end_e] = _generate_energies(cell_n_c, cell_n_m, cumspec_c, cumspec_m, energy)
-            
+                if self.method == 1:
+                    cumspec_c = np.cumsum(cspec.d)
+                    cumspec_m = np.cumsum(mspec.d)
+                    cumspec_c = np.insert(cumspec_c, 0, 0.0)
+                    cumspec_m = np.insert(cumspec_m, 0, 0.0)
+
+                ei = start_e
+                for cn, Z in zip(number_of_photons[ibegin:iend], metalZ[ibegin:iend]):
+                    if cn > 0:
+                        if self.method == 1:
+                            cumspec = cumspec_c + Z*cumspec_m
+                            cumspec /= cumspec[-1]
+                            randvec = np.random.uniform(size=cn)
+                            randvec.sort()
+                            cell_e = np.interp(randvec, cumspec, ebins)
+                        elif self.method == 2:
+                            tot_spec = cspec.d+Z*mspec.d
+                            tot_spec /= tot_spec.sum()
+                            eidxs = np.random.choice(nchan, size=cn, p=tot_spec)
+                            cell_e = emid[eidxs]
+                        energies[ei:ei+cn] = cell_e
+                        cell_counter += 1
+                    pbar.update(cell_counter)
+                    ei += cn
+
                 start_e = end_e
 
-                pbar.update(iend)
-
-            pbar.finish()
-
             active_cells = number_of_photons > 0
             idxs = idxs[active_cells]
 
-            mylog.info("Number of photons generated for this chunk: %d" % int(number_of_photons.sum()))
-            mylog.info("Number of cells with photons: %d" % int(active_cells.sum()))
-
             photons["NumberOfPhotons"].append(number_of_photons[active_cells])
             photons["Energy"].append(ds.arr(energies[:end_e].copy(), "keV"))
             photons["x"].append((chunk["x"][idxs]-src_ctr[0]).in_units("kpc"))
@@ -213,23 +229,17 @@
             photons["vz"].append(chunk["velocity_z"][idxs].in_units("km/s"))
             photons["dx"].append(chunk["dx"][idxs].in_units("kpc"))
 
+        pbar.finish()
+
         for key in photons:
             if len(photons[key]) > 0:
                 photons[key] = uconcatenate(photons[key])
+            elif key == "NumberOfPhotons":
+                photons[key] = np.array([])
+            else:
+                photons[key] = YTArray([], photon_units[key])
+
+        mylog.info("Number of photons generated: %d" % int(np.sum(photons["NumberOfPhotons"])))
+        mylog.info("Number of cells with photons: %d" % len(photons["x"]))
 
         return photons
-
-def _generate_energies(cell_n_c, cell_n_m, counts_c, counts_m, energy):
-    energies = np.array([])
-    for cn_c, cn_m in zip(cell_n_c, cell_n_m):
-        if cn_c > 0:
-            randvec_c = np.random.uniform(size=cn_c)
-            randvec_c.sort()
-            cell_e_c = np.interp(randvec_c, counts_c, energy)
-            energies = np.append(energies, cell_e_c)
-        if cn_m > 0: 
-            randvec_m = np.random.uniform(size=cn_m)
-            randvec_m.sort()
-            cell_e_m = np.interp(randvec_m, counts_m, energy)
-            energies = np.append(energies, cell_e_m)
-    return energies
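
The ``np.modf`` line in the diff above implements stochastic rounding: each
cell's fractional expected photon count becomes an integer count whose mean
matches the expectation. A standalone sketch of that trick, with illustrative
array values:

    import numpy as np

    expected = np.array([0.2, 1.7, 3.1, 0.05])  # expected photons per cell
    u = np.random.random(size=expected.size)    # one uniform draw per cell

    frac, whole = np.modf(expected)             # fractional and integer parts
    counts = whole.astype(np.uint64) + (frac >= u).astype(np.uint64)

    # Averaged over many draws, counts matches expected in the mean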

diff -r dc2467c4eae70b9f185adf2cba8f61b95f65fea0 -r e3d0784b79910a6d6af653572f4ab8bb644aacf0 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -25,6 +25,7 @@
 from yt.utilities.physical_constants import clight
 from yt.utilities.cosmology import Cosmology
 from yt.utilities.orientation import Orientation
+from yt.utilities.fits_image import assert_same_wcs
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only, get_mpi_type, \
      parallel_capable
@@ -34,6 +35,10 @@
 
 comm = communication_system.communicators[-1]
 
+axes_lookup = {"x":("y","z"),
+               "y":("z","x"),
+               "z":("x","y")}
+
 def parse_value(value, default_units):
     if isinstance(value, YTQuantity):
         return value.in_units(default_units)
@@ -50,10 +55,10 @@
         self.cosmo = cosmo
         self.p_bins = p_bins
         self.num_cells = len(photons["x"])
-        
+
     def keys(self):
         return self.photons.keys()
-    
+
     def items(self):
         ret = []
         for k, v in self.photons.items():
@@ -62,7 +67,7 @@
             else:
                 ret.append((k,v))
         return ret
-    
+
     def values(self):
         ret = []
         for k, v in self.photons.items():
@@ -71,7 +76,7 @@
             else:
                 ret.append(v)
         return ret
-                                
+
     def __getitem__(self, key):
         if key == "Energy":
             return [self.photons["Energy"][self.p_bins[i]:self.p_bins[i+1]]
@@ -127,9 +132,9 @@
 
         p_bins = np.cumsum(photons["NumberOfPhotons"])
         p_bins = np.insert(p_bins, 0, [np.uint64(0)])
-        
+
         photons["Energy"] = YTArray(f["/energy"][start_e:end_e], "keV")
-        
+
         f.close()
 
         cosmo = Cosmology(hubble_constant=parameters["HubbleConstant"],
@@ -137,7 +142,7 @@
                           omega_lambda=parameters["OmegaLambda"])
 
         return cls(photons, parameters, cosmo, p_bins)
-    
+
     @classmethod
     def from_scratch(cls, data_source, redshift, area,
                      exp_time, photon_model, parameters=None,
@@ -277,9 +282,9 @@
             D_A = parse_value(dist, "Mpc")
             redshift = 0.0
 
-        if center == "c":
+        if center == "center" or center == "c":
             parameters["center"] = ds.domain_center
-        elif center == "max":
+        elif center == "max" or center == "m":
             parameters["center"] = ds.find_max("density")[-1]
         elif iterable(center):
             if isinstance(center, YTArray):
@@ -288,7 +293,7 @@
                 parameters["center"] = ds.arr(center, "code_length")
         elif center is None:
             parameters["center"] = data_source.get_field_parameter("center")
-            
+
         parameters["FiducialExposureTime"] = parse_value(exp_time, "s")
         parameters["FiducialArea"] = parse_value(area, "cm**2")
         parameters["FiducialRedshift"] = redshift
@@ -308,32 +313,32 @@
             dimension = max(dimension, int(width/delta_min))
         parameters["Dimension"] = 2*dimension
         parameters["Width"] = 2.*width.in_units("kpc")
-                
+
         photons = photon_model(data_source, parameters)
 
         mylog.info("Finished generating photons.")
 
         p_bins = np.cumsum(photons["NumberOfPhotons"])
         p_bins = np.insert(p_bins, 0, [np.uint64(0)])
-                        
+
         return cls(photons, parameters, cosmo, p_bins)
-        
+
     def write_h5_file(self, photonfile):
         """
         Write the photons to the HDF5 file *photonfile*.
         """
 
         if parallel_capable:
-            
+
             mpi_long = get_mpi_type("int64")
             mpi_double = get_mpi_type("float64")
-        
+
             local_num_cells = len(self.photons["x"])
             sizes_c = comm.comm.gather(local_num_cells, root=0)
-            
-            local_num_photons = self.photons["NumberOfPhotons"].sum()
+
+            local_num_photons = np.sum(self.photons["NumberOfPhotons"])
             sizes_p = comm.comm.gather(local_num_photons, root=0)
-            
+
             if comm.rank == 0:
                 num_cells = sum(sizes_c)
                 num_photons = sum(sizes_p)        
@@ -362,24 +367,24 @@
                 dx = np.empty([])
                 n_ph = np.empty([])
                 e = np.empty([])
-                                                
-            comm.comm.Gatherv([self.photons["x"].ndarray_view(), local_num_cells, mpi_double],
+
+            comm.comm.Gatherv([self.photons["x"].d, local_num_cells, mpi_double],
                               [x, (sizes_c, disps_c), mpi_double], root=0)
-            comm.comm.Gatherv([self.photons["y"].ndarray_view(), local_num_cells, mpi_double],
+            comm.comm.Gatherv([self.photons["y"].d, local_num_cells, mpi_double],
                               [y, (sizes_c, disps_c), mpi_double], root=0)
-            comm.comm.Gatherv([self.photons["z"].ndarray_view(), local_num_cells, mpi_double],
+            comm.comm.Gatherv([self.photons["z"].d, local_num_cells, mpi_double],
                               [z, (sizes_c, disps_c), mpi_double], root=0)
-            comm.comm.Gatherv([self.photons["vx"].ndarray_view(), local_num_cells, mpi_double],
+            comm.comm.Gatherv([self.photons["vx"].d, local_num_cells, mpi_double],
                               [vx, (sizes_c, disps_c), mpi_double], root=0)
-            comm.comm.Gatherv([self.photons["vy"].ndarray_view(), local_num_cells, mpi_double],
+            comm.comm.Gatherv([self.photons["vy"].d, local_num_cells, mpi_double],
                               [vy, (sizes_c, disps_c), mpi_double], root=0)
-            comm.comm.Gatherv([self.photons["vz"].ndarray_view(), local_num_cells, mpi_double],
+            comm.comm.Gatherv([self.photons["vz"].d, local_num_cells, mpi_double],
                               [vz, (sizes_c, disps_c), mpi_double], root=0)
-            comm.comm.Gatherv([self.photons["dx"].ndarray_view(), local_num_cells, mpi_double],
+            comm.comm.Gatherv([self.photons["dx"].d, local_num_cells, mpi_double],
                               [dx, (sizes_c, disps_c), mpi_double], root=0)
             comm.comm.Gatherv([self.photons["NumberOfPhotons"], local_num_cells, mpi_long],
                               [n_ph, (sizes_c, disps_c), mpi_long], root=0)
-            comm.comm.Gatherv([self.photons["Energy"].ndarray_view(), local_num_photons, mpi_double],
+            comm.comm.Gatherv([self.photons["Energy"].d, local_num_photons, mpi_double],
                               [e, (sizes_p, disps_p), mpi_double], root=0) 
 
         else:
@@ -393,13 +398,13 @@
             dx = self.photons["dx"].d
             n_ph = self.photons["NumberOfPhotons"]
             e = self.photons["Energy"].d
-                                                
+
         if comm.rank == 0:
-            
+
             f = h5py.File(photonfile, "w")
 
             # Scalars
-       
+
             f.create_dataset("fid_area", data=float(self.parameters["FiducialArea"]))
             f.create_dataset("fid_exp_time", data=float(self.parameters["FiducialExposureTime"]))
             f.create_dataset("fid_redshift", data=self.parameters["FiducialRedshift"])
@@ -409,7 +414,7 @@
             f.create_dataset("fid_d_a", data=float(self.parameters["FiducialAngularDiameterDistance"]))
             f.create_dataset("dimension", data=self.parameters["Dimension"])
             f.create_dataset("width", data=float(self.parameters["Width"]))
-                        
+
             # Arrays
 
             f.create_dataset("x", data=x)
@@ -426,19 +431,22 @@
 
         comm.barrier()
 
-    def project_photons(self, L, area_new=None, exp_time_new=None, 
+    def project_photons(self, normal, area_new=None, exp_time_new=None, 
                         redshift_new=None, dist_new=None,
                         absorb_model=None, psf_sigma=None,
                         sky_center=None, responses=None,
-                        convolve_energies=False, no_shifting=False):
+                        convolve_energies=False, no_shifting=False,
+                        north_vector=None):
         r"""
         Projects photons onto an image plane given a line of sight.
 
         Parameters
         ----------
 
-        L : array_like
-            Normal vector to the plane of projection.
+        normal : character or array_like
+            Normal vector to the plane of projection. If "x", "y", or "z", it is
+            assumed to be along that axis (and will probably be faster). Otherwise, 
+            it should be an off-axis normal vector, e.g. [1.0, 2.0, -3.0].
         area_new : float, optional
             New value for the effective area of the detector. If *responses*
             are specified the value of this keyword is ignored.
@@ -463,7 +471,11 @@
             If this is set, the photon energies will be convolved with the RMF.
         no_shifting : boolean, optional
             If set, the photon energies will not be Doppler shifted.
-            
+        north_vector : a sequence of floats
+            A vector defining the 'up' direction. This option sets the orientation of 
+            the plane of projection. If not set, an arbitrary grid-aligned north_vector 
+            is chosen. Ignored in the case where a particular axis (e.g., "x", "y", or
+            "z") is explicitly specified.
         Examples
         --------
         >>> L = np.array([0.1,-0.2,0.3])
@@ -475,7 +487,7 @@
         if redshift_new is not None and dist_new is not None:
             mylog.error("You may specify a new redshift or distance, "+
                         "but not both!")
-        
+
         if sky_center is None:
             sky_center = YTArray([30.,45.], "degree")
         else:
@@ -486,37 +498,36 @@
         if psf_sigma is not None:
              psf_sigma = parse_value(psf_sigma, "degree")
 
-        L = np.array(L)
-        L /= np.sqrt(np.dot(L, L))
-        vecs = np.identity(3)
-        t = np.cross(L, vecs).sum(axis=1)
-        ax = t.argmax()
-        north = np.cross(L, vecs[ax,:]).ravel()
-        orient = Orientation(L, north_vector=north)
-        
-        x_hat = orient.unit_vectors[0]
-        y_hat = orient.unit_vectors[1]
-        z_hat = orient.unit_vectors[2]
+        if not isinstance(normal, string_types):
+            L = np.array(normal)
+            orient = Orientation(L, north_vector=north_vector)
+            x_hat = orient.unit_vectors[0]
+            y_hat = orient.unit_vectors[1]
+            z_hat = orient.unit_vectors[2]
 
         n_ph = self.photons["NumberOfPhotons"]
         n_ph_tot = n_ph.sum()
-        
+
         eff_area = None
 
         parameters = {}
-        
+
         if responses is not None:
             responses = ensure_list(responses)
             parameters["ARF"] = responses[0]
             if len(responses) == 2:
                 parameters["RMF"] = responses[1]
             area_new = parameters["ARF"]
-            
+
+        zobs0 = self.parameters["FiducialRedshift"]
+        D_A0 = self.parameters["FiducialAngularDiameterDistance"]
+        scale_factor = 1.0
+
         if (exp_time_new is None and area_new is None and
             redshift_new is None and dist_new is None):
             my_n_obs = n_ph_tot
-            zobs = self.parameters["FiducialRedshift"]
-            D_A = self.parameters["FiducialAngularDiameterDistance"]
+            zobs = zobs0
+            D_A = D_A0
         else:
             if exp_time_new is None:
                 Tratio = 1.
@@ -545,8 +556,8 @@
                 Aratio = parse_value(area_new, "cm**2")/self.parameters["FiducialArea"]
             if redshift_new is None and dist_new is None:
                 Dratio = 1.
-                zobs = self.parameters["FiducialRedshift"]
-                D_A = self.parameters["FiducialAngularDiameterDistance"]
+                zobs = zobs0
+                D_A = D_A0
             else:
                 if redshift_new is None:
                     zobs = 0.0
@@ -554,28 +565,17 @@
                 else:
                     zobs = redshift_new
                     D_A = self.cosmo.angular_diameter_distance(0.0,zobs).in_units("Mpc")
-                fid_D_A = self.parameters["FiducialAngularDiameterDistance"]
-                Dratio = fid_D_A*fid_D_A*(1.+self.parameters["FiducialRedshift"]**3) / \
+                    scale_factor = (1.+zobs0)/(1.+zobs)
+                Dratio = D_A0*D_A0*(1.+zobs0)**3 / \
                          (D_A*D_A*(1.+zobs)**3)
             fak = Aratio*Tratio*Dratio
             if fak > 1:
-                raise ValueError("Spectrum scaling factor = %g, cannot be greater than unity." % (fak))
+                raise ValueError("Spectrum scaling factor = %g, cannot be greater than unity." % fak)
             my_n_obs = np.uint64(n_ph_tot*fak)
 
         n_obs_all = comm.mpi_allreduce(my_n_obs)
         if comm.rank == 0:
             mylog.info("Total number of photons to use: %d" % (n_obs_all))
-        
-        x = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-        y = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-        z = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-
-        if not no_shifting:
-            vz = self.photons["vx"]*z_hat[0] + \
-                 self.photons["vy"]*z_hat[1] + \
-                 self.photons["vz"]*z_hat[2]
-            shift = -vz.in_cgs()/clight
-            shift = np.sqrt((1.-shift)/(1.+shift))
 
         if my_n_obs == n_ph_tot:
             idxs = np.arange(my_n_obs,dtype='uint64')
@@ -584,32 +584,57 @@
         obs_cells = np.searchsorted(self.p_bins, idxs, side='right')-1
         delta = dx[obs_cells]
 
-        x *= delta
-        y *= delta
-        z *= delta
-        x += self.photons["x"][obs_cells]
-        y += self.photons["y"][obs_cells]
-        z += self.photons["z"][obs_cells]
+        if isinstance(normal, string_types):
+
+            xsky = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            ysky = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            xsky *= delta
+            ysky *= delta
+            xsky += self.photons[axes_lookup[normal][0]][obs_cells]
+            ysky += self.photons[axes_lookup[normal][1]][obs_cells]
+
+            if not no_shifting:
+                vz = self.photons["v%s" % normal]
+
+        else:
+            x = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            y = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            z = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+
+            if not no_shifting:
+                vz = self.photons["vx"]*z_hat[0] + \
+                     self.photons["vy"]*z_hat[1] + \
+                     self.photons["vz"]*z_hat[2]
+
+            x *= delta
+            y *= delta
+            z *= delta
+            x += self.photons["x"][obs_cells]
+            y += self.photons["y"][obs_cells]
+            z += self.photons["z"][obs_cells]
+
+            xsky = x*x_hat[0] + y*x_hat[1] + z*x_hat[2]
+            ysky = x*y_hat[0] + y*y_hat[1] + z*y_hat[2]
+
         if no_shifting:
             eobs = self.photons["Energy"][idxs]
         else:
+            shift = -vz.in_cgs()/clight
+            shift = np.sqrt((1.-shift)/(1.+shift))
             eobs = self.photons["Energy"][idxs]*shift[obs_cells]
+        eobs *= scale_factor
 
-        xsky = x*x_hat[0] + y*x_hat[1] + z*x_hat[2]
-        ysky = x*y_hat[0] + y*y_hat[1] + z*y_hat[2]
-        eobs /= (1.+zobs)
-        
         if absorb_model is None:
             not_abs = np.ones(eobs.shape, dtype='bool')
         else:
             mylog.info("Absorbing.")
-            absorb_model.prepare()
+            absorb_model.prepare_spectrum()
             emid = absorb_model.emid
             aspec = absorb_model.get_spectrum()
             absorb = np.interp(eobs, emid, aspec, left=0.0, right=0.0)
             randvec = aspec.max()*np.random.random(eobs.shape)
             not_abs = randvec < absorb
-        
+
         if eff_area is None:
             detected = np.ones(eobs.shape, dtype='bool')
         else:
@@ -618,14 +643,14 @@
             earea = np.interp(eobs, earf, eff_area, left=0.0, right=0.0)
             randvec = eff_area.max()*np.random.random(eobs.shape)
             detected = randvec < earea
-        
+
         detected = np.logical_and(not_abs, detected)
-                    
+
         events = {}
 
         dx_min = self.parameters["Width"]/self.parameters["Dimension"]
         dtheta = YTQuantity(np.rad2deg(dx_min/D_A), "degree")
-        
+
         events["xpix"] = xsky[detected]/dx_min.v + 0.5*(nx+1)
         events["ypix"] = ysky[detected]/dx_min.v + 0.5*(nx+1)
         events["eobs"] = eobs[detected]
@@ -635,15 +660,15 @@
         if psf_sigma is not None:
             events["xpix"] += np.random.normal(sigma=psf_sigma/dtheta)
             events["ypix"] += np.random.normal(sigma=psf_sigma/dtheta)
-        
+
         num_events = len(events["xpix"])
-            
+
         if comm.rank == 0: mylog.info("Total number of observed photons: %d" % num_events)
 
         if "RMF" in parameters and convolve_energies:
             events, info = self._convolve_with_rmf(parameters["RMF"], events)
             for k, v in info.items(): parameters[k] = v
-                
+
         if exp_time_new is None:
             parameters["ExposureTime"] = self.parameters["FiducialExposureTime"]
         else:
@@ -657,7 +682,7 @@
         parameters["sky_center"] = sky_center
         parameters["pix_center"] = np.array([0.5*(nx+1)]*2)
         parameters["dtheta"] = dtheta
-        
+
         return EventList(events, parameters)
 
     def _normalize_arf(self, respfile):
@@ -688,7 +713,7 @@
 
         eidxs = np.argsort(events["eobs"])
 
-        phEE = events["eobs"][eidxs].ndarray_view()
+        phEE = events["eobs"][eidxs].d
         phXX = events["xpix"][eidxs]
         phYY = events["ypix"][eidxs]
 
@@ -758,7 +783,7 @@
         self.num_events = events["xpix"].shape[0]
         self.wcs = _astropy.pywcs.WCS(naxis=2)
         self.wcs.wcs.crpix = parameters["pix_center"]
-        self.wcs.wcs.crval = parameters["sky_center"].ndarray_view()
+        self.wcs.wcs.crval = parameters["sky_center"].d
         self.wcs.wcs.cdelt = [-parameters["dtheta"].value, parameters["dtheta"].value]
         self.wcs.wcs.ctype = ["RA---TAN","DEC--TAN"]
         self.wcs.wcs.cunit = ["deg"]*2
@@ -785,8 +810,9 @@
         return self.events.__repr__()
 
     def __add__(self, other):
-        keys1 = self.parameters.keys()
-        keys2 = other.parameters.keys()
+        assert_same_wcs(self.wcs, other.wcs)
+        keys1 = list(self.parameters.keys())
+        keys2 = list(other.parameters.keys())
         keys1.sort()
         keys2.sort()
         if keys1 != keys2:
@@ -809,9 +835,9 @@
         return EventList(events, self.parameters)
 
     def filter_events(self, region):
-        """                                                                                                                                 
-        Filter events using a ds9 region. Requires the pyregion package.                                                                    
-        Returns a new EventList.                                                                                                            
+        """
+        Filter events using a ds9 region. Requires the pyregion package.
+        Returns a new EventList.
         """
         import pyregion
         import os
@@ -836,7 +862,7 @@
         """
         events = {}
         parameters = {}
-        
+
         f = h5py.File(h5file, "r")
 
         parameters["ExposureTime"] = YTQuantity(f["/exp_time"].value, "s")
@@ -923,7 +949,7 @@
         cols = []
 
         col1 = pyfits.Column(name='ENERGY', format='E', unit='eV',
-                             array=self.events["eobs"].in_units("eV").ndarray_view())
+                             array=self.events["eobs"].in_units("eV").d)
         col2 = pyfits.Column(name='X', format='D', unit='pixel',
                              array=self.events["xpix"])
         col3 = pyfits.Column(name='Y', format='D', unit='pixel',
@@ -942,7 +968,7 @@
              cols.append(col4)
 
         coldefs = pyfits.ColDefs(cols)
-        tbhdu = pyfits.new_table(coldefs)
+        tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
         tbhdu.update_ext_name("EVENTS")
 
         tbhdu.header["MTYPE1"] = "sky"
@@ -1006,29 +1032,29 @@
         """
         pyfits = _astropy.pyfits
         if isinstance(self.parameters["Area"], string_types):
-             mylog.error("Writing SIMPUT files is only supported if you didn't convolve with an ARF.")
-             raise TypeError("Writing SIMPUT files is only supported if you didn't convolve with an ARF.")
+             mylog.error("Writing SIMPUT files is only supported if you didn't convolve with responses.")
+             raise TypeError("Writing SIMPUT files is only supported if you didn't convolve with responses.")
 
         if emin is None:
-            emin = self.events["eobs"].min().value*0.95
+            emin = self.events["eobs"].min().value
         if emax is None:
-            emax = self.events["eobs"].max().value*1.05
+            emax = self.events["eobs"].max().value
 
-        idxs = np.logical_and(self.events["eobs"].ndarray_view() >= emin,
-                              self.events["eobs"].ndarray_view() <= emax)
+        idxs = np.logical_and(self.events["eobs"].d >= emin,
+                              self.events["eobs"].d <= emax)
         flux = np.sum(self.events["eobs"][idxs].in_units("erg")) / \
                self.parameters["ExposureTime"]/self.parameters["Area"]
 
         col1 = pyfits.Column(name='ENERGY', format='E',
-                             array=self["eobs"].ndarray_view())
+                             array=self["eobs"].d)
         col2 = pyfits.Column(name='DEC', format='D',
-                             array=self["ysky"].ndarray_view())
+                             array=self["ysky"].d)
         col3 = pyfits.Column(name='RA', format='D',
-                             array=self["xsky"].ndarray_view())
+                             array=self["xsky"].d)
 
         coldefs = pyfits.ColDefs([col1, col2, col3])
 
-        tbhdu = pyfits.new_table(coldefs)
+        tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
         tbhdu.update_ext_name("PHLIST")
 
         tbhdu.header["HDUCLASS"] = "HEASARC/SIMPUT"
@@ -1056,7 +1082,7 @@
 
         coldefs = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8])
 
-        wrhdu = pyfits.new_table(coldefs)
+        wrhdu = pyfits.BinTableHDU.from_columns(coldefs)
         wrhdu.update_ext_name("SRC_CAT")
 
         wrhdu.header["HDUCLASS"] = "HEASARC"
@@ -1104,14 +1130,14 @@
 
         f.create_dataset("/xpix", data=self.events["xpix"])
         f.create_dataset("/ypix", data=self.events["ypix"])
-        f.create_dataset("/xsky", data=self.events["xsky"].ndarray_view())
-        f.create_dataset("/ysky", data=self.events["ysky"].ndarray_view())
-        f.create_dataset("/eobs", data=self.events["eobs"].ndarray_view())
+        f.create_dataset("/xsky", data=self.events["xsky"].d)
+        f.create_dataset("/ysky", data=self.events["ysky"].d)
+        f.create_dataset("/eobs", data=self.events["eobs"].d)
         if "PI" in self.events:
             f.create_dataset("/pi", data=self.events["PI"])
         if "PHA" in self.events:
-            f.create_dataset("/pha", data=self.events["PHA"])
-        f.create_dataset("/sky_center", data=self.parameters["sky_center"].ndarray_view())
+            f.create_dataset("/pha", data=self.events["PHA"])                  
+        f.create_dataset("/sky_center", data=self.parameters["sky_center"].d)
         f.create_dataset("/pix_center", data=self.parameters["pix_center"])
         f.create_dataset("/dtheta", data=float(self.parameters["dtheta"]))
 
@@ -1138,11 +1164,11 @@
         if emin is None:
             mask_emin = np.ones((self.num_events), dtype='bool')
         else:
-            mask_emin = self.events["eobs"].ndarray_view() > emin
+            mask_emin = self.events["eobs"].d > emin
         if emax is None:
             mask_emax = np.ones((self.num_events), dtype='bool')
         else:
-            mask_emax = self.events["eobs"].ndarray_view() < emax
+            mask_emax = self.events["eobs"].d < emax
 
         mask = np.logical_and(mask_emin, mask_emax)
 
@@ -1175,38 +1201,30 @@
         hdu.writeto(imagefile, clobber=clobber)
 
     @parallel_root_only
-    def write_spectrum(self, specfile, energy_bins=False, emin=0.1,
+    def write_spectrum(self, specfile, bin_type="energy", emin=0.1,
                        emax=10.0, nchan=2000, clobber=False):
         r"""
         Bin event energies into a spectrum and write it to a FITS binary table. Can bin
-        on energy or channel, the latter only if the photons were convolved with an
-        RMF. In that case, the spectral binning will be determined by the RMF binning.
+        on energy or channel. If binning on channel and an RMF is present, the 
+        spectral binning will be determined by the RMF binning.
 
         Parameters
         ----------
 
         specfile : string
             The name of the FITS file to be written.
-        energy_bins : boolean, optional
-            Bin on energy or channel. 
+        bin_type : string, optional
+            Bin on "energy" or "channel". If an RMF is detected, channel information will be
+            imported from it. 
         emin : float, optional
-            The minimum energy of the spectral bins in keV. Only used if binning on energy.
+            The minimum energy of the spectral bins in keV. Only used if binning without an RMF.
         emax : float, optional
-            The maximum energy of the spectral bins in keV. Only used if binning on energy.
+            The maximum energy of the spectral bins in keV. Only used if binning without an RMF.
         nchan : integer, optional
-            The number of channels. Only used if binning on energy.
+            The number of channels. Only used if binning without an RMF.
         """
         pyfits = _astropy.pyfits
-        if energy_bins:
-            spectype = "energy"
-            espec = self.events["eobs"].d
-            range = (emin, emax)
-            spec, ee = np.histogram(espec, bins=nchan, range=range)
-            bins = 0.5*(ee[1:]+ee[:-1])
-        else:
-            if "ChannelType" not in self.parameters:
-                mylog.error("These events were not convolved with an RMF. Set energy_bins=True.")
-                raise KeyError
+        if bin_type == "channel" and "ChannelType" in self.parameters:
             spectype = self.parameters["ChannelType"]
             espec = self.events[spectype]
             f = pyfits.open(self.parameters["RMF"])
@@ -1228,7 +1246,17 @@
             if cmin == 1: minlength += 1
             spec = np.bincount(self.events[spectype],minlength=minlength)
             if cmin == 1: spec = spec[1:]
-            bins = np.arange(nchan).astype("int32")+cmin
+            bins = (np.arange(nchan)+cmin).astype("int32")
+        else:    
+            espec = self.events["eobs"].d
+            erange = (emin, emax)
+            spec, ee = np.histogram(espec, bins=nchan, range=erange)
+            if bin_type == "energy":
+                bins = 0.5*(ee[1:]+ee[:-1])
+                spectype = "energy"
+            else:
+                bins = (np.arange(nchan)+1).astype("int32")
+                spectype = "pi"
 
         col1 = pyfits.Column(name='CHANNEL', format='1J', array=bins)
         col2 = pyfits.Column(name=spectype.upper(), format='1D', array=bins.astype("float64"))
@@ -1240,46 +1268,45 @@
         tbhdu = pyfits.new_table(coldefs)
         tbhdu.update_ext_name("SPECTRUM")
 
-        if not energy_bins:
-            tbhdu.header["DETCHANS"] = spec.shape[0]
-            tbhdu.header["TOTCTS"] = spec.sum()
-            tbhdu.header["EXPOSURE"] = float(self.parameters["ExposureTime"])
-            tbhdu.header["LIVETIME"] = float(self.parameters["ExposureTime"])
-            tbhdu.header["CONTENT"] = spectype
-            tbhdu.header["HDUCLASS"] = "OGIP"
-            tbhdu.header["HDUCLAS1"] = "SPECTRUM"
-            tbhdu.header["HDUCLAS2"] = "TOTAL"
-            tbhdu.header["HDUCLAS3"] = "TYPE:I"
-            tbhdu.header["HDUCLAS4"] = "COUNT"
-            tbhdu.header["HDUVERS"] = "1.1.0"
-            tbhdu.header["HDUVERS1"] = "1.1.0"
-            tbhdu.header["CHANTYPE"] = spectype
-            tbhdu.header["BACKFILE"] = "none"
-            tbhdu.header["CORRFILE"] = "none"
-            tbhdu.header["POISSERR"] = True
-            if "RMF" in self.parameters:
-                 tbhdu.header["RESPFILE"] = self.parameters["RMF"]
-            else:
-                 tbhdu.header["RESPFILE"] = "none"
-            if "ARF" in self.parameters:
-                tbhdu.header["ANCRFILE"] = self.parameters["ARF"]
-            else:        
-                tbhdu.header["ANCRFILE"] = "none"
-            if "Mission" in self.parameters:
-                tbhdu.header["MISSION"] = self.parameters["Mission"]
-            else:
-                tbhdu.header["MISSION"] = "none"
-            if "Telescope" in self.parameters:
-                tbhdu.header["TELESCOP"] = self.parameters["Telescope"]
-            else:
-                tbhdu.header["TELESCOP"] = "none"
-            if "Instrument" in self.parameters:
-                tbhdu.header["INSTRUME"] = self.parameters["Instrument"]
-            else:
-                tbhdu.header["INSTRUME"] = "none"
-            tbhdu.header["AREASCAL"] = 1.0
-            tbhdu.header["CORRSCAL"] = 0.0
-            tbhdu.header["BACKSCAL"] = 1.0
+        tbhdu.header["DETCHANS"] = spec.shape[0]
+        tbhdu.header["TOTCTS"] = spec.sum()
+        tbhdu.header["EXPOSURE"] = float(self.parameters["ExposureTime"])
+        tbhdu.header["LIVETIME"] = float(self.parameters["ExposureTime"])
+        tbhdu.header["CONTENT"] = spectype
+        tbhdu.header["HDUCLASS"] = "OGIP"
+        tbhdu.header["HDUCLAS1"] = "SPECTRUM"
+        tbhdu.header["HDUCLAS2"] = "TOTAL"
+        tbhdu.header["HDUCLAS3"] = "TYPE:I"
+        tbhdu.header["HDUCLAS4"] = "COUNT"
+        tbhdu.header["HDUVERS"] = "1.1.0"
+        tbhdu.header["HDUVERS1"] = "1.1.0"
+        tbhdu.header["CHANTYPE"] = spectype
+        tbhdu.header["BACKFILE"] = "none"
+        tbhdu.header["CORRFILE"] = "none"
+        tbhdu.header["POISSERR"] = True
+        if "RMF" in self.parameters:
+            tbhdu.header["RESPFILE"] = self.parameters["RMF"]
+        else:
+            tbhdu.header["RESPFILE"] = "none"
+        if "ARF" in self.parameters:
+            tbhdu.header["ANCRFILE"] = self.parameters["ARF"]
+        else:        
+            tbhdu.header["ANCRFILE"] = "none"
+        if "Mission" in self.parameters:
+            tbhdu.header["MISSION"] = self.parameters["Mission"]
+        else:
+            tbhdu.header["MISSION"] = "none"
+        if "Telescope" in self.parameters:
+            tbhdu.header["TELESCOP"] = self.parameters["Telescope"]
+        else:
+            tbhdu.header["TELESCOP"] = "none"
+        if "Instrument" in self.parameters:
+            tbhdu.header["INSTRUME"] = self.parameters["Instrument"]
+        else:
+            tbhdu.header["INSTRUME"] = "none"
+        tbhdu.header["AREASCAL"] = 1.0
+        tbhdu.header["CORRSCAL"] = 0.0
+        tbhdu.header["BACKSCAL"] = 1.0
 
         hdulist = pyfits.HDUList([pyfits.PrimaryHDU(), tbhdu])
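
As a usage sketch of the reworked keyword (the ``events`` object and the file 
names are illustrative assumptions, not taken from this diff):

.. code:: python

    # Bin on energy, ignoring any response information:
    events.write_spectrum("my_spec_energy.fits", bin_type="energy",
                          emin=0.1, emax=10.0, nchan=2000)

    # Bin on channel; if the events carry RMF information, the channel
    # binning is taken from the RMF and emin/emax/nchan are ignored:
    events.write_spectrum("my_spec_channel.fits", bin_type="channel")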
 

diff -r dc2467c4eae70b9f185adf2cba8f61b95f65fea0 -r e3d0784b79910a6d6af653572f4ab8bb644aacf0 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -32,17 +32,17 @@
         self.ebins = np.linspace(self.emin, self.emax, nchan+1)
         self.de = np.diff(self.ebins)
         self.emid = 0.5*(self.ebins[1:]+self.ebins[:-1])
-        
-    def prepare(self):
+
+    def prepare_spectrum(self):
         pass
-    
+
     def get_spectrum(self):
         pass
-                                                        
+
 class XSpecThermalModel(SpectralModel):
     r"""
     Initialize a thermal gas emission model from PyXspec.
-    
+
     Parameters
     ----------
 
@@ -54,19 +54,24 @@
         The maximum energy for the spectral model.
     nchan : integer
         The number of channels in the spectral model.
+    settings : dictionary, optional
+        A dictionary of key, value pairs (must both be strings)
+        that can be used to set various 
 
     Examples
     --------
     >>> mekal_model = XSpecThermalModel("mekal", 0.05, 50.0, 1000)
     """
-    def __init__(self, model_name, emin, emax, nchan, thermal_broad=False):
+    def __init__(self, model_name, emin, emax, nchan, 
+                 thermal_broad=False, settings=None):
         self.model_name = model_name
         self.thermal_broad = thermal_broad
-        SpectralModel.__init__(self, emin, emax, nchan)
-        
-    def prepare(self):
+        self.settings = settings
+        super(XSpecThermalModel, self).__init__(emin, emax, nchan)
+
+    def prepare_spectrum(self, zobs):
         """
-        Prepare the thermal model for execution.
+        Prepare the thermal model for execution given a redshift *zobs* for the spectrum.
         """
         import xspec
         xspec.Xset.chatter = 0
@@ -79,9 +84,12 @@
         else:
             self.norm = 1.0e-14
         self.thermal_comp.norm = 1.0
-        self.thermal_comp.Redshift = 0.0
+        self.thermal_comp.Redshift = zobs
         if self.thermal_broad:
             xspec.Xset.addModelString("APECTHERMAL","yes")
+        if self.settings is not None:
+            for k,v in self.settings.items():
+                xspec.Xset.addModelString(k,v)
 
     def get_spectrum(self, kT):
         """
@@ -89,18 +97,20 @@
         """
         self.thermal_comp.kT = kT
         self.thermal_comp.Abundanc = 0.0
-        cosmic_spec = self.norm*np.array(self.model.values(0))
+        cosmic_spec = np.array(self.model.values(0))
         if self.model_name == "bremss":
             metal_spec = np.zeros(self.nchan)
         else:
             self.thermal_comp.Abundanc = 1.0
-            metal_spec = self.norm*np.array(self.model.values(0)) - cosmic_spec
+            metal_spec = np.array(self.model.values(0)) - cosmic_spec
+        cosmic_spec *= self.norm
+        metal_spec *= self.norm
         return YTArray(cosmic_spec, "cm**3/s"), YTArray(metal_spec, "cm**3/s")
-        
+
 class XSpecAbsorbModel(SpectralModel):
     r"""
     Initialize an absorption model from PyXspec.
-    
+
     Parameters
     ----------
 
@@ -119,14 +129,16 @@
     --------
     >>> abs_model = XSpecAbsorbModel("wabs", 0.1)
     """
-    def __init__(self, model_name, nH, emin=0.01, emax=50.0, nchan=100000):
+    def __init__(self, model_name, nH, emin=0.01, emax=50.0, 
+                 nchan=100000, settings=None):
         self.model_name = model_name
         self.nH = nH
-        SpectralModel.__init__(self, emin, emax, nchan)
-        
-    def prepare(self):
+        self.settings = settings
+        super(XSpecAbsorbModel, self).__init__(emin, emax, nchan)
+
+    def prepare_spectrum(self):
         """
         Prepare the absorption model for execution.
         """
         import xspec
         xspec.Xset.chatter = 0
@@ -135,6 +147,9 @@
         self.model = xspec.Model(self.model_name+"*powerlaw")
         self.model.powerlaw.norm = self.nchan/(self.emax.value-self.emin.value)
         self.model.powerlaw.PhoIndex = 0.0
+        if self.settings is not None:
+            for k,v in self.settings.items():
+                xspec.Xset.addModelString(k,v)
 
     def get_spectrum(self):
         """
@@ -150,7 +165,7 @@
     available at http://www.atomdb.org. This code borrows heavily from Python
     routines used to read the APEC tables developed by Adam Foster at the
     CfA (afoster at cfa.harvard.edu). 
-    
+
     Parameters
     ----------
 
@@ -183,7 +198,7 @@
                                      self.apec_prefix+"_coco.fits")
         self.linefile = os.path.join(self.apec_root,
                                      self.apec_prefix+"_line.fits")
-        SpectralModel.__init__(self, emin, emax, nchan)
+        super(TableApecModel, self).__init__(emin, emax, nchan)  
         self.wvbins = hc/self.ebins[::-1].d
         # H, He, and trace elements
         self.cosmic_elem = [1,2,3,4,5,9,11,15,17,19,21,22,23,24,25,27,29,30]
@@ -196,8 +211,8 @@
                            32.0650,35.4530,39.9480,39.0983,40.0780,
                            44.9559,47.8670,50.9415,51.9961,54.9380,
                            55.8450,58.9332,58.6934,63.5460,65.3800])
-        
-    def prepare(self):
+
+    def prepare_spectrum(self, zobs):
         """
         Prepare the thermal model for execution.
         """
@@ -216,17 +231,18 @@
         self.dTvals = np.diff(self.Tvals)
         self.minlam = self.wvbins.min()
         self.maxlam = self.wvbins.max()
-    
+        self.scale_factor = 1.0/(1.+zobs)
+
     def _make_spectrum(self, element, tindex):
-        
+
         tmpspec = np.zeros(self.nchan)
-        
+
         i = np.where((self.line_handle[tindex].data.field('element') == element) &
                      (self.line_handle[tindex].data.field('lambda') > self.minlam) &
                      (self.line_handle[tindex].data.field('lambda') < self.maxlam))[0]
 
         vec = np.zeros(self.nchan)
-        E0 = hc/self.line_handle[tindex].data.field('lambda')[i]
+        E0 = hc/self.line_handle[tindex].data.field('lambda')[i]*self.scale_factor
         amp = self.line_handle[tindex].data.field('epsilon')[i]
         ebins = self.ebins.d
         if self.thermal_broad:
@@ -246,19 +262,19 @@
             return tmpspec
         else:
             ind = ind[0]
-                                                    
+
         n_cont = self.coco_handle[tindex].data.field('N_Cont')[ind]
-        e_cont = self.coco_handle[tindex].data.field('E_Cont')[ind][:n_cont]
+        e_cont = self.coco_handle[tindex].data.field('E_Cont')[ind][:n_cont]*self.scale_factor
         continuum = self.coco_handle[tindex].data.field('Continuum')[ind][:n_cont]
 
         tmpspec += np.interp(self.emid.d, e_cont, continuum)*self.de.d
-        
+
         n_pseudo = self.coco_handle[tindex].data.field('N_Pseudo')[ind]
-        e_pseudo = self.coco_handle[tindex].data.field('E_Pseudo')[ind][:n_pseudo]
+        e_pseudo = self.coco_handle[tindex].data.field('E_Pseudo')[ind][:n_pseudo]*self.scale_factor
         pseudo = self.coco_handle[tindex].data.field('Pseudo')[ind][:n_pseudo]
-        
+
         tmpspec += np.interp(self.emid.d, e_pseudo, pseudo)*self.de.d
-        
+
         return tmpspec
 
     def get_spectrum(self, kT):
@@ -288,7 +304,7 @@
 class TableAbsorbModel(SpectralModel):
     r"""
     Initialize an absorption model from a table stored in an HDF5 file.
-    
+
     Parameters
     ----------
 
@@ -299,7 +315,7 @@
 
     Examples
     --------
-    >>> abs_model = XSpecAbsorbModel("wabs", 0.1)
+    >>> abs_model = TableAbsorbModel("abs_table.h5", 0.1)
     """
 
     def __init__(self, filename, nH):
@@ -312,15 +328,15 @@
         self.sigma = YTArray(f["cross_section"][:], "cm**2")
         nchan = self.sigma.shape[0]
         f.close()
-        SpectralModel.__init__(self, emin, emax, nchan)
+        super(TableAbsorbModel, self).__init__(emin, emax, nchan)
         self.nH = YTQuantity(nH*1.0e22, "cm**-2")
-        
-    def prepare(self):
+
+    def prepare_spectrum(self):
         """
         Prepare the absorption model for execution.
         """
         pass
-        
+
     def get_spectrum(self):
         """
         Get the absorption spectrum.
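
The new ``zobs`` argument to ``prepare_spectrum`` amounts to shifting the 
tabulated energies into the observed frame by ``1/(1+zobs)``; a minimal 
numerical sketch (toy wavelengths, not the actual AtomDB tables):

.. code:: python

    import numpy as np

    hc = 12.398421                         # keV*Angstrom, approximate
    zobs = 0.05
    scale_factor = 1.0/(1.0+zobs)

    lam = np.array([1.85, 6.18, 12.13])    # illustrative line wavelengths (Angstrom)
    E0 = hc/lam*scale_factor               # observed-frame line energies (keV)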


https://bitbucket.org/yt_analysis/yt/commits/3ddb593c51c2/
Changeset:   3ddb593c51c2
Branch:      yt
User:        jzuhone
Date:        2015-06-19 14:27:05+00:00
Summary:     Better way to handle the settings dict.
Affected #:  1 file

diff -r e3d0784b79910a6d6af653572f4ab8bb644aacf0 -r 3ddb593c51c23ad52d82e0db1328fa173f16f303 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -56,7 +56,7 @@
         The number of channels in the spectral model.
     settings : dictionary, optional
         A dictionary of key, value pairs (must both be strings)
-        that can be used to set various 
+        that can be used to set various options in XSPEC.
 
     Examples
     --------
@@ -66,6 +66,7 @@
                  thermal_broad=False, settings=None):
         self.model_name = model_name
         self.thermal_broad = thermal_broad
+        if settings is None: settings = {}
         self.settings = settings
         super(XSpecThermalModel, self).__init__(emin, emax, nchan)
 
@@ -87,9 +88,8 @@
         self.thermal_comp.Redshift = zobs
         if self.thermal_broad:
             xspec.Xset.addModelString("APECTHERMAL","yes")
-        if self.settings is not None:
-            for k,v in self.settings.items():
-                xspec.Xset.addModelString(k,v)
+        for k,v in self.settings.items():
+            xspec.Xset.addModelString(k,v)
 
     def get_spectrum(self, kT):
         """
@@ -124,6 +124,9 @@
         The maximum energy for the spectral model.
     nchan : integer, optional
         The number of channels in the spectral model.
+    settings : dictionary, optional
+        A dictionary of key, value pairs (must both be strings)
+        that can be used to set various options in XSPEC.
 
     Examples
     --------
@@ -133,6 +136,7 @@
                  nchan=100000, settings=None):
         self.model_name = model_name
         self.nH = nH
+        if settings is None: settings = {}
         self.settings = settings
         super(XSpecAbsorbModel, self).__init__(emin, emax, nchan)
 
@@ -147,9 +151,8 @@
         self.model = xspec.Model(self.model_name+"*powerlaw")
         self.model.powerlaw.norm = self.nchan/(self.emax.value-self.emin.value)
         self.model.powerlaw.PhoIndex = 0.0
-        if self.settings is not None:
-            for k,v in self.settings.items():
-                xspec.Xset.addModelString(k,v)
+        for k,v in self.settings.items():
+            xspec.Xset.addModelString(k,v)
 
     def get_spectrum(self):
         """


https://bitbucket.org/yt_analysis/yt/commits/d7d6a43438e9/
Changeset:   d7d6a43438e9
Branch:      yt
User:        jzuhone
Date:        2015-06-19 14:43:27+00:00
Summary:     Prettying up the center and method keyword args, adding a couple more options
Affected #:  3 files

diff -r 3ddb593c51c23ad52d82e0db1328fa173f16f303 -r d7d6a43438e93b6de749cc0fb27eea86a9fcd6e3 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -175,14 +175,14 @@
 The ``method`` keyword argument is also optional, and determines how the individual
 photon energies are generated from the spectrum. It may be set to one of two values:
 
-* ``method=1``: Construct the cumulative distribution function of the spectrum and invert
+* ``method="invert_cdf"``: Construct the cumulative distribution function of the spectrum and invert
   it, using uniformly drawn random numbers to determine the photon energies (fast, but relies
   on construction of the CDF and interpolation between the points, so for some spectra it
   may not be accurate enough). 
-* ``method=2``: Generate the photon energies from the spectrum using an acceptance-rejection
+* ``method="accept_reject"``: Generate the photon energies from the spectrum using an acceptance-rejection
   technique (accurate, but likely to be slow). 
 
-``method=1`` (the default) should be sufficient for most cases. 
+``method="invert_cdf"`` (the default) should be sufficient for most cases. 
 
 Next, we need to specify "fiducial" values for the telescope collecting
 area, exposure time, and cosmological redshift. Remember, the initial
@@ -221,6 +221,8 @@
   length units. 
 * ``"center"`` or ``"c"`` corresponds to the domain center. 
 * ``"max"`` or ``"m"`` corresponds to the location of the maximum gas density. 
+* A two-element tuple specifying the max or min of a specific field, e.g.,
+  ``("min","gravitational_potential")``, ``("max","dark_matter_density")``
 
 If ``center`` is not specified, ``from_scratch`` will attempt to use the 
 ``"center"`` field parameter of the ``data_source``. 

diff -r 3ddb593c51c23ad52d82e0db1328fa173f16f303 -r d7d6a43438e93b6de749cc0fb27eea86a9fcd6e3 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -67,14 +67,14 @@
     photons_per_chunk : integer
         The maximum number of photons that are allocated per chunk. Increase or decrease
         as needed.
-    method : integer, optional
+    method : string, optional
         The method used to generate the photon energies from the spectrum:
-        1: Invert the cumulative distribution function of the spectrum.
-        2: Acceptance-rejection method using the spectrum. 
-        Method 1 should be sufficient for most cases. 
+        "invert_cdf": Invert the cumulative distribution function of the spectrum.
+        "accept_reject": Acceptance-rejection method using the spectrum. 
+        The first method should be sufficient for most cases. 
     """
     def __init__(self, spectral_model, X_H=0.75, Zmet=0.3, 
-                 photons_per_chunk=10000000, method=1):
+                 photons_per_chunk=10000000, method="invert_cdf"):
         self.X_H = X_H
         self.Zmet = Zmet
         self.spectral_model = spectral_model
@@ -189,7 +189,7 @@
                                        "exceeds photons_per_chunk (%d)! " % self.photons_per_chunk +
                                        "Increase photons_per_chunk!")
 
-                if self.method == 1:
+                if self.method == "invert_cdf":
                     cumspec_c = np.cumsum(cspec.d)
                     cumspec_m = np.cumsum(mspec.d)
                     cumspec_c = np.insert(cumspec_c, 0, 0.0)
@@ -197,20 +197,20 @@
 
                 ei = start_e
                 for cn, Z in zip(number_of_photons[ibegin:iend], metalZ[ibegin:iend]):
-                    if cn > 0:
-                        if self.method == 1:
-                            cumspec = cumspec_c + Z*cumspec_m
-                            cumspec /= cumspec[-1]
-                            randvec = np.random.uniform(size=cn)
-                            randvec.sort()
-                            cell_e = np.interp(randvec, cumspec, ebins)
-                        elif self.method == 2:
-                            tot_spec = cspec.d+Z*mspec.d
-                            tot_spec /= tot_spec.sum()
-                            eidxs = np.random.choice(nchan, size=cn, p=tot_spec)
-                            cell_e = emid[eidxs]
-                        energies[ei:ei+cn] = cell_e
-                        cell_counter += 1
+                    if cn == 0: continue
+                    if self.method == "invert_cdf":
+                        cumspec = cumspec_c + Z*cumspec_m
+                        cumspec /= cumspec[-1]
+                        randvec = np.random.uniform(size=cn)
+                        randvec.sort()
+                        cell_e = np.interp(randvec, cumspec, ebins)
+                    elif self.method == "accept_reject":
+                        tot_spec = cspec.d+Z*mspec.d
+                        tot_spec /= tot_spec.sum()
+                        eidxs = np.random.choice(nchan, size=cn, p=tot_spec)
+                        cell_e = emid[eidxs]
+                    energies[ei:ei+cn] = cell_e
+                    cell_counter += 1
                     pbar.update(cell_counter)
                     ei += cn
 

diff -r 3ddb593c51c23ad52d82e0db1328fa173f16f303 -r d7d6a43438e93b6de749cc0fb27eea86a9fcd6e3 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -282,13 +282,20 @@
             D_A = parse_value(dist, "Mpc")
             redshift = 0.0
 
-        if center == "center" or center == "c":
+        if center in ("center", "c"):
             parameters["center"] = ds.domain_center
-        elif center == "max" or center == "m":
+        elif center in ("max", "m"):
             parameters["center"] = ds.find_max("density")[-1]
         elif iterable(center):
             if isinstance(center, YTArray):
                 parameters["center"] = center.in_units("code_length")
+            elif isinstance(center, tuple):
+                if center[0] == "min":
+                    parameters["center"] = ds.find_min(center[1])[-1]
+                elif center[0] == "max":
+                    parameters["center"] = ds.find_max(center[1])[-1]
+                else:
+                    raise RuntimeError
             else:
                 parameters["center"] = ds.arr(center, "code_length")
         elif center is None:


https://bitbucket.org/yt_analysis/yt/commits/63bfa2cb3d95/
Changeset:   63bfa2cb3d95
Branch:      yt
User:        jzuhone
Date:        2015-06-24 04:48:29+00:00
Summary:     Missed this
Affected #:  1 file

diff -r d7d6a43438e93b6de749cc0fb27eea86a9fcd6e3 -r 63bfa2cb3d95ff1e5f6ec871ddac370d2747a382 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -155,7 +155,7 @@
 
     thermal_model = ThermalPhotonModel(apec_model, X_H=0.75, Zmet=0.3,
                                        photons_per_chunk=100000000,
-                                       method=1)
+                                       method="invert_cdf")
 
 Where we pass in the ``SpectralModel``, and can optionally set values for
 the hydrogen mass fraction ``X_H`` and metallicity ``Z_met``. If


https://bitbucket.org/yt_analysis/yt/commits/5d488bd7c5c4/
Changeset:   5d488bd7c5c4
Branch:      yt
User:        jzuhone
Date:        2015-07-09 14:23:34+00:00
Summary:     Adding a link to the SciPy Proceedings to the documentation
Affected #:  2 files

diff -r 63bfa2cb3d95ff1e5f6ec871ddac370d2747a382 -r 5d488bd7c5c45e0c0752e4251d46767c052d3eaf doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -10,6 +10,10 @@
 simulated X-ray photon lists of events from datasets that yt is able
 to read. The simulated events then can be exported to X-ray telescope
 simulators to produce realistic observations or can be analyzed in-line.
+
+For detailed information about the design of the algorithm in yt, check 
+out `the SciPy 2014 Proceedings <http://conference.scipy.org/proceedings/scipy2014/zuhone.html>`_.
+
 The algorithm is based off of that implemented in
 `PHOX <http://www.mpa-garching.mpg.de/~kdolag/Phox/>`_ for SPH datasets
 by Veronica Biffi and Klaus Dolag. There are two relevant papers:

diff -r 63bfa2cb3d95ff1e5f6ec871ddac370d2747a382 -r 5d488bd7c5c45e0c0752e4251d46767c052d3eaf yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -1,5 +1,10 @@
 """
 Classes for generating lists of photons and detected events
+
+The SciPy Proceeding that describes this module in detail may be found at:
+
+http://conference.scipy.org/proceedings/scipy2014/zuhone.html
+
 The algorithms used here are based off of the method used by the
 PHOX code (http://www.mpa-garching.mpg.de/~kdolag/Phox/),
 developed by Veronica Biffi and Klaus Dolag. References for


https://bitbucket.org/yt_analysis/yt/commits/8b398054b8fa/
Changeset:   8b398054b8fa
Branch:      yt
User:        jzuhone
Date:        2015-07-09 17:16:44+00:00
Summary:     Restoring energy_bins and adding a deprecation warning.
Affected #:  1 file

diff -r 5d488bd7c5c45e0c0752e4251d46767c052d3eaf -r 8b398054b8fac7adf74d677330d6916ccf73f168 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -37,6 +37,7 @@
 from yt.units.yt_array import YTQuantity, YTArray, uconcatenate
 import h5py
 from yt.utilities.on_demand_imports import _astropy
+import warnings
 
 comm = communication_system.communicators[-1]
 
@@ -1213,8 +1214,8 @@
         hdu.writeto(imagefile, clobber=clobber)
 
     @parallel_root_only
-    def write_spectrum(self, specfile, bin_type="energy", emin=0.1,
-                       emax=10.0, nchan=2000, clobber=False):
+    def write_spectrum(self, specfile, bin_type="channel", emin=0.1,
+                       emax=10.0, nchan=2000, clobber=False, energy_bins=False):
         r"""
         Bin event energies into a spectrum and write it to a FITS binary table. Can bin
         on energy or channel. If binning on channel, the spectral binning will be determined by 
@@ -1234,11 +1235,16 @@
             The maximum energy of the spectral bins in keV. Only used if binning without an RMF.
         nchan : integer, optional
             The number of channels. Only used if binning without an RMF.
+        energy_bins : boolean, optional
+            Bin on energy or channel. Deprecated in favor of *bin_type*. 
         """
+        if energy_bins:
+            bin_type = "energy"
+            warnings.warn("The energy_bins keyword is deprecated. Please use "
+                          "the bin_type keyword instead. Setting bin_type == 'energy'.")
         pyfits = _astropy.pyfits
         if bin_type == "channel" and "ChannelType" in self.parameters:
             spectype = self.parameters["ChannelType"]
-            espec = self.events[spectype]
             f = pyfits.open(self.parameters["RMF"])
             nchan = int(f[1].header["DETCHANS"])
             try:
@@ -1247,19 +1253,13 @@
                 mylog.warning("Cannot determine minimum allowed value for channel. " +
                               "Setting to 0, which may be wrong.")
                 cmin = int(0)
-            try:
-                cmax = int(f[1].header["TLMAX4"])
-            except KeyError:
-                mylog.warning("Cannot determine maximum allowed value for channel. " +
-                              "Setting to DETCHANS, which may be wrong.")
-                cmax = int(nchan)
             f.close()
             minlength = nchan
             if cmin == 1: minlength += 1
             spec = np.bincount(self.events[spectype],minlength=minlength)
             if cmin == 1: spec = spec[1:]
             bins = (np.arange(nchan)+cmin).astype("int32")
-        else:    
+        else:
             espec = self.events["eobs"].d
             erange = (emin, emax)
             spec, ee = np.histogram(espec, bins=nchan, range=erange)
@@ -1302,7 +1302,7 @@
             tbhdu.header["RESPFILE"] = "none"
         if "ARF" in self.parameters:
             tbhdu.header["ANCRFILE"] = self.parameters["ARF"]
-        else:        
+        else:
             tbhdu.header["ANCRFILE"] = "none"
         if "Mission" in self.parameters:
             tbhdu.header["MISSION"] = self.parameters["Mission"]


https://bitbucket.org/yt_analysis/yt/commits/98c8bdfbc1e0/
Changeset:   98c8bdfbc1e0
Branch:      yt
User:        chummels
Date:        2015-07-09 23:38:02+00:00
Summary:     Merged in jzuhone/yt (pull request #1616)

Speedups and improvements for photon_simulator
Affected #:  4 files

diff -r c66deb7d69cab3114e9563d6cf9ed57521675ff4 -r 98c8bdfbc1e00d4ee2388663a7f4072bbf98fc57 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -10,6 +10,10 @@
 simulated X-ray photon lists of events from datasets that yt is able
 to read. The simulated events then can be exported to X-ray telescope
 simulators to produce realistic observations or can be analyzed in-line.
+
+For detailed information about the design of the algorithm in yt, check 
+out `the SciPy 2014 Proceedings <http://conference.scipy.org/proceedings/scipy2014/zuhone.html>`_.
+
 The algorithm is based off of that implemented in
 `PHOX <http://www.mpa-garching.mpg.de/~kdolag/Phox/>`_ for SPH datasets
 by Veronica Biffi and Klaus Dolag. There are two relevant papers:
@@ -139,6 +143,12 @@
 the optional keyword ``thermal_broad`` is set to ``True``, the spectral
 lines will be thermally broadened.
 
+.. note:: 
+
+   ``SpectralModel`` objects based on XSPEC models (both the thermal 
+   emission and Galactic absorption models mentioned below) only work 
+   in Python 2.7, since currently PyXspec only works with Python 2.x. 
+   
 Now that we have our ``SpectralModel`` that gives us a spectrum, we need
 to connect this model to a ``PhotonModel`` that will connect the field
 data in the ``data_source`` to the spectral model to actually generate
@@ -148,7 +158,8 @@
 .. code:: python
 
     thermal_model = ThermalPhotonModel(apec_model, X_H=0.75, Zmet=0.3,
-                                       photons_per_chunk=100000000)
+                                       photons_per_chunk=100000000,
+                                       method="invert_cdf")
 
 Where we pass in the ``SpectralModel``, and can optionally set values for
 the hydrogen mass fraction ``X_H`` and metallicity ``Z_met``. If
@@ -165,6 +176,18 @@
 this parameter needs to be set higher, or if you are looking to decrease memory
 usage, you might set this parameter lower.
 
+The ``method`` keyword argument is also optional, and determines how the individual
+photon energies are generated from the spectrum. It may be set to one of two values:
+
+* ``method="invert_cdf"``: Construct the cumulative distribution function of the spectrum and invert
+  it, using uniformly drawn random numbers to determine the photon energies (fast, but relies
+  on construction of the CDF and interpolation between the points, so for some spectra it
+  may not be accurate enough). 
+* ``method="accept_reject"``: Generate the photon energies from the spectrum using an acceptance-rejection
+  technique (accurate, but likely to be slow). 
+
+``method="invert_cdf"`` (the default) should be sufficient for most cases. 
+
 Next, we need to specify "fiducial" values for the telescope collecting
 area, exposure time, and cosmological redshift. Remember, the initial
 photon generation will act as a source for Monte-Carlo sampling for more
@@ -191,12 +214,29 @@
 By default, the angular diameter distance to the object is determined
 from the ``cosmology`` and the cosmological ``redshift``. If a
 ``Cosmology`` instance is not provided, one will be made from the
-default cosmological parameters. If your source is local to the galaxy,
-you can set its distance directly, using a tuple, e.g.
-``dist=(30, "kpc")``. In this case, the ``redshift`` and ``cosmology``
-will be ignored. Finally, if the photon generating function accepts any
-parameters, they can be passed to ``from_scratch`` via a ``parameters``
-dictionary.
+default cosmological parameters. The ``center`` keyword argument specifies
+the center of the photon distribution, and the photon positions will be 
+rescaled with this value as the origin. This argument accepts the following
+values:
+
+* A NumPy array or list corresponding to the coordinates of the center in
+  units of code length. 
+* A ``YTArray`` corresponding to the coordinates of the center in some
+  length units. 
+* ``"center"`` or ``"c"`` corresponds to the domain center. 
+* ``"max"`` or ``"m"`` corresponds to the location of the maximum gas density. 
+* A two-element tuple specifying the max or min of a specific field, e.g.,
+  ``("min","gravitational_potential")``, ``("max","dark_matter_density")``
+
+If ``center`` is not specified, ``from_scratch`` will attempt to use the 
+``"center"`` field parameter of the ``data_source``. 
+
+``from_scratch`` takes a few other optional keyword arguments. If your 
+source is local to the galaxy, you can set its distance directly, using 
+a tuple, e.g. ``dist=(30, "kpc")``. In this case, the ``redshift`` and 
+``cosmology`` will be ignored. Finally, if the photon generating 
+function accepts any parameters, they can be passed to ``from_scratch`` 
+via a ``parameters`` dictionary.
 
 At this point, the ``photons`` are distributed in the three-dimensional
 space of the ``data_source``, with energies in the rest frame of the
@@ -265,7 +305,7 @@
     abs_model = TableAbsorbModel("tbabs_table.h5", 0.1)
 
 Now we're ready to project the photons. First, we choose a line-of-sight
-vector ``L``. Second, we'll adjust the exposure time and the redshift.
+vector ``normal``. Second, we'll adjust the exposure time and the redshift.
 Third, we'll pass in the absorption ``SpectrumModel``. Fourth, we'll
 specify a ``sky_center`` in RA,DEC on the sky in degrees.
 
@@ -274,26 +314,40 @@
 course far short of a full simulation of a telescope ray-trace, but it's
 a quick-and-dirty way to get something close to the real thing. We'll
 discuss how to get your simulated events into a format suitable for
-reading by telescope simulation codes later.
+reading by telescope simulation codes later. If you just want to convolve 
+the photons with an ARF, you may specify that as the only response, but some
+ARFs are unnormalized and still require the RMF for normalization. Check with
+the documentation associated with these files for details. If we are using the
+RMF to convolve energies, we must set ``convolve_energies=True``. 
 
 .. code:: python
 
     ARF = "chandra_ACIS-S3_onaxis_arf.fits"
     RMF = "chandra_ACIS-S3_onaxis_rmf.fits"
-    L = [0.0,0.0,1.0]
-    events = photons.project_photons(L, exp_time_new=2.0e5, redshift_new=0.07, absorb_model=abs_model,
-                                     sky_center=(187.5,12.333), responses=[ARF,RMF])
+    normal = [0.0,0.0,1.0]
+    events = photons.project_photons(normal, exp_time_new=2.0e5, redshift_new=0.07, dist_new=None, 
+                                     absorb_model=abs_model, sky_center=(187.5,12.333), responses=[ARF,RMF], 
+                                     convolve_energies=True, no_shifting=False, north_vector=None,
+                                     psf_sigma=None)
 
-Also, the optional keyword ``psf_sigma`` specifies a Gaussian standard
-deviation to scatter the photon sky positions around with, providing a
-crude representation of a PSF.
+In this case, we chose a three-vector ``normal`` to specify an arbitrary 
+line-of-sight, but ``"x"``, ``"y"``, or ``"z"`` could also be chosen to 
+project along one of those axes. 
 
-.. warning::
+``project_photons`` takes several other optional keyword arguments. 
 
-   The binned images that result, even if you convolve with responses,
-   are still of the same resolution as the finest cell size of the
-   simulation dataset. If you want a more accurate simulation of a
-   particular X-ray telescope, you should check out `Storing events for future use and for reading-in by telescope simulators`_.
+* ``no_shifting`` (default ``False``) controls whether or not Doppler 
+  shifting of photon energies is turned on. 
+* ``dist_new`` is a (value, unit) tuple that is used to set a new
+  angular diameter distance by hand instead of having it determined
+  by the cosmology and the value of the redshift. Should only be used
+  for simulations of nearby objects. 
+* For off-axis ``normal`` vectors,  the ``north_vector`` argument can 
+  be used to control what vector corresponds to the "up" direction in 
+  the resulting event list. 
+* ``psf_sigma`` may be specified to provide a crude representation of 
+  a PSF, and corresponds to the standard deviation (in degrees) of a 
+  Gaussian PSF model. 
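+
+For instance, assuming the ``photons`` object from above, these keywords might 
+be combined as follows (the values here are purely illustrative):
+
+.. code:: python
+
+    events = photons.project_photons([1.0,-0.5,2.0], north_vector=[0.0,0.0,1.0],
+                                     dist_new=(20.0, "Mpc"), no_shifting=True)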
 
 Let's just take a quick look at the raw events object:
 
@@ -343,19 +397,27 @@
 
 Which is starting to look like a real observation!
 
+.. warning::
+
+   The binned images that result, even if you convolve with responses,
+   are still of the same resolution as the finest cell size of the
+   simulation dataset. If you want a more accurate simulation of a
+   particular X-ray telescope, you should check out `Storing events for future use and for reading-in by telescope simulators`_.
+
 We can also bin up the spectrum into energy bins, and write it to a FITS
 table file. This is an example where we've binned up the spectrum
 according to the unconvolved photon energy:
 
 .. code:: python
 
-    events.write_spectrum("virgo_spec.fits", energy_bins=True, emin=0.1, emax=10.0, nchan=2000, clobber=True)
+    events.write_spectrum("virgo_spec.fits", bin_type="energy", emin=0.1, emax=10.0, nchan=2000, clobber=True)
 
-If we don't set ``energy_bins=True``, and we have convolved our events
+We can also set ``bin_type="channel"``. If we have convolved our events
 with response files, then any other keywords will be ignored and it will
 try to make a spectrum from the channel information that is contained
-within the RMF, suitable for analyzing in XSPEC. For now, we'll stick
-with the energy spectrum, and plot it up:
+within the RMF. Otherwise, the channels will be determined from the ``emin``, 
+``emax``, and ``nchan`` keywords, and will be numbered from 1 to ``nchan``. 
+For now, we'll stick with the energy spectrum, and plot it up:
 
 .. code:: python
 

diff -r c66deb7d69cab3114e9563d6cf9ed57521675ff4 -r 98c8bdfbc1e00d4ee2388663a7f4072bbf98fc57 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -25,7 +25,7 @@
 from yt.extern.six import string_types
 import numpy as np
 from yt.funcs import *
-from yt.utilities.physical_constants import mp, kboltz
+from yt.utilities.physical_constants import mp
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      parallel_objects
 from yt.units.yt_array import uconcatenate
@@ -34,6 +34,12 @@
 kT_min = 8.08e-2
 kT_max = 50.
 
+photon_units = {"Energy":"keV",
+                "dx":"kpc"}
+for ax in "xyz":
+    photon_units[ax] = "kpc"
+    photon_units["v"+ax] = "km/s"
+
 class PhotonModel(object):
 
     def __init__(self):
@@ -46,7 +52,7 @@
 class ThermalPhotonModel(PhotonModel):
     r"""
     Initialize a ThermalPhotonModel from a thermal spectrum. 
-    
+
     Parameters
     ----------
 
@@ -61,30 +67,37 @@
     photons_per_chunk : integer
         The maximum number of photons that are allocated per chunk. Increase or decrease
         as needed.
+    method : string, optional
+        The method used to generate the photon energies from the spectrum:
+        "invert_cdf": Invert the cumulative distribution function of the spectrum.
+        "accept_reject": Acceptance-rejection method using the spectrum. 
+        The first method should be sufficient for most cases. 
     """
-    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3, photons_per_chunk=10000000):
+    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3, 
+                 photons_per_chunk=10000000, method="invert_cdf"):
         self.X_H = X_H
         self.Zmet = Zmet
         self.spectral_model = spectral_model
         self.photons_per_chunk = photons_per_chunk
+        self.method = method
 
     def __call__(self, data_source, parameters):
-        
+
         ds = data_source.ds
 
         exp_time = parameters["FiducialExposureTime"]
         area = parameters["FiducialArea"]
         redshift = parameters["FiducialRedshift"]
         D_A = parameters["FiducialAngularDiameterDistance"].in_cgs()
-        dist_fac = 1.0/(4.*np.pi*D_A.value*D_A.value*(1.+redshift)**3)
+        dist_fac = 1.0/(4.*np.pi*D_A.value*D_A.value*(1.+redshift)**2)
         src_ctr = parameters["center"]
 
-        vol_scale = 1.0/np.prod(ds.domain_width.in_cgs().to_ndarray())
-
         my_kT_min, my_kT_max = data_source.quantities.extrema("kT")
 
-        self.spectral_model.prepare()
-        energy = self.spectral_model.ebins
+        self.spectral_model.prepare_spectrum(redshift)
+        emid = self.spectral_model.emid
+        ebins = self.spectral_model.ebins
+        nchan = len(emid)
 
         citer = data_source.chunks([], "io")
 
@@ -99,7 +112,13 @@
         photons["Energy"] = []
         photons["NumberOfPhotons"] = []
 
-        spectral_norm = area.v*exp_time.v*dist_fac/vol_scale
+        spectral_norm = area.v*exp_time.v*dist_fac
+
+        tot_num_cells = data_source.ires.shape[0]
+
+        pbar = get_pbar("Generating photons ", tot_num_cells)
+
+        cell_counter = 0
 
         for chunk in parallel_objects(citer):
 
@@ -114,7 +133,7 @@
             if isinstance(self.Zmet, string_types):
                 metalZ = chunk[self.Zmet].v
             else:
-                metalZ = self.Zmet
+                metalZ = self.Zmet*np.ones(num_cells)
 
             idxs = np.argsort(kT)
 
@@ -133,7 +152,7 @@
                 n += bcount
             kT_idxs = np.unique(kT_idxs)
 
-            cell_em = EM[idxs]*vol_scale
+            cell_em = EM[idxs]*spectral_norm
 
             number_of_photons = np.zeros(num_cells, dtype="uint64")
             energies = np.zeros(self.photons_per_chunk)
@@ -141,8 +160,6 @@
             start_e = 0
             end_e = 0
 
-            pbar = get_pbar("Generating photons for chunk ", num_cells)
-
             for ibegin, iend, ikT in zip(bcell, ecell, kT_idxs):
 
                 kT = kT_bins[ikT] + 0.5*dkT
@@ -151,58 +168,57 @@
 
                 cem = cell_em[ibegin:iend]
 
-                em_sum_c = cem.sum()
-                if isinstance(self.Zmet, string_types):
-                    em_sum_m = (metalZ[ibegin:iend]*cem).sum()
-                else:
-                    em_sum_m = metalZ*em_sum_c
-
                 cspec, mspec = self.spectral_model.get_spectrum(kT)
 
-                cumspec_c = np.cumsum(cspec.d)
-                tot_ph_c = cumspec_c[-1]*spectral_norm*em_sum_c
-                cumspec_c /= cumspec_c[-1]
-                cumspec_c = np.insert(cumspec_c, 0, 0.0)
-
-                cumspec_m = np.cumsum(mspec.d)
-                tot_ph_m = cumspec_m[-1]*spectral_norm*em_sum_m
-                cumspec_m /= cumspec_m[-1]
-                cumspec_m = np.insert(cumspec_m, 0, 0.0)
+                tot_ph_c = cspec.d.sum()
+                tot_ph_m = mspec.d.sum()
 
                 u = np.random.random(size=n_current)
 
-                cell_norm_c = tot_ph_c*cem/em_sum_c
-                cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u)
-            
-                if isinstance(self.Zmet, string_types):
-                    cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cem/em_sum_m
-                else:
-                    cell_norm_m = tot_ph_m*metalZ*cem/em_sum_m
-                cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u)
-            
-                number_of_photons[ibegin:iend] = cell_n_c + cell_n_m
+                cell_norm_c = tot_ph_c*cem
+                cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cem
+                cell_norm = np.modf(cell_norm_c + cell_norm_m)
+                cell_n = np.uint64(cell_norm[1]) + np.uint64(cell_norm[0] >= u)
 
-                end_e += int((cell_n_c+cell_n_m).sum())
+                number_of_photons[ibegin:iend] = cell_n
+
+                end_e += int(cell_n.sum())
 
                 if end_e > self.photons_per_chunk:
                     raise RuntimeError("Number of photons generated for this chunk "+
                                        "exceeds photons_per_chunk (%d)! " % self.photons_per_chunk +
                                        "Increase photons_per_chunk!")
 
-                energies[start_e:end_e] = _generate_energies(cell_n_c, cell_n_m, cumspec_c, cumspec_m, energy)
-            
+                if self.method == "invert_cdf":
+                    cumspec_c = np.cumsum(cspec.d)
+                    cumspec_m = np.cumsum(mspec.d)
+                    cumspec_c = np.insert(cumspec_c, 0, 0.0)
+                    cumspec_m = np.insert(cumspec_m, 0, 0.0)
+
+                ei = start_e
+                for cn, Z in zip(number_of_photons[ibegin:iend], metalZ[ibegin:iend]):
+                    if cn == 0: continue
+                    if self.method == "invert_cdf":
+                        cumspec = cumspec_c + Z*cumspec_m
+                        cumspec /= cumspec[-1]
+                        randvec = np.random.uniform(size=cn)
+                        randvec.sort()
+                        cell_e = np.interp(randvec, cumspec, ebins)
+                    elif self.method == "accept_reject":
+                        tot_spec = cspec.d+Z*mspec.d
+                        tot_spec /= tot_spec.sum()
+                        eidxs = np.random.choice(nchan, size=cn, p=tot_spec)
+                        cell_e = emid[eidxs]
+                    energies[ei:ei+cn] = cell_e
+                    cell_counter += 1
+                    pbar.update(cell_counter)
+                    ei += cn
+
                 start_e = end_e
 
-                pbar.update(iend)
-
-            pbar.finish()
-
             active_cells = number_of_photons > 0
             idxs = idxs[active_cells]
 
-            mylog.info("Number of photons generated for this chunk: %d" % int(number_of_photons.sum()))
-            mylog.info("Number of cells with photons: %d" % int(active_cells.sum()))
-
             photons["NumberOfPhotons"].append(number_of_photons[active_cells])
             photons["Energy"].append(ds.arr(energies[:end_e].copy(), "keV"))
             photons["x"].append((chunk["x"][idxs]-src_ctr[0]).in_units("kpc"))
@@ -213,23 +229,17 @@
             photons["vz"].append(chunk["velocity_z"][idxs].in_units("km/s"))
             photons["dx"].append(chunk["dx"][idxs].in_units("kpc"))
 
+        pbar.finish()
+
         for key in photons:
             if len(photons[key]) > 0:
                 photons[key] = uconcatenate(photons[key])
+            elif key == "NumberOfPhotons":
+                photons[key] = np.array([])
+            else:
+                photons[key] = YTArray([], photon_units[key])
+
+        mylog.info("Number of photons generated: %d" % int(np.sum(photons["NumberOfPhotons"])))
+        mylog.info("Number of cells with photons: %d" % len(photons["x"]))
 
         return photons
-
-def _generate_energies(cell_n_c, cell_n_m, counts_c, counts_m, energy):
-    energies = np.array([])
-    for cn_c, cn_m in zip(cell_n_c, cell_n_m):
-        if cn_c > 0:
-            randvec_c = np.random.uniform(size=cn_c)
-            randvec_c.sort()
-            cell_e_c = np.interp(randvec_c, counts_c, energy)
-            energies = np.append(energies, cell_e_c)
-        if cn_m > 0: 
-            randvec_m = np.random.uniform(size=cn_m)
-            randvec_m.sort()
-            cell_e_m = np.interp(randvec_m, counts_m, energy)
-            energies = np.append(energies, cell_e_m)
-    return energies

diff -r c66deb7d69cab3114e9563d6cf9ed57521675ff4 -r 98c8bdfbc1e00d4ee2388663a7f4072bbf98fc57 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -1,5 +1,10 @@
 """
 Classes for generating lists of photons and detected events
+
+The SciPy Proceeding that describes this module in detail may be found at:
+
+http://conference.scipy.org/proceedings/scipy2014/zuhone.html
+
 The algorithms used here are based off of the method used by the
 PHOX code (http://www.mpa-garching.mpg.de/~kdolag/Phox/),
 developed by Veronica Biffi and Klaus Dolag. References for
@@ -25,15 +30,21 @@
 from yt.utilities.physical_constants import clight
 from yt.utilities.cosmology import Cosmology
 from yt.utilities.orientation import Orientation
+from yt.utilities.fits_image import assert_same_wcs
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only, get_mpi_type, \
      parallel_capable
 from yt.units.yt_array import YTQuantity, YTArray, uconcatenate
 import h5py
 from yt.utilities.on_demand_imports import _astropy
+import warnings
 
 comm = communication_system.communicators[-1]
 
+axes_lookup = {"x":("y","z"),
+               "y":("z","x"),
+               "z":("x","y")}
+
 def parse_value(value, default_units):
     if isinstance(value, YTQuantity):
         return value.in_units(default_units)
@@ -50,10 +61,10 @@
         self.cosmo = cosmo
         self.p_bins = p_bins
         self.num_cells = len(photons["x"])
-        
+
     def keys(self):
         return self.photons.keys()
-    
+
     def items(self):
         ret = []
         for k, v in self.photons.items():
@@ -62,7 +73,7 @@
             else:
                 ret.append((k,v))
         return ret
-    
+
     def values(self):
         ret = []
         for k, v in self.photons.items():
@@ -71,7 +82,7 @@
             else:
                 ret.append(v)
         return ret
-                                
+
     def __getitem__(self, key):
         if key == "Energy":
             return [self.photons["Energy"][self.p_bins[i]:self.p_bins[i+1]]
@@ -127,9 +138,9 @@
 
         p_bins = np.cumsum(photons["NumberOfPhotons"])
         p_bins = np.insert(p_bins, 0, [np.uint64(0)])
-        
+
         photons["Energy"] = YTArray(f["/energy"][start_e:end_e], "keV")
-        
+
         f.close()
 
         cosmo = Cosmology(hubble_constant=parameters["HubbleConstant"],
@@ -137,7 +148,7 @@
                           omega_lambda=parameters["OmegaLambda"])
 
         return cls(photons, parameters, cosmo, p_bins)
-    
+
     @classmethod
     def from_scratch(cls, data_source, redshift, area,
                      exp_time, photon_model, parameters=None,
@@ -277,18 +288,25 @@
             D_A = parse_value(dist, "Mpc")
             redshift = 0.0
 
-        if center == "c":
+        if center in ("center", "c"):
             parameters["center"] = ds.domain_center
-        elif center == "max":
+        elif center in ("max", "m"):
             parameters["center"] = ds.find_max("density")[-1]
         elif iterable(center):
             if isinstance(center, YTArray):
                 parameters["center"] = center.in_units("code_length")
+            elif isinstance(center, tuple):
+                if center[0] == "min":
+                    parameters["center"] = ds.find_min(center[1])[-1]
+                elif center[0] == "max":
+                    parameters["center"] = ds.find_max(center[1])[-1]
+                else:
+                    raise RuntimeError
             else:
                 parameters["center"] = ds.arr(center, "code_length")
         elif center is None:
             parameters["center"] = data_source.get_field_parameter("center")
-            
+
         parameters["FiducialExposureTime"] = parse_value(exp_time, "s")
         parameters["FiducialArea"] = parse_value(area, "cm**2")
         parameters["FiducialRedshift"] = redshift
@@ -308,32 +326,32 @@
             dimension = max(dimension, int(width/delta_min))
         parameters["Dimension"] = 2*dimension
         parameters["Width"] = 2.*width.in_units("kpc")
-                
+
         photons = photon_model(data_source, parameters)
 
         mylog.info("Finished generating photons.")
 
         p_bins = np.cumsum(photons["NumberOfPhotons"])
         p_bins = np.insert(p_bins, 0, [np.uint64(0)])
-                        
+
         return cls(photons, parameters, cosmo, p_bins)
-        
+
     def write_h5_file(self, photonfile):
         """
         Write the photons to the HDF5 file *photonfile*.
         """
 
         if parallel_capable:
-            
+
             mpi_long = get_mpi_type("int64")
             mpi_double = get_mpi_type("float64")
-        
+
             local_num_cells = len(self.photons["x"])
             sizes_c = comm.comm.gather(local_num_cells, root=0)
-            
-            local_num_photons = self.photons["NumberOfPhotons"].sum()
+
+            local_num_photons = np.sum(self.photons["NumberOfPhotons"])
             sizes_p = comm.comm.gather(local_num_photons, root=0)
-            
+
             if comm.rank == 0:
                 num_cells = sum(sizes_c)
                 num_photons = sum(sizes_p)        
@@ -362,24 +380,24 @@
                 dx = np.empty([])
                 n_ph = np.empty([])
                 e = np.empty([])
-                                                
-            comm.comm.Gatherv([self.photons["x"].ndarray_view(), local_num_cells, mpi_double],
+
+            comm.comm.Gatherv([self.photons["x"].d, local_num_cells, mpi_double],
                               [x, (sizes_c, disps_c), mpi_double], root=0)
-            comm.comm.Gatherv([self.photons["y"].ndarray_view(), local_num_cells, mpi_double],
+            comm.comm.Gatherv([self.photons["y"].d, local_num_cells, mpi_double],
                               [y, (sizes_c, disps_c), mpi_double], root=0)
-            comm.comm.Gatherv([self.photons["z"].ndarray_view(), local_num_cells, mpi_double],
+            comm.comm.Gatherv([self.photons["z"].d, local_num_cells, mpi_double],
                               [z, (sizes_c, disps_c), mpi_double], root=0)
-            comm.comm.Gatherv([self.photons["vx"].ndarray_view(), local_num_cells, mpi_double],
+            comm.comm.Gatherv([self.photons["vx"].d, local_num_cells, mpi_double],
                               [vx, (sizes_c, disps_c), mpi_double], root=0)
-            comm.comm.Gatherv([self.photons["vy"].ndarray_view(), local_num_cells, mpi_double],
+            comm.comm.Gatherv([self.photons["vy"].d, local_num_cells, mpi_double],
                               [vy, (sizes_c, disps_c), mpi_double], root=0)
-            comm.comm.Gatherv([self.photons["vz"].ndarray_view(), local_num_cells, mpi_double],
+            comm.comm.Gatherv([self.photons["vz"].d, local_num_cells, mpi_double],
                               [vz, (sizes_c, disps_c), mpi_double], root=0)
-            comm.comm.Gatherv([self.photons["dx"].ndarray_view(), local_num_cells, mpi_double],
+            comm.comm.Gatherv([self.photons["dx"].d, local_num_cells, mpi_double],
                               [dx, (sizes_c, disps_c), mpi_double], root=0)
             comm.comm.Gatherv([self.photons["NumberOfPhotons"], local_num_cells, mpi_long],
                               [n_ph, (sizes_c, disps_c), mpi_long], root=0)
-            comm.comm.Gatherv([self.photons["Energy"].ndarray_view(), local_num_photons, mpi_double],
+            comm.comm.Gatherv([self.photons["Energy"].d, local_num_photons, mpi_double],
                               [e, (sizes_p, disps_p), mpi_double], root=0) 
 
         else:
@@ -393,13 +411,13 @@
             dx = self.photons["dx"].d
             n_ph = self.photons["NumberOfPhotons"]
             e = self.photons["Energy"].d
-                                                
+
         if comm.rank == 0:
-            
+
             f = h5py.File(photonfile, "w")
 
             # Scalars
-       
+
             f.create_dataset("fid_area", data=float(self.parameters["FiducialArea"]))
             f.create_dataset("fid_exp_time", data=float(self.parameters["FiducialExposureTime"]))
             f.create_dataset("fid_redshift", data=self.parameters["FiducialRedshift"])
@@ -409,7 +427,7 @@
             f.create_dataset("fid_d_a", data=float(self.parameters["FiducialAngularDiameterDistance"]))
             f.create_dataset("dimension", data=self.parameters["Dimension"])
             f.create_dataset("width", data=float(self.parameters["Width"]))
-                        
+
             # Arrays
 
             f.create_dataset("x", data=x)
@@ -426,19 +444,22 @@
 
         comm.barrier()
 
-    def project_photons(self, L, area_new=None, exp_time_new=None, 
+    def project_photons(self, normal, area_new=None, exp_time_new=None, 
                         redshift_new=None, dist_new=None,
                         absorb_model=None, psf_sigma=None,
                         sky_center=None, responses=None,
-                        convolve_energies=False, no_shifting=False):
+                        convolve_energies=False, no_shifting=False,
+                        north_vector=None):
         r"""
         Projects photons onto an image plane given a line of sight.
 
         Parameters
         ----------
 
-        L : array_like
-            Normal vector to the plane of projection.
+        normal : string or array_like
+            Normal vector to the plane of projection. If "x", "y", or "z", the
+            projection will be along that axis (and will probably be faster).
+            Otherwise, this should be an off-axis normal vector, e.g. [1.0, 2.0, -3.0].
         area_new : float, optional
             New value for the effective area of the detector. If *responses*
             are specified the value of this keyword is ignored.
@@ -463,7 +484,11 @@
             If this is set, the photon energies will be convolved with the RMF.
         no_shifting : boolean, optional
             If set, the photon energies will not be Doppler shifted.
-            
+        north_vector : a sequence of floats, optional
+            A vector defining the 'up' direction. This option sets the orientation of
+            the plane of projection. If not set, an arbitrary grid-aligned north_vector
+            is chosen. Ignored if a particular axis (e.g., "x", "y", or "z") is
+            explicitly specified.
+
         Examples
         --------
         >>> L = np.array([0.1,-0.2,0.3])
@@ -475,7 +500,7 @@
         if redshift_new is not None and dist_new is not None:
             mylog.error("You may specify a new redshift or distance, "+
                         "but not both!")
-        
+
         if sky_center is None:
             sky_center = YTArray([30.,45.], "degree")
         else:
@@ -486,37 +511,36 @@
         if psf_sigma is not None:
              psf_sigma = parse_value(psf_sigma, "degree")
 
-        L = np.array(L)
-        L /= np.sqrt(np.dot(L, L))
-        vecs = np.identity(3)
-        t = np.cross(L, vecs).sum(axis=1)
-        ax = t.argmax()
-        north = np.cross(L, vecs[ax,:]).ravel()
-        orient = Orientation(L, north_vector=north)
-        
-        x_hat = orient.unit_vectors[0]
-        y_hat = orient.unit_vectors[1]
-        z_hat = orient.unit_vectors[2]
+        if not isinstance(normal, string_types):
+            L = np.array(normal)
+            orient = Orientation(L, north_vector=north_vector)
+            x_hat = orient.unit_vectors[0]
+            y_hat = orient.unit_vectors[1]
+            z_hat = orient.unit_vectors[2]
 
         n_ph = self.photons["NumberOfPhotons"]
         n_ph_tot = n_ph.sum()
-        
+
         eff_area = None
 
         parameters = {}
-        
+
         if responses is not None:
             responses = ensure_list(responses)
             parameters["ARF"] = responses[0]
             if len(responses) == 2:
                 parameters["RMF"] = responses[1]
             area_new = parameters["ARF"]
-            
+
+        zobs0 = self.parameters["FiducialRedshift"]
+        D_A0 = self.parameters["FiducialAngularDiameterDistance"]
+        scale_factor = 1.0
+
         if (exp_time_new is None and area_new is None and
             redshift_new is None and dist_new is None):
             my_n_obs = n_ph_tot
-            zobs = self.parameters["FiducialRedshift"]
-            D_A = self.parameters["FiducialAngularDiameterDistance"]
+            zobs = zobs0
+            D_A = D_A0
         else:
             if exp_time_new is None:
                 Tratio = 1.
@@ -545,8 +569,8 @@
                 Aratio = parse_value(area_new, "cm**2")/self.parameters["FiducialArea"]
             if redshift_new is None and dist_new is None:
                 Dratio = 1.
-                zobs = self.parameters["FiducialRedshift"]
-                D_A = self.parameters["FiducialAngularDiameterDistance"]
+                zobs = zobs0
+                D_A = D_A0
             else:
                 if redshift_new is None:
                     zobs = 0.0
@@ -554,28 +578,17 @@
                 else:
                     zobs = redshift_new
                     D_A = self.cosmo.angular_diameter_distance(0.0,zobs).in_units("Mpc")
-                fid_D_A = self.parameters["FiducialAngularDiameterDistance"]
-                Dratio = fid_D_A*fid_D_A*(1.+self.parameters["FiducialRedshift"]**3) / \
+                    scale_factor = (1.+zobs0)/(1.+zobs)
+                Dratio = D_A0*D_A0*(1.+zobs0)**3 / \
                          (D_A*D_A*(1.+zobs)**3)
             fak = Aratio*Tratio*Dratio
             if fak > 1:
-                raise ValueError("Spectrum scaling factor = %g, cannot be greater than unity." % (fak))
+                raise ValueError("Spectrum scaling factor = %g, cannot be greater than unity." % fak)
             my_n_obs = np.uint64(n_ph_tot*fak)
 
         n_obs_all = comm.mpi_allreduce(my_n_obs)
         if comm.rank == 0:
             mylog.info("Total number of photons to use: %d" % (n_obs_all))
-        
-        x = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-        y = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-        z = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-
-        if not no_shifting:
-            vz = self.photons["vx"]*z_hat[0] + \
-                 self.photons["vy"]*z_hat[1] + \
-                 self.photons["vz"]*z_hat[2]
-            shift = -vz.in_cgs()/clight
-            shift = np.sqrt((1.-shift)/(1.+shift))
 
         if my_n_obs == n_ph_tot:
             idxs = np.arange(my_n_obs,dtype='uint64')
@@ -584,32 +597,57 @@
         obs_cells = np.searchsorted(self.p_bins, idxs, side='right')-1
         delta = dx[obs_cells]
 
-        x *= delta
-        y *= delta
-        z *= delta
-        x += self.photons["x"][obs_cells]
-        y += self.photons["y"][obs_cells]
-        z += self.photons["z"][obs_cells]
+        if isinstance(normal, string_types):
+
+            xsky = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            ysky = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            xsky *= delta
+            ysky *= delta
+            xsky += self.photons[axes_lookup[normal][0]][obs_cells]
+            ysky += self.photons[axes_lookup[normal][1]][obs_cells]
+
+            if not no_shifting:
+                vz = self.photons["v%s" % normal]
+
+        else:
+            x = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            y = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            z = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+
+            if not no_shifting:
+                vz = self.photons["vx"]*z_hat[0] + \
+                     self.photons["vy"]*z_hat[1] + \
+                     self.photons["vz"]*z_hat[2]
+
+            x *= delta
+            y *= delta
+            z *= delta
+            x += self.photons["x"][obs_cells]
+            y += self.photons["y"][obs_cells]
+            z += self.photons["z"][obs_cells]
+
+            xsky = x*x_hat[0] + y*x_hat[1] + z*x_hat[2]
+            ysky = x*y_hat[0] + y*y_hat[1] + z*y_hat[2]
+
         if no_shifting:
             eobs = self.photons["Energy"][idxs]
         else:
+            shift = -vz.in_cgs()/clight
+            shift = np.sqrt((1.-shift)/(1.+shift))
             eobs = self.photons["Energy"][idxs]*shift[obs_cells]
+        eobs *= scale_factor
 
-        xsky = x*x_hat[0] + y*x_hat[1] + z*x_hat[2]
-        ysky = x*y_hat[0] + y*y_hat[1] + z*y_hat[2]
-        eobs /= (1.+zobs)
-        
         if absorb_model is None:
             not_abs = np.ones(eobs.shape, dtype='bool')
         else:
             mylog.info("Absorbing.")
-            absorb_model.prepare()
+            absorb_model.prepare_spectrum()
             emid = absorb_model.emid
             aspec = absorb_model.get_spectrum()
             absorb = np.interp(eobs, emid, aspec, left=0.0, right=0.0)
             randvec = aspec.max()*np.random.random(eobs.shape)
             not_abs = randvec < absorb
-        
+
         if eff_area is None:
             detected = np.ones(eobs.shape, dtype='bool')
         else:
@@ -618,14 +656,14 @@
             earea = np.interp(eobs, earf, eff_area, left=0.0, right=0.0)
             randvec = eff_area.max()*np.random.random(eobs.shape)
             detected = randvec < earea
-        
+
         detected = np.logical_and(not_abs, detected)
-                    
+
         events = {}
 
         dx_min = self.parameters["Width"]/self.parameters["Dimension"]
         dtheta = YTQuantity(np.rad2deg(dx_min/D_A), "degree")
-        
+
         events["xpix"] = xsky[detected]/dx_min.v + 0.5*(nx+1)
         events["ypix"] = ysky[detected]/dx_min.v + 0.5*(nx+1)
         events["eobs"] = eobs[detected]
@@ -635,15 +673,15 @@
         if psf_sigma is not None:
             events["xpix"] += np.random.normal(sigma=psf_sigma/dtheta)
             events["ypix"] += np.random.normal(sigma=psf_sigma/dtheta)
-        
+
         num_events = len(events["xpix"])
-            
+
         if comm.rank == 0: mylog.info("Total number of observed photons: %d" % num_events)
 
         if "RMF" in parameters and convolve_energies:
             events, info = self._convolve_with_rmf(parameters["RMF"], events)
             for k, v in info.items(): parameters[k] = v
-                
+
         if exp_time_new is None:
             parameters["ExposureTime"] = self.parameters["FiducialExposureTime"]
         else:
@@ -657,7 +695,7 @@
         parameters["sky_center"] = sky_center
         parameters["pix_center"] = np.array([0.5*(nx+1)]*2)
         parameters["dtheta"] = dtheta
-        
+
         return EventList(events, parameters)
 
     def _normalize_arf(self, respfile):
@@ -688,7 +726,7 @@
 
         eidxs = np.argsort(events["eobs"])
 
-        phEE = events["eobs"][eidxs].ndarray_view()
+        phEE = events["eobs"][eidxs].d
         phXX = events["xpix"][eidxs]
         phYY = events["ypix"][eidxs]
 
@@ -758,7 +796,7 @@
         self.num_events = events["xpix"].shape[0]
         self.wcs = _astropy.pywcs.WCS(naxis=2)
         self.wcs.wcs.crpix = parameters["pix_center"]
-        self.wcs.wcs.crval = parameters["sky_center"].ndarray_view()
+        self.wcs.wcs.crval = parameters["sky_center"].d
         self.wcs.wcs.cdelt = [-parameters["dtheta"].value, parameters["dtheta"].value]
         self.wcs.wcs.ctype = ["RA---TAN","DEC--TAN"]
         self.wcs.wcs.cunit = ["deg"]*2
@@ -785,8 +823,9 @@
         return self.events.__repr__()
 
     def __add__(self, other):
-        keys1 = self.parameters.keys()
-        keys2 = other.parameters.keys()
+        assert_same_wcs(self.wcs, other.wcs)
+        keys1 = list(self.parameters.keys())
+        keys2 = list(other.parameters.keys())
         keys1.sort()
         keys2.sort()
         if keys1 != keys2:
@@ -809,9 +848,9 @@
         return EventList(events, self.parameters)
 
     def filter_events(self, region):
-        """                                                                                                                                 
-        Filter events using a ds9 region. Requires the pyregion package.                                                                    
-        Returns a new EventList.                                                                                                            
+        """
+        Filter events using a ds9 region. Requires the pyregion package.
+        Returns a new EventList.
         """
         import pyregion
         import os
@@ -836,7 +875,7 @@
         """
         events = {}
         parameters = {}
-        
+
         f = h5py.File(h5file, "r")
 
         parameters["ExposureTime"] = YTQuantity(f["/exp_time"].value, "s")
@@ -923,7 +962,7 @@
         cols = []
 
         col1 = pyfits.Column(name='ENERGY', format='E', unit='eV',
-                             array=self.events["eobs"].in_units("eV").ndarray_view())
+                             array=self.events["eobs"].in_units("eV").d)
         col2 = pyfits.Column(name='X', format='D', unit='pixel',
                              array=self.events["xpix"])
         col3 = pyfits.Column(name='Y', format='D', unit='pixel',
@@ -942,7 +981,7 @@
              cols.append(col4)
 
         coldefs = pyfits.ColDefs(cols)
-        tbhdu = pyfits.new_table(coldefs)
+        tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
         tbhdu.update_ext_name("EVENTS")
 
         tbhdu.header["MTYPE1"] = "sky"
@@ -1006,29 +1045,29 @@
         """
         pyfits = _astropy.pyfits
         if isinstance(self.parameters["Area"], string_types):
-             mylog.error("Writing SIMPUT files is only supported if you didn't convolve with an ARF.")
-             raise TypeError("Writing SIMPUT files is only supported if you didn't convolve with an ARF.")
+             mylog.error("Writing SIMPUT files is only supported if you didn't convolve with responses.")
+             raise TypeError("Writing SIMPUT files is only supported if you didn't convolve with responses.")
 
         if emin is None:
-            emin = self.events["eobs"].min().value*0.95
+            emin = self.events["eobs"].min().value
         if emax is None:
-            emax = self.events["eobs"].max().value*1.05
+            emax = self.events["eobs"].max().value
 
-        idxs = np.logical_and(self.events["eobs"].ndarray_view() >= emin,
-                              self.events["eobs"].ndarray_view() <= emax)
+        idxs = np.logical_and(self.events["eobs"].d >= emin,
+                              self.events["eobs"].d <= emax)
         flux = np.sum(self.events["eobs"][idxs].in_units("erg")) / \
                self.parameters["ExposureTime"]/self.parameters["Area"]
 
         col1 = pyfits.Column(name='ENERGY', format='E',
-                             array=self["eobs"].ndarray_view())
+                             array=self["eobs"].d)
         col2 = pyfits.Column(name='DEC', format='D',
-                             array=self["ysky"].ndarray_view())
+                             array=self["ysky"].d)
         col3 = pyfits.Column(name='RA', format='D',
-                             array=self["xsky"].ndarray_view())
+                             array=self["xsky"].d)
 
         coldefs = pyfits.ColDefs([col1, col2, col3])
 
-        tbhdu = pyfits.new_table(coldefs)
+        tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
         tbhdu.update_ext_name("PHLIST")
 
         tbhdu.header["HDUCLASS"] = "HEASARC/SIMPUT"
@@ -1056,7 +1095,7 @@
 
         coldefs = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8])
 
-        wrhdu = pyfits.new_table(coldefs)
+        wrhdu = pyfits.BinTableHDU.from_columns(coldefs)
         wrhdu.update_ext_name("SRC_CAT")
 
         wrhdu.header["HDUCLASS"] = "HEASARC"
@@ -1104,14 +1143,14 @@
 
         f.create_dataset("/xpix", data=self.events["xpix"])
         f.create_dataset("/ypix", data=self.events["ypix"])
-        f.create_dataset("/xsky", data=self.events["xsky"].ndarray_view())
-        f.create_dataset("/ysky", data=self.events["ysky"].ndarray_view())
-        f.create_dataset("/eobs", data=self.events["eobs"].ndarray_view())
+        f.create_dataset("/xsky", data=self.events["xsky"].d)
+        f.create_dataset("/ysky", data=self.events["ysky"].d)
+        f.create_dataset("/eobs", data=self.events["eobs"].d)
         if "PI" in self.events:
             f.create_dataset("/pi", data=self.events["PI"])
         if "PHA" in self.events:
-            f.create_dataset("/pha", data=self.events["PHA"])
-        f.create_dataset("/sky_center", data=self.parameters["sky_center"].ndarray_view())
+            f.create_dataset("/pha", data=self.events["PHA"])                  
+        f.create_dataset("/sky_center", data=self.parameters["sky_center"].d)
         f.create_dataset("/pix_center", data=self.parameters["pix_center"])
         f.create_dataset("/dtheta", data=float(self.parameters["dtheta"]))
 
@@ -1138,11 +1177,11 @@
         if emin is None:
             mask_emin = np.ones((self.num_events), dtype='bool')
         else:
-            mask_emin = self.events["eobs"].ndarray_view() > emin
+            mask_emin = self.events["eobs"].d > emin
         if emax is None:
             mask_emax = np.ones((self.num_events), dtype='bool')
         else:
-            mask_emax = self.events["eobs"].ndarray_view() < emax
+            mask_emax = self.events["eobs"].d < emax
 
         mask = np.logical_and(mask_emin, mask_emax)
 
@@ -1175,40 +1214,37 @@
         hdu.writeto(imagefile, clobber=clobber)
 
     @parallel_root_only
-    def write_spectrum(self, specfile, energy_bins=False, emin=0.1,
-                       emax=10.0, nchan=2000, clobber=False):
+    def write_spectrum(self, specfile, bin_type="channel", emin=0.1,
+                       emax=10.0, nchan=2000, clobber=False, energy_bins=False):
         r"""
         Bin event energies into a spectrum and write it to a FITS binary table. Can bin
-        on energy or channel, the latter only if the photons were convolved with an
-        RMF. In that case, the spectral binning will be determined by the RMF binning.
+        on energy or channel. If binning on channel and an RMF is present, the
+        spectral binning will be determined by the RMF binning.
 
         Parameters
         ----------
 
         specfile : string
             The name of the FITS file to be written.
+        bin_type : string, optional
+            Bin on "energy" or "channel". If an RMF is detected, channel information will be
+            imported from it. 
+        emin : float, optional
+            The minimum energy of the spectral bins in keV. Only used if binning without an RMF.
+        emax : float, optional
+            The maximum energy of the spectral bins in keV. Only used if binning without an RMF.
+        nchan : integer, optional
+            The number of channels. Only used if binning without an RMF.
         energy_bins : boolean, optional
-            Bin on energy or channel. 
-        emin : float, optional
-            The minimum energy of the spectral bins in keV. Only used if binning on energy.
-        emax : float, optional
-            The maximum energy of the spectral bins in keV. Only used if binning on energy.
-        nchan : integer, optional
-            The number of channels. Only used if binning on energy.
+            If True, bin on energy rather than channel. Deprecated in favor of *bin_type*. 
         """
+        if energy_bins:
+            bin_type = "energy"
+            warnings.warn("The energy_bins keyword is deprecated. Please use "
+                          "the bin_type keyword instead. Setting bin_type == 'energy'.")
         pyfits = _astropy.pyfits
-        if energy_bins:
-            spectype = "energy"
-            espec = self.events["eobs"].d
-            range = (emin, emax)
-            spec, ee = np.histogram(espec, bins=nchan, range=range)
-            bins = 0.5*(ee[1:]+ee[:-1])
-        else:
-            if "ChannelType" not in self.parameters:
-                mylog.error("These events were not convolved with an RMF. Set energy_bins=True.")
-                raise KeyError
+        if bin_type == "channel" and "ChannelType" in self.parameters:
             spectype = self.parameters["ChannelType"]
-            espec = self.events[spectype]
             f = pyfits.open(self.parameters["RMF"])
             nchan = int(f[1].header["DETCHANS"])
             try:
@@ -1217,18 +1253,22 @@
                 mylog.warning("Cannot determine minimum allowed value for channel. " +
                               "Setting to 0, which may be wrong.")
                 cmin = int(0)
-            try:
-                cmax = int(f[1].header["TLMAX4"])
-            except KeyError:
-                mylog.warning("Cannot determine maximum allowed value for channel. " +
-                              "Setting to DETCHANS, which may be wrong.")
-                cmax = int(nchan)
             f.close()
             minlength = nchan
             if cmin == 1: minlength += 1
             spec = np.bincount(self.events[spectype],minlength=minlength)
             if cmin == 1: spec = spec[1:]
-            bins = np.arange(nchan).astype("int32")+cmin
+            bins = (np.arange(nchan)+cmin).astype("int32")
+        else:
+            espec = self.events["eobs"].d
+            erange = (emin, emax)
+            spec, ee = np.histogram(espec, bins=nchan, range=erange)
+            if bin_type == "energy":
+                bins = 0.5*(ee[1:]+ee[:-1])
+                spectype = "energy"
+            else:
+                bins = (np.arange(nchan)+1).astype("int32")
+                spectype = "pi"
 
         col1 = pyfits.Column(name='CHANNEL', format='1J', array=bins)
         col2 = pyfits.Column(name=spectype.upper(), format='1D', array=bins.astype("float64"))
@@ -1240,46 +1280,45 @@
         tbhdu = pyfits.new_table(coldefs)
         tbhdu.update_ext_name("SPECTRUM")
 
-        if not energy_bins:
-            tbhdu.header["DETCHANS"] = spec.shape[0]
-            tbhdu.header["TOTCTS"] = spec.sum()
-            tbhdu.header["EXPOSURE"] = float(self.parameters["ExposureTime"])
-            tbhdu.header["LIVETIME"] = float(self.parameters["ExposureTime"])
-            tbhdu.header["CONTENT"] = spectype
-            tbhdu.header["HDUCLASS"] = "OGIP"
-            tbhdu.header["HDUCLAS1"] = "SPECTRUM"
-            tbhdu.header["HDUCLAS2"] = "TOTAL"
-            tbhdu.header["HDUCLAS3"] = "TYPE:I"
-            tbhdu.header["HDUCLAS4"] = "COUNT"
-            tbhdu.header["HDUVERS"] = "1.1.0"
-            tbhdu.header["HDUVERS1"] = "1.1.0"
-            tbhdu.header["CHANTYPE"] = spectype
-            tbhdu.header["BACKFILE"] = "none"
-            tbhdu.header["CORRFILE"] = "none"
-            tbhdu.header["POISSERR"] = True
-            if "RMF" in self.parameters:
-                 tbhdu.header["RESPFILE"] = self.parameters["RMF"]
-            else:
-                 tbhdu.header["RESPFILE"] = "none"
-            if "ARF" in self.parameters:
-                tbhdu.header["ANCRFILE"] = self.parameters["ARF"]
-            else:        
-                tbhdu.header["ANCRFILE"] = "none"
-            if "Mission" in self.parameters:
-                tbhdu.header["MISSION"] = self.parameters["Mission"]
-            else:
-                tbhdu.header["MISSION"] = "none"
-            if "Telescope" in self.parameters:
-                tbhdu.header["TELESCOP"] = self.parameters["Telescope"]
-            else:
-                tbhdu.header["TELESCOP"] = "none"
-            if "Instrument" in self.parameters:
-                tbhdu.header["INSTRUME"] = self.parameters["Instrument"]
-            else:
-                tbhdu.header["INSTRUME"] = "none"
-            tbhdu.header["AREASCAL"] = 1.0
-            tbhdu.header["CORRSCAL"] = 0.0
-            tbhdu.header["BACKSCAL"] = 1.0
+        tbhdu.header["DETCHANS"] = spec.shape[0]
+        tbhdu.header["TOTCTS"] = spec.sum()
+        tbhdu.header["EXPOSURE"] = float(self.parameters["ExposureTime"])
+        tbhdu.header["LIVETIME"] = float(self.parameters["ExposureTime"])
+        tbhdu.header["CONTENT"] = spectype
+        tbhdu.header["HDUCLASS"] = "OGIP"
+        tbhdu.header["HDUCLAS1"] = "SPECTRUM"
+        tbhdu.header["HDUCLAS2"] = "TOTAL"
+        tbhdu.header["HDUCLAS3"] = "TYPE:I"
+        tbhdu.header["HDUCLAS4"] = "COUNT"
+        tbhdu.header["HDUVERS"] = "1.1.0"
+        tbhdu.header["HDUVERS1"] = "1.1.0"
+        tbhdu.header["CHANTYPE"] = spectype
+        tbhdu.header["BACKFILE"] = "none"
+        tbhdu.header["CORRFILE"] = "none"
+        tbhdu.header["POISSERR"] = True
+        if "RMF" in self.parameters:
+            tbhdu.header["RESPFILE"] = self.parameters["RMF"]
+        else:
+            tbhdu.header["RESPFILE"] = "none"
+        if "ARF" in self.parameters:
+            tbhdu.header["ANCRFILE"] = self.parameters["ARF"]
+        else:
+            tbhdu.header["ANCRFILE"] = "none"
+        if "Mission" in self.parameters:
+            tbhdu.header["MISSION"] = self.parameters["Mission"]
+        else:
+            tbhdu.header["MISSION"] = "none"
+        if "Telescope" in self.parameters:
+            tbhdu.header["TELESCOP"] = self.parameters["Telescope"]
+        else:
+            tbhdu.header["TELESCOP"] = "none"
+        if "Instrument" in self.parameters:
+            tbhdu.header["INSTRUME"] = self.parameters["Instrument"]
+        else:
+            tbhdu.header["INSTRUME"] = "none"
+        tbhdu.header["AREASCAL"] = 1.0
+        tbhdu.header["CORRSCAL"] = 0.0
+        tbhdu.header["BACKSCAL"] = 1.0
 
         hdulist = pyfits.HDUList([pyfits.PrimaryHDU(), tbhdu])
 

diff -r c66deb7d69cab3114e9563d6cf9ed57521675ff4 -r 98c8bdfbc1e00d4ee2388663a7f4072bbf98fc57 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -32,17 +32,17 @@
         self.ebins = np.linspace(self.emin, self.emax, nchan+1)
         self.de = np.diff(self.ebins)
         self.emid = 0.5*(self.ebins[1:]+self.ebins[:-1])
-        
-    def prepare(self):
+
+    def prepare_spectrum(self):
         pass
-    
+
     def get_spectrum(self):
         pass
-                                                        
+
 class XSpecThermalModel(SpectralModel):
     r"""
     Initialize a thermal gas emission model from PyXspec.
-    
+
     Parameters
     ----------
 
@@ -54,19 +54,25 @@
         The maximum energy for the spectral model.
     nchan : integer
         The number of channels in the spectral model.
+    settings : dictionary, optional
+        A dictionary of key, value pairs (must both be strings)
+        that can be used to set various options in XSPEC.
 
     Examples
     --------
     >>> mekal_model = XSpecThermalModel("mekal", 0.05, 50.0, 1000)
     """
-    def __init__(self, model_name, emin, emax, nchan, thermal_broad=False):
+    def __init__(self, model_name, emin, emax, nchan, 
+                 thermal_broad=False, settings=None):
         self.model_name = model_name
         self.thermal_broad = thermal_broad
-        SpectralModel.__init__(self, emin, emax, nchan)
-        
-    def prepare(self):
+        if settings is None: settings = {}
+        self.settings = settings
+        super(XSpecThermalModel, self).__init__(emin, emax, nchan)
+
+    def prepare_spectrum(self, zobs):
         """
-        Prepare the thermal model for execution.
+        Prepare the thermal model for execution given a redshift *zobs* for the spectrum.
         """
         import xspec
         xspec.Xset.chatter = 0
@@ -79,9 +85,11 @@
         else:
             self.norm = 1.0e-14
         self.thermal_comp.norm = 1.0
-        self.thermal_comp.Redshift = 0.0
+        self.thermal_comp.Redshift = zobs
         if self.thermal_broad:
             xspec.Xset.addModelString("APECTHERMAL","yes")
+        for k,v in self.settings.items():
+            xspec.Xset.addModelString(k,v)
 
     def get_spectrum(self, kT):
         """
@@ -89,18 +97,20 @@
         """
         self.thermal_comp.kT = kT
         self.thermal_comp.Abundanc = 0.0
-        cosmic_spec = self.norm*np.array(self.model.values(0))
+        cosmic_spec = np.array(self.model.values(0))
         if self.model_name == "bremss":
             metal_spec = np.zeros(self.nchan)
         else:
             self.thermal_comp.Abundanc = 1.0
-            metal_spec = self.norm*np.array(self.model.values(0)) - cosmic_spec
+            metal_spec = np.array(self.model.values(0)) - cosmic_spec
+        cosmic_spec *= self.norm
+        metal_spec *= self.norm
         return YTArray(cosmic_spec, "cm**3/s"), YTArray(metal_spec, "cm**3/s")
-        
+
 class XSpecAbsorbModel(SpectralModel):
     r"""
     Initialize an absorption model from PyXspec.
-    
+
     Parameters
     ----------
 
@@ -114,19 +124,25 @@
         The maximum energy for the spectral model.
     nchan : integer, optional
         The number of channels in the spectral model.
+    settings : dictionary, optional
+        A dictionary of key, value pairs (must both be strings)
+        that can be used to set various options in XSPEC.
 
     Examples
     --------
     >>> abs_model = XSpecAbsorbModel("wabs", 0.1)
     """
-    def __init__(self, model_name, nH, emin=0.01, emax=50.0, nchan=100000):
+    def __init__(self, model_name, nH, emin=0.01, emax=50.0, 
+                 nchan=100000, settings=None):
         self.model_name = model_name
         self.nH = nH
-        SpectralModel.__init__(self, emin, emax, nchan)
-        
-    def prepare(self):
+        if settings is None: settings = {}
+        self.settings = settings
+        super(XSpecAbsorbModel, self).__init__(emin, emax, nchan)
+
+    def prepare_spectrum(self):
         """
         Prepare the absorption model for execution.
         """
         import xspec
         xspec.Xset.chatter = 0
@@ -135,6 +151,8 @@
         self.model = xspec.Model(self.model_name+"*powerlaw")
         self.model.powerlaw.norm = self.nchan/(self.emax.value-self.emin.value)
         self.model.powerlaw.PhoIndex = 0.0
+        for k,v in self.settings.items():
+            xspec.Xset.addModelString(k,v)
 
     def get_spectrum(self):
         """
@@ -150,7 +168,7 @@
     available at http://www.atomdb.org. This code borrows heavily from Python
     routines used to read the APEC tables developed by Adam Foster at the
     CfA (afoster at cfa.harvard.edu). 
-    
+
     Parameters
     ----------
 
@@ -183,7 +201,7 @@
                                      self.apec_prefix+"_coco.fits")
         self.linefile = os.path.join(self.apec_root,
                                      self.apec_prefix+"_line.fits")
-        SpectralModel.__init__(self, emin, emax, nchan)
+        super(TableApecModel, self).__init__(emin, emax, nchan)  
         self.wvbins = hc/self.ebins[::-1].d
         # H, He, and trace elements
         self.cosmic_elem = [1,2,3,4,5,9,11,15,17,19,21,22,23,24,25,27,29,30]
@@ -196,8 +214,8 @@
                            32.0650,35.4530,39.9480,39.0983,40.0780,
                            44.9559,47.8670,50.9415,51.9961,54.9380,
                            55.8450,58.9332,58.6934,63.5460,65.3800])
-        
-    def prepare(self):
+
+    def prepare_spectrum(self, zobs):
         """
         Prepare the thermal model for execution.
         """
@@ -216,17 +234,18 @@
         self.dTvals = np.diff(self.Tvals)
         self.minlam = self.wvbins.min()
         self.maxlam = self.wvbins.max()
-    
+        self.scale_factor = 1.0/(1.+zobs)
+
     def _make_spectrum(self, element, tindex):
-        
+
         tmpspec = np.zeros(self.nchan)
-        
+
         i = np.where((self.line_handle[tindex].data.field('element') == element) &
                      (self.line_handle[tindex].data.field('lambda') > self.minlam) &
                      (self.line_handle[tindex].data.field('lambda') < self.maxlam))[0]
 
         vec = np.zeros(self.nchan)
-        E0 = hc/self.line_handle[tindex].data.field('lambda')[i]
+        E0 = hc/self.line_handle[tindex].data.field('lambda')[i]*self.scale_factor
         amp = self.line_handle[tindex].data.field('epsilon')[i]
         ebins = self.ebins.d
         if self.thermal_broad:
@@ -246,19 +265,19 @@
             return tmpspec
         else:
             ind = ind[0]
-                                                    
+
         n_cont = self.coco_handle[tindex].data.field('N_Cont')[ind]
-        e_cont = self.coco_handle[tindex].data.field('E_Cont')[ind][:n_cont]
+        e_cont = self.coco_handle[tindex].data.field('E_Cont')[ind][:n_cont]*self.scale_factor
         continuum = self.coco_handle[tindex].data.field('Continuum')[ind][:n_cont]
 
         tmpspec += np.interp(self.emid.d, e_cont, continuum)*self.de.d
-        
+
         n_pseudo = self.coco_handle[tindex].data.field('N_Pseudo')[ind]
-        e_pseudo = self.coco_handle[tindex].data.field('E_Pseudo')[ind][:n_pseudo]
+        e_pseudo = self.coco_handle[tindex].data.field('E_Pseudo')[ind][:n_pseudo]*self.scale_factor
         pseudo = self.coco_handle[tindex].data.field('Pseudo')[ind][:n_pseudo]
-        
+
         tmpspec += np.interp(self.emid.d, e_pseudo, pseudo)*self.de.d
-        
+
         return tmpspec
 
     def get_spectrum(self, kT):
@@ -288,7 +307,7 @@
 class TableAbsorbModel(SpectralModel):
     r"""
     Initialize an absorption model from a table stored in an HDF5 file.
-    
+
     Parameters
     ----------
 
@@ -299,7 +318,7 @@
 
     Examples
     --------
-    >>> abs_model = XSpecAbsorbModel("wabs", 0.1)
+    >>> abs_model = TableAbsorbModel("abs_table.h5", 0.1)
     """
 
     def __init__(self, filename, nH):
@@ -312,15 +331,15 @@
         self.sigma = YTArray(f["cross_section"][:], "cm**2")
         nchan = self.sigma.shape[0]
         f.close()
-        SpectralModel.__init__(self, emin, emax, nchan)
+        super(TableAbsorbModel, self).__init__(emin, emax, nchan)
         self.nH = YTQuantity(nH*1.0e22, "cm**-2")
-        
-    def prepare(self):
+
+    def prepare_spectrum(self):
         """
         Prepare the absorption model for execution.
         """
         pass
-        
+
     def get_spectrum(self):
         """
         Get the absorption spectrum.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


