[yt-svn] commit/yt: 13 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Mon Nov 23 14:25:04 PST 2015


13 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/5553847fc3a1/
Changeset:   5553847fc3a1
Branch:      yt
User:        jzuhone
Date:        2015-10-12 19:27:32+00:00
Summary:     Separate parameters and data for PhotonList and EventList into two groups in the HDF5 file. Add two helper functions: one to merge multiple files, another to convert old files to the new format.
Affected #:  1 file
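
For reference, a minimal sketch of the two-group layout this changeset
introduces, using only dataset names that appear in the diff below. The
file name and values are hypothetical, and the deprecated h5py .value
accessor is used only to match the code in the diff:

    import h5py
    import numpy as np

    # Write a PhotonList-style file with the new layout: scalar metadata
    # under /parameters, per-cell and per-photon arrays under /data.
    f = h5py.File("photons.h5", "w")
    p = f.create_group("parameters")
    p.create_dataset("fid_exp_time", data=3.0e5)   # seconds
    p.create_dataset("fid_area", data=6000.0)      # cm**2
    d = f.create_group("data")
    d.create_dataset("x", data=np.zeros(16))       # kpc
    d.create_dataset("energy", data=np.ones(128))  # keV
    f.close()

    # Read it back the same way the updated from_file/from_h5_file do.
    f = h5py.File("photons.h5", "r")
    exp_time = f["/parameters"]["fid_exp_time"].value
    energy = f["/data"]["energy"][:]
    f.close()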

diff -r 508299bf88f92f1191465d63e84a69eebd161954 -r 5553847fc3a186ba7d48a88c356b87cfb5d74851 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -25,6 +25,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 from yt.extern.six import string_types
+from collections import defaultdict
 import numpy as np
 from yt.funcs import mylog, get_pbar, iterable, ensure_list
 from yt.utilities.physical_constants import clight
@@ -38,6 +39,7 @@
 from yt.utilities.on_demand_imports import _h5py as h5py
 from yt.utilities.on_demand_imports import _astropy
 import warnings
+import os
 
 comm = communication_system.communicators[-1]
 
@@ -104,29 +106,31 @@
 
         f = h5py.File(filename, "r")
 
-        parameters["FiducialExposureTime"] = YTQuantity(f["/fid_exp_time"].value, "s")
-        parameters["FiducialArea"] = YTQuantity(f["/fid_area"].value, "cm**2")
-        parameters["FiducialRedshift"] = f["/fid_redshift"].value
-        parameters["FiducialAngularDiameterDistance"] = YTQuantity(f["/fid_d_a"].value, "Mpc")
-        parameters["Dimension"] = f["/dimension"].value
-        parameters["Width"] = YTQuantity(f["/width"].value, "kpc")
-        parameters["HubbleConstant"] = f["/hubble"].value
-        parameters["OmegaMatter"] = f["/omega_matter"].value
-        parameters["OmegaLambda"] = f["/omega_lambda"].value
+        p = f["/parameters"]
+        parameters["FiducialExposureTime"] = YTQuantity(p["fid_exp_time"].value, "s")
+        parameters["FiducialArea"] = YTQuantity(p["fid_area"].value, "cm**2")
+        parameters["FiducialRedshift"] = p["fid_redshift"].value
+        parameters["FiducialAngularDiameterDistance"] = YTQuantity(p["fid_d_a"].value, "Mpc")
+        parameters["Dimension"] = p["dimension"].value
+        parameters["Width"] = YTQuantity(p["width"].value, "kpc")
+        parameters["HubbleConstant"] = p["hubble"].value
+        parameters["OmegaMatter"] = p["omega_matter"].value
+        parameters["OmegaLambda"] = p["omega_lambda"].value
 
         num_cells = f["/x"][:].shape[0]
         start_c = comm.rank*num_cells//comm.size
         end_c = (comm.rank+1)*num_cells//comm.size
 
-        photons["x"] = YTArray(f["/x"][start_c:end_c], "kpc")
-        photons["y"] = YTArray(f["/y"][start_c:end_c], "kpc")
-        photons["z"] = YTArray(f["/z"][start_c:end_c], "kpc")
-        photons["dx"] = YTArray(f["/dx"][start_c:end_c], "kpc")
-        photons["vx"] = YTArray(f["/vx"][start_c:end_c], "km/s")
-        photons["vy"] = YTArray(f["/vy"][start_c:end_c], "km/s")
-        photons["vz"] = YTArray(f["/vz"][start_c:end_c], "km/s")
+        d = f["/data"]
+        photons["x"] = YTArray(d["x"][start_c:end_c], "kpc")
+        photons["y"] = YTArray(d["y"][start_c:end_c], "kpc")
+        photons["z"] = YTArray(d["z"][start_c:end_c], "kpc")
+        photons["dx"] = YTArray(d["dx"][start_c:end_c], "kpc")
+        photons["vx"] = YTArray(d["vx"][start_c:end_c], "km/s")
+        photons["vy"] = YTArray(d["vy"][start_c:end_c], "km/s")
+        photons["vz"] = YTArray(d["vz"][start_c:end_c], "km/s")
 
-        n_ph = f["/num_photons"][:]
+        n_ph = d["num_photons"][:]
 
         if comm.rank == 0:
             start_e = np.uint64(0)
@@ -139,7 +143,7 @@
         p_bins = np.cumsum(photons["NumberOfPhotons"])
         p_bins = np.insert(p_bins, 0, [np.uint64(0)])
 
-        photons["Energy"] = YTArray(f["/energy"][start_e:end_e], "keV")
+        photons["Energy"] = YTArray(d["energy"][start_e:end_e], "keV")
 
         f.close()
 
@@ -398,7 +402,7 @@
             comm.comm.Gatherv([self.photons["NumberOfPhotons"], local_num_cells, mpi_long],
                               [n_ph, (sizes_c, disps_c), mpi_long], root=0)
             comm.comm.Gatherv([self.photons["Energy"].d, local_num_photons, mpi_double],
-                              [e, (sizes_p, disps_p), mpi_double], root=0) 
+                              [e, (sizes_p, disps_p), mpi_double], root=0)
 
         else:
 
@@ -416,29 +420,31 @@
 
             f = h5py.File(photonfile, "w")
 
-            # Scalars
+            # Parameters
 
-            f.create_dataset("fid_area", data=float(self.parameters["FiducialArea"]))
-            f.create_dataset("fid_exp_time", data=float(self.parameters["FiducialExposureTime"]))
-            f.create_dataset("fid_redshift", data=self.parameters["FiducialRedshift"])
-            f.create_dataset("hubble", data=self.parameters["HubbleConstant"])
-            f.create_dataset("omega_matter", data=self.parameters["OmegaMatter"])
-            f.create_dataset("omega_lambda", data=self.parameters["OmegaLambda"])
-            f.create_dataset("fid_d_a", data=float(self.parameters["FiducialAngularDiameterDistance"]))
-            f.create_dataset("dimension", data=self.parameters["Dimension"])
-            f.create_dataset("width", data=float(self.parameters["Width"]))
+            p = f.create_group("parameters")
+            p.create_dataset("fid_area", data=float(self.parameters["FiducialArea"]))
+            p.create_dataset("fid_exp_time", data=float(self.parameters["FiducialExposureTime"]))
+            p.create_dataset("fid_redshift", data=self.parameters["FiducialRedshift"])
+            p.create_dataset("hubble", data=self.parameters["HubbleConstant"])
+            p.create_dataset("omega_matter", data=self.parameters["OmegaMatter"])
+            p.create_dataset("omega_lambda", data=self.parameters["OmegaLambda"])
+            p.create_dataset("fid_d_a", data=float(self.parameters["FiducialAngularDiameterDistance"]))
+            p.create_dataset("dimension", data=self.parameters["Dimension"])
+            p.create_dataset("width", data=float(self.parameters["Width"]))
 
-            # Arrays
+            # Data
 
-            f.create_dataset("x", data=x)
-            f.create_dataset("y", data=y)
-            f.create_dataset("z", data=z)
-            f.create_dataset("vx", data=vx)
-            f.create_dataset("vy", data=vy)
-            f.create_dataset("vz", data=vz)
-            f.create_dataset("dx", data=dx)
-            f.create_dataset("num_photons", data=n_ph)
-            f.create_dataset("energy", data=e)
+            d = f.create_group("parameters")
+            d.create_dataset("x", data=x)
+            d.create_dataset("y", data=y)
+            d.create_dataset("z", data=z)
+            d.create_dataset("vx", data=vx)
+            d.create_dataset("vy", data=vy)
+            d.create_dataset("vz", data=vz)
+            d.create_dataset("dx", data=dx)
+            d.create_dataset("num_photons", data=n_ph)
+            d.create_dataset("energy", data=e)
 
             f.close()
 
@@ -804,7 +810,7 @@
 
         return events, info
 
-class EventList(object) :
+class EventList(object):
 
     def __init__(self, events, parameters):
         self.events = events
@@ -899,36 +905,38 @@
 
         f = h5py.File(h5file, "r")
 
-        parameters["ExposureTime"] = YTQuantity(f["/exp_time"].value, "s")
+        p = f["/parameters"]
+        parameters["ExposureTime"] = YTQuantity(p["exp_time"].value, "s")
         if isinstance(f["/area"].value, (string_types, bytes)):
-            parameters["Area"] = f["/area"].value.decode("utf8")
+            parameters["Area"] = p["area"].value.decode("utf8")
         else:
-            parameters["Area"] = YTQuantity(f["/area"].value, "cm**2")
-        parameters["Redshift"] = f["/redshift"].value
-        parameters["AngularDiameterDistance"] = YTQuantity(f["/d_a"].value, "Mpc")
-        if "rmf" in f:
-            parameters["RMF"] = f["/rmf"].value.decode("utf8")
-        if "arf" in f:
-            parameters["ARF"] = f["/arf"].value.decode("utf8")
-        if "channel_type" in f:
-            parameters["ChannelType"] = f["/channel_type"].value.decode("utf8")
-        if "mission" in f:
-            parameters["Mission"] = f["/mission"].value.decode("utf8")
-        if "telescope" in f:
-            parameters["Telescope"] = f["/telescope"].value.decode("utf8")
-        if "instrument" in f:
-            parameters["Instrument"] = f["/instrument"].value.decode("utf8")
+            parameters["Area"] = YTQuantity(p["area"].value, "cm**2")
+        parameters["Redshift"] = p["redshift"].value
+        parameters["AngularDiameterDistance"] = YTQuantity(p["d_a"].value, "Mpc")
+        parameters["sky_center"] = YTArray(p["sky_center"][:], "deg")
+        parameters["dtheta"] = YTQuantity(p["dtheta"].value, "deg")
+        parameters["pix_center"] = p["pix_center"][:]
+        if "rmf" in p:
+            parameters["RMF"] = p["rmf"].value.decode("utf8")
+        if "arf" in p:
+            parameters["ARF"] = p["arf"].value.decode("utf8")
+        if "channel_type" in p:
+            parameters["ChannelType"] = p["channel_type"].value.decode("utf8")
+        if "mission" in p:
+            parameters["Mission"] = p["mission"].value.decode("utf8")
+        if "telescope" in p:
+            parameters["Telescope"] = p["telescope"].value.decode("utf8")
+        if "instrument" in p:
+            parameters["Instrument"] = p["instrument"].value.decode("utf8")
 
-        events["xpix"] = f["/xpix"][:]
-        events["ypix"] = f["/ypix"][:]
-        events["eobs"] = YTArray(f["/eobs"][:], "keV")
-        if "pi" in f:
-            events["PI"] = f["/pi"][:]
-        if "pha" in f:
-            events["PHA"] = f["/pha"][:]
-        parameters["sky_center"] = YTArray(f["/sky_center"][:], "deg")
-        parameters["dtheta"] = YTQuantity(f["/dtheta"].value, "deg")
-        parameters["pix_center"] = f["/pix_center"][:]
+        d = f["/data"]
+        events["xpix"] = d["xpix"][:]
+        events["ypix"] = d["ypix"][:]
+        events["eobs"] = YTArray(d["eobs"][:], "keV")
+        if "pi" in d:
+            events["PI"] = d["pi"][:]
+        if "pha" in d:
+            events["PHA"] = d["pha"][:]
 
         f.close()
 
@@ -1197,38 +1205,40 @@
         """
         f = h5py.File(h5file, "w")
 
-        f.create_dataset("/exp_time", data=float(self.parameters["ExposureTime"]))
+        p = f.create_group("parameters")
+        p.create_dataset("exp_time", data=float(self.parameters["ExposureTime"]))
         area = self.parameters["Area"]
         if not isinstance(area, string_types):
             area = float(area)
-        f.create_dataset("/area", data=area)
-        f.create_dataset("/redshift", data=self.parameters["Redshift"])
-        f.create_dataset("/d_a", data=float(self.parameters["AngularDiameterDistance"]))
+        p.create_dataset("area", data=area)
+        p.create_dataset("redshift", data=self.parameters["Redshift"])
+        p.create_dataset("d_a", data=float(self.parameters["AngularDiameterDistance"]))
         if "ARF" in self.parameters:
-            f.create_dataset("/arf", data=self.parameters["ARF"])
+            p.create_dataset("arf", data=self.parameters["ARF"])
         if "RMF" in self.parameters:
-            f.create_dataset("/rmf", data=self.parameters["RMF"])
+            p.create_dataset("rmf", data=self.parameters["RMF"])
         if "ChannelType" in self.parameters:
-            f.create_dataset("/channel_type", data=self.parameters["ChannelType"])
+            p.create_dataset("channel_type", data=self.parameters["ChannelType"])
         if "Mission" in self.parameters:
-            f.create_dataset("/mission", data=self.parameters["Mission"]) 
+            p.create_dataset("mission", data=self.parameters["Mission"])
         if "Telescope" in self.parameters:
-            f.create_dataset("/telescope", data=self.parameters["Telescope"])
+            p.create_dataset("telescope", data=self.parameters["Telescope"])
         if "Instrument" in self.parameters:
-            f.create_dataset("/instrument", data=self.parameters["Instrument"])
+            p.create_dataset("instrument", data=self.parameters["Instrument"])
+        p.create_dataset("sky_center", data=self.parameters["sky_center"].d)
+        p.create_dataset("pix_center", data=self.parameters["pix_center"])
+        p.create_dataset("dtheta", data=float(self.parameters["dtheta"]))
 
-        f.create_dataset("/xpix", data=self["xpix"])
-        f.create_dataset("/ypix", data=self["ypix"])
-        f.create_dataset("/xsky", data=self["xsky"].d)
-        f.create_dataset("/ysky", data=self["ysky"].d)
-        f.create_dataset("/eobs", data=self["eobs"].d)
-        if "PI" in self:
-            f.create_dataset("/pi", data=self["PI"])
-        if "PHA" in self:
-            f.create_dataset("/pha", data=self["PHA"])                  
-        f.create_dataset("/sky_center", data=self.parameters["sky_center"].d)
-        f.create_dataset("/pix_center", data=self.parameters["pix_center"])
-        f.create_dataset("/dtheta", data=float(self.parameters["dtheta"]))
+        d = f.create_group("data")
+        d.create_dataset("xpix", data=self["xpix"])
+        d.create_dataset("ypix", data=self["ypix"])
+        d.create_dataset("xsky", data=self["xsky"].d)
+        d.create_dataset("ysky", data=self["ysky"].d)
+        d.create_dataset("eobs", data=self["eobs"].d)
+        if "PI" in self.events:
+            d.create_dataset("pi", data=self.events["PI"])
+        if "PHA" in self.events:
+            d.create_dataset("pha", data=self.events["PHA"])
 
         f.close()
 
@@ -1405,3 +1415,71 @@
         hdulist = pyfits.HDUList([pyfits.PrimaryHDU(), tbhdu])
 
         hdulist.writeto(specfile, clobber=clobber)
+
+def merge_files(input_files, output_file, clobber=False,
+                add_exposure_times=False):
+
+    if os.path.exists(output_file) and not clobber:
+        raise IOError("Cannot overwrite existing file %s. " % output_file +
+                      "If you want to do this, use --clobber.")
+
+    f_in = h5py.File(input_files[0], "r")
+    f_out = h5py.File(output_file, "w")
+
+    exp_time_key = ""
+    p_out = f_out.create_group("parameters")
+    for key, param in f_in["parameters"].items():
+        if key.endswith("exp_time"):
+            exp_time_key = key
+        else:
+            p_out[key] = param.value
+
+    f_in.close()
+
+    data = defaultdict(list)
+    tot_exp_time = 0.0
+
+    for fn in input_files:
+        f = h5py.File(fn, "r")
+        if add_exposure_times:
+            tot_exp_time += f["/parameters"][exp_time_key].value
+        for key in f["/data"]:
+            data[key].append(f["/data"][key][:])
+        f.close()
+
+    d = f_out.create_group("data")
+    for k in data:
+        d.create_dataset(k, data=np.concatenate(data[k]))
+
+    if add_exposure_times:
+        p_out[exp_time_key] = tot_exp_time
+    else:
+        f = h5py.File(input_files[0], "r")
+        p_out[exp_time_key] = f["/parameters"][exp_time_key].value
+        f.close()
+
+    f_out.close()
+
+def convert_old_file(input_file, output_file):
+
+    f_in = h5py.File(input_file, "r")
+
+    if "NumberOfPhotons" in f_in:
+        params = ["fid_exp_time", "fid_area", "fid_d_a", "fid_redshift",
+                  "dimension", "width", "hubble", "omega_matter",
+                  "omega_lambda"]
+        data = ["x", "y", "z", "vx", "vy", "vz", "energy", "num_photons"]
+    elif "pix_center" in f_in:
+        params = ["exp_time", "area", "redshift", "d_a", "arf",
+                  "rmf", "channel_type", "mission", "telescope",
+                  "instruemnt", "sky_center", "pix_center", "dtheta"]
+        data = ["xsky", "ysky", "xpix", "ypix", "eobs", "pi", "pha"]
+
+    f_out = h5py.File(output_file, "w")
+
+    p = f_out.create_group("parameters")
+    d = f_out.create_group("data")
+
+    for key in params:
+        if key in f_in:
+            p.create_dataset(key, data=f_in[key].value)
+
+    for key in data:
+        if key in f_in:
+            d.create_dataset(key, data=f_in[key].value)
+
+    f_in.close()
+    f_out.close()
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/b3864c345542/
Changeset:   b3864c345542
Branch:      yt
User:        jzuhone
Date:        2015-10-12 19:39:05+00:00
Summary:     Validate the parameters when we add events or merge files.
Affected #:  1 file
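
The effect of the new check, as a minimal sketch (validate_parameters is
a module-level function in photon_simulator.py per the diff below; the
toy dictionaries are made up):

    from yt.analysis_modules.photon_simulator.photon_simulator import \
        validate_parameters

    first = {"exp_time": 3.0e5, "redshift": 0.05}
    second = {"exp_time": 6.0e5, "redshift": 0.05}

    # Mismatched values raise a RuntimeError...
    try:
        validate_parameters(first, second)
    except RuntimeError as err:
        print(err)

    # ...unless the offending key is skipped, which is what merge_files
    # does for the exposure time when add_exposure_times=True.
    validate_parameters(first, second, skip=["exp_time"])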

diff -r 5553847fc3a186ba7d48a88c356b87cfb5d74851 -r b3864c345542f921ef182f2b19dfaf4006f2cf65 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -55,6 +55,25 @@
     else:
         return YTQuantity(value, default_units)
 
+def validate_parameters(first, second, skip=[]):
+    keys1 = list(first.keys())
+    keys2 = list(second.keys())
+    keys1.sort()
+    keys2.sort()
+    if keys1 != keys2:
+        raise RuntimeError("The two inputs do not have the same parameters!")
+    for k1, k2 in zip(keys1, keys2):
+        if k1 not in skip:
+            v1 = first[k1]
+            v2 = second[k2]
+            if isinstance(v1, string_types) or isinstance(v2, string_types):
+                check_equal = v1 == v2
+            else:
+                check_equal = np.allclose(v1, v2, rtol=0.0, atol=1.0e-10)
+            if not check_equal:
+                raise RuntimeError("The values for the parameter '%s' in the two inputs" % k1 +
+                                   " are not identical (%s vs. %s)!" % (v1, v2))
+
 class PhotonList(object):
 
     def __init__(self, photons, parameters, cosmo, p_bins):
@@ -851,22 +870,7 @@
 
     def __add__(self, other):
         assert_same_wcs(self.wcs, other.wcs)
-        keys1 = list(self.parameters.keys())
-        keys2 = list(other.parameters.keys())
-        keys1.sort()
-        keys2.sort()
-        if keys1 != keys2:
-            raise RuntimeError("The two EventLists do not have the same parameters!")
-        for k1, k2 in zip(keys1, keys2):
-            v1 = self.parameters[k1]
-            v2 = other.parameters[k2]
-            if isinstance(v1, string_types) or isinstance(v2, string_types):
-                check_equal = v1 == v2
-            else:
-                check_equal = np.allclose(v1, v2, rtol=0.0, atol=1.0e-10)
-            if not check_equal:
-                raise RuntimeError("The values for the parameter '%s' in the two EventLists" % k1 +
-                                   " are not identical (%s vs. %s)!" % (v1, v2))
+        validate_parameters(self.parameters, other.parameters)
         events = {}
         for item1, item2 in zip(self.items(), other.items()):
             k1, v1 = item1
@@ -1434,6 +1438,15 @@
         else:
             p_out[key] = param.value
 
+    skip = []
+    if add_exposure_times:
+        skip.append(exp_time_key)
+
+    for fn in input_files[1:]:
+        f = h5py.File(fn, "r")
+        validate_parameters(f_in["parameters"], f["parameters"], skip=skip)
+        f.close()
+
     f_in.close()
 
     data = defaultdict(list)


https://bitbucket.org/yt_analysis/yt/commits/ae3a9a144b59/
Changeset:   ae3a9a144b59
Branch:      yt
User:        jzuhone
Date:        2015-10-12 20:14:40+00:00
Summary:     typo
Affected #:  1 file

diff -r b3864c345542f921ef182f2b19dfaf4006f2cf65 -r ae3a9a144b5997867c158947ff1b4df223dcda43 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -1478,7 +1478,7 @@
     elif "pix_center" in f_in:
         params = ["exp_time", "area", "redshift", "d_a", "arf",
                   "rmf", "channel_type", "mission", "telescope",
-                  "instruemnt", "sky_center", "pix_center", "dtheta"]
+                  "instrument", "sky_center", "pix_center", "dtheta"]
         data = ["xsky", "ysky", "xpix", "ypix", "eobs", "pi", "pha"]
 
     f_out = h5py.File(output_file, "w")


https://bitbucket.org/yt_analysis/yt/commits/52c304012e6b/
Changeset:   52c304012e6b
Branch:      yt
User:        jzuhone
Date:        2015-10-12 20:31:34+00:00
Summary:     Add helper functions to API
Affected #:  1 file
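
With these exports the helpers can be called directly from the api
module; a short hypothetical session (the file names are made up):

    from yt.analysis_modules.photon_simulator.api import \
        merge_files, convert_old_file

    # Convert a pre-3.3 event file to the new parameters/data layout,
    # then merge it with another one. clobber=True overwrites existing
    # output; add_exposure_times=True sums the exposure times.
    convert_old_file("events_old.h5", "events_0.h5")
    merge_files(["events_0.h5", "events_1.h5"], "events_merged.h5",
                clobber=True, add_exposure_times=True)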

diff -r ae3a9a144b5997867c158947ff1b4df223dcda43 -r 52c304012e6b226e7fe527c235a9593a18c7ab30 yt/analysis_modules/photon_simulator/api.py
--- a/yt/analysis_modules/photon_simulator/api.py
+++ b/yt/analysis_modules/photon_simulator/api.py
@@ -16,7 +16,9 @@
 
 from .photon_simulator import \
      PhotonList, \
-     EventList
+     EventList, \
+     merge_files, \
+     convert_old_file
 
 from .spectral_models import \
      SpectralModel, \


https://bitbucket.org/yt_analysis/yt/commits/5903092a952b/
Changeset:   5903092a952b
Branch:      yt
User:        jzuhone
Date:        2015-10-12 20:32:08+00:00
Summary:     Add new docstrings, fix up old docstrings and whitespace
Affected #:  3 files

diff -r 52c304012e6b226e7fe527c235a9593a18c7ab30 -r 5903092a952bd3f2f9b8faa7c4a36c525a494493 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -52,14 +52,13 @@
 
 class ThermalPhotonModel(PhotonModel):
     r"""
-    Initialize a ThermalPhotonModel from a thermal spectrum. 
+    Initialize a ThermalPhotonModel from a thermal spectrum.
 
     Parameters
     ----------
-
     spectral_model : `SpectralModel`
         A thermal spectral model instance, either of `XSpecThermalModel`
-        or `TableApecModel`. 
+        or `TableApecModel`.
     X_H : float, optional
         The hydrogen mass fraction.
     Zmet : float or string, optional
@@ -71,10 +70,10 @@
     method : string, optional
         The method used to generate the photon energies from the spectrum:
         "invert_cdf": Invert the cumulative distribution function of the spectrum.
-        "accept_reject": Acceptance-rejection method using the spectrum. 
-        The first method should be sufficient for most cases. 
+        "accept_reject": Acceptance-rejection method using the spectrum.
+        The first method should be sufficient for most cases.
     """
-    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3, 
+    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3,
                  photons_per_chunk=10000000, method="invert_cdf"):
         self.X_H = X_H
         self.Zmet = Zmet

diff -r 52c304012e6b226e7fe527c235a9593a18c7ab30 -r 5903092a952bd3f2f9b8faa7c4a36c525a494493 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -183,7 +183,6 @@
 
         Parameters
         ----------
-
         data_source : `yt.data_objects.data_containers.YTSelectionContainer`
             The data source from which the photons will be generated.
         redshift : float
@@ -197,9 +196,9 @@
             dictionary and returns a *photons* dictionary. Must be of the
             form: photon_model(data_source, parameters)
         parameters : dict, optional
-            A dictionary of parameters to be passed to the user function. 
+            A dictionary of parameters to be passed to the user function.
         center : string or array_like, optional
-            The origin of the photons. Accepts "c", "max", or a coordinate.                
+            The origin of the photons. Accepts "c", "max", or a coordinate.
         dist : tuple, optional
             The angular diameter distance in the form (value, unit), used
             mainly for nearby sources. This may be optionally supplied
@@ -211,7 +210,6 @@
 
         Examples
         --------
-
         This is the simplest possible example, where we call the built-in thermal model:
 
         >>> thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3)
@@ -247,7 +245,7 @@
 
         >>> import numpy as np
         >>> import yt
-        >>> from yt.analysis_modules.photon_simulator import *
+        >>> from yt.analysis_modules.photon_simulator.api import PhotonList
         >>> def line_func(source, parameters):
         ...
         ...     ds = source.ds
@@ -277,7 +275,7 @@
         >>> ddims = (128,128,128)
         >>> random_data = {"density":(np.random.random(ddims),"g/cm**3")}
         >>> ds = yt.load_uniform_grid(random_data, ddims)
-        >>> dd = ds.all_data
+        >>> dd = ds.all_data()
         >>> my_photons = PhotonList.from_user_model(dd, redshift, area,
         ...                                         time, line_func,
         ...                                         parameters=parameters)
@@ -377,7 +375,7 @@
 
             if comm.rank == 0:
                 num_cells = sum(sizes_c)
-                num_photons = sum(sizes_p)        
+                num_photons = sum(sizes_p)
                 disps_c = [sum(sizes_c[:i]) for i in range(len(sizes_c))]
                 disps_p = [sum(sizes_p[:i]) for i in range(len(sizes_p))]
                 x = np.zeros(num_cells)
@@ -469,7 +467,7 @@
 
         comm.barrier()
 
-    def project_photons(self, normal, area_new=None, exp_time_new=None, 
+    def project_photons(self, normal, area_new=None, exp_time_new=None,
                         redshift_new=None, dist_new=None,
                         absorb_model=None, psf_sigma=None,
                         sky_center=None, responses=None,
@@ -480,7 +478,6 @@
 
         Parameters
         ----------
-
         normal : character or array_like
             Normal vector to the plane of projection. If "x", "y", or "z", will
             assume to be along that axis (and will probably be faster). Otherwise, 
@@ -514,6 +511,7 @@
             the plane of projection. If not set, an arbitrary grid-aligned north_vector 
             is chosen. Ignored in the case where a particular axis (e.g., "x", "y", or
             "z") is explicitly specified.
+
         Examples
         --------
         >>> L = np.array([0.1,-0.2,0.3])
@@ -751,9 +749,6 @@
         return weights
 
     def _convolve_with_rmf(self, respfile, events, mat_key):
-        """
-        Convolve the events with a RMF file.
-        """
         mylog.info("Reading response matrix file (RMF): %s" % (respfile))
 
         hdulist = _astropy.pyfits.open(respfile)
@@ -880,7 +875,7 @@
 
     def filter_events(self, region):
         """
-        Filter events using a ds9 region. Requires the pyregion package.
+        Filter events using a ds9 *region*. Requires the pyregion package.
         Returns a new EventList.
         """
         import pyregion
@@ -1033,10 +1028,10 @@
 
              time = np.random.uniform(size=self.num_events, low=0.0,
                                       high=float(self.parameters["ExposureTime"]))
-             col_t = pyfits.Column(name="TIME", format='1D', unit='s', 
+             col_t = pyfits.Column(name="TIME", format='1D', unit='s',
                                    array=time)
              cols.append(col_t)
-        
+
         coldefs = pyfits.ColDefs(cols)
         tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
         tbhdu.update_ext_name("EVENTS")
@@ -1125,7 +1120,6 @@
 
         Parameters
         ----------
-
         prefix : string
             The filename prefix.
         clobber : boolean, optional
@@ -1254,7 +1248,6 @@
 
         Parameters
         ----------
-
         imagefile : string
             The name of the image file to write.
         clobber : boolean, optional
@@ -1313,7 +1306,6 @@
 
         Parameters
         ----------
-
         specfile : string
             The name of the FITS file to be written.
         bin_type : string, optional
@@ -1422,10 +1414,37 @@
 
 def merge_files(input_files, output_file, clobber=False,
                 add_exposure_times=False):
+    r"""
+    Helper function for merging PhotonList or EventList HDF5 files.
 
+    Parameters
+    ----------
+    input_files : list of strings
+        List of filenames that will be merged together.
+    output_file : string
+        Name of the merged file to be written.
+    clobber : boolean, default False
+        If the output file already exists, set this to True to
+        overwrite it.
+    add_exposure_times : boolean, default False
+        If set to True, exposure times will be added together. Otherwise,
+        the exposure times of all of the files must be the same.
+
+    Examples
+    --------
+    >>> from yt.analysis_modules.photon_simulator.api import merge_files
+    >>> merge_files(["events_0.h5","events_1.h5","events_3.h5"], "events.h5",
+    ...             clobber=True, add_exposure_times=True)
+
+    Notes
+    -----
+    Currently, merging requires that all of the parameters have the same
+    values, with the possible exception of the exposure time parameter
+    (any key ending in "exp_time") when add_exposure_times=True.
+    """
     if os.path.exists(output_file) and not clobber:
         raise IOError("Cannot overwrite existing file %s. " % output_file +
-                      "If you want to do this, use --clobber.")
+                      "If you want to do this, set clobber=True.")
 
     f_in = h5py.File(input_files[0], "r")
     f_out = h5py.File(output_file, "w")
@@ -1466,7 +1485,29 @@
 
     f_out.close()
 
-def convert_old_file(input_file, output_file):
+def convert_old_file(input_file, output_file, clobber=False):
+    r"""
+    Helper function for converting old PhotonList or EventList HDF5
+    files (pre yt v3.3) to their new versions.
+
+    Parameters
+    ----------
+    input_file : list of strings
+        The filename of the old-versioned file to be converted.
+    output_file : string
+        Name of the new file to be outputted.
+    clobber : boolean, default False
+        If a the output file already exists, set this to True to
+        overwrite it.
+
+    Examples
+    --------
+    >>> from yt.analysis_modules.photon_simulator.api import convert_old_file
+    >>> merge_files("photons_old.h5", "photons_new.h5", clobber=True)
+    """
+    if os.path.exists(output_file) and not clobber:
+        raise IOError("Cannot overwrite existing file %s. " % output_file +
+                      "If you want to do this, set clobber=True.")
 
     f_in = h5py.File(input_file, "r")
 

diff -r 52c304012e6b226e7fe527c235a9593a18c7ab30 -r 5903092a952bd3f2f9b8faa7c4a36c525a494493 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -46,7 +46,6 @@
 
     Parameters
     ----------
-
     model_name : string
         The name of the thermal emission model.
     emin : float
@@ -63,7 +62,7 @@
     --------
     >>> mekal_model = XSpecThermalModel("mekal", 0.05, 50.0, 1000)
     """
-    def __init__(self, model_name, emin, emax, nchan, 
+    def __init__(self, model_name, emin, emax, nchan,
                  thermal_broad=False, settings=None):
         self.model_name = model_name
         self.thermal_broad = thermal_broad
@@ -114,7 +113,6 @@
 
     Parameters
     ----------
-
     model_name : string
         The name of the absorption model.
     nH : float
@@ -133,7 +131,7 @@
     --------
     >>> abs_model = XSpecAbsorbModel("wabs", 0.1)
     """
-    def __init__(self, model_name, nH, emin=0.01, emax=50.0, 
+    def __init__(self, model_name, nH, emin=0.01, emax=50.0,
                  nchan=100000, settings=None):
         self.model_name = model_name
         self.nH = nH
@@ -168,11 +166,10 @@
     Initialize a thermal gas emission model from the AtomDB APEC tables
     available at http://www.atomdb.org. This code borrows heavily from Python
     routines used to read the APEC tables developed by Adam Foster at the
-    CfA (afoster at cfa.harvard.edu). 
+    CfA (afoster at cfa.harvard.edu).
 
     Parameters
     ----------
-
     apec_root : string
         The directory root where the APEC model files are stored.
     emin : float
@@ -202,7 +199,7 @@
                                      self.apec_prefix+"_coco.fits")
         self.linefile = os.path.join(self.apec_root,
                                      self.apec_prefix+"_line.fits")
-        super(TableApecModel, self).__init__(emin, emax, nchan)  
+        super(TableApecModel, self).__init__(emin, emax, nchan)
         self.wvbins = hc/self.ebins[::-1].d
         # H, He, and trace elements
         self.cosmic_elem = [1,2,3,4,5,9,11,15,17,19,21,22,23,24,25,27,29,30]
@@ -296,7 +293,7 @@
         # First do H,He, and trace elements
         for elem in self.cosmic_elem:
             cspec_l += self._make_spectrum(elem, tindex+2)
-            cspec_r += self._make_spectrum(elem, tindex+3)            
+            cspec_r += self._make_spectrum(elem, tindex+3)
         # Next do the metals
         for elem in self.metal_elem:
             mspec_l += self._make_spectrum(elem, tindex+2)
@@ -311,7 +308,6 @@
 
     Parameters
     ----------
-
     filename : string
         The name of the table file.
     nH : float
@@ -321,7 +317,6 @@
     --------
     >>> abs_model = XSpecAbsorbModel("abs_table.h5", 0.1)
     """
-
     def __init__(self, filename, nH):
         if not os.path.exists(filename):
             raise IOError("File does not exist: %s." % filename)


https://bitbucket.org/yt_analysis/yt/commits/9ba58304b9fd/
Changeset:   9ba58304b9fd
Branch:      yt
User:        jzuhone
Date:        2015-11-10 19:56:05+00:00
Summary:     Merge
Affected #:  4 files

diff -r 3547c2e2eca0949527d1d6ff86a63996eda62c84 -r 9ba58304b9fdcb2414da652534508633d0a81477 yt/analysis_modules/photon_simulator/api.py
--- a/yt/analysis_modules/photon_simulator/api.py
+++ b/yt/analysis_modules/photon_simulator/api.py
@@ -16,7 +16,9 @@
 
 from .photon_simulator import \
      PhotonList, \
-     EventList
+     EventList, \
+     merge_files, \
+     convert_old_file
 
 from .spectral_models import \
      SpectralModel, \

diff -r 3547c2e2eca0949527d1d6ff86a63996eda62c84 -r 9ba58304b9fdcb2414da652534508633d0a81477 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -52,14 +52,13 @@
 
 class ThermalPhotonModel(PhotonModel):
     r"""
-    Initialize a ThermalPhotonModel from a thermal spectrum. 
+    Initialize a ThermalPhotonModel from a thermal spectrum.
 
     Parameters
     ----------
-
     spectral_model : `SpectralModel`
         A thermal spectral model instance, either of `XSpecThermalModel`
-        or `TableApecModel`. 
+        or `TableApecModel`.
     X_H : float, optional
         The hydrogen mass fraction.
     Zmet : float or string, optional

diff -r 3547c2e2eca0949527d1d6ff86a63996eda62c84 -r 9ba58304b9fdcb2414da652534508633d0a81477 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -25,6 +25,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 from yt.extern.six import string_types
+from collections import defaultdict
 import numpy as np
 from yt.funcs import mylog, get_pbar, iterable, ensure_list
 from yt.utilities.physical_constants import clight
@@ -38,6 +39,7 @@
 from yt.utilities.on_demand_imports import _h5py as h5py
 from yt.utilities.on_demand_imports import _astropy
 import warnings
+import os
 
 comm = communication_system.communicators[-1]
 
@@ -53,6 +55,25 @@
     else:
         return YTQuantity(value, default_units)
 
+def validate_parameters(first, second, skip=[]):
+    keys1 = list(first.keys())
+    keys2 = list(second.keys())
+    keys1.sort()
+    keys2.sort()
+    if keys1 != keys2:
+        raise RuntimeError("The two inputs do not have the same parameters!")
+    for k1, k2 in zip(keys1, keys2):
+        if k1 not in skip:
+            v1 = first[k1]
+            v2 = second[k2]
+            if isinstance(v1, string_types) or isinstance(v2, string_types):
+                check_equal = v1 == v2
+            else:
+                check_equal = np.allclose(v1, v2, rtol=0.0, atol=1.0e-10)
+            if not check_equal:
+                raise RuntimeError("The values for the parameter '%s' in the two inputs" % k1 +
+                                   " are not identical (%s vs. %s)!" % (v1, v2))
+
 class PhotonList(object):
 
     def __init__(self, photons, parameters, cosmo, p_bins):
@@ -107,29 +128,31 @@
 
         f = h5py.File(filename, "r")
 
-        parameters["FiducialExposureTime"] = YTQuantity(f["/fid_exp_time"].value, "s")
-        parameters["FiducialArea"] = YTQuantity(f["/fid_area"].value, "cm**2")
-        parameters["FiducialRedshift"] = f["/fid_redshift"].value
-        parameters["FiducialAngularDiameterDistance"] = YTQuantity(f["/fid_d_a"].value, "Mpc")
-        parameters["Dimension"] = f["/dimension"].value
-        parameters["Width"] = YTQuantity(f["/width"].value, "kpc")
-        parameters["HubbleConstant"] = f["/hubble"].value
-        parameters["OmegaMatter"] = f["/omega_matter"].value
-        parameters["OmegaLambda"] = f["/omega_lambda"].value
+        p = f["/parameters"]
+        parameters["FiducialExposureTime"] = YTQuantity(p["fid_exp_time"].value, "s")
+        parameters["FiducialArea"] = YTQuantity(p["fid_area"].value, "cm**2")
+        parameters["FiducialRedshift"] = p["fid_redshift"].value
+        parameters["FiducialAngularDiameterDistance"] = YTQuantity(p["fid_d_a"].value, "Mpc")
+        parameters["Dimension"] = p["dimension"].value
+        parameters["Width"] = YTQuantity(p["width"].value, "kpc")
+        parameters["HubbleConstant"] = p["hubble"].value
+        parameters["OmegaMatter"] = p["omega_matter"].value
+        parameters["OmegaLambda"] = p["omega_lambda"].value
 
         num_cells = f["/x"][:].shape[0]
         start_c = comm.rank*num_cells//comm.size
         end_c = (comm.rank+1)*num_cells//comm.size
 
-        photons["x"] = YTArray(f["/x"][start_c:end_c], "kpc")
-        photons["y"] = YTArray(f["/y"][start_c:end_c], "kpc")
-        photons["z"] = YTArray(f["/z"][start_c:end_c], "kpc")
-        photons["dx"] = YTArray(f["/dx"][start_c:end_c], "kpc")
-        photons["vx"] = YTArray(f["/vx"][start_c:end_c], "km/s")
-        photons["vy"] = YTArray(f["/vy"][start_c:end_c], "km/s")
-        photons["vz"] = YTArray(f["/vz"][start_c:end_c], "km/s")
+        d = f["/data"]
+        photons["x"] = YTArray(d["x"][start_c:end_c], "kpc")
+        photons["y"] = YTArray(d["y"][start_c:end_c], "kpc")
+        photons["z"] = YTArray(d["z"][start_c:end_c], "kpc")
+        photons["dx"] = YTArray(d["dx"][start_c:end_c], "kpc")
+        photons["vx"] = YTArray(d["vx"][start_c:end_c], "km/s")
+        photons["vy"] = YTArray(d["vy"][start_c:end_c], "km/s")
+        photons["vz"] = YTArray(d["vz"][start_c:end_c], "km/s")
 
-        n_ph = f["/num_photons"][:]
+        n_ph = d["num_photons"][:]
 
         if comm.rank == 0:
             start_e = np.uint64(0)
@@ -142,7 +165,7 @@
         p_bins = np.cumsum(photons["NumberOfPhotons"])
         p_bins = np.insert(p_bins, 0, [np.uint64(0)])
 
-        photons["Energy"] = YTArray(f["/energy"][start_e:end_e], "keV")
+        photons["Energy"] = YTArray(d["energy"][start_e:end_e], "keV")
 
         f.close()
 
@@ -163,7 +186,6 @@
 
         Parameters
         ----------
-
         data_source : `yt.data_objects.data_containers.YTSelectionContainer`
             The data source from which the photons will be generated.
         redshift : float
@@ -177,9 +199,9 @@
             dictionary and returns a *photons* dictionary. Must be of the
             form: photon_model(data_source, parameters)
         parameters : dict, optional
-            A dictionary of parameters to be passed to the user function. 
+            A dictionary of parameters to be passed to the user function.
         center : string or array_like, optional
-            The origin of the photons. Accepts "c", "max", or a coordinate.                
+            The origin of the photons. Accepts "c", "max", or a coordinate.
         dist : tuple, optional
             The angular diameter distance in the form (value, unit), used
             mainly for nearby sources. This may be optionally supplied
@@ -191,7 +213,6 @@
 
         Examples
         --------
-
         This is the simplest possible example, where we call the built-in thermal model:
 
         >>> thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3)
@@ -227,7 +248,7 @@
 
         >>> import numpy as np
         >>> import yt
-        >>> from yt.analysis_modules.photon_simulator import *
+        >>> from yt.analysis_modules.photon_simulator.api import PhotonList
         >>> def line_func(source, parameters):
         ...
         ...     ds = source.ds
@@ -257,7 +278,7 @@
         >>> ddims = (128,128,128)
         >>> random_data = {"density":(np.random.random(ddims),"g/cm**3")}
         >>> ds = yt.load_uniform_grid(random_data, ddims)
-        >>> dd = ds.all_data
+        >>> dd = ds.all_data()
         >>> my_photons = PhotonList.from_user_model(dd, redshift, area,
         ...                                         time, line_func,
         ...                                         parameters=parameters)
@@ -357,7 +378,7 @@
 
             if comm.rank == 0:
                 num_cells = sum(sizes_c)
-                num_photons = sum(sizes_p)        
+                num_photons = sum(sizes_p)
                 disps_c = [sum(sizes_c[:i]) for i in range(len(sizes_c))]
                 disps_p = [sum(sizes_p[:i]) for i in range(len(sizes_p))]
                 x = np.zeros(num_cells)
@@ -401,7 +422,7 @@
             comm.comm.Gatherv([self.photons["NumberOfPhotons"], local_num_cells, mpi_long],
                               [n_ph, (sizes_c, disps_c), mpi_long], root=0)
             comm.comm.Gatherv([self.photons["Energy"].d, local_num_photons, mpi_double],
-                              [e, (sizes_p, disps_p), mpi_double], root=0) 
+                              [e, (sizes_p, disps_p), mpi_double], root=0)
 
         else:
 
@@ -419,35 +440,37 @@
 
             f = h5py.File(photonfile, "w")
 
-            # Scalars
+            # Parameters
 
-            f.create_dataset("fid_area", data=float(self.parameters["FiducialArea"]))
-            f.create_dataset("fid_exp_time", data=float(self.parameters["FiducialExposureTime"]))
-            f.create_dataset("fid_redshift", data=self.parameters["FiducialRedshift"])
-            f.create_dataset("hubble", data=self.parameters["HubbleConstant"])
-            f.create_dataset("omega_matter", data=self.parameters["OmegaMatter"])
-            f.create_dataset("omega_lambda", data=self.parameters["OmegaLambda"])
-            f.create_dataset("fid_d_a", data=float(self.parameters["FiducialAngularDiameterDistance"]))
-            f.create_dataset("dimension", data=self.parameters["Dimension"])
-            f.create_dataset("width", data=float(self.parameters["Width"]))
+            p = f.create_group("parameters")
+            p.create_dataset("fid_area", data=float(self.parameters["FiducialArea"]))
+            p.create_dataset("fid_exp_time", data=float(self.parameters["FiducialExposureTime"]))
+            p.create_dataset("fid_redshift", data=self.parameters["FiducialRedshift"])
+            p.create_dataset("hubble", data=self.parameters["HubbleConstant"])
+            p.create_dataset("omega_matter", data=self.parameters["OmegaMatter"])
+            p.create_dataset("omega_lambda", data=self.parameters["OmegaLambda"])
+            p.create_dataset("fid_d_a", data=float(self.parameters["FiducialAngularDiameterDistance"]))
+            p.create_dataset("dimension", data=self.parameters["Dimension"])
+            p.create_dataset("width", data=float(self.parameters["Width"]))
 
-            # Arrays
+            # Data
 
-            f.create_dataset("x", data=x)
-            f.create_dataset("y", data=y)
-            f.create_dataset("z", data=z)
-            f.create_dataset("vx", data=vx)
-            f.create_dataset("vy", data=vy)
-            f.create_dataset("vz", data=vz)
-            f.create_dataset("dx", data=dx)
-            f.create_dataset("num_photons", data=n_ph)
-            f.create_dataset("energy", data=e)
+            d = f.create_group("parameters")
+            d.create_dataset("x", data=x)
+            d.create_dataset("y", data=y)
+            d.create_dataset("z", data=z)
+            d.create_dataset("vx", data=vx)
+            d.create_dataset("vy", data=vy)
+            d.create_dataset("vz", data=vz)
+            d.create_dataset("dx", data=dx)
+            d.create_dataset("num_photons", data=n_ph)
+            d.create_dataset("energy", data=e)
 
             f.close()
 
         comm.barrier()
 
-    def project_photons(self, normal, area_new=None, exp_time_new=None, 
+    def project_photons(self, normal, area_new=None, exp_time_new=None,
                         redshift_new=None, dist_new=None,
                         absorb_model=None, psf_sigma=None,
                         sky_center=None, responses=None,
@@ -458,7 +481,6 @@
 
         Parameters
         ----------
-
         normal : character or array_like
             Normal vector to the plane of projection. If "x", "y", or "z", will
             assume to be along that axis (and will probably be faster). Otherwise, 
@@ -814,7 +836,7 @@
 
         return events, info
 
-class EventList(object) :
+class EventList(object):
 
     def __init__(self, events, parameters):
         self.events = events
@@ -855,22 +877,7 @@
 
     def __add__(self, other):
         assert_same_wcs(self.wcs, other.wcs)
-        keys1 = list(self.parameters.keys())
-        keys2 = list(other.parameters.keys())
-        keys1.sort()
-        keys2.sort()
-        if keys1 != keys2:
-            raise RuntimeError("The two EventLists do not have the same parameters!")
-        for k1, k2 in zip(keys1, keys2):
-            v1 = self.parameters[k1]
-            v2 = other.parameters[k2]
-            if isinstance(v1, string_types) or isinstance(v2, string_types):
-                check_equal = v1 == v2
-            else:
-                check_equal = np.allclose(v1, v2, rtol=0.0, atol=1.0e-10)
-            if not check_equal:
-                raise RuntimeError("The values for the parameter '%s' in the two EventLists" % k1 +
-                                   " are not identical (%s vs. %s)!" % (v1, v2))
+        validate_parameters(self.parameters, other.parameters)
         events = {}
         for item1, item2 in zip(self.items(), other.items()):
             k1, v1 = item1
@@ -880,7 +887,7 @@
 
     def filter_events(self, region):
         """
-        Filter events using a ds9 region. Requires the pyregion package.
+        Filter events using a ds9 *region*. Requires the pyregion package.
         Returns a new EventList.
         """
         import pyregion
@@ -909,36 +916,38 @@
 
         f = h5py.File(h5file, "r")
 
-        parameters["ExposureTime"] = YTQuantity(f["/exp_time"].value, "s")
+        p = f["/parameters"]
+        parameters["ExposureTime"] = YTQuantity(p["exp_time"].value, "s")
         if isinstance(f["/area"].value, (string_types, bytes)):
-            parameters["Area"] = f["/area"].value.decode("utf8")
+            parameters["Area"] = p["area"].value.decode("utf8")
         else:
-            parameters["Area"] = YTQuantity(f["/area"].value, "cm**2")
-        parameters["Redshift"] = f["/redshift"].value
-        parameters["AngularDiameterDistance"] = YTQuantity(f["/d_a"].value, "Mpc")
-        if "rmf" in f:
-            parameters["RMF"] = f["/rmf"].value.decode("utf8")
-        if "arf" in f:
-            parameters["ARF"] = f["/arf"].value.decode("utf8")
-        if "channel_type" in f:
-            parameters["ChannelType"] = f["/channel_type"].value.decode("utf8")
-        if "mission" in f:
-            parameters["Mission"] = f["/mission"].value.decode("utf8")
-        if "telescope" in f:
-            parameters["Telescope"] = f["/telescope"].value.decode("utf8")
-        if "instrument" in f:
-            parameters["Instrument"] = f["/instrument"].value.decode("utf8")
+            parameters["Area"] = YTQuantity(p["area"].value, "cm**2")
+        parameters["Redshift"] = p["redshift"].value
+        parameters["AngularDiameterDistance"] = YTQuantity(p["d_a"].value, "Mpc")
+        parameters["sky_center"] = YTArray(p["sky_center"][:], "deg")
+        parameters["dtheta"] = YTQuantity(p["dtheta"].value, "deg")
+        parameters["pix_center"] = p["pix_center"][:]
+        if "rmf" in p:
+            parameters["RMF"] = p["rmf"].value.decode("utf8")
+        if "arf" in p:
+            parameters["ARF"] = p["arf"].value.decode("utf8")
+        if "channel_type" in p:
+            parameters["ChannelType"] = p["channel_type"].value.decode("utf8")
+        if "mission" in p:
+            parameters["Mission"] = p["mission"].value.decode("utf8")
+        if "telescope" in p:
+            parameters["Telescope"] = p["telescope"].value.decode("utf8")
+        if "instrument" in p:
+            parameters["Instrument"] = p["instrument"].value.decode("utf8")
 
-        events["xpix"] = f["/xpix"][:]
-        events["ypix"] = f["/ypix"][:]
-        events["eobs"] = YTArray(f["/eobs"][:], "keV")
-        if "pi" in f:
-            events["PI"] = f["/pi"][:]
-        if "pha" in f:
-            events["PHA"] = f["/pha"][:]
-        parameters["sky_center"] = YTArray(f["/sky_center"][:], "deg")
-        parameters["dtheta"] = YTQuantity(f["/dtheta"].value, "deg")
-        parameters["pix_center"] = f["/pix_center"][:]
+        d = f["/data"]
+        events["xpix"] = d["xpix"][:]
+        events["ypix"] = d["ypix"][:]
+        events["eobs"] = YTArray(d["eobs"][:], "keV")
+        if "pi" in d:
+            events["PI"] = d["pi"][:]
+        if "pha" in d:
+            events["PHA"] = d["pha"][:]
 
         f.close()
 
@@ -1031,10 +1040,10 @@
 
              time = np.random.uniform(size=self.num_events, low=0.0,
                                       high=float(self.parameters["ExposureTime"]))
-             col_t = pyfits.Column(name="TIME", format='1D', unit='s', 
+             col_t = pyfits.Column(name="TIME", format='1D', unit='s',
                                    array=time)
              cols.append(col_t)
-        
+
         coldefs = pyfits.ColDefs(cols)
         tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
         tbhdu.update_ext_name("EVENTS")
@@ -1123,7 +1132,6 @@
 
         Parameters
         ----------
-
         prefix : string
             The filename prefix.
         clobber : boolean, optional
@@ -1207,38 +1215,40 @@
         """
         f = h5py.File(h5file, "w")
 
-        f.create_dataset("/exp_time", data=float(self.parameters["ExposureTime"]))
+        p = f.create_group("parameters")
+        p.create_dataset("exp_time", data=float(self.parameters["ExposureTime"]))
         area = self.parameters["Area"]
         if not isinstance(area, string_types):
             area = float(area)
-        f.create_dataset("/area", data=area)
-        f.create_dataset("/redshift", data=self.parameters["Redshift"])
-        f.create_dataset("/d_a", data=float(self.parameters["AngularDiameterDistance"]))
+        p.create_dataset("area", data=area)
+        p.create_dataset("redshift", data=self.parameters["Redshift"])
+        p.create_dataset("d_a", data=float(self.parameters["AngularDiameterDistance"]))
         if "ARF" in self.parameters:
-            f.create_dataset("/arf", data=self.parameters["ARF"])
+            p.create_dataset("arf", data=self.parameters["ARF"])
         if "RMF" in self.parameters:
-            f.create_dataset("/rmf", data=self.parameters["RMF"])
+            p.create_dataset("rmf", data=self.parameters["RMF"])
         if "ChannelType" in self.parameters:
-            f.create_dataset("/channel_type", data=self.parameters["ChannelType"])
+            p.create_dataset("channel_type", data=self.parameters["ChannelType"])
         if "Mission" in self.parameters:
-            f.create_dataset("/mission", data=self.parameters["Mission"]) 
+            p.create_dataset("mission", data=self.parameters["Mission"])
         if "Telescope" in self.parameters:
-            f.create_dataset("/telescope", data=self.parameters["Telescope"])
+            p.create_dataset("telescope", data=self.parameters["Telescope"])
         if "Instrument" in self.parameters:
-            f.create_dataset("/instrument", data=self.parameters["Instrument"])
+            p.create_dataset("instrument", data=self.parameters["Instrument"])
+        p.create_dataset("sky_center", data=self.parameters["sky_center"].d)
+        p.create_dataset("pix_center", data=self.parameters["pix_center"])
+        p.create_dataset("dtheta", data=float(self.parameters["dtheta"]))
 
-        f.create_dataset("/xpix", data=self["xpix"])
-        f.create_dataset("/ypix", data=self["ypix"])
-        f.create_dataset("/xsky", data=self["xsky"].d)
-        f.create_dataset("/ysky", data=self["ysky"].d)
-        f.create_dataset("/eobs", data=self["eobs"].d)
-        if "PI" in self:
-            f.create_dataset("/pi", data=self["PI"])
-        if "PHA" in self:
-            f.create_dataset("/pha", data=self["PHA"])                  
-        f.create_dataset("/sky_center", data=self.parameters["sky_center"].d)
-        f.create_dataset("/pix_center", data=self.parameters["pix_center"])
-        f.create_dataset("/dtheta", data=float(self.parameters["dtheta"]))
+        d = f.create_group("data")
+        d.create_dataset("xpix", data=self["xpix"])
+        d.create_dataset("ypix", data=self["ypix"])
+        d.create_dataset("xsky", data=self["xsky"].d)
+        d.create_dataset("ysky", data=self["ysky"].d)
+        d.create_dataset("eobs", data=self["eobs"].d)
+        if "PI" in self.events:
+            d.create_dataset("pi", data=self.events["PI"])
+        if "PHA" in self.events:
+            d.create_dataset("pha", data=self.events["PHA"])
 
         f.close()
 
@@ -1250,7 +1260,6 @@
 
         Parameters
         ----------
-
         imagefile : string
             The name of the image file to write.
         clobber : boolean, optional
@@ -1309,7 +1318,6 @@
 
         Parameters
         ----------
-
         specfile : string
             The name of the FITS file to be written.
         bin_type : string, optional
@@ -1417,3 +1425,129 @@
         hdulist = pyfits.HDUList([pyfits.PrimaryHDU(), tbhdu])
 
         hdulist.writeto(specfile, clobber=clobber)
+
+def merge_files(input_files, output_file, clobber=False,
+                add_exposure_times=False):
+    r"""
+    Helper function for merging PhotonList or EventList HDF5 files.
+
+    Parameters
+    ----------
+    input_files : list of strings
+        List of filenames that will be merged together.
+    output_file : string
+        Name of the merged file to be written.
+    clobber : boolean, default False
+        If the output file already exists, set this to True to
+        overwrite it.
+    add_exposure_times : boolean, default False
+        If set to True, exposure times will be added together. Otherwise,
+        the exposure times of all of the files must be the same.
+
+    Examples
+    --------
+    >>> from yt.analysis_modules.photon_simulator.api import merge_files
+    >>> merge_files(["events_0.h5","events_1.h5","events_3.h5"], "events.h5",
+    ...             clobber=True, add_exposure_times=True)
+
+    Notes
+    -----
+    Currently, merging requires that all of the parameters have the same
+    values, with the possible exception of the exposure time parameter
+    (any key ending in "exp_time") when add_exposure_times=True.
+    """
+    if os.path.exists(output_file) and not clobber:
+        raise IOError("Cannot overwrite existing file %s. " % output_file +
+                      "If you want to do this, set clobber=True.")
+
+    f_in = h5py.File(input_files[0], "r")
+    f_out = h5py.File(output_file, "w")
+
+    exp_time_key = ""
+    p_out = f_out.create_group("parameters")
+    for key, param in f_in["parameters"].items():
+        if key.endswith("exp_time"):
+            exp_time_key = key
+        else:
+            p_out[key] = param.value
+
+    skip = []
+    if add_exposure_times:
+        skip.append(exp_time_key)
+
+    for fn in input_files[1:]:
+        f = h5py.File(fn, "r")
+        validate_parameters(f_in["parameters"], f["parameters"], skip=skip)
+        f.close()
+
+    f_in.close()
+
+    data = defaultdict(list)
+    tot_exp_time = 0.0
+
+    for fn in input_files:
+        f = h5py.File(fn, "r")
+        if add_exposure_times:
+            tot_exp_time += f["/parameters"][exp_time_key].value
+        for key in f["/data"]:
+            data[key].append(f["/data"][key][:])
+        f.close()
+
+    d = f_out.create_group("data")
+    for k in data:
+        d.create_dataset(key, data=np.concatenate(data[k]))
+
+    f_out.close()
+
+def convert_old_file(input_file, output_file, clobber=False):
+    r"""
+    Helper function for converting old PhotonList or EventList HDF5
+    files (pre yt v3.3) to their new versions.
+
+    Parameters
+    ----------
+    input_file : string
+        The filename of the old-versioned file to be converted.
+    output_file : string
+        Name of the new file to be written.
+    clobber : boolean, default False
+        If the output file already exists, set this to True to
+        overwrite it.
+
+    Examples
+    --------
+    >>> from yt.analysis_modules.photon_simulator.api import convert_old_file
+    >>> merge_files("photons_old.h5", "photons_new.h5", clobber=True)
+    """
+    if os.path.exists(output_file) and not clobber:
+        raise IOError("Cannot overwrite existing file %s. " % output_file +
+                      "If you want to do this, set clobber=True.")
+
+    f_in = h5py.File(input_file, "r")
+
+    if "NumberOfPhotons" in f_in:
+        params = ["fid_exp_time", "fid_area", "fid_d_a", "fid_redshift",
+                  "dimension", "width", "hubble", "omega_matter",
+                  "omega_lambda"]
+        data = ["x", "y", "z", "vx", "vy", "vz", "energy", "num_photons"]
+    elif "pix_center" in f_in:
+        params = ["exp_time", "area", "redshift", "d_a", "arf",
+                  "rmf", "channel_type", "mission", "telescope",
+                  "instrument", "sky_center", "pix_center", "dtheta"]
+        data = ["xsky", "ysky", "xpix", "ypix", "eobs", "pi", "pha"]
+
+    f_out = h5py.File(output_file, "w")
+
+    p = f_out.create_group("parameters")
+    d = f_out.create_group("data")
+
+    for key in params:
+        if key in f_in:
+            p.create_dataset(key, data=f_in[key].value)
+
+    for key in data:
+        if key in f_in:
+            d.create_dataset(key, data=f_in[key].value)
+
+    f_in.close()
+    f_out.close()
\ No newline at end of file
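
The net effect of this changeset is that both file types now keep all of their
metadata under a "parameters" group and all of their arrays under a "data"
group. A minimal sketch for inspecting such a file with h5py (the filename and
key names here are illustrative, assuming an EventList file written in the new
format):

    import h5py

    # Open a new-format file and list its two top-level groups.
    with h5py.File("events.h5", "r") as f:
        print(list(f["parameters"].keys()))  # e.g. exp_time, area, sky_center
        print(list(f["data"].keys()))        # e.g. xpix, ypix, eobs
        # Scalar parameters are stored as datasets; read one with [()].
        exp_time = f["parameters"]["exp_time"][()]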

diff -r 3547c2e2eca0949527d1d6ff86a63996eda62c84 -r 9ba58304b9fdcb2414da652534508633d0a81477 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -51,7 +51,6 @@
 
     Parameters
     ----------
-
     model_name : string
         The name of the thermal emission model.
     emin : float
@@ -123,7 +122,6 @@
 
     Parameters
     ----------
-
     model_name : string
         The name of the absorption model.
     nH : float
@@ -180,11 +178,10 @@
     Initialize a thermal gas emission model from the AtomDB APEC tables
     available at http://www.atomdb.org. This code borrows heavily from Python
     routines used to read the APEC tables developed by Adam Foster at the
-    CfA (afoster at cfa.harvard.edu). 
+    CfA (afoster at cfa.harvard.edu).
 
     Parameters
     ----------
-
     apec_root : string
         The directory root where the APEC model files are stored.
     emin : float
@@ -323,7 +320,6 @@
 
     Parameters
     ----------
-
     filename : string
         The name of the table file.
     nH : float
@@ -333,7 +329,6 @@
     --------
     >>> abs_model = XSpecAbsorbModel("abs_table.h5", 0.1)
     """
-
     def __init__(self, filename, nH):
         if not os.path.exists(filename):
             raise IOError("File does not exist: %s." % filename)


https://bitbucket.org/yt_analysis/yt/commits/4f9fc0c537e4/
Changeset:   4f9fc0c537e4
Branch:      yt
User:        jzuhone
Date:        2015-11-16 15:32:12+00:00
Summary:     Merge
Affected #:  5 files

diff -r 9ba58304b9fdcb2414da652534508633d0a81477 -r 4f9fc0c537e4373ab2e4dd6d8093c4dd1c30c936 doc/get_yt.sh
--- a/doc/get_yt.sh
+++ b/doc/get_yt.sh
@@ -314,12 +314,12 @@
 echo
 echo "    export PATH=$DEST_DIR/bin:\$PATH"
 echo
-echo "and on csh-style shells"
+echo "and on csh-style shells:"
 echo
 echo "    setenv PATH $DEST_DIR/bin:\$PATH"
 echo
-echo "You can also the init file appropriate for your shell to include the same"
-echo "command."
+echo "You can also update the init file appropriate for your shell to include"
+echo "the same command."
 echo
 echo "To get started with yt, check out the orientation:"
 echo

diff -r 9ba58304b9fdcb2414da652534508633d0a81477 -r 4f9fc0c537e4373ab2e4dd6d8093c4dd1c30c936 setup.py
--- a/setup.py
+++ b/setup.py
@@ -200,8 +200,8 @@
                 'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
             ]
         },
-        author="Matthew J. Turk",
-        author_email="matthewturk at gmail.com",
+        author="The yt project",
+        author_email="yt-dev at lists.spacepope.org",
         url="http://yt-project.org/",
         license="BSD",
         configuration=configuration,

diff -r 9ba58304b9fdcb2414da652534508633d0a81477 -r 4f9fc0c537e4373ab2e4dd6d8093c4dd1c30c936 yt/analysis_modules/photon_simulator/tests/test_beta_model.py
--- a/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
@@ -43,7 +43,7 @@
 @requires_file(rmf)
 def test_beta_model():
     import xspec
-    
+
     xspec.Fit.statMethod = "cstat"
     xspec.Xset.addModelString("APECTHERMAL","yes")
     xspec.Fit.query = "yes"
@@ -119,7 +119,7 @@
     norm_sim = float(norm_sim.in_cgs())
 
     events = photons.project_photons("z", responses=[arf,rmf],
-                                     absorb_model=abs_model, 
+                                     absorb_model=abs_model,
                                      convolve_energies=True, prng=my_prng)
     events.write_spectrum("beta_model_evt.pi", clobber=True)
 
@@ -143,7 +143,7 @@
     xspec.Fit.renorm()
     xspec.Fit.nIterations = 100
     xspec.Fit.perform()
-    
+
     kT  = m.bapec.kT.values[0]
     mu = (m.bapec.Redshift.values[0]-redshift)*ckms
     Z = m.bapec.Abundanc.values[0]
@@ -156,10 +156,8 @@
     dsigma = m.bapec.Velocity.sigma
     dnorm = m.bapec.norm.sigma
 
-    print kT, kT_sim, dkT
-
     assert np.abs(mu-mu_sim) < dmu
-    assert np.abs(kT-kT_sim) < dkT    
+    assert np.abs(kT-kT_sim) < dkT
     assert np.abs(Z-Z_sim) < dZ
     assert np.abs(sigma-sigma_sim) < dsigma
     assert np.abs(norm-norm_sim) < dnorm

diff -r 9ba58304b9fdcb2414da652534508633d0a81477 -r 4f9fc0c537e4373ab2e4dd6d8093c4dd1c30c936 yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -69,7 +69,8 @@
     temp = np.bincount(factors)
     return np.array(
         [(prime, temp[prime], (temp[prime] + 1) * (temp[prime] + 2) / 2)
-         for prime in np.unique(factors)]
+         for prime in np.unique(factors)],
+        dtype=np.int64
     )
 
 
@@ -81,12 +82,12 @@
     fac = factorize_number(pieces)
     nfactors = len(fac[:, 2])
     best = 0.0
-    p_size = np.ones(3, dtype=np.int)
+    p_size = np.ones(3, dtype=np.int64)
     if pieces == 1:
         return p_size
 
     while np.all(fac[:, 2] > 0):
-        ldom = np.ones(3, dtype=np.int)
+        ldom = np.ones(3, dtype=np.int64)
         for nfac in range(nfactors):
             i = int(np.sqrt(0.25 + 2 * (fac[nfac, 2] - 1)) - 0.5)
             k = fac[nfac, 2] - int(1 + i * (i + 1) / 2)
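
The dtype changes above pin the decomposition arrays to 64-bit integers.
np.int is just an alias for the builtin int, so arrays created with it default
to the platform C long, which differs between platforms; requesting np.int64
explicitly makes the integer width deterministic. A quick illustration
(nothing yt-specific here):

    import numpy as np

    # dtype=int follows the platform C long: int32 on Windows,
    # int64 on most 64-bit Unix builds.
    a = np.ones(3, dtype=int)
    # dtype=np.int64 is 64-bit everywhere.
    b = np.ones(3, dtype=np.int64)
    print(a.dtype, b.dtype)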

diff -r 9ba58304b9fdcb2414da652534508633d0a81477 -r 4f9fc0c537e4373ab2e4dd6d8093c4dd1c30c936 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -15,7 +15,7 @@
 import numpy as np
 from collections import OrderedDict
 from yt.funcs import mylog, get_image_suffix
-from yt.extern.six import iteritems, itervalues
+from yt.extern.six import iteritems, itervalues, string_types
 from .camera import Camera
 from .render_source import OpaqueSource, BoxSource, CoordinateVectorSource, \
     GridSource, RenderSource
@@ -220,7 +220,7 @@
             if len(rensources) > 0:
                 rs = rensources[0]
                 basename = rs.data_source.ds.basename
-                if isinstance(rs.field, basestring):
+                if isinstance(rs.field, string_types):
                     field = rs.field
                 else:
                     field = rs.field[-1]
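
basestring does not exist on Python 3, so the isinstance check is routed
through the six compatibility shim instead. A minimal illustration of the
pattern (the function name is made up for the example):

    from yt.extern.six import string_types

    def is_stringy(obj):
        # True for str on Python 3, and for str or unicode on Python 2.
        return isinstance(obj, string_types)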


https://bitbucket.org/yt_analysis/yt/commits/c8e4838249ef/
Changeset:   c8e4838249ef
Branch:      yt
User:        jzuhone
Date:        2015-11-17 21:54:46+00:00
Summary:     Adding tests for all of this new stuff
Affected #:  1 file

diff -r 4f9fc0c537e4373ab2e4dd6d8093c4dd1c30c936 -r c8e4838249ef8e57e9cb95b9de4d6b1943a22b2b yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -12,14 +12,18 @@
 
 from yt.analysis_modules.photon_simulator.api import \
     TableApecModel, TableAbsorbModel, \
-    ThermalPhotonModel, PhotonList
+    ThermalPhotonModel, PhotonList, EventList, \
+    convert_old_file, merge_files
 from yt.config import ytcfg
 from yt.testing import requires_file
 from yt.utilities.answer_testing.framework import requires_ds, \
     GenericArrayTest, data_dir_load
 import numpy as np
+from numpy.testing import assert_array_equal
 from numpy.random import RandomState
 import os
+import tempfile
+import shutil
 
 def setup():
     from yt.config import ytcfg
@@ -35,9 +39,11 @@
         "aciss_aimpt_cy17.arf", "nustar_3arcminA.arf",
         "sxt-s_120210_ts02um_intallpxl.arf"]
 
-gslr = test_data_dir+"/GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
+gslr = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
 APEC = xray_data_dir
-TBABS = xray_data_dir+"/tbabs_table.h5"
+TBABS = os.path.join(xray_data_dir, "tbabs_table.h5")
+old_photons = os.path.join(xray_data_dir, "old_photons.h5")
+old_events = os.path.join(xray_data_dir, "old_events.h5")
 
 def return_data(data):
     def _return_data(name):
@@ -47,8 +53,14 @@
 @requires_ds(gslr)
 @requires_file(APEC)
 @requires_file(TBABS)
+@requires_file(old_photons)
+@requires_file(old_events)
 def test_sloshing():
 
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
     prng = RandomState(0x4d3d3d3)
 
     ds = data_dir_load(gslr)
@@ -67,12 +79,11 @@
 
     return_photons = return_data(photons.photons)
 
-    tests = []
-    tests.append(GenericArrayTest(ds, return_photons, args=["photons"]))
+    tests = [GenericArrayTest(ds, return_photons, args=["photons"])]
 
     for a, r in zip(arfs, rmfs):
-        arf = os.path.join(xray_data_dir,a)
-        rmf = os.path.join(xray_data_dir,r)
+        arf = os.path.join(xray_data_dir, a)
+        rmf = os.path.join(xray_data_dir, r)
         events = photons.project_photons([1.0,-0.5,0.2], responses=[arf,rmf],
                                          absorb_model=tbabs_model, 
                                          convolve_energies=True, prng=prng)
@@ -84,3 +95,42 @@
     for test in tests:
         test_sloshing.__name__ = test.description
         yield test
+
+    photons.write_h5_file("test_photons.h5")
+    events.write_h5_file("test_events.h5")
+
+    photons2 = PhotonList.from_file("test_photons.h5")
+    events2 = EventList.from_h5_file("test_events.h5")
+
+    convert_old_file("old_photons.h5", "converted_photons.h5")
+    convert_old_file("old_events.h5", "converted_events.h5")
+
+    photons3 = PhotonList.from_file("converted_photons.h5")
+    events3 = EventList.from_h5_file("converted_events.h5")
+
+    for k in photons:
+        yield assert_array_equal, photons[k].d, photons2[k].d
+        yield assert_array_equal, photons[k].d, photons3[k].d
+    for k in events:
+        yield assert_array_equal, events[k].d, events2[k].d
+        yield assert_array_equal, events[k].d, events3[k].d
+
+    nevents = 0
+
+    for i in range(4):
+        events = photons.project_photons([1.0,-0.5,0.2],
+                                         exp_time=0.25*exp_time,
+                                         absorb_model=tbabs_model,
+                                         prng=prng)
+        events.write_h5_file("split_events_%d.h5" % i)
+        nevents += len(events["xsky"])
+
+    merge_files(["split_events_%d.h5" % i for i in range(4)],
+                "merged_events.h5", add_exposure_times=True)
+
+    merged_events = EventList.from_h5_file("merged_events.h5")
+    assert len(merged_events["xsky"]) == nevents
+    assert merged_events.parameters["ExposureTime"] == exp_time
+
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
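
Stripped of the answer-testing machinery, the write/read round trip these
tests exercise looks roughly like the following sketch (it assumes a
PhotonList "photons" and an EventList "events" already built, as in the test
body above):

    from numpy.testing import assert_array_equal
    from yt.analysis_modules.photon_simulator.api import PhotonList, EventList

    # Write both lists to HDF5 and read them back.
    photons.write_h5_file("test_photons.h5")
    events.write_h5_file("test_events.h5")
    photons2 = PhotonList.from_file("test_photons.h5")
    events2 = EventList.from_h5_file("test_events.h5")

    # The round trip should preserve every event field exactly.
    for k in events.keys():
        assert_array_equal(events[k], events2[k])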


https://bitbucket.org/yt_analysis/yt/commits/01a74b79167e/
Changeset:   01a74b79167e
Branch:      yt
User:        jzuhone
Date:        2015-11-17 21:57:32+00:00
Summary:     Need to use the full pathnames
Affected #:  1 file

diff -r c8e4838249ef8e57e9cb95b9de4d6b1943a22b2b -r 01a74b79167eb9009f0e2866cfdc70e71e41ce97 yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -102,8 +102,8 @@
     photons2 = PhotonList.from_file("test_photons.h5")
     events2 = EventList.from_h5_file("test_events.h5")
 
-    convert_old_file("old_photons.h5", "converted_photons.h5")
-    convert_old_file("old_events.h5", "converted_events.h5")
+    convert_old_file(old_photons, "converted_photons.h5")
+    convert_old_file(old_events, "converted_events.h5")
 
     photons3 = PhotonList.from_file("converted_photons.h5")
     events3 = EventList.from_h5_file("converted_events.h5")


https://bitbucket.org/yt_analysis/yt/commits/0af65bbd5282/
Changeset:   0af65bbd5282
Branch:      yt
User:        jzuhone
Date:        2015-11-17 22:11:24+00:00
Summary:     Documenting the new features
Affected #:  2 files

diff -r 01a74b79167eb9009f0e2866cfdc70e71e41ce97 -r 0af65bbd5282d936213d91956fe5d78c261ab040 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -627,3 +627,31 @@
 .. image:: _images/ds9_sloshing.png
 
 .. image:: _images/ds9_bubbles.png
+
+In November 2015, the structure of the photon and event HDF5 files changed. To 
+convert an old-format file to the new format, use the ``convert_old_file`` utility:
+
+.. code:: python
+
+   from yt.analysis_modules.photon_simulator.api import convert_old_file
+   convert_old_file("old_photons.h5", "new_photons.h5", clobber=True)
+   convert_old_file("old_events.h5", "new_events.h5", clobber=True)
+
+This utility will auto-detect the kind of file (photons or events) and will
+write a replacement file in the new format.
+
+At times it may be convenient to write several ``EventLists`` to disk to be merged
+together later. This can be achieved with the ``merge_files`` utility, which takes
+a list of input filenames and the name of the merged output file:
+
+.. code:: python
+
+   from yt.analysis_modules.photon_simulator.api import merge_files
+   merge_files(["events_0.h5", "events_1.h5", "events_2.h5"], "merged_events.h5",
+                add_exposure_times=True, clobber=False)
+
+At the current time this utility is limited: it only allows merging of
+``EventLists`` that have the same parameters, with the exception of the exposure
+time. If the ``add_exposure_times`` argument to ``merge_files`` is set to ``True``,
+the exposure times of the input files will be summed in the merged file. Otherwise,
+the exposure times of all of the files must be equal.
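
After merging, the summed exposure time can be read back from the merged file
as a quick sanity check (a sketch, reusing the merged_events.h5 file from the
example above):

    from yt.analysis_modules.photon_simulator.api import EventList

    merged = EventList.from_h5_file("merged_events.h5")
    # With add_exposure_times=True, the input exposure times are summed.
    print(merged.parameters["ExposureTime"])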

diff -r 01a74b79167eb9009f0e2866cfdc70e71e41ce97 -r 0af65bbd5282d936213d91956fe5d78c261ab040 yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -18,7 +18,6 @@
 from yt.testing import requires_file
 from yt.utilities.answer_testing.framework import requires_ds, \
     GenericArrayTest, data_dir_load
-import numpy as np
 from numpy.testing import assert_array_equal
 from numpy.random import RandomState
 import os


https://bitbucket.org/yt_analysis/yt/commits/08eaadd3894f/
Changeset:   08eaadd3894f
Branch:      yt
User:        jzuhone
Date:        2015-11-18 17:10:05+00:00
Summary:     Fixing bugs
Affected #:  2 files

diff -r 0af65bbd5282d936213d91956fe5d78c261ab040 -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -139,11 +139,12 @@
         parameters["OmegaMatter"] = p["omega_matter"].value
         parameters["OmegaLambda"] = p["omega_lambda"].value
 
-        num_cells = f["/x"][:].shape[0]
+        d = f["/data"]
+
+        num_cells = d["x"][:].shape[0]
         start_c = comm.rank*num_cells//comm.size
         end_c = (comm.rank+1)*num_cells//comm.size
 
-        d = f["/data"]
         photons["x"] = YTArray(d["x"][start_c:end_c], "kpc")
         photons["y"] = YTArray(d["y"][start_c:end_c], "kpc")
         photons["z"] = YTArray(d["z"][start_c:end_c], "kpc")
@@ -455,7 +456,7 @@
 
             # Data
 
-            d = f.create_group("parameters")
+            d = f.create_group("data")
             d.create_dataset("x", data=x)
             d.create_dataset("y", data=y)
             d.create_dataset("z", data=z)
@@ -918,7 +919,7 @@
 
         p = f["/parameters"]
         parameters["ExposureTime"] = YTQuantity(p["exp_time"].value, "s")
-        if isinstance(f["/area"].value, (string_types, bytes)):
+        if isinstance(p["area"].value, (string_types, bytes)):
             parameters["Area"] = p["area"].value.decode("utf8")
         else:
             parameters["Area"] = YTQuantity(p["area"].value, "cm**2")
@@ -1466,15 +1467,12 @@
     exp_time_key = ""
     p_out = f_out.create_group("parameters")
     for key, param in f_in["parameters"].items():
-        if key.endswidth("exp_time"):
+        if key.endswith("exp_time"):
             exp_time_key = key
         else:
             p_out[key] = param.value
 
-    skip = []
-    if add_exposure_times:
-        skip.append(exp_time_key)
-
+    skip = [exp_time_key] if add_exposure_times else []
     for fn in input_files[1:]:
         f = h5py.File(fn, "r")
         validate_parameters(f_in["parameters"], f["parameters"], skip=skip)
@@ -1485,17 +1483,21 @@
     data = defaultdict(list)
     tot_exp_time = 0.0
 
-    for fn in input_files:
+    for i, fn in enumerate(input_files):
         f = h5py.File(fn, "r")
         if add_exposure_times:
             tot_exp_time += f["/parameters"][exp_time_key].value
+        elif i == 0:
+            tot_exp_time = f["/parameters"][exp_time_key].value
         for key in f["/data"]:
             data[key].append(f["/data"][key][:])
         f.close()
 
+    p_out["exp_time"] = tot_exp_time
+
     d = f_out.create_group("data")
     for k in data:
-        d.create_dataset(key, data=np.concatenate(data[k]))
+        d.create_dataset(k, data=np.concatenate(data[k]))
 
     f_out.close()
 
@@ -1517,7 +1519,7 @@
     Examples
     --------
     >>> from yt.analysis_modules.photon_simulator.api import convert_old_file
-    >>> merge_files("photons_old.h5", "photons_new.h5", clobber=True)
+    >>> convert_old_file("photons_old.h5", "photons_new.h5", clobber=True)
     """
     if os.path.exists(output_file) and not clobber:
         raise IOError("Cannot overwrite existing file %s. " % output_file +
@@ -1525,11 +1527,11 @@
 
     f_in = h5py.File(input_file, "r")
 
-    if "NumberOfPhotons" in f_in:
+    if "num_photons" in f_in:
         params = ["fid_exp_time", "fid_area", "fid_d_a", "fid_redshift",
                   "dimension", "width", "hubble", "omega_matter",
                   "omega_lambda"]
-        data = ["x", "y", "z", "vx", "vy", "vz", "energy", "num_photons"]
+        data = ["x", "y", "z", "dx", "vx", "vy", "vz", "energy", "num_photons"]
     elif "pix_center" in f_in:
         params = ["exp_time", "area", "redshift", "d_a", "arf",
                   "rmf", "channel_type", "mission", "telescope",

diff -r 0af65bbd5282d936213d91956fe5d78c261ab040 -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -20,6 +20,7 @@
     GenericArrayTest, data_dir_load
 from numpy.testing import assert_array_equal
 from numpy.random import RandomState
+from yt.units.yt_array import uconcatenate
 import os
 import tempfile
 import shutil
@@ -41,8 +42,8 @@
 gslr = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
 APEC = xray_data_dir
 TBABS = os.path.join(xray_data_dir, "tbabs_table.h5")
-old_photons = os.path.join(xray_data_dir, "old_photons.h5")
-old_events = os.path.join(xray_data_dir, "old_events.h5")
+old_photon_file = os.path.join(xray_data_dir, "old_photons.h5")
+old_event_file = os.path.join(xray_data_dir, "old_events.h5")
 
 def return_data(data):
     def _return_data(name):
@@ -52,8 +53,8 @@
 @requires_ds(gslr)
 @requires_file(APEC)
 @requires_file(TBABS)
-@requires_file(old_photons)
-@requires_file(old_events)
+@requires_file(old_photon_file)
+@requires_file(old_event_file)
 def test_sloshing():
 
     tmpdir = tempfile.mkdtemp()
@@ -73,21 +74,21 @@
     sphere = ds.sphere("c", (0.1, "Mpc"))
 
     thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3, prng=prng)
-    photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
-                                      thermal_model)
+    photons1 = PhotonList.from_scratch(sphere, redshift, A, exp_time,
+                                       thermal_model)
 
-    return_photons = return_data(photons.photons)
+    return_photons = return_data(photons1.photons)
 
     tests = [GenericArrayTest(ds, return_photons, args=["photons"])]
 
     for a, r in zip(arfs, rmfs):
         arf = os.path.join(xray_data_dir, a)
         rmf = os.path.join(xray_data_dir, r)
-        events = photons.project_photons([1.0,-0.5,0.2], responses=[arf,rmf],
-                                         absorb_model=tbabs_model, 
-                                         convolve_energies=True, prng=prng)
+        events1 = photons1.project_photons([1.0,-0.5,0.2], responses=[arf,rmf],
+                                          absorb_model=tbabs_model, 
+                                          convolve_energies=True, prng=prng)
 
-        return_events = return_data(events.events)
+        return_events = return_data(events1.events)
 
         tests.append(GenericArrayTest(ds, return_events, args=[a]))
 
@@ -95,37 +96,46 @@
         test_sloshing.__name__ = test.description
         yield test
 
-    photons.write_h5_file("test_photons.h5")
-    events.write_h5_file("test_events.h5")
+    photons1.write_h5_file("test_photons.h5")
+    events1.write_h5_file("test_events.h5")
 
     photons2 = PhotonList.from_file("test_photons.h5")
     events2 = EventList.from_h5_file("test_events.h5")
 
-    convert_old_file(old_photons, "converted_photons.h5")
-    convert_old_file(old_events, "converted_events.h5")
+    convert_old_file(old_photon_file, "converted_photons.h5")
+    convert_old_file(old_event_file, "converted_events.h5")
 
     photons3 = PhotonList.from_file("converted_photons.h5")
     events3 = EventList.from_h5_file("converted_events.h5")
 
-    for k in photons:
-        yield assert_array_equal, photons[k].d, photons2[k].d
-        yield assert_array_equal, photons[k].d, photons3[k].d
-    for k in events:
-        yield assert_array_equal, events[k].d, events2[k].d
-        yield assert_array_equal, events[k].d, events3[k].d
+    for k in photons1.keys():
+        if k == "Energy":
+            arr1 = uconcatenate(photons1[k])
+            arr2 = uconcatenate(photons2[k])
+            arr3 = uconcatenate(photons3[k])
+        else:
+            arr1 = photons1[k]
+            arr2 = photons2[k]
+            arr3 = photons3[k]
+        yield assert_array_equal, arr1, arr2
+        yield assert_array_equal, arr1, arr3
+    for k in events1.keys():
+        yield assert_array_equal, events1[k], events2[k]
+        yield assert_array_equal, events1[k], events3[k]
 
     nevents = 0
 
     for i in range(4):
-        events = photons.project_photons([1.0,-0.5,0.2],
-                                         exp_time=0.25*exp_time,
+        events = photons1.project_photons([1.0,-0.5,0.2],
+                                         exp_time_new=0.25*exp_time,
                                          absorb_model=tbabs_model,
                                          prng=prng)
         events.write_h5_file("split_events_%d.h5" % i)
         nevents += len(events["xsky"])
 
     merge_files(["split_events_%d.h5" % i for i in range(4)],
-                "merged_events.h5", add_exposure_times=True)
+                "merged_events.h5", add_exposure_times=True,
+                clobber=True)
 
     merged_events = EventList.from_h5_file("merged_events.h5")
     assert len(merged_events["xsky"]) == nevents


https://bitbucket.org/yt_analysis/yt/commits/8ab9fd55e2d0/
Changeset:   8ab9fd55e2d0
Branch:      yt
User:        jzuhone
Date:        2015-11-18 18:34:25+00:00
Summary:     Merge
Affected #:  173 files

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 coding_styleguide.txt
--- /dev/null
+++ b/coding_styleguide.txt
@@ -0,0 +1,101 @@
+Style Guide for Coding in yt
+============================
+
+Coding Style Guide
+------------------
+
+ * In general, follow PEP-8 guidelines.
+   http://www.python.org/dev/peps/pep-0008/
+ * Classes are ``ConjoinedCapitals``, methods and functions are
+   ``lowercase_with_underscores``.
+ * Use 4 spaces, not tabs, to represent indentation.
+ * Line widths should not be more than 80 characters.
+ * Do not use nested classes unless you have a very good reason to, such as
+   requiring a namespace or class-definition modification.  Classes should live
+   at the top level.  ``__metaclass__`` is exempt from this.
+ * Do not use unnecessary parentheses in conditionals.  ``if((something) and
+   (something_else))`` should be rewritten as
+   ``if something and something_else``. Python is more forgiving than C.
+ * Avoid copying memory when possible. For example, don't do
+   ``a = a.reshape(3,4)`` when ``a.shape = (3,4)`` will do, and ``a = a * 3``
+   should be ``np.multiply(a, 3, a)``.
+ * In general, avoid all double-underscore method names: ``__something`` is
+   usually unnecessary.
+ * When writing a subclass, use the super built-in to access the super class,
+   rather than explicitly. Ex: ``super(SpecialGridSubclass, self).__init__()``
+   rather than ``SpecialGrid.__init__()``.
+ * Docstrings should describe input, output, behavior, and any state changes
+   that occur on an object.  See the file ``doc/docstring_example.txt`` for a
+   fiducial example of a docstring.
+ * Use only one top-level import per line. Unless there is a good reason not to,
+   imports should happen at the top of the file, after the copyright blurb.
+ * Never compare with ``True`` or ``False`` using ``==`` or ``!=``, always use
+   ``is`` or ``is not``.
+ * If you are comparing with a numpy boolean array, just refer to the array.
+   Ex: do ``np.all(array)`` instead of ``np.all(array == True)``.
+ * Never compare with None using ``==`` or ``!=``, use ``is None`` or
+   ``is not None``.
+ * Use ``statement is not True`` instead of ``not statement is True``
+ * Only one statement per line, do not use semicolons to put two or more
+   statements on a single line.
+ * Only declare local variables if they will be used later. If you do not use the
+   return value of a function, do not store it in a variable.
+ * Add tests for new functionality. When fixing a bug, consider adding a test to
+   prevent the bug from recurring.
+
+API Guide
+---------
+
+ * Do not use ``from some_module import *``
+ * Internally, only import from source files directly -- instead of:
+
+     ``from yt.visualization.api import ProjectionPlot``
+
+   do:
+
+     ``from yt.visualization.plot_window import ProjectionPlot``
+
+ * Import symbols from the module where they are defined, avoid transitive
+   imports.
+ * Import standard library modules, functions, and classes from builtins, do not
+   import them from other yt files.
+ * Numpy is to be imported as ``np``.
+ * Do not use too many keyword arguments.  If you have a lot of keyword
+   arguments, then you are doing too much in ``__init__`` and not enough via
+   parameter setting.
+ * In function arguments, place spaces after commas.  ``def something(a,b,c)``
+   should be ``def something(a, b, c)``.
+ * Don't create a new class to replicate the functionality of an old class --
+   replace the old class.  Too many options makes for a confusing user
+   experience.
+ * Parameter files external to yt are a last resort.
+ * The usage of the ``**kwargs`` construction should be avoided.  If they cannot
+   be avoided, they must be explained, even if they are only to be passed on to
+   a nested function.
+
+Variable Names and Enzo-isms
+----------------------------
+Avoid Enzo-isms.  This includes but is not limited to:
+
+ * Hard-coding parameter names that are the same as those in Enzo.  The
+   following translation table should be of some help.  Note that the
+   parameters are now properties on a ``Dataset`` subclass: you access them
+   like ds.refine_by .
+
+    - ``RefineBy `` => `` refine_by``
+    - ``TopGridRank `` => `` dimensionality``
+    - ``TopGridDimensions `` => `` domain_dimensions``
+    - ``InitialTime `` => `` current_time``
+    - ``DomainLeftEdge `` => `` domain_left_edge``
+    - ``DomainRightEdge `` => `` domain_right_edge``
+    - ``CurrentTimeIdentifier `` => `` unique_identifier``
+    - ``CosmologyCurrentRedshift `` => `` current_redshift``
+    - ``ComovingCoordinates `` => `` cosmological_simulation``
+    - ``CosmologyOmegaMatterNow `` => `` omega_matter``
+    - ``CosmologyOmegaLambdaNow `` => `` omega_lambda``
+    - ``CosmologyHubbleConstantNow `` => `` hubble_constant``
+
+ * Do not assume that the domain runs from 0 .. 1.  This is not true
+   everywhere.
+ * Variable names should be short but descriptive.
+ * No globals!
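
Several of the rules above condensed into one toy snippet (illustrative only,
not taken from the yt codebase):

    import numpy as np

    class Base(object):
        def __init__(self):
            self.data = np.ones((3, 4))

    class Sub(Base):
        def __init__(self):
            # Use the super built-in rather than naming the parent class.
            super(Sub, self).__init__()
            # Reshape in place instead of copying memory.
            self.data.shape = (4, 3)
            # Multiply in place instead of allocating a new array.
            np.multiply(self.data, 3, self.data)

    s = Sub()
    # Compare with None using "is", not "==".
    assert s.data is not None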

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-Style Guide for Coding in yt
-============================
-
-Coding Style Guide
-------------------
-
- * In general, follow PEP-8 guidelines.
-   http://www.python.org/dev/peps/pep-0008/
- * Classes are ConjoinedCapitals, methods and functions are
-   lowercase_with_underscores.
- * Use 4 spaces, not tabs, to represent indentation.
- * Line widths should not be more than 80 characters.
- * Do not use nested classes unless you have a very good reason to, such as
-   requiring a namespace or class-definition modification.  Classes should live
-   at the top level.  __metaclass__ is exempt from this.
- * Do not use unnecessary parenthesis in conditionals.  if((something) and
-   (something_else)) should be rewritten as if something and something_else.
-   Python is more forgiving than C.
- * Avoid copying memory when possible. For example, don't do 
-   "a = a.reshape(3,4)" when "a.shape = (3,4)" will do, and "a = a * 3" should
-   be "np.multiply(a, 3, a)".
- * In general, avoid all double-underscore method names: __something is usually
-   unnecessary.
- * When writing a subclass, use the super built-in to access the super class,
-   rather than explicitly. Ex: "super(SpecialGrid, self).__init__()" rather than
-   "SpecialGrid.__init__()".
- * Doc strings should describe input, output, behavior, and any state changes
-   that occur on an object.  See the file `doc/docstring_example.txt` for a
-   fiducial example of a docstring.
-
-API Guide
----------
-
- * Do not import "*" from anything other than "yt.funcs".
- * Internally, only import from source files directly -- instead of:
-
-   from yt.visualization.api import ProjectionPlot
-
-   do:
-
-   from yt.visualization.plot_window import ProjectionPlot
-
- * Numpy is to be imported as "np", after a long time of using "na".
- * Do not use too many keyword arguments.  If you have a lot of keyword
-   arguments, then you are doing too much in __init__ and not enough via
-   parameter setting.
- * In function arguments, place spaces before commas.  def something(a,b,c)
-   should be def something(a, b, c).
- * Don't create a new class to replicate the functionality of an old class --
-   replace the old class.  Too many options makes for a confusing user
-   experience.
- * Parameter files external to yt are a last resort.
- * The usage of the **kwargs construction should be avoided.  If they cannot
-   be avoided, they must be explained, even if they are only to be passed on to
-   a nested function.
-
-Variable Names and Enzo-isms
-----------------------------
-
- * Avoid Enzo-isms.  This includes but is not limited to:
-   * Hard-coding parameter names that are the same as those in Enzo.  The
-     following translation table should be of some help.  Note that the
-     parameters are now properties on a Dataset subclass: you access them
-     like ds.refine_by .
-     * RefineBy => refine_by
-     * TopGridRank => dimensionality
-     * TopGridDimensions => domain_dimensions
-     * InitialTime => current_time
-     * DomainLeftEdge => domain_left_edge
-     * DomainRightEdge => domain_right_edge
-     * CurrentTimeIdentifier => unique_identifier
-     * CosmologyCurrentRedshift => current_redshift
-     * ComovingCoordinates => cosmological_simulation
-     * CosmologyOmegaMatterNow => omega_matter
-     * CosmologyOmegaLambdaNow => omega_lambda
-     * CosmologyHubbleConstantNow => hubble_constant
-   * Do not assume that the domain runs from 0 .. 1.  This is not true
-     everywhere.
- * Variable names should be short but descriptive.
- * No globals!

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -59,7 +59,7 @@
   from yt.analysis_modules.halo_finding.api import *
 
   ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
-  halo_list = parallelHF(ds)
+  halo_list = HaloFinder(ds)
   halo_list.dump('MyHaloList')
 
 Ellipsoid Parameters

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -501,11 +501,7 @@
 subtle art in estimating the amount of memory needed for halo finding, but a
 rule of thumb is that the HOP halo finder is the most memory intensive
 (:func:`HaloFinder`), and Friends of Friends (:func:`FOFHaloFinder`) being the
-most memory-conservative.  It has been found that :func:`parallelHF` needs
-roughly 1 MB of memory per 5,000 particles, although recent work has improved
-this and the memory requirement is now smaller than this. But this is a good
-starting point for beginning to calculate the memory required for halo-finding.
-For more information, see :ref:`halo_finding`.
+most memory-conservative. For more information, see :ref:`halo_finding`.
 
 **Volume Rendering**
 

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -67,7 +67,7 @@
 # built documents.
 #
 # The short X.Y version.
-version = '3.3'
+version = '3.3-dev'
 # The full version, including alpha/beta/rc tags.
 release = '3.3-dev'
 

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -494,80 +494,4 @@
 
 .. _code-style-guide:
 
-Code Style Guide
-----------------
-
-To keep things tidy, we try to stick with a couple simple guidelines.
-
-General Guidelines
-++++++++++++++++++
-
-* In general, follow `PEP-8 <http://www.python.org/dev/peps/pep-0008/>`_ guidelines.
-* Classes are ConjoinedCapitals, methods and functions are
-  ``lowercase_with_underscores.``
-* Use 4 spaces, not tabs, to represent indentation.
-* Line widths should not be more than 80 characters.
-* Do not use nested classes unless you have a very good reason to, such as
-  requiring a namespace or class-definition modification.  Classes should live
-  at the top level.  ``__metaclass__`` is exempt from this.
-* Do not use unnecessary parentheses in conditionals.  ``if((something) and
-  (something_else))`` should be rewritten as ``if something and
-  something_else``.  Python is more forgiving than C.
-* Avoid copying memory when possible. For example, don't do ``a =
-  a.reshape(3,4)`` when ``a.shape = (3,4)`` will do, and ``a = a * 3`` should be
-  ``np.multiply(a, 3, a)``.
-* In general, avoid all double-underscore method names: ``__something`` is
-  usually unnecessary.
-* Doc strings should describe input, output, behavior, and any state changes
-  that occur on an object.  See the file `doc/docstring_example.txt` for a
-  fiducial example of a docstring.
-
-API Guide
-+++++++++
-
-* Do not import "*" from anything other than ``yt.funcs``.
-* Internally, only import from source files directly; instead of: ``from
-  yt.visualization.api import SlicePlot`` do
-  ``from yt.visualization.plot_window import SlicePlot``.
-* Numpy is to be imported as ``np``.
-* Do not use too many keyword arguments.  If you have a lot of keyword
-  arguments, then you are doing too much in ``__init__`` and not enough via
-  parameter setting.
-* In function arguments, place spaces before commas.  ``def something(a,b,c)``
-  should be ``def something(a, b, c)``.
-* Don't create a new class to replicate the functionality of an old class --
-  replace the old class.  Too many options makes for a confusing user
-  experience.
-* Parameter files external to yt are a last resort.
-* The usage of the ``**kwargs`` construction should be avoided.  If they
-  cannot be avoided, they must be explained, even if they are only to be
-  passed on to a nested function.
-* Constructor APIs should be kept as *simple* as possible.
-* Variable names should be short but descriptive.
-* No global variables!
-
-Variable Names and Enzo-isms
-++++++++++++++++++++++++++++
-
-* Avoid Enzo-isms.  This includes but is not limited to:
-
-  + Hard-coding parameter names that are the same as those in Enzo.  The
-    following translation table should be of some help.  Note that the
-    parameters are now properties on a Dataset subclass: you access them
-    like ``ds.refine_by`` .
-
-    - ``RefineBy `` => `` refine_by``
-    - ``TopGridRank `` => `` dimensionality``
-    - ``TopGridDimensions `` => `` domain_dimensions``
-    - ``InitialTime `` => `` current_time``
-    - ``DomainLeftEdge `` => `` domain_left_edge``
-    - ``DomainRightEdge `` => `` domain_right_edge``
-    - ``CurrentTimeIdentifier `` => `` unique_identifier``
-    - ``CosmologyCurrentRedshift `` => `` current_redshift``
-    - ``ComovingCoordinates `` => `` cosmological_simulation``
-    - ``CosmologyOmegaMatterNow `` => `` omega_matter``
-    - ``CosmologyOmegaLambdaNow `` => `` omega_lambda``
-    - ``CosmologyHubbleConstantNow `` => `` hubble_constant``
-
-  + Do not assume that the domain runs from 0 to 1.  This is not true
-    for many codes and datasets.
+.. include:: ../../../coding_styleguide.txt

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -6,10 +6,10 @@
 Beginning with version 3.3, yt has the ability to volume render unstructured
 meshes from, for example, finite element calculations. In order to use this
 capability, a few additional dependencies are required beyond those you get
-when you run the install script. First, `embree <https://embree.github.io>`
+when you run the install script. First, `embree <https://embree.github.io>`_
 (a fast software ray-tracing library from Intel) must be installed, either
 by compiling from source or by using one of the pre-built binaries available
-at Embree's `downloads <https://embree.github.io/downloads.html>` page. Once
+at Embree's `downloads <https://embree.github.io/downloads.html>`_ page. Once
 Embree is installed, you must also create a symlink next to the library. For
 example, if the libraries were installed at /usr/local/lib/, you must do
 
@@ -18,7 +18,7 @@
     sudo ln -s /usr/local/lib/libembree.2.6.1.dylib /usr/local/lib/libembree.so
 
 Second, the python bindings for embree (called 
-`pyembree <https://github.com/scopatz/pyembree>`) must also be installed. To
+`pyembree <https://github.com/scopatz/pyembree>`_) must also be installed. To
 do so, first obtain a copy, e.g. by cloning the repo:
 
 .. code-block:: bash

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 scripts/pr_backport.py
--- a/scripts/pr_backport.py
+++ b/scripts/pr_backport.py
@@ -23,7 +23,12 @@
     with hglib.open(dest_repo_path) as client:
         # Changesets that are on the yt branch but aren't topological ancestors
         # of whichever changeset the experimental bookmark is pointing at
-        client.update('heads(branch(yt) - ::bookmark(experimental))')
+        bookmarks, _ = client.bookmarks()
+        bookmark_names = [b[0] for b in bookmarks]
+        if 'experimental' in bookmark_names:
+            client.update('heads(branch(yt) - ::bookmark(experimental))')
+        else:
+            client.update('heads(branch(yt))')
     return dest_repo_path
 
 
@@ -51,9 +56,13 @@
 def get_branch_tip(repo_path, branch, exclude=None):
     """Returns the SHA1 hash of the most recent commit on the given branch"""
     revset = "head() and branch(%s)" % branch
-    if exclude is not None:
-        revset += "and not %s" % exclude
     with hglib.open(repo_path) as client:
+        if exclude is not None:
+            try:
+                client.log(exclude)
+                revset += "and not %s" % exclude
+            except hglib.error.CommandError:
+                pass
         change = client.log(revset)[0][1][:12]
     return change
 

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -9,7 +9,11 @@
 with-xunit=1
 
 [flake8]
-# if we include api.py files, we get tons of spurious "imported but unused" errors
-exclude = */api.py,*/__config__.py,yt/visualization/_mpl_imports.py
+# we exclude:
+#      api.py and __init__.py files to avoid spurious unused import errors
+#      _mpl_imports.py for the same reason
+#      autogenerated __config__.py files
+#      vendored libraries
+exclude = */api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
 max-line-length=999
-ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E302,E303,E401,E502,E701,E703,W291,W293,W391
\ No newline at end of file
+ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E266,E302,E303,E402,E502,E701,E703,E731,W291,W293,W391,W503
\ No newline at end of file

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 tests/README
--- /dev/null
+++ b/tests/README
@@ -0,0 +1,3 @@
+This directory contains two tiny Enzo cosmological datasets.
+
+They were added a long time ago and are provided for testing purposes.
\ No newline at end of file

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 tests/boolean_regions.py
--- a/tests/boolean_regions.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from yt.utilities.answer_testing.output_tests import \
-    SingleOutputTest, create_test
-from yt.utilities.answer_testing.boolean_region_tests import \
-    TestBooleanANDGridQuantity, TestBooleanORGridQuantity, \
-    TestBooleanNOTGridQuantity, TestBooleanANDParticleQuantity, \
-    TestBooleanORParticleQuantity, TestBooleanNOTParticleQuantity
-
-create_test(TestBooleanANDGridQuantity, "BooleanANDGrid")
-
-create_test(TestBooleanORGridQuantity, "BooleanORGrid")
-
-create_test(TestBooleanNOTGridQuantity, "BooleanNOTGrid")
-
-create_test(TestBooleanANDParticleQuantity, "BooleanANDParticle")
-
-create_test(TestBooleanORParticleQuantity, "BooleanORParticle")
-
-create_test(TestBooleanNOTParticleQuantity, "BooleanNOTParticle")

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 tests/fields_to_test.py
--- a/tests/fields_to_test.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# We want to test several things.  We need to be able to run the
-
-field_list = ["Density", "Temperature", "x-velocity", "y-velocity",
-    "z-velocity",
-    # Now some derived fields
-    "Pressure", "SoundSpeed", "particle_density", "Entropy",
-    # Ghost zones
-    "AveragedDensity", "DivV"]
-
-particle_field_list = ["particle_position_x", "ParticleMassMsun"]

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 tests/halos.py
--- a/tests/halos.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from yt.utilities.answer_testing.output_tests import \
-    SingleOutputTest, create_test
-from yt.utilities.answer_testing.halo_tests import \
-    TestHaloCountHOP, TestHaloCountFOF, TestHaloCountPHOP
-
-create_test(TestHaloCountHOP, "halo_count_HOP", threshold=80.0)
-
-create_test(TestHaloCountFOF, "halo_count_FOF", link=0.2, padding=0.02)
-
-create_test(TestHaloCountPHOP, "halo_count_PHOP", threshold=80.0)

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 tests/hierarchy_consistency.py
--- a/tests/hierarchy_consistency.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import numpy as na
-
-from yt.utilities.answer_testing.output_tests import \
-    YTDatasetTest, RegressionTestException
-from yt.funcs import ensure_list
-
-
-class HierarchyInconsistent(RegressionTestException):
-    pass
-
-
-class HierarchyConsistency(YTDatasetTest):
-    name = "index_consistency"
-
-    def run(self):
-        self.result = \
-            all(g in ensure_list(c.Parent) for g in self.ds.index.grids
-                                            for c in g.Children)
-
-    def compare(self, old_result):
-        if not(old_result and self.result): raise HierarchyInconsistent()
-
-
-class GridLocationsProperties(YTDatasetTest):
-    name = "level_consistency"
-
-    def run(self):
-        self.result = dict(grid_left_edge=self.ds.grid_left_edge,
-                           grid_right_edge=self.ds.grid_right_edge,
-                           grid_levels=self.ds.grid_levels,
-                           grid_particle_count=self.ds.grid_particle_count,
-                           grid_dimensions=self.ds.grid_dimensions)
-
-    def compare(self, old_result):
-        # We allow now difference between these values
-        self.compare_data_arrays(self.result, old_result, 0.0)
-
-
-class GridRelationshipsChanged(RegressionTestException):
-    pass
-
-
-class GridRelationships(YTDatasetTest):
-
-    name = "grid_relationships"
-
-    def run(self):
-        self.result = [[p.id for p in ensure_list(g.Parent) \
-            if g.Parent is not None]
-            for g in self.ds.index.grids]
-
-    def compare(self, old_result):
-        if len(old_result) != len(self.result):
-            raise GridRelationshipsChanged()
-        for plist1, plist2 in zip(old_result, self.result):
-            if len(plist1) != len(plist2): raise GridRelationshipsChanged()
-            if not all((p1 == p2 for p1, p2 in zip(plist1, plist2))):
-                raise GridRelationshipsChanged()
-
-
-class GridGlobalIndices(YTDatasetTest):
-    name = "global_startindex"
-
-    def run(self):
-        self.result = na.array([g.get_global_startindex()
-                                for g in self.ds.index.grids])
-
-    def compare(self, old_result):
-        self.compare_array_delta(old_result, self.result, 0.0)

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 tests/object_field_values.py
--- a/tests/object_field_values.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import hashlib
-import numpy as na
-
-from yt.utilities.answer_testing.output_tests import \
-    YTDatasetTest, RegressionTestException, create_test
-from yt.funcs import ensure_list, iterable
-from fields_to_test import field_list, particle_field_list
-
-
-class FieldHashesDontMatch(RegressionTestException):
-    pass
-
-known_objects = {}
-
-
-def register_object(func):
-    known_objects[func.func_name] = func
-    return func
-
-
-@register_object
-def centered_sphere(tobj):
-    center = 0.5 * (tobj.ds.domain_right_edge + tobj.ds.domain_left_edge)
-    width = (tobj.ds.domain_right_edge - tobj.ds.domain_left_edge).max()
-    tobj.data_object = tobj.ds.sphere(center, width / 0.25)
-
-
-@register_object
-def off_centered_sphere(tobj):
-    center = 0.5 * (tobj.ds.domain_right_edge + tobj.ds.domain_left_edge)
-    width = (tobj.ds.domain_right_edge - tobj.ds.domain_left_edge).max()
-    tobj.data_object = tobj.ds.sphere(center - 0.25 * width, width / 0.25)
-
-
-@register_object
-def corner_sphere(tobj):
-    width = (tobj.ds.domain_right_edge - tobj.ds.domain_left_edge).max()
-    tobj.data_object = tobj.ds.sphere(tobj.ds.domain_left_edge, width / 0.25)
-
-
-@register_object
-def disk(self):
-    center = (self.ds.domain_right_edge + self.ds.domain_left_edge) / 2.
-    radius = (self.ds.domain_right_edge - self.ds.domain_left_edge).max() / 10.
-    height = (self.ds.domain_right_edge - self.ds.domain_left_edge).max() / 10.
-    normal = na.array([1.] * 3)
-    self.data_object = self.ds.disk(center, normal, radius, height)
-
-
-@register_object
-def all_data(self):
-    self.data_object = self.ds.all_data()
-
-_new_known_objects = {}
-for field in ["Density"]:  # field_list:
-    for object_name in known_objects:
-
-        def _rfunc(oname, fname):
-
-            def func(tobj):
-                known_objects[oname](tobj)
-                tobj.orig_data_object = tobj.data_object
-                avg_value = tobj.orig_data_object.quantities[
-                        "WeightedAverageQuantity"](fname, "Density")
-                tobj.data_object = tobj.orig_data_object.cut_region(
-                        ["grid['%s'] > %s" % (fname, avg_value)])
-            return func
-        _new_known_objects["%s_cut_region_%s" % (object_name, field)] = \
-                _rfunc(object_name, field)
-known_objects.update(_new_known_objects)
-
-
-class YTFieldValuesTest(YTDatasetTest):
-
-    def run(self):
-        vals = self.data_object[self.field].copy()
-        vals.sort()
-        self.result = hashlib.sha256(vals.tostring()).hexdigest()
-
-    def compare(self, old_result):
-        if self.result != old_result: raise FieldHashesDontMatch
-
-    def setup(self):
-        YTDatasetTest.setup(self)
-        known_objects[self.object_name](self)
-
-
-class YTExtractIsocontoursTest(YTFieldValuesTest):
-
-    def run(self):
-        val = self.data_object.quantities["WeightedAverageQuantity"](
-            "Density", "Density")
-        rset = self.data_object.extract_isocontours("Density",
-            val, rescale=False, sample_values="Temperature")
-        self.result = rset
-
-    def compare(self, old_result):
-        if self.result[0].size == 0 and old_result[0].size == 0:
-            return True
-        self.compare_array_delta(self.result[0].ravel(),
-                                 old_result[0].ravel(), 1e-7)
-        self.compare_array_delta(self.result[1], old_result[1], 1e-7)
-
-
-class YTIsocontourFluxTest(YTFieldValuesTest):
-
-    def run(self):
-        val = self.data_object.quantities["WeightedAverageQuantity"](
-            "Density", "Density")
-        flux = self.data_object.calculate_isocontour_flux(
-           "Density", val, "x-velocity", "y-velocity", "z-velocity")
-        self.result = flux
-
-    def compare(self, old_result):
-        self.compare_value_delta(self.result, old_result, 1e-7)
-
-for object_name in known_objects:
-    for field in field_list + particle_field_list:
-        if "cut_region" in object_name and field in particle_field_list:
-            continue
-        create_test(YTFieldValuesTest, "%s_%s" % (object_name, field),
-                    field=field, object_name=object_name)
-    create_test(YTExtractIsocontoursTest, "%s" % (object_name),
-                object_name=object_name)
-    create_test(YTIsocontourFluxTest, "%s" % (object_name),
-                object_name=object_name)
-
-
-class YTDerivedQuantityTest(YTDatasetTest):
-
-    def setup(self):
-        YTDatasetTest.setup(self)
-        known_objects[self.object_name](self)
-
-    def compare(self, old_result):
-        if hasattr(self.result, 'tostring'):
-            self.compare_array_delta(self.result, old_result, 1e-7)
-            return
-        elif iterable(self.result):
-            a1 = na.array(self.result)
-            a2 = na.array(old_result)
-            self.compare_array_delta(a1, a2, 1e-7)
-        else:
-            if self.result != old_result: raise FieldHashesDontMatch
-
-    def run(self):
-        # This only works if it takes no arguments
-        self.result = self.data_object.quantities[self.dq_name]()
-
-dq_names = ["TotalMass", "AngularMomentumVector", "CenterOfMass",
-            "BulkVelocity", "BaryonSpinParameter", "ParticleSpinParameter"]
-
-# Extrema, WeightedAverageQuantity, TotalQuantity, MaxLocation,
-# MinLocation
-
-for object_name in known_objects:
-    for dq in dq_names:
-        # Some special exceptions
-        if "cut_region" in object_name and (
-            "SpinParameter" in dq or
-            "TotalMass" in dq):
-            continue
-        create_test(YTDerivedQuantityTest, "%s_%s" % (object_name, dq),
-                    dq_name=dq, object_name=object_name)
-
-
-class YTDerivedQuantityTestField(YTDerivedQuantityTest):
-
-    def run(self):
-        self.result = self.data_object.quantities[self.dq_name](
-            self.field_name)
-
-for object_name in known_objects:
-    for field in field_list:
-        for dq in ["Extrema", "TotalQuantity", "MaxLocation", "MinLocation"]:
-            create_test(YTDerivedQuantityTestField,
-                        "%s_%s" % (object_name, field),
-                        field_name=field, dq_name=dq,
-                        object_name=object_name)
-
-
-class YTDerivedQuantityTest_WeightedAverageQuantity(YTDerivedQuantityTest):
-
-    def run(self):
-        self.result = self.data_object.quantities["WeightedAverageQuantity"](
-            self.field_name, weight="CellMassMsun")
-
-for object_name in known_objects:
-    for field in field_list:
-        create_test(YTDerivedQuantityTest_WeightedAverageQuantity,
-                    "%s_%s" % (object_name, field),
-                    field_name=field,
-                    object_name=object_name)

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 tests/projections.py
--- a/tests/projections.py
+++ /dev/null
@@ -1,37 +0,0 @@
-from yt.utilities.answer_testing.output_tests import \
-    SingleOutputTest, create_test
-from yt.utilities.answer_testing.hydro_tests import \
-    TestProjection, TestOffAxisProjection, TestSlice, \
-    TestRay, TestGasDistribution, Test2DGasDistribution
-
-from fields_to_test import field_list
-
-for field in field_list:
-    create_test(TestRay, "%s" % field, field=field)
-
-for axis in range(3):
-    for field in field_list:
-        create_test(TestSlice, "%s_%s" % (axis, field),
-                    field=field, axis=axis)
-
-for axis in range(3):
-    for field in field_list:
-        create_test(TestProjection, "%s_%s" % (axis, field),
-                    field=field, axis=axis)
-        create_test(TestProjection, "%s_%s_Density" % (axis, field),
-                    field=field, axis=axis, weight_field="Density")
-
-for field in field_list:
-    create_test(TestOffAxisProjection, "%s_%s" % (axis, field),
-                field=field, axis=axis)
-    create_test(TestOffAxisProjection, "%s_%s_Density" % (axis, field),
-                field=field, axis=axis, weight_field="Density")
-
-for field in field_list:
-    if field != "Density":
-        create_test(TestGasDistribution, "density_%s" % field,
-                    field_x="Density", field_y=field)
-    if field not in ("x-velocity", "Density"):
-        create_test(Test2DGasDistribution, "density_x-vel_%s" % field,
-                    field_x="Density", field_y="x-velocity", field_z=field,
-                    weight="CellMassMsun")

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 tests/runall.py
--- a/tests/runall.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import matplotlib
-matplotlib.use('Agg')
-from yt.config import ytcfg
-ytcfg["yt", "loglevel"] = "50"
-ytcfg["yt", "serialize"] = "False"
-
-from yt.utilities.answer_testing.api import \
-    RegressionTestRunner, clear_registry, create_test, \
-    TestFieldStatistics, TestAllProjections, registry_entries, \
-    Xunit
-from yt.utilities.command_line import get_yt_version
-
-from yt.mods import *
-import fnmatch
-import imp
-import optparse
-import itertools
-import time
-
-#
-# We assume all tests are to be run, unless explicitly given the name of a
-# single test or something that can be run through fnmatch.
-#
-# Keep in mind that we use a different nomenclature here than is used in the
-# Enzo testing system.  Our 'tests' are actually tests that are small and that
-# run relatively quickly on a single dataset; in Enzo's system, a 'test'
-# encompasses both the creation and the examination of data.  Here we assume
-# the data is kept constant.
-#
-
-cwd = os.path.dirname(globals().get("__file__", os.getcwd()))
-
-
-def load_tests(iname, idir):
-    f, filename, desc = imp.find_module(iname, [idir])
-    tmod = imp.load_module(iname, f, filename, desc)
-    return tmod
-
-
-def find_and_initialize_tests():
-    mapping = {}
-    for f in glob.glob(os.path.join(cwd, "*.py")):
-        clear_registry()
-        iname = os.path.basename(f[:-3])
-        try:
-            load_tests(iname, cwd)
-            mapping[iname] = registry_entries()
-            #print "Associating %s with" % (iname)
-            #print "\n    ".join(registry_entries())
-        except ImportError:
-            pass
-    return mapping
-
-if __name__ == "__main__":
-    clear_registry()
-    mapping = find_and_initialize_tests()
-    test_storage_directory = ytcfg.get("yt", "test_storage_dir")
-    try:
-        my_hash = get_yt_version()
-    except:
-        my_hash = "UNKNOWN%s" % (time.time())
-    parser = optparse.OptionParser()
-    parser.add_option("-f", "--parameter-file", dest="parameter_file",
-        default=os.path.join(cwd, "DD0010/moving7_0010"),
-        help="The parameter file value to feed to 'load' to test against")
-    parser.add_option("-l", "--list", dest="list_tests", action="store_true",
-        default=False, help="List all tests and then exit")
-    parser.add_option("-t", "--tests", dest="test_pattern", default="*",
-        help="The test name pattern to match.  Can include wildcards.")
-    parser.add_option("-o", "--output", dest="storage_dir",
-        default=test_storage_directory,
-        help="Base directory for storing test output.")
-    parser.add_option("-c", "--compare", dest="compare_name",
-        default=None,
-        help="The name against which we will compare")
-    parser.add_option("-n", "--name", dest="this_name",
-        default=my_hash,
-        help="The name we'll call this set of tests")
-    opts, args = parser.parse_args()
-
-    if opts.list_tests:
-        tests_to_run = []
-        for m, vals in mapping.items():
-            new_tests = fnmatch.filter(vals, opts.test_pattern)
-            if len(new_tests) == 0: continue
-            load_tests(m, cwd)
-            keys = set(registry_entries())
-            tests_to_run += [t for t in new_tests if t in keys]
-        tests = list(set(tests_to_run))
-        print ("\n    ".join(tests))
-        sys.exit(0)
-
-    # Load the test ds and make sure it's good.
-    ds = load(opts.parameter_file)
-    if ds is None:
-        print "Couldn't load the specified parameter file."
-        sys.exit(1)
-
-    # Now we modify our compare name and self name to include the ds.
-    compare_id = opts.compare_name
-    watcher = None
-    if compare_id is not None:
-        compare_id += "_%s_%s" % (ds, ds._hash())
-        watcher = Xunit()
-    this_id = opts.this_name + "_%s_%s" % (ds, ds._hash())
-
-    rtr = RegressionTestRunner(this_id, compare_id,
-                               results_path=opts.storage_dir,
-                               compare_results_path=opts.storage_dir,
-                               io_log=[opts.parameter_file])
-
-    rtr.watcher = watcher
-    tests_to_run = []
-    for m, vals in mapping.items():
-        new_tests = fnmatch.filter(vals, opts.test_pattern)
-
-        if len(new_tests) == 0: continue
-        load_tests(m, cwd)
-        keys = set(registry_entries())
-        tests_to_run += [t for t in new_tests if t in keys]
-    for test_name in sorted(tests_to_run):
-        print "RUNNING TEST", test_name
-        rtr.run_test(test_name)
-    if watcher is not None:
-        rtr.watcher.report()
-    failures = 0
-    passes = 1
-    for test_name, result in sorted(rtr.passed_tests.items()):
-        if not result:
-            print "TEST %s: %s" % (test_name, result)
-            print "    %s" % rtr.test_messages[test_name]
-        if result: passes += 1
-        else: failures += 1
-    print "Number of passes  : %s" % passes
-    print "Number of failures: %s" % failures

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 tests/volume_rendering.py
--- a/tests/volume_rendering.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from yt.mods import *
-import numpy as na
-
-from yt.utilities.answer_testing.output_tests import \
-    YTDatasetTest, RegressionTestException
-from yt.funcs import ensure_list
-
-
-class VolumeRenderingInconsistent(RegressionTestException):
-    pass
-
-
-class VolumeRenderingConsistency(YTDatasetTest):
-    name = "volume_rendering_consistency"
-
-    def run(self):
-        c = (self.ds.domain_right_edge + self.ds.domain_left_edge) / 2.
-        W = na.sqrt(3.) * (self.ds.domain_right_edge - \
-            self.ds.domain_left_edge)
-        N = 512
-        n_contours = 5
-        cmap = 'algae'
-        field = 'Density'
-        mi, ma = self.ds.all_data().quantities['Extrema'](field)[0]
-        mi, ma = na.log10(mi), na.log10(ma)
-        contour_width = (ma - mi) / 100.
-        L = na.array([1.] * 3)
-        tf = ColorTransferFunction((mi - 2, ma + 2))
-        tf.add_layers(n_contours, w=contour_width,
-                      col_bounds=(mi * 1.001, ma * 0.999),
-                      colormap=cmap, alpha=na.logspace(-1, 0, n_contours))
-        cam = self.ds.camera(c, L, W, (N, N), transfer_function=tf,
-            no_ghost=True)
-        image = cam.snapshot()
-        # image = cam.snapshot('test_rendering_%s.png'%field)
-        self.result = image
-
-    def compare(self, old_result):
-        # Compare the deltas; give a leeway of 1e-8
-        delta = na.nanmax(na.abs(self.result - old_result) /
-                                 (self.result + old_result))
-        if delta > 1e-9: raise VolumeRenderingInconsistent()

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -121,7 +121,6 @@
     derived_field
 
 from yt.data_objects.api import \
-    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     DatasetSeries, ImageArray, \
     particle_filter, add_particle_filter, \
     create_profile, Profile1D, Profile2D, Profile3D, \

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -116,7 +116,8 @@
 
     def make_spectrum(self, input_file, output_file="spectrum.h5",
                       line_list_file="lines.txt",
-                      use_peculiar_velocity=True, njobs="auto"):
+                      use_peculiar_velocity=True, 
+                      subgrid_resolution=10, njobs="auto"):
         """
         Make spectrum from ray data using the line list.
 
@@ -141,6 +142,17 @@
         use_peculiar_velocity : optional, bool
            if True, include line of sight velocity for shifting lines.
            Default: True
+        subgrid_resolution : optional, int
+           When a line is being added that is unresolved (i.e. its thermal
+           width is less than the spectral bin width), the voigt profile of
+           the line is deposited into an array of virtual bins at higher
+           resolution.  The optical depth from these virtual bins is integrated
+           and then added to the coarser spectral bin.  The subgrid_resolution
+           value determines the ratio between the thermal width and the 
+           bin width of the virtual bins.  Increasing this value yields smaller
+           virtual bins, which increases accuracy, but is more expensive.
+           A value of 10 yields accuracy to the 4th significant digit.
+           Default: 10
         njobs : optional, int or "auto"
            the number of process groups into which the loop over
            absorption lines will be divided.  If set to -1, each
@@ -182,7 +194,9 @@
             njobs = min(comm.size, len(self.line_list))
 
         self._add_lines_to_spectrum(field_data, use_peculiar_velocity,
-                                    line_list_file is not None, njobs=njobs)
+                                    line_list_file is not None, 
+                                    subgrid_resolution=subgrid_resolution,
+                                    njobs=njobs)
         self._add_continua_to_spectrum(field_data, use_peculiar_velocity)
 
         self.flux_field = np.exp(-self.tau_field)
@@ -204,7 +218,7 @@
         Add continuum features to the spectrum.
         """
         # Only add continuum features down to tau of 1.e-3.
-        tau_min = 1.e-4
+        min_tau = 1.e-3
 
         for continuum in self.continuum_list:
             column_density = field_data[continuum['field_name']] * field_data['dl']
@@ -216,12 +230,12 @@
             this_wavelength = delta_lambda + continuum['wavelength']
             right_index = np.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
             left_index = np.digitize((this_wavelength *
-                                     np.power((tau_min * continuum['normalization'] /
+                                     np.power((min_tau * continuum['normalization'] /
                                                column_density), (1. / continuum['index']))),
                                     self.lambda_bins).clip(0, self.n_lambda)
 
             valid_continuua = np.where(((column_density /
-                                         continuum['normalization']) > tau_min) &
+                                         continuum['normalization']) > min_tau) &
                                        (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding continuum feature - %s [%f A]: " % \
                                 (continuum['label'], continuum['wavelength']),
@@ -235,98 +249,155 @@
             pbar.finish()
 
     def _add_lines_to_spectrum(self, field_data, use_peculiar_velocity,
-                               save_line_list, njobs=-1):
+                               save_line_list, subgrid_resolution=10, njobs=-1):
         """
         Add the absorption lines to the spectrum.
         """
-        # Only make voigt profile for slice of spectrum that is 10 times the line width.
-        spectrum_bin_ratio = 5
-        # Widen wavelength window until optical depth reaches a max value at the ends.
-        max_tau = 0.001
+        # Widen wavelength window until optical depth falls below this tau 
+        # value at the ends to assure that the wings of a line have been 
+        # fully resolved.
+        min_tau = 1e-3
 
+        # step through each ionic transition (e.g. HI, HII, MgII) specified
+        # and deposit the lines into the spectrum
         for line in parallel_objects(self.line_list, njobs=njobs):
             column_density = field_data[line['field_name']] * field_data['dl']
+
             # redshift_eff field combines cosmological and velocity redshifts
+            # so delta_lambda gives the offset in angstroms from the rest frame
+            # wavelength to the observed wavelength of the transition 
             if use_peculiar_velocity:
                 delta_lambda = line['wavelength'] * field_data['redshift_eff']
             else:
                 delta_lambda = line['wavelength'] * field_data['redshift']
+            # lambda_obs is central wavelength of line after redshift
+            lambda_obs = line['wavelength'] + delta_lambda
+            # bin index in lambda_bins of central wavelength of line after z
+            center_index = np.digitize(lambda_obs, self.lambda_bins)
+
+            # thermal broadening b parameter
             thermal_b =  np.sqrt((2 * boltzmann_constant_cgs *
                                   field_data['temperature']) /
                                   line['atomic_mass'])
-            center_bins = np.digitize((delta_lambda + line['wavelength']),
-                                      self.lambda_bins)
 
-            # ratio of line width to bin width
-            width_ratio = ((line['wavelength'] + delta_lambda) * \
-                           thermal_b / speed_of_light_cgs / self.bin_width).in_units("").d
+            # the actual thermal width of the lines
+            thermal_width = (lambda_obs * thermal_b / 
+                             speed_of_light_cgs).convert_to_units("angstrom")
 
-            if (width_ratio < 1.0).any():
-                mylog.warn(("%d out of %d line components are unresolved, " +
-                            "consider increasing spectral resolution.") %
-                           ((width_ratio < 1.0).sum(), width_ratio.size))
+            # Sanitize units for faster runtime of the tau_profile machinery.
+            lambda_0 = line['wavelength'].d  # line's rest frame; angstroms
+            lambda_1 = lambda_obs.d # line's observed frame; angstroms
+            cdens = column_density.in_units("cm**-2").d # cm**-2
+            thermb = thermal_b.in_cgs().d  # thermal b coefficient; cm / s
+            dlambda = delta_lambda.d  # lambda offset; angstroms
+            vlos = field_data['velocity_los'].in_units("km/s").d # km/s
 
-            # do voigt profiles for a subset of the full spectrum
-            left_index  = (center_bins -
-                           spectrum_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)
-            right_index = (center_bins +
-                           spectrum_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)
+            # When we actually deposit the voigt profile, sometimes we will
+             have underresolved lines (i.e. lines with smaller widths than
+            # the spectral bin size).  Here, we create virtual bins small
+            # enough in width to well resolve each line, deposit the voigt 
+            # profile into them, then numerically integrate their tau values
+            # and sum them to redeposit them into the actual spectral bins.
 
-            # loop over all lines wider than the bin width
-            valid_lines = np.where((width_ratio >= 1.0) &
-                                   (right_index - left_index > 1))[0]
-            pbar = get_pbar("Adding line - %s [%f A]: " % (line['label'], line['wavelength']),
-                            valid_lines.size)
+            # virtual bins (vbins) will be:
+            # 1) <= the bin_width; assures resolution at least as good as
+            #    the spectral bins
+            # 2) <= 1/10th the thermal width; assures the voigt profiles
+            #    are well resolved (1/subgrid_resolution in general,
+            #    default 1/10)
+            # 3) the bin width will be an integer power of 10 times the
+            #    vbin_width; this assures we don't get spikes in the
+            #    deposited spectra from uneven numbers of vbins per bin
+            resolution = thermal_width / self.bin_width 
+            vbin_width = self.bin_width / \
+                         10**(np.ceil(np.log10(subgrid_resolution/resolution)).clip(0, np.inf))
+            vbin_width = vbin_width.in_units('angstrom').d
 
-            # Sanitize units here
-            column_density.convert_to_units("cm ** -2")
-            lbins = self.lambda_bins.d  # Angstroms
-            lambda_0 = line['wavelength'].d  # Angstroms
-            v_doppler = thermal_b.in_cgs().d  # cm / s
-            cdens = column_density.d
-            dlambda = delta_lambda.d  # Angstroms
-            vlos = field_data['velocity_los'].in_units("km/s").d
+            # the virtual window into which the line is deposited initially 
+            # spans a region of 5 thermal_widths, but this may expand
+            n_vbins = np.ceil(5*thermal_width.d/vbin_width)
+            vbin_window_width = n_vbins*vbin_width
 
-            for i, lixel in parallel_objects(enumerate(valid_lines), njobs=-1):
-                my_bin_ratio = spectrum_bin_ratio
+            if (thermal_width < self.bin_width).any():
+                mylog.info(("%d out of %d line components will be " + \
+                            "deposited as unresolved lines.") %
+                           ((thermal_width < self.bin_width).sum(), 
+                            thermal_width.size))
+
+            valid_lines = np.arange(len(thermal_width))
+            pbar = get_pbar("Adding line - %s [%f A]: " % \
+                            (line['label'], line['wavelength']),
+                            thermal_width.size)
+
+            # for a given transition, step through each location in the 
+            # observed spectrum where it occurs and deposit a voigt profile
+            for i in parallel_objects(valid_lines, njobs=-1):
+                my_vbin_window_width = vbin_window_width[i]
+                my_n_vbins = n_vbins[i]
+                my_vbin_width = vbin_width[i]
 
                 while True:
-                    lambda_bins, line_tau = \
+                    vbins = \
+                        np.linspace(lambda_1[i]-my_vbin_window_width/2.,
+                                    lambda_1[i]+my_vbin_window_width/2., 
+                                    my_n_vbins, endpoint=False)
+
+                    vbins, vtau = \
                         tau_profile(
-                            lambda_0, line['f_value'], line['gamma'], v_doppler[lixel],
-                            cdens[lixel], delta_lambda=dlambda[lixel],
-                            lambda_bins=lbins[left_index[lixel]:right_index[lixel]])
+                            lambda_0, line['f_value'], line['gamma'], thermb[i],
+                            cdens[i], delta_lambda=dlambda[i],
+                            lambda_bins=vbins)
 
-                    # Widen wavelength window until optical depth reaches a max value at the ends.
-                    if (line_tau[0] < max_tau and line_tau[-1] < max_tau) or \
-                      (left_index[lixel] <= 0 and right_index[lixel] >= self.n_lambda):
+                    # If tau has not dropped below the min_tau threshold at
+                    # the edges (i.e. the wings), widen the wavelength
+                    # window and repeat the process.
+                    if (vtau[0] < min_tau and vtau[-1] < min_tau):
                         break
-                    my_bin_ratio *= 2
-                    left_index[lixel]  = (center_bins[lixel] -
-                                          my_bin_ratio *
-                                          width_ratio[lixel]).astype(int).clip(0, self.n_lambda)
-                    right_index[lixel] = (center_bins[lixel] +
-                                          my_bin_ratio *
-                                          width_ratio[lixel]).astype(int).clip(0, self.n_lambda)
+                    my_vbin_window_width *= 2
+                    my_n_vbins *= 2
 
-                self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
+                # identify the extrema of the vbin_window so as to speed
+                # up searching over the entire lambda_bins array
+                bins_from_center = np.ceil((my_vbin_window_width/2.) / \
+                                           self.bin_width.d) + 1
+                left_index = (center_index[i] - bins_from_center).clip(0, self.n_lambda)
+                right_index = (center_index[i] + bins_from_center).clip(0, self.n_lambda)
+                window_width = right_index - left_index
+
+                # run digitize to identify which vbins are deposited into which
+                # global lambda bins.
+                # shift global lambda bins over by half a bin width; 
+                # this has the effect of assuring np.digitize will place 
+                # the vbins in the closest bin center.
+                binned = np.digitize(vbins, 
+                                     self.lambda_bins[left_index:right_index] \
+                                     + (0.5 * self.bin_width))
+
+                # numerically integrate the virtual bins to calculate a
+                # virtual equivalent width; then sum the virtual equivalent
+                # widths and deposit into each spectral bin
+                vEW = vtau * my_vbin_width
+                EW = [vEW[binned == j].sum() for j in np.arange(window_width)]
+                EW = np.array(EW)/self.bin_width.d
+                self.tau_field[left_index:right_index] += EW
+
                 if save_line_list and line['label_threshold'] is not None and \
-                        cdens[lixel] >= line['label_threshold']:
+                        cdens[i] >= line['label_threshold']:
                     if use_peculiar_velocity:
-                        peculiar_velocity = vlos[lixel]
+                        peculiar_velocity = vlos[i]
                     else:
                         peculiar_velocity = 0.0
                     self.spectrum_line_list.append({'label': line['label'],
-                                                    'wavelength': (lambda_0 + dlambda[lixel]),
-                                                    'column_density': column_density[lixel],
-                                                    'b_thermal': thermal_b[lixel],
-                                                    'redshift': field_data['redshift'][lixel],
+                                                    'wavelength': (lambda_0 + dlambda[i]),
+                                                    'column_density': column_density[i],
+                                                    'b_thermal': thermal_b[i],
+                                                    'redshift': field_data['redshift'][i],
                                                     'v_pec': peculiar_velocity})
                 pbar.update(i)
             pbar.finish()
 
-            del column_density, delta_lambda, thermal_b, \
-                center_bins, width_ratio, left_index, right_index
+            del column_density, delta_lambda, lambda_obs, center_index, \
+                thermal_b, thermal_width, lambda_1, cdens, thermb, dlambda, \
+                vlos, resolution, vbin_width, n_vbins, vbin_window_width, \
+                valid_lines, vbins, vtau, vEW
 
         comm = _get_comm(())
         self.tau_field = comm.mpi_allreduce(self.tau_field, op="sum")

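For orientation, here is a minimal, self-contained sketch of the virtual-bin
("subgrid") deposition scheme described in the comments above. It is a
simplification under stated assumptions: the function name is hypothetical,
plain floats and NumPy arrays stand in for YTArray quantities, and a
unit-height Gaussian stands in for the Voigt profile computed by
``tau_profile``.

.. code:: python

   import numpy as np

   def deposit_line(tau_field, lambda_bins, bin_width, lambda_center,
                    thermal_width, subgrid_resolution=10):
       # Virtual bin width: the coarse bin width divided by a power of ten,
       # chosen so roughly subgrid_resolution vbins span one thermal width.
       resolution = thermal_width / bin_width
       n_div = 10 ** max(0, int(np.ceil(np.log10(subgrid_resolution / resolution))))
       vbin_width = bin_width / n_div

       # Start from a window of 5 thermal widths and keep doubling it until
       # the optical depth in both wings has dropped below min_tau = 1e-3.
       n_vbins = int(np.ceil(5 * thermal_width / vbin_width))
       while True:
           half = n_vbins * vbin_width / 2.0
           vbins = np.linspace(lambda_center - half, lambda_center + half,
                               n_vbins, endpoint=False)
           vtau = np.exp(-0.5 * ((vbins - lambda_center) / thermal_width) ** 2)
           if vtau[0] < 1e-3 and vtau[-1] < 1e-3:
               break
           n_vbins *= 2

       # Shift the bin edges by half a width so np.digitize snaps each vbin
       # to the nearest coarse bin center, then redeposit the integrated
       # equivalent width. (vbins past the last edge are dropped here; the
       # patch instead clips the window with left_index/right_index.)
       binned = np.digitize(vbins, lambda_bins + 0.5 * bin_width)
       for j in range(len(lambda_bins)):
           tau_field[j] += vtau[binned == j].sum() * vbin_width / bin_width
       return tau_field
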
diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -281,8 +281,6 @@
         errSq=sum(dif**2)
 
         if any(linesP[:,1]==speciesDict['init_b']):
-         #   linesP = prevLinesP
-
             flag = True
             break
             

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -12,24 +12,33 @@
 
 import numpy as np
 from yt.testing import \
-    assert_allclose_units, requires_file, requires_module
+    assert_allclose_units, requires_file, requires_module, \
+    assert_almost_equal, assert_array_almost_equal
 from yt.analysis_modules.absorption_spectrum.absorption_line import \
     voigt_old, voigt_scipy
 from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
 from yt.analysis_modules.cosmological_observation.api import LightRay
+from yt.config import ytcfg
 import tempfile
 import os
 import shutil
+from yt.utilities.on_demand_imports import \
+    _h5py as h5
+
+test_dir = ytcfg.get("yt", "test_data_dir")
 
 COSMO_PLUS = "enzo_cosmology_plus/AMRCosmology.enzo"
-
+COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"
+HI_SPECTRUM_COSMO = "absorption_spectrum_data/enzo_lyman_alpha_cosmo_spec.h5"
+HI_SPECTRUM_COSMO_FILE = os.path.join(test_dir, HI_SPECTRUM_COSMO)
+HI_SPECTRUM = "absorption_spectrum_data/enzo_lyman_alpha_spec.h5"
+HI_SPECTRUM_FILE = os.path.join(test_dir, HI_SPECTRUM)
 
 @requires_file(COSMO_PLUS)
-def test_absorption_spectrum():
+@requires_file(HI_SPECTRUM_COSMO)
+def test_absorption_spectrum_cosmo():
     """
-    This test is simply following the description in the docs for how to
-    generate an absorption spectrum from a cosmological light ray for one
-    of the sample datasets
+    This test generates an absorption spectrum from a cosmological light ray
     """
 
     # Set up in a temp dir
@@ -37,7 +46,7 @@
     curdir = os.getcwd()
     os.chdir(tmpdir)
 
-    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.1)
+    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
 
     lr.make_light_ray(seed=1234567,
                       fields=['temperature', 'density', 'H_number_density'],
@@ -65,22 +74,30 @@
     sp.add_continuum(my_label, field, wavelength, normalization, index)
 
     wavelength, flux = sp.make_spectrum('lightray.h5',
-                                        output_file='spectrum.txt',
+                                        output_file='spectrum.h5',
                                         line_list_file='lines.txt',
                                         use_peculiar_velocity=True)
 
+    # load just-generated hdf5 file of spectral data (for consistency)
+    f_new = h5.File('spectrum.h5', 'r')
+
+    # load standard data for comparison
+    f_old = h5.File(HI_SPECTRUM_COSMO_FILE, 'r')
+
+    # compare between standard data and current data for each array saved 
+    # (wavelength, flux, tau)
+    for key in f_old.keys():
+        assert_array_almost_equal(f_new[key].value, f_old[key].value, 10)
+
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
 
-
-@requires_file(COSMO_PLUS)
-@requires_module("astropy")
-def test_absorption_spectrum_fits():
+@requires_file(COSMO_PLUS_SINGLE)
+@requires_file(HI_SPECTRUM)
+def test_absorption_spectrum_non_cosmo():
     """
-    This test is simply following the description in the docs for how to
-    generate an absorption spectrum from a cosmological light ray for one
-    of the sample datasets.  Outputs to fits file if astropy is installed.
+    This test generates an absorption spectrum from a non-cosmological light ray
     """
 
     # Set up in a temp dir
@@ -88,11 +105,114 @@
     curdir = os.getcwd()
     os.chdir(tmpdir)
 
-    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.1)
+    lr = LightRay(COSMO_PLUS_SINGLE)
 
-    lr.make_light_ray(seed=1234567,
+    ray_start = [0,0,0]
+    ray_end = [1,1,1]
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
                       fields=['temperature', 'density', 'H_number_density'],
-                      get_los_velocity=True,
+                      data_filename='lightray.h5')
+
+    sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
+
+    my_label = 'HI Lya'
+    field = 'H_number_density'
+    wavelength = 1215.6700  # Angstroms
+    f_value = 4.164E-01
+    gamma = 6.265e+08
+    mass = 1.00794
+
+    sp.add_line(my_label, field, wavelength, f_value,
+                gamma, mass, label_threshold=1.e10)
+
+    wavelength, flux = sp.make_spectrum('lightray.h5',
+                                        output_file='spectrum.h5',
+                                        line_list_file='lines.txt',
+                                        use_peculiar_velocity=True)
+
+    # load just-generated hdf5 file of spectral data (for consistency)
+    f_new = h5.File('spectrum.h5', 'r')
+
+    # load standard data for comparison
+    f_old = h5.File(HI_SPECTRUM_FILE, 'r')
+
+    # compare between standard data and current data for each array saved 
+    # (wavelength, flux, tau)
+    for key in f_old.keys():
+        assert_array_almost_equal(f_new[key].value, f_old[key].value, 10)
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+@requires_file(COSMO_PLUS_SINGLE)
+def test_equivalent_width_conserved():
+    """
+    This tests that the equivalent width of the optical depth is conserved 
+    regardless of the bin width employed in wavelength space.
+    Unresolved lines should still deposit optical depth into the spectrum.
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS_SINGLE)
+
+    ray_start = [0,0,0]
+    ray_end = [1,1,1]
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    my_label = 'HI Lya'
+    field = 'H_number_density'
+    wave = 1215.6700  # Angstroms
+    f_value = 4.164E-01
+    gamma = 6.265e+08
+    mass = 1.00794
+
+    lambda_min = 1200
+    lambda_max = 1300
+    lambda_bin_widths = [1e-3, 1e-2, 1e-1, 1e0, 1e1]
+    total_tau = []
+
+    for lambda_bin_width in lambda_bin_widths:
+        n_lambda = ((lambda_max - lambda_min)/ lambda_bin_width) + 1
+        sp = AbsorptionSpectrum(lambda_min=lambda_min, lambda_max=lambda_max, 
+                                n_lambda=n_lambda)
+        sp.add_line(my_label, field, wave, f_value, gamma, mass)
+        wavelength, flux = sp.make_spectrum('lightray.h5')
+        total_tau.append((lambda_bin_width * sp.tau_field).sum())
+        
+    # assure that the total tau values are all within 1e-5 of each other
+    for tau in total_tau:
+        assert_almost_equal(tau, total_tau[0], 5)
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+
+@requires_file(COSMO_PLUS_SINGLE)
+@requires_module("astropy")
+def test_absorption_spectrum_fits():
+    """
+    This test generates an absorption spectrum and saves it as a fits file.
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS_SINGLE)
+
+    ray_start = [0,0,0]
+    ray_end = [1,1,1]
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
                       data_filename='lightray.h5')
 
     sp = AbsorptionSpectrum(900.0, 1800.0, 10000)

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -64,12 +64,12 @@
         Default: None
     near_redshift : optional, float
         The near (lowest) redshift for a light ray containing multiple
-        datasets.  Do not use is making a light ray from a single
+        datasets.  Do not use if making a light ray from a single
         dataset.
         Default: None
     far_redshift : optional, float
         The far (highest) redshift for a light ray containing multiple
-        datasets.  Do not use is making a light ray from a single
+        datasets.  Do not use if making a light ray from a single
         dataset.
         Default: None
     use_minimum_datasets : optional, bool
@@ -168,9 +168,9 @@
 
         # If using only one dataset, set start and stop manually.
         if start_position is not None:
-            if len(self.light_ray_solution) > 1:
-                raise RuntimeError("LightRay Error: cannot specify start_position " + \
-                                   "if light ray uses more than one dataset.")
+            if self.near_redshift is not None or self.far_redshift is not None:
+                raise RuntimeError("LightRay Error: cannot specify both " + \
+                                   "start_position and a redshift range.")
             if not ((end_position is None) ^ (trajectory is None)):
                 raise RuntimeError("LightRay Error: must specify either end_position " + \
                                    "or trajectory, but not both.")

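The tightened check above distinguishes the two ways a LightRay can be set up.
A quick sketch adapted from the tests earlier in this patch (dataset paths are
placeholders):

.. code:: python

   from yt.analysis_modules.cosmological_observation.api import LightRay

   # Single-dataset ray: explicit start and end positions, no redshift range.
   lr = LightRay("RD0009/RD0009")
   lr.make_light_ray(start_position=[0., 0., 0.], end_position=[1., 1., 1.],
                     fields=['temperature', 'density', 'H_number_density'],
                     data_filename='lightray.h5')

   # Cosmological ray: a redshift range instead; passing start_position
   # together with a redshift range now raises a RuntimeError.
   lr = LightRay("AMRCosmology.enzo", 'Enzo', 0.0, 0.03)
   lr.make_light_ray(seed=1234567,
                     fields=['temperature', 'density', 'H_number_density'],
                     data_filename='lightray.h5')
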
diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -30,10 +30,10 @@
     get_rotation_matrix, \
     periodic_dist
 from yt.utilities.physical_constants import \
-    mass_sun_cgs, \
+    mass_sun_cgs
+from yt.utilities.physical_ratios import \
+    rho_crit_g_cm3_h2, \
     TINY
-from yt.utilities.physical_ratios import \
-    rho_crit_g_cm3_h2
 
 from .hop.EnzoHop import RunHOP
 from .fof.EnzoFOF import RunFOF

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 yt/analysis_modules/photon_simulator/tests/test_beta_model.py
--- a/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
@@ -14,9 +14,7 @@
     XSpecThermalModel, XSpecAbsorbModel, \
     ThermalPhotonModel, PhotonList
 from yt.config import ytcfg
-from yt.utilities.answer_testing.framework import \
-    requires_module
-from yt.testing import requires_file
+from yt.testing import requires_file, requires_module
 import numpy as np
 from yt.utilities.physical_ratios import \
     K_per_keV, mass_hydrogen_grams

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 yt/analysis_modules/photon_simulator/tests/test_spectra.py
--- a/yt/analysis_modules/photon_simulator/tests/test_spectra.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_spectra.py
@@ -1,9 +1,8 @@
 from yt.analysis_modules.photon_simulator.api import \
     TableApecModel, XSpecThermalModel
-import numpy as np
 from yt.testing import requires_module, fake_random_ds
 from yt.utilities.answer_testing.framework import \
-    GenericArrayTest, data_dir_load
+    GenericArrayTest
 from yt.config import ytcfg
 
 def setup():

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -19,11 +19,11 @@
 #-----------------------------------------------------------------------------
 
 from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
-from yt.units.yt_array import YTQuantity
-from yt.funcs import fix_axis, mylog, iterable, get_pbar
-from yt.visualization.volume_rendering.api import off_axis_projection
+from yt.funcs import fix_axis, mylog, get_pbar
+from yt.visualization.volume_rendering.off_axis_projection import \
+    off_axis_projection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     communication_system, parallel_root_only
+    communication_system, parallel_root_only
 from yt import units
 from yt.utilities.on_demand_imports import _astropy
 

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -12,8 +12,17 @@
 
 from yt.frontends.stream.api import load_uniform_grid
 from yt.funcs import get_pbar
-from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, \
-    mh, cm_per_km, kboltz, Tcmb, hcgs, clight, sigma_thompson
+from yt.utilities.physical_ratios import \
+    cm_per_kpc, \
+    K_per_keV, \
+    cm_per_km
+from yt.utilities.physical_constants import \
+    mh, \
+    kboltz, \
+    Tcmb, \
+    hcgs, \
+    clight, \
+    sigma_thompson
 from yt.testing import requires_module, assert_almost_equal
 from yt.utilities.answer_testing.framework import requires_ds, \
     GenericArrayTest, data_dir_load, GenericImageTest

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -26,7 +26,9 @@
 except ImportError:
     mylog.debug("The Fortran kD-Tree did not import correctly.")
 
-import math, inspect, time
+import math
+import inspect
+import time
 from collections import defaultdict
 
 sep = 12

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -16,7 +16,6 @@
 #-----------------------------------------------------------------------------
 
 import os
-import types
 from yt.extern.six.moves import configparser
 
 ytcfg_defaults = dict(

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -13,16 +13,19 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import os, os.path, types
+import os
 
 # Named imports
 from yt.extern.six import string_types
-from yt.funcs import *
 from yt.config import ytcfg
+from yt.funcs import mylog
 from yt.utilities.parameter_file_storage import \
     output_type_registry, \
     simulation_time_series_registry, \
     EnzoRunDatabase
+from yt.utilities.exceptions import \
+    YTOutputNotIdentified, \
+    YTSimulationNotIdentified
 from yt.utilities.hierarchy_inspection import find_lowest_subclasses
 
 def load(*args ,**kwargs):

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 yt/data_objects/analyzer_objects.py
--- a/yt/data_objects/analyzer_objects.py
+++ b/yt/data_objects/analyzer_objects.py
@@ -15,7 +15,6 @@
 
 import inspect
 
-from yt.funcs import *
 from yt.extern.six import add_metaclass
 
 analysis_task_registry = {}
@@ -23,7 +22,7 @@
 class RegisteredTask(type):
     def __init__(cls, name, b, d):
         type.__init__(cls, name, b, d)
-        if hasattr(cls, "skip") and cls.skip == False:
+        if hasattr(cls, "skip") and cls.skip is False:
             return
         analysis_task_registry[cls.__name__] = cls
 

diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -27,11 +27,6 @@
     particle_handler_registry
 
 from .profiles import \
-    YTEmptyProfileData, \
-    BinnedProfile, \
-    BinnedProfile1D, \
-    BinnedProfile2D, \
-    BinnedProfile3D, \
     create_profile, \
     Profile1D, \
     Profile2D, \

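The removed BinnedProfile classes have been superseded by ``create_profile``,
which remains exported above. A minimal sketch of the replacement call, with a
placeholder dataset path:

.. code:: python

   import yt

   ds = yt.load("DD0010/moving7_0010")
   ad = ds.all_data()
   # 1D profile of temperature binned by density, weighted by cell mass
   prof = yt.create_profile(ad, "density", fields=["temperature"],
                            weight_field="cell_mass")
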
diff -r 08eaadd3894f9cb0d0249e78d3844ff8f3494819 -r 8ab9fd55e2d0f3abbbb4616ec04817b78befec73 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -15,21 +15,29 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-import math
-import weakref
-import itertools
-import shelve
 from functools import wraps
 import fileinput
 from re import finditer
+from tempfile import TemporaryFile
 import os
+import zipfile
 
 from yt.config import ytcfg
-from yt.funcs import *
-from yt.utilities.logger import ytLogger
-from .data_containers import \
-    YTSelectionContainer1D, YTSelectionContainer2D, YTSelectionContainer3D, \
-    restore_field_information_state, YTFieldData
+from yt.data_objects.data_containers import \
+    YTSelectionContainer1D, \
+    YTSelectionContainer2D, \
+    YTSelectionContainer3D, \
+    YTFieldData
+from yt.funcs import \
+    ensure_list, \
+    mylog, \
+    get_memory_usage, \
+    iterable, \
+    only_on_root
+from yt.utilities.exceptions import \
+    YTParticleDepositionNotImplemented, \
+    YTNoAPIKey, \
+    YTTooManyVertices
 from yt.utilities.lib.QuadTree import \
     QuadTree
 from yt.utilities.lib.Interpolators import \
@@ -38,8 +46,6 @@
     fill_region
 from yt.utilities.lib.marching_cubes import \
     march_cubes_grid, march_cubes_grid_flux
-from yt.utilities.data_point_utilities import CombineGrids,\
-    DataCubeRefine, DataCubeReplace, FillRegion, FillBuffer
 from yt.utilities.minimal_representation import \
     MinimalProjectionData
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -47,16 +53,10 @@
 from yt.units.unit_object import Unit
 import yt.geometry.particle_deposit as particle_deposit
 from yt.utilities.grid_data_format.writer import write_to_gdf
+from yt.fields.field_exceptions import \
+    NeedsOriginalGrid
 from yt.frontends.stream.api import load_uniform_grid
 
-from yt.fields.field_exceptions import \
-    NeedsGridType,\
-    NeedsOriginalGrid,\
-    NeedsDataField,\
-    NeedsProperty,\
-    NeedsParameter
-from yt.fields.derived_field import \
-    TranslationFunc
 
 class YTStreamline(YTSelectionContainer1D):
     """
@@ -369,14 +369,13 @@
         data['pdy'] = self.ds.arr(pdy, code_length)
         data['fields'] = nvals
         # Now we run the finalizer, which is ignored if we don't need it
-        fd = data['fields']
         field_data = np.hsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
-            finfo = self.ds._get_field_info(*field)
             mylog.debug("Setting field %s", field)
             input_units = self._projected_units[field]
             self[field] = self.ds.arr(field_data[fi].ravel(), input_units)
-        for i in list(data.keys()): self[i] = data.pop(i)
+        for i in list(data.keys()):
+            self[i] = data.pop(i)
         mylog.info("Projection completed")
         self.tree = tree
 
@@ -939,7 +938,6 @@
         ls.current_level += 1
         ls.current_dx = ls.base_dx / \
             self.ds.relative_refinement(0, ls.current_level)
-        LL = ls.left_edge - ls.domain_left_edge
         ls.old_global_startindex = ls.global_startindex
         ls.global_startindex, end_index, ls.current_dims = \
             self._minimal_box(ls.current_dx)
@@ -1509,11 +1507,8 @@
                     color_log = True, emit_log = True, plot_index = None,
                     color_field_max = None, color_field_min = None,
                     emit_field_max = None, emit_field_min = None):
-        import io
-        from sys import version
         if plot_index is None:
             plot_index = 0
-            vmax=0
         ftype = [("cind", "uint8"), ("emit", "float")]
         vtype = [("x","float"),("y","float"), ("z","float")]
         #(0) formulate vertices
@@ -1552,7 +1547,7 @@
                 tmp = self.vertices[i,:]
                 np.divide(tmp, dist_fac, tmp)
                 v[ax][:] = tmp
-        return  v, lut, transparency, emiss, f['cind']
+        return v, lut, transparency, emiss, f['cind']
 
 
     def export_ply(self, filename, bounds = None, color_field = None,
@@ -1734,8 +1729,6 @@
         api_key = api_key or ytcfg.get("yt","sketchfab_api_key")
         if api_key in (None, "None"):
             raise YTNoAPIKey("SketchFab.com", "sketchfab_api_key")
-        import zipfile, json
-        from tempfile import TemporaryFile
 
         ply_file = TemporaryFile()
         self.export_ply(ply_file, bounds, color_field, color_map, color_log,

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/129722a2fc4c/
Changeset:   129722a2fc4c
Branch:      yt
User:        xarthisius
Date:        2015-11-23 22:24:53+00:00
Summary:     Merged in jzuhone/yt (pull request #1872)

Changing photon_simulator's output file structure
Affected #:  6 files

diff -r eb45e6e815fb3f781e57ebf9dd941b404da4e030 -r 129722a2fc4cfca129377785af88e615c4aec7a1 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -627,3 +627,31 @@
 .. image:: _images/ds9_sloshing.png
 
 .. image:: _images/ds9_bubbles.png
+
+In November 2015, the structure of the photon and event HDF5 files changed. To 
+convert an old-format file to the new format, use the ``convert_old_file`` utility:
+
+.. code:: python
+
+   from yt.analysis_modules.photon_simulator.api import convert_old_file
+   convert_old_file("old_photons.h5", "new_photons.h5", clobber=True)
+   convert_old_file("old_events.h5", "new_events.h5", clobber=True)
+
+This utility will auto-detect the kind of file (photons or events) and will write 
+a replacement file in the new format.
+
+At times it may be convenient to write several ``EventLists`` to disk to be merged 
+together later. This can be achieved with the ``merge_files`` utility, which takes 
+a list of input event files and the name of an output file:
+
+.. code:: python
+
+   from yt.analysis_modules.photon_simulator.api import merge_files
+   merge_files(["events_0.h5", "events_1.h5", "events_2.h5"], "merged_events.h5",
+                add_exposure_times=True, clobber=False)
+
+At the current time this utility is limited: it only allows merging of 
+``EventLists`` which have the same parameters, with the exception of the exposure
+time. If the ``add_exposure_times`` argument to ``merge_files`` is set to ``True``, 
+the lists will be merged with their exposure times added together. Otherwise, the 
+exposure times of the different files must be equal. 

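To make the new on-disk layout concrete, here is a short sketch of reading a
converted photon file back with plain ``h5py`` (the filename is hypothetical);
parameters and data now live under separate groups, as written by the patch to
``photon_simulator.py`` below:

.. code:: python

   import h5py

   with h5py.File("new_photons.h5", "r") as f:
       exp_time = f["/parameters/fid_exp_time"].value  # seconds
       x = f["/data/x"][:]                             # cell positions, kpc
       energy = f["/data/energy"][:]                   # photon energies, keV
       print(exp_time, x.shape, energy.shape)
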
diff -r eb45e6e815fb3f781e57ebf9dd941b404da4e030 -r 129722a2fc4cfca129377785af88e615c4aec7a1 yt/analysis_modules/photon_simulator/api.py
--- a/yt/analysis_modules/photon_simulator/api.py
+++ b/yt/analysis_modules/photon_simulator/api.py
@@ -16,7 +16,9 @@
 
 from .photon_simulator import \
      PhotonList, \
-     EventList
+     EventList, \
+     merge_files, \
+     convert_old_file
 
 from .spectral_models import \
      SpectralModel, \

diff -r eb45e6e815fb3f781e57ebf9dd941b404da4e030 -r 129722a2fc4cfca129377785af88e615c4aec7a1 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -52,14 +52,13 @@
 
 class ThermalPhotonModel(PhotonModel):
     r"""
-    Initialize a ThermalPhotonModel from a thermal spectrum. 
+    Initialize a ThermalPhotonModel from a thermal spectrum.
 
     Parameters
     ----------
-
     spectral_model : `SpectralModel`
         A thermal spectral model instance, either of `XSpecThermalModel`
-        or `TableApecModel`. 
+        or `TableApecModel`.
     X_H : float, optional
         The hydrogen mass fraction.
     Zmet : float or string, optional

diff -r eb45e6e815fb3f781e57ebf9dd941b404da4e030 -r 129722a2fc4cfca129377785af88e615c4aec7a1 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -25,6 +25,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 from yt.extern.six import string_types
+from collections import defaultdict
 import numpy as np
 from yt.funcs import mylog, get_pbar, iterable, ensure_list
 from yt.utilities.physical_constants import clight
@@ -38,6 +39,7 @@
 from yt.utilities.on_demand_imports import _h5py as h5py
 from yt.utilities.on_demand_imports import _astropy
 import warnings
+import os
 
 comm = communication_system.communicators[-1]
 
@@ -53,6 +55,25 @@
     else:
         return YTQuantity(value, default_units)
 
+def validate_parameters(first, second, skip=[]):
+    keys1 = list(first.keys())
+    keys2 = list(first.keys())
+    keys1.sort()
+    keys2.sort()
+    if keys1 != keys2:
+        raise RuntimeError("The two inputs do not have the same parameters!")
+    for k1, k2 in zip(keys1, keys2):
+        if k1 not in skip:
+            v1 = first[k1]
+            v2 = second[k2]
+            if isinstance(v1, string_types) or isinstance(v2, string_types):
+                check_equal = v1 == v2
+            else:
+                check_equal = np.allclose(v1, v2, rtol=0.0, atol=1.0e-10)
+            if not check_equal:
+                raise RuntimeError("The values for the parameter '%s' in the two inputs" % k1 +
+                                   " are not identical (%s vs. %s)!" % (v1, v2))
+
 class PhotonList(object):
 
     def __init__(self, photons, parameters, cosmo, p_bins):
@@ -107,29 +128,32 @@
 
         f = h5py.File(filename, "r")
 
-        parameters["FiducialExposureTime"] = YTQuantity(f["/fid_exp_time"].value, "s")
-        parameters["FiducialArea"] = YTQuantity(f["/fid_area"].value, "cm**2")
-        parameters["FiducialRedshift"] = f["/fid_redshift"].value
-        parameters["FiducialAngularDiameterDistance"] = YTQuantity(f["/fid_d_a"].value, "Mpc")
-        parameters["Dimension"] = f["/dimension"].value
-        parameters["Width"] = YTQuantity(f["/width"].value, "kpc")
-        parameters["HubbleConstant"] = f["/hubble"].value
-        parameters["OmegaMatter"] = f["/omega_matter"].value
-        parameters["OmegaLambda"] = f["/omega_lambda"].value
+        p = f["/parameters"]
+        parameters["FiducialExposureTime"] = YTQuantity(p["fid_exp_time"].value, "s")
+        parameters["FiducialArea"] = YTQuantity(p["fid_area"].value, "cm**2")
+        parameters["FiducialRedshift"] = p["fid_redshift"].value
+        parameters["FiducialAngularDiameterDistance"] = YTQuantity(p["fid_d_a"].value, "Mpc")
+        parameters["Dimension"] = p["dimension"].value
+        parameters["Width"] = YTQuantity(p["width"].value, "kpc")
+        parameters["HubbleConstant"] = p["hubble"].value
+        parameters["OmegaMatter"] = p["omega_matter"].value
+        parameters["OmegaLambda"] = p["omega_lambda"].value
 
-        num_cells = f["/x"][:].shape[0]
+        d = f["/data"]
+
+        num_cells = d["x"][:].shape[0]
         start_c = comm.rank*num_cells//comm.size
         end_c = (comm.rank+1)*num_cells//comm.size
 
-        photons["x"] = YTArray(f["/x"][start_c:end_c], "kpc")
-        photons["y"] = YTArray(f["/y"][start_c:end_c], "kpc")
-        photons["z"] = YTArray(f["/z"][start_c:end_c], "kpc")
-        photons["dx"] = YTArray(f["/dx"][start_c:end_c], "kpc")
-        photons["vx"] = YTArray(f["/vx"][start_c:end_c], "km/s")
-        photons["vy"] = YTArray(f["/vy"][start_c:end_c], "km/s")
-        photons["vz"] = YTArray(f["/vz"][start_c:end_c], "km/s")
+        photons["x"] = YTArray(d["x"][start_c:end_c], "kpc")
+        photons["y"] = YTArray(d["y"][start_c:end_c], "kpc")
+        photons["z"] = YTArray(d["z"][start_c:end_c], "kpc")
+        photons["dx"] = YTArray(d["dx"][start_c:end_c], "kpc")
+        photons["vx"] = YTArray(d["vx"][start_c:end_c], "km/s")
+        photons["vy"] = YTArray(d["vy"][start_c:end_c], "km/s")
+        photons["vz"] = YTArray(d["vz"][start_c:end_c], "km/s")
 
-        n_ph = f["/num_photons"][:]
+        n_ph = d["num_photons"][:]
 
         if comm.rank == 0:
             start_e = np.uint64(0)
@@ -142,7 +166,7 @@
         p_bins = np.cumsum(photons["NumberOfPhotons"])
         p_bins = np.insert(p_bins, 0, [np.uint64(0)])
 
-        photons["Energy"] = YTArray(f["/energy"][start_e:end_e], "keV")
+        photons["Energy"] = YTArray(d["energy"][start_e:end_e], "keV")
 
         f.close()
 
@@ -163,7 +187,6 @@
 
         Parameters
         ----------
-
         data_source : `yt.data_objects.data_containers.YTSelectionContainer`
             The data source from which the photons will be generated.
         redshift : float
@@ -177,9 +200,9 @@
             dictionary and returns a *photons* dictionary. Must be of the
             form: photon_model(data_source, parameters)
         parameters : dict, optional
-            A dictionary of parameters to be passed to the user function. 
+            A dictionary of parameters to be passed to the user function.
         center : string or array_like, optional
-            The origin of the photons. Accepts "c", "max", or a coordinate.                
+            The origin of the photons. Accepts "c", "max", or a coordinate.
         dist : tuple, optional
             The angular diameter distance in the form (value, unit), used
             mainly for nearby sources. This may be optionally supplied
@@ -191,7 +214,6 @@
 
         Examples
         --------
-
         This is the simplest possible example, where we call the built-in thermal model:
 
         >>> thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3)
@@ -227,7 +249,7 @@
 
         >>> import numpy as np
         >>> import yt
-        >>> from yt.analysis_modules.photon_simulator import *
+        >>> from yt.analysis_modules.photon_simulator.api import PhotonList
         >>> def line_func(source, parameters):
         ...
         ...     ds = source.ds
@@ -257,7 +279,7 @@
         >>> ddims = (128,128,128)
         >>> random_data = {"density":(np.random.random(ddims),"g/cm**3")}
         >>> ds = yt.load_uniform_grid(random_data, ddims)
-        >>> dd = ds.all_data
+        >>> dd = ds.all_data()
         >>> my_photons = PhotonList.from_user_model(dd, redshift, area,
         ...                                         time, line_func,
         ...                                         parameters=parameters)
@@ -357,7 +379,7 @@
 
             if comm.rank == 0:
                 num_cells = sum(sizes_c)
-                num_photons = sum(sizes_p)        
+                num_photons = sum(sizes_p)
                 disps_c = [sum(sizes_c[:i]) for i in range(len(sizes_c))]
                 disps_p = [sum(sizes_p[:i]) for i in range(len(sizes_p))]
                 x = np.zeros(num_cells)
@@ -401,7 +423,7 @@
             comm.comm.Gatherv([self.photons["NumberOfPhotons"], local_num_cells, mpi_long],
                               [n_ph, (sizes_c, disps_c), mpi_long], root=0)
             comm.comm.Gatherv([self.photons["Energy"].d, local_num_photons, mpi_double],
-                              [e, (sizes_p, disps_p), mpi_double], root=0) 
+                              [e, (sizes_p, disps_p), mpi_double], root=0)
 
         else:
 
@@ -419,35 +441,37 @@
 
             f = h5py.File(photonfile, "w")
 
-            # Scalars
+            # Parameters
 
-            f.create_dataset("fid_area", data=float(self.parameters["FiducialArea"]))
-            f.create_dataset("fid_exp_time", data=float(self.parameters["FiducialExposureTime"]))
-            f.create_dataset("fid_redshift", data=self.parameters["FiducialRedshift"])
-            f.create_dataset("hubble", data=self.parameters["HubbleConstant"])
-            f.create_dataset("omega_matter", data=self.parameters["OmegaMatter"])
-            f.create_dataset("omega_lambda", data=self.parameters["OmegaLambda"])
-            f.create_dataset("fid_d_a", data=float(self.parameters["FiducialAngularDiameterDistance"]))
-            f.create_dataset("dimension", data=self.parameters["Dimension"])
-            f.create_dataset("width", data=float(self.parameters["Width"]))
+            p = f.create_group("parameters")
+            p.create_dataset("fid_area", data=float(self.parameters["FiducialArea"]))
+            p.create_dataset("fid_exp_time", data=float(self.parameters["FiducialExposureTime"]))
+            p.create_dataset("fid_redshift", data=self.parameters["FiducialRedshift"])
+            p.create_dataset("hubble", data=self.parameters["HubbleConstant"])
+            p.create_dataset("omega_matter", data=self.parameters["OmegaMatter"])
+            p.create_dataset("omega_lambda", data=self.parameters["OmegaLambda"])
+            p.create_dataset("fid_d_a", data=float(self.parameters["FiducialAngularDiameterDistance"]))
+            p.create_dataset("dimension", data=self.parameters["Dimension"])
+            p.create_dataset("width", data=float(self.parameters["Width"]))
 
-            # Arrays
+            # Data
 
-            f.create_dataset("x", data=x)
-            f.create_dataset("y", data=y)
-            f.create_dataset("z", data=z)
-            f.create_dataset("vx", data=vx)
-            f.create_dataset("vy", data=vy)
-            f.create_dataset("vz", data=vz)
-            f.create_dataset("dx", data=dx)
-            f.create_dataset("num_photons", data=n_ph)
-            f.create_dataset("energy", data=e)
+            d = f.create_group("data")
+            d.create_dataset("x", data=x)
+            d.create_dataset("y", data=y)
+            d.create_dataset("z", data=z)
+            d.create_dataset("vx", data=vx)
+            d.create_dataset("vy", data=vy)
+            d.create_dataset("vz", data=vz)
+            d.create_dataset("dx", data=dx)
+            d.create_dataset("num_photons", data=n_ph)
+            d.create_dataset("energy", data=e)
 
             f.close()
 
         comm.barrier()
 
-    def project_photons(self, normal, area_new=None, exp_time_new=None, 
+    def project_photons(self, normal, area_new=None, exp_time_new=None,
                         redshift_new=None, dist_new=None,
                         absorb_model=None, psf_sigma=None,
                         sky_center=None, responses=None,
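The write_h5_file() hunk above replaces the flat dataset layout with two HDF5 groups: scalar parameters under "/parameters" and the photon arrays under "/data", matching what from_file() now reads. A minimal sketch of inspecting the new layout with h5py (the filename "my_photons.h5" is just an assumed example, not part of the commit):

    import h5py

    with h5py.File("my_photons.h5", "r") as f:
        # Scalars: fid_area, fid_exp_time, fid_redshift, fid_d_a, hubble,
        # omega_matter, omega_lambda, dimension, width
        print(sorted(f["/parameters"]))
        # Arrays: x, y, z, dx, vx, vy, vz, num_photons, energy
        print(sorted(f["/data"]))
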
@@ -458,7 +482,6 @@
 
         Parameters
         ----------
-
         normal : character or array_like
             Normal vector to the plane of projection. If "x", "y", or "z", will
             assume to be along that axis (and will probably be faster). Otherwise, 
@@ -814,7 +837,7 @@
 
         return events, info
 
-class EventList(object) :
+class EventList(object):
 
     def __init__(self, events, parameters):
         self.events = events
@@ -855,22 +878,7 @@
 
     def __add__(self, other):
         assert_same_wcs(self.wcs, other.wcs)
-        keys1 = list(self.parameters.keys())
-        keys2 = list(other.parameters.keys())
-        keys1.sort()
-        keys2.sort()
-        if keys1 != keys2:
-            raise RuntimeError("The two EventLists do not have the same parameters!")
-        for k1, k2 in zip(keys1, keys2):
-            v1 = self.parameters[k1]
-            v2 = other.parameters[k2]
-            if isinstance(v1, string_types) or isinstance(v2, string_types):
-                check_equal = v1 == v2
-            else:
-                check_equal = np.allclose(v1, v2, rtol=0.0, atol=1.0e-10)
-            if not check_equal:
-                raise RuntimeError("The values for the parameter '%s' in the two EventLists" % k1 +
-                                   " are not identical (%s vs. %s)!" % (v1, v2))
+        validate_parameters(self.parameters, other.parameters)
         events = {}
         for item1, item2 in zip(self.items(), other.items()):
             k1, v1 = item1
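The inline comparison removed above has been factored out into a validate_parameters() helper, whose definition is not part of this diff. A plausible sketch, reconstructed from the removed lines and the call sites (the skip keyword matches its use in merge_files() below; note that merge_files() passes h5py groups, so the real helper presumably unwraps dataset values as well):

    import numpy as np
    from yt.extern.six import string_types

    def validate_parameters(first, second, skip=()):
        # Both mappings must have exactly the same parameter names ...
        keys1 = sorted(first.keys())
        keys2 = sorted(second.keys())
        if keys1 != keys2:
            raise RuntimeError("The two sets of parameters do not have the same keys!")
        # ... and, except for the keys in *skip*, the same values.
        for key in keys1:
            if key in skip:
                continue
            v1 = first[key]
            v2 = second[key]
            if isinstance(v1, string_types) or isinstance(v2, string_types):
                check_equal = v1 == v2
            else:
                check_equal = np.allclose(v1, v2, rtol=0.0, atol=1.0e-10)
            if not check_equal:
                raise RuntimeError("The values for the parameter '%s' in the two sets" % key +
                                   " are not identical (%s vs. %s)!" % (v1, v2))
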
@@ -880,7 +888,7 @@
 
     def filter_events(self, region):
         """
-        Filter events using a ds9 region. Requires the pyregion package.
+        Filter events using a ds9 *region*. Requires the pyregion package.
         Returns a new EventList.
         """
         import pyregion
@@ -909,36 +917,38 @@
 
         f = h5py.File(h5file, "r")
 
-        parameters["ExposureTime"] = YTQuantity(f["/exp_time"].value, "s")
-        if isinstance(f["/area"].value, (string_types, bytes)):
-            parameters["Area"] = f["/area"].value.decode("utf8")
+        p = f["/parameters"]
+        parameters["ExposureTime"] = YTQuantity(p["exp_time"].value, "s")
+        if isinstance(p["area"].value, (string_types, bytes)):
+            parameters["Area"] = p["area"].value.decode("utf8")
         else:
-            parameters["Area"] = YTQuantity(f["/area"].value, "cm**2")
-        parameters["Redshift"] = f["/redshift"].value
-        parameters["AngularDiameterDistance"] = YTQuantity(f["/d_a"].value, "Mpc")
-        if "rmf" in f:
-            parameters["RMF"] = f["/rmf"].value.decode("utf8")
-        if "arf" in f:
-            parameters["ARF"] = f["/arf"].value.decode("utf8")
-        if "channel_type" in f:
-            parameters["ChannelType"] = f["/channel_type"].value.decode("utf8")
-        if "mission" in f:
-            parameters["Mission"] = f["/mission"].value.decode("utf8")
-        if "telescope" in f:
-            parameters["Telescope"] = f["/telescope"].value.decode("utf8")
-        if "instrument" in f:
-            parameters["Instrument"] = f["/instrument"].value.decode("utf8")
+            parameters["Area"] = YTQuantity(p["area"].value, "cm**2")
+        parameters["Redshift"] = p["redshift"].value
+        parameters["AngularDiameterDistance"] = YTQuantity(p["d_a"].value, "Mpc")
+        parameters["sky_center"] = YTArray(p["sky_center"][:], "deg")
+        parameters["dtheta"] = YTQuantity(p["dtheta"].value, "deg")
+        parameters["pix_center"] = p["pix_center"][:]
+        if "rmf" in p:
+            parameters["RMF"] = p["rmf"].value.decode("utf8")
+        if "arf" in p:
+            parameters["ARF"] = p["arf"].value.decode("utf8")
+        if "channel_type" in p:
+            parameters["ChannelType"] = p["channel_type"].value.decode("utf8")
+        if "mission" in p:
+            parameters["Mission"] = p["mission"].value.decode("utf8")
+        if "telescope" in p:
+            parameters["Telescope"] = p["telescope"].value.decode("utf8")
+        if "instrument" in p:
+            parameters["Instrument"] = p["instrument"].value.decode("utf8")
 
-        events["xpix"] = f["/xpix"][:]
-        events["ypix"] = f["/ypix"][:]
-        events["eobs"] = YTArray(f["/eobs"][:], "keV")
-        if "pi" in f:
-            events["PI"] = f["/pi"][:]
-        if "pha" in f:
-            events["PHA"] = f["/pha"][:]
-        parameters["sky_center"] = YTArray(f["/sky_center"][:], "deg")
-        parameters["dtheta"] = YTQuantity(f["/dtheta"].value, "deg")
-        parameters["pix_center"] = f["/pix_center"][:]
+        d = f["/data"]
+        events["xpix"] = d["xpix"][:]
+        events["ypix"] = d["ypix"][:]
+        events["eobs"] = YTArray(d["eobs"][:], "keV")
+        if "pi" in d:
+            events["PI"] = d["pi"][:]
+        if "pha" in d:
+            events["PHA"] = d["pha"][:]
 
         f.close()
 
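Because from_h5_file() now looks for the "/parameters" and "/data" groups, event files written by earlier yt versions have to be run through the convert_old_file() helper added below before they can be loaded; for example (filenames assumed):

    from yt.analysis_modules.photon_simulator.api import EventList, convert_old_file

    convert_old_file("old_events.h5", "new_events.h5")
    events = EventList.from_h5_file("new_events.h5")
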
@@ -1031,10 +1041,10 @@
 
              time = np.random.uniform(size=self.num_events, low=0.0,
                                       high=float(self.parameters["ExposureTime"]))
-             col_t = pyfits.Column(name="TIME", format='1D', unit='s', 
+             col_t = pyfits.Column(name="TIME", format='1D', unit='s',
                                    array=time)
              cols.append(col_t)
-        
+
         coldefs = pyfits.ColDefs(cols)
         tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
         tbhdu.update_ext_name("EVENTS")
@@ -1123,7 +1133,6 @@
 
         Parameters
         ----------
-
         prefix : string
             The filename prefix.
         clobber : boolean, optional
@@ -1207,38 +1216,40 @@
         """
         f = h5py.File(h5file, "w")
 
-        f.create_dataset("/exp_time", data=float(self.parameters["ExposureTime"]))
+        p = f.create_group("parameters")
+        p.create_dataset("exp_time", data=float(self.parameters["ExposureTime"]))
         area = self.parameters["Area"]
         if not isinstance(area, string_types):
             area = float(area)
-        f.create_dataset("/area", data=area)
-        f.create_dataset("/redshift", data=self.parameters["Redshift"])
-        f.create_dataset("/d_a", data=float(self.parameters["AngularDiameterDistance"]))
+        p.create_dataset("area", data=area)
+        p.create_dataset("redshift", data=self.parameters["Redshift"])
+        p.create_dataset("d_a", data=float(self.parameters["AngularDiameterDistance"]))
         if "ARF" in self.parameters:
-            f.create_dataset("/arf", data=self.parameters["ARF"])
+            p.create_dataset("arf", data=self.parameters["ARF"])
         if "RMF" in self.parameters:
-            f.create_dataset("/rmf", data=self.parameters["RMF"])
+            p.create_dataset("rmf", data=self.parameters["RMF"])
         if "ChannelType" in self.parameters:
-            f.create_dataset("/channel_type", data=self.parameters["ChannelType"])
+            p.create_dataset("channel_type", data=self.parameters["ChannelType"])
         if "Mission" in self.parameters:
-            f.create_dataset("/mission", data=self.parameters["Mission"]) 
+            p.create_dataset("mission", data=self.parameters["Mission"])
         if "Telescope" in self.parameters:
-            f.create_dataset("/telescope", data=self.parameters["Telescope"])
+            p.create_dataset("telescope", data=self.parameters["Telescope"])
         if "Instrument" in self.parameters:
-            f.create_dataset("/instrument", data=self.parameters["Instrument"])
+            p.create_dataset("instrument", data=self.parameters["Instrument"])
+        p.create_dataset("sky_center", data=self.parameters["sky_center"].d)
+        p.create_dataset("pix_center", data=self.parameters["pix_center"])
+        p.create_dataset("dtheta", data=float(self.parameters["dtheta"]))
 
-        f.create_dataset("/xpix", data=self["xpix"])
-        f.create_dataset("/ypix", data=self["ypix"])
-        f.create_dataset("/xsky", data=self["xsky"].d)
-        f.create_dataset("/ysky", data=self["ysky"].d)
-        f.create_dataset("/eobs", data=self["eobs"].d)
-        if "PI" in self:
-            f.create_dataset("/pi", data=self["PI"])
-        if "PHA" in self:
-            f.create_dataset("/pha", data=self["PHA"])                  
-        f.create_dataset("/sky_center", data=self.parameters["sky_center"].d)
-        f.create_dataset("/pix_center", data=self.parameters["pix_center"])
-        f.create_dataset("/dtheta", data=float(self.parameters["dtheta"]))
+        d = f.create_group("data")
+        d.create_dataset("xpix", data=self["xpix"])
+        d.create_dataset("ypix", data=self["ypix"])
+        d.create_dataset("xsky", data=self["xsky"].d)
+        d.create_dataset("ysky", data=self["ysky"].d)
+        d.create_dataset("eobs", data=self["eobs"].d)
+        if "PI" in self.events:
+            d.create_dataset("pi", data=self.events["PI"])
+        if "PHA" in self.events:
+            d.create_dataset("pha", data=self.events["PHA"])
 
         f.close()
 
@@ -1250,7 +1261,6 @@
 
         Parameters
         ----------
-
         imagefile : string
             The name of the image file to write.
         clobber : boolean, optional
@@ -1309,7 +1319,6 @@
 
         Parameters
         ----------
-
         specfile : string
             The name of the FITS file to be written.
         bin_type : string, optional
@@ -1417,3 +1426,130 @@
         hdulist = pyfits.HDUList([pyfits.PrimaryHDU(), tbhdu])
 
         hdulist.writeto(specfile, clobber=clobber)
+
+def merge_files(input_files, output_file, clobber=False,
+                add_exposure_times=False):
+    r"""
+    Helper function for merging PhotonList or EventList HDF5 files.
+
+    Parameters
+    ----------
+    input_files : list of strings
+        List of filenames that will be merged together.
+    output_file : string
+        Name of the merged file to be written.
+    clobber : boolean, default False
+        If the output file already exists, set this to True to
+        overwrite it.
+    add_exposure_times : boolean, default False
+        If set to True, exposure times will be added together. Otherwise,
+        the exposure times of all of the files must be the same.
+
+    Examples
+    --------
+    >>> from yt.analysis_modules.photon_simulator.api import merge_files
+    >>> merge_files(["events_0.h5","events_1.h5","events_3.h5"], "events.h5",
+    ...             clobber=True, add_exposure_times=True)
+
+    Notes
+    -----
+    Currently, merging requires that all of the parameters have the same
+    values in every input file, with the exception of the exposure time
+    parameter "exp_time", which is only allowed to differ when
+    add_exposure_times=True.
+    """
+    if os.path.exists(output_file) and not clobber:
+        raise IOError("Cannot overwrite existing file %s. " % output_file +
+                      "If you want to do this, set clobber=True.")
+
+    f_in = h5py.File(input_files[0], "r")
+    f_out = h5py.File(output_file, "w")
+
+    exp_time_key = ""
+    p_out = f_out.create_group("parameters")
+    for key, param in f_in["parameters"].items():
+        if key.endswith("exp_time"):
+            exp_time_key = key
+        else:
+            p_out[key] = param.value
+
+    skip = [exp_time_key] if add_exposure_times else []
+    for fn in input_files[1:]:
+        f = h5py.File(fn, "r")
+        validate_parameters(f_in["parameters"], f["parameters"], skip=skip)
+        f.close()
+
+    f_in.close()
+
+    data = defaultdict(list)
+    tot_exp_time = 0.0
+
+    for i, fn in enumerate(input_files):
+        f = h5py.File(fn, "r")
+        if add_exposure_times:
+            tot_exp_time += f["/parameters"][exp_time_key].value
+        elif i == 0:
+            tot_exp_time = f["/parameters"][exp_time_key].value
+        for key in f["/data"]:
+            data[key].append(f["/data"][key][:])
+        f.close()
+
+    p_out["exp_time"] = tot_exp_time
+
+    d = f_out.create_group("data")
+    for k in data:
+        d.create_dataset(k, data=np.concatenate(data[k]))
+
+    f_out.close()
+
+def convert_old_file(input_file, output_file, clobber=False):
+    r"""
+    Helper function for converting old PhotonList or EventList HDF5
+    files (pre-yt v3.3) to the new format.
+
+    Parameters
+    ----------
+    input_file : string
+        The name of the old-format file to be converted.
+    output_file : string
+        Name of the new file to be outputted.
+    clobber : boolean, default False
+        If the output file already exists, set this to True to
+        overwrite it.
+
+    Examples
+    --------
+    >>> from yt.analysis_modules.photon_simulator.api import convert_old_file
+    >>> convert_old_file("photons_old.h5", "photons_new.h5", clobber=True)
+    """
+    if os.path.exists(output_file) and not clobber:
+        raise IOError("Cannot overwrite existing file %s. " % output_file +
+                      "If you want to do this, set clobber=True.")
+
+    f_in = h5py.File(input_file, "r")
+
+    if "num_photons" in f_in:
+        params = ["fid_exp_time", "fid_area", "fid_d_a", "fid_redshift",
+                  "dimension", "width", "hubble", "omega_matter",
+                  "omega_lambda"]
+        data = ["x", "y", "z", "dx", "vx", "vy", "vz", "energy", "num_photons"]
+    elif "pix_center" in f_in:
+        params = ["exp_time", "area", "redshift", "d_a", "arf",
+                  "rmf", "channel_type", "mission", "telescope",
+                  "instrument", "sky_center", "pix_center", "dtheta"]
+        data = ["xsky", "ysky", "xpix", "ypix", "eobs", "pi", "pha"]
+
+    f_out = h5py.File(output_file, "w")
+
+    p = f_out.create_group("parameters")
+    d = f_out.create_group("data")
+
+    for key in params:
+        if key in f_in:
+            p.create_dataset(key, data=f_in[key].value)
+
+    for key in data:
+        if key in f_in:
+            d.create_dataset(key, data=f_in[key].value)
+
+    f_in.close()
+    f_out.close()
\ No newline at end of file
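Both helpers work purely on the HDF5 layout, so their effect is easy to check by hand. A short sketch (filenames assumed from the docstring examples; the comments show the expected shape of the output):

    import h5py

    # After convert_old_file("photons_old.h5", "photons_new.h5"):
    with h5py.File("photons_new.h5", "r") as f:
        print(sorted(f))                 # ['data', 'parameters'] instead of a flat root
        print(sorted(f["/parameters"]))  # e.g. ['dimension', 'fid_area', ...]

    # After merge_files([...], "events.h5", add_exposure_times=True):
    with h5py.File("events.h5", "r") as f:
        print(f["/parameters/exp_time"].value)  # sum of the inputs' exposure times
        print(len(f["/data/eobs"]))             # all input events, concatenated
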

diff -r eb45e6e815fb3f781e57ebf9dd941b404da4e030 -r 129722a2fc4cfca129377785af88e615c4aec7a1 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -51,7 +51,6 @@
 
     Parameters
     ----------
-
     model_name : string
         The name of the thermal emission model.
     emin : float
@@ -123,7 +122,6 @@
 
     Parameters
     ----------
-
     model_name : string
         The name of the absorption model.
     nH : float
@@ -180,11 +178,10 @@
     Initialize a thermal gas emission model from the AtomDB APEC tables
     available at http://www.atomdb.org. This code borrows heavily from Python
     routines used to read the APEC tables developed by Adam Foster at the
-    CfA (afoster at cfa.harvard.edu). 
+    CfA (afoster at cfa.harvard.edu).
 
     Parameters
     ----------
-
     apec_root : string
         The directory root where the APEC model files are stored.
     emin : float
@@ -323,7 +320,6 @@
 
     Parameters
     ----------
-
     filename : string
         The name of the table file.
     nH : float
@@ -333,7 +329,6 @@
     --------
     >>> abs_model = XSpecAbsorbModel("abs_table.h5", 0.1)
     """
-
     def __init__(self, filename, nH):
         if not os.path.exists(filename):
             raise IOError("File does not exist: %s." % filename)

diff -r eb45e6e815fb3f781e57ebf9dd941b404da4e030 -r 129722a2fc4cfca129377785af88e615c4aec7a1 yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -12,13 +12,18 @@
 
 from yt.analysis_modules.photon_simulator.api import \
     TableApecModel, TableAbsorbModel, \
-    ThermalPhotonModel, PhotonList
+    ThermalPhotonModel, PhotonList, EventList, \
+    convert_old_file, merge_files
 from yt.config import ytcfg
 from yt.testing import requires_file
 from yt.utilities.answer_testing.framework import requires_ds, \
     GenericArrayTest, data_dir_load
+from numpy.testing import assert_array_equal
 from numpy.random import RandomState
+from yt.units.yt_array import uconcatenate
 import os
+import tempfile
+import shutil
 
 def setup():
     from yt.config import ytcfg
@@ -34,9 +39,11 @@
         "aciss_aimpt_cy17.arf", "nustar_3arcminA.arf",
         "sxt-s_120210_ts02um_intallpxl.arf"]
 
-gslr = test_data_dir+"/GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
+gslr = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
 APEC = xray_data_dir
-TBABS = xray_data_dir+"/tbabs_table.h5"
+TBABS = os.path.join(xray_data_dir, "tbabs_table.h5")
+old_photon_file = os.path.join(xray_data_dir, "old_photons.h5")
+old_event_file = os.path.join(xray_data_dir, "old_events.h5")
 
 def return_data(data):
     def _return_data(name):
@@ -46,8 +53,14 @@
 @requires_ds(gslr)
 @requires_file(APEC)
 @requires_file(TBABS)
+@requires_file(old_photon_file)
+@requires_file(old_event_file)
 def test_sloshing():
 
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
     prng = RandomState(0x4d3d3d3)
 
     ds = data_dir_load(gslr)
@@ -61,25 +74,72 @@
     sphere = ds.sphere("c", (0.1, "Mpc"))
 
     thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3, prng=prng)
-    photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
-                                      thermal_model)
+    photons1 = PhotonList.from_scratch(sphere, redshift, A, exp_time,
+                                       thermal_model)
 
-    return_photons = return_data(photons.photons)
+    return_photons = return_data(photons1.photons)
 
-    tests = []
-    tests.append(GenericArrayTest(ds, return_photons, args=["photons"]))
+    tests = [GenericArrayTest(ds, return_photons, args=["photons"])]
 
     for a, r in zip(arfs, rmfs):
-        arf = os.path.join(xray_data_dir,a)
-        rmf = os.path.join(xray_data_dir,r)
-        events = photons.project_photons([1.0,-0.5,0.2], responses=[arf,rmf],
-                                         absorb_model=tbabs_model, 
-                                         convolve_energies=True, prng=prng)
+        arf = os.path.join(xray_data_dir, a)
+        rmf = os.path.join(xray_data_dir, r)
+        events1 = photons1.project_photons([1.0,-0.5,0.2], responses=[arf,rmf],
+                                           absorb_model=tbabs_model,
+                                           convolve_energies=True, prng=prng)
 
-        return_events = return_data(events.events)
+        return_events = return_data(events1.events)
 
         tests.append(GenericArrayTest(ds, return_events, args=[a]))
 
     for test in tests:
         test_sloshing.__name__ = test.description
         yield test
+
+    photons1.write_h5_file("test_photons.h5")
+    events1.write_h5_file("test_events.h5")
+
+    photons2 = PhotonList.from_file("test_photons.h5")
+    events2 = EventList.from_h5_file("test_events.h5")
+
+    convert_old_file(old_photon_file, "converted_photons.h5")
+    convert_old_file(old_event_file, "converted_events.h5")
+
+    photons3 = PhotonList.from_file("converted_photons.h5")
+    events3 = EventList.from_h5_file("converted_events.h5")
+
+    for k in photons1.keys():
+        if k == "Energy":
+            arr1 = uconcatenate(photons1[k])
+            arr2 = uconcatenate(photons2[k])
+            arr3 = uconcatenate(photons3[k])
+        else:
+            arr1 = photons1[k]
+            arr2 = photons2[k]
+            arr3 = photons3[k]
+        yield assert_array_equal, arr1, arr2
+        yield assert_array_equal, arr1, arr3
+    for k in events1.keys():
+        yield assert_array_equal, events1[k], events2[k]
+        yield assert_array_equal, events1[k], events3[k]
+
+    nevents = 0
+
+    for i in range(4):
+        events = photons1.project_photons([1.0,-0.5,0.2],
+                                          exp_time_new=0.25*exp_time,
+                                          absorb_model=tbabs_model,
+                                          prng=prng)
+        events.write_h5_file("split_events_%d.h5" % i)
+        nevents += len(events["xsky"])
+
+    merge_files(["split_events_%d.h5" % i for i in range(4)],
+                "merged_events.h5", add_exposure_times=True,
+                clobber=True)
+
+    merged_events = EventList.from_h5_file("merged_events.h5")
+    assert len(merged_events["xsky"]) == nevents
+    assert merged_events.parameters["ExposureTime"] == exp_time
+
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)

Repository URL: https://bitbucket.org/yt_analysis/yt/
