[yt-svn] commit/yt: 21 new changesets

commits-noreply@bitbucket.org
Tue Nov 10 11:50:01 PST 2015


21 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/164b32c62cae/
Changeset:   164b32c62cae
Branch:      yt
User:        jzuhone
Date:        2015-10-12 05:18:45+00:00
Summary:     Cleaning this up, updating the names of things
Affected #:  2 files

diff -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 -r 164b32c62caebda55c644300e526bad7140a4919 yt/analysis_modules/photon_simulator/tests/test_cluster.py
--- a/yt/analysis_modules/photon_simulator/tests/test_cluster.py
+++ /dev/null
@@ -1,67 +0,0 @@
-"""
-Answer test the photon_simulator analysis module.
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.analysis_modules.photon_simulator.api import \
-    TableApecModel, TableAbsorbModel, \
-    ThermalPhotonModel, PhotonList
-from yt.config import ytcfg
-from yt.testing import requires_file
-from yt.utilities.answer_testing.framework import requires_ds, \
-    GenericArrayTest, data_dir_load
-import numpy as np
-
-def setup():
-    from yt.config import ytcfg
-    ytcfg["yt", "__withintesting"] = "True"
-
-test_dir = ytcfg.get("yt", "test_data_dir")
-
-ETC = test_dir+"/enzo_tiny_cosmology/DD0046/DD0046"
-APEC = test_dir+"/xray_data/atomdb_v2.0.2"
-TBABS = test_dir+"/xray_data/tbabs_table.h5"
-ARF = test_dir+"/xray_data/chandra_ACIS-S3_onaxis_arf.fits"
-RMF = test_dir+"/xray_data/chandra_ACIS-S3_onaxis_rmf.fits"
-
-@requires_ds(ETC)
-@requires_file(APEC)
-@requires_file(TBABS)
-@requires_file(ARF)
-@requires_file(RMF)
-def test_etc():
-
-    np.random.seed(seed=0x4d3d3d3)
-
-    ds = data_dir_load(ETC)
-    A = 3000.
-    exp_time = 1.0e5
-    redshift = 0.1
-
-    apec_model = TableApecModel(APEC, 0.1, 20.0, 2000)
-    tbabs_model = TableAbsorbModel(TBABS, 0.1)
-
-    sphere = ds.sphere("max", (0.5, "mpc"))
-
-    thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3)
-    photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
-                                      thermal_model)
-
-    events = photons.project_photons([0.0,0.0,1.0],
-                                     responses=[ARF,RMF],
-                                     absorb_model=tbabs_model)
-
-    def photons_test(): return photons.photons
-    def events_test(): return events.events
-
-    for test in [GenericArrayTest(ds, photons_test),
-                 GenericArrayTest(ds, events_test)]:
-        test_etc.__name__ = test.description
-        yield test

diff -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 -r 164b32c62caebda55c644300e526bad7140a4919 yt/analysis_modules/photon_simulator/tests/test_etc.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/tests/test_etc.py
@@ -0,0 +1,68 @@
+"""
+Answer test the photon_simulator analysis module.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.analysis_modules.photon_simulator.api import \
+    TableApecModel, TableAbsorbModel, \
+    ThermalPhotonModel, PhotonList
+from yt.config import ytcfg
+from yt.testing import requires_file
+from yt.utilities.answer_testing.framework import requires_ds, \
+    GenericArrayTest, data_dir_load
+import numpy as np
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+test_dir = ytcfg.get("yt", "test_data_dir")
+
+ETC = test_dir+"/enzo_tiny_cosmology/DD0046/DD0046"
+APEC = test_dir+"/xray_data/atomdb_v2.0.2"
+TBABS = test_dir+"/xray_data/tbabs_table.h5"
+ARF = test_dir+"/xray_data/aciss_aimpt_cy17.arf"
+RMF = test_dir+"/xray_data/aciss_aimpt_cy17.rmf"
+
+@requires_ds(ETC)
+@requires_file(APEC)
+@requires_file(TBABS)
+@requires_file(ARF)
+@requires_file(RMF)
+def test_etc():
+
+    np.random.seed(seed=0x4d3d3d3)
+
+    ds = data_dir_load(ETC)
+    A = 3000.
+    exp_time = 1.0e5
+    redshift = 0.1
+
+    apec_model = TableApecModel(APEC, 0.1, 20.0, 2000)
+    tbabs_model = TableAbsorbModel(TBABS, 0.1)
+
+    sphere = ds.sphere("max", (0.5, "Mpc"))
+
+    thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3)
+    photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
+                                      thermal_model)
+
+    events = photons.project_photons("z", responses=[ARF,RMF],
+                                     absorb_model=tbabs_model)
+
+    def photons_test():
+        return photons.photons
+    def events_test():
+        return events.events
+
+    for test in [GenericArrayTest(ds, photons_test),
+                 GenericArrayTest(ds, events_test)]:
+        test_etc.__name__ = test.description
+        yield test
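
The generator pattern at the end of test_etc is how nose collects yt answer
tests: each GenericArrayTest wraps a function returning an array, and the
loop yields one comparison per array, renaming the test so each result is
reported separately. A minimal sketch of the same pattern, using an
in-memory fake dataset so it runs without any test data (the test name here
is hypothetical):

from yt.testing import fake_random_ds
from yt.utilities.answer_testing.framework import GenericArrayTest

def test_sketch():
    ds = fake_random_ds(16)  # small in-memory dataset, no files needed

    def field_values():
        return ds.all_data()["density"]

    for test in [GenericArrayTest(ds, field_values)]:
        # rename so the runner reports each yielded test distinctly
        test_sketch.__name__ = test.description
        yield test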


https://bitbucket.org/yt_analysis/yt/commits/229f1b1c448a/
Changeset:   229f1b1c448a
Branch:      yt
User:        jzuhone
Date:        2015-10-12 05:31:43+00:00
Summary:     Beginnings of a beta model unit test
Affected #:  1 file

diff -r 164b32c62caebda55c644300e526bad7140a4919 -r 229f1b1c448a4ef6ca362a0b4d1b9c93c756cee9 yt/analysis_modules/photon_simulator/tests/test_beta_model.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
@@ -0,0 +1,88 @@
+"""
+Answer test the photon_simulator analysis module.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.analysis_modules.photon_simulator.api import \
+    XSpecThermalModel, XSpecAbsorbModel, \
+    ThermalPhotonModel, PhotonList
+from yt.config import ytcfg
+from yt.testing import requires_file
+from yt.utilities.answer_testing.framework import \
+    requires_module
+import numpy as np
+from yt.utilities.physical_ratios import \
+    K_per_keV, mass_hydrogen_grams
+from yt.frontends.stream.api import load_uniform_grid
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+test_dir = ytcfg.get("yt", "test_data_dir")
+
+ETC = test_dir+"/enzo_tiny_cosmology/DD0046/DD0046"
+ARF = test_dir+"/xray_data/sxt-s_120210_ts02um_intallpxl.arf"
+RMF = test_dir+"/xray_data/ah_sxs_5ev_basefilt_20100712.rmf"
+
+@requires_module("xspec")
+@requires_file(ARF)
+@requires_file(RMF)
+def test_beta_model():
+
+    R = 1.0
+    r_c = 0.05
+    rho_c = 0.04*mass_hydrogen_grams
+    beta = 1.
+    T = 6.0
+    v_shift = 4.0e7
+    nx = 128
+
+    ddims = (nx,nx,nx)
+
+    x, y, z = np.mgrid[-R:R:nx*1j,
+                       -R:R:nx*1j,
+                       -R:R:nx*1j]
+
+    r = np.sqrt(x**2+y**2+z**2)
+
+    dens = np.zeros(ddims)
+    dens[r <= R] = rho_c*(1.+(r[r <= R]/r_c)**2)**(-1.5*beta)
+    dens[r > R] = 0.0
+    temp = T*K_per_keV*np.ones(ddims)
+    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
+
+    data = {}
+    data["density"] = (dens, "g/cm**3")
+    data["temperature"] = (temp, "K")
+    data["velocity_x"] = (np.zeros(ddims), "cm/s")
+    data["velocity_y"] = (np.zeros(ddims), "cm/s")
+    data["velocity_z"] = (v_shift*np.ones(ddims), "cm/s")
+
+    ds = load_uniform_grid(data, ddims, length_unit=(2*R, "Mpc"),
+                           nprocs=64, bbox=bbox)
+
+    A = 3000.
+    exp_time = 1.0e5
+    redshift = 0.05
+
+    apec_model = XSpecThermalModel("bapec", 0.1, 11.5, 40000,
+                                   thermal_broad=True)
+    abs_model = XSpecAbsorbModel("TBabs", 0.02)
+
+    sphere = ds.sphere("c", (0.5, "Mpc"))
+
+    thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3)
+    photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
+                                      thermal_model)
+
+    events = photons.project_photons("z", responses=[ARF,RMF],
+                                     absorb_model=abs_model)
+
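
For reference, the density field constructed in this test is the isothermal
beta model, rho(r) = rho_c*(1 + (r/r_c)**2)**(-1.5*beta), sampled on a
uniform grid and zeroed outside radius R. A standalone sketch of just the
profile (parameter values copied from the test; plain NumPy plus one yt
constant):

import numpy as np
from yt.utilities.physical_ratios import mass_hydrogen_grams

R = 1.0                           # cutoff radius in code units
r_c = 0.05                        # core radius
rho_c = 0.04*mass_hydrogen_grams  # central density in g/cm**3
beta = 1.0
nx = 128

x, y, z = np.mgrid[-R:R:nx*1j, -R:R:nx*1j, -R:R:nx*1j]
r = np.sqrt(x**2 + y**2 + z**2)

dens = np.zeros((nx, nx, nx))
inside = r <= R
dens[inside] = rho_c*(1.0 + (r[inside]/r_c)**2)**(-1.5*beta)

load_uniform_grid then wraps this array, the constant temperature, and the
velocity fields into a dataset the photon machinery can consume like any
other.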


https://bitbucket.org/yt_analysis/yt/commits/e30cc5258f59/
Changeset:   e30cc5258f59
Branch:      yt
User:        jzuhone
Date:        2015-10-12 05:57:36+00:00
Summary:     We use this beta model test to check every response
Affected #:  1 file

diff -r 229f1b1c448a4ef6ca362a0b4d1b9c93c756cee9 -r e30cc5258f5999badeeacad46e7eaf8a228dd255 yt/analysis_modules/photon_simulator/tests/test_beta_model.py
--- a/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
@@ -14,13 +14,15 @@
     XSpecThermalModel, XSpecAbsorbModel, \
     ThermalPhotonModel, PhotonList
 from yt.config import ytcfg
-from yt.testing import requires_file
 from yt.utilities.answer_testing.framework import \
     requires_module
 import numpy as np
 from yt.utilities.physical_ratios import \
     K_per_keV, mass_hydrogen_grams
 from yt.frontends.stream.api import load_uniform_grid
+import os
+import tempfile
+import shutil
 
 def setup():
     from yt.config import ytcfg
@@ -28,14 +30,27 @@
 
 test_dir = ytcfg.get("yt", "test_data_dir")
 
-ETC = test_dir+"/enzo_tiny_cosmology/DD0046/DD0046"
-ARF = test_dir+"/xray_data/sxt-s_120210_ts02um_intallpxl.arf"
-RMF = test_dir+"/xray_data/ah_sxs_5ev_basefilt_20100712.rmf"
+rmfs = ["pn-med.rmf", "acisi_aimpt_cy17.rmf",
+        "aciss_aimpt_cy17.rmf", "nustar.rmf",
+        "ah_sxs_5ev_basefilt_20100712.rmf"]
+arfs = ["pn-med.arf", "acisi_aimpt_cy17.arf",
+        "aciss_aimpt_cy17.arf", "nustar_3arcminA.arf",
+        "sxt-s_120210_ts02um_intallpxl.arf"]
 
 @requires_module("xspec")
- at requires_file(ARF)
- at requires_file(RMF)
 def test_beta_model():
+    import xspec
+    
+    xspec.Fit.statMethod = "cstat"
+    xspec.Xset.addModelString("APECTHERMAL","yes")
+    xspec.Fit.query = "yes"
+    xspec.Fit.method = ["leven","10","0.01"]
+    xspec.Fit.delta = 0.01
+    xspec.Xset.chatter = 5
+
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
 
     R = 1.0
     r_c = 0.05
@@ -73,7 +88,7 @@
     exp_time = 1.0e5
     redshift = 0.05
 
-    apec_model = XSpecThermalModel("bapec", 0.1, 11.5, 40000,
+    apec_model = XSpecThermalModel("bapec", 0.1, 11.5, 20000,
                                    thermal_broad=True)
     abs_model = XSpecAbsorbModel("TBabs", 0.02)
 
@@ -83,6 +98,32 @@
     photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
                                       thermal_model)
 
-    events = photons.project_photons("z", responses=[ARF,RMF],
-                                     absorb_model=abs_model)
+    for a, r in zip(arfs, rmfs):
+        arf = test_dir+"/xray_data/"+a
+        rmf = test_dir+"/xray_data/"+r
+        events = photons.project_photons("z", responses=[arf,rmf],
+                                         absorb_model=abs_model)
+        events.write_spectrum("beta_model_evt.pi", clobber=True)
 
+        s = xspec.Spectrum("beta_model_evt.pi")
+        s.ignore("**-0.5")
+        s.ignore("7.0-**")
+        m = xspec.Model("tbabs*bapec")
+        m.bapec.kT = 5.0
+        m.bapec.Abundanc = 0.25
+        m.bapec.norm = 1.0
+        m.bapec.Redshift = 0.05
+        m.bapec.Velocity = 0.0
+        m.TBabs.nH = 0.015
+
+        m.bapec.Velocity.frozen = False
+        m.bapec.Abundanc.frozen = False
+        m.bapec.Redshift.frozen = False
+        m.TBabs.nH.frozen = False
+
+        xspec.Fit.renorm()
+        xspec.Fit.nIterations = 100
+        xspec.Fit.perform()
+
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
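
XSPEC and write_spectrum drop scratch files (e.g. beta_model_evt.pi) in the
current directory, hence the mkdtemp/chdir/rmtree bracketing above. A
slightly more defensive form of the same idiom uses try/finally so the
scratch directory is cleaned up even if a fit raises (standard library only,
nothing yt-specific):

import os
import shutil
import tempfile

tmpdir = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(tmpdir)
try:
    pass  # ... project photons, write spectra, run fits ...
finally:
    os.chdir(curdir)
    shutil.rmtree(tmpdir)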


https://bitbucket.org/yt_analysis/yt/commits/d3597f70a3bc/
Changeset:   d3597f70a3bc
Branch:      yt
User:        jzuhone
Date:        2015-10-20 16:09:37+00:00
Summary:     Add 'xray_data_dir' to config
Affected #:  1 file

diff -r e30cc5258f5999badeeacad46e7eaf8a228dd255 -r d3597f70a3bcffb42b7495baec566c06c0683f6a yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -60,7 +60,8 @@
     sketchfab_api_key = 'None',
     thread_field_detection = 'False',
     ignore_invalid_unit_operation_errors = 'False',
-    chunk_size = '1000'
+    chunk_size = '1000',
+    xray_data_dir = '/does/not/exist',
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten
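
The new option behaves like any other ytcfg entry: users point
xray_data_dir (in the [yt] section of ~/.yt/config) at an unpacked copy of
the xray_data bundle, and code reads it back at runtime. A small sketch with
the sentinel default guarded explicitly (the error message is illustrative):

import os
from yt.config import ytcfg

xray_data_dir = ytcfg.get("yt", "xray_data_dir")
if not os.path.isdir(xray_data_dir):
    # the shipped default is the sentinel '/does/not/exist'
    raise IOError("set xray_data_dir in ~/.yt/config before running "
                  "the photon_simulator tests")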


https://bitbucket.org/yt_analysis/yt/commits/bfeb1a24fc6a/
Changeset:   bfeb1a24fc6a
Branch:      yt
User:        jzuhone
Date:        2015-10-20 16:13:47+00:00
Summary:     More fixing up of tests
Affected #:  2 files

diff -r d3597f70a3bcffb42b7495baec566c06c0683f6a -r bfeb1a24fc6a4a1e7982d5b097eb3c39ebd1244e yt/analysis_modules/photon_simulator/tests/test_beta_model.py
--- a/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
@@ -28,7 +28,7 @@
     from yt.config import ytcfg
     ytcfg["yt", "__withintesting"] = "True"
 
-test_dir = ytcfg.get("yt", "test_data_dir")
+xray_data_dir = ytcfg.get("yt", "xray_data_dir")
 
 rmfs = ["pn-med.rmf", "acisi_aimpt_cy17.rmf",
         "aciss_aimpt_cy17.rmf", "nustar.rmf",
@@ -99,8 +99,8 @@
                                       thermal_model)
 
     for a, r in zip(rmfs, arfs):
-        arf = test_dir+"/xray_data/"+a
-        rmf = test_dir+"/xray_data/"+r
+        arf = xray_data_dir+a
+        rmf = xray_data_dir+r
         events = photons.project_photons("z", responses=[arf,rmf],
                                          absorb_model=abs_model)
         events.write_spectrum("beta_model_evt.pi", clobber=True)
@@ -113,7 +113,7 @@
         m.bapec.Abundanc = 0.25
         m.bapec.norm = 1.0
         m.bapec.Redshift = 0.05
-        m.bapec.Velocity = 0.0
+        m.bapec.Velocity = 100.0
         m.TBabs.nH = 0.015
 
         m.bapec.Velocity.frozen = False
@@ -125,5 +125,11 @@
         xspec.Fit.nIterations = 100
         xspec.Fit.perform()
 
+        assert np.abs(m.bapec.Redshift.values[0]-redshift) < 1.0
+        assert np.abs(m.bapec.kT.values[0]-6.0) < 1.0
+        assert np.abs(m.bapec.Abundanc.values[0]-0.3) < 1.0
+        assert np.abs(m.bapec.Velocity.values[0]-0.0) < 1.0
+        assert np.abs(m.TBabs.nH.values[0]-0.02) < 1.0
+
     os.chdir(curdir)
     shutil.rmtree(tmpdir)

diff -r d3597f70a3bcffb42b7495baec566c06c0683f6a -r bfeb1a24fc6a4a1e7982d5b097eb3c39ebd1244e yt/analysis_modules/photon_simulator/tests/test_etc.py
--- a/yt/analysis_modules/photon_simulator/tests/test_etc.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_etc.py
@@ -23,13 +23,14 @@
     from yt.config import ytcfg
     ytcfg["yt", "__withintesting"] = "True"
 
-test_dir = ytcfg.get("yt", "test_data_dir")
+test_data_dir = ytcfg.get("yt", "test_data_dir")
+xray_data_dir = ytcfg.get("yt", "xray_data_dir")
 
-ETC = test_dir+"/enzo_tiny_cosmology/DD0046/DD0046"
-APEC = test_dir+"/xray_data/atomdb_v2.0.2"
-TBABS = test_dir+"/xray_data/tbabs_table.h5"
-ARF = test_dir+"/xray_data/aciss_aimpt_cy17.arf"
-RMF = test_dir+"/xray_data/aciss_aimpt_cy17.rmf"
+ETC = test_data_dir+"/enzo_tiny_cosmology/DD0046/DD0046"
+APEC = xray_data_dir+"/atomdb_v2.0.2"
+TBABS = xray_data_dir+"/tbabs_table.h5"
+ARF = xray_data_dir+"/aciss_aimpt_cy17.arf"
+RMF = xray_data_dir+"/aciss_aimpt_cy17.rmf"
 
 @requires_ds(ETC)
 @requires_file(APEC)
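
One detail worth noting in the response loop: the ARF and RMF filename lists
are positionally paired, and the concatenation xray_data_dir+a assumes the
configured path ends in a slash. A sketch of the same pairing written with
os.path.join, which avoids that assumption (the directory value is a
placeholder):

import os

arfs = ["pn-med.arf", "acisi_aimpt_cy17.arf", "aciss_aimpt_cy17.arf"]
rmfs = ["pn-med.rmf", "acisi_aimpt_cy17.rmf", "aciss_aimpt_cy17.rmf"]
xray_data_dir = "/path/to/xray_data"  # placeholder

for arf, rmf in zip(arfs, rmfs):
    responses = [os.path.join(xray_data_dir, arf),
                 os.path.join(xray_data_dir, rmf)]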


https://bitbucket.org/yt_analysis/yt/commits/b207b3df1a4c/
Changeset:   b207b3df1a4c
Branch:      yt
User:        jzuhone
Date:        2015-10-20 16:15:52+00:00
Summary:     Merge
Affected #:  4 files

diff -r 5438e77cc0c94ab2038bbc78462911de445f6b2e -r b207b3df1a4ca218e2c9e1a4af621c6d0d954864 yt/analysis_modules/photon_simulator/tests/test_beta_model.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
@@ -0,0 +1,135 @@
+"""
+Answer test the photon_simulator analysis module.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.analysis_modules.photon_simulator.api import \
+    XSpecThermalModel, XSpecAbsorbModel, \
+    ThermalPhotonModel, PhotonList
+from yt.config import ytcfg
+from yt.utilities.answer_testing.framework import \
+    requires_module
+import numpy as np
+from yt.utilities.physical_ratios import \
+    K_per_keV, mass_hydrogen_grams
+from yt.frontends.stream.api import load_uniform_grid
+import os
+import tempfile
+import shutil
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+xray_data_dir = ytcfg.get("yt", "xray_data_dir")
+
+rmfs = ["pn-med.rmf", "acisi_aimpt_cy17.rmf",
+        "aciss_aimpt_cy17.rmf", "nustar.rmf",
+        "ah_sxs_5ev_basefilt_20100712.rmf"]
+arfs = ["pn-med.arf", "acisi_aimpt_cy17.arf",
+        "aciss_aimpt_cy17.arf", "nustar_3arcminA.arf",
+        "sxt-s_120210_ts02um_intallpxl.arf"]
+
+@requires_module("xspec")
+def test_beta_model():
+    import xspec
+    
+    xspec.Fit.statMethod = "cstat"
+    xspec.Xset.addModelString("APECTHERMAL","yes")
+    xspec.Fit.query = "yes"
+    xspec.Fit.method = ["leven","10","0.01"]
+    xspec.Fit.delta = 0.01
+    xspec.Xset.chatter = 5
+
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    R = 1.0
+    r_c = 0.05
+    rho_c = 0.04*mass_hydrogen_grams
+    beta = 1.
+    T = 6.0
+    v_shift = 4.0e7
+    nx = 128
+
+    ddims = (nx,nx,nx)
+
+    x, y, z = np.mgrid[-R:R:nx*1j,
+                       -R:R:nx*1j,
+                       -R:R:nx*1j]
+
+    r = np.sqrt(x**2+y**2+z**2)
+
+    dens = np.zeros(ddims)
+    dens[r <= R] = rho_c*(1.+(r[r <= R]/r_c)**2)**(-1.5*beta)
+    dens[r > R] = 0.0
+    temp = T*K_per_keV*np.ones(ddims)
+    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
+
+    data = {}
+    data["density"] = (dens, "g/cm**3")
+    data["temperature"] = (temp, "K")
+    data["velocity_x"] = (np.zeros(ddims), "cm/s")
+    data["velocity_y"] = (np.zeros(ddims), "cm/s")
+    data["velocity_z"] = (v_shift*np.ones(ddims), "cm/s")
+
+    ds = load_uniform_grid(data, ddims, length_unit=(2*R, "Mpc"),
+                           nprocs=64, bbox=bbox)
+
+    A = 3000.
+    exp_time = 1.0e5
+    redshift = 0.05
+
+    apec_model = XSpecThermalModel("bapec", 0.1, 11.5, 20000,
+                                   thermal_broad=True)
+    abs_model = XSpecAbsorbModel("TBabs", 0.02)
+
+    sphere = ds.sphere("c", (0.5, "Mpc"))
+
+    thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3)
+    photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
+                                      thermal_model)
+
+    for a, r in zip(arfs, rmfs):
+        arf = xray_data_dir+a
+        rmf = xray_data_dir+r
+        events = photons.project_photons("z", responses=[arf,rmf],
+                                         absorb_model=abs_model)
+        events.write_spectrum("beta_model_evt.pi", clobber=True)
+
+        s = xspec.Spectrum("beta_model_evt.pi")
+        s.ignore("**-0.5")
+        s.ignore("7.0-**")
+        m = xspec.Model("tbabs*bapec")
+        m.bapec.kT = 5.0
+        m.bapec.Abundanc = 0.25
+        m.bapec.norm = 1.0
+        m.bapec.Redshift = 0.05
+        m.bapec.Velocity = 100.0
+        m.TBabs.nH = 0.015
+
+        m.bapec.Velocity.frozen = False
+        m.bapec.Abundanc.frozen = False
+        m.bapec.Redshift.frozen = False
+        m.TBabs.nH.frozen = False
+
+        xspec.Fit.renorm()
+        xspec.Fit.nIterations = 100
+        xspec.Fit.perform()
+
+        assert np.abs(m.bapec.Redshift.values[0]-redshift) < 1.0
+        assert np.abs(m.bapec.kT.values[0]-6.0) < 1.0
+        assert np.abs(m.bapec.Abundanc.values[0]-0.3) < 1.0
+        assert np.abs(m.bapec.Velocity.values[0]-0.0) < 1.0
+        assert np.abs(m.TBabs.nH.values[0]-0.02) < 1.0
+
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)

diff -r 5438e77cc0c94ab2038bbc78462911de445f6b2e -r b207b3df1a4ca218e2c9e1a4af621c6d0d954864 yt/analysis_modules/photon_simulator/tests/test_cluster.py
--- a/yt/analysis_modules/photon_simulator/tests/test_cluster.py
+++ /dev/null
@@ -1,67 +0,0 @@
-"""
-Answer test the photon_simulator analysis module.
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.analysis_modules.photon_simulator.api import \
-    TableApecModel, TableAbsorbModel, \
-    ThermalPhotonModel, PhotonList
-from yt.config import ytcfg
-from yt.testing import requires_file
-from yt.utilities.answer_testing.framework import requires_ds, \
-    GenericArrayTest, data_dir_load
-import numpy as np
-
-def setup():
-    from yt.config import ytcfg
-    ytcfg["yt", "__withintesting"] = "True"
-
-test_dir = ytcfg.get("yt", "test_data_dir")
-
-ETC = test_dir+"/enzo_tiny_cosmology/DD0046/DD0046"
-APEC = test_dir+"/xray_data/atomdb_v2.0.2"
-TBABS = test_dir+"/xray_data/tbabs_table.h5"
-ARF = test_dir+"/xray_data/chandra_ACIS-S3_onaxis_arf.fits"
-RMF = test_dir+"/xray_data/chandra_ACIS-S3_onaxis_rmf.fits"
-
-@requires_ds(ETC)
-@requires_file(APEC)
-@requires_file(TBABS)
-@requires_file(ARF)
-@requires_file(RMF)
-def test_etc():
-
-    np.random.seed(seed=0x4d3d3d3)
-
-    ds = data_dir_load(ETC)
-    A = 3000.
-    exp_time = 1.0e5
-    redshift = 0.1
-
-    apec_model = TableApecModel(APEC, 0.1, 20.0, 2000)
-    tbabs_model = TableAbsorbModel(TBABS, 0.1)
-
-    sphere = ds.sphere("max", (0.5, "mpc"))
-
-    thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3)
-    photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
-                                      thermal_model)
-
-    events = photons.project_photons([0.0,0.0,1.0],
-                                     responses=[ARF,RMF],
-                                     absorb_model=tbabs_model)
-
-    def photons_test(): return photons.photons
-    def events_test(): return events.events
-
-    for test in [GenericArrayTest(ds, photons_test),
-                 GenericArrayTest(ds, events_test)]:
-        test_etc.__name__ = test.description
-        yield test

diff -r 5438e77cc0c94ab2038bbc78462911de445f6b2e -r b207b3df1a4ca218e2c9e1a4af621c6d0d954864 yt/analysis_modules/photon_simulator/tests/test_etc.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/tests/test_etc.py
@@ -0,0 +1,69 @@
+"""
+Answer test the photon_simulator analysis module.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.analysis_modules.photon_simulator.api import \
+    TableApecModel, TableAbsorbModel, \
+    ThermalPhotonModel, PhotonList
+from yt.config import ytcfg
+from yt.testing import requires_file
+from yt.utilities.answer_testing.framework import requires_ds, \
+    GenericArrayTest, data_dir_load
+import numpy as np
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+test_data_dir = ytcfg.get("yt", "test_data_dir")
+xray_data_dir = ytcfg.get("yt", "xray_data_dir")
+
+ETC = test_data_dir+"/enzo_tiny_cosmology/DD0046/DD0046"
+APEC = xray_data_dir+"/atomdb_v2.0.2"
+TBABS = xray_data_dir+"/tbabs_table.h5"
+ARF = xray_data_dir+"/aciss_aimpt_cy17.arf"
+RMF = xray_data_dir+"/aciss_aimpt_cy17.rmf"
+
+@requires_ds(ETC)
+@requires_file(APEC)
+@requires_file(TBABS)
+@requires_file(ARF)
+@requires_file(RMF)
+def test_etc():
+
+    np.random.seed(seed=0x4d3d3d3)
+
+    ds = data_dir_load(ETC)
+    A = 3000.
+    exp_time = 1.0e5
+    redshift = 0.1
+
+    apec_model = TableApecModel(APEC, 0.1, 20.0, 2000)
+    tbabs_model = TableAbsorbModel(TBABS, 0.1)
+
+    sphere = ds.sphere("max", (0.5, "Mpc"))
+
+    thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3)
+    photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
+                                      thermal_model)
+
+    events = photons.project_photons("z", responses=[ARF,RMF],
+                                     absorb_model=tbabs_model)
+
+    def photons_test():
+        return photons.photons
+    def events_test():
+        return events.events
+
+    for test in [GenericArrayTest(ds, photons_test),
+                 GenericArrayTest(ds, events_test)]:
+        test_etc.__name__ = test.description
+        yield test

diff -r 5438e77cc0c94ab2038bbc78462911de445f6b2e -r b207b3df1a4ca218e2c9e1a4af621c6d0d954864 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -60,7 +60,8 @@
     sketchfab_api_key = 'None',
     thread_field_detection = 'False',
     ignore_invalid_unit_operation_errors = 'False',
-    chunk_size = '1000'
+    chunk_size = '1000',
+    xray_data_dir = '/does/not/exist',
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten


https://bitbucket.org/yt_analysis/yt/commits/b6aaf54c2b78/
Changeset:   b6aaf54c2b78
Branch:      yt
User:        jzuhone
Date:        2015-10-20 16:17:12+00:00
Summary:     Replacing test_data_dir with xray_data_dir here
Affected #:  1 file

diff -r b207b3df1a4ca218e2c9e1a4af621c6d0d954864 -r b6aaf54c2b78fb2a848d92e7bf8f40fef3fc14a0 yt/analysis_modules/photon_simulator/tests/test_spectra.py
--- a/yt/analysis_modules/photon_simulator/tests/test_spectra.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_spectra.py
@@ -9,7 +9,7 @@
 def setup():
     ytcfg["yt", "__withintesting"] = "True"
 
-test_data_dir = ytcfg.get("yt", "test_data_dir")
+xray_data_dir = ytcfg.get("yt", "xray_data_dir")
 
 ds = fake_random_ds(64)
 
@@ -17,7 +17,7 @@
 @requires_module("astropy")
 def test_apec():
 
-    settings = {"APECROOT":test_data_dir+"/xray_data/apec_v2.0.2"}
+    settings = {"APECROOT":xray_data_dir+"/xray_data/apec_v2.0.2"}
     xmod = XSpecThermalModel("apec", 0.1, 10.0, 10000, thermal_broad=True,
                              settings=settings)
     xmod.prepare_spectrum(0.2)
@@ -25,10 +25,10 @@
     xcspec, xmspec = xmod.get_spectrum(6.0)
     spec1 = xcspec+0.3*xmspec
 
-    amod = TableApecModel(test_data_dir+"/xray_data", 0.1, 10.0, 
+    amod = TableApecModel(xray_data_dir+"/xray_data", 0.1, 10.0, 
                           10000, thermal_broad=True)
     amod.prepare_spectrum(0.2)
-    
+
     acspec, amspec = amod.get_spectrum(6.0)
     spec2 = acspec+0.3*amspec
 
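
get_spectrum returns the spectrum in two pieces, a metallicity-independent
continuum and the metal-line emission per unit solar abundance, so one
evaluation can be rescaled to any metallicity. A sketch of the combination
step in test_apec, with stand-in arrays in place of the real model output:

import numpy as np

Zmet = 0.3
# stand-ins for the (continuum, metal-line) pairs returned by
# xmod.get_spectrum(6.0) and amod.get_spectrum(6.0) above
xcspec, xmspec = np.ones(10), 0.1*np.ones(10)
acspec, amspec = np.ones(10), 0.1*np.ones(10)

spec1 = xcspec + Zmet*xmspec  # XSPEC-backed model
spec2 = acspec + Zmet*amspec  # table-based model
print(np.abs(spec1 - spec2).max())  # the two can be compared bin by bin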


https://bitbucket.org/yt_analysis/yt/commits/e916173dfdd5/
Changeset:   e916173dfdd5
Branch:      yt
User:        jzuhone
Date:        2015-10-21 03:59:52+00:00
Summary:     Making appropriate doc links
Affected #:  3 files

diff -r b6aaf54c2b78fb2a848d92e7bf8f40fef3fc14a0 -r e916173dfdd554920ec9f916e8172e562349fd1e doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -1,3 +1,5 @@
+.. _photon_simulator:
+
 Constructing Mock X-ray Observations
 ------------------------------------
 

diff -r b6aaf54c2b78fb2a848d92e7bf8f40fef3fc14a0 -r e916173dfdd554920ec9f916e8172e562349fd1e doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -114,6 +114,8 @@
   to stdout rather than stderr
 * ``skip_dataset_cache`` (default: ``'False'``): If true, automatic caching of datasets
   is turned off.
+* ``xray_data_dir`` (default: ``'/does/not/exist'``): The default path for data related
+  to the :ref:`photon simulator analysis module <photon_simulator>`. 
 
 .. _plugin-file:
 

diff -r b6aaf54c2b78fb2a848d92e7bf8f40fef3fc14a0 -r e916173dfdd554920ec9f916e8172e562349fd1e yt/analysis_modules/photon_simulator/tests/test_etc.py
--- a/yt/analysis_modules/photon_simulator/tests/test_etc.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_etc.py
@@ -60,6 +60,7 @@
 
     def photons_test():
         return photons.photons
+
     def events_test():
         return events.events
 


https://bitbucket.org/yt_analysis/yt/commits/7164c924b1ce/
Changeset:   7164c924b1ce
Branch:      yt
User:        jzuhone
Date:        2015-10-21 04:06:46+00:00
Summary:     Some more doc fixes
Affected #:  2 files

diff -r e916173dfdd554920ec9f916e8172e562349fd1e -r 7164c924b1cea80e8b31da33941591ee8811ad70 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -100,9 +100,8 @@
    `AtomDB <http://www.atomdb.org>`_ and get the files from the
    `xray_data <http://yt-project.org/data/xray_data.tar.gz>`_ auxiliary
    data package (see the ``xray_data`` `README <xray_data_README.html>`_ 
-   for details on the latter). Make sure that
-   in what follows you specify the full path to the locations of these
-   files.
+   for details on the latter). Make sure that in what follows you 
+   specify the full path to the locations of these files.
 
 To generate photons from this dataset, we have several different things
 we need to set up. The first is a standard yt data object. It could
@@ -199,7 +198,7 @@
 
 .. code:: python
 
-    A = 6000.
+    A = 3000.
     exp_time = 4.0e5
     redshift = 0.05
     cosmo = Cosmology()
@@ -300,7 +299,7 @@
 
 The second option, ``TableAbsorbModel``, takes as input an HDF5 file
 containing two datasets, ``"energy"`` (in keV), and ``"cross_section"``
-(in cm2), and the Galactic column density :math:`N_H`:
+(in :math:`cm^2`), and the Galactic column density :math:`N_H`:
 
 .. code:: python
 
@@ -309,7 +308,7 @@
 Now we're ready to project the photons. First, we choose a line-of-sight
 vector ``normal``. Second, we'll adjust the exposure time and the redshift.
 Third, we'll pass in the absorption ``SpectrumModel``. Fourth, we'll
-specify a ``sky_center`` in RA,DEC on the sky in degrees.
+specify a ``sky_center`` in RA and DEC on the sky in degrees.
 
 Also, we're going to convolve the photons with instrument ``responses``.
 For this, you need an ARF/RMF pair with matching energy bins. This is of
@@ -324,8 +323,8 @@
 
 .. code:: python
 
-    ARF = "chandra_ACIS-S3_onaxis_arf.fits"
-    RMF = "chandra_ACIS-S3_onaxis_rmf.fits"
+    ARF = "acisi_aimpt_cy17.arf"
+    RMF = "acisi_aimpt_cy17.rmf"
     normal = [0.0,0.0,1.0]
     events = photons.project_photons(normal, exp_time_new=2.0e5, redshift_new=0.07, dist_new=None, 
                                      absorb_model=abs_model, sky_center=(187.5,12.333), responses=[ARF,RMF], 
@@ -542,7 +541,7 @@
 
    sphere = ds.sphere("c", (1.0,"Mpc"))
        
-   A = 6000.
+   A = 3000.
    exp_time = 2.0e5
    redshift = 0.05
    cosmo = Cosmology()
@@ -557,7 +556,8 @@
 
 
    events = photons.project_photons([0.0,0.0,1.0], 
-                                    responses=["sim_arf.fits","sim_rmf.fits"], 
+                                    responses=["acisi_aimpt_cy17.arf",
+                                               "acisi_aimpt_cy17.rmf"], 
                                     absorb_model=abs_model,
                                     north_vector=[0.0,1.0,0.0])
 

diff -r e916173dfdd554920ec9f916e8172e562349fd1e -r 7164c924b1cea80e8b31da33941591ee8811ad70 doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -115,7 +115,8 @@
 * ``skip_dataset_cache`` (default: ``'False'``): If true, automatic caching of datasets
   is turned off.
 * ``xray_data_dir`` (default: ``'/does/not/exist'``): The default path for data related
-  to the :ref:`photon simulator analysis module <photon_simulator>`. 
+  to the :ref:`photon simulator analysis module <photon_simulator>`. Currently used only
+  for testing. 
 
 .. _plugin-file:
 


https://bitbucket.org/yt_analysis/yt/commits/649fb5a7fce5/
Changeset:   649fb5a7fce5
Branch:      yt
User:        jzuhone
Date:        2015-10-21 04:18:01+00:00
Summary:     If this is internal and only for testing, there's no sense in documenting it.
Affected #:  1 file

diff -r 7164c924b1cea80e8b31da33941591ee8811ad70 -r 649fb5a7fce54fb2d84344bb3cf2118f943d695d doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -114,9 +114,6 @@
   to stdout rather than stderr
 * ``skip_dataset_cache`` (default: ``'False'``): If true, automatic caching of datasets
   is turned off.
-* ``xray_data_dir`` (default: ``'/does/not/exist'``): The default path for data related
-  to the :ref:`photon simulator analysis module <photon_simulator>`. Currently used only
-  for testing. 
 
 .. _plugin-file:
 


https://bitbucket.org/yt_analysis/yt/commits/955a88157afc/
Changeset:   955a88157afc
Branch:      yt
User:        jzuhone
Date:        2015-10-21 12:40:38+00:00
Summary:     More updates to tests
Affected #:  4 files

diff -r 649fb5a7fce54fb2d84344bb3cf2118f943d695d -r 955a88157afcf3646ef4f58607ac8b249bb85949 yt/analysis_modules/photon_simulator/tests/test_beta_model.py
--- a/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
@@ -101,7 +101,7 @@
     for a, r in zip(rmfs, arfs):
         arf = xray_data_dir+a
         rmf = xray_data_dir+r
-        events = photons.project_photons("z", responses=[arf,rmf],
+        events = photons.project_photons([1.0,-1.0,0.5], responses=[arf,rmf],
                                          absorb_model=abs_model)
         events.write_spectrum("beta_model_evt.pi", clobber=True)
 

diff -r 649fb5a7fce54fb2d84344bb3cf2118f943d695d -r 955a88157afcf3646ef4f58607ac8b249bb85949 yt/analysis_modules/photon_simulator/tests/test_etc.py
--- a/yt/analysis_modules/photon_simulator/tests/test_etc.py
+++ /dev/null
@@ -1,70 +0,0 @@
-"""
-Answer test the photon_simulator analysis module.
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.analysis_modules.photon_simulator.api import \
-    TableApecModel, TableAbsorbModel, \
-    ThermalPhotonModel, PhotonList
-from yt.config import ytcfg
-from yt.testing import requires_file
-from yt.utilities.answer_testing.framework import requires_ds, \
-    GenericArrayTest, data_dir_load
-import numpy as np
-
-def setup():
-    from yt.config import ytcfg
-    ytcfg["yt", "__withintesting"] = "True"
-
-test_data_dir = ytcfg.get("yt", "test_data_dir")
-xray_data_dir = ytcfg.get("yt", "xray_data_dir")
-
-ETC = test_data_dir+"/enzo_tiny_cosmology/DD0046/DD0046"
-APEC = xray_data_dir+"/atomdb_v2.0.2"
-TBABS = xray_data_dir+"/tbabs_table.h5"
-ARF = xray_data_dir+"/aciss_aimpt_cy17.arf"
-RMF = xray_data_dir+"/aciss_aimpt_cy17.rmf"
-
-@requires_ds(ETC)
-@requires_file(APEC)
-@requires_file(TBABS)
-@requires_file(ARF)
-@requires_file(RMF)
-def test_etc():
-
-    np.random.seed(seed=0x4d3d3d3)
-
-    ds = data_dir_load(ETC)
-    A = 3000.
-    exp_time = 1.0e5
-    redshift = 0.1
-
-    apec_model = TableApecModel(APEC, 0.1, 20.0, 2000)
-    tbabs_model = TableAbsorbModel(TBABS, 0.1)
-
-    sphere = ds.sphere("max", (0.5, "Mpc"))
-
-    thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3)
-    photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
-                                      thermal_model)
-
-    events = photons.project_photons("z", responses=[ARF,RMF],
-                                     absorb_model=tbabs_model)
-
-    def photons_test():
-        return photons.photons
-
-    def events_test():
-        return events.events
-
-    for test in [GenericArrayTest(ds, photons_test),
-                 GenericArrayTest(ds, events_test)]:
-        test_etc.__name__ = test.description
-        yield test

diff -r 649fb5a7fce54fb2d84344bb3cf2118f943d695d -r 955a88157afcf3646ef4f58607ac8b249bb85949 yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -0,0 +1,71 @@
+"""
+Answer test the photon_simulator analysis module.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.analysis_modules.photon_simulator.api import \
+    TableApecModel, TableAbsorbModel, \
+    ThermalPhotonModel, PhotonList
+from yt.config import ytcfg
+from yt.testing import requires_file
+from yt.utilities.answer_testing.framework import requires_ds, \
+    GenericArrayTest, data_dir_load
+import numpy as np
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+test_data_dir = ytcfg.get("yt", "test_data_dir")
+xray_data_dir = ytcfg.get("yt", "xray_data_dir")
+
+gslr = test_data_dir+"/GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
+APEC = xray_data_dir+"/atomdb_v2.0.2"
+TBABS = xray_data_dir+"/tbabs_table.h5"
+ARF = xray_data_dir+"/aciss_aimpt_cy17.arf"
+RMF = xray_data_dir+"/aciss_aimpt_cy17.rmf"
+
+@requires_ds(gslr)
+@requires_file(APEC)
+@requires_file(TBABS)
+@requires_file(ARF)
+@requires_file(RMF)
+def test_sloshing():
+
+    np.random.seed(seed=0x4d3d3d3)
+
+    ds = data_dir_load(gslr)
+    A = 2000.
+    exp_time = 1.0e5
+    redshift = 0.1
+
+    apec_model = TableApecModel(APEC, 0.1, 20.0, 2000)
+    tbabs_model = TableAbsorbModel(TBABS, 0.1)
+
+    sphere = ds.sphere("c", (0.1, "Mpc"))
+
+    thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3)
+    photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
+                                      thermal_model)
+
+    events = photons.project_photons("z", responses=[ARF,RMF],
+                                     absorb_model=tbabs_model,
+                                     convolve_energies=True)
+
+    def photons_test():
+        return photons.photons
+
+    def events_test():
+        return events.events
+
+    for test in [GenericArrayTest(ds, photons_test),
+                 GenericArrayTest(ds, events_test)]:
+        test_sloshing.__name__ = test.description
+        yield test

diff -r 649fb5a7fce54fb2d84344bb3cf2118f943d695d -r 955a88157afcf3646ef4f58607ac8b249bb85949 yt/analysis_modules/photon_simulator/tests/test_spectra.py
--- a/yt/analysis_modules/photon_simulator/tests/test_spectra.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_spectra.py
@@ -17,7 +17,7 @@
 @requires_module("astropy")
 def test_apec():
 
-    settings = {"APECROOT":xray_data_dir+"/xray_data/apec_v2.0.2"}
+    settings = {"APECROOT":xray_data_dir+"/apec_v2.0.2"}
     xmod = XSpecThermalModel("apec", 0.1, 10.0, 10000, thermal_broad=True,
                              settings=settings)
     xmod.prepare_spectrum(0.2)
@@ -25,7 +25,7 @@
     xcspec, xmspec = xmod.get_spectrum(6.0)
     spec1 = xcspec+0.3*xmspec
 
-    amod = TableApecModel(xray_data_dir+"/xray_data", 0.1, 10.0, 
+    amod = TableApecModel(xray_data_dir, 0.1, 10.0, 
                           10000, thermal_broad=True)
     amod.prepare_spectrum(0.2)
 

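The switch to the off-axis normal [1.0,-1.0,0.5] in test_beta_model means
only part of the bulk z-velocity lies along the line of sight; the observed
shift scales with the z-component of the normalized normal. A quick check of
that geometry (pure NumPy, values taken from the test):

import numpy as np

normal = np.array([1.0, -1.0, 0.5])
normal /= np.sqrt(np.dot(normal, normal))  # unit line-of-sight vector

v_bulk = np.array([0.0, 0.0, 4.0e7])  # cm/s, the test's v_shift
v_los = np.dot(v_bulk, normal)        # line-of-sight component
print(v_los/1.0e5)                    # ~133 of the 400 km/s bulk shift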

https://bitbucket.org/yt_analysis/yt/commits/ab68b2728771/
Changeset:   ab68b2728771
Branch:      yt
User:        jzuhone
Date:        2015-11-02 21:50:37+00:00
Summary:     Merge
Affected #:  59 files

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -374,6 +374,17 @@
 "Gas_smoothed_Temperature")``, which in most cases would be aliased to the
 field ``("gas", "temperature")`` for convenience.
 
+Other smoothing kernels besides the cubic spline one are available through a
+keyword argument ``kernel_name`` of the method ``add_smoothed_particle_field``.
+Current available kernel names include:
+
+* ``cubic``, ``quartic``, and ``quintic`` - spline kernels.
+* ``wendland2``, ``wendland4`` and ``wendland6`` - Wendland kernels.
+
+The added smoothed particle field can be accessed by
+``("deposit", "particletype_kernelname_smoothed_fieldname")`` (except for the
+cubic spline kernel, which obeys the naming scheme given above).
+
 Computing the Nth Nearest Neighbor
 ----------------------------------
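
The kernel documentation above translates into usage like the following
sketch, assuming an SPH (e.g. Gadget) snapshot; the file, particle type, and
field names are placeholders:

import yt

ds = yt.load("snapshot_033/snap_033.0.hdf5")  # placeholder SPH dataset
ds.add_smoothed_particle_field(("PartType0", "Temperature"),
                               kernel_name="wendland4")
ad = ds.all_data()
# non-cubic kernels carry the kernel name in the deposited field name
print(ad["deposit", "PartType0_wendland4_smoothed_Temperature"])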
 

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -54,10 +54,13 @@
  
 .. code-block:: python
 
-   frb.export_hdf5("my_images.h5", fields=["density","temperature"])
+   frb.save_as_dataset("my_images.h5", fields=["density","temperature"])
    frb.export_fits("my_images.fits", fields=["density","temperature"],
                    clobber=True, units="kpc")
 
+In the HDF5 case, the created file can be reloaded just like a regular dataset with
+``yt.load`` and will, itself, be a first-class dataset.  For more information on
+this, see :ref:`saving-grid-data-containers`.
 In the FITS case, there is an option for setting the ``units`` of the coordinate system in
 the file. If you want to overwrite a file with the same name, set ``clobber=True``. 
 

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 doc/source/analyzing/index.rst
--- a/doc/source/analyzing/index.rst
+++ b/doc/source/analyzing/index.rst
@@ -20,5 +20,6 @@
    units/index
    filtering
    generating_processed_data
+   saving_data
    time_series_analysis
    parallel_computation

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -457,69 +457,9 @@
 ---------------------------
 
 Often, when operating interactively or via the scripting interface, it is
-convenient to save an object or multiple objects out to disk and then restart
-the calculation later.  For example, this is useful after clump finding 
-(:ref:`clump_finding`), which can be very time consuming.  
-Typically, the save and load operations are used on 3D data objects.  yt
-has a separate set of serialization operations for 2D objects such as
-projections.
-
-yt will save out objects to disk under the presupposition that the
-construction of the objects is the difficult part, rather than the generation
-of the data -- this means that you can save out an object as a description of
-how to recreate it in space, but not the actual data arrays affiliated with
-that object.  The information that is saved includes the dataset off of
-which the object "hangs."  It is this piece of information that is the most
-difficult; the object, when reloaded, must be able to reconstruct a dataset
-from whatever limited information it has in the save file.
-
-You can save objects to an output file using the function 
-:func:`~yt.data_objects.index.save_object`: 
-
-.. code-block:: python
-
-   import yt
-   ds = yt.load("my_data")
-   sp = ds.sphere([0.5, 0.5, 0.5], (10.0, 'kpc'))
-   sp.save_object("sphere_name", "save_file.cpkl")
-
-This will store the object as ``sphere_name`` in the file
-``save_file.cpkl``, which will be created or accessed using the standard
-python module :mod:`shelve`.  
-
-To re-load an object saved this way, you can use the shelve module directly:
-
-.. code-block:: python
-
-   import yt
-   import shelve
-   ds = yt.load("my_data") 
-   saved_fn = shelve.open("save_file.cpkl")
-   ds, sp = saved_fn["sphere_name"]
-
-Additionally, we can store multiple objects in a single shelve file, so we 
-have to call the sphere by name.
-
-For certain data objects such as projections, serialization can be performed
-automatically if ``serialize`` option is set to ``True`` in :ref:`the
-configuration file <configuration-file>` or set directly in the script:
-
-.. code-block:: python
-
-   from yt.config import ytcfg; ytcfg["yt", "serialize"] = "True"
-
-.. note:: Use serialization with caution. Enabling serialization means that
-   once a projection of a dataset has been created (and stored in the .yt file
-   in the same directory), any subsequent changes to that dataset will be
-   ignored when attempting to create the same projection. So if you take a
-   density projection of your dataset in the 'x' direction, then somehow tweak
-   that dataset significantly, and take the density projection again, yt will
-   default to finding the original projection and 
-   :ref:`not your new one <faq-old-data>`.
-
-.. note:: It's also possible to use the standard :mod:`cPickle` module for
-          loading and storing objects -- so in theory you could even save a
-          list of objects!
-
-This method works for clumps, as well, and the entire clump index will be
-stored and restored upon load.
+convenient to save an object to disk and then restart the calculation later or
+transfer the data from a container to another filesystem.  This can be
+particularly useful when working with extremely large datasets.  Field data
+can be saved to disk in a format that allows for it to be reloaded just like
+a regular dataset.  For information on how to do this, see
+:ref:`saving-data-containers`.

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 doc/source/analyzing/saving_data.rst
--- /dev/null
+++ b/doc/source/analyzing/saving_data.rst
@@ -0,0 +1,243 @@
+.. _saving_data:
+
+Saving Reloadable Data
+======================
+
+Most of the data loaded into or generated with yt can be saved to a
+format that can be reloaded as a first-class dataset.  This includes
+the following:
+
+  * geometric data containers (regions, spheres, disks, rays, etc.)
+
+  * grid data containers (covering grids, arbitrary grids, fixed
+    resolution buffers)
+
+  * spatial plots (projections, slices, cutting planes)
+
+  * profiles
+
+  * generic array data
+
+In the case of projections, slices, and profiles, reloaded data can be
+used to remake plots.  For information on this, see :ref:`remaking-plots`.
+
+.. _saving-data-containers:
+
+Geometric Data Containers
+-------------------------
+
+Data from geometric data containers can be saved with the
+:func:`~yt.data_objects.data_containers.save_as_dataset` function.
+
+.. notebook-cell::
+
+   import yt
+   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+
+   sphere = ds.sphere([0.5]*3, (10, "Mpc"))
+   fn = sphere.save_as_dataset(fields=["density", "particle_mass"])
+   print (fn)
+
+This function will return the name of the file to which the dataset
+was saved.  The filename will be a combination of the name of the
+original dataset and the type of data container.  Optionally, a
+specific filename can be given with the ``filename`` keyword.  If no
+fields are given, the fields that have previously been queried will
+be saved.
+
+The newly created dataset can be loaded like all other supported
+data through ``yt.load``.  Once loaded, field data can be accessed
+through the traditional data containers or through the ``data``
+attribute, which will be a data container configured like the
+original data container used to make the dataset.  Grid data is
+accessed by the ``grid`` data type and particle data is accessed
+with the original particle type.  As with the original dataset, grid
+positions and cell sizes are accessible with, for example,
+("grid", "x") and ("grid", "dx").  Particle positions are
+accessible as (<particle_type>, "particle_position_x").  All original
+simulation parameters are accessible in the ``parameters``
+dictionary, normally associated with all datasets.
+
+.. code-block:: python
+
+   sphere_ds = yt.load("DD0046_sphere.h5")
+
+   # use the original data container
+   print (sphere_ds.data["grid", "density"])
+
+   # create a new data container
+   ad = sphere_ds.all_data()
+
+   # grid data
+   print (ad["grid", "density"])
+   print (ad["grid", "x"])
+   print (ad["grid", "dx"])
+
+   # particle data
+   print (ad["all", "particle_mass"])
+   print (ad["all", "particle_position_x"])
+
+Note that because field data queried from geometric containers is
+returned as unordered 1D arrays, data container datasets are treated,
+effectively, as particle data.  Thus, 3D indexing of grid data from
+these datasets is not possible.
+
+.. _saving-grid-data-containers:
+
+Grid Data Containers
+--------------------
+
+Data containers that return field data as multidimensional arrays
+can be saved so as to preserve this type of access.  This includes
+covering grids, arbitrary grids, and fixed resolution buffers.
+Saving data from these containers works just as with geometric data
+containers.  Field data can be accessed through geometric data
+containers.
+
+.. code-block:: python
+
+   cg = ds.covering_grid(level=0, left_edge=[0.25]*3, dims=[16]*3)
+   fn = cg.save_as_dataset(fields=["density", "particle_mass"])
+
+   cg_ds = yt.load(fn)
+   ad = cg_ds.all_data()
+   print (ad["grid", "density"])
+
+Multidimensional indexing of field data is also available through
+the ``data`` attribute.
+
+.. code-block:: python
+
+   print (cg_ds.data["grid", "density"])
+
+Fixed resolution buffers work just the same.
+
+.. code-block:: python
+
+   my_proj = ds.proj("density", "x", weight_field="density")
+   frb = my_proj.to_frb(1.0, (800, 800))
+   fn = frb.save_as_dataset(fields=["density"])
+   frb_ds = yt.load(fn)
+   print (frb_ds.data["density"])
+
+.. _saving-spatial-plots:
+
+Spatial Plots
+-------------
+
+Spatial plots, such as projections, slices, and off-axis slices
+(cutting planes) can also be saved and reloaded.
+
+.. code-block:: python
+
+   proj = ds.proj("density", "x", weight_field="density")
+   proj.save_as_dataset()
+
+Once reloaded, they can be handed to their associated plotting
+functions to make images.
+
+.. code-block:: python
+
+   proj_ds = yt.load("DD0046_proj.h5")
+   p = yt.ProjectionPlot(proj_ds, "x", "density",
+                         weight_field="density")
+   p.save()
+
+.. _saving-profile-data:
+
+Profiles
+--------
+
+Profiles created with :func:`~yt.data_objects.profiles.create_profile`,
+:class:`~yt.visualization.profile_plotter.ProfilePlot`, and
+:class:`~yt.visualization.profile_plotter.PhasePlot` can be saved with
+the :func:`~yt.data_objects.profiles.save_as_dataset` function, which
+works just as above.  Profile datasets are a type of non-spatial grid
+datasets.  Geometric selection is not possible, but data can be
+accessed through the ``.data`` attribute.
+
+.. notebook-cell::
+
+   import yt
+   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+   ad = ds.all_data()
+
+   profile_2d = yt.create_profile(ad, ["density", "temperature"],
+                                  "cell_mass", weight_field=None,
+                                  n_bins=(128, 128))
+   profile_2d.save_as_dataset()
+
+   prof_2d_ds = yt.load("DD0046_Profile2D.h5")
+   print (prof_2d_ds.data["cell_mass"])
+
+The x, y (if at least 2D), and z (if 3D) bin fields can be accessed as 1D
+arrays with "x", "y", and "z".
+
+.. code-block:: python
+
+   print (prof_2d_ds.data["x"])
+
+The bin fields can also be returned with the same shape as the profile
+data by accessing them with their original names.  This allows for
+boolean masking of profile data using the bin fields.
+
+.. code-block:: python
+
+   # density is the x bin field
+   print (prof_2d_ds.data["density"])
+
+For 1, 2, and 3D profile datasets, a fake profile object will be
+constructed by accessing the ".profile" attribute.  This is used
+primarily in the case of 1 and 2D profiles to create figures using
+:class:`~yt.visualization.profile_plotter.ProfilePlot` and
+:class:`~yt.visualization.profile_plotter.PhasePlot`.
+
+.. code-block:: python
+
+   p = yt.PhasePlot(prof_2d_ds.data, "density", "temperature",
+                    "cell_mass", weight_field=None)
+   p.save()
+
+.. _saving-array-data:
+
+Generic Array Data
+------------------
+
+Generic arrays can be saved and reloaded as non-spatial data using
+the :func:`~yt.frontends.ytdata.utilities.save_as_dataset` function,
+also available as ``yt.save_as_dataset``.  As with profiles, geometric
+selection is not possible, but the data can be accessed through the
+``.data`` attribute.
+
+.. notebook-cell::
+
+   import yt
+   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+
+   region = ds.box([0.25]*3, [0.75]*3)
+   sphere = ds.sphere(ds.domain_center, (10, "Mpc"))
+   my_data = {}
+   my_data["region_density"] = region["density"]
+   my_data["sphere_density"] = sphere["density"]
+   yt.save_as_dataset(ds, "test_data.h5", my_data)
+
+   array_ds = yt.load("test_data.h5")
+   print (array_ds.data["region_density"])
+   print (array_ds.data["sphere_density"])
+
+Array data can be saved with or without a dataset loaded.  If no
+dataset has been loaded, a fake dataset can be provided as a
+dictionary.
+
+.. notebook-cell::
+
+   import numpy as np
+   import yt
+
+   my_data = {"density": yt.YTArray(np.random.random(10), "g/cm**3"),
+              "temperature": yt.YTArray(np.random.random(10), "K")}
+   fake_ds = {"current_time": yt.YTQuantity(10, "Myr")}
+   yt.save_as_dataset(fake_ds, "random_data.h5", my_data)
+
+   new_ds = yt.load("random_data.h5")
+   print (new_ds.data["density"])

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -302,18 +302,19 @@
 .. code-block:: bash
 
    $ cd $YT_HG
-   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store frontends.tipsy
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store --answer-name=local-tipsy frontends.tipsy
 
 This command will create a set of local answers from the tipsy frontend tests
 and store them in ``$HOME/Documents/test`` (this can but does not have to be the
 same directory as the ``test_data_dir`` configuration variable defined in your
-``.yt/config`` file). To run the tipsy frontend's answer tests using a different
-yt changeset, update to that changeset, recompile if necessary, and run the
-tests using the following command:
+``.yt/config`` file) in a file named ``local-tipsy``. To run the tipsy
+frontend's answer tests using a different yt changeset, update to that
+changeset, recompile if necessary, and run the tests using the following
+command:
 
 .. code-block:: bash
 
-   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test frontends.tipsy
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-name=local-tipsy frontends.tipsy
 
 The results from a nose testing session are pretty straightforward to
 understand, the results for each test are printed directly to STDOUT.  If a test

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -290,14 +290,14 @@
 
 .. code-block:: bash
 
-  $ pip install -r requirements.txt
+  $ pip install numpy matplotlib cython h5py nose sympy
 
 If you're using IPython notebooks, you can install its dependencies
 with ``pip`` as well:
 
 .. code-block:: bash
 
-  $ pip install -r optional-requirements.txt
+  $ pip install ipython[notebook]
 
 From here, you can use ``pip`` (which comes with ``Python``) to install the latest
 stable version of yt:

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -72,6 +72,7 @@
 .. autosummary::
    :toctree: generated/
 
+   ~yt.data_objects.data_containers.YTDataContainer
    ~yt.data_objects.data_containers.YTSelectionContainer
    ~yt.data_objects.data_containers.YTSelectionContainer0D
    ~yt.data_objects.data_containers.YTSelectionContainer1D
@@ -383,6 +384,28 @@
    ~yt.frontends.stream.io.IOHandlerStreamOctree
    ~yt.frontends.stream.io.StreamParticleIOHandler
 
+ytdata
+^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.frontends.ytdata.data_structures.YTDataContainerDataset
+   ~yt.frontends.ytdata.data_structures.YTSpatialPlotDataset
+   ~yt.frontends.ytdata.data_structures.YTGridDataset
+   ~yt.frontends.ytdata.data_structures.YTGridHierarchy
+   ~yt.frontends.ytdata.data_structures.YTGrid
+   ~yt.frontends.ytdata.data_structures.YTNonspatialDataset
+   ~yt.frontends.ytdata.data_structures.YTNonspatialHierarchy
+   ~yt.frontends.ytdata.data_structures.YTNonspatialGrid
+   ~yt.frontends.ytdata.data_structures.YTProfileDataset
+   ~yt.frontends.ytdata.fields.YTDataContainerFieldInfo
+   ~yt.frontends.ytdata.fields.YTGridFieldInfo
+   ~yt.frontends.ytdata.io.IOHandlerYTDataContainerHDF5
+   ~yt.frontends.ytdata.io.IOHandlerYTGridHDF5
+   ~yt.frontends.ytdata.io.IOHandlerYTSpatialPlotHDF5
+   ~yt.frontends.ytdata.io.IOHandlerYTNonspatialhdf5
+
 Loading Data
 ------------
 
@@ -739,6 +762,7 @@
    :toctree: generated/
 
    ~yt.convenience.load
+   ~yt.frontends.ytdata.utilities.save_as_dataset
    ~yt.data_objects.static_output.Dataset.all_data
    ~yt.data_objects.static_output.Dataset.box
    ~yt.funcs.deprecate

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -1284,6 +1284,81 @@
    bananas_Slice_z_kT.eps
    bananas_Slice_z_density.eps
 
+.. _remaking-plots:
+
+Remaking Figures from Plot Datasets
+-----------------------------------
+
+When working with datasets that are too large to be stored locally,
+making figures just right can be cumbersome as it requires continuously
+moving images somewhere they can be viewed.  However, image creation is
+actually a two-step process: first creating the projection, slice,
+or profile object, and then converting that object into an actual image.
+Fortunately, the hard part (creating slices, projections, profiles) can
+be separated from the easy part (generating images).  The intermediate
+slice, projection, and profile objects can be saved as reloadable
+datasets, then handed back to the plotting machinery discussed here.
+
+For slices and projections, the savable object is associated with the
+plot object as ``data_source``.  This can be saved with the
+:func:`~yt.data_objects.data_containers.save_as_dataset` function.  For
+more information, see :ref:`saving_data`.
+
+.. code-block:: python
+
+   p = yt.ProjectionPlot(ds, "x", "density",
+                         weight_field="density")
+   fn = p.data_source.save_as_dataset()
+
+This function will optionally take a ``filename`` keyword that follows
+the same logic as discussed above in :ref:`saving_plots`.  The filename
+to which the dataset was written will be returned.
+
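+For example, a minimal sketch passing an explicit (hypothetical)
+filename:
+
+.. code-block:: python
+
+   fn = p.data_source.save_as_dataset(filename="proj_density.h5")
+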
+Once saved, this file can be reloaded completely independently of the
+original dataset and given back to the plot function with the same
+arguments.  One can now continue to tweak the figure to one's liking.
+
+.. code-block:: python
+
+   new_ds = yt.load(fn)
+   new_p = yt.ProjectionPlot(new_ds, "x", "density",
+                             weight_field="density")
+   new_p.save()
+
+The same functionality is available for profile and phase plots.  In
+each case, a special data container, ``data``, is given to the plotting
+functions.
+
+For ``ProfilePlot``:
+
+.. code-block:: python
+
+   ad = ds.all_data()
+   p1 = yt.ProfilePlot(ad, "density", "temperature",
+                       weight_field="cell_mass")
+
+   # note that ProfilePlots can hold a list of profiles
+   fn = p1.profiles[0].save_as_dataset()
+
+   new_ds = yt.load(fn)
+   p2 = yt.ProfilePlot(new_ds.data, "density", "temperature",
+                       weight_field="cell_mass")
+   p2.save()
+
+For ``PhasePlot``:
+
+.. code-block:: python
+
+   ad = ds.all_data()
+   p1 = yt.PhasePlot(ad, "density", "temperature",
+                     "cell_mass", weight_field=None)
+   fn = p1.profile.save_as_dataset()
+
+   new_ds = yt.load(fn)
+   p2 = yt.PhasePlot(new_ds.data, "density", "temperature",
+                     "cell_mass", weight_field=None)
+   p2.save()
+
 .. _eps-writer:
 
 Publication-ready Figures

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -138,6 +138,9 @@
     load_particles, load_hexahedral_mesh, load_octree, \
     hexahedral_connectivity
 
+from yt.frontends.ytdata.api import \
+    save_as_dataset
+
 # For backwards compatibility
 GadgetDataset = frontends.gadget.GadgetDataset
 GadgetStaticOutput = deprecated_class(GadgetDataset)

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -20,6 +20,7 @@
 
 from .absorption_line import tau_profile
 
+from yt.convenience import load
 from yt.funcs import get_pbar, mylog
 from yt.units.yt_array import YTArray, YTQuantity
 from yt.utilities.physical_constants import \
@@ -121,8 +122,8 @@
         Parameters
         ----------
 
-        input_file : string
-           path to input ray data.
+        input_file : string or dataset
+           path to input ray data or a loaded ray dataset
         output_file : optional, string
            path for output file.  File formats are chosen based on the
            filename extension.  ``.h5`` for hdf5, ``.fits`` for fits,
@@ -156,7 +157,6 @@
 
         input_fields = ['dl', 'redshift', 'temperature']
         field_units = {"dl": "cm", "redshift": "", "temperature": "K"}
-        field_data = {}
         if use_peculiar_velocity:
             input_fields.append('velocity_los')
             input_fields.append('redshift_eff')
@@ -167,10 +167,11 @@
                 input_fields.append(feature['field_name'])
                 field_units[feature["field_name"]] = "cm**-3"
 
-        input = h5py.File(input_file, 'r')
-        for field in input_fields:
-            field_data[field] = YTArray(input[field].value, field_units[field])
-        input.close()
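+        # accept either a filename to load or an already-loaded ray dataset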
+        if isinstance(input_file, str):
+            input_ds = load(input_file)
+        else:
+            input_ds = input_file
+        field_data = input_ds.all_data()
 
         self.tau_field = np.zeros(self.lambda_bins.size)
         self.spectrum_line_list = []
@@ -337,6 +338,8 @@
         """
         Write out list of spectral lines.
         """
+        if filename is None:
+            return
         mylog.info("Writing spectral line list: %s." % filename)
         self.spectrum_line_list.sort(key=lambda obj: obj['wavelength'])
         f = open(filename, 'w')

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -13,19 +13,20 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 
 from yt.analysis_modules.cosmological_observation.cosmology_splice import \
     CosmologySplice
 from yt.convenience import \
     load
-from yt.funcs import \
-    mylog
+from yt.frontends.ytdata.utilities import \
+    save_as_dataset
 from yt.units.yt_array import \
     YTArray
 from yt.utilities.cosmology import \
     Cosmology
+from yt.utilities.logger import \
+    ytLogger as mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects, \
     parallel_root_only
@@ -48,7 +49,7 @@
     synthetic QSO lines of sight.
 
     Light rays can also be made from single datasets.
-    
+
     Once the LightRay object is set up, use LightRay.make_light_ray to
     begin making rays.  Different randomizations can be created with a
     single object by providing different random seeds to make_light_ray.
@@ -58,17 +59,17 @@
     parameter_filename : string
         The path to the simulation parameter file or dataset.
     simulation_type : optional, string
-        The simulation type.  If None, the first argument is assumed to 
+        The simulation type.  If None, the first argument is assumed to
         refer to a single dataset.
         Default: None
     near_redshift : optional, float
-        The near (lowest) redshift for a light ray containing multiple 
-        datasets.  Do not use is making a light ray from a single 
+        The near (lowest) redshift for a light ray containing multiple
+        datasets.  Do not use when making a light ray from a single
         dataset.
         Default: None
     far_redshift : optional, float
-        The far (highest) redshift for a light ray containing multiple 
-        datasets.  Do not use is making a light ray from a single 
+        The far (highest) redshift for a light ray containing multiple
+        datasets.  Do not use when making a light ray from a single
         dataset.
         Default: None
     use_minimum_datasets : optional, bool
@@ -98,11 +99,11 @@
         datasets for time series.
         Default: True.
     find_outputs : optional, bool
-        Whether or not to search for datasets in the current 
+        Whether or not to search for datasets in the current
         directory.
         Default: False.
     load_kwargs : optional, dict
-        Optional dictionary of kwargs to be passed to the "load" 
+        Optional dictionary of kwargs to be passed to the "load"
         function, appropriate for use of certain frontends.  E.g.
         Tipsy using "bounding_box"
         Gadget using "unit_base", etc.
@@ -129,8 +130,9 @@
         self.light_ray_solution = []
         self._data = {}
 
-        # Make a light ray from a single, given dataset.        
+        # Make a light ray from a single, given dataset.
         if simulation_type is None:
+            self.simulation_type = simulation_type
             ds = load(parameter_filename, **self.load_kwargs)
             if ds.cosmological_simulation:
                 redshift = ds.current_redshift
@@ -156,7 +158,7 @@
                                            time_data=time_data,
                                            redshift_data=redshift_data)
 
-    def _calculate_light_ray_solution(self, seed=None, 
+    def _calculate_light_ray_solution(self, seed=None,
                                       start_position=None, end_position=None,
                                       trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
@@ -172,9 +174,9 @@
             if not ((end_position is None) ^ (trajectory is None)):
                 raise RuntimeError("LightRay Error: must specify either end_position " + \
                                    "or trajectory, but not both.")
-            self.light_ray_solution[0]['start'] = np.array(start_position)
+            self.light_ray_solution[0]['start'] = np.asarray(start_position)
             if end_position is not None:
-                self.light_ray_solution[0]['end'] = np.array(end_position)
+                self.light_ray_solution[0]['end'] = np.asarray(end_position)
             else:
                 # assume trajectory given as r, theta, phi
                 if len(trajectory) != 3:
@@ -185,12 +187,12 @@
                                 np.sin(phi) * np.sin(theta),
                                 np.cos(theta)])
             self.light_ray_solution[0]['traversal_box_fraction'] = \
-              vector_length(self.light_ray_solution[0]['start'], 
+              vector_length(self.light_ray_solution[0]['start'],
                             self.light_ray_solution[0]['end'])
 
         # the normal way (random start positions and trajectories for each dataset)
         else:
-            
+
             # For box coherence, keep track of effective depth travelled.
             box_fraction_used = 0.0
 
@@ -285,15 +287,15 @@
             Default: None.
         trajectory : optional, list of floats
             Used only if creating a light ray from a single dataset.
-            The (r, theta, phi) direction of the light ray.  Use either 
+            The (r, theta, phi) direction of the light ray.  Use either
             end_position or trajectory, not both.
             Default: None.
         fields : optional, list
             A list of fields for which to get data.
             Default: None.
         setup_function : optional, callable, accepts a ds
-            This function will be called on each dataset that is loaded 
-            to create the light ray.  For, example, this can be used to 
+            This function will be called on each dataset that is loaded
+            to create the light ray.  For example, this can be used to
             add new derived fields.
             Default: None.
         solution_filename : optional, string
@@ -308,13 +310,13 @@
             each point in the ray.
             Default: True.
         redshift : optional, float
-            Used with light rays made from single datasets to specify a 
-            starting redshift for the ray.  If not used, the starting 
-            redshift will be 0 for a non-cosmological dataset and 
+            Used with light rays made from single datasets to specify a
+            starting redshift for the ray.  If not used, the starting
+            redshift will be 0 for a non-cosmological dataset and
             the dataset redshift for a cosmological dataset.
             Default: None.
         njobs : optional, int
-            The number of parallel jobs over which the segments will 
+            The number of parallel jobs over which the segments will
             be split.  Choose -1 for one processor per segment.
             Default: -1.
 
@@ -322,7 +324,7 @@
         --------
 
         Make a light ray from multiple datasets:
-        
+
         >>> import yt
         >>> from yt.analysis_modules.cosmological_observation.light_ray.api import \
         ...     LightRay
@@ -348,12 +350,12 @@
         ...                       data_filename="my_ray.h5",
         ...                       fields=["temperature", "density"],
         ...                       get_los_velocity=True)
-        
+
         """
 
         # Calculate solution.
-        self._calculate_light_ray_solution(seed=seed, 
-                                           start_position=start_position, 
+        self._calculate_light_ray_solution(seed=seed,
+                                           start_position=start_position,
                                            end_position=end_position,
                                            trajectory=trajectory,
                                            filename=solution_filename)
@@ -364,6 +366,8 @@
         data_fields = fields[:]
         all_fields = fields[:]
         all_fields.extend(['dl', 'dredshift', 'redshift'])
+        all_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
+        data_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
         if get_los_velocity:
             all_fields.extend(['velocity_x', 'velocity_y',
                                'velocity_z', 'velocity_los', 'redshift_eff'])
@@ -399,10 +403,15 @@
             if not ds.cosmological_simulation:
                 next_redshift = my_segment["redshift"]
             elif self.near_redshift == self.far_redshift:
+                if isinstance(my_segment["traversal_box_fraction"], YTArray):
+                    segment_length = \
+                      my_segment["traversal_box_fraction"].in_units("Mpccm / h")
+                else:
+                    segment_length = my_segment["traversal_box_fraction"] * \
+                      ds.domain_width[0].in_units("Mpccm / h")
                 next_redshift = my_segment["redshift"] - \
-                  self._deltaz_forward(my_segment["redshift"], 
-                                       ds.domain_width[0].in_units("Mpccm / h") *
-                                       my_segment["traversal_box_fraction"])
+                  self._deltaz_forward(my_segment["redshift"],
+                                       segment_length)
             elif my_segment.get("next", None) is None:
                 next_redshift = self.near_redshift
             else:
@@ -441,7 +450,8 @@
                     sub_vel = ds.arr([sub_ray['velocity_x'],
                                       sub_ray['velocity_y'],
                                       sub_ray['velocity_z']])
-                    sub_data['velocity_los'].extend((np.rollaxis(sub_vel, 1) *
+                    # line of sight velocity is reversed relative to ray
+                    sub_data['velocity_los'].extend(-1*(np.rollaxis(sub_vel, 1) *
                                                      line_of_sight).sum(axis=1)[asort])
                     del sub_vel
 
@@ -453,7 +463,7 @@
 
             # Get redshift for each lixel.  Assume linear relation between l and z.
             sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \
-                (sub_data['dl'] / vector_length(my_segment['start'], 
+                (sub_data['dl'] / vector_length(my_segment['start'],
                                                 my_segment['end']).in_cgs())
             sub_data['redshift'] = my_segment['redshift'] - \
               sub_data['dredshift'].cumsum() + sub_data['dredshift']
@@ -499,12 +509,17 @@
         # Flatten the list into a single dictionary containing fields
         # for the whole ray.
         all_data = _flatten_dict_list(all_data, exceptions=['segment_redshift'])
+        self._data = all_data
 
         if data_filename is not None:
             self._write_light_ray(data_filename, all_data)
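+            # reload the file just written so the ray is returned as a dataset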
+            ray_ds = load(data_filename)
+            return ray_ds
+        else:
+            return None
 
-        self._data = all_data
-        return all_data
+    def __getitem__(self, field):
+        return self._data[field]
 
     @parallel_root_only
     def _write_light_ray(self, filename, data):
@@ -513,19 +528,24 @@
 
         Write light ray data to hdf5 file.
         """
-
-        mylog.info("Saving light ray data to %s." % filename)
-        output = h5py.File(filename, 'w')
-        for field in data.keys():
-            # if the field is a tuple, only use the second part of the tuple
-            # in the hdf5 output (i.e. ('gas', 'density') -> 'density')
-            if isinstance(field, tuple):
-                fieldname = field[1]
-            else:
-                fieldname = field
-            output.create_dataset(fieldname, data=data[field])
-            output[fieldname].attrs["units"] = str(data[field].units)
-        output.close()
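+        # the ray may come from a single dataset or a simulation time series;
+        # in the latter case, build a dictionary of attributes to stand in
+        # for a dataset when saving.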
+        if self.simulation_type is None:
+            ds = load(self.parameter_filename, **self.load_kwargs)
+        else:
+            ds = {}
+            ds["dimensionality"] = self.simulation.dimensionality
+            ds["domain_left_edge"] = self.simulation.domain_left_edge
+            ds["domain_right_edge"] = self.simulation.domain_right_edge
+            ds["cosmological_simulation"] = self.simulation.cosmological_simulation
+            ds["periodicity"] = (True, True, True)
+            ds["current_redshift"] = self.near_redshift
+            for attr in ["omega_lambda", "omega_matter", "hubble_constant"]:
+                ds[attr] = getattr(self.cosmology, attr)
+            ds["current_time"] = \
+              self.cosmology.t_from_z(ds["current_redshift"])
+        extra_attrs = {"data_type": "yt_light_ray"}
+        field_types = dict([(field, "grid") for field in data.keys()])
+        save_as_dataset(ds, filename, data, field_types=field_types,
+                        extra_attrs=extra_attrs)
 
     @parallel_root_only
     def _write_light_ray_solution(self, filename, extra_info=None):
@@ -572,7 +592,7 @@
 def vector_length(start, end):
     """
     vector_length(start, end)
-    
+
     Calculate vector length.
     """
 
@@ -599,15 +619,15 @@
     """
     periodic_ray(start, end, left=None, right=None)
 
-    Break up periodic ray into non-periodic segments. 
+    Break up periodic ray into non-periodic segments.
     Accepts start and end points of periodic ray as YTArrays.
     Accepts optional left and right edges of periodic volume as YTArrays.
-    Returns a list of lists of coordinates, where each element of the 
-    top-most list is a 2-list of start coords and end coords of the 
-    non-periodic ray: 
+    Returns a list of lists of coordinates, where each element of the
+    top-most list is a 2-list of start coords and end coords of the
+    non-periodic ray:
 
-    [[[x0start,y0start,z0start], [x0end, y0end, z0end]], 
-     [[x1start,y1start,z1start], [x1end, y1end, z1end]], 
+    [[[x0start,y0start,z0start], [x0end, y0end, z0end]],
+     [[x1start,y1start,z1start], [x1end, y1end, z1end]],
      ...,]
 
     """

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -21,6 +21,9 @@
     periodic_distance
 from yt.data_objects.profiles import \
     create_profile
+from yt.frontends.ytdata.utilities import \
+    _hdf5_yt_array, \
+    _yt_array_hdf5
 from yt.units.yt_array import \
     YTArray
 from yt.utilities.exceptions import \
@@ -584,21 +587,3 @@
     del sphere
     
 add_callback("iterative_center_of_mass", iterative_center_of_mass)
-
-def _yt_array_hdf5(fh, fieldname, data):
-    dataset = fh.create_dataset(fieldname, data=data)
-    units = ""
-    if isinstance(data, YTArray):
-        units = str(data.units)
-    dataset.attrs["units"] = units
-
-def _hdf5_yt_array(fh, fieldname, ds=None):
-    if ds is None:
-        new_arr = YTArray
-    else:
-        new_arr = ds.arr
-    units = ""
-    if "units" in fh[fieldname].attrs:
-        units = fh[fieldname].attrs["units"]
-    if units == "dimensionless": units = ""
-    return new_arr(fh[fieldname].value, units)

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -1161,8 +1161,8 @@
         tbhdu.writeto(phfile, clobber=clobber)
 
         col1 = pyfits.Column(name='SRC_ID', format='J', array=np.array([1]).astype("int32"))
-        col2 = pyfits.Column(name='RA', format='D', array=np.array([float(self.parameters["sky_center"][0])]))
-        col3 = pyfits.Column(name='DEC', format='D', array=np.array([float(self.parameters["sky_center"][1])]))
+        col2 = pyfits.Column(name='RA', format='D', array=np.array([0.0]))
+        col3 = pyfits.Column(name='DEC', format='D', array=np.array([0.0]))
         col4 = pyfits.Column(name='E_MIN', format='D', array=np.array([float(emin)]))
         col5 = pyfits.Column(name='E_MAX', format='D', array=np.array([float(emax)]))
         col6 = pyfits.Column(name='FLUX', format='D', array=np.array([flux.value]))

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -106,7 +106,7 @@
         self.display_names["Tau"] = r"$\mathrm{\tau}$"
 
         for f, field in zip(self.freqs, self.freq_fields):
-            self.display_names[field] = r"$\mathrm{\Delta{I}_{%d\ GHz}}$" % (int(f))
+            self.display_names[field] = r"$\mathrm{\Delta{I}_{%d\ GHz}}$" % int(f)
 
     def on_axis(self, axis, center="c", width=(1, "unitary"), nx=800, source=None):
         r""" Make an on-axis projection of the SZ signal.
@@ -125,9 +125,26 @@
             as a tuple containing a coordinate and string unit name or by passing
             in a YTArray. If a list or unitless array is supplied, code units are
             assumed.
-        width : float, tuple, or YTQuantity.
-            The width of the projection. A float will assume the width is in code units.
-            A (value, unit) tuple or YTQuantity allows for the units of the width to be specified.
+        width : tuple or a float
+            Width can have four different formats to support windows with variable
+            x and y widths.  They are:
+
+            ==================================     =======================
+            format                                 example
+            ==================================     =======================
+            (float, string)                        (10,'kpc')
+            ((float, string), (float, string))     ((10,'kpc'),(15,'kpc'))
+            float                                  0.2
+            (float, float)                         (0.2, 0.3)
+            ==================================     =======================
+
+            For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
+            wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
+            window that is 10 kiloparsecs wide along the x axis and 15
+            kiloparsecs wide along the y axis.  In the other two examples, code
+            units are assumed, for example (0.2, 0.3) requests a plot that has an
+            x width of 0.2 and a y width of 0.3 in code units.  If units are
+            provided the resulting plot axis labels will use the supplied units.
         nx : integer, optional
             The dimensions on a side of the projection image.
         source : yt.data_objects.data_containers.YTSelectionContainer, optional
@@ -141,7 +158,7 @@
         ctr, dctr = self.ds.coordinates.sanitize_center(center, axis)
         width = self.ds.coordinates.sanitize_width(axis, width, None)
 
-        L = np.zeros((3))
+        L = np.zeros(3)
         L[axis] = 1.0
 
         beta_par = generate_beta_par(L)
@@ -174,7 +191,8 @@
 
         self.ds.field_info.pop(("gas","beta_par"))
 
-    def off_axis(self, L, center="c", width=(1, "unitary"), nx=800, source=None):
+    def off_axis(self, L, center="c", width=(1.0, "unitary"), depth=(1.0,"unitary"),
+                 nx=800, nz=800, north_vector=None, no_ghost=False, source=None):
         r""" Make an off-axis projection of the SZ signal.
 
         Parameters
@@ -191,11 +209,46 @@
             as a tuple containing a coordinate and string unit name or by passing
             in a YTArray. If a list or unitless array is supplied, code units are
             assumed.
-        width : float, tuple, or YTQuantity.
-            The width of the projection. A float will assume the width is in code units.
-            A (value, unit) tuple or YTQuantity allows for the units of the width to be specified.
+        width : tuple or a float
+            Width can have four different formats to support windows with variable
+            x and y widths.  They are:
+
+            ==================================     =======================
+            format                                 example
+            ==================================     =======================
+            (float, string)                        (10,'kpc')
+            ((float, string), (float, string))     ((10,'kpc'),(15,'kpc'))
+            float                                  0.2
+            (float, float)                         (0.2, 0.3)
+            ==================================     =======================
+
+            For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
+            wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
+            window that is 10 kiloparsecs wide along the x axis and 15
+            kiloparsecs wide along the y axis.  In the other two examples, code
+            units are assumed, for example (0.2, 0.3) requests a plot that has an
+            x width of 0.2 and a y width of 0.3 in code units.  If units are
+            provided the resulting plot axis labels will use the supplied units.
+        depth : tuple or float
+            A tuple containing the depth to project through and the string
+            key of the unit: (depth, 'unit').  If set to a float, code units
+            are assumed.
         nx : integer, optional
             The dimensions on a side of the projection image.
+        nz : integer, optional
+            The number of elements along the integration path length.
+        north_vector : a sequence of floats
+            A vector defining the 'up' direction in the plot.  This
+            option sets the orientation of the slicing plane.  If not
+            set, an arbitrary grid-aligned north-vector is chosen.
+        no_ghost : bool, optional
+            Optimization option for off-axis cases. If True, homogenized bricks
+            will extrapolate out from the grid instead of interpolating from
+            ghost zones, which have to be calculated first.  This can
+            lead to large speed improvements, but at a loss of
+            accuracy/smoothness in the resulting image.  The effects are
+            less notable when the transfer function is smooth and
+            broad. Default: False
         source : yt.data_objects.data_containers.YTSelectionContainer, optional
             If specified, this will be the data source used for selecting regions to project.
             Currently unsupported in yt 2.x.
@@ -205,13 +258,10 @@
         >>> L = np.array([0.5, 1.0, 0.75])
         >>> szprj.off_axis(L, center="c", width=(2.0, "Mpc"))
         """
-        if iterable(width):
-            w = self.ds.quan(width[0], width[1]).in_units("code_length").value
-        elif isinstance(width, YTQuantity):
-            w = width.in_units("code_length").value
-        else:
-            w = width
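+        # sanitize width and depth into (x_width, y_width, depth) in code units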
+        wd = self.ds.coordinates.sanitize_width(L, width, depth)
+        w = tuple(el.in_units('code_length').v for el in wd)
         ctr, dctr = self.ds.coordinates.sanitize_center(center, L)
+        res = (nx, nx, nz)
 
         if source is not None:
             mylog.error("Source argument is not currently supported for off-axis S-Z projections.")
@@ -221,16 +271,23 @@
         self.ds.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
         setup_sunyaev_zeldovich_fields(self.ds)
 
-        dens    = off_axis_projection(self.ds, ctr, L, w, nx, "density")
-        Te      = off_axis_projection(self.ds, ctr, L, w, nx, "t_sz")/dens
-        bpar    = off_axis_projection(self.ds, ctr, L, w, nx, "beta_par")/dens
-        omega1  = off_axis_projection(self.ds, ctr, L, w, nx, "t_squared")/dens
-        omega1  = omega1/(Te*Te) - 1.
+        dens = off_axis_projection(self.ds, ctr, L, w, res, "density",
+                                   north_vector=north_vector, no_ghost=no_ghost)
+        Te = off_axis_projection(self.ds, ctr, L, w, res, "t_sz",
+                                 north_vector=north_vector, no_ghost=no_ghost)/dens
+        bpar = off_axis_projection(self.ds, ctr, L, w, res, "beta_par",
+                                   north_vector=north_vector, no_ghost=no_ghost)/dens
+        omega1 = off_axis_projection(self.ds, ctr, L, w, res, "t_squared",
+                                     north_vector=north_vector, no_ghost=no_ghost)/dens
+        omega1 = omega1/(Te*Te) - 1.
         if self.high_order:
-            bperp2  = off_axis_projection(self.ds, ctr, L, w, nx, "beta_perp_squared")/dens
-            sigma1  = off_axis_projection(self.ds, ctr, L, w, nx, "t_beta_par")/dens
-            sigma1  = sigma1/Te - bpar
-            kappa1  = off_axis_projection(self.ds, ctr, L, w, nx, "beta_par_squared")/dens
+            bperp2 = off_axis_projection(self.ds, ctr, L, w, res, "beta_perp_squared", 
+                                         north_vector=north_vector, no_ghost=no_ghost)/dens
+            sigma1 = off_axis_projection(self.ds, ctr, L, w, res, "t_beta_par", 
+                                         north_vector=north_vector, no_ghost=no_ghost)/dens
+            sigma1 = sigma1/Te - bpar
+            kappa1 = off_axis_projection(self.ds, ctr, L, w, res, "beta_par_squared", 
+                                         north_vector=north_vector, no_ghost=no_ghost)/dens
             kappa1 -= bpar
         else:
             bperp2 = np.zeros((nx,nx))
@@ -238,9 +295,9 @@
             kappa1 = np.zeros((nx,nx))
         tau = sigma_thompson*dens*self.mueinv/mh
 
-        self.bounds = np.array([-0.5*w, 0.5*w, -0.5*w, 0.5*w])
-        self.dx = w/nx
-        self.dy = w/nx
+        self.bounds = (-0.5*wd[0], 0.5*wd[0], -0.5*wd[1], 0.5*wd[1])
+        self.dx = wd[0]/nx
+        self.dy = wd[1]/nx
         self.nx = nx
 
         self._compute_intensity(np.array(tau), np.array(Te), np.array(bpar),
@@ -332,7 +389,7 @@
         if sky_scale is not None and sky_center is not None:
             fib.create_sky_wcs(sky_center, sky_scale)
         fib.writeto(filename, clobber=clobber)
-        
+
     @parallel_root_only
     def write_png(self, filename_prefix, cmap_name="algae",
                   axes_units="kpc", log_fields=None):
@@ -362,13 +419,13 @@
             crossover = False
             if vmin < 0 and vmax < 0:
                 data *= -1
-                negative = True                                        
+                negative = True
             if field in log_fields:
                 log_field = log_fields[field]
             else:
                 log_field = True
             if log_field:
-                formatter = matplotlib.ticker.LogFormatterMathtext()        
+                formatter = matplotlib.ticker.LogFormatterMathtext()
                 norm = matplotlib.colors.LogNorm()
                 if vmin < 0 and vmax > 0:
                     crossover = True
@@ -385,13 +442,13 @@
                 cbar_label += r'$\ \ ('+units+r')$'
             fig = plt.figure(figsize=(10.0,8.0))
             ax = fig.add_subplot(111)
-            cax = ax.imshow(data.ndarray_view(), norm=norm, extent=extent, cmap=cmap_name, origin="lower")
+            cax = ax.imshow(data.d, norm=norm, extent=extent, cmap=cmap_name, origin="lower")
             for label in ax.get_xticklabels():
                 label.set_fontproperties(ticks_font)
             for label in ax.get_yticklabels():
-                label.set_fontproperties(ticks_font)                      
-            ax.set_xlabel(r"$\mathrm{x\ (%s)}$" % (axes_units), fontsize=16)
-            ax.set_ylabel(r"$\mathrm{y\ (%s)}$" % (axes_units), fontsize=16)
+                label.set_fontproperties(ticks_font)
+            ax.set_xlabel(r"$\mathrm{x\ (%s)}$" % axes_units, fontsize=16)
+            ax.set_ylabel(r"$\mathrm{y\ (%s)}$" % axes_units, fontsize=16)
             cbar = fig.colorbar(cax, format=formatter)
             cbar.ax.set_ylabel(cbar_label, fontsize=16)
             if negative:
@@ -404,10 +461,10 @@
                                             np.ceil(np.log10(vmax))+1))
                 cbar.set_ticks(yticks)
             for label in cbar.ax.get_yticklabels():
-                label.set_fontproperties(ticks_font)                 
+                label.set_fontproperties(ticks_font)
             fig.tight_layout()
             plt.savefig(filename)
-            
+
     @parallel_root_only
     def write_hdf5(self, filename):
         r"""Export the set of S-Z fields to a set of HDF5 datasets.
@@ -421,11 +478,8 @@
         --------
         >>> szprj.write_hdf5("SZsloshing.h5")
         """
-        import h5py
-        f = h5py.File(filename, "w")
         for field, data in self.items():
-            f.create_dataset(field,data=data.ndarray_view())
-        f.close()
+            data.write_hdf5(filename, dataset_name=field)
 
     def keys(self):
         return self.data.keys()

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -13,7 +13,9 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import h5py
 import itertools
+import os
 import types
 import uuid
 from yt.extern.six import string_types
@@ -25,9 +27,12 @@
 import shelve
 from contextlib import contextmanager
 
+from yt.funcs import get_output_filename
 from yt.funcs import *
 
 from yt.data_objects.particle_io import particle_handler_registry
+from yt.frontends.ytdata.utilities import \
+    save_as_dataset
 from yt.units.unit_object import UnitParseError
 from yt.utilities.exceptions import \
     YTUnitConversionError, \
@@ -98,6 +103,8 @@
     _con_args = ()
     _skip_add = False
     _container_fields = ()
+    _tds_attrs = ()
+    _tds_fields = ()
     _field_cache = None
     _index = None
 
@@ -463,6 +470,117 @@
         df = pd.DataFrame(data)
         return df
 
+    def save_as_dataset(self, filename=None, fields=None):
+        r"""Export a data object to a reloadable yt dataset.
+
+        This function will take a data object and output a dataset 
+        containing either the fields presently existing or fields 
+        given in the ``fields`` list.  The resulting dataset can be
+        reloaded as a yt dataset.
+
+        Parameters
+        ----------
+        filename : str, optional
+            The name of the file to be written.  If None, the name 
+            will be a combination of the original dataset and the type 
+            of data container.
+        fields : list of strings or tuples, optional
+            If this is supplied, it is the list of fields to be saved to
+            disk.  If not supplied, all the fields that have been queried
+            will be saved.
+
+        Returns
+        -------
+        filename : str
+            The name of the file that has been created.
+
+        Examples
+        --------
+
+        >>> import yt
+        >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+        >>> sp = ds.sphere(ds.domain_center, (10, "Mpc"))
+        >>> fn = sp.save_as_dataset(fields=["density", "temperature"])
+        >>> sphere_ds = yt.load(fn)
+        >>> # the original data container is available as the data attribute
+        >>> print (sphere_ds.data["density"])
+        [  4.46237613e-32   4.86830178e-32   4.46335118e-32 ...,   6.43956165e-30
+           3.57339907e-30   2.83150720e-30] g/cm**3
+        >>> ad = sphere_ds.all_data()
+        >>> print (ad["temperature"])
+        [  1.00000000e+00   1.00000000e+00   1.00000000e+00 ...,   4.40108359e+04
+           4.54380547e+04   4.72560117e+04] K
+
+        """
+
+        keyword = "%s_%s" % (str(self.ds), self._type_name)
+        filename = get_output_filename(filename, keyword, ".h5")
+
+        data = {}
+        if fields is not None:
+            for f in self._determine_fields(fields):
+                data[f] = self[f]
+        else:
+            data.update(self.field_data)
+        # get the extra fields needed to reconstruct the container
+        tds_fields = tuple(self._determine_fields(list(self._tds_fields)))
+        for f in [f for f in self._container_fields + tds_fields \
+                  if f not in data]:
+            data[f] = self[f]
+        data_fields = list(data.keys())
+
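+        # determine which coordinate fields are needed to reconstruct this container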
+        need_grid_positions = False
+        need_particle_positions = False
+        ptypes = []
+        ftypes = {}
+        for field in data_fields:
+            if field in self._container_fields:
+                ftypes[field] = "grid"
+                need_grid_positions = True
+            elif self.ds.field_info[field].particle_type:
+                if field[0] not in ptypes:
+                    ptypes.append(field[0])
+                ftypes[field] = field[0]
+                need_particle_positions = True
+            else:
+                ftypes[field] = "grid"
+                need_grid_positions = True
+        # projections and slices use px and py, so don't need positions
+        if self._type_name in ["cutting", "proj", "slice"]:
+            need_grid_positions = False
+
+        if need_particle_positions:
+            for ax in "xyz":
+                for ptype in ptypes:
+                    p_field = (ptype, "particle_position_%s" % ax)
+                    if p_field in self.ds.field_info and p_field not in data:
+                        data_fields.append(p_field)
+                        ftypes[p_field] = p_field[0]
+                        data[p_field] = self[p_field]
+        if need_grid_positions:
+            for ax in "xyz":
+                g_field = ("index", ax)
+                if g_field in self.ds.field_info and g_field not in data:
+                    data_fields.append(g_field)
+                    ftypes[g_field] = "grid"
+                    data[g_field] = self[g_field]
+                g_field = ("index", "d" + ax)
+                if g_field in self.ds.field_info and g_field not in data:
+                    data_fields.append(g_field)
+                    ftypes[g_field] = "grid"
+                    data[g_field] = self[g_field]
+
+        extra_attrs = dict([(arg, getattr(self, arg, None))
+                            for arg in self._con_args + self._tds_attrs])
+        extra_attrs["con_args"] = self._con_args
+        extra_attrs["data_type"] = "yt_data_container"
+        extra_attrs["container_type"] = self._type_name
+        extra_attrs["dimensionality"] = self._dimensionality
+        save_as_dataset(self.ds, filename, data, field_types=ftypes,
+                        extra_attrs=extra_attrs)
+
+        return filename
+
     def to_glue(self, fields, label="yt", data_collection=None):
         """
         Takes specific *fields* in the container and exports them to

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -256,7 +256,8 @@
           (("all", "particle_mass") in self.data_source.ds.field_info)
         vals = []
         if use_gas:
-            vals += [(data[ax] * data["gas", "cell_mass"]).sum(dtype=np.float64)
+            vals += [(data["gas", ax] *
+                      data["gas", "cell_mass"]).sum(dtype=np.float64)
                      for ax in 'xyz']
             vals.append(data["gas", "cell_mass"].sum(dtype=np.float64))
         if use_particles:
@@ -657,7 +658,7 @@
         m = data.ds.quan(0., "g")
         if use_gas:
             e += (data["gas", "kinetic_energy"] *
-                  data["index", "cell_volume"]).sum(dtype=np.float64)
+                  data["gas", "cell_volume"]).sum(dtype=np.float64)
             j += data["gas", "angular_momentum_magnitude"].sum(dtype=np.float64)
             m += data["gas", "cell_mass"].sum(dtype=np.float64)
         if use_particles:

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -165,6 +165,10 @@
             `particle_deposit` namespace as `methodname_deposit`.  Current
             methods include `count`, `simple_smooth`, `sum`, `std`, `cic`,
             `weighted_mean`, `mesh_id`, and `nearest`.
+        kernel_name : string, default 'cubic'
+            This is the name of the smoothing kernel to use. Current supported
+            kernel names include `cubic`, `quartic`, `quintic`, `wendland2`,
+            `wendland4`, and `wendland6`.
 
         Returns
         -------
@@ -228,6 +232,10 @@
             we are able to find and identify all relevant particles.
         nneighbors : int, default 64
             The number of neighbors to examine during the process.
+        kernel_name : string, default 'cubic'
+            This is the name of the smoothing kernel to use. Current supported
+            kernel names include `cubic`, `quartic`, `quintic`, `wendland2`,
+            `wendland4`, and `wendland6`.
 
         Returns
         -------
@@ -313,6 +321,10 @@
             `particle_smooth` namespace as `methodname_smooth`.
         nneighbors : int, default 64
             The number of neighbors to examine during the process.
+        kernel_name : string, default 'cubic'
+            This is the name of the smoothing kernel to use. Current supported
+            kernel names include `cubic`, `quartic`, `quintic`, `wendland2`,
+            `wendland4`, and `wendland6`.
 
         Returns
         -------

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -16,8 +16,10 @@
 from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 
+from yt.frontends.ytdata.utilities import \
+    save_as_dataset
+from yt.funcs import get_output_filename
 from yt.funcs import *
-
 from yt.units.yt_array import uconcatenate, array_like_field
 from yt.units.unit_object import Unit
 from yt.data_objects.data_containers import YTFieldData
@@ -949,6 +951,112 @@
         else:
             return np.linspace(mi, ma, n+1)
 
+    def save_as_dataset(self, filename=None):
+        r"""Export a profile to a reloadable yt dataset.
+
+        This function will take a profile and output a dataset
+        containing all relevant fields.  The resulting dataset
+        can be reloaded as a yt dataset.
+
+        Parameters
+        ----------
+        filename : str, optional
+            The name of the file to be written.  If None, the name
+            will be a combination of the original dataset plus
+            the type of object, e.g., Profile1D.
+
+        Returns
+        -------
+        filename : str
+            The name of the file that has been created.
+
+        Examples
+        --------
+
+        >>> import yt
+        >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+        >>> ad = ds.all_data()
+        >>> profile = yt.create_profile(ad, ["density", "temperature"],
+        ...                             "cell_mass", weight_field=None,
+        ...                             n_bins=(128, 128))
+        >>> fn = profile.save_as_dataset()
+        >>> prof_ds = yt.load(fn)
+        >>> print (prof_ds.data["cell_mass"])
+        (128, 128)
+        >>> print (prof_ds.data["x"].shape) # x bins as 1D array
+        (128,)
+        >>> print (prof_ds.data["density"]) # x bins as 2D array
+        (128, 128)
+        >>> p = yt.PhasePlot(prof_ds.data, "density", "temperature",
+        ...                  "cell_mass", weight_field=None)
+        >>> p.save()
+
+        """
+
+        keyword = "%s_%s" % (str(self.ds), self.__class__.__name__)
+        filename = get_output_filename(filename, keyword, ".h5")
+
+        args = ("field", "log")
+        extra_attrs = {"data_type": "yt_profile",
+                       "profile_dimensions": self.size,
+                       "weight_field": self.weight_field,
+                       "fractional": self.fractional}
+        data = {}
+        data.update(self.field_data)
+        data["weight"] = self.weight
+        data["used"] = self.used.astype("float64")
+
+        dimensionality = 0
+        bin_data = []
+        for ax in "xyz":
+            if hasattr(self, ax):
+                dimensionality += 1
+                data[ax] = getattr(self, ax)
+                bin_data.append(data[ax])
+                bin_field_name = "%s_bins" % ax
+                data[bin_field_name] = getattr(self, bin_field_name)
+                extra_attrs["%s_range" % ax] = self.ds.arr([data[bin_field_name][0],
+                                                            data[bin_field_name][-1]])
+                for arg in args:
+                    key = "%s_%s" % (ax, arg)
+                    extra_attrs[key] = getattr(self, key)
+
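+        # expand the bin centers into N-D arrays matching the profile shape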
+        bin_fields = np.meshgrid(*bin_data)
+        for i, ax in enumerate("xyz"[:dimensionality]):
+            data[getattr(self, "%s_field" % ax)] = bin_fields[i]
+
+        extra_attrs["dimensionality"] = dimensionality
+        ftypes = dict([(field, "data") for field in data])
+        save_as_dataset(self.ds, filename, data, field_types=ftypes,
+                        extra_attrs=extra_attrs)
+
+        return filename
+
+class ProfileNDFromDataset(ProfileND):
+    """
+    An ND profile object loaded from a ytdata dataset.
+    """
+    def __init__(self, ds):
+        ProfileND.__init__(self, ds.data, ds.parameters["weight_field"])
+        self.fractional = ds.parameters["fractional"]
+        exclude_fields = ["used", "weight"]
+        for ax in "xyz"[:ds.dimensionality]:
+            setattr(self, ax, ds.data[ax])
+            setattr(self, "%s_bins" % ax, ds.data["%s_bins" % ax])
+            setattr(self, "%s_field" % ax,
+                    tuple(ds.parameters["%s_field" % ax]))
+            setattr(self, "%s_log" % ax, ds.parameters["%s_log" % ax])
+            exclude_fields.extend([ax, "%s_bins" % ax,
+                                   ds.parameters["%s_field" % ax][1]])
+        self.weight = ds.data["weight"]
+        self.used = ds.data["used"].d.astype(bool)
+        profile_fields = [f for f in ds.field_list
+                          if f[1] not in exclude_fields]
+        for field in profile_fields:
+            self.field_map[field[1]] = field
+            self.field_data[field] = ds.data[field]
+            self.field_units[field] = ds.data[field].units
+
 class Profile1D(ProfileND):
     """An object that represents a 1D profile.
 
@@ -1011,6 +1119,14 @@
     def bounds(self):
         return ((self.x_bins[0], self.x_bins[-1]),)
 
+class Profile1DFromDataset(ProfileNDFromDataset, Profile1D):
+    """
+    A 1D profile object loaded from a ytdata dataset.
+    """
+
+    def __init__(self, ds):
+        ProfileNDFromDataset.__init__(self, ds)
+
 class Profile2D(ProfileND):
     """An object that represents a 2D profile.
 
@@ -1108,6 +1224,13 @@
         return ((self.x_bins[0], self.x_bins[-1]),
                 (self.y_bins[0], self.y_bins[-1]))
 
+class Profile2DFromDataset(ProfileNDFromDataset, Profile2D):
+    """
+    A 2D profile object loaded from a ytdata dataset.
+    """
+
+    def __init__(self, ds):
+        ProfileNDFromDataset.__init__(self, ds)
 
 class ParticleProfile(Profile2D):
     """An object that represents a *deposited* 2D profile. This is like a
@@ -1354,6 +1477,13 @@
         self.z_bins.convert_to_units(new_unit)
         self.z = 0.5*(self.z_bins[1:]+self.z_bins[:-1])
 
+class Profile3DFromDataset(ProfileNDFromDataset, Profile3D):
+    """
+    A 3D profile object loaded from a ytdata dataset.
+    """
+
+    def __init__(self, ds):
+        ProfileNDFromDataset.__init__(self, ds)
 
 def sanitize_field_tuple_keys(input_dict, data_source):
     if input_dict is not None:
@@ -1429,8 +1559,8 @@
     >>> profile = create_profile(ad, [("gas", "density")],
     ...                              [("gas", "temperature"),
     ...                               ("gas", "velocity_x")])
-    >>> print profile.x
-    >>> print profile["gas", "temperature"]
+    >>> print (profile.x)
+    >>> print (profile["gas", "temperature"])
 
     """
     bin_fields = data_source._determine_fields(bin_fields)

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -347,6 +347,8 @@
     _key_fields = YTSelectionContainer2D._key_fields + ['pz','pdz']
     _type_name = "cutting"
     _con_args = ('normal', 'center')
+    _tds_attrs = ("_inv_mat",)
+    _tds_fields = ("x", "y", "z", "dx")
     _container_fields = ("px", "py", "pz", "pdx", "pdy", "pdz")
     def __init__(self, normal, center, north_vector=None,
                  ds=None, field_parameters=None, data_source=None):

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -1,7 +1,7 @@
 """
-Generalized Enzo output objects, both static and time-series.
+Dataset and related data structures.
 
-Presumably at some point EnzoRun will be absorbed into here.
+
 
 
 """
@@ -37,6 +37,8 @@
     FieldInfoContainer, NullFunc
 from yt.fields.fluid_fields import \
     setup_gradient_fields
+from yt.fields.particle_fields import \
+    add_volume_weighted_smoothed_field
 from yt.data_objects.particle_filters import \
     filter_registry
 from yt.data_objects.particle_unions import \
@@ -371,6 +373,7 @@
         self.field_info.setup_fluid_fields()
         for ptype in self.particle_types:
             self.field_info.setup_particle_fields(ptype)
+        self.field_info.setup_fluid_index_fields()
         if "all" not in self.particle_types:
             mylog.debug("Creating Particle Union 'all'")
             pu = ParticleUnion("all", list(self.particle_types_raw))
@@ -570,6 +573,7 @@
     def _add_object_class(self, name, base):
         self.object_types.append(name)
         obj = functools.partial(base, ds=weakref.proxy(self))
+        obj.__doc__ = base.__doc__
         setattr(self, name, obj)
 
     def find_max(self, field):
@@ -910,7 +914,7 @@
         deps, _ = self.field_info.check_derived_fields([name])
         self.field_dependencies.update(deps)
 
-    def add_deposited_particle_field(self, deposit_field, method):
+    def add_deposited_particle_field(self, deposit_field, method, kernel_name='cubic'):
         """Add a new deposited particle field
 
         Creates a new deposited field based on the particle *deposit_field*.
@@ -922,8 +926,16 @@
            The field name tuple of the particle field the deposited field will
            be created from.  This must be a field name tuple so yt can
            appropriately infer the correct particle type.
-        method : one of 'count', 'sum', or 'cic'
-           The particle deposition method to use.
+        method : string
+           This is the "method name" which will be looked up in the
+           `particle_deposit` namespace as `methodname_deposit`.  Current
+           methods include `count`, `simple_smooth`, `sum`, `std`, `cic`,
+           `weighted_mean`, `mesh_id`, and `nearest`.
+        kernel_name : string, default 'cubic'
+           This is the name of the smoothing kernel to use. It is only used for
+           the `simple_smooth` method and is otherwise ignored. Current
+           supported kernel names include `cubic`, `quartic`, `quintic`,
+           `wendland2`, `wendland4`, and `wendland6`.
 
         Returns
         -------
@@ -947,15 +959,17 @@
             if method != 'count':
                 pden = data[ptype, "particle_mass"]
                 top = data.deposit(pos, [data[(ptype, deposit_field)]*pden],
-                                   method=method)
-                bottom = data.deposit(pos, [pden], method=method)
+                                   method=method, kernel_name=kernel_name)
+                bottom = data.deposit(pos, [pden], method=method,
+                                      kernel_name=kernel_name)
                 top[bottom == 0] = 0.0
                 bnz = bottom.nonzero()
                 top[bnz] /= bottom[bnz]
                 d = data.ds.arr(top, input_units=units)
             else:
                 d = data.ds.arr(data.deposit(pos, [data[ptype, deposit_field]],
-                                             method=method))
+                                             method=method,
+                                             kernel_name=kernel_name))
             return d
         name_map = {"cic": "cic", "sum": "nn", "count": "count"}
         field_name = "%s_" + name_map[method] + "_%s"
@@ -968,6 +982,56 @@
             validators=[ValidateSpatial()])
         return ("deposit", field_name)
 
+    def add_smoothed_particle_field(self, smooth_field, method="volume_weighted",
+                                    nneighbors=64, kernel_name="cubic"):
+        """Add a new smoothed particle field
+
+        Creates a new smoothed field based on the particle *smooth_field*.
+
+        Parameters
+        ----------
+
+        smooth_field : tuple
+           The field name tuple of the particle field the smoothed field will
+           be created from.  This must be a field name tuple so yt can
+           appropriately infer the correct particle type.
+        method : string, default 'volume_weighted'
+           The particle smoothing method to use. Can only be 'volume_weighted'
+           for now.
+        nneighbors : int, default 64
+            The number of neighbors to examine during the process.
+        kernel_name : string, default 'cubic'
+            This is the name of the smoothing kernel to use. Current supported
+            kernel names include `cubic`, `quartic`, `quintic`, `wendland2`,
+            `wendland4`, and `wendland6`.
+
+        Returns
+        -------
+
+        The field name tuple for the newly created field.
+        """
+        self.index
+        if isinstance(smooth_field, tuple):
+            ptype, smooth_field = smooth_field[0], smooth_field[1]
+        else:
+            raise RuntimeError("smooth_field must be a tuple, received %s" %
+                               smooth_field)
+        if method != "volume_weighted":
+            raise NotImplementedError("method must be 'volume_weighted'")
+
+        coord_name = "particle_position"
+        mass_name = "particle_mass"
+        smoothing_length_name = "smoothing_length"
+        if (ptype, smoothing_length_name) not in self.derived_field_list:
+            raise ValueError("%s not in derived_field_list" %
+                             ((ptype, smoothing_length_name),))
+        density_name = "density"
+        registry = self.field_info
+
+        return add_volume_weighted_smoothed_field(ptype, coord_name, mass_name,
+                   smoothing_length_name, density_name, smooth_field, registry,
+                   nneighbors=nneighbors, kernel_name=kernel_name)[0]
+
     def add_gradient_fields(self, input_field):
         """Add gradient fields.
 

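The two entry points above can be exercised as in this minimal sketch, which reuses the Gadget sample dataset and field from the tests further down (the smoothed variant additionally requires that the particle type provide a "smoothing_length" field):

    import yt
    ds = yt.load("GadgetDiskGalaxy/snapshot_200.hdf5")
    # Deposit a particle quantity onto the mesh with a chosen method.
    cic_name = ds.add_deposited_particle_field(("PartType0", "particle_ones"),
                                               method="cic")
    # Volume-weighted SPH smoothing with a selectable kernel.
    sm_name = ds.add_smoothed_particle_field(("PartType0", "particle_ones"),
                                             kernel_name="quintic")
    ad = ds.all_data()
    print(ad[cic_name].sum())
    print(ad[sm_name].sum())
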
diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -63,6 +63,20 @@
     def setup_fluid_fields(self):
         pass
 
+    def setup_fluid_index_fields(self):
+        # Now we get all our index types and set up aliases to them
+        if self.ds is None: return
+        index_fields = set([f for _, f in self if _ == "index"])
+        for ftype in self.ds.fluid_types:
+            if ftype in ("index", "deposit"): continue
+            for f in index_fields:
+                if (ftype, f) in self: continue
+                self.alias((ftype, f), ("index", f))
+                # Different field types have different default units.
+                # We want to make sure the aliased field will have
+                # the same units as the "index" field.
+                self[(ftype, f)].units = self["index", f].units
+
     def setup_particle_fields(self, ptype, ftype='gas', num_neighbors=64 ):
         skip_output_units = ("code_length",)
         for f, (units, aliases, dn) in sorted(self.known_particle_fields):

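The net effect of setup_fluid_index_fields is that mesh geometry fields gain per-fluid-type aliases with matching units, so the following sketch (assuming any grid dataset ds) should hold:

    ad = ds.all_data()
    # The ("gas", "dx") alias is created by setup_fluid_index_fields and
    # inherits the units of the corresponding ("index", "dx") field.
    assert (ad["index", "dx"] == ad["gas", "dx"]).all()
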
diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 yt/fields/fluid_fields.py
--- a/yt/fields/fluid_fields.py
+++ b/yt/fields/fluid_fields.py
@@ -52,7 +52,7 @@
     create_vector_fields(registry, "velocity", "cm / s", ftype, slice_info)
 
     def _cell_mass(field, data):
-        return data[ftype, "density"] * data["index", "cell_volume"]
+        return data[ftype, "density"] * data[ftype, "cell_volume"]
 
     registry.add_field((ftype, "cell_mass"),
         function=_cell_mass,
@@ -89,11 +89,11 @@
             units = "")
 
     def _courant_time_step(field, data):
-        t1 = data["index", "dx"] / (data[ftype, "sound_speed"]
+        t1 = data[ftype, "dx"] / (data[ftype, "sound_speed"]
                         + np.abs(data[ftype, "velocity_x"]))
-        t2 = data["index", "dy"] / (data[ftype, "sound_speed"]
+        t2 = data[ftype, "dy"] / (data[ftype, "sound_speed"]
                         + np.abs(data[ftype, "velocity_y"]))
-        t3 = data["index", "dz"] / (data[ftype, "sound_speed"]
+        t3 = data[ftype, "dz"] / (data[ftype, "sound_speed"]
                         + np.abs(data[ftype, "velocity_z"]))
         tr = np.minimum(np.minimum(t1, t2), t3)
         return tr
@@ -140,7 +140,7 @@
              units="Zsun")
 
     def _metal_mass(field, data):
-        return data[ftype, "metal_density"] * data["index", "cell_volume"]
+        return data[ftype, "metal_density"] * data[ftype, "cell_volume"]
     registry.add_field((ftype, "metal_mass"),
                        function=_metal_mass,
                        units="g")
@@ -188,7 +188,7 @@
         slice_3dl[axi] = sl_left
         slice_3dr[axi] = sl_right
         def func(field, data):
-            ds = div_fac * data["index", "d%s" % ax]
+            ds = div_fac * data[ftype, "d%s" % ax]
             f  = data[grad_field][slice_3dr]/ds[slice_3d]
             f -= data[grad_field][slice_3dl]/ds[slice_3d]
             new_field = data.ds.arr(np.zeros_like(data[grad_field], dtype=np.float64),

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -826,34 +826,6 @@
                        units = "code_length")
     return [field_name]
 
-def add_density_kernel(ptype, coord_name, mass_name, registry, nneighbors = 64,
-                       kernel_name = 'cubic'):
-    if kernel_name == 'cubic':
-        field_name = (ptype, "smoothed_density")
-    else:
-        field_name = (ptype, "%s_smoothed_density" % (kernel_name))
-
-    def _nth_neighbor(field, data):
-        pos = data[ptype, coord_name]
-        pos.convert_to_units("code_length")
-        mass = data[ptype, mass_name]
-        mass.convert_to_units("g")
-        densities = mass * 0.0
-        data.particle_operation(pos, [mass, densities],
-                         method="density",
-                         nneighbors = nneighbors,
-                         kernel_name = kernel_name)
-        ones = pos.prod(axis=1) # Get us in code_length**3
-        ones[:] = 1.0
-        densities /= ones
-        # Now some quick unit conversions.
-        return densities
-    registry.add_field(field_name, function = _nth_neighbor,
-                       validators = [ValidateSpatial(0)],
-                       particle_type = True,
-                       units = "g/cm**3")
-    return [field_name]
-
 def add_union_field(registry, ptype, field_name, units):
     """
     Create a field that is the concatenation of multiple particle types.

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -1,11 +1,15 @@
 import numpy as np
 
+from yt import \
+    load
 from yt.testing import \
     fake_random_ds, \
+    assert_almost_equal, \
     assert_equal, \
     assert_array_almost_equal_nulp, \
     assert_array_equal, \
-    assert_raises
+    assert_raises, \
+    requires_file
 from yt.utilities.cosmology import \
     Cosmology
 from yt.frontends.stream.fields import \
@@ -188,6 +192,15 @@
     ret = ad[fn]
     assert_equal(ret.sum(), ad['particle_ones'].sum())
 
+@requires_file('GadgetDiskGalaxy/snapshot_200.hdf5')
+def test_add_smoothed_particle_field():
+    ds = load('GadgetDiskGalaxy/snapshot_200.hdf5')
+    fn = ds.add_smoothed_particle_field(('PartType0', 'particle_ones'))
+    assert_equal(fn, ('deposit', 'PartType0_smoothed_particle_ones'))
+    ad = ds.all_data()
+    ret = ad[fn]
+    assert_almost_equal(ret.sum(), 3824750.912653606)
+
 def test_add_gradient_fields():
     gfields = base_ds.add_gradient_fields(("gas","density"))
     gfields += base_ds.add_gradient_fields(("index", "ones"))

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -39,6 +39,7 @@
     'sdf',
     'stream',
     'tipsy',
+    'ytdata',
 ]
 
 class _frontend_container:

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -457,7 +457,7 @@
             units_override = {}
         # This is for backwards-compatibility
         already_warned = False
-        for k,v in self.specified_parameters.items():
+        for k, v in list(self.specified_parameters.items()):
             if k.endswith("_unit") and k not in units_override:
                 if not already_warned:
                     mylog.warning("Supplying unit conversions from the parameters dict is deprecated, "+
@@ -489,12 +489,15 @@
     def set_code_units(self):
         super(AthenaDataset, self).set_code_units()
         mag_unit = getattr(self, "magnetic_unit", None)
+        vel_unit = getattr(self, "velocity_unit", None)
         if mag_unit is None:
             self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
                                          (self.time_unit**2 * self.length_unit))
         self.magnetic_unit.convert_to_units("gauss")
-
         self.unit_registry.modify("code_magnetic", self.magnetic_unit)
+        if vel_unit is None:
+            self.velocity_unit = self.length_unit/self.time_unit
+        self.unit_registry.modify("code_velocity", self.velocity_unit)
 
     def _parse_parameter_file(self):
         self._handle = open(self.parameter_filename, "rb")

This diff is so big that we needed to truncate the remainder.

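On the Athena change above: code_velocity now falls back to length_unit/time_unit when no velocity unit is supplied. A sketch of passing explicit conversions via units_override, the supported replacement for the deprecated parameters dict (file name hypothetical):

    import yt
    units_override = {"length_unit": (1.0, "Mpc"),
                      "time_unit": (1.0, "Myr"),
                      "mass_unit": (1.0e14, "Msun")}
    ds = yt.load("sloshing.0100.vtk", units_override=units_override)
    print(ds.velocity_unit)  # derived as length_unit/time_unit
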
https://bitbucket.org/yt_analysis/yt/commits/e78c54e3253f/
Changeset:   e78c54e3253f
Branch:      yt
User:        jzuhone
Date:        2015-11-02 18:21:29+00:00
Summary:     Adding an emission_measure field
Affected #:  1 file

diff -r 955a88157afcf3646ef4f58607ac8b249bb85949 -r e78c54e3253fb01f66718b94543175fbd21d22a9 yt/fields/astro_fields.py
--- a/yt/fields/astro_fields.py
+++ b/yt/fields/astro_fields.py
@@ -91,6 +91,20 @@
                        function=_chandra_emissivity,
                        units="") # add correct units here
 
+    def _emission_measure(field, data):
+        if data.has_field_parameter("X_H"):
+            X_H = data.get_field_parameter("X_H")
+        else:
+            X_H = 0.76
+        nenh = data["density"]*data["density"]
+        nenh /= mh*mh
+        nenh *= 0.5*(1.+X_H)*X_H*data["cell_volume"]
+        return nenh
+    
+    registry.add_field((ftype, "emission_measure"),
+                       function=_emission_measure,
+                       units="cm**-3")
+
     def _xray_emissivity(field, data):
         # old scaling coefficient was 2.168e60
         return data.ds.arr(data[ftype, "density"].to_ndarray().astype(np.float64)**2

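The new field is the cell-wise emission measure EM = n_e * n_H * V: with n_H = X_H*rho/mh and, for a fully ionized gas, n_e = 0.5*(1+X_H)*rho/mh, the product reduces to the 0.5*(1+X_H)*X_H*rho**2/mh**2 factor in the code. A sketch of totaling it over a region (assuming a loaded dataset ds), mirroring how the beta-model test below uses it:

    sp = ds.sphere("c", (0.5, "Mpc"))
    EM = sp.quantities.total_quantity("emission_measure")
    print(EM.in_units("cm**-3"))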

https://bitbucket.org/yt_analysis/yt/commits/3d05dc3b7a70/
Changeset:   3d05dc3b7a70
Branch:      yt
User:        jzuhone
Date:        2015-11-02 18:26:22+00:00
Summary:     Adding the ability to put in your own pseudo-random number generator, mainly for testing.
Affected #:  2 files

diff -r e78c54e3253fb01f66718b94543175fbd21d22a9 -r 3d05dc3b7a706fb1cb828337c2b121497e950cec yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -73,14 +73,20 @@
         "invert_cdf": Invert the cumulative distribution function of the spectrum.
         "accept_reject": Acceptance-rejection method using the spectrum. 
         The first method should be sufficient for most cases. 
+    prng : NumPy `RandomState` object or numpy.random
+        A pseudo-random number generator. Typically will only be specified
+        if you have a reason to generate the same set of random numbers, such as for a
+        test. Default is the numpy.random module.
     """
     def __init__(self, spectral_model, X_H=0.75, Zmet=0.3, 
-                 photons_per_chunk=10000000, method="invert_cdf"):
+                 photons_per_chunk=10000000, method="invert_cdf",
+                 prng=np.random):
         self.X_H = X_H
         self.Zmet = Zmet
         self.spectral_model = spectral_model
         self.photons_per_chunk = photons_per_chunk
         self.method = method
+        self.prng = prng
 
     def __call__(self, data_source, parameters):
 
@@ -174,7 +180,7 @@
                 tot_ph_c = cspec.d.sum()
                 tot_ph_m = mspec.d.sum()
 
-                u = np.random.random(size=n_current)
+                u = self.prng.uniform(size=n_current)
 
                 cell_norm_c = tot_ph_c*cem
                 cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cem
@@ -209,7 +215,7 @@
                         cumspec += Z * cumspec_m
                         norm_factor = 1.0 / cumspec[-1]
                         cumspec *= norm_factor
-                        randvec = np.random.uniform(size=cn)
+                        randvec = self.prng.uniform(size=cn)
                         randvec.sort()
                         cell_e = np.interp(randvec, cumspec, ebins)
                     elif self.method == "accept_reject":
@@ -217,7 +223,7 @@
                         tot_spec += Z * mspec.d
                         norm_factor = 1.0 / tot_spec.sum()
                         tot_spec *= norm_factor
-                        eidxs = np.random.choice(nchan, size=cn, p=tot_spec)
+                        eidxs = self.prng.choice(nchan, size=cn, p=tot_spec)
                         cell_e = emid[eidxs]
                     energies[ei:ei+cn] = cell_e
                     cell_counter += 1

diff -r e78c54e3253fb01f66718b94543175fbd21d22a9 -r 3d05dc3b7a706fb1cb828337c2b121497e950cec yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -449,7 +449,7 @@
                         absorb_model=None, psf_sigma=None,
                         sky_center=None, responses=None,
                         convolve_energies=False, no_shifting=False,
-                        north_vector=None):
+                        north_vector=None, prng=np.random):
         r"""
         Projects photons onto an image plane given a line of sight.
 
@@ -489,6 +489,11 @@
             the plane of projection. If not set, an arbitrary grid-aligned north_vector 
             is chosen. Ignored in the case where a particular axis (e.g., "x", "y", or
             "z") is explicitly specified.
+        prng : NumPy `RandomState` object or numpy.random
+            A pseudo-random number generator. Typically will only be specified if you
+            have a reason to generate the same set of random numbers, such as for a
+            test. Default is the numpy.random module.
+
         Examples
         --------
         >>> L = np.array([0.1,-0.2,0.3])
@@ -612,14 +617,14 @@
         if my_n_obs == n_ph_tot:
             idxs = np.arange(my_n_obs,dtype='uint64')
         else:
-            idxs = np.random.permutation(n_ph_tot)[:my_n_obs].astype("uint64")
+            idxs = prng.permutation(n_ph_tot)[:my_n_obs].astype("uint64")
         obs_cells = np.searchsorted(self.p_bins, idxs, side='right')-1
         delta = dx[obs_cells]
 
         if isinstance(normal, string_types):
 
-            xsky = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-            ysky = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            xsky = prng.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            ysky = prng.uniform(low=-0.5,high=0.5,size=my_n_obs)
             xsky *= delta
             ysky *= delta
             xsky += self.photons[axes_lookup[normal][0]][obs_cells]
@@ -629,9 +634,9 @@
                 vz = self.photons["v%s" % normal]
 
         else:
-            x = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-            y = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-            z = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            x = prng.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            y = prng.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            z = prng.uniform(low=-0.5,high=0.5,size=my_n_obs)
 
             if not no_shifting:
                 vz = self.photons["vx"]*z_hat[0] + \
@@ -664,7 +669,7 @@
             emid = absorb_model.emid
             aspec = absorb_model.get_spectrum()
             absorb = np.interp(eobs, emid, aspec, left=0.0, right=0.0)
-            randvec = aspec.max()*np.random.random(eobs.shape)
+            randvec = aspec.max()*prng.uniform(size=eobs.shape)
             not_abs = randvec < absorb
 
         if eff_area is None:
@@ -672,7 +677,7 @@
         else:
             mylog.info("Applying energy-dependent effective area.")
             earea = np.interp(eobs, earf, eff_area, left=0.0, right=0.0)
-            randvec = eff_area.max()*np.random.random(eobs.shape)
+            randvec = eff_area.max()*prng.uniform(size=eobs.shape)
             detected = randvec < earea
 
         detected = np.logical_and(not_abs, detected)
@@ -689,8 +694,8 @@
         events = comm.par_combine_object(events, datatype="dict", op="cat")
 
         if psf_sigma is not None:
-            events["xpix"] += np.random.normal(sigma=psf_sigma/dtheta)
-            events["ypix"] += np.random.normal(sigma=psf_sigma/dtheta)
+            events["xpix"] += prng.normal(sigma=psf_sigma/dtheta)
+            events["ypix"] += prng.normal(sigma=psf_sigma/dtheta)
 
         num_events = len(events["xpix"])
 
@@ -698,7 +703,8 @@
             mylog.info("Total number of observed photons: %d" % num_events)
 
         if "RMF" in parameters and convolve_energies:
-            events, info = self._convolve_with_rmf(parameters["RMF"], events, mat_key)
+            events, info = self._convolve_with_rmf(parameters["RMF"], events, 
+                                                   mat_key, prng)
             for k, v in info.items():
                 parameters[k] = v
 
@@ -725,7 +731,7 @@
         rmf.close()
         return weights
 
-    def _convolve_with_rmf(self, respfile, events, mat_key):
+    def _convolve_with_rmf(self, respfile, events, mat_key, prng):
         """
         Convolve the events with a RMF file.
         """
@@ -780,7 +786,7 @@
             if len(trueChannel) > 0:
                 for q in range(fcurr,last):
                     if phEE[q] >= low and phEE[q] < high:
-                        channelInd = np.random.choice(len(weights), p=weights)
+                        channelInd = prng.choice(len(weights), p=weights)
                         fcurr += 1
                         detectedChannels.append(trueChannel[channelInd])
                     if phEE[q] >= high:
@@ -1349,6 +1355,8 @@
                 bins = 0.5*(ee[1:]+ee[:-1])
                 spectype = "energy"
             else:
+                mylog.info("Events haven't been convolved with an RMF, so assuming "
+                           "a perfect response and %d PI channels." % nchan)
                 bins = (np.arange(nchan)+1).astype("int32")
                 spectype = "pi"
 

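Taken together, these hooks make both photon generation and projection reproducible. A minimal sketch of seeding both stages, assuming the sphere, spectral models, and response file paths from the tests in this series:

    from numpy.random import RandomState
    prng = RandomState(0x4d3d3d3)
    thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3, prng=prng)
    photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
                                      thermal_model)
    events = photons.project_photons("z", responses=[arf, rmf],
                                     absorb_model=tbabs_model,
                                     convolve_energies=True, prng=prng)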

https://bitbucket.org/yt_analysis/yt/commits/d3014a2994d5/
Changeset:   d3014a2994d5
Branch:      yt
User:        jzuhone
Date:        2015-11-02 18:38:35+00:00
Summary:     Updated answer testing
Affected #:  1 file

diff -r 3d05dc3b7a706fb1cb828337c2b121497e950cec -r d3014a2994d5eb1d2c08d9f03cafec72e246e225 yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -18,6 +18,7 @@
 from yt.utilities.answer_testing.framework import requires_ds, \
     GenericArrayTest, data_dir_load
 import numpy as np
+from numpy.random import RandomState
 
 def setup():
     from yt.config import ytcfg
@@ -26,46 +27,59 @@
 test_data_dir = ytcfg.get("yt", "test_data_dir")
 xray_data_dir = ytcfg.get("yt", "xray_data_dir")
 
+rmfs = ["pn-med.rmf", "acisi_aimpt_cy17.rmf",
+        "aciss_aimpt_cy17.rmf", "nustar.rmf",
+        "ah_sxs_5ev_basefilt_20100712.rmf"]
+arfs = ["pn-med.arf", "acisi_aimpt_cy17.arf",
+        "aciss_aimpt_cy17.arf", "nustar_3arcminA.arf",
+        "sxt-s_120210_ts02um_intallpxl.arf"]
+
 gslr = test_data_dir+"/GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
-APEC = xray_data_dir+"/atomdb_v2.0.2"
+APEC = xray_data_dir
 TBABS = xray_data_dir+"/tbabs_table.h5"
-ARF = xray_data_dir+"/aciss_aimpt_cy17.arf"
-RMF = xray_data_dir+"/aciss_aimpt_cy17.rmf"
 
-@requires_ds(ETC)
+@requires_ds(gslr)
 @requires_file(APEC)
 @requires_file(TBABS)
-@requires_file(ARF)
-@requires_file(RMF)
-def test_etc():
+def test_sloshing():
 
-    np.random.seed(seed=0x4d3d3d3)
+    prng = RandomState(0x4d3d3d3)
 
     ds = data_dir_load(gslr)
     A = 2000.
     exp_time = 1.0e5
     redshift = 0.1
 
-    apec_model = TableApecModel(APEC, 0.1, 20.0, 2000)
+    apec_model = TableApecModel(APEC, 0.1, 11.0, 10000)
     tbabs_model = TableAbsorbModel(TBABS, 0.1)
 
     sphere = ds.sphere("c", (0.1, "Mpc"))
 
-    thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3)
+    thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3, prng=prng)
     photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
                                       thermal_model)
 
     events = photons.project_photons("z", responses=[ARF,RMF],
                                      absorb_model=tbabs_model,
-                                     convolve_energies=True)
+                                     convolve_energies=True,
+                                     prng=prng)
+
+    test_sloshing.__name__ = test.description
 
     def photons_test():
         return photons.photons
 
-    def events_test():
-        return events.events
+    yield GenericArrayTest(ds, photons_test)
 
-    for test in [GenericArrayTest(ds, photons_test),
-                 GenericArrayTest(ds, events_test)]:
-        test_etc.__name__ = test.description
-        yield test
+    for a, r in zip(arfs, rmfs):
+        arf = os.path.join(xray_data_dir,a)
+        rmf = os.path.join(xray_data_dir,r)
+        events = photons.project_photons("z", responses=[arf,rmf],
+                                         absorb_model=tbabs_model, 
+                                         convolve_energies=True)
+
+        def events_test():
+            return events.events
+
+        yield GenericArrayTest(ds, events_test)
+

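For background, these are nose-style generator tests: each yielded GenericArrayTest runs as its own answer test, and assigning test.description to the generator's __name__ gives each one a distinct report name. The bare pattern looks like this (hypothetical accessor):

    def test_example():
        ds = data_dir_load(gslr)
        def return_density():
            return ds.all_data()["gas", "density"]
        for test in [GenericArrayTest(ds, return_density)]:
            test_example.__name__ = test.description
            yield test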

https://bitbucket.org/yt_analysis/yt/commits/68fe8a08e35e/
Changeset:   68fe8a08e35e
Branch:      yt
User:        jzuhone
Date:        2015-11-02 22:04:32+00:00
Summary:     Updating tests
Affected #:  2 files

diff -r d3014a2994d5eb1d2c08d9f03cafec72e246e225 -r 68fe8a08e35eb54a496260743c035931ca17e93a yt/analysis_modules/photon_simulator/tests/test_beta_model.py
--- a/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
@@ -1,5 +1,5 @@
 """
-Answer test the photon_simulator analysis module.
+A unit test for the photon_simulator analysis module.
 """
 
 #-----------------------------------------------------------------------------
@@ -16,13 +16,18 @@
 from yt.config import ytcfg
 from yt.utilities.answer_testing.framework import \
     requires_module
+from yt.testing import requires_file
 import numpy as np
 from yt.utilities.physical_ratios import \
     K_per_keV, mass_hydrogen_grams
+from yt.utilities.physical_constants import clight
 from yt.frontends.stream.api import load_uniform_grid
 import os
 import tempfile
 import shutil
+from numpy.random import RandomState
+
+ckms = clight.in_units("km/s").v
 
 def setup():
     from yt.config import ytcfg
@@ -30,14 +35,12 @@
 
 xray_data_dir = ytcfg.get("yt", "xray_data_dir")
 
-rmfs = ["pn-med.rmf", "acisi_aimpt_cy17.rmf",
-        "aciss_aimpt_cy17.rmf", "nustar.rmf",
-        "ah_sxs_5ev_basefilt_20100712.rmf"]
-arfs = ["pn-med.arf", "acisi_aimpt_cy17.arf",
-        "aciss_aimpt_cy17.arf", "nustar_3arcminA.arf",
-        "sxt-s_120210_ts02um_intallpxl.arf"]
+arf = xray_data_dir+"/sxt-s_120210_ts02um_intallpxl.arf"
+rmf = xray_data_dir+"/ah_sxs_5ev_basefilt_20100712.rmf"
 
 @requires_module("xspec")
+@requires_file(arf)
+@requires_file(rmf)
 def test_beta_model():
     import xspec
     
@@ -48,6 +51,8 @@
     xspec.Fit.delta = 0.01
     xspec.Xset.chatter = 5
 
+    my_prng = RandomState(24)
+
     tmpdir = tempfile.mkdtemp()
     curdir = os.getcwd()
     os.chdir(tmpdir)
@@ -56,8 +61,9 @@
     r_c = 0.05
     rho_c = 0.04*mass_hydrogen_grams
     beta = 1.
-    T = 6.0
+    kT_sim = 6.0
     v_shift = 4.0e7
+    v_width = 4.0e7
     nx = 128
 
     ddims = (nx,nx,nx)
@@ -71,15 +77,16 @@
     dens = np.zeros(ddims)
     dens[r <= R] = rho_c*(1.+(r[r <= R]/r_c)**2)**(-1.5*beta)
     dens[r > R] = 0.0
-    temp = T*K_per_keV*np.ones(ddims)
+    temp = kT_sim*K_per_keV*np.ones(ddims)
     bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
+    velz = my_prng.normal(loc=v_shift,scale=v_width,size=ddims)
 
     data = {}
     data["density"] = (dens, "g/cm**3")
     data["temperature"] = (temp, "K")
     data["velocity_x"] = (np.zeros(ddims), "cm/s")
     data["velocity_y"] = (np.zeros(ddims), "cm/s")
-    data["velocity_z"] = (v_shift*np.ones(ddims), "cm/s")
+    data["velocity_z"] = (velz, "cm/s")
 
     ds = load_uniform_grid(data, ddims, length_unit=(2*R, "Mpc"),
                            nprocs=64, bbox=bbox)
@@ -87,49 +94,75 @@
     A = 3000.
     exp_time = 1.0e5
     redshift = 0.05
+    nH_sim = 0.02
 
     apec_model = XSpecThermalModel("bapec", 0.1, 11.5, 20000,
                                    thermal_broad=True)
-    abs_model = XSpecAbsorbModel("TBabs", 0.02)
+    abs_model = XSpecAbsorbModel("TBabs", nH_sim)
 
     sphere = ds.sphere("c", (0.5, "Mpc"))
 
-    thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3)
+    mu_sim = -v_shift / 1.0e5
+    sigma_sim = v_width / 1.0e5
+
+    Z_sim = 0.3
+
+    thermal_model = ThermalPhotonModel(apec_model, Zmet=Z_sim, X_H=0.76,
+                                       prng=my_prng)
     photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
                                       thermal_model)
 
-    for a, r in zip(rmfs, arfs):
-        arf = xray_data_dir+a
-        rmf = xray_data_dir+r
-        events = photons.project_photons([1.0,-1.0,0.5], responses=[arf,rmf],
-                                         absorb_model=abs_model)
-        events.write_spectrum("beta_model_evt.pi", clobber=True)
+    D_A = photons.parameters["FiducialAngularDiameterDistance"]
 
-        s = xspec.Spectrum("beta_model_evt.pi")
-        s.ignore("**-0.5")
-        s.ignore("7.0-**")
-        m = xspec.Model("tbabs*bapec")
-        m.bapec.kT = 5.0
-        m.bapec.Abundanc = 0.25
-        m.bapec.norm = 1.0
-        m.bapec.Redshift = 0.05
-        m.bapec.Velocity = 100.0
-        m.TBabs.nH = 0.015
+    norm_sim = sphere.quantities.total_quantity("emission_measure")
+    norm_sim *= 1.0e-14/(4*np.pi*D_A*D_A*(1.+redshift)*(1.+redshift))
+    norm_sim = float(norm_sim.in_cgs())
 
-        m.bapec.Velocity.frozen = False
-        m.bapec.Abundanc.frozen = False
-        m.bapec.Redshift.frozen = False
-        m.TBabs.nH.frozen = False
+    events = photons.project_photons("z", responses=[arf,rmf],
+                                     absorb_model=abs_model, 
+                                     convolve_energies=True, prng=my_prng)
+    events.write_spectrum("beta_model_evt.pi", clobber=True)
 
-        xspec.Fit.renorm()
-        xspec.Fit.nIterations = 100
-        xspec.Fit.perform()
+    s = xspec.Spectrum("beta_model_evt.pi")
+    s.ignore("**-0.5")
+    s.ignore("9.0-**")
 
-        assert np.abs(m.bapec.Redshift.values[0]-v_shift) < 1.0
-        assert np.abs(m.bapec.kT.values[0]-6.0) < 1.0
-        assert np.abs(m.bapec.Abundanc.values[0]-0.3) < 1.0
-        assert np.abs(m.bapec.Velocity.values[0]-0.0) < 1.0
-        assert np.abs(m.TBabs.nH.values[0]-0.02) < 1.0
+    m = xspec.Model("tbabs*bapec")
+    m.bapec.kT = 5.5
+    m.bapec.Abundanc = 0.25
+    m.bapec.norm = 1.0
+    m.bapec.Redshift = 0.05
+    m.bapec.Velocity = 300.0
+    m.TBabs.nH = 0.02
 
+    m.bapec.Velocity.frozen = False
+    m.bapec.Abundanc.frozen = False
+    m.bapec.Redshift.frozen = False
+    m.TBabs.nH.frozen = True
+
+    xspec.Fit.renorm()
+    xspec.Fit.nIterations = 100
+    xspec.Fit.perform()
+    
+    kT  = m.bapec.kT.values[0]
+    mu = (m.bapec.Redshift.values[0]-redshift)*ckms
+    Z = m.bapec.Abundanc.values[0]
+    sigma = m.bapec.Velocity.values[0]
+    norm = m.bapec.norm.values[0]
+
+    dkT = m.bapec.kT.sigma
+    dmu = m.bapec.Redshift.sigma*ckms
+    dZ = m.bapec.Abundanc.sigma
+    dsigma = m.bapec.Velocity.sigma
+    dnorm = m.bapec.norm.sigma
+
+    print kT, kT_sim, dkT
+
+    assert np.abs(mu-mu_sim) < dmu
+    assert np.abs(kT-kT_sim) < dkT    
+    assert np.abs(Z-Z_sim) < dZ
+    assert np.abs(sigma-sigma_sim) < dsigma
+    assert np.abs(norm-norm_sim) < dnorm
+        
     os.chdir(curdir)
     shutil.rmtree(tmpdir)

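The norm_sim computation in this test follows the standard XSPEC APEC normalization convention,

    norm = 1.0e-14 * EM / (4 * pi * D_A**2 * (1 + z)**2)

where EM is the total "emission_measure" of the source region (in cm**-3) and D_A is the angular diameter distance in cm, so the fitted bapec norm can be compared directly against the input model.
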
diff -r d3014a2994d5eb1d2c08d9f03cafec72e246e225 -r 68fe8a08e35eb54a496260743c035931ca17e93a yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -59,7 +59,7 @@
     photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
                                       thermal_model)
 
-    events = photons.project_photons("z", responses=[ARF,RMF],
+    events = photons.project_photons([1.0,-0.5,0.2], responses=[ARF,RMF],
                                      absorb_model=tbabs_model,
                                      convolve_energies=True,
                                      prng=prng)


https://bitbucket.org/yt_analysis/yt/commits/7cc7df10f345/
Changeset:   7cc7df10f345
Branch:      yt
User:        jzuhone
Date:        2015-11-02 22:11:36+00:00
Summary:     Merging
Affected #:  5 files

diff -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 -r 7cc7df10f345f6bc52a264eb6c0b05bba17477d0 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -73,14 +73,20 @@
         "invert_cdf": Invert the cumulative distribution function of the spectrum.
         "accept_reject": Acceptance-rejection method using the spectrum. 
         The first method should be sufficient for most cases. 
+    prng : NumPy `RandomState` object or numpy.random
+        A pseudo-random number generator. Typically will only be specified
+        if you have a reason to generate the same set of random numbers, such as for a
+        test. Default is the numpy.random module.
     """
     def __init__(self, spectral_model, X_H=0.75, Zmet=0.3, 
-                 photons_per_chunk=10000000, method="invert_cdf"):
+                 photons_per_chunk=10000000, method="invert_cdf",
+                 prng=np.random):
         self.X_H = X_H
         self.Zmet = Zmet
         self.spectral_model = spectral_model
         self.photons_per_chunk = photons_per_chunk
         self.method = method
+        self.prng = prng
 
     def __call__(self, data_source, parameters):
 
@@ -174,7 +180,7 @@
                 tot_ph_c = cspec.d.sum()
                 tot_ph_m = mspec.d.sum()
 
-                u = np.random.random(size=n_current)
+                u = self.prng.uniform(size=n_current)
 
                 cell_norm_c = tot_ph_c*cem
                 cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cem
@@ -209,7 +215,7 @@
                         cumspec += Z * cumspec_m
                         norm_factor = 1.0 / cumspec[-1]
                         cumspec *= norm_factor
-                        randvec = np.random.uniform(size=cn)
+                        randvec = self.prng.uniform(size=cn)
                         randvec.sort()
                         cell_e = np.interp(randvec, cumspec, ebins)
                     elif self.method == "accept_reject":
@@ -217,7 +223,7 @@
                         tot_spec += Z * mspec.d
                         norm_factor = 1.0 / tot_spec.sum()
                         tot_spec *= norm_factor
-                        eidxs = np.random.choice(nchan, size=cn, p=tot_spec)
+                        eidxs = self.prng.choice(nchan, size=cn, p=tot_spec)
                         cell_e = emid[eidxs]
                     energies[ei:ei+cn] = cell_e
                     cell_counter += 1

diff -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 -r 7cc7df10f345f6bc52a264eb6c0b05bba17477d0 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -449,7 +449,7 @@
                         absorb_model=None, psf_sigma=None,
                         sky_center=None, responses=None,
                         convolve_energies=False, no_shifting=False,
-                        north_vector=None):
+                        north_vector=None, prng=np.random):
         r"""
         Projects photons onto an image plane given a line of sight.
 
@@ -489,6 +489,11 @@
             the plane of projection. If not set, an arbitrary grid-aligned north_vector 
             is chosen. Ignored in the case where a particular axis (e.g., "x", "y", or
             "z") is explicitly specified.
+        prng : NumPy `RandomState` object or numpy.random
+            A pseudo-random number generator. Typically will only be specified if you
+            have a reason to generate the same set of random numbers, such as for a
+            test. Default is the numpy.random module.
+
         Examples
         --------
         >>> L = np.array([0.1,-0.2,0.3])
@@ -612,14 +617,14 @@
         if my_n_obs == n_ph_tot:
             idxs = np.arange(my_n_obs,dtype='uint64')
         else:
-            idxs = np.random.permutation(n_ph_tot)[:my_n_obs].astype("uint64")
+            idxs = prng.permutation(n_ph_tot)[:my_n_obs].astype("uint64")
         obs_cells = np.searchsorted(self.p_bins, idxs, side='right')-1
         delta = dx[obs_cells]
 
         if isinstance(normal, string_types):
 
-            xsky = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-            ysky = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            xsky = prng.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            ysky = prng.uniform(low=-0.5,high=0.5,size=my_n_obs)
             xsky *= delta
             ysky *= delta
             xsky += self.photons[axes_lookup[normal][0]][obs_cells]
@@ -629,9 +634,9 @@
                 vz = self.photons["v%s" % normal]
 
         else:
-            x = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-            y = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-            z = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            x = prng.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            y = prng.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            z = prng.uniform(low=-0.5,high=0.5,size=my_n_obs)
 
             if not no_shifting:
                 vz = self.photons["vx"]*z_hat[0] + \
@@ -664,7 +669,7 @@
             emid = absorb_model.emid
             aspec = absorb_model.get_spectrum()
             absorb = np.interp(eobs, emid, aspec, left=0.0, right=0.0)
-            randvec = aspec.max()*np.random.random(eobs.shape)
+            randvec = aspec.max()*prng.uniform(size=eobs.shape)
             not_abs = randvec < absorb
 
         if eff_area is None:
@@ -672,7 +677,7 @@
         else:
             mylog.info("Applying energy-dependent effective area.")
             earea = np.interp(eobs, earf, eff_area, left=0.0, right=0.0)
-            randvec = eff_area.max()*np.random.random(eobs.shape)
+            randvec = eff_area.max()*prng.uniform(size=eobs.shape)
             detected = randvec < earea
 
         detected = np.logical_and(not_abs, detected)
@@ -689,8 +694,8 @@
         events = comm.par_combine_object(events, datatype="dict", op="cat")
 
         if psf_sigma is not None:
-            events["xpix"] += np.random.normal(sigma=psf_sigma/dtheta)
-            events["ypix"] += np.random.normal(sigma=psf_sigma/dtheta)
+            events["xpix"] += prng.normal(sigma=psf_sigma/dtheta)
+            events["ypix"] += prng.normal(sigma=psf_sigma/dtheta)
 
         num_events = len(events["xpix"])
 
@@ -698,7 +703,8 @@
             mylog.info("Total number of observed photons: %d" % num_events)
 
         if "RMF" in parameters and convolve_energies:
-            events, info = self._convolve_with_rmf(parameters["RMF"], events, mat_key)
+            events, info = self._convolve_with_rmf(parameters["RMF"], events, 
+                                                   mat_key, prng)
             for k, v in info.items():
                 parameters[k] = v
 
@@ -725,7 +731,7 @@
         rmf.close()
         return weights
 
-    def _convolve_with_rmf(self, respfile, events, mat_key):
+    def _convolve_with_rmf(self, respfile, events, mat_key, prng):
         """
         Convolve the events with a RMF file.
         """
@@ -780,7 +786,7 @@
             if len(trueChannel) > 0:
                 for q in range(fcurr,last):
                     if phEE[q] >= low and phEE[q] < high:
-                        channelInd = np.random.choice(len(weights), p=weights)
+                        channelInd = prng.choice(len(weights), p=weights)
                         fcurr += 1
                         detectedChannels.append(trueChannel[channelInd])
                     if phEE[q] >= high:
@@ -1349,6 +1355,8 @@
                 bins = 0.5*(ee[1:]+ee[:-1])
                 spectype = "energy"
             else:
+                mylog.info("Events haven't been convolved with an RMF, so assuming "
+                           "a perfect response and %d PI channels." % nchan)
                 bins = (np.arange(nchan)+1).astype("int32")
                 spectype = "pi"
 

diff -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 -r 7cc7df10f345f6bc52a264eb6c0b05bba17477d0 yt/analysis_modules/photon_simulator/tests/test_beta_model.py
--- a/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
@@ -1,5 +1,5 @@
 """
-Answer test the photon_simulator analysis module.
+A unit test for the photon_simulator analysis module.
 """
 
 #-----------------------------------------------------------------------------
@@ -16,13 +16,18 @@
 from yt.config import ytcfg
 from yt.utilities.answer_testing.framework import \
     requires_module
+from yt.testing import requires_file
 import numpy as np
 from yt.utilities.physical_ratios import \
     K_per_keV, mass_hydrogen_grams
+from yt.utilities.physical_constants import clight
 from yt.frontends.stream.api import load_uniform_grid
 import os
 import tempfile
 import shutil
+from numpy.random import RandomState
+
+ckms = clight.in_units("km/s").v
 
 def setup():
     from yt.config import ytcfg
@@ -30,14 +35,12 @@
 
 xray_data_dir = ytcfg.get("yt", "xray_data_dir")
 
-rmfs = ["pn-med.rmf", "acisi_aimpt_cy17.rmf",
-        "aciss_aimpt_cy17.rmf", "nustar.rmf",
-        "ah_sxs_5ev_basefilt_20100712.rmf"]
-arfs = ["pn-med.arf", "acisi_aimpt_cy17.arf",
-        "aciss_aimpt_cy17.arf", "nustar_3arcminA.arf",
-        "sxt-s_120210_ts02um_intallpxl.arf"]
+arf = xray_data_dir+"/sxt-s_120210_ts02um_intallpxl.arf"
+rmf = xray_data_dir+"/ah_sxs_5ev_basefilt_20100712.rmf"
 
 @requires_module("xspec")
+@requires_file(arf)
+@requires_file(rmf)
 def test_beta_model():
     import xspec
     
@@ -48,6 +51,8 @@
     xspec.Fit.delta = 0.01
     xspec.Xset.chatter = 5
 
+    my_prng = RandomState(24)
+
     tmpdir = tempfile.mkdtemp()
     curdir = os.getcwd()
     os.chdir(tmpdir)
@@ -56,8 +61,9 @@
     r_c = 0.05
     rho_c = 0.04*mass_hydrogen_grams
     beta = 1.
-    T = 6.0
+    kT_sim = 6.0
     v_shift = 4.0e7
+    v_width = 4.0e7
     nx = 128
 
     ddims = (nx,nx,nx)
@@ -71,15 +77,16 @@
     dens = np.zeros(ddims)
     dens[r <= R] = rho_c*(1.+(r[r <= R]/r_c)**2)**(-1.5*beta)
     dens[r > R] = 0.0
-    temp = T*K_per_keV*np.ones(ddims)
+    temp = kT_sim*K_per_keV*np.ones(ddims)
     bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
+    velz = my_prng.normal(loc=v_shift,scale=v_width,size=ddims)
 
     data = {}
     data["density"] = (dens, "g/cm**3")
     data["temperature"] = (temp, "K")
     data["velocity_x"] = (np.zeros(ddims), "cm/s")
     data["velocity_y"] = (np.zeros(ddims), "cm/s")
-    data["velocity_z"] = (v_shift*np.ones(ddims), "cm/s")
+    data["velocity_z"] = (velz, "cm/s")
 
     ds = load_uniform_grid(data, ddims, length_unit=(2*R, "Mpc"),
                            nprocs=64, bbox=bbox)
@@ -87,49 +94,75 @@
     A = 3000.
     exp_time = 1.0e5
     redshift = 0.05
+    nH_sim = 0.02
 
     apec_model = XSpecThermalModel("bapec", 0.1, 11.5, 20000,
                                    thermal_broad=True)
-    abs_model = XSpecAbsorbModel("TBabs", 0.02)
+    abs_model = XSpecAbsorbModel("TBabs", nH_sim)
 
     sphere = ds.sphere("c", (0.5, "Mpc"))
 
-    thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3)
+    mu_sim = -v_shift / 1.0e5
+    sigma_sim = v_width / 1.0e5
+
+    Z_sim = 0.3
+
+    thermal_model = ThermalPhotonModel(apec_model, Zmet=Z_sim, X_H=0.76,
+                                       prng=my_prng)
     photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
                                       thermal_model)
 
-    for a, r in zip(rmfs, arfs):
-        arf = xray_data_dir+a
-        rmf = xray_data_dir+r
-        events = photons.project_photons([1.0,-1.0,0.5], responses=[arf,rmf],
-                                         absorb_model=abs_model)
-        events.write_spectrum("beta_model_evt.pi", clobber=True)
+    D_A = photons.parameters["FiducialAngularDiameterDistance"]
 
-        s = xspec.Spectrum("beta_model_evt.pi")
-        s.ignore("**-0.5")
-        s.ignore("7.0-**")
-        m = xspec.Model("tbabs*bapec")
-        m.bapec.kT = 5.0
-        m.bapec.Abundanc = 0.25
-        m.bapec.norm = 1.0
-        m.bapec.Redshift = 0.05
-        m.bapec.Velocity = 100.0
-        m.TBabs.nH = 0.015
+    norm_sim = sphere.quantities.total_quantity("emission_measure")
+    norm_sim *= 1.0e-14/(4*np.pi*D_A*D_A*(1.+redshift)*(1.+redshift))
+    norm_sim = float(norm_sim.in_cgs())
 
-        m.bapec.Velocity.frozen = False
-        m.bapec.Abundanc.frozen = False
-        m.bapec.Redshift.frozen = False
-        m.TBabs.nH.frozen = False
+    events = photons.project_photons("z", responses=[arf,rmf],
+                                     absorb_model=abs_model, 
+                                     convolve_energies=True, prng=my_prng)
+    events.write_spectrum("beta_model_evt.pi", clobber=True)
 
-        xspec.Fit.renorm()
-        xspec.Fit.nIterations = 100
-        xspec.Fit.perform()
+    s = xspec.Spectrum("beta_model_evt.pi")
+    s.ignore("**-0.5")
+    s.ignore("9.0-**")
 
-        assert np.abs(m.bapec.Redshift.values[0]-v_shift) < 1.0
-        assert np.abs(m.bapec.kT.values[0]-6.0) < 1.0
-        assert np.abs(m.bapec.Abundanc.values[0]-0.3) < 1.0
-        assert np.abs(m.bapec.Velocity.values[0]-0.0) < 1.0
-        assert np.abs(m.TBabs.nH.values[0]-0.02) < 1.0
+    m = xspec.Model("tbabs*bapec")
+    m.bapec.kT = 5.5
+    m.bapec.Abundanc = 0.25
+    m.bapec.norm = 1.0
+    m.bapec.Redshift = 0.05
+    m.bapec.Velocity = 300.0
+    m.TBabs.nH = 0.02
 
+    m.bapec.Velocity.frozen = False
+    m.bapec.Abundanc.frozen = False
+    m.bapec.Redshift.frozen = False
+    m.TBabs.nH.frozen = True
+
+    xspec.Fit.renorm()
+    xspec.Fit.nIterations = 100
+    xspec.Fit.perform()
+    
+    kT  = m.bapec.kT.values[0]
+    mu = (m.bapec.Redshift.values[0]-redshift)*ckms
+    Z = m.bapec.Abundanc.values[0]
+    sigma = m.bapec.Velocity.values[0]
+    norm = m.bapec.norm.values[0]
+
+    dkT = m.bapec.kT.sigma
+    dmu = m.bapec.Redshift.sigma*ckms
+    dZ = m.bapec.Abundanc.sigma
+    dsigma = m.bapec.Velocity.sigma
+    dnorm = m.bapec.norm.sigma
+
+    print kT, kT_sim, dkT
+
+    assert np.abs(mu-mu_sim) < dmu
+    assert np.abs(kT-kT_sim) < dkT    
+    assert np.abs(Z-Z_sim) < dZ
+    assert np.abs(sigma-sigma_sim) < dsigma
+    assert np.abs(norm-norm_sim) < dnorm
+        
     os.chdir(curdir)
     shutil.rmtree(tmpdir)

diff -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 -r 7cc7df10f345f6bc52a264eb6c0b05bba17477d0 yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -18,6 +18,7 @@
 from yt.utilities.answer_testing.framework import requires_ds, \
     GenericArrayTest, data_dir_load
 import numpy as np
+from numpy.random import RandomState
 
 def setup():
     from yt.config import ytcfg
@@ -26,46 +27,59 @@
 test_data_dir = ytcfg.get("yt", "test_data_dir")
 xray_data_dir = ytcfg.get("yt", "xray_data_dir")
 
+rmfs = ["pn-med.rmf", "acisi_aimpt_cy17.rmf",
+        "aciss_aimpt_cy17.rmf", "nustar.rmf",
+        "ah_sxs_5ev_basefilt_20100712.rmf"]
+arfs = ["pn-med.arf", "acisi_aimpt_cy17.arf",
+        "aciss_aimpt_cy17.arf", "nustar_3arcminA.arf",
+        "sxt-s_120210_ts02um_intallpxl.arf"]
+
 gslr = test_data_dir+"/GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
-APEC = xray_data_dir+"/atomdb_v2.0.2"
+APEC = xray_data_dir
 TBABS = xray_data_dir+"/tbabs_table.h5"
-ARF = xray_data_dir+"/aciss_aimpt_cy17.arf"
-RMF = xray_data_dir+"/aciss_aimpt_cy17.rmf"
 
-@requires_ds(ETC)
+@requires_ds(gslr)
 @requires_file(APEC)
 @requires_file(TBABS)
-@requires_file(ARF)
-@requires_file(RMF)
-def test_etc():
+def test_sloshing():
 
-    np.random.seed(seed=0x4d3d3d3)
+    prng = RandomState(0x4d3d3d3)
 
     ds = data_dir_load(gslr)
     A = 2000.
     exp_time = 1.0e5
     redshift = 0.1
 
-    apec_model = TableApecModel(APEC, 0.1, 20.0, 2000)
+    apec_model = TableApecModel(APEC, 0.1, 11.0, 10000)
     tbabs_model = TableAbsorbModel(TBABS, 0.1)
 
     sphere = ds.sphere("c", (0.1, "Mpc"))
 
-    thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3)
+    thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3, prng=prng)
     photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
                                       thermal_model)
 
-    events = photons.project_photons("z", responses=[ARF,RMF],
+    events = photons.project_photons([1.0,-0.5,0.2], responses=[ARF,RMF],
                                      absorb_model=tbabs_model,
-                                     convolve_energies=True)
+                                     convolve_energies=True,
+                                     prng=prng)
+
+    test_sloshing.__name__ = test.description
 
     def photons_test():
         return photons.photons
 
-    def events_test():
-        return events.events
+    yield GenericArrayTest(ds, photons_test)
 
-    for test in [GenericArrayTest(ds, photons_test),
-                 GenericArrayTest(ds, events_test)]:
-        test_etc.__name__ = test.description
-        yield test
+    for a, r in zip(arfs, rmfs):
+        arf = os.path.join(xray_data_dir,a)
+        rmf = os.path.join(xray_data_dir,r)
+        events = photons.project_photons("z", responses=[arf,rmf],
+                                         absorb_model=tbabs_model, 
+                                         convolve_energies=True)
+
+        def events_test():
+            return events.events
+
+        yield GenericArrayTest(ds, events_test)
+

diff -r ab68b2728771961ab1b0dbfa7b72cd0c123137e4 -r 7cc7df10f345f6bc52a264eb6c0b05bba17477d0 yt/fields/astro_fields.py
--- a/yt/fields/astro_fields.py
+++ b/yt/fields/astro_fields.py
@@ -91,6 +91,20 @@
                        function=_chandra_emissivity,
                        units="") # add correct units here
 
+    def _emission_measure(field, data):
+        if data.has_field_parameter("X_H"):
+            X_H = data.get_field_parameter("X_H")
+        else:
+            X_H = 0.76
+        nenh = data["density"]*data["density"]
+        nenh /= mh*mh
+        nenh *= 0.5*(1.+X_H)*X_H*data["cell_volume"]
+        return nenh
+    
+    registry.add_field((ftype, "emission_measure"),
+                       function=_emission_measure,
+                       units="cm**-3")
+
     def _xray_emissivity(field, data):
         # old scaling coefficient was 2.168e60
         return data.ds.arr(data[ftype, "density"].to_ndarray().astype(np.float64)**2


https://bitbucket.org/yt_analysis/yt/commits/1206e507df9c/
Changeset:   1206e507df9c
Branch:      yt
User:        jzuhone
Date:        2015-11-03 05:48:46+00:00
Summary:     Fixing tests. Adding cleanup routines for spectral models.
Affected #:  6 files

diff -r 7cc7df10f345f6bc52a264eb6c0b05bba17477d0 -r 1206e507df9c4e7bfb3b61b2adce428c76b5c596 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -258,4 +258,6 @@
         mylog.info("Number of photons generated: %d" % int(np.sum(photons["NumberOfPhotons"])))
         mylog.info("Number of cells with photons: %d" % len(photons["x"]))
 
+        self.spectral_model.cleanup_spectrum()
+
         return photons

diff -r 7cc7df10f345f6bc52a264eb6c0b05bba17477d0 -r 1206e507df9c4e7bfb3b61b2adce428c76b5c596 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -90,6 +90,9 @@
         else:
             return self.photons[key]
 
+    def __contains__(self, key):
+        return key in self.photons
+
     def __repr__(self):
         return self.photons.__repr__()
 
@@ -671,6 +674,7 @@
             absorb = np.interp(eobs, emid, aspec, left=0.0, right=0.0)
             randvec = aspec.max()*prng.uniform(size=eobs.shape)
             not_abs = randvec < absorb
+            absorb_model.cleanup_spectrum()
 
         if eff_area is None:
             detected = np.ones(eobs.shape, dtype='bool')

diff -r 7cc7df10f345f6bc52a264eb6c0b05bba17477d0 -r 1206e507df9c4e7bfb3b61b2adce428c76b5c596 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -42,6 +42,9 @@
     def get_spectrum(self):
         pass
 
+    def cleanup_spectrum(self):
+        pass
+
 class XSpecThermalModel(SpectralModel):
     r"""
     Initialize a thermal gas emission model from PyXspec.
@@ -109,6 +112,10 @@
         cosmic_spec *= self.norm
         metal_spec *= self.norm
         return YTArray(cosmic_spec, "cm**3/s"), YTArray(metal_spec, "cm**3/s")
+    
+    def cleanup_spectrum(self):
+        del self.thermal_comp
+        del self.model
 
 class XSpecAbsorbModel(SpectralModel):
     r"""
@@ -165,6 +172,9 @@
         m.nH = self.nH
         return np.array(self.model.values(0))
 
+    def cleanup_spectrum(self):
+        del self.model
+
 class TableApecModel(SpectralModel):
     r"""
     Initialize a thermal gas emission model from the AtomDB APEC tables

diff -r 7cc7df10f345f6bc52a264eb6c0b05bba17477d0 -r 1206e507df9c4e7bfb3b61b2adce428c76b5c596 yt/analysis_modules/photon_simulator/tests/test_beta_model.py
--- a/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
@@ -163,6 +163,9 @@
     assert np.abs(Z-Z_sim) < dZ
     assert np.abs(sigma-sigma_sim) < dsigma
     assert np.abs(norm-norm_sim) < dnorm
-        
+
+    xspec.AllModels.clear()
+    xspec.AllData.clear()
+
     os.chdir(curdir)
     shutil.rmtree(tmpdir)

diff -r 7cc7df10f345f6bc52a264eb6c0b05bba17477d0 -r 1206e507df9c4e7bfb3b61b2adce428c76b5c596 yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -19,6 +19,7 @@
     GenericArrayTest, data_dir_load
 import numpy as np
 from numpy.random import RandomState
+import os
 
 def setup():
     from yt.config import ytcfg
@@ -38,6 +39,11 @@
 APEC = xray_data_dir
 TBABS = xray_data_dir+"/tbabs_table.h5"
 
+def return_data(data):
+    def _return_data():
+        return data
+    return _return_data
+
 @requires_ds(gslr)
 @requires_file(APEC)
 @requires_file(TBABS)
@@ -47,7 +53,7 @@
 
     ds = data_dir_load(gslr)
     A = 2000.
-    exp_time = 1.0e5
+    exp_time = 1.0e4
     redshift = 0.1
 
     apec_model = TableApecModel(APEC, 0.1, 11.0, 10000)
@@ -59,27 +65,22 @@
     photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
                                       thermal_model)
 
-    events = photons.project_photons([1.0,-0.5,0.2], responses=[ARF,RMF],
-                                     absorb_model=tbabs_model,
-                                     convolve_energies=True,
-                                     prng=prng)
+    return_photons = return_data(photons.photons)
 
-    test_sloshing.__name__ = test.description
-
-    def photons_test():
-        return photons.photons
-
-    yield GenericArrayTest(ds, photons_test)
+    tests = []
+    tests.append(GenericArrayTest(ds, return_photons))
 
     for a, r in zip(arfs, rmfs):
         arf = os.path.join(xray_data_dir,a)
         rmf = os.path.join(xray_data_dir,r)
-        events = photons.project_photons("z", responses=[arf,rmf],
+        events = photons.project_photons([1.0,-0.5,0.2], responses=[arf,rmf],
                                          absorb_model=tbabs_model, 
-                                         convolve_energies=True)
+                                         convolve_energies=True, prng=prng)
 
-        def events_test():
-            return events.events
+        return_events = return_data(events.events)
 
-        yield GenericArrayTest(ds, events_test)
+        tests.append(GenericArrayTest(ds, return_events))
 
+    for test in tests:
+        test_sloshing.__name__ = test.description
+        yield test

diff -r 7cc7df10f345f6bc52a264eb6c0b05bba17477d0 -r 1206e507df9c4e7bfb3b61b2adce428c76b5c596 yt/analysis_modules/photon_simulator/tests/test_spectra.py
--- a/yt/analysis_modules/photon_simulator/tests/test_spectra.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_spectra.py
@@ -42,3 +42,4 @@
         test_apec.__name__ = test.description
         yield test
 
+    xmod.cleanup_spectrum()


https://bitbucket.org/yt_analysis/yt/commits/94c9b68f1cf1/
Changeset:   94c9b68f1cf1
Branch:      yt
User:        jzuhone
Date:        2015-11-03 06:39:01+00:00
Summary:     Make sure every test has a distinct name
Affected #:  1 file

diff -r 1206e507df9c4e7bfb3b61b2adce428c76b5c596 -r 94c9b68f1cf1dee14c7373d1dcdb6692520511a4 yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -40,7 +40,7 @@
 TBABS = xray_data_dir+"/tbabs_table.h5"
 
 def return_data(data):
-    def _return_data():
+    def _return_data(name):
         return data
     return _return_data
 
@@ -68,7 +68,7 @@
     return_photons = return_data(photons.photons)
 
     tests = []
-    tests.append(GenericArrayTest(ds, return_photons))
+    tests.append(GenericArrayTest(ds, return_photons, args=["photons"]))
 
     for a, r in zip(arfs, rmfs):
         arf = os.path.join(xray_data_dir,a)
@@ -79,7 +79,7 @@
 
         return_events = return_data(events.events)
 
-        tests.append(GenericArrayTest(ds, return_events))
+        tests.append(GenericArrayTest(ds, return_events, args=[a]))
 
     for test in tests:
         test_sloshing.__name__ = test.description
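
The args lists matter because the answer-testing framework builds each
test's description, and thus the key its answers are stored under, from its
arguments; without them every GenericArrayTest generated in the loop would
share one name. The same idea for generated nose-style tests, as a sketch
rather than yt's actual API:

    def generate_checks(names):
        for name in names:
            def check(expected=name):
                assert expected in names
            # Distinct __name__ values keep runners and answer stores
            # from conflating the generated tests.
            check.__name__ = "test_%s" % name.replace(".", "_")
            yield check

    for c in generate_checks(["photons", "pn-med.arf"]):
        print(c.__name__)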


https://bitbucket.org/yt_analysis/yt/commits/a43e790d6445/
Changeset:   a43e790d6445
Branch:      yt
User:        jzuhone
Date:        2015-11-09 17:25:54+00:00
Summary:     Use os.path.join instead
Affected #:  1 file

diff -r 94c9b68f1cf1dee14c7373d1dcdb6692520511a4 -r a43e790d6445312ba5bb88b7a53e8773c1b369f6 yt/analysis_modules/photon_simulator/tests/test_beta_model.py
--- a/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
@@ -35,8 +35,8 @@
 
 xray_data_dir = ytcfg.get("yt", "xray_data_dir")
 
-arf = xray_data_dir+"/sxt-s_120210_ts02um_intallpxl.arf"
-rmf = xray_data_dir+"/ah_sxs_5ev_basefilt_20100712.rmf"
+arf = os.path.join(xray_data_dir,"sxt-s_120210_ts02um_intallpxl.arf")
+rmf = os.path.join(xray_data_dir,"ah_sxs_5ev_basefilt_20100712.rmf")
 
 @requires_module("xspec")
 @requires_file(arf)
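
os.path.join is preferred over string concatenation here because it inserts
the separator only when needed, regardless of whether the configured
directory carries a trailing slash:

    import os

    base = "/data/xray_data/"                # note the trailing slash
    print(base + "/pn-med.rmf")              # -> /data/xray_data//pn-med.rmf
    print(os.path.join(base, "pn-med.rmf"))  # -> /data/xray_data/pn-med.rmf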


https://bitbucket.org/yt_analysis/yt/commits/3547c2e2eca0/
Changeset:   3547c2e2eca0
Branch:      yt
User:        ngoldbaum
Date:        2015-11-10 19:49:51+00:00
Summary:     Merged in jzuhone/yt (pull request #1844)

Unit and answer tests for photon_simulator
Affected #:  11 files

diff -r 652f3058834c94de92fb59bfde799736182ee8b7 -r 3547c2e2eca0949527d1d6ff86a63996eda62c84 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -1,3 +1,5 @@
+.. _photon_simulator:
+
 Constructing Mock X-ray Observations
 ------------------------------------
 
@@ -98,9 +100,8 @@
    `AtomDB <http://www.atomdb.org>`_ and get the files from the
    `xray_data <http://yt-project.org/data/xray_data.tar.gz>`_ auxiliary
    data package (see the ``xray_data`` `README <xray_data_README.html>`_ 
-   for details on the latter). Make sure that
-   in what follows you specify the full path to the locations of these
-   files.
+   for details on the latter). Make sure that in what follows you 
+   specify the full path to the locations of these files.
 
 To generate photons from this dataset, we have several different things
 we need to set up. The first is a standard yt data object. It could
@@ -197,7 +198,7 @@
 
 .. code:: python
 
-    A = 6000.
+    A = 3000.
     exp_time = 4.0e5
     redshift = 0.05
     cosmo = Cosmology()
@@ -298,7 +299,7 @@
 
 The second option, ``TableAbsorbModel``, takes as input an HDF5 file
 containing two datasets, ``"energy"`` (in keV), and ``"cross_section"``
-(in cm2), and the Galactic column density :math:`N_H`:
+(in :math:`cm^2`), and the Galactic column density :math:`N_H`:
 
 .. code:: python
 
@@ -307,7 +308,7 @@
 Now we're ready to project the photons. First, we choose a line-of-sight
 vector ``normal``. Second, we'll adjust the exposure time and the redshift.
 Third, we'll pass in the absorption ``SpectrumModel``. Fourth, we'll
-specify a ``sky_center`` in RA,DEC on the sky in degrees.
+specify a ``sky_center`` in RA and DEC on the sky in degrees.
 
 Also, we're going to convolve the photons with instrument ``responses``.
 For this, you need an ARF/RMF pair with matching energy bins. This is of
@@ -322,8 +323,8 @@
 
 .. code:: python
 
-    ARF = "chandra_ACIS-S3_onaxis_arf.fits"
-    RMF = "chandra_ACIS-S3_onaxis_rmf.fits"
+    ARF = "acisi_aimpt_cy17.arf"
+    RMF = "acisi_aimpt_cy17.rmf"
     normal = [0.0,0.0,1.0]
     events = photons.project_photons(normal, exp_time_new=2.0e5, redshift_new=0.07, dist_new=None, 
                                      absorb_model=abs_model, sky_center=(187.5,12.333), responses=[ARF,RMF], 
@@ -540,7 +541,7 @@
 
    sphere = ds.sphere("c", (1.0,"Mpc"))
        
-   A = 6000.
+   A = 3000.
    exp_time = 2.0e5
    redshift = 0.05
    cosmo = Cosmology()
@@ -555,7 +556,8 @@
 
 
    events = photons.project_photons([0.0,0.0,1.0], 
-                                    responses=["sim_arf.fits","sim_rmf.fits"], 
+                                    responses=["acisi_aimpt_cy17.arf",
+                                               "acisi_aimpt_cy17.rmf"], 
                                     absorb_model=abs_model,
                                     north_vector=[0.0,1.0,0.0])
 

diff -r 652f3058834c94de92fb59bfde799736182ee8b7 -r 3547c2e2eca0949527d1d6ff86a63996eda62c84 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -73,14 +73,20 @@
         "invert_cdf": Invert the cumulative distribution function of the spectrum.
         "accept_reject": Acceptance-rejection method using the spectrum. 
         The first method should be sufficient for most cases. 
+    prng : NumPy `RandomState` object or numpy.random
+        A pseudo-random number generator. Typically this will only be
+        specified if you have a reason to generate the same set of random
+        numbers, such as for a test. Default is the numpy.random module.
     """
     def __init__(self, spectral_model, X_H=0.75, Zmet=0.3, 
-                 photons_per_chunk=10000000, method="invert_cdf"):
+                 photons_per_chunk=10000000, method="invert_cdf",
+                 prng=np.random):
         self.X_H = X_H
         self.Zmet = Zmet
         self.spectral_model = spectral_model
         self.photons_per_chunk = photons_per_chunk
         self.method = method
+        self.prng = prng
 
     def __call__(self, data_source, parameters):
 
@@ -174,7 +180,7 @@
                 tot_ph_c = cspec.d.sum()
                 tot_ph_m = mspec.d.sum()
 
-                u = np.random.random(size=n_current)
+                u = self.prng.uniform(size=n_current)
 
                 cell_norm_c = tot_ph_c*cem
                 cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cem
@@ -209,7 +215,7 @@
                         cumspec += Z * cumspec_m
                         norm_factor = 1.0 / cumspec[-1]
                         cumspec *= norm_factor
-                        randvec = np.random.uniform(size=cn)
+                        randvec = self.prng.uniform(size=cn)
                         randvec.sort()
                         cell_e = np.interp(randvec, cumspec, ebins)
                     elif self.method == "accept_reject":
@@ -217,7 +223,7 @@
                         tot_spec += Z * mspec.d
                         norm_factor = 1.0 / tot_spec.sum()
                         tot_spec *= norm_factor
-                        eidxs = np.random.choice(nchan, size=cn, p=tot_spec)
+                        eidxs = self.prng.choice(nchan, size=cn, p=tot_spec)
                         cell_e = emid[eidxs]
                     energies[ei:ei+cn] = cell_e
                     cell_counter += 1
@@ -252,4 +258,6 @@
         mylog.info("Number of photons generated: %d" % int(np.sum(photons["NumberOfPhotons"])))
         mylog.info("Number of cells with photons: %d" % len(photons["x"]))
 
+        self.spectral_model.cleanup_spectrum()
+
         return photons
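
The prng keyword works because a seeded numpy.random.RandomState exposes the
same sampling methods (uniform, permutation, choice, normal) as the
numpy.random module, so the module can serve as the default while tests
inject a seeded generator for reproducible photons. A minimal sketch:

    import numpy as np
    from numpy.random import RandomState

    def draw(n, prng=np.random):
        # Accepts either the numpy.random module or a RandomState.
        return prng.uniform(size=n)

    a = draw(5, prng=RandomState(0x4d3d3d3))
    b = draw(5, prng=RandomState(0x4d3d3d3))
    assert np.allclose(a, b)  # identical seeds give identical draws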

diff -r 652f3058834c94de92fb59bfde799736182ee8b7 -r 3547c2e2eca0949527d1d6ff86a63996eda62c84 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -90,6 +90,9 @@
         else:
             return self.photons[key]
 
+    def __contains__(self, key):
+        return key in self.photons
+
     def __repr__(self):
         return self.photons.__repr__()
 
@@ -449,7 +452,7 @@
                         absorb_model=None, psf_sigma=None,
                         sky_center=None, responses=None,
                         convolve_energies=False, no_shifting=False,
-                        north_vector=None):
+                        north_vector=None, prng=np.random):
         r"""
         Projects photons onto an image plane given a line of sight.
 
@@ -489,6 +492,11 @@
             the plane of projection. If not set, an arbitrary grid-aligned north_vector 
             is chosen. Ignored in the case where a particular axis (e.g., "x", "y", or
             "z") is explicitly specified.
+        prng : NumPy `RandomState` object or numpy.random
+            A pseudo-random number generator. Typically this will only be
+            specified if you have a reason to generate the same set of
+            random numbers, such as for a test. Default is the
+            numpy.random module.
+
         Examples
         --------
         >>> L = np.array([0.1,-0.2,0.3])
@@ -612,14 +620,14 @@
         if my_n_obs == n_ph_tot:
             idxs = np.arange(my_n_obs,dtype='uint64')
         else:
-            idxs = np.random.permutation(n_ph_tot)[:my_n_obs].astype("uint64")
+            idxs = prng.permutation(n_ph_tot)[:my_n_obs].astype("uint64")
         obs_cells = np.searchsorted(self.p_bins, idxs, side='right')-1
         delta = dx[obs_cells]
 
         if isinstance(normal, string_types):
 
-            xsky = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-            ysky = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            xsky = prng.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            ysky = prng.uniform(low=-0.5,high=0.5,size=my_n_obs)
             xsky *= delta
             ysky *= delta
             xsky += self.photons[axes_lookup[normal][0]][obs_cells]
@@ -629,9 +637,9 @@
                 vz = self.photons["v%s" % normal]
 
         else:
-            x = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-            y = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
-            z = np.random.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            x = prng.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            y = prng.uniform(low=-0.5,high=0.5,size=my_n_obs)
+            z = prng.uniform(low=-0.5,high=0.5,size=my_n_obs)
 
             if not no_shifting:
                 vz = self.photons["vx"]*z_hat[0] + \
@@ -664,15 +672,16 @@
             emid = absorb_model.emid
             aspec = absorb_model.get_spectrum()
             absorb = np.interp(eobs, emid, aspec, left=0.0, right=0.0)
-            randvec = aspec.max()*np.random.random(eobs.shape)
+            randvec = aspec.max()*prng.uniform(size=eobs.shape)
             not_abs = randvec < absorb
+            absorb_model.cleanup_spectrum()
 
         if eff_area is None:
             detected = np.ones(eobs.shape, dtype='bool')
         else:
             mylog.info("Applying energy-dependent effective area.")
             earea = np.interp(eobs, earf, eff_area, left=0.0, right=0.0)
-            randvec = eff_area.max()*np.random.random(eobs.shape)
+            randvec = eff_area.max()*prng.uniform(size=eobs.shape)
             detected = randvec < earea
 
         detected = np.logical_and(not_abs, detected)
@@ -689,8 +698,8 @@
         events = comm.par_combine_object(events, datatype="dict", op="cat")
 
         if psf_sigma is not None:
-            events["xpix"] += np.random.normal(sigma=psf_sigma/dtheta)
-            events["ypix"] += np.random.normal(sigma=psf_sigma/dtheta)
+            events["xpix"] += prng.normal(scale=psf_sigma/dtheta)
+            events["ypix"] += prng.normal(scale=psf_sigma/dtheta)
 
         num_events = len(events["xpix"])
 
@@ -698,7 +707,8 @@
             mylog.info("Total number of observed photons: %d" % num_events)
 
         if "RMF" in parameters and convolve_energies:
-            events, info = self._convolve_with_rmf(parameters["RMF"], events, mat_key)
+            events, info = self._convolve_with_rmf(parameters["RMF"], events, 
+                                                   mat_key, prng)
             for k, v in info.items():
                 parameters[k] = v
 
@@ -725,7 +735,7 @@
         rmf.close()
         return weights
 
-    def _convolve_with_rmf(self, respfile, events, mat_key):
+    def _convolve_with_rmf(self, respfile, events, mat_key, prng):
         """
         Convolve the events with a RMF file.
         """
@@ -780,7 +790,7 @@
             if len(trueChannel) > 0:
                 for q in range(fcurr,last):
                     if phEE[q] >= low and phEE[q] < high:
-                        channelInd = np.random.choice(len(weights), p=weights)
+                        channelInd = prng.choice(len(weights), p=weights)
                         fcurr += 1
                         detectedChannels.append(trueChannel[channelInd])
                     if phEE[q] >= high:
@@ -1349,6 +1359,8 @@
                 bins = 0.5*(ee[1:]+ee[:-1])
                 spectype = "energy"
             else:
+                mylog.info("Events haven't been convolved with an RMF, so assuming "
+                           "a perfect response and %d PI channels." % nchan)
                 bins = (np.arange(nchan)+1).astype("int32")
                 spectype = "pi"
 

diff -r 652f3058834c94de92fb59bfde799736182ee8b7 -r 3547c2e2eca0949527d1d6ff86a63996eda62c84 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -42,6 +42,9 @@
     def get_spectrum(self):
         pass
 
+    def cleanup_spectrum(self):
+        pass
+
 class XSpecThermalModel(SpectralModel):
     r"""
     Initialize a thermal gas emission model from PyXspec.
@@ -109,6 +112,10 @@
         cosmic_spec *= self.norm
         metal_spec *= self.norm
         return YTArray(cosmic_spec, "cm**3/s"), YTArray(metal_spec, "cm**3/s")
+    
+    def cleanup_spectrum(self):
+        del self.thermal_comp
+        del self.model
 
 class XSpecAbsorbModel(SpectralModel):
     r"""
@@ -165,6 +172,9 @@
         m.nH = self.nH
         return np.array(self.model.values(0))
 
+    def cleanup_spectrum(self):
+        del self.model
+
 class TableApecModel(SpectralModel):
     r"""
     Initialize a thermal gas emission model from the AtomDB APEC tables
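
Making cleanup_spectrum a no-op on the SpectralModel base class lets callers
invoke it unconditionally; only the PyXspec-backed models override it to
drop their handles so XSPEC's global state can be reclaimed. The
optional-hook pattern in miniature:

    class SpectralModelBase(object):
        def cleanup_spectrum(self):
            # Default hook: table-based models hold no external state.
            pass

    class XSpecBackedModel(SpectralModelBase):
        def __init__(self):
            self.model = object()  # stand-in for a PyXspec model handle

        def cleanup_spectrum(self):
            # Drop the handle so global XSPEC state can be cleared.
            del self.model

    for model in (SpectralModelBase(), XSpecBackedModel()):
        model.cleanup_spectrum()  # safe on any model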

diff -r 652f3058834c94de92fb59bfde799736182ee8b7 -r 3547c2e2eca0949527d1d6ff86a63996eda62c84 yt/analysis_modules/photon_simulator/tests/test_beta_model.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
@@ -0,0 +1,171 @@
+"""
+A unit test for the photon_simulator analysis module.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.analysis_modules.photon_simulator.api import \
+    XSpecThermalModel, XSpecAbsorbModel, \
+    ThermalPhotonModel, PhotonList
+from yt.config import ytcfg
+from yt.utilities.answer_testing.framework import \
+    requires_module
+from yt.testing import requires_file
+import numpy as np
+from yt.utilities.physical_ratios import \
+    K_per_keV, mass_hydrogen_grams
+from yt.utilities.physical_constants import clight
+from yt.frontends.stream.api import load_uniform_grid
+import os
+import tempfile
+import shutil
+from numpy.random import RandomState
+
+ckms = clight.in_units("km/s").v
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+xray_data_dir = ytcfg.get("yt", "xray_data_dir")
+
+arf = os.path.join(xray_data_dir,"sxt-s_120210_ts02um_intallpxl.arf")
+rmf = os.path.join(xray_data_dir,"ah_sxs_5ev_basefilt_20100712.rmf")
+
+@requires_module("xspec")
+@requires_file(arf)
+@requires_file(rmf)
+def test_beta_model():
+    import xspec
+    
+    xspec.Fit.statMethod = "cstat"
+    xspec.Xset.addModelString("APECTHERMAL","yes")
+    xspec.Fit.query = "yes"
+    xspec.Fit.method = ["leven","10","0.01"]
+    xspec.Fit.delta = 0.01
+    xspec.Xset.chatter = 5
+
+    my_prng = RandomState(24)
+
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    R = 1.0
+    r_c = 0.05
+    rho_c = 0.04*mass_hydrogen_grams
+    beta = 1.
+    kT_sim = 6.0
+    v_shift = 4.0e7
+    v_width = 4.0e7
+    nx = 128
+
+    ddims = (nx,nx,nx)
+
+    x, y, z = np.mgrid[-R:R:nx*1j,
+                       -R:R:nx*1j,
+                       -R:R:nx*1j]
+
+    r = np.sqrt(x**2+y**2+z**2)
+
+    dens = np.zeros(ddims)
+    dens[r <= R] = rho_c*(1.+(r[r <= R]/r_c)**2)**(-1.5*beta)
+    dens[r > R] = 0.0
+    temp = kT_sim*K_per_keV*np.ones(ddims)
+    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
+    velz = my_prng.normal(loc=v_shift,scale=v_width,size=ddims)
+
+    data = {}
+    data["density"] = (dens, "g/cm**3")
+    data["temperature"] = (temp, "K")
+    data["velocity_x"] = (np.zeros(ddims), "cm/s")
+    data["velocity_y"] = (np.zeros(ddims), "cm/s")
+    data["velocity_z"] = (velz, "cm/s")
+
+    ds = load_uniform_grid(data, ddims, length_unit=(2*R, "Mpc"),
+                           nprocs=64, bbox=bbox)
+
+    A = 3000.
+    exp_time = 1.0e5
+    redshift = 0.05
+    nH_sim = 0.02
+
+    apec_model = XSpecThermalModel("bapec", 0.1, 11.5, 20000,
+                                   thermal_broad=True)
+    abs_model = XSpecAbsorbModel("TBabs", nH_sim)
+
+    sphere = ds.sphere("c", (0.5, "Mpc"))
+
+    mu_sim = -v_shift / 1.0e5
+    sigma_sim = v_width / 1.0e5
+
+    Z_sim = 0.3
+
+    thermal_model = ThermalPhotonModel(apec_model, Zmet=Z_sim, X_H=0.76,
+                                       prng=my_prng)
+    photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
+                                      thermal_model)
+
+    D_A = photons.parameters["FiducialAngularDiameterDistance"]
+
+    norm_sim = sphere.quantities.total_quantity("emission_measure")
+    norm_sim *= 1.0e-14/(4*np.pi*D_A*D_A*(1.+redshift)*(1.+redshift))
+    norm_sim = float(norm_sim.in_cgs())
+
+    events = photons.project_photons("z", responses=[arf,rmf],
+                                     absorb_model=abs_model, 
+                                     convolve_energies=True, prng=my_prng)
+    events.write_spectrum("beta_model_evt.pi", clobber=True)
+
+    s = xspec.Spectrum("beta_model_evt.pi")
+    s.ignore("**-0.5")
+    s.ignore("9.0-**")
+
+    m = xspec.Model("tbabs*bapec")
+    m.bapec.kT = 5.5
+    m.bapec.Abundanc = 0.25
+    m.bapec.norm = 1.0
+    m.bapec.Redshift = 0.05
+    m.bapec.Velocity = 300.0
+    m.TBabs.nH = 0.02
+
+    m.bapec.Velocity.frozen = False
+    m.bapec.Abundanc.frozen = False
+    m.bapec.Redshift.frozen = False
+    m.TBabs.nH.frozen = True
+
+    xspec.Fit.renorm()
+    xspec.Fit.nIterations = 100
+    xspec.Fit.perform()
+    
+    kT  = m.bapec.kT.values[0]
+    mu = (m.bapec.Redshift.values[0]-redshift)*ckms
+    Z = m.bapec.Abundanc.values[0]
+    sigma = m.bapec.Velocity.values[0]
+    norm = m.bapec.norm.values[0]
+
+    dkT = m.bapec.kT.sigma
+    dmu = m.bapec.Redshift.sigma*ckms
+    dZ = m.bapec.Abundanc.sigma
+    dsigma = m.bapec.Velocity.sigma
+    dnorm = m.bapec.norm.sigma
+
+    print(kT, kT_sim, dkT)
+
+    assert np.abs(mu-mu_sim) < dmu
+    assert np.abs(kT-kT_sim) < dkT    
+    assert np.abs(Z-Z_sim) < dZ
+    assert np.abs(sigma-sigma_sim) < dsigma
+    assert np.abs(norm-norm_sim) < dnorm
+
+    xspec.AllModels.clear()
+    xspec.AllData.clear()
+
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
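
The dens array in this test is the standard isothermal beta-model profile,
rho(r) = rho_c * (1 + (r/r_c)**2)**(-1.5*beta), which for beta = 1 falls off
as r**-3 at large radii; the test then fits the mock spectrum with XSPEC's
bapec model and requires each fitted parameter to land within one sigma of
the input. The profile on its own:

    import numpy as np

    def beta_model(r, rho_c, r_c, beta=1.0):
        # Isothermal beta-model gas density profile.
        return rho_c * (1.0 + (r / r_c) ** 2) ** (-1.5 * beta)

    r = np.linspace(0.0, 1.0, 5)
    print(beta_model(r, rho_c=1.0, r_c=0.05))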

diff -r 652f3058834c94de92fb59bfde799736182ee8b7 -r 3547c2e2eca0949527d1d6ff86a63996eda62c84 yt/analysis_modules/photon_simulator/tests/test_cluster.py
--- a/yt/analysis_modules/photon_simulator/tests/test_cluster.py
+++ /dev/null
@@ -1,67 +0,0 @@
-"""
-Answer test the photon_simulator analysis module.
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.analysis_modules.photon_simulator.api import \
-    TableApecModel, TableAbsorbModel, \
-    ThermalPhotonModel, PhotonList
-from yt.config import ytcfg
-from yt.testing import requires_file
-from yt.utilities.answer_testing.framework import requires_ds, \
-    GenericArrayTest, data_dir_load
-import numpy as np
-
-def setup():
-    from yt.config import ytcfg
-    ytcfg["yt", "__withintesting"] = "True"
-
-test_dir = ytcfg.get("yt", "test_data_dir")
-
-ETC = test_dir+"/enzo_tiny_cosmology/DD0046/DD0046"
-APEC = test_dir+"/xray_data/atomdb_v2.0.2"
-TBABS = test_dir+"/xray_data/tbabs_table.h5"
-ARF = test_dir+"/xray_data/chandra_ACIS-S3_onaxis_arf.fits"
-RMF = test_dir+"/xray_data/chandra_ACIS-S3_onaxis_rmf.fits"
-
-@requires_ds(ETC)
-@requires_file(APEC)
-@requires_file(TBABS)
-@requires_file(ARF)
-@requires_file(RMF)
-def test_etc():
-
-    np.random.seed(seed=0x4d3d3d3)
-
-    ds = data_dir_load(ETC)
-    A = 3000.
-    exp_time = 1.0e5
-    redshift = 0.1
-
-    apec_model = TableApecModel(APEC, 0.1, 20.0, 2000)
-    tbabs_model = TableAbsorbModel(TBABS, 0.1)
-
-    sphere = ds.sphere("max", (0.5, "mpc"))
-
-    thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3)
-    photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
-                                      thermal_model)
-
-    events = photons.project_photons([0.0,0.0,1.0],
-                                     responses=[ARF,RMF],
-                                     absorb_model=tbabs_model)
-
-    def photons_test(): return photons.photons
-    def events_test(): return events.events
-
-    for test in [GenericArrayTest(ds, photons_test),
-                 GenericArrayTest(ds, events_test)]:
-        test_etc.__name__ = test.description
-        yield test

diff -r 652f3058834c94de92fb59bfde799736182ee8b7 -r 3547c2e2eca0949527d1d6ff86a63996eda62c84 yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -0,0 +1,86 @@
+"""
+Answer test the photon_simulator analysis module.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.analysis_modules.photon_simulator.api import \
+    TableApecModel, TableAbsorbModel, \
+    ThermalPhotonModel, PhotonList
+from yt.config import ytcfg
+from yt.testing import requires_file
+from yt.utilities.answer_testing.framework import requires_ds, \
+    GenericArrayTest, data_dir_load
+import numpy as np
+from numpy.random import RandomState
+import os
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+test_data_dir = ytcfg.get("yt", "test_data_dir")
+xray_data_dir = ytcfg.get("yt", "xray_data_dir")
+
+rmfs = ["pn-med.rmf", "acisi_aimpt_cy17.rmf",
+        "aciss_aimpt_cy17.rmf", "nustar.rmf",
+        "ah_sxs_5ev_basefilt_20100712.rmf"]
+arfs = ["pn-med.arf", "acisi_aimpt_cy17.arf",
+        "aciss_aimpt_cy17.arf", "nustar_3arcminA.arf",
+        "sxt-s_120210_ts02um_intallpxl.arf"]
+
+gslr = test_data_dir+"/GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
+APEC = xray_data_dir
+TBABS = xray_data_dir+"/tbabs_table.h5"
+
+def return_data(data):
+    def _return_data(name):
+        return data
+    return _return_data
+
+@requires_ds(gslr)
+@requires_file(APEC)
+@requires_file(TBABS)
+def test_sloshing():
+
+    prng = RandomState(0x4d3d3d3)
+
+    ds = data_dir_load(gslr)
+    A = 2000.
+    exp_time = 1.0e4
+    redshift = 0.1
+
+    apec_model = TableApecModel(APEC, 0.1, 11.0, 10000)
+    tbabs_model = TableAbsorbModel(TBABS, 0.1)
+
+    sphere = ds.sphere("c", (0.1, "Mpc"))
+
+    thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3, prng=prng)
+    photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
+                                      thermal_model)
+
+    return_photons = return_data(photons.photons)
+
+    tests = []
+    tests.append(GenericArrayTest(ds, return_photons, args=["photons"]))
+
+    for a, r in zip(arfs, rmfs):
+        arf = os.path.join(xray_data_dir,a)
+        rmf = os.path.join(xray_data_dir,r)
+        events = photons.project_photons([1.0,-0.5,0.2], responses=[arf,rmf],
+                                         absorb_model=tbabs_model, 
+                                         convolve_energies=True, prng=prng)
+
+        return_events = return_data(events.events)
+
+        tests.append(GenericArrayTest(ds, return_events, args=[a]))
+
+    for test in tests:
+        test_sloshing.__name__ = test.description
+        yield test

diff -r 652f3058834c94de92fb59bfde799736182ee8b7 -r 3547c2e2eca0949527d1d6ff86a63996eda62c84 yt/analysis_modules/photon_simulator/tests/test_spectra.py
--- a/yt/analysis_modules/photon_simulator/tests/test_spectra.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_spectra.py
@@ -9,7 +9,7 @@
 def setup():
     ytcfg["yt", "__withintesting"] = "True"
 
-test_data_dir = ytcfg.get("yt", "test_data_dir")
+xray_data_dir = ytcfg.get("yt", "xray_data_dir")
 
 ds = fake_random_ds(64)
 
@@ -17,7 +17,7 @@
 @requires_module("astropy")
 def test_apec():
 
-    settings = {"APECROOT":test_data_dir+"/xray_data/apec_v2.0.2"}
+    settings = {"APECROOT":xray_data_dir+"/apec_v2.0.2"}
     xmod = XSpecThermalModel("apec", 0.1, 10.0, 10000, thermal_broad=True,
                              settings=settings)
     xmod.prepare_spectrum(0.2)
@@ -25,10 +25,10 @@
     xcspec, xmspec = xmod.get_spectrum(6.0)
     spec1 = xcspec+0.3*xmspec
 
-    amod = TableApecModel(test_data_dir+"/xray_data", 0.1, 10.0, 
+    amod = TableApecModel(xray_data_dir, 0.1, 10.0, 
                           10000, thermal_broad=True)
     amod.prepare_spectrum(0.2)
-    
+
     acspec, amspec = amod.get_spectrum(6.0)
     spec2 = acspec+0.3*amspec
 
@@ -42,3 +42,4 @@
         test_apec.__name__ = test.description
         yield test
 
+    xmod.cleanup_spectrum()

diff -r 652f3058834c94de92fb59bfde799736182ee8b7 -r 3547c2e2eca0949527d1d6ff86a63996eda62c84 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -60,7 +60,8 @@
     sketchfab_api_key = 'None',
     thread_field_detection = 'False',
     ignore_invalid_unit_operation_errors = 'False',
-    chunk_size = '1000'
+    chunk_size = '1000',
+    xray_data_dir = '/does/not/exist',
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten
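
The '/does/not/exist' default means the @requires_file guards skip the
X-ray tests until xray_data_dir points at the unpacked xray_data bundle,
typically via the [yt] section of yt's configuration file (~/.yt/config in
this era). Reading it back, as a sketch:

    from yt.config import ytcfg

    # Set in the [yt] section of the config file, e.g.:
    #   xray_data_dir = /path/to/xray_data
    print(ytcfg.get("yt", "xray_data_dir"))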

diff -r 652f3058834c94de92fb59bfde799736182ee8b7 -r 3547c2e2eca0949527d1d6ff86a63996eda62c84 yt/fields/astro_fields.py
--- a/yt/fields/astro_fields.py
+++ b/yt/fields/astro_fields.py
@@ -91,6 +91,20 @@
                        function=_chandra_emissivity,
                        units="") # add correct units here
 
+    def _emission_measure(field, data):
+        if data.has_field_parameter("X_H"):
+            X_H = data.get_field_parameter("X_H")
+        else:
+            X_H = 0.76
+        nenh = data["density"]*data["density"]
+        nenh /= mh*mh
+        nenh *= 0.5*(1.+X_H)*X_H*data["cell_volume"]
+        return nenh
+    
+    registry.add_field((ftype, "emission_measure"),
+                       function=_emission_measure,
+                       units="cm**-3")
+
     def _xray_emissivity(field, data):
         # old scaling coefficient was 2.168e60
         return data.ds.arr(data[ftype, "density"].to_ndarray().astype(np.float64)**2
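
The new field computes the per-cell X-ray emission measure
EM = n_e * n_H * V. For a fully ionized plasma with hydrogen mass fraction
X_H, n_H = X_H * rho / m_H and n_e = 0.5 * (1 + X_H) * rho / m_H (each
helium atom contributing two electrons), which is exactly the
0.5*(1.+X_H)*X_H factor above. As a standalone function:

    m_H = 1.6737e-24  # hydrogen mass in grams (approximate)

    def emission_measure(rho, cell_volume, X_H=0.76):
        # EM = n_e * n_H * V, in cm**-3
        n_H = X_H * rho / m_H
        n_e = 0.5 * (1.0 + X_H) * rho / m_H
        return n_e * n_H * cell_volume

    print(emission_measure(rho=1.0e-26, cell_volume=3.0e64))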

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


