[yt-svn] commit/yt: 13 new changesets
commits-noreply at bitbucket.org
Mon Oct 12 11:39:12 PDT 2015
13 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/0bf84ec33db4/
Changeset: 0bf84ec33db4
Branch: yt
User: jzuhone
Date: 2015-10-05 15:29:33+00:00
Summary: Fixing a few small bugs and adding some minor memory improvements
Affected #: 1 file
diff -r d70d717bcd116621c0d30808a495b905a4046f58 -r 0bf84ec33db4f59f1a0db4e3b37fb747a8d0950c yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -536,6 +536,19 @@
D_A0 = self.parameters["FiducialAngularDiameterDistance"]
scale_factor = 1.0
+ # If we use an RMF, figure out where the response matrix actually is.
+ if "RMF" in parameters:
+ rmf = _astropy.pyfits.open(parameters["RMF"])
+ if "MATRIX" in rmf:
+ mat_key = "MATRIX"
+ elif "SPECRESP MATRIX" in rmf:
+ mat_key = "SPECRESP MATRIX"
+ else:
+ raise RuntimeError("Cannot find the response matrix in the RMF %s!!" % parameters["RMF"])
+ rmf.close()
+ else:
+ mat_key = None
+
if (exp_time_new is None and area_new is None and
redshift_new is None and dist_new is None):
my_n_obs = n_ph_tot
@@ -552,11 +565,10 @@
if comm.rank == 0:
mylog.info("Using energy-dependent effective area: %s" % (parameters["ARF"]))
f = _astropy.pyfits.open(area_new)
- elo = f["SPECRESP"].data.field("ENERG_LO")
- ehi = f["SPECRESP"].data.field("ENERG_HI")
+ earf = 0.5*(f["SPECRESP"].data.field("ENERG_LO")+f["SPECRESP"].data.field("ENERG_HI"))
eff_area = np.nan_to_num(f["SPECRESP"].data.field("SPECRESP"))
if "RMF" in parameters:
- weights = self._normalize_arf(parameters["RMF"])
+ weights = self._normalize_arf(parameters["RMF"], mat_key)
eff_area *= weights
else:
mylog.warning("You specified an ARF but not an RMF. This is ok if the "+
@@ -679,7 +691,7 @@
if comm.rank == 0: mylog.info("Total number of observed photons: %d" % num_events)
if "RMF" in parameters and convolve_energies:
- events, info = self._convolve_with_rmf(parameters["RMF"], events)
+ events, info = self._convolve_with_rmf(parameters["RMF"], events, mat_key)
for k, v in info.items(): parameters[k] = v
if exp_time_new is None:
@@ -700,12 +712,12 @@
def _normalize_arf(self, respfile):
rmf = _astropy.pyfits.open(respfile)
- table = rmf["MATRIX"]
+ table = rmf[mat_key]
weights = np.array([w.sum() for w in table.data["MATRIX"]])
rmf.close()
return weights
- def _convolve_with_rmf(self, respfile, events):
+ def _convolve_with_rmf(self, respfile, events, mat_key):
"""
Convolve the events with a RMF file.
"""
@@ -714,7 +726,7 @@
hdulist = _astropy.pyfits.open(respfile)
- tblhdu = hdulist["MATRIX"]
+ tblhdu = hdulist[mat_key]
n_de = len(tblhdu.data["ENERG_LO"])
mylog.info("Number of energy bins in RMF: %d" % (n_de))
mylog.info("Energy limits: %g %g" % (min(tblhdu.data["ENERG_LO"]),
@@ -735,13 +747,13 @@
# run through all photon energies and find which bin they go in
k = 0
fcurr = 0
- last = len(phEE)-1
+ last = len(phEE)
- pbar = get_pbar("Scattering energies with RMF:", n_de)
+ pbar = get_pbar("Scattering energies with RMF:", last)
for low,high in zip(tblhdu.data["ENERG_LO"],tblhdu.data["ENERG_HI"]):
# weight function for probabilities from RMF
- weights = np.nan_to_num(tblhdu.data[k]["MATRIX"][:])
+ weights = np.nan_to_num(np.float64(tblhdu.data[k]["MATRIX"][:]))
weights /= weights.sum()
# build channel number list associated to array value,
# there are groups of channels in rmfs with nonzero probabilities
@@ -768,7 +780,7 @@
detectedChannels.append(trueChannel[channelInd])
if phEE[q] >= high:
break
- pbar.update(k)
+ pbar.update(fcurr)
k+=1
pbar.finish()
@@ -800,9 +812,6 @@
self.wcs.wcs.cdelt = [-parameters["dtheta"].value, parameters["dtheta"].value]
self.wcs.wcs.ctype = ["RA---TAN","DEC--TAN"]
self.wcs.wcs.cunit = ["deg"]*2
- x,y = self.wcs.wcs_pix2world(self.events["xpix"], self.events["ypix"], 1)
- self.events["xsky"] = YTArray(x, "degree")
- self.events["ysky"] = YTArray(y, "degree")
def keys(self):
return self.events.keys()
@@ -817,11 +826,19 @@
return self.events.values()
def __getitem__(self,key):
+ if key not in self.events:
+ if key == "xsky" or key == "ysky":
+ x,y = self.wcs.wcs_pix2world(self.events["xpix"], self.events["ypix"], 1)
+ self.events["xsky"] = YTArray(x, "degree")
+ self.events["ysky"] = YTArray(y, "degree")
return self.events[key]
def __repr__(self):
return self.events.__repr__()
+ def __contains__(self, key):
+ return key in self.events
+
def __add__(self, other):
assert_same_wcs(self.wcs, other.wcs)
keys1 = list(self.parameters.keys())
@@ -860,11 +877,11 @@
reg = pyregion.parse(region)
r = reg.as_imagecoord(header=self.wcs.to_header())
f = r.get_filter()
- idxs = f.inside_x_y(self.events["xpix"], self.events["ypix"])
+ idxs = f.inside_x_y(self["xpix"], self["ypix"])
if idxs.sum() == 0:
raise RuntimeError("No events are inside this region!")
new_events = {}
- for k, v in self.events.items():
+ for k, v in self.items():
new_events[k] = v[idxs]
return EventList(new_events, self.parameters)
@@ -968,20 +985,20 @@
cols = []
col1 = pyfits.Column(name='ENERGY', format='E', unit='eV',
- array=self.events["eobs"].in_units("eV").d)
+ array=self["eobs"].in_units("eV").d)
col2 = pyfits.Column(name='X', format='D', unit='pixel',
- array=self.events["xpix"])
+ array=self["xpix"])
col3 = pyfits.Column(name='Y', format='D', unit='pixel',
- array=self.events["ypix"])
+ array=self["ypix"])
cols = [col1, col2, col3]
if "ChannelType" in self.parameters:
chantype = self.parameters["ChannelType"]
if chantype == "PHA":
- cunit="adu"
+ cunit = "adu"
elif chantype == "PI":
- cunit="Chan"
+ cunit = "Chan"
col4 = pyfits.Column(name=chantype.upper(), format='1J',
unit=cunit, array=self.events[chantype])
cols.append(col4)
@@ -1055,21 +1072,17 @@
raise TypeError("Writing SIMPUT files is only supported if you didn't convolve with responses.")
if emin is None:
- emin = self.events["eobs"].min().value
+ emin = self["eobs"].min().value
if emax is None:
- emax = self.events["eobs"].max().value
+ emax = self["eobs"].max().value
- idxs = np.logical_and(self.events["eobs"].d >= emin,
- self.events["eobs"].d <= emax)
- flux = np.sum(self.events["eobs"][idxs].in_units("erg")) / \
+ idxs = np.logical_and(self["eobs"].d >= emin, self["eobs"].d <= emax)
+ flux = np.sum(self["eobs"][idxs].in_units("erg")) / \
self.parameters["ExposureTime"]/self.parameters["Area"]
- col1 = pyfits.Column(name='ENERGY', format='E',
- array=self["eobs"].d)
- col2 = pyfits.Column(name='DEC', format='D',
- array=self["ysky"].d)
- col3 = pyfits.Column(name='RA', format='D',
- array=self["xsky"].d)
+ col1 = pyfits.Column(name='ENERGY', format='E', array=self["eobs"].d)
+ col2 = pyfits.Column(name='DEC', format='D', array=self["ysky"].d)
+ col3 = pyfits.Column(name='RA', format='D', array=self["xsky"].d)
coldefs = pyfits.ColDefs([col1, col2, col3])
@@ -1147,15 +1160,15 @@
if "Instrument" in self.parameters:
f.create_dataset("/instrument", data=self.parameters["Instrument"])
- f.create_dataset("/xpix", data=self.events["xpix"])
- f.create_dataset("/ypix", data=self.events["ypix"])
- f.create_dataset("/xsky", data=self.events["xsky"].d)
- f.create_dataset("/ysky", data=self.events["ysky"].d)
- f.create_dataset("/eobs", data=self.events["eobs"].d)
- if "PI" in self.events:
- f.create_dataset("/pi", data=self.events["PI"])
- if "PHA" in self.events:
- f.create_dataset("/pha", data=self.events["PHA"])
+ f.create_dataset("/xpix", data=self["xpix"])
+ f.create_dataset("/ypix", data=self["ypix"])
+ f.create_dataset("/xsky", data=self["xsky"].d)
+ f.create_dataset("/ysky", data=self["ysky"].d)
+ f.create_dataset("/eobs", data=self["eobs"].d)
+ if "PI" in self:
+ f.create_dataset("/pi", data=self["PI"])
+ if "PHA" in self:
+ f.create_dataset("/pha", data=self["PHA"])
f.create_dataset("/sky_center", data=self.parameters["sky_center"].d)
f.create_dataset("/pix_center", data=self.parameters["pix_center"])
f.create_dataset("/dtheta", data=float(self.parameters["dtheta"]))
@@ -1181,13 +1194,13 @@
The maximum energy of the photons to put in the image, in keV.
"""
if emin is None:
- mask_emin = np.ones((self.num_events), dtype='bool')
+ mask_emin = np.ones(self.num_events, dtype='bool')
else:
- mask_emin = self.events["eobs"].d > emin
+ mask_emin = self["eobs"].d > emin
if emax is None:
- mask_emax = np.ones((self.num_events), dtype='bool')
+ mask_emax = np.ones(self.num_events, dtype='bool')
else:
- mask_emax = self.events["eobs"].d < emax
+ mask_emax = self["eobs"].d < emax
mask = np.logical_and(mask_emin, mask_emax)
@@ -1197,8 +1210,8 @@
xbins = np.linspace(0.5, float(nx)+0.5, nx+1, endpoint=True)
ybins = np.linspace(0.5, float(ny)+0.5, ny+1, endpoint=True)
- H, xedges, yedges = np.histogram2d(self.events["xpix"][mask],
- self.events["ypix"][mask],
+ H, xedges, yedges = np.histogram2d(self["xpix"][mask],
+ self["ypix"][mask],
bins=[xbins,ybins])
hdu = _astropy.pyfits.PrimaryHDU(H.T)
@@ -1262,11 +1275,11 @@
f.close()
minlength = nchan
if cmin == 1: minlength += 1
- spec = np.bincount(self.events[spectype],minlength=minlength)
+ spec = np.bincount(self[spectype],minlength=minlength)
if cmin == 1: spec = spec[1:]
bins = (np.arange(nchan)+cmin).astype("int32")
else:
- espec = self.events["eobs"].d
+ espec = self["eobs"].d
erange = (emin, emax)
spec, ee = np.histogram(espec, bins=nchan, range=erange)
if bin_type == "energy":
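For context, the mat_key detection added above can be reproduced standalone; a minimal sketch using astropy.io.fits, where the file name "example.rmf" is hypothetical:

    from astropy.io import fits

    def find_matrix_key(rmf_file):
        # The response matrix extension is named either "MATRIX" or
        # "SPECRESP MATRIX", depending on the instrument/mission.
        with fits.open(rmf_file) as rmf:
            names = [hdu.name for hdu in rmf]
        for key in ("MATRIX", "SPECRESP MATRIX"):
            if key in names:
                return key
        raise RuntimeError("Cannot find the response matrix in the RMF "
                           "file %s!" % rmf_file)

    mat_key = find_matrix_key("example.rmf")  # hypothetical file name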
https://bitbucket.org/yt_analysis/yt/commits/197963595c5e/
Changeset: 197963595c5e
Branch: yt
User: jzuhone
Date: 2015-10-05 15:31:45+00:00
Summary: Making sure _normalize_arf has the right call signature
Affected #: 1 file
diff -r 0bf84ec33db4f59f1a0db4e3b37fb747a8d0950c -r 197963595c5e84c010671fb809c0e87539d52db0 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -664,7 +664,6 @@
detected = np.ones(eobs.shape, dtype='bool')
else:
mylog.info("Applying energy-dependent effective area.")
- earf = 0.5*(elo+ehi)
earea = np.interp(eobs, earf, eff_area, left=0.0, right=0.0)
randvec = eff_area.max()*np.random.random(eobs.shape)
detected = randvec < earea
@@ -710,7 +709,7 @@
return EventList(events, parameters)
- def _normalize_arf(self, respfile):
+ def _normalize_arf(self, respfile, mat_key):
rmf = _astropy.pyfits.open(respfile)
table = rmf[mat_key]
weights = np.array([w.sum() for w in table.data["MATRIX"]])
https://bitbucket.org/yt_analysis/yt/commits/c7ba816a95a1/
Changeset: c7ba816a95a1
Branch: yt
User: jzuhone
Date: 2015-10-05 19:03:40+00:00
Summary: More intelligent and correct way of determining the channel bounds
Affected #: 1 file
diff -r 197963595c5e84c010671fb809c0e87539d52db0 -r c7ba816a95a15fde1eb30b369fb39003ebb7b5b4 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -1264,13 +1264,19 @@
if bin_type == "channel" and "ChannelType" in self.parameters:
spectype = self.parameters["ChannelType"]
f = pyfits.open(self.parameters["RMF"])
- nchan = int(f[1].header["DETCHANS"])
- try:
- cmin = int(f[1].header["TLMIN4"])
- except KeyError:
+ nchan = int(f["EBOUNDS"].header["DETCHANS"])
+ num = 0
+ for i in range(1,len(f["EBOUNDS"].columns)+1):
+ if f["EBOUNDS"].header["TTYPE%d" % i] == "CHANNEL":
+ num = i
+ break
+ if num > 0:
+ tlmin = "TLMIN%d" % num
+ cmin = int(f["EBOUNDS"].header[tlmin])
+ else:
mylog.warning("Cannot determine minimum allowed value for channel. " +
"Setting to 0, which may be wrong.")
- cmin = int(0)
+ cmin = 0
f.close()
minlength = nchan
if cmin == 1: minlength += 1
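The channel-bound detection above reads the EBOUNDS extension; a minimal standalone sketch of the same logic (again with a hypothetical "example.rmf"):

    from astropy.io import fits

    with fits.open("example.rmf") as f:  # hypothetical file name
        ebounds = f["EBOUNDS"]
        nchan = int(ebounds.header["DETCHANS"])
        # Find which table column holds the channel numbers; its TLMINn
        # keyword then gives the minimum allowed channel value.
        num = 0
        for i in range(1, len(ebounds.columns) + 1):
            if ebounds.header["TTYPE%d" % i] == "CHANNEL":
                num = i
                break
        cmin = int(ebounds.header["TLMIN%d" % num]) if num > 0 else 0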
https://bitbucket.org/yt_analysis/yt/commits/d6e31a1e8e7c/
Changeset: d6e31a1e8e7c
Branch: yt
User: jzuhone
Date: 2015-10-05 20:52:19+00:00
Summary: Adding astropy.time to the on-demand import
Affected #: 1 file
diff -r c7ba816a95a15fde1eb30b369fb39003ebb7b5b4 -r d6e31a1e8e7cdf3a80e0376db917d32dc70daaaf yt/utilities/on_demand_imports.py
--- a/yt/utilities/on_demand_imports.py
+++ b/yt/utilities/on_demand_imports.py
@@ -84,6 +84,18 @@
self._conv = conv
return self._conv
+ _time = None
+ @property
+ def time(self):
+ if self._time is None:
+ try:
+ import astropy.time as time
+ self.log
+ except ImportError:
+ time = NotAModule(self._name)
+ self._time = time
+ return self._time
+
_astropy = astropy_imports()
class scipy_imports:
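The astropy_imports machinery defers the actual import until first attribute access; a minimal self-contained sketch of the pattern (NotAModule here is a simplified stand-in for yt's placeholder class):

    class NotAModule(object):
        # Simplified stand-in: any attribute access raises a helpful error.
        def __init__(self, pkg_name):
            self.pkg_name = pkg_name
        def __getattr__(self, attr):
            raise ImportError("This functionality requires the %s package "
                              "to be installed." % self.pkg_name)

    class astropy_imports(object):
        _name = "astropy"
        _time = None

        @property
        def time(self):
            # astropy.time is imported on first access, not at module load.
            if self._time is None:
                try:
                    import astropy.time as time
                except ImportError:
                    time = NotAModule(self._name)
                self._time = time
            return self._time

    _astropy = astropy_imports()
    now = _astropy.time.Time.now()  # triggers the import (or the placeholder)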
https://bitbucket.org/yt_analysis/yt/commits/11caaf5bd5e0/
Changeset: 11caaf5bd5e0
Branch: yt
User: jzuhone
Date: 2015-10-05 20:52:56+00:00
Summary: If we write an events FITS file, make it more standard so that outside tools can use it
Affected #: 1 file
diff -r d6e31a1e8e7cdf3a80e0376db917d32dc70daaaf -r 11caaf5bd5e0f1501a43a3d79f4b90fd436f57de yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -980,17 +980,25 @@
Set *clobber* to True if you need to overwrite a previous file.
"""
pyfits = _astropy.pyfits
+ Time = _astropy.time.Time
+ TimeDelta = _astropy.time.TimeDelta
+
+ exp_time = float(self.parameters["ExposureTime"])
+
+ t_begin = Time.now()
+ dt = TimeDelta(exp_time, format='sec')
+ t_end = t_begin + dt
cols = []
- col1 = pyfits.Column(name='ENERGY', format='E', unit='eV',
- array=self["eobs"].in_units("eV").d)
- col2 = pyfits.Column(name='X', format='D', unit='pixel',
- array=self["xpix"])
- col3 = pyfits.Column(name='Y', format='D', unit='pixel',
- array=self["ypix"])
+ col_e = pyfits.Column(name='ENERGY', format='E', unit='eV',
+ array=self["eobs"].in_units("eV").d)
+ col_x = pyfits.Column(name='X', format='D', unit='pixel',
+ array=self["xpix"])
+ col_y = pyfits.Column(name='Y', format='D', unit='pixel',
+ array=self["ypix"])
- cols = [col1, col2, col3]
+ cols = [col_e, col_x, col_y]
if "ChannelType" in self.parameters:
chantype = self.parameters["ChannelType"]
@@ -998,10 +1006,20 @@
cunit = "adu"
elif chantype == "PI":
cunit = "Chan"
- col4 = pyfits.Column(name=chantype.upper(), format='1J',
- unit=cunit, array=self.events[chantype])
- cols.append(col4)
+ col_ch = pyfits.Column(name=chantype.upper(), format='1J',
+ unit=cunit, array=self.events[chantype])
+ cols.append(col_ch)
+ mylog.info("Generating times for events assuming uniform time "
+ "distribution. In future versions this will be made "
+ "more general.")
+
+ time = np.random.uniform(size=self.num_events, low=0.0,
+ high=float(self.parameters["ExposureTime"]))
+ col_t = pyfits.Column(name="TIME", format='1D', unit='s',
+ array=time)
+ cols.append(col_t)
+
coldefs = pyfits.ColDefs(cols)
tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
tbhdu.update_ext_name("EVENTS")
@@ -1022,7 +1040,9 @@
tbhdu.header["TLMIN3"] = 0.5
tbhdu.header["TLMAX2"] = 2.*self.parameters["pix_center"][0]-0.5
tbhdu.header["TLMAX3"] = 2.*self.parameters["pix_center"][1]-0.5
- tbhdu.header["EXPOSURE"] = float(self.parameters["ExposureTime"])
+ tbhdu.header["EXPOSURE"] = exp_time
+ tbhdu.header["TSTART"] = 0.0
+ tbhdu.header["TSTOP"] = exp_time
if isinstance(self.parameters["Area"], string_types):
tbhdu.header["AREA"] = self.parameters["Area"]
else:
@@ -1032,8 +1052,18 @@
tbhdu.header["HDUVERS"] = "1.1.0"
tbhdu.header["RADECSYS"] = "FK5"
tbhdu.header["EQUINOX"] = 2000.0
+ tbhdu.header["HDUCLASS"] = "OGIP"
+ tbhdu.header["HDUCLAS1"] = "EVENTS"
+ tbhdu.header["HDUCLAS2"] = "ACCEPTED"
+ tbhdu.header["DATE"] = t_begin.tt.isot
+ tbhdu.header["DATE-OBS"] = t_begin.tt.isot
+ tbhdu.header["DATE-END"] = t_end.tt.isot
if "RMF" in self.parameters:
tbhdu.header["RESPFILE"] = self.parameters["RMF"]
+ f = pyfits.open(self.parameters["RMF"])
+ nchan = int(f["EBOUNDS"].header["DETCHANS"])
+ tbhdu.header["PHA_BINS"] = nchan
+ f.close()
if "ARF" in self.parameters:
tbhdu.header["ANCRFILE"] = self.parameters["ARF"]
if "ChannelType" in self.parameters:
@@ -1045,7 +1075,30 @@
if "Instrument" in self.parameters:
tbhdu.header["INSTRUME"] = self.parameters["Instrument"]
- tbhdu.writeto(fitsfile, clobber=clobber)
+ hdulist = [pyfits.PrimaryHDU(), tbhdu]
+
+ if "ChannelType" in self.parameters:
+ start = pyfits.Column(name='START', format='1D', unit='s',
+ array=np.array([0.0]))
+ stop = pyfits.Column(name='STOP', format='1D', unit='s',
+ array=np.array([exp_time]))
+
+ tbhdu_gti = pyfits.BinTableHDU.from_columns([start,stop])
+ tbhdu_gti.update_ext_name("STDGTI")
+ tbhdu_gti.header["TSTART"] = 0.0
+ tbhdu_gti.header["TSTOP"] = exp_time
+ tbhdu_gti.header["HDUCLASS"] = "OGIP"
+ tbhdu_gti.header["HDUCLAS1"] = "GTI"
+ tbhdu_gti.header["HDUCLAS2"] = "STANDARD"
+ tbhdu_gti.header["RADECSYS"] = "FK5"
+ tbhdu_gti.header["EQUINOX"] = 2000.0
+ tbhdu_gti.header["DATE"] = t_begin.tt.isot
+ tbhdu_gti.header["DATE-OBS"] = t_begin.tt.isot
+ tbhdu_gti.header["DATE-END"] = t_end.tt.isot
+
+ hdulist.append(tbhdu_gti)
+
+ pyfits.HDUList(hdulist).writeto(fitsfile, clobber=clobber)
@parallel_root_only
def write_simput_file(self, prefix, clobber=False, emin=None, emax=None):
@@ -1301,7 +1354,7 @@
coldefs = pyfits.ColDefs([col1, col2, col3, col4])
- tbhdu = pyfits.new_table(coldefs)
+ tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
tbhdu.update_ext_name("SPECTRUM")
tbhdu.header["DETCHANS"] = spec.shape[0]
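A condensed sketch of the EVENTS/STDGTI layout the new writer produces, using astropy directly and made-up event data (file name, column values, and exposure are illustrative):

    import numpy as np
    from astropy.io import fits
    from astropy.time import Time, TimeDelta

    exp_time = 1000.0  # seconds; made-up exposure
    n_events = 100
    t_begin = Time.now()
    t_end = t_begin + TimeDelta(exp_time, format='sec')

    # EVENTS table: energies plus uniformly distributed arrival times.
    col_e = fits.Column(name='ENERGY', format='E', unit='eV',
                        array=np.random.uniform(500., 7000., n_events))
    col_t = fits.Column(name='TIME', format='1D', unit='s',
                        array=np.random.uniform(0., exp_time, n_events))
    tbhdu = fits.BinTableHDU.from_columns([col_e, col_t], name="EVENTS")
    tbhdu.header["HDUCLASS"] = "OGIP"
    tbhdu.header["HDUCLAS1"] = "EVENTS"
    tbhdu.header["EXPOSURE"] = exp_time
    tbhdu.header["TSTART"] = 0.0
    tbhdu.header["TSTOP"] = exp_time
    tbhdu.header["DATE-OBS"] = t_begin.tt.isot
    tbhdu.header["DATE-END"] = t_end.tt.isot

    # A single good-time interval spanning the whole exposure.
    start = fits.Column(name='START', format='1D', unit='s',
                        array=np.array([0.0]))
    stop = fits.Column(name='STOP', format='1D', unit='s',
                        array=np.array([exp_time]))
    gti = fits.BinTableHDU.from_columns([start, stop], name="STDGTI")
    gti.header["HDUCLAS1"] = "GTI"

    # Older astropy/pyfits spells "overwrite" as "clobber".
    fits.HDUList([fits.PrimaryHDU(), tbhdu, gti]).writeto("events.fits",
                                                          overwrite=True)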
https://bitbucket.org/yt_analysis/yt/commits/df29ab5cb767/
Changeset: df29ab5cb767
Branch: yt
User: jzuhone
Date: 2015-10-05 21:20:40+00:00
Summary: Minor cleanup
Affected #: 1 file
diff -r 11caaf5bd5e0f1501a43a3d79f4b90fd436f57de -r df29ab5cb767c88e92d719f59c2eb6be0c0eb809 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -687,11 +687,13 @@
num_events = len(events["xpix"])
- if comm.rank == 0: mylog.info("Total number of observed photons: %d" % num_events)
+ if comm.rank == 0:
+ mylog.info("Total number of observed photons: %d" % num_events)
if "RMF" in parameters and convolve_energies:
events, info = self._convolve_with_rmf(parameters["RMF"], events, mat_key)
- for k, v in info.items(): parameters[k] = v
+ for k, v in info.items():
+ parameters[k] = v
if exp_time_new is None:
parameters["ExposureTime"] = self.parameters["FiducialExposureTime"]
@@ -738,8 +740,6 @@
eidxs = np.argsort(events["eobs"])
phEE = events["eobs"][eidxs].d
- phXX = events["xpix"][eidxs]
- phYY = events["ypix"][eidxs]
detectedChannels = []
@@ -775,18 +775,18 @@
for q in range(fcurr,last):
if phEE[q] >= low and phEE[q] < high:
channelInd = np.random.choice(len(weights), p=weights)
- fcurr +=1
+ fcurr += 1
detectedChannels.append(trueChannel[channelInd])
if phEE[q] >= high:
break
pbar.update(fcurr)
- k+=1
+ k += 1
pbar.finish()
dchannel = np.array(detectedChannels)
- events["xpix"] = phXX
- events["ypix"] = phYY
+ events["xpix"] = events["xpix"][eidxs]
+ events["ypix"] = events["ypix"][eidxs]
events["eobs"] = YTArray(phEE, "keV")
events[tblhdu.header["CHANTYPE"]] = dchannel.astype(int)
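The cleanup drops the temporary phXX/phYY copies in favor of reindexing in place; the underlying idiom, in a minimal numpy sketch with made-up arrays:

    import numpy as np

    eobs = np.array([2.3, 0.7, 5.1, 1.2])  # keV, made-up energies
    xpix = np.array([10., 20., 30., 40.])
    ypix = np.array([1., 2., 3., 4.])

    # Sort by energy once, then apply the same permutation to every
    # per-event array so they stay aligned with each other.
    eidxs = np.argsort(eobs)
    xpix = xpix[eidxs]
    ypix = ypix[eidxs]
    eobs = eobs[eidxs]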
https://bitbucket.org/yt_analysis/yt/commits/fe81028a0b09/
Changeset: fe81028a0b09
Branch: yt
User: jzuhone
Date: 2015-10-05 21:53:17+00:00
Summary: We've pretty much covered the bases now, so this warning is superfluous
Affected #: 1 file
diff -r df29ab5cb767c88e92d719f59c2eb6be0c0eb809 -r fe81028a0b0995e6d1f09f72f3ed672f9a5a3a0a yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -722,7 +722,6 @@
"""
Convolve the events with a RMF file.
"""
- mylog.warning("This routine has not been tested to work with all RMFs. YMMV.")
mylog.info("Reading response matrix file (RMF): %s" % (respfile))
hdulist = _astropy.pyfits.open(respfile)
https://bitbucket.org/yt_analysis/yt/commits/d58d0c66cfd7/
Changeset: d58d0c66cfd7
Branch: yt
User: jzuhone
Date: 2015-10-07 15:43:33+00:00
Summary: Adding log here
Affected #: 1 file
diff -r fe81028a0b0995e6d1f09f72f3ed672f9a5a3a0a -r d58d0c66cfd7c667211f5fb6843a2703b2ff70c8 yt/utilities/on_demand_imports.py
--- a/yt/utilities/on_demand_imports.py
+++ b/yt/utilities/on_demand_imports.py
@@ -67,6 +67,7 @@
if self._units is None:
try:
from astropy import units
+ self.log
except ImportError:
units = NotAModule(self._name)
self._units = units
https://bitbucket.org/yt_analysis/yt/commits/3e2ce5013c35/
Changeset: 3e2ce5013c35
Branch: yt
User: jzuhone
Date: 2015-10-07 15:58:05+00:00
Summary: More descriptive error message when we can't find the RMF
Affected #: 1 file
diff -r d58d0c66cfd7c667211f5fb6843a2703b2ff70c8 -r 3e2ce5013c3595c891df7dc8774d0cf654218de2 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -544,7 +544,9 @@
elif "SPECRESP MATRIX" in rmf:
mat_key = "SPECRESP MATRIX"
else:
- raise RuntimeError("Cannot find the response matrix in the RMF %s!!" % parameters["RMF"])
+ raise RuntimeError("Cannot find the response matrix in the RMF "
+ "file %s! " % parameters["RMF"]+"It should "
+ "be named \"MATRIX\" or \"SPECRESP MATRIX\".")
rmf.close()
else:
mat_key = None
https://bitbucket.org/yt_analysis/yt/commits/0b879cada434/
Changeset: 0b879cada434
Branch: yt
User: jzuhone
Date: 2015-10-07 16:08:15+00:00
Summary: More descriptive error message when we ask for too many photons
Affected #: 1 file
diff -r 3e2ce5013c3595c891df7dc8774d0cf654218de2 -r 0b879cada4341c8f7d9f373b080923280dc100fc yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -597,7 +597,12 @@
(D_A*D_A*(1.+zobs)**3)
fak = Aratio*Tratio*Dratio
if fak > 1:
- raise ValueError("Spectrum scaling factor = %g, cannot be greater than unity." % fak)
+ raise ValueError("This combination of requested parameters results in "
+ "%g%% more photons collected than are " % (100.*(fak-1.)) +
+ "available in the sample. Please reduce the collecting "
+ "area, exposure time, or increase the distance/redshift "
+ "of the object. Alternatively, generate a larger sample "
+ "of photons.")
my_n_obs = np.uint64(n_ph_tot*fak)
n_obs_all = comm.mpi_allreduce(my_n_obs)
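For reference, the scaling factor being checked is the product of three independent ratios; a minimal worked sketch with made-up numbers:

    # Ratios of requested to fiducial parameters (made-up values).
    Aratio = 2.0    # collecting area: requested / fiducial
    Tratio = 1.5    # exposure time: requested / fiducial
    Dratio = 0.25   # (D_A0/D_A)**2 * ((1+z0)/(1+zobs))**3, roughly

    fak = Aratio * Tratio * Dratio  # 0.75 here: need 75% of the sample
    if fak > 1:
        raise ValueError("This combination of requested parameters results "
                         "in %g%% more photons collected than are available "
                         "in the sample." % (100. * (fak - 1.)))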
https://bitbucket.org/yt_analysis/yt/commits/95372a47bdc0/
Changeset: 95372a47bdc0
Branch: yt
User: jzuhone
Date: 2015-10-07 16:12:33+00:00
Summary: Fixing whitespace
Affected #: 1 file
diff -r 0b879cada4341c8f7d9f373b080923280dc100fc -r 95372a47bdc041d4a81e5b1587e70d443ad7345b yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -694,12 +694,12 @@
num_events = len(events["xpix"])
- if comm.rank == 0:
+ if comm.rank == 0:
mylog.info("Total number of observed photons: %d" % num_events)
if "RMF" in parameters and convolve_energies:
events, info = self._convolve_with_rmf(parameters["RMF"], events, mat_key)
- for k, v in info.items():
+ for k, v in info.items():
parameters[k] = v
if exp_time_new is None:
@@ -807,7 +807,6 @@
class EventList(object) :
def __init__(self, events, parameters):
-
self.events = events
self.parameters = parameters
self.num_events = events["xpix"].shape[0]
https://bitbucket.org/yt_analysis/yt/commits/e6ae53ea2ac7/
Changeset: e6ae53ea2ac7
Branch: yt
User: jzuhone
Date: 2015-10-08 14:48:35+00:00
Summary: Merge
Affected #: 72 files
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -3,6 +3,7 @@
include yt/visualization/mapserver/html/leaflet/*.css
include yt/visualization/mapserver/html/leaflet/*.js
include yt/visualization/mapserver/html/leaflet/images/*.png
+exclude scripts/pr_backport.py
recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE* *.cu
recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.html
recursive-include doc *.h *.c *.sh *.svgz *.pdf *.svg *.pyx
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 doc/source/developing/index.rst
--- a/doc/source/developing/index.rst
+++ b/doc/source/developing/index.rst
@@ -21,6 +21,7 @@
building_the_docs
testing
debugdrive
+ releasing
creating_datatypes
creating_derived_fields
creating_derived_quantities
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 doc/source/developing/releasing.rst
--- /dev/null
+++ b/doc/source/developing/releasing.rst
@@ -0,0 +1,208 @@
+How to Do a Release
+-------------------
+
+Periodically, the yt development community issues new releases. Since yt follows
+`semantic versioning <http://semver.org/>`_, the type of release can be read off
+from the version number used. Version numbers should follow the scheme
+``MAJOR.MINOR.PATCH``. There are three kinds of possible releases:
+
+* Bugfix releases
+
+ These releases are regularly scheduled and will optimally happen approximately
+ once a month. These releases should contain only fixes for bugs discovered in
+ earlier releases and should not contain new features or API changes. Bugfix
+ releases should increment the ``PATCH`` version number. Bugfix releases should
+ *not* be generated by merging from the ``yt`` branch, instead bugfix pull
+ requests should be manually backported using the PR backport script, described
+ below. Version ``3.2.2`` is a bugfix release.
+
+* Minor releases
+
+ These releases happen when new features are deemed ready to be merged into the
+ ``stable`` branch and should not happen on a regular schedule. Minor releases
+ can also include fixes for bugs if the fix is determined to be too invasive
+ for a bugfix release. Minor releases should *not* include
+ backwards-incompatible changes and should not change APIs. If an API change
+ is deemed to be necessary, the old API should continue to function but might
+ trigger deprecation warnings. Minor releases should happen by merging the
+ ``yt`` branch into the ``stable`` branch. Minor releases should increment the
+ ``MINOR`` version number and reset the ``PATCH`` version number to zero.
+ Version ``3.3.0`` is a minor release.
+
+* Major releases
+
+ These releases happen when the development community decides to make major
+ backwards-incompatible changes. In principle a major version release could
+ include arbitrary changes to the library. Major version releases should only
+ happen after extensive discussion and vetting among the developer and user
+ community. Like minor releases, a major release should happen by merging the
+ ``yt`` branch into the ``stable`` branch. Major releases should increment the
+ ``MAJOR`` version number and reset the ``MINOR`` and ``PATCH`` version numbers
+ to zero. If it ever happens, version ``4.0.0`` will be a major release.
+
+The job of doing a release differs depending on the kind of release. Below, we
+describe the necessary steps for each kind of release in detail.
+
+Doing a Bugfix Release
+~~~~~~~~~~~~~~~~~~~~~~
+
+As described above, bugfix releases are regularly scheduled updates for minor
+releases to ensure fixes for bugs make their way out to users in a timely
+manner. Since bugfix releases should not include new features, we do not issue
+bugfix releases by simply merging from the development ``yt`` branch into the
+``stable`` branch. Instead, we make use of the ``pr_backport.py`` script to
+manually cherry-pick bugfixes from the ``yt`` branch onto the ``stable``
+branch.
+
+The backport script issues interactive prompts to backport individual pull
+requests to the ``stable`` branch in a temporary clone of the main yt mercurial
+repository on bitbucket. The script is written this way to avoid editing
+history in a clone of the repository that a developer uses for day-to-day work
+and to avoid mixing work-in-progress changes with changes that have made their
+way to the "canonical" yt repository on bitbucket.
+
+Rather than automatically manipulating the temporary repository by scripting
+mercurial commands using ``python-hglib``, the script must be "operated" by a
+human who is ready to think carefully about what the script is telling them
+to do. Most operations will merely require copy/pasting a suggested mercurial
+command. However, some changes will require manual backporting.
+
+To run the backport script, first open two terminal windows. The first window
+will be used to run the backport script. The second terminal will be used to
+manipulate a temporary clone of the yt mercurial repository. In the first
+window, navigate to the ``scripts`` directory at the root of the yt repository
+and run the backport script,
+
+.. code-block:: bash
+
+ $ cd $YT_HG/scripts
+ $ python pr_backport.py
+
+You will then need to wait for about a minute (depending on the speed of your
+internet connection and bitbucket's servers) while the script makes a clone of
+the main yt repository and then gathers information about pull requests that
+have been merged since the last tagged release. Once this step finishes, you
+will be prompted to navigate to the temporary folder in a new separate terminal
+session. Do so, and then hit the enter key in the original terminal session.
+
+For each pull request in the set of pull requests that were merged since the
+last tagged release that were pointed at the "main" line of development
+(e.g. not the ``experimental`` bookmark), you will be prompted by the script
+with the PR number, title, description, and a suggested mercurial
+command to use to backport the pull request. If the pull request consists of a
+single changeset, you will be prompted to use ``hg graft``. If it contains more
+than one changeset, you will be prompted to use ``hg rebase``. Note that
+``rebase`` is an optional extension for mercurial that is not turned on by
+default. To enable it, add a section like the following in your ``.hgrc`` file:
+
+.. code-block:: none
+
+ [extensions]
+ rebase=
+
+Since ``rebase`` is bundled with core mercurial, you do not need to specify a
+path to the rebase extension, just say ``rebase=`` and mercurial will find the
+version of ``rebase`` bundled with mercurial. Note also that mercurial does not
+automatically update to the tip of the rebased head after executing ``hg
+rebase`` so you will need to manually issue ``hg update stable`` to move your
+working directory to the new head of the stable branch. The backport script
+should prompt you with a suggestion to update as well.
+
+If the pull request contains merge commits, you must take care to *not* backport
+commits that merge with the main line of development on the ``yt`` branch. Doing
+so may bring unrelated changes, including new features, into a bugfix
+release. If the pull request you'd like to backport contains merge commits, the
+backport script should warn you to be extra careful.
+
+Once you've finished backporting, the script will let you know that you are done
+and warn you to push your work. The temporary repository you have been working
+with will be deleted as soon as the script exits, so take care to push your work
+on the ``stable`` branch to your fork on bitbucket. Once you've pushed to your
+fork, you will be able to issue a pull request containing the backported fixes
+just like any other yt pull request.
+
+Doing a Minor or Major Release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This is much simpler than a bugfix release. All that needs to happen is the
+``yt`` branch must get merged into the ``stable`` branch, and any conflicts that
+happen must be resolved, almost certainly in favor of the yt branch. This can
+happen either using a merge tool such as ``vimdiff`` or ``kdiff3``, or by telling
+mercurial to write merge markers. If you prefer merge markers, the following
+configuration options should be turned on in your ``hgrc`` to get more detail
+during the merge:
+
+.. code-block:: none
+
+ [ui]
+ merge = internal:merge3
+ mergemarkers = detailed
+
+The first option tells mercurial to write merge markers that show the state of
+the conflicted region of the code on both sides of the merge as well as the
+"base" most recent common ancestor changeset. The second option tells mercurial
+to add extra information about the code near the merge markers.
+
+
+Incrementing Version Numbers and Tagging a Release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Before creating the tag for the release, you must increment the version numbers
+that are hard-coded in a few files in the yt source so that version metadata
+for the code is generated correctly. This includes things like ``yt.__version__``
+and the version that gets read by the Python Package Index (PyPI) infrastructure.
+
+The paths relative to the root of the repository for the three files that need
+to be edited are:
+
+* ``doc/source/conf.py``
+
+ The ``version`` and ``release`` variables need to be updated.
+
+* ``setup.py``
+
+ The ``VERSION`` variable needs to be updated.
+
+* ``yt/__init__.py``
+
+ The ``__version__`` variable must be updated.
+
+Once these files have been updated, commit these updates. This is the commit we
+will tag for the release.
+
+To actually create the tag, issue the following command:
+
+.. code-block:: bash
+
+ hg tag <tag-name>
+
+Where ``<tag-name>`` follows the project's naming scheme for tags
+(e.g. ``yt-3.2.1``). Commit the tag, and you should be ready to upload the
+release to PyPI.
+
+If you are doing a minor or major version number release, you will also need to
+update back to the development branch and update the development version numbers
+in the same files.
+
+
+Uploading to PyPI
+~~~~~~~~~~~~~~~~~
+
+To actually upload the release to the Python Package Index, you just need to
+issue the following command:
+
+.. code-block:: bash
+
+ python setup.py sdist upload -r https://pypi.python.org/pypi
+
+You will be prompted for your PyPI credentials and then the package should
+upload. Note that for this to complete successfully, you will need an account on
+PyPI and that account will need to be registered as an "owner" of the yt
+package. Right now there are three owners: Matt Turk, Britton Smith, and Nathan
+Goldbaum.
+
+After the release is uploaded to PyPI, you should send out an announcement
+e-mail to the yt mailing lists as well as other possibly interested mailing
+lists for all but bugfix releases. In addition, you should contact John ZuHone
+about uploading binary wheels to PyPI for Windows and OS X users and contact
+Nathan Goldbaum about getting the Anaconda packages updated.
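The three version-bump edits the new docs describe look roughly like this (version strings are illustrative):

    # doc/source/conf.py
    version = '3.2'
    release = '3.2.2'

    # setup.py
    VERSION = "3.2.2"

    # yt/__init__.py
    __version__ = "3.2.2"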
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -22,7 +22,7 @@
+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
| Athena | Y | N | Y | Y | Y | Y | Y | Full |
+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
-| Castro | Y | Y | Partial | Y | Y | Y | N | Full |
+| Castro | Y | N | Partial | Y | Y | Y | N | Full |
+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
| Chombo | Y | Y | Y | Y | Y | Y | Y | Full |
+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
@@ -42,7 +42,7 @@
+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
| MOAB | Y | N/A | Y | Y | Y | Y | Y | Full |
+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
-| Nyx | Y | Y | Y | Y | Y | Y | Y | Full |
+| Nyx | Y | N | Y | Y | Y | Y | Y | Full |
+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
| Orion | Y | Y | Y | Y | Y | Y | Y | Full |
+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 scripts/pr_backport.py
--- /dev/null
+++ b/scripts/pr_backport.py
@@ -0,0 +1,311 @@
+import hglib
+import requests
+import shutil
+import tempfile
+
+from datetime import datetime
+from distutils.version import LooseVersion
+from time import strptime, mktime
+
+MERGED_PR_ENDPOINT = ("http://bitbucket.org/api/2.0/repositories/yt_analysis/"
+ "yt/pullrequests/?state=MERGED")
+
+YT_REPO = "https://bitbucket.org/yt_analysis/yt"
+
+
+def clone_new_repo(source=None):
+ """Clones a new copy of yt_analysis/yt and returns a path to it"""
+ path = tempfile.mkdtemp()
+ dest_repo_path = path+'/yt-backport'
+ if source is None:
+ source = YT_REPO
+ hglib.clone(source=source, dest=dest_repo_path)
+ with hglib.open(dest_repo_path) as client:
+ # Changesets that are on the yt branch but aren't topological ancestors
+ # of whichever changeset the experimental bookmark is pointing at
+ client.update('heads(branch(yt) - ::bookmark(experimental))')
+ return dest_repo_path
+
+
+def get_first_commit_after_last_major_release(repo_path):
+ """Returns the SHA1 hash of the first commit to the yt branch that wasn't
+ included in the last tagged release.
+ """
+ with hglib.open(repo_path) as client:
+ tags = client.log("reverse(tag())")
+ tags = sorted([LooseVersion(t[2]) for t in tags])
+ for t in tags[::-1]:
+ if t.version[0:2] != ['yt', '-']:
+ continue
+ if len(t.version) == 4 or t.version[4] == 0:
+ last_major_tag = t
+ break
+ last_before_release = client.log(
+ "last(ancestors(%s) and branch(yt))" % str(last_major_tag))
+ first_after_release = client.log(
+ "first(descendants(%s) and branch(yt) and not %s)"
+ % (last_before_release[0][1], last_before_release[0][1]))
+ return str(first_after_release[0][1][:12])
+
+
+def get_branch_tip(repo_path, branch, exclude=None):
+ """Returns the SHA1 hash of the most recent commit on the given branch"""
+ revset = "head() and branch(%s)" % branch
+ if exclude is not None:
+ revset += " and not %s" % exclude
+ with hglib.open(repo_path) as client:
+ change = client.log(revset)[0][1][:12]
+ return change
+
+
+def get_lineage_between_release_and_tip(repo_path, first, last):
+ """Returns the lineage of changesets that were at one point the public tip"""
+ with hglib.open(repo_path) as client:
+ lineage = client.log("'%s'::'%s' and p1('%s'::'%s') + '%s'"
+ % (first, last, first, last, last))
+ return lineage
+
+
+def get_pull_requests_since_last_release(repo_path):
+ """Returns a list of pull requests made since the last tagged release"""
+ r = requests.get(MERGED_PR_ENDPOINT)
+ done = False
+ merged_prs = []
+ with hglib.open(repo_path) as client:
+ last_tag = client.log("reverse(tag())")[0]
+ while not done:
+ if r.status_code != 200:
+ raise RuntimeError
+ data = r.json()
+ prs = data['values']
+ for pr in prs:
+ activity = requests.get(pr['links']['activity']['href']).json()
+ merge_date = None
+ for action in activity['values']:
+ if 'update' in action and action['update']['state'] == 'MERGED':
+ merge_date = action['update']['date']
+ merge_date = merge_date.split('.')[0]
+ timestamp = mktime(strptime(merge_date, "%Y-%m-%dT%H:%M:%S"))
+ merge_date = datetime.fromtimestamp(timestamp)
+ break
+ if merge_date is None:
+ break
+ if merge_date < last_tag[6]:
+ done = True
+ break
+ merged_prs.append(pr)
+ r = requests.get(data['next'])
+ return merged_prs
+
+
+def cache_commit_data(prs):
+ """Avoid repeated calls to bitbucket API to get the list of commits per PR"""
+ commit_data = {}
+ for pr in prs:
+ data = requests.get(pr['links']['commits']['href']).json()
+ if data.keys() == [u'error']:
+ # this happens when commits have been stripped, e.g.
+ # https://bitbucket.org/yt_analysis/yt/pull-requests/1641
+ continue
+ done = False
+ commits = []
+ while not done:
+ commits.extend(data['values'])
+ if 'next' not in data:
+ done = True
+ else:
+ data = requests.get(data['next']).json()
+ commit_data[pr['id']] = commits
+ return commit_data
+
+
+def find_commit_in_prs(needle, commit_data, prs):
+ """Finds the commit `needle` PR in the commit_data dictionary
+
+ If found, returns the pr the needle commit is in. If the commit was not
+ part of the PRs in the dictionary, returns None.
+ """
+ for pr_id in commit_data:
+ commits = commit_data[pr_id]
+ for commit in commits:
+ if commit['hash'] == needle[1]:
+ pr = [pr for pr in prs if pr['id'] == pr_id][0]
+ return pr
+ return None
+
+
+def find_merge_commit_in_prs(needle, prs):
+ """Find the merge commit `needle` in the list of `prs`
+
+ If found, returns the pr the merge commit comes from. If not found, return
+ None
+ """
+ for pr in prs[::-1]:
+ if pr['merge_commit'] is not None:
+ if pr['merge_commit']['hash'] == needle[1][:12]:
+ return pr
+ return None
+
+
+def create_commits_to_prs_mapping(lineage, prs):
+ """create a mapping from commits to the pull requests that the commit is
+ part of
+ """
+ commits_to_prs = {}
+ # make a copy of this list to avoid side effects from calling this function
+ my_prs = list(prs)
+ commit_data = cache_commit_data(my_prs)
+ for commit in lineage:
+ cset_hash = commit[1]
+ message = commit[5]
+ if message.startswith('Merged in') and '(pull request #' in message:
+ pr = find_merge_commit_in_prs(commit, my_prs)
+ if pr is None:
+ continue
+ commits_to_prs[cset_hash] = pr
+ # Since we know this PR won't have another commit associated with it,
+ # remove from global list to reduce number of network accesses
+ my_prs.remove(commits_to_prs[cset_hash])
+ else:
+ pr = find_commit_in_prs(commit, commit_data, my_prs)
+ commits_to_prs[cset_hash] = pr
+ return commits_to_prs
+
+
+def invert_commits_to_prs_mapping(commits_to_prs):
+ """invert the mapping from individual commits to pull requests"""
+ inv_map = {}
+ for k, v in commits_to_prs.iteritems():
+ # can't save v itself in inv_map since it's an unhashable dictionary
+ if v is not None:
+ created_date = v['created_on'].split('.')[0]
+ timestamp = mktime(strptime(created_date, "%Y-%m-%dT%H:%M:%S"))
+ created_date = datetime.fromtimestamp(timestamp)
+ pr_desc = (v['id'], v['title'], created_date,
+ v['links']['html']['href'], v['description'])
+ else:
+ pr_desc = None
+ inv_map[pr_desc] = inv_map.get(pr_desc, [])
+ inv_map[pr_desc].append(k)
+ return inv_map
+
+
+def get_last_descendant(repo_path, commit):
+ """get the most recent descendant of a commit"""
+ with hglib.open(repo_path) as client:
+ com = client.log('last(%s::)' % commit)
+ return com[0][1][:12]
+
+def screen_already_backported(repo_path, inv_map):
+ with hglib.open(repo_path) as client:
+ tags = client.log("reverse(tag())")
+ major_tags = [t for t in tags if t[2].endswith('.0')]
+ most_recent_major_tag_name = major_tags[0][2]
+ lineage = client.log(
+ "descendants(%s) and branch(stable)" % most_recent_major_tag_name)
+ prs_to_screen = []
+ for pr in inv_map:
+ for commit in lineage:
+ if commit[5].startswith('Backporting PR #%s' % pr[0]):
+ prs_to_screen.append(pr)
+ for pr in prs_to_screen:
+ del inv_map[pr]
+ return inv_map
+
+def commit_already_on_stable(repo_path, commit):
+ with hglib.open(repo_path) as client:
+ commit_info = client.log(commit)[0]
+ most_recent_tag_name = client.log("reverse(tag())")[0][2]
+ lineage = client.log(
+ "descendants(%s) and branch(stable)" % most_recent_tag_name)
+ # if there is a stable commit with the same commit message,
+ # it's been grafted
+ if any([commit_info[5] == c[5] for c in lineage]):
+ return True
+ return False
+
+def backport_pr_commits(repo_path, inv_map, last_stable, prs):
+ """backports pull requests to the stable branch.
+
+ Accepts a dictionary mapping pull requests to a list of commits that
+ are in the pull request.
+ """
+ pr_list = inv_map.keys()
+ pr_list = sorted(pr_list, key=lambda x: x[2])
+ for pr_desc in pr_list:
+ merge_warn = False
+ merge_commits = []
+ pr = [pr for pr in prs if pr['id'] == pr_desc[0]][0]
+ data = requests.get(pr['links']['commits']['href']).json()
+ commits = data['values']
+ while 'next' in data:
+ data = requests.get(data['next']).json()
+ commits.extend(data['values'])
+ commits = [com['hash'][:12] for com in commits]
+ with hglib.open(repo_path) as client:
+ for com in commits:
+ if client.log('merge() and %s' % com) != []:
+ merge_warn = True
+ merge_commits.append(com)
+ if len(commits) > 1:
+ revset = " | ".join(commits)
+ revset = '"%s"' % revset
+ message = "Backporting PR #%s %s" % \
+ (pr['id'], pr['links']['html']['href'])
+ dest = get_last_descendant(repo_path, last_stable)
+ message = \
+ "hg rebase -r %s --keep --collapse -m \"%s\" -d %s\n" % \
+ (revset, message, dest)
+ message += "hg update stable\n\n"
+ if merge_warn is True:
+ if len(merge_commits) > 1:
+ merge_commits = ", ".join(merge_commits)
+ else:
+ merge_commits = merge_commits[0]
+ message += \
+ "WARNING, PULL REQUEST CONTAINS MERGE COMMITS, CONSIDER\n" \
+ "BACKPORTING BY HAND TO AVOID BACKPORTING UNWANTED CHANGES\n"
+ message += \
+ "Merge commits are %s\n\n" % merge_commits
+ else:
+ if commit_already_on_stable(repo_path, commits[0]) is True:
+ continue
+ message = "hg graft %s\n" % commits[0]
+ print "PR #%s\nTitle: %s\nCreated on: %s\nLink: %s\n%s" % pr_desc
+ print "To backport, issue the following command(s):\n"
+ print message
+ raw_input('Press any key to continue')
+
+
+if __name__ == "__main__":
+ print ""
+ print "Gathering PR information, this may take a minute."
+ print "Don't worry, yt loves you."
+ print ""
+ repo_path = clone_new_repo()
+ try:
+ last_major_release = get_first_commit_after_last_major_release(repo_path)
+ last_dev = get_branch_tip(repo_path, 'yt', 'experimental')
+ last_stable = get_branch_tip(repo_path, 'stable')
+ lineage = get_lineage_between_release_and_tip(
+ repo_path, last_major_release, last_dev)
+ prs = get_pull_requests_since_last_release(repo_path)
+ commits_to_prs = create_commits_to_prs_mapping(lineage, prs)
+ inv_map = invert_commits_to_prs_mapping(commits_to_prs)
+ # for now, ignore commits that aren't part of a pull request since
+ # the last bugfix release. These are mostly commits in pull requests
+ # from before the last bugfix release but might include commits that
+ # were pushed directly to the repo.
+ del inv_map[None]
+
+ inv_map = screen_already_backported(repo_path, inv_map)
+ print "In another terminal window, navigate to the following path:"
+ print "%s" % repo_path
+ raw_input("Press any key to continue")
+ backport_pr_commits(repo_path, inv_map, last_stable, prs)
+ raw_input(
+ "Now you need to push your backported changes. The temporary\n"
+ "repository currently being used will be deleted as soon as you\n"
+ "press any key.")
+ finally:
+ shutil.rmtree(repo_path)
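pr_backport.py leans on python-hglib revset queries throughout; a minimal standalone sketch of the core call pattern (the repository path is hypothetical):

    import hglib

    repo_path = "/tmp/yt-backport"  # hypothetical clone location
    with hglib.open(repo_path) as client:
        # client.log() accepts a mercurial revset string and returns
        # (rev, node, tags, branch, author, desc, date) tuples.
        tip = client.log("head() and branch(stable)")[0]
        print("stable tip: %s" % tip[1][:12])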
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 scripts/yt_lodgeit.py
--- a/scripts/yt_lodgeit.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
- LodgeIt!
- ~~~~~~~~
-
- A script that pastes stuff into the yt-project pastebin on
- paste.yt-project.org.
-
- Modified (very, very slightly) from the original script by the authors
- below.
-
- .lodgeitrc / _lodgeitrc
- -----------------------
-
- Under UNIX create a file called ``~/.lodgeitrc``, under Windows
- create a file ``%APPDATA%/_lodgeitrc`` to override defaults::
-
- language=default_language
- clipboard=true/false
- open_browser=true/false
- encoding=fallback_charset
-
- :authors: 2007-2008 Georg Brandl <georg at python.org>,
- 2006 Armin Ronacher <armin.ronacher at active-4.com>,
- 2006 Matt Good <matt at matt-good.net>,
- 2005 Raphael Slinckx <raphael at slinckx.net>
-"""
-import os
-import sys
-from optparse import OptionParser
-
-
-SCRIPT_NAME = os.path.basename(sys.argv[0])
-VERSION = '0.3'
-SERVICE_URL = 'http://paste.yt-project.org/'
-SETTING_KEYS = ['author', 'title', 'language', 'private', 'clipboard',
- 'open_browser']
-
-# global server proxy
-_xmlrpc_service = None
-
-
-def fail(msg, code):
- """Bail out with an error message."""
- print >> sys.stderr, 'ERROR: %s' % msg
- sys.exit(code)
-
-
-def load_default_settings():
- """Load the defaults from the lodgeitrc file."""
- settings = {
- 'language': None,
- 'clipboard': True,
- 'open_browser': False,
- 'encoding': 'iso-8859-15'
- }
- rcfile = None
- if os.name == 'posix':
- rcfile = os.path.expanduser('~/.lodgeitrc')
- elif os.name == 'nt' and 'APPDATA' in os.environ:
- rcfile = os.path.expandvars(r'$APPDATA\_lodgeitrc')
- if rcfile:
- try:
- f = open(rcfile)
- for line in f:
- if line.strip()[:1] in '#;':
- continue
- p = line.split('=', 1)
- if len(p) == 2:
- key = p[0].strip().lower()
- if key in settings:
- if key in ('clipboard', 'open_browser'):
- settings[key] = p[1].strip().lower() in \
- ('true', '1', 'on', 'yes')
- else:
- settings[key] = p[1].strip()
- f.close()
- except IOError:
- pass
- settings['tags'] = []
- settings['title'] = None
- return settings
-
-
-def make_utf8(text, encoding):
- """Convert a text to UTF-8, brute-force."""
- try:
- u = unicode(text, 'utf-8')
- uenc = 'utf-8'
- except UnicodeError:
- try:
- u = unicode(text, encoding)
- uenc = 'utf-8'
- except UnicodeError:
- u = unicode(text, 'iso-8859-15', 'ignore')
- uenc = 'iso-8859-15'
- try:
- import chardet
- except ImportError:
- return u.encode('utf-8')
- d = chardet.detect(text)
- if d['encoding'] == uenc:
- return u.encode('utf-8')
- return unicode(text, d['encoding'], 'ignore').encode('utf-8')
-
-
-def get_xmlrpc_service():
- """Create the XMLRPC server proxy and cache it."""
- global _xmlrpc_service
- import xmlrpclib
- if _xmlrpc_service is None:
- try:
- _xmlrpc_service = xmlrpclib.ServerProxy(SERVICE_URL + 'xmlrpc/',
- allow_none=True)
- except Exception, err:
- fail('Could not connect to Pastebin: %s' % err, -1)
- return _xmlrpc_service
-
-
-def copy_url(url):
- """Copy the url into the clipboard."""
- # try windows first
- try:
- import win32clipboard
- except ImportError:
- # then give pbcopy a try. do that before gtk because
- # gtk might be installed on os x but nobody is interested
- # in the X11 clipboard there.
- from subprocess import Popen, PIPE
- try:
- client = Popen(['pbcopy'], stdin=PIPE)
- except OSError:
- try:
- import pygtk
- pygtk.require('2.0')
- import gtk
- import gobject
- except ImportError:
- return
- gtk.clipboard_get(gtk.gdk.SELECTION_CLIPBOARD).set_text(url)
- gobject.idle_add(gtk.main_quit)
- gtk.main()
- else:
- client.stdin.write(url)
- client.stdin.close()
- client.wait()
- else:
- win32clipboard.OpenClipboard()
- win32clipboard.EmptyClipboard()
- win32clipboard.SetClipboardText(url)
- win32clipboard.CloseClipboard()
-
-
-def open_webbrowser(url):
- """Open a new browser window."""
- import webbrowser
- webbrowser.open(url)
-
-
-def language_exists(language):
- """Check if a language alias exists."""
- xmlrpc = get_xmlrpc_service()
- langs = xmlrpc.pastes.getLanguages()
- return language in langs
-
-
-def get_mimetype(data, filename):
- """Try to get MIME type from data."""
- try:
- import gnomevfs
- except ImportError:
- from mimetypes import guess_type
- if filename:
- return guess_type(filename)[0]
- else:
- if filename:
- return gnomevfs.get_mime_type(os.path.abspath(filename))
- return gnomevfs.get_mime_type_for_data(data)
-
-
-def print_languages():
- """Print a list of all supported languages, with description."""
- xmlrpc = get_xmlrpc_service()
- languages = xmlrpc.pastes.getLanguages().items()
- languages.sort(lambda a, b: cmp(a[1].lower(), b[1].lower()))
- print 'Supported Languages:'
- for alias, name in languages:
- print ' %-30s%s' % (alias, name)
-
-
-def download_paste(uid):
- """Download a paste given by ID."""
- xmlrpc = get_xmlrpc_service()
- paste = xmlrpc.pastes.getPaste(uid)
- if not paste:
- fail('Paste "%s" does not exist.' % uid, 5)
- print paste['code'].encode('utf-8')
-
-
-def create_paste(code, language, filename, mimetype, private):
- """Create a new paste."""
- xmlrpc = get_xmlrpc_service()
- rv = xmlrpc.pastes.newPaste(language, code, None, filename, mimetype,
- private)
- if not rv:
- fail('Could not create paste. Something went wrong '
- 'on the server side.', 4)
- return rv
-
-
-def compile_paste(filenames, langopt):
- """Create a single paste out of zero, one or multiple files."""
- def read_file(f):
- try:
- return f.read()
- finally:
- f.close()
- mime = ''
- lang = langopt or ''
- if not filenames:
- data = read_file(sys.stdin)
- if not langopt:
- mime = get_mimetype(data, '') or ''
- fname = ""
- elif len(filenames) == 1:
- fname = filenames[0]
- data = read_file(open(filenames[0], 'rb'))
- if not langopt:
- mime = get_mimetype(data, filenames[0]) or ''
- else:
- result = []
- for fname in filenames:
- data = read_file(open(fname, 'rb'))
- if langopt:
- result.append('### %s [%s]\n\n' % (fname, langopt))
- else:
- result.append('### %s\n\n' % fname)
- result.append(data)
- result.append('\n\n')
- data = ''.join(result)
- lang = 'multi'
- return data, lang, fname, mime
-
-
-def main():
- """Main script entry point."""
-
- usage = ('Usage: %%prog [options] [FILE ...]\n\n'
- 'Read the files and paste their contents to %s.\n'
- 'If no file is given, read from standard input.\n'
- 'If multiple files are given, they are put into a single paste.'
- % SERVICE_URL)
- parser = OptionParser(usage=usage)
-
- settings = load_default_settings()
-
- parser.add_option('-v', '--version', action='store_true',
- help='Print script version')
- parser.add_option('-L', '--languages', action='store_true', default=False,
- help='Retrieve a list of supported languages')
- parser.add_option('-l', '--language', default=settings['language'],
- help='Used syntax highlighter for the file')
- parser.add_option('-e', '--encoding', default=settings['encoding'],
- help='Specify the encoding of a file (default is '
- 'utf-8 or guessing if available)')
- parser.add_option('-b', '--open-browser', dest='open_browser',
- action='store_true',
- default=settings['open_browser'],
- help='Open the paste in a web browser')
- parser.add_option('-p', '--private', action='store_true', default=False,
- help='Paste as private')
- parser.add_option('--no-clipboard', dest='clipboard',
- action='store_false',
- default=settings['clipboard'],
- help="Don't copy the url into the clipboard")
- parser.add_option('--download', metavar='UID',
- help='Download a given paste')
-
- opts, args = parser.parse_args()
-
- # special modes of operation:
- # - paste script version
- if opts.version:
- print '%s: version %s' % (SCRIPT_NAME, VERSION)
- sys.exit()
- # - print list of languages
- elif opts.languages:
- print_languages()
- sys.exit()
- # - download Paste
- elif opts.download:
- download_paste(opts.download)
- sys.exit()
-
- # check language if given
- if opts.language and not language_exists(opts.language):
- fail('Language %s is not supported.' % opts.language, 3)
-
- # load file(s)
- try:
- data, language, filename, mimetype = compile_paste(args, opts.language)
- except Exception, err:
- fail('Error while reading the file(s): %s' % err, 2)
- if not data:
- fail('Aborted, no content to paste.', 4)
-
- # create paste
- code = make_utf8(data, opts.encoding)
- pid = create_paste(code, language, filename, mimetype, opts.private)
- url = '%sshow/%s/' % (SERVICE_URL, pid)
- print url
- if opts.open_browser:
- open_webbrowser(url)
- if opts.clipboard:
- copy_url(url)
-
-
-if __name__ == '__main__':
- sys.exit(main())
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 setup.py
--- a/setup.py
+++ b/setup.py
@@ -164,7 +164,7 @@
config.make_config_py()
# config.make_svn_version_py()
config.add_subpackage('yt', 'yt')
- config.add_scripts("scripts/*")
+ config.add_scripts("scripts/iyt")
return config
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -15,7 +15,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
from .absorption_line import tau_profile
@@ -159,7 +159,9 @@
field_data = {}
if use_peculiar_velocity:
input_fields.append('velocity_los')
+ input_fields.append('redshift_eff')
field_units["velocity_los"] = "cm/s"
+ field_units["redshift_eff"] = ""
for feature in self.line_list + self.continuum_list:
if not feature['field_name'] in input_fields:
input_fields.append(feature['field_name'])
@@ -204,11 +206,11 @@
for continuum in self.continuum_list:
column_density = field_data[continuum['field_name']] * field_data['dl']
- delta_lambda = continuum['wavelength'] * field_data['redshift']
+ # redshift_eff field combines cosmological and velocity redshifts
if use_peculiar_velocity:
- # include factor of (1 + z) because our velocity is in proper frame.
- delta_lambda += continuum['wavelength'] * (1 + field_data['redshift']) * \
- field_data['velocity_los'] / speed_of_light_cgs
+ delta_lambda = continuum['wavelength'] * field_data['redshift_eff']
+ else:
+ delta_lambda = continuum['wavelength'] * field_data['redshift']
this_wavelength = delta_lambda + continuum['wavelength']
right_index = np.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
left_index = np.digitize((this_wavelength *
@@ -242,11 +244,11 @@
for line in parallel_objects(self.line_list, njobs=njobs):
column_density = field_data[line['field_name']] * field_data['dl']
- delta_lambda = line['wavelength'] * field_data['redshift']
+ # redshift_eff field combines cosmological and velocity redshifts
if use_peculiar_velocity:
- # include factor of (1 + z) because our velocity is in proper frame.
- delta_lambda += line['wavelength'] * (1 + field_data['redshift']) * \
- field_data['velocity_los'] / speed_of_light_cgs
+ delta_lambda = line['wavelength'] * field_data['redshift_eff']
+ else:
+ delta_lambda = line['wavelength'] * field_data['redshift']
thermal_b = np.sqrt((2 * boltzmann_constant_cgs *
field_data['temperature']) /
line['atomic_mass'])
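For reference, a minimal standalone sketch (hypothetical wavelength grid and redshifts; lambda_bins stands in for self.lambda_bins) of how the shifted line centers computed above are located on the wavelength grid with np.digitize:

    import numpy as np

    lambda_bins = np.linspace(1100.0, 1300.0, 2001)  # Angstrom
    n_lambda = lambda_bins.size

    wavelength = 1215.67                     # rest wavelength
    redshift_eff = np.array([0.012, 0.031])  # hypothetical absorbers
    delta_lambda = wavelength * redshift_eff
    this_wavelength = delta_lambda + wavelength

    # digitize returns the index of the bin each line lands in; the
    # clip keeps out-of-range lines from indexing past the spectrum
    right_index = np.digitize(this_wavelength, lambda_bins).clip(0, n_lambda)
    print(right_index)
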
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -1,4 +1,4 @@
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
from yt.analysis_modules.absorption_spectrum.absorption_line import \
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -13,7 +13,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
import os
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -13,7 +13,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
from yt.analysis_modules.cosmological_observation.cosmology_splice import \
@@ -29,6 +29,7 @@
from yt.utilities.parallel_tools.parallel_analysis_interface import \
parallel_objects, \
parallel_root_only
+from yt.utilities.physical_constants import speed_of_light_cgs
class LightRay(CosmologySplice):
"""
@@ -365,7 +366,7 @@
all_fields.extend(['dl', 'dredshift', 'redshift'])
if get_los_velocity:
all_fields.extend(['velocity_x', 'velocity_y',
- 'velocity_z', 'velocity_los'])
+ 'velocity_z', 'velocity_los', 'redshift_eff'])
data_fields.extend(['velocity_x', 'velocity_y', 'velocity_z'])
all_ray_storage = {}
@@ -457,6 +458,28 @@
sub_data['redshift'] = my_segment['redshift'] - \
sub_data['dredshift'].cumsum() + sub_data['dredshift']
+ # When velocity_los is present, add effective redshift
+ # (redshift_eff) field by combining cosmological redshift and
+ # doppler redshift.
+
+ # first convert los velocities to comoving frame (ie mult. by (1+z)),
+ # then calculate doppler redshift:
+ # 1 + redshift_dopp = sqrt((1+v/c) / (1-v/c))
+
+ # then to add cosmological redshift and doppler redshift, follow
+ # eqn 3.75 in Peacock's Cosmological Physics:
+ # 1 + z_obs = (1 + z_cosmo) * (1 + z_doppler)
+ # Alternatively, see eqn 5.49 in Peebles for a similar result.
+ if get_los_velocity:
+
+ velocity_los_cm = (1 + sub_data['redshift']) * \
+ sub_data['velocity_los']
+ redshift_dopp = ((1 + velocity_los_cm / speed_of_light_cgs) /
+ (1 - velocity_los_cm / speed_of_light_cgs))**(0.5) - 1
+ sub_data['redshift_eff'] = ((1 + redshift_dopp) * \
+ (1 + sub_data['redshift'])) - 1
+ del velocity_los_cm, redshift_dopp
+
# Remove empty lixels.
sub_dl_nonzero = sub_data['dl'].nonzero()
for field in all_fields:
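A standalone sketch of the effective-redshift construction above, with hypothetical per-lixel values (speed_of_light_cgs is about 2.998e10 cm/s):

    import numpy as np

    c = 2.99792458e10                         # cm/s
    redshift = np.array([0.5, 1.0])           # cosmological redshift
    velocity_los = np.array([3.0e7, -1.5e7])  # proper LOS velocity, cm/s

    # convert to the comoving frame, then apply the relativistic
    # Doppler formula: 1 + z_dopp = sqrt((1 + v/c) / (1 - v/c))
    velocity_los_cm = (1 + redshift) * velocity_los
    beta = velocity_los_cm / c
    redshift_dopp = np.sqrt((1 + beta) / (1 - beta)) - 1

    # combine multiplicatively (Peacock eqn 3.75):
    # 1 + z_obs = (1 + z_cosmo) * (1 + z_dopp)
    redshift_eff = (1 + redshift_dopp) * (1 + redshift) - 1
    print(redshift_eff)
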
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/analysis_modules/halo_analysis/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_analysis/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_analysis/enzofof_merger_tree.py
@@ -34,7 +34,7 @@
import numpy as np
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import glob
import os
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -13,7 +13,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
import os
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/analysis_modules/halo_analysis/halo_catalog.py
--- a/yt/analysis_modules/halo_analysis/halo_catalog.py
+++ b/yt/analysis_modules/halo_analysis/halo_catalog.py
@@ -13,7 +13,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
import os
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -14,7 +14,7 @@
#-----------------------------------------------------------------------------
import gc
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import math
import numpy as np
import glob
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -22,7 +22,7 @@
from collections import OrderedDict
import numpy as np
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
class ParticleTrajectories(object):
r"""A collection of particle trajectories in time over a series of
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -199,15 +199,24 @@
ei = start_e
for cn, Z in zip(number_of_photons[ibegin:iend], metalZ[ibegin:iend]):
if cn == 0: continue
+ # The rather verbose form of the next few statements is a
+ # result of code optimization and shouldn't be changed
+ # without checking for performance degradation. See
+ # https://bitbucket.org/yt_analysis/yt/pull-requests/1766
+ # for details.
if self.method == "invert_cdf":
- cumspec = cumspec_c + Z*cumspec_m
- cumspec /= cumspec[-1]
+ cumspec = cumspec_c
+ cumspec += Z * cumspec_m
+ norm_factor = 1.0 / cumspec[-1]
+ cumspec *= norm_factor
randvec = np.random.uniform(size=cn)
randvec.sort()
cell_e = np.interp(randvec, cumspec, ebins)
elif self.method == "accept_reject":
- tot_spec = cspec.d+Z*mspec.d
- tot_spec /= tot_spec.sum()
+ tot_spec = cspec.d
+ tot_spec += Z * mspec.d
+ norm_factor = 1.0 / tot_spec.sum()
+ tot_spec *= norm_factor
eidxs = np.random.choice(nchan, size=cn, p=tot_spec)
cell_e = emid[eidxs]
energies[ei:ei+cn] = cell_e
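The comment above refers to avoiding temporary arrays inside a hot loop; a minimal sketch of the difference (array sizes and Z are hypothetical, and the copy stands in for the loop's reuse of cumspec_c):

    import numpy as np

    cumspec_c = np.random.random(4096).cumsum()
    cumspec_m = np.random.random(4096).cumsum()
    Z = 0.3

    # out-of-place: fresh arrays for the sum and for the division
    cumspec = cumspec_c + Z * cumspec_m
    cumspec /= cumspec[-1]

    # in-place: += and *= update one buffer, saving allocations per
    # loop iteration (Z * cumspec_m is still a temporary)
    cumspec2 = cumspec_c.copy()
    cumspec2 += Z * cumspec_m
    cumspec2 *= 1.0 / cumspec2[-1]

    assert np.allclose(cumspec, cumspec2)
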
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -35,7 +35,7 @@
communication_system, parallel_root_only, get_mpi_type, \
parallel_capable
from yt.units.yt_array import YTQuantity, YTArray, uconcatenate
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
from yt.utilities.on_demand_imports import _astropy
import warnings
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -11,7 +11,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
import os
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -14,7 +14,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
import os
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -16,7 +16,7 @@
import os
import numpy as np
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import math
from yt.config import ytcfg
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -13,7 +13,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
from yt.funcs import mylog
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -13,7 +13,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
from yt.funcs import *
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/data_objects/tests/test_compose.py
--- a/yt/data_objects/tests/test_compose.py
+++ b/yt/data_objects/tests/test_compose.py
@@ -20,7 +20,6 @@
yi = y / min_dx
zi = z / min_dx
index = xi + delta[0] * (yi + delta[1] * zi)
- index = index.astype('int64')
return index
def test_compose_no_overlap():
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/fields/field_aliases.py
--- a/yt/fields/field_aliases.py
+++ b/yt/fields/field_aliases.py
@@ -83,7 +83,11 @@
("CuttingPlaneBy", "cutting_plane_by"),
("MeanMolecularWeight", "mean_molecular_weight"),
("particle_density", "particle_density"),
+ ("ThermalEnergy", "thermal_energy"),
+ ("TotalEnergy", "total_energy"),
("MagneticEnergy", "magnetic_energy"),
+ ("GasEnergy", "thermal_energy"),
+ ("Gas_Energy", "thermal_energy"),
("BMagnitude", "b_magnitude"),
("PlasmaBeta", "plasma_beta"),
("MagneticPressure", "magnetic_pressure"),
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -806,6 +806,7 @@
registry.add_field(field_name, function = _vol_weight,
validators = [ValidateSpatial(0)],
units = field_units)
+ registry.find_dependencies((field_name,))
return [field_name]
def add_nearest_neighbor_field(ptype, coord_name, registry, nneighbors = 64):
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -193,7 +193,7 @@
particle header, star files, etc.
"""
base_prefix, base_suffix = filename_pattern['amr']
- aexpstr = 'a'+file_amr.rsplit('a',1)[1].replace(base_suffix,'')
+ numericstr = file_amr.rsplit('_',1)[1].replace(base_suffix,'')
possibles = glob.glob(os.path.dirname(os.path.abspath(file_amr))+"/*")
for filetype, (prefix, suffix) in filename_pattern.items():
# if this attribute is already set skip it
@@ -201,7 +201,10 @@
continue
match = None
for possible in possibles:
- if possible.endswith(aexpstr+suffix):
+ if possible.endswith(numericstr+suffix):
+ if os.path.basename(possible).startswith(prefix):
+ match = possible
+ elif possible.endswith(suffix):
if os.path.basename(possible).startswith(prefix):
match = possible
if match is not None:
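A sketch of the two-stage sibling-file match above, with hypothetical ART-style filenames; the numeric tail of the AMR filename is preferred, and a bare suffix match is the fallback:

    import os

    file_amr = "10MpcBox_csf512_a0.300.d"
    base_suffix = ".d"
    numericstr = file_amr.rsplit('_', 1)[1].replace(base_suffix, '')

    possibles = ["10MpcBox_csf512_a0.300.d", "pt_a0.300.dat",
                 "stars_a0.300.dat"]
    prefix, suffix = "pt", ".dat"

    match = None
    for possible in possibles:
        if possible.endswith(numericstr + suffix):
            if os.path.basename(possible).startswith(prefix):
                match = possible
        elif possible.endswith(suffix):
            if os.path.basename(possible).startswith(prefix):
                match = possible
    print(match)   # pt_a0.300.dat
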
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -13,7 +13,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
import weakref
import glob #ST 9/12
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -13,7 +13,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import re
import os
import weakref
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -88,7 +88,7 @@
field_dict = {}
for key, val in self._handle.attrs.items():
if key.startswith('particle_'):
- comp_number = int(re.match('particle_component_(\d)', key).groups()[0])
+ comp_number = int(re.match('particle_component_(\d+)', key).groups()[0])
field_dict[val.decode("ascii")] = comp_number
self._particle_field_index = field_dict
return self._particle_field_index
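The one-character regex fix above matters as soon as a dataset has more than ten particle components; a quick sketch:

    import re

    key = "particle_component_12"
    # '\d' captures exactly one digit, truncating 12 to 1
    old = int(re.match(r'particle_component_(\d)', key).groups()[0])
    # '\d+' captures the whole number
    new = int(re.match(r'particle_component_(\d+)', key).groups()[0])
    print(old, new)   # 1 12
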
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/eagle/data_structures.py
--- a/yt/frontends/eagle/data_structures.py
+++ b/yt/frontends/eagle/data_structures.py
@@ -15,7 +15,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
import types
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/enzo/answer_testing_support.py
--- a/yt/frontends/enzo/answer_testing_support.py
+++ b/yt/frontends/enzo/answer_testing_support.py
@@ -78,16 +78,17 @@
def __call__(self):
# Read in the ds
- ds = load(self.data_file)
- exact = self.get_analytical_solution()
+ ds = load(self.data_file)
+ ds.setup_deprecated_fields()
+ exact = self.get_analytical_solution()
ad = ds.all_data()
position = ad['x']
for k in self.fields:
- field = ad[k]
+ field = ad[k].d
for xmin, xmax in zip(self.left_edges, self.right_edges):
mask = (position >= xmin)*(position <= xmax)
- exact_field = np.interp(position[mask], exact['pos'], exact[k])
+ exact_field = np.interp(position[mask], exact['pos'], exact[k])
myname = "ShockTubeTest_%s" % k
# yield test vs analytical solution
yield AssertWrapper(myname, assert_allclose, field[mask],
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -13,7 +13,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import weakref
import numpy as np
import os
@@ -636,7 +636,7 @@
def _fill_arrays(self, ei, si, LE, RE, npart, nap):
self.grid_dimensions[:,:1] = ei
- self.grid_dimensions[:,:1] -= np.array(si, self.float_type)
+ self.grid_dimensions[:,:1] -= np.array(si, dtype='i4')
self.grid_dimensions += 1
self.grid_left_edge[:,:1] = LE
self.grid_right_edge[:,:1] = RE
@@ -651,7 +651,7 @@
def _fill_arrays(self, ei, si, LE, RE, npart, nap):
self.grid_dimensions[:,:2] = ei
- self.grid_dimensions[:,:2] -= np.array(si, self.float_type)
+ self.grid_dimensions[:,:2] -= np.array(si, dtype='i4')
self.grid_dimensions += 1
self.grid_left_edge[:,:2] = LE
self.grid_right_edge[:,:2] = RE
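A sketch of the dtype pitfall fixed above: grid_dimensions is an integer array, and in-place subtraction of a float array is not a same-kind cast, so recent NumPy rejects it (shapes here are hypothetical):

    import numpy as np

    grid_dimensions = np.zeros((4, 3), dtype='int32')
    si = [[1], [2], [3], [4]]

    try:
        # the old code cast the start indices to the grid float type
        grid_dimensions[:, :1] -= np.array(si, dtype='float64')
    except TypeError as err:
        print("float dtype fails in-place:", err)

    grid_dimensions[:, :1] -= np.array(si, dtype='i4')   # works
    print(grid_dimensions[:, 0])
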
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -22,7 +22,7 @@
from yt.utilities.logger import ytLogger as mylog
from yt.geometry.selection_routines import mask_fill, AlwaysSelector
from yt.extern.six import u, b, iteritems
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
from yt.funcs import *
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -13,7 +13,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import stat
import numpy as np
import weakref
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -14,7 +14,7 @@
#-----------------------------------------------------------------------------
import numpy as np
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
from yt.utilities.math_utils import prec_accum
from itertools import groupby
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -15,7 +15,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
import stat
import struct
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/gadget/tests/test_outputs.py
--- a/yt/frontends/gadget/tests/test_outputs.py
+++ b/yt/frontends/gadget/tests/test_outputs.py
@@ -60,10 +60,12 @@
def test_iso_collapse():
for test in sph_answer(isothermal_h5, 'snap_505', 2**17,
iso_fields, ds_kwargs=iso_kwargs):
+ test_iso_collapse.__name__ = test.description
yield test
@requires_ds(gdg, big_data=True)
def test_gadget_disk_galaxy():
for test in sph_answer(gdg, 'snapshot_200', 11907080, gdg_fields,
ds_kwargs=gdg_kwargs):
+ test_gadget_disk_galaxy.__name__ = test.description
yield test
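Rebinding the generator's __name__ is how nose labels each yielded answer test individually; a minimal sketch of the mechanism (FakeTest is a stand-in for the tests sph_answer yields):

    class FakeTest:
        def __init__(self, description):
            self.description = description
        def __call__(self):
            pass

    def test_generator():
        for desc in ("density_values", "temperature_values"):
            test = FakeTest(desc)
            # nose reads the generator's __name__ when reporting each
            # yielded test, so rebind it to the test's description
            test_generator.__name__ = test.description
            yield test

    for t in test_generator():
        t()   # each call would be reported under its own name by nose
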
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -15,7 +15,7 @@
#-----------------------------------------------------------------------------
from collections import defaultdict
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
import stat
import weakref
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -14,7 +14,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
from yt.utilities.exceptions import *
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -13,7 +13,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import types
import numpy as np
import weakref
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/gdf/io.py
--- a/yt/frontends/gdf/io.py
+++ b/yt/frontends/gdf/io.py
@@ -14,7 +14,7 @@
#-----------------------------------------------------------------------------
import numpy as np
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
from yt.funcs import \
mylog
from yt.utilities.io_handler import \
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/halo_catalog/data_structures.py
--- a/yt/frontends/halo_catalog/data_structures.py
+++ b/yt/frontends/halo_catalog/data_structures.py
@@ -14,7 +14,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
import stat
import weakref
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/halo_catalog/io.py
--- a/yt/frontends/halo_catalog/io.py
+++ b/yt/frontends/halo_catalog/io.py
@@ -14,7 +14,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
from yt.utilities.exceptions import *
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -13,7 +13,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import os
import numpy as np
import weakref
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/owls/data_structures.py
--- a/yt/frontends/owls/data_structures.py
+++ b/yt/frontends/owls/data_structures.py
@@ -15,7 +15,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import types
import yt.units
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/owls/io.py
--- a/yt/frontends/owls/io.py
+++ b/yt/frontends/owls/io.py
@@ -15,7 +15,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
import os
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/owls/owls_ion_tables.py
--- a/yt/frontends/owls/owls_ion_tables.py
+++ b/yt/frontends/owls/owls_ion_tables.py
@@ -17,7 +17,7 @@
#-----------------------------------------------------------------------------
import sys
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/owls/tests/test_outputs.py
--- a/yt/frontends/owls/tests/test_outputs.py
+++ b/yt/frontends/owls/tests/test_outputs.py
@@ -46,6 +46,7 @@
@requires_ds(os33, big_data=True)
def test_snapshot_033():
for test in sph_answer(os33, 'snap_033', 2*128**3, _fields):
+ test_snapshot_033.__name__ = test.description
yield test
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/owls_subfind/data_structures.py
--- a/yt/frontends/owls_subfind/data_structures.py
+++ b/yt/frontends/owls_subfind/data_structures.py
@@ -15,7 +15,7 @@
#-----------------------------------------------------------------------------
from collections import defaultdict
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
import stat
import weakref
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/owls_subfind/io.py
--- a/yt/frontends/owls_subfind/io.py
+++ b/yt/frontends/owls_subfind/io.py
@@ -14,7 +14,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
from yt.utilities.exceptions import *
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/rockstar/io.py
--- a/yt/frontends/rockstar/io.py
+++ b/yt/frontends/rockstar/io.py
@@ -14,7 +14,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
from yt.utilities.exceptions import *
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -15,7 +15,7 @@
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
import stat
import weakref
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/frontends/tipsy/tests/test_outputs.py
--- a/yt/frontends/tipsy/tests/test_outputs.py
+++ b/yt/frontends/tipsy/tests/test_outputs.py
@@ -111,6 +111,7 @@
@requires_ds(tipsy_gal)
def test_tipsy_galaxy():
for test in sph_answer(tipsy_gal, 'galaxy.00300', 315372, tg_fields):
+ test_tipsy_galaxy.__name__ = test.description
yield test
@requires_file(gasoline_dmonly)
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -17,7 +17,7 @@
import os
from yt.extern.six.moves import cPickle
import weakref
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
from yt.config import ytcfg
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -14,7 +14,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
import weakref
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -467,6 +467,13 @@
yield assert_equal, str(em3.in_mks().units), 'kg/(m*s**2)'
yield assert_equal, str(em3.in_cgs().units), 'g/(cm*s**2)'
+ dimless = YTQuantity(1.0, "")
+ yield assert_equal, dimless.in_cgs(), dimless
+ yield assert_equal, dimless.in_cgs(), 1.0
+ yield assert_equal, dimless.in_mks(), dimless
+ yield assert_equal, dimless.in_mks(), 1.0
+ yield assert_equal, str(dimless.in_cgs().units), "dimensionless"
+
def test_temperature_conversions():
"""
Test conversions between various supported temperature scales.
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -394,9 +394,15 @@
# Use sympy to factor the dimensions into base CGS unit symbols.
units = []
my_dims = self.dimensions.expand()
- for dim in base_units:
+ if my_dims is dimensionless:
+ return ""
+ for factor in my_dims.as_ordered_factors():
+ dim = list(factor.free_symbols)[0]
unit_string = base_units[dim]
- power_string = "**(%s)" % my_dims.as_coeff_exponent(dim)[1]
+ if factor.is_Pow:
+ power_string = "**(%s)" % factor.as_base_exp()[1]
+ else:
+ power_string = ""
units.append("".join([unit_string, power_string]))
return " * ".join(units)
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -16,7 +16,7 @@
from yt.funcs import *
import numpy as np
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
from .amr_kdtools import \
receive_and_reduce, send_to_parent, scatter_image
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -826,18 +826,21 @@
if ds_kwargs is None:
ds_kwargs = {}
ds = data_dir_load(ds_fn, kwargs=ds_kwargs)
- yield assert_equal, str(ds), ds_str_repr
+ yield AssertWrapper("%s_string_representation" % str(ds), assert_equal,
+ str(ds), ds_str_repr)
dso = [None, ("sphere", ("c", (0.1, 'unitary')))]
dd = ds.all_data()
- yield assert_equal, dd["particle_position"].shape, (ds_nparticles, 3)
+ yield AssertWrapper("%s_all_data_part_shape" % str(ds), assert_equal,
+ dd["particle_position"].shape, (ds_nparticles, 3))
tot = sum(dd[ptype, "particle_position"].shape[0]
for ptype in ds.particle_types if ptype != "all")
- yield assert_equal, tot, ds_nparticles
+ yield AssertWrapper("%s_all_data_part_total" % str(ds), assert_equal,
+ tot, ds_nparticles)
for dobj_name in dso:
dobj = create_obj(ds, dobj_name)
s1 = dobj["ones"].sum()
s2 = sum(mask.sum() for block, mask in dobj.blocks)
- yield assert_equal, s1, s2
+ yield AssertWrapper("%s_mask_test" % str(ds), assert_equal, s1, s2)
for field, weight_field in fields.items():
if field[0] in ds.particle_types:
particle_type = True
@@ -850,7 +853,6 @@
dobj_name)
yield FieldValuesTest(ds_fn, field, dobj_name,
particle_type=particle_type)
- return
def create_obj(ds, obj_type):
# obj_type should be tuple of
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/utilities/file_handler.py
--- a/yt/utilities/file_handler.py
+++ b/yt/utilities/file_handler.py
@@ -13,7 +13,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
from distutils.version import LooseVersion
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/utilities/grid_data_format/conversion/conversion_athena.py
--- a/yt/utilities/grid_data_format/conversion/conversion_athena.py
+++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py
@@ -3,7 +3,7 @@
import os
import weakref
import numpy as np
-import h5py as h5
+from yt.utilities.on_demand_imports import _h5py as h5
from .conversion_abc import *
from glob import glob
from collections import \
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/utilities/grid_data_format/tests/test_writer.py
--- a/yt/utilities/grid_data_format/tests/test_writer.py
+++ b/yt/utilities/grid_data_format/tests/test_writer.py
@@ -15,7 +15,7 @@
import tempfile
import shutil
import os
-import h5py as h5
+from yt.utilities.on_demand_imports import _h5py as h5
from yt.testing import \
fake_random_ds, assert_equal
from yt.utilities.grid_data_format.writer import \
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/utilities/grid_data_format/writer.py
--- a/yt/utilities/grid_data_format/writer.py
+++ b/yt/utilities/grid_data_format/writer.py
@@ -15,7 +15,7 @@
import os
import sys
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
from contextlib import contextmanager
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -19,7 +19,7 @@
from yt.funcs import mylog
from yt.extern.six.moves import cPickle
import os
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
from yt.extern.six import add_metaclass
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -17,6 +17,7 @@
# Get compiler invocation
compiler = os.getenv('CC', 'cc')
+ compiler = compiler.split(' ')
# Attempt to compile a test script.
# See http://openmp.org/wp/openmp-compilers/
@@ -32,11 +33,13 @@
)
file.flush()
with open(os.devnull, 'w') as fnull:
- exit_code = subprocess.call([compiler, '-fopenmp', filename],
+ exit_code = subprocess.call(compiler + ['-fopenmp', filename],
stdout=fnull, stderr=fnull)
# Clean up
file.close()
+ except OSError:
+ return False
finally:
os.chdir(curdir)
shutil.rmtree(tmpdir)
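The two changes above make the OpenMP probe tolerate compound CC values and missing compilers; a condensed sketch of the pattern (the test source file is assumed to exist already):

    import os
    import subprocess

    # CC may be a compound command such as "ccache gcc"
    compiler = os.getenv('CC', 'cc').split(' ')

    def check_for_openmp(filename):
        try:
            with open(os.devnull, 'w') as fnull:
                exit_code = subprocess.call(
                    compiler + ['-fopenmp', filename],
                    stdout=fnull, stderr=fnull)
        except OSError:
            # compiler binary not found: build without OpenMP
            return False
        return exit_code == 0
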
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -17,7 +17,7 @@
import abc
import json
import sys
-import h5py as h5
+from yt.utilities.on_demand_imports import _h5py as h5
import os
from uuid import uuid4
from yt.extern.six.moves import urllib
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/utilities/on_demand_imports.py
--- a/yt/utilities/on_demand_imports.py
+++ b/yt/utilities/on_demand_imports.py
@@ -179,3 +179,97 @@
return self._spatial
_scipy = scipy_imports()
+
+class h5py_imports:
+ _name = "h5py"
+
+ _File = None
+ @property
+ def File(self):
+ if self._File is None:
+ try:
+ from h5py import File
+ except ImportError:
+ File = NotAModule(self._name)
+ self._File = File
+ return self._File
+
+ _Group = None
+ @property
+ def Group(self):
+ if self._Group is None:
+ try:
+ from h5py import Group
+ except ImportError:
+ Group = NotAModule(self._name)
+ self._Group = Group
+ return self._Group
+
+ ___version__ = None
+ @property
+ def __version__(self):
+ if self.___version__ is None:
+ try:
+ from h5py import __version__
+ except ImportError:
+ __version__ = NotAModule(self._name)
+ self.___version__ = __version__
+ return self.___version__
+
+ _get_config = None
+ @property
+ def get_config(self):
+ if self._get_config is None:
+ try:
+ from h5py import get_config
+ except ImportError:
+ get_config = NotAModule(self._name)
+ self._get_config = get_config
+ return self._get_config
+
+ _h5f = None
+ @property
+ def h5f(self):
+ if self._h5f is None:
+ try:
+ import h5py.h5f as h5f
+ except ImportError:
+ h5f = NotAModule(self._name)
+ self._h5f = h5f
+ return self._h5f
+
+ _h5d = None
+ @property
+ def h5d(self):
+ if self._h5d is None:
+ try:
+ import h5py.h5d as h5d
+ except ImportError:
+ h5d = NotAModule(self._name)
+ self._h5d = h5d
+ return self._h5d
+
+ _h5s = None
+ @property
+ def h5s(self):
+ if self._h5s is None:
+ try:
+ import h5py.h5s as h5s
+ except ImportError:
+ h5s = NotAModule(self._name)
+ self._h5s = h5s
+ return self._h5s
+
+ _version = None
+ @property
+ def version(self):
+ if self._version is None:
+ try:
+ import h5py.version as version
+ except ImportError:
+ version = NotAModule(self._name)
+ self._version = version
+ return self._version
+
+_h5py = h5py_imports()
+
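A minimal sketch of the on-demand import pattern the new h5py_imports class follows: each attribute is imported on first access and replaced by a NotAModule-style sentinel (sketched here for self-containment) when the package is missing, so importing yt no longer hard-requires h5py:

    class NotAModule(object):
        def __init__(self, pkg_name):
            self.pkg_name = pkg_name
        def __getattr__(self, item):
            raise ImportError(
                "This functionality requires the %s package." % self.pkg_name)

    class lazy_h5py(object):
        _name = "h5py"
        _File = None

        @property
        def File(self):
            if self._File is None:
                try:
                    from h5py import File
                except ImportError:
                    File = NotAModule(self._name)
                self._File = File
            return self._File

    _h5py = lazy_h5py()
    # h5py itself is only touched the first time _h5py.File is used
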
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/utilities/png_writer.py
--- a/yt/utilities/png_writer.py
+++ b/yt/utilities/png_writer.py
@@ -10,20 +10,31 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
+import matplotlib
import matplotlib._png as _png
from yt.extern.six.moves import cStringIO
+from distutils.version import LooseVersion
+
+MPL_VERSION = LooseVersion(matplotlib.__version__)
+MPL_API_2_VERSION = LooseVersion("1.5.0")
+
+if MPL_VERSION < MPL_API_2_VERSION:
+ def call_png_write_png(buffer, width, height, filename, dpi):
+ _png.write_png(buffer, width, height, filename, dpi)
+else:
+ def call_png_write_png(buffer, width, height, filename, dpi):
+ _png.write_png(buffer, filename, dpi)
def write_png(buffer, filename, dpi=100):
width = buffer.shape[1]
height = buffer.shape[0]
- _png.write_png(buffer, width, height, filename, dpi)
+ call_png_write_png(buffer, width, height, filename, dpi)
def write_png_to_string(buffer, dpi=100, gray=0):
width = buffer.shape[1]
height = buffer.shape[0]
fileobj = cStringIO()
- _png.write_png(buffer, width, height, fileobj, dpi)
+ call_png_write_png(buffer, width, height, fileobj, dpi)
png_str = fileobj.getvalue()
fileobj.close()
return png_str
-
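A short usage sketch of the shimmed writer: the public write_png signature is unchanged across matplotlib generations (buffer contents here are hypothetical):

    import numpy as np
    from yt.utilities.png_writer import write_png

    buffer = np.zeros((64, 64, 4), dtype='uint8')   # RGBA image
    buffer[..., 3] = 255                            # opaque alpha
    write_png(buffer, "black_square.png", dpi=100)
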
diff -r 95372a47bdc041d4a81e5b1587e70d443ad7345b -r e6ae53ea2ac78d68c0e35bcd7415bf0b9eb5cf59 yt/visualization/volume_rendering/image_handling.py
--- a/yt/visualization/volume_rendering/image_handling.py
+++ b/yt/visualization/volume_rendering/image_handling.py
@@ -12,7 +12,7 @@
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
import numpy as np
from yt.funcs import mylog
https://bitbucket.org/yt_analysis/yt/commits/508299bf88f9/
Changeset: 508299bf88f9
Branch: yt
User: brittonsmith
Date: 2015-10-12 18:39:03+00:00
Summary: Merged in jzuhone/yt (pull request #1786)
[bugfix] Improvements/bugfixes for photon_simulator
Affected #: 2 files
diff -r 03e316bc6fddecfb4705d0f73754ce2553e45a3d -r 508299bf88f92f1191465d63e84a69eebd161954 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -536,6 +536,21 @@
D_A0 = self.parameters["FiducialAngularDiameterDistance"]
scale_factor = 1.0
+ # If we use an RMF, figure out where the response matrix actually is.
+ if "RMF" in parameters:
+ rmf = _astropy.pyfits.open(parameters["RMF"])
+ if "MATRIX" in rmf:
+ mat_key = "MATRIX"
+ elif "SPECRESP MATRIX" in rmf:
+ mat_key = "SPECRESP MATRIX"
+ else:
+ raise RuntimeError("Cannot find the response matrix in the RMF "
+ "file %s! " % parameters["RMF"]+"It should "
+ "be named \"MATRIX\" or \"SPECRESP MATRIX\".")
+ rmf.close()
+ else:
+ mat_key = None
+
if (exp_time_new is None and area_new is None and
redshift_new is None and dist_new is None):
my_n_obs = n_ph_tot
@@ -552,11 +567,10 @@
if comm.rank == 0:
mylog.info("Using energy-dependent effective area: %s" % (parameters["ARF"]))
f = _astropy.pyfits.open(area_new)
- elo = f["SPECRESP"].data.field("ENERG_LO")
- ehi = f["SPECRESP"].data.field("ENERG_HI")
+ earf = 0.5*(f["SPECRESP"].data.field("ENERG_LO")+f["SPECRESP"].data.field("ENERG_HI"))
eff_area = np.nan_to_num(f["SPECRESP"].data.field("SPECRESP"))
if "RMF" in parameters:
- weights = self._normalize_arf(parameters["RMF"])
+ weights = self._normalize_arf(parameters["RMF"], mat_key)
eff_area *= weights
else:
mylog.warning("You specified an ARF but not an RMF. This is ok if the "+
@@ -583,7 +597,12 @@
(D_A*D_A*(1.+zobs)**3)
fak = Aratio*Tratio*Dratio
if fak > 1:
- raise ValueError("Spectrum scaling factor = %g, cannot be greater than unity." % fak)
+ raise ValueError("This combination of requested parameters results in "
+ "%g%% more photons collected than are " % (100.*(fak-1.)) +
+ "available in the sample. Please reduce the collecting "
+ "area, exposure time, or increase the distance/redshift "
+ "of the object. Alternatively, generate a larger sample "
+ "of photons.")
my_n_obs = np.uint64(n_ph_tot*fak)
n_obs_all = comm.mpi_allreduce(my_n_obs)
@@ -652,7 +671,6 @@
detected = np.ones(eobs.shape, dtype='bool')
else:
mylog.info("Applying energy-dependent effective area.")
- earf = 0.5*(elo+ehi)
earea = np.interp(eobs, earf, eff_area, left=0.0, right=0.0)
randvec = eff_area.max()*np.random.random(eobs.shape)
detected = randvec < earea
@@ -676,11 +694,13 @@
num_events = len(events["xpix"])
- if comm.rank == 0: mylog.info("Total number of observed photons: %d" % num_events)
+ if comm.rank == 0:
+ mylog.info("Total number of observed photons: %d" % num_events)
if "RMF" in parameters and convolve_energies:
- events, info = self._convolve_with_rmf(parameters["RMF"], events)
- for k, v in info.items(): parameters[k] = v
+ events, info = self._convolve_with_rmf(parameters["RMF"], events, mat_key)
+ for k, v in info.items():
+ parameters[k] = v
if exp_time_new is None:
parameters["ExposureTime"] = self.parameters["FiducialExposureTime"]
@@ -698,23 +718,22 @@
return EventList(events, parameters)
- def _normalize_arf(self, respfile):
+ def _normalize_arf(self, respfile, mat_key):
rmf = _astropy.pyfits.open(respfile)
- table = rmf["MATRIX"]
+ table = rmf[mat_key]
weights = np.array([w.sum() for w in table.data["MATRIX"]])
rmf.close()
return weights
- def _convolve_with_rmf(self, respfile, events):
+ def _convolve_with_rmf(self, respfile, events, mat_key):
"""
Convolve the events with a RMF file.
"""
- mylog.warning("This routine has not been tested to work with all RMFs. YMMV.")
mylog.info("Reading response matrix file (RMF): %s" % (respfile))
hdulist = _astropy.pyfits.open(respfile)
- tblhdu = hdulist["MATRIX"]
+ tblhdu = hdulist[mat_key]
n_de = len(tblhdu.data["ENERG_LO"])
mylog.info("Number of energy bins in RMF: %d" % (n_de))
mylog.info("Energy limits: %g %g" % (min(tblhdu.data["ENERG_LO"]),
@@ -727,21 +746,19 @@
eidxs = np.argsort(events["eobs"])
phEE = events["eobs"][eidxs].d
- phXX = events["xpix"][eidxs]
- phYY = events["ypix"][eidxs]
detectedChannels = []
# run through all photon energies and find which bin they go in
k = 0
fcurr = 0
- last = len(phEE)-1
+ last = len(phEE)
- pbar = get_pbar("Scattering energies with RMF:", n_de)
+ pbar = get_pbar("Scattering energies with RMF:", last)
for low,high in zip(tblhdu.data["ENERG_LO"],tblhdu.data["ENERG_HI"]):
# weight function for probabilities from RMF
- weights = np.nan_to_num(tblhdu.data[k]["MATRIX"][:])
+ weights = np.nan_to_num(np.float64(tblhdu.data[k]["MATRIX"][:]))
weights /= weights.sum()
# build channel number list associated to array value,
# there are groups of channels in rmfs with nonzero probabilities
@@ -764,18 +781,18 @@
for q in range(fcurr,last):
if phEE[q] >= low and phEE[q] < high:
channelInd = np.random.choice(len(weights), p=weights)
- fcurr +=1
+ fcurr += 1
detectedChannels.append(trueChannel[channelInd])
if phEE[q] >= high:
break
- pbar.update(k)
- k+=1
+ pbar.update(fcurr)
+ k += 1
pbar.finish()
dchannel = np.array(detectedChannels)
- events["xpix"] = phXX
- events["ypix"] = phYY
+ events["xpix"] = events["xpix"][eidxs]
+ events["ypix"] = events["ypix"][eidxs]
events["eobs"] = YTArray(phEE, "keV")
events[tblhdu.header["CHANTYPE"]] = dchannel.astype(int)
@@ -790,7 +807,6 @@
class EventList(object) :
def __init__(self, events, parameters):
-
self.events = events
self.parameters = parameters
self.num_events = events["xpix"].shape[0]
@@ -800,9 +816,6 @@
self.wcs.wcs.cdelt = [-parameters["dtheta"].value, parameters["dtheta"].value]
self.wcs.wcs.ctype = ["RA---TAN","DEC--TAN"]
self.wcs.wcs.cunit = ["deg"]*2
- x,y = self.wcs.wcs_pix2world(self.events["xpix"], self.events["ypix"], 1)
- self.events["xsky"] = YTArray(x, "degree")
- self.events["ysky"] = YTArray(y, "degree")
def keys(self):
return self.events.keys()
@@ -817,11 +830,19 @@
return self.events.values()
def __getitem__(self,key):
+ if key not in self.events:
+ if key == "xsky" or key == "ysky":
+ x,y = self.wcs.wcs_pix2world(self.events["xpix"], self.events["ypix"], 1)
+ self.events["xsky"] = YTArray(x, "degree")
+ self.events["ysky"] = YTArray(y, "degree")
return self.events[key]
def __repr__(self):
return self.events.__repr__()
+ def __contains__(self, key):
+ return key in self.events
+
def __add__(self, other):
assert_same_wcs(self.wcs, other.wcs)
keys1 = list(self.parameters.keys())
@@ -860,11 +881,11 @@
reg = pyregion.parse(region)
r = reg.as_imagecoord(header=self.wcs.to_header())
f = r.get_filter()
- idxs = f.inside_x_y(self.events["xpix"], self.events["ypix"])
+ idxs = f.inside_x_y(self["xpix"], self["ypix"])
if idxs.sum() == 0:
raise RuntimeError("No events are inside this region!")
new_events = {}
- for k, v in self.events.items():
+ for k, v in self.items():
new_events[k] = v[idxs]
return EventList(new_events, self.parameters)
@@ -964,28 +985,46 @@
Set *clobber* to True if you need to overwrite a previous file.
"""
pyfits = _astropy.pyfits
+ Time = _astropy.time.Time
+ TimeDelta = _astropy.time.TimeDelta
+
+ exp_time = float(self.parameters["ExposureTime"])
+
+ t_begin = Time.now()
+ dt = TimeDelta(exp_time, format='sec')
+ t_end = t_begin + dt
cols = []
- col1 = pyfits.Column(name='ENERGY', format='E', unit='eV',
- array=self.events["eobs"].in_units("eV").d)
- col2 = pyfits.Column(name='X', format='D', unit='pixel',
- array=self.events["xpix"])
- col3 = pyfits.Column(name='Y', format='D', unit='pixel',
- array=self.events["ypix"])
+ col_e = pyfits.Column(name='ENERGY', format='E', unit='eV',
+ array=self["eobs"].in_units("eV").d)
+ col_x = pyfits.Column(name='X', format='D', unit='pixel',
+ array=self["xpix"])
+ col_y = pyfits.Column(name='Y', format='D', unit='pixel',
+ array=self["ypix"])
- cols = [col1, col2, col3]
+ cols = [col_e, col_x, col_y]
if "ChannelType" in self.parameters:
chantype = self.parameters["ChannelType"]
if chantype == "PHA":
- cunit="adu"
+ cunit = "adu"
elif chantype == "PI":
- cunit="Chan"
- col4 = pyfits.Column(name=chantype.upper(), format='1J',
- unit=cunit, array=self.events[chantype])
- cols.append(col4)
+ cunit = "Chan"
+ col_ch = pyfits.Column(name=chantype.upper(), format='1J',
+ unit=cunit, array=self.events[chantype])
+ cols.append(col_ch)
+ mylog.info("Generating times for events assuming uniform time "
+ "distribution. In future versions this will be made "
+ "more general.")
+
+ time = np.random.uniform(size=self.num_events, low=0.0,
+ high=float(self.parameters["ExposureTime"]))
+ col_t = pyfits.Column(name="TIME", format='1D', unit='s',
+ array=time)
+ cols.append(col_t)
+
coldefs = pyfits.ColDefs(cols)
tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
tbhdu.update_ext_name("EVENTS")
@@ -1006,7 +1045,9 @@
tbhdu.header["TLMIN3"] = 0.5
tbhdu.header["TLMAX2"] = 2.*self.parameters["pix_center"][0]-0.5
tbhdu.header["TLMAX3"] = 2.*self.parameters["pix_center"][1]-0.5
- tbhdu.header["EXPOSURE"] = float(self.parameters["ExposureTime"])
+ tbhdu.header["EXPOSURE"] = exp_time
+ tbhdu.header["TSTART"] = 0.0
+ tbhdu.header["TSTOP"] = exp_time
if isinstance(self.parameters["Area"], string_types):
tbhdu.header["AREA"] = self.parameters["Area"]
else:
@@ -1016,8 +1057,18 @@
tbhdu.header["HDUVERS"] = "1.1.0"
tbhdu.header["RADECSYS"] = "FK5"
tbhdu.header["EQUINOX"] = 2000.0
+ tbhdu.header["HDUCLASS"] = "OGIP"
+ tbhdu.header["HDUCLAS1"] = "EVENTS"
+ tbhdu.header["HDUCLAS2"] = "ACCEPTED"
+ tbhdu.header["DATE"] = t_begin.tt.isot
+ tbhdu.header["DATE-OBS"] = t_begin.tt.isot
+ tbhdu.header["DATE-END"] = t_end.tt.isot
if "RMF" in self.parameters:
tbhdu.header["RESPFILE"] = self.parameters["RMF"]
+ f = pyfits.open(self.parameters["RMF"])
+ nchan = int(f["EBOUNDS"].header["DETCHANS"])
+ tbhdu.header["PHA_BINS"] = nchan
+ f.close()
if "ARF" in self.parameters:
tbhdu.header["ANCRFILE"] = self.parameters["ARF"]
if "ChannelType" in self.parameters:
@@ -1029,7 +1080,30 @@
if "Instrument" in self.parameters:
tbhdu.header["INSTRUME"] = self.parameters["Instrument"]
- tbhdu.writeto(fitsfile, clobber=clobber)
+ hdulist = [pyfits.PrimaryHDU(), tbhdu]
+
+ if "ChannelType" in self.parameters:
+ start = pyfits.Column(name='START', format='1D', unit='s',
+ array=np.array([0.0]))
+ stop = pyfits.Column(name='STOP', format='1D', unit='s',
+ array=np.array([exp_time]))
+
+ tbhdu_gti = pyfits.BinTableHDU.from_columns([start,stop])
+ tbhdu_gti.update_ext_name("STDGTI")
+ tbhdu_gti.header["TSTART"] = 0.0
+ tbhdu_gti.header["TSTOP"] = exp_time
+ tbhdu_gti.header["HDUCLASS"] = "OGIP"
+ tbhdu_gti.header["HDUCLAS1"] = "GTI"
+ tbhdu_gti.header["HDUCLAS2"] = "STANDARD"
+ tbhdu_gti.header["RADECSYS"] = "FK5"
+ tbhdu_gti.header["EQUINOX"] = 2000.0
+ tbhdu_gti.header["DATE"] = t_begin.tt.isot
+ tbhdu_gti.header["DATE-OBS"] = t_begin.tt.isot
+ tbhdu_gti.header["DATE-END"] = t_end.tt.isot
+
+ hdulist.append(tbhdu_gti)
+
+ pyfits.HDUList(hdulist).writeto(fitsfile, clobber=clobber)
@parallel_root_only
def write_simput_file(self, prefix, clobber=False, emin=None, emax=None):
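The event-file writer above now stamps OGIP time keywords; a sketch of the astropy time arithmetic it uses (the exposure time is hypothetical):

    from astropy.time import Time, TimeDelta

    exp_time = 5.0e4                       # seconds
    t_begin = Time.now()
    t_end = t_begin + TimeDelta(exp_time, format='sec')
    # ISO timestamps in the TT scale, as written to DATE-OBS/DATE-END
    print(t_begin.tt.isot, t_end.tt.isot)
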
@@ -1055,21 +1129,17 @@
raise TypeError("Writing SIMPUT files is only supported if you didn't convolve with responses.")
if emin is None:
- emin = self.events["eobs"].min().value
+ emin = self["eobs"].min().value
if emax is None:
- emax = self.events["eobs"].max().value
+ emax = self["eobs"].max().value
- idxs = np.logical_and(self.events["eobs"].d >= emin,
- self.events["eobs"].d <= emax)
- flux = np.sum(self.events["eobs"][idxs].in_units("erg")) / \
+ idxs = np.logical_and(self["eobs"].d >= emin, self["eobs"].d <= emax)
+ flux = np.sum(self["eobs"][idxs].in_units("erg")) / \
self.parameters["ExposureTime"]/self.parameters["Area"]
- col1 = pyfits.Column(name='ENERGY', format='E',
- array=self["eobs"].d)
- col2 = pyfits.Column(name='DEC', format='D',
- array=self["ysky"].d)
- col3 = pyfits.Column(name='RA', format='D',
- array=self["xsky"].d)
+ col1 = pyfits.Column(name='ENERGY', format='E', array=self["eobs"].d)
+ col2 = pyfits.Column(name='DEC', format='D', array=self["ysky"].d)
+ col3 = pyfits.Column(name='RA', format='D', array=self["xsky"].d)
coldefs = pyfits.ColDefs([col1, col2, col3])
@@ -1147,15 +1217,15 @@
if "Instrument" in self.parameters:
f.create_dataset("/instrument", data=self.parameters["Instrument"])
- f.create_dataset("/xpix", data=self.events["xpix"])
- f.create_dataset("/ypix", data=self.events["ypix"])
- f.create_dataset("/xsky", data=self.events["xsky"].d)
- f.create_dataset("/ysky", data=self.events["ysky"].d)
- f.create_dataset("/eobs", data=self.events["eobs"].d)
- if "PI" in self.events:
- f.create_dataset("/pi", data=self.events["PI"])
- if "PHA" in self.events:
- f.create_dataset("/pha", data=self.events["PHA"])
+ f.create_dataset("/xpix", data=self["xpix"])
+ f.create_dataset("/ypix", data=self["ypix"])
+ f.create_dataset("/xsky", data=self["xsky"].d)
+ f.create_dataset("/ysky", data=self["ysky"].d)
+ f.create_dataset("/eobs", data=self["eobs"].d)
+ if "PI" in self:
+ f.create_dataset("/pi", data=self["PI"])
+ if "PHA" in self:
+ f.create_dataset("/pha", data=self["PHA"])
f.create_dataset("/sky_center", data=self.parameters["sky_center"].d)
f.create_dataset("/pix_center", data=self.parameters["pix_center"])
f.create_dataset("/dtheta", data=float(self.parameters["dtheta"]))
@@ -1181,13 +1251,13 @@
The maximum energy of the photons to put in the image, in keV.
"""
if emin is None:
- mask_emin = np.ones((self.num_events), dtype='bool')
+ mask_emin = np.ones(self.num_events, dtype='bool')
else:
- mask_emin = self.events["eobs"].d > emin
+ mask_emin = self["eobs"].d > emin
if emax is None:
- mask_emax = np.ones((self.num_events), dtype='bool')
+ mask_emax = np.ones(self.num_events, dtype='bool')
else:
- mask_emax = self.events["eobs"].d < emax
+ mask_emax = self["eobs"].d < emax
mask = np.logical_and(mask_emin, mask_emax)
@@ -1197,8 +1267,8 @@
xbins = np.linspace(0.5, float(nx)+0.5, nx+1, endpoint=True)
ybins = np.linspace(0.5, float(ny)+0.5, ny+1, endpoint=True)
- H, xedges, yedges = np.histogram2d(self.events["xpix"][mask],
- self.events["ypix"][mask],
+ H, xedges, yedges = np.histogram2d(self["xpix"][mask],
+ self["ypix"][mask],
bins=[xbins,ybins])
hdu = _astropy.pyfits.PrimaryHDU(H.T)
@@ -1252,21 +1322,27 @@
if bin_type == "channel" and "ChannelType" in self.parameters:
spectype = self.parameters["ChannelType"]
f = pyfits.open(self.parameters["RMF"])
- nchan = int(f[1].header["DETCHANS"])
- try:
- cmin = int(f[1].header["TLMIN4"])
- except KeyError:
+ nchan = int(f["EBOUNDS"].header["DETCHANS"])
+ num = 0
+ for i in range(1,len(f["EBOUNDS"].columns)+1):
+ if f["EBOUNDS"].header["TTYPE%d" % i] == "CHANNEL":
+ num = i
+ break
+ if num > 0:
+ tlmin = "TLMIN%d" % num
+ cmin = int(f["EBOUNDS"].header[tlmin])
+ else:
mylog.warning("Cannot determine minimum allowed value for channel. " +
"Setting to 0, which may be wrong.")
- cmin = int(0)
+ cmin = 0
f.close()
minlength = nchan
if cmin == 1: minlength += 1
- spec = np.bincount(self.events[spectype],minlength=minlength)
+ spec = np.bincount(self[spectype],minlength=minlength)
if cmin == 1: spec = spec[1:]
bins = (np.arange(nchan)+cmin).astype("int32")
else:
- espec = self.events["eobs"].d
+ espec = self["eobs"].d
erange = (emin, emax)
spec, ee = np.histogram(espec, bins=nchan, range=erange)
if bin_type == "energy":
@@ -1283,7 +1359,7 @@
coldefs = pyfits.ColDefs([col1, col2, col3, col4])
- tbhdu = pyfits.new_table(coldefs)
+ tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
tbhdu.update_ext_name("SPECTRUM")
tbhdu.header["DETCHANS"] = spec.shape[0]
diff -r 03e316bc6fddecfb4705d0f73754ce2553e45a3d -r 508299bf88f92f1191465d63e84a69eebd161954 yt/utilities/on_demand_imports.py
--- a/yt/utilities/on_demand_imports.py
+++ b/yt/utilities/on_demand_imports.py
@@ -67,6 +67,7 @@
if self._units is None:
try:
from astropy import units
+ self.log
except ImportError:
units = NotAModule(self._name)
self._units = units
@@ -84,6 +85,18 @@
self._conv = conv
return self._conv
+ _time = None
+ @property
+ def time(self):
+ if self._time is None:
+ try:
+ import astropy.time as time
+ self.log
+ except ImportError:
+ time = NotAModule(self._name)
+ self._time = time
+ return self._time
+
_astropy = astropy_imports()
class scipy_imports:
Repository URL: https://bitbucket.org/yt_analysis/yt/