[yt-svn] commit/yt: 7 new changesets
commits-noreply at bitbucket.org
commits-noreply at bitbucket.org
Wed Aug 14 15:25:36 PDT 2013
7 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/02fe877276dc/
Changeset: 02fe877276dc
Branch: yt
User: hegan
Date: 2013-07-23 20:37:21
Summary: Added fit
Affected #: 2 files
diff -r f15815e182080e8619efd2636cce42bc6e598b7c -r 02fe877276dc48a3d0caa54a3caf957fd735db86 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- /dev/null
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -0,0 +1,808 @@
+from scipy import optimize
+import numpy as na
+import h5py
+from yt.analysis_modules.absorption_spectrum.absorption_line \
+ import voigt
+
+
+def generate_total_fit(x, fluxData, orderFits, speciesDicts,
+ minError=1E-5, complexLim=.999,
+ fitLim=.99, minLength=3,
+ maxLength=1000, splitLim=.99,
+ output_file=None):
+
+ """
+ This function is designed to fit an absorption spectrum by breaking
+ the spectrum up into absorption complexes, and iteratively adding
+ and optimizing voigt profiles to each complex.
+
+ Parameters
+ ----------
+ x : (N) ndarray
+ 1d array of wavelengths
+ fluxData : (N) ndarray
+ array of flux corresponding to the wavelengths given
+ in x. (needs to be the same size as x)
+ orderFits : list
+ list of the names of the species in the order that they
+ should be fit. Names should correspond to the names of the species
+ given in speciesDicts. (ex: ['lya','OVI'])
+ speciesDicts : dictionary
+ Dictionary of dictionaries (I'm addicted to dictionaries, I
+ confess). Top level keys should be the names of all the species given
+ in orderFits. The entries should be dictionaries containing all
+ relevant parameters needed to create an absorption line of a given
+ species (f,Gamma,lambda0) as well as max and min values for parameters
+ to be fit
+ complexLim : float, optional
+ Maximum flux to start the edge of an absorption complex. Different
+ from fitLim because it decides extent of a complex rather than
+ whether or not a complex is accepted.
+ fitLim : float,optional
+ Maximum flux where the level of absorption will trigger
+ identification of the region as an absorption complex. Default = .98.
+ (ex: for a minSize=.98, a region where all the flux is between 1.0 and
+ .99 will not be separated out to be fit as an absorbing complex, but
+ a region that contains a point where the flux is .97 will be fit
+ as an absorbing complex.)
+ minLength : int, optional
+ number of cells required for a complex to be included.
+ default is 3 cells.
+ maxLength : int, optional
+ number of cells required for a complex to be split up. Default
+ is 1000 cells.
+ splitLim : float, optional
+ if attempting to split a region for being larger than maxlength
+ the point of the split must have a flux greater than splitLim
+ (ie: absorption greater than splitLim). Default= .99.
+ output_file : string, optional
+ location to save the results of the fit.
+
+ Returns
+ -------
+ allSpeciesLines : dictionary
+ Dictionary of dictionaries representing the fit lines.
+ Top level keys are the species given in orderFits and the corresponding
+ entries are dictionaries with the keys 'N','b','z', and 'group#'.
+ Each of these corresponds to a list of the parameters for every
+ accepted fitted line. (ie: N[0],b[0],z[0] will create a line that
+ fits some part of the absorption spectrum). 'group#' is a similar list
+ but identifies which absorbing complex each line belongs to. Lines
+ with the same group# were fit at the same time. group#'s do not
+ correlate between species (ie: an lya line with group number 1 and
+ an OVI line with group number 1 were not fit together and do
+ not necessarily correspond to the same region)
+ yFit : (N) ndarray
+ array of flux corresponding to the combination of all fitted
+ absorption profiles. Same size as x.
+ """
+
+ #Empty dictionary for fitted lines
+ allSpeciesLines = {}
+
+ #Wavelength of beginning of array, wavelength resolution
+ x0,xRes=x[0],x[1]-x[0]
+
+ #Empty fit without any lines
+ yFit = na.ones(len(fluxData))
+
+ #Find all regions where lines/groups of lines are present
+ cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
+ complexLim=complexLim, minLength=minLength,
+ maxLength=maxLength, splitLim=splitLim)
+
+ #Fit all species one at a time in given order from low to high wavelength
+ for species in orderFits:
+ speciesDict = speciesDicts[species]
+ speciesLines = {'N':na.array([]),
+ 'b':na.array([]),
+ 'z':na.array([]),
+ 'group#':na.array([])}
+
+ #Set up wavelengths for species
+ initWl = speciesDict['wavelength'][0]
+
+ for b_i,b in enumerate(cBounds):
+ xBounded=x[b[1]:b[2]]
+ yDatBounded=fluxData[b[1]:b[2]]
+ yFitBounded=yFit[b[1]:b[2]]
+
+ #Find init redshift
+ z=(xBounded[yDatBounded.argmin()]-initWl)/initWl
+
+ #Check if any flux at partner sites
+ if not _line_exists(speciesDict['wavelength'],
+ fluxData,z,x0,xRes,fitLim):
+ continue
+
+ #Fit Using complex tools
+ newLinesP,flag=_complex_fit(xBounded,yDatBounded,yFitBounded,
+ z,fitLim,minError*(b[2]-b[1]),speciesDict)
+
+ #Check existence of partner lines if applicable
+ newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData,
+ b, minError*(b[2]-b[1]),
+ x0, xRes, speciesDict)
+
+ #If flagged as a bad fit, species is lyman alpha,
+ # and it may be a saturated line, use special tools
+ if flag and species=='lya' and min(yDatBounded)<.1:
+ newLinesP=_large_flag_fit(xBounded,yDatBounded,
+ yFitBounded,z,speciesDict,
+ minSize,minError*(b[2]-b[1]))
+
+ #Adjust total current fit
+ yFit=yFit*_gen_flux_lines(x,newLinesP,speciesDict)
+
+ #Add new group to all fitted lines
+ if na.size(newLinesP)>0:
+ speciesLines['N']=na.append(speciesLines['N'],newLinesP[:,0])
+ speciesLines['b']=na.append(speciesLines['b'],newLinesP[:,1])
+ speciesLines['z']=na.append(speciesLines['z'],newLinesP[:,2])
+ speciesLines['group#']=na.append(speciesLines['group#'],b_i)
+
+ allSpeciesLines[species]=speciesLines
+
+ if output_file:
+ _output_fit(allSpeciesLines, output_file)
+
+ return (allSpeciesLines,yFit)
+
+def _complex_fit(x, yDat, yFit, initz, minSize, errBound, speciesDict,
+ initP=None):
+ """ Fit an absorption complex by iteratively adding and optimizing
+ voigt profiles.
+
+ A complex is defined as a region where some number of lines may be present,
+ or a region of non zero of absorption. Lines are iteratively added
+ and optimized until the difference between the flux generated using
+ the optimized parameters has a least squares difference between the
+ desired flux profile less than the error bound.
+
+ Parameters
+ ----------
+ x : (N) ndarray
+ array of wavelength
+    yDat : (N) ndarray
+ array of desired flux profile to be fitted for the wavelength
+ space given by x. Same size as x.
+ yFit : (N) ndarray
+ array of flux profile fitted for the wavelength
+ space given by x already. Same size as x.
+ initz : float
+ redshift to try putting first line at
+ (maximum absorption for region)
+    minSize : float
+ minimum absorption allowed for a line to still count as a line
+ given in normalized flux (ie: for minSize=.9, only lines with minimum
+ flux less than .9 will be fitted)
+    errBound : float
+ maximum total error allowed for an acceptable fit
+ speciesDict : dictionary
+ dictionary containing all relevant parameters needed
+ to create an absorption line of a given species (f,Gamma,lambda0)
+ as well as max and min values for parameters to be fit
+ initP : (,3,) ndarray
+ initial guess to try for line parameters to fit the region. Used
+ by large_flag_fit. Default = None, and initial guess generated
+ automatically.
+
+ Returns
+ -------
+ linesP : (3,) ndarray
+ Array of best parameters if a good enough fit is found in
+ the form [[N1,b1,z1], [N2,b2,z2],...]
+ flag : bool
+ boolean value indicating the success of the fit (True if unsuccessful)
+ """
+
+ #Setup initial line guesses
+ if initP==None: #Regular fit
+ initP = [0,0,0]
+ if min(yDat)<.5: #Large lines get larger initial guess
+ initP[0] = 10**16
+ elif min(yDat)>.9: #Small lines get smaller initial guess
+ initP[0] = 10**12.5
+ else:
+ initP[0] = speciesDict['init_N']
+ initP[1] = speciesDict['init_b']
+ initP[2]=initz
+ initP=na.array([initP])
+
+ linesP = initP
+
+ #For generating new z guesses
+ wl0 = speciesDict['wavelength'][0]
+
+ #Check if first line exists still
+ if min(yDat-yFit+1)>minSize:
+ return [],False
+
+ #Values to proceed through first run
+ errSq,prevErrSq=1,1000
+
+ while True:
+ #Initial parameter guess from joining parameters from all lines
+ # in lines into a single array
+ initP = linesP.flatten()
+
+ #Optimize line
+ fitP,success=optimize.leastsq(_voigt_error,initP,
+ args=(x,yDat,yFit,speciesDict),
+ epsfcn=1E-10,maxfev=1000)
+
+ #Set results of optimization
+ linesP = na.reshape(fitP,(-1,3))
+
+ #Generate difference between current best fit and data
+ yNewFit=_gen_flux_lines(x,linesP,speciesDict)
+ dif = yFit*yNewFit-yDat
+
+ #Sum to get idea of goodness of fit
+ errSq=sum(dif**2)
+
+ #If good enough, break
+ if errSq < errBound:
+ break
+
+ #If last fit was worse, reject the last line and revert to last fit
+ if errSq > prevErrSq*10:
+ #If its still pretty damn bad, cut losses and try flag fit tools
+ if prevErrSq >1E2*errBound and speciesDict['name']=='HI lya':
+ return [],True
+ else:
+ yNewFit=_gen_flux_lines(x,prevLinesP,speciesDict)
+ break
+
+ #If too many lines
+ if na.shape(linesP)[0]>8 or na.size(linesP)+3>=len(x):
+ #If its fitable by flag tools and still bad, use flag tools
+ if errSq >1E2*errBound and speciesDict['name']=='HI lya':
+ return [],True
+ else:
+ break
+
+ #Store previous data in case reject next fit
+ prevErrSq = errSq
+ prevLinesP = linesP
+
+
+ #Set up initial condition for new line
+ newP = [0,0,0]
+ if min(dif)<.1:
+ newP[0]=10**12
+ elif min(dif)>.9:
+ newP[0]=10**16
+ else:
+ newP[0]=10**14
+ newP[1] = speciesDict['init_b']
+ newP[2]=(x[dif.argmax()]-wl0)/wl0
+ linesP=na.append(linesP,[newP],axis=0)
+
+
+ #Check the parameters of all lines to see if they fall in an
+ # acceptable range, as given in dict ref
+ remove=[]
+ for i,p in enumerate(linesP):
+ check=_check_params(na.array([p]),speciesDict)
+ if check:
+ remove.append(i)
+ linesP = na.delete(linesP,remove,axis=0)
+
+ return linesP,False
+
+def _large_flag_fit(x, yDat, yFit, initz, speciesDict, minSize, errBound):
+ """
+ Attempts to more robustly fit saturated lyman alpha regions that have
+ not converged to satisfactory fits using the standard tools.
+
+ Uses a preselected sample of a wide range of initial parameter guesses
+ designed to fit saturated lines (see get_test_lines).
+
+ Parameters
+ ----------
+ x : (N) ndarray
+ array of wavelength
+    yDat : (N) ndarray
+ array of desired flux profile to be fitted for the wavelength
+ space given by x. Same size as x.
+ yFit : (N) ndarray
+ array of flux profile fitted for the wavelength
+ space given by x already. Same size as x.
+ initz : float
+ redshift to try putting first line at
+ (maximum absorption for region)
+ speciesDict : dictionary
+ dictionary containing all relevant parameters needed
+ to create an absorption line of a given species (f,Gamma,lambda0)
+ as well as max and min values for parameters to be fit
+    minSize : float
+ minimum absorption allowed for a line to still count as a line
+ given in normalized flux (ie: for minSize=.9, only lines with minimum
+ flux less than .9 will be fitted)
+    errBound : float
+ maximum total error allowed for an acceptable fit
+
+ Returns
+ -------
+ bestP : (3,) ndarray
+ array of best parameters if a good enough fit is found in
+ the form [[N1,b1,z1], [N2,b2,z2],...]
+ """
+
+ #Set up some initial line guesses
+ lineTests = _get_test_lines(initz)
+
+ #Keep track of the lowest achieved error
+ bestError = 1000
+
+ #Iterate through test line guesses
+ for initLines in lineTests:
+ if initLines[1,0]==0:
+ initLines = na.delete(initLines,1,axis=0)
+
+ #Do fitting with initLines as first guess
+ linesP,flag=_complex_fit(x,yDat,yFit,initz,
+ minSize,errBound,speciesDict,initP=initLines)
+
+ #Find error of last fit
+ yNewFit=_gen_flux_lines(x,linesP,speciesDict)
+ dif = yFit*yNewFit-yDat
+ errSq=sum(dif**2)
+
+ #If error lower, keep track of the lines used to make that fit
+ if errSq < bestError:
+ bestError = errSq
+ bestP = linesP
+
+ if bestError>10*errBound*len(x):
+ return []
+ else:
+ return bestP
+
+def _get_test_lines(initz):
+ """
+ Returns a 3d numpy array of lines to test as initial guesses for difficult
+ to fit lyman alpha absorbers that are saturated.
+
+ The array is 3d because
+ the first dimension gives separate initial guesses, the second dimension
+ has multiple lines for the same guess (trying a broad line plus a
+    saturated line) and the 3rd dimension contains the 3 fit parameters (N,b,z)
+
+ Parameters
+ ----------
+ initz : float
+ redshift to give all the test lines
+
+ Returns
+ -------
+ testP : (,3,) ndarray
+ numpy array of the form
+ [[[N1a,b1a,z1a], [N1b,b1b,z1b]], [[N2a,b2,z2a],...] ...]
+ """
+
+ #Set up a bunch of empty lines
+ testP = na.zeros((10,2,3))
+
+ testP[0,0,:]=[1E18,20,initz]
+ testP[1,0,:]=[1E18,40,initz]
+ testP[2,0,:]=[1E16,5, initz]
+ testP[3,0,:]=[1E16,20,initz]
+ testP[4,0,:]=[1E16,80,initz]
+
+ testP[5,0,:]=[1E18,20,initz]
+ testP[6,0,:]=[1E18,40,initz]
+ testP[7,0,:]=[1E16,5, initz]
+ testP[8,0,:]=[1E16,20,initz]
+ testP[9,0,:]=[1E16,80,initz]
+
+ testP[5,1,:]=[1E13,100,initz]
+ testP[6,1,:]=[1E13,100,initz]
+ testP[7,1,:]=[1E13,100,initz]
+ testP[8,1,:]=[1E13,100,initz]
+ testP[9,1,:]=[1E13,100,initz]
+
+ return testP
+
+def _get_bounds(z, b, wl, x0, xRes):
+ """
+ Gets the indices of range of wavelength that the wavelength wl is in
+ with the size of some initial wavelength range.
+
+ Used for checking if species with multiple lines (as in the OVI doublet)
+ fit all lines appropriately.
+
+ Parameters
+ ----------
+ z : float
+ redshift
+ b : (3) ndarray/list
+ initial bounds in form [i0,i1,i2] where i0 is the index of the
+ minimum flux for the complex, i1 is index of the lower wavelength
+ edge of the complex, and i2 is the index of the higher wavelength
+ edge of the complex.
+ wl : float
+ unredshifted wavelength of the peak of the new region
+ x0 : float
+ wavelength of the index 0
+ xRes : float
+ difference in wavelength for two consecutive indices
+
+ Returns
+ -------
+ indices : (2) tuple
+ Tuple (i1,i2) where i1 is the index of the lower wavelength bound of
+ the new region and i2 is the index of the higher wavelength bound of
+ the new region
+ """
+
+ r=[-b[1]+100+b[0],b[2]+100-b[0]]
+ redWl = (z+1)*wl
+ iRedWl=int((redWl-x0)/xRes)
+ indices = (iRedWl-r[0],iRedWl+r[1])
+
+ return indices
+
+def _remove_unaccepted_partners(linesP, x, y, b, errBound,
+ x0, xRes, speciesDict):
+ """
+ Given a set of parameters [N,b,z] that form multiple lines for a given
+ species (as in the OVI doublet), remove any set of parameters where
+ not all transition wavelengths have a line that matches the fit.
+
+ (ex: if a fit is determined based on the first line of the OVI doublet,
+ but the given parameters give a bad fit of the wavelength space of
+ the second line then that set of parameters is removed from the array
+ of line parameters.)
+
+ Parameters
+ ----------
+ linesP : (3,) ndarray
+ array giving sets of line parameters in
+ form [[N1, b1, z1], ...]
+ x : (N) ndarray
+ wavelength array [nm]
+ y : (N) ndarray
+ normalized flux array of original data
+ b : (3) tuple/list/ndarray
+ indices that give the bounds of the original region so that another
+ region of similar size can be used to determine the goodness
+ of fit of the other wavelengths
+ errBound : float
+ size of the error that is appropriate for a given region,
+ adjusted to account for the size of the region.
+
+ Returns
+ -------
+ linesP : (3,) ndarray
+ array similar to linesP that only contains lines with
+ appropriate fits of all transition wavelengths.
+ """
+
+ #List of lines to remove
+ removeLines=[]
+
+ #Iterate through all sets of line parameters
+ for i,p in enumerate(linesP):
+
+ #iterate over all transition wavelengths
+ for wl in speciesDict['wavelength']:
+
+ #Get the bounds of a similar sized region around the
+ # appropriate wavelength, and then get the appropriate
+ # region of wavelength and flux
+ lb = _get_bounds(p[2],b,wl,x0,xRes)
+ xb,yb=x[lb[0]:lb[1]],y[lb[0]:lb[1]]
+
+ #Generate a fit and find the difference to data
+ yFitb=_gen_flux_lines(xb,na.array([p]),speciesDict)
+ dif =yb-yFitb
+
+ #Only counts as an error if line is too big ---------------<
+ dif = [k for k in dif if k>0]
+ err = sum(dif)
+
+ #If the fit is too bad then add the line to list of removed lines
+ if err > errBound*1E2:
+ removeLines.append(i)
+ break
+
+ #Remove all bad line fits
+ linesP = na.delete(linesP,removeLines,axis=0)
+
+ return linesP
+
+
+
+def _line_exists(wavelengths, y, z, x0, xRes,fluxMin):
+    """For a group of lines finds if there is some change in flux greater
+ than some minimum at the same redshift with different initial wavelengths
+
+ Parameters
+ ----------
+ wavelengths : (N) ndarray
+ array of initial wavelengths to check
+ y : (N) ndarray
+ flux array to check
+ x0 : float
+ wavelength of the first value in y
+ xRes : float
+ difference in wavelength between consecutive cells in flux array
+ fluxMin : float
+ maximum flux to count as a line existing.
+
+ Returns
+ -------
+
+ flag : boolean
+ value indicating whether all lines exist. True if all lines exist
+ """
+
+ #Iterate through initial wavelengths
+ for wl in wavelengths:
+ #Redshifted wavelength
+ redWl = (z+1)*wl
+
+ #Index of the redshifted wavelength
+ indexRedWl = (redWl-x0)/xRes
+
+ #Check if surpasses minimum absorption bound
+ if y[int(indexRedWl)]>fluxMin:
+ return False
+
+ return True
+
+def _find_complexes(x, yDat, complexLim=.999, fitLim=.99,
+ minLength =3, maxLength=1000, splitLim=.99):
+ """Breaks up the wavelength space into groups
+ where there is some absorption.
+
+ Parameters
+ ----------
+ x : (N) ndarray
+ array of wavelengths
+ yDat : (N) ndarray
+ array of flux corresponding to the wavelengths given
+ in x. (needs to be the same size as x)
+ complexLim : float, optional
+ Maximum flux to start the edge of an absorption complex. Different
+ from fitLim because it decides extent of a complex rather than
+ whether or not a complex is accepted.
+ fitLim : float,optional
+ Maximum flux where the level of absorption will trigger
+ identification of the region as an absorption complex. Default = .98.
+ (ex: for a minSize=.98, a region where all the flux is between 1.0 and
+ .99 will not be separated out to be fit as an absorbing complex, but
+ a region that contains a point where the flux is .97 will be fit
+ as an absorbing complex.)
+ minLength : int, optional
+ number of cells required for a complex to be included.
+ default is 3 cells.
+ maxLength : int, optional
+ number of cells required for a complex to be split up. Default
+ is 1000 cells.
+ splitLim : float, optional
+ if attempting to split a region for being larger than maxlength
+ the point of the split must have a flux greater than splitLim
+ (ie: absorption greater than splitLim). Default= .99.
+
+ Returns
+ -------
+ cBounds : (3,)
+ list of bounds in the form [[i0,i1,i2],...] where i0 is the
+ index of the maximum flux for a complex, i1 is the index of the
+ beginning of the complex, and i2 is the index of the end of the
+ complex. Indexes refer to the indices of x and yDat.
+ """
+
+ #Initialize empty list of bounds
+ cBounds=[]
+
+ #Iterate through cells of flux
+ i=0
+ while (i<len(x)):
+
+ #Start tracking at a region that surpasses flux of edge
+ if yDat[i]<complexLim:
+
+ #Iterate through until reach next edge
+ j=0
+ while yDat[i+j]<complexLim: j=j+1
+
+ #Check if the complex is big enough
+ if j >minLength:
+
+ #Check if there is enough absorption for the complex to
+ # be included
+ cPeak = yDat[i:i+j].argmin()
+ if yDat[cPeak+i]<fitLim:
+ cBounds.append([cPeak+i,i,i+j])
+
+ i=i+j
+ i=i+1
+
+ i=0
+ #Iterate through the bounds
+ while i < len(cBounds):
+ b=cBounds[i]
+
+ #Check if the region needs to be divided
+ if b[2]-b[1]>maxLength:
+
+ #Find the minimum absorption in the middle two quartiles of
+ # the large complex
+ q=(b[2]-b[1])/4
+ cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+
+ #Only break it up if the minimum absorption is actually low enough
+ if yDat[cut]>splitLim:
+
+ #Get the new two peaks
+ b1Peak = yDat[b[1]:cut].argmin()+b[1]
+ b2Peak = yDat[cut:b[2]].argmin()+cut
+
+ #add the two regions separately
+ cBounds.insert(i+1,[b1Peak,b[1],cut])
+ cBounds.insert(i+2,[b2Peak,cut,b[2]])
+
+ #Remove the original region
+ cBounds.pop(i)
+ i=i+1
+ i=i+1
+
+ return cBounds
+
+def _gen_flux_lines(x, linesP, speciesDict):
+ """
+ Calculates the normalized flux for a region of wavelength space
+ generated by a set of absorption lines.
+
+ Parameters
+ ----------
+ x : (N) ndarray
+ Array of wavelength
+ linesP: (3,) ndarray
+ Array giving sets of line parameters in
+ form [[N1, b1, z1], ...]
+ speciesDict : dictionary
+ Dictionary containing all relevant parameters needed
+ to create an absorption line of a given species (f,Gamma,lambda0)
+
+ Returns
+ -------
+ flux : (N) ndarray
+ Array of normalized flux generated by the line parameters
+ given in linesP over the wavelength space given in x. Same size as x.
+ """
+ y=0
+ for p in linesP:
+ for i in range(speciesDict['numLines']):
+ f=speciesDict['f'][i]
+ g=speciesDict['Gamma'][i]
+ wl=speciesDict['wavelength'][i]
+ y = y+ _gen_tau(x,p,f,g,wl)
+ flux = na.exp(-y)
+ return flux
+
+def _gen_tau(t, p, f, Gamma, lambda_unshifted):
+ """This calculates a flux distribution for given parameters using the yt
+ voigt profile generator"""
+ N,b,z= p
+
+ #Calculating quantities
+ tau_o = 1.4973614E-15*N*f*lambda_unshifted/b
+ a=7.95774715459E-15*Gamma*lambda_unshifted/b
+ x=299792.458/b*(lambda_unshifted*(1+z)/t-1)
+
+ H = na.zeros(len(x))
+ H = voigt(a,x)
+
+ tau = tau_o*H
+
+ return tau
+
+def _voigt_error(pTotal, x, yDat, yFit, speciesDict):
+ """
+ Gives the error of each point used to optimize the fit of a group
+ of absorption lines to a given flux profile.
+
+ If the parameters are not in the acceptable range as defined
+ in speciesDict, the first value of the error array will
+ contain a large value (999), to prevent the optimizer from running
+ into negative number problems.
+
+ Parameters
+ ----------
+ pTotal : (3,) ndarray
+ Array with form [[N1, b1, z1], ...]
+ x : (N) ndarray
+ array of wavelengths [nm]
+ yDat : (N) ndarray
+ desired normalized flux from fits of lines in wavelength
+ space given by x
+ yFit : (N) ndarray
+ previous fit over the wavelength space given by x.
+ speciesDict : dictionary
+ dictionary containing all relevant parameters needed
+ to create an absorption line of a given species (f,Gamma,lambda0)
+ as well as max and min values for parameters to be fit
+
+ Returns
+ -------
+ error : (N) ndarray
+ the difference between the fit generated by the parameters
+ given in pTotal multiplied by the previous fit and the desired
+ flux profile, w/ first index modified appropriately for bad
+ parameter choices
+ """
+
+ pTotal.shape = (-1,3)
+ yNewFit = _gen_flux_lines(x,pTotal,speciesDict)
+
+ error = yDat-yFit*yNewFit
+ error[0] = _check_params(pTotal,speciesDict)
+
+ return error
+
+def _check_params(p, speciesDict):
+ """
+ Check to see if any of the parameters in p fall outside the range
+ given in speciesDict.
+
+ Parameters
+ ----------
+ p : (3,) ndarray
+ array with form [[N1, b1, z1], ...]
+ speciesDict : dictionary
+ dictionary with properties giving the max and min
+ values appropriate for each parameter N,b, and z.
+
+ Returns
+ -------
+ check : int
+ 0 if all values are fine
+ 999 if any values fall outside acceptable range
+ """
+ check = 0
+ if any(p[:,0] > speciesDict['maxN']) or\
+ any(p[:,0] < speciesDict['minN']) or\
+ any(p[:,1] > speciesDict['maxb']) or\
+ any(p[:,1] < speciesDict['minb']) or\
+ any(p[:,2] > speciesDict['maxz']) or\
+ any(p[:,2] < speciesDict['minz']):
+ check = 999
+ return check
+
+
+def _output_fit(lineDic, file_name = 'spectrum_fit.h5'):
+ """
+ This function is designed to output the parameters of the series
+ of lines used to fit an absorption spectrum.
+
+ The dataset contains entries in the form species/N, species/b
+ species/z, and species/complex. The ith entry in each of the datasets
+ is the fitted parameter for the ith line fitted to the spectrum for
+ the given species. The species names come from the fitted line
+ dictionary.
+
+ Parameters
+ ----------
+ lineDic : dictionary
+ Dictionary of dictionaries representing the fit lines.
+ Top level keys are the species given in orderFits and the corresponding
+ entries are dictionaries with the keys 'N','b','z', and 'group#'.
+ Each of these corresponds to a list of the parameters for every
+ accepted fitted line.
+    file_name : string, optional
+ Name of the file to output fit to. Default = 'spectrum_fit.h5'
+
+ """
+ f = h5py.File(file_name, 'w')
+ for ion, params in lineDic.iteritems():
+ f.create_dataset("{0}/N".format(ion),data=params['N'])
+ f.create_dataset("{0}/b".format(ion),data=params['b'])
+ f.create_dataset("{0}/z".format(ion),data=params['z'])
+ f.create_dataset("{0}/complex".format(ion),data=params['group#'])
+ print 'Writing spectrum fit to {0}'.format(file_name)
+
diff -r f15815e182080e8619efd2636cce42bc6e598b7c -r 02fe877276dc48a3d0caa54a3caf957fd735db86 yt/analysis_modules/absorption_spectrum/api.py
--- a/yt/analysis_modules/absorption_spectrum/api.py
+++ b/yt/analysis_modules/absorption_spectrum/api.py
@@ -30,3 +30,6 @@
from .absorption_spectrum import \
AbsorptionSpectrum
+
+from .absorption_spectrum_fit import \
+ generate_total_fit
https://bitbucket.org/yt_analysis/yt/commits/41cb5fd0c1fb/
Changeset: 41cb5fd0c1fb
Branch: yt
User: hegan
Date: 2013-07-23 20:43:44
Summary: Merged yt_analysis/yt into yt
Affected #: 5 files
diff -r 02fe877276dc48a3d0caa54a3caf957fd735db86 -r 41cb5fd0c1fb05ecf4c58a62a6a12acf47020b15 scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -1,6 +1,6 @@
#!python
import os, re
-from distutils import version
+from distutils.version import LooseVersion
from yt.mods import *
from yt.data_objects.data_containers import AMRData
namespace = locals().copy()
@@ -23,10 +23,12 @@
code.interact(doc, None, namespace)
sys.exit()
-if version.LooseVersion(IPython.__version__) <= version.LooseVersion('0.10'):
+if LooseVersion(IPython.__version__) <= LooseVersion('0.10'):
api_version = '0.10'
+elif LooseVersion(IPython.__version__) <= LooseVersion('1.0'):
+ api_version = '0.11'
else:
- api_version = '0.11'
+ api_version = '1.0'
if api_version == "0.10" and "DISPLAY" in os.environ:
from matplotlib import rcParams
@@ -42,13 +44,18 @@
ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
elif api_version == "0.10":
ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
-elif api_version == "0.11":
- from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
+else:
+ if api_version == "0.11":
+ from IPython.frontend.terminal.interactiveshell import \
+ TerminalInteractiveShell
+ elif api_version == "1.0":
+ from IPython.terminal.interactiveshell import TerminalInteractiveShell
+ else:
+ raise RuntimeError
ip_shell = TerminalInteractiveShell(user_ns=namespace, banner1 = doc,
display_banner = True)
if "DISPLAY" in os.environ: ip_shell.enable_pylab(import_all=False)
-else:
- raise RuntimeError
+
# The rest is a modified version of the IPython default profile code
@@ -77,7 +84,7 @@
ip = ip_shell.IP.getapi()
try_next = IPython.ipapi.TryNext
kwargs = dict(sys_exit=1, banner=doc)
-elif api_version == "0.11":
+elif api_version in ("0.11", "1.0"):
ip = ip_shell
try_next = IPython.core.error.TryNext
kwargs = dict()
diff -r 02fe877276dc48a3d0caa54a3caf957fd735db86 -r 41cb5fd0c1fb05ecf4c58a62a6a12acf47020b15 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -366,5 +366,21 @@
add_field('nion', function=_nion, take_log=True, units=r"\rm{cm}^{-3}")
def _abar(field, data):
- return 1.0 / data['sumy']
+ try:
+ return 1.0 / data['sumy']
+ except:
+ pass
+ return data['dens']*Na*kboltz*data['temp']/data['pres']
add_field('abar', function=_abar, take_log=False)
+
+
+def _NumberDensity(fields,data) :
+ try:
+ return data["nele"]+data["nion"]
+ except:
+ pass
+ return data['pres']/(data['temp']*kboltz)
+add_field("NumberDensity", function=_NumberDensity,
+ units=r'\rm{cm}^{-3}')
+
+
diff -r 02fe877276dc48a3d0caa54a3caf957fd735db86 -r 41cb5fd0c1fb05ecf4c58a62a6a12acf47020b15 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -28,7 +28,7 @@
import contextlib
import warnings, struct, subprocess
import numpy as np
-from distutils import version
+from distutils.version import LooseVersion
from math import floor, ceil
from yt.utilities.exceptions import *
@@ -260,10 +260,12 @@
"""
import IPython
- if version.LooseVersion(IPython.__version__) <= version.LooseVersion('0.10'):
+ if LooseVersion(IPython.__version__) <= LooseVersion('0.10'):
api_version = '0.10'
+ elif LooseVersion(IPython.__version__) <= LooseVersion('1.0'):
+ api_version = '0.11'
else:
- api_version = '0.11'
+ api_version = '1.0'
stack = inspect.stack()
frame = inspect.stack()[num_up]
@@ -281,7 +283,10 @@
cfg.InteractiveShellEmbed.local_ns = loc
cfg.InteractiveShellEmbed.global_ns = glo
IPython.embed(config=cfg, banner2 = __header % dd)
- from IPython.frontend.terminal.embed import InteractiveShellEmbed
+ if api_version == '0.11':
+ from IPython.frontend.terminal.embed import InteractiveShellEmbed
+ else:
+ from IPython.terminal.embed import InteractiveShellEmbed
ipshell = InteractiveShellEmbed(config=cfg)
del ipshell
diff -r 02fe877276dc48a3d0caa54a3caf957fd735db86 -r 41cb5fd0c1fb05ecf4c58a62a6a12acf47020b15 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1456,7 +1456,12 @@
"""
def __call__(self, args):
kwargs = {}
- from IPython.frontend.html.notebook.notebookapp import NotebookApp
+ try:
+ # IPython 1.0+
+ from IPython.html.notebookapp import NotebookApp
+ except ImportError:
+ # pre-IPython v1.0
+ from IPython.frontend.html.notebook.notebookapp import NotebookApp
pw = ytcfg.get("yt", "notebook_password")
if len(pw) == 0 and not args.no_password:
import IPython.lib
diff -r 02fe877276dc48a3d0caa54a3caf957fd735db86 -r 41cb5fd0c1fb05ecf4c58a62a6a12acf47020b15 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -394,7 +394,7 @@
nWx, nWy = Wx/factor, Wy/factor
self.xlim = (centerx - nWx*0.5, centerx + nWx*0.5)
self.ylim = (centery - nWy*0.5, centery + nWy*0.5)
-
+ return self
@invalidate_data
def pan(self, deltas):
@@ -408,6 +408,7 @@
"""
self.xlim = (self.xlim[0] + deltas[0], self.xlim[1] + deltas[0])
self.ylim = (self.ylim[0] + deltas[1], self.ylim[1] + deltas[1])
+ return self
@invalidate_data
def pan_rel(self, deltas):
@@ -422,6 +423,7 @@
Wx, Wy = self.width
self.xlim = (self.xlim[0] + Wx*deltas[0], self.xlim[1] + Wx*deltas[0])
self.ylim = (self.ylim[0] + Wy*deltas[1], self.ylim[1] + Wy*deltas[1])
+ return self
@invalidate_data
def set_window(self, bounds):
@@ -1110,6 +1112,11 @@
except YTNotInsideNotebook:
return self.save(name=name, mpl_kwargs=mpl_kwargs)
+ def __repr__(self):
+ if "__IPYTHON__" in dir(__builtin__):
+ self.show()
+ return super(PWViewerMPL, self).__repr__()
+
class SlicePlot(PWViewerMPL):
r"""Creates a slice plot from a parameter file
https://bitbucket.org/yt_analysis/yt/commits/e4cf339b7037/
Changeset: e4cf339b7037
Branch: yt
User: hegan
Date: 2013-08-08 02:41:21
Summary: Merged yt_analysis/yt into yt
Affected #: 12 files
diff -r 41cb5fd0c1fb05ecf4c58a62a6a12acf47020b15 -r e4cf339b7037394db6412775bf50a3da59184cda .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -12,6 +12,7 @@
yt/utilities/kdtree/forthonf2c.h
yt/utilities/libconfig_wrapper.c
yt/utilities/spatial/ckdtree.c
+yt/utilities/lib/amr_kdtools.c
yt/utilities/lib/CICDeposit.c
yt/utilities/lib/ContourFinding.c
yt/utilities/lib/DepthFirstOctree.c
diff -r 41cb5fd0c1fb05ecf4c58a62a6a12acf47020b15 -r e4cf339b7037394db6412775bf50a3da59184cda yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -28,7 +28,7 @@
import ConfigParser, os, os.path, types
ytcfgDefaults = dict(
- serialize = 'True',
+ serialize = 'False',
onlydeserialize = 'False',
timefunctions = 'False',
logfile = 'False',
diff -r 41cb5fd0c1fb05ecf4c58a62a6a12acf47020b15 -r e4cf339b7037394db6412775bf50a3da59184cda yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -68,8 +68,9 @@
if not os.path.exists(apath): raise IOError(filename)
if apath not in _cached_pfs:
obj = object.__new__(cls)
- _cached_pfs[apath] = obj
- return _cached_pfs[apath]
+ if obj._skip_cache is False:
+ _cached_pfs[apath] = obj
+ return obj
def __init__(self, filename, data_style=None, file_style=None):
"""
@@ -137,6 +138,10 @@
def _mrep(self):
return MinimalStaticOutput(self)
+ @property
+ def _skip_cache(self):
+ return False
+
def hub_upload(self):
self._mrep.upload()
diff -r 41cb5fd0c1fb05ecf4c58a62a6a12acf47020b15 -r e4cf339b7037394db6412775bf50a3da59184cda yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -801,6 +801,8 @@
rdw = radius.copy()
for i, ax in enumerate('xyz'):
np.subtract(data["%s%s" % (field_prefix, ax)], center[i], r)
+ if data.pf.dimensionality < i+1:
+ break
if data.pf.periodicity[i] == True:
np.abs(r, r)
np.subtract(r, DW[i], rdw)
diff -r 41cb5fd0c1fb05ecf4c58a62a6a12acf47020b15 -r e4cf339b7037394db6412775bf50a3da59184cda yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -91,12 +91,11 @@
splitup = line.strip().split()
if "vtk" in splitup:
grid['vtk_version'] = splitup[-1]
- elif "Really" in splitup:
- grid['time'] = splitup[-1]
- elif any(x in ['PRIMITIVE','CONSERVED'] for x in splitup):
- grid['time'] = float(splitup[4].rstrip(','))
- grid['level'] = int(splitup[6].rstrip(','))
- grid['domain'] = int(splitup[8].rstrip(','))
+ elif "time=" in splitup:
+ time_index = splitup.index("time=")
+ grid['time'] = float(splitup[time_index+1].rstrip(','))
+ grid['level'] = int(splitup[time_index+3].rstrip(','))
+ grid['domain'] = int(splitup[time_index+5].rstrip(','))
elif "DIMENSIONS" in splitup:
grid['dimensions'] = np.array(splitup[-3:]).astype('int')
elif "ORIGIN" in splitup:
@@ -309,6 +308,10 @@
self.grid_left_edge = np.round(self.parameter_file.domain_left_edge + dx*glis, decimals=6)
self.grid_dimensions = gdims.astype("int32")
self.grid_right_edge = np.round(self.grid_left_edge + dx*self.grid_dimensions, decimals=6)
+ if self.parameter_file.dimensionality <= 2:
+ self.grid_right_edge[:,2] = self.parameter_file.domain_right_edge[2]
+ if self.parameter_file.dimensionality == 1:
+ self.grid_right_edge[:,1:] = self.parameter_file.domain_right_edge[1:]
self.grid_particle_count = np.zeros([self.num_grids, 1], dtype='int64')
def _populate_grid_objects(self):
@@ -335,7 +338,9 @@
_data_style = "athena"
def __init__(self, filename, data_style='athena',
- storage_filename=None, parameters={}):
+ storage_filename=None, parameters=None):
+ if parameters is None:
+ parameters = {}
self.specified_parameters = parameters
StaticOutput.__init__(self, filename, data_style)
self.filename = filename
@@ -467,6 +472,10 @@
pass
return False
+ @property
+ def _skip_cache(self):
+ return True
+
def __repr__(self):
return self.basename.rsplit(".", 1)[0]
diff -r 41cb5fd0c1fb05ecf4c58a62a6a12acf47020b15 -r e4cf339b7037394db6412775bf50a3da59184cda yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -128,15 +128,16 @@
if "pressure" in data.pf.field_info:
return data["pressure"]/(data.pf["Gamma"]-1.0)/data["density"]
else:
- return (data["total_energy"] -
- 0.5*(data["cell_centered_B_x"]**2 +
- data["cell_centered_B_y"]**2 +
- data["cell_centered_B_z"]**2) -
- 0.5*(data["momentum_x"]**2 +
- data["momentum_y"]**2 +
- data["momentum_z"]**2)/data["density"])/data["density"]
+ eint = data["total_energy"] - 0.5*(data["momentum_x"]**2 +
+ data["momentum_y"]**2 +
+ data["momentum_z"]**2)/data["density"]
+ if "cell_centered_B_x" in data.pf.field_info:
+ eint -= 0.5*(data["cell_centered_B_x"]**2 +
+ data["cell_centered_B_y"]**2 +
+ data["cell_centered_B_z"]**2)
+ return eint/data["density"]
add_field("Gas_Energy", function=_gasenergy, take_log=False,
- units=r"\rm{erg}/\rm{g}")
+ convert_function=_convertEnergy, units=r"\rm{erg}/\rm{g}")
def _convertPressure(data) :
return data.convert("Density")*data.convert("x-velocity")**2
@@ -144,15 +145,17 @@
if "pressure" in data.pf.field_info:
return data["pressure"]
else:
- return (data["total_energy"] -
- 0.5*(data["cell_centered_B_x"]**2 +
- data["cell_centered_B_y"]**2 +
- data["cell_centered_B_z"]**2) -
- 0.5*(data["momentum_x"]**2 +
- data["momentum_y"]**2 +
- data["momentum_z"]**2)/data["density"])*(data.pf["Gamma"]-1.0)
-add_field("Pressure", function=_pressure, take_log=False, convert_function=_convertPressure,
- units=r"\rm{erg}/\rm{cm}^3", projected_units=r"\rm{erg}/\rm{cm}^2")
+ eint = data["total_energy"] - 0.5*(data["momentum_x"]**2 +
+ data["momentum_y"]**2 +
+ data["momentum_z"]**2)/data["density"]
+ if "cell_centered_B_x" in data.pf.field_info:
+ eint -= 0.5*(data["cell_centered_B_x"]**2 +
+ data["cell_centered_B_y"]**2 +
+ data["cell_centered_B_z"]**2)
+ return eint*(data.pf["Gamma"]-1.0)
+add_field("Pressure", function=_pressure, take_log=False,
+ convert_function=_convertPressure, units=r"\rm{erg}/\rm{cm}^3",
+ projected_units=r"\rm{erg}/\rm{cm}^2")
def _temperature(field, data):
if data.has_field_parameter("mu"):
diff -r 41cb5fd0c1fb05ecf4c58a62a6a12acf47020b15 -r e4cf339b7037394db6412775bf50a3da59184cda yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -339,6 +339,10 @@
def _is_valid(cls, *args, **kwargs):
return False
+ @property
+ def _skip_cache(self):
+ return True
+
class StreamDictFieldHandler(dict):
@property
diff -r 41cb5fd0c1fb05ecf4c58a62a6a12acf47020b15 -r e4cf339b7037394db6412775bf50a3da59184cda yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -107,7 +107,8 @@
for lvl in lvl_range:
gles = np.array([g.LeftEdge for g in grids if g.Level == lvl])
gres = np.array([g.RightEdge for g in grids if g.Level == lvl])
- gids = np.array([g.id for g in grids if g.Level == lvl])
+ gids = np.array([g.id for g in grids if g.Level == lvl],
+ dtype="int64")
add_pygrids(self.trunk, len(gids), gles, gres, gids, self.comm_rank, self.comm_size)
del gles, gres, gids
diff -r 41cb5fd0c1fb05ecf4c58a62a6a12acf47020b15 -r e4cf339b7037394db6412775bf50a3da59184cda yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -33,12 +33,20 @@
"""A base class for all yt plots made using matplotlib.
"""
- def __init__(self, fsize, axrect):
+ def __init__(self, fsize, axrect, figure, axes):
"""Initialize PlotMPL class"""
self._plot_valid = True
- self.figure = matplotlib.figure.Figure(figsize=fsize,
- frameon=True)
- self.axes = self.figure.add_axes(axrect)
+ if figure is None:
+ self.figure = matplotlib.figure.Figure(figsize=fsize, frameon=True)
+ else:
+ figure.set_size_inches(fsize)
+ self.figure = figure
+ if axes is None:
+ self.axes = self.figure.add_axes(axrect)
+ else:
+ axes.cla()
+ self.axes = axes
+ self.canvas = FigureCanvasAgg(self.figure)
def save(self, name, mpl_kwargs, canvas=None):
"""Choose backend and save image to disk"""
@@ -57,7 +65,7 @@
canvas = FigureCanvasPS(self.figure)
else:
mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
- canvas = FigureCanvasAgg(self.figure)
+ canvas = self.canvas
canvas.print_figure(name, **mpl_kwargs)
return name
@@ -67,13 +75,18 @@
"""A base class for yt plots made using imshow
"""
- def __init__(self, fsize, axrect, caxrect, zlim):
+ def __init__(self, fsize, axrect, caxrect, zlim, figure, axes, cax):
"""Initialize ImagePlotMPL class object"""
- PlotMPL.__init__(self, fsize, axrect)
+ PlotMPL.__init__(self, fsize, axrect, figure, axes)
self.zmin, self.zmax = zlim
- self.cax = self.figure.add_axes(caxrect)
+ if cax is None:
+ self.cax = self.figure.add_axes(caxrect)
+ else:
+ cax.cla()
+ cax.set_position(caxrect)
+ self.cax = cax
- def _init_image(self, data, cbnorm, cmap, extent, aspect=None):
+ def _init_image(self, data, cbnorm, cmap, extent, aspect):
"""Store output of imshow in image variable"""
if (cbnorm == 'log10'):
norm = matplotlib.colors.LogNorm()
diff -r 41cb5fd0c1fb05ecf4c58a62a6a12acf47020b15 -r e4cf339b7037394db6412775bf50a3da59184cda yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -90,6 +90,17 @@
return rv
return newfunc
+def invalidate_figure(f):
+ @wraps(f)
+ def newfunc(*args, **kwargs):
+ rv = f(*args, **kwargs)
+ for field in args[0].fields:
+ args[0].plots[field].figure = None
+ args[0].plots[field].axes = None
+ args[0].plots[field].cax = None
+ return rv
+ return newfunc
+
def invalidate_plot(f):
@wraps(f)
def newfunc(*args, **kwargs):
@@ -558,6 +569,7 @@
self.buff_size = (size, size)
@invalidate_plot
+ @invalidate_figure
def set_window_size(self, size):
"""Sets a new window size for the plot
@@ -878,9 +890,19 @@
fp = self._font_properties
+ fig = None
+ axes = None
+ cax = None
+ if self.plots.has_key(f):
+ if self.plots[f].figure is not None:
+ fig = self.plots[f].figure
+ axes = self.plots[f].axes
+ cax = self.plots[f].cax
+
self.plots[f] = WindowPlotMPL(image, self._field_transform[f].name,
self._colormaps[f], extent, aspect,
- zlim, size, fp.get_size())
+ zlim, size, fp.get_size(), fig, axes,
+ cax)
axes_unit_labels = ['', '']
for i, un in enumerate((unit_x, unit_y)):
@@ -947,6 +969,7 @@
del self._frb[key]
@invalidate_plot
+ @invalidate_figure
def set_font(self, font_dict=None):
"""set the font and font properties
@@ -1137,12 +1160,11 @@
or the axis name itself
fields : string
The name of the field(s) to be plotted.
- center : two or three-element vector of sequence floats, 'c', or 'center', or 'max'
- The coordinate of the center of the image. If left blank,
- the image centers on the location of the maximum density
- cell. If set to 'c' or 'center', the plot is centered on
- the middle of the domain. If set to 'max', will be at the point
- of highest density.
+ center : two or three-element vector of sequence floats, or one of 'c',
+ 'center', 'max' or 'm'. The coordinate of the center of the image.
+ If set to 'c', 'center' or left blank, the plot is centered on the
+ middle of the domain. If set to 'max' or 'm', the center will be at
+ the point of highest density.
width : tuple or a float.
Width can have four different formats to support windows with variable
x and y widths. They are:
@@ -1247,13 +1269,11 @@
or the axis name itself
fields : string
The name of the field(s) to be plotted.
- center : two or three-element vector of sequence floats, 'c', or 'center', or 'max'
- The coordinate of the center of the image. If left blank,
- the image centers on the location of the maximum density
- cell. If set to 'c' or 'center', the plot is centered on
- the middle of the domain. If set to 'max', will be at the point
- of highest density.
- width : tuple or a float.
+ center : two or three-element vector of sequence floats, or one of 'c',
+ 'center', 'max' or 'm'. The coordinate of the center of the image.
+ If set to 'c', 'center' or left blank, the plot is centered on the
+ middle of the domain. If set to 'max' or 'm', the center will be at
+ the point of highest density.
Width can have four different formats to support windows with variable
x and y widths. They are:
@@ -1365,11 +1385,11 @@
The vector normal to the slicing plane.
fields : string
The name of the field(s) to be plotted.
- center : A two or three-element vector of sequence floats, 'c', or 'center'
- The coordinate of the center of the image. If left blank,
- the image centers on the location of the maximum density
- cell. If set to 'c' or 'center', the plot is centered on
- the middle of the domain.
+ center : two or three-element vector of sequence floats, or one of 'c',
+ 'center', 'max' or 'm'. The coordinate of the center of the image.
+ If set to 'c', 'center' or left blank, the plot is centered on the
+ middle of the domain. If set to 'max' or 'm', the center will be at
+ the point of highest density.
width : A tuple or a float
A tuple containing the width of image and the string key of
the unit: (width, 'unit'). If set to a float, code units
@@ -1448,11 +1468,11 @@
The vector normal to the slicing plane.
fields : string
The name of the field(s) to be plotted.
- center : A two or three-element vector of sequence floats, 'c', or 'center'
- The coordinate of the center of the image. If left blank,
- the image centers on the location of the maximum density
- cell. If set to 'c' or 'center', the plot is centered on
- the middle of the domain.
+ center : two or three-element vector of sequence floats, or one of 'c',
+ 'center', 'max' or 'm'. The coordinate of the center of the image.
+ If set to 'c', 'center' or left blank, the plot is centered on the
+ middle of the domain. If set to 'max' or 'm', the center will be at
+ the point of highest density.
width : tuple or a float.
Width can have four different formats to support windows with variable
x and y widths. They are:
@@ -1751,7 +1771,9 @@
self._field_transform[field] = linear_transform
class WindowPlotMPL(ImagePlotMPL):
- def __init__(self, data, cbname, cmap, extent, aspect, zlim, size, fontsize):
+ def __init__(
+ self, data, cbname, cmap, extent, aspect, zlim, size, fontsize,
+ figure, axes, cax):
fsize, axrect, caxrect = self._get_best_layout(size, fontsize)
if np.any(np.array(axrect) < 0):
mylog.warning('The axis ratio of the requested plot is very narrow. '
@@ -1760,7 +1782,8 @@
'and matplotlib.')
axrect = (0.07, 0.10, 0.80, 0.80)
caxrect = (0.87, 0.10, 0.04, 0.80)
- ImagePlotMPL.__init__(self, fsize, axrect, caxrect, zlim)
+ ImagePlotMPL.__init__(
+ self, fsize, axrect, caxrect, zlim, figure, axes, cax)
self._init_image(data, cbname, cmap, extent, aspect)
self.image.axes.ticklabel_format(scilimits=(-2,3))
if cbname == 'linear':
diff -r 41cb5fd0c1fb05ecf4c58a62a6a12acf47020b15 -r e4cf339b7037394db6412775bf50a3da59184cda yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -582,7 +582,7 @@
(-self.width[0]/2.0, self.width[0]/2.0,
-self.width[1]/2.0, self.width[1]/2.0),
image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
- np.array(self.width), self.transfer_function, self.sub_samples)
+ np.array(self.width, dtype='float64'), self.transfer_function, self.sub_samples)
return args
star_trees = None
@@ -2192,10 +2192,10 @@
def get_sampler_args(self, image):
rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
args = (rotp, self.box_vectors[2], self.back_center,
- (-self.width[0]/2, self.width[0]/2,
- -self.width[1]/2, self.width[1]/2),
+ (-self.width[0]/2., self.width[0]/2.,
+ -self.width[1]/2., self.width[1]/2.),
image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
- np.array(self.width), self.sub_samples)
+ np.array(self.width, dtype='float64'), self.sub_samples)
return args
def finalize_image(self,image):
https://bitbucket.org/yt_analysis/yt/commits/25d422083d69/
Changeset: 25d422083d69
Branch: yt
User: hegan
Date: 2013-08-13 17:56:11
Summary: Fixed length mismatch of group# in outputs
Affected #: 1 file
diff -r 41cb5fd0c1fb05ecf4c58a62a6a12acf47020b15 -r 25d422083d69e55927b5961a9994dfd246ddab1c yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -139,7 +139,8 @@
speciesLines['N']=na.append(speciesLines['N'],newLinesP[:,0])
speciesLines['b']=na.append(speciesLines['b'],newLinesP[:,1])
speciesLines['z']=na.append(speciesLines['z'],newLinesP[:,2])
- speciesLines['group#']=na.append(speciesLines['group#'],b_i)
+ groupNums = b_i*na.ones(na.size(newLinesP[:,0]))
+ speciesLines['group#']=na.append(speciesLines['group#'],groupNums)
allSpeciesLines[species]=speciesLines
https://bitbucket.org/yt_analysis/yt/commits/818df2c5ef23/
Changeset: 818df2c5ef23
Branch: yt
User: hegan
Date: 2013-08-13 17:56:53
Summary: merged
Affected #: 12 files
diff -r 25d422083d69e55927b5961a9994dfd246ddab1c -r 818df2c5ef23aa9b9b6a6e57a105218b4c6b1e7c .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -12,6 +12,7 @@
yt/utilities/kdtree/forthonf2c.h
yt/utilities/libconfig_wrapper.c
yt/utilities/spatial/ckdtree.c
+yt/utilities/lib/amr_kdtools.c
yt/utilities/lib/CICDeposit.c
yt/utilities/lib/ContourFinding.c
yt/utilities/lib/DepthFirstOctree.c
diff -r 25d422083d69e55927b5961a9994dfd246ddab1c -r 818df2c5ef23aa9b9b6a6e57a105218b4c6b1e7c yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -28,7 +28,7 @@
import ConfigParser, os, os.path, types
ytcfgDefaults = dict(
- serialize = 'True',
+ serialize = 'False',
onlydeserialize = 'False',
timefunctions = 'False',
logfile = 'False',
diff -r 25d422083d69e55927b5961a9994dfd246ddab1c -r 818df2c5ef23aa9b9b6a6e57a105218b4c6b1e7c yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -68,8 +68,9 @@
if not os.path.exists(apath): raise IOError(filename)
if apath not in _cached_pfs:
obj = object.__new__(cls)
- _cached_pfs[apath] = obj
- return _cached_pfs[apath]
+ if obj._skip_cache is False:
+ _cached_pfs[apath] = obj
+ return obj
def __init__(self, filename, data_style=None, file_style=None):
"""
@@ -137,6 +138,10 @@
def _mrep(self):
return MinimalStaticOutput(self)
+ @property
+ def _skip_cache(self):
+ return False
+
def hub_upload(self):
self._mrep.upload()
diff -r 25d422083d69e55927b5961a9994dfd246ddab1c -r 818df2c5ef23aa9b9b6a6e57a105218b4c6b1e7c yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -801,6 +801,8 @@
rdw = radius.copy()
for i, ax in enumerate('xyz'):
np.subtract(data["%s%s" % (field_prefix, ax)], center[i], r)
+ if data.pf.dimensionality < i+1:
+ break
if data.pf.periodicity[i] == True:
np.abs(r, r)
np.subtract(r, DW[i], rdw)
diff -r 25d422083d69e55927b5961a9994dfd246ddab1c -r 818df2c5ef23aa9b9b6a6e57a105218b4c6b1e7c yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -91,12 +91,11 @@
splitup = line.strip().split()
if "vtk" in splitup:
grid['vtk_version'] = splitup[-1]
- elif "Really" in splitup:
- grid['time'] = splitup[-1]
- elif any(x in ['PRIMITIVE','CONSERVED'] for x in splitup):
- grid['time'] = float(splitup[4].rstrip(','))
- grid['level'] = int(splitup[6].rstrip(','))
- grid['domain'] = int(splitup[8].rstrip(','))
+ elif "time=" in splitup:
+ time_index = splitup.index("time=")
+ grid['time'] = float(splitup[time_index+1].rstrip(','))
+ grid['level'] = int(splitup[time_index+3].rstrip(','))
+ grid['domain'] = int(splitup[time_index+5].rstrip(','))
elif "DIMENSIONS" in splitup:
grid['dimensions'] = np.array(splitup[-3:]).astype('int')
elif "ORIGIN" in splitup:
@@ -309,6 +308,10 @@
self.grid_left_edge = np.round(self.parameter_file.domain_left_edge + dx*glis, decimals=6)
self.grid_dimensions = gdims.astype("int32")
self.grid_right_edge = np.round(self.grid_left_edge + dx*self.grid_dimensions, decimals=6)
+ if self.parameter_file.dimensionality <= 2:
+ self.grid_right_edge[:,2] = self.parameter_file.domain_right_edge[2]
+ if self.parameter_file.dimensionality == 1:
+ self.grid_right_edge[:,1:] = self.parameter_file.domain_right_edge[1:]
self.grid_particle_count = np.zeros([self.num_grids, 1], dtype='int64')
def _populate_grid_objects(self):
@@ -335,7 +338,9 @@
_data_style = "athena"
def __init__(self, filename, data_style='athena',
- storage_filename=None, parameters={}):
+ storage_filename=None, parameters=None):
+ if parameters is None:
+ parameters = {}
self.specified_parameters = parameters
StaticOutput.__init__(self, filename, data_style)
self.filename = filename
@@ -467,6 +472,10 @@
pass
return False
+ @property
+ def _skip_cache(self):
+ return True
+
def __repr__(self):
return self.basename.rsplit(".", 1)[0]
diff -r 25d422083d69e55927b5961a9994dfd246ddab1c -r 818df2c5ef23aa9b9b6a6e57a105218b4c6b1e7c yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -128,15 +128,16 @@
if "pressure" in data.pf.field_info:
return data["pressure"]/(data.pf["Gamma"]-1.0)/data["density"]
else:
- return (data["total_energy"] -
- 0.5*(data["cell_centered_B_x"]**2 +
- data["cell_centered_B_y"]**2 +
- data["cell_centered_B_z"]**2) -
- 0.5*(data["momentum_x"]**2 +
- data["momentum_y"]**2 +
- data["momentum_z"]**2)/data["density"])/data["density"]
+ eint = data["total_energy"] - 0.5*(data["momentum_x"]**2 +
+ data["momentum_y"]**2 +
+ data["momentum_z"]**2)/data["density"]
+ if "cell_centered_B_x" in data.pf.field_info:
+ eint -= 0.5*(data["cell_centered_B_x"]**2 +
+ data["cell_centered_B_y"]**2 +
+ data["cell_centered_B_z"]**2)
+ return eint/data["density"]
add_field("Gas_Energy", function=_gasenergy, take_log=False,
- units=r"\rm{erg}/\rm{g}")
+ convert_function=_convertEnergy, units=r"\rm{erg}/\rm{g}")
def _convertPressure(data) :
return data.convert("Density")*data.convert("x-velocity")**2
@@ -144,15 +145,17 @@
if "pressure" in data.pf.field_info:
return data["pressure"]
else:
- return (data["total_energy"] -
- 0.5*(data["cell_centered_B_x"]**2 +
- data["cell_centered_B_y"]**2 +
- data["cell_centered_B_z"]**2) -
- 0.5*(data["momentum_x"]**2 +
- data["momentum_y"]**2 +
- data["momentum_z"]**2)/data["density"])*(data.pf["Gamma"]-1.0)
-add_field("Pressure", function=_pressure, take_log=False, convert_function=_convertPressure,
- units=r"\rm{erg}/\rm{cm}^3", projected_units=r"\rm{erg}/\rm{cm}^2")
+ eint = data["total_energy"] - 0.5*(data["momentum_x"]**2 +
+ data["momentum_y"]**2 +
+ data["momentum_z"]**2)/data["density"]
+ if "cell_centered_B_x" in data.pf.field_info:
+ eint -= 0.5*(data["cell_centered_B_x"]**2 +
+ data["cell_centered_B_y"]**2 +
+ data["cell_centered_B_z"]**2)
+ return eint*(data.pf["Gamma"]-1.0)
+add_field("Pressure", function=_pressure, take_log=False,
+ convert_function=_convertPressure, units=r"\rm{erg}/\rm{cm}^3",
+ projected_units=r"\rm{erg}/\rm{cm}^2")
def _temperature(field, data):
if data.has_field_parameter("mu"):
diff -r 25d422083d69e55927b5961a9994dfd246ddab1c -r 818df2c5ef23aa9b9b6a6e57a105218b4c6b1e7c yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -339,6 +339,10 @@
def _is_valid(cls, *args, **kwargs):
return False
+ @property
+ def _skip_cache(self):
+ return True
+
class StreamDictFieldHandler(dict):
@property
diff -r 25d422083d69e55927b5961a9994dfd246ddab1c -r 818df2c5ef23aa9b9b6a6e57a105218b4c6b1e7c yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -107,7 +107,8 @@
for lvl in lvl_range:
gles = np.array([g.LeftEdge for g in grids if g.Level == lvl])
gres = np.array([g.RightEdge for g in grids if g.Level == lvl])
- gids = np.array([g.id for g in grids if g.Level == lvl])
+ gids = np.array([g.id for g in grids if g.Level == lvl],
+ dtype="int64")
add_pygrids(self.trunk, len(gids), gles, gres, gids, self.comm_rank, self.comm_size)
del gles, gres, gids
diff -r 25d422083d69e55927b5961a9994dfd246ddab1c -r 818df2c5ef23aa9b9b6a6e57a105218b4c6b1e7c yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -33,12 +33,20 @@
"""A base class for all yt plots made using matplotlib.
"""
- def __init__(self, fsize, axrect):
+ def __init__(self, fsize, axrect, figure, axes):
"""Initialize PlotMPL class"""
self._plot_valid = True
- self.figure = matplotlib.figure.Figure(figsize=fsize,
- frameon=True)
- self.axes = self.figure.add_axes(axrect)
+ if figure is None:
+ self.figure = matplotlib.figure.Figure(figsize=fsize, frameon=True)
+ else:
+ figure.set_size_inches(fsize)
+ self.figure = figure
+ if axes is None:
+ self.axes = self.figure.add_axes(axrect)
+ else:
+ axes.cla()
+ self.axes = axes
+ self.canvas = FigureCanvasAgg(self.figure)
def save(self, name, mpl_kwargs, canvas=None):
"""Choose backend and save image to disk"""
@@ -57,7 +65,7 @@
canvas = FigureCanvasPS(self.figure)
else:
mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
- canvas = FigureCanvasAgg(self.figure)
+ canvas = self.canvas
canvas.print_figure(name, **mpl_kwargs)
return name
@@ -67,13 +75,18 @@
"""A base class for yt plots made using imshow
"""
- def __init__(self, fsize, axrect, caxrect, zlim):
+ def __init__(self, fsize, axrect, caxrect, zlim, figure, axes, cax):
"""Initialize ImagePlotMPL class object"""
- PlotMPL.__init__(self, fsize, axrect)
+ PlotMPL.__init__(self, fsize, axrect, figure, axes)
self.zmin, self.zmax = zlim
- self.cax = self.figure.add_axes(caxrect)
+ if cax is None:
+ self.cax = self.figure.add_axes(caxrect)
+ else:
+ cax.cla()
+ cax.set_position(caxrect)
+ self.cax = cax
- def _init_image(self, data, cbnorm, cmap, extent, aspect=None):
+ def _init_image(self, data, cbnorm, cmap, extent, aspect):
"""Store output of imshow in image variable"""
if (cbnorm == 'log10'):
norm = matplotlib.colors.LogNorm()
diff -r 25d422083d69e55927b5961a9994dfd246ddab1c -r 818df2c5ef23aa9b9b6a6e57a105218b4c6b1e7c yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -90,6 +90,17 @@
return rv
return newfunc
+def invalidate_figure(f):
+ @wraps(f)
+ def newfunc(*args, **kwargs):
+ rv = f(*args, **kwargs)
+ for field in args[0].fields:
+ args[0].plots[field].figure = None
+ args[0].plots[field].axes = None
+ args[0].plots[field].cax = None
+ return rv
+ return newfunc
+
def invalidate_plot(f):
@wraps(f)
def newfunc(*args, **kwargs):
@@ -558,6 +569,7 @@
self.buff_size = (size, size)
@invalidate_plot
+ @invalidate_figure
def set_window_size(self, size):
"""Sets a new window size for the plot
@@ -878,9 +890,19 @@
fp = self._font_properties
+ fig = None
+ axes = None
+ cax = None
+ if self.plots.has_key(f):
+ if self.plots[f].figure is not None:
+ fig = self.plots[f].figure
+ axes = self.plots[f].axes
+ cax = self.plots[f].cax
+
self.plots[f] = WindowPlotMPL(image, self._field_transform[f].name,
self._colormaps[f], extent, aspect,
- zlim, size, fp.get_size())
+ zlim, size, fp.get_size(), fig, axes,
+ cax)
axes_unit_labels = ['', '']
for i, un in enumerate((unit_x, unit_y)):
@@ -947,6 +969,7 @@
del self._frb[key]
@invalidate_plot
+ @invalidate_figure
def set_font(self, font_dict=None):
"""set the font and font properties
@@ -1137,12 +1160,11 @@
or the axis name itself
fields : string
The name of the field(s) to be plotted.
- center : two or three-element vector of sequence floats, 'c', or 'center', or 'max'
- The coordinate of the center of the image. If left blank,
- the image centers on the location of the maximum density
- cell. If set to 'c' or 'center', the plot is centered on
- the middle of the domain. If set to 'max', will be at the point
- of highest density.
+ center : two or three-element vector of sequence floats, or one of 'c',
+ 'center', 'max' or 'm'. The coordinate of the center of the image.
+ If set to 'c', 'center' or left blank, the plot is centered on the
+ middle of the domain. If set to 'max' or 'm', the center will be at
+ the point of highest density.
width : tuple or a float.
Width can have four different formats to support windows with variable
x and y widths. They are:
@@ -1247,13 +1269,11 @@
or the axis name itself
fields : string
The name of the field(s) to be plotted.
- center : two or three-element vector of sequence floats, 'c', or 'center', or 'max'
- The coordinate of the center of the image. If left blank,
- the image centers on the location of the maximum density
- cell. If set to 'c' or 'center', the plot is centered on
- the middle of the domain. If set to 'max', will be at the point
- of highest density.
- width : tuple or a float.
+ center : two or three-element vector of sequence floats, or one of 'c',
+ 'center', 'max' or 'm'. The coordinate of the center of the image.
+ If set to 'c', 'center' or left blank, the plot is centered on the
+ middle of the domain. If set to 'max' or 'm', the center will be at
+ the point of highest density.
Width can have four different formats to support windows with variable
x and y widths. They are:
@@ -1365,11 +1385,11 @@
The vector normal to the slicing plane.
fields : string
The name of the field(s) to be plotted.
- center : A two or three-element vector of sequence floats, 'c', or 'center'
- The coordinate of the center of the image. If left blank,
- the image centers on the location of the maximum density
- cell. If set to 'c' or 'center', the plot is centered on
- the middle of the domain.
+ center : two or three-element vector of sequence floats, or one of 'c',
+ 'center', 'max' or 'm'. The coordinate of the center of the image.
+ If set to 'c', 'center' or left blank, the plot is centered on the
+ middle of the domain. If set to 'max' or 'm', the center will be at
+ the point of highest density.
width : A tuple or a float
A tuple containing the width of image and the string key of
the unit: (width, 'unit'). If set to a float, code units
@@ -1448,11 +1468,11 @@
The vector normal to the slicing plane.
fields : string
The name of the field(s) to be plotted.
- center : A two or three-element vector of sequence floats, 'c', or 'center'
- The coordinate of the center of the image. If left blank,
- the image centers on the location of the maximum density
- cell. If set to 'c' or 'center', the plot is centered on
- the middle of the domain.
+ center : two or three-element vector of sequence floats, or one of 'c',
+ 'center', 'max' or 'm'. The coordinate of the center of the image.
+ If set to 'c', 'center' or left blank, the plot is centered on the
+ middle of the domain. If set to 'max' or 'm', the center will be at
+ the point of highest density.
width : tuple or a float.
Width can have four different formats to support windows with variable
x and y widths. They are:
@@ -1751,7 +1771,9 @@
self._field_transform[field] = linear_transform
class WindowPlotMPL(ImagePlotMPL):
- def __init__(self, data, cbname, cmap, extent, aspect, zlim, size, fontsize):
+ def __init__(
+ self, data, cbname, cmap, extent, aspect, zlim, size, fontsize,
+ figure, axes, cax):
fsize, axrect, caxrect = self._get_best_layout(size, fontsize)
if np.any(np.array(axrect) < 0):
mylog.warning('The axis ratio of the requested plot is very narrow. '
@@ -1760,7 +1782,8 @@
'and matplotlib.')
axrect = (0.07, 0.10, 0.80, 0.80)
caxrect = (0.87, 0.10, 0.04, 0.80)
- ImagePlotMPL.__init__(self, fsize, axrect, caxrect, zlim)
+ ImagePlotMPL.__init__(
+ self, fsize, axrect, caxrect, zlim, figure, axes, cax)
self._init_image(data, cbname, cmap, extent, aspect)
self.image.axes.ticklabel_format(scilimits=(-2,3))
if cbname == 'linear':
diff -r 25d422083d69e55927b5961a9994dfd246ddab1c -r 818df2c5ef23aa9b9b6a6e57a105218b4c6b1e7c yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -582,7 +582,7 @@
(-self.width[0]/2.0, self.width[0]/2.0,
-self.width[1]/2.0, self.width[1]/2.0),
image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
- np.array(self.width), self.transfer_function, self.sub_samples)
+ np.array(self.width, dtype='float64'), self.transfer_function, self.sub_samples)
return args
star_trees = None
@@ -2192,10 +2192,10 @@
def get_sampler_args(self, image):
rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
args = (rotp, self.box_vectors[2], self.back_center,
- (-self.width[0]/2, self.width[0]/2,
- -self.width[1]/2, self.width[1]/2),
+ (-self.width[0]/2., self.width[0]/2.,
+ -self.width[1]/2., self.width[1]/2.),
image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
- np.array(self.width), self.sub_samples)
+ np.array(self.width, dtype='float64'), self.sub_samples)
return args
def finalize_image(self,image):
https://bitbucket.org/yt_analysis/yt/commits/82ea31a9edbc/
Changeset: 82ea31a9edbc
Branch: yt
User: hegan
Date: 2013-08-13 17:57:54
Summary: Merged yt_analysis/yt into yt
Affected #: 9 files
diff -r 818df2c5ef23aa9b9b6a6e57a105218b4c6b1e7c -r 82ea31a9edbcd16cb57a57c67af5e78b14b12b38 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,4 @@
include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
recursive-include yt/gui/reason/html *.html *.png *.ico *.js
recursive-include yt *.pyx *.pxd *.hh *.h README*
+recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE
\ No newline at end of file
diff -r 818df2c5ef23aa9b9b6a6e57a105218b4c6b1e7c -r 82ea31a9edbcd16cb57a57c67af5e78b14b12b38 yt/data_objects/setup.py
--- a/yt/data_objects/setup.py
+++ b/yt/data_objects/setup.py
@@ -8,7 +8,7 @@
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('data_objects', parent_package, top_path)
+ config.add_subpackage("tests")
config.make_config_py() # installs __config__.py
- config.add_subpackage("tests")
#config.make_svn_version_py()
return config
diff -r 818df2c5ef23aa9b9b6a6e57a105218b4c6b1e7c -r 82ea31a9edbcd16cb57a57c67af5e78b14b12b38 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -70,6 +70,8 @@
obj = object.__new__(cls)
if obj._skip_cache is False:
_cached_pfs[apath] = obj
+ else:
+ obj = _cached_pfs[apath]
return obj
def __init__(self, filename, data_style=None, file_style=None):
diff -r 818df2c5ef23aa9b9b6a6e57a105218b4c6b1e7c -r 82ea31a9edbcd16cb57a57c67af5e78b14b12b38 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -270,7 +270,7 @@
with temp_cwd(path):
try:
load(pf_fn)
- except:
+ except YTOutputNotIdentified:
return False
return AnswerTestingTest.result_storage is not None
https://bitbucket.org/yt_analysis/yt/commits/9f33ef8332b7/
Changeset: 9f33ef8332b7
Branch: yt
User: brittonsmith
Date: 2013-08-15 00:25:32
Summary: Merged in hegan/yt (pull request #565)
Added absorption spectrum fitting
Affected #: 2 files
diff -r ed0606519b9e3ba18ccf5724517f2785860c68ea -r 9f33ef8332b7678e69cb9e41f02747a1a057597a yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- /dev/null
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -0,0 +1,809 @@
+from scipy import optimize
+import numpy as na
+import h5py
+from yt.analysis_modules.absorption_spectrum.absorption_line \
+ import voigt
+
+
+def generate_total_fit(x, fluxData, orderFits, speciesDicts,
+ minError=1E-5, complexLim=.999,
+ fitLim=.99, minLength=3,
+ maxLength=1000, splitLim=.99,
+ output_file=None):
+
+ """
+ This function is designed to fit an absorption spectrum by breaking
+ the spectrum up into absorption complexes, and iteratively adding
+ and optimizing voigt profiles to each complex.
+
+ Parameters
+ ----------
+ x : (N) ndarray
+ 1d array of wavelengths
+ fluxData : (N) ndarray
+ array of flux corresponding to the wavelengths given
+ in x. (needs to be the same size as x)
+ orderFits : list
+ list of the names of the species in the order that they
+ should be fit. Names should correspond to the names of the species
+ given in speciesDicts. (ex: ['lya','OVI'])
+ speciesDicts : dictionary
+ Dictionary of dictionaries (I'm addicted to dictionaries, I
+ confess). Top level keys should be the names of all the species given
+ in orderFits. The entries should be dictionaries containing all
+ relevant parameters needed to create an absorption line of a given
+ species (f,Gamma,lambda0) as well as max and min values for parameters
+ to be fit
+ complexLim : float, optional
+ Maximum flux to start the edge of an absorption complex. Different
+ from fitLim because it decides extent of a complex rather than
+ whether or not a complex is accepted.
+ fitLim : float,optional
+ Maximum flux where the level of absorption will trigger
+ identification of the region as an absorption complex. Default = .99.
+ (ex: for fitLim=.98, a region where all the flux is between 1.0 and
+ .99 will not be separated out to be fit as an absorbing complex, but
+ a region that contains a point where the flux is .97 will be fit
+ as an absorbing complex.)
+ minLength : int, optional
+ number of cells required for a complex to be included.
+ default is 3 cells.
+ maxLength : int, optional
+ number of cells required for a complex to be split up. Default
+ is 1000 cells.
+ splitLim : float, optional
+ if attempting to split a region for being larger than maxlength
+ the point of the split must have a flux greater than splitLim
+ (ie: absorption greater than splitLim). Default= .99.
+ output_file : string, optional
+ location to save the results of the fit.
+
+ Returns
+ -------
+ allSpeciesLines : dictionary
+ Dictionary of dictionaries representing the fit lines.
+ Top level keys are the species given in orderFits and the corresponding
+ entries are dictionaries with the keys 'N','b','z', and 'group#'.
+ Each of these corresponds to a list of the parameters for every
+ accepted fitted line. (ie: N[0],b[0],z[0] will create a line that
+ fits some part of the absorption spectrum). 'group#' is a similar list
+ but identifies which absorbing complex each line belongs to. Lines
+ with the same group# were fit at the same time. group#'s do not
+ correlate between species (ie: an lya line with group number 1 and
+ an OVI line with group number 1 were not fit together and do
+ not necessarily correspond to the same region)
+ yFit : (N) ndarray
+ array of flux corresponding to the combination of all fitted
+ absorption profiles. Same size as x.
+ """
+
+ #Empty dictionary for fitted lines
+ allSpeciesLines = {}
+
+ #Wavelength of beginning of array, wavelength resolution
+ x0,xRes=x[0],x[1]-x[0]
+
+ #Empty fit without any lines
+ yFit = na.ones(len(fluxData))
+
+ #Find all regions where lines/groups of lines are present
+ cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
+ complexLim=complexLim, minLength=minLength,
+ maxLength=maxLength, splitLim=splitLim)
+
+ #Fit all species one at a time in given order from low to high wavelength
+ for species in orderFits:
+ speciesDict = speciesDicts[species]
+ speciesLines = {'N':na.array([]),
+ 'b':na.array([]),
+ 'z':na.array([]),
+ 'group#':na.array([])}
+
+ #Set up wavelengths for species
+ initWl = speciesDict['wavelength'][0]
+
+ for b_i,b in enumerate(cBounds):
+ xBounded=x[b[1]:b[2]]
+ yDatBounded=fluxData[b[1]:b[2]]
+ yFitBounded=yFit[b[1]:b[2]]
+
+ #Find init redshift
+ z=(xBounded[yDatBounded.argmin()]-initWl)/initWl
+
+ #Check if any flux at partner sites
+ if not _line_exists(speciesDict['wavelength'],
+ fluxData,z,x0,xRes,fitLim):
+ continue
+
+ #Fit Using complex tools
+ newLinesP,flag=_complex_fit(xBounded,yDatBounded,yFitBounded,
+ z,fitLim,minError*(b[2]-b[1]),speciesDict)
+
+ #Check existence of partner lines if applicable
+ newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData,
+ b, minError*(b[2]-b[1]),
+ x0, xRes, speciesDict)
+
+ #If flagged as a bad fit, species is lyman alpha,
+ # and it may be a saturated line, use special tools
+ if flag and species=='lya' and min(yDatBounded)<.1:
+ newLinesP=_large_flag_fit(xBounded,yDatBounded,
+ yFitBounded,z,speciesDict,
+ minSize,minError*(b[2]-b[1]))
+
+ #Adjust total current fit
+ yFit=yFit*_gen_flux_lines(x,newLinesP,speciesDict)
+
+ #Add new group to all fitted lines
+ if na.size(newLinesP)>0:
+ speciesLines['N']=na.append(speciesLines['N'],newLinesP[:,0])
+ speciesLines['b']=na.append(speciesLines['b'],newLinesP[:,1])
+ speciesLines['z']=na.append(speciesLines['z'],newLinesP[:,2])
+ groupNums = b_i*na.ones(na.size(newLinesP[:,0]))
+ speciesLines['group#']=na.append(speciesLines['group#'],groupNums)
+
+ allSpeciesLines[species]=speciesLines
+
+ if output_file:
+ _output_fit(allSpeciesLines, output_file)
+
+ return (allSpeciesLines,yFit)
+
+def _complex_fit(x, yDat, yFit, initz, minSize, errBound, speciesDict,
+ initP=None):
+ """ Fit an absorption complex by iteratively adding and optimizing
+ voigt profiles.
+
+ A complex is defined as a region where some number of lines may be present,
+ or a region of non-zero absorption. Lines are iteratively added
+ and optimized until the difference between the flux generated using
+ the optimized parameters has a least squares difference between the
+ desired flux profile less than the error bound.
+
+ Parameters
+ ----------
+ x : (N) ndarray
+ array of wavelength
+ ydat : (N) ndarray
+ array of desired flux profile to be fitted for the wavelength
+ space given by x. Same size as x.
+ yFit : (N) ndarray
+ array of flux profile fitted for the wavelength
+ space given by x already. Same size as x.
+ initz : float
+ redshift to try putting first line at
+ (maximum absorption for region)
+ minSize : float
+ minimum absorption allowed for a line to still count as a line
+ given in normalized flux (ie: for minSize=.9, only lines with minimum
+ flux less than .9 will be fitted)
+ errBound : float
+ maximum total error allowed for an acceptable fit
+ speciesDict : dictionary
+ dictionary containing all relevant parameters needed
+ to create an absorption line of a given species (f,Gamma,lambda0)
+ as well as max and min values for parameters to be fit
+ initP : (,3,) ndarray
+ initial guess to try for line parameters to fit the region. Used
+ by large_flag_fit. Default = None, and initial guess generated
+ automatically.
+
+ Returns
+ -------
+ linesP : (3,) ndarray
+ Array of best parameters if a good enough fit is found in
+ the form [[N1,b1,z1], [N2,b2,z2],...]
+ flag : bool
+ boolean value indicating the success of the fit (True if unsuccessful)
+ """
+
+ #Setup initial line guesses
+ if initP==None: #Regular fit
+ initP = [0,0,0]
+ if min(yDat)<.5: #Large lines get larger initial guess
+ initP[0] = 10**16
+ elif min(yDat)>.9: #Small lines get smaller initial guess
+ initP[0] = 10**12.5
+ else:
+ initP[0] = speciesDict['init_N']
+ initP[1] = speciesDict['init_b']
+ initP[2]=initz
+ initP=na.array([initP])
+
+ linesP = initP
+
+ #For generating new z guesses
+ wl0 = speciesDict['wavelength'][0]
+
+ #Check if first line exists still
+ if min(yDat-yFit+1)>minSize:
+ return [],False
+
+ #Values to proceed through first run
+ errSq,prevErrSq=1,1000
+
+ while True:
+ #Initial parameter guess from joining parameters from all lines
+ # in lines into a single array
+ initP = linesP.flatten()
+
+ #Optimize line
+ fitP,success=optimize.leastsq(_voigt_error,initP,
+ args=(x,yDat,yFit,speciesDict),
+ epsfcn=1E-10,maxfev=1000)
+
+ #Set results of optimization
+ linesP = na.reshape(fitP,(-1,3))
+
+ #Generate difference between current best fit and data
+ yNewFit=_gen_flux_lines(x,linesP,speciesDict)
+ dif = yFit*yNewFit-yDat
+
+ #Sum to get idea of goodness of fit
+ errSq=sum(dif**2)
+
+ #If good enough, break
+ if errSq < errBound:
+ break
+
+ #If last fit was worse, reject the last line and revert to last fit
+ if errSq > prevErrSq*10:
+ #If its still pretty damn bad, cut losses and try flag fit tools
+ if prevErrSq >1E2*errBound and speciesDict['name']=='HI lya':
+ return [],True
+ else:
+ yNewFit=_gen_flux_lines(x,prevLinesP,speciesDict)
+ break
+
+ #If too many lines
+ if na.shape(linesP)[0]>8 or na.size(linesP)+3>=len(x):
+ #If its fitable by flag tools and still bad, use flag tools
+ if errSq >1E2*errBound and speciesDict['name']=='HI lya':
+ return [],True
+ else:
+ break
+
+ #Store previous data in case reject next fit
+ prevErrSq = errSq
+ prevLinesP = linesP
+
+
+ #Set up initial condition for new line
+ newP = [0,0,0]
+ if min(dif)<.1:
+ newP[0]=10**12
+ elif min(dif)>.9:
+ newP[0]=10**16
+ else:
+ newP[0]=10**14
+ newP[1] = speciesDict['init_b']
+ newP[2]=(x[dif.argmax()]-wl0)/wl0
+ linesP=na.append(linesP,[newP],axis=0)
+
+
+ #Check the parameters of all lines to see if they fall in an
+ # acceptable range, as given in dict ref
+ remove=[]
+ for i,p in enumerate(linesP):
+ check=_check_params(na.array([p]),speciesDict)
+ if check:
+ remove.append(i)
+ linesP = na.delete(linesP,remove,axis=0)
+
+ return linesP,False
+
+def _large_flag_fit(x, yDat, yFit, initz, speciesDict, minSize, errBound):
+ """
+ Attempts to more robustly fit saturated lyman alpha regions that have
+ not converged to satisfactory fits using the standard tools.
+
+ Uses a preselected sample of a wide range of initial parameter guesses
+ designed to fit saturated lines (see get_test_lines).
+
+ Parameters
+ ----------
+ x : (N) ndarray
+ array of wavelength
+ ydat : (N) ndarray
+ array of desired flux profile to be fitted for the wavelength
+ space given by x. Same size as x.
+ yFit : (N) ndarray
+ array of flux profile fitted for the wavelength
+ space given by x already. Same size as x.
+ initz : float
+ redshift to try putting first line at
+ (maximum absorption for region)
+ speciesDict : dictionary
+ dictionary containing all relevant parameters needed
+ to create an absorption line of a given species (f,Gamma,lambda0)
+ as well as max and min values for parameters to be fit
+ minSize : float
+ minimum absorption allowed for a line to still count as a line
+ given in normalized flux (ie: for minSize=.9, only lines with minimum
+ flux less than .9 will be fitted)
+ errBound : float
+ maximum total error allowed for an acceptable fit
+
+ Returns
+ -------
+ bestP : (3,) ndarray
+ array of best parameters if a good enough fit is found in
+ the form [[N1,b1,z1], [N2,b2,z2],...]
+ """
+
+ #Set up some initial line guesses
+ lineTests = _get_test_lines(initz)
+
+ #Keep track of the lowest achieved error
+ bestError = 1000
+
+ #Iterate through test line guesses
+ for initLines in lineTests:
+ if initLines[1,0]==0:
+ initLines = na.delete(initLines,1,axis=0)
+
+ #Do fitting with initLines as first guess
+ linesP,flag=_complex_fit(x,yDat,yFit,initz,
+ minSize,errBound,speciesDict,initP=initLines)
+
+ #Find error of last fit
+ yNewFit=_gen_flux_lines(x,linesP,speciesDict)
+ dif = yFit*yNewFit-yDat
+ errSq=sum(dif**2)
+
+ #If error lower, keep track of the lines used to make that fit
+ if errSq < bestError:
+ bestError = errSq
+ bestP = linesP
+
+ if bestError>10*errBound*len(x):
+ return []
+ else:
+ return bestP
+
+def _get_test_lines(initz):
+ """
+ Returns a 3d numpy array of lines to test as initial guesses for difficult
+ to fit lyman alpha absorbers that are saturated.
+
+ The array is 3d because
+ the first dimension gives separate initial guesses, the second dimension
+ has multiple lines for the same guess (trying a broad line plus a
+ saturated line) and the 3d dimension contains the 3 fit parameters (N,b,z)
+
+ Parameters
+ ----------
+ initz : float
+ redshift to give all the test lines
+
+ Returns
+ -------
+ testP : (,3,) ndarray
+ numpy array of the form
+ [[[N1a,b1a,z1a], [N1b,b1b,z1b]], [[N2a,b2,z2a],...] ...]
+ """
+
+ #Set up a bunch of empty lines
+ testP = na.zeros((10,2,3))
+
+ testP[0,0,:]=[1E18,20,initz]
+ testP[1,0,:]=[1E18,40,initz]
+ testP[2,0,:]=[1E16,5, initz]
+ testP[3,0,:]=[1E16,20,initz]
+ testP[4,0,:]=[1E16,80,initz]
+
+ testP[5,0,:]=[1E18,20,initz]
+ testP[6,0,:]=[1E18,40,initz]
+ testP[7,0,:]=[1E16,5, initz]
+ testP[8,0,:]=[1E16,20,initz]
+ testP[9,0,:]=[1E16,80,initz]
+
+ testP[5,1,:]=[1E13,100,initz]
+ testP[6,1,:]=[1E13,100,initz]
+ testP[7,1,:]=[1E13,100,initz]
+ testP[8,1,:]=[1E13,100,initz]
+ testP[9,1,:]=[1E13,100,initz]
+
+ return testP
+
+def _get_bounds(z, b, wl, x0, xRes):
+ """
+ Gets the indices of range of wavelength that the wavelength wl is in
+ with the size of some initial wavelength range.
+
+ Used for checking if species with multiple lines (as in the OVI doublet)
+ fit all lines appropriately.
+
+ Parameters
+ ----------
+ z : float
+ redshift
+ b : (3) ndarray/list
+ initial bounds in form [i0,i1,i2] where i0 is the index of the
+ minimum flux for the complex, i1 is index of the lower wavelength
+ edge of the complex, and i2 is the index of the higher wavelength
+ edge of the complex.
+ wl : float
+ unredshifted wavelength of the peak of the new region
+ x0 : float
+ wavelength of the index 0
+ xRes : float
+ difference in wavelength for two consecutive indices
+
+ Returns
+ -------
+ indices : (2) tuple
+ Tuple (i1,i2) where i1 is the index of the lower wavelength bound of
+ the new region and i2 is the index of the higher wavelength bound of
+ the new region
+ """
+
+ r=[-b[1]+100+b[0],b[2]+100-b[0]]
+ redWl = (z+1)*wl
+ iRedWl=int((redWl-x0)/xRes)
+ indices = (iRedWl-r[0],iRedWl+r[1])
+
+ return indices
+
+def _remove_unaccepted_partners(linesP, x, y, b, errBound,
+ x0, xRes, speciesDict):
+ """
+ Given a set of parameters [N,b,z] that form multiple lines for a given
+ species (as in the OVI doublet), remove any set of parameters where
+ not all transition wavelengths have a line that matches the fit.
+
+ (ex: if a fit is determined based on the first line of the OVI doublet,
+ but the given parameters give a bad fit of the wavelength space of
+ the second line then that set of parameters is removed from the array
+ of line parameters.)
+
+ Parameters
+ ----------
+ linesP : (3,) ndarray
+ array giving sets of line parameters in
+ form [[N1, b1, z1], ...]
+ x : (N) ndarray
+ wavelength array [nm]
+ y : (N) ndarray
+ normalized flux array of original data
+ b : (3) tuple/list/ndarray
+ indices that give the bounds of the original region so that another
+ region of similar size can be used to determine the goodness
+ of fit of the other wavelengths
+ errBound : float
+ size of the error that is appropriate for a given region,
+ adjusted to account for the size of the region.
+
+ Returns
+ -------
+ linesP : (3,) ndarray
+ array similar to linesP that only contains lines with
+ appropriate fits of all transition wavelengths.
+ """
+
+ #List of lines to remove
+ removeLines=[]
+
+ #Iterate through all sets of line parameters
+ for i,p in enumerate(linesP):
+
+ #iterate over all transition wavelengths
+ for wl in speciesDict['wavelength']:
+
+ #Get the bounds of a similar sized region around the
+ # appropriate wavelength, and then get the appropriate
+ # region of wavelength and flux
+ lb = _get_bounds(p[2],b,wl,x0,xRes)
+ xb,yb=x[lb[0]:lb[1]],y[lb[0]:lb[1]]
+
+ #Generate a fit and find the difference to data
+ yFitb=_gen_flux_lines(xb,na.array([p]),speciesDict)
+ dif =yb-yFitb
+
+ #Only counts as an error if line is too big ---------------<
+ dif = [k for k in dif if k>0]
+ err = sum(dif)
+
+ #If the fit is too bad then add the line to list of removed lines
+ if err > errBound*1E2:
+ removeLines.append(i)
+ break
+
+ #Remove all bad line fits
+ linesP = na.delete(linesP,removeLines,axis=0)
+
+ return linesP
+
+
+
+def _line_exists(wavelengths, y, z, x0, xRes,fluxMin):
+ """For a group of lines, finds whether there is some change in flux greater
+ than some minimum at the same redshift with different initial wavelengths
+
+ Parameters
+ ----------
+ wavelengths : (N) ndarray
+ array of initial wavelengths to check
+ y : (N) ndarray
+ flux array to check
+ x0 : float
+ wavelength of the first value in y
+ xRes : float
+ difference in wavelength between consecutive cells in flux array
+ fluxMin : float
+ maximum flux to count as a line existing.
+
+ Returns
+ -------
+
+ flag : boolean
+ value indicating whether all lines exist. True if all lines exist
+ """
+
+ #Iterate through initial wavelengths
+ for wl in wavelengths:
+ #Redshifted wavelength
+ redWl = (z+1)*wl
+
+ #Index of the redshifted wavelength
+ indexRedWl = (redWl-x0)/xRes
+
+ #Check if surpasses minimum absorption bound
+ if y[int(indexRedWl)]>fluxMin:
+ return False
+
+ return True
+
+def _find_complexes(x, yDat, complexLim=.999, fitLim=.99,
+ minLength =3, maxLength=1000, splitLim=.99):
+ """Breaks up the wavelength space into groups
+ where there is some absorption.
+
+ Parameters
+ ----------
+ x : (N) ndarray
+ array of wavelengths
+ yDat : (N) ndarray
+ array of flux corresponding to the wavelengths given
+ in x. (needs to be the same size as x)
+ complexLim : float, optional
+ Maximum flux to start the edge of an absorption complex. Different
+ from fitLim because it decides extent of a complex rather than
+ whether or not a complex is accepted.
+ fitLim : float,optional
+ Maximum flux where the level of absorption will trigger
+ identification of the region as an absorption complex. Default = .99.
+ (ex: for fitLim=.98, a region where all the flux is between 1.0 and
+ .99 will not be separated out to be fit as an absorbing complex, but
+ a region that contains a point where the flux is .97 will be fit
+ as an absorbing complex.)
+ minLength : int, optional
+ number of cells required for a complex to be included.
+ default is 3 cells.
+ maxLength : int, optional
+ number of cells required for a complex to be split up. Default
+ is 1000 cells.
+ splitLim : float, optional
+ if attempting to split a region for being larger than maxlength
+ the point of the split must have a flux greater than splitLim
+ (ie: absorption greater than splitLim). Default= .99.
+
+ Returns
+ -------
+ cBounds : (3,)
+ list of bounds in the form [[i0,i1,i2],...] where i0 is the
+ index of the maximum flux for a complex, i1 is the index of the
+ beginning of the complex, and i2 is the index of the end of the
+ complex. Indexes refer to the indices of x and yDat.
+ """
+
+ #Initialize empty list of bounds
+ cBounds=[]
+
+ #Iterate through cells of flux
+ i=0
+ while (i<len(x)):
+
+ #Start tracking at a region that surpasses flux of edge
+ if yDat[i]<complexLim:
+
+ #Iterate through until reach next edge
+ j=0
+ while yDat[i+j]<complexLim: j=j+1
+
+ #Check if the complex is big enough
+ if j >minLength:
+
+ #Check if there is enough absorption for the complex to
+ # be included
+ cPeak = yDat[i:i+j].argmin()
+ if yDat[cPeak+i]<fitLim:
+ cBounds.append([cPeak+i,i,i+j])
+
+ i=i+j
+ i=i+1
+
+ i=0
+ #Iterate through the bounds
+ while i < len(cBounds):
+ b=cBounds[i]
+
+ #Check if the region needs to be divided
+ if b[2]-b[1]>maxLength:
+
+ #Find the minimum absorption in the middle two quartiles of
+ # the large complex
+ q=(b[2]-b[1])/4
+ cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+
+ #Only break it up if the minimum absorption is actually low enough
+ if yDat[cut]>splitLim:
+
+ #Get the new two peaks
+ b1Peak = yDat[b[1]:cut].argmin()+b[1]
+ b2Peak = yDat[cut:b[2]].argmin()+cut
+
+ #add the two regions separately
+ cBounds.insert(i+1,[b1Peak,b[1],cut])
+ cBounds.insert(i+2,[b2Peak,cut,b[2]])
+
+ #Remove the original region
+ cBounds.pop(i)
+ i=i+1
+ i=i+1
+
+ return cBounds
+
+def _gen_flux_lines(x, linesP, speciesDict):
+ """
+ Calculates the normalized flux for a region of wavelength space
+ generated by a set of absorption lines.
+
+ Parameters
+ ----------
+ x : (N) ndarray
+ Array of wavelength
+ linesP: (3,) ndarray
+ Array giving sets of line parameters in
+ form [[N1, b1, z1], ...]
+ speciesDict : dictionary
+ Dictionary containing all relevant parameters needed
+ to create an absorption line of a given species (f,Gamma,lambda0)
+
+ Returns
+ -------
+ flux : (N) ndarray
+ Array of normalized flux generated by the line parameters
+ given in linesP over the wavelength space given in x. Same size as x.
+ """
+ y=0
+ for p in linesP:
+ for i in range(speciesDict['numLines']):
+ f=speciesDict['f'][i]
+ g=speciesDict['Gamma'][i]
+ wl=speciesDict['wavelength'][i]
+ y = y+ _gen_tau(x,p,f,g,wl)
+ flux = na.exp(-y)
+ return flux
+
+def _gen_tau(t, p, f, Gamma, lambda_unshifted):
+ """This calculates a flux distribution for given parameters using the yt
+ voigt profile generator"""
+ N,b,z= p
+
+ #Calculating quantities
+ tau_o = 1.4973614E-15*N*f*lambda_unshifted/b
+ a=7.95774715459E-15*Gamma*lambda_unshifted/b
+ x=299792.458/b*(lambda_unshifted*(1+z)/t-1)
+
+ H = na.zeros(len(x))
+ H = voigt(a,x)
+
+ tau = tau_o*H
+
+ return tau
+
+def _voigt_error(pTotal, x, yDat, yFit, speciesDict):
+ """
+ Gives the error of each point used to optimize the fit of a group
+ of absorption lines to a given flux profile.
+
+ If the parameters are not in the acceptable range as defined
+ in speciesDict, the first value of the error array will
+ contain a large value (999), to prevent the optimizer from running
+ into negative number problems.
+
+ Parameters
+ ----------
+ pTotal : (3,) ndarray
+ Array with form [[N1, b1, z1], ...]
+ x : (N) ndarray
+ array of wavelengths [nm]
+ yDat : (N) ndarray
+ desired normalized flux from fits of lines in wavelength
+ space given by x
+ yFit : (N) ndarray
+ previous fit over the wavelength space given by x.
+ speciesDict : dictionary
+ dictionary containing all relevant parameters needed
+ to create an absorption line of a given species (f,Gamma,lambda0)
+ as well as max and min values for parameters to be fit
+
+ Returns
+ -------
+ error : (N) ndarray
+ the difference between the fit generated by the parameters
+ given in pTotal multiplied by the previous fit and the desired
+ flux profile, w/ first index modified appropriately for bad
+ parameter choices
+ """
+
+ pTotal.shape = (-1,3)
+ yNewFit = _gen_flux_lines(x,pTotal,speciesDict)
+
+ error = yDat-yFit*yNewFit
+ error[0] = _check_params(pTotal,speciesDict)
+
+ return error
+
+def _check_params(p, speciesDict):
+ """
+ Check to see if any of the parameters in p fall outside the range
+ given in speciesDict.
+
+ Parameters
+ ----------
+ p : (3,) ndarray
+ array with form [[N1, b1, z1], ...]
+ speciesDict : dictionary
+ dictionary with properties giving the max and min
+ values appropriate for each parameter N,b, and z.
+
+ Returns
+ -------
+ check : int
+ 0 if all values are fine
+ 999 if any values fall outside acceptable range
+ """
+ check = 0
+ if any(p[:,0] > speciesDict['maxN']) or\
+ any(p[:,0] < speciesDict['minN']) or\
+ any(p[:,1] > speciesDict['maxb']) or\
+ any(p[:,1] < speciesDict['minb']) or\
+ any(p[:,2] > speciesDict['maxz']) or\
+ any(p[:,2] < speciesDict['minz']):
+ check = 999
+ return check
+
+
+def _output_fit(lineDic, file_name = 'spectrum_fit.h5'):
+ """
+ This function is designed to output the parameters of the series
+ of lines used to fit an absorption spectrum.
+
+ The dataset contains entries in the form species/N, species/b
+ species/z, and species/complex. The ith entry in each of the datasets
+ is the fitted parameter for the ith line fitted to the spectrum for
+ the given species. The species names come from the fitted line
+ dictionary.
+
+ Parameters
+ ----------
+ lineDic : dictionary
+ Dictionary of dictionaries representing the fit lines.
+ Top level keys are the species given in orderFits and the corresponding
+ entries are dictionaries with the keys 'N','b','z', and 'group#'.
+ Each of these corresponds to a list of the parameters for every
+ accepted fitted line.
+ fileName : string, optional
+ Name of the file to output fit to. Default = 'spectrum_fit.h5'
+
+ """
+ f = h5py.File(file_name, 'w')
+ for ion, params in lineDic.iteritems():
+ f.create_dataset("{0}/N".format(ion),data=params['N'])
+ f.create_dataset("{0}/b".format(ion),data=params['b'])
+ f.create_dataset("{0}/z".format(ion),data=params['z'])
+ f.create_dataset("{0}/complex".format(ion),data=params['group#'])
+ print 'Writing spectrum fit to {0}'.format(file_name)
+
diff -r ed0606519b9e3ba18ccf5724517f2785860c68ea -r 9f33ef8332b7678e69cb9e41f02747a1a057597a yt/analysis_modules/absorption_spectrum/api.py
--- a/yt/analysis_modules/absorption_spectrum/api.py
+++ b/yt/analysis_modules/absorption_spectrum/api.py
@@ -30,3 +30,6 @@
from .absorption_spectrum import \
AbsorptionSpectrum
+
+from .absorption_spectrum_fit import \
+ generate_total_fit
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the yt-svn
mailing list