[yt-svn] commit/yt: 3 new changesets

commits-noreply at bitbucket.org
Wed Apr 16 12:11:28 PDT 2014


3 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/62b9537e1aa9/
Changeset:   62b9537e1aa9
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-16 00:01:57
Summary:     Merging from mainline yt development.
Affected #:  26 files

diff -r 5acca4b24803da91accfcb8937aed96432fe7edd -r 62b9537e1aa9085d3ca3673f1bdd7dd7b6c61cd9 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -7,4 +7,9 @@
 include doc/extensions/README doc/Makefile
 prune doc/source/reference/api/generated
 prune doc/build/
+recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
+prune yt/frontends/_skeleton
+prune tests
+graft yt/gui/reason/html/resources
+exclude clean.sh .hgchurn
 recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE

diff -r 5acca4b24803da91accfcb8937aed96432fe7edd -r 62b9537e1aa9085d3ca3673f1bdd7dd7b6c61cd9 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -4,10 +4,9 @@
 from yt.analysis_modules.absorption_spectrum.absorption_line \
         import voigt
 
-
 def generate_total_fit(x, fluxData, orderFits, speciesDicts, 
-        minError=1E-5, complexLim=.999,
-        fitLim=.99, minLength=3, 
+        minError=1E-4, complexLim=.995,
+        fitLim=.97, minLength=3, 
         maxLength=1000, splitLim=.99,
         output_file=None):
 
@@ -90,6 +89,7 @@
     fluxData[0]=1
     fluxData[-1]=1
 
+
     #Find all regions where lines/groups of lines are present
     cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
             complexLim=complexLim, minLength=minLength,
@@ -111,6 +111,7 @@
             yDatBounded=fluxData[b[1]:b[2]]
             yFitBounded=yFit[b[1]:b[2]]
 
+
             #Find init redshift
             z=(xBounded[yDatBounded.argmin()]-initWl)/initWl
 
@@ -121,24 +122,33 @@
 
             #Fit Using complex tools
             newLinesP,flag=_complex_fit(xBounded,yDatBounded,yFitBounded,
-                    z,fitLim,minError*(b[2]-b[1]),speciesDict)
+                    z,fitLim,minError,speciesDict)
+
+            #If flagged as a bad fit while the species is Lyman alpha
+            #   and the line may be saturated, use special tools
+            if flag and species=='lya' and min(yDatBounded)<.1:
+               newLinesP=_large_flag_fit(xBounded,yDatBounded,
+                        yFitBounded,z,speciesDict,
+                        minSize,minError)
+
+            if na.size(newLinesP)> 0:
+
+                #Check the new lines for numerical instability
+                newLinesP = _check_numerical_instability(x, newLinesP, speciesDict,b)
+
 
             #Check existence of partner lines if applicable
             if len(speciesDict['wavelength']) != 1:
                 newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData, 
-                        b, minError*(b[2]-b[1]),
-                        x0, xRes, speciesDict)
+                        b, minError, x0, xRes, speciesDict)
 
-            #If flagged as a bad fit, species is lyman alpha,
-            #   and it may be a saturated line, use special tools
-            if flag and species=='lya' and min(yDatBounded)<.1:
-                newLinesP=_large_flag_fit(xBounded,yDatBounded,
-                        yFitBounded,z,speciesDict,
-                        minSize,minError*(b[2]-b[1]))
+
+
 
             #Adjust total current fit
             yFit=yFit*_gen_flux_lines(x,newLinesP,speciesDict)
 
+
             #Add new group to all fitted lines
             if na.size(newLinesP)>0:
                 speciesLines['N']=na.append(speciesLines['N'],newLinesP[:,0])
@@ -149,6 +159,7 @@
 
         allSpeciesLines[species]=speciesLines
 
+
     if output_file:
         _output_fit(allSpeciesLines, output_file)
 
@@ -205,10 +216,12 @@
     #Setup initial line guesses
     if initP==None: #Regular fit
         initP = [0,0,0] 
-        if min(yDat)<.5: #Large lines get larger initial guess 
-            initP[0] = 10**16
+        if min(yDat)<.01: #Large lines get larger initial guess 
+            initP[0] = speciesDict['init_N']*10**2
+        elif min(yDat)<.5:
+            initP[0] = speciesDict['init_N']*10**1
         elif min(yDat)>.9: #Small lines get smaller initial guess
-            initP[0] = 10**12.5
+            initP[0] = speciesDict['init_N']*10**-1
         else:
             initP[0] = speciesDict['init_N']
         initP[1] = speciesDict['init_b']
@@ -225,9 +238,16 @@
         return [],False
     
     #Values to proceed through first run
-    errSq,prevErrSq=1,1000
+    errSq,prevErrSq,prevLinesP=1,10*len(x),[]
 
+    if errBound == None:
+        errBound = len(yDat)*(max(1-yDat)*1E-2)**2
+    else:
+        errBound = errBound*len(yDat)
+
+    flag = False
     while True:
+
         #Initial parameter guess from joining parameters from all lines
         #   in lines into a single array
         initP = linesP.flatten()
@@ -237,6 +257,7 @@
                 args=(x,yDat,yFit,speciesDict),
                 epsfcn=1E-10,maxfev=1000)
 
+
         #Set results of optimization
         linesP = na.reshape(fitP,(-1,3))
 
@@ -247,17 +268,23 @@
         #Sum to get idea of goodness of fit
         errSq=sum(dif**2)
 
+        if any(linesP[:,1]==speciesDict['init_b']):
+         #   linesP = prevLinesP
+
+            flag = True
+            break
+            
         #If good enough, break
-        if errSq < errBound: 
+        if errSq < errBound:        
             break
 
         #If last fit was worse, reject the last line and revert to last fit
-        if errSq > prevErrSq*10:
+        if errSq > prevErrSq*10 :
             #If it's still quite bad, cut losses and try the flag fit tools
             if prevErrSq >1E2*errBound and speciesDict['name']=='HI lya':
                 return [],True
             else:
-                yNewFit=_gen_flux_lines(x,prevLinesP,speciesDict)
+                linesP = prevLinesP
                 break
 
         #If too many lines 
@@ -266,21 +293,26 @@
             if errSq >1E2*errBound and speciesDict['name']=='HI lya':
                 return [],True
             else:
-                break 
+                flag = True
+                break
 
         #Store previous data in case reject next fit
         prevErrSq = errSq
         prevLinesP = linesP
 
-
         #Set up initial condition for new line
         newP = [0,0,0] 
-        if min(dif)<.1:
-            newP[0]=10**12
-        elif min(dif)>.9:
-            newP[0]=10**16
+
+        yAdjusted = 1+yFit*yNewFit-yDat
+ 
+        if min(yAdjusted)<.01: #Large lines get larger initial guess 
+            newP[0] = speciesDict['init_N']*10**2
+        elif min(yAdjusted)<.5:
+            newP[0] = speciesDict['init_N']*10**1
+        elif min(yAdjusted)>.9: #Small lines get smaller initial guess
+            newP[0] = speciesDict['init_N']*10**-1
         else:
-            newP[0]=10**14
+            newP[0] = speciesDict['init_N']
         newP[1] = speciesDict['init_b']
         newP[2]=(x[dif.argmax()]-wl0)/wl0
         linesP=na.append(linesP,[newP],axis=0)
@@ -290,12 +322,12 @@
     #   acceptable range, as given in dict ref
     remove=[]
     for i,p in enumerate(linesP):
-        check=_check_params(na.array([p]),speciesDict)
+        check=_check_params(na.array([p]),speciesDict,x)
         if check: 
             remove.append(i)
     linesP = na.delete(linesP,remove,axis=0)
 
-    return linesP,False
+    return linesP,flag
 
 def _large_flag_fit(x, yDat, yFit, initz, speciesDict, minSize, errBound):
     """
@@ -489,6 +521,9 @@
     #List of lines to remove
     removeLines=[]
 
+    #Set error
+
+
     #Iterate through all sets of line parameters
     for i,p in enumerate(linesP):
 
@@ -501,16 +536,23 @@
             lb = _get_bounds(p[2],b,wl,x0,xRes)
             xb,yb=x[lb[0]:lb[1]],y[lb[0]:lb[1]]
 
+            if errBound == None:
+                errBound = 10*len(yb)*(max(1-yb)*1E-2)**2
+            else:
+                errBound = 10*errBound*len(yb)
+
             #Generate a fit and find the difference to data
             yFitb=_gen_flux_lines(xb,na.array([p]),speciesDict)
             dif =yb-yFitb
 
+
+
             #Only counts as an error if line is too big ---------------<
             dif = [k for k in dif if k>0]
             err = sum(dif)
 
             #If the fit is too bad then add the line to list of removed lines
-            if err > errBound*1E2:
+            if err > errBound:
                 removeLines.append(i)
                 break
 
@@ -640,21 +682,13 @@
         #Check if the region needs to be divided
         if b[2]-b[1]>maxLength:
 
-            #Find the minimum absorption in the middle two quartiles of
-            #   the large complex
-            q=(b[2]-b[1])/4
-            cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+            split = _split_region(yDat,b,splitLim)
 
-            #Only break it up if the minimum absorption is actually low enough
-            if yDat[cut]>splitLim:
-
-                #Get the new two peaks
-                b1Peak = yDat[b[1]:cut].argmin()+b[1]
-                b2Peak = yDat[cut:b[2]].argmin()+cut
+            if split:
 
                 #add the two regions separately
-                cBounds.insert(i+1,[b1Peak,b[1],cut])
-                cBounds.insert(i+2,[b2Peak,cut,b[2]])
+                cBounds.insert(i+1,split[0])
+                cBounds.insert(i+2,split[1])
 
                 #Remove the original region
                 cBounds.pop(i)
@@ -663,7 +697,33 @@
 
     return cBounds
 
-def _gen_flux_lines(x, linesP, speciesDict):
+
+def _split_region(yDat,b,splitLim):
+        #Find the minimum absorption in the middle two quartiles of
+    #   the large complex
+
+    q=(b[2]-b[1])/4
+    cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+
+    #Only break it up if the minimum absorption is actually low enough
+    if yDat[cut]>splitLim:
+
+        #Get the new two peaks
+        b1Peak = yDat[b[1]:cut].argmin()+b[1]
+        b2Peak = yDat[cut:b[2]].argmin()+cut
+
+        region_1 = [b1Peak,b[1],cut]
+        region_2 = [b2Peak,cut,b[2]]
+
+        return [region_1,region_2]
+
+    else:
+
+        return []
+
+
+
+def _gen_flux_lines(x, linesP, speciesDict,firstLine=False):
     """
     Calculates the normalized flux for a region of wavelength space
     generated by a set of absorption lines.
@@ -692,6 +752,9 @@
             g=speciesDict['Gamma'][i]
             wl=speciesDict['wavelength'][i]
             y = y+ _gen_tau(x,p,f,g,wl)
+            if firstLine: 
+                break
+
     flux = na.exp(-y)
     return flux
 
@@ -744,21 +807,25 @@
         the difference between the fit generated by the parameters
         given in pTotal multiplied by the previous fit and the desired
         flux profile, w/ first index modified appropriately for bad 
-        parameter choices
+        parameter choices and additional penalty for fitting with a lower
+        flux than observed.
     """
 
     pTotal.shape = (-1,3)
     yNewFit = _gen_flux_lines(x,pTotal,speciesDict)
 
     error = yDat-yFit*yNewFit
-    error[0] = _check_params(pTotal,speciesDict)
+    error_plus = (yDat-yFit*yNewFit).clip(min=0)
+
+    error = error+error_plus
+    error[0] = _check_params(pTotal,speciesDict,x)
 
     return error
 
-def _check_params(p, speciesDict):
+def _check_params(p, speciesDict,xb):
     """
     Check to see if any of the parameters in p fall outside the range 
-        given in speciesDict.
+        given in speciesDict or on the boundaries
 
     Parameters
     ----------
@@ -767,6 +834,8 @@
     speciesDict : dictionary
         dictionary with properties giving the max and min
         values appropriate for each parameter N,b, and z.
+    xb : (N) ndarray
+        wavelength array [nm]
 
     Returns
     -------
@@ -774,16 +843,137 @@
         0 if all values are fine
         999 if any values fall outside acceptable range
     """
+
+    minz = (xb[0])/speciesDict['wavelength'][0]-1
+    maxz = (xb[-1])/speciesDict['wavelength'][0]-1
+
     check = 0
-    if any(p[:,0] > speciesDict['maxN']) or\
-          any(p[:,0] < speciesDict['minN']) or\
-          any(p[:,1] > speciesDict['maxb']) or\
-          any(p[:,1] < speciesDict['minb']) or\
-          any(p[:,2] > speciesDict['maxz']) or\
-          any(p[:,2] < speciesDict['minz']):
+    if any(p[:,0] >= speciesDict['maxN']) or\
+          any(p[:,0] <= speciesDict['minN']) or\
+          any(p[:,1] >= speciesDict['maxb']) or\
+          any(p[:,1] <= speciesDict['minb']) or\
+          any(p[:,2] >= maxz) or\
+          any(p[:,2] <= minz):
               check = 999
+              
     return check
 
+def _check_optimization_init(p,speciesDict,initz,xb,yDat,yFit,minSize,errorBound):
+
+    """
+    Check to see if any of the parameters in p are the
+    same as initial parameters and if so, attempt to
+    split the region and refit it.
+
+    Parameters
+    ----------
+    p : (3,) ndarray
+        array with form [[N1, b1, z1], ...] 
+    speciesDict : dictionary
+        dictionary with properties giving the max and min
+        values appropriate for each parameter N,b, and z.
+    x : (N) ndarray
+        wavelength array [nm]
+    """
+
+    # Check if anything is a default parameter
+    if any(p[:,0] == speciesDict['init_N']) or\
+          any(p[:,0] == speciesDict['init_N']*10) or\
+          any(p[:,0] == speciesDict['init_N']*100) or\
+          any(p[:,0] == speciesDict['init_N']*.1) or\
+          any(p[:,1] == speciesDict['init_b']) or\
+          any(p[:,1] == speciesDict['maxb']):
+
+            # These are the initial bounds
+            init_bounds = [yDat.argmin(),0,len(xb)-1]
+
+            # Generous limit for splitting the region
+            newSplitLim = 1 - (1-min(yDat))*.5
+
+            # Attempt to split region
+            split = _split_region(yDat,init_bounds,newSplitLim)
+            
+            # If we can't split it, just reject it. It's unphysical
+            # to just keep the default parameters and we're out of
+            # options at this point
+            if not split:
+                return []
+
+            # Else set up the bounds for each region and fit separately
+            b1,b2 = split[0][2], split[1][1]
+
+            p1,flag = _complex_fit(xb[:b1], yDat[:b1], yFit[:b1],
+                            initz, minSize, errorBound, speciesDict)
+
+            p2,flag = _complex_fit(xb[b2:], yDat[b2:], yFit[b2:],
+                            initz, minSize, errorBound, speciesDict)
+
+            # Make the final line parameters. It's annoying because
+            # one or both regions may have fit to nothing
+            if na.size(p1)> 0 and na.size(p2)>0:
+                p = na.r_[p1,p2]
+            elif na.size(p1) > 0:
+                p = p1
+            else:
+                p = p2
+
+    return p
+
+
+def _check_numerical_instability(x, p, speciesDict,b):
+
+    """
+    Check to see if any of the parameters in p are causing
+    unstable numerical effects outside the region of fit
+
+    Parameters
+    ----------
+    p : (3,) ndarray
+        array with form [[N1, b1, z1], ...] 
+    speciesDict : dictionary
+        dictionary with properties giving the max and min
+        values appropriate for each parameter N,b, and z.
+    x : (N) ndarray
+        wavelength array [nm]
+    b : (3) list
+        list of integers indicating bounds of region fit in x
+    """
+
+    remove_lines = []
+
+
+    for i,line in enumerate(p):
+
+        # First, check if the line is at risk of instability
+        if line[1]<5 or line[0] < 1E12:
+
+
+            # get all flux that isn't part of fit plus a little wiggle room
+            # max and min to prevent boundary errors
+
+            flux = _gen_flux_lines(x,[line],speciesDict,firstLine=True)
+            flux = na.r_[flux[:max(b[1]-10,0)], flux[min(b[2]+10,len(x)):]]
+
+            #Find regions that are absorbing outside the region we fit
+            flux_dif = 1 - flux
+            absorbing_coefficient = max(abs(flux_dif))
+
+
+            #Really there shouldn't be any absorption outside
+            #the region we fit, but we'll give some leeway.
+            #For high-resolution spectra the tiny bits on the edges
+            #can give a non-negligible amount of flux, and the errors
+            #we are looking for are huge.
+            if absorbing_coefficient > .1:
+
+                # We just drop the line because we've tried everything
+                # else at this point and this region cannot be fit reliably.
+                remove_lines.append(i)
+    
+    if remove_lines:
+        p = na.delete(p, remove_lines, axis=0)
+
+    return p
 
 def _output_fit(lineDic, file_name = 'spectrum_fit.h5'):
     """
@@ -815,4 +1005,5 @@
         f.create_dataset("{0}/z".format(ion),data=params['z'])
         f.create_dataset("{0}/complex".format(ion),data=params['group#'])
     print 'Writing spectrum fit to {0}'.format(file_name)
+    f.close()
 

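For orientation, a minimal sketch of driving the retuned fitter. The
speciesDicts keys are inferred from the code above; the values are
placeholders, not recommended physics, and a real dict may need more entries:

    import numpy as np
    from yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit \
        import generate_total_fit

    # Toy spectrum: wavelength grid and flat normalized flux (illustrative)
    x = np.linspace(1200., 1400., 20000)
    fluxData = np.ones_like(x)

    speciesDicts = {'lya': {'name': 'HI lya',
                            'wavelength': [1215.67],
                            'f': [0.4164], 'Gamma': [6.265e8],
                            'init_N': 1e14, 'init_b': 20.,
                            'maxN': 1e22, 'minN': 1e11,
                            'maxb': 300., 'minb': 1.}}
    orderFits = ['lya']

    # minError is now scaled by len(yDat) internally, i.e. a per-pixel bound
    allSpeciesLines, yFit = generate_total_fit(
        x, fluxData, orderFits, speciesDicts,
        minError=1e-4, output_file='spectrum_fit.h5')
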
diff -r 5acca4b24803da91accfcb8937aed96432fe7edd -r 62b9537e1aa9085d3ca3673f1bdd7dd7b6c61cd9 yt/analysis_modules/halo_finding/setup.py
--- a/yt/analysis_modules/halo_finding/setup.py
+++ b/yt/analysis_modules/halo_finding/setup.py
@@ -1,9 +1,7 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
 import os.path
 
+
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('halo_finding', parent_package, top_path)
@@ -12,6 +10,5 @@
     config.add_subpackage("parallel_hop")
     if os.path.exists("rockstar.cfg"):
         config.add_subpackage("rockstar")
-    config.make_config_py() # installs __config__.py
-    #config.make_svn_version_py()
+    config.make_config_py()  # installs __config__.py
     return config

diff -r 5acca4b24803da91accfcb8937aed96432fe7edd -r 62b9537e1aa9085d3ca3673f1bdd7dd7b6c61cd9 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -27,6 +27,7 @@
      cm_per_km, erg_per_keV
 from yt.utilities.cosmology import Cosmology
 from yt.utilities.orientation import Orientation
+from yt.utilities.definitions import mpc_conversion
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only, get_mpi_type, \
      op_names, parallel_capable
@@ -424,7 +425,8 @@
     def project_photons(self, L, area_new=None, exp_time_new=None, 
                         redshift_new=None, dist_new=None,
                         absorb_model=None, psf_sigma=None,
-                        sky_center=None, responses=None):
+                        sky_center=None, responses=None,
+                        convolve_energies=False):
         r"""
         Projects photons onto an image plane given a line of sight.
 
@@ -452,8 +454,10 @@
         sky_center : array_like, optional
             Center RA, Dec of the events in degrees.
         responses : list of strings, optional
-            The names of the ARF and RMF files to convolve the photons with.
-
+            The names of the ARF and/or RMF files to convolve the photons with.
+        convolve_energies : boolean, optional
+            If this is set, the photon energies will be convolved with the RMF.
+            
         Examples
         --------
         >>> L = np.array([0.1,-0.2,0.3])
@@ -495,8 +499,10 @@
         parameters = {}
         
         if responses is not None:
+            responses = ensure_list(responses)
             parameters["ARF"] = responses[0]
-            parameters["RMF"] = responses[1]
+            if len(responses) == 2:
+                parameters["RMF"] = responses[1]
             area_new = parameters["ARF"]
             
         if (exp_time_new is None and area_new is None and
@@ -518,8 +524,13 @@
                 elo = f["SPECRESP"].data.field("ENERG_LO")
                 ehi = f["SPECRESP"].data.field("ENERG_HI")
                 eff_area = np.nan_to_num(f["SPECRESP"].data.field("SPECRESP"))
-                weights = self._normalize_arf(parameters["RMF"])
-                eff_area *= weights
+                if "RMF" in parameters:
+                    weights = self._normalize_arf(parameters["RMF"])
+                    eff_area *= weights
+                else:
+                    mylog.warning("You specified an ARF but not an RMF. This is ok if the "+
+                                  "responses are normalized properly. If not, you may "+
+                                  "get inconsistent results.")
                 f.close()
                 Aratio = eff_area.max()/self.parameters["FiducialArea"]
             else:
@@ -618,7 +629,7 @@
             
         if comm.rank == 0: mylog.info("Total number of observed photons: %d" % (num_events))
 
-        if responses is not None:
+        if "RMF" in parameters and convolve_energies:
             events, info = self._convolve_with_rmf(parameters["RMF"], events)
             for k, v in info.items(): parameters[k] = v
                 

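A usage sketch of the relaxed response handling; the photons object (a
PhotonList built earlier) and the file names are placeholders:

    import numpy as np

    L = np.array([0.1, -0.2, 0.3])   # line of sight

    # ARF only is now accepted; a warning reminds you the responses must
    # already be normalized consistently.
    events = photons.project_photons(L, responses=["sim_arf.fits"])

    # ARF + RMF, with energy convolution explicitly requested:
    events = photons.project_photons(L,
                                     responses=["sim_arf.fits", "sim_rmf.fits"],
                                     convolve_energies=True)
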
diff -r 5acca4b24803da91accfcb8937aed96432fe7edd -r 62b9537e1aa9085d3ca3673f1bdd7dd7b6c61cd9 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -16,6 +16,12 @@
 from yt.funcs import *
 from yt import units
 import h5py
+
+try:
+    import xspec
+except ImportError:
+    pass
+
 try:
     import xspec
     from scipy.integrate import cumtrapz

diff -r 5acca4b24803da91accfcb8937aed96432fe7edd -r 62b9537e1aa9085d3ca3673f1bdd7dd7b6c61cd9 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -182,10 +182,14 @@
         LE   = self.domain_left_edge
         RE   = self.domain_right_edge
 
+        # Radmc3D wants the cell wall positions in cgs. Convert here:
+        LE_cgs = LE * self.pf.units['cm']
+        RE_cgs = RE * self.pf.units['cm']
+
         # calculate cell wall positions
-        xs = [str(x) for x in np.linspace(LE[0], RE[0], dims[0]+1)]
-        ys = [str(y) for y in np.linspace(LE[1], RE[1], dims[1]+1)]
-        zs = [str(z) for z in np.linspace(LE[2], RE[2], dims[2]+1)]
+        xs = [str(x) for x in np.linspace(LE_cgs[0], RE_cgs[0], dims[0]+1)]
+        ys = [str(y) for y in np.linspace(LE_cgs[1], RE_cgs[1], dims[1]+1)]
+        zs = [str(z) for z in np.linspace(LE_cgs[2], RE_cgs[2], dims[2]+1)]
 
         # write the file header
         grid_file = open(self.grid_filename, 'w')

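The point of the change is that RadMC-3D expects wall positions in cm; the
same conversion in isolation, with an invented code unit:

    import numpy as np

    cm_per_code_unit = 3.0857e21      # e.g. pf.units['cm'] for kpc code units
    LE, RE = np.zeros(3), np.ones(3)  # domain edges in code units
    dims = np.array([64, 64, 64])

    LE_cgs, RE_cgs = LE * cm_per_code_unit, RE * cm_per_code_unit
    xs = np.linspace(LE_cgs[0], RE_cgs[0], dims[0] + 1)  # cell walls in cm
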
diff -r 5acca4b24803da91accfcb8937aed96432fe7edd -r 62b9537e1aa9085d3ca3673f1bdd7dd7b6c61cd9 yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -1,11 +1,9 @@
 #!/usr/bin/env python
-import setuptools
 
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('analysis_modules', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
     config.add_subpackage("absorption_spectrum")
     config.add_subpackage("coordinate_transformation")
     config.add_subpackage("cosmological_observation")
@@ -14,10 +12,15 @@
     config.add_subpackage("halo_merger_tree")
     config.add_subpackage("halo_profiler")
     config.add_subpackage("level_sets")
+    config.add_subpackage("particle_trajectories")
+    config.add_subpackage("photon_simulator")
     config.add_subpackage("radial_column_density")
     config.add_subpackage("spectral_integrator")
     config.add_subpackage("star_analysis")
     config.add_subpackage("two_point_functions")
     config.add_subpackage("radmc3d_export")
-    config.add_subpackage("sunyaev_zeldovich")    
+    config.add_subpackage("sunrise_export")
+    config.add_subpackage("sunyaev_zeldovich")
+    config.add_subpackage("particle_trajectories")
+    config.add_subpackage("photon_simulator")
     return config

diff -r 5acca4b24803da91accfcb8937aed96432fe7edd -r 62b9537e1aa9085d3ca3673f1bdd7dd7b6c61cd9 yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -44,7 +44,7 @@
     star_creation_time : Ordered array or list of floats
         The creation time for the stars in code units.
     volume : Float
-        The volume of the region for the specified list of stars.
+        The comoving volume of the region for the specified list of stars.
     bins : Integer
         The number of time bins used for binning the stars. Default = 300.
     
@@ -72,7 +72,7 @@
                 If data_source is not provided, all of these paramters need to be set:
                 star_mass (array, Msun),
                 star_creation_time (array, code units),
-                volume (float, Mpc**3).
+                volume (float, cMpc**3).
                 """)
                 return None
             self.mode = 'provided'
@@ -130,15 +130,15 @@
         """
         if self.mode == 'data_source':
             try:
-                vol = self._data_source.volume('mpc')
+                vol = self._data_source.volume('mpccm')
             except AttributeError:
                 # If we're here, this is probably a HOPHalo object, and we
                 # can get the volume this way.
                 ds = self._data_source.get_sphere()
-                vol = ds.volume('mpc')
+                vol = ds.volume('mpccm')
         elif self.mode == 'provided':
-            vol = self.volume
-        tc = self._pf["Time"] #time to seconds?
+            vol = self.volume
+        tc = self._pf["Time"]
         self.time = []
         self.lookback_time = []
         self.redshift = []
@@ -153,6 +153,7 @@
             self.redshift.append(self.cosm.z_from_t(time * tc))
             self.Msol_yr.append(self.mass_bins[i] / \
                 (self.time_bins_dt[i] * tc / YEAR))
+            # changed vol from mpc to mpccm used in literature
             self.Msol_yr_vol.append(self.mass_bins[i] / \
                 (self.time_bins_dt[i] * tc / YEAR) / vol)
             self.Msol.append(self.mass_bins[i])

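The change only swaps proper Mpc**3 for comoving cMpc**3 in the denominator
of the volumetric rate; a toy check of the arithmetic, numbers invented:

    mass_formed = 1.0e8    # Msun formed in one time bin
    dt_yr = 1.0e7          # bin width in years
    vol_cMpc3 = 1.0e3      # region volume in comoving Mpc**3

    Msol_yr_vol = mass_formed / dt_yr / vol_cMpc3
    print Msol_yr_vol      # 0.01 Msun/yr/cMpc**3
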
diff -r 5acca4b24803da91accfcb8937aed96432fe7edd -r 62b9537e1aa9085d3ca3673f1bdd7dd7b6c61cd9 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -343,7 +343,7 @@
         self.domain_right_edge = np.array(
             [self.parameters["%smax" % ax] for ax in 'xyz']).astype("float64")
         if self.dimensionality < 3:
-            for d in (dimensionality)+range(3-dimensionality):
+            for d in [dimensionality]+range(3-dimensionality):
                 if self.domain_left_edge[d] == self.domain_right_edge[d]:
                     mylog.warning('Identical domain left edge and right edges '
                                   'along dummy dimension (%i), attempting to read anyway' % d)

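The one-character fix works because (d) is just an int in parentheses, while
[d] is a list that concatenates with the range list; in Python 2:

    dimensionality = 2
    # (dimensionality) + range(1) raises
    # TypeError: unsupported operand type(s) for +: 'int' and 'list'
    dims = [dimensionality] + range(3 - dimensionality)
    print dims   # [2, 0]
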
diff -r 5acca4b24803da91accfcb8937aed96432fe7edd -r 62b9537e1aa9085d3ca3673f1bdd7dd7b6c61cd9 yt/geometry/object_finding_mixin.py
--- a/yt/geometry/object_finding_mixin.py
+++ b/yt/geometry/object_finding_mixin.py
@@ -15,6 +15,7 @@
 
 import numpy as np
 
+from yt.config import ytcfg
 from yt.funcs import *
 from yt.utilities.lib.misc_utilities import \
     get_box_grids_level, \
@@ -56,7 +57,19 @@
 
     def find_max_cell_location(self, field, finest_levels = 3):
         if finest_levels is not False:
-            gi = (self.grid_levels >= self.max_level - finest_levels).ravel()
+            # This prevents bad values for the case that the number of grids to
+            # search is smaller than the number of processors being applied to
+            # the task, by expanding the search to coarser levels until enough
+            # grids are found.
+            nproc = ytcfg.getint("yt", "__topcomm_parallel_size")
+            while 1:
+                gi = (self.grid_levels >= self.max_level - finest_levels).ravel()
+                if gi.sum() >= nproc:
+                    break
+                elif finest_levels >= self.max_level:
+                    raise YTTooParallel
+                else:
+                    finest_levels += 1
+                
             source = self.grid_collection([0.0]*3, self.grids[gi])
         else:
             source = self.all_data()

diff -r 5acca4b24803da91accfcb8937aed96432fe7edd -r 62b9537e1aa9085d3ca3673f1bdd7dd7b6c61cd9 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1037,7 +1037,7 @@
         mpd.upload()
 
 class YTInstInfoCmd(YTCommand):
-    name = "instinfo"
+    name = ["instinfo", "version"]
     args = (
             dict(short="-u", long="--update-source", action="store_true",
                  default = False,
@@ -1055,6 +1055,7 @@
 
     def __call__(self, opts):
         import pkg_resources
+        import yt
         yt_provider = pkg_resources.get_provider("yt")
         path = os.path.dirname(yt_provider.module_path)
         print
@@ -1071,10 +1072,11 @@
         vstring = get_yt_version()
         if vstring is not None:
             print
-            print "The current version of the code is:"
+            print "The current version and changeset for the code is:"
             print
             print "---"
-            print vstring.strip()
+            print "Version = %s" % yt.__version__
+            print "Changeset = %s" % vstring.strip()
             print "---"
             print
             if "site-packages" not in path:
@@ -1605,6 +1607,7 @@
 
     def __call__(self, opts):
         import pkg_resources
+        import yt
         yt_provider = pkg_resources.get_provider("yt")
         path = os.path.dirname(yt_provider.module_path)
         print
@@ -1622,10 +1625,11 @@
         if "site-packages" not in path:
             vstring = get_hg_version(path)
             print
-            print "The current version of the code is:"
+            print "The current version and changeset for the code is:"
             print
             print "---"
-            print vstring.strip()
+            print "Version = %s" % yt.__version__
+            print "Changeset = %s" % vstring.strip()
             print "---"
             print
             print "This installation CAN be automatically updated."

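With the alias in place, `yt version` behaves like `yt instinfo`; an
illustrative invocation (output values are placeholders, format per the
code above):

    $ yt version
    ...
    The current version and changeset for the code is:

    ---
    Version = 3.0
    Changeset = 3d28859d2cec
    ---
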
diff -r 5acca4b24803da91accfcb8937aed96432fe7edd -r 62b9537e1aa9085d3ca3673f1bdd7dd7b6c61cd9 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -352,6 +352,10 @@
 class YTEmptyProfileData(Exception):
     pass
 
+class YTTooParallel(YTException):
+    def __str__(self):
+        return "You've used too many processors for this dataset."
+
 class YTDuplicateFieldInProfile(Exception):
     def __init__(self, field, new_spec, old_spec):
         self.field = field

diff -r 5acca4b24803da91accfcb8937aed96432fe7edd -r 62b9537e1aa9085d3ca3673f1bdd7dd7b6c61cd9 yt/utilities/lib/field_interpolation_tables.pxd
--- a/yt/utilities/lib/field_interpolation_tables.pxd
+++ b/yt/utilities/lib/field_interpolation_tables.pxd
@@ -33,6 +33,7 @@
 
 cdef extern from "math.h": 
     double expf(double x) nogil 
+    int isnormal(double x) nogil
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -58,6 +59,7 @@
     cdef np.float64_t bv, dy, dd, tf, rv
     cdef int bin_id
     if dvs[fit.field_id] >= fit.bounds[1] or dvs[fit.field_id] <= fit.bounds[0]: return 0.0
+    if not isnormal(dvs[fit.field_id]): return 0.0
     bin_id = <int> ((dvs[fit.field_id] - fit.bounds[0]) * fit.idbin)
     bin_id = iclip(bin_id, 0, fit.nbins-2)
     dd = dvs[fit.field_id] - (fit.bounds[0] + bin_id * fit.dbin) # x - x0

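Note that C99 isnormal() is false not only for NaN and infinities but also
for zero and subnormals, so those samples now return 0.0 as well. A rough
Python analogue of the guard (an approximation, not the Cython code):

    import math

    def sample_is_usable(value):
        # mimics "if not isnormal(value): return 0.0"; subnormal values
        # are not caught here, unlike the C version
        if value == 0.0 or math.isnan(value) or math.isinf(value):
            return False
        return True
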
diff -r 5acca4b24803da91accfcb8937aed96432fe7edd -r 62b9537e1aa9085d3ca3673f1bdd7dd7b6c61cd9 yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -15,6 +15,7 @@
 import pyx
 import numpy as np
 from matplotlib import cm
+import matplotlib.pyplot as plt
 from _mpl_imports import FigureCanvasAgg
 
 from yt.utilities.logger import ytLogger as mylog
@@ -27,6 +28,7 @@
 from .profile_plotter import PhasePlot
 from .plot_modifications import get_smallest_appropriate_unit
 
+
 class DualEPS(object):
     def __init__(self, figsize=(12,12)):
         r"""Initializes the DualEPS class to which we can progressively add layers
@@ -335,6 +337,26 @@
                     _ylabel = plot[k].axes.get_ylabel()
             if tickcolor == None:
                 _tickcolor = None
+        elif isinstance(plot, np.ndarray):
+            ax = plt.gca()
+            _xrange = ax.get_xlim()
+            _yrange = ax.get_ylim()
+            _xlog=False
+            _ylog=False
+            if bare_axes:
+                _xlabel = ""
+                _ylabel = ""
+            else:
+                if xlabel != None:
+                    _xlabel = xlabel
+                else:
+                    _xlabel = ax.get_xlabel()
+                if ylabel != None:
+                    _ylabel = ylabel
+                else:
+                    _ylabel = ax.get_ylabel()
+            if tickcolor == None:
+                _tickcolor = None
         else:
             _xrange = plot._axes.get_xlim()
             _yrange = plot._axes.get_ylim()
@@ -447,6 +469,13 @@
             # hack to account for non-square display ratios (not sure why)
             if isinstance(plot, PlotWindow):
                 shift = 12.0 / 340
+        elif isinstance(plot, np.ndarray):
+            fig = plt.figure()
+            iplot = plt.figimage(plot)
+            _p1 =  iplot.figure
+            _p1.set_size_inches(self.figsize[0], self.figsize[1])
+            ax = plt.gca();
+            _p1.add_axes(ax)
         else:
             raise RuntimeError("Unknown plot type")
 
@@ -527,6 +556,12 @@
 
         # Scale the colorbar
         shift = (0.5*(1.0-shrink[0])*size[0], 0.5*(1.0-shrink[1])*size[1])
+        # To facilitate stretching rather than shrinking.
+        # If stretched in both directions (rarely sensible), y dominates.
+        if(shrink[0] > 1.0):
+            shift = (0.05*self.figsize[0], 0.5*(1.0-shrink[1])*size[1])
+        if(shrink[1] > 1.0):
+            shift = (0.5*(1.0-shrink[0])*size[0], 0.05*self.figsize[1])
         size = (size[0] * shrink[0], size[1] * shrink[1])
         origin = (origin[0] + shift[0], origin[1] + shift[1])
 
@@ -681,6 +716,59 @@
 
 #=============================================================================
 
+    def arrow(self, size=0.2, label="", loc=(0.05,0.08), labelloc="top",
+              color=pyx.color.cmyk.white,
+              linewidth=pyx.style.linewidth.normal):
+        r"""Draws an arrow in the current figure
+
+        Parameters
+        ----------
+        size : float
+            Length of arrow (base to tip) in units of the figure size.
+        label : string
+            Annotation label of the arrow.
+        loc : tuple of floats
+            Location of the left hand side of the arrow in units of
+            the figure size.
+        labelloc : string
+            Location of the label with respect to the line.  Can be
+            "top" or "bottom"
+        color : `pyx.color.*.*`
+            Color of the arrow.  Example: pyx.color.cymk.white
+        linewidth : `pyx.style.linewidth.*`
+            Width of the arrow.  Example: pyx.style.linewidth.normal
+
+        Examples
+        --------
+        >>> d = DualEPS()
+        >>> d.axis_box(xrange=(0,100), yrange=(1e-3,1), ylog=True)
+        >>> d.insert_image("arrow_image.jpg")
+        >>> d.arrow(size=0.2, label="Black Hole!", loc=(0.05, 0.1))
+        >>> d.save_fig()
+        """
+        line = pyx.path.line(self.figsize[0]*loc[0],
+                             self.figsize[1]*loc[1],
+                             self.figsize[0]*(loc[0]+size),
+                             self.figsize[1]*loc[1])
+        self.canvas.stroke(line, [linewidth, color, pyx.deco.earrow()])
+       
+
+        if labelloc == "bottom":
+            yoff = -0.1*size
+            valign = pyx.text.valign.top
+        else:
+            yoff = +0.1*size
+            valign = pyx.text.valign.bottom
+        if label != "":
+            self.canvas.text(self.figsize[0]*(loc[0]+0.5*size),
+                             self.figsize[1]*(loc[1]+yoff), label,
+                             [color, valign, pyx.text.halign.center])
+
+        
+
+
+#=============================================================================
+
     def scale_line(self, size=0.2, label="", loc=(0.05,0.08), labelloc="top",
                    color=pyx.color.cmyk.white,
                    linewidth=pyx.style.linewidth.normal):
@@ -711,6 +799,7 @@
         >>> d.scale_line(size=0.2, label="1 kpc", loc=(0.05, 0.1))
         >>> d.save_fig()
         """
+        
         line = pyx.path.line(self.figsize[0]*loc[0],
                              self.figsize[1]*loc[1],
                              self.figsize[0]*(loc[0]+size),
@@ -781,7 +870,7 @@
         
 #=============================================================================
 
-    def save_fig(self, filename="test", format="eps"):
+    def save_fig(self, filename="test", format="eps", resolution=250):
         r"""Saves current figure to a file.
 
         Parameters
@@ -801,6 +890,10 @@
             self.canvas.writeEPSfile(filename)
         elif format == "pdf":
             self.canvas.writePDFfile(filename)
+        elif format == "png":
+             self.canvas.writeGSfile(filename+".png", "png16m", resolution=resolution)
+        elif format == "jpg":
+             self.canvas.writeGSfile(filename+".jpeg", "jpeg", resolution=resolution)
         else:
             raise RuntimeError("format %s unknown." % (format))
             
@@ -924,7 +1017,8 @@
     d = DualEPS(figsize=figsize)
     count = 0
     for j in range(nrow):
-        ypos = j*(figsize[1] + margins[1])
+        invj = nrow - j - 1
+        ypos = invj*(figsize[1] + margins[1])
         for i in range(ncol):
             xpos = i*(figsize[0] + margins[0])
             index = j*ncol + i
@@ -990,7 +1084,8 @@
             100.0 * d.canvas.bbox().bottom().t,
             100.0 * d.canvas.bbox().top().t - d.figsize[1])
     for j in range(nrow):
-        ypos0 = j*(figsize[1] + margins[1])
+        invj = nrow - j - 1
+        ypos0 = invj*(figsize[1] + margins[1])
         for i in range(ncol):
             xpos0 = i*(figsize[0] + margins[0])
             index = j*ncol + i

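Pulling the new pieces together, a sketch built from the docstring examples
above (the image file is a placeholder):

    from yt.visualization.eps_writer import DualEPS

    d = DualEPS()
    d.axis_box(xrange=(0, 100), yrange=(1e-3, 1), ylog=True)
    d.insert_image("arrow_image.jpg")     # placeholder image
    d.arrow(size=0.2, label="Black Hole!", loc=(0.05, 0.1))
    # new in this changeset: raster output through Ghostscript
    d.save_fig("test", format="png", resolution=250)
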
diff -r 5acca4b24803da91accfcb8937aed96432fe7edd -r 62b9537e1aa9085d3ca3673f1bdd7dd7b6c61cd9 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -7,6 +7,7 @@
 from functools import wraps
 from matplotlib.font_manager import FontProperties
 
+from ._mpl_imports import FigureCanvasAgg
 from .tick_locators import LogLocator, LinearLocator
 from .color_maps import yt_colormaps, is_colormap
 from .plot_modifications import \

diff -r 5acca4b24803da91accfcb8937aed96432fe7edd -r 62b9537e1aa9085d3ca3673f1bdd7dd7b6c61cd9 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1302,8 +1302,6 @@
         self.set_axes_unit(axes_unit)
 
     def _recreate_frb(self):
-        if self._frb is not None:
-            raise NotImplementedError
         super(OffAxisProjectionPlot, self)._recreate_frb()
 
 _metadata_template = """

diff -r 5acca4b24803da91accfcb8937aed96432fe7edd -r 62b9537e1aa9085d3ca3673f1bdd7dd7b6c61cd9 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -718,6 +718,7 @@
                 label.set_fontproperties(fp)
                 if self._font_color is not None:
                     label.set_color(self._font_color)
+        self._plot_valid = True
 
         self._plot_valid = True
 

diff -r 5acca4b24803da91accfcb8937aed96432fe7edd -r 62b9537e1aa9085d3ca3673f1bdd7dd7b6c61cd9 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -620,7 +620,7 @@
         --------
 
         >>> tf = ColorTransferFunction( (-10.0, -5.0) )
-        >>> tf.sample_colormap(-7.0, 0.01, 'algae')
+        >>> tf.sample_colormap(-7.0, 0.01, colormap='algae')
         """
         if col_bounds is None:
             rel = (v - self.x_bounds[0])/(self.x_bounds[1] - self.x_bounds[0])


https://bitbucket.org/yt_analysis/yt/commits/2ec0978bafb8/
Changeset:   2ec0978bafb8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-16 21:10:57
Summary:     Removing duplicate line.
Affected #:  1 file

diff -r 62b9537e1aa9085d3ca3673f1bdd7dd7b6c61cd9 -r 2ec0978bafb855f2bc19692b6985ba4a6dd36b10 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -720,8 +720,6 @@
                     label.set_color(self._font_color)
         self._plot_valid = True
 
-        self._plot_valid = True
-
     def save(self, name=None, mpl_kwargs=None):
         r"""
         Saves a 2d profile plot.


https://bitbucket.org/yt_analysis/yt/commits/3d28859d2cec/
Changeset:   3d28859d2cec
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-16 21:11:11
Summary:     Merging in pull request 823
Affected #:  5 files

diff -r 2ec0978bafb855f2bc19692b6985ba4a6dd36b10 -r 3d28859d2cec38a349e7335fc04e62978ffcc1a8 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -129,7 +129,7 @@
         """
         if self.CoM is not None:
             return self.CoM
-        pm = self["ParticleMassMsun"]
+        pm = self["particle_mass"].in_units('Msun')
         c = {}
         # We shift into a box where the origin is the left edge
         c[0] = self["particle_position_x"] - self.pf.domain_left_edge[0]
@@ -199,7 +199,7 @@
         """
         if self.group_total_mass is not None:
             return self.group_total_mass
-        return self["ParticleMassMsun"].sum()
+        return self["particle_mass"].in_units('Msun').sum()
 
     def bulk_velocity(self):
         r"""Returns the mass-weighted average velocity in cm/s.
@@ -213,7 +213,7 @@
         """
         if self.bulk_vel is not None:
             return self.bulk_vel
-        pm = self["ParticleMassMsun"]
+        pm = self["particle_mass"].in_units('Msun')
         vx = (self["particle_velocity_x"] * pm).sum()
         vy = (self["particle_velocity_y"] * pm).sum()
         vz = (self["particle_velocity_z"] * pm).sum()
@@ -234,7 +234,7 @@
         if self.rms_vel is not None:
             return self.rms_vel
         bv = self.bulk_velocity()
-        pm = self["ParticleMassMsun"]
+        pm = self["particle_mass"].in_units('Msun')
         sm = pm.sum()
         vx = (self["particle_velocity_x"] - bv[0]) * pm / sm
         vy = (self["particle_velocity_y"] - bv[1]) * pm / sm
@@ -331,7 +331,7 @@
         handle.create_group("/%s" % gn)
         for field in ["particle_position_%s" % ax for ax in 'xyz'] \
                    + ["particle_velocity_%s" % ax for ax in 'xyz'] \
-                   + ["particle_index"] + ["ParticleMassMsun"]:
+                   + ["particle_index"] + ["particle_mass"]:
             handle.create_dataset("/%s/%s" % (gn, field), data=self[field])
         if 'creation_time' in self.data.pf.field_list:
             handle.create_dataset("/%s/creation_time" % gn,
@@ -464,7 +464,7 @@
         if self["particle_position_x"].size > 1:
             for index in np.unique(inds):
                 self.mass_bins[index] += \
-                np.sum(self["ParticleMassMsun"][inds == index])
+                np.sum(self["particle_mass"][inds == index]).in_units('Msun')
         # Now forward sum the masses in the bins.
         for i in xrange(self.bin_count):
             self.mass_bins[i + 1] += self.mass_bins[i]
@@ -750,7 +750,7 @@
             inds = np.digitize(dist, self.radial_bins) - 1
             for index in np.unique(inds):
                 self.mass_bins[index] += \
-                    np.sum(self["ParticleMassMsun"][inds == index])
+                    np.sum(self["particle_mass"][inds == index]).in_units('Msun')
             # Now forward sum the masses in the bins.
             for i in xrange(self.bin_count):
                 self.mass_bins[i + 1] += self.mass_bins[i]
@@ -1356,7 +1356,7 @@
     _name = "HOP"
     _halo_class = HOPHalo
     _fields = ["particle_position_%s" % ax for ax in 'xyz'] + \
-              ["ParticleMassMsun"]
+              ["particle_mass"]
 
     def __init__(self, data_source, threshold=160.0, dm_only=True):
         self.threshold = threshold
@@ -1368,7 +1368,7 @@
             RunHOP(self.particle_fields["particle_position_x"] / self.period[0],
                 self.particle_fields["particle_position_y"] / self.period[1],
                 self.particle_fields["particle_position_z"] / self.period[2],
-                self.particle_fields["ParticleMassMsun"],
+                self.particle_fields["particle_mass"].in_units('Msun'),
                 self.threshold)
         self.particle_fields["densities"] = self.densities
         self.particle_fields["tags"] = self.tags
@@ -1555,7 +1555,7 @@
     _name = "parallelHOP"
     _halo_class = parallelHOPHalo
     _fields = ["particle_position_%s" % ax for ax in 'xyz'] + \
-              ["ParticleMassMsun", "particle_index"]
+              ["particle_mass", "particle_index"]
 
     def __init__(self, data_source, padding, num_neighbors, bounds, total_mass,
         period, threshold=160.0, dm_only=True, rearrange=True, premerge=True,
@@ -1589,8 +1589,8 @@
 
         self.comm.mpi_exit_test(exit)
         # Try to do this in a memory conservative way.
-        np.divide(self.particle_fields['ParticleMassMsun'], self.total_mass,
-            self.particle_fields['ParticleMassMsun'])
+        np.divide(self.particle_fields['particle_mass'].in_units('Msun'), self.total_mass,
+            self.particle_fields['particle_mass'])
         np.divide(self.particle_fields["particle_position_x"],
             self.old_period[0], self.particle_fields["particle_position_x"])
         np.divide(self.particle_fields["particle_position_y"],
@@ -2190,7 +2190,7 @@
         # Now we get the full box mass after we have the final composition of
         # subvolumes.
         if total_mass is None:
-            total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(),
+            total_mass = self.comm.mpi_allreduce((self._data_source["particle_mass"].in_units('Msun').astype('float64')).sum(),
                                                  op='sum')
         if not self._distributed:
             self.padding = (np.zeros(3, dtype='float64'),
@@ -2386,9 +2386,9 @@
             if dm_only:
                 select = self._get_dm_indices()
                 total_mass = \
-                    self.comm.mpi_allreduce((self._data_source['all', "ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
+                    self.comm.mpi_allreduce((self._data_source['all', "particle_mass"][select].in_units('Msun')).sum(dtype='float64'), op='sum')
             else:
-                total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("ParticleMassMsun")[0], op='sum')
+                total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("particle_mass")[0].in_units('Msun'), op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles
         # are all on different processors, we should instead construct an
         # object representing the entire domain and sum it "lazily" with
@@ -2409,10 +2409,10 @@
             sub_mass = total_mass
         elif dm_only:
             select = self._get_dm_indices()
-            sub_mass = self._data_source["ParticleMassMsun"][select].sum(dtype='float64')
+            sub_mass = self._data_source["particle_mass"][select].in_units('Msun').sum(dtype='float64')
         else:
             sub_mass = \
-                self._data_source.quantities["TotalQuantity"]("ParticleMassMsun")[0]
+                self._data_source.quantities["TotalQuantity"]("particle_mass")[0].in_units('Msun')
         HOPHaloList.__init__(self, self._data_source,
             threshold * total_mass / sub_mass, dm_only)
         self._parse_halolist(total_mass / sub_mass)

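The recurring pattern in this file is unit-aware field access; a minimal
sketch, with a hypothetical halo object:

    # 'halo' stands for any halo produced by the finders above
    pm = halo["particle_mass"].in_units('Msun')  # YTArray carrying Msun units
    total_mass = pm.sum()                        # replaces old ParticleMassMsun
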
diff -r 2ec0978bafb855f2bc19692b6985ba4a6dd36b10 -r 3d28859d2cec38a349e7335fc04e62978ffcc1a8 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -290,7 +290,7 @@
             self.create_field_info()
             np.seterr(**oldsettings)
         return self._instantiated_index
-    
+
     _index_proxy = None
     @property
     def h(self):
@@ -527,7 +527,7 @@
         source = self.all_data()
         max_val, maxi, mx, my, mz = \
             source.quantities["MaxLocation"](field)
-        mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f", 
+        mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f",
               max_val, mx, my, mz)
         return max_val, np.array([mx, my, mz], dtype="float64")
 
@@ -628,7 +628,8 @@
             DW = np.zeros(3)
         else:
             DW = self.arr(self.domain_right_edge - self.domain_left_edge, "code_length")
-        self.unit_registry.add("unitary", float(DW.max()), DW.units.dimensions)
+        self.unit_registry.add("unitary", float(DW.max() * DW.units.cgs_value),
+                               DW.units.dimensions)
 
     _arr = None
     @property

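The unitary fix matters whenever code_length is not 1 cm; a toy check of
what gets registered, with invented numbers:

    domain_width_code = 1.0        # DW.max() in code units
    code_length_cgs = 3.0857e24    # cgs_value of code_length, e.g. 1 Mpc in cm

    # before: unitary ~ 1.0 (a bare code-unit number, wrong physical scale)
    # after:  unitary is the physical domain width in cm
    unitary = domain_width_code * code_length_cgs
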
diff -r 2ec0978bafb855f2bc19692b6985ba4a6dd36b10 -r 3d28859d2cec38a349e7335fc04e62978ffcc1a8 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -43,7 +43,8 @@
 
 from .fields import \
     BoxlibFieldInfo, \
-    MaestroFieldInfo
+    MaestroFieldInfo, \
+    CastroFieldInfo
 
 from .io import IOHandlerBoxlib
 # This is what we use to find scientific notation that might include d's
@@ -603,7 +604,8 @@
         tmp.extend((1,1))
         self.domain_dimensions = np.array(tmp)
         tmp = list(self.periodicity)
-        tmp[1:] = False
+        tmp[1] = False
+        tmp[2] = False
         self.periodicity = ensure_tuple(tmp)
         
     def _setup2d(self):
@@ -728,6 +730,8 @@
 
 class CastroDataset(BoxlibDataset):
 
+    _field_info_class = CastroFieldInfo
+
     @classmethod
     def _is_valid(cls, *args, **kwargs):
         # fill our args

diff -r 2ec0978bafb855f2bc19692b6985ba4a6dd36b10 -r 3d28859d2cec38a349e7335fc04e62978ffcc1a8 yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -20,10 +20,6 @@
     mh, boltzmann_constant_cgs, amu_cgs
 from yt.fields.field_info_container import \
     FieldInfoContainer
-from yt.fields.species_fields import \
-    add_species_field_by_fraction
-from yt.utilities.chemical_formulas import \
-    ChemicalFormula
 
 rho_units = "code_mass / code_length**3"
 mom_units = "code_mass / (code_time * code_length**2)"
@@ -114,6 +110,64 @@
                            function = _get_vel(ax),
                            units = "cm/s")
 
+class CastroFieldInfo(FieldInfoContainer):
+
+    known_other_fields = (
+        ("density", ("g/cm**3", ["density"], r"\rho")),
+        ("xmom", ("g*cm/s", ["momentum_x"], r"\rho u")),
+        ("ymom", ("g*cm/s", ["momentum_y"], r"\rho v")),
+        ("zmom", ("g*cm/s", ["momentum_z"], r"\rho w")),
+        # velocity components are not always present
+        ("x_velocity", ("cm/s", ["velocity_x"], r"u")),
+        ("y_velocity", ("cm/s", ["velocity_y"], r"v")),
+        ("z_velocity", ("cm/s", ["velocity_z"], r"w")),
+        ("rho_E", ("erg/cm**3", ["energy_density"], r"\rho E")),
+        # internal energy density (not just thermal)
+        ("rho_e", ("erg/cm**3", [], r"\rho e")),
+        ("Temp", ("K", ["temperature"], r"T")),
+        ("grav_x", ("cm/s**2", [], r"g\cdot e_x")),
+        ("grav_y", ("cm/s**2", [], r"g\cdot e_y")),
+        ("grav_z", ("cm/s**2", [], r"g\cdot e_z")),
+        ("pressure", ("dyne/cm**2", [], r"p")),
+        ("kineng", ("erg/cm**3", [], r"\frac{1}{2}\rho|U|^2")),
+        ("soundspeed", ("cm/s", ["sound_speed"], None)),
+        ("Machnumber", ("", ["mach_number"], None)),
+        ("entropy", ("erg/(g*K)", ["entropy"], r"s")),
+        ("magvort", ("1/s", ["vorticity_magnitude"], r"|\nabla \times U|")),
+        ("divu", ("1/s", [], r"\nabla \cdot U")),
+        ("eint_E", ("erg/g", [], r"e(E,U)")),
+        ("eint_e", ("erg/g", [], r"e")),
+        ("magvel", ("cm/s", ["velocity_magnitude"], r"|U|")),
+        ("radvel", ("cm/s", [], r"U\cdot e_r")),
+        ("magmom", ("g*cm/s", ["momentum_magnitude"], r"|\rho U|")),
+        ("maggrav", ("cm/s**2", [], r"|g|")),
+        ("phiGrav", ("erg/g", [], r"|\Phi|")),
+    )
+
+    def setup_fluid_fields(self):
+        # add X's
+        for _, field in self.pf.field_list:
+            if field.startswith("X("):
+                # We have a fraction
+                nice_name = field[2:-1]
+                self.alias(("gas", "%s_fraction" % nice_name), ("boxlib", field),
+                           units = "")
+                def _create_density_func(field_name):
+                    def _func(field, data):
+                        return data[field_name] * data["gas", "density"]
+                    return _func
+                func = _create_density_func(("gas", "%s_fraction" % nice_name))
+                self.add_field(name = ("gas", "%s_density" % nice_name),
+                               function = func,
+                               units = "g/cm**3")
+                # We know this will either have one letter, or two.
+                if field[3] in string.letters:
+                    element, weight = field[2:4], field[4:-1]
+                else:
+                    element, weight = field[2:3], field[3:-1]
+                weight = int(weight)
+                # Here we can, later, add number density.
+
 class MaestroFieldInfo(FieldInfoContainer):
 
     known_other_fields = (
@@ -166,7 +220,7 @@
     )
 
     def setup_fluid_fields(self):
-        # Add omegadots, units of 1/s
+        # pick the correct temperature field
         if self.pf.parameters["use_tfromp"]:
             self.alias(("gas", "temperature"), ("boxlib", "tfromp"),
                        units = "K")
@@ -174,6 +228,7 @@
             self.alias(("gas", "temperature"), ("boxlib", "tfromh"),
                        units = "K")
 
+        # Add X's and omegadots, units of 1/s
         for _, field in self.pf.field_list:
             if field.startswith("X("):
                 # We have a fraction

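With CastroFieldInfo wired up, Castro plotfiles pick up the aliases above; a
hedged access sketch (the file name is a placeholder):

    import yt   # yt-3.0 series

    pf = yt.load("plt00010")          # hypothetical Castro plotfile
    ad = pf.h.all_data()
    T  = ad["gas", "temperature"]     # aliased from Castro's "Temp"
    cs = ad["gas", "sound_speed"]     # aliased from "soundspeed"
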
diff -r 2ec0978bafb855f2bc19692b6985ba4a6dd36b10 -r 3d28859d2cec38a349e7335fc04e62978ffcc1a8 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -196,7 +196,7 @@
     _preload_implemented = True
 
     def __init__(self, pf, dataset_type):
-        
+
         self.dataset_type = dataset_type
         if pf.file_style != None:
             self._bn = pf.file_style
@@ -868,7 +868,8 @@
         self.unit_registry.modify("code_time", self.time_unit)
         self.unit_registry.modify("code_velocity", self.velocity_unit)
         DW = self.arr(self.domain_right_edge - self.domain_left_edge, "code_length")
-        self.unit_registry.add("unitary", float(DW.max()), DW.units.dimensions)
+        self.unit_registry.add("unitary", float(DW.max() * DW.units.cgs_value),
+                               DW.units.dimensions)
 
     def cosmology_get_units(self):
         """
@@ -984,8 +985,8 @@
     size = os.stat(f.name).st_size
     fullblocks, lastblock = divmod(size, blocksize)
 
-    # The first(end of file) block will be short, since this leaves 
-    # the rest aligned on a blocksize boundary.  This may be more 
+    # The first(end of file) block will be short, since this leaves
+    # the rest aligned on a blocksize boundary.  This may be more
     # efficient than having the last (first in file) block be short
     f.seek(-lastblock,2)
     yield f.read(lastblock)

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
