[yt-svn] commit/yt: 12 new changesets

Bitbucket commits-noreply at bitbucket.org
Mon Aug 13 08:11:33 PDT 2012


12 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/843cf599c0a0/
changeset:   843cf599c0a0
branch:      yt
user:        jwise77
date:        2012-07-30 08:56:06
summary:     Adding Renyue's fit for Chandra emissivity.
affected #:  1 file

diff -r 31d1c069f5a6e880f68415c2bd738ee9eed86787 -r 843cf599c0a0caf3ede3f7c7d36491bcfc203523 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -384,6 +384,36 @@
           function=_CellVolume,
           convert_function=_ConvertCellVolumeCGS)
 
+def _ChandraEmissivity(field, data):
+    logT0 = na.log10(data["Temperature"]) - 7
+    return ((data["NumberDensity"].astype('float64')**2.0) \
+            *(10**(-0.0103*logT0**8 \
+                   +0.0417*logT0**7 \
+                   -0.0636*logT0**6 \
+                   +0.1149*logT0**5 \
+                   -0.3151*logT0**4 \
+                   +0.6655*logT0**3 \
+                   -1.1256*logT0**2 \
+                   +1.0026*logT0**1 \
+                   -0.6984*logT0) \
+              +data["Metallicity"]*10**(0.0305*logT0**11 \
+                                        -0.0045*logT0**10 \
+                                        -0.3620*logT0**9 \
+                                        +0.0513*logT0**8 \
+                                        +1.6669*logT0**7 \
+                                        -0.3854*logT0**6 \
+                                        -3.3604*logT0**5 \
+                                        +0.4728*logT0**4 \
+                                        +4.5774*logT0**3 \
+                                        -2.3661*logT0**2 \
+                                        -1.6667*logT0**1 \
+                                        -0.2193*logT0)))
+def _convertChandraEmissivity(data):
+    return 1.0 #1.0e-23*0.76**2
+add_field("ChandraEmissivity", function=_ChandraEmissivity,
+          convert_function=_convertChandraEmissivity,
+          projection_conversion="1")
+
 def _XRayEmissivity(field, data):
     return ((data["Density"].astype('float64')**2.0) \
             *data["Temperature"]**0.5)



https://bitbucket.org/yt_analysis/yt/changeset/b942a8fbf46e/
changeset:   b942a8fbf46e
branch:      yt
user:        jwise77
date:        2012-07-30 08:56:52
summary:     Adding total gas mass and separate total particle and gas mass derived quantities.
affected #:  1 file

diff -r 843cf599c0a0caf3ede3f7c7d36491bcfc203523 -r b942a8fbf46eaa8b8e4e844f2c38cddc349ca6ca yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -156,7 +156,32 @@
 def _combTotalMass(data, total_mass):
     return total_mass.sum()
 add_quantity("TotalMass", function=_TotalMass,
-             combine_function=_combTotalMass, n_ret=1)
+             combine_function=_combTotalMass, n_ret = 1)
+
+def _TotalGasMass(data):
+    """
+    This function takes no arguments and returns the sum of cell
+    masses in the object.
+    """
+    baryon_mass = data["CellMassMsun"].sum()
+    return [baryon_mass]
+def _combTotalGasMass(data, baryon_mass):
+    return baryon_mass.sum()
+add_quantity("TotalGasMass", function=_TotalGasMass,
+             combine_function=_combTotalGasMass, n_ret = 1)
+                
+def _MatterMass(data):
+    """
+    This function takes no arguments and returns the array sum of cell masses
+    and particle masses.
+    """
+    cellvol = data["CellVolume"]
+    matter_rho = data["Matter_Density"]
+    return cellvol, matter_rho 
+def _combMatterMass(data, cellvol, matter_rho):
+    return cellvol*matter_rho
+add_quantity("MatterMass", function=_MatterMass,
+	     combine_function=_combMatterMass, n_ret=2)
 
 def _CenterOfMass(data, use_cells=True, use_particles=False):
     """



https://bitbucket.org/yt_analysis/yt/changeset/80c99f3841a3/
changeset:   80c99f3841a3
branch:      yt
user:        jwise77
date:        2012-07-30 08:57:19
summary:     Adding option to specify colorbar label for HEALPix camera image.
affected #:  1 file

diff -r b942a8fbf46eaa8b8e4e844f2c38cddc349ca6ca -r 80c99f3841a3c23dba4357e9fd494540fda185fa yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -793,7 +793,7 @@
 
         return image
 
-    def save_image(self, fn, clim, image):
+    def save_image(self, fn, clim, image, label=None):
         if self.comm.rank is 0 and fn is not None:
             # This assumes Density; this is a relatively safe assumption.
             import matplotlib.figure
@@ -807,7 +807,11 @@
             ax = fig.add_subplot(1,1,1,projection='hammer')
             implot = ax.imshow(img, extent=(-pi,pi,-pi/2,pi/2), clip_on=False, aspect=0.5)
             cb = fig.colorbar(implot, orientation='horizontal')
-            cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
+
+            if label == None:
+                cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
+            else:
+                cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[%s]$" % label)
             if clim is not None: cb.set_clim(*clim)
             ax.xaxis.set_ticks(())
             ax.yaxis.set_ticks(())



https://bitbucket.org/yt_analysis/yt/changeset/c4c575a49599/
changeset:   c4c575a49599
branch:      yt
user:        jwise77
date:        2012-08-06 14:25:40
summary:     If no stars are left for star_analysis, return.  Fixes to some B-field
labels.
affected #:  2 files

diff -r 0fae6c70a51b683546489137ac61184b3c11b39a -r c4c575a4959964a2d113247bec4de08ad383724a yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -393,6 +393,7 @@
         dt = na.maximum(dt, 0.0)
         # Remove young stars
         sub = dt >= self.min_age
+        if len(sub) == 0: return
         self.star_metal = self.star_metal[sub]
         dt = dt[sub]
         self.star_creation_time = self.star_creation_time[sub]


diff -r 0fae6c70a51b683546489137ac61184b3c11b39a -r c4c575a4959964a2d113247bec4de08ad383724a yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -171,16 +171,16 @@
 # We set up fields for both TotalEnergy and Total_Energy in the known fields
 # lists.  Note that this does not mean these will be the used definitions.
 add_enzo_field("TotalEnergy", function=NullFunc,
-          display_name = "\mathrm{Total}\/\mathrm{Energy}",
+          display_name = "\rm{Total}\/\rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 add_enzo_field("Total_Energy", function=NullFunc,
-          display_name = "\mathrm{Total}\/\mathrm{Energy}",
+          display_name = "\rm{Total}\/\rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _Total_Energy(field, data):
     return data["TotalEnergy"] / _convertEnergy(data)
 add_field("Total_Energy", function=_Total_Energy,
-          display_name = "\mathrm{Total}\/\mathrm{Energy}",
+          display_name = "\rm{Total}\/\rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _NumberDensity(field, data):
@@ -247,7 +247,7 @@
 for field in ['Bx','By','Bz']:
     f = KnownEnzoFields[field]
     f._convert_function=_convertBfield
-    f._units=r"\mathrm{Gau\ss}"
+    f._units=r"\rm{Gauss}"
     f.take_log=False
 
 def _convertRadiation(data):
@@ -447,14 +447,14 @@
     return data['star_creation_time']
 def _ConvertEnzoTimeYears(data):
     return data.pf.time_units['years']
-add_field('StarCreationTimeYears', units=r"\mathrm{yr}",
+add_field('StarCreationTimeYears', units=r"\rm{yr}",
           function=_StarCreationTime,
           convert_function=_ConvertEnzoTimeYears,
           projection_conversion="1")
 
 def _StarDynamicalTime(field, data):
     return data['star_dynamical_time']
-add_field('StarDynamicalTimeYears', units=r"\mathrm{yr}",
+add_field('StarDynamicalTimeYears', units=r"\rm{yr}",
           function=_StarDynamicalTime,
           convert_function=_ConvertEnzoTimeYears,
           projection_conversion="1")
@@ -466,7 +466,7 @@
         data.pf.current_time - \
         data['StarCreationTimeYears'][with_stars]
     return star_age
-add_field('StarAgeYears', units=r"\mathrm{yr}",
+add_field('StarAgeYears', units=r"\rm{yr}",
           function=_StarAge,
           projection_conversion="1")
 
@@ -476,20 +476,12 @@
 add_field('IsStarParticle', function=_IsStarParticle,
           particle_type = True)
 
-def _convertBfield(data): 
-    return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
-for field in ['Bx','By','Bz']:
-    f = KnownEnzoFields[field]
-    f._convert_function=_convertBfield
-    f._units=r"\mathrm{Gauss}"
-    f.take_log=False
-
 def _Bmag(field, data):
     """ magnitude of bvec
     """
     return na.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
 
-add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\mathrm{Gauss}")
+add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\rm{Gauss}")
 
 # Particle functions
 
@@ -645,17 +637,3 @@
 add_enzo_1d_field("z-velocity", function=_zvel)
 add_enzo_1d_field("y-velocity", function=_yvel)
 
-def _convertBfield(data): 
-    return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
-for field in ['Bx','By','Bz']:
-    f = KnownEnzoFields[field]
-    f._convert_function=_convertBfield
-    f._units=r"\mathrm{Gauss}"
-    f.take_log=False
-
-def _Bmag(field, data):
-    """ magnitude of bvec
-    """
-    return na.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
-
-add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\mathrm{Gauss}")



https://bitbucket.org/yt_analysis/yt/changeset/c949e94a5ea7/
changeset:   c949e94a5ea7
branch:      yt
user:        jwise77
date:        2012-08-06 14:35:14
summary:     Merging.
affected #:  3 files

diff -r c4c575a4959964a2d113247bec4de08ad383724a -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -156,7 +156,32 @@
 def _combTotalMass(data, total_mass):
     return total_mass.sum()
 add_quantity("TotalMass", function=_TotalMass,
-             combine_function=_combTotalMass, n_ret=1)
+             combine_function=_combTotalMass, n_ret = 1)
+
+def _TotalGasMass(data):
+    """
+    This function takes no arguments and returns the sum of cell
+    masses in the object.
+    """
+    baryon_mass = data["CellMassMsun"].sum()
+    return [baryon_mass]
+def _combTotalGasMass(data, baryon_mass):
+    return baryon_mass.sum()
+add_quantity("TotalGasMass", function=_TotalGasMass,
+             combine_function=_combTotalGasMass, n_ret = 1)
+                
+def _MatterMass(data):
+    """
+    This function takes no arguments and returns the array sum of cell masses
+    and particle masses.
+    """
+    cellvol = data["CellVolume"]
+    matter_rho = data["Matter_Density"]
+    return cellvol, matter_rho 
+def _combMatterMass(data, cellvol, matter_rho):
+    return cellvol*matter_rho
+add_quantity("MatterMass", function=_MatterMass,
+	     combine_function=_combMatterMass, n_ret=2)
 
 def _CenterOfMass(data, use_cells=True, use_particles=False):
     """


diff -r c4c575a4959964a2d113247bec4de08ad383724a -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -388,6 +388,36 @@
           function=_CellVolume,
           convert_function=_ConvertCellVolumeCGS)
 
+def _ChandraEmissivity(field, data):
+    logT0 = na.log10(data["Temperature"]) - 7
+    return ((data["NumberDensity"].astype('float64')**2.0) \
+            *(10**(-0.0103*logT0**8 \
+                   +0.0417*logT0**7 \
+                   -0.0636*logT0**6 \
+                   +0.1149*logT0**5 \
+                   -0.3151*logT0**4 \
+                   +0.6655*logT0**3 \
+                   -1.1256*logT0**2 \
+                   +1.0026*logT0**1 \
+                   -0.6984*logT0) \
+              +data["Metallicity"]*10**(0.0305*logT0**11 \
+                                        -0.0045*logT0**10 \
+                                        -0.3620*logT0**9 \
+                                        +0.0513*logT0**8 \
+                                        +1.6669*logT0**7 \
+                                        -0.3854*logT0**6 \
+                                        -3.3604*logT0**5 \
+                                        +0.4728*logT0**4 \
+                                        +4.5774*logT0**3 \
+                                        -2.3661*logT0**2 \
+                                        -1.6667*logT0**1 \
+                                        -0.2193*logT0)))
+def _convertChandraEmissivity(data):
+    return 1.0 #1.0e-23*0.76**2
+add_field("ChandraEmissivity", function=_ChandraEmissivity,
+          convert_function=_convertChandraEmissivity,
+          projection_conversion="1")
+
 def _XRayEmissivity(field, data):
     return ((data["Density"].astype('float64')**2.0) \
             *data["Temperature"]**0.5)


diff -r c4c575a4959964a2d113247bec4de08ad383724a -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -818,7 +818,7 @@
         self.save_image(fn, clim, image)
         return image
 
-    def save_image(self, fn, clim, image):
+    def save_image(self, fn, clim, image, label=None):
         if self.comm.rank is 0 and fn is not None:
             # This assumes Density; this is a relatively safe assumption.
             import matplotlib.figure
@@ -832,7 +832,11 @@
             ax = fig.add_subplot(1,1,1,projection='hammer')
             implot = ax.imshow(img, extent=(-na.pi,na.pi,-na.pi/2,na.pi/2), clip_on=False, aspect=0.5)
             cb = fig.colorbar(implot, orientation='horizontal')
-            cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
+
+            if label == None:
+                cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
+            else:
+                cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[%s]$" % label)
             if clim is not None: cb.set_clim(*clim)
             ax.xaxis.set_ticks(())
             ax.yaxis.set_ticks(())



https://bitbucket.org/yt_analysis/yt/changeset/4ae1fd5367e4/
changeset:   4ae1fd5367e4
branch:      yt
user:        jwise77
date:        2012-08-10 14:33:19
summary:     Merging
affected #:  9 files

diff -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -201,6 +201,12 @@
         echo "$ export CC=gcc-4.2"
         echo "$ export CXX=g++-4.2"
         echo
+        OSX_VERSION=`sw_vers -productVersion`
+        if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
+        then
+            MPL_SUPP_CFLAGS="${MPL_SUPP_CFLAGS} -mmacosx-version-min=10.7"
+            MPL_SUPP_CXXFLAGS="${MPL_SUPP_CXXFLAGS} -mmacosx-version-min=10.7"
+        fi
     fi
     if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
     then
@@ -411,6 +417,7 @@
 echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
 echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
 echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
+echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
 
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.7.tar.gz
@@ -431,6 +438,7 @@
 get_ytproject h5py-2.0.1.tar.gz
 get_ytproject Cython-0.16.tar.gz
 get_ytproject reason-js-20120623.zip
+get_ytproject Forthon-0.8.10.tar.gz
 
 if [ $INST_BZLIB -eq 1 ]
 then
@@ -668,6 +676,7 @@
 do_setup_py ipython-0.13
 do_setup_py h5py-2.0.1
 do_setup_py Cython-0.16
+do_setup_py Forthon-0.8.10
 [ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"


diff -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -871,9 +871,12 @@
             units of the simulation, or a tuple of the (value, unit) style.
             This will be the width of the FRB.
         height : height specifier
-            This will be the height of the FRB, by default it is equal to width.
+            This will be the physical height of the FRB, by default it is equal
+            to width.  Note that this will not make any corrections to
+            resolution for the aspect ratio.
         resolution : int or tuple of ints
-            The number of pixels on a side of the final FRB.
+            The number of pixels on a side of the final FRB.  If iterable, this
+            will be the width then the height.
         center : array-like of floats, optional
             The center of the FRB.  If not specified, defaults to the center of
             the current object.


diff -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -107,15 +107,9 @@
             self.grid_left_edge[:,i] = DLE[i]
             self.grid_right_edge[:,i] = DRE[i]
         # We only go up to ND for 2D datasets
-        if (f["/bounding box"][:,:,0].shape[1] == ND) :
-            #FLASH 2/3 2D data
-            self.grid_left_edge[:,:ND] = f["/bounding box"][:,:,0]
-            self.grid_right_edge[:,:ND] = f["/bounding box"][:,:,1]
-        else:
-            self.grid_left_edge[:,:] = f["/bounding box"][:,:,0]
-            self.grid_right_edge[:,:] = f["/bounding box"][:,:,1]
-            
-
+        self.grid_left_edge[:,:ND] = f["/bounding box"][:,:ND,0]
+        self.grid_right_edge[:,:ND] = f["/bounding box"][:,:ND,1]
+        
         # Move this to the parameter file
         try:
             nxb = pf.parameters['nxb']


diff -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -28,7 +28,8 @@
       StreamGrid, \
       StreamHierarchy, \
       StreamStaticOutput, \
-      StreamHandler
+      StreamHandler, \
+      load_uniform_grid
 
 from .fields import \
       KnownStreamFields, \


diff -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -40,6 +40,8 @@
     FieldInfoContainer, NullFunc
 from yt.utilities.lib import \
     get_box_grids_level
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
 
 from .fields import \
     StreamFieldInfo, \
@@ -288,3 +290,89 @@
     @classmethod
     def _is_valid(cls, *args, **kwargs):
         return False
+
+class StreamDictFieldHandler(dict):
+
+    @property
+    def all_fields(self): return self[0].keys()
+
+def load_uniform_grid(data, domain_dimensions, domain_size_in_cm):
+    r"""Load a uniform grid of data into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+    This should allow a uniform grid of data to be loaded directly into yt and
+    analyzed as would any others.  This comes with several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+        * Particles may be difficult to integrate.
+
+    Parameters
+    ----------
+    data : dict
+        This is a dict of numpy arrays, where the keys are the field names.
+    domain_dimensions : array_like
+        This is the domain dimensions of the grid
+    domain_size_in_cm : float
+        The size of the domain, in centimeters
+
+    Examples
+    --------
+
+    >>> arr = na.random.random((256, 256, 256))
+    >>> data = dict(Density = arr)
+    >>> pf = load_uniform_grid(data, [256, 256, 256], 3.08e24)
+    """
+    sfh = StreamDictFieldHandler()
+    sfh.update({0:data})
+    domain_dimensions = na.array(domain_dimensions)
+    if na.unique(domain_dimensions).size != 1:
+        print "We don't support variably sized domains yet."
+        raise RuntimeError
+    domain_left_edge = na.zeros(3, 'float64')
+    domain_right_edge = na.ones(3, 'float64')
+    grid_left_edges = na.zeros(3, "int64").reshape((1,3))
+    grid_right_edges = na.array(domain_dimensions, "int64").reshape((1,3))
+
+    grid_levels = na.array([0], dtype='int32').reshape((1,1))
+    grid_dimensions = grid_right_edges - grid_left_edges
+
+    grid_left_edges  = grid_left_edges.astype("float64")
+    grid_left_edges /= domain_dimensions*2**grid_levels
+    grid_left_edges *= domain_right_edge - domain_left_edge
+    grid_left_edges += domain_left_edge
+
+    grid_right_edges  = grid_right_edges.astype("float64")
+    grid_right_edges /= domain_dimensions*2**grid_levels
+    grid_right_edges *= domain_right_edge - domain_left_edge
+    grid_right_edges += domain_left_edge
+
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        na.array([-1], dtype='int64'),
+        na.zeros(1, dtype='int64').reshape((1,1)),
+        na.zeros(1).reshape((1,1)),
+        sfh,
+    )
+
+    handler.name = "UniformGridData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = 0.0
+    handler.cosmology_simulation = 0
+
+    spf = StreamStaticOutput(handler)
+    spf.units["cm"] = domain_size_in_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = domain_size_in_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+    return spf


diff -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -98,8 +98,8 @@
 from yt.frontends.art.api import \
     ARTStaticOutput, ARTFieldInfo, add_art_field
 
-from yt.frontends.maestro.api import \
-    MaestroStaticOutput, MaestroFieldInfo, add_maestro_field
+#from yt.frontends.maestro.api import \
+#    MaestroStaticOutput, MaestroFieldInfo, add_maestro_field
 
 from yt.analysis_modules.list_modules import \
     get_available_modules, amods


diff -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac yt/utilities/kdtree/Makefile
--- a/yt/utilities/kdtree/Makefile
+++ b/yt/utilities/kdtree/Makefile
@@ -9,9 +9,10 @@
 endif
 
 fKD: fKD.f90 fKD.v fKD_source.f90
-#	Forthon --compile_first fKD_source --no2underscores --with-numpy -g fKD fKD.f90 fKD_source.f90
+#	Forthon --compile_first fKD_source --no2underscores -g fKD fKD.f90 fKD_source.f90
 	@echo "Using $(FORTHON) ($(FORTHON_EXE))"
-	$(FORTHON) -F gfortran --compile_first fKD_source --no2underscores --with-numpy --fopt "-O3" fKD fKD_source.f90
+	$(FORTHON) -F gfortran --compile_first fKD_source --no2underscores --fopt "-O3" fKD fKD_source.f90
+	mv build/lib*/fKDpy.so .
 
 clean:
 	rm -rf build fKDpy.a fKDpy.so




diff -r c949e94a5ea7cd1c222ed998bdc344e2beeaf9e7 -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -458,10 +458,15 @@
             Log on/off.
 
         """
-        if log:
-            self._field_transform[field] = log_transform
+        if field == 'all':
+            fields = self.plots.keys()
         else:
-            self._field_transform[field] = linear_transform
+            fields = [field]
+        for field in fields:
+            if log:
+                self._field_transform[field] = log_transform
+            else:
+                self._field_transform[field] = linear_transform
 
     @invalidate_plot
     def set_transform(self, field, name):
@@ -472,34 +477,70 @@
     @invalidate_plot
     def set_cmap(self, field, cmap_name):
         """set the colormap for one of the fields
-        
+
         Parameters
         ----------
         field : string
-            the field to set a transform
+            the field to set the colormap
+            if field == 'all', applies to all plots.
         cmap_name : string
             name of the colormap
 
         """
-        self._colorbar_valid = False
-        self._colormaps[field] = cmap_name
+
+        if field is 'all':
+            fields = self.plots.keys()
+        else:
+            fields = [field]
+        for field in fields:
+            self._colorbar_valid = False
+            self._colormaps[field] = cmap_name
 
     @invalidate_plot
-    def set_zlim(self, field, zmin, zmax):
+    def set_zlim(self, field, zmin, zmax, dynamic_range=None):
         """set the scale of the colormap
-        
+
         Parameters
         ----------
         field : string
-            the field to set a transform
+            the field to set a colormap scale
+            if field == 'all', applies to all plots.
         zmin : float
-            the new minimum of the colormap scale
+            the new minimum of the colormap scale. If 'min', will
+            set to the minimum value in the current view.
         zmax : float
-            the new maximum of the colormap scale
+            the new maximum of the colormap scale. If 'max', will
+            set to the maximum value in the current view.
+
+        Keyword Parameters
+        ------------------
+        dynamic_range : float (default: None)
+            The dynamic range of the image.
+            If zmin == None, will set zmin = zmax / dynamic_range
+            If zmax == None, will set zmax = zmin * dynamic_range
+            When dynamic_range is specified, defaults to setting
+            zmin = zmax / dynamic_range.
 
         """
-        self.plots[field].zmin = zmin
-        self.plots[field].zmax = zmax
+        if field is 'all':
+            fields = self.plots.keys()
+        else:
+            fields = [field]
+        for field in fields:
+            myzmin = zmin
+            myzmax = zmax
+            if zmin == 'min':
+                myzmin = self.plots[field].image._A.min()
+            if zmax == 'max':
+                myzmax = self.plots[field].image._A.max()
+            if dynamic_range is not None:
+                if zmax is None:
+                    myzmax = myzmin * dynamic_range
+                else:
+                    myzmin = myzmax / dynamic_range
+
+            self.plots[field].zmin = myzmin
+            self.plots[field].zmax = myzmax
 
     def setup_callbacks(self):
         for key in callback_registry:
@@ -512,7 +553,7 @@
             callback = invalidate_plot(apply_callback(CallbackMaker))
             callback.__doc__ = CallbackMaker.__init__.__doc__
             self.__dict__['annotate_'+cbname] = types.MethodType(callback,self)
-        
+
     def get_metadata(self, field, strip_mathml = True, return_string = True):
         fval = self._frb[field]
         mi = fval.min()
@@ -651,25 +692,32 @@
     @invalidate_plot
     def set_cmap(self, field, cmap):
         """set the colormap for one of the fields
-        
+
         Parameters
         ----------
         field : string
             the field to set a transform
+            if field == 'all', applies to all plots.
         cmap_name : string
             name of the colormap
 
         """
-        self._colorbar_valid = False
-        self._colormaps[field] = cmap
-        if isinstance(cmap, types.StringTypes):
-            if str(cmap) in yt_colormaps:
-                cmap = yt_colormaps[str(cmap)]
-            elif hasattr(matplotlib.cm, cmap):
-                cmap = getattr(matplotlib.cm, cmap)
-        if not is_colormap(cmap) and cmap is not None:
-            raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
-        self.plots[field].image.set_cmap(cmap)
+        if field == 'all':
+            fields = self.plots.keys()
+        else:
+            fields = [field]
+
+        for field in fields:
+            self._colorbar_valid = False
+            self._colormaps[field] = cmap
+            if isinstance(cmap, types.StringTypes):
+                if str(cmap) in yt_colormaps:
+                    cmap = yt_colormaps[str(cmap)]
+                elif hasattr(matplotlib.cm, cmap):
+                    cmap = getattr(matplotlib.cm, cmap)
+            if not is_colormap(cmap) and cmap is not None:
+                raise RuntimeError("Colormap '%s' does not exist!" % str(cmap))
+            self.plots[field].image.set_cmap(cmap)
 
     def save(self,name=None):
         """saves the plot to disk.
@@ -762,7 +810,7 @@
              the image centers on the location of the maximum density
              cell.  If set to 'c' or 'center', the plot is centered on
              the middle of the domain.
-	width : tuple or a float.
+        width : tuple or a float.
              Width can have four different formats to support windows with variable 
              x and y widths.  They are:
              
@@ -781,7 +829,7 @@
              the y axis.  In the other two examples, code units are assumed, for example
              (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
              in code units.  
-	origin : string
+        origin : string
              The location of the origin of the plot coordinate system.
              Currently, can be set to three options: 'left-domain', corresponding
              to the bottom-left hand corner of the simulation domain, 'center-domain',
@@ -830,7 +878,7 @@
             the image centers on the location of the maximum density
             cell.  If set to 'c' or 'center', the plot is centered on
             the middle of the domain.
-	width : tuple or a float.
+        width : tuple or a float.
              Width can have four different formats to support windows with variable 
              x and y widths.  They are:
              



https://bitbucket.org/yt_analysis/yt/changeset/e5a91d56225c/
changeset:   e5a91d56225c
branch:      yt
user:        jwise77
date:        2012-08-10 15:32:35
summary:     Adding the option to use find_outputs with cosmology_splice and the
simulation time series for simulations that have dt_output or
dcycle_output varying over the simulation.
affected #:  4 files

diff -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac -r e5a91d56225c2b5e4605a0a87c1f0ca689eb07e1 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -37,10 +37,11 @@
     cosmological distance.
     """
 
-    def __init__(self, parameter_filename, simulation_type):
+    def __init__(self, parameter_filename, simulation_type, find_outputs=False):
         self.parameter_filename = parameter_filename
         self.simulation_type = simulation_type
-        self.simulation = simulation(parameter_filename, simulation_type)
+        self.simulation = simulation(parameter_filename, simulation_type, 
+                                     find_outputs=find_outputs)
 
         self.cosmology = Cosmology(
             HubbleConstantNow=(100.0 * self.simulation.hubble_constant),


diff -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac -r e5a91d56225c2b5e4605a0a87c1f0ca689eb07e1 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -112,7 +112,7 @@
     f.close()
     return proj
 
-def simulation(parameter_filename, simulation_type):
+def simulation(parameter_filename, simulation_type, find_outputs=False):
     """
     Loads a simulation time series object of the specified
     simulation type.
@@ -121,5 +121,6 @@
     if simulation_type not in simulation_time_series_registry:
         raise YTSimulationNotIdentified(simulation_type)
 
-    return simulation_time_series_registry[simulation_type](parameter_filename)
+    return simulation_time_series_registry[simulation_type](parameter_filename, 
+                                                            find_outputs=find_outputs)
 


diff -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac -r e5a91d56225c2b5e4605a0a87c1f0ca689eb07e1 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -320,7 +320,7 @@
                 simulation_time_series_registry[code_name] = cls
                 mylog.debug("Registering simulation: %s as %s", code_name, cls)
 
-    def __init__(self, parameter_filename):
+    def __init__(self, parameter_filename, find_outputs=False):
         """
         Base class for generating simulation time series types.
         Principally consists of a *parameter_filename*.
@@ -345,7 +345,7 @@
         self.print_key_parameters()
 
         # Get all possible datasets.
-        self._get_all_outputs()
+        self._get_all_outputs(find_outputs)
 
     def __repr__(self):
         return self.parameter_filename


diff -r 4ae1fd5367e48c17acd6db0ca47a022fa6b839ac -r e5a91d56225c2b5e4605a0a87c1f0ca689eb07e1 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -48,7 +48,7 @@
     r"""Class for creating TimeSeriesData object from an Enzo
     simulation parameter file.
     """
-    def __init__(self, parameter_filename):
+    def __init__(self, parameter_filename, find_outputs=False):
         r"""Initialize an Enzo Simulation object.
 
         Upon creation, the parameter file is parsed and the time and redshift
@@ -67,7 +67,7 @@
         >>> print es.all_outputs
 
         """
-        SimulationTimeSeries.__init__(self, parameter_filename)
+        SimulationTimeSeries.__init__(self, parameter_filename, find_outputs=find_outputs)
 
     def get_time_series(self, time_data=True, redshift_data=True,
                         initial_time=None, final_time=None, time_units='1',
@@ -401,11 +401,12 @@
             self.all_time_outputs.append(output)
             index += 1
 
-    def _get_all_outputs(self):
+    def _get_all_outputs(self, find_outputs=False):
         "Get all potential datasets and combine into a time-sorted list."
 
-        if self.parameters['dtDataDump'] > 0 and \
-            self.parameters['CycleSkipDataDump'] > 0:
+        if find_outputs or \
+            (self.parameters['dtDataDump'] > 0 and \
+             self.parameters['CycleSkipDataDump'] > 0):
             mylog.info("Simulation %s has both dtDataDump and CycleSkipDataDump set." % self.parameter_filename )
             mylog.info("    Unable to calculate datasets.  Attempting to search in the current directory")
             self.all_time_outputs = self._find_outputs()



https://bitbucket.org/yt_analysis/yt/changeset/1756e6a8c5a4/
changeset:   1756e6a8c5a4
branch:      yt
user:        jwise77
date:        2012-08-10 15:33:40
summary:     Backing out these derived quantities because they were a convenience
and are possible with other quantities.
affected #:  1 file

diff -r e5a91d56225c2b5e4605a0a87c1f0ca689eb07e1 -r 1756e6a8c5a4954d4d92eda9df577ddf55a5ee90 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -156,32 +156,7 @@
 def _combTotalMass(data, total_mass):
     return total_mass.sum()
 add_quantity("TotalMass", function=_TotalMass,
-             combine_function=_combTotalMass, n_ret = 1)
-
-def _TotalGasMass(data):
-    """
-    This function takes no arguments and returns the sum of cell
-    masses in the object.
-    """
-    baryon_mass = data["CellMassMsun"].sum()
-    return [baryon_mass]
-def _combTotalGasMass(data, baryon_mass):
-    return baryon_mass.sum()
-add_quantity("TotalGasMass", function=_TotalGasMass,
-             combine_function=_combTotalGasMass, n_ret = 1)
-                
-def _MatterMass(data):
-    """
-    This function takes no arguments and returns the array sum of cell masses
-    and particle masses.
-    """
-    cellvol = data["CellVolume"]
-    matter_rho = data["Matter_Density"]
-    return cellvol, matter_rho 
-def _combMatterMass(data, cellvol, matter_rho):
-    return cellvol*matter_rho
-add_quantity("MatterMass", function=_MatterMass,
-	     combine_function=_combMatterMass, n_ret=2)
+             combine_function=_combTotalMass, n_ret=1)
 
 def _CenterOfMass(data, use_cells=True, use_particles=False):
     """



https://bitbucket.org/yt_analysis/yt/changeset/4d131b55bd0b/
changeset:   4d131b55bd0b
branch:      yt
user:        jwise77
date:        2012-08-10 16:33:37
summary:     Linking find_outputs to light_cone and light_ray.  Cleaning up a bit
by storing find_outputs into SimulationTimeSeries.
affected #:  4 files

diff -r 1756e6a8c5a4954d4d92eda9df577ddf55a5ee90 -r 4d131b55bd0bd58cb6fdb7cbcdcd73eba0a60f0e yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -57,7 +57,7 @@
                  use_minimum_datasets=True, deltaz_min=0.0,
                  minimum_coherent_box_fraction=0.0,
                  time_data=True, redshift_data=True,
-                 set_parameters=None,
+                 find_outputs=False, set_parameters=None,
                  output_dir='LC', output_prefix='LightCone'):
         """
         Initialize a LightCone object.
@@ -102,6 +102,10 @@
             Whether or not to include redshift outputs when gathering
             datasets for time series.
             Default: True.
+        find_outputs : bool
+            Whether or not to search for parameter files in the current 
+            directory.
+            Default: False.
         set_parameters : dict
             Dictionary of parameters to attach to pf.parameters.
             Default: None.
@@ -150,7 +154,8 @@
             only_on_root(os.mkdir, self.output_dir)
 
         # Calculate light cone solution.
-        CosmologySplice.__init__(self, parameter_filename, simulation_type)
+        CosmologySplice.__init__(self, parameter_filename, simulation_type,
+                                 find_outputs=find_outputs)
         self.light_cone_solution = \
           self.create_cosmology_splice(self.near_redshift, self.far_redshift,
                                        minimal=self.use_minimum_datasets,


diff -r 1756e6a8c5a4954d4d92eda9df577ddf55a5ee90 -r 4d131b55bd0bd58cb6fdb7cbcdcd73eba0a60f0e yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -44,7 +44,8 @@
                  near_redshift, far_redshift,
                  use_minimum_datasets=True, deltaz_min=0.0,
                  minimum_coherent_box_fraction=0.0,
-                 time_data=True, redshift_data=True):
+                 time_data=True, redshift_data=True,
+                 find_outputs=False):
         """
         Create a LightRay object.  A light ray is much like a light cone,
         in that it stacks together multiple datasets in order to extend a
@@ -93,6 +94,10 @@
             Whether or not to include redshift outputs when gathering
             datasets for time series.
             Default: True.
+        find_outputs : bool
+            Whether or not to search for parameter files in the current 
+            directory.
+            Default: False.
 
         """
 
@@ -106,7 +111,8 @@
         self._data = {}
 
         # Get list of datasets for light ray solution.
-        CosmologySplice.__init__(self, parameter_filename, simulation_type)
+        CosmologySplice.__init__(self, parameter_filename, simulation_type,
+                                 find_outputs=find_outputs)
         self.light_ray_solution = \
           self.create_cosmology_splice(self.near_redshift, self.far_redshift,
                                        minimal=self.use_minimum_datasets,


diff -r 1756e6a8c5a4954d4d92eda9df577ddf55a5ee90 -r 4d131b55bd0bd58cb6fdb7cbcdcd73eba0a60f0e yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -331,6 +331,7 @@
         self.parameter_filename = parameter_filename
         self.basename = os.path.basename(parameter_filename)
         self.directory = os.path.dirname(parameter_filename)
+        self.find_outputs = find_outputs
         self.parameters = {}
 
         # Set some parameter defaults.
@@ -345,7 +346,7 @@
         self.print_key_parameters()
 
         # Get all possible datasets.
-        self._get_all_outputs(find_outputs)
+        self._get_all_outputs()
 
     def __repr__(self):
         return self.parameter_filename


diff -r 1756e6a8c5a4954d4d92eda9df577ddf55a5ee90 -r 4d131b55bd0bd58cb6fdb7cbcdcd73eba0a60f0e yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -67,7 +67,8 @@
         >>> print es.all_outputs
 
         """
-        SimulationTimeSeries.__init__(self, parameter_filename, find_outputs=find_outputs)
+        SimulationTimeSeries.__init__(self, parameter_filename, 
+                                      find_outputs=find_outputs)
 
     def get_time_series(self, time_data=True, redshift_data=True,
                         initial_time=None, final_time=None, time_units='1',
@@ -401,10 +402,10 @@
             self.all_time_outputs.append(output)
             index += 1
 
-    def _get_all_outputs(self, find_outputs=False):
+    def _get_all_outputs(self):
         "Get all potential datasets and combine into a time-sorted list."
 
-        if find_outputs or \
+        if self.find_outputs or \
             (self.parameters['dtDataDump'] > 0 and \
              self.parameters['CycleSkipDataDump'] > 0):
             mylog.info("Simulation %s has both dtDataDump and CycleSkipDataDump set." % self.parameter_filename )



https://bitbucket.org/yt_analysis/yt/changeset/13fc27172523/
changeset:   13fc27172523
branch:      yt
user:        jwise77
date:        2012-08-10 17:26:26
summary:     Changing default label of HEALpixCamera to "Projected <field>".  The
users can still specify a label.
affected #:  1 file

diff -r 4d131b55bd0bd58cb6fdb7cbcdcd73eba0a60f0e -r 13fc2717252356e55ca6106ec58322ead8cf537f yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -788,7 +788,7 @@
         return image
 
     def snapshot(self, fn = None, clip_ratio = None, double_check = False,
-                 num_threads = 0, clim = None):
+                 num_threads = 0, clim = None, label = None):
         r"""Ray-cast the camera.
 
         This method instructs the camera to take a snapshot -- i.e., call the ray
@@ -815,10 +815,10 @@
         sampler = self.get_sampler(args)
         self.volume.initialize_source()
         image = self._render(double_check, num_threads, image, sampler)
-        self.save_image(fn, clim, image)
+        self.save_image(fn, clim, image, label = label)
         return image
 
-    def save_image(self, fn, clim, image, label=None):
+    def save_image(self, fn, clim, image, label = None):
         if self.comm.rank is 0 and fn is not None:
             # This assumes Density; this is a relatively safe assumption.
             import matplotlib.figure
@@ -834,9 +834,9 @@
             cb = fig.colorbar(implot, orientation='horizontal')
 
             if label == None:
-                cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
+                cb.set_label("Projected %s" % self.fields[0])
             else:
-                cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[%s]$" % units)
+                cb.set_label(label)
             if clim is not None: cb.set_clim(*clim)
             ax.xaxis.set_ticks(())
             ax.yaxis.set_ticks(())



https://bitbucket.org/yt_analysis/yt/changeset/fa9b83bd9358/
changeset:   fa9b83bd9358
branch:      yt
user:        brittonsmith
date:        2012-08-12 15:45:57
summary:     Moved find_outputs keyword to SimulationTimeSeries.__init__ to allow
further selection of outputs in get_time_series.  This also makes
it accessible to the light cone and light ray.
affected #:  2 files

diff -r 13fc2717252356e55ca6106ec58322ead8cf537f -r fa9b83bd93588c21d8708cd115ef032e88a9471d yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -331,7 +331,6 @@
         self.parameter_filename = parameter_filename
         self.basename = os.path.basename(parameter_filename)
         self.directory = os.path.dirname(parameter_filename)
-        self.find_outputs = find_outputs
         self.parameters = {}
 
         # Set some parameter defaults.
@@ -346,7 +345,7 @@
         self.print_key_parameters()
 
         # Get all possible datasets.
-        self._get_all_outputs()
+        self._get_all_outputs(find_outputs=find_outputs)
 
     def __repr__(self):
         return self.parameter_filename


diff -r 13fc2717252356e55ca6106ec58322ead8cf537f -r fa9b83bd93588c21d8708cd115ef032e88a9471d yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -59,6 +59,14 @@
 
         parameter_filename : str
             The simulation parameter file.
+        find_outputs : bool
+            If True, subdirectories within the GlobalDir directory are
+            searched one by one for datasets.  Time and redshift
+            information are gathered by temporarily instantiating each
+            dataset.  This can be used when simulation data was created
+            in a non-standard way, making it difficult to guess the
+            corresponding time and redshift information.
+            Default: False.
 
         Examples
         --------
@@ -67,7 +75,7 @@
         >>> print es.all_outputs
 
         """
-        SimulationTimeSeries.__init__(self, parameter_filename, 
+        SimulationTimeSeries.__init__(self, parameter_filename,
                                       find_outputs=find_outputs)
 
     def get_time_series(self, time_data=True, redshift_data=True,
@@ -75,7 +83,7 @@
                         initial_redshift=None, final_redshift=None,
                         initial_cycle=None, final_cycle=None,
                         times=None, redshifts=None, tolerance=None,
-                        find_outputs=False, parallel=True):
+                        parallel=True):
 
         """
         Instantiate a TimeSeriesData object for a set of outputs.
@@ -146,14 +154,6 @@
             given the requested times or redshifts.  If None, the
             nearest output is always taken.
             Default: None.
-        find_outputs : bool
-            If True, subdirectories within the GlobalDir directory are
-            searched one by one for datasets.  Time and redshift
-            information are gathered by temporarily instantiating each
-            dataset.  This can be used when simulation data was created
-            in a non-standard way, making it difficult to guess the
-            corresponding time and redshift information.
-            Default: False.
         parallel : bool/int
             If True, the generated TimeSeriesData will divide the work
             such that a single processor works on each dataset.  If an
@@ -186,20 +186,15 @@
             mylog.error('An initial or final redshift has been given for a noncosmological simulation.')
             return
 
-        # Create the set of outputs from which further selection will be done.
-        if find_outputs:
-            my_all_outputs = self._find_outputs()
-
+        if time_data and redshift_data:
+            my_all_outputs = self.all_outputs
+        elif time_data:
+            my_all_outputs = self.all_time_outputs
+        elif redshift_data:
+            my_all_outputs = self.all_redshift_outputs
         else:
-            if time_data and redshift_data:
-                my_all_outputs = self.all_outputs
-            elif time_data:
-                my_all_outputs = self.all_time_outputs
-            elif redshift_data:
-                my_all_outputs = self.all_redshift_outputs
-            else:
-                mylog.error('Both time_data and redshift_data are False.')
-                return
+            mylog.error('Both time_data and redshift_data are False.')
+            return
 
         # Apply selection criteria to the set.
         if times is not None:
@@ -355,6 +350,7 @@
         for output in self.all_redshift_outputs:
             output['time'] = self.enzo_cosmology.ComputeTimeFromRedshift(output['redshift']) / \
                 self.enzo_cosmology.TimeUnits
+        self.all_redshift_outputs.sort(key=lambda obj:obj['time'])
 
     def _calculate_time_outputs(self):
         "Calculate time outputs and their redshifts if cosmological."
@@ -402,28 +398,32 @@
             self.all_time_outputs.append(output)
             index += 1
 
-    def _get_all_outputs(self):
+    def _get_all_outputs(self, find_outputs=False):
         "Get all potential datasets and combine into a time-sorted list."
 
-        if self.find_outputs or \
-            (self.parameters['dtDataDump'] > 0 and \
-             self.parameters['CycleSkipDataDump'] > 0):
+        # Create the set of outputs from which further selection will be done.
+        if find_outputs:
+            self._find_outputs()
+
+        elif self.parameters['dtDataDump'] > 0 and \
+          self.parameters['CycleSkipDataDump'] > 0:
             mylog.info("Simulation %s has both dtDataDump and CycleSkipDataDump set." % self.parameter_filename )
             mylog.info("    Unable to calculate datasets.  Attempting to search in the current directory")
-            self.all_time_outputs = self._find_outputs()
+            self._find_outputs()
 
-        # Get all time or cycle outputs.
-        elif self.parameters['CycleSkipDataDump'] > 0:
-            self._calculate_cycle_outputs()
         else:
-            self._calculate_time_outputs()
+            # Get all time or cycle outputs.
+            if self.parameters['CycleSkipDataDump'] > 0:
+                self._calculate_cycle_outputs()
+            else:
+                self._calculate_time_outputs()
 
-        # Calculate times for redshift outputs.
-        self._calculate_redshift_dump_times()
+            # Calculate times for redshift outputs.
+            self._calculate_redshift_dump_times()
 
-        self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
-        if self.parameters['CycleSkipDataDump'] <= 0:
-            self.all_outputs.sort(key=lambda obj:obj['time'])
+            self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
+            if self.parameters['CycleSkipDataDump'] <= 0:
+                self.all_outputs.sort(key=lambda obj:obj['time'])
 
         mylog.info("Total datasets: %d." % len(self.all_outputs))
 
@@ -505,14 +505,32 @@
         """
 
         # look for time outputs.
-        potential_outputs = glob.glob(os.path.join(self.parameters['GlobalDir'],
-                                                   "%s*" % self.parameters['DataDumpDir'])) + \
-                            glob.glob(os.path.join(self.parameters['GlobalDir'],
-                                                   "%s*" % self.parameters['RedshiftDumpDir']))
-        time_outputs = []
-        mylog.info("Checking %d potential time outputs." %
+        potential_time_outputs = \
+          glob.glob(os.path.join(self.parameters['GlobalDir'],
+                                 "%s*" % self.parameters['DataDumpDir']))
+        self.all_time_outputs = \
+          self._check_for_outputs(potential_time_outputs)
+        self.all_time_outputs.sort(key=lambda obj: obj['time'])
+
+        # look for redshift outputs.
+        potential_redshift_outputs = \
+          glob.glob(os.path.join(self.parameters['GlobalDir'],
+                                 "%s*" % self.parameters['RedshiftDumpDir']))
+        self.all_redshift_outputs = \
+          self._check_for_outputs(potential_redshift_outputs)
+        self.all_redshift_outputs.sort(key=lambda obj: obj['time'])
+
+        self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
+        self.all_outputs.sort(key=lambda obj: obj['time'])
+        mylog.info("Located %d total outputs." % len(self.all_outputs))
+
+    def _check_for_outputs(self, potential_outputs):
+        r"""Check a list of files to see if they are valid datasets."""
+
+        mylog.info("Checking %d potential outputs." %
                    len(potential_outputs))
 
+        my_outputs = []
         for output in potential_outputs:
             if self.parameters['DataDumpDir'] in output:
                 dir_key = self.parameters['DataDumpDir']
@@ -528,15 +546,14 @@
                 try:
                     pf = load(filename)
                     if pf is not None:
-                        time_outputs.append({'filename': filename, 'time': pf.current_time})
+                        my_outputs.append({'filename': filename,
+                                           'time': pf.current_time})
                         if pf.cosmological_simulation:
-                            time_outputs[-1]['redshift'] = pf.current_redshift
+                            my_outputs[-1]['redshift'] = pf.current_redshift
                 except YTOutputNotIdentified:
                     mylog.error('Failed to load %s' % filename)
 
-        mylog.info("Located %d time outputs." % len(time_outputs))
-        time_outputs.sort(key=lambda obj: obj['time'])
-        return time_outputs
+        return my_outputs
 
     def _get_outputs_by_key(self, key, values, tolerance=None, outputs=None):
         r"""Get datasets at or near to given values.



https://bitbucket.org/yt_analysis/yt/changeset/514f5a6a60c8/
changeset:   514f5a6a60c8
branch:      yt
user:        brittonsmith
date:        2012-08-13 17:11:31
summary:     Merged in jwise77/yt (pull request #241)
affected #:  11 files

diff -r 16f5ecd96fabc021697b84c566a77ec729348418 -r 514f5a6a60c893b2a05f929150f352b153d68c65 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -37,10 +37,11 @@
     cosmological distance.
     """
 
-    def __init__(self, parameter_filename, simulation_type):
+    def __init__(self, parameter_filename, simulation_type, find_outputs=False):
         self.parameter_filename = parameter_filename
         self.simulation_type = simulation_type
-        self.simulation = simulation(parameter_filename, simulation_type)
+        self.simulation = simulation(parameter_filename, simulation_type, 
+                                     find_outputs=find_outputs)
 
         self.cosmology = Cosmology(
             HubbleConstantNow=(100.0 * self.simulation.hubble_constant),


diff -r 16f5ecd96fabc021697b84c566a77ec729348418 -r 514f5a6a60c893b2a05f929150f352b153d68c65 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -57,7 +57,7 @@
                  use_minimum_datasets=True, deltaz_min=0.0,
                  minimum_coherent_box_fraction=0.0,
                  time_data=True, redshift_data=True,
-                 set_parameters=None,
+                 find_outputs=False, set_parameters=None,
                  output_dir='LC', output_prefix='LightCone'):
         """
         Initialize a LightCone object.
@@ -102,6 +102,10 @@
             Whether or not to include redshift outputs when gathering
             datasets for time series.
             Default: True.
+        find_outputs : bool
+            Whether or not to search for parameter files in the current 
+            directory.
+            Default: False.
         set_parameters : dict
             Dictionary of parameters to attach to pf.parameters.
             Default: None.
@@ -150,7 +154,8 @@
             only_on_root(os.mkdir, self.output_dir)
 
         # Calculate light cone solution.
-        CosmologySplice.__init__(self, parameter_filename, simulation_type)
+        CosmologySplice.__init__(self, parameter_filename, simulation_type,
+                                 find_outputs=find_outputs)
         self.light_cone_solution = \
           self.create_cosmology_splice(self.near_redshift, self.far_redshift,
                                        minimal=self.use_minimum_datasets,


diff -r 16f5ecd96fabc021697b84c566a77ec729348418 -r 514f5a6a60c893b2a05f929150f352b153d68c65 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -44,7 +44,8 @@
                  near_redshift, far_redshift,
                  use_minimum_datasets=True, deltaz_min=0.0,
                  minimum_coherent_box_fraction=0.0,
-                 time_data=True, redshift_data=True):
+                 time_data=True, redshift_data=True,
+                 find_outputs=False):
         """
         Create a LightRay object.  A light ray is much like a light cone,
         in that it stacks together multiple datasets in order to extend a
@@ -93,6 +94,10 @@
             Whether or not to include redshift outputs when gathering
             datasets for time series.
             Default: True.
+        find_outputs : bool
+            Whether or not to search for parameter files in the current 
+            directory.
+            Default: False.
 
         """
 
@@ -106,7 +111,8 @@
         self._data = {}
 
         # Get list of datasets for light ray solution.
-        CosmologySplice.__init__(self, parameter_filename, simulation_type)
+        CosmologySplice.__init__(self, parameter_filename, simulation_type,
+                                 find_outputs=find_outputs)
         self.light_ray_solution = \
           self.create_cosmology_splice(self.near_redshift, self.far_redshift,
                                        minimal=self.use_minimum_datasets,


diff -r 16f5ecd96fabc021697b84c566a77ec729348418 -r 514f5a6a60c893b2a05f929150f352b153d68c65 yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -393,6 +393,7 @@
         dt = na.maximum(dt, 0.0)
         # Remove young stars
         sub = dt >= self.min_age
+        if len(sub) == 0: return
         self.star_metal = self.star_metal[sub]
         dt = dt[sub]
         self.star_creation_time = self.star_creation_time[sub]


diff -r 16f5ecd96fabc021697b84c566a77ec729348418 -r 514f5a6a60c893b2a05f929150f352b153d68c65 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -112,7 +112,7 @@
     f.close()
     return proj
 
-def simulation(parameter_filename, simulation_type):
+def simulation(parameter_filename, simulation_type, find_outputs=False):
     """
     Loads a simulation time series object of the specified
     simulation type.
@@ -121,5 +121,6 @@
     if simulation_type not in simulation_time_series_registry:
         raise YTSimulationNotIdentified(simulation_type)
 
-    return simulation_time_series_registry[simulation_type](parameter_filename)
+    return simulation_time_series_registry[simulation_type](parameter_filename, 
+                                                            find_outputs=find_outputs)
 




diff -r 16f5ecd96fabc021697b84c566a77ec729348418 -r 514f5a6a60c893b2a05f929150f352b153d68c65 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -320,7 +320,7 @@
                 simulation_time_series_registry[code_name] = cls
                 mylog.debug("Registering simulation: %s as %s", code_name, cls)
 
-    def __init__(self, parameter_filename):
+    def __init__(self, parameter_filename, find_outputs=False):
         """
         Base class for generating simulation time series types.
         Principally consists of a *parameter_filename*.
@@ -345,7 +345,7 @@
         self.print_key_parameters()
 
         # Get all possible datasets.
-        self._get_all_outputs()
+        self._get_all_outputs(find_outputs=find_outputs)
 
     def __repr__(self):
         return self.parameter_filename


diff -r 16f5ecd96fabc021697b84c566a77ec729348418 -r 514f5a6a60c893b2a05f929150f352b153d68c65 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -388,6 +388,36 @@
           function=_CellVolume,
           convert_function=_ConvertCellVolumeCGS)
 
+def _ChandraEmissivity(field, data):
+    logT0 = na.log10(data["Temperature"]) - 7
+    return ((data["NumberDensity"].astype('float64')**2.0) \
+            *(10**(-0.0103*logT0**8 \
+                   +0.0417*logT0**7 \
+                   -0.0636*logT0**6 \
+                   +0.1149*logT0**5 \
+                   -0.3151*logT0**4 \
+                   +0.6655*logT0**3 \
+                   -1.1256*logT0**2 \
+                   +1.0026*logT0**1 \
+                   -0.6984*logT0) \
+              +data["Metallicity"]*10**(0.0305*logT0**11 \
+                                        -0.0045*logT0**10 \
+                                        -0.3620*logT0**9 \
+                                        +0.0513*logT0**8 \
+                                        +1.6669*logT0**7 \
+                                        -0.3854*logT0**6 \
+                                        -3.3604*logT0**5 \
+                                        +0.4728*logT0**4 \
+                                        +4.5774*logT0**3 \
+                                        -2.3661*logT0**2 \
+                                        -1.6667*logT0**1 \
+                                        -0.2193*logT0)))
+def _convertChandraEmissivity(data):
+    return 1.0 #1.0e-23*0.76**2
+add_field("ChandraEmissivity", function=_ChandraEmissivity,
+          convert_function=_convertChandraEmissivity,
+          projection_conversion="1")
+
 def _XRayEmissivity(field, data):
     return ((data["Density"].astype('float64')**2.0) \
             *data["Temperature"]**0.5)


diff -r 16f5ecd96fabc021697b84c566a77ec729348418 -r 514f5a6a60c893b2a05f929150f352b153d68c65 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -171,16 +171,16 @@
 # We set up fields for both TotalEnergy and Total_Energy in the known fields
 # lists.  Note that this does not mean these will be the used definitions.
 add_enzo_field("TotalEnergy", function=NullFunc,
-          display_name = "\mathrm{Total}\/\mathrm{Energy}",
+          display_name = "\rm{Total}\/\rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 add_enzo_field("Total_Energy", function=NullFunc,
-          display_name = "\mathrm{Total}\/\mathrm{Energy}",
+          display_name = "\rm{Total}\/\rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _Total_Energy(field, data):
     return data["TotalEnergy"] / _convertEnergy(data)
 add_field("Total_Energy", function=_Total_Energy,
-          display_name = "\mathrm{Total}\/\mathrm{Energy}",
+          display_name = "\rm{Total}\/\rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _NumberDensity(field, data):
@@ -247,7 +247,7 @@
 for field in ['Bx','By','Bz']:
     f = KnownEnzoFields[field]
     f._convert_function=_convertBfield
-    f._units=r"\mathrm{Gau\ss}"
+    f._units=r"\rm{Gauss}"
     f.take_log=False
 
 def _convertRadiation(data):
@@ -447,14 +447,14 @@
     return data['star_creation_time']
 def _ConvertEnzoTimeYears(data):
     return data.pf.time_units['years']
-add_field('StarCreationTimeYears', units=r"\mathrm{yr}",
+add_field('StarCreationTimeYears', units=r"\rm{yr}",
           function=_StarCreationTime,
           convert_function=_ConvertEnzoTimeYears,
           projection_conversion="1")
 
 def _StarDynamicalTime(field, data):
     return data['star_dynamical_time']
-add_field('StarDynamicalTimeYears', units=r"\mathrm{yr}",
+add_field('StarDynamicalTimeYears', units=r"\rm{yr}",
           function=_StarDynamicalTime,
           convert_function=_ConvertEnzoTimeYears,
           projection_conversion="1")
@@ -466,7 +466,7 @@
         data.pf.current_time - \
         data['StarCreationTimeYears'][with_stars]
     return star_age
-add_field('StarAgeYears', units=r"\mathrm{yr}",
+add_field('StarAgeYears', units=r"\rm{yr}",
           function=_StarAge,
           projection_conversion="1")
 
@@ -476,20 +476,12 @@
 add_field('IsStarParticle', function=_IsStarParticle,
           particle_type = True)
 
-def _convertBfield(data): 
-    return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
-for field in ['Bx','By','Bz']:
-    f = KnownEnzoFields[field]
-    f._convert_function=_convertBfield
-    f._units=r"\mathrm{Gauss}"
-    f.take_log=False
-
 def _Bmag(field, data):
     """ magnitude of bvec
     """
     return na.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
 
-add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\mathrm{Gauss}")
+add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\rm{Gauss}")
 
 # Particle functions
 
@@ -645,17 +637,3 @@
 add_enzo_1d_field("z-velocity", function=_zvel)
 add_enzo_1d_field("y-velocity", function=_yvel)
 
-def _convertBfield(data): 
-    return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
-for field in ['Bx','By','Bz']:
-    f = KnownEnzoFields[field]
-    f._convert_function=_convertBfield
-    f._units=r"\mathrm{Gauss}"
-    f.take_log=False
-
-def _Bmag(field, data):
-    """ magnitude of bvec
-    """
-    return na.sqrt(data['Bx']**2 + data['By']**2 + data['Bz']**2)
-
-add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\mathrm{Gauss}")


diff -r 16f5ecd96fabc021697b84c566a77ec729348418 -r 514f5a6a60c893b2a05f929150f352b153d68c65 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -48,7 +48,7 @@
     r"""Class for creating TimeSeriesData object from an Enzo
     simulation parameter file.
     """
-    def __init__(self, parameter_filename):
+    def __init__(self, parameter_filename, find_outputs=False):
         r"""Initialize an Enzo Simulation object.
 
         Upon creation, the parameter file is parsed and the time and redshift
@@ -59,6 +59,14 @@
 
         parameter_filename : str
             The simulation parameter file.
+        find_outputs : bool
+            If True, subdirectories within the GlobalDir directory are
+            searched one by one for datasets.  Time and redshift
+            information are gathered by temporarily instantiating each
+            dataset.  This can be used when simulation data was created
+            in a non-standard way, making it difficult to guess the
+            corresponding time and redshift information.
+            Default: False.
 
         Examples
         --------
@@ -67,14 +75,15 @@
         >>> print es.all_outputs
 
         """
-        SimulationTimeSeries.__init__(self, parameter_filename)
+        SimulationTimeSeries.__init__(self, parameter_filename,
+                                      find_outputs=find_outputs)
 
     def get_time_series(self, time_data=True, redshift_data=True,
                         initial_time=None, final_time=None, time_units='1',
                         initial_redshift=None, final_redshift=None,
                         initial_cycle=None, final_cycle=None,
                         times=None, redshifts=None, tolerance=None,
-                        find_outputs=False, parallel=True):
+                        parallel=True):
 
         """
         Instantiate a TimeSeriesData object for a set of outputs.
@@ -145,14 +154,6 @@
             given the requested times or redshifts.  If None, the
             nearest output is always taken.
             Default: None.
-        find_outputs : bool
-            If True, subdirectories within the GlobalDir directory are
-            searched one by one for datasets.  Time and redshift
-            information are gathered by temporarily instantiating each
-            dataset.  This can be used when simulation data was created
-            in a non-standard way, making it difficult to guess the
-            corresponding time and redshift information.
-            Default: False.
         parallel : bool/int
             If True, the generated TimeSeriesData will divide the work
             such that a single processor works on each dataset.  If an
@@ -185,20 +186,15 @@
             mylog.error('An initial or final redshift has been given for a noncosmological simulation.')
             return
 
-        # Create the set of outputs from which further selection will be done.
-        if find_outputs:
-            my_all_outputs = self._find_outputs()
-
+        if time_data and redshift_data:
+            my_all_outputs = self.all_outputs
+        elif time_data:
+            my_all_outputs = self.all_time_outputs
+        elif redshift_data:
+            my_all_outputs = self.all_redshift_outputs
         else:
-            if time_data and redshift_data:
-                my_all_outputs = self.all_outputs
-            elif time_data:
-                my_all_outputs = self.all_time_outputs
-            elif redshift_data:
-                my_all_outputs = self.all_redshift_outputs
-            else:
-                mylog.error('Both time_data and redshift_data are False.')
-                return
+            mylog.error('Both time_data and redshift_data are False.')
+            return
 
         # Apply selection criteria to the set.
         if times is not None:
@@ -354,6 +350,7 @@
         for output in self.all_redshift_outputs:
             output['time'] = self.enzo_cosmology.ComputeTimeFromRedshift(output['redshift']) / \
                 self.enzo_cosmology.TimeUnits
+        self.all_redshift_outputs.sort(key=lambda obj:obj['time'])
 
     def _calculate_time_outputs(self):
         "Calculate time outputs and their redshifts if cosmological."
@@ -401,27 +398,32 @@
             self.all_time_outputs.append(output)
             index += 1
 
-    def _get_all_outputs(self):
+    def _get_all_outputs(self, find_outputs=False):
         "Get all potential datasets and combine into a time-sorted list."
 
-        if self.parameters['dtDataDump'] > 0 and \
-            self.parameters['CycleSkipDataDump'] > 0:
+        # Create the set of outputs from which further selection will be done.
+        if find_outputs:
+            self._find_outputs()
+
+        elif self.parameters['dtDataDump'] > 0 and \
+          self.parameters['CycleSkipDataDump'] > 0:
             mylog.info("Simulation %s has both dtDataDump and CycleSkipDataDump set." % self.parameter_filename )
             mylog.info("    Unable to calculate datasets.  Attempting to search in the current directory")
-            self.all_time_outputs = self._find_outputs()
+            self._find_outputs()
 
-        # Get all time or cycle outputs.
-        elif self.parameters['CycleSkipDataDump'] > 0:
-            self._calculate_cycle_outputs()
         else:
-            self._calculate_time_outputs()
+            # Get all time or cycle outputs.
+            if self.parameters['CycleSkipDataDump'] > 0:
+                self._calculate_cycle_outputs()
+            else:
+                self._calculate_time_outputs()
 
-        # Calculate times for redshift outputs.
-        self._calculate_redshift_dump_times()
+            # Calculate times for redshift outputs.
+            self._calculate_redshift_dump_times()
 
-        self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
-        if self.parameters['CycleSkipDataDump'] <= 0:
-            self.all_outputs.sort(key=lambda obj:obj['time'])
+            self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
+            if self.parameters['CycleSkipDataDump'] <= 0:
+                self.all_outputs.sort(key=lambda obj:obj['time'])
 
         mylog.info("Total datasets: %d." % len(self.all_outputs))
 
@@ -503,14 +505,32 @@
         """
 
         # look for time outputs.
-        potential_outputs = glob.glob(os.path.join(self.parameters['GlobalDir'],
-                                                   "%s*" % self.parameters['DataDumpDir'])) + \
-                            glob.glob(os.path.join(self.parameters['GlobalDir'],
-                                                   "%s*" % self.parameters['RedshiftDumpDir']))
-        time_outputs = []
-        mylog.info("Checking %d potential time outputs." %
+        potential_time_outputs = \
+          glob.glob(os.path.join(self.parameters['GlobalDir'],
+                                 "%s*" % self.parameters['DataDumpDir']))
+        self.all_time_outputs = \
+          self._check_for_outputs(potential_time_outputs)
+        self.all_time_outputs.sort(key=lambda obj: obj['time'])
+
+        # look for redshift outputs.
+        potential_redshift_outputs = \
+          glob.glob(os.path.join(self.parameters['GlobalDir'],
+                                 "%s*" % self.parameters['RedshiftDumpDir']))
+        self.all_redshift_outputs = \
+          self._check_for_outputs(potential_redshift_outputs)
+        self.all_redshift_outputs.sort(key=lambda obj: obj['time'])
+
+        self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
+        self.all_outputs.sort(key=lambda obj: obj['time'])
+        mylog.info("Located %d total outputs." % len(self.all_outputs))
+
+    def _check_for_outputs(self, potential_outputs):
+        r"""Check a list of files to see if they are valid datasets."""
+
+        mylog.info("Checking %d potential outputs." %
                    len(potential_outputs))
 
+        my_outputs = []
         for output in potential_outputs:
             if self.parameters['DataDumpDir'] in output:
                 dir_key = self.parameters['DataDumpDir']
@@ -526,15 +546,14 @@
                 try:
                     pf = load(filename)
                     if pf is not None:
-                        time_outputs.append({'filename': filename, 'time': pf.current_time})
+                        my_outputs.append({'filename': filename,
+                                           'time': pf.current_time})
                         if pf.cosmological_simulation:
-                            time_outputs[-1]['redshift'] = pf.current_redshift
+                            my_outputs[-1]['redshift'] = pf.current_redshift
                 except YTOutputNotIdentified:
                     mylog.error('Failed to load %s' % filename)
 
-        mylog.info("Located %d time outputs." % len(time_outputs))
-        time_outputs.sort(key=lambda obj: obj['time'])
-        return time_outputs
+        return my_outputs
 
     def _get_outputs_by_key(self, key, values, tolerance=None, outputs=None):
         r"""Get datasets at or near to given values.


diff -r 16f5ecd96fabc021697b84c566a77ec729348418 -r 514f5a6a60c893b2a05f929150f352b153d68c65 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -788,7 +788,7 @@
         return image
 
     def snapshot(self, fn = None, clip_ratio = None, double_check = False,
-                 num_threads = 0, clim = None):
+                 num_threads = 0, clim = None, label = None):
         r"""Ray-cast the camera.
 
         This method instructs the camera to take a snapshot -- i.e., call the ray
@@ -815,10 +815,10 @@
         sampler = self.get_sampler(args)
         self.volume.initialize_source()
         image = self._render(double_check, num_threads, image, sampler)
-        self.save_image(fn, clim, image)
+        self.save_image(fn, clim, image, label = label)
         return image
 
-    def save_image(self, fn, clim, image):
+    def save_image(self, fn, clim, image, label = None):
         if self.comm.rank is 0 and fn is not None:
             # This assumes Density; this is a relatively safe assumption.
             import matplotlib.figure
@@ -832,7 +832,11 @@
             ax = fig.add_subplot(1,1,1,projection='hammer')
             implot = ax.imshow(img, extent=(-na.pi,na.pi,-na.pi/2,na.pi/2), clip_on=False, aspect=0.5)
             cb = fig.colorbar(implot, orientation='horizontal')
-            cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
+
+            if label == None:
+                cb.set_label("Projected %s" % self.fields[0])
+            else:
+                cb.set_label(label)
             if clim is not None: cb.set_clim(*clim)
             ax.xaxis.set_ticks(())
             ax.yaxis.set_ticks(())

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list