[Yt-svn] commit/yt: 2 new changesets

Bitbucket commits-noreply at bitbucket.org
Mon Jul 25 05:55:39 PDT 2011


2 new changesets in yt:

http://bitbucket.org/yt_analysis/yt/changeset/95ab2ad57cff/
changeset:   95ab2ad57cff
branch:      yt
user:        MatthewTurk
date:        2011-07-25 14:52:28
summary:     Removing all the make_svn_version calls; they have been left in
place but commented out, so that if a make_hg_version_py becomes available,
we can use it.
affected #:  43 files (43 bytes)

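For context, a make_hg_version_py hook along the lines anticipated in this
summary might look like the minimal sketch below; the function name,
signature, and __hg_version__.py output file are all hypothetical, modeled
on numpy.distutils' make_svn_version_py():

import os
import subprocess

def make_hg_version_py(target_dir, source_dir="."):
    # Record the short changeset hash of the working copy, falling back
    # to "unknown" outside a Mercurial checkout.
    try:
        changeset = subprocess.check_output(
            ["hg", "identify", "-i"], cwd=source_dir).strip()
    except (OSError, subprocess.CalledProcessError):
        changeset = "unknown"
    with open(os.path.join(target_dir, "__hg_version__.py"), "w") as f:
        f.write("hg_version = %r\n" % changeset)
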
--- a/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -95,7 +95,7 @@
                        quiet=True)
     
     config.make_config_py()
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     config.add_subpackage('yt','yt')
     config.add_scripts("scripts/*")
 


--- a/yt/analysis_modules/coordinate_transformation/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/analysis_modules/coordinate_transformation/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('coordinate_transformation',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/analysis_modules/halo_finding/fof/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/analysis_modules/halo_finding/fof/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -12,5 +12,5 @@
                                      "kd.c"],
                                     libraries=["m"])
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/analysis_modules/halo_finding/hop/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/analysis_modules/halo_finding/hop/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -15,5 +15,5 @@
                                      "hop_slice.c",
                                      "hop_smooth.c",])
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/analysis_modules/halo_finding/parallel_hop/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/analysis_modules/halo_finding/parallel_hop/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('parallel_hop',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/analysis_modules/halo_finding/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/analysis_modules/halo_finding/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -11,5 +11,5 @@
     config.add_subpackage("hop")
     config.add_subpackage("parallel_hop")
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/analysis_modules/halo_mass_function/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/analysis_modules/halo_mass_function/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('halo_mass_function',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/analysis_modules/halo_merger_tree/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/analysis_modules/halo_merger_tree/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('halo_merger_tree',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/analysis_modules/halo_profiler/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/analysis_modules/halo_profiler/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('halo_profiler',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/analysis_modules/hierarchy_subset/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/analysis_modules/hierarchy_subset/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('hierarchy_subset',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/analysis_modules/level_sets/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/analysis_modules/level_sets/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('level_sets',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/analysis_modules/light_cone/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/analysis_modules/light_cone/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('light_cone',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/analysis_modules/light_ray/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/analysis_modules/light_ray/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('light_ray',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/analysis_modules/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/analysis_modules/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -5,7 +5,7 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('analysis_modules',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     config.add_subpackage("absorption_spectrum")
     config.add_subpackage("coordinate_transformation")
     config.add_subpackage("halo_finding")


--- a/yt/analysis_modules/simulation_handler/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/analysis_modules/simulation_handler/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('simulation_handler',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/analysis_modules/spectral_integrator/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/analysis_modules/spectral_integrator/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('spectral_integrator',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/analysis_modules/star_analysis/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/analysis_modules/star_analysis/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('star_analysis',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/analysis_modules/two_point_functions/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/analysis_modules/two_point_functions/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('two_point_functions',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/astro_objects/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/astro_objects/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('astro_objects',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/data_objects/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/data_objects/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('data_objects',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/frontends/art/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/frontends/art/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -6,5 +6,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('art',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/frontends/castro/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/frontends/castro/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('castro', parent_package, top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/frontends/chombo/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/frontends/chombo/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('chombo',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/frontends/enzo/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/frontends/enzo/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('enzo',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/frontends/flash/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/frontends/flash/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('flash',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/frontends/gadget/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/frontends/gadget/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('gadget',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/frontends/gdf/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/frontends/gdf/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('gdf',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/frontends/maestro/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/frontends/maestro/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('maestro',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/frontends/orion/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/frontends/orion/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('orion',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/frontends/ramses/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/frontends/ramses/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -13,5 +13,5 @@
         depends=glob.glob("yt/frontends/ramses/ramses_headers/*.hh")
         )
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/frontends/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/frontends/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -5,7 +5,7 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('frontends',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     config.add_subpackage("chombo")
     config.add_subpackage("enzo")
     config.add_subpackage("flash")


--- a/yt/frontends/stream/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/frontends/stream/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('stream',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/frontends/tiger/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/frontends/tiger/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('tiger',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/gui/opengl_widgets/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/gui/opengl_widgets/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('opengl_widgets',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/gui/reason/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/gui/reason/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -6,5 +6,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('reason',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/gui/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/gui/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -9,5 +9,5 @@
     config.add_subpackage('traited_explorer')
     config.add_subpackage('reason')
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/gui/traited_explorer/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/gui/traited_explorer/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -6,5 +6,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('traited_explorer',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -13,5 +13,5 @@
     config.add_subpackage('utilities')
     config.add_subpackage('visualization')
     config.make_config_py()
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/utilities/answer_testing/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/utilities/answer_testing/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('answer_testing',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/utilities/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/utilities/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -184,5 +184,5 @@
         define_macros = [("HAVE_XLOCALE_H", True)]
         )
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/visualization/image_panner/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/visualization/image_panner/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -8,5 +8,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('image_panner',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


--- a/yt/visualization/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/visualization/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -7,6 +7,6 @@
     config.add_subpackage("image_panner")
     config.add_subpackage("volume_rendering")
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     config.add_extension("_MPL", "_MPL.c", libraries=["m"])
     return config


--- a/yt/visualization/volume_rendering/setup.py	Thu Jun 23 22:28:01 2011 -0600
+++ b/yt/visualization/volume_rendering/setup.py	Mon Jul 25 08:52:28 2011 -0400
@@ -10,5 +10,5 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('volume_rendering',parent_package,top_path)
     config.make_config_py() # installs __config__.py
-    config.make_svn_version_py()
+    #config.make_svn_version_py()
     return config


http://bitbucket.org/yt_analysis/yt/changeset/12fc325ddf8d/
changeset:   12fc325ddf8d
branch:      yt
user:        MatthewTurk
date:        2011-07-25 14:53:12
summary:     Merging
affected #:  11 files (11.9 KB)

--- a/yt/analysis_modules/halo_profiler/halo_filters.py	Mon Jul 25 08:52:28 2011 -0400
+++ b/yt/analysis_modules/halo_profiler/halo_filters.py	Mon Jul 25 08:53:12 2011 -0400
@@ -33,30 +33,51 @@
                  virial_filters=[['TotalMassMsun', '>=','1e14']],
                  virial_quantities=['TotalMassMsun', 'RadiusMpc'],
                  virial_index=None, use_log=False):
-    """
-    Filter halos by virial quantities.
+    r"""Filter halos by virial quantities.
+    
     Return values are a True or False whether the halo passed the filter, 
     along with a dictionary of virial quantities for the fields specified in 
     the virial_quantities keyword.  Thresholds for virial quantities are 
-    given with the virial_filters keyword in the following way: 
+    given with the virial_filters keyword in the following way:
     [field, condition, value].
-    :param: overdensity_field (str): the field used for interpolation with the 
-    specified critical value given with 'virial_overdensity'.  
-    Default: 'ActualOverdensity'.
-    :param: virial_overdensity (flt): the value used for interpolation.  
-    Default: 200.[['TotalMassMsun', '>=','1e14']]
-    :param: must_be_virialized (bool): if no values in the profile are above the 
-    value of virial_overdensity, the halo does not pass the filter.  
-    Default: True.
-    :param: virial_filters (list): conditional filters based on virial quantities 
-    given in the following way: [field, condition, value].  
-    Default: [['TotalMassMsun', '>=','1e14']].
-    :param: virial_quantities (list): fields for which interpolated values should 
-    be calculated and returned.  Default: ['TotalMassMsun', 'RadiusMpc'].
-    :param: virial_index (list): if given as a list, the index of the radial profile 
-    which is used for interpolation is placed here.  Default: None.
-    :param: use_log (bool): if True, interpolation is done in log space.  
-    Default: False.
+    
+    This is typically used as part of a call to `add_halo_filter`.
+    
+    Parameters
+    ----------
+    overdensity_field : string
+        The field used for interpolation with the 
+        specified critical value given with 'virial_overdensity'.  
+        Default: 'ActualOverdensity'.
+    virial_overdensity : float
+        The value used to determine the outer radius of the virialized halo.
+        Default: 200.
+    must_be_virialized : bool
+        If no values in the profile are above the 
+        value of virial_overdensity, the halo does not pass the filter.  
+        Default: True.
+    virial_filters : array_like
+        Conditional filters based on virial quantities 
+        given in the following way: [field, condition, value].  
+        Default: [['TotalMassMsun', '>=','1e14']].
+    virial_quantities : array_like
+        Fields for which interpolated values should 
+        be calculated and returned.  Default: ['TotalMassMsun', 'RadiusMpc'].
+    virial_index : array_like
+        If given as a list, the index of the radial profile 
+        which is used for interpolation is placed here.  Default: None.
+    use_log : bool
+        If True, interpolation is done in log space.  
+        Default: False.
+    
+    Examples
+    --------
+    >>> hp.add_halo_filter(HP.VirialFilter, must_be_virialized=True,
+                   overdensity_field='ActualOverdensity',
+                   virial_overdensity=200,
+                   virial_filters=[['TotalMassMsun','>=','1e14']],
+                   virial_quantities=['TotalMassMsun','RadiusMpc'])
+    
     """
 
     fields = deepcopy(virial_quantities)


--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Mon Jul 25 08:52:28 2011 -0400
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Mon Jul 25 08:53:12 2011 -0400
@@ -68,53 +68,101 @@
                  projection_width=8.0, projection_width_units='mpc', project_at_level='max',
                  velocity_center=['bulk', 'halo'], filter_quantities=['id','center'], 
                  use_critical_density=False):
-        """
-        Initialize a HaloProfiler object.
-        :param output_dir (str): if specified, all output will be put into this path instead of 
-               in the dataset directories.  Default: None.
-        :param halos (str): "multiple" for profiling more than one halo.  In this mode halos are read in 
-               from a list or identified with a halo finder.  In "single" mode, the one and only halo 
-               center is identified automatically as the location of the peak in the density field.  
-               Default: "multiple".
-        :param halo_list_file (str): name of file containing the list of halos.  The HaloProfiler will 
-               look for this file in the data directory.  Default: "HopAnalysis.out".
-        :param halo_list_format (str or dict): the format of the halo list file.  "yt_hop" for the format 
-               given by yt's halo finders.  "enzo_hop" for the format written by enzo_hop.  "p-groupfinder" 
-               for P-Groupfinder.  This keyword 
-               can also be given in the form of a dictionary specifying the column in which various 
-               properties can be found.  For example, {"id": 0, "center": [1, 2, 3], "mass": 4, "radius": 5}.  
-               Default: "yt_hop".
-        :param halo_finder_function (function): If halos is set to multiple and the file given by 
-               halo_list_file does not exit, the halo finding function specified here will be called.  
-               Default: HaloFinder (yt_hop).
-        :param halo_finder_args (tuple): args given with call to halo finder function.  Default: None.
-        :param halo_finder_kwargs (dict): kwargs given with call to halo finder function. Default: None.
-        :param recenter (str or function name): The name of a function that
-               recenters the halo.
-        :param halo_radius (float): if no halo radii are provided in the halo list file, this parameter is 
-               used to specify the radius out to which radial profiles will be made.  This keyword is also 
-               used when halos is set to single.  Default: 0.1.
-        :param radius_units (str): the units of halo_radius.  Default: "1" (code units).
-        :param n_profile_bins (int): the number of bins in the radial profiles.  Default: 50.
-        :param profile_output_dir (str): the subdirectory, inside the data directory, in which radial profile 
-               output files will be created.  The directory will be created if it does not exist.  
-               Default: "radial_profiles".
-        :param projection_output_dir (str): the subdirectory, inside the data directory, in which projection 
-               output files will be created.  The directory will be created if it does not exist.  
-               Default: "projections".
-        :param projection_width (float): the width of halo projections.  Default: 8.0.
-        :param projection_width_units (str): the units of projection_width. Default: "mpc".
-        :param project_at_level (int or "max"): the maximum refinement level to be included in projections.  
-               Default: "max" (maximum level within the dataset).
-        :param velocity_center (list): the method in which the halo bulk velocity is calculated (used for 
-               calculation of radial and tangential velocities.  Valid options are:
-     	          - ["bulk", "halo"] (Default): the velocity provided in the halo list
-                  - ["bulk", "sphere"]: the bulk velocity of the sphere centered on the halo center.
-    	          - ["max", field]: the velocity of the cell that is the location of the maximum of the field 
-                                    specified (used only when halos set to single).
-        :param filter_quantities (list): quantities from the original halo list file to be written out in the 
-               filtered list file.  Default: ['id','center'].
-        :param use_critical_density (bool): if True, the definition of overdensity for virial quantities is calculated with respect to the critical density.  If False, overdensity is with respect to mean matter density, which is lower by a factor of Omega_M.  Default: False.
+        r"""Initialize a Halo Profiler object.
+        
+        In order to run the halo profiler, the Halo Profiler object must be
+        instantiated. At the minimum, the path to a parameter file
+        must be provided as the first term.
+        
+        Parameters
+        ----------
+        
+        dataset : string, required
+            The path to the parameter file for the dataset to be analyzed.
+        output_dir : string, optional
+            If specified, all output will be put into this path instead of 
+            in the dataset directories.  Default: None.
+        halos : {"multiple", "single"}, optional
+            "multiple" for profiling more than one halo.  In this mode halos are read in 
+            from a list or identified with a halo finder.  In "single" mode,
+            the one and only halo 
+            center is identified automatically as the location of the peak
+            in the density field.  
+            Default: "multiple".
+        halo_list_file : string, optional
+            The name of a file containing the list of halos.  The HaloProfiler
+            will look for this file in the data directory.
+            Default: "HopAnalysis.out".
+        halo_list_format : {string, dict}
+            The format of the halo list file.  "yt_hop" for the format 
+            given by yt's halo finders.  "enzo_hop" for the format written
+            by enzo_hop. "p-groupfinder"  for P-Groupfinder.  This keyword 
+            can also be given in the form of a dictionary specifying the
+            column in which various properties can be found.
+            For example, {"id": 0, "center": [1, 2, 3], "mass": 4, "radius": 5}.
+            Default: "yt_hop".
+        halo_finder_function : function
+            If halos is set to multiple and the file given by 
+            halo_list_file does not exist, the halo finding function
+            specified here will be called.  
+            Default: HaloFinder (yt_hop).
+        halo_finder_args : tuple
+            Args given with call to halo finder function.  Default: None.
+        halo_finder_kwargs : dict
+            kwargs given with call to halo finder function. Default: None.
+        recenter : {string, function}
+            The name of a function that recenters the halo for analysis.
+            Default: None.
+        halo_radius : float
+            If no halo radii are provided in the halo list file, this
+            parameter is used to specify the radius out to which radial
+            profiles will be made.  This keyword is also 
+            used when halos is set to single.  Default: 0.1.
+        radius_units : string
+            The units of halo_radius.  Default: "1" (code units).
+        n_profile_bins : int
+            The number of bins in the radial profiles.  Default: 50.
+        profile_output_dir : string
+            The subdirectory, inside the data directory, in which radial profile 
+            output files will be created.
+            The directory will be created if it does not exist.  
+            Default: "radial_profiles".
+        projection_output_dir : string
+            The subdirectory, inside the data directory, in which projection 
+            output files will be created.
+            The directory will be created if it does not exist.  
+            Default: "projections".
+        projection_width : float
+            The width of halo projections.  Default: 8.0.
+        projection_width_units : string
+            The units of projection_width. Default: "mpc".
+        project_at_level : {"max", int}
+            The maximum refinement level to be included in projections.  
+            Default: "max" (maximum level within the dataset).
+        velocity_center : array_like
+            The method in which the halo bulk velocity is calculated (used for 
+            calculation of radial and tangential velocities).  Valid options are:
+                * ["bulk", "halo"] (Default): the velocity provided in
+                  the halo list.
+                * ["bulk", "sphere"]: the bulk velocity of the sphere
+                  centered on the halo center.
+                * ["max", field]: the velocity of the cell that is the
+                  location of the maximum of the field 
+                  specified (used only when halos is set to single).
+        filter_quantities : array_like
+            Quantities from the original halo list file to be written out in the 
+            filtered list file.  Default: ['id','center'].
+        use_critical_density : bool
+            If True, the definition of overdensity for virial quantities
+            is calculated with respect to the critical density.
+            If False, overdensity is with respect to mean matter density,
+            which is lower by a factor of Omega_M.  Default: False.
+        
+        Examples
+        --------
+        >>> import yt.analysis_modules.halo_profiler.api as HP
+        >>> hp = HP.HaloProfiler("DD0242/DD0242")
+        
         """
 
         self.dataset = dataset
@@ -239,24 +287,126 @@
             return None
 
     def add_halo_filter(self, function, *args, **kwargs):
-        "Add a halo filter to the filter list."
+        r"""Add a halo filter to the filter list.
+        
+        Filters can be added to create a refined list of halos based on
+        their profiles or to avoid profiling halos altogether based on
+        information given in the halo list file.
+        
+        It is often the case that one is looking to identify halos with a
+        specific set of properties. This can be accomplished through the
+        creation of filter functions. A filter function can take as many args
+        and kwargs as you like, as long as the first argument is a profile
+        object, or at least a dictionary which contains the profile arrays
+        for each field. Filter functions must return a list of two things.
+        The first is a True or False indicating whether the halo passed the
+        filter. The second is a dictionary containing quantities calculated 
+        for that halo that will be written to a file if the halo passes the
+        filter. A sample filter function based on virial quantities can be
+        found in yt/analysis_modules/halo_profiler/halo_filters.py.
+        
+        Parameters
+        ----------
+        function : function
+            The name of a halo filter function.
+        args : values
+            Arguments passed to the halo filter function.
+        kwargs : values
+            Keyword arguments passed to the halo filter function.
+        
+        Examples
+        --------
+        >>> hp.add_halo_filter(HP.VirialFilter, must_be_virialized=True,
+                overdensity_field='ActualOverdensity',
+                virial_overdensity=200,
+                virial_filters=[['TotalMassMsun','>=','1e14']],
+                virial_quantities=['TotalMassMsun','RadiusMpc'])
+        
+        """
 
         self._halo_filters.append({'function':function, 'args':args, 'kwargs':kwargs})
 
     def add_profile(self, field, weight_field=None, accumulation=False):
-        "Add a field for profiling."
+        r"""Add a field for profiling.
+        
+        Once the halo profiler object has been instantiated,
+        fields can be added for profiling using this function. This function
+        may be called multiple times, once per field to be added.
+        
+        Parameters
+        ----------
+        field : string
+            The name of the field.
+        weight_field : {None, string}, optional
+            The field that will be used to weight the field `field` when
+            the radial binning is done. Default: None.
+        accumulation : bool
+            Whether or not the `field` values should be accumulated with
+            increasing radius in the profile. Default: False.
+        
+        Examples
+        --------
+        >>> hp.add_profile('CellVolume', weight_field=None, accumulation=True)
+        >>> hp.add_profile('TotalMassMsun', weight_field=None, accumulation=True)
+        >>> hp.add_profile('Density', weight_field=None, accumulation=False)
+        >>> hp.add_profile('Temperature', weight_field='CellMassMsun', accumulation=False)
+            
+        """
 
         self.profile_fields.append({'field':field, 'weight_field':weight_field, 'accumulation':accumulation})
 
     def add_projection(self, field, weight_field=None, cmap='algae'):
-        "Add a field for projection."
+        r"""Make a projection of the specified field.
+        
+        For the given field, a projection will be produced that can be saved
+        to HDF5 or image format. See `make_projections`.
+        
+        Parameters
+        ----------
+        field : string
+            The name of the field.
+        weight_field : string
+            The field that will be used to weight the field `field` when
+            the projection is done. Default: None.
+        cmap : string
+            The name of the matplotlib color map that will be used if an
+            image is made from the projection. Default: "algae".
+        
+        Examples
+        --------
+        >>> hp.add_projection('Density', weight_field=None)
+        >>> hp.add_projection('Temperature', weight_field='Density')
+        >>> hp.add_projection('Metallicity', weight_field='Density')
+
+        """
 
         self.projection_fields.append({'field':field, 'weight_field':weight_field, 
                                        'cmap': cmap})
 
     @parallel_blocking_call
     def make_profiles(self, filename=None, prefilters=None, **kwargs):
-        "Make radial profiles for all halos on the list."
+        r"""Make radial profiles for all halos in the list.
+        
+        After all the calls to `add_profile`, this will trigger the actual
+        calculations and output the profiles to disk.
+        
+        Parameters
+        ----------
+        filename : string
+            If set, a file will be written with all of the filtered halos
+            and the quantities returned by the filter functions.
+            Default: None.
+        prefilters : array_like
+            A single dataset can contain thousands or tens of thousands of
+            halos. Significant time can be saved by not profiling halos
+            that are certain to not pass any filter functions in place.
+            Simple filters based on quantities provided in the initial
+            halo list can be used to filter out unwanted halos using this
+            parameter.
+        
+        Examples
+        --------
+        >>> hp.make_profiles(filename="FilteredQuantities.out",
+                 prefilters=["halo['mass'] > 1e13"])
+        
+        """
 
         if len(self.all_halos) == 0:
             mylog.error("Halo list is empty, returning.")
@@ -354,9 +504,9 @@
         if filename is not None:
             self._write_filtered_halo_list(filename, **kwargs)
 
-    def _get_halo_profile(self, halo, filename, virial_filter=True, force_write=False):
-        """
-        Profile a single halo and write profile data to a file.
+    def _get_halo_profile(self, halo, filename, virial_filter=True,
+            force_write=False):
+        """Profile a single halo and write profile data to a file.
         If file already exists, read profile data from file.
         Return a dictionary of id, center, and virial quantities if virial_filter is True.
         """
@@ -455,8 +605,34 @@
         return profile
 
     @parallel_blocking_call
-    def make_projections(self, axes=[0, 1, 2], halo_list='filtered', save_images=False, save_cube=True):
-        "Make projections of all halos using specified fields."
+    def make_projections(self, axes=[0, 1, 2], halo_list='filtered',
+            save_images=False, save_cube=True):
+        r"""Make projections of all halos using specified fields.
+        
+        After adding fields using `add_projection`, this starts the actual
+        calculations and saves the output to disk.
+        
+        Parameters
+        ----------
+        axes : array_like
+            A list of the axes to project along, using the usual 0,1,2
+            convention. Default: [0, 1, 2].
+        halo_list : {'filtered', 'all'}
+            Which set of halos to make projections of, either ones passed by
+            the halo filters (if enabled/added), or all halos.
+            Default: 'filtered'.
+        save_images : bool
+            Whether or not to save images of the projections. Default: False.
+        save_cube : bool
+            Whether or not to save the HDF5 files of the halo projections.
+            Default: True.
+        
+        Examples
+        --------
+        >>> hp.make_projections(axes=[0, 1, 2], save_cube=True,
+            save_images=True, halo_list="filtered")
+        
+        """
 
         # Get list of halos for projecting.
         if halo_list == 'filtered':


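The add_halo_filter docstring above spells out the filter-function contract:
the first argument is a profile object (or a dictionary of profile arrays),
and the return value is a two-element list of a pass/fail boolean and a
dictionary of quantities to write out. A minimal sketch obeying that
contract (MinimumMassFilter is a hypothetical name; VirialFilter in
halo_filters.py is the real in-tree example):

def MinimumMassFilter(profile, mass_field='TotalMassMsun', min_mass=1e14):
    # Take the outermost bin of an accumulated mass profile as the halo mass.
    halo_mass = profile[mass_field][-1]
    passed = halo_mass >= min_mass
    # Quantities in this dict are written to the filtered halo list
    # for halos that pass.
    return [passed, {mass_field: halo_mass}]

It would be registered like any other filter:

>>> hp.add_halo_filter(MinimumMassFilter, min_mass=1e14)
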
--- a/yt/analysis_modules/simulation_handler/enzo_simulation.py	Mon Jul 25 08:52:28 2011 -0400
+++ b/yt/analysis_modules/simulation_handler/enzo_simulation.py	Mon Jul 25 08:53:12 2011 -0400
@@ -39,36 +39,62 @@
     load
 
 class EnzoSimulation(object):
-    """
-    Super class for performing the same operation over all data dumps in 
+    r"""Super class for performing the same operation over all data dumps in 
     a simulation from one redshift to another.
     """
     def __init__(self, enzo_parameter_file, initial_time=None, final_time=None, initial_redshift=None, final_redshift=None,
                  links=False, enzo_parameters=None, get_time_outputs=True, get_redshift_outputs=True, get_available_data=False,
                  get_data_by_force=False):
-        """
-        Initialize an EnzoSimulation object.
-        :param initial_time (float): the initial time in code units for the dataset list.  Default: None.
-        :param final_time (float): the final time in code units for the dataset list.  Default: None.
-        :param initial_redshift (float): the initial (highest) redshift for the dataset list.  Only for 
-               cosmological simulations.  Default: None.
-        :param final_redshift (float): the final (lowest) redshift for the dataset list.  Only for cosmological 
-               simulations.  Default: None.
-        :param links (bool): if True, each entry in the dataset list will contain entries, previous and next, that 
-               point to the previous and next entries on the dataset list.  Default: False.
-        :param enzo_parameters (dict): a dictionary specify additional parameters to be retrieved from the 
-               parameter file.  The format should be the name of the parameter as the key and the variable type as 
-               the value.  For example, {'CosmologyComovingBoxSize':float}.  All parameter values will be stored in 
-               the dictionary attribute, enzoParameters.  Default: None.
-        :param get_time_outputs (bool): if False, the time datasets, specified in Enzo with the dtDataDump, will not 
-               be added to the dataset list.  Default: True.
-        :param get_redshift_outputs (bool): if False, the redshift datasets will not be added to the dataset list.  Default: True.
-        :param get_available_data (bool): if True, only datasets that are found to exist at the file path are added 
-               to the list.  Default: False.
-        :param get_data_by_force (bool): if True, time data dumps are not calculated using dtDataDump.  Instead, the 
-               the working directory is searched for directories that match the datadumpname keyword.  Each dataset 
-               is loaded up to get the time and redshift manually.  This is useful with collapse simulations that use 
-               OutputFirstTimeAtLevel or with simulations that make outputs based on cycle numbers.  Default: False.
+        r"""Initialize an Enzo Simulation object.
+        
+        Parameters
+        ----------
+        enzo_parameter_file : string
+            The path to the parameter file of the simulation to be handled.
+        initial_time : float
+            The initial time in code units for the dataset list.  Default: None.
+        final_time : float
+            The final time in code units for the dataset list.  Default: None.
+        initial_redshift : float
+            The initial (highest) redshift for the dataset list. Only for 
+            cosmological simulations.  Default: None.
+        final_redshift : float
+            The final (lowest) redshift for the dataset list.
+            Only for cosmological 
+            simulations.  Default: None.
+        links : bool
+            If True, each entry in the dataset list will contain entries,
+            previous and next, that 
+            point to the previous and next entries on the dataset list.
+            Default: False.
+        enzo_parameters : dict
+            A dictionary specifying additional parameters to be retrieved from the 
+            parameter file.  The format should be the name of the parameter
+            as the key and the variable type as 
+            the value.  For example, {'CosmologyComovingBoxSize':float}.
+            All parameter values will be stored in 
+            the dictionary attribute, enzoParameters.  Default: None.
+        get_time_outputs : bool
+            If False, the time datasets, specified in Enzo with the dtDataDump,
+            will not be added to the dataset list.  Default: True.
+        get_redshift_outputs : bool
+            If False, the redshift datasets will not be added to the
+            dataset list.  Default: True.
+        get_available_data : bool
+            If True, only datasets that are found to exist at the
+            file path are added 
+            to the list.  Default: False.
+        get_data_by_force : bool
+            If True, time data dumps are not calculated using dtDataDump.
+            Instead, the working directory is searched for directories that match the
+            datadumpname keyword.  Each dataset 
+            is loaded up to get the time and redshift manually.
+            This is useful with collapse simulations that use 
+            OutputFirstTimeAtLevel or with simulations that make outputs based
+            on cycle numbers.  Default: False.
+        
+        Examples
+        --------
+        >>> import yt.analysis_modules.simulation_handler.api as ES
+        >>> es = ES.EnzoSimulation("my_simulation.par")
+
         """
         self.enzo_parameter_file = enzo_parameter_file
         self.enzoParameters = {}
@@ -331,16 +357,38 @@
 
     def imagine_minimal_splice(self, initial_redshift, final_redshift, decimals=3, filename=None, 
                                redshift_output_string='CosmologyOutputRedshift', start_index=0):
-        """
-        Create imaginary list of redshift outputs to maximally span a redshift interval.
-        :param decimals (int): The decimal place to which the output redshift will be rounded.  
-               If the decimal place in question is nonzero, the redshift will be rounded up to 
-               ensure continuity of the splice.  Default: 3.
-        :param filename (str): If provided, a file will be written with the redshift outputs in 
-               the form in which they should be given in the enzo parameter file.  Default: None.
-        :param redshift_output_string (str): The parameter accompanying the redshift outputs in the 
-               enzo parameter file.  Default: "CosmologyOutputRedshift".
-        :param start_index (int): The index of the first redshift output.  Default: 0.
+        r"""Create imaginary list of redshift outputs to maximally
+        span a redshift interval.
+        
+        If you want to run a cosmological simulation that will have just
+        enough data outputs to create a cosmology splice,
+        this method will calculate a list of redshift outputs that will
+        minimally connect a redshift interval.
+        
+        Parameters
+        ----------
+        decimals : int
+            The decimal place to which the output redshift will be rounded.  
+            If the decimal place in question is nonzero, the redshift will
+            be rounded up to 
+            ensure continuity of the splice.  Default: 3.
+        filename : string
+            If provided, a file will be written with the redshift outputs in 
+            the form in which they should be given in the enzo parameter file.
+            Default: None.
+        redshift_output_string : string
+            The parameter accompanying the redshift outputs in the 
+            enzo parameter file.  Default: "CosmologyOutputRedshift".
+        start_index : int
+            The index of the first redshift output.  Default: 0.
+        
+        Examples
+        --------
+        >>> initial_redshift = 0.4
+        >>> final_redshift = 0.0
+        >>> outputs = es.imagine_minimal_splice(initial_redshift, final_redshift,
+            filename='outputs.out')
+
         """
 
         z = initial_redshift
@@ -370,17 +418,39 @@
         return outputs
 
     def create_cosmology_splice(self, minimal=True, deltaz_min=0.0, initial_redshift=None, final_redshift=None):
-        """
-        Create list of datasets to be used for LightCones or LightRays.
-        :param minimal (bool): if True, the minimum number of datasets is used to connect the initial and final 
-               redshift.  If false, the list will contain as many entries as possible within the redshift 
-               interval.  Default: True.
-        :param deltaz_min (float): specifies the minimum delta z between consecutive datasets in the returned 
-               list.  Default: 0.0.
-        :param initial_redshift (float): the initial (highest) redshift in the cosmology splice list.  If none 
-               given, the highest redshift dataset present will be used.  Default: None.
-        :param final_redshift (float): the final (lowest) redshift in the cosmology splice list.  If none given, 
-               the lowest redshift dataset present will be used.  Default: None.
+        r"""Create list of datasets to be used for `LightCones` or `LightRays`.
+        
+        For cosmological simulations, the physical width of the simulation
+        box corresponds to some \Delta z, which varies with redshift.
+        Using this logic, one can stitch together a series of datasets to
+        create a continuous volume or length element from one redshift to
+        another. This method will return such a list.
+        
+        Parameters
+        ----------
+        minimal : bool
+            If True, the minimum number of datasets is used to connect the
+            initial and final redshift.  If False, the list will contain as
+            many entries as possible within the redshift 
+            interval.  Default: True.
+        deltaz_min : float
+            Specifies the minimum delta z between consecutive datasets
+            in the returned 
+            list.  Default: 0.0.
+        initial_redshift : float
+            The initial (highest) redshift in the cosmology splice list. If none 
+            given, the highest redshift dataset present will be used.
+            Default: None.
+        final_redshift : float
+            The final (lowest) redshift in the cosmology splice list.
+            If none given, 
+            the lowest redshift dataset present will be used.  Default: None.
+        
+        Examples
+        --------
+        >>> cosmo = es.create_cosmology_splice(minimal=True, deltaz_min=0.0,
+            initial_redshift=1.0, final_redshift=0.0)
+        
         """
 
         if initial_redshift is None: initial_redshift = self.initial_redshift
@@ -452,10 +522,20 @@
         return cosmology_splice
 
     def get_data_by_redshift(self, redshifts, tolerance=None):
-        """
-        : param redshifts: a list of redshifts.
-        : tolerance: if not None, do not return a dataset unless the redshift is within the tolerance value.
-        Get datasets for a list of redshifts.
+        r"""Get datasets at or near to given redshifts.
+        
+        Parameters
+        ----------
+        redshifts : array_like
+            A list of redshifts, given as floats.
+        tolerance : float
+            If not None, do not return a dataset unless the redshift is
+            within the tolerance value. Default: None.
+        
+        Examples
+        --------
+        >>> datasets = es.get_data_by_redshift([0, 1, 2], tolerance=0.1)
+        
         """
 
         redshifts = ensure_list(redshifts)
@@ -472,10 +552,20 @@
         return my_datasets
 
     def get_data_by_time(self, times, tolerance=None):
-        """
-        : param redshifts: a list of times.
-        : tolerance: if not None, do not return a dataset unless the redshift is within the tolerance value.
-        Get datasets for a list of times.
+        r"""Get datasets at or near to given times.
+        
+        Parameters
+        ----------
+        times : array_like
+            A list of times, given in code units as floats.
+        tolerance : float
+            If not None, do not return a dataset unless the time is
+            within the tolerance value. Default: None.
+        
+        Examples
+        --------
+        >>> datasets = es.get_data_by_time([600, 500, 400], tolerance=10.)
+        
         """
 
         times = ensure_list(times)
@@ -486,7 +576,7 @@
                     and self.allOutputs[0] not in my_datasets:
                 my_datasets.append(self.allOutputs[0])
             else:
-                mylog.error("No dataset added for z = %f." % my_time)
+                mylog.error("No dataset added for time = %f." % my_time)
 
         self.allOutputs.sort(key=lambda obj: obj['time'])
         return my_datasets


--- a/yt/data_objects/derived_quantities.py	Mon Jul 25 08:52:28 2011 -0400
+++ b/yt/data_objects/derived_quantities.py	Mon Jul 25 08:53:12 2011 -0400
@@ -663,7 +663,7 @@
     totals = []
     for field in fields:
         if data[field].size < 1:
-            totals.append(0)
+            totals.append(0.0)
             continue
         totals.append(data[field].sum())
     return len(fields), totals


--- a/yt/data_objects/universal_fields.py	Mon Jul 25 08:52:28 2011 -0400
+++ b/yt/data_objects/universal_fields.py	Mon Jul 25 08:53:12 2011 -0400
@@ -525,12 +525,14 @@
     ds = div_fac * data['dx'].flat[0]
     f  = data["x-velocity"][sl_right,1:-1,1:-1]/ds
     f -= data["x-velocity"][sl_left ,1:-1,1:-1]/ds
-    ds = div_fac * data['dy'].flat[0]
-    f += data["y-velocity"][1:-1,sl_right,1:-1]/ds
-    f -= data["y-velocity"][1:-1,sl_left ,1:-1]/ds
-    ds = div_fac * data['dz'].flat[0]
-    f += data["z-velocity"][1:-1,1:-1,sl_right]/ds
-    f -= data["z-velocity"][1:-1,1:-1,sl_left ]/ds
+    if data.pf.dimensionality > 1:
+        ds = div_fac * data['dy'].flat[0]
+        f += data["y-velocity"][1:-1,sl_right,1:-1]/ds
+        f -= data["y-velocity"][1:-1,sl_left ,1:-1]/ds
+    if data.pf.dimensionality > 2:
+        ds = div_fac * data['dz'].flat[0]
+        f += data["z-velocity"][1:-1,1:-1,sl_right]/ds
+        f -= data["z-velocity"][1:-1,1:-1,sl_left ]/ds
     new_field = na.zeros(data["x-velocity"].shape, dtype='float64')
     new_field[1:-1,1:-1,1:-1] = f
     return new_field
@@ -624,17 +626,17 @@
 def _AngularMomentumX(field, data):
     return data["CellMass"] * data["SpecificAngularMomentumX"]
 add_field("AngularMomentumX", function=_AngularMomentumX,
-         units=r"\rm{g}\/\rm{cm}^2/\rm{s}", vector_field=True,
+         units=r"\rm{g}\/\rm{cm}^2/\rm{s}", vector_field=False,
          validators=[ValidateParameter('center')])
 def _AngularMomentumY(field, data):
     return data["CellMass"] * data["SpecificAngularMomentumY"]
 add_field("AngularMomentumY", function=_AngularMomentumY,
-         units=r"\rm{g}\/\rm{cm}^2/\rm{s}", vector_field=True,
+         units=r"\rm{g}\/\rm{cm}^2/\rm{s}", vector_field=False,
          validators=[ValidateParameter('center')])
 def _AngularMomentumZ(field, data):
     return data["CellMass"] * data["SpecificAngularMomentumZ"]
 add_field("AngularMomentumZ", function=_AngularMomentumZ,
-         units=r"\rm{g}\/\rm{cm}^2/\rm{s}", vector_field=True,
+         units=r"\rm{g}\/\rm{cm}^2/\rm{s}", vector_field=False,
          validators=[ValidateParameter('center')])
 
 def _ParticleSpecificAngularMomentum(field, data):
@@ -930,3 +932,47 @@
                         ValidateDataField("By"),
                         ValidateDataField("Bz")])
 
+def _VorticitySquared(field, data):
+    mylog.debug("Generating vorticity on %s", data)
+    # We need to set up stencils
+    if data.pf["HydroMethod"] == 2:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(1,-1,None)
+        div_fac = 1.0
+    else:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(2,None,None)
+        div_fac = 2.0
+    new_field = na.zeros(data["x-velocity"].shape)
+    dvzdy = (data["z-velocity"][1:-1,sl_right,1:-1] -
+             data["z-velocity"][1:-1,sl_left,1:-1]) \
+             / (div_fac*data["dy"].flat[0])
+    dvydz = (data["y-velocity"][1:-1,1:-1,sl_right] -
+             data["y-velocity"][1:-1,1:-1,sl_left]) \
+             / (div_fac*data["dz"].flat[0])
+    new_field[1:-1,1:-1,1:-1] += (dvzdy - dvydz)**2.0
+    del dvzdy, dvydz
+    dvxdz = (data["x-velocity"][1:-1,1:-1,sl_right] -
+             data["x-velocity"][1:-1,1:-1,sl_left]) \
+             / (div_fac*data["dz"].flat[0])
+    dvzdx = (data["z-velocity"][sl_right,1:-1,1:-1] -
+             data["z-velocity"][sl_left,1:-1,1:-1]) \
+             / (div_fac*data["dx"].flat[0])
+    new_field[1:-1,1:-1,1:-1] += (dvxdz - dvzdx)**2.0
+    del dvxdz, dvzdx
+    dvydx = (data["y-velocity"][sl_right,1:-1,1:-1] -
+             data["y-velocity"][sl_left,1:-1,1:-1]) \
+             / (div_fac*data["dx"].flat[0])
+    dvxdy = (data["x-velocity"][1:-1,sl_right,1:-1] -
+             data["x-velocity"][1:-1,sl_left,1:-1]) \
+             / (div_fac*data["dy"].flat[0])
+    new_field[1:-1,1:-1,1:-1] += (dvydx - dvxdy)**2.0
+    del dvydx, dvxdy
+    new_field = na.abs(new_field)
+    return new_field
+def _convertVorticitySquared(data):
+    return data.convert("cm")**-2.0
+add_field("VorticitySquared", function=_VorticitySquared,
+          validators=[ValidateSpatial(1)],
+          units=r"\rm{s}^{-2}",
+          convert_function=_convertVorticitySquared)
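
The new field accumulates the squared magnitude of the curl of the
velocity, term by term:

    VorticitySquared = |curl v|^2
                     = (dvz/dy - dvy/dz)^2
                     + (dvx/dz - dvz/dx)^2
                     + (dvy/dx - dvx/dy)^2

Since the field is a sum of squares it is already non-negative, so the
trailing na.abs() is only a guard.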


--- a/yt/gui/reason/bottle_mods.py	Mon Jul 25 08:52:28 2011 -0400
+++ b/yt/gui/reason/bottle_mods.py	Mon Jul 25 08:53:12 2011 -0400
@@ -38,9 +38,6 @@
 route_watchers = []
 payloads = []
 
-orig_stdout = sys.stdout
-orig_stderr = sys.stderr
-
 def preroute(future_route, *args, **kwargs):
     def router(func):
         route_functions[future_route] = (args, kwargs, func)
@@ -153,8 +150,7 @@
             continue
             w._route_prefix = token
     repl.activate()
-    while not repl.execution_thread.queue.empty():
-        time.sleep(1)
+    repl.execution_thread.wait()
     print
     print
     print "============================================================================="
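
The sleep-and-poll loop is replaced with a blocking wait on the execution
thread; wait() is added in extdirect_repl.py below and delegates to
Queue.join(). Note the contract: Queue.join() only returns once every
put() item has been matched by a task_done() call in the consumer, which
the execution thread is presumed to do. A minimal Python 2 sketch of that
contract, with hypothetical names:

    import Queue      # Python 2 stdlib module
    import threading

    q = Queue.Queue()

    def worker():
        while True:
            task = q.get()
            try:
                task()          # run one queued unit of work
            finally:
                q.task_done()   # required, or q.join() blocks forever

    t = threading.Thread(target=worker)
    t.daemon = True
    t.start()
    for work in [lambda: None] * 5:
        q.put(work)
    q.join()                    # returns once all five tasks are done
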


--- a/yt/gui/reason/extdirect_repl.py	Mon Jul 25 08:52:28 2011 -0400
+++ b/yt/gui/reason/extdirect_repl.py	Mon Jul 25 08:53:12 2011 -0400
@@ -108,8 +108,8 @@
         while 1:
             #print "Checking for a queue ..."
             try:
-                task = self.queue.get(True, 10)
-            except (Queue.Full, Queue.Empty):
+                task = self.queue.get(True, 1)
+            except Queue.Empty:
                 if self.repl.stopped: return
                 continue
             #print "Received the task", task
@@ -122,9 +122,12 @@
                 new_code = self.repl._add_widget(
                     task['name'], task['widget_data_name'])
                 #print "Got this command:", new_code
-                self.repl.execute(new_code, hide=True)
+                self.execute_one(new_code, hide=True)
                 #print "Executed!"
 
+    def wait(self):
+        self.queue.join()
+
     def execute_one(self, code, hide):
         self.repl.executed_cell_texts.append(code)
 
@@ -135,13 +138,13 @@
             print "====================                ===================="
             print result
             print "========================================================"
-        if hide: return
-        self.repl.payload_handler.add_payload(
-            {'type': 'cell_results',
-             'output': result,
-             'input': highlighter(code),
-             'raw_input': code},
-            )
+        if not hide:
+            self.repl.payload_handler.add_payload(
+                {'type': 'cell_results',
+                 'output': result,
+                 'input': highlighter(code),
+                 'raw_input': code},
+                )
 
 def deliver_image(im):
     if hasattr(im, 'read'):
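
Two fixes in the task loop above: Queue.get() can raise Queue.Empty but
never Queue.Full, so the except clause is narrowed, and the timeout drops
from 10 s to 1 s so repl.stopped is re-checked every second rather than
once every ten. The widget path also now calls execute_one() directly
rather than going back through repl.execute(). A stoppable-consumer sketch
of the same loop shape (the class name is hypothetical):

    import Queue
    import threading

    class StoppableConsumer(threading.Thread):
        def __init__(self, queue):
            threading.Thread.__init__(self)
            self.queue = queue
            self.stopped = False

        def run(self):
            while True:
                try:
                    # A short timeout bounds how long a stop request
                    # can go unnoticed.
                    task = self.queue.get(True, 1)
                except Queue.Empty:
                    if self.stopped:
                        return
                    continue
                try:
                    task()
                finally:
                    self.queue.task_done()  # keeps Queue.join() usable
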


--- a/yt/utilities/amr_kdtree/amr_kdtree.py	Mon Jul 25 08:52:28 2011 -0400
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py	Mon Jul 25 08:53:12 2011 -0400
@@ -813,8 +813,8 @@
         current_node.r_corner = r_corner
         # current_node.owner = my_rank
         current_node.id = 0
-        par_tree_depth = long(na.log2(nprocs))
-
+        par_tree_depth = int(na.log2(nprocs))
+        anprocs = 2**par_tree_depth
         while current_node is not None:
             # If we don't have any grids, that means we are revisiting
             # a dividing node, and there is nothing to be done.
@@ -825,8 +825,8 @@
 
             # This is where all the domain decomposition occurs.  
             if ((current_node.id + 1)>>par_tree_depth) == 1:
-                # There are nprocs nodes that meet this criteria
-                if (current_node.id+1-nprocs) is my_rank:
+                # There are anprocs nodes that meet this criteria
+                if (current_node.id+1-anprocs) is my_rank:
                     # I own this shared node
                     self.my_l_corner = current_node.l_corner
                     self.my_r_corner = current_node.r_corner
@@ -973,11 +973,11 @@
         if image is not None:
             self.image = image
         rounds = int(na.log2(nprocs))
-
+        anprocs = 2**rounds
         my_node = tree
         my_node_id = 0
         my_node.owner = 0
-        path = na.binary_repr(nprocs+my_rank)
+        path = na.binary_repr(anprocs+my_rank)
         for i in range(rounds):
             my_node.left_child.owner = my_node.owner
             my_node.right_child.owner = my_node.owner + 2**(rounds-(i+1))
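
int(na.log2(nprocs)) truncates, so anprocs = 2**par_tree_depth is the
largest power of two not exceeding the process count; the dividing level
of the binary decomposition tree holds exactly anprocs nodes, which is why
the ownership test and the binary_repr() path now use anprocs instead of
nprocs. (Incidentally, `(current_node.id+1-anprocs) is my_rank` compares
by object identity and only behaves like == because CPython caches small
integers.) A sketch of the arithmetic, with an illustrative helper name:

    import numpy as na

    def dividing_level(nprocs):
        # Depth of the last complete level of the binary tree, and the
        # number of shared nodes on it: the largest power of two that
        # does not exceed the MPI process count.
        par_tree_depth = int(na.log2(nprocs))
        anprocs = 2 ** par_tree_depth
        return par_tree_depth, anprocs

    dividing_level(6)   # -> (2, 4): with 6 ranks, 4 nodes are assignable
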


--- a/yt/utilities/data_point_utilities.c	Mon Jul 25 08:52:28 2011 -0400
+++ b/yt/utilities/data_point_utilities.c	Mon Jul 25 08:53:12 2011 -0400
@@ -1,5 +1,5 @@
 /************************************************************************
-* Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
+* Copyright (C) 2007-2009 Matthew Turk.  All Rights Reserved.
 *
 * This file is part of yt.
 *
@@ -880,7 +880,7 @@
     npy_int64 gxs, gys, gzs, gxe, gye, gze;
     npy_int64 cxs, cys, czs, cxe, cye, cze;
     npy_int64 ixs, iys, izs, ixe, iye, ize;
-    int gxi, gyi, gzi, cxi, cyi, czi;
+    npy_int64 gxi, gyi, gzi, cxi, cyi, czi;
     npy_int64 cdx, cdy, cdz;
     npy_int64 dw[3];
     int i;
@@ -1014,17 +1014,17 @@
         ci = (cxi % dw[0]);
         ci = (ci < 0) ? ci + dw[0] : ci;
         if ( ci < gxs*refratio || ci >= gxe*refratio) continue;
-        gxi = ((int) (ci / refratio)) - gxs;
+        gxi = floor(ci / refratio) - gxs;
         for(cyi=cys;cyi<=cye;cyi++) {
             cj = cyi % dw[1];
             cj = (cj < 0) ? cj + dw[1] : cj;
             if ( cj < gys*refratio || cj >= gye*refratio) continue;
-            gyi = ((int) (cj / refratio)) - gys;
+            gyi = floor(cj / refratio) - gys;
             for(czi=czs;czi<=cze;czi++) {
                 ck = czi % dw[2];
                 ck = (ck < 0) ? ck + dw[2] : ck;
                 if ( ck < gzs*refratio || ck >= gze*refratio) continue;
-                gzi = ((int) (ck / refratio)) - gzs;
+                gzi = floor(ck / refratio) - gzs;
                     if ((ll) || (*(npy_int32*)PyArray_GETPTR3(mask, gxi,gyi,gzi) > 0)) 
                 {
                 for(n=0;n<n_fields;n++){
@@ -1214,75 +1214,43 @@
     cye = (cys + cdy - 1);
     cze = (czs + cdz - 1);
 
+    /* It turns out that C89 doesn't define a mechanism for choosing the sign
+       of the remainder.
+    */
     int x_loc, y_loc; // For access into the buffer
-
-    /* We check here if the domain is important or not.
-       If it's not, then, well, we get to use the fast version. */
-    if (dw[0] == dw[1] == dw[2] == 0) {
-      for(gxi=gxs,cxi=gxs*refratio;gxi<gxe;gxi++,cxi+=refratio) {
-        for(gyi=gys,cyi=gys*refratio;gyi<gye;gyi++,cyi+=refratio) {
-          for(gzi=gzs,czi=gzs*refratio;gzi<gze;gzi++,czi+=refratio) {
-            if ((refratio!=1) &&
-                (*(npy_int32*)PyArray_GETPTR3(mask, gxi,gyi,gzi)==0)) continue;
-            switch (axis) {
-              case 0: x_loc = cyi-cys; y_loc = czi-czs; break;
-              case 1: x_loc = cxi-cxs; y_loc = czi-czs; break;
-              case 2: x_loc = cxi-cys; y_loc = cyi-cys; break;
-            }
-            //fprintf(stderr, "%d %d %d %d %d\n", x_loc, y_loc, gxi, gyi, gzi);
-            for(ri=0;ri<refratio;ri++){
-              for(rj=0;rj<refratio;rj++){
-                for(n=0;n<n_fields;n++){
-                  for(n=0;n<n_fields;n++){
-                    *(npy_float64*) PyArray_GETPTR2(c_data[n], x_loc+ri, y_loc+rj)
-                      +=  *(npy_float64*) PyArray_GETPTR3(g_data[n],
-                          gxi-gxs, gyi-gys, gzi-gzs) * dls[n];
-                  }
-                }
-              }
-            }
-            total+=1;
-          }
-        }
-      }
-    } else {
-      /* Gotta go the slow route. */
-      for(cxi=gxs*refratio;cxi<=cxe;cxi++) {
-        /* It turns out that C89 doesn't define a mechanism for choosing the sign
-           of the remainder.
-         */
+    for(cxi=cxs;cxi<=cxe;cxi++) {
         ci = (cxi % dw[0]);
         ci = (ci < 0) ? ci + dw[0] : ci;
-        if ( ci >= gxe*refratio) break;
+        if ( ci < gxs*refratio || ci >= gxe*refratio) continue;
         gxi = floor(ci / refratio) - gxs;
-        for(cyi=gys*refratio;cyi<=cye;cyi++) {
-          cj = cyi % dw[1];
-          cj = (cj < 0) ? cj + dw[1] : cj;
-          if ( cj >= gye*refratio) break;
-          gyi = floor(cj / refratio) - gys;
-          for(czi=gzs*refratio;czi<=cze;czi++) {
-            ck = czi % dw[2];
-            ck = (ck < 0) ? ck + dw[2] : ck;
-            if ( ck >= gze*refratio) break;
-            gzi = floor(ck / refratio) - gzs;
-            if (refratio == 1 || *(npy_int32*)PyArray_GETPTR3(mask, gxi,gyi,gzi) > 0)
-            {
-              switch (axis) {
-                case 0: x_loc = cyi-cys; y_loc = czi-czs; break;
-                case 1: x_loc = cxi-cxs; y_loc = czi-czs; break;
-                case 2: x_loc = cxi-cys; y_loc = cyi-cys; break;
-              }
-              for(n=0;n<n_fields;n++){
-                *(npy_float64*) PyArray_GETPTR2(c_data[n], x_loc, y_loc)
-                  +=  *(npy_float64*) PyArray_GETPTR3(g_data[n], gxi, gyi, gzi) 
-                  * dls[n] / refratio;
-              }
-              total += 1;
+        for(cyi=cys;cyi<=cye;cyi++) {
+            cj = cyi % dw[1];
+            cj = (cj < 0) ? cj + dw[1] : cj;
+            if ( cj < gys*refratio || cj >= gye*refratio) continue;
+            gyi = floor(cj / refratio) - gys;
+            for(czi=czs;czi<=cze;czi++) {
+                ck = czi % dw[2];
+                ck = (ck < 0) ? ck + dw[2] : ck;
+                if ( ck < gzs*refratio || ck >= gze*refratio) continue;
+                gzi = floor(ck / refratio) - gzs;
+                    if (refratio == 1 || *(npy_int32*)PyArray_GETPTR3(mask, gxi,gyi,gzi) > 0)
+                {
+                switch (axis) {
+                  case 0: x_loc = cyi-cys; y_loc = czi-czs; break;
+                  case 1: x_loc = cxi-cxs; y_loc = czi-czs; break;
+                  case 2: x_loc = cxi-cys; y_loc = cyi-cys; break;
+                }
+                for(n=0;n<n_fields;n++){
+                    *(npy_float64*) PyArray_GETPTR2(c_data[n], x_loc, y_loc)
+                    +=  *(npy_float64*) PyArray_GETPTR3(g_data[n], gxi, gyi, gzi) 
+                        * dls[n] / refratio;
+                }
+                total += 1;
+                }
             }
-          }
         }
-      }
     }
+
     Py_DECREF(g_start);
     Py_DECREF(c_start);
     Py_DECREF(g_dims);
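
The special-cased "fast" path is removed, so every call now takes the
single periodicity-aware loop. The removed guard was itself broken: in C,
dw[0] == dw[1] == dw[2] == 0 parses as ((dw[0] == dw[1]) == dw[2]) == 0
rather than a three-way equality test, and the fast path also duplicated
its innermost for(n=...) loop. The surviving comment about C89 refers to
the sign of the % operator being implementation-defined for negative
operands, which the conditional add normalizes. The same pattern, mirrored
in Python as a hypothetical helper:

    def wrap_index(i, n):
        # C89 leaves the sign of i % n implementation-defined when i is
        # negative, so the C code bumps the remainder back into [0, n).
        # Python's % already returns a value in [0, n) for n > 0, so the
        # branch below never fires here; it mirrors the C fix-up.
        r = i % n
        if r < 0:
            r += n
        return r
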


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Mon Jul 25 08:52:28 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Mon Jul 25 08:53:12 2011 -0400
@@ -1067,7 +1067,7 @@
                 ncols, size = data.shape
         ncols = MPI.COMM_WORLD.allreduce(ncols, op=MPI.MAX)
         if size == 0:
-            data = na.empty((ncols,0), dtype='float64') # This only works for
+            data = na.zeros((ncols,0), dtype='float64') # This only works for
         size = data.shape[-1]
         sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
         outsize = na.array(size, dtype='int64')
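
With zero columns, na.empty((ncols, 0)) and na.zeros((ncols, 0)) hold no
elements at all, so this is a defensive change rather than a behavioral
one: zeros() guarantees nothing uninitialized is ever read if the
zero-width assumption is later violated. The placeholder still
concatenates cleanly with real data gathered from other ranks:

    import numpy as na

    ncols = 4
    local = na.zeros((ncols, 0), dtype='float64')  # this rank sends nothing
    other = na.ones((ncols, 3), dtype='float64')   # data from another rank
    na.concatenate([local, other], axis=1).shape   # -> (4, 3)
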


--- a/yt/visualization/fixed_resolution.py	Mon Jul 25 08:52:28 2011 -0400
+++ b/yt/visualization/fixed_resolution.py	Mon Jul 25 08:53:12 2011 -0400
@@ -29,6 +29,7 @@
     y_dict, \
     axis_names
 import _MPL
+import numpy as na
 
 class FixedResolutionBuffer(object):
     def __init__(self, data_source, bounds, buff_size, antialias = True,
@@ -96,15 +97,16 @@
         self.periodic = periodic
 
         # Handle periodicity, just in case
-        DLE = self.pf.domain_left_edge
-        DRE = self.pf.domain_right_edge
-        DD = float(self.periodic)*(DRE - DLE)
-        axis = self.data_source.axis
-        xax = x_dict[axis]
-        yax = y_dict[axis]
-        self._period = (DD[xax], DD[yax])
-        self._edges = ( (DLE[xax], DRE[xax]), (DLE[yax], DRE[yax]) )
-
+        if self.data_source.axis < 3:
+            DLE = self.pf.domain_left_edge
+            DRE = self.pf.domain_right_edge
+            DD = float(self.periodic)*(DRE - DLE)
+            axis = self.data_source.axis
+            xax = x_dict[axis]
+            yax = y_dict[axis]
+            self._period = (DD[xax], DD[yax])
+            self._edges = ( (DLE[xax], DRE[xax]), (DLE[yax], DRE[yax]) )
+        
     def __getitem__(self, item):
         if item in self.data: return self.data[item]
         mylog.info("Making a fixed resolution buffer of %d by %d" % \

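The periodicity setup only applies to on-axis data sources: x_dict and
y_dict map a coordinate axis of 0, 1, or 2 to the two image-plane axes,
and oblique sources such as cutting planes report an axis outside that
range, so the block is now skipped for them. A sketch of the guard, with
mapping values that are illustrative of the usual convention:

    # Axis 0 (x) images the (y, z) plane, axis 1 (y) the (x, z) plane,
    # and axis 2 (z) the (x, y) plane.
    x_dict = [1, 0, 0]
    y_dict = [2, 2, 1]

    def image_axes(axis):
        if axis < 3:
            return x_dict[axis], y_dict[axis]
        return None  # oblique source: no fixed plane, skip periodic setup
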
Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
it because the commit notification service is enabled for this
repository.


