[Yt-svn] yt-commit r422 - trunk/examples

britton at wrangler.dreamhost.com
Thu May 1 13:57:50 PDT 2008


Author: britton
Date: Thu May  1 13:57:49 2008
New Revision: 422
URL: http://yt.spacepope.org/changeset/422

Log:
Adding clumping_factor routine and wrapper.
Adding clump_finder routine and wrapper.

1 May, 2008

Britton Smith


Added:
   trunk/examples/clumping_factor.py
   trunk/examples/clumping_factors_all_datasets.py
   trunk/examples/find_clumps_all_datasets.py
   trunk/examples/find_clumps_dataset.py

Added: trunk/examples/clumping_factor.py
==============================================================================
--- (empty file)
+++ trunk/examples/clumping_factor.py	Thu May  1 13:57:49 2008
@@ -0,0 +1,54 @@
+# Calculates clumping factors over the entire simulation box.
+# Arguments:
+#     dataset_object: EnzoStaticOutput object.
+#     fields: array containing the fields for the clumping factor.
+#     exponent: array, the same size as fields, giving the exponent for each field.
+#     weight: name of the weighting field.
+#
+# Example: The standard density clumping factor (<rho^2>/<rho>^2) with
+#          mass-weighted averages:
+#          clumping_factor(my_object,["Density"],[2],"CellMassMsun")
+#          This is equivalent to:
+#          clumping_factor(my_object,["Density","Density"],[1,1],"CellMassMsun")
+
+import yt.lagos as lagos
+from yt.funcs import get_pbar
+import numpy as na
+
+def clumping_factor(dataset_object,fields,exponent,weight):
+    "Calculates clumping factor of array of fields with a weight."
+
+    string_top = "<"
+    string_bottom = "("
+    for q in range(len(fields)):
+        string_top = "%s %s^%s" % (string_top,fields[q],exponent[q])
+        string_bottom = "%s <%s>^%s" % (string_bottom,fields[q],exponent[q])
+    string_top = "%s >" % string_top
+    string_bottom = "%s )" % string_bottom
+    print "Clumping factor is %s / %s, weighted by %s." % (string_top,string_bottom,weight)
+
+    product = 0.0
+    weight_value = 0.0
+    individual = [0.0 for q in fields]
+    individual = na.array(individual)
+    exponent = na.array(exponent)
+
+    pb = get_pbar("Calculating clumping factor ", len(dataset_object.h.grids)) # Progress bar
+    for grid in dataset_object.h.grids:
+        pb.update(grid.id)
+        this_product = grid[weight] * grid.child_mask
+        for q in range(len(fields)):
+            individual[q] += (grid[fields[q]] * grid[weight] * grid.child_mask).sum()
+            this_product *= (grid[fields[q]]**exponent[q])
+        product += this_product.sum()
+        weight_value += (grid[weight] * grid.child_mask).sum()
+        grid.clear_data()
+    pb.finish()
+
+    top = product * (weight_value**(exponent.sum()-1))
+    bottom = (individual**exponent).prod()
+
+    clumping_factor = top / bottom
+
+    print "Clumping factor is %e." % clumping_factor
+    return clumping_factor
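
For reference, the accumulator algebra above reduces to the usual
weighted-average ratio.  Here is a minimal sketch (not part of this
commit) checking the density case against hypothetical flat numpy
arrays standing in for one grid's cells:

    import numpy as na

    # Hypothetical field values and weights (e.g. cell masses).
    rho = na.array([1.0, 2.0, 4.0, 8.0])
    w = na.array([0.5, 1.0, 1.0, 0.5])

    # Direct form: <rho^2> / <rho>^2 with weighted averages.
    direct = ((rho**2 * w).sum() / w.sum()) / \
             ((rho * w).sum() / w.sum())**2

    # Accumulator form used in clumping_factor:
    # product * weight_value**(sum(exponent)-1) / prod(individual**exponent)
    product = (rho**2 * w).sum()
    individual = (rho * w).sum()
    accumulated = product * w.sum()**(2 - 1) / individual**2

    assert abs(direct - accumulated) < 1e-10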

Added: trunk/examples/clumping_factors_all_datasets.py
==============================================================================
--- (empty file)
+++ trunk/examples/clumping_factors_all_datasets.py	Thu May  1 13:57:49 2008
@@ -0,0 +1,56 @@
+# This is a wrapper for clumping_factor.py, allowing you to calculate
+# clumping factors for multiple datasets.
+# In this example, we calculate the standard density clumping factor 
+# (<rho^2> / <rho>^2), with both mass-weighted and volume-weighted 
+# average densities.
+# We also get the redshift of each dump and sort the datasets by redshift 
+# for output into a file.
+
+from clumping_factor import *
+
+data_dir = ""
+
+# Time datadumps.
+time_dumps = [252]
+time_dump_dir = "DD"
+time_dump_prefix = "DD"
+
+# Redshift datadumps.
+redshift_dumps = range(8, 68)
+redshift_dump_dir = "RD"
+redshift_dump_prefix = "RD"
+
+datasets = []
+redshifts = []
+
+c_m = {}
+c_v = {}
+
+# Prepare list of datasets.
+
+for q in time_dumps:
+    datasets.append("%s%s%04d/%s%04d" % (data_dir,time_dump_dir,q,time_dump_prefix,q))
+for q in redshift_dumps:
+    datasets.append("%s%s%04d/%s%04d" % (data_dir,redshift_dump_dir,q,redshift_dump_prefix,q))
+
+for dataset in datasets:
+    dataset_object = lagos.EnzoStaticOutput(dataset)
+
+    redshifts.append(dataset_object.parameters["CosmologyCurrentRedshift"])
+
+    print "Calculating clumping factors for %s (z = %f)." % (dataset,redshifts[-1])
+
+    # Store clumping factors in dicts with the redshifts as the keys.
+    c_m[redshifts[-1]] = clumping_factor(dataset_object,["Density"],[2],"CellMassMsun")
+    c_v[redshifts[-1]] = clumping_factor(dataset_object,["Density"],[2],"CellVolume")
+
+    del dataset_object
+
+# Sort list of redshifts.
+redshifts.sort()
+
+output = open('clumping_factors.txt','w')
+output.write('#z\tc_m\tc_v\n')
+for z in redshifts:
+    output.write("%.6e\t%.6e\t%.6e\n" % (z,c_m[z],c_v[z]))
+output.close()
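
The output file is plain tab-separated text, so it can be plotted
directly.  A short sketch of reading it back (assuming the pylab
conventions of the time; not part of this commit):

    import numpy as na
    import pylab

    # Columns: redshift, mass-weighted C, volume-weighted C.
    # loadtxt skips the '#' header line by default.
    z, c_m, c_v = na.loadtxt('clumping_factors.txt', unpack=True)

    pylab.semilogy(z, c_m, label='mass-weighted')
    pylab.semilogy(z, c_v, label='volume-weighted')
    pylab.xlabel('z')
    pylab.ylabel('clumping factor')
    pylab.legend()
    pylab.savefig('clumping_factors.png')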

Added: trunk/examples/find_clumps_all_datasets.py
==============================================================================
--- (empty file)
+++ trunk/examples/find_clumps_all_datasets.py	Thu May  1 13:57:49 2008
@@ -0,0 +1,45 @@
+# This is a wrapper for find_clumps_dataset.py, allowing you to run the clump finder
+# over multiple datasets.
+# In this example, we are looking for clumps in a sphere of radius 10 pc surrounding 
+# the density maximum in each datadump.
+
+from find_clumps_dataset import *
+
+data_dir = "/Users/britton/EnzoRuns/real_4/cl-4/"
+
+# Time datadumps.
+time_dumps = range(25)
+time_dump_dir = "DataDir"
+time_dump_prefix = "DataDump"
+
+# Redshift datadumps.
+redshift_dumps = range(5)
+redshift_dump_dir = "RedshiftDir"
+redshift_dump_prefix = "RedshiftDump"
+
+field = "Density"
+radius = 10
+units = "pc"
+step = 10**(1./5.) # 5 steps per order of magnitude
+
+# Prepare list of datasets.
+datasets = []
+
+for q in time_dumps:
+    datasets.append("%s%s%04d/%s%04d" % (data_dir,time_dump_dir,q,time_dump_prefix,q))
+for q in redshift_dumps:
+    datasets.append("%s%s%04d/%s%04d" % (data_dir,redshift_dump_dir,q,redshift_dump_prefix,q))
+
+for dataset in datasets:
+    print "Finding clumps in %s." % dataset
+
+    dataset_object = lagos.EnzoStaticOutput(dataset)
+
+    # Look for clumps in a sphere surrounding the density maximum.
+    v, c = dataset_object.h.find_max(field)
+    sphere = dataset_object.h.sphere(c, radius/dataset_object[units], [field]) # cache our field
+
+    find_clumps_dataset(dataset,sphere,field,step)
+
+    del sphere
+    del dataset_object
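
Since step = 10**(1./5.), the clump finder raises the contour level by
a fifth of a decade per round.  A quick sketch of the levels it visits
(hypothetical bracketing decades, not part of this commit):

    step = 10**(1./5.)

    # Hypothetical bracketing decades for a density field, in code units.
    c_min, c_max = 1.0e-28, 1.0e-26

    levels = []
    c = c_min
    while c <= c_max * 1.001:  # small tolerance for float round-off
        levels.append(c)
        c *= step
    # levels now holds five values per decade:
    # 1e-28, ~1.58e-28, ~2.51e-28, ~3.98e-28, ~6.31e-28, 1e-27, ...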

Added: trunk/examples/find_clumps_dataset.py
==============================================================================
--- (empty file)
+++ trunk/examples/find_clumps_dataset.py	Thu May  1 13:57:49 2008
@@ -0,0 +1,31 @@
+# This is a wrapper for the clump finder in Clump.py.
+# Arguments:
+#     dataset: name of dataset, used as a prefix for the output files.
+#     data_object: object over which contouring is performed (region or sphere).
+#     field: data field over which contours are made (example: "Density" or "AveragedDensity").
+#     step: contouring step size.  The contour level is multiplied by this value on
+#           each round of the clump finding.
+
+import time
+from math import *
+from Clump import *
+
+def find_clumps_dataset(dataset,data_object,field,step):
+    t1 = time.time()
+
+    c_min = 10**floor(log10(data_object[field].min()))
+    c_max = 10**floor(log10(data_object[field].max())+1)
+
+    master_clump = Clump(data_object, None, field)
+    find_clumps(master_clump, c_min, c_max, step)
+
+    t2 = time.time()
+    print "Took %0.3e seconds" % (t2-t1)
+
+    f = open('%s_clump_hierarchy.txt' % dataset,'w')
+    write_clump_hierarchy(master_clump,0,f)
+    f.close()
+
+    f = open('%s_clumps.txt' % dataset,'w')
+    write_clumps(master_clump,0,f)
+    f.close()
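
The c_min/c_max construction above brackets the data by whole decades.
For example (hypothetical field extrema, not part of this commit):

    from math import floor, log10

    # Hypothetical field extrema, in code units.
    f_min, f_max = 3.2e-28, 7.5e-22

    c_min = 10**floor(log10(f_min))      # -> 1e-28, decade at or below the minimum
    c_max = 10**floor(log10(f_max) + 1)  # -> 1e-21, decade above the maximum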


