[yt-svn] commit/yt: 15 new changesets
Bitbucket
commits-noreply at bitbucket.org
Fri Feb 15 14:22:39 PST 2013
15 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/a8bda76f28ad/
changeset: a8bda76f28ad
branch: yt
user: chummels
date: 2013-02-14 06:07:37
summary: Adding support for the halo_merger_tree module to run on FOF catalogs produced by yt's own FOF halo finder. Also adding support for generating HDF5 files that follow a specific halo through its evolution across timesteps, with halo information (mass, position, etc.) included in the output file.
affected #: 2 files
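In practice the new mode is driven through EnzoFOFMergerTree. A minimal sketch, following the docstring examples added below and assuming the FOF/ catalogs have already been written by yt's FOFHaloFinder:

    from yt.analysis_modules.halo_merger_tree.api import EnzoFOFMergerTree

    # Build a merger tree from catalogs written by yt's own FOF halo finder
    # (external_FOF=False) instead of Enzo's external FOF program.
    mt = EnzoFOFMergerTree(cycle_range=(0, 63), external_FOF=False)
    mt.build_tree(0)                    # follow halo 0 back through time
    mt.save_halo_evolution('halos.h5')  # mass, position, etc. per timestep
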
diff -r 100c55efbac229d8b970e89f157bb52bfb1f71a8 -r a8bda76f28ad5cd3d3431486a1b9bd7ce87b15df yt/analysis_modules/halo_merger_tree/api.py
--- a/yt/analysis_modules/halo_merger_tree/api.py
+++ b/yt/analysis_modules/halo_merger_tree/api.py
@@ -38,5 +38,6 @@
MergerTreeTextOutput
from .enzofof_merger_tree import \
+ HaloCatalog, \
find_halo_relationships, \
EnzoFOFMergerTree
diff -r 100c55efbac229d8b970e89f157bb52bfb1f71a8 -r a8bda76f28ad5cd3d3431486a1b9bd7ce87b15df yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -1,11 +1,14 @@
"""
A very simple, purely-serial, merger tree script that knows how to parse FOF
-catalogs output by Enzo and then compare parent/child relationships.
+catalogs, either output by Enzo or output by yt's FOF halo finder, and then
+compare parent/child relationships.
Author: Matthew J. Turk <matthewturk@gmail.com>
Affiliation: Columbia University
Author: John H. Wise <jwise@astro.princeton.edu>
Affiliation: Princeton
+Author: Cameron Hummels <chummels@gmail.com>
+Affiliation: Arizona
Homepage: http://yt-project.org/
License:
Copyright (C) 2010-2011 Matthew Turk. All Rights Reserved.
@@ -79,7 +82,7 @@
class HaloCatalog(object):
cache = None
- def __init__(self, output_id, cache = True):
+ def __init__(self, output_id, cache = True, external_FOF=True):
r"""A catalog of halos, parsed from EnzoFOF outputs.
This class will read in catalogs output by the Enzo FOF halo finder and
@@ -98,17 +101,24 @@
cache : bool
Should we store, in between accesses, the particle IDs? If set to
true, the correct particle files must exist.
+ external_FOF : bool, optional
+ Are we building a tree from outputs generated by an
+ external FOF program, or an FOF internal to yt?
"""
self.output_id = output_id
+ self.external_FOF = external_FOF
self.redshift = 0.0
self.particle_file = h5py.File("FOF/particles_%05i.h5" % output_id, "r")
- self.parse_halo_catalog()
+ if self.external_FOF:
+ self.parse_halo_catalog_external()
+ else:
+ self.parse_halo_catalog_internal()
if cache: self.cache = dict()#MaxLengthDict()
def __del__(self):
self.particle_file.close()
- def parse_halo_catalog(self):
+ def parse_halo_catalog_external(self):
hp = []
for line in open("FOF/groups_%05i.dat" % self.output_id):
if line.strip() == "": continue # empty
@@ -126,13 +136,46 @@
self.halo_kdtree = None
return hp
- def read_particle_ids(self, halo_id):
+ def parse_halo_catalog_internal(self, filename_prefix="FOF/groups"):
+ """
+ This parser works on the files output directly out of yt's internal
+ halo_finder. The parse_halo_catalog_external works with an
+ external version of FOF.
+
+ Examples
+ --------
+ pf = load("DD0000/DD0000")
+ halo_list = FOFHaloFinder(pf)
+ halo_list.write_out("FOF/groups_00000.txt")
+ halos_COM = parse_halo_catalog_internal("FOF_groups")
+ """
+ hp = []
+ for line in open("%s_%05i.txt" % (filename_prefix, self.output_id)):
+ if line.strip() == "": continue # empty
+ if line[0] == "#": continue # comment
+ x,y,z = [float(f) for f in line.split()[7:10]] # COM x,y,z
+ hp.append([x,y,z])
+ if hp != []:
+ self.halo_positions = np.array(hp)
+ self.halo_kdtree = KDTree(self.halo_positions)
+ else:
+ self.halo_positions = None
+ self.halo_kdtree = None
+ return hp
+
+ def read_particle_ids(self, halo_id):
if self.cache is not None:
if halo_id not in self.cache:
- self.cache[halo_id] = self.particle_file["/Halo%08i/Particle ID" % halo_id][:]
+ if self.external_FOF:
+ self.cache[halo_id] = self.particle_file["/Halo%08i/Particle ID" % halo_id][:]
+ else:
+ self.cache[halo_id] = self.particle_file["/Halo%08i/particle_index" % halo_id][:]
ids = self.cache[halo_id]
else:
- ids = self.particle_file["/Halo%08i/Particle ID" % halo_id][:]
+ if self.external_FOF:
+ ids = self.particle_file["/Halo%08i/Particle ID" % halo_id][:]
+ else:
+ ids = self.particle_file["/Halo%08i/particle_index" % halo_id][:]
return HaloParticleList(halo_id, self.halo_positions[halo_id,:], ids)
def calculate_parentage_fractions(self, other_catalog, radius = 0.10):
@@ -200,7 +243,8 @@
"""
def __init__(self, zrange=None, cycle_range=None, output=False,
- load_saved=False, save_filename="merger_tree.cpkl"):
+ load_saved=False, save_filename="merger_tree.cpkl",
+ external_FOF=True):
r"""
Parameters
----------
@@ -208,7 +252,7 @@
This is the redshift range (min, max) to calculate the
merger tree.
cycle_range : tuple, optional
- This is the cycle number range (min, max) to caluclate the
+ This is the cycle number range (min, max) to calculate the
merger tree. If both zrange and cycle_number given,
ignore zrange.
output : bool, optional
@@ -218,6 +262,9 @@
Flag to load previously saved parental relationships
save_filename : str, optional
Filename to save parental relationships
+ external_FOF : bool, optional
+ Are we building a tree from outputs generated by an
+ external FOF program, or an FOF internal to yt?
Examples
--------
@@ -228,6 +275,7 @@
"""
self.relationships = {}
self.redshifts = {}
+ self.external_FOF = external_FOF
self.find_outputs(zrange, cycle_range, output)
if load_saved:
self.load_tree(save_filename)
@@ -251,12 +299,15 @@
def find_outputs(self, zrange, cycle_range, output):
self.numbers = []
- files = glob.glob("FOF/groups_*.dat")
+ if self.external_FOF:
+ files = glob.glob("FOF/groups_*.dat")
+ else:
+ files = glob.glob("FOF/groups_*.txt")
# If using redshift range, load redshifts only
for f in files:
num = int(f[-9:-4])
if cycle_range == None:
- HC = HaloCatalog(num)
+ HC = HaloCatalog(num, external_FOF=self.external_FOF)
# Allow for some epsilon
diff1 = (HC.redshift - zrange[0]) / zrange[0]
diff2 = (HC.redshift - zrange[1]) / zrange[1]
@@ -272,11 +323,11 @@
# Run merger tree for all outputs, starting with the last output
for i in range(len(self.numbers)-1, 0, -1):
if output:
- output = "tree-%5.5d-%5.5d" % (self.numbers[i], self.numbers[i-1])
+ output = "FOF/tree-%5.5d-%5.5d" % (self.numbers[i], self.numbers[i-1])
else:
output = None
z0, z1, fr = find_halo_relationships(self.numbers[i], self.numbers[i-1],
- output_basename=output)
+ output_basename=output, external_FOF=self.external_FOF)
self.relationships[self.numbers[i]] = fr
self.redshifts[self.numbers[i]] = z0
# Fill in last redshift
@@ -380,6 +431,54 @@
if i > self.max_children: break
print "--> Halo %8.8d :: fraction = %g" % (c[0], c[1])
+ def save_halo_evolution(self, filename):
+ """
+ Saves as an HDF5 file the relevant details about a halo
+ over the course of its evolution (location, mass, id, etc.)
+ """
+ f = h5py.File(filename, 'a')
+ cycle_fin = self.levels.keys()[0]
+ halo_id = self.levels[cycle_fin][0].halo_id
+ halo = "halo%05d" % halo_id
+ if halo in f:
+ del f["halo%05d" % halo_id]
+ g = f.create_group("halo%05d" % halo_id)
+ size = len(self.levels.keys())
+ cycle = np.empty(size)
+ redshift = np.empty(size)
+ halo_id = np.empty(size)
+ fraction = np.empty(size)
+ mass = np.empty(size)
+ densest_point = np.empty((3,size))
+ COM = np.empty((6,size))
+ fraction[0] = 1.
+
+ for i, lvl in enumerate(sorted(self.levels, reverse=True)):
+ print "========== Cycle %5.5d (z=%f) ==========" % \
+ (lvl, self.redshifts[lvl])
+ cycle[i] = lvl
+ redshift[i] = self.redshifts[lvl]
+
+ br = self.levels[lvl][0]
+ print "Parent halo = %d" % br.halo_id
+ print "--> Most massive progenitor == Halo %d" % (br.progenitor)
+ halo_id[i] = br.halo_id
+ if i < size-1:
+ fraction[i+1] = br.children[1]
+
+ # open up FOF file to parse for details
+ filename = "FOF/groups_%05d.txt" % lvl
+ mass[i], densest_point[:,i], COM[:,i] = grab_FOF_halo_info_internal(filename, br.halo_id)
+
+ g.create_dataset("cycle", data=cycle)
+ g.create_dataset("redshift", data=redshift)
+ g.create_dataset("halo_id", data=halo_id)
+ g.create_dataset("fraction", data=fraction)
+ g.create_dataset("mass", data=mass)
+ g.create_dataset("densest_point", data=densest_point)
+ g.create_dataset("COM", data=COM)
+ f.close()
+
def write_dot(self, filename=None):
r"""Writes merger tree to a GraphViz or image file.
@@ -446,7 +545,7 @@
self.graph.write("%s" % filename, format=suffix)
def find_halo_relationships(output1_id, output2_id, output_basename = None,
- radius = 0.10):
+ radius = 0.10, external_FOF=True):
r"""Calculate the parentage and child relationships between two EnzoFOF
halo catalogs.
@@ -456,8 +555,9 @@
particle IDs from the child halos are identified in potential parents, and
then both percent-of-parent and percent-to-child values are recorded.
- Note that this works only with catalogs constructed by Enzo's FOF halo
- finder, not with catalogs constructed by yt.
+ Note that this works with catalogs constructed by Enzo's FOF halo
+ when used in external_FOF=True mode, whereas it will work with
+ catalogs constructed by yt using external_FOF=False mode.
Parameters
----------
@@ -484,9 +584,9 @@
parent.
"""
mylog.info("Parsing Halo Catalog %04i", output1_id)
- HC1 = HaloCatalog(output1_id, False)
+ HC1 = HaloCatalog(output1_id, False, external_FOF=external_FOF)
mylog.info("Parsing Halo Catalog %04i", output2_id)
- HC2 = HaloCatalog(output2_id, True)
+ HC2 = HaloCatalog(output2_id, True, external_FOF=external_FOF)
mylog.info("Calculating fractions")
pfrac = HC1.calculate_parentage_fractions(HC2)
@@ -504,3 +604,16 @@
cPickle.dump(pfrac, open("%s.cpkl" % (output_basename), "wb"))
return HC1.redshift, HC2.redshift, pfrac
+
+def grab_FOF_halo_info_internal(filename, halo_id):
+ """
+ Finds a specific halo's information in the FOF group output information
+ and pass relevant parameters to caller.
+ """
+ # open up FOF file to parse for details
+ groups_file = open(filename, 'r')
+ for line in groups_file:
+ if line.startswith("#"): continue
+ if int(line.split()[0]) == halo_id:
+ ar = np.array(line.split()).astype('float64')
+ return ar[1], ar[4:7], ar[7:13] # mass, xyz_dens, xyzvxvyvz_COM
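Judging from the slicing above (and from parse_halo_catalog_internal, which reads the COM x,y,z from columns 7:10), each row of the groups_*.txt catalog is assumed to carry the halo id in column 0, the mass in column 1, the densest point x,y,z in columns 4:7, and the center-of-mass x,y,z,vx,vy,vz in columns 7:13. A hypothetical call:

    from yt.analysis_modules.halo_merger_tree.enzofof_merger_tree \
        import grab_FOF_halo_info_internal

    # "FOF/groups_00063.txt" is a hypothetical catalog written by
    # FOFHaloFinder.write_out(); halo 0 is looked up by its id in column 0.
    mass, dens_xyz, com = grab_FOF_halo_info_internal("FOF/groups_00063.txt", 0)
    print mass      # total mass (column 1)
    print dens_xyz  # densest point x, y, z (columns 4:7)
    print com       # COM x, y, z, vx, vy, vz (columns 7:13)
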
https://bitbucket.org/yt_analysis/yt/commits/7eb999176655/
changeset: 7eb999176655
branch: yt
user: chummels
date: 2013-02-15 02:03:03
summary: Adding the capability to save an HDF5 file containing a halo's history over a time series, as well as a function for plotting that history in a reasonably programmatic way.
affected #: 2 files
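Once save_halo_evolution has written the file, the stored history can be read back directly with h5py. A minimal sketch, assuming the 'halos.h5' filename from the docstring example below and halo 0:

    import h5py

    f = h5py.File('halos.h5', 'r')
    g = f['halo00000']  # groups are named "halo%05d" % halo_id
    # One value per cycle in each dataset; densest_point is (3, N), COM is (6, N).
    for name in ('cycle', 'redshift', 'halo_id', 'fraction',
                 'mass', 'densest_point', 'COM'):
        print name, g[name][...]
    f.close()
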
diff -r a8bda76f28ad5cd3d3431486a1b9bd7ce87b15df -r 7eb999176655deb8dec8ab32a467f70f2bea75df yt/analysis_modules/halo_merger_tree/api.py
--- a/yt/analysis_modules/halo_merger_tree/api.py
+++ b/yt/analysis_modules/halo_merger_tree/api.py
@@ -40,4 +40,5 @@
from .enzofof_merger_tree import \
HaloCatalog, \
find_halo_relationships, \
- EnzoFOFMergerTree
+ EnzoFOFMergerTree, \
+ plot_halo_evolution
diff -r a8bda76f28ad5cd3d3431486a1b9bd7ce87b15df -r 7eb999176655deb8dec8ab32a467f70f2bea75df yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -272,6 +272,11 @@
mt.build_tree(0) # Create tree for halo 0
mt.print_tree()
mt.write_dot()
+
+ See Also
+ --------
+ plot_halo_evolution()
+
"""
self.relationships = {}
self.redshifts = {}
@@ -279,9 +284,15 @@
self.find_outputs(zrange, cycle_range, output)
if load_saved:
self.load_tree(save_filename)
+ if cycle_range is not None:
+ # actually make merger tree work within specified cycle limits
+ self.redshifts = {}
+ for i in range(cycle_range[0],cycle_range[1]+1):
+ self.redshifts[i] = 0.0 # don't have redshift info
else:
self.run_merger_tree(output)
self.save_tree(save_filename)
+
def save_tree(self, filename):
cPickle.dump((self.redshifts, self.relationships),
@@ -421,6 +432,7 @@
r"""Prints the merger tree to stdout.
"""
for lvl in sorted(self.levels, reverse=True):
+ if lvl not in self.redshifts.keys(): continue
print "========== Cycle %5.5d (z=%f) ==========" % \
(lvl, self.redshifts[lvl])
for br in self.levels[lvl]:
@@ -434,26 +446,29 @@
def save_halo_evolution(self, filename):
"""
Saves as an HDF5 file the relevant details about a halo
- over the course of its evolution (location, mass, id, etc.)
+ over the course of its evolution following the most massive
+ progenitor to have given it the bulk of its particles.
+ It stores info from the FOF_groups file: location, mass, id, etc.
"""
f = h5py.File(filename, 'a')
- cycle_fin = self.levels.keys()[0]
+ cycle_fin = self.redshifts.keys()[-1]
halo_id = self.levels[cycle_fin][0].halo_id
halo = "halo%05d" % halo_id
if halo in f:
del f["halo%05d" % halo_id]
g = f.create_group("halo%05d" % halo_id)
- size = len(self.levels.keys())
- cycle = np.empty(size)
- redshift = np.empty(size)
- halo_id = np.empty(size)
- fraction = np.empty(size)
- mass = np.empty(size)
- densest_point = np.empty((3,size))
- COM = np.empty((6,size))
+ size = len(self.redshifts.keys())
+ cycle = np.zeros(size)
+ redshift = np.zeros(size)
+ halo_id = np.zeros(size)
+ fraction = np.zeros(size)
+ mass = np.zeros(size)
+ densest_point = np.zeros((3,size))
+ COM = np.zeros((6,size))
fraction[0] = 1.
for i, lvl in enumerate(sorted(self.levels, reverse=True)):
+ if lvl not in self.redshifts.keys(): continue
print "========== Cycle %5.5d (z=%f) ==========" % \
(lvl, self.redshifts[lvl])
cycle[i] = lvl
@@ -463,13 +478,25 @@
print "Parent halo = %d" % br.halo_id
print "--> Most massive progenitor == Halo %d" % (br.progenitor)
halo_id[i] = br.halo_id
+
+ if len(br.children) == 0: # lineage for this halo ends
+ cycle = cycle[:i+1] # so truncate arrays, and break
+ redshift = redshift[:i+1]
+ halo_id = halo_id[:i+1]
+ fraction = fraction[:i+1]
+ mass = mass[:i+1]
+ densest_point = densest_point[:,:i+1]
+ COM = COM[:,:i+1]
+ break
+
if i < size-1:
- fraction[i+1] = br.children[1]
+ fraction[i+1] = br.children[0][1]
# open up FOF file to parse for details
filename = "FOF/groups_%05d.txt" % lvl
mass[i], densest_point[:,i], COM[:,i] = grab_FOF_halo_info_internal(filename, br.halo_id)
+ # save the arrays in the hdf5 file
g.create_dataset("cycle", data=cycle)
g.create_dataset("redshift", data=redshift)
g.create_dataset("halo_id", data=halo_id)
@@ -617,3 +644,87 @@
if int(line.split()[0]) == halo_id:
ar = np.array(line.split()).astype('float64')
return ar[1], ar[4:7], ar[7:13] # mass, xyz_dens, xyzvxvyvz_COM
+
+def plot_halo_evolution(filename, halo_id, x_quantity='cycle', y_quantity='mass',
+ x_log=False, y_log=True):
+ """
+ Once you have generated a file using the
+ EnzoFOFMergerTree.save_halo_evolution function, this is a simple way of
+ plotting the evolution in the quantities of that halo over its lifetime.
+
+ Parameters
+ ----------
+ filename : str
+ The filename to which you saved the hdf5 data from save_halo_evolution
+ halo_id : int
+ The halo in 'filename' that you want to follow
+ x_quantity, y_quantity : str, optional
+ The quantity that you want to plot as the x_coord (or y_coords).
+ Valid options are:
+ cycle, mass, fraction, halo_id, redshift, dense_x, dense_y, dense_z
+ COM_x, COM_y, COM_z, COM_vx, COM_vy, COM_vz
+ x_log, y_log : bool, optional
+ Do you want the x(y)-axis to be in log or linear?
+
+ Examples
+ --------
+ files = glob.glob("DD????/DD????")
+ files.sort()
+ ts = TimeSeriesData.from_filenames(files)
+ if not os.path.isdir('FOF'): os.mkdir('FOF')
+ for pf in ts:
+ halo_list = FOFHaloFinder(pf)
+ i = int(pf.basename[2:])
+ halo_list.write_out("FOF/groups_%05i.txt" % i)
+ halo_list.write_particle_lists("FOF/particles_%05i" % i)
+ mt = EnzoFOFMergerTree(cycle_range=(0,63), external_FOF=False)
+ for i in range(20):
+ mt.build_tree(i)
+ mt.save_halo_evolution('halos.h5')
+ for i in range(20):
+ plot_halo_evolution('halos.h5', i)
+ # generates mass history plots for the 20 most massive halos at t_fin.
+ """
+ import yt.visualization._mpl_imports as mpl
+ import matplotlib.pyplot as plt
+ f = h5py.File(filename, 'r')
+ basename = os.path.splitext(filename)[0]
+ halo = "halo%05d" % halo_id
+ basename = basename + "_" + halo
+ g = f[halo]
+ values = list(g)
+ index_dict = {'x' : 0, 'y' : 1, 'z' : 2, 'vx' : 3, 'vy' : 4, 'vz' : 5}
+ coords = {}
+ fields = {}
+ for i, quantity in enumerate((x_quantity, y_quantity)):
+ field = quantity
+ if quantity.startswith('COM'):
+ index = index_dict[quantity.split('_')[-1]]
+ quantity = ('COM')
+ if quantity.startswith('dense'):
+ index = index_dict[quantity.split('_')[-1]]
+ quantity = ('densest_point')
+ if quantity not in values:
+ exit('%s not in list of values in %s for halo %d' % \
+ (quantity, filename, halo_id))
+ if not field == quantity:
+ coords[i] = g[quantity][index,:]
+ else:
+ coords[i] = g[quantity]
+ if len(coords[i]) == 1:
+ # ("Only 1 value for Halo %d. Ignoring." % halo_id)
+ return
+ fields[i] = field
+
+ ax = plt.axes()
+ ax.plot(coords[0], coords[1])
+ ax.set_title(basename)
+ ax.set_xlabel(fields[0])
+ ax.set_ylabel(fields[1])
+ if x_log:
+ ax.set_xscale("log")
+ if y_log:
+ ax.set_yscale("log")
+ ofn = "%s_%s_%s.png" % (basename, fields[0], fields[1])
+ plt.savefig(ofn)
+ plt.clf()
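
For the composite quantities handled above, the suffix after the underscore is mapped through index_dict onto a row of the stored array. As a sketch, again assuming the 'halos.h5' file from the docstring example:

    from yt.analysis_modules.halo_merger_tree.api import plot_halo_evolution

    # 'COM_x' selects row 0 of the (6, N) COM array via index_dict;
    # 'redshift' is read directly. Per the filename logic above, the plot
    # should land in halos_halo00000_redshift_COM_x.png.
    plot_halo_evolution('halos.h5', 0,
                        x_quantity='redshift', y_quantity='COM_x',
                        x_log=False, y_log=False)
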
https://bitbucket.org/yt_analysis/yt/commits/9c01dfb37280/
changeset: 9c01dfb37280
branch: yt
user: chummels
date: 2013-02-15 02:10:16
summary: Merging.
affected #: 56 files
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5152,6 +5152,7 @@
0000000000000000000000000000000000000000 svn.993
fff7118f00e25731ccf37cba3082b8fcb73cf90e svn.371
0000000000000000000000000000000000000000 svn.371
+6528c562fed6f994b8d1ecabaf375ddc4707dade mpi-opaque
+0000000000000000000000000000000000000000 mpi-opaque
f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
0000000000000000000000000000000000000000 hop callback
-0000000000000000000000000000000000000000 hop callback
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -7,7 +7,7 @@
# There are a few options, but you only need to set *one* of them. And
# that's the next one, DEST_DIR. But, if you want to use an existing HDF5
# installation you can set HDF5_DIR, or if you want to use some other
-# subversion checkout of YT, you can set YT_DIR, too. (It'll already
+# subversion checkout of yt, you can set YT_DIR, too. (It'll already
# check the current directory and one up.
#
# And, feel free to drop me a line: matthewturk@gmail.com
@@ -26,7 +26,7 @@
# and install it on its own
#HDF5_DIR=
-# If you need to supply arguments to the NumPy build, supply them here
+# If you need to supply arguments to the NumPy or SciPy build, supply them here
# This one turns on gfortran manually:
#NUMPY_ARGS="--fcompiler=gnu95"
# If you absolutely can't get the fortran to work, try this:
@@ -49,7 +49,7 @@
INST_ROCKSTAR=0 # Install the Rockstar halo finder?
INST_SCIPY=0 # Install scipy?
-# If you've got YT some other place, set this to point to it.
+# If you've got yt some other place, set this to point to it.
YT_DIR=""
# If you need to pass anything to matplotlib, do so here.
@@ -159,18 +159,6 @@
echo " $ module swap PE-pgi PE-gnu"
echo
fi
- if [ "${MYHOSTLONG%%ranger}" != "${MYHOSTLONG}" ]
- then
- echo "Looks like you're on Ranger."
- echo
- echo "NOTE: YOU MUST BE IN THE GNU PROGRAMMING ENVIRONMENT"
- echo "These commands should take care of that for you:"
- echo
- echo " $ module unload mvapich2"
- echo " $ module swap pgi gcc"
- echo " $ module load mvapich2"
- echo
- fi
if [ "${MYHOST##steele}" != "${MYHOST}" ]
then
echo "Looks like you're on Steele."
@@ -188,24 +176,53 @@
echo
echo "NOTE: you must have the Xcode command line tools installed."
echo
- echo "OS X 10.5: download Xcode 3.0 from the mac developer tools"
- echo "website"
+ echo "The instructions for obtaining these tools varies according"
+ echo "to your exact OS version. On older versions of OS X, you"
+ echo "must register for an account on the apple developer tools"
+ echo "website: https://developer.apple.com/downloads to obtain the"
+ echo "download link."
+ echo
+ echo "We have gathered some additional instructions for each"
+ echo "version of OS X below. If you have trouble installing yt"
+ echo "after following these instructions, don't hesitate to contact"
+ echo "the yt user's e-mail list."
+ echo
+ echo "You can see which version of OSX you are running by clicking"
+ echo "'About This Mac' in the apple menu on the left hand side of"
+ echo "menu bar. We're assuming that you've installed all operating"
+ echo "system updates; if you have an older version, we suggest"
+ echo "running software update and installing all available updates."
+ echo
+ echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
+ echo "Apple developer tools website."
echo
- echo "OS X 10.6: download Xcode 3.2 from the mac developer tools"
- echo "website"
+ echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
+ echo "developer tools website. You can either download the"
+ echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
+ echo "Software Update to update to XCode 3.2.6 or"
+ echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
+ echo "bundle (4.1 GB)."
echo
- echo "OS X 10.7: download Xcode 4.0 from the mac app store or"
- echo "alternatively download the Xcode command line tools from"
- echo "the mac developer tools website"
+ echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
+ echo "(search for Xcode)."
+ echo "Alternatively, download the Xcode command line tools from"
+ echo "the Apple developer tools website."
echo
- echo "NOTE: You may have problems if you are running OSX 10.6 (Snow"
- echo "Leopard) or newer. If you do, please set the following"
- echo "environment variables, remove any broken installation tree, and"
- echo "re-run this script verbatim."
+ echo "OS X 10.8.2: download Xcode 4.6 from the mac app store."
+ echo "(search for Xcode)."
+ echo "Additionally, you will have to manually install the Xcode"
+ echo "command line tools, see:"
+ echo "http://stackoverflow.com/questions/9353444"
+ echo "Alternatively, download the Xcode command line tools from"
+ echo "the Apple developer tools website."
+ echo
+ echo "NOTE: It's possible that the installation will fail, if so,"
+ echo "please set the following environment variables, remove any"
+ echo "broken installation tree, and re-run this script verbatim."
echo
echo "$ export CC=gcc-4.2"
echo "$ export CXX=g++-4.2"
- echo
+ echo
OSX_VERSION=`sw_vers -productVersion`
if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
then
@@ -213,6 +230,27 @@
MPL_SUPP_CXXFLAGS="${MPL_SUPP_CXXFLAGS} -mmacosx-version-min=10.7"
fi
fi
+ if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
+ then
+ echo "Looks like you're on an OpenSUSE-compatible machine."
+ echo
+ echo "You need to have these packages installed:"
+ echo
+ echo " * devel_C_C++"
+ echo " * libopenssl-devel"
+ echo " * libuuid-devel"
+ echo " * zip"
+ echo " * gcc-c++"
+ echo
+ echo "You can accomplish this by executing:"
+ echo
+ echo "$ sudo zypper install -t pattern devel_C_C++"
+ echo "$ sudo zypper install gcc-c++ libopenssl-devel libuuid-devel zip"
+ echo
+ echo "I am also setting special configure arguments to Python to"
+ echo "specify control lib/lib64 issues."
+ PYCONF_ARGS="--libdir=${DEST_DIR}/lib"
+ fi
if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
then
echo "Looks like you're on an Ubuntu-compatible machine."
@@ -244,6 +282,20 @@
echo " to avoid conflicts with other command-line programs "
echo " (like eog and evince, for example)."
fi
+ if [ $INST_SCIPY -eq 1 ]
+ then
+ echo
+ echo "Looks like you've requested that the install script build SciPy."
+ echo
+ echo "If the SciPy build fails, please uncomment one of the the lines"
+ echo "at the top of the install script that sets NUMPY_ARGS, delete"
+ echo "any broken installation tree, and re-run the install script"
+ echo "verbatim."
+ echo
+ echo "If that doesn't work, don't hesitate to ask for help on the yt"
+ echo "user's mailing list."
+ echo
+ fi
if [ ! -z "${CFLAGS}" ]
then
echo "******************************************"
@@ -262,9 +314,9 @@
echo
echo "========================================================================"
echo
-echo "Hi there! This is the YT installation script. We're going to download"
+echo "Hi there! This is the yt installation script. We're going to download"
echo "some stuff and install it to create a self-contained, isolated"
-echo "environment for YT to run within."
+echo "environment for yt to run within."
echo
echo "Inside the installation script you can set a few variables. Here's what"
echo "they're currently set to -- you can hit Ctrl-C and edit the values in "
@@ -303,7 +355,7 @@
echo "be installing PyX"
printf "%-15s = %s so I " "INST_SCIPY" "${INST_SCIPY}"
-get_willwont ${INST_PYX}
+get_willwont ${INST_SCIPY}
echo "be installing scipy"
printf "%-15s = %s so I " "INST_0MQ" "${INST_0MQ}"
@@ -445,7 +497,7 @@
echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8 libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0 matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
-echo '52d1127de2208aaae693d16fef10ffc9b8663081bece83b7597d65706e9568af3b9e56bd211878774e1ebed92e21365ee9c49602a0ff5e48f89f12244d79c161 mercurial-2.4.tar.gz' > mercurial-2.4.tar.gz.sha512
+echo '91693ca5f34934956a7c2c98bb69a5648b2a5660afd2ecf4a05035c5420450d42c194eeef0606d7683e267e4eaaaab414df23f30b34c88219bdd5c1a0f1f66ed mercurial-2.5.1.tar.gz' > mercurial-2.5.1.tar.gz.sha512
echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9 numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474 sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2 zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
@@ -478,7 +530,7 @@
get_ytproject Python-2.7.3.tgz
get_ytproject numpy-1.6.1.tar.gz
get_ytproject matplotlib-1.2.0.tar.gz
-get_ytproject mercurial-2.4.tar.gz
+get_ytproject mercurial-2.5.1.tar.gz
get_ytproject ipython-0.13.1.tar.gz
get_ytproject h5py-2.1.0.tar.gz
get_ytproject Cython-0.17.1.tar.gz
@@ -605,10 +657,10 @@
if [ ! -e Python-2.7.3/done ]
then
- echo "Installing Python. This may take a while, but don't worry. YT loves you."
+ echo "Installing Python. This may take a while, but don't worry. yt loves you."
[ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
cd Python-2.7.3
- ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
+ ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -623,7 +675,7 @@
if [ $INST_HG -eq 1 ]
then
echo "Installing Mercurial."
- do_setup_py mercurial-2.4
+ do_setup_py mercurial-2.5.1
export HG_EXEC=${DEST_DIR}/bin/hg
else
# We assume that hg can be found in the path.
@@ -694,7 +746,7 @@
echo "Building LAPACK"
cd lapack-3.4.2/
cp INSTALL/make.inc.gfortran make.inc
- make lapacklib CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
+ make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
touch done
cd ..
fi
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -142,18 +142,30 @@
if self.CoM is not None:
return self.CoM
pm = self["ParticleMassMsun"]
- cx = self["particle_position_x"]
- cy = self["particle_position_y"]
- cz = self["particle_position_z"]
- if isinstance(self, FOFHalo):
- c_vec = np.array([cx[0], cy[0], cz[0]]) - self.pf.domain_center
- else:
- c_vec = self.maximum_density_location() - self.pf.domain_center
- cx = (cx - c_vec[0])
- cy = (cy - c_vec[1])
- cz = (cz - c_vec[2])
- com = np.array([v - np.floor(v) for v in [cx, cy, cz]])
- return (com * pm).sum(axis=1) / pm.sum() + c_vec
+ c = {}
+ c[0] = self["particle_position_x"]
+ c[1] = self["particle_position_y"]
+ c[2] = self["particle_position_z"]
+ c_vec = np.zeros(3)
+ com = []
+ for i in range(3):
+ # A halo is likely periodic around a boundary if the distance
+ # between the max and min particle
+ # positions are larger than half the box.
+ # So skip the rest if the converse is true.
+ # Note we might make a change here when periodicity-handling is
+ # fully implemented.
+ if (c[i].max() - c[i].min()) < (self.pf.domain_width[i] / 2.):
+ com.append(c[i])
+ continue
+ # Now we want to flip around only those close to the left boundary.
+ d_left = c[i] - self.pf.domain_left_edge[i]
+ sel = (d_left <= (self.pf.domain_width[i]/2))
+ c[i][sel] += self.pf.domain_width[i]
+ com.append(c[i])
+ com = np.array(com)
+ c = (com * pm).sum(axis=1) / pm.sum()
+ return c%self.pf.domain_width
def maximum_density(self):
r"""Return the HOP-identified maximum density. Not applicable to
@@ -809,7 +821,6 @@
_radjust = 1.05
def __init__(self, pf, id, size=None, CoM=None,
-
max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
rms_vel=None, fnames=None, mag_A=None, mag_B=None, mag_C=None,
e1_vec=None, tilt=None, supp=None):
@@ -843,6 +854,10 @@
self.supp = {}
else:
self.supp = supp
+ self._saved_fields = {}
+ self._ds_sort = None
+ self._particle_mask = None
+
def __getitem__(self, key):
# This function will try to get particle data in one of three ways,
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -132,7 +132,6 @@
not stored in enzo datasets, so must be entered by hand.
sigma8input=%f primordial_index=%f omega_baryon0=%f
""" % (self.sigma8input, self.primordial_index, self.omega_baryon0))
- time.sleep(1)
# Do the calculations.
self.sigmaM()
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,7 +62,7 @@
notebook_password = '',
answer_testing_tolerance = '3',
answer_testing_bitwise = 'False',
- gold_standard_filename = 'gold005',
+ gold_standard_filename = 'gold006',
local_standard_filename = 'local001',
sketchfab_api_key = 'None'
)
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1387,9 +1387,11 @@
else:
self.fields = ensure_list(fields)
from yt.visualization.plot_window import \
- GetOffAxisBoundsAndCenter, PWViewerMPL
+ GetObliqueWindowParameters, PWViewerMPL
from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
- (bounds, center_rot) = GetOffAxisBoundsAndCenter(normal, center, width, self.pf)
+ (bounds, center_rot, units) = GetObliqueWindowParameters(normal, center, width, self.pf)
+ if axes_unit is None and units != ('1', '1'):
+ axes_units = units
pw = PWViewerMPL(self, bounds, origin='center-window', periodic=False, oblique=True,
frb_generator=ObliqueFixedResolutionBuffer, plot_type='OffAxisSlice')
pw.set_axes_unit(axes_unit)
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -80,6 +80,9 @@
mylog.debug("Detecting fields.")
self._detect_fields()
+ mylog.debug("Detecting fields in backup file")
+ self._detect_fields_backup()
+
mylog.debug("Adding unknown detected fields")
self._setup_unknown_fields()
@@ -93,6 +96,22 @@
if self._data_file is not None:
self._data_file.close()
+ def _detect_fields_backup(self):
+ # grab fields from backup file as well, if present
+ try:
+ backup_filename = self.parameter_file.backup_filename
+ f = h5py.File(backup_filename, 'r')
+ g = f["data"]
+ grid = self.grids[0] # simply check one of the grids
+ grid_group = g["grid_%010i" % (grid.id - grid._id_offset)]
+ for field_name in grid_group:
+ if field_name != 'particles':
+ self.field_list.append(field_name)
+ except KeyError:
+ return
+ except IOError:
+ return
+
def _get_parameters(self):
return self.parameter_file.parameters
parameters=property(_get_parameters)
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -81,6 +81,10 @@
self.basename = os.path.basename(filename)
self.directory = os.path.expanduser(os.path.dirname(filename))
self.fullpath = os.path.abspath(self.directory)
+ self.backup_filename = self.parameter_filename + '_backup.gdf'
+ self.read_from_backup = False
+ if os.path.exists(self.backup_filename):
+ self.read_from_backup = True
if len(self.directory) == 0:
self.directory = "."
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/data_objects/tests/test_cutting_plane.py
--- /dev/null
+++ b/yt/data_objects/tests/test_cutting_plane.py
@@ -0,0 +1,45 @@
+from yt.testing import *
+import os
+
+def setup():
+ from yt.config import ytcfg
+ ytcfg["yt","__withintesting"] = "True"
+
+def teardown_func(fns):
+ for fn in fns:
+ os.remove(fn)
+
+def test_cutting_plane():
+ for nprocs in [8, 1]:
+ # We want to test both 1 proc and 8 procs, to make sure that
+ # parallelism isn't broken
+ pf = fake_random_pf(64, nprocs = nprocs)
+ dims = pf.domain_dimensions
+ center = [0.5,0.5,0.5]
+ normal = [1,1,1]
+ fns = []
+ cut = pf.h.cutting(normal, center, ["Ones", "Density"])
+ yield assert_equal, cut["Ones"].sum(), cut["Ones"].size
+ yield assert_equal, cut["Ones"].min(), 1.0
+ yield assert_equal, cut["Ones"].max(), 1.0
+ pw = cut.to_pw()
+ fns += pw.save()
+ frb = cut.to_frb((1.0,'unitary'), 64)
+ for cut_field in ['Ones', 'Density']:
+ yield assert_equal, frb[cut_field].info['data_source'], \
+ cut.__str__()
+ yield assert_equal, frb[cut_field].info['axis'], \
+ 4
+ yield assert_equal, frb[cut_field].info['field'], \
+ cut_field
+ yield assert_equal, frb[cut_field].info['units'], \
+ pf.field_info[cut_field].get_units()
+ yield assert_equal, frb[cut_field].info['xlim'], \
+ frb.bounds[:2]
+ yield assert_equal, frb[cut_field].info['ylim'], \
+ frb.bounds[2:]
+ yield assert_equal, frb[cut_field].info['length_to_cm'], \
+ pf['cm']
+ yield assert_equal, frb[cut_field].info['center'], \
+ cut.center
+ teardown_func(fns)
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/data_objects/tests/test_streamlines.py
--- a/yt/data_objects/tests/test_streamlines.py
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -7,7 +7,7 @@
_fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
-def test_covering_grid():
+def test_streamlines():
# We decompose in different ways
cs = np.mgrid[0.47:0.53:2j,0.47:0.53:2j,0.47:0.53:2j]
cs = np.array([a.ravel() for a in cs]).T
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/frontends/_skeleton/io.py
--- a/yt/frontends/_skeleton/io.py
+++ b/yt/frontends/_skeleton/io.py
@@ -33,12 +33,12 @@
_particle_reader = False
_data_style = "skeleton"
- def _read_data_set(self, grid, field):
+ def _read_data(self, grid, field):
# This must return the array, of size/shape grid.ActiveDimensions, that
# corresponds to 'field'.
pass
def _read_data_slice(self, grid, field, axis, coord):
# If this is not implemented, the IO handler will just slice a
- # _read_data_set item.
+ # _read_data item.
pass
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -132,7 +132,7 @@
return dat[idx]
raise KeyError
- def _read_data_set(self, grid, field):
+ def _read_data(self, grid, field):
if field in particle_fields:
return self._read_particle_field(grid, field)
pf = grid.pf
@@ -161,11 +161,6 @@
l_delta += 1
return tr.astype("float64")
- def _read_data_slice(self, grid, field, axis, coord):
- sl = [slice(None), slice(None), slice(None)]
- sl[axis] = slice(coord, coord + 1)
- return self._read_data_set(grid, field)[sl]
-
def _count_art_octs(f, offset,
MinLev, MaxLevelNow):
level_oct_offsets= [0,]
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -43,7 +43,7 @@
def _read_field_names(self,grid):
pass
- def _read_data_set(self,grid,field):
+ def _read_data(self,grid,field):
f = file(grid.filename, 'rb')
dtype, offsetr = grid.hierarchy._field_map[field]
grid_ncells = np.prod(grid.ActiveDimensions)
@@ -77,31 +77,8 @@
sl[axis] = slice(coord, coord + 1)
if grid.pf.field_ordering == 1:
sl.reverse()
+ return self._read_data_set(grid, field)[sl]
- f = file(grid.filename, 'rb')
- dtype, offsetr = grid.hierarchy._field_map[field]
- grid_ncells = np.prod(grid.ActiveDimensions)
- grid_dims = grid.ActiveDimensions
- grid0_ncells = np.prod(grid.hierarchy.grid_dimensions[0,:])
- read_table_offset = get_read_table_offset(f)
- if grid_ncells != grid0_ncells:
- offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
- if grid_ncells == grid0_ncells:
- offset = offsetr
- f.seek(read_table_offset+offset)
- if dtype == 'scalar':
- data = np.fromfile(f, dtype='>f4',
- count=grid_ncells).reshape(grid.ActiveDimensions,order='F')[sl].copy()
- if dtype == 'vector':
- data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
- if '_x' in field:
- data = data[0::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
- elif '_y' in field:
- data = data[1::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
- elif '_z' in field:
- data = data[2::3].reshape(grid.ActiveDimensions,order='F')[sl].copy()
- f.close()
- return data
def get_read_table_offset(f):
line = f.readline()
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/frontends/castro/io.py
--- a/yt/frontends/castro/io.py
+++ b/yt/frontends/castro/io.py
@@ -53,7 +53,7 @@
tr)
return tr
- def _read_data_set(self, grid, field):
+ def _read_data(self, grid, field):
"""
reads packed multiFABs output by BoxLib in "NATIVE" format.
@@ -133,13 +133,3 @@
inFile.close()
return field
-
- def _read_data_slice(self, grid, field, axis, coord):
- """wishful thinking?
- """
- sl = [slice(None), slice(None), slice(None)]
- sl[axis] = slice(coord, coord + 1)
- #sl = tuple(reversed(sl))
- return self._read_data_set(grid, field)[sl]
-
-
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -146,7 +146,7 @@
def _detect_fields(self):
ncomp = int(self._fhandle['/'].attrs['num_components'])
self.field_list = [c[1] for c in self._fhandle['/'].attrs.items()[-ncomp:]]
-
+
def _setup_classes(self):
dd = self._get_data_reader_dict()
AMRHierarchy._setup_classes(self, dd)
@@ -220,6 +220,7 @@
self.fullplotdir = os.path.abspath(filename)
StaticOutput.__init__(self,filename,data_style)
self.storage_filename = storage_filename
+ self.cosmological_simulation = False
fileh.close()
def _set_units(self):
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -24,6 +24,7 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import h5py
+import os
import re
import numpy as np
@@ -49,7 +50,8 @@
fns = [c[1] for c in f['/'].attrs.items()[-ncomp-1:-1]]
fhandle.close()
- def _read_data_set(self,grid,field):
+ def _read_data(self,grid,field):
+
fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
field_dict = self._field_dict(fhandle)
@@ -62,15 +64,10 @@
start = grid_offset+field_dict[field]*boxsize
stop = start + boxsize
data = lev[self._data_string][start:stop]
-
+
fhandle.close()
return data.reshape(dims, order='F')
- def _read_data_slice(self, grid, field, axis, coord):
- sl = [slice(None), slice(None), slice(None)]
- sl[axis] = slice(coord, coord + 1)
- return self._read_data_set(grid,field)[sl]
-
def _read_particles(self, grid, field):
"""
parses the Orion Star Particle text files
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/frontends/enzo/api.py
--- a/yt/frontends/enzo/api.py
+++ b/yt/frontends/enzo/api.py
@@ -51,7 +51,6 @@
from .io import \
IOHandlerEnzoHDF4, \
- IOHandlerEnzoHDF4_2D, \
IOHandlerEnzoHDF5, \
IOHandlerPackedHDF5, \
IOHandlerInMemory, \
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -995,6 +995,7 @@
for p, v in self._conversion_override.items():
self.conversion_factors[p] = v
self.refine_by = self.parameters["RefineBy"]
+ self.periodicity = ensure_tuple(self.parameters["LeftFaceBoundaryCondition"] == 3)
self.dimensionality = self.parameters["TopGridRank"]
self.domain_dimensions = self.parameters["TopGridDimensions"]
self.current_time = self.parameters["InitialTime"]
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -52,7 +52,7 @@
"""
return SD.SD(grid.filename).datasets().keys()
- def _read_data_set(self, grid, field):
+ def _read_data(self, grid, field):
"""
Returns after having obtained or generated a field. Should throw an
exception. Should only be called as EnzoGridInstance.readData()
@@ -62,41 +62,10 @@
"""
return SD.SD(grid.filename).select(field).get().swapaxes(0,2)
- def _read_data_slice(self, grid, field, axis, coord):
- """
- Reads a slice through the HDF4 data
-
- @param grid: Grid to slice
- @type grid: L{EnzoGrid<EnzoGrid>}
- @param field: field to get
- @type field: string
- @param sl: region to get
- @type sl: SliceType
- """
- sl = [slice(None), slice(None), slice(None)]
- sl[axis] = slice(coord, coord + 1)
- sl = tuple(reversed(sl))
- return SD.SD(grid.filename).select(field)[sl].swapaxes(0,2)
-
@property
def _read_exception(self):
return SD.HDF4Error
-class IOHandlerEnzoHDF4_2D(IOHandlerEnzoHDF4):
-
- _data_style = "enzo_hdf4_2d"
-
- def _read_data_set(self, grid, field):
- t = SD.SD(grid.filename).select(field).get()[:,:,None]
- return t.swapaxes(0,1)
-
- def _read_data_slice(self, grid, field, axis, coord):
- t = SD.SD(grid.filename).select(field).get()
- return t.transpose()
-
- def modify(self, field):
- return field
-
class IOHandlerEnzoHDF5(BaseIOHandler):
_data_style = "enzo_hdf5"
@@ -109,25 +78,9 @@
"""
return hdf5_light_reader.ReadListOfDatasets(grid.filename, "/")
- def _read_data_set(self, grid, field):
+ def _read_data(self, grid, field):
return hdf5_light_reader.ReadData(grid.filename, "/%s" % field).swapaxes(0,2)
- def _read_data_slice(self, grid, field, axis, coord):
- """
- Reads a slice through the HDF5 data
-
- @param grid: Grid to slice
- @type grid: L{EnzoGrid<EnzoGrid>}
- @param field: field to get
- @type field: string
- @param axis: axis to slice along
- @param coord: coord to slice at
- """
- axis = {0:2,1:1,2:0}[axis]
- t = hdf5_light_reader.ReadDataSlice(grid.filename, "/%s" %
- (field), axis, coord).transpose()
- return t
-
def modify(self, field):
return field.swapaxes(0,2)
@@ -180,17 +133,12 @@
for gid in data: self.queue[gid].update(data[gid])
mylog.debug("Finished read of %s", sets)
- def _read_data_set(self, grid, field):
+ def _read_data(self, grid, field):
tr = hdf5_light_reader.ReadData(grid.filename,
"/Grid%08i/%s" % (grid.id, field))
if tr.dtype == "float32": tr = tr.astype("float64")
return self.modify(tr)
- def _read_data_slice(self, grid, field, axis, coord):
- axis = _axis_ids[axis]
- return hdf5_light_reader.ReadDataSlice(grid.filename, "/Grid%08i/%s" %
- (grid.id, field), axis, coord).transpose()
-
def _read_field_names(self, grid):
return hdf5_light_reader.ReadListOfDatasets(
grid.filename, "/Grid%08i" % grid.id)
@@ -208,11 +156,6 @@
tr = field[3:-3,3:-3,3:-3].swapaxes(0,2)
return tr.copy() # To ensure contiguous
- def _read_data_slice(self, grid, field, axis, coord):
- axis = _axis_ids[axis]
- return hdf5_light_reader.ReadDataSlice(grid.filename, "/Grid%08i/%s" %
- (grid.id, field), axis, coord)[3:-3,3:-3].transpose()
-
def _read_raw_data_set(self, grid, field):
return hdf5_light_reader.ReadData(grid.filename,
"/Grid%08i/%s" % (grid.id, field))
@@ -231,7 +174,7 @@
slice(ghost_zones,-ghost_zones))
BaseIOHandler.__init__(self)
- def _read_data_set(self, grid, field):
+ def _read_data(self, grid, field):
if grid.id not in self.grids_in_memory:
mylog.error("Was asked for %s but I have %s", grid.id, self.grids_in_memory.keys())
raise KeyError
@@ -256,6 +199,7 @@
return self.grids_in_memory[grid.id].keys()
def _read_data_slice(self, grid, field, axis, coord):
+ # This data style cannot have a sidecar file
sl = [slice(3,-3), slice(3,-3), slice(3,-3)]
sl[axis] = slice(coord + 3, coord + 4)
sl = tuple(reversed(sl))
@@ -272,7 +216,7 @@
_data_style = "enzo_packed_2d"
_particle_reader = False
- def _read_data_set(self, grid, field):
+ def _read_data(self, grid, field):
return hdf5_light_reader.ReadData(grid.filename,
"/Grid%08i/%s" % (grid.id, field)).transpose()[:,:,None]
@@ -290,7 +234,7 @@
_data_style = "enzo_packed_1d"
_particle_reader = False
- def _read_data_set(self, grid, field):
+ def _read_data(self, grid, field):
return hdf5_light_reader.ReadData(grid.filename,
"/Grid%08i/%s" % (grid.id, field)).transpose()[:,None,None]
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -221,7 +221,7 @@
self._handle = h5py.File(filename, "r")
if conversion_override is None: conversion_override = {}
self._conversion_override = conversion_override
-
+
self.particle_filename = particle_filename
if self.particle_filename is None :
@@ -451,7 +451,7 @@
self.current_time = self.parameters["time"]
# Determine if this is a periodic box
- p = [self.parameters.get("%sl_boundary_type" % ax, None) == Periodic for ax in 'xyz']
+ p = [self.parameters.get("%sl_boundary_type" % ax, None) == "periodic" for ax in 'xyz']
self.periodicity = tuple(p)
# Determine cosmological parameters.
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -53,7 +53,7 @@
count_list, conv_factors):
pass
- def _read_data_set(self, grid, field):
+ def _read_data(self, grid, field):
f = self._handle
f_part = self._particle_handle
if field in self._particle_fields:
@@ -65,11 +65,3 @@
else:
tr = f["/%s" % field][grid.id - grid._id_offset,:,:,:].transpose()
return tr.astype("float64")
-
- def _read_data_slice(self, grid, field, axis, coord):
- sl = [slice(None), slice(None), slice(None)]
- sl[axis] = slice(coord, coord + 1)
- f = self._handle
- tr = f["/%s" % field][grid.id - grid._id_offset].transpose()[sl]
- return tr.astype("float64")
-
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/frontends/flash/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -0,0 +1,60 @@
+"""
+FLASH frontend tests
+
+Author: John ZuHone <jzuhone@gmail.com>
+Affiliation: NASA/Goddard Space Flight Center
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2012 John ZuHone. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+ requires_pf, \
+ small_patch_amr, \
+ big_patch_amr, \
+ data_dir_load
+from yt.frontends.flash.api import FLASHStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV")
+
+sloshing = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
+@requires_pf(sloshing)
+def test_sloshing():
+ pf = data_dir_load(sloshing)
+ yield assert_equal, str(pf), "sloshing_low_res_hdf5_plt_cnt_0300"
+ for test in small_patch_amr(sloshing, _fields):
+ yield test
+
+_fields_2d = ("Temperature", "Density")
+
+wt = "WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030"
+@requires_pf(wt)
+def test_wind_tunnel():
+ pf = data_dir_load(wt)
+ yield assert_equal, str(pf), "windtunnel_4lev_hdf5_plt_cnt_0030"
+ for test in small_patch_amr(wt, _fields_2d):
+ yield test
+
+gcm = "GalaxyClusterMerger/fiducial_1to10_b0.273d_hdf5_plt_cnt_0245.gz"
+@requires_pf(gcm, big_data=True)
+def test_galaxy_cluster_merger():
+ pf = data_dir_load(gcm)
+ for test in big_patch_amr(gcm, _fields):
+ yield test
+
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/frontends/gadget/io.py
--- a/yt/frontends/gadget/io.py
+++ b/yt/frontends/gadget/io.py
@@ -31,7 +31,7 @@
class IOHandlerGadget(BaseIOHandler):
_data_style = 'gadget_infrastructure'
- def _read_data_set(self, grid, field):
+ def _read_data(self, grid, field):
data = []
fh = h5py.File(grid.filename,mode='r')
for ptype in grid.particle_types:
@@ -50,6 +50,6 @@
def _read_data_slice(self,grid, field, axis, coord):
#how would we implement axis here?
- dat = self._read_data_set(grid,field)
+ dat = self._read_data(grid,field)
return dat[coord]
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/frontends/gdf/io.py
--- a/yt/frontends/gdf/io.py
+++ b/yt/frontends/gdf/io.py
@@ -45,7 +45,7 @@
fhandle.close()
return names
- def _read_data_set(self,grid,field):
+ def _read_data(self,grid,field):
fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
data = (fhandle['/data/grid_%010i/'%grid.id+field][:]).copy()
fhandle.close()
@@ -59,9 +59,7 @@
sl[axis] = slice(coord, coord + 1)
if grid.pf.field_ordering == 1:
sl.reverse()
- fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
- data = (fhandle['/data/grid_%010i/'%grid.id+field][:][sl]).copy()
- fhandle.close()
+ data = self._read_data_set(grid, field)
if grid.pf.field_ordering == 1:
return data.T
else:
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/frontends/maestro/io.py
--- a/yt/frontends/maestro/io.py
+++ b/yt/frontends/maestro/io.py
@@ -42,7 +42,7 @@
def modify(self, field):
return field.swapaxes(0,2)
- def _read_data_set(self,grid,field):
+ def _read_data(self,grid,field):
"""
reads packed multiFABs output by BoxLib in "NATIVE" format.
@@ -121,12 +121,3 @@
inFile.close()
return field
- def _read_data_slice(self, grid, field, axis, coord):
- """wishful thinking?
- """
- sl = [slice(None), slice(None), slice(None)]
- sl[axis] = slice(coord, coord + 1)
- #sl = tuple(reversed(sl))
- return self._read_data_set(grid,field)[sl]
-
-
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/frontends/nyx/io.py
--- a/yt/frontends/nyx/io.py
+++ b/yt/frontends/nyx/io.py
@@ -52,7 +52,7 @@
len(nyx_particle_field_names), tr)
return tr
- def _read_data_set(self, grid, field):
+ def _read_data(self, grid, field):
""" reads packed multiFABs output by BoxLib in "NATIVE" format. """
if field in nyx_particle_field_names:
return self._read_particle_field(grid, field)
@@ -77,9 +77,3 @@
return field
- def _read_data_slice(self, grid, field, axis, coord):
- # wishful thinking?
- sl = [slice(None), slice(None), slice(None)]
- sl[axis] = slice(coord, coord + 1)
- #sl = tuple(reversed(sl))
- return self._read_data_set(grid, field)[sl]
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/frontends/orion/io.py
--- a/yt/frontends/orion/io.py
+++ b/yt/frontends/orion/io.py
@@ -78,7 +78,7 @@
particles.append(read(line, field))
return np.array(particles)
- def _read_data_set(self,grid,field):
+ def _read_data(self,grid,field):
"""
reads packed multiFABs output by BoxLib in "NATIVE" format.
@@ -158,12 +158,3 @@
inFile.close()
return field
- def _read_data_slice(self, grid, field, axis, coord):
- """wishful thinking?
- """
- sl = [slice(None), slice(None), slice(None)]
- sl[axis] = slice(coord, coord + 1)
- #sl = tuple(reversed(sl))
- return self._read_data_set(grid,field)[sl]
-
-
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -37,7 +37,7 @@
self.ramses_tree = ramses_tree
BaseIOHandler.__init__(self, *args, **kwargs)
- def _read_data_set(self, grid, field):
+ def _read_data(self, grid, field):
tr = np.zeros(grid.ActiveDimensions, dtype='float64')
filled = np.zeros(grid.ActiveDimensions, dtype='int32')
to_fill = grid.ActiveDimensions.prod()
@@ -55,11 +55,6 @@
l_delta += 1
return tr
- def _read_data_slice(self, grid, field, axis, coord):
- sl = [slice(None), slice(None), slice(None)]
- sl[axis] = slice(coord, coord + 1)
- return self._read_data_set(grid, field)[sl]
-
def preload(self, grids, sets):
if len(grids) == 0: return
domain_keys = defaultdict(list)
@@ -72,7 +67,7 @@
mylog.debug("Starting read of domain %s (%s)", domain, sets)
for field in sets:
for g in grids:
- self.queue[g.id][field] = self._read_data_set(g, field)
+ self.queue[g.id][field] = self._read_data(g, field)
print "Clearing", field, domain
self.ramses_tree.clear_tree(field, domain - 1)
mylog.debug("Finished read of %s", sets)
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -40,15 +40,13 @@
self.fields = stream_handler.fields
BaseIOHandler.__init__(self)
- def _read_data_set(self, grid, field):
+ def _read_data(self, grid, field):
# This is where we implement processor-locking
#if grid.id not in self.grids_in_memory:
# mylog.error("Was asked for %s but I have %s", grid.id, self.grids_in_memory.keys())
# raise KeyError
- tr = self.fields[grid.id][field]
+ tr = self.fields[grid.id][field].copy()
# If it's particles, we copy.
- if len(tr.shape) == 1: return tr.copy()
- # New in-place unit conversion breaks if we don't copy first
return tr
def modify(self, field):
@@ -57,14 +55,6 @@
def _read_field_names(self, grid):
return self.fields[grid.id].keys()
- def _read_data_slice(self, grid, field, axis, coord):
- sl = [slice(None), slice(None), slice(None)]
- sl[axis] = slice(coord, coord + 1)
- sl = tuple(sl)
- tr = self.fields[grid.id][field][sl]
- # In-place unit conversion requires we return a copy
- return tr.copy()
-
@property
def _read_exception(self):
return KeyError
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/frontends/tiger/io.py
--- a/yt/frontends/tiger/io.py
+++ b/yt/frontends/tiger/io.py
@@ -34,20 +34,10 @@
BaseIOHandler.__init__(self, *args, **kwargs)
self._memmaps = {}
- def _read_data_set(self, grid, field):
+ def _read_data(self, grid, field):
fn = grid.pf.basename + grid.hierarchy.file_mapping[field]
LD = np.array(grid.left_dims, dtype='int64')
SS = np.array(grid.ActiveDimensions, dtype='int64')
RS = np.array(grid.pf.root_size, dtype='int64')
data = au.read_tiger_section(fn, LD, SS, RS).astype("float64")
return data
-
- def _read_data_slice(self, grid, field, axis, coord):
- fn = grid.pf.basename + grid.hierarchy.file_mapping[field]
- LD = np.array(grid.left_dims, dtype='int64').copy()
- SS = np.array(grid.ActiveDimensions, dtype='int64').copy()
- RS = np.array(grid.pf.root_size, dtype='int64').copy()
- LD[axis] += coord
- SS[axis] = 1
- data = au.read_tiger_section(fn, LD, SS, RS).astype("float64")
- return data
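Alongside the _read_data_set rename, these hunks delete every per-frontend _read_data_slice. Presumably a shared fallback now obtains a slice from the full 3D _read_data result, just as the removed methods did by hand; a minimal sketch of that pattern, with handler standing in for any of the IO handlers above:

def read_data_slice(handler, grid, field, axis, coord):
    # select a single plane of the grid along the given axis
    sl = [slice(None), slice(None), slice(None)]
    sl[axis] = slice(coord, coord + 1)
    return handler._read_data(grid, field)[tuple(sl)]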
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -22,6 +22,7 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
+import itertools as it
import numpy as np
from yt.funcs import *
from numpy.testing import assert_array_equal, assert_almost_equal, \
@@ -163,3 +164,100 @@
for field,offset in zip(fields,offsets))
ug = load_uniform_grid(data, ndims, 1.0, nprocs = nprocs)
return ug
+
+def expand_keywords(keywords, full=False):
+ """
+ expand_keywords is a means of testing many keyword-argument
+ combinations in the nosetests. Simply pass it a dictionary mapping
+ each keyword argument to all of the values you want to test for
+ that argument.
+
+ It will return a list of **kwargs dicts containing combinations of
+ the various kwarg values you passed it. These can then be passed
+ to the appropriate function in nosetests.
+
+ If full=True, then every possible combination of keywords is produced;
+ otherwise, every keyword option is included at least once in the output
+ list. Be careful: with full=True you may be in for an exponentially
+ larger number of tests!
+
+ Parameters
+ ----------
+ keywords : dict
+ a dictionary where the keys are the keywords for the function,
+ and the values of each key are the possible values that this key
+ can take in the function
+
+ full : bool
+ if set to True, every possible combination of given keywords is
+ returned
+
+ Returns
+ -------
+ array of dicts
+ An array of **kwargs dictionaries to be individually passed to
+ the appropriate function matching these kwargs.
+
+ Examples
+ --------
+ >>> keywords = {}
+ >>> keywords['dpi'] = (50, 100, 200)
+ >>> keywords['cmap'] = ('algae', 'jet')
+ >>> list_of_kwargs = expand_keywords(keywords)
+ >>> print list_of_kwargs
+
+ array([{'cmap': 'algae', 'dpi': 50},
+ {'cmap': 'jet', 'dpi': 100},
+ {'cmap': 'algae', 'dpi': 200}], dtype=object)
+
+ >>> list_of_kwargs = expand_keywords(keywords, full=True)
+ >>> print list_of_kwargs
+
+ array([{'cmap': 'algae', 'dpi': 50},
+ {'cmap': 'algae', 'dpi': 100},
+ {'cmap': 'algae', 'dpi': 200},
+ {'cmap': 'jet', 'dpi': 50},
+ {'cmap': 'jet', 'dpi': 100},
+ {'cmap': 'jet', 'dpi': 200}], dtype=object)
+
+ >>> for kwargs in list_of_kwargs:
+ ... write_projection(*args, **kwargs)
+ """
+
+ # if we want every possible combination of keywords, use itertools.product
+ if full:
+ keys = sorted(keywords)
+ list_of_kwarg_dicts = np.array([dict(zip(keys, prod)) for prod in \
+ it.product(*(keywords[key] for key in keys))])
+
+ # if we just want to probe each keyword, but not necessarily every
+ # combination
+ else:
+ # Determine the maximum number of values any of the keywords has
+ num_lists = 0
+ for val in keywords.values():
+ if isinstance(val, str):
+ num_lists = max(1, num_lists)  # keep this an int: it is used as a range() bound below
+ else:
+ num_lists = max(len(val), num_lists)
+
+ # Construct an array of kwargs dicts; each element gives a different
+ # combination of the possible values of the kwargs.
+
+ # initialize array
+ list_of_kwarg_dicts = np.array([dict() for x in range(num_lists)])
+
+ # fill in array
+ for i in np.arange(num_lists):
+ list_of_kwarg_dicts[i] = {}
+ for key in keywords.keys():
+ # if it's a string, use it (there's only one)
+ if isinstance(keywords[key], str):
+ list_of_kwarg_dicts[i][key] = keywords[key]
+ # if there are more options, use the i'th val
+ elif i < len(keywords[key]):
+ list_of_kwarg_dicts[i][key] = keywords[key][i]
+ # if there are no more options, use the 0th val
+ else:
+ list_of_kwarg_dicts[i][key] = keywords[key][0]
+
+ return list_of_kwarg_dicts
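For the full=True branch, the sorted keys plus itertools.product reproduce the docstring's six-combination example; a self-contained illustration using the keyword values from the docstring:

import itertools as it

keywords = {'dpi': (50, 100, 200), 'cmap': ('algae', 'jet')}
keys = sorted(keywords)
combos = [dict(zip(keys, prod))
          for prod in it.product(*(keywords[k] for k in keys))]
print len(combos)   # 6: every (cmap, dpi) pairing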
diff -r 7eb999176655deb8dec8ab32a467f70f2bea75df -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f yt/utilities/amr_kdtree/amr_kdtools.py
--- /dev/null
+++ b/yt/utilities/amr_kdtree/amr_kdtools.py
@@ -0,0 +1,402 @@
+"""
+AMR kD-Tree Tools
+
+Authors: Samuel Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2010-2011 Samuel Skillman. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+import numpy as np
+from yt.funcs import *
+from yt.utilities.lib import kdtree_get_choices
+
+def _lchild_id(node_id): return (node_id<<1)
+def _rchild_id(node_id): return (node_id<<1) + 1
+def _parent_id(node_id): return node_id >> 1  # inverse of the child ids above
+
+class Node(object):
+ def __init__(self, parent, left, right,
+ left_edge, right_edge, grid_id, node_id):
+ self.left = left
+ self.right = right
+ self.left_edge = left_edge
+ self.right_edge = right_edge
+ self.grid = grid_id
+ self.parent = parent
+ self.id = node_id
+ self.data = None
+ self.split = None
+
+class Split(object):
+ def __init__(self, dim, pos):
+ self.dim = dim
+ self.pos = pos
+
+def should_i_build(node, rank, size):
+ if (node.id < size) or (node.id >= 2*size):
+ return True
+ elif node.id - size == rank:
+ return True
+ else:
+ return False
+
+def add_grids(node, gles, gres, gids, rank, size):
+ if not should_i_build(node, rank, size):
+ return
+
+ if kd_is_leaf(node):
+ insert_grids(node, gles, gres, gids, rank, size)
+ else:
+ less_ids = gles[:,node.split.dim] < node.split.pos
+ if len(less_ids) > 0:
+ add_grids(node.left, gles[less_ids], gres[less_ids],
+ gids[less_ids], rank, size)
+
+ greater_ids = gres[:,node.split.dim] > node.split.pos
+ if len(greater_ids) > 0:
+ add_grids(node.right, gles[greater_ids], gres[greater_ids],
+ gids[greater_ids], rank, size)
+
+def should_i_split(node, rank, size):
+ return node.id < size
+
+def geo_split(node, gles, gres, grid_ids, rank, size):
+ big_dim = np.argmax(gres[0]-gles[0])
+ new_pos = (gres[0][big_dim] + gles[0][big_dim])/2.
+ old_gre = gres[0].copy()
+ new_gle = gles[0].copy()
+ new_gle[big_dim] = new_pos
+ gres[0][big_dim] = new_pos
+ gles = np.append(gles, np.array([new_gle]), axis=0)
+ gres = np.append(gres, np.array([old_gre]), axis=0)
+ grid_ids = np.append(grid_ids, grid_ids, axis=0)
+
+ split = Split(big_dim, new_pos)
+
+ # Create a Split
+ divide(node, split)
+
+ # Populate Left Node
+ #print 'Inserting left node', node.left_edge, node.right_edge
+ insert_grids(node.left, gles[:1], gres[:1],
+ grid_ids[:1], rank, size)
+
+ # Populate Right Node
+ #print 'Inserting right node', node.left_edge, node.right_edge
+ insert_grids(node.right, gles[1:], gres[1:],
+ grid_ids[1:], rank, size)
+ return
+
+def insert_grids(node, gles, gres, grid_ids, rank, size):
+ if not should_i_build(node, rank, size) or grid_ids.size == 0:
+ return
+
+ if len(grid_ids) == 1:
+ # If we should continue to split based on parallelism, do so!
+ if should_i_split(node, rank, size):
+ geo_split(node, gles, gres, grid_ids, rank, size)
+ return
+
+ if np.all(gles[0] <= node.left_edge) and \
+ np.all(gres[0] >= node.right_edge):
+ node.grid = grid_ids[0]
+ assert(node.grid is not None)
+ return
+
+ # Split the grids
+ check = split_grids(node, gles, gres, grid_ids, rank, size)
+ # If check is -1, then we have found a place where there are no choices.
+ # Exit out and set the node to None.
+ if check == -1:
+ node.grid = None
+ return
+
+def split_grids(node, gles, gres, grid_ids, rank, size):
+ # Find a Split
+ data = np.array([(gles[i,:], gres[i,:]) for i in
+ xrange(grid_ids.shape[0])], copy=False)
+ best_dim, split_pos, less_ids, greater_ids = \
+ kdtree_get_choices(data, node.left_edge, node.right_edge)
+
+ # If best_dim is -1, then we have found a place where there are no choices.
+ # Exit out and set the node to None.
+ if best_dim == -1:
+ return -1
+
+ split = Split(best_dim, split_pos)
+
+ del data, best_dim, split_pos
+
+ # Create a Split
+ divide(node, split)
+
+ # Populate Left Node
+ #print 'Inserting left node', node.left_edge, node.right_edge
+ insert_grids(node.left, gles[less_ids], gres[less_ids],
+ grid_ids[less_ids], rank, size)
+
+ # Populate Right Node
+ #print 'Inserting right node', node.left_edge, node.right_edge
+ insert_grids(node.right, gles[greater_ids], gres[greater_ids],
+ grid_ids[greater_ids], rank, size)
+
+ return
+
+def new_right(node, split):
+ new_right = node.right_edge.copy()
+ new_right[split.dim] = split.pos
+ return new_right
+
+def new_left(node, split):
+ new_left = node.left_edge.copy()
+ new_left[split.dim] = split.pos
+ return new_left
+
+def divide(node, split):
+ # Create a Split
+ node.split = split
+ node.left = Node(node, None, None,
+ node.left_edge, new_right(node, split), node.grid,
+ _lchild_id(node.id))
+ node.right = Node(node, None, None,
+ new_left(node, split), node.right_edge, node.grid,
+ _rchild_id(node.id))
+ return
+
+def kd_sum_volume(node):
+ if (node.left is None) and (node.right is None):
+ if node.grid is None:
+ return 0.0
+ return np.prod(node.right_edge - node.left_edge)
+ else:
+ return kd_sum_volume(node.left) + kd_sum_volume(node.right)
+
+def kd_sum_cells(node):
+ if (node.left is None) and (node.right is None):
+ if node.grid is None:
+ return 0.0
+ return np.prod(node.right_edge - node.left_edge)
+ else:
+ return kd_sum_cells(node.left) + kd_sum_cells(node.right)
+
+
+def kd_node_check(node):
+ assert (node.left is None) == (node.right is None)
+ if (node.left is None) and (node.right is None):
+ if node.grid is not None:
+ return np.prod(node.right_edge - node.left_edge)
+ else: return 0.0
+ else:
+ return kd_node_check(node.left)+kd_node_check(node.right)
+
+def kd_is_leaf(node):
+ no_l_child = node.left is None
+ no_r_child = node.right is None
+ assert no_l_child == no_r_child
+ return no_l_child
+
+def step_depth(current, previous):
+ '''
+ Takes a single step in the depth-first traversal
+ '''
+ if kd_is_leaf(current): # At a leaf, move back up
+ previous = current
+ current = current.parent
+
+ elif current.parent is previous: # Moving down, go left first
+ previous = current
+ if current.left is not None:
+ current = current.left
+ elif current.right is not None:
+ current = current.right
+ else:
+ current = current.parent
+
+ elif current.left is previous: # Moving up from left, go right
+ previous = current
+ if current.right is not None:
+ current = current.right
+ else:
+ current = current.parent
+
+ elif current.right is previous: # Moving up from right child, move up
+ previous = current
+ current = current.parent
+
+ return current, previous
+
+def depth_traverse(tree, max_node=None):
+ '''
+ Yields a depth-first traversal of the kd tree always going to
+ the left child before the right.
+ '''
+ current = tree.trunk
+ previous = None
+ if max_node is None:
+ max_node = np.inf
+ while current is not None:
+ yield current
+ current, previous = step_depth(current, previous)
+ if current is None: break
+ if current.id >= max_node:
+ current = current.parent
+ previous = current.right
+
+def depth_first_touch(tree, max_node=None):
+ '''
+ Yields a depth-first traversal of the kd tree that touches each
+ node only once, on the way down, always going to the left child
+ before the right.
+ '''
+ current = tree.trunk
+ previous = None
+ if max_node is None:
+ max_node = np.inf
+ while current is not None:
+ if previous is None or previous.parent != current:
+ yield current
+ current, previous = step_depth(current, previous)
+ if current is None: break
+ if current.id >= max_node:
+ current = current.parent
+ previous = current.right
+
+def breadth_traverse(tree):
+ '''
+ Yields a traversal of the kd tree always going to the left child
+ before the right; note that, despite the name, this currently
+ steps depth-first via step_depth.
+ '''
+ current = tree.trunk
+ previous = None
+ while current is not None:
+ yield current
+ current, previous = step_depth(current, previous)
+
+
+def viewpoint_traverse(tree, viewpoint):
+ '''
+ Yields a viewpoint dependent traversal of the kd-tree. Starts
+ with nodes furthest away from viewpoint.
+ '''
+
+ current = tree.trunk
+ previous = None
+ while current is not None:
+ yield current
+ current, previous = step_viewpoint(current, previous, viewpoint)
+
+def step_viewpoint(current, previous, viewpoint):
+ '''
+ Takes a single step in the viewpoint based traversal. Always
+ goes to the node furthest away from viewpoint first.
+ '''
+ if kd_is_leaf(current): # At a leaf, move back up
+ previous = current
+ current = current.parent
+ elif current.split.dim is None: # This is a dead node
+ previous = current
+ current = current.parent
+
+ elif current.parent is previous: # Moving down
+ previous = current
+ if viewpoint[current.split.dim] <= current.split.pos:
+ if current.right is not None:
+ current = current.right
+ else:
+ previous = current.right
+ else:
+ if current.left is not None:
+ current = current.left
+ else:
+ previous = current.left
+
+ elif current.right is previous: # Moving up from right
+ previous = current
+ if viewpoint[current.split.dim] <= current.split.pos:
+ if current.left is not None:
+ current = current.left
+ else:
+ current = current.parent
+ else:
+ current = current.parent
+
+ elif current.left is previous: # Moving up from left child
+ previous = current
+ if viewpoint[current.split.dim] > current.split.pos:
+ if current.right is not None:
+ current = current.right
+ else:
+ current = current.parent
+ else:
+ current = current.parent
+
+ return current, previous
+
+
+def receive_and_reduce(comm, incoming_rank, image, add_to_front):
+ mylog.debug( 'Receiving image from %04i' % incoming_rank)
+ #mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
+ arr2 = comm.recv_array(incoming_rank, incoming_rank).reshape(
+ (image.shape[0], image.shape[1], image.shape[2]))
+
+ if add_to_front:
+ front = arr2
+ back = image
+ else:
+ front = image
+ back = arr2
+
+ if image.shape[2] == 3:
+ # Assume Projection Camera, Add
+ np.add(image, front, image)
+ return image
+
+ ta = 1.0 - front[:,:,3]
+ np.maximum(ta, 0.0, ta)
+ # This now does the following calculation, but in a memory
+ # conservative fashion
+ # image[:,:,i ] = front[:,:,i] + ta*back[:,:,i]
+ image = back.copy()
+ for i in range(4):
+ np.multiply(image[:,:,i], ta, image[:,:,i])
+ np.add(image, front, image)
+ return image
+
+def send_to_parent(comm, outgoing_rank, image):
+ mylog.debug( 'Sending image to %04i' % outgoing_rank)
+ comm.send_array(image, outgoing_rank, tag=comm.rank)
+
+def scatter_image(comm, root, image):
+ mylog.debug( 'Scattering from %04i' % root)
+ image = comm.mpi_bcast(image, root=root)
+ return image
+
+def find_node(node, pos):
+ """
+ Find the AMRKDTree node enclosing a position
+ """
+ assert(np.all(node.left_edge <= pos))
+ assert(np.all(node.right_edge > pos))
+ while not kd_is_leaf(node):
+ if pos[node.split.dim] < node.split.pos:
+ node = node.left
+ else:
+ node = node.right
+ return node
+
This diff is so big that we needed to truncate the remainder.
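Before moving on: the receive_and_reduce helper above performs a standard front-to-back "over" blend for 4-channel images, attenuating every channel of the far image by the near image's remaining transparency before adding the near image. A minimal sketch of just that arithmetic (not the full MPI plumbing):

import numpy as np

def composite_over(front, back):
    # ta = remaining transparency of the near image, clamped at zero
    ta = np.maximum(1.0 - front[:, :, 3], 0.0)
    out = back.copy()
    for i in range(4):
        np.multiply(out[:, :, i], ta, out[:, :, i])  # attenuate the far image
    np.add(out, front, out)                          # then add the near image
    return out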
https://bitbucket.org/yt_analysis/yt/commits/c58fbed2957c/
changeset: c58fbed2957c
branch: yt
user: chummels
date: 2013-02-15 02:43:05
summary: Added a comment line to the header pointing to a docstring example of how to use the full framework.
affected #: 1 file
diff -r 9c01dfb3728087f035421a1fbf90bb93e0243f1f -r c58fbed2957c8cac9e4c690880cf511835091925 yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -44,6 +44,8 @@
# 8. Parentage is described by a fraction of particles that pass from one to
# the other; we have both descendant fractions and ancestry fractions.
+# plot_halo_evolution gives a good full example of how to use the framework
+
import numpy as np
import h5py
import time
https://bitbucket.org/yt_analysis/yt/commits/533f5ddb9629/
changeset: 533f5ddb9629
branch: yt
user: chummels
date: 2013-02-15 18:41:53
summary: Adding a mkdir_rec function for recursively creating subdirectories: given an arbitrary path n levels deep, it creates all of the directories needed.
affected #: 1 file
Diff not available.
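Since the diff is not available, here is a hedged sketch of what a recursive directory helper like mkdir_rec presumably does (name taken from the summary, body assumed):

import os

def mkdir_rec(path):
    # create every missing level of an arbitrarily deep path
    if not os.path.exists(path):
        os.makedirs(path)

For example, mkdir_rec("a/b/c") creates a, a/b, and a/b/c as needed.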
https://bitbucket.org/yt_analysis/yt/commits/91b1119ef926/
changeset: 91b1119ef926
branch: yt
user: chummels
date: 2013-02-15 19:15:32
summary: Ensuring that halo_list outputs directed to an arbitrary path won't fail, by creating that path if it doesn't exist.
affected #: 2 files
Diff not available.
https://bitbucket.org/yt_analysis/yt/commits/aba16d47f041/
changeset: aba16d47f041
branch: yt
user: chummels
date: 2013-02-15 20:52:21
summary: Adding a feature for passing redshift information through the halo finding process and writing it as a comment to the text file when using the .write_out() function. Also adding functions for limiting the redshift/cycle range in halo_merger_tree.
affected #: 2 files
Diff not available.
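The diff is likewise unavailable; a hedged sketch of writing redshift as a comment header when dumping a halo catalog (column layout assumed purely for illustration):

halos = [(0, 1.2e12), (1, 3.4e11)]            # (halo id, mass) stand-ins
redshift = 0.5
with open("groups_00000.txt", "w") as f:
    f.write("# redshift = %f\n" % redshift)   # comment line, as described
    for hid, mass in halos:
        f.write("%d %e\n" % (hid, mass))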
https://bitbucket.org/yt_analysis/yt/commits/37c784260b3d/
changeset: 37c784260b3d
branch: yt
user: chummels
date: 2013-02-15 21:51:21
summary: Making halo_merger_tree more flexible in how it handles I/O, allowing the user to specify an arbitrary FOF directory where all files are read and written.
affected #: 1 file
Diff not available.
https://bitbucket.org/yt_analysis/yt/commits/4c03af8f10b4/
changeset: 4c03af8f10b4
branch: yt
user: chummels
date: 2013-02-15 22:09:46
summary: Making sure all of the halo_merger_tree files are read from and written to the user-defined FOF_directory.
affected #: 1 file
Diff not available.
https://bitbucket.org/yt_analysis/yt/commits/1f8602ec3b59/
changeset: 1f8602ec3b59
branch: yt
user: chummels
date: 2013-02-15 22:13:27
summary: Correcting syntax thanks to mjturk.
affected #: 1 file
Diff not available.
https://bitbucket.org/yt_analysis/yt/commits/1c455dfdf6ad/
changeset: 1c455dfdf6ad
branch: yt
user: chummels
date: 2013-02-15 22:15:57
summary: Changing print statements to mylog.info statements.
affected #: 1 file
Diff not available.
https://bitbucket.org/yt_analysis/yt/commits/4d3f4edb1936/
changeset: 4d3f4edb1936
branch: yt
user: chummels
date: 2013-02-15 22:20:44
summary: Moving all docstrings to directly below the class definition instead of below the __init__ func.
affected #: 1 file
Diff not available.
https://bitbucket.org/yt_analysis/yt/commits/5304804e9366/
changeset: 5304804e9366
branch: yt
user: chummels
date: 2013-02-15 22:26:50
summary: Fixing docstrings in halo_objects.py to precede __init__ func calls.
affected #: 1 file
Diff not available.
https://bitbucket.org/yt_analysis/yt/commits/87ea74c6f95b/
changeset: 87ea74c6f95b
branch: yt
user: chummels
date: 2013-02-15 23:13:11
summary: Fixing a bug that caused a crash on halo lineages where a progenitor is too small. Also correcting the recipe in the docstrings.
affected #: 1 file
Diff not available.
https://bitbucket.org/yt_analysis/yt/commits/19b12255a015/
changeset: 19b12255a015
branch: yt
user: chummels
date: 2013-02-15 23:18:11
summary: Merging.
affected #: 31 files
Diff not available.
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.