[Yt-svn] yt-commit r624 - in trunk: scripts yt/lagos/hop

mturk at wrangler.dreamhost.com mturk at wrangler.dreamhost.com
Wed Jun 25 13:11:07 PDT 2008


Author: mturk
Date: Wed Jun 25 13:11:06 2008
New Revision: 624
URL: http://yt.spacepope.org/changeset/624

Log:
Added Stephen Skory's galaxy merger code.  Pretty cool stuff -- outputs
graphviz files.

For information on how to use it, see:

http://yt.enzotools.org/wiki/Recipes/GalaxyMerging

I might make some changes to fastBuildMerge (and I might rename it) but it
looks pretty solid to me now.



Added:
   trunk/scripts/fastBuildMerge.py
   trunk/yt/lagos/hop/Merger.py
Modified:
   trunk/yt/lagos/hop/__init__.py

Added: trunk/scripts/fastBuildMerge.py
==============================================================================
--- (empty file)
+++ trunk/scripts/fastBuildMerge.py	Wed Jun 25 13:11:06 2008
@@ -0,0 +1,184 @@
+#!/usr/bin/python
+
+# Stephen Skory
+# sskory at physics.ucsd.edu
+# created August 2006
+# modified June 2008
+# this python script builds a script for creating halo merger trees using hop inside yt
+# see http://yt.enzotools.org/ for more info on yt and this script.
+
+# the output of GraphViz, at the very least, will have boxes connected by arrows.
+# The arrow into a box is how much of the previous halo's DM & stars is
+# contributing to that halo.
+# Similarly the arrow leaving a box is how much of the DM & stars is leaving
+# that halo for the next one.
+# Inside the box, the top % is how much of the halo is accreted from the 
+# 'ether'; particles that in the previous time step were assigned to no group.
+# Similarly, the bottom % is how much of the halo leaves and is assigned to no
+# group in the next time step.
+# The middle of the box shows how many star/DM particles are in the halo,
+# and the (x,y,z) position of the halo's center.
+
+# to run this, set all the things below to your liking,
+# and then 'python fastBuildMerge.py > mergeScript.py'
+# 'chmod u+x mergeScript.py'
+# './mergeScript.py'
+
+# the name of the python module of helper functions imported by the generated script
+name = "merger_yt"
+# the GraphViz file
+outfile = "157-120.dot"
+# the directory basename, no trailing slash needed
+# if a data dump is /path/to/enzo/data/DD0243/data0243, put /path/to/enzo/data/DD below
+dirbasename = "/path/to/enzo/data/DD"
+# usually DD, maybe RD, or data
+filebasename = "data"
+# the maximum number of groups analyzed at one time step
+hardmaxgroup = 2500
+# the hop density threshold used for grouping
+hopthreshold = 80.0
+# dm only, True or False
+# NOTE(review): this is a *string*, and the generated script passes it verbatim
+# to HopList(dm_only=...). A non-empty string like "False" is still truthy in
+# Python, so setting this to "False" may not do what you expect — verify.
+dmonly = "True"
+
+# The range of data dumps you wish to operate upon. It's a good idea to manually see how far back
+# haloes exist before setting the end value.
+# start > end, because the tree is walked backwards in time from dump 'start' down to dump 'end'.
+end = 120
+start = 157
+
+# A list of the haloes in the final data dump for which you want to find ancestors. Use the IDs provided
+# by hop, so you'll probably have to run hop on the final data dump before deciding which haloes to
+# put here, unless you know it's the largest halo, which is always ID=0. It can be a singleton.
+indices = [0,1]
+
+# change below to reflect where python is
+# it is important to use python >= 2.5 as previous versions have a memory bug
+# which affects this script, which is important if you're running large datasets.
+# Everything from here down writes the generated merger script to stdout.
+print "#!/usr/bin/env /Library/Frameworks/Python.framework/Versions/Current/bin/python"
+
+# ---------- you shouldn't have to change anything below. ---------
+# -----------------------------------------------------------------
+# -----------------------------------------------------------------
+
+# emit the imports the generated script needs
+print "import %s" % name
+print "import yt.raven as raven"
+print "import yt.lagos as lagos"
+print "import yt.lagos.hop as hop"
+print "from yt.lagos import mylog"
+
+# Emit the helper that the generated script uses to instantiate every
+# EnzoStaticOutput and record its redshift, keyed by dump number.  The loop
+# counts j down from 'start'; since the call site below passes end-1, dumps
+# start..end inclusive get loaded.
+print """
+def instantiateSnapshots(dirbasename,filebasename,start,end):
+    redshift = {}
+    snapshots = {}
+    for i in range(start-end):
+        j = start - i
+        file = "%s%04d/%s%04d" % (dirbasename,j,filebasename,j)
+        snapshot = lagos.EnzoStaticOutput(file)
+        redshift[j] = snapshot["CosmologyCurrentRedshift"]
+        # put the snapshot instance into the dict snapshots
+        snapshots[j] = snapshot
+    
+    string = "Read in %d snapshots." % len(redshift)
+    mylog.info(string)
+    return(snapshots,redshift)
+"""
+
+
+# copy the user settings above into the generated script as literals
+print "positions = {}"
+print "dirbasename = \"%s\"" % dirbasename
+print "filebasename = \"%s\"" % filebasename
+print "hardmaxgroup = %d" % hardmaxgroup
+print "indices = " + str(indices)
+print "start = %d" % start
+print "end = %d" % end
+print "dmonly = \"%s\"" % dmonly
+
+# first let's read in the redshifts, and instantiate the snapshots
+print "(snapshots,redshift) = instantiateSnapshots(dirbasename,filebasename,start,end-1)"
+
+# open the dotfile
+print "fopen = open('%s', 'w')" % outfile
+
+# write the preambles for the dotfile
+print "%s.writeTop(fopen)" % name
+print "%s.writeNode(fopen)" % name
+
+print "\n"
+
+# loop over all the snapshots.
+# NOTE: this loop runs now, at generation time; each iteration prints the
+# chunk of the merger script that handles one pair of neighboring snapshots.
+for i in range(start-end):
+    snapshot = start - i
+    # snapshots should be in time-reverse order, so this loop will count *down*, so the next snapshot
+    # to load has a lower index
+    nextsnapshot = snapshot-1
+    # if we're at the bottom, we're done!
+    if (nextsnapshot < end):
+        break
+    
+    # we need to run hop on the snapshot
+    print "sphere = snapshots[%d].h.sphere([0.5,0.5,0.5],1.0)" % nextsnapshot
+    print "hop_results = lagos.hop.HopList(sphere, %f, dm_only=dmonly)" % hopthreshold
+    
+    # get the group info for the next snapshot
+    # get group positions
+    print "positions = %s.convertPositions(hop_results,%d,positions,hardmaxgroup)" % (name,nextsnapshot)
+    # get particle IDs
+    print "g%04d = %s.convertGroups(hop_results,%d,hardmaxgroup)" % (nextsnapshot,name,nextsnapshot)
+    # sort the particles in each group (pushListCount in the merger module
+    # requires both particle lists sorted ascending)
+    print "for g in g%04d: g.particles.sort()" % nextsnapshot
+    # delete the sphere which contains all the particles, to clear up memory
+    print "del sphere"
+    # delete hop_results now that we're done with them, too
+    print "del hop_results\n"
+    
+    # in the beginning we need to read in two groups before we build the links, so do everything above
+    # again, but with the Selected variants which keep only the haloes in 'indices'
+    if (snapshot==start):
+        print "sphere = snapshots[%d].h.sphere([0.5,0.5,0.5],1.0)" % snapshot
+        print "hop_results = lagos.hop.HopList(sphere, %f, dm_only=dmonly)" % hopthreshold
+        print "positions = %s.convertPositionsSelected(hop_results,%d,positions,indices)" % (name,snapshot)
+        print "g%04d = %s.convertGroupsSelected(hop_results,%d,indices)" % (snapshot,name,snapshot)
+        print "for g in g%04d: g.particles.sort()\n" % snapshot
+        print "del sphere"
+        print "del hop_results"
+    
+    
+    # for just the last group (in time), a singleton, usually.
+    if (snapshot==start):
+        print "for g in g%04d: g.flag = 1\n" % start
+    
+    # build links which requires two neighboring groups at once.
+    print "(links%04d%04d,g%04d,g%04d) = %s.buildLinks(g%04d,g%04d,positions)" % \
+    (nextsnapshot,snapshot,nextsnapshot,snapshot,name,nextsnapshot,snapshot)
+    
+    # write out levels, which keeps the boxes for one snapshot on the same rank
+    # rather than placed wherever graphviz thinks is aesthetically best
+    
+    print "%s.writeLevels(fopen, g%04d,redshift[%d])" % (name,nextsnapshot,nextsnapshot)
+    if (snapshot==start):
+        print "%s.writeLevels(fopen, g%04d,redshift[%d])" % (name,snapshot,snapshot)
+    
+    # print out label stuff
+    
+    print "%s.writeOpen(fopen)" % name
+    
+    # for the first (last in time) halo, write out the box labels using style 2
+    if (snapshot==start):
+        print "%s.writeLabels(fopen, g%04d, positions, 2)" % (name,snapshot)
+    # middle time steps get type 1
+    if (snapshot!=start):
+        print "%s.writeLabels(fopen, g%04d, positions, 1)" % (name,snapshot)
+    # the top, first in time get type 0
+    if (nextsnapshot==end):
+        print "%s.writeLabels(fopen, g%04d, positions, 0)" % (name,nextsnapshot)
+    
+    print "%s.writeClose(fopen)" % name
+    
+    print "%s.writeOpen(fopen)" % name
+    print "%s.writeLinks(fopen,links%04d%04d, g%04d, g%04d)" % (name,nextsnapshot,snapshot,nextsnapshot,snapshot)
+    print "%s.writeClose(fopen)" % name
+
+    # free the older snapshot's particle lists in the generated script
+    print "for g in g%04d: del g.particles\n\n" % snapshot
+
+# close the dotfile
+print "%s.writeClose(fopen)" % name
+print "fopen.close()"

Added: trunk/yt/lagos/hop/Merger.py
==============================================================================
--- (empty file)
+++ trunk/yt/lagos/hop/Merger.py	Wed Jun 25 13:11:06 2008
@@ -0,0 +1,421 @@
+# Merger.py. Written by Stephen Skory & Rick Wagner, August 2006.
+
+# figures the 'family tree,' and outputs in GraphViz format, the galaxy
+# merger history for a series of files that list the particles & the group that
+# they are assigned to.
+
+# this file only contains functions that are used by the output of fastBuildMerge.py.
+
+# June 2008, converted/upgraded for use in Matthew Turk's enzo analyzer yt by Stephen Skory
+
+# define the Group class for the list of groups, which contains
+# the group id, a list of the particles in that group, the numerical
+# order of the group, the percentage of the groups particles that go to the 
+# ether (no group), and the percentage of the group that comes from no group.
+class Group:
+    """One halo at one time step: its hop group id, the particle ids that
+    belong to it, which snapshot (orderIndex) it came from, and bookkeeping
+    for the fraction of it that comes from / goes to the 'ether' (no group)."""
+
+
+    def __init__(self, id, particles, orderIndex, toEther, fromEther, flag):
+        self.id = id
+        self.particles = particles
+        self.orderIndex = orderIndex
+        # these are fields to track how much of each group comes and goes to the
+        # ether; both start at 100 and are lowered by buildLinks as links are found
+        self.toEther = toEther
+        self.fromEther = fromEther
+        # the flag says whether or not this group is linked to the final,
+        # interesting group(s).
+        self.flag = flag
+
+# the heavy lifting. This takes in a parentGroup and sees how many of its particles
+# go to the childGroup.
+# DEPRECATED, replaced by pushListCount. I've left it here because it's still functional and
+# a lot simpler to understand than pushListCount. If you want to use it, uncomment out the line
+# where it's used in buildLinks, and comment out the pushListCount line. This is a whole bunch slower
+# than pushListCount, so I don't know why you would want to do that.
+
+    def isParent(self, childGroup):
+        # Count how many of self's particles also appear in childGroup.
+        # O(n*m), because list.count rescans childGroup.particles each time.
+
+        n = 0
+        for particleNumber in self.particles:
+            if childGroup.particles.count(particleNumber):
+                #return True
+                n+=1
+
+        return n
+
+# this function takes two lists which have both been sorted in increasing order. This is a way to
+# take the O(n^2) matching and make it roughly O(2n). Since most of the time most particles go from
+# one group to the next, a step down one list will get the same next particle in the other list, so
+# most of the time we can move down both lists simultaneously. When one entry is bigger than the other,
+# we use findNextMatch to see how far to move the second list until it matches the first, or 
+# surpasses the first (in which case we then move the first list until it catches up with the second, 
+# ad nauseam).
+def pushListCount(list1, list2):
+    # Returns the number of values the two (sorted, ascending) lists share.
+
+
+    count = 0
+    i = 0
+    j = 0
+    
+    while (i < len(list1)) and (j < len(list2)):
+        #print 'list1['+str(i)+']='+str(list1[i])+' list2['+str(j)+']='+str(list2[j])
+        if list1[i] == list2[j]:
+            # common case: both lists advance together
+            i += 1
+            j += 1
+            count += 1
+            #print 'Matched first try'
+        else:
+            if (i <= len(list1) -1) and (j <= len(list2) -1):
+                value = min(list1[i], list2[j])
+                                #print str(value)
+                if value == list1[i]:
+                    #print 'First list smaller'
+                    nextindex = findNextMatch(i+1, list2[j], list1)
+                    if nextindex > -1:
+                        if nextindex == 1.5: # special case (see findNextMatch)
+                            j += 1
+                            i += 1
+                        else:
+                            i = nextindex + 1
+                            j += 1
+                            count += 1
+                    if nextindex < -1:
+                        # overshoot: findNextMatch returned the negated index
+                        i = -1 * nextindex
+                        j += 1
+                    if (j == len(list2)) or (i == len(list1)):
+                        break
+                    if nextindex == -1:
+                        #print "Got minus one on second list, nextindex= " + str(nextindex)
+                        break
+
+                if value == list2[j]:
+                    #print 'Second list smaller'
+                    nextindex = findNextMatch(j+1, list1[i], list2)
+                    if nextindex > -1:
+                        if nextindex == 1.5: # special case (see findNextMatch)
+                            j += 1
+                            i +=1
+                        else:
+                            j = nextindex +1
+                            i += 1
+                            count += 1
+                    if nextindex < -1:
+                        # overshoot: findNextMatch returned the negated index
+                        j = -1 * nextindex
+                        i += 1
+                    if nextindex == 1:
+                        j = nextindex + 1
+                        i += 1
+                    if (j == len(list2)) or (i == len(list1)):
+                        break
+                    if nextindex == -1:
+                        #print "Got minus one on first list, nextindex= " + str(nextindex)
+                        break
+        #print 'count: ' + str(count)
+
+    return count
+
+# Scan thislist, starting at startIndex, for value.  Return protocol:
+#    i   : index where thislist[i] == value (a match)
+#   -i   : negated index where thislist first exceeded value (an overshoot)
+#   1.5  : sentinel for an overshoot at index 1, since -1*1 == -1 would
+#          collide with the not-found return value below
+#   -1   : value neither found nor overshot before the list ended
+def findNextMatch(startIndex, value, thislist):
+    #print "looking for " + str(value) + ', startindex= ' + str(startIndex)
+    outval = -1
+    for i in range(startIndex, len(thislist)):
+        #print 'thislist[' + str(i) +'] = ' +str(thislist[i])
+        if thislist[i] == value:
+            outval = i
+            break
+        if thislist[i] > value:
+            outval = -1 * i
+            if outval == -1: # this in case i=1, which isn't a problem unless outval=-1 *is* the value we want
+                         # this happens if the second item in each list matches
+                outval = 1.5
+            break
+    #print 'found match for ' + str(value) + ' at index: ' + str(outval)
+
+    return outval
+    
+
+
+# add yt.hop particles into the Group structure
+def convertGroups(snapshot,orderIndex,hardmaxgroup):
+    from yt.lagos import mylog
+    # snapshot is the stuff from yt containing the results of hop (the HopList
+    # built by the generated script); iterating it yields one hop group at a time.
+    # orderIndex is the index for this snapshot
+    # hardmaxgroup is some reasonable limit for the number of galaxies looked at for each
+    # time step.
+    
+    groups = []
+    particles = [] # NOTE(review): never used below; candidate for removal
+    # set up the percent fields for later...
+    toEther = 100
+    fromEther = 100
+    pcount = 0 # counts how many particles are groups for that particular time step
+    flag = 0 # zero means not assoc with final group, one means yes.
+    
+    # loop over the groups
+    for i,group in enumerate(snapshot):
+        # don't go past hardmaxgroup
+        if (i >= hardmaxgroup):
+            break
+        # add it to our groups as a Group
+        groups.append(Group(i,group["particle_index"],orderIndex,toEther,fromEther,flag))
+        pcount += len(group["particle_index"])
+    
+    string = "There are " + str(pcount) + " particles in orderIndex " + str(orderIndex) + "."
+    mylog.info(string)
+    return(groups)
+
+
+# from the yt.hop output, add to the positions dict the central position and group index for this
+# snapshot
+def convertPositions(snapshot,orderIndex,positions,hardmaxgroup):
+    from yt.lagos import mylog
+    # loop over the hop groups
+    for i,group in enumerate(snapshot):
+        # if we're trying to read in too many groups, we're done so we leave
+        if (i >= hardmaxgroup):
+            break
+        center = group.maximum_density_location()
+        # positions is keyed on the (snapshot index, group id) pair
+        positions[orderIndex,i] = (center[0],center[1],center[2])
+    
+    string = 'There are ' + str(len(snapshot)) + ' groups in orderIndex ' + str(orderIndex)
+    mylog.info(string)
+    
+    return (positions)
+
+# add yt.hop particles into the Group structure, but for selected groups, for the 
+# last time step, which is the first inspected.
+def convertGroupsSelected(snapshot,orderIndex,indices):
+    from yt.lagos import mylog
+    # snapshot is the stuff from yt containing the results of hop (the HopList
+    # built by the generated script).
+    # orderIndex is the index for this snapshot
+    # indices lists the hop group IDs to keep: unlike convertGroups, only these
+    # chosen groups are converted.
+    
+    groups = []
+    particles = [] # NOTE(review): never used below; candidate for removal
+    # set up the percent fields for later...
+    toEther = 100
+    fromEther = 100
+    pcount = 0 # counts how many particles are groups for that particular time step
+    flag = 0 # zero means not assoc with final group, one means yes.
+    
+    # loop over the selected groups
+    for index in indices:
+        # pick out the one group
+        group = snapshot[index]
+        
+        # add it to our groups as a Group
+        groups.append(Group(index,group["particle_index"],orderIndex,toEther,fromEther,flag))
+        pcount += len(group["particle_index"])
+    
+    string = "There are " + str(pcount) + " particles in orderIndex " + str(orderIndex) + "."
+    mylog.info(string)
+    return(groups)
+
+# from the yt.hop output, add to the positions dict the central position and group index for this
+# snapshot, but only chosen groups, which is what we want at the final data dump
+def convertPositionsSelected(snapshot,orderIndex,positions,indices):
+    from yt.lagos import mylog
+    
+    # loop over the selected groups
+    for index in indices:
+        group = snapshot[index]
+        center = group.maximum_density_location()
+        # positions is keyed on the (snapshot index, group id) pair
+        positions[orderIndex,index] = (center[0],center[1],center[2])
+    
+    string = 'There are ' + str(len(indices)) +' groups in orderIndex ' + str(orderIndex)
+    mylog.info(string)
+    
+    return (positions)
+
+
+
+# this does three things:
+# 1. Builds the links list, which contains the parentGroup.id and childGroup.id
+# of the matched pair, along with the number of particles that are transferred.
+# 2. Calculates what percentage of the parentGroup is transferred to the child
+# groups. This is then subtracted from 100 to give how much of the group goes to
+# the ether.
+# 3. Similarly, calculates what percentage of the child group comes from the
+# ether.
+def buildLinks(parentGroups, childGroups,positions):
+    from yt.lagos import mylog
+    links = []
+    childPercents = {}
+    
+    for parentGroup in parentGroups:
+        parentPercent = 0
+        for childGroup in childGroups:
+            number = 0 # this will be the number of particles that go from the parent
+            # group to the child group.
+            # Below we calculate the distance between the groups, taking the periodic
+            # boundary conditions into account (the box spans [0,1] per dimension,
+            # hence the 1 - dist wrap).
+            dist_x = abs(positions[parentGroup.orderIndex,parentGroup.id][0] - positions[childGroup.orderIndex,childGroup.id][0])
+            if dist_x > 0.5:
+                dist_x = 1 - dist_x
+            dist_y = abs(positions[parentGroup.orderIndex,parentGroup.id][1] - positions[childGroup.orderIndex,childGroup.id][1])
+            if dist_y > 0.5:
+                dist_y = 1 - dist_y
+            dist_z = abs(positions[parentGroup.orderIndex,parentGroup.id][2] - positions[childGroup.orderIndex,childGroup.id][2])
+            if dist_z > 0.5:
+                dist_z = 1 - dist_z
+            dist = (dist_x**2 + dist_y**2 + dist_z**2)**0.5
+            # If the childgroup isn't flagged, or if the two groups are too far away
+            # (more than 0.1 box lengths apart -- presumably an empirical cutoff,
+            # TODO confirm), the groups don't get checked; number stays equal to
+            # zero; and we move to the next parentgroup.
+            if (childGroup.flag == 1) and (dist <= 0.1):
+                string = 'checking parentGroup (' + str(len(parentGroup.particles)) + '): ' + str(parentGroup.orderIndex) + '.' + str(parentGroup.id) + \
+                ' with childGroup (' + str(len(childGroup.particles)) + '): ' + str(childGroup.orderIndex) + '.' + str(childGroup.id)
+                mylog.info(string)
+                #number = parentGroup.isParent(childGroup)
+                number = pushListCount(parentGroup.particles,childGroup.particles)
+                #string = 'number: ' + str(number)
+                #mylog.info(string)
+            # create an entry in the childPercents dictionary if it's not already
+            # there (dict.has_key is Python 2 only)
+            if childPercents.has_key(childGroup.id) != 1:
+                childPercents[childGroup.id] = 0
+            
+            # If the number of particles from the parentgroup goes to the child group,
+            # above some threshold (strictly more than 50% of the parent's particles),
+            # add this link to the links list; calculate how much
+            # of the parent group went to the child group and add that to any existing total
+            # of particles leaving the parent group; add the particles going to the child
+            # group to it's percentage of particles not coming from the ether; and finally
+            # flag the parentgroup as part of the lineage for future checking.
+            if (number/float(len(parentGroup.particles)))*100 > 50:
+                links.append([parentGroup.id,childGroup.id,number])
+                string = 'parentGroup: ' + str(parentGroup.orderIndex) + '.' + str(parentGroup.id) + \
+                ' | childGroup: ' + str(childGroup.orderIndex) + '.' + str(childGroup.id) + \
+                ' | %% transferred: %2.2f%%' % (number/float(len(parentGroup.particles)) * 100)
+                mylog.info(string)
+                parentPercent += (number/float(len(parentGroup.particles))*100)
+                childPercents[childGroup.id] += (number/float(len(childGroup.particles))*100)
+                parentGroup.flag = 1
+        # End loop over childGroups.
+        # if any of the particles from the parent group went to a child group,
+        # change the parent groups toEther to reflect this.
+        if parentPercent > 0:
+            parentPercent = 100 - parentPercent
+            parentGroup.toEther = parentPercent
+
+    # go over the childgroups and see how much of the particles came from
+    # the ether.
+    for childGroup in childGroups:
+        if childPercents[childGroup.id] > 0:
+            childPercents[childGroup.id] = 100 - childPercents[childGroup.id]
+            childGroup.fromEther = childPercents[childGroup.id]
+
+    return (links,parentGroups,childGroups)
+
+# The functions below just write out simple blocks that GraphViz needs.
+def writeTop(fopen):
+    # opening statement of the digraph, with an overall rendered-size cap
+
+    line = 'digraph galaxy {size="40,40";\n'
+    fopen.write(line)
+
+def writeClose(fopen):
+    # closes a block opened by writeOpen/writeTop
+
+    line = '};\n'
+    fopen.write(line)
+
+def writeOpen(fopen):
+    # opens an anonymous subgraph block
+
+    line = '{\n'
+    fopen.write(line)
+
+def writeNode(fopen):
+    # sets the default node style used for every halo box
+    
+    line = 'node [color=lightblue, style=bold, shape=record];\n'
+    fopen.write(line)
+
+# In order to keep the boxes for the haloes on the same level, GraphViz needs
+# to be told which boxes are all on the same level.
+# Mar 2007, added lines for redshift levels.
+def writeLevels(fopen, Groups, redshift):
+    # NOTE(review): the loop variable 'Group' below shadows the Group class
+    # defined above; harmless here, but worth renaming if this module grows.
+
+    line = '{ rank = same;\n'
+    fopen.write(line)
+
+    # label this rank with its redshift
+    line = '"' + str(redshift) + '";'
+    fopen.write(line)
+    
+    for Group in Groups:
+        # only groups that were actually touched by a link are drawn
+        if (Group.toEther != 100) or (Group.fromEther != 100):
+            line = '"' + str(Group.orderIndex) + '.' + str(Group.id) + '";'
+            fopen.write(line)
+    
+    line = "\n};\n"
+    
+    fopen.write(line)
+
+    
+# Each box (node) has four bits of information:
+# 1. For all but the top nodes, the percentage of the particles that come from
+# the ether.
+# 2. The number of particles in it.
+# 3. The center of that group in (0,0,0)->(1,1,1) coordinates
+# 4. For all but the bottom nodes, the percentage of the particles that go to 
+# the ether.
+
+def writeLabels(fopen, Groups, gPositions, switch):
+
+    # switch = 0: very top node, beginning of time
+    # switch = 1: intermediate node, many
+    # switch = 2: very bottom node, singular (usually), the last in time
+    # Groups untouched by any link (toEther and fromEther both still 100) are
+    # skipped, so unconnected haloes never appear in the graph.  The color
+    # attribute is GraphViz's "H S V" triple, with hue and value keyed to the
+    # group id so each halo gets its own shade.
+
+    groupLength = len(Groups)
+    
+    if switch == 0:
+        for Group in Groups:
+            if (Group.toEther != 100) or (Group.fromEther != 100):
+                color = 1 - float(Group.id)/float(groupLength)
+                line = '"' + str(Group.orderIndex) + '.' + str(Group.id) + '" [label="{' + str(len(Group.particles))
+                # write the position triplet for the group
+                line += '\\n(%1.3f,' % float(gPositions[Group.orderIndex,Group.id][0])
+                line += '%1.3f,' % float(gPositions[Group.orderIndex,Group.id][1])
+                line += '%1.3f)' % float(gPositions[Group.orderIndex,Group.id][2])
+                line += '| %2.2f%%}", shape="record",' % Group.toEther
+                line += 'color="%1.3f ' % color
+                line += '1.0 %1.3f"];\n' % color
+                fopen.write(line)
+        
+    if switch == 1:
+        for Group in Groups:
+            if (Group.toEther != 100) or (Group.fromEther != 100):
+                color = 1 - float(Group.id)/float(groupLength)
+                line = '"' + str(Group.orderIndex) + '.' + str(Group.id)
+                line += '" [label="{%2.2f%%| {' % Group.fromEther
+                line += str(len(Group.particles))
+                # write the position triplet for the group
+                line += '\\n(%1.3f,' % float(gPositions[Group.orderIndex,Group.id][0])
+                line += '%1.3f,' % float(gPositions[Group.orderIndex,Group.id][1])
+                line += '%1.3f)' % float(gPositions[Group.orderIndex,Group.id][2])
+                line +='}|%2.2f%%}", shape="record"' % Group.toEther
+                line += ', color="%1.3f ' % color
+                line += '1.0 %1.3f"];\n' % color
+                fopen.write(line)
+            
+    if switch == 2:
+        for Group in Groups:
+            if (Group.toEther != 100) or (Group.fromEther != 100):
+                color = 1 - float(Group.id)/float(groupLength)
+                line = '"' + str(Group.orderIndex) + '.' + str(Group.id)
+                line += '" [label="{%2.2f%%|' % Group.fromEther
+                line += str(len(Group.particles))
+                # write the position triplet for the group
+                line += '\\n(%1.3f,' % float(gPositions[Group.orderIndex,Group.id][0])
+                line += '%1.3f,' % float(gPositions[Group.orderIndex,Group.id][1])
+                line += '%1.3f)' % float(gPositions[Group.orderIndex,Group.id][2])
+                line += '}", shape="record", color="%1.3f ' % color
+                line += '1.0 %1.3f"];\n' % color
+                fopen.write(line)
+
+# write out the GraphViz links that connect two groups.
+def writeLinks(fopen, links, parentGroups, childGroups):
+    # Each link is [parent id, child id, particle count]; the edge label is the
+    # transfer expressed as a percentage of the *parent* group's size.  The
+    # child node id is parentGroup.orderIndex+1, i.e. the next snapshot.
+
+    for parentGroup in parentGroups:
+        for link in links:
+            if str(parentGroup.id) == str(link[0]):
+                line = '"' + str(parentGroup.orderIndex) + '.' + str(link[0]) + '"->"' + str(parentGroup.orderIndex+1) + '.' + str(link[1])+\
+                '"[label="%2.2f%%",color="blue", fontsize=10];\n' % (link[2]/float(len(parentGroup.particles)) * 100)
+                fopen.write(line)

Modified: trunk/yt/lagos/hop/__init__.py
==============================================================================
--- trunk/yt/lagos/hop/__init__.py	(original)
+++ trunk/yt/lagos/hop/__init__.py	Wed Jun 25 13:11:06 2008
@@ -2,3 +2,4 @@
 
 from EnzoHop import *
 from HopOutput import *
+from Merger import *



More information about the yt-svn mailing list