[Yt-svn] yt-commit r1207 - trunk/yt/lagos/fof

sskory at wrangler.dreamhost.com
Thu Mar 12 21:14:45 PDT 2009


Author: sskory
Date: Thu Mar 12 21:14:44 2009
New Revision: 1207
URL: http://yt.spacepope.org/changeset/1207

Log:
added first revision of a friends-of-friends implementation

Added:
   trunk/yt/lagos/fof/
   trunk/yt/lagos/fof/EnzoFOF.c
   trunk/yt/lagos/fof/FOF_Output.py
   trunk/yt/lagos/fof/README
   trunk/yt/lagos/fof/__config__.py
   trunk/yt/lagos/fof/__init__.py
   trunk/yt/lagos/fof/fof_main.c
   trunk/yt/lagos/fof/kd.c
   trunk/yt/lagos/fof/kd.h
   trunk/yt/lagos/fof/setup.py
   trunk/yt/lagos/fof/setup.pyc   (contents, props changed)
   trunk/yt/lagos/fof/tipsydefs.h
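
For orientation, a minimal sketch of how the new finder is meant to be driven
(assuming the usual yt.mods convenience import and a hypothetical dataset name;
the FOFHaloFinder signature, write_out and write_particle_lists come from
FOF_Output.py below):

    # minimal sketch, assumptions noted above
    from yt.mods import *                              # assumed convenience import
    pf = EnzoStaticOutput("DataDump0010")              # hypothetical dataset
    halos = FOFHaloFinder(pf, link=0.2, dm_only=True)  # defaults from FOF_Output.py
    halos.write_out("FOFAnalysis.out")
    halos.write_particle_lists("FOFParticles")         # one HDF5 group per halo (needs PyTables)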

Added: trunk/yt/lagos/fof/EnzoFOF.c
==============================================================================
--- (empty file)
+++ trunk/yt/lagos/fof/EnzoFOF.c	Thu Mar 12 21:14:44 2009
@@ -0,0 +1,203 @@
+/************************************************************************
+* Copyright (C) 2008-2009 Matthew Turk.  All Rights Reserved.
+*
+* This file is part of yt.
+*
+* yt is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 3 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program.  If not, see <http://www.gnu.org/licenses/>.
+*
+************************************************************************/
+
+//
+// EnzoFOF
+//   A module for running friends-of-friends halo finding on a set of particles
+//
+
+#include "Python.h"
+#include <stdio.h>
+#include <math.h>
+#include <signal.h>
+#include <ctype.h>
+#include "kd.h"
+#include "tipsydefs.h"
+
+#include "numpy/ndarrayobject.h"
+
+
+static PyObject *_FOFerror;
+
+static PyObject *
+Py_EnzoFOF(PyObject *obj, PyObject *args)
+{
+    PyObject    *oxpos, *oypos, *ozpos;
+
+    PyArrayObject    *xpos, *ypos, *zpos;
+    xpos=ypos=zpos=NULL;
+    float link = 0.2;
+
+    int i;
+
+    if (!PyArg_ParseTuple(args, "OOO|f",
+        &oxpos, &oypos, &ozpos, &link))
+    return PyErr_Format(_FOFerror,
+            "EnzoFOF: Invalid parameters.");
+
+    /* First the regular source arrays */
+
+    xpos    = (PyArrayObject *) PyArray_FromAny(oxpos,
+                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
+                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+    if(!xpos){
+    PyErr_Format(_FOFerror,
+             "EnzoFOF: xpos didn't work.");
+    goto _fail;
+    }
+    int num_particles = PyArray_SIZE(xpos);
+
+    ypos    = (PyArrayObject *) PyArray_FromAny(oypos,
+                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
+                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+    if((!ypos)||(PyArray_SIZE(ypos) != num_particles)) {
+    PyErr_Format(_FOFerror,
+             "EnzoFOF: xpos and ypos must be the same length.");
+    goto _fail;
+    }
+
+    zpos    = (PyArrayObject *) PyArray_FromAny(ozpos,
+                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
+                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+    if((!zpos)||(PyArray_SIZE(zpos) != num_particles)) {
+    PyErr_Format(_FOFerror,
+             "EnzoFOF: xpos and zpos must be the same length.");
+    goto _fail;
+    }
+
+    /* let's get started with the FOF stuff */
+
+	KD kd;
+	int nBucket,j;
+	float fPeriod[3],fEps;
+	int nMembers,nGroup,bVerbose=1;
+	int sec,usec;
+	
+	/* linking length */
+	fprintf(stdout, "Link length is %f\n", link);
+	fEps = link;
+	
+	nBucket = 16;
+	nMembers = 8;
+
+	for (j=0;j<3;++j) fPeriod[j] = 1.0;
+
+    /* initialize the kd FOF structure */
+
+	kdInit(&kd,nBucket,fPeriod);
+	
+	/* kdReadTipsy(kd,stdin,bDark,bGas,bStar); */
+
+ 	/* Copy positions into kd structure. */
+
+    fprintf(stdout, "Filling in %d particles\n", num_particles);
+    kd->nActive = num_particles;
+	kd->p = (PARTICLE *)malloc(kd->nActive*sizeof(PARTICLE));
+	assert(kd->p != NULL);
+	for (i = 0; i < num_particles; i++) {
+	  kd->p[i].iOrder = i;
+	  kd->p[i].r[0] = (float)(*(npy_float64*) PyArray_GETPTR1(xpos, i));
+	  kd->p[i].r[1] = (float)(*(npy_float64*) PyArray_GETPTR1(ypos, i));
+	  kd->p[i].r[2] = (float)(*(npy_float64*) PyArray_GETPTR1(zpos, i));
+	}
+	
+	kdBuildTree(kd);
+	kdTime(kd,&sec,&usec);
+	nGroup = kdFoF(kd,fEps);
+	kdTime(kd,&sec,&usec);
+	if (bVerbose) printf("Number of initial groups:%d\n",nGroup);
+	nGroup = kdTooSmall(kd,nMembers);
+	if (bVerbose) {
+		printf("Number of groups:%d\n",nGroup);
+		printf("FOF CPU TIME: %d.%06d secs\n",sec,usec);
+		}
+	kdOrder(kd);
+
+	/* kdOutGroup(kd,ach); */
+	
+    // Now we need to get the groupID, realID.
+    // This will give us the index into the original array.
+    // Additionally, note that we don't really need to tie the index
+    // back to the ID in this code, as we can do that back in the python code.
+    // All we need to return here is the group membership information.
+    
+    // Tags are in kd->p[i].iGroup
+    PyArrayObject *particle_group_id = (PyArrayObject *)
+            PyArray_SimpleNewFromDescr(1, PyArray_DIMS(xpos),
+                    PyArray_DescrFromType(NPY_INT32));
+    
+    for (i = 0; i < num_particles; i++) {
+      // group tag is in kd->p[i].iGroup
+      *(npy_int32*)(PyArray_GETPTR1(particle_group_id, i)) =
+            (npy_int32) kd->p[i].iGroup;
+    }
+
+	kdFinish(kd);
+
+    PyArray_UpdateFlags(particle_group_id, NPY_OWNDATA | particle_group_id->flags);
+    PyObject *return_value = Py_BuildValue("N", particle_group_id);
+
+    Py_DECREF(xpos);
+    Py_DECREF(ypos);
+    Py_DECREF(zpos);
+
+    /* We don't need this, as it's done in kdFinish
+    if(kd->p!=NULL)free(kd->p);
+    */
+
+    return return_value;
+
+_fail:
+    Py_XDECREF(xpos);
+    Py_XDECREF(ypos);
+    Py_XDECREF(zpos);
+
+    /* kd is never initialized on any path that reaches _fail, so there is nothing to free here. */
+
+    return NULL;
+
+}
+
+static PyMethodDef _FOFMethods[] = {
+    {"RunFOF", Py_EnzoFOF, METH_VARARGS},
+    {NULL, NULL} /* Sentinel */
+};
+
+/* platform independent*/
+#ifdef MS_WIN32
+__declspec(dllexport)
+#endif
+
+void initEnzoFOF(void)
+{
+    PyObject *m, *d;
+    m = Py_InitModule("EnzoFOF", _FOFMethods);
+    d = PyModule_GetDict(m);
+    _FOFerror = PyErr_NewException("EnzoFOF.FOFerror", NULL, NULL);
+    PyDict_SetItemString(d, "error", _FOFerror);
+    import_array();
+}
+
+/*
+ * Local Variables:
+ * mode: C
+ * c-file-style: "python"
+ * End:
+ */
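
The extension exposes a single entry point, RunFOF.  A minimal sketch of calling
it directly (assuming the package installs as yt.lagos.fof; the toy positions and
the N**(-1/3) rescaling mirror what FOF_Output.py below does):

    # RunFOF takes three float64 position arrays (the kd-tree assumes a periodic
    # unit box) plus an optional linking length (default 0.2), and returns one
    # int32 group tag per particle; tag 0 means the particle's group fell below
    # the hard-coded 8-member minimum.
    import numpy as na
    from yt.lagos.fof.EnzoFOF import RunFOF   # assumed install path

    n = 10000
    pos = na.random.random((3, n))            # toy positions in [0, 1)
    tags = RunFOF(pos[0], pos[1], pos[2], 0.2 / n**(1.0/3.0))
    print "groups found:", tags.max()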

Added: trunk/yt/lagos/fof/FOF_Output.py
==============================================================================
--- (empty file)
+++ trunk/yt/lagos/fof/FOF_Output.py	Thu Mar 12 21:14:44 2009
@@ -0,0 +1,315 @@
+"""
+FOF-output data handling
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Author: Stephen Skory <stephenskory at yahoo.com>
+Affiliation: UCSD Physics/CASS
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2008-2009 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.lagos.fof import *
+
+class FOFList(object):
+    def __init__(self, data_source, link=0.2,
+                 dm_only = True):
+        """
+        Run fof on *data_source* with a given link length *link*.  If
+        *dm_only* is set, only run it on the dark matter particles, otherwise
+        on all particles. It's probably best to not run on star particles because
+        friends-of-friends doesn't account for mass. 
+        Returns an iterable collection of *FOFGroup* items.
+        """
+        self.data_source = data_source
+        self.dm_only = dm_only
+        self.link = link
+        self._groups = []
+        mylog.info("Initializing FOF")
+        self.__obtain_particles()
+        self.__run_fof()
+        mylog.info("Parsing outputs")
+        self.__parse_output()
+        mylog.debug("Finished. (%s)", len(self))
+
+    def __obtain_particles(self):
+        if self.dm_only: ii = self.__get_dm_indices()
+        else: ii = slice(None)
+        self.particle_fields = {}
+        for field in ["particle_position_%s" % ax for ax in 'xyz']:
+            tot_part = self.data_source[field].size
+            self.particle_fields[field] = self.data_source[field][ii]
+        self._base_indices = na.arange(tot_part)[ii]
+
+    def __run_fof(self):
+        # this needs to be done in a better way!!!
+        adjust = len(self.particle_fields["particle_position_x"])**(1./3.)
+        self.tags = \
+            RunFOF(self.particle_fields["particle_position_x"],
+                   self.particle_fields["particle_position_y"],
+                   self.particle_fields["particle_position_z"],
+                   self.link/adjust)
+        self.particle_fields["tags"] = self.tags
+
+    def __get_dm_indices(self):
+        if 'creation_time' in self.data_source.hierarchy.field_list:
+            mylog.debug("Differentiating based on creation time")
+            return (self.data_source["creation_time"] < 0)
+        elif 'particle_type' in self.data_source.hierarchy.field_list:
+            mylog.debug("Differentiating based on particle type")
+            return (self.data_source["particle_type"] == 1)
+        else:
+            mylog.warning("No particle_type, no creation_time, so not distinguishing.")
+            return slice(None)
+
+    def __parse_output(self):
+        unique_ids = na.unique(self.tags)
+        counts = na.bincount(self.tags+1)
+        sort_indices = na.argsort(self.tags)
+        grab_indices = na.indices(self.tags.shape).ravel()[sort_indices]
+        cp = 0
+        for i in unique_ids:
+            cp_c = cp + counts[i+1]
+            if i == -1:
+                cp += counts[i+1]
+                continue
+            group_indices = grab_indices[cp:cp_c]
+            self._groups.append(FOFGroup(self, i, group_indices))
+            px, py, pz = [self.particle_fields['particle_position_%s'%ax][group_indices]
+                                            for ax in 'xyz']
+            cp += counts[i+1]
+
+    def __len__(self):
+        return len(self._groups)
+ 
+    def __iter__(self):
+        return FOFIterator(self)
+
+    def __getitem__(self, key):
+        return self._groups[key]
+
+    def write_out(self, filename="FOFAnalysis.out"):
+        """
+        Write out standard FOF information to *filename*.
+        """
+        if hasattr(filename, 'write'):
+            f = filename
+        else:
+            f = open(filename,"w")
+        f.write("\t".join(["# Group","Mass","# part",
+                           "center-of-mass",
+                           "x","y","z",
+                           "vx","vy","vz","max_r","\n"]))
+        for group in self:
+            f.write("%10i\t" % group.id)
+            f.write("%0.9e\t" % group.total_mass())
+            f.write("%10i\t" % group.get_size())
+            f.write("\t".join(["%0.9e" % v for v in group.center_of_mass()]))
+            f.write("\t")
+            f.write("\t".join(["%0.9e" % v for v in group.bulk_velocity()]))
+            f.write("\t")
+            f.write("%0.9e\t" % group.maximum_radius())
+            f.write("\n")
+            f.flush()
+        f.close()
+
+class FOFIterator(object):
+    def __init__(self, fof):
+        self.fof = fof
+        self.index = -1
+
+    def next(self):
+        self.index += 1
+        if self.index == len(self.fof): raise StopIteration
+        return self.fof[self.index]
+
+class FOFGroup(object):
+    """
+    A data source that returns particle information about the members of a
+    FOF-identified halo.
+    """
+    __metaclass__ = ParallelDummy # This will proxy up our methods
+    _distributed = False
+    _processing = False
+    _owner = 0
+    indices = None
+    dont_wrap = ["get_sphere", "write_particle_list"]
+    extra_wrap = ["__getitem__"]
+
+    def __init__(self, fof_output, id, indices = None):
+        self.fof_output = fof_output
+        self.id = id
+        self.data = fof_output.data_source
+        if indices is not None: self.indices = fof_output._base_indices[indices]
+        # We assume that if indices = None, the instantiator has OTHER plans
+        # for us -- i.e., setting it somehow else
+
+    def center_of_mass(self):
+        """
+        Calculate and return the center of mass.
+        """
+        pm = self["ParticleMassMsun"]
+        cx = self["particle_position_x"]
+        cy = self["particle_position_y"]
+        cz = self["particle_position_z"]
+        com = (pm * na.array([cx,cy,cz])).sum(axis=1)/pm.sum()
+        return com
+
+    def total_mass(self):
+        """
+        Returns the total mass in solar masses of the halo.
+        """
+        return self["ParticleMassMsun"].sum()
+
+    def bulk_velocity(self):
+        """
+        Returns the mass-weighted average velocity.
+        """
+        pm = self["ParticleMassMsun"]
+        vx = (self["particle_velocity_x"] * pm).sum()
+        vy = (self["particle_velocity_y"] * pm).sum()
+        vz = (self["particle_velocity_z"] * pm).sum()
+        return na.array([vx,vy,vz])/pm.sum()
+
+    def maximum_radius(self):
+        """
+        Returns the maximum radius of the halo: the largest minimum-image
+        distance of any member particle from the center of mass.
+        """
+        center = self.center_of_mass()
+        rx = na.abs(self["particle_position_x"]-center[0])
+        ry = na.abs(self["particle_position_y"]-center[1])
+        rz = na.abs(self["particle_position_z"]-center[2])
+        r = na.sqrt(na.minimum(rx, 1.0-rx)**2.0
+                +   na.minimum(ry, 1.0-ry)**2.0
+                +   na.minimum(rz, 1.0-rz)**2.0)
+        return r.max()
+
+    def __getitem__(self, key):
+        return self.data[key][self.indices]
+
+    def get_sphere(self):
+        """
+        Returns an EnzoSphere centered on 
+        the *center_of_mass*, with the maximum radius of the halo.
+        """
+        center = self.center_of_mass()
+        radius = self.maximum_radius()
+        # A bit of a long-reach here...
+        sphere = self.fof_output.data_source.hierarchy.sphere(
+                        center, radius=radius)
+        return sphere
+
+    def get_size(self):
+        return self.indices.size
+
+    def write_particle_list(self, handle):
+        self._processing = True
+        gn = "Halo%08i" % (self.id)
+        handle.createGroup("/", gn)
+        for field in ["particle_position_%s" % ax for ax in 'xyz'] \
+                   + ["particle_velocity_%s" % ax for ax in 'xyz'] \
+                   + ["particle_index"]:
+            handle.createArray("/%s" % gn, field, self[field])
+        n = handle.getNode("/", gn)
+        # set attributes on n
+        self._processing = False
+
+class FOFHaloFinder(FOFList, ParallelAnalysisInterface):
+    def __init__(self, pf, link=0.2, dm_only=True, padding=0.2):
+        self.pf = pf
+        self.hierarchy = pf.h
+        self.center = (pf["DomainRightEdge"] + pf["DomainLeftEdge"])/2.0
+        self.padding = padding #* pf["unitary"] # This should be cleverer
+        padded, LE, RE, self.data_source = self._partition_hierarchy_3d(padding=self.padding)
+        self.bounds = (LE, RE)
+        # reflect particles around the periodic boundary
+        self._reposition_particles((LE, RE))
+        self.data_source.get_data(["particle_velocity_%s" % ax for ax in 'xyz'] +    
+                                  ["particle_position_%s" % ax for ax in 'xyz'])
+        # here is where the FOF halo finder is run
+        super(FOFHaloFinder, self).__init__(self.data_source, link, dm_only)
+        self._parse_foflist()
+        self._join_foflists()
+
+    def _parse_foflist(self):
+        groups, com, hi  = [], {}, 0
+        LE, RE = self.bounds
+        for halo in self._groups:
+            this_com = halo.center_of_mass()
+            # if the center of mass is in the box, keep it
+            if na.all((this_com >= LE) & (this_com <= RE)):
+                # Now we add the halo information to OURSELVES, taken from the
+                # self.fof_list
+                groups.append(FOFGroup(self, hi))
+                groups[-1].indices = halo.indices
+                self._claim_object(groups[-1])
+                hi += 1
+        del self._groups # explicit >> implicit
+        self._groups = groups
+
+    def _join_foflists(self):
+        # First we get the total number of halos the entire collection
+        # has identified
+        # Note I have added a new method here to help us get information
+        # about processors and ownership and so forth.
+        # _mpi_info_dict returns a dict of {proc: whatever} where whatever is
+        # what is fed in on each proc.
+        mine, halo_info = self._mpi_info_dict(len(self))
+        nhalos = sum(halo_info.values())
+        # Figure out our offset
+        my_first_id = sum([v for k,v in halo_info.items() if k < mine])
+        # sort the list by the size of the groups
+        # Now we add ghost halos and reassign all the IDs
+        # Note: we already know which halos we own!
+        after = my_first_id + len(self._groups)
+        # Fake, un-owned halos standing in for the other processors' IDs do the trick
+        self._groups = [FOFGroup(self, i) for i in range(my_first_id)] + \
+                       self._groups + \
+                       [FOFGroup(self, i) for i in range(after, nhalos)]
+        id = 0
+        for proc in sorted(halo_info.keys()):
+            for halo in self._groups[id:id+halo_info[proc]]:
+                halo.id = id
+                halo._distributed = self._distributed
+                halo._owner = proc
+                id += 1
+        self._groups.sort(key = lambda h: -1 * h.get_size())
+        
+    def _reposition_particles(self, bounds):
+        # This only does periodicity.  We do NOT want to deal with anything
+        # else.  The only reason we even do periodicity is the padding.
+        LE, RE = bounds
+        dw = self.pf["DomainRightEdge"] - self.pf["DomainLeftEdge"]
+        for i, ax in enumerate('xyz'):
+            arr = self.data_source["particle_position_%s" % ax]
+            arr[arr < LE[i]-self.padding] += dw[i]
+            arr[arr > RE[i]+self.padding] -= dw[i]
+
+    def write_out(self, filename):
+        f = self._write_on_root(filename)
+        FOFList.write_out(self, f)
+
+    @parallel_blocking_call
+    def write_particle_lists(self, prefix):
+        fn = "%s.h5" % self._get_filename(prefix)
+        f = tables.openFile(fn, "w")
+        for halo in self._groups:
+            if not self._is_mine(halo): continue
+            halo.write_particle_list(f)
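
The FOFGroup accessors above (center_of_mass, total_mass, bulk_velocity,
maximum_radius, get_sphere) are what downstream analysis is expected to use.
A minimal sketch, assuming a serial run and a hypothetical dataset:

    # minimal sketch, assumptions noted above
    from yt.mods import *                     # assumed convenience import
    pf = EnzoStaticOutput("DataDump0010")     # hypothetical dataset
    halos = FOFHaloFinder(pf, link=0.2)
    for halo in halos:
        print halo.id, halo.get_size(), halo.total_mass(), halo.maximum_radius()
    biggest = halos[0]                        # groups come back sorted by size
    sp = biggest.get_sphere()                 # an EnzoSphere enclosing the group
    print sp["Density"].max()                 # any grid field works here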

Added: trunk/yt/lagos/fof/README
==============================================================================
--- (empty file)
+++ trunk/yt/lagos/fof/README	Thu Mar 12 21:14:44 2009
@@ -0,0 +1,34 @@
+
+
+						FOF v1.1 
+
+			A Group Finder for N-body Simulations
+
+					October 26, 1994
+
+Changes from v1.0:
+	o 	Fixed bug in tree building, this bug only affected cases where
+		a very small "bucket" size was chosen and the number of particles
+		was not a power of two.
+
+Included are:
+	README
+	Makefile
+	cat1/fof.1
+	kd.c
+	kd.h
+	main.c
+	man1/fof.1
+	tipsydefs.h
+
+For detailed information read the man page (either cat1/fof.1 or
+man1/fof.1).
+
+To build:
+
+	> make
+
+To get further information contact:
+
+	hpccsoft at astro.washington.edu
+

Added: trunk/yt/lagos/fof/__config__.py
==============================================================================
--- (empty file)
+++ trunk/yt/lagos/fof/__config__.py	Thu Mar 12 21:14:44 2009
@@ -0,0 +1,22 @@
+# This file is generated by /share/home/00649/tg457850/yt/src/yt-trunk-svn/setup.py
+# It contains system_info results at the time of building this package.
+__all__ = ["get_info","show"]
+
+
+def get_info(name):
+    g = globals()
+    return g.get(name, g.get(name + "_info", {}))
+
+def show():
+    for name,info_dict in globals().items():
+        if name[0] == "_" or type(info_dict) is not type({}): continue
+        print name + ":"
+        if not info_dict:
+            print "  NOT AVAILABLE"
+        for k,v in info_dict.items():
+            v = str(v)
+            if k == "sources" and len(v) > 200:
+                v = v[:60] + " ...\n... " + v[-60:]
+            print "    %s = %s" % (k,v)
+        print
+    
\ No newline at end of file

Added: trunk/yt/lagos/fof/__init__.py
==============================================================================
--- (empty file)
+++ trunk/yt/lagos/fof/__init__.py	Thu Mar 12 21:14:44 2009
@@ -0,0 +1,4 @@
+from yt.lagos import *
+
+from EnzoFOF import *
+from FOF_Output import *

Added: trunk/yt/lagos/fof/fof_main.c
==============================================================================
--- (empty file)
+++ trunk/yt/lagos/fof/fof_main.c	Thu Mar 12 21:14:44 2009
@@ -0,0 +1,134 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <string.h>
+#include <ctype.h>
+#include <assert.h>
+#include "kd.h"
+
+
+void usage(void)
+{
+	fprintf(stderr,"USAGE:\n");
+	fprintf(stderr,"fof -e <Linking Length>\n");
+	fprintf(stderr,"   [-m <nMinMembers>] [-dgs] [-v]\n");
+	fprintf(stderr,"   [-o <Output Name>] [-p <xyzPeriod>]\n");
+	fprintf(stderr,"   [-px <xPeriod>] [-py <yPeriod>] [-pz <zPeriod>]\n");
+	fprintf(stderr,"Input taken from stdin in tipsy binary format.\n");
+	fprintf(stderr,"SEE MAN PAGE: fof(1) for more information.\n");
+	exit(1);
+	}
+
+void main(int argc,char **argv)
+{
+	KD kd;
+	int nBucket,i,j;
+	char ach[80];
+	float fPeriod[3],fEps;
+	int bDark,bGas,bStar;
+	int nMembers,nGroup,bVerbose;
+	int sec,usec;
+	char *p;
+	
+   	nBucket = 16;
+	nMembers = 8;
+	bDark = 1;
+	bGas = 1;
+	bStar = 1;
+	bVerbose = 0;
+	strcpy(ach,"fof");
+	i = 1;
+	for (j=0;j<3;++j) fPeriod[j] = HUGE;
+	while (i < argc) {
+		if (!strcmp(argv[i],"-e")) {
+			++i;
+			fEps = atof(argv[i]);
+			++i;
+			}
+		else if (!strcmp(argv[i],"-m")) {
+			++i;
+			nMembers = atoi(argv[i]);
+			++i;
+			}
+		else if (!strcmp(argv[i],"-o")) {
+			++i;
+			strcpy(ach,argv[i]);
+			++i;
+			}
+		else if (!strcmp(argv[i],"-p")) {
+			++i;
+			fPeriod[0] = atof(argv[i]);
+			fPeriod[1] = atof(argv[i]);
+			fPeriod[2] = atof(argv[i]);
+			++i;
+			}
+		else if (!strcmp(argv[i],"-px")) {
+			++i;
+			fPeriod[0] = atof(argv[i]);
+			++i;
+			}
+		else if (!strcmp(argv[i],"-py")) {
+			++i;
+			fPeriod[1] = atof(argv[i]);
+			++i;
+			}
+		else if (!strcmp(argv[i],"-pz")) {
+			++i;
+		    fPeriod[2] = atof(argv[i]);
+			++i;
+			}
+		else if (!strcmp(argv[i],"-v")) {
+			bVerbose = 1;
+			++i;
+			}
+		else if (*argv[i] == '-') {
+			p = argv[i];
+			++p;
+			if (*p == 'd' || *p == 'g' || *p == 's') {
+				bDark = 0;
+				bGas = 0;
+				bStar = 0;
+				}
+			else usage();
+			while (isalpha(*p)) {
+				switch (*p) {
+				case 'd':
+					bDark = 1;
+					break;
+				case 'g':
+					bGas = 1;
+					break;
+				case 's':
+					bStar = 1;
+					break;
+				default:
+					usage();
+					}
+				++p;
+				}
+			++i;
+			}
+		else usage();
+		}
+	kdInit(&kd,nBucket,fPeriod);
+	kdReadTipsy(kd,stdin,bDark,bGas,bStar);
+	kdBuildTree(kd);
+	kdTime(kd,&sec,&usec);
+	nGroup = kdFoF(kd,fEps);
+	kdTime(kd,&sec,&usec);
+	if (bVerbose) printf("Number of initial groups:%d\n",nGroup);
+	nGroup = kdTooSmall(kd,nMembers);
+	if (bVerbose) {
+		printf("Number of groups:%d\n",nGroup);
+		printf("FOF CPU TIME: %d.%06d secs\n",sec,usec);
+		}
+	kdOrder(kd);
+	strcat(ach,".grp");
+	kdOutGroup(kd,ach);
+	kdFinish(kd);
+	}
+
+
+
+
+

Added: trunk/yt/lagos/fof/kd.c
==============================================================================
--- (empty file)
+++ trunk/yt/lagos/fof/kd.c	Thu Mar 12 21:14:44 2009
@@ -0,0 +1,444 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <assert.h>
+#include "kd.h"
+#include "tipsydefs.h"
+
+
+void kdTime(KD kd,int *puSecond,int *puMicro)
+{
+	struct rusage ru;
+
+	getrusage(0,&ru);
+	*puMicro = ru.ru_utime.tv_usec - kd->uMicro;
+	*puSecond = ru.ru_utime.tv_sec - kd->uSecond;
+	if (*puMicro < 0) {
+		*puMicro += 1000000;
+		*puSecond -= 1;
+		}
+	kd->uSecond = ru.ru_utime.tv_sec;
+	kd->uMicro = ru.ru_utime.tv_usec;
+	}
+
+
+int kdInit(KD *pkd,int nBucket,float *fPeriod)
+{
+	KD kd;
+	int j;
+
+	kd = (KD)malloc(sizeof(struct kdContext));
+	assert(kd != NULL);
+	kd->nBucket = nBucket;
+	for (j=0;j<3;++j) kd->fPeriod[j] = fPeriod[j];
+	kd->p = NULL;
+	kd->kdNodes = NULL;
+	*pkd = kd;
+	return(1);
+	}
+
+
+void kdReadTipsy(KD kd,FILE *fp,int bDark,int bGas,int bStar)
+{
+	int i,j,nCnt;
+	struct dump h;
+	struct gas_particle gp;
+	struct dark_particle dp;
+	struct star_particle sp;
+
+	fread(&h,sizeof(struct dump),1,fp);
+	kd->nParticles = h.nbodies;
+	kd->nDark = h.ndark;
+	kd->nGas = h.nsph;
+	kd->nStar = h.nstar;
+	kd->fTime = h.time;
+	kd->nActive = 0;
+	if (bDark) kd->nActive += kd->nDark;
+	if (bGas) kd->nActive += kd->nGas;
+	if (bStar) kd->nActive += kd->nStar;
+	kd->bDark = bDark;
+	kd->bGas = bGas;
+	kd->bStar = bStar;
+	/*
+	 ** Allocate particles.
+	 */
+	kd->p = (PARTICLE *)malloc(kd->nActive*sizeof(PARTICLE));
+	assert(kd->p != NULL);
+	/*
+	 ** Read Stuff!
+	 */
+	nCnt = 0;
+	for (i=0;i<h.nsph;++i) {
+		fread(&gp,sizeof(struct gas_particle),1,fp);
+		if (bGas) {
+			kd->p[nCnt].iOrder = nCnt;
+			for (j=0;j<3;++j) kd->p[nCnt].r[j] = gp.pos[j];
+			++nCnt;
+			}
+		}
+	for (i=0;i<h.ndark;++i) {
+		fread(&dp,sizeof(struct dark_particle),1,fp);
+		if (bDark) {
+			kd->p[nCnt].iOrder = nCnt;
+			for (j=0;j<3;++j) kd->p[nCnt].r[j] = dp.pos[j];
+			++nCnt;
+			}
+		}
+	for (i=0;i<h.nstar;++i) {
+		fread(&sp,sizeof(struct star_particle),1,fp);
+		if (bStar) {
+			kd->p[nCnt].iOrder = nCnt;
+			for (j=0;j<3;++j) kd->p[nCnt].r[j] = sp.pos[j];
+			++nCnt;
+			}
+		}
+	}
+
+
+void kdSelect(KD kd,int d,int k,int l,int r)
+{
+	PARTICLE *p,t;
+	double v;
+	int i,j;
+
+	p = kd->p;
+	while (r > l) {
+		v = p[k].r[d];
+		t = p[r];
+		p[r] = p[k];
+		p[k] = t;
+		i = l - 1;
+		j = r;
+		while (1) {
+			while (i < j) if (p[++i].r[d] >= v) break;
+			while (i < j) if (p[--j].r[d] <= v) break;
+			t = p[i];
+			p[i] = p[j];
+			p[j] = t;
+			if (j <= i) break;
+			}
+		p[j] = p[i];
+		p[i] = p[r];
+		p[r] = t;
+		if (i >= k) r = i - 1;
+		if (i <= k) l = i + 1;
+		}
+	}
+
+
+void kdCombine(KDN *p1,KDN *p2,KDN *pOut)
+{
+	int j;
+
+	/*
+	 ** Combine the bounds.
+	 */
+	for (j=0;j<3;++j) {
+		if (p2->bnd.fMin[j] < p1->bnd.fMin[j])
+			pOut->bnd.fMin[j] = p2->bnd.fMin[j];
+		else
+			pOut->bnd.fMin[j] = p1->bnd.fMin[j];
+		if (p2->bnd.fMax[j] > p1->bnd.fMax[j])
+			pOut->bnd.fMax[j] = p2->bnd.fMax[j];
+		else
+			pOut->bnd.fMax[j] = p1->bnd.fMax[j];
+		}
+	}
+
+
+void kdUpPass(KD kd,int iCell)
+{
+	KDN *c;
+	int l,u,pj,j;
+
+	c = kd->kdNodes;
+	if (c[iCell].iDim != -1) {
+		l = LOWER(iCell);
+		u = UPPER(iCell);
+		kdUpPass(kd,l);
+		kdUpPass(kd,u);
+		kdCombine(&c[l],&c[u],&c[iCell]);
+		}
+	else {
+		l = c[iCell].pLower;
+		u = c[iCell].pUpper;
+		for (j=0;j<3;++j) {
+			c[iCell].bnd.fMin[j] = kd->p[u].r[j];
+			c[iCell].bnd.fMax[j] = kd->p[u].r[j];
+			}
+		for (pj=l;pj<u;++pj) {
+			for (j=0;j<3;++j) {
+				if (kd->p[pj].r[j] < c[iCell].bnd.fMin[j])
+					c[iCell].bnd.fMin[j] = kd->p[pj].r[j];
+				if (kd->p[pj].r[j] > c[iCell].bnd.fMax[j])
+					c[iCell].bnd.fMax[j] = kd->p[pj].r[j];
+				}
+			}
+		}
+	}
+
+void kdBuildTree(KD kd)
+{
+	int l,n,i,d,m,j,diff;
+	KDN *c;
+	BND bnd;
+
+	n = kd->nActive;
+	kd->nLevels = 1;
+	l = 1;
+	while (n > kd->nBucket) {
+		n = n>>1;
+		l = l<<1;
+		++kd->nLevels;
+		}
+	kd->nSplit = l;
+	kd->nNodes = l<<1;
+	if (kd->kdNodes != NULL) free(kd->kdNodes);
+	kd->kdNodes = (KDN *)malloc(kd->nNodes*sizeof(KDN));
+	assert(kd->kdNodes != NULL);
+	/*
+	 ** Calculate Bounds.
+	 */
+	for (j=0;j<3;++j) {
+		bnd.fMin[j] = kd->p[0].r[j];
+		bnd.fMax[j] = kd->p[0].r[j];
+		}
+	for (i=1;i<kd->nActive;++i) {
+		for (j=0;j<3;++j) {
+			if (bnd.fMin[j] > kd->p[i].r[j]) 
+				bnd.fMin[j] = kd->p[i].r[j];
+			else if (bnd.fMax[j] < kd->p[i].r[j])
+				bnd.fMax[j] = kd->p[i].r[j];
+			}
+		}
+	/*
+	 ** Set up ROOT node
+	 */
+	c = kd->kdNodes;
+	c[ROOT].pLower = 0;
+	c[ROOT].pUpper = kd->nActive-1;
+	c[ROOT].bnd = bnd;
+	i = ROOT;
+	while (1) {
+		assert(c[i].pUpper - c[i].pLower + 1 > 0);
+		if (i < kd->nSplit && (c[i].pUpper - c[i].pLower) > 0) {
+			d = 0;
+			for (j=1;j<3;++j) {
+				if (c[i].bnd.fMax[j]-c[i].bnd.fMin[j] > 
+					c[i].bnd.fMax[d]-c[i].bnd.fMin[d]) d = j;
+				}
+			c[i].iDim = d;
+
+			m = (c[i].pLower + c[i].pUpper)/2;
+			kdSelect(kd,d,m,c[i].pLower,c[i].pUpper);
+
+			c[i].fSplit = kd->p[m].r[d];
+			c[LOWER(i)].bnd = c[i].bnd;
+			c[LOWER(i)].bnd.fMax[d] = c[i].fSplit;
+			c[LOWER(i)].pLower = c[i].pLower;
+			c[LOWER(i)].pUpper = m;
+			c[UPPER(i)].bnd = c[i].bnd;
+			c[UPPER(i)].bnd.fMin[d] = c[i].fSplit;
+			c[UPPER(i)].pLower = m+1;
+			c[UPPER(i)].pUpper = c[i].pUpper;
+			diff = (m-c[i].pLower+1)-(c[i].pUpper-m);
+			assert(diff == 0 || diff == 1);
+			i = LOWER(i);
+			}
+		else {
+			c[i].iDim = -1;
+			SETNEXT(i);
+			if (i == ROOT) break;
+			}
+		}
+	kdUpPass(kd,ROOT);
+	}
+
+
+int kdFoF(KD kd,float fEps)
+{
+	PARTICLE *p;
+	KDN *c;
+	int pi,pj,pn,cp;
+
+	int iGroup;
+
+	int *Fifo,iHead,iTail,nFifo;
+	float fEps2;
+	float dx,dy,dz,x,y,z,lx,ly,lz,sx,sy,sz,fDist2;
+
+	p = kd->p;
+	c = kd->kdNodes;
+	lx = kd->fPeriod[0];
+	ly = kd->fPeriod[1];
+	lz = kd->fPeriod[2];
+	fEps2 = fEps*fEps;
+	for (pn=0;pn<kd->nActive;++pn) p[pn].iGroup = 0;
+	nFifo = kd->nActive;
+	Fifo = (int *)malloc(nFifo*sizeof(int));
+	assert(Fifo != NULL);
+	iHead = 0;
+	iTail = 0;
+	iGroup = 0;
+	for (pn=0;pn<kd->nActive;++pn) {
+		if (p[pn].iGroup) continue;
+		++iGroup;
+		/*
+		 ** Mark it and add to the do-fifo.
+		 */
+		p[pn].iGroup = iGroup;
+		Fifo[iTail++] = pn;
+		if (iTail == nFifo) iTail = 0;
+		while (iHead != iTail) {
+			pi = Fifo[iHead++];
+			if (iHead == nFifo) iHead = 0;
+			/*
+			 ** Now do an fEps-Ball Gather!
+			 */
+			x = p[pi].r[0];
+			y = p[pi].r[1];
+			z = p[pi].r[2];
+			cp = ROOT;
+			while (1) {
+				INTERSECT(c,cp,fEps2,lx,ly,lz,x,y,z,sx,sy,sz);
+				/*
+				 ** We have an intersection to test.
+				 */
+				if (c[cp].iDim >= 0) {
+					cp = LOWER(cp);
+					continue;
+					}
+				else {
+					for (pj=c[cp].pLower;pj<=c[cp].pUpper;++pj) {
+						if (p[pj].iGroup) continue;
+						dx = sx - p[pj].r[0];
+						dy = sy - p[pj].r[1];
+						dz = sz - p[pj].r[2];
+						fDist2 = dx*dx + dy*dy + dz*dz;
+						if (fDist2 < fEps2) {
+							/*
+							 ** Mark it and add to the do-fifo.
+							 */
+							p[pj].iGroup = iGroup;
+							Fifo[iTail++] = pj;
+							if (iTail == nFifo) iTail = 0;
+							}
+						}
+					SETNEXT(cp);
+					if (cp == ROOT) break;
+					continue;
+					}
+			ContainedCell:
+				for (pj=c[cp].pLower;pj<=c[cp].pUpper;++pj) {
+					if (p[pj].iGroup) continue;
+					/*
+					 ** Mark it and add to the do-fifo.
+					 */
+					p[pj].iGroup = iGroup;
+					Fifo[iTail++] = pj;
+					if (iTail == nFifo) iTail = 0;
+					}
+			GetNextCell:
+				SETNEXT(cp);
+				if (cp == ROOT) break;
+				}
+			}
+		}
+	free(Fifo);
+	kd->nGroup = iGroup+1;
+	return(kd->nGroup-1);
+	}
+
+
+int kdTooSmall(KD kd,int nMembers)
+{
+	int *pnMembers,*pMap;
+	int i,pi,nGroup;
+
+	pnMembers = (int *)malloc(kd->nGroup*sizeof(int));
+	assert(pnMembers != NULL);
+	pMap = (int *)malloc(kd->nGroup*sizeof(int));
+	assert(pMap != NULL);
+	for (i=0;i<kd->nGroup;++i) pnMembers[i] = 0;
+	for (pi=0;pi<kd->nActive;++pi) {
+		++pnMembers[kd->p[pi].iGroup];
+		}
+	for (i=1;i<kd->nGroup;++i) {
+		if (pnMembers[i] < nMembers) {
+			pnMembers[i] = 0;
+			}
+		}
+	/*
+	 ** Create a remapping!
+	 */
+	pMap[0] = 0;
+	nGroup = 1;
+	for (i=1;i<kd->nGroup;++i) {
+		pMap[i] = nGroup;
+		if (pnMembers[i] == 0) {
+			pMap[i] = 0;
+			}
+		else {
+			++nGroup;
+			}
+		}
+	/*
+	 ** Remap the groups.
+	 */
+	for (pi=0;pi<kd->nActive;++pi) {
+		kd->p[pi].iGroup = pMap[kd->p[pi].iGroup];
+		}
+	free(pMap);
+	free(pnMembers);
+	kd->nGroup = nGroup;
+	return(nGroup-1);
+	}
+
+
+int CmpParticles(const void *v1,const void *v2)
+{
+	PARTICLE *p1 = (PARTICLE *)v1;
+	PARTICLE *p2 = (PARTICLE *)v2;
+	return(p1->iOrder - p2->iOrder);
+	}
+
+void kdOrder(KD kd)
+{
+	qsort(kd->p,kd->nActive,sizeof(PARTICLE),CmpParticles);
+	}
+
+
+void kdOutGroup(KD kd,char *pszFile)
+{
+	FILE *fp;
+	int i,iCnt;
+
+	fp = fopen(pszFile,"w");
+	assert(fp != NULL);
+	fprintf(fp,"%d\n",kd->nParticles);
+	iCnt = 0;
+	for (i=0;i<kd->nGas;++i) {
+		if (kd->bGas) fprintf(fp,"%d\n",kd->p[iCnt++].iGroup);
+		else fprintf(fp,"0\n");
+		}
+	for (i=0;i<kd->nDark;++i) {
+		if (kd->bDark) fprintf(fp,"%d\n",kd->p[iCnt++].iGroup);
+		else fprintf(fp,"0\n");
+		}
+	for (i=0;i<kd->nStar;++i) {
+		if (kd->bStar) fprintf(fp,"%d\n",kd->p[iCnt++].iGroup);
+		else fprintf(fp,"0\n");
+		}
+	fclose(fp);
+	}
+
+
+void kdFinish(KD kd)
+{
+	free(kd->p);
+	free(kd->kdNodes);
+	free(kd);
+	}
+
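
kdFoF above is the heart of the method: seed a group at any untagged particle,
then flood-fill outward over the "within fEps" relation, using the kd-tree (via
INTERSECT) to find neighbours quickly.  A brute-force sketch of the same grouping
rule, minus the tree and the periodic wrapping:

    # every particle ends up in some group (singletons included), matching
    # kdFoF before kdTooSmall prunes the small ones
    import numpy as na

    def naive_fof(pos, eps):
        # pos: (N, 3) array of positions; returns an int32 group tag per particle
        n = len(pos)
        group = na.zeros(n, dtype="int32")
        eps2 = eps * eps
        current = 0
        for seed in xrange(n):
            if group[seed]:
                continue
            current += 1
            group[seed] = current
            fifo = [seed]
            while fifo:
                i = fifo.pop()
                d2 = ((pos - pos[i]) ** 2).sum(axis=1)
                friends = na.where((d2 < eps2) & (group == 0))[0]
                group[friends] = current
                fifo.extend(friends)
        return group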

Added: trunk/yt/lagos/fof/kd.h
==============================================================================
--- (empty file)
+++ trunk/yt/lagos/fof/kd.h	Thu Mar 12 21:14:44 2009
@@ -0,0 +1,203 @@
+#ifndef KD_HINCLUDED
+#define KD_HINCLUDED
+
+#define ROOT		1
+#define LOWER(i)	(i<<1)
+#define UPPER(i)	((i<<1)+1)
+#define PARENT(i)	(i>>1)
+#define SIBLING(i) 	((i&1)?i-1:i+1)
+#define SETNEXT(i)\
+{\
+	while (i&1) i=i>>1;\
+	++i;\
+	}
+
+#define DARK	1
+#define GAS		2
+#define STAR	4
+
+#define KD_ORDERTEMP	256
+
+typedef struct Particle {
+	float r[3];
+	int iGroup;
+	int iOrder;
+	} PARTICLE;
+
+typedef struct bndBound {
+	float fMin[3];
+	float fMax[3];
+	} BND;
+
+typedef struct kdNode {
+	float fSplit;
+	BND bnd;
+	int iDim;
+	int pLower;
+	int pUpper;
+	} KDN;
+
+typedef struct kdContext {
+	int nBucket;
+	int nParticles;
+	int nDark;
+	int nGas;
+	int nStar;
+	int bDark;
+	int bGas;
+	int bStar;
+	int nActive;
+	float fTime;
+	float fPeriod[3];
+	int nLevels;
+	int nNodes;
+	int nSplit;
+	PARTICLE *p;
+	KDN *kdNodes;
+	int nGroup;
+	int uSecond;
+	int uMicro;
+	} * KD;
+
+
+#define INTERSECT(c,cp,fBall2,lx,ly,lz,x,y,z,sx,sy,sz)\
+{\
+	float dx,dy,dz,dx1,dy1,dz1,fDist2,fMax2;\
+	dx = c[cp].bnd.fMin[0]-x;\
+	dx1 = x-c[cp].bnd.fMax[0];\
+	dy = c[cp].bnd.fMin[1]-y;\
+	dy1 = y-c[cp].bnd.fMax[1];\
+	dz = c[cp].bnd.fMin[2]-z;\
+	dz1 = z-c[cp].bnd.fMax[2];\
+	if (dx > 0.0) {\
+		if (dx1+lx < dx) {\
+			dx1 += lx;\
+			dx -= lx;\
+			sx = x+lx;\
+			fDist2 = dx1*dx1;\
+			fMax2 = dx*dx;\
+			}\
+		else {\
+			sx = x;\
+			fDist2 = dx*dx;\
+			fMax2 = dx1*dx1;\
+			}\
+		if (fDist2 > fBall2) goto GetNextCell;\
+		}\
+	else if (dx1 > 0.0) {\
+		if (dx+lx < dx1) {\
+		    dx += lx;\
+			dx1 -= lx;\
+			sx = x-lx;\
+			fDist2 = dx*dx;\
+			fMax2 = dx1*dx1;\
+			}\
+		else {\
+			sx = x;\
+			fDist2 = dx1*dx1;\
+			fMax2 = dx*dx;\
+			}\
+		if (fDist2 > fBall2) goto GetNextCell;\
+		}\
+	else {\
+		sx = x;\
+		fDist2 = 0.0;\
+		if (dx < dx1) fMax2 = dx*dx;\
+		else fMax2 = dx1*dx1;\
+		}\
+	if (dy > 0.0) {\
+		if (dy1+ly < dy) {\
+		    dy1 += ly;\
+			dy -= ly;\
+			sy = y+ly;\
+			fDist2 += dy1*dy1;\
+			fMax2 += dy*dy;\
+			}\
+		else {\
+			sy = y;\
+			fDist2 += dy*dy;\
+			fMax2 += dy1*dy1;\
+			}\
+		if (fDist2 > fBall2) goto GetNextCell;\
+		}\
+	else if (dy1 > 0.0) {\
+		if (dy+ly < dy1) {\
+		    dy += ly;\
+			dy1 -= ly;\
+			sy = y-ly;\
+			fDist2 += dy*dy;\
+			fMax2 += dy1*dy1;\
+			}\
+		else {\
+			sy = y;\
+			fDist2 += dy1*dy1;\
+			fMax2 += dy*dy;\
+			}\
+		if (fDist2 > fBall2) goto GetNextCell;\
+		}\
+	else {\
+		sy = y;\
+		if (dy < dy1) fMax2 += dy*dy;\
+		else fMax2 += dy1*dy1;\
+		}\
+	if (dz > 0.0) {\
+		if (dz1+lz < dz) {\
+		    dz1 += lz;\
+            dz -= lz;\
+			sz = z+lz;\
+			fDist2 += dz1*dz1;\
+			fMax2 += dz*dz;\
+			}\
+		else {\
+			sz = z;\
+			fDist2 += dz*dz;\
+			fMax2 += dz1*dz1;\
+			}\
+		if (fDist2 > fBall2) goto GetNextCell;\
+		}\
+	else if (dz1 > 0.0) {\
+		if (dz+lz < dz1) {\
+			dz += lz;\
+		    dz1 -= lz;\
+			sz = z-lz;\
+			fDist2 += dz*dz;\
+			fMax2 += dz1*dz1;\
+			}\
+		else {\
+			sz = z;\
+			fDist2 += dz1*dz1;\
+			fMax2 += dz*dz;\
+			}\
+		if (fDist2 > fBall2) goto GetNextCell;\
+		}\
+	else {\
+		sz = z;\
+		if (dz < dz1) fMax2 += dz*dz;\
+		else fMax2 += dz1*dz1;\
+		}\
+	if (fMax2 < fBall2) goto ContainedCell;\
+	}
+
+
+void kdTime(KD,int *,int *);
+int kdInit(KD *,int,float *);
+void kdReadTipsy(KD,FILE *,int,int,int);
+void kdBuildTree(KD);
+int kdFoF(KD,float);
+int kdTooSmall(KD,int);
+void kdOrder(KD);
+void kdOutGroup(KD,char *);
+void kdFinish(KD);
+
+#endif
+
+
+
+
+
+
+
+
+
+
+
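
The LOWER/UPPER/SETNEXT macros above index an implicit, heap-numbered binary tree
(the root is cell 1, the children of cell i are 2i and 2i+1), and SETNEXT climbs
out of finished subtrees to the next cell to visit.  A small sketch of the cell
ordering they produce, with the split count picked arbitrarily:

    # the visiting order used by kdBuildTree and kdFoF: descend at interior
    # cells, SETNEXT at buckets, stop once we wrap back around to ROOT
    def setnext(i):
        while i & 1:
            i >>= 1
        return i + 1

    nsplit = 8                    # pretend nSplit == 8: cells 8..15 are buckets
    i, order = 1, []
    while True:
        if i < nsplit:            # interior cell: descend to LOWER(i)
            order.append(("interior", i))
            i = 2 * i
        else:                     # bucket: visit it, then move on
            order.append(("bucket", i))
            i = setnext(i)
            if i == 1:            # wrapped back to ROOT: done
                break
    print order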

Added: trunk/yt/lagos/fof/setup.py
==============================================================================
--- (empty file)
+++ trunk/yt/lagos/fof/setup.py	Thu Mar 12 21:14:44 2009
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+import setuptools
+import os, sys, os.path
+
+import os.path
+
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('fof',parent_package,top_path)
+    config.make_config_py() # installs __config__.py
+    config.make_svn_version_py()
+    config.add_extension("EnzoFOF", sources=
+                                    ["EnzoFOF.c",
+                                     "kd.c"],
+                                    libraries=["m"])
+    return config
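
This configuration() is only half the wiring: for numpy.distutils to build the new
sub-package, the parent yt/lagos/setup.py presumably needs a matching hook, which
is not shown in this diff.  A sketch of the assumed one-line addition:

    # assumed addition inside the existing configuration() of yt/lagos/setup.py
    config.add_subpackage("fof")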

Added: trunk/yt/lagos/fof/setup.pyc
==============================================================================
Binary file. No diff available.

Added: trunk/yt/lagos/fof/tipsydefs.h
==============================================================================
--- (empty file)
+++ trunk/yt/lagos/fof/tipsydefs.h	Thu Mar 12 21:14:44 2009
@@ -0,0 +1,58 @@
+#define MAXDIM 3
+#define forever for(;;)
+
+typedef float Real;
+
+struct gas_particle {
+    Real mass;
+    Real pos[MAXDIM];
+    Real vel[MAXDIM];
+    Real rho;
+    Real temp;
+    Real hsmooth;
+    Real metals ;
+    Real phi ;
+} ;
+
+struct gas_particle *gas_particles;
+
+struct dark_particle {
+    Real mass;
+    Real pos[MAXDIM];
+    Real vel[MAXDIM];
+    Real eps;
+    Real phi ;
+} ;
+
+struct dark_particle *dark_particles;
+
+struct star_particle {
+    Real mass;
+    Real pos[MAXDIM];
+    Real vel[MAXDIM];
+    Real metals ;
+    Real tform ;
+    Real eps;
+    Real phi ;
+} ;
+
+struct star_particle *star_particles;
+
+struct dump {
+    double time ;
+    int nbodies ;
+    int ndim ;
+    int nsph ;
+    int ndark ;
+    int nstar ;
+} ;
+
+struct dump header ;
+
+
+
+
+
+
+
+


