[yt-svn] commit/yt: 16 new changesets

Bitbucket commits-noreply at bitbucket.org
Tue Dec 4 07:19:27 PST 2012


16 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/4e841522ff45/
changeset:   4e841522ff45
branch:      yt
user:        MatthewTurk
date:        2012-11-28 13:20:16
summary:     This changes the rockstar interface to work with 0.99.6.
affected #:  1 file

diff -r da2c74ebeffafb855ee27b842400e0909c60e0c8 -r 4e841522ff4558bff385625ca4f4841506ff9133 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -54,7 +54,7 @@
 
 cdef import from "meta_io.h":
     void read_particles(char *filename)
-    void output_and_free_halos(np.int64_t id_offset, np.int64_t snap, 
+    void output_halos(np.int64_t id_offset, np.int64_t snap, 
 			   np.int64_t chunk, float *bounds)
 
 cdef import from "config_vars.h":
@@ -220,7 +220,7 @@
     print "FOF_FRACTION =", FOF_FRACTION
     print "FOF_LINKING_LENGTH =", FOF_LINKING_LENGTH
     print "INCLUDE_HOST_POTENTIAL_RATIO =", INCLUDE_HOST_POTENTIAL_RATIO
-    print "DOUBLE_COUNT_SUBHALO_MASS_RATIO =", DOUBLE_COUNT_SUBHALO_MASS_RATIO
+    #print "DOUBLE_COUNT_SUBHALO_MASS_RATIO =", DOUBLE_COUNT_SUBHALO_MASS_RATIO
     print "TEMPORAL_HALO_FINDING =", TEMPORAL_HALO_FINDING
     print "MIN_HALO_PARTICLES =", MIN_HALO_PARTICLES
     print "UNBOUND_THRESHOLD =", UNBOUND_THRESHOLD
@@ -228,8 +228,8 @@
 
     print "TOTAL_PARTICLES =", TOTAL_PARTICLES
     print "BOX_SIZE =", BOX_SIZE
-    print "OUTPUT_HMAD =", OUTPUT_HMAD
-    print "OUTPUT_PARTICLES =", OUTPUT_PARTICLES
+    #print "OUTPUT_HMAD =", OUTPUT_HMAD
+    #print "OUTPUT_PARTICLES =", OUTPUT_PARTICLES
     print "OUTPUT_LEVELS =", OUTPUT_LEVELS
     print "DUMP_PARTICLES =", DUMP_PARTICLES[0]
     print "DUMP_PARTICLES[1] =", DUMP_PARTICLES[1]
@@ -400,7 +400,7 @@
     def call_rockstar(self):
         read_particles("generic")
         rockstar(NULL, 0)
-        output_and_free_halos(0, 0, 0, NULL)
+        output_halos(0, 0, 0, NULL)
 
     def start_server(self):
         with nogil:



https://bitbucket.org/yt_analysis/yt/changeset/74161ead4b7f/
changeset:   74161ead4b7f
branch:      yt
user:        MatthewTurk
date:        2012-11-28 14:18:15
summary:     First pass at streamlining Rockstar halo finding:

 * Remove parsing of the server.h file in favor of two different Cython methods
 * Move several calls to the ytcfg object outside of loops and conditionals
 * Fix split_work() to avoid any loops
affected #:  2 files

diff -r 4e841522ff4558bff385625ca4f4841506ff9133 -r 74161ead4b7fc2fc2a405ffe9d2d19cfd1786b8f yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -40,40 +40,15 @@
 from os import mkdir
 from os import path
 
-# Get some definitions from Rockstar directly.
-if "ROCKSTAR_DIR" in os.environ:
-    ROCKSTAR_DIR = os.environ["ROCKSTAR_DIR"]
-elif os.path.exists("rockstar.cfg"):
-    ROCKSTAR_DIR = open("rockstar.cfg").read().strip()
-else:
-    print "Reading Rockstar location from rockstar.cfg failed."
-    print "Please place the base directory of your"
-    print "Rockstar install in rockstar.cfg and restart."
-    print "(ex: \"echo '/path/to/Rockstar-0.99' > rockstar.cfg\" )"
-    sys.exit(1)
-lines = file(path.join(ROCKSTAR_DIR, 'server.h'))
-READER_TYPE = None
-WRITER_TYPE = None
-for line in lines:
-    if "READER_TYPE" in line:
-        line = line.split()
-        READER_TYPE = int(line[-1])
-    if "WRITER_TYPE" in line:
-        line = line.split()
-        WRITER_TYPE = int(line[-1])
-    if READER_TYPE != None and WRITER_TYPE != None:
-        break
-lines.close()
-
 class InlineRunner(ParallelAnalysisInterface):
     def __init__(self, num_writers):
         # If this is being run inline, num_readers == comm.size, always.
-        self.num_readers = ytcfg.getint("yt", "__global_parallel_size")
+        psize = ytcfg.getint("yt", "__global_parallel_size")
+        self.num_readers = psize
         if num_writers is None:
-            self.num_writers =  ytcfg.getint("yt", "__global_parallel_size")
+            self.num_writers =  psize
         else:
-            self.num_writers = min(num_writers,
-                ytcfg.getint("yt", "__global_parallel_size"))
+            self.num_writers = min(num_writers, psize)
 
     def split_work(self, pool):
         avail = range(pool.comm.size)
@@ -110,12 +85,12 @@
             time.sleep(0.1 + pool.comm.rank/10.0)
             writer_pid = os.fork()
             if writer_pid == 0:
-                handler.start_client(WRITER_TYPE)
+                handler.start_writer()
                 os._exit(0)
         # Start readers, not forked.
         if pool.comm.rank in self.readers:
             time.sleep(0.1 + pool.comm.rank/10.0)
-            handler.start_client(READER_TYPE)
+            handler.start_reader()
         # Make sure the forks are done, which they should be.
         if writer_pid != 0:
             os.waitpid(writer_pid, 0)
@@ -125,31 +100,19 @@
 class StandardRunner(ParallelAnalysisInterface):
     def __init__(self, num_readers, num_writers):
         self.num_readers = num_readers
+        psize = ytcfg.getint("yt", "__global_parallel_size")
         if num_writers is None:
-            self.num_writers = ytcfg.getint("yt", "__global_parallel_size") \
-                - num_readers - 1
+            self.num_writers =  psize - num_readers - 1
         else:
-            self.num_writers = min(num_writers,
-                ytcfg.getint("yt", "__global_parallel_size"))
-        if self.num_readers + self.num_writers + 1 != ytcfg.getint("yt", \
-                "__global_parallel_size"):
+            self.num_writers = min(num_writers, psize)
+        if self.num_readers + self.num_writers + 1 != psize:
             mylog.error('%i reader + %i writers != %i mpi',
-                    self.num_readers, self.num_writers,
-                    ytcfg.getint("yt", "__global_parallel_size"))
+                    self.num_readers, self.num_writers, psize)
             raise RuntimeError
     
     def split_work(self, pool):
-        # Who is going to do what.
-        avail = range(pool.comm.size)
-        self.writers = []
-        self.readers = []
-        # If we're not running inline, rank 0 should be removed immediately.
-        avail.pop(0)
-        # Now we assign the rest.
-        for i in range(self.num_readers):
-            self.readers.append(avail.pop(0))
-        for i in range(self.num_writers):
-            self.writers.append(avail.pop(0))
+        self.readers = np.arange(self.num_readers) + 1
+        self.writers = np.arange(self.num_writers) + 1 + self.num_readers
     
     def run(self, handler, pool):
         # Not inline so we just launch them directly from our MPI threads.
@@ -157,10 +120,10 @@
             handler.start_server()
         if pool.comm.rank in self.readers:
             time.sleep(0.1 + pool.comm.rank/10.0)
-            handler.start_client(READER_TYPE)
+            handler.start_reader()
         if pool.comm.rank in self.writers:
             time.sleep(0.2 + pool.comm.rank/10.0)
-            handler.start_client(WRITER_TYPE)
+            handler.start_writer()
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
     def __init__(self, ts, num_readers = 1, num_writers = None, 


diff -r 4e841522ff4558bff385625ca4f4841506ff9133 -r 74161ead4b7fc2fc2a405ffe9d2d19cfd1786b8f yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -48,6 +48,8 @@
 
 cdef import from "server.h" nogil:
     int server()
+    np.int64_t READER_TYPE
+    np.int64_t WRITER_TYPE
 
 cdef import from "client.h" nogil:
     void client(np.int64_t in_type)
@@ -406,6 +408,10 @@
         with nogil:
             server()
 
-    def start_client(self, in_type):
-        in_type = np.int64(in_type)
+    def start_reader(self):
+        cdef np.int64_t in_type = np.int64(READER_TYPE)
         client(in_type)
+
+    def start_writer(self):
+        cdef np.int64_t in_type = np.int64(WRITER_TYPE)
+        client(in_type)
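
The loop-free split_work() in the diff above boils down to computing
contiguous rank ranges with NumPy. A minimal standalone sketch of that
assignment (the function and values here are illustrative, not the yt API):

    import numpy as np

    def split_work(num_readers, num_writers):
        # Rank 0 is reserved for the server process; readers occupy the
        # next num_readers ranks and writers the block after that.
        readers = np.arange(num_readers) + 1
        writers = np.arange(num_writers) + 1 + num_readers
        return readers, writers

    readers, writers = split_work(2, 3)
    print(readers)  # [1 2]
    print(writers)  # [3 4 5]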



https://bitbucket.org/yt_analysis/yt/changeset/9239d5a8c5dd/
changeset:   9239d5a8c5dd
branch:      yt
user:        MatthewTurk
date:        2012-11-28 14:59:38
summary:     Going back to using ProcessorPools instead of direct pool.comm access.
Removing the print_rockstar_settings routine.
affected #:  2 files

diff -r 74161ead4b7fc2fc2a405ffe9d2d19cfd1786b8f -r 9239d5a8c5dd2dec6834a227f0a94ef7d28cf481 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -110,24 +110,25 @@
                     self.num_readers, self.num_writers, psize)
             raise RuntimeError
     
-    def split_work(self, pool):
+    def split_work(self):
         self.readers = np.arange(self.num_readers) + 1
         self.writers = np.arange(self.num_writers) + 1 + self.num_readers
     
-    def run(self, handler, pool):
+    def run(self, handler, wg):
         # Not inline so we just launch them directly from our MPI threads.
-        if pool.comm.rank == 0:
+        if wg.name == "server":
             handler.start_server()
-        if pool.comm.rank in self.readers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
+        if wg.name == "readers":
+            time.sleep(0.1)
             handler.start_reader()
-        if pool.comm.rank in self.writers:
-            time.sleep(0.2 + pool.comm.rank/10.0)
+        if wg.name == "writers":
+            time.sleep(0.2)
             handler.start_writer()
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
-    def __init__(self, ts, num_readers = 1, num_writers = None, 
-            outbase=None,particle_mass=-1.0,dm_type=1,force_res=None):
+    def __init__(self, ts, num_readers = 1, num_writers = None,
+            outbase="rockstar_halos", particle_mass=-1.0, dm_type=1, 
+            force_res=None):
         r"""Spawns the Rockstar Halo finder, distributes dark matter
         particles and finds halos.
 
@@ -204,7 +205,7 @@
         # Note that Rockstar does not support subvolumes.
         # We assume that all of the snapshots in the time series
         # use the same domain info as the first snapshots.
-        if not isinstance(ts,TimeSeriesData):
+        if not isinstance(ts, TimeSeriesData):
             ts = TimeSeriesData([ts])
         self.ts = ts
         self.dm_type = dm_type
@@ -214,34 +215,29 @@
                 return (data["particle_type"]==dm_type).sum()
             except KeyError:
                 return np.prod(data["particle_position_x"].shape)
-        add_field("particle_count",function=_particle_count, not_in_all=True,
-            particle_type=True)
+        add_field("particle_count", function=_particle_count,
+                  not_in_all=True, particle_type=True)
         # Get total_particles in parallel.
         dd = tpf.h.all_data()
         self.total_particles = int(dd.quantities['TotalQuantity']('particle_count')[0])
-        self.hierarchy = tpf.h
         self.particle_mass = particle_mass 
+        self.left_edge = tpf.domain_left_edge
+        self.right_edge = tpf.domain_right_edge
         self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
-        if outbase is None:
-            outbase = 'rockstar_halos'
         self.outbase = outbase
-        self.particle_mass = particle_mass
         if force_res is None:
             self.force_res = ts[-1].h.get_smallest_dx() * ts[-1]['mpch']
         else:
             self.force_res = force_res
-        self.left_edge = tpf.domain_left_edge
-        self.right_edge = tpf.domain_right_edge
-        self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
         # We set up the workgroups *before* initializing
         # ParallelAnalysisInterface. Everyone is their own workgroup!
+        ParallelAnalysisInterface.__init__(self)
         self.pool = ProcessorPool()
-        for i in range(ytcfg.getint("yt", "__global_parallel_size")):
-             self.pool.add_workgroup(size=1)
-        ParallelAnalysisInterface.__init__(self)
-        for wg in self.pool.workgroups:
-            if self.pool.comm.rank in wg.ranks:
-                self.workgroup = wg
+        self.pool, self.workgroup = ProcessorPool.from_sizes(
+           [ (1, "server"),
+             (self.num_readers, "readers"),
+             (self.num_writers, "writers") ]
+        )
         self.handler = rockstar_interface.RockstarInterface(
                 self.ts, dd)
 
@@ -249,7 +245,7 @@
         self.pool.free_all()
 
     def _get_hosts(self):
-        if self.pool.comm.size == 1 or self.pool.comm.rank == 0:
+        if self.comm.rank == 0 or self.comm.size == 1:
             server_address = socket.gethostname()
             sock = socket.socket()
             sock.bind(('', 0))
@@ -257,7 +253,7 @@
             del sock
         else:
             server_address, port = None, None
-        self.server_address, self.port = self.pool.comm.mpi_bcast(
+        self.server_address, self.port = self.comm.mpi_bcast(
             (server_address, port))
         self.port = str(self.port)
 
@@ -271,19 +267,19 @@
         self.handler.setup_rockstar(self.server_address, self.port,
                     len(self.ts), self.total_particles, 
                     self.dm_type,
-                    parallel = self.pool.comm.size > 1,
+                    parallel = self.comm.size > 1,
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,
                     writing_port = -1,
                     block_ratio = block_ratio,
                     outbase = self.outbase,
-                    force_res=self.force_res,
+                    force_res = self.force_res,
                     particle_mass = float(self.particle_mass),
                     **kwargs)
         # Make the directory to store the halo lists in.
-        if self.pool.comm.rank == 0:
+        if self.comm.rank == 0:
             if not os.path.exists(self.outbase):
-                os.mkdir(self.outbase)
+                os.makedirs(self.outbase)
             # Make a record of which dataset corresponds to which set of
             # output files because it will be easy to lose this connection.
             fp = open(self.outbase + '/pfs.txt', 'w')
@@ -294,15 +290,15 @@
                 fp.write(line)
             fp.close()
         # This barrier makes sure the directory exists before it might be used.
-        self.pool.comm.barrier()
-        if self.pool.comm.size == 1:
+        self.comm.barrier()
+        if self.comm.size == 1:
             self.handler.call_rockstar()
         else:
             # Split up the work.
-            self.runner.split_work(self.pool)
+            self.runner.split_work()
             # And run it!
-            self.runner.run(self.handler, self.pool)
-        self.pool.comm.barrier()
+            self.runner.run(self.handler, self.workgroup)
+        self.comm.barrier()
         self.pool.free_all()
     
     def halo_list(self,file_name='out_0.list'):


diff -r 74161ead4b7fc2fc2a405ffe9d2d19cfd1786b8f -r 9239d5a8c5dd2dec6834a227f0a94ef7d28cf481 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -146,100 +146,6 @@
     np.float64_t AVG_PARTICLE_SPACING
     np.int64_t SINGLE_SNAP
 
-def print_rockstar_settings():
-    # We have to do the config
-    print "FILE_FORMAT =", FILE_FORMAT
-    print "PARTICLE_MASS =", PARTICLE_MASS
-
-    print "MASS_DEFINITION =", MASS_DEFINITION
-    print "MIN_HALO_OUTPUT_SIZE =", MIN_HALO_OUTPUT_SIZE
-    print "FORCE_RES =", FORCE_RES
-
-    print "SCALE_NOW =", SCALE_NOW
-    print "h0 =", h0
-    print "Ol =", Ol
-    print "Om =", Om
-
-    print "GADGET_ID_BYTES =", GADGET_ID_BYTES
-    print "GADGET_MASS_CONVERSION =", GADGET_MASS_CONVERSION
-    print "GADGET_LENGTH_CONVERSION =", GADGET_LENGTH_CONVERSION
-    print "GADGET_SKIP_NON_HALO_PARTICLES =", GADGET_SKIP_NON_HALO_PARTICLES
-    print "RESCALE_PARTICLE_MASS =", RESCALE_PARTICLE_MASS
-
-    print "PARALLEL_IO =", PARALLEL_IO
-    print "PARALLEL_IO_SERVER_ADDRESS =", PARALLEL_IO_SERVER_ADDRESS
-    print "PARALLEL_IO_SERVER_PORT =", PARALLEL_IO_SERVER_PORT
-    print "PARALLEL_IO_WRITER_PORT =", PARALLEL_IO_WRITER_PORT
-    print "PARALLEL_IO_SERVER_INTERFACE =", PARALLEL_IO_SERVER_INTERFACE
-    print "RUN_ON_SUCCESS =", RUN_ON_SUCCESS
-
-    print "INBASE =", INBASE
-    print "FILENAME =", FILENAME
-    print "STARTING_SNAP =", STARTING_SNAP
-    print "NUM_SNAPS =", NUM_SNAPS
-    print "NUM_BLOCKS =", NUM_BLOCKS
-    print "NUM_READERS =", NUM_READERS
-    print "PRELOAD_PARTICLES =", PRELOAD_PARTICLES
-    print "SNAPSHOT_NAMES =", SNAPSHOT_NAMES
-    print "LIGHTCONE_ALT_SNAPS =", LIGHTCONE_ALT_SNAPS
-    print "BLOCK_NAMES =", BLOCK_NAMES
-
-    print "OUTBASE =", OUTBASE
-    print "OVERLAP_LENGTH =", OVERLAP_LENGTH
-    print "NUM_WRITERS =", NUM_WRITERS
-    print "FORK_READERS_FROM_WRITERS =", FORK_READERS_FROM_WRITERS
-    print "FORK_PROCESSORS_PER_MACHINE =", FORK_PROCESSORS_PER_MACHINE
-
-    print "OUTPUT_FORMAT =", OUTPUT_FORMAT
-    print "DELETE_BINARY_OUTPUT_AFTER_FINISHED =", DELETE_BINARY_OUTPUT_AFTER_FINISHED
-    print "FULL_PARTICLE_CHUNKS =", FULL_PARTICLE_CHUNKS
-    print "BGC2_SNAPNAMES =", BGC2_SNAPNAMES
-
-    print "BOUND_PROPS =", BOUND_PROPS
-    print "BOUND_OUT_TO_HALO_EDGE =", BOUND_OUT_TO_HALO_EDGE
-    print "DO_MERGER_TREE_ONLY =", DO_MERGER_TREE_ONLY
-    print "IGNORE_PARTICLE_IDS =", IGNORE_PARTICLE_IDS
-    print "TRIM_OVERLAP =", TRIM_OVERLAP
-    print "ROUND_AFTER_TRIM =", ROUND_AFTER_TRIM
-    print "LIGHTCONE =", LIGHTCONE
-    print "PERIODIC =", PERIODIC
-
-    print "LIGHTCONE_ORIGIN =", LIGHTCONE_ORIGIN[0]
-    print "LIGHTCONE_ORIGIN[1] =", LIGHTCONE_ORIGIN[1]
-    print "LIGHTCONE_ORIGIN[2] =", LIGHTCONE_ORIGIN[2]
-    print "LIGHTCONE_ALT_ORIGIN =", LIGHTCONE_ALT_ORIGIN[0]
-    print "LIGHTCONE_ALT_ORIGIN[1] =", LIGHTCONE_ALT_ORIGIN[1]
-    print "LIGHTCONE_ALT_ORIGIN[2] =", LIGHTCONE_ALT_ORIGIN[2]
-
-    print "LIMIT_CENTER =", LIMIT_CENTER[0]
-    print "LIMIT_CENTER[1] =", LIMIT_CENTER[1]
-    print "LIMIT_CENTER[2] =", LIMIT_CENTER[2]
-    print "LIMIT_RADIUS =", LIMIT_RADIUS
-
-    print "SWAP_ENDIANNESS =", SWAP_ENDIANNESS
-    print "GADGET_VARIANT =", GADGET_VARIANT
-
-    print "FOF_FRACTION =", FOF_FRACTION
-    print "FOF_LINKING_LENGTH =", FOF_LINKING_LENGTH
-    print "INCLUDE_HOST_POTENTIAL_RATIO =", INCLUDE_HOST_POTENTIAL_RATIO
-    #print "DOUBLE_COUNT_SUBHALO_MASS_RATIO =", DOUBLE_COUNT_SUBHALO_MASS_RATIO
-    print "TEMPORAL_HALO_FINDING =", TEMPORAL_HALO_FINDING
-    print "MIN_HALO_PARTICLES =", MIN_HALO_PARTICLES
-    print "UNBOUND_THRESHOLD =", UNBOUND_THRESHOLD
-    print "ALT_NFW_METRIC =", ALT_NFW_METRIC
-
-    print "TOTAL_PARTICLES =", TOTAL_PARTICLES
-    print "BOX_SIZE =", BOX_SIZE
-    #print "OUTPUT_HMAD =", OUTPUT_HMAD
-    #print "OUTPUT_PARTICLES =", OUTPUT_PARTICLES
-    print "OUTPUT_LEVELS =", OUTPUT_LEVELS
-    print "DUMP_PARTICLES =", DUMP_PARTICLES[0]
-    print "DUMP_PARTICLES[1] =", DUMP_PARTICLES[1]
-    print "DUMP_PARTICLES[2] =", DUMP_PARTICLES[2]
-
-    print "AVG_PARTICLE_SPACING =", AVG_PARTICLE_SPACING
-    print "SINGLE_SNAP =", SINGLE_SNAP
-
 cdef class RockstarInterface
 cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p) with gil:
     global SCALE_NOW
@@ -252,31 +158,17 @@
     block = int(str(filename).rsplit(".")[-1])
     n = rh.block_ratio
 
-    all_grids = pf.h.grids
     SCALE_NOW = 1.0/(pf.current_redshift+1.0)
     # Now we want to grab data from only a subset of the grids for each reader.
-    if NUM_BLOCKS == 1:
-        grids = all_grids
-    else:
-        if ytcfg.getboolean("yt", "inline") == False:
-            fnames = np.array([g.filename for g in all_grids])
-            sort = fnames.argsort()
-            grids = np.array_split(all_grids[sort], NUM_BLOCKS)[block]
-        else:
-            # We must be inline, grap only the local grids.
-            grids  = [g for g in all_grids if g.proc_num ==
-                          ytcfg.getint('yt','__topcomm_parallel_rank')]
-    
     all_fields = set(pf.h.derived_field_list + pf.h.field_list)
 
     # First we need to find out how many this reader is going to read in
     # if the number of readers > 1.
     if NUM_BLOCKS > 1:
         local_parts = 0
-        for g in grids:
+        for g in pf.h._get_objs("grids"):
             if g.NumberOfParticles == 0: continue
             if "particle_type" in all_fields:
-                #iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
                 iddm = g["particle_type"] == rh.dm_type
             else:
                 iddm = Ellipsis
@@ -297,7 +189,7 @@
     left_edge[2] = pf.domain_left_edge[2]
     left_edge[3] = left_edge[4] = left_edge[5] = 0.0
     pi = 0
-    for g in grids:
+    for g in pf.h._get_objs("grids"):
         if g.NumberOfParticles == 0: continue
         if "particle_type" in all_fields:
             iddm = g["particle_type"] == rh.dm_type



https://bitbucket.org/yt_analysis/yt/changeset/ce4a238aa912/
changeset:   ce4a238aa912
branch:      yt
user:        MatthewTurk
date:        2012-11-28 15:03:31
summary:     Changing docstring to reflect new TimeSeries initialization
affected #:  2 files

diff -r 9239d5a8c5dd2dec6834a227f0a94ef7d28cf481 -r ce4a238aa9124cf5413edeb5db1b30400319bf72 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -186,9 +186,7 @@
         from yt.mods import *
         import sys
 
-        files = glob.glob('/u/cmoody3/data/a*')
-        files.sort()
-        ts = TimeSeriesData.from_filenames(files)
+        ts = TimeSeriesData.from_filenames('/u/cmoody3/data/a*')
         pm = 7.81769027e+11
         rh = RockstarHaloFinder(ts, particle_mass=pm)
         rh.run()


diff -r 9239d5a8c5dd2dec6834a227f0a94ef7d28cf481 -r ce4a238aa9124cf5413edeb5db1b30400319bf72 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -146,7 +146,9 @@
     np.float64_t AVG_PARTICLE_SPACING
     np.int64_t SINGLE_SNAP
 
+# Forward declare
 cdef class RockstarInterface
+
 cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p) with gil:
     global SCALE_NOW
     cdef np.float64_t conv[6], left_edge[6]
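
The docstring change reflects that TimeSeriesData.from_filenames() accepts a
glob pattern directly, so the manual glob.glob() and sort are no longer
needed. A minimal usage sketch (assumes a yt 2.x install with these changes;
the path pattern is the one from the docstring above):

    from yt.mods import *

    # from_filenames expands and orders the pattern itself.
    ts = TimeSeriesData.from_filenames('/u/cmoody3/data/a*')
    for pf in ts:
        print(pf.current_time)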



https://bitbucket.org/yt_analysis/yt/changeset/27fa49e34965/
changeset:   27fa49e34965
branch:      yt
user:        MatthewTurk
date:        2012-11-29 21:03:23
summary:     Change Rockstar halo finding to use readers *only* for reading data.  This
allows the readers to be the only ones that instantiate hierarchies, unless
the hierarchies were already instantiated before calling Rockstar.

This means we will eventually need to swap out communicators for these
items, as we are adding new topcomms during Rockstar halo finding.
affected #:  2 files

diff -r ce4a238aa9124cf5413edeb5db1b30400319bf72 -r 27fa49e34965108ac4e60eff39d14968ae8c7571 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -191,6 +191,7 @@
         rh = RockstarHaloFinder(ts, particle_mass=pm)
         rh.run()
         """
+        ParallelAnalysisInterface.__init__(self)
         # Decide how we're working.
         if ytcfg.getboolean("yt", "inline") == True:
             self.runner = InlineRunner(num_writers)
@@ -208,36 +209,46 @@
         self.ts = ts
         self.dm_type = dm_type
         tpf = ts.__iter__().next()
+        self.outbase = outbase
+        self.particle_mass = particle_mass
+        if force_res is None:
+            tpf = ts[-1] # Cache a reference
+            self.force_res = tpf.h.get_smallest_dx() * tpf['mpch']
+            # We have to delete now to wipe the hierarchy
+            del tpf
+        else:
+            self.force_res = force_res
+        # We set up the workgroups *before* initializing
+        # ParallelAnalysisInterface. Everyone is their own workgroup!
+        self.pool = ProcessorPool()
+        self.pool, self.workgroup = ProcessorPool.from_sizes(
+           [ (1, "server"),
+             (self.num_readers, "readers"),
+             (self.num_writers, "writers") ]
+        )
+        p = self._setup_parameters(ts)
+        params = self.comm.mpi_bcast(p, root = self.pool['readers'].ranks[0])
+        self.__dict__.update(params)
+        self.handler = rockstar_interface.RockstarInterface(self.ts)
+
+    def _setup_parameters(self, ts):
+        if self.workgroup.name != "readers": return None
+        tpf = ts[0]
         def _particle_count(field, data):
             try:
-                return (data["particle_type"]==dm_type).sum()
+                return (data["particle_type"]==self.dm_type).sum()
             except KeyError:
                 return np.prod(data["particle_position_x"].shape)
         add_field("particle_count", function=_particle_count,
                   not_in_all=True, particle_type=True)
         # Get total_particles in parallel.
         dd = tpf.h.all_data()
-        self.total_particles = int(dd.quantities['TotalQuantity']('particle_count')[0])
-        self.particle_mass = particle_mass 
-        self.left_edge = tpf.domain_left_edge
-        self.right_edge = tpf.domain_right_edge
-        self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
-        self.outbase = outbase
-        if force_res is None:
-            self.force_res = ts[-1].h.get_smallest_dx() * ts[-1]['mpch']
-        else:
-            self.force_res = force_res
-        # We set up the workgroups *before* initializing
-        # ParallelAnalysisInterface. Everyone is their own workgroup!
-        ParallelAnalysisInterface.__init__(self)
-        self.pool = ProcessorPool()
-        self.pool, self.workgroup = ProcessorPool.from_sizes(
-           [ (1, "server"),
-             (self.num_readers, "readers"),
-             (self.num_writers, "writers") ]
-        )
-        self.handler = rockstar_interface.RockstarInterface(
-                self.ts, dd)
+        p = {}
+        p['total_particles'] = int(dd.quantities['TotalQuantity']('particle_count')[0])
+        p['left_edge'] = tpf.domain_left_edge
+        p['right_edge'] = tpf.domain_right_edge
+        p['center'] = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
+        return p
 
     def __del__(self):
         self.pool.free_all()


diff -r ce4a238aa9124cf5413edeb5db1b30400319bf72 -r 27fa49e34965108ac4e60eff39d14968ae8c7571 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -226,10 +226,9 @@
     cdef public int dm_type
     cdef public int total_particles
 
-    def __cinit__(self, ts, data_source):
+    def __cinit__(self, ts):
         self.ts = ts
         self.tsl = ts.__iter__() #timseries generator used by read
-        self.data_source = data_source
 
     def setup_rockstar(self, char *server_address, char *server_port,
                        int num_snaps, np.int64_t total_particles,
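
In the new _setup_parameters()/mpi_bcast pattern above, only the readers
touch the hierarchy; every other rank receives the resulting parameters by
broadcast. A minimal mpi4py analogue of that pattern (mpi4py, the root rank
choice, and the sample values are stand-ins for yt's own communicator and
data):

    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    reader_root = 1 if comm.size > 1 else 0  # illustrative root rank

    params = None
    if comm.rank == reader_root:
        # Only the "reader" inspects the dataset and builds the parameters.
        params = {"total_particles": 128**3,
                  "left_edge": [0.0, 0.0, 0.0],
                  "right_edge": [1.0, 1.0, 1.0]}
    params = comm.bcast(params, root=reader_root)
    print(comm.rank, params["total_particles"])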



https://bitbucket.org/yt_analysis/yt/changeset/ee235fb2046e/
changeset:   ee235fb2046e
branch:      yt
user:        sskory
date:        2012-11-30 20:29:46
summary:     These Halo attributes need to be set here in __init__, otherwise they're class-level and identical for all halos.
affected #:  1 file

diff -r 271a1313d9be206a52ab3b3627089e49a45b0c70 -r ee235fb2046e03f12c8ea6c691eaa1536a27e572 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -75,11 +75,6 @@
     dont_wrap = ["get_sphere", "write_particle_list"]
     extra_wrap = ["__getitem__"]
 
-    # Used for Loaded and RockstarHalos
-    _saved_fields = {}
-    _ds_sort = None
-    _particle_mask = None
-
     def __init__(self, halo_list, id, indices=None, size=None, CoM=None,
         max_dens_point=None, group_total_mass=None, max_radius=None,
         bulk_vel=None, tasks=None, rms_vel=None, supp=None):
@@ -111,6 +106,9 @@
             self.supp = {}
         else:
             self.supp = supp
+        self._saved_fields = {}
+        self._ds_sort = None
+        self._particle_mask = None
 
     @property
     def particle_mask(self):
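
The motivation for moving _saved_fields and friends into __init__ is the
usual Python pitfall: a mutable object defined at class level is shared by
every instance. A self-contained illustration:

    class SharedCache:
        _saved_fields = {}              # class attribute: one dict for all

    class PerInstanceCache:
        def __init__(self):
            self._saved_fields = {}     # instance attribute: one per object

    a, b = SharedCache(), SharedCache()
    a._saved_fields["Density"] = 1.0
    print(b._saved_fields)              # {'Density': 1.0} -- leaks across halos

    c, d = PerInstanceCache(), PerInstanceCache()
    c._saved_fields["Density"] = 1.0
    print(d._saved_fields)              # {} -- isolated, as intended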



https://bitbucket.org/yt_analysis/yt/changeset/1b2b0d4e334b/
changeset:   1b2b0d4e334b
branch:      yt
user:        sskory
date:        2012-11-30 20:33:22
summary:     Merged in Matt's changes.
affected #:  3 files

diff -r ee235fb2046e03f12c8ea6c691eaa1536a27e572 -r 1b2b0d4e334b34b3993aeb6f2e404904dc5dfe8b doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -43,7 +43,7 @@
 INST_PYX=0      # Install PyX?  Sometimes PyX can be problematic without a
                 # working TeX installation.
 INST_0MQ=1      # Install 0mq (for IPython) and affiliated bindings?
-INST_ROCKSTAR=1 # Install the Rockstar halo finder?
+INST_ROCKSTAR=0 # Install the Rockstar halo finder?
 
 # If you've got YT some other place, set this to point to it.
 YT_DIR=""


diff -r ee235fb2046e03f12c8ea6c691eaa1536a27e572 -r 1b2b0d4e334b34b3993aeb6f2e404904dc5dfe8b yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -40,40 +40,15 @@
 from os import mkdir
 from os import path
 
-# Get some definitions from Rockstar directly.
-if "ROCKSTAR_DIR" in os.environ:
-    ROCKSTAR_DIR = os.environ["ROCKSTAR_DIR"]
-elif os.path.exists("rockstar.cfg"):
-    ROCKSTAR_DIR = open("rockstar.cfg").read().strip()
-else:
-    print "Reading Rockstar location from rockstar.cfg failed."
-    print "Please place the base directory of your"
-    print "Rockstar install in rockstar.cfg and restart."
-    print "(ex: \"echo '/path/to/Rockstar-0.99' > rockstar.cfg\" )"
-    sys.exit(1)
-lines = file(path.join(ROCKSTAR_DIR, 'server.h'))
-READER_TYPE = None
-WRITER_TYPE = None
-for line in lines:
-    if "READER_TYPE" in line:
-        line = line.split()
-        READER_TYPE = int(line[-1])
-    if "WRITER_TYPE" in line:
-        line = line.split()
-        WRITER_TYPE = int(line[-1])
-    if READER_TYPE != None and WRITER_TYPE != None:
-        break
-lines.close()
-
 class InlineRunner(ParallelAnalysisInterface):
     def __init__(self, num_writers):
         # If this is being run inline, num_readers == comm.size, always.
-        self.num_readers = ytcfg.getint("yt", "__global_parallel_size")
+        psize = ytcfg.getint("yt", "__global_parallel_size")
+        self.num_readers = psize
         if num_writers is None:
-            self.num_writers =  ytcfg.getint("yt", "__global_parallel_size")
+            self.num_writers =  psize
         else:
-            self.num_writers = min(num_writers,
-                ytcfg.getint("yt", "__global_parallel_size"))
+            self.num_writers = min(num_writers, psize)
 
     def split_work(self, pool):
         avail = range(pool.comm.size)
@@ -110,12 +85,12 @@
             time.sleep(0.1 + pool.comm.rank/10.0)
             writer_pid = os.fork()
             if writer_pid == 0:
-                handler.start_client(WRITER_TYPE)
+                handler.start_writer()
                 os._exit(0)
         # Start readers, not forked.
         if pool.comm.rank in self.readers:
             time.sleep(0.1 + pool.comm.rank/10.0)
-            handler.start_client(READER_TYPE)
+            handler.start_reader()
         # Make sure the forks are done, which they should be.
         if writer_pid != 0:
             os.waitpid(writer_pid, 0)
@@ -125,46 +100,35 @@
 class StandardRunner(ParallelAnalysisInterface):
     def __init__(self, num_readers, num_writers):
         self.num_readers = num_readers
+        psize = ytcfg.getint("yt", "__global_parallel_size")
         if num_writers is None:
-            self.num_writers = ytcfg.getint("yt", "__global_parallel_size") \
-                - num_readers - 1
+            self.num_writers =  psize - num_readers - 1
         else:
-            self.num_writers = min(num_writers,
-                ytcfg.getint("yt", "__global_parallel_size"))
-        if self.num_readers + self.num_writers + 1 != ytcfg.getint("yt", \
-                "__global_parallel_size"):
+            self.num_writers = min(num_writers, psize)
+        if self.num_readers + self.num_writers + 1 != psize:
             mylog.error('%i reader + %i writers != %i mpi',
-                    self.num_readers, self.num_writers,
-                    ytcfg.getint("yt", "__global_parallel_size"))
+                    self.num_readers, self.num_writers, psize)
             raise RuntimeError
     
-    def split_work(self, pool):
-        # Who is going to do what.
-        avail = range(pool.comm.size)
-        self.writers = []
-        self.readers = []
-        # If we're not running inline, rank 0 should be removed immediately.
-        avail.pop(0)
-        # Now we assign the rest.
-        for i in range(self.num_readers):
-            self.readers.append(avail.pop(0))
-        for i in range(self.num_writers):
-            self.writers.append(avail.pop(0))
+    def split_work(self):
+        self.readers = np.arange(self.num_readers) + 1
+        self.writers = np.arange(self.num_writers) + 1 + self.num_readers
     
-    def run(self, handler, pool):
+    def run(self, handler, wg):
         # Not inline so we just launch them directly from our MPI threads.
-        if pool.comm.rank == 0:
+        if wg.name == "server":
             handler.start_server()
-        if pool.comm.rank in self.readers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
-            handler.start_client(READER_TYPE)
-        if pool.comm.rank in self.writers:
-            time.sleep(0.2 + pool.comm.rank/10.0)
-            handler.start_client(WRITER_TYPE)
+        if wg.name == "readers":
+            time.sleep(0.1)
+            handler.start_reader()
+        if wg.name == "writers":
+            time.sleep(0.2)
+            handler.start_writer()
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
-    def __init__(self, ts, num_readers = 1, num_writers = None, 
-            outbase=None,particle_mass=-1.0,dm_type=1,force_res=None):
+    def __init__(self, ts, num_readers = 1, num_writers = None,
+            outbase="rockstar_halos", particle_mass=-1.0, dm_type=1, 
+            force_res=None):
         r"""Spawns the Rockstar Halo finder, distributes dark matter
         particles and finds halos.
 
@@ -222,13 +186,12 @@
         from yt.mods import *
         import sys
 
-        files = glob.glob('/u/cmoody3/data/a*')
-        files.sort()
-        ts = TimeSeriesData.from_filenames(files)
+        ts = TimeSeriesData.from_filenames('/u/cmoody3/data/a*')
         pm = 7.81769027e+11
         rh = RockstarHaloFinder(ts, particle_mass=pm)
         rh.run()
         """
+        ParallelAnalysisInterface.__init__(self)
         # Decide how we're working.
         if ytcfg.getboolean("yt", "inline") == True:
             self.runner = InlineRunner(num_writers)
@@ -241,52 +204,57 @@
         # Note that Rockstar does not support subvolumes.
         # We assume that all of the snapshots in the time series
         # use the same domain info as the first snapshots.
-        if not isinstance(ts,TimeSeriesData):
+        if not isinstance(ts, TimeSeriesData):
             ts = TimeSeriesData([ts])
         self.ts = ts
         self.dm_type = dm_type
         tpf = ts.__iter__().next()
-        def _particle_count(field, data):
-            try:
-                return (data["particle_type"]==dm_type).sum()
-            except KeyError:
-                return np.prod(data["particle_position_x"].shape)
-        add_field("particle_count",function=_particle_count, not_in_all=True,
-            particle_type=True)
-        # Get total_particles in parallel.
-        dd = tpf.h.all_data()
-        self.total_particles = int(dd.quantities['TotalQuantity']('particle_count')[0])
-        self.hierarchy = tpf.h
-        self.particle_mass = particle_mass 
-        self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
-        if outbase is None:
-            outbase = 'rockstar_halos'
         self.outbase = outbase
         self.particle_mass = particle_mass
         if force_res is None:
-            self.force_res = ts[-1].h.get_smallest_dx() * ts[-1]['mpch']
+            tpf = ts[-1] # Cache a reference
+            self.force_res = tpf.h.get_smallest_dx() * tpf['mpch']
+            # We have to delete now to wipe the hierarchy
+            del tpf
         else:
             self.force_res = force_res
-        self.left_edge = tpf.domain_left_edge
-        self.right_edge = tpf.domain_right_edge
-        self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
         # We set up the workgroups *before* initializing
         # ParallelAnalysisInterface. Everyone is their own workgroup!
         self.pool = ProcessorPool()
-        for i in range(ytcfg.getint("yt", "__global_parallel_size")):
-             self.pool.add_workgroup(size=1)
-        ParallelAnalysisInterface.__init__(self)
-        for wg in self.pool.workgroups:
-            if self.pool.comm.rank in wg.ranks:
-                self.workgroup = wg
-        self.handler = rockstar_interface.RockstarInterface(
-                self.ts, dd)
+        self.pool, self.workgroup = ProcessorPool.from_sizes(
+           [ (1, "server"),
+             (self.num_readers, "readers"),
+             (self.num_writers, "writers") ]
+        )
+        p = self._setup_parameters(ts)
+        params = self.comm.mpi_bcast(p, root = self.pool['readers'].ranks[0])
+        self.__dict__.update(params)
+        self.handler = rockstar_interface.RockstarInterface(self.ts)
+
+    def _setup_parameters(self, ts):
+        if self.workgroup.name != "readers": return None
+        tpf = ts[0]
+        def _particle_count(field, data):
+            try:
+                return (data["particle_type"]==self.dm_type).sum()
+            except KeyError:
+                return np.prod(data["particle_position_x"].shape)
+        add_field("particle_count", function=_particle_count,
+                  not_in_all=True, particle_type=True)
+        # Get total_particles in parallel.
+        dd = tpf.h.all_data()
+        p = {}
+        p['total_particles'] = int(dd.quantities['TotalQuantity']('particle_count')[0])
+        p['left_edge'] = tpf.domain_left_edge
+        p['right_edge'] = tpf.domain_right_edge
+        p['center'] = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
+        return p
 
     def __del__(self):
         self.pool.free_all()
 
     def _get_hosts(self):
-        if self.pool.comm.size == 1 or self.pool.comm.rank == 0:
+        if self.comm.rank == 0 or self.comm.size == 1:
             server_address = socket.gethostname()
             sock = socket.socket()
             sock.bind(('', 0))
@@ -294,7 +262,7 @@
             del sock
         else:
             server_address, port = None, None
-        self.server_address, self.port = self.pool.comm.mpi_bcast(
+        self.server_address, self.port = self.comm.mpi_bcast(
             (server_address, port))
         self.port = str(self.port)
 
@@ -308,19 +276,19 @@
         self.handler.setup_rockstar(self.server_address, self.port,
                     len(self.ts), self.total_particles, 
                     self.dm_type,
-                    parallel = self.pool.comm.size > 1,
+                    parallel = self.comm.size > 1,
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,
                     writing_port = -1,
                     block_ratio = block_ratio,
                     outbase = self.outbase,
-                    force_res=self.force_res,
+                    force_res = self.force_res,
                     particle_mass = float(self.particle_mass),
                     **kwargs)
         # Make the directory to store the halo lists in.
-        if self.pool.comm.rank == 0:
+        if self.comm.rank == 0:
             if not os.path.exists(self.outbase):
-                os.mkdir(self.outbase)
+                os.makedirs(self.outbase)
             # Make a record of which dataset corresponds to which set of
             # output files because it will be easy to lose this connection.
             fp = open(self.outbase + '/pfs.txt', 'w')
@@ -331,15 +299,15 @@
                 fp.write(line)
             fp.close()
         # This barrier makes sure the directory exists before it might be used.
-        self.pool.comm.barrier()
-        if self.pool.comm.size == 1:
+        self.comm.barrier()
+        if self.comm.size == 1:
             self.handler.call_rockstar()
         else:
             # Split up the work.
-            self.runner.split_work(self.pool)
+            self.runner.split_work()
             # And run it!
-            self.runner.run(self.handler, self.pool)
-        self.pool.comm.barrier()
+            self.runner.run(self.handler, self.workgroup)
+        self.comm.barrier()
         self.pool.free_all()
     
     def halo_list(self,file_name='out_0.list'):


diff -r ee235fb2046e03f12c8ea6c691eaa1536a27e572 -r 1b2b0d4e334b34b3993aeb6f2e404904dc5dfe8b yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -48,13 +48,15 @@
 
 cdef import from "server.h" nogil:
     int server()
+    np.int64_t READER_TYPE
+    np.int64_t WRITER_TYPE
 
 cdef import from "client.h" nogil:
     void client(np.int64_t in_type)
 
 cdef import from "meta_io.h":
     void read_particles(char *filename)
-    void output_and_free_halos(np.int64_t id_offset, np.int64_t snap, 
+    void output_halos(np.int64_t id_offset, np.int64_t snap, 
 			   np.int64_t chunk, float *bounds)
 
 cdef import from "config_vars.h":
@@ -144,101 +146,9 @@
     np.float64_t AVG_PARTICLE_SPACING
     np.int64_t SINGLE_SNAP
 
-def print_rockstar_settings():
-    # We have to do the config
-    print "FILE_FORMAT =", FILE_FORMAT
-    print "PARTICLE_MASS =", PARTICLE_MASS
+# Forward declare
+cdef class RockstarInterface
 
-    print "MASS_DEFINITION =", MASS_DEFINITION
-    print "MIN_HALO_OUTPUT_SIZE =", MIN_HALO_OUTPUT_SIZE
-    print "FORCE_RES =", FORCE_RES
-
-    print "SCALE_NOW =", SCALE_NOW
-    print "h0 =", h0
-    print "Ol =", Ol
-    print "Om =", Om
-
-    print "GADGET_ID_BYTES =", GADGET_ID_BYTES
-    print "GADGET_MASS_CONVERSION =", GADGET_MASS_CONVERSION
-    print "GADGET_LENGTH_CONVERSION =", GADGET_LENGTH_CONVERSION
-    print "GADGET_SKIP_NON_HALO_PARTICLES =", GADGET_SKIP_NON_HALO_PARTICLES
-    print "RESCALE_PARTICLE_MASS =", RESCALE_PARTICLE_MASS
-
-    print "PARALLEL_IO =", PARALLEL_IO
-    print "PARALLEL_IO_SERVER_ADDRESS =", PARALLEL_IO_SERVER_ADDRESS
-    print "PARALLEL_IO_SERVER_PORT =", PARALLEL_IO_SERVER_PORT
-    print "PARALLEL_IO_WRITER_PORT =", PARALLEL_IO_WRITER_PORT
-    print "PARALLEL_IO_SERVER_INTERFACE =", PARALLEL_IO_SERVER_INTERFACE
-    print "RUN_ON_SUCCESS =", RUN_ON_SUCCESS
-
-    print "INBASE =", INBASE
-    print "FILENAME =", FILENAME
-    print "STARTING_SNAP =", STARTING_SNAP
-    print "NUM_SNAPS =", NUM_SNAPS
-    print "NUM_BLOCKS =", NUM_BLOCKS
-    print "NUM_READERS =", NUM_READERS
-    print "PRELOAD_PARTICLES =", PRELOAD_PARTICLES
-    print "SNAPSHOT_NAMES =", SNAPSHOT_NAMES
-    print "LIGHTCONE_ALT_SNAPS =", LIGHTCONE_ALT_SNAPS
-    print "BLOCK_NAMES =", BLOCK_NAMES
-
-    print "OUTBASE =", OUTBASE
-    print "OVERLAP_LENGTH =", OVERLAP_LENGTH
-    print "NUM_WRITERS =", NUM_WRITERS
-    print "FORK_READERS_FROM_WRITERS =", FORK_READERS_FROM_WRITERS
-    print "FORK_PROCESSORS_PER_MACHINE =", FORK_PROCESSORS_PER_MACHINE
-
-    print "OUTPUT_FORMAT =", OUTPUT_FORMAT
-    print "DELETE_BINARY_OUTPUT_AFTER_FINISHED =", DELETE_BINARY_OUTPUT_AFTER_FINISHED
-    print "FULL_PARTICLE_CHUNKS =", FULL_PARTICLE_CHUNKS
-    print "BGC2_SNAPNAMES =", BGC2_SNAPNAMES
-
-    print "BOUND_PROPS =", BOUND_PROPS
-    print "BOUND_OUT_TO_HALO_EDGE =", BOUND_OUT_TO_HALO_EDGE
-    print "DO_MERGER_TREE_ONLY =", DO_MERGER_TREE_ONLY
-    print "IGNORE_PARTICLE_IDS =", IGNORE_PARTICLE_IDS
-    print "TRIM_OVERLAP =", TRIM_OVERLAP
-    print "ROUND_AFTER_TRIM =", ROUND_AFTER_TRIM
-    print "LIGHTCONE =", LIGHTCONE
-    print "PERIODIC =", PERIODIC
-
-    print "LIGHTCONE_ORIGIN =", LIGHTCONE_ORIGIN[0]
-    print "LIGHTCONE_ORIGIN[1] =", LIGHTCONE_ORIGIN[1]
-    print "LIGHTCONE_ORIGIN[2] =", LIGHTCONE_ORIGIN[2]
-    print "LIGHTCONE_ALT_ORIGIN =", LIGHTCONE_ALT_ORIGIN[0]
-    print "LIGHTCONE_ALT_ORIGIN[1] =", LIGHTCONE_ALT_ORIGIN[1]
-    print "LIGHTCONE_ALT_ORIGIN[2] =", LIGHTCONE_ALT_ORIGIN[2]
-
-    print "LIMIT_CENTER =", LIMIT_CENTER[0]
-    print "LIMIT_CENTER[1] =", LIMIT_CENTER[1]
-    print "LIMIT_CENTER[2] =", LIMIT_CENTER[2]
-    print "LIMIT_RADIUS =", LIMIT_RADIUS
-
-    print "SWAP_ENDIANNESS =", SWAP_ENDIANNESS
-    print "GADGET_VARIANT =", GADGET_VARIANT
-
-    print "FOF_FRACTION =", FOF_FRACTION
-    print "FOF_LINKING_LENGTH =", FOF_LINKING_LENGTH
-    print "INCLUDE_HOST_POTENTIAL_RATIO =", INCLUDE_HOST_POTENTIAL_RATIO
-    print "DOUBLE_COUNT_SUBHALO_MASS_RATIO =", DOUBLE_COUNT_SUBHALO_MASS_RATIO
-    print "TEMPORAL_HALO_FINDING =", TEMPORAL_HALO_FINDING
-    print "MIN_HALO_PARTICLES =", MIN_HALO_PARTICLES
-    print "UNBOUND_THRESHOLD =", UNBOUND_THRESHOLD
-    print "ALT_NFW_METRIC =", ALT_NFW_METRIC
-
-    print "TOTAL_PARTICLES =", TOTAL_PARTICLES
-    print "BOX_SIZE =", BOX_SIZE
-    print "OUTPUT_HMAD =", OUTPUT_HMAD
-    print "OUTPUT_PARTICLES =", OUTPUT_PARTICLES
-    print "OUTPUT_LEVELS =", OUTPUT_LEVELS
-    print "DUMP_PARTICLES =", DUMP_PARTICLES[0]
-    print "DUMP_PARTICLES[1] =", DUMP_PARTICLES[1]
-    print "DUMP_PARTICLES[2] =", DUMP_PARTICLES[2]
-
-    print "AVG_PARTICLE_SPACING =", AVG_PARTICLE_SPACING
-    print "SINGLE_SNAP =", SINGLE_SNAP
-
-cdef class RockstarInterface
 cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p) with gil:
     global SCALE_NOW
     cdef np.float64_t conv[6], left_edge[6]
@@ -250,31 +160,17 @@
     block = int(str(filename).rsplit(".")[-1])
     n = rh.block_ratio
 
-    all_grids = pf.h.grids
     SCALE_NOW = 1.0/(pf.current_redshift+1.0)
     # Now we want to grab data from only a subset of the grids for each reader.
-    if NUM_BLOCKS == 1:
-        grids = all_grids
-    else:
-        if ytcfg.getboolean("yt", "inline") == False:
-            fnames = np.array([g.filename for g in all_grids])
-            sort = fnames.argsort()
-            grids = np.array_split(all_grids[sort], NUM_BLOCKS)[block]
-        else:
-            # We must be inline, grap only the local grids.
-            grids  = [g for g in all_grids if g.proc_num ==
-                          ytcfg.getint('yt','__topcomm_parallel_rank')]
-    
     all_fields = set(pf.h.derived_field_list + pf.h.field_list)
 
     # First we need to find out how many this reader is going to read in
     # if the number of readers > 1.
     if NUM_BLOCKS > 1:
         local_parts = 0
-        for g in grids:
+        for g in pf.h._get_objs("grids"):
             if g.NumberOfParticles == 0: continue
             if "particle_type" in all_fields:
-                #iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
                 iddm = g["particle_type"] == rh.dm_type
             else:
                 iddm = Ellipsis
@@ -295,7 +191,7 @@
     left_edge[2] = pf.domain_left_edge[2]
     left_edge[3] = left_edge[4] = left_edge[5] = 0.0
     pi = 0
-    for g in grids:
+    for g in pf.h._get_objs("grids"):
         if g.NumberOfParticles == 0: continue
         if "particle_type" in all_fields:
             iddm = g["particle_type"] == rh.dm_type
@@ -330,10 +226,9 @@
     cdef public int dm_type
     cdef public int total_particles
 
-    def __cinit__(self, ts, data_source):
+    def __cinit__(self, ts):
         self.ts = ts
         self.tsl = ts.__iter__() #timseries generator used by read
-        self.data_source = data_source
 
     def setup_rockstar(self, char *server_address, char *server_port,
                        int num_snaps, np.int64_t total_particles,
@@ -400,12 +295,16 @@
     def call_rockstar(self):
         read_particles("generic")
         rockstar(NULL, 0)
-        output_and_free_halos(0, 0, 0, NULL)
+        output_halos(0, 0, 0, NULL)
 
     def start_server(self):
         with nogil:
             server()
 
-    def start_client(self, in_type):
-        in_type = np.int64(in_type)
+    def start_reader(self):
+        cdef np.int64_t in_type = np.int64(READER_TYPE)
         client(in_type)
+
+    def start_writer(self):
+        cdef np.int64_t in_type = np.int64(WRITER_TYPE)
+        client(in_type)



https://bitbucket.org/yt_analysis/yt/changeset/8deb84e38f1d/
changeset:   8deb84e38f1d
branch:      yt
user:        sskory
date:        2012-11-30 22:34:54
summary:     Updating Rockstar to work with 0.99.6.
affected #:  3 files

diff -r 1b2b0d4e334b34b3993aeb6f2e404904dc5dfe8b -r 8deb84e38f1dcafb93d1b6cf124327b5f10119fa yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1321,18 +1321,24 @@
     _halo_dt = np.dtype([('id', np.int64), ('pos', (np.float32, 6)),
         ('corevel', (np.float32, 3)), ('bulkvel', (np.float32, 3)),
         ('m', np.float32), ('r', np.float32), ('child_r', np.float32),
+        ('vmax_r', np.float32), 
         ('mgrav', np.float32), ('vmax', np.float32),
         ('rvmax', np.float32), ('rs', np.float32),
+        ('klypin_rs', np.float32), 
         ('vrms', np.float32), ('J', (np.float32, 3)),
         ('energy', np.float32), ('spin', np.float32),
-        ('padding1', np.float32), ('num_p', np.int64),
+        ('alt_m', (np.float32, 4)), ('Xoff', np.float32),
+        ('Voff', np.float32), ('b_to_a', np.float32),
+        ('c_to_a', np.float32), ('A', (np.float32, 3)),
+        ('bullock_spin', np.float32), ('kin_to_pot', np.float32),
+        ('num_p', np.int64),
         ('num_child_particles', np.int64), ('p_start', np.int64),
         ('desc', np.int64), ('flags', np.int64), ('n_core', np.int64),
         ('min_pos_err', np.float32), ('min_vel_err', np.float32),
         ('min_bulkvel_err', np.float32), ('padding2', np.float32),])
-    # Above, padding1&2 are due to c byte ordering which pads between
+    # Above, padding* are due to c byte ordering which pads between
     # 4 and 8 byte values in the struct as to not overlap memory registers.
-    _tocleanup = ['padding1', 'padding2']
+    _tocleanup = ['padding2']
 
     def __init__(self, pf, out_list):
         ParallelAnalysisInterface.__init__(self)


diff -r 1b2b0d4e334b34b3993aeb6f2e404904dc5dfe8b -r 8deb84e38f1dcafb93d1b6cf124327b5f10119fa yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -127,7 +127,7 @@
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
     def __init__(self, ts, num_readers = 1, num_writers = None,
-            outbase="rockstar_halos", particle_mass=-1.0, dm_type=1, 
+            outbase="rockstar_halos", dm_type=1, 
             force_res=None):
         r"""Spawns the Rockstar Halo finder, distributes dark matter
         particles and finds halos.
@@ -157,8 +157,6 @@
         outbase: str
             This is where the out*list files that Rockstar makes should be
             placed. Default is 'rockstar_halos'.
-        particle_mass: float
-            This sets the DM particle mass used in Rockstar.
         dm_type: 1
             In order to exclude stars and other particle types, define
             the dm_type. Default is 1, as Enzo has the DM particle type=1.
@@ -188,7 +186,7 @@
 
         ts = TimeSeriesData.from_filenames('/u/cmoody3/data/a*')
         pm = 7.81769027e+11
-        rh = RockstarHaloFinder(ts, particle_mass=pm)
+        rh = RockstarHaloFinder(ts)
         rh.run()
         """
         ParallelAnalysisInterface.__init__(self)
@@ -208,9 +206,7 @@
             ts = TimeSeriesData([ts])
         self.ts = ts
         self.dm_type = dm_type
-        tpf = ts.__iter__().next()
         self.outbase = outbase
-        self.particle_mass = particle_mass
         if force_res is None:
             tpf = ts[-1] # Cache a reference
             self.force_res = tpf.h.get_smallest_dx() * tpf['mpch']
@@ -243,13 +239,25 @@
                   not_in_all=True, particle_type=True)
         # Get total_particles in parallel.
         dd = tpf.h.all_data()
+        # Get DM particle mass.
+        all_fields = set(tpf.h.derived_field_list + tpf.h.field_list)
+        for g in tpf.h._get_objs("grids"):
+            if g.NumberOfParticles == 0: continue
+            if "particle_type" in all_fields:
+                iddm = g["particle_type"] == self.dm_type
+            else:
+                iddm = Ellipsis
+            particle_mass = g['ParticleMassMsun'][iddm][0] / tpf.hubble_constant
+            break
         p = {}
         p['total_particles'] = int(dd.quantities['TotalQuantity']('particle_count')[0])
         p['left_edge'] = tpf.domain_left_edge
         p['right_edge'] = tpf.domain_right_edge
         p['center'] = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
+        p['particle_mass'] = particle_mass
         return p
 
+
     def __del__(self):
         self.pool.free_all()
 


diff -r 1b2b0d4e334b34b3993aeb6f2e404904dc5dfe8b -r 8deb84e38f1dcafb93d1b6cf124327b5f10119fa yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -233,7 +233,7 @@
     def setup_rockstar(self, char *server_address, char *server_port,
                        int num_snaps, np.int64_t total_particles,
                        int dm_type,
-                       np.float64_t particle_mass = -1.0,
+                       np.float64_t particle_mass,
                        int parallel = False, int num_readers = 1,
                        int num_writers = 1,
                        int writing_port = -1, int block_ratio = 1,
@@ -280,8 +280,6 @@
             #workaround is to make a new directory
             OUTBASE = outbase 
 
-        if particle_mass < 0:
-            particle_mass = tpf.h.grids[0]["ParticleMassMsun"][0] / h0
         PARTICLE_MASS = particle_mass
         PERIODIC = periodic
         BOX_SIZE = (tpf.domain_right_edge[0] -
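
The _halo_dt update above tracks the 0.99.6 binary halo struct; a structured
dtype like this lets NumPy map packed C records straight into a record
array. A toy sketch with a shortened dtype and a hypothetical file name (not
the full Rockstar layout, which also carries a header):

    import numpy as np

    halo_dt = np.dtype([('id', np.int64), ('pos', (np.float32, 6)),
                        ('m', np.float32), ('r', np.float32)])

    # np.fromfile interprets the raw bytes as consecutive halo records.
    halos = np.fromfile("halos_0.0.bin", dtype=halo_dt)  # hypothetical path
    print(halos['m'][:5])   # masses of the first five halos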



https://bitbucket.org/yt_analysis/yt/changeset/3ebd7a48bd38/
changeset:   3ebd7a48bd38
branch:      yt
user:        sskory
date:        2012-11-30 22:35:39
summary:     Merge.
affected #:  14 files

diff -r 8deb84e38f1dcafb93d1b6cf124327b5f10119fa -r 3ebd7a48bd38ca1a4743c4f7d0f803d5563f06eb setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,4 +6,4 @@
 detailed-errors=1
 where=yt
 exclude=answer_testing
-with-xunit=1
\ No newline at end of file
+with-xunit=1


diff -r 8deb84e38f1dcafb93d1b6cf124327b5f10119fa -r 3ebd7a48bd38ca1a4743c4f7d0f803d5563f06eb yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -63,6 +63,8 @@
             bin_table, self.bounds, self.field_names[:],
             truncate=True)
 
+
+
     def add_frequency_bin_field(self, ev_min, ev_max):
         """
         Add a new field to the FieldInfoContainer, which is an
@@ -73,7 +75,7 @@
         interp = self._get_interpolator(ev_min, ev_max)
         name = "XRay_%s_%s" % (ev_min, ev_max)
         def frequency_bin_field(field, data):
-            dd = {'NumberDensity' : np.log10(data["NumberDensity"]),
+            dd = {'H_NumberDensity' : np.log10(data["H_NumberDensity"]),
                   'Temperature'   : np.log10(data["Temperature"])}
             return 10**interp(dd)
         add_field(name, function=frequency_bin_field,


diff -r 8deb84e38f1dcafb93d1b6cf124327b5f10119fa -r 3ebd7a48bd38ca1a4743c4f7d0f803d5563f06eb yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -59,6 +59,10 @@
     hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',
     ipython_notebook = 'False',
+    answer_testing_tolerance = '3',
+    answer_testing_bitwise = 'False',
+    gold_standard_filename = 'gold003',
+    local_standard_filename = 'local001'
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten
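The four new entries above wire the answer-testing machinery (used below in answer_testing_support.py and framework.py) to the yt configuration file. A quick sketch of reading them back, assuming a stock configuration; the literals in the comments are just the defaults introduced here:

    from yt.config import ytcfg

    tolerance = ytcfg.getint("yt", "answer_testing_tolerance")    # decimals for comparisons
    bitwise = ytcfg.getboolean("yt", "answer_testing_bitwise")    # also run GridValuesTest?
    gold = ytcfg.get("yt", "gold_standard_filename")              # e.g. "gold003"
    local = ytcfg.get("yt", "local_standard_filename")            # e.g. "local001"
    print tolerance, bitwise, gold, local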


diff -r 8deb84e38f1dcafb93d1b6cf124327b5f10119fa -r 3ebd7a48bd38ca1a4743c4f7d0f803d5563f06eb yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -2777,12 +2777,12 @@
             ma = np.max(verts, axis=0)
             verts = (verts - mi) / (ma - mi).max()
         if filename is not None and self.comm.rank == 0:
-            f = open(filename, "w")
+            if hasattr(filename, "write"): f = filename
             for v1 in verts:
                 f.write("v %0.16e %0.16e %0.16e\n" % (v1[0], v1[1], v1[2]))
             for i in range(len(verts)/3):
                 f.write("f %s %s %s\n" % (i*3+1, i*3+2, i*3+3))
-            f.close()
+            if not hasattr(filename, "write"): f.close()
         if sample_values is not None:
             return verts, samples
         return verts
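The change above lets the OBJ writer accept an already-open, writable object in place of a filename string, with the caller keeping ownership of the handle. A self-contained sketch of that handle-or-filename pattern, using a hypothetical write_obj helper (the real code lives inside a data-object method whose name is not shown in this hunk):

    from cStringIO import StringIO

    def write_obj(verts, filename):
        # Accept either an open, writable object or a plain filename.
        if hasattr(filename, "write"):
            f = filename                 # caller-owned handle: do not close it
        else:
            f = open(filename, "w")
        for v1 in verts:
            f.write("v %0.16e %0.16e %0.16e\n" % (v1[0], v1[1], v1[2]))
        for i in range(len(verts) / 3):
            f.write("f %s %s %s\n" % (i * 3 + 1, i * 3 + 2, i * 3 + 3))
        if not hasattr(filename, "write"):
            f.close()

    buf = StringIO()
    write_obj([(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)], buf)
    print buf.getvalue()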


diff -r 8deb84e38f1dcafb93d1b6cf124327b5f10119fa -r 3ebd7a48bd38ca1a4743c4f7d0f803d5563f06eb yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -136,6 +136,12 @@
             mylog.warning("Refine by something other than two: reverting to"
                         + " overlap_proj")
             self.proj = self.overlap_proj
+        if self.pf.dimensionality < 3 and hasattr(self, 'proj') and \
+            hasattr(self, 'overlap_proj'):
+            mylog.warning("Dimensionality less than 3: reverting to"
+                        + " overlap_proj")
+            self.proj = self.overlap_proj
+
         self.object_types.sort()
 
     def _setup_unknown_fields(self):


diff -r 8deb84e38f1dcafb93d1b6cf124327b5f10119fa -r 3ebd7a48bd38ca1a4743c4f7d0f803d5563f06eb yt/frontends/enzo/answer_testing_support.py
--- a/yt/frontends/enzo/answer_testing_support.py
+++ b/yt/frontends/enzo/answer_testing_support.py
@@ -60,17 +60,20 @@
 def standard_small_simulation(pf_fn, fields):
     if not can_run_pf(pf_fn): return
     dso = [None]
+    tolerance = ytcfg.getint("yt", "answer_testing_tolerance")
+    bitwise = ytcfg.getboolean("yt", "answer_testing_bitwise")
     for field in fields:
-        yield GridValuesTest(pf_fn, field)
+        if bitwise:
+            yield GridValuesTest(pf_fn, field)
         if 'particle' in field: continue
         for ds in dso:
             for axis in [0, 1, 2]:
                 for weight_field in [None, "Density"]:
                     yield ProjectionValuesTest(
                         pf_fn, axis, field, weight_field,
-                        ds, decimals=3)
+                        ds, decimals=tolerance)
             yield FieldValuesTest(
-                    pf_fn, field, ds, decimals=3)
+                    pf_fn, field, ds, decimals=tolerance)
                     
 class ShockTubeTest(object):
     def __init__(self, data_file, solution_file, fields, 


diff -r 8deb84e38f1dcafb93d1b6cf124327b5f10119fa -r 3ebd7a48bd38ca1a4743c4f7d0f803d5563f06eb yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -221,6 +221,27 @@
           function=_NumberDensity,
           convert_function=_ConvertNumberDensity)
 
+def _H_NumberDensity(field, data):
+    field_data = np.zeros(data["Density"].shape,
+                          dtype=data["Density"].dtype)
+    if data.pf.parameters["MultiSpecies"] == 0:
+        field_data += data["Density"] * \
+          data.pf.parameters["HydrogenFractionByMass"]
+    if data.pf.parameters["MultiSpecies"] > 0:
+        field_data += data["HI_Density"]
+        field_data += data["HII_Density"]
+    if data.pf.parameters["MultiSpecies"] > 1:
+        field_data += data["HM_Density"]
+        field_data += data["H2I_Density"]
+        field_data += data["H2II_Density"]
+    if data.pf.parameters["MultiSpecies"] > 2:
+        field_data += data["HDI_Density"] / 2.0
+    return field_data
+add_field("H_NumberDensity", units=r"\rm{cm}^{-3}",
+          function=_H_NumberDensity,
+          convert_function=_ConvertNumberDensity)
+
+
 # Now we add all the fields that we want to control, but we give a null function
 # This is every Enzo field we can think of.  This will be installation-dependent,
 


diff -r 8deb84e38f1dcafb93d1b6cf124327b5f10119fa -r 3ebd7a48bd38ca1a4743c4f7d0f803d5563f06eb yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -105,7 +105,7 @@
 class StreamHandler(object):
     def __init__(self, left_edges, right_edges, dimensions,
                  levels, parent_ids, particle_count, processor_ids,
-                 fields, io = None):
+                 fields, io = None, particle_types = {}):
         self.left_edges = left_edges
         self.right_edges = right_edges
         self.dimensions = dimensions
@@ -116,10 +116,18 @@
         self.num_grids = self.levels.size
         self.fields = fields
         self.io = io
-
+        self.particle_types = particle_types
+            
     def get_fields(self):
         return self.fields.all_fields
 
+    def get_particle_type(self, field) :
+
+        if self.particle_types.has_key(field) :
+            return self.particle_types[field]
+        else :
+            return False
+        
 class StreamHierarchy(AMRHierarchy):
 
     grid = StreamGrid
@@ -150,11 +158,12 @@
                         return data.convert(f)
                     return _convert_function
                 cf = external_wrapper(field)
+            ptype = self.stream_handler.get_particle_type(field)
             # Note that we call add_field on the field_info directly.  This
             # will allow the same field detection mechanism to work for 1D, 2D
             # and 3D fields.
             self.pf.field_info.add_field(
-                    field, lambda a, b: None,
+                    field, lambda a, b: None, particle_type = ptype,
                     convert_function=cf, take_log=False)
 
     def _parse_hierarchy(self):
@@ -249,6 +258,32 @@
         else:
             self.io = io_registry[self.data_style](self.stream_handler)
 
+    def update_data(self, data) :
+
+        """
+        Update the stream data with a new data dict. If fields already exist,
+        they will be replaced, but if they do not, they will be added. Fields
+        already in the stream but not part of the data dict will be left
+        alone. 
+        """
+        
+        particle_types = set_particle_types(data[0])
+
+        for key in data[0].keys() :
+            if key is "number_of_particles": continue
+            self.stream_handler.particle_types[key] = particle_types[key]
+            if key not in self.field_list:
+                self.field_list.append(key)
+
+        for i, grid in enumerate(self.grids) :
+            if data[i].has_key("number_of_particles") :
+                grid.NumberOfParticles = data[i].pop("number_of_particles")
+            for key in data[i].keys() :
+                self.stream_handler.fields[grid.id][key] = data[i][key]
+            
+        self._setup_unknown_fields()
+        self._detect_fields()
+        
 class StreamStaticOutput(StaticOutput):
     _hierarchy_class = StreamHierarchy
     _fieldinfo_fallback = StreamFieldInfo
@@ -299,8 +334,68 @@
     @property
     def all_fields(self): return self[0].keys()
 
+def set_particle_types(data) :
+
+    particle_types = {}
+    
+    for key in data.keys() :
+
+        if key is "number_of_particles": continue
+        
+        if len(data[key].shape) == 1:
+            particle_types[key] = True
+        else :
+            particle_types[key] = False
+    
+    return particle_types
+
+def assign_particle_data(pf, pdata) :
+
+    """
+    Assign particle data to the grids using find_points. This
+    will overwrite any existing particle data, so be careful!
+    """
+    
+    if pf.h.num_grids > 1 :
+
+        try :
+            x = pdata["particle_position_x"]
+            y = pdata["particle_position_y"]
+            z = pdata["particle_position_z"]
+        except:
+            raise KeyError("Cannot decompose particle data without position fields!")
+        
+        particle_grids, particle_grid_inds = pf.h.find_points(x,y,z)
+        idxs = np.argsort(particle_grid_inds)
+        particle_grid_count = np.bincount(particle_grid_inds,
+                                          minlength=pf.h.num_grids)
+        particle_indices = np.zeros(pf.h.num_grids + 1, dtype='int64')
+        if pf.h.num_grids > 1 :
+            np.add.accumulate(particle_grid_count.squeeze(),
+                              out=particle_indices[1:])
+        else :
+            particle_indices[1] = particle_grid_count.squeeze()
+    
+        pdata.pop("number_of_particles")    
+        grid_pdata = []
+        
+        for i, pcount in enumerate(particle_grid_count) :
+            grid = {}
+            grid["number_of_particles"] = pcount
+            start = particle_indices[i]
+            end = particle_indices[i+1]
+            for key in pdata.keys() :
+                grid[key] = pdata[key][idxs][start:end]
+            grid_pdata.append(grid)
+
+    else :
+
+        grid_pdata = [pdata]
+        
+    pf.h.update_data(grid_pdata)
+                                        
 def load_uniform_grid(data, domain_dimensions, sim_unit_to_cm, bbox=None,
-                      nprocs=1, sim_time=0.0, number_of_particles=0):
+                      nprocs=1, sim_time=0.0):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -312,6 +407,9 @@
           disappointing or non-existent in most cases.
         * Particles may be difficult to integrate.
 
+    Particle fields are detected as one-dimensional fields. The number of particles
+    is set by the "number_of_particles" key in data.
+    
     Parameters
     ----------
     data : dict
@@ -326,8 +424,6 @@
         If greater than 1, will create this number of subarrays out of data
     sim_time : float, optional
         The simulation time in seconds
-    number_of_particles : int, optional
-        If particle fields are included, set this to the number of particles
 
     Examples
     --------
@@ -347,14 +443,29 @@
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
 
     sfh = StreamDictFieldHandler()
-
+    
+    if data.has_key("number_of_particles") :
+        number_of_particles = data.pop("number_of_particles")
+    else :
+        number_of_particles = int(0)
+    
+    if number_of_particles > 0 :
+        particle_types = set_particle_types(data)
+        pdata = {}
+        pdata["number_of_particles"] = number_of_particles
+        for key in data.keys() :
+            if len(data[key].shape) == 1 :
+                pdata[key] = data.pop(key)
+    else :
+        particle_types = {}
+    
     if nprocs > 1:
         temp = {}
         new_data = {}
         for key in data.keys():
             psize = get_psize(np.array(data[key].shape), nprocs)
             grid_left_edges, grid_right_edges, temp[key] = \
-                decompose_array(data[key], psize, bbox)
+                             decompose_array(data[key], psize, bbox)
             grid_dimensions = np.array([grid.shape for grid in temp[key]],
                                        dtype="int32")
         for gid in range(nprocs):
@@ -375,9 +486,10 @@
         grid_dimensions,
         grid_levels,
         -np.ones(nprocs, dtype='int64'),
-        number_of_particles*np.ones(nprocs, dtype='int64').reshape(nprocs,1),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
         np.zeros(nprocs).reshape((nprocs,1)),
         sfh,
+        particle_types=particle_types
     )
 
     handler.name = "UniformGridData"
@@ -396,10 +508,16 @@
     box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+    # Now figure out where the particles go
+
+    if number_of_particles > 0 :
+        assign_particle_data(spf, pdata)
+    
     return spf
 
 def load_amr_grids(grid_data, domain_dimensions, sim_unit_to_cm, bbox=None,
-                   sim_time=0.0, number_of_particles=0):
+                   sim_time=0.0):
     r"""Load a set of grids of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -418,8 +536,9 @@
     grid_data : list of dicts
         This is a list of dicts.  Each dict must have entries "left_edge",
         "right_edge", "dimensions", "level", and then any remaining entries are
-        assumed to be fields.  This will be modified in place and can't be
-        assumed to be static..
+        assumed to be fields.  They also may include a particle count, otherwise
+        assumed to be zero. This will be modified in place and can't be
+        assumed to be static.
     domain_dimensions : array_like
         This is the domain dimensions of the grid
     sim_unit_to_cm : float
@@ -428,8 +547,6 @@
         Size of computational domain in units sim_unit_to_cm
     sim_time : float, optional
         The simulation time in seconds
-    number_of_particles : int, optional
-        If particle fields are included, set this to the number of particles
 
     Examples
     --------
@@ -438,11 +555,13 @@
     ...     dict(left_edge = [0.0, 0.0, 0.0],
     ...          right_edge = [1.0, 1.0, 1.],
     ...          level = 0,
-    ...          dimensions = [32, 32, 32]),
+    ...          dimensions = [32, 32, 32],
+    ...          number_of_particles = 0),
     ...     dict(left_edge = [0.25, 0.25, 0.25],
     ...          right_edge = [0.75, 0.75, 0.75],
     ...          level = 1,
-    ...          dimensions = [32, 32, 32])
+    ...          dimensions = [32, 32, 32],
+    ...          number_of_particles = 0)
     ... ]
     ... 
     >>> for g in grid_data:
@@ -461,23 +580,27 @@
     grid_left_edges = np.zeros((ngrids, 3), dtype="float32")
     grid_right_edges = np.zeros((ngrids, 3), dtype="float32")
     grid_dimensions = np.zeros((ngrids, 3), dtype="int32")
+    number_of_particles = np.zeros((ngrids,1), dtype='int64')
     sfh = StreamDictFieldHandler()
     for i, g in enumerate(grid_data):
         grid_left_edges[i,:] = g.pop("left_edge")
         grid_right_edges[i,:] = g.pop("right_edge")
         grid_dimensions[i,:] = g.pop("dimensions")
         grid_levels[i,:] = g.pop("level")
+        if g.has_key("number_of_particles") :
+            number_of_particles[i,:] = g.pop("number_of_particles")  
         sfh[i] = g
-
+            
     handler = StreamHandler(
         grid_left_edges,
         grid_right_edges,
         grid_dimensions,
         grid_levels,
         None, # parent_ids is none
-        number_of_particles*np.ones(ngrids, dtype='int64').reshape(ngrids,1),
+        number_of_particles,
         np.zeros(ngrids).reshape((ngrids,1)),
         sfh,
+        particle_types=set_particle_types(grid_data[0])
     )
 
     handler.name = "AMRGridData"
@@ -529,6 +652,20 @@
     >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
     >>> pf = refine_amr(ug, rc, fo, 5)
     """
+
+    # If we have particle data, set it aside for now
+
+    number_of_particles = np.sum([grid.NumberOfParticles
+                                  for grid in base_pf.h.grids])
+
+    if number_of_particles > 0 :
+        pdata = {}
+        for field in base_pf.h.field_list :
+            if base_pf.field_info[field].particle_type :
+                pdata[field] = np.concatenate([grid[field]
+                                               for grid in base_pf.h.grids])
+        pdata["number_of_particles"] = number_of_particles
+        
     last_gc = base_pf.h.num_grids
     cur_gc = -1
     pf = base_pf    
@@ -545,7 +682,8 @@
                        level = g.Level,
                        dimensions = g.ActiveDimensions )
             for field in pf.h.field_list:
-                gd[field] = g[field]
+                if not pf.field_info[field].particle_type :
+                    gd[field] = g[field]
             grid_data.append(gd)
             if g.Level < pf.h.max_level: continue
             fg = FlaggingGrid(g, refinement_criteria)
@@ -557,8 +695,14 @@
                 gd = dict(left_edge = LE, right_edge = grid.right_edge,
                           level = g.Level + 1, dimensions = dims)
                 for field in pf.h.field_list:
-                    gd[field] = grid[field]
+                    if not pf.field_info[field].particle_type :
+                        gd[field] = grid[field]
                 grid_data.append(gd)
         pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0)
         cur_gc = pf.h.num_grids
+
+    # Now reassign particle data to grids
+
+    if number_of_particles > 0 : assign_particle_data(pf, pdata)
+    
     return pf
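Taken together, the stream changes above move particles into the data dict itself: one-dimensional arrays are treated as particle fields, the count comes from a "number_of_particles" key, and assign_particle_data() distributes them onto the decomposed grids (the old number_of_particles keyword argument is gone). A minimal sketch using only calls added or documented in this diff:

    import numpy as np
    from yt.frontends.stream.api import load_uniform_grid

    num_particles = 1000
    data = {"Density": np.random.random((32, 32, 32)),
            "particle_position_x": np.random.uniform(size=num_particles),
            "particle_position_y": np.random.uniform(size=num_particles),
            "particle_position_z": np.random.uniform(size=num_particles),
            "particle_mass": np.ones(num_particles),
            "number_of_particles": num_particles}

    # 1D arrays are detected as particle fields; the count is read from the
    # "number_of_particles" key and the particles are then assigned to
    # whichever decomposed grid contains them.
    pf = load_uniform_grid(data, (32, 32, 32), 1.0, nprocs=8)
    print sum(g.NumberOfParticles for g in pf.h.grids)   # -> 1000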


diff -r 8deb84e38f1dcafb93d1b6cf124327b5f10119fa -r 3ebd7a48bd38ca1a4743c4f7d0f803d5563f06eb yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -60,8 +60,8 @@
     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(None), slice(None), slice(None)]
         sl[axis] = slice(coord, coord + 1)
-        sl = tuple(reversed(sl))
-        tr = self.fields[grid.id][field][sl].swapaxes(0,2)
+        sl = tuple(sl)
+        tr = self.fields[grid.id][field][sl]
         # In-place unit conversion requires we return a copy
         return tr.copy()
 


diff -r 8deb84e38f1dcafb93d1b6cf124327b5f10119fa -r 3ebd7a48bd38ca1a4743c4f7d0f803d5563f06eb yt/frontends/stream/tests/test_stream_particles.py
--- /dev/null
+++ b/yt/frontends/stream/tests/test_stream_particles.py
@@ -0,0 +1,130 @@
+import numpy as np
+from yt.testing import *
+from yt.frontends.stream.api import load_uniform_grid, refine_amr, load_amr_grids
+import yt.utilities.initial_conditions as ic
+import yt.utilities.flagging_methods as fm
+
+def setup() :
+    pass
+
+# Field information
+
+def test_stream_particles() :
+    
+    num_particles = 100000
+    domain_dims = (64, 64, 64)
+    dens = np.random.random(domain_dims) 
+    x = np.random.uniform(size=num_particles)
+    y = np.random.uniform(size=num_particles)
+    z = np.random.uniform(size=num_particles)
+    m = np.ones((num_particles))
+
+    # Field operators and cell flagging methods
+
+    fo = []
+    fo.append(ic.TopHatSphere(0.1, [0.2,0.3,0.4],{"Density": 2.0}))
+    fo.append(ic.TopHatSphere(0.05, [0.7,0.4,0.75],{"Density": 20.0}))
+    rc = [fm.flagging_method_registry["overdensity"](1.0)]
+    
+    # Check that all of this runs ok without particles
+    
+    ug0 = load_uniform_grid({"Density": dens}, domain_dims, 1.0)
+    ug0 = load_uniform_grid({"Density": dens}, domain_dims, 1.0, nprocs=8)
+    amr0 = refine_amr(ug0, rc, fo, 3)
+
+    grid_data = []
+    
+    for grid in amr0.h.grids :
+        
+        data = dict(left_edge = grid.LeftEdge,
+                    right_edge = grid.RightEdge,
+                    level = grid.Level,
+                    dimensions = grid.ActiveDimensions,
+                    number_of_particles = grid.NumberOfParticles)
+    
+        for field in amr0.h.field_list :
+            
+            data[field] = grid[field]
+            
+        grid_data.append(data)
+
+    amr0 = load_amr_grids(grid_data, domain_dims, 1.0)
+                        
+    # Now add particles
+
+    fields1 = {"Density": dens,
+               "particle_position_x": x,
+               "particle_position_y": y,
+               "particle_position_z": z,
+               "particle_mass": m,
+               "number_of_particles": num_particles}
+
+    fields2 = fields1.copy()
+
+    ug1 = load_uniform_grid(fields1, domain_dims, 1.0)
+    ug2 = load_uniform_grid(fields2, domain_dims, 1.0, nprocs=8)
+
+    # Check to make sure the number of particles is the same
+
+    number_of_particles1 = np.sum([grid.NumberOfParticles for grid in ug1.h.grids])
+    number_of_particles2 = np.sum([grid.NumberOfParticles for grid in ug2.h.grids])
+    
+    assert number_of_particles1 == num_particles
+    assert number_of_particles1 == number_of_particles2
+
+    # Check to make sure the fields have been defined correctly
+    
+    assert ug1.field_info["particle_position_x"].particle_type
+    assert ug1.field_info["particle_position_y"].particle_type
+    assert ug1.field_info["particle_position_z"].particle_type
+    assert ug1.field_info["particle_mass"].particle_type
+    assert not ug1.field_info["Density"].particle_type
+
+    assert ug2.field_info["particle_position_x"].particle_type
+    assert ug2.field_info["particle_position_y"].particle_type
+    assert ug2.field_info["particle_position_z"].particle_type
+    assert ug2.field_info["particle_mass"].particle_type
+    assert not ug2.field_info["Density"].particle_type
+    
+    # Now refine this
+
+    amr1 = refine_amr(ug1, rc, fo, 3)
+    
+    grid_data = []
+    
+    for grid in amr1.h.grids :
+        
+        data = dict(left_edge = grid.LeftEdge,
+                    right_edge = grid.RightEdge,
+                    level = grid.Level,
+                    dimensions = grid.ActiveDimensions,
+                    number_of_particles = grid.NumberOfParticles)
+
+        for field in amr1.h.field_list :
+
+            data[field] = grid[field]
+            
+        grid_data.append(data)
+    
+    amr2 = load_amr_grids(grid_data, domain_dims, 1.0)
+
+    # Check everything again
+
+    number_of_particles1 = [grid.NumberOfParticles for grid in amr1.h.grids]
+    number_of_particles2 = [grid.NumberOfParticles for grid in amr2.h.grids]
+    
+    assert np.sum(number_of_particles1) == num_particles
+    assert_equal(number_of_particles1, number_of_particles2)
+    
+    assert amr1.field_info["particle_position_x"].particle_type
+    assert amr1.field_info["particle_position_y"].particle_type
+    assert amr1.field_info["particle_position_z"].particle_type
+    assert amr1.field_info["particle_mass"].particle_type
+    assert not amr1.field_info["Density"].particle_type
+    
+    assert amr2.field_info["particle_position_x"].particle_type
+    assert amr2.field_info["particle_position_y"].particle_type
+    assert amr2.field_info["particle_position_z"].particle_type
+    assert amr2.field_info["particle_mass"].particle_type
+    assert not amr2.field_info["Density"].particle_type
+


diff -r 8deb84e38f1dcafb93d1b6cf124327b5f10119fa -r 3ebd7a48bd38ca1a4743c4f7d0f803d5563f06eb yt/frontends/stream/tests/test_update_data.py
--- /dev/null
+++ b/yt/frontends/stream/tests/test_update_data.py
@@ -0,0 +1,23 @@
+from yt.testing import *
+from yt.data_objects.profiles import BinnedProfile1D
+from numpy.random import uniform
+
+def setup():
+    global pf
+    pf = fake_random_pf(64, nprocs=8)
+    pf.h
+    
+def test_update_data() :
+    dims = (32,32,32)
+    grid_data = [{"Temperature":uniform(size=dims)}
+                 for i in xrange(pf.h.num_grids)]
+    pf.h.update_data(grid_data)
+    prj = pf.h.proj(2, "Temperature")
+    prj["Temperature"]
+    dd = pf.h.all_data()
+    profile = BinnedProfile1D(dd, 10, "Density",
+                              dd["Density"].min(),
+                              dd["Density"].max())
+    profile.add_fields(["Temperature"])
+    profile["Temperature"]
+                              


diff -r 8deb84e38f1dcafb93d1b6cf124327b5f10119fa -r 3ebd7a48bd38ca1a4743c4f7d0f803d5563f06eb yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -105,6 +105,10 @@
 #from yt.frontends.maestro.api import \
 #    MaestroStaticOutput, MaestroFieldInfo, add_maestro_field
 
+from yt.frontends.stream.api import \
+    StreamStaticOutput, StreamFieldInfo, add_stream_field, \
+    StreamHandler, load_uniform_grid, load_amr_grids
+
 from yt.analysis_modules.list_modules import \
     get_available_modules, amods
 available_analysis_modules = get_available_modules()


diff -r 8deb84e38f1dcafb93d1b6cf124327b5f10119fa -r 3ebd7a48bd38ca1a4743c4f7d0f803d5563f06eb yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -45,7 +45,9 @@
 mylog = logging.getLogger('nose.plugins.answer-testing')
 run_big_data = False
 
-_latest = "gold001"
+# Set the latest gold and local standard filenames
+_latest = ytcfg.get("yt", "gold_standard_filename")
+_latest_local = ytcfg.get("yt", "local_standard_filename")
 _url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
 
 class AnswerTesting(Plugin):
@@ -54,16 +56,16 @@
 
     def options(self, parser, env=os.environ):
         super(AnswerTesting, self).options(parser, env=env)
-        parser.add_option("--answer-compare-name", dest="compare_name", metavar='str',
-            default=_latest, help="The name of tests against which we will compare")
+        parser.add_option("--answer-name", dest="answer_name", metavar='str',
+            default=None, help="The name of the standard to store/compare against")
+        parser.add_option("--answer-store", dest="store_results", metavar='bool',
+            default=False, action="store_true",
+            help="Should we store this result instead of comparing?")
+        parser.add_option("--local", dest="local_results",
+            default=False, action="store_true", help="Store/load reference results locally?")
         parser.add_option("--answer-big-data", dest="big_data",
             default=False, help="Should we run against big data, too?",
             action="store_true")
-        parser.add_option("--answer-store-name", dest="store_name", metavar='str',
-            default=None,
-            help="The name we'll call this set of tests")
-        parser.add_option("--local-store", dest="store_local_results",
-            default=False, action="store_true", help="Store/Load local results?")
 
     @property
     def my_version(self, version=None):
@@ -82,47 +84,64 @@
         if not self.enabled:
             return
         disable_stream_logging()
-        if options.store_name is not None:
-            self.store_results = True
-        # If the user sets the storage_name, then it means they are storing and
-        # not comparing, even if they set the compare_name (since it is set by default)
-            options.compare_name = None
-        else: 
-            self.store_results = False
-            options.store_name = self.my_version
-        from yt.config import ytcfg
+
+        # Parse through the storage flags to make sense of them
+        # and use reasonable defaults
+        # If we're storing the data, default storage name is local
+        # latest version
+        if options.store_results:
+            if options.answer_name is None:
+                self.store_name = _latest_local
+            else:
+                self.store_name = options.answer_name
+            self.compare_name = None
+        # if we're not storing, then we're comparing, and we want default
+        # comparison name to be the latest gold standard 
+        # either on network or local
+        else:
+            if options.answer_name is None:
+                if options.local_results:
+                    self.compare_name = _latest_local
+                else:
+                    self.compare_name = _latest
+            else:
+                self.compare_name = options.answer_name
+            self.store_name = self.my_version
+
+        self.store_results = options.store_results
+
         ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = \
             self.result_storage = defaultdict(dict)
-        if options.compare_name == "SKIP":
-            options.compare_name = None
-        elif options.compare_name == "latest":
-            options.compare_name = _latest
+        if self.compare_name == "SKIP":
+            self.compare_name = None
+        elif self.compare_name == "latest":
+            self.compare_name = _latest
             
         # Local/Cloud storage 
-        if options.store_local_results:
+        if options.local_results:
             storage_class = AnswerTestLocalStorage
             # Fix up filename for local storage 
-            if options.compare_name is not None:
-                options.compare_name = "%s/%s/%s" % \
-                    (os.path.realpath(options.output_dir), options.compare_name, 
-                     options.compare_name)
-            if options.store_name is not None:
+            if self.compare_name is not None:
+                self.compare_name = "%s/%s/%s" % \
+                    (os.path.realpath(options.output_dir), self.compare_name, 
+                     self.compare_name)
+            if self.store_name is not None:
                 name_dir_path = "%s/%s" % \
                     (os.path.realpath(options.output_dir), 
-                    options.store_name)
+                    self.store_name)
                 if not os.path.isdir(name_dir_path):
                     os.makedirs(name_dir_path)
-                options.store_name= "%s/%s" % \
-                        (name_dir_path, options.store_name)
+                self.store_name= "%s/%s" % \
+                        (name_dir_path, self.store_name)
         else:
             storage_class = AnswerTestCloudStorage
 
         # Initialize answer/reference storage
         AnswerTestingTest.reference_storage = self.storage = \
-                storage_class(options.compare_name, options.store_name)
+                storage_class(self.compare_name, self.store_name)
 
-        self.store_local_results = options.store_local_results
+        self.local_results = options.local_results
         global run_big_data
         run_big_data = options.big_data
 
@@ -147,13 +166,22 @@
         url = _url_path % (self.reference_name, pf_name)
         try:
             resp = urllib2.urlopen(url)
-            # This is dangerous, but we have a controlled S3 environment
-            data = resp.read()
-            rv = cPickle.loads(data)
         except urllib2.HTTPError as ex:
             raise YTNoOldAnswer(url)
-            mylog.warning("Missing %s (%s)", url, ex)
-            rv = default
+        else:
+            for this_try in range(3):
+                try:
+                    data = resp.read()
+                except:
+                    time.sleep(0.01)
+                else:
+                    # We were successful
+                    break
+            else:
+                # Raise error if all tries were unsuccessful
+                raise YTCloudError(url)
+            # This is dangerous, but we have a controlled S3 environment
+            rv = cPickle.loads(data)
         self.cache[pf_name] = rv
         return rv
 
@@ -249,7 +277,8 @@
         nv = self.run()
         if self.reference_storage.reference_name is not None:
             dd = self.reference_storage.get(self.storage_name)
-            if dd is None: raise YTNoOldAnswer(self.storage_name)
+            if dd is None or self.description not in dd: 
+                raise YTNoOldAnswer("%s : %s" % (self.storage_name , self.description))
             ov = dd[self.description]
             self.compare(nv, ov)
         else:
@@ -367,7 +396,7 @@
         super(ProjectionValuesTest, self).__init__(pf_fn)
         self.axis = axis
         self.field = field
-        self.weight_field = field
+        self.weight_field = weight_field
         self.obj_type = obj_type
         self.decimals = decimals
 
@@ -376,12 +405,15 @@
             obj = self.create_obj(self.pf, self.obj_type)
         else:
             obj = None
+        if self.pf.domain_dimensions[self.axis] == 1: return None
         proj = self.pf.h.proj(self.axis, self.field,
                               weight_field=self.weight_field,
                               data_source = obj)
         return proj.field_data
 
     def compare(self, new_result, old_result):
+        if new_result is None:
+            return
         assert(len(new_result) == len(old_result))
         for k in new_result:
             assert (k in old_result)
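The reworked option handling above reduces to a small decision table: with --answer-store, results are written under --answer-name (or the local standard name); without it, results are compared against --answer-name (or the gold standard, or the local standard when --local is given). A standalone sketch of that table with a hypothetical resolve() helper; the real logic lives in AnswerTesting.configure():

    def resolve(store_results, answer_name, local_results,
                latest_gold="gold003", latest_local="local001"):
        # Hypothetical helper mirroring the store/compare defaults above.
        if store_results:
            store_name = answer_name or latest_local
            compare_name = None
        else:
            compare_name = answer_name or (latest_local if local_results
                                           else latest_gold)
            store_name = None   # the plugin substitutes its own version string
        return store_name, compare_name

    print resolve(store_results=True, answer_name=None, local_results=True)
    print resolve(store_results=False, answer_name=None, local_results=False)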


diff -r 8deb84e38f1dcafb93d1b6cf124327b5f10119fa -r 3ebd7a48bd38ca1a4743c4f7d0f803d5563f06eb yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -163,6 +163,14 @@
         return "There is no old answer available.\n" + \
                str(self.path)
 
+class YTCloudError(YTException):
+    def __init__(self, path):
+        self.path = path
+
+    def __str__(self):
+        return "Failed to retrieve cloud data. Connection may be broken.\n" + \
+               str(self.path)
+
 class YTEllipsoidOrdering(YTException):
     def __init__(self, pf, A, B, C):
         YTException.__init__(self, pf)



https://bitbucket.org/yt_analysis/yt/changeset/88f17e5ee177/
changeset:   88f17e5ee177
branch:      yt
user:        sskory
date:        2012-11-30 22:39:18
summary:     Updating the install_script for Rockstar 0.99.6.
affected #:  1 file

diff -r 3ebd7a48bd38ca1a4743c4f7d0f803d5563f06eb -r 88f17e5ee1777b3866376b7f9ed633ed0fe7c160 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -434,8 +434,7 @@
 echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
 echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
 echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
-echo 'd91fa23d8f86c5f5b3df210731c14c92a2e3d999979ef2c4c67be3dda19a8a8d2363f8cd23611957280c17f61653b0e66f59d3f5515b6d443b22cddeaef86ab6  Rockstar-0.99.tar.gz' > Rockstar-0.99.tar.gz.sha512
-
+echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
@@ -705,14 +704,14 @@
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
 then
-    if [ ! -e Rockstar-0.99/done ]
+    if [ ! -e Rockstar/done ]
     then
-        [ ! -e Rockstar-0.99 ] && tar xfz Rockstar-0.99.tar.gz
+        [ ! -e Rockstar ] && tar xfz rockstar-0.99.6.tar.gz
         echo "Building Rockstar"
-        cd Rockstar-0.99
+        cd Rockstar
         ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         cp librockstar.so ${DEST_DIR}/lib
-        ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar-0.99
+        ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar
         echo $ROCKSTAR_DIR > ${YT_DIR}/rockstar.cfg
         touch done
         cd ..



https://bitbucket.org/yt_analysis/yt/changeset/aac45ed6d4e8/
changeset:   aac45ed6d4e8
branch:      yt
user:        sskory
date:        2012-12-03 21:39:14
summary:     A few changes for inline Rockstar.
affected #:  1 file

diff -r 88f17e5ee1777b3866376b7f9ed633ed0fe7c160 -r aac45ed6d4e81ed8707c2843ec5df03bddd69aa4 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -67,7 +67,7 @@
             stride = int(ceil(float(pool.comm.size) / self.num_writers))
             while len(self.writers) < self.num_writers:
                 self.writers.extend(avail[::stride])
-                for r in readers:
+                for r in avail:
                     avail.pop(avail.index(r))
 
     def run(self, handler, pool):
@@ -82,14 +82,14 @@
         # Start writers.
         writer_pid = 0
         if pool.comm.rank in self.writers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
+            time.sleep(0.05 + pool.comm.rank/10.0)
             writer_pid = os.fork()
             if writer_pid == 0:
                 handler.start_writer()
                 os._exit(0)
         # Start readers, not forked.
         if pool.comm.rank in self.readers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
+            time.sleep(0.05 + pool.comm.rank/10.0)
             handler.start_reader()
         # Make sure the forks are done, which they should be.
         if writer_pid != 0:
@@ -97,6 +97,13 @@
         if server_pid != 0:
             os.waitpid(server_pid, 0)
 
+    def setup_pool(self):
+        pool = ProcessorPool()
+        # Everyone is a reader, and when we're inline, that's all that matters.
+        readers = np.arange(ytcfg.getint("yt", "__global_parallel_size"))
+        pool.add_workgroup(ranks=readers, name="readers")
+        return pool, pool.workgroups[0]
+
 class StandardRunner(ParallelAnalysisInterface):
     def __init__(self, num_readers, num_writers):
         self.num_readers = num_readers
@@ -110,7 +117,7 @@
                     self.num_readers, self.num_writers, psize)
             raise RuntimeError
     
-    def split_work(self):
+    def split_work(self, pool):
         self.readers = np.arange(self.num_readers) + 1
         self.writers = np.arange(self.num_writers) + 1 + self.num_readers
     
@@ -119,11 +126,20 @@
         if wg.name == "server":
             handler.start_server()
         if wg.name == "readers":
-            time.sleep(0.1)
+            time.sleep(0.05)
             handler.start_reader()
         if wg.name == "writers":
-            time.sleep(0.2)
+            time.sleep(0.1)
             handler.start_writer()
+    
+    def setup_pool(self):
+        pool = ProcessorPool()
+        pool, workgroup = ProcessorPool.from_sizes(
+           [ (1, "server"),
+             (self.num_readers, "readers"),
+             (self.num_writers, "writers") ]
+        )
+        return pool, workgroup
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
     def __init__(self, ts, num_readers = 1, num_writers = None,
@@ -214,14 +230,8 @@
             del tpf
         else:
             self.force_res = force_res
-        # We set up the workgroups *before* initializing
-        # ParallelAnalysisInterface. Everyone is their own workgroup!
-        self.pool = ProcessorPool()
-        self.pool, self.workgroup = ProcessorPool.from_sizes(
-           [ (1, "server"),
-             (self.num_readers, "readers"),
-             (self.num_writers, "writers") ]
-        )
+        # Setup pool and workgroups.
+        self.pool, self.workgroup = self.runner.setup_pool()
         p = self._setup_parameters(ts)
         params = self.comm.mpi_bcast(p, root = self.pool['readers'].ranks[0])
         self.__dict__.update(params)
@@ -259,7 +269,14 @@
 
 
     def __del__(self):
-        self.pool.free_all()
+        try:
+            self.pool.free_all()
+        except AttributeError:
+            # This really only acts to cut down on the misleading
+            # error messages when/if this class is called incorrectly
+            # or some other error happens and self.pool hasn't been created
+            # already.
+            pass
 
     def _get_hosts(self):
         if self.comm.rank == 0 or self.comm.size == 1:
@@ -312,7 +329,7 @@
             self.handler.call_rockstar()
         else:
             # Split up the work.
-            self.runner.split_work()
+            self.runner.split_work(self.pool)
             # And run it!
             self.runner.run(self.handler, self.workgroup)
         self.comm.barrier()



https://bitbucket.org/yt_analysis/yt/changeset/4d7f451f048a/
changeset:   4d7f451f048a
branch:      yt
user:        sskory
date:        2012-12-03 21:39:38
summary:     Merge.
affected #:  5 files

diff -r aac45ed6d4e81ed8707c2843ec5df03bddd69aa4 -r 4d7f451f048afb2716baa8b226457b7319671e08 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -746,7 +746,7 @@
 then
     echo "Cloning a copy of Enzo."
     cd ${DEST_DIR}/src/
-    ${HG_EXEC} clone https://enzo.googlecode.com/hg/ ./enzo-hg-stable
+    ${HG_EXEC} clone https://bitbucket.org/enzo/enzo-stable ./enzo-hg-stable
     cd $MY_PWD
 fi
 


diff -r aac45ed6d4e81ed8707c2843ec5df03bddd69aa4 -r 4d7f451f048afb2716baa8b226457b7319671e08 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -274,16 +274,18 @@
             self.stream_handler.particle_types[key] = particle_types[key]
             if key not in self.field_list:
                 self.field_list.append(key)
+                
+        self._setup_unknown_fields()
 
         for i, grid in enumerate(self.grids) :
             if data[i].has_key("number_of_particles") :
                 grid.NumberOfParticles = data[i].pop("number_of_particles")
             for key in data[i].keys() :
+                if key in grid.keys() : grid.field_data.pop(key, None)
                 self.stream_handler.fields[grid.id][key] = data[i][key]
             
-        self._setup_unknown_fields()
         self._detect_fields()
-        
+                
 class StreamStaticOutput(StaticOutput):
     _hierarchy_class = StreamHierarchy
     _fieldinfo_fallback = StreamFieldInfo


diff -r aac45ed6d4e81ed8707c2843ec5df03bddd69aa4 -r 4d7f451f048afb2716baa8b226457b7319671e08 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -420,6 +420,8 @@
         for k in new_result:
             err_msg = "%s values of %s (%s weighted) projection (axis %s) not equal." % \
               (k, self.field, self.weight_field, self.axis)
+            if k == 'weight_field' and self.weight_field is None:
+                continue
             if self.decimals is None:
                 assert_equal(new_result[k], old_result[k],
                              err_msg=err_msg)


diff -r aac45ed6d4e81ed8707c2843ec5df03bddd69aa4 -r 4d7f451f048afb2716baa8b226457b7319671e08 yt/utilities/particle_generator.py
--- /dev/null
+++ b/yt/utilities/particle_generator.py
@@ -0,0 +1,378 @@
+import numpy as np
+import h5py
+from yt.utilities.lib import CICSample_3
+from yt.funcs import *
+
+class ParticleGenerator(object) :
+
+    default_fields = ["particle_position_x",
+                      "particle_position_y",
+                      "particle_position_z",
+                      "particle_index"]
+
+    def __init__(self, pf, num_particles, field_list) :
+        """
+        Base class for generating particle fields which may be applied to
+        streams. Normally this would not be called directly, since it doesn't
+        really do anything except allocate memory. Takes a *pf* to serve as the
+        basis for determining grids, the number of particles *num_particles*,
+        and a list of fields, *field_list*.
+        """
+        self.pf = pf
+        self.num_particles = num_particles
+        self.field_list = field_list
+            
+        try :
+            self.posx_index = self.field_list.index(self.default_fields[0])
+            self.posy_index = self.field_list.index(self.default_fields[1])
+            self.posz_index = self.field_list.index(self.default_fields[2])
+            self.index_index = self.field_list.index(self.default_fields[3])
+        except :
+            raise KeyError("Field list must contain the following fields: " +
+                           "\'particle_position_x\', \'particle_position_y\'" +
+                           ", \'particle_position_z\', \'particle_index\' ")
+
+        self.num_grids = self.pf.h.num_grids
+        self.NumberOfParticles = np.zeros((self.num_grids), dtype='int64')
+        self.ParticleIndices = np.zeros(self.num_grids + 1, dtype='int64')
+        
+        self.num_fields = len(self.field_list)
+        
+        self.particles = np.zeros((self.num_particles, self.num_fields),
+                                  dtype='float64')
+
+    def has_key(self, key) :
+        """
+        Check to see if *key* is in the particle field list.
+        """
+        return (key in self.field_list)
+            
+    def keys(self) :
+        """
+        Return the list of particle fields.
+        """
+        return self.field_list
+    
+    def __getitem__(self, key) :
+        """
+        Get the field associated with key.
+        """
+        return self.particles[:,self.field_list.index(key)]
+    
+    def __setitem__(self, key, val) :
+        """
+        Sets a field to be some other value. Note that we assume
+        that the particles have been sorted by grid already, so
+        make sure the setting of the field is consistent with this.
+        """
+        self.particles[:,self.field_list.index(key)] = val[:]
+                
+    def __len__(self) :
+        """
+        The number of particles
+        """
+        return self.num_particles
+
+    def get_for_grid(self, grid) :
+        """
+        Return a dict containing all of the particle fields in the specified grid.
+        """
+        ind = grid.id-grid._id_offset
+        start = self.ParticleIndices[ind]
+        end = self.ParticleIndices[ind+1]
+        return dict([(field, self.particles[start:end,self.field_list.index(field)])
+                     for field in self.field_list])
+    
+    def _setup_particles(self,x,y,z,setup_fields=None) :
+        """
+        Assigns grids to particles and sets up particle positions. *setup_fields* is
+        a dict of fields other than the particle positions to set up. 
+        """
+        particle_grids, particle_grid_inds = self.pf.h.find_points(x,y,z)
+        idxs = np.argsort(particle_grid_inds)
+        self.particles[:,self.posx_index] = x[idxs]
+        self.particles[:,self.posy_index] = y[idxs]
+        self.particles[:,self.posz_index] = z[idxs]
+        self.NumberOfParticles = np.bincount(particle_grid_inds,
+                                             minlength=self.num_grids)
+        if self.num_grids > 1 :
+            np.add.accumulate(self.NumberOfParticles.squeeze(),
+                              out=self.ParticleIndices[1:])
+        else :
+            self.ParticleIndices[1] = self.NumberOfParticles.squeeze()
+        if setup_fields is not None:
+            for key, value in setup_fields.items():
+                if key not in self.default_fields:
+                    self.particles[:,self.field_list.index(key)] = value[idxs]
+    
+    def assign_indices(self, function=None, **kwargs) :
+        """
+        Assign unique indices to the particles. The default is to just use
+        numpy.arange, but any function may be supplied with keyword arguments.
+        """
+        if function is None :
+            self.particles[:,self.index_index] = np.arange((self.num_particles))
+        else :
+            self.particles[:,self.index_index] = function(**kwargs)
+            
+    def map_grid_fields_to_particles(self, mapping_dict) :
+        r"""
+        For the fields in  *mapping_dict*, map grid fields to the particles
+        using CIC sampling.
+
+        Examples
+        --------
+        >>> field_map = {'Density':'particle_density',
+        >>>              'Temperature':'particle_temperature'}
+        >>> particles.map_grid_fields_to_particles(field_map)
+        """
+        pbar = get_pbar("Mapping fields to particles", self.num_grids)
+        for i, grid in enumerate(self.pf.h.grids) :
+            pbar.update(i)
+            if self.NumberOfParticles[i] > 0:
+                start = self.ParticleIndices[i]
+                end = self.ParticleIndices[i+1]
+                # Note we add one ghost zone to the grid!
+                cube = grid.retrieve_ghost_zones(1, mapping_dict.keys())
+                le = np.array(grid.LeftEdge).astype(np.float64)
+                dims = np.array(grid.ActiveDimensions).astype(np.int32)
+                for gfield, pfield in mapping_dict.items() :
+                    field_index = self.field_list.index(pfield)
+                    CICSample_3(self.particles[start:end,self.posx_index],
+                                self.particles[start:end,self.posy_index],
+                                self.particles[start:end,self.posz_index],
+                                self.particles[start:end,field_index],
+                                np.int64(self.NumberOfParticles[i]),
+                                cube[gfield], le, dims,
+                                np.float64(grid['dx']))
+        pbar.finish()
+
+    def apply_to_stream(self, clobber=False) :
+        """
+        Apply the particles to a stream parameter file. If particles already exist,
+        and clobber=False, do not overwrite them, but add the new ones to them. 
+        """
+        grid_data = []
+        for i,g in enumerate(self.pf.h.grids) :
+            data = {}
+            if clobber :
+                data["number_of_particles"] = self.NumberOfParticles[i]
+            else :
+                data["number_of_particles"] = self.NumberOfParticles[i] + \
+                                              g.NumberOfParticles
+            grid_particles = self.get_for_grid(g)
+            for field in self.field_list :
+                if data["number_of_particles"] > 0 :
+                    # We have particles in this grid
+                    if g.NumberOfParticles > 0 and not clobber:
+                        # Particles already exist
+                        if field in self.pf.h.field_list :
+                            # This field already exists
+                            prev_particles = g[field]
+                        else :
+                            # This one doesn't, set the previous particles' field
+                            # values to zero
+                            prev_particles = np.zeros((g.NumberOfParticles))
+                        data[field] = np.concatenate((prev_particles,
+                                                      grid_particles[field]))
+                    else :
+                        # Particles do not already exist or we're clobbering
+                        data[field] = grid_particles[field]
+                else :
+                    # We don't have particles in this grid
+                    data[field] = np.array([], dtype='float64')
+            grid_data.append(data)
+        self.pf.h.update_data(grid_data)
+
+class FromListParticleGenerator(ParticleGenerator) :
+
+    def __init__(self, pf, num_particles, data) :
+        r"""
+        Generate particle fields from array-like lists contained in a dict.
+
+        Parameters
+        ----------
+        pf : `StaticOutput`
+            The parameter file which will serve as the base for these particles.
+        num_particles : int
+            The number of particles in the dict.
+        data : dict of NumPy arrays
+            The particle fields themselves.
+
+        Examples
+        --------
+        >>> num_p = 100000
+        >>> posx = np.random.random((num_p))
+        >>> posy = np.random.random((num_p))
+        >>> posz = np.random.random((num_p))
+        >>> mass = np.ones((num_p))
+        >>> data = {'particle_position_x': posx, 'particle_position_y': posy,
+        >>>         'particle_position_z': posz, 'particle_mass': mass}
+        >>> particles = FromListParticleGenerator(pf, num_p, data)
+        """
+
+        field_list = data.keys()
+        x = data.pop("particle_position_x")
+        y = data.pop("particle_position_y")
+        z = data.pop("particle_position_z")
+
+        xcond = np.logical_or(x < pf.domain_left_edge[0],
+                              x >= pf.domain_right_edge[0])
+        ycond = np.logical_or(y < pf.domain_left_edge[1],
+                              y >= pf.domain_right_edge[1])
+        zcond = np.logical_or(z < pf.domain_left_edge[2],
+                              z >= pf.domain_right_edge[2])
+        cond = np.logical_or(xcond, ycond)
+        cond = np.logical_or(zcond, cond)
+
+        if np.any(cond) :
+            raise ValueError("Some particles are outside of the domain!!!")
+
+        ParticleGenerator.__init__(self, pf, num_particles, field_list)
+        self._setup_particles(x,y,z,setup_fields=data)
+        
+class LatticeParticleGenerator(ParticleGenerator) :
+
+    def __init__(self, pf, particles_dims, particles_left_edge,
+                 particles_right_edge, field_list) :
+        r"""
+        Generate particles in a lattice arrangement. 
+
+        Parameters
+        ----------
+        pf : `StaticOutput`
+            The parameter file which will serve as the base for these particles.
+        particles_dims : int, array-like 
+            The number of particles along each dimension
+        particles_left_edge : float, array-like
+            The 'left-most' starting positions of the lattice.
+        particles_right_edge : float, array-like
+             The 'right-most' ending positions of the lattice.
+        field_list : list of strings
+             A list of particle fields
+             
+        Examples
+        --------
+        >>> dims = (128,128,128)
+        >>> le = np.array([0.25,0.25,0.25])
+        >>> re = np.array([0.75,0.75,0.75])
+        >>> fields = ["particle_position_x","particle_position_y",
+        >>>           "particle_position_z","particle_index",
+        >>>           "particle_density","particle_temperature"]
+        >>> particles = LatticeParticleGenerator(pf, dims, le, re, fields)
+        """
+
+        num_x = particles_dims[0]
+        num_y = particles_dims[1]
+        num_z = particles_dims[2]
+        xmin = particles_left_edge[0]
+        ymin = particles_left_edge[1]
+        zmin = particles_left_edge[2]
+        xmax = particles_right_edge[0]
+        ymax = particles_right_edge[1]
+        zmax = particles_right_edge[2]
+
+        xcond = (xmin < pf.domain_left_edge[0]) or \
+                (xmax >= pf.domain_right_edge[0])
+        ycond = (ymin < pf.domain_left_edge[1]) or \
+                (ymax >= pf.domain_right_edge[1])
+        zcond = (zmin < pf.domain_left_edge[2]) or \
+                (zmax >= pf.domain_right_edge[2])
+        cond = xcond or ycond or zcond
+
+        if cond :
+            raise ValueError("Proposed bounds for particles are outside domain!!!")
+
+        ParticleGenerator.__init__(self, pf, num_x*num_y*num_z, field_list)
+
+        dx = (xmax-xmin)/(num_x-1)
+        dy = (ymax-ymin)/(num_y-1)
+        dz = (zmax-zmin)/(num_z-1)
+        inds = np.indices((num_x,num_y,num_z))
+        xpos = inds[0]*dx + xmin
+        ypos = inds[1]*dy + ymin
+        zpos = inds[2]*dz + zmin
+        
+        self._setup_particles(xpos.flat[:], ypos.flat[:], zpos.flat[:])
+        
+class WithDensityParticleGenerator(ParticleGenerator) :
+
+    def __init__(self, pf, data_source, num_particles, field_list,
+                 density_field="Density") :
+        r"""
+        Generate particles based on a density field.
+
+        Parameters
+        ----------
+        pf : `StaticOutput`
+            The parameter file which will serve as the base for these particles.
+        data_source : `yt.data_objects.api.AMRData`
+            The data source containing the density field.
+        num_particles : int
+            The number of particles to be generated
+        field_list : list of strings
+            A list of particle fields
+        density_field : string, optional
+            A density field which will serve as the distribution function for the
+            particle positions. Theoretically, this could be any 'per-volume' field. 
+            
+        Examples
+        --------
+        >>> sphere = pf.h.sphere(pf.domain_center, 0.5)
+        >>> num_p = 100000
+        >>> fields = ["particle_position_x","particle_position_y",
+        >>>           "particle_position_z","particle_index",
+        >>>           "particle_density","particle_temperature"]
+        >>> particles = WithDensityParticleGenerator(pf, sphere, num_particles,
+        >>>                                          fields, density_field='Dark_Matter_Density')
+        """
+
+        ParticleGenerator.__init__(self, pf, num_particles, field_list)
+
+        num_cells = len(data_source["x"].flat)
+        max_density = data_source[density_field].max()
+        num_particles_left = num_particles
+        all_x = []
+        all_y = []
+        all_z = []
+        
+        pbar = get_pbar("Generating Particles", num_particles)
+        tot_num_accepted = int(0)
+        
+        while num_particles_left > 0:
+
+            rho = np.random.uniform(high=1.01*max_density,
+                                    size=num_particles_left)
+            idxs = np.random.random_integers(low=0, high=num_cells-1,
+                                             size=num_particles_left)
+            rho_true = data_source[density_field].flat[idxs]
+            accept = rho <= rho_true
+            num_accepted = accept.sum()
+            accepted_idxs = idxs[accept]
+            
+            xpos = data_source["x"].flat[accepted_idxs] + \
+                   np.random.uniform(low=-0.5, high=0.5, size=num_accepted) * \
+                   data_source["dx"].flat[accepted_idxs]
+            ypos = data_source["y"].flat[accepted_idxs] + \
+                   np.random.uniform(low=-0.5, high=0.5, size=num_accepted) * \
+                   data_source["dy"].flat[accepted_idxs]
+            zpos = data_source["z"].flat[accepted_idxs] + \
+                   np.random.uniform(low=-0.5, high=0.5, size=num_accepted) * \
+                   data_source["dz"].flat[accepted_idxs]
+
+            all_x.append(xpos)
+            all_y.append(ypos)
+            all_z.append(zpos)
+
+            num_particles_left -= num_accepted
+            tot_num_accepted += num_accepted
+            pbar.update(tot_num_accepted)
+
+        pbar.finish()
+
+        x = np.concatenate(all_x)
+        y = np.concatenate(all_y)
+        z = np.concatenate(all_z)
+
+        self._setup_particles(x,y,z)
+        


diff -r aac45ed6d4e81ed8707c2843ec5df03bddd69aa4 -r 4d7f451f048afb2716baa8b226457b7319671e08 yt/utilities/tests/test_particle_generator.py
--- /dev/null
+++ b/yt/utilities/tests/test_particle_generator.py
@@ -0,0 +1,105 @@
+import numpy as np
+from yt.testing import *
+from yt.utilities.particle_generator import *
+from yt.frontends.stream.api import load_uniform_grid, refine_amr
+import yt.utilities.initial_conditions as ic
+import yt.utilities.flagging_methods as fm
+from IPython import embed
+
+def setup() :
+    pass
+
+def test_particle_generator() :
+    
+    # First generate our pf
+    domain_dims = (128, 128, 128)
+    dens = np.zeros(domain_dims) + 0.1
+    temp = 4.*np.ones(domain_dims)
+    fields = {"Density": dens, "Temperature": temp}
+    ug = load_uniform_grid(fields, domain_dims, 1.0)
+    fo = [ic.BetaModelSphere(1.0,0.1,0.5,[0.5,0.5,0.5],{"Density":(10.0)})]
+    rc = [fm.flagging_method_registry["overdensity"](4.0)]
+    pf = refine_amr(ug, rc, fo, 3)
+
+    # Now generate particles from density
+
+    field_list = ["particle_position_x","particle_position_y",
+                  "particle_position_z","particle_index",
+                  "particle_gas_density"]
+    num_particles = 1000000
+    field_dict = {"Density": "particle_gas_density"}
+    sphere = pf.h.sphere(pf.domain_center, 0.45)
+
+    particles1 = WithDensityParticleGenerator(pf, sphere, num_particles, field_list)
+    particles1.assign_indices()
+    particles1.map_grid_fields_to_particles(field_dict)
+    
+    # Test to make sure we ended up with the right number of particles per grid
+    particles1.apply_to_stream()
+    particles_per_grid1 = [grid.NumberOfParticles for grid in pf.h.grids]
+    assert_equal(particles_per_grid1, particles1.NumberOfParticles)
+    particles_per_grid1 = [len(grid["particle_position_x"]) for grid in pf.h.grids]
+    assert_equal(particles_per_grid1, particles1.NumberOfParticles)
+
+    # Set up a lattice of particles
+    pdims = np.array([64,64,64])
+    def new_indices() :
+        # We just add new indices onto the existing ones
+        return np.arange((np.product(pdims)))+num_particles
+    le = np.array([0.25,0.25,0.25])
+    re = np.array([0.75,0.75,0.75])
+    new_field_list = field_list + ["particle_gas_temperature"]
+    new_field_dict = {"Density": "particle_gas_density",
+                      "Temperature": "particle_gas_temperature"}
+
+    particles2 = LatticeParticleGenerator(pf, pdims, le, re, new_field_list)
+    particles2.assign_indices(function=new_indices)
+    particles2.map_grid_fields_to_particles(new_field_dict)
+
+    #Test lattice positions
+    xpos = np.unique(particles2["particle_position_x"])
+    ypos = np.unique(particles2["particle_position_y"])
+    zpos = np.unique(particles2["particle_position_z"])
+
+    xpred = np.linspace(le[0],re[0],num=pdims[0],endpoint=True)
+    ypred = np.linspace(le[1],re[1],num=pdims[1],endpoint=True)
+    zpred = np.linspace(le[2],re[2],num=pdims[2],endpoint=True)
+
+    assert_almost_equal(xpos, xpred)
+    assert_almost_equal(ypos, ypred)
+    assert_almost_equal(zpos, zpred)
+
+    #Test the number of particles again
+    particles2.apply_to_stream()
+    particles_per_grid2 = [grid.NumberOfParticles for grid in pf.h.grids]
+    assert_equal(particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles)
+    particles_per_grid2 = [len(grid["particle_position_x"]) for grid in pf.h.grids]
+    assert_equal(particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles)
+
+    #Test the uniqueness of tags
+    tags = np.concatenate([grid["particle_index"] for grid in pf.h.grids])
+    tags.sort()
+    assert_equal(tags, np.arange((np.product(pdims)+num_particles)))
+
+    # Test that the old particles have zero for the new field
+    old_particle_temps = [grid["particle_gas_temperature"][:particles_per_grid1[i]]
+                          for i, grid in enumerate(pf.h.grids)]
+    test_zeros = [np.zeros((particles_per_grid1[i])) 
+                  for i, grid in enumerate(pf.h.grids)]
+    assert_equal(old_particle_temps, test_zeros)
+
+    #Now dump all of these particle fields out into a dict
+    pdata = {}
+    dd = pf.h.all_data()
+    for field in new_field_list :
+        pdata[field] = dd[field]
+
+    #Test the "from-list" generator and particle field clobber
+    particles3 = FromListParticleGenerator(pf, num_particles+np.product(pdims), pdata)
+    particles3.apply_to_stream(clobber=True)
+    
+    #Test the number of particles again
+    particles_per_grid3 = [grid.NumberOfParticles for grid in pf.h.grids]
+    assert_equal(particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles)
+    particles_per_grid2 = [len(grid["particle_position_z"]) for grid in pf.h.grids]
+    assert_equal(particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles)



https://bitbucket.org/yt_analysis/yt/changeset/cf1db8d604f0/
changeset:   cf1db8d604f0
branch:      yt
user:        sskory
date:        2012-12-03 23:29:20
summary:     Simplifying the inline logic.
affected #:  1 file
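
For readers skimming the diff below: the simplified inline path makes every
rank both a reader and a (forked) writer. A standalone sketch of that control
flow, independent of Rockstar itself (the callables here are stand-ins for
handler.start_server / start_writer / start_reader, not actual yt API):

    import os
    import time

    def run_inline(rank, start_server, start_writer, start_reader):
        server_pid = 0
        if rank == 0:
            server_pid = os.fork()
            if server_pid == 0:
                start_server()
                os._exit(0)
        # Stagger startup a little so the clients do not stampede the server.
        time.sleep(0.05 + rank / 10.0)
        # Every rank forks a writer child ...
        writer_pid = os.fork()
        if writer_pid == 0:
            start_writer()
            os._exit(0)
        # ... while the parent process acts as a reader.
        start_reader()
        # Reap the forked children once the reader returns.
        if writer_pid != 0:
            os.waitpid(writer_pid, 0)
        if server_pid != 0:
            os.waitpid(server_pid, 0)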

diff -r 4d7f451f048afb2716baa8b226457b7319671e08 -r cf1db8d604f0866e28e335d8f51646a684171f42 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -41,35 +41,13 @@
 from os import path
 
 class InlineRunner(ParallelAnalysisInterface):
-    def __init__(self, num_writers):
+    def __init__(self):
         # If this is being run inline, num_readers == comm.size, always.
         psize = ytcfg.getint("yt", "__global_parallel_size")
         self.num_readers = psize
-        if num_writers is None:
-            self.num_writers =  psize
-        else:
-            self.num_writers = min(num_writers, psize)
-
-    def split_work(self, pool):
-        avail = range(pool.comm.size)
-        self.writers = []
-        self.readers = []
-        # If we're inline, everyone is a reader.
-        self.readers = avail[:]
-        if self.num_writers == pool.comm.size:
-            # And everyone is a writer!
-            self.writers = avail[:]
-        else:
-            # Everyone is not a writer.
-            # Cyclically assign writers which should approximate
-            # memory load balancing (depending on the mpirun call,
-            # but this should do it in most cases).
-            stride = int(ceil(float(pool.comm.size) / self.num_writers))
-            while len(self.writers) < self.num_writers:
-                self.writers.extend(avail[::stride])
-                for r in avail:
-                    avail.pop(avail.index(r))
-
+        # No choice for you, everyone's a writer too!
+        self.num_writers =  psize
+    
     def run(self, handler, pool):
         # If inline, we use forks.
         server_pid = 0
@@ -79,18 +57,16 @@
             if server_pid == 0:
                 handler.start_server()
                 os._exit(0)
-        # Start writers.
+        # Start writers on all.
         writer_pid = 0
-        if pool.comm.rank in self.writers:
-            time.sleep(0.05 + pool.comm.rank/10.0)
-            writer_pid = os.fork()
-            if writer_pid == 0:
-                handler.start_writer()
-                os._exit(0)
-        # Start readers, not forked.
-        if pool.comm.rank in self.readers:
-            time.sleep(0.05 + pool.comm.rank/10.0)
-            handler.start_reader()
+        time.sleep(0.05 + pool.comm.rank/10.0)
+        writer_pid = os.fork()
+        if writer_pid == 0:
+            handler.start_writer()
+            os._exit(0)
+        # Everyone's a reader!
+        time.sleep(0.05 + pool.comm.rank/10.0)
+        handler.start_reader()
         # Make sure the forks are done, which they should be.
         if writer_pid != 0:
             os.waitpid(writer_pid, 0)
@@ -117,10 +93,6 @@
                     self.num_readers, self.num_writers, psize)
             raise RuntimeError
     
-    def split_work(self, pool):
-        self.readers = np.arange(self.num_readers) + 1
-        self.writers = np.arange(self.num_writers) + 1 + self.num_readers
-    
     def run(self, handler, wg):
         # Not inline so we just launch them directly from our MPI threads.
         if wg.name == "server":
@@ -208,7 +180,7 @@
         ParallelAnalysisInterface.__init__(self)
         # Decide how we're working.
         if ytcfg.getboolean("yt", "inline") == True:
-            self.runner = InlineRunner(num_writers)
+            self.runner = InlineRunner()
         else:
             self.runner = StandardRunner(num_readers, num_writers)
         self.num_readers = self.runner.num_readers



https://bitbucket.org/yt_analysis/yt/changeset/b5ca396e761c/
changeset:   b5ca396e761c
branch:      yt
user:        sskory
date:        2012-12-03 23:31:26
summary:     Forgot to remove split_work lower down.
affected #:  1 file

diff -r cf1db8d604f0866e28e335d8f51646a684171f42 -r b5ca396e761c53b24187084e68cf56e36ab8eda3 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -300,8 +300,6 @@
         if self.comm.size == 1:
             self.handler.call_rockstar()
         else:
-            # Split up the work.
-            self.runner.split_work(self.pool)
             # And run it!
             self.runner.run(self.handler, self.workgroup)
         self.comm.barrier()



https://bitbucket.org/yt_analysis/yt/changeset/c65043d32eb2/
changeset:   c65043d32eb2
branch:      yt
user:        sskory
date:        2012-12-04 01:28:30
summary:     Adding the total_particles and dm_only flags to Rockstar, which address issue #469 and issue #468, respectively.
affected #:  2 files
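
A minimal usage sketch of the two new keywords (the explicit import path and
the dataset glob and particle count below are placeholders/assumptions, not
taken from this commit):

    from yt.mods import *
    from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder

    ts = TimeSeriesData.from_filenames("/path/to/snapshots/DD????/DD????")
    # total_particles skips the parallel particle count (issue #469); dm_only
    # skips the per-particle type check for DM-only runs (issue #468).
    rh = RockstarHaloFinder(ts, total_particles=128**3, dm_only=True)
    rh.run()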

diff -r b5ca396e761c53b24187084e68cf56e36ab8eda3 -r c65043d32eb217d1ebf63af21deede2bf4310bec yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -111,12 +111,12 @@
              (self.num_readers, "readers"),
              (self.num_writers, "writers") ]
         )
-        return pool, workgrup
+        return pool, workgroup
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
     def __init__(self, ts, num_readers = 1, num_writers = None,
             outbase="rockstar_halos", dm_type=1, 
-            force_res=None):
+            force_res=None, total_particles=None, dm_only=False):
         r"""Spawns the Rockstar Halo finder, distributes dark matter
         particles and finds halos.
 
@@ -156,6 +156,17 @@
             last data snapshot (i.e. the one where time has evolved the
             longest) in the time series:
             ``pf_last.h.get_smallest_dx() * pf_last['mpch']``.
+        total_particles : int
+            If supplied, this is a pre-calculated total number of dark matter
+            particles present in the simulation. For example, this is useful
+            when analyzing a series of snapshots in which the number of dark
+            matter particles does not change, since it saves some disk access
+            time. If left unspecified, it will be calculated automatically.
+            Default: ``None``.
+        dm_only : boolean
+            If set to ``True``, it will be assumed that there are only dark
+            matter particles present in the simulation. This can save analysis
+            time if this is indeed the case. Default: ``False``.
             
         Returns
         -------
@@ -202,6 +213,8 @@
             del tpf
         else:
             self.force_res = force_res
+        self.total_particles = total_particles
+        self.dm_only = dm_only
         # Setup pool and workgroups.
         self.pool, self.workgroup = self.runner.setup_pool()
         p = self._setup_parameters(ts)
@@ -213,26 +226,31 @@
         if self.workgroup.name != "readers": return None
         tpf = ts[0]
         def _particle_count(field, data):
+            if self.dm_only:
+                return np.prod(data["particle_position_x"].shape)
             try:
                 return (data["particle_type"]==self.dm_type).sum()
             except KeyError:
                 return np.prod(data["particle_position_x"].shape)
         add_field("particle_count", function=_particle_count,
                   not_in_all=True, particle_type=True)
-        # Get total_particles in parallel.
         dd = tpf.h.all_data()
         # Get DM particle mass.
         all_fields = set(tpf.h.derived_field_list + tpf.h.field_list)
         for g in tpf.h._get_objs("grids"):
             if g.NumberOfParticles == 0: continue
-            if "particle_type" in all_fields:
+            if self.dm_only:
+                iddm = Ellipsis
+            elif "particle_type" in all_fields:
                 iddm = g["particle_type"] == self.dm_type
             else:
                 iddm = Ellipsis
             particle_mass = g['ParticleMassMsun'][iddm][0] / tpf.hubble_constant
             break
         p = {}
-        p['total_particles'] = int(dd.quantities['TotalQuantity']('particle_count')[0])
+        if self.total_particles is None:
+            # Get total_particles in parallel.
+            p['total_particles'] = int(dd.quantities['TotalQuantity']('particle_count')[0])
         p['left_edge'] = tpf.domain_left_edge
         p['right_edge'] = tpf.domain_right_edge
         p['center'] = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
@@ -281,6 +299,7 @@
                     outbase = self.outbase,
                     force_res = self.force_res,
                     particle_mass = float(self.particle_mass),
+                    dm_only = int(self.dm_only),
                     **kwargs)
         # Make the directory to store the halo lists in.
         if self.comm.rank == 0:


diff -r b5ca396e761c53b24187084e68cf56e36ab8eda3 -r c65043d32eb217d1ebf63af21deede2bf4310bec yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -170,7 +170,9 @@
         local_parts = 0
         for g in pf.h._get_objs("grids"):
             if g.NumberOfParticles == 0: continue
-            if "particle_type" in all_fields:
+            if rh.dm_only:
+                iddm = Ellipsis
+            elif "particle_type" in all_fields:
                 iddm = g["particle_type"] == rh.dm_type
             else:
                 iddm = Ellipsis
@@ -193,7 +195,9 @@
     pi = 0
     for g in pf.h._get_objs("grids"):
         if g.NumberOfParticles == 0: continue
-        if "particle_type" in all_fields:
+        if rh.dm_only:
+            iddm = Ellipsis
+        elif "particle_type" in all_fields:
             iddm = g["particle_type"] == rh.dm_type
         else:
             iddm = Ellipsis
@@ -225,6 +229,7 @@
     cdef public int block_ratio
     cdef public int dm_type
     cdef public int total_particles
+    cdef public int dm_only
 
     def __cinit__(self, ts):
         self.ts = ts
@@ -238,7 +243,8 @@
                        int num_writers = 1,
                        int writing_port = -1, int block_ratio = 1,
                        int periodic = 1, force_res=None,
-                       int min_halo_size = 25, outbase = "None"):
+                       int min_halo_size = 25, outbase = "None",
+                       int dm_only = 0):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
@@ -269,6 +275,7 @@
         MIN_HALO_OUTPUT_SIZE=min_halo_size
         TOTAL_PARTICLES = total_particles
         self.block_ratio = block_ratio
+        self.dm_only = dm_only
         
         tpf = self.ts[0]
         h0 = tpf.hubble_constant



https://bitbucket.org/yt_analysis/yt/changeset/3ffe2b24d1a7/
changeset:   3ffe2b24d1a7
branch:      yt
user:        brittonsmith
date:        2012-12-04 16:19:24
summary:     Merged in MatthewTurk/yt (pull request #353)
affected #:  4 files
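
The restructured runners in this merge replace manual rank bookkeeping with
named workgroups. Roughly, the dispatch works like the sketch below (the
import path and workgroup sizes are assumptions for illustration; the handler
calls are left as comments):

    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        ProcessorPool

    # One rank serves, a few ranks read, the rest write (sizes illustrative;
    # this assumes the script is launched under mpirun with parallel yt).
    pool, workgroup = ProcessorPool.from_sizes(
        [(1, "server"), (2, "readers"), (5, "writers")])
    # Each MPI rank lands in exactly one workgroup and runs the matching task.
    if workgroup.name == "server":
        pass  # handler.start_server()
    elif workgroup.name == "readers":
        pass  # handler.start_reader()
    elif workgroup.name == "writers":
        pass  # handler.start_writer()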

diff -r 61f553c23c6aa7f9aefa2e53d9879c61c0733eaa -r 3ffe2b24d1a76ebd778e7a47b4caaab4c0eda90c doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -434,8 +434,7 @@
 echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
 echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
 echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
-echo 'd91fa23d8f86c5f5b3df210731c14c92a2e3d999979ef2c4c67be3dda19a8a8d2363f8cd23611957280c17f61653b0e66f59d3f5515b6d443b22cddeaef86ab6  Rockstar-0.99.tar.gz' > Rockstar-0.99.tar.gz.sha512
-
+echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
@@ -705,14 +704,14 @@
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
 then
-    if [ ! -e Rockstar-0.99/done ]
+    if [ ! -e Rockstar/done ]
     then
-        [ ! -e Rockstar-0.99 ] && tar xfz Rockstar-0.99.tar.gz
+        [ ! -e Rockstar ] && tar xfz rockstar-0.99.6.tar.gz
         echo "Building Rockstar"
-        cd Rockstar-0.99
+        cd Rockstar
         ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         cp librockstar.so ${DEST_DIR}/lib
-        ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar-0.99
+        ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar
         echo $ROCKSTAR_DIR > ${YT_DIR}/rockstar.cfg
         touch done
         cd ..


diff -r 61f553c23c6aa7f9aefa2e53d9879c61c0733eaa -r 3ffe2b24d1a76ebd778e7a47b4caaab4c0eda90c yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -75,11 +75,6 @@
     dont_wrap = ["get_sphere", "write_particle_list"]
     extra_wrap = ["__getitem__"]
 
-    # Used for Loaded and RockstarHalos
-    _saved_fields = {}
-    _ds_sort = None
-    _particle_mask = None
-
     def __init__(self, halo_list, id, indices=None, size=None, CoM=None,
         max_dens_point=None, group_total_mass=None, max_radius=None,
         bulk_vel=None, tasks=None, rms_vel=None, supp=None):
@@ -111,6 +106,9 @@
             self.supp = {}
         else:
             self.supp = supp
+        self._saved_fields = {}
+        self._ds_sort = None
+        self._particle_mask = None
 
     @property
     def particle_mask(self):
@@ -1323,18 +1321,24 @@
     _halo_dt = np.dtype([('id', np.int64), ('pos', (np.float32, 6)),
         ('corevel', (np.float32, 3)), ('bulkvel', (np.float32, 3)),
         ('m', np.float32), ('r', np.float32), ('child_r', np.float32),
+        ('vmax_r', np.float32), 
         ('mgrav', np.float32), ('vmax', np.float32),
         ('rvmax', np.float32), ('rs', np.float32),
+        ('klypin_rs', np.float32), 
         ('vrms', np.float32), ('J', (np.float32, 3)),
         ('energy', np.float32), ('spin', np.float32),
-        ('padding1', np.float32), ('num_p', np.int64),
+        ('alt_m', (np.float32, 4)), ('Xoff', np.float32),
+        ('Voff', np.float32), ('b_to_a', np.float32),
+        ('c_to_a', np.float32), ('A', (np.float32, 3)),
+        ('bullock_spin', np.float32), ('kin_to_pot', np.float32),
+        ('num_p', np.int64),
         ('num_child_particles', np.int64), ('p_start', np.int64),
         ('desc', np.int64), ('flags', np.int64), ('n_core', np.int64),
         ('min_pos_err', np.float32), ('min_vel_err', np.float32),
         ('min_bulkvel_err', np.float32), ('padding2', np.float32),])
-    # Above, padding1&2 are due to c byte ordering which pads between
+    # Above, padding* are due to c byte ordering which pads between
     # 4 and 8 byte values in the struct as to not overlap memory registers.
-    _tocleanup = ['padding1', 'padding2']
+    _tocleanup = ['padding2']
 
     def __init__(self, pf, out_list):
         ParallelAnalysisInterface.__init__(self)


diff -r 61f553c23c6aa7f9aefa2e53d9879c61c0733eaa -r 3ffe2b24d1a76ebd778e7a47b4caaab4c0eda90c yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -40,61 +40,14 @@
 from os import mkdir
 from os import path
 
-# Get some definitions from Rockstar directly.
-if "ROCKSTAR_DIR" in os.environ:
-    ROCKSTAR_DIR = os.environ["ROCKSTAR_DIR"]
-elif os.path.exists("rockstar.cfg"):
-    ROCKSTAR_DIR = open("rockstar.cfg").read().strip()
-else:
-    print "Reading Rockstar location from rockstar.cfg failed."
-    print "Please place the base directory of your"
-    print "Rockstar install in rockstar.cfg and restart."
-    print "(ex: \"echo '/path/to/Rockstar-0.99' > rockstar.cfg\" )"
-    sys.exit(1)
-lines = file(path.join(ROCKSTAR_DIR, 'server.h'))
-READER_TYPE = None
-WRITER_TYPE = None
-for line in lines:
-    if "READER_TYPE" in line:
-        line = line.split()
-        READER_TYPE = int(line[-1])
-    if "WRITER_TYPE" in line:
-        line = line.split()
-        WRITER_TYPE = int(line[-1])
-    if READER_TYPE != None and WRITER_TYPE != None:
-        break
-lines.close()
-
 class InlineRunner(ParallelAnalysisInterface):
-    def __init__(self, num_writers):
+    def __init__(self):
         # If this is being run inline, num_readers == comm.size, always.
-        self.num_readers = ytcfg.getint("yt", "__global_parallel_size")
-        if num_writers is None:
-            self.num_writers =  ytcfg.getint("yt", "__global_parallel_size")
-        else:
-            self.num_writers = min(num_writers,
-                ytcfg.getint("yt", "__global_parallel_size"))
-
-    def split_work(self, pool):
-        avail = range(pool.comm.size)
-        self.writers = []
-        self.readers = []
-        # If we're inline, everyone is a reader.
-        self.readers = avail[:]
-        if self.num_writers == pool.comm.size:
-            # And everyone is a writer!
-            self.writers = avail[:]
-        else:
-            # Everyone is not a writer.
-            # Cyclically assign writers which should approximate
-            # memory load balancing (depending on the mpirun call,
-            # but this should do it in most cases).
-            stride = int(ceil(float(pool.comm.size) / self.num_writers))
-            while len(self.writers) < self.num_writers:
-                self.writers.extend(avail[::stride])
-                for r in readers:
-                    avail.pop(avail.index(r))
-
+        psize = ytcfg.getint("yt", "__global_parallel_size")
+        self.num_readers = psize
+        # No choice for you, everyone's a writer too!
+        self.num_writers =  psize
+    
     def run(self, handler, pool):
         # If inline, we use forks.
         server_pid = 0
@@ -104,67 +57,66 @@
             if server_pid == 0:
                 handler.start_server()
                 os._exit(0)
-        # Start writers.
+        # Start writers on all.
         writer_pid = 0
-        if pool.comm.rank in self.writers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
-            writer_pid = os.fork()
-            if writer_pid == 0:
-                handler.start_client(WRITER_TYPE)
-                os._exit(0)
-        # Start readers, not forked.
-        if pool.comm.rank in self.readers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
-            handler.start_client(READER_TYPE)
+        time.sleep(0.05 + pool.comm.rank/10.0)
+        writer_pid = os.fork()
+        if writer_pid == 0:
+            handler.start_writer()
+            os._exit(0)
+        # Everyone's a reader!
+        time.sleep(0.05 + pool.comm.rank/10.0)
+        handler.start_reader()
         # Make sure the forks are done, which they should be.
         if writer_pid != 0:
             os.waitpid(writer_pid, 0)
         if server_pid != 0:
             os.waitpid(server_pid, 0)
 
+    def setup_pool(self):
+        pool = ProcessorPool()
+        # Everyone is a reader, and when we're inline, that's all that matters.
+        readers = np.arange(ytcfg.getint("yt", "__global_parallel_size"))
+        pool.add_workgroup(ranks=readers, name="readers")
+        return pool, pool.workgroups[0]
+
 class StandardRunner(ParallelAnalysisInterface):
     def __init__(self, num_readers, num_writers):
         self.num_readers = num_readers
+        psize = ytcfg.getint("yt", "__global_parallel_size")
         if num_writers is None:
-            self.num_writers = ytcfg.getint("yt", "__global_parallel_size") \
-                - num_readers - 1
+            self.num_writers =  psize - num_readers - 1
         else:
-            self.num_writers = min(num_writers,
-                ytcfg.getint("yt", "__global_parallel_size"))
-        if self.num_readers + self.num_writers + 1 != ytcfg.getint("yt", \
-                "__global_parallel_size"):
+            self.num_writers = min(num_writers, psize)
+        if self.num_readers + self.num_writers + 1 != psize:
             mylog.error('%i reader + %i writers != %i mpi',
-                    self.num_readers, self.num_writers,
-                    ytcfg.getint("yt", "__global_parallel_size"))
+                    self.num_readers, self.num_writers, psize)
             raise RuntimeError
     
-    def split_work(self, pool):
-        # Who is going to do what.
-        avail = range(pool.comm.size)
-        self.writers = []
-        self.readers = []
-        # If we're not running inline, rank 0 should be removed immediately.
-        avail.pop(0)
-        # Now we assign the rest.
-        for i in range(self.num_readers):
-            self.readers.append(avail.pop(0))
-        for i in range(self.num_writers):
-            self.writers.append(avail.pop(0))
+    def run(self, handler, wg):
+        # Not inline so we just launch them directly from our MPI threads.
+        if wg.name == "server":
+            handler.start_server()
+        if wg.name == "readers":
+            time.sleep(0.05)
+            handler.start_reader()
+        if wg.name == "writers":
+            time.sleep(0.1)
+            handler.start_writer()
     
-    def run(self, handler, pool):
-        # Not inline so we just launch them directly from our MPI threads.
-        if pool.comm.rank == 0:
-            handler.start_server()
-        if pool.comm.rank in self.readers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
-            handler.start_client(READER_TYPE)
-        if pool.comm.rank in self.writers:
-            time.sleep(0.2 + pool.comm.rank/10.0)
-            handler.start_client(WRITER_TYPE)
+    def setup_pool(self):
+        pool = ProcessorPool()
+        pool, workgroup = ProcessorPool.from_sizes(
+           [ (1, "server"),
+             (self.num_readers, "readers"),
+             (self.num_writers, "writers") ]
+        )
+        return pool, workgroup
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
-    def __init__(self, ts, num_readers = 1, num_writers = None, 
-            outbase=None,particle_mass=-1.0,dm_type=1,force_res=None):
+    def __init__(self, ts, num_readers = 1, num_writers = None,
+            outbase="rockstar_halos", dm_type=1, 
+            force_res=None, total_particles=None, dm_only=False):
         r"""Spawns the Rockstar Halo finder, distributes dark matter
         particles and finds halos.
 
@@ -193,8 +145,6 @@
         outbase: str
             This is where the out*list files that Rockstar makes should be
             placed. Default is 'rockstar_halos'.
-        particle_mass: float
-            This sets the DM particle mass used in Rockstar.
         dm_type: 1
             In order to exclude stars and other particle types, define
             the dm_type. Default is 1, as Enzo has the DM particle type=1.
@@ -206,6 +156,17 @@
             last data snapshot (i.e. the one where time has evolved the
             longest) in the time series:
             ``pf_last.h.get_smallest_dx() * pf_last['mpch']``.
+        total_particles : int
+            If supplied, this is a pre-calculated total number of dark matter
+            particles present in the simulation. For example, this is useful
+            when analyzing a series of snapshots in which the number of dark
+            matter particles does not change, since it saves some disk access
+            time. If left unspecified, it will be calculated automatically.
+            Default: ``None``.
+        dm_only : boolean
+            If set to ``True``, it will be assumed that there are only dark
+            matter particles present in the simulation. This can save analysis
+            time if this is indeed the case. Default: ``False``.
             
         Returns
         -------
@@ -222,16 +183,15 @@
         from yt.mods import *
         import sys
 
-        files = glob.glob('/u/cmoody3/data/a*')
-        files.sort()
-        ts = TimeSeriesData.from_filenames(files)
+        ts = TimeSeriesData.from_filenames('/u/cmoody3/data/a*')
         pm = 7.81769027e+11
-        rh = RockstarHaloFinder(ts, particle_mass=pm)
+        rh = RockstarHaloFinder(ts)
         rh.run()
         """
+        ParallelAnalysisInterface.__init__(self)
         # Decide how we're working.
         if ytcfg.getboolean("yt", "inline") == True:
-            self.runner = InlineRunner(num_writers)
+            self.runner = InlineRunner()
         else:
             self.runner = StandardRunner(num_readers, num_writers)
         self.num_readers = self.runner.num_readers
@@ -241,52 +201,75 @@
         # Note that Rockstar does not support subvolumes.
         # We assume that all of the snapshots in the time series
         # use the same domain info as the first snapshots.
-        if not isinstance(ts,TimeSeriesData):
+        if not isinstance(ts, TimeSeriesData):
             ts = TimeSeriesData([ts])
         self.ts = ts
         self.dm_type = dm_type
-        tpf = ts.__iter__().next()
+        self.outbase = outbase
+        if force_res is None:
+            tpf = ts[-1] # Cache a reference
+            self.force_res = tpf.h.get_smallest_dx() * tpf['mpch']
+            # We have to delete now to wipe the hierarchy
+            del tpf
+        else:
+            self.force_res = force_res
+        self.total_particles = total_particles
+        self.dm_only = dm_only
+        # Setup pool and workgroups.
+        self.pool, self.workgroup = self.runner.setup_pool()
+        p = self._setup_parameters(ts)
+        params = self.comm.mpi_bcast(p, root = self.pool['readers'].ranks[0])
+        self.__dict__.update(params)
+        self.handler = rockstar_interface.RockstarInterface(self.ts)
+
+    def _setup_parameters(self, ts):
+        if self.workgroup.name != "readers": return None
+        tpf = ts[0]
         def _particle_count(field, data):
+            if self.dm_only:
+                return np.prod(data["particle_position_x"].shape)
             try:
-                return (data["particle_type"]==dm_type).sum()
+                return (data["particle_type"]==self.dm_type).sum()
             except KeyError:
                 return np.prod(data["particle_position_x"].shape)
-        add_field("particle_count",function=_particle_count, not_in_all=True,
-            particle_type=True)
-        # Get total_particles in parallel.
+        add_field("particle_count", function=_particle_count,
+                  not_in_all=True, particle_type=True)
         dd = tpf.h.all_data()
-        self.total_particles = int(dd.quantities['TotalQuantity']('particle_count')[0])
-        self.hierarchy = tpf.h
-        self.particle_mass = particle_mass 
-        self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
-        if outbase is None:
-            outbase = 'rockstar_halos'
-        self.outbase = outbase
-        self.particle_mass = particle_mass
-        if force_res is None:
-            self.force_res = ts[-1].h.get_smallest_dx() * ts[-1]['mpch']
-        else:
-            self.force_res = force_res
-        self.left_edge = tpf.domain_left_edge
-        self.right_edge = tpf.domain_right_edge
-        self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
-        # We set up the workgroups *before* initializing
-        # ParallelAnalysisInterface. Everyone is their own workgroup!
-        self.pool = ProcessorPool()
-        for i in range(ytcfg.getint("yt", "__global_parallel_size")):
-             self.pool.add_workgroup(size=1)
-        ParallelAnalysisInterface.__init__(self)
-        for wg in self.pool.workgroups:
-            if self.pool.comm.rank in wg.ranks:
-                self.workgroup = wg
-        self.handler = rockstar_interface.RockstarInterface(
-                self.ts, dd)
+        # Get DM particle mass.
+        all_fields = set(tpf.h.derived_field_list + tpf.h.field_list)
+        for g in tpf.h._get_objs("grids"):
+            if g.NumberOfParticles == 0: continue
+            if self.dm_only:
+                iddm = Ellipsis
+            elif "particle_type" in all_fields:
+                iddm = g["particle_type"] == self.dm_type
+            else:
+                iddm = Ellipsis
+            particle_mass = g['ParticleMassMsun'][iddm][0] / tpf.hubble_constant
+            break
+        p = {}
+        if self.total_particles is None:
+            # Get total_particles in parallel.
+            p['total_particles'] = int(dd.quantities['TotalQuantity']('particle_count')[0])
+        p['left_edge'] = tpf.domain_left_edge
+        p['right_edge'] = tpf.domain_right_edge
+        p['center'] = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
+        p['particle_mass'] = particle_mass
+        return p
+
 
     def __del__(self):
-        self.pool.free_all()
+        try:
+            self.pool.free_all()
+        except AttributeError:
+            # This really only acts to cut down on the misleading
+            # error messages when/if this class is called incorrectly
+            # or some other error happens and self.pool hasn't been created
+            # already.
+            pass
 
     def _get_hosts(self):
-        if self.pool.comm.size == 1 or self.pool.comm.rank == 0:
+        if self.comm.rank == 0 or self.comm.size == 1:
             server_address = socket.gethostname()
             sock = socket.socket()
             sock.bind(('', 0))
@@ -294,7 +277,7 @@
             del sock
         else:
             server_address, port = None, None
-        self.server_address, self.port = self.pool.comm.mpi_bcast(
+        self.server_address, self.port = self.comm.mpi_bcast(
             (server_address, port))
         self.port = str(self.port)
 
@@ -308,19 +291,20 @@
         self.handler.setup_rockstar(self.server_address, self.port,
                     len(self.ts), self.total_particles, 
                     self.dm_type,
-                    parallel = self.pool.comm.size > 1,
+                    parallel = self.comm.size > 1,
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,
                     writing_port = -1,
                     block_ratio = block_ratio,
                     outbase = self.outbase,
-                    force_res=self.force_res,
+                    force_res = self.force_res,
                     particle_mass = float(self.particle_mass),
+                    dm_only = int(self.dm_only),
                     **kwargs)
         # Make the directory to store the halo lists in.
-        if self.pool.comm.rank == 0:
+        if self.comm.rank == 0:
             if not os.path.exists(self.outbase):
-                os.mkdir(self.outbase)
+                os.makedirs(self.outbase)
             # Make a record of which dataset corresponds to which set of
             # output files because it will be easy to lose this connection.
             fp = open(self.outbase + '/pfs.txt', 'w')
@@ -331,15 +315,13 @@
                 fp.write(line)
             fp.close()
         # This barrier makes sure the directory exists before it might be used.
-        self.pool.comm.barrier()
-        if self.pool.comm.size == 1:
+        self.comm.barrier()
+        if self.comm.size == 1:
             self.handler.call_rockstar()
         else:
-            # Split up the work.
-            self.runner.split_work(self.pool)
             # And run it!
-            self.runner.run(self.handler, self.pool)
-        self.pool.comm.barrier()
+            self.runner.run(self.handler, self.workgroup)
+        self.comm.barrier()
         self.pool.free_all()
     
     def halo_list(self,file_name='out_0.list'):


diff -r 61f553c23c6aa7f9aefa2e53d9879c61c0733eaa -r 3ffe2b24d1a76ebd778e7a47b4caaab4c0eda90c yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -48,13 +48,15 @@
 
 cdef import from "server.h" nogil:
     int server()
+    np.int64_t READER_TYPE
+    np.int64_t WRITER_TYPE
 
 cdef import from "client.h" nogil:
     void client(np.int64_t in_type)
 
 cdef import from "meta_io.h":
     void read_particles(char *filename)
-    void output_and_free_halos(np.int64_t id_offset, np.int64_t snap, 
+    void output_halos(np.int64_t id_offset, np.int64_t snap, 
 			   np.int64_t chunk, float *bounds)
 
 cdef import from "config_vars.h":
@@ -144,101 +146,9 @@
     np.float64_t AVG_PARTICLE_SPACING
     np.int64_t SINGLE_SNAP
 
-def print_rockstar_settings():
-    # We have to do the config
-    print "FILE_FORMAT =", FILE_FORMAT
-    print "PARTICLE_MASS =", PARTICLE_MASS
+# Forward declare
+cdef class RockstarInterface
 
-    print "MASS_DEFINITION =", MASS_DEFINITION
-    print "MIN_HALO_OUTPUT_SIZE =", MIN_HALO_OUTPUT_SIZE
-    print "FORCE_RES =", FORCE_RES
-
-    print "SCALE_NOW =", SCALE_NOW
-    print "h0 =", h0
-    print "Ol =", Ol
-    print "Om =", Om
-
-    print "GADGET_ID_BYTES =", GADGET_ID_BYTES
-    print "GADGET_MASS_CONVERSION =", GADGET_MASS_CONVERSION
-    print "GADGET_LENGTH_CONVERSION =", GADGET_LENGTH_CONVERSION
-    print "GADGET_SKIP_NON_HALO_PARTICLES =", GADGET_SKIP_NON_HALO_PARTICLES
-    print "RESCALE_PARTICLE_MASS =", RESCALE_PARTICLE_MASS
-
-    print "PARALLEL_IO =", PARALLEL_IO
-    print "PARALLEL_IO_SERVER_ADDRESS =", PARALLEL_IO_SERVER_ADDRESS
-    print "PARALLEL_IO_SERVER_PORT =", PARALLEL_IO_SERVER_PORT
-    print "PARALLEL_IO_WRITER_PORT =", PARALLEL_IO_WRITER_PORT
-    print "PARALLEL_IO_SERVER_INTERFACE =", PARALLEL_IO_SERVER_INTERFACE
-    print "RUN_ON_SUCCESS =", RUN_ON_SUCCESS
-
-    print "INBASE =", INBASE
-    print "FILENAME =", FILENAME
-    print "STARTING_SNAP =", STARTING_SNAP
-    print "NUM_SNAPS =", NUM_SNAPS
-    print "NUM_BLOCKS =", NUM_BLOCKS
-    print "NUM_READERS =", NUM_READERS
-    print "PRELOAD_PARTICLES =", PRELOAD_PARTICLES
-    print "SNAPSHOT_NAMES =", SNAPSHOT_NAMES
-    print "LIGHTCONE_ALT_SNAPS =", LIGHTCONE_ALT_SNAPS
-    print "BLOCK_NAMES =", BLOCK_NAMES
-
-    print "OUTBASE =", OUTBASE
-    print "OVERLAP_LENGTH =", OVERLAP_LENGTH
-    print "NUM_WRITERS =", NUM_WRITERS
-    print "FORK_READERS_FROM_WRITERS =", FORK_READERS_FROM_WRITERS
-    print "FORK_PROCESSORS_PER_MACHINE =", FORK_PROCESSORS_PER_MACHINE
-
-    print "OUTPUT_FORMAT =", OUTPUT_FORMAT
-    print "DELETE_BINARY_OUTPUT_AFTER_FINISHED =", DELETE_BINARY_OUTPUT_AFTER_FINISHED
-    print "FULL_PARTICLE_CHUNKS =", FULL_PARTICLE_CHUNKS
-    print "BGC2_SNAPNAMES =", BGC2_SNAPNAMES
-
-    print "BOUND_PROPS =", BOUND_PROPS
-    print "BOUND_OUT_TO_HALO_EDGE =", BOUND_OUT_TO_HALO_EDGE
-    print "DO_MERGER_TREE_ONLY =", DO_MERGER_TREE_ONLY
-    print "IGNORE_PARTICLE_IDS =", IGNORE_PARTICLE_IDS
-    print "TRIM_OVERLAP =", TRIM_OVERLAP
-    print "ROUND_AFTER_TRIM =", ROUND_AFTER_TRIM
-    print "LIGHTCONE =", LIGHTCONE
-    print "PERIODIC =", PERIODIC
-
-    print "LIGHTCONE_ORIGIN =", LIGHTCONE_ORIGIN[0]
-    print "LIGHTCONE_ORIGIN[1] =", LIGHTCONE_ORIGIN[1]
-    print "LIGHTCONE_ORIGIN[2] =", LIGHTCONE_ORIGIN[2]
-    print "LIGHTCONE_ALT_ORIGIN =", LIGHTCONE_ALT_ORIGIN[0]
-    print "LIGHTCONE_ALT_ORIGIN[1] =", LIGHTCONE_ALT_ORIGIN[1]
-    print "LIGHTCONE_ALT_ORIGIN[2] =", LIGHTCONE_ALT_ORIGIN[2]
-
-    print "LIMIT_CENTER =", LIMIT_CENTER[0]
-    print "LIMIT_CENTER[1] =", LIMIT_CENTER[1]
-    print "LIMIT_CENTER[2] =", LIMIT_CENTER[2]
-    print "LIMIT_RADIUS =", LIMIT_RADIUS
-
-    print "SWAP_ENDIANNESS =", SWAP_ENDIANNESS
-    print "GADGET_VARIANT =", GADGET_VARIANT
-
-    print "FOF_FRACTION =", FOF_FRACTION
-    print "FOF_LINKING_LENGTH =", FOF_LINKING_LENGTH
-    print "INCLUDE_HOST_POTENTIAL_RATIO =", INCLUDE_HOST_POTENTIAL_RATIO
-    print "DOUBLE_COUNT_SUBHALO_MASS_RATIO =", DOUBLE_COUNT_SUBHALO_MASS_RATIO
-    print "TEMPORAL_HALO_FINDING =", TEMPORAL_HALO_FINDING
-    print "MIN_HALO_PARTICLES =", MIN_HALO_PARTICLES
-    print "UNBOUND_THRESHOLD =", UNBOUND_THRESHOLD
-    print "ALT_NFW_METRIC =", ALT_NFW_METRIC
-
-    print "TOTAL_PARTICLES =", TOTAL_PARTICLES
-    print "BOX_SIZE =", BOX_SIZE
-    print "OUTPUT_HMAD =", OUTPUT_HMAD
-    print "OUTPUT_PARTICLES =", OUTPUT_PARTICLES
-    print "OUTPUT_LEVELS =", OUTPUT_LEVELS
-    print "DUMP_PARTICLES =", DUMP_PARTICLES[0]
-    print "DUMP_PARTICLES[1] =", DUMP_PARTICLES[1]
-    print "DUMP_PARTICLES[2] =", DUMP_PARTICLES[2]
-
-    print "AVG_PARTICLE_SPACING =", AVG_PARTICLE_SPACING
-    print "SINGLE_SNAP =", SINGLE_SNAP
-
-cdef class RockstarInterface
 cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p) with gil:
     global SCALE_NOW
     cdef np.float64_t conv[6], left_edge[6]
@@ -250,31 +160,19 @@
     block = int(str(filename).rsplit(".")[-1])
     n = rh.block_ratio
 
-    all_grids = pf.h.grids
     SCALE_NOW = 1.0/(pf.current_redshift+1.0)
     # Now we want to grab data from only a subset of the grids for each reader.
-    if NUM_BLOCKS == 1:
-        grids = all_grids
-    else:
-        if ytcfg.getboolean("yt", "inline") == False:
-            fnames = np.array([g.filename for g in all_grids])
-            sort = fnames.argsort()
-            grids = np.array_split(all_grids[sort], NUM_BLOCKS)[block]
-        else:
-            # We must be inline, grap only the local grids.
-            grids  = [g for g in all_grids if g.proc_num ==
-                          ytcfg.getint('yt','__topcomm_parallel_rank')]
-    
     all_fields = set(pf.h.derived_field_list + pf.h.field_list)
 
     # First we need to find out how many this reader is going to read in
     # if the number of readers > 1.
     if NUM_BLOCKS > 1:
         local_parts = 0
-        for g in grids:
+        for g in pf.h._get_objs("grids"):
             if g.NumberOfParticles == 0: continue
-            if "particle_type" in all_fields:
-                #iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
+            if rh.dm_only:
+                iddm = Ellipsis
+            elif "particle_type" in all_fields:
                 iddm = g["particle_type"] == rh.dm_type
             else:
                 iddm = Ellipsis
@@ -295,9 +193,11 @@
     left_edge[2] = pf.domain_left_edge[2]
     left_edge[3] = left_edge[4] = left_edge[5] = 0.0
     pi = 0
-    for g in grids:
+    for g in pf.h._get_objs("grids"):
         if g.NumberOfParticles == 0: continue
-        if "particle_type" in all_fields:
+        if rh.dm_only:
+            iddm = Ellipsis
+        elif "particle_type" in all_fields:
             iddm = g["particle_type"] == rh.dm_type
         else:
             iddm = Ellipsis
@@ -329,21 +229,22 @@
     cdef public int block_ratio
     cdef public int dm_type
     cdef public int total_particles
+    cdef public int dm_only
 
-    def __cinit__(self, ts, data_source):
+    def __cinit__(self, ts):
         self.ts = ts
         self.tsl = ts.__iter__() #timseries generator used by read
-        self.data_source = data_source
 
     def setup_rockstar(self, char *server_address, char *server_port,
                        int num_snaps, np.int64_t total_particles,
                        int dm_type,
-                       np.float64_t particle_mass = -1.0,
+                       np.float64_t particle_mass,
                        int parallel = False, int num_readers = 1,
                        int num_writers = 1,
                        int writing_port = -1, int block_ratio = 1,
                        int periodic = 1, force_res=None,
-                       int min_halo_size = 25, outbase = "None"):
+                       int min_halo_size = 25, outbase = "None",
+                       int dm_only = 0):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
@@ -374,6 +275,7 @@
         MIN_HALO_OUTPUT_SIZE=min_halo_size
         TOTAL_PARTICLES = total_particles
         self.block_ratio = block_ratio
+        self.dm_only = dm_only
         
         tpf = self.ts[0]
         h0 = tpf.hubble_constant
@@ -385,8 +287,6 @@
             #workaround is to make a new directory
             OUTBASE = outbase 
 
-        if particle_mass < 0:
-            particle_mass = tpf.h.grids[0]["ParticleMassMsun"][0] / h0
         PARTICLE_MASS = particle_mass
         PERIODIC = periodic
         BOX_SIZE = (tpf.domain_right_edge[0] -
@@ -400,12 +300,16 @@
     def call_rockstar(self):
         read_particles("generic")
         rockstar(NULL, 0)
-        output_and_free_halos(0, 0, 0, NULL)
+        output_halos(0, 0, 0, NULL)
 
     def start_server(self):
         with nogil:
             server()
 
-    def start_client(self, in_type):
-        in_type = np.int64(in_type)
+    def start_reader(self):
+        cdef np.int64_t in_type = np.int64(READER_TYPE)
         client(in_type)
+
+    def start_writer(self):
+        cdef np.int64_t in_type = np.int64(WRITER_TYPE)
+        client(in_type)

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


