[yt-svn] commit/yt: 28 new changesets

Bitbucket commits-noreply at bitbucket.org
Mon Nov 26 08:33:27 PST 2012


28 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/35c2708a4937/
changeset:   35c2708a4937
branch:      yt
user:        sskory
date:        2012-11-03 00:23:46
summary:     Some changes to Rockstar; it mostly works, but could be prettier.
affected #:  4 files
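
For context, a minimal sketch of the intended usage of the classes added
below (the dataset name "data0005" is illustrative, taken from the docstring
example in the patch; this assumes the yt 2.x halo list API):

    from yt.mods import *
    from yt.analysis_modules.halo_finding.api import LoadRockstarHalos

    pf = load("data0005")
    # Reads str(pf) + '_rockstar/out_0.list' by default.
    halos = LoadRockstarHalos(pf)
    print halos[0].virial_mass()    # Msun/h
    print halos[0].virial_radius()  # Mpc/h comoving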

diff -r a9f28dd0dcb90d1d75ec39844c3d36ecae989ee5 -r 35c2708a4937106a79193858fe315f69a3d88f62 yt/analysis_modules/halo_finding/api.py
--- a/yt/analysis_modules/halo_finding/api.py
+++ b/yt/analysis_modules/halo_finding/api.py
@@ -45,4 +45,8 @@
     FOFHaloFinder, \
     HaloFinder, \
     LoadHaloes, \
-    LoadTextHaloes
+    LoadTextHalos, \
+    LoadTextHaloes, \
+    RockstarHalo, \
+    RockstarHaloList, \
+    LoadRockstarHalos


diff -r a9f28dd0dcb90d1d75ec39844c3d36ecae989ee5 -r 35c2708a4937106a79193858fe315f69a3d88f62 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -547,7 +547,6 @@
            Most defaults are removed since we don't read in which halos
            the particles belong to.
         """
-        #we can still use get_sphere!
         self.ID = ID #from rockstar
         self.id = index #index in the halo list
         self.pf = halo_list.pf
@@ -574,11 +573,10 @@
         self.size=Np 
         self.CoM=np.array([X,Y,Z])
         self.max_dens_point=-1
-        self.group_total_mass=-1
+        self.group_total_mass=Mvir
         self.max_radius=Rvir
         self.bulk_vel=np.array([VX,VY,VZ])*1e5
-        self.rms_vel=-1
-        self.group_total_mass = -1 #not implemented 
+        self.rms_vel=Vrms
     
     def maximum_density(self):
         r"""Not implemented."""
@@ -588,14 +586,6 @@
         r"""Not implemented."""
         return self.center_of_mass()
 
-    def total_mass(self):
-        r"""Not implemented."""
-        return -1
-
-    def get_size(self):
-        r"""Return the number of particles belonging to the halo."""
-        return self.Np
-
     def write_particle_list(self,handle):
         r"""Not implemented."""
         return -1
@@ -608,18 +598,6 @@
         r"""Virial radius in Mpc/h comoving"""
         return self.Rvir
 
-    def virial_bin(self):
-        r"""Not implemented"""
-        return -1
-
-    def virial_density(self):
-        r"""Not implemented """
-        return -1
-
-    def virial_info(self):
-        r"""Not implemented"""
-        return -1 
-
     def __getitem__(self,key):
         r"""Not implemented"""
         return None
@@ -1318,6 +1296,8 @@
         f.close()
 
 class RockstarHaloList(HaloList):
+    _name = "Rockstar"
+    _halo_class = RockstarHalo
    #because we don't yet know halo-particle affiliations
     #most of the halo list methods are not implemented
     #furthermore, Rockstar only accepts DM particles of
@@ -1362,7 +1342,7 @@
         names = lines[0].replace('#','').split(' ')
         for j,line in enumerate(lines):
             if not line.startswith('#'): break
-
+        
        #find out the table datatypes by evaluating the first data line
         splits = filter(lambda x: len(x.strip()) > 0 ,line.split(' '))
         for num in splits:
@@ -1371,43 +1351,36 @@
             else:
                 formats += np.dtype('float'),
         assert len(formats) == len(names)
-
+        
         #Jc = 1.98892e33/pf['mpchcm']*1e5
         Jc = 1.0
         conv = dict(X=1.0/pf['mpchcm'],
                     Y=1.0/pf['mpchcm'],
                     Z=1.0/pf['mpchcm'], #to unitary
-                    VX=1e0,VY=1e0,VZ=1e0, #to km/s
+                    VX=1e0, VY=1e0, VZ=1e0, #to km/s
                     Mvir=1.0, #Msun/h
                     Vmax=1e0,Vrms=1e0,
                     Rvir=1.0/pf['kpchcm'],
                     Rs=1.0/pf['kpchcm'],
-                    JX=Jc,JY=Jc,JZ=Jc)
+                    JX=Jc, JY=Jc, JZ=Jc)
         dtype = {'names':names,'formats':formats}
         halo_table = np.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            
-        #convert position units  
+        # Sort halos by Mvir.
+        halo_table.sort(order='Mvir')
+        halo_table = np.flipud(halo_table)
+        #convert position units
         for name in names:
             halo_table[name]=halo_table[name]*conv.get(name,1)
-        
-        for k,row in enumerate(halo_table):
+        # Store the halos in the halo list.
+        for i, row in enumerate(halo_table):
             args = tuple([val for val in row])
-            halo = RockstarHalo(self,k,*args)
+            halo = RockstarHalo(self, i, *args)
             self._groups.append(halo)
-    
-
-    #len is ok
-    #iter is OK
-    #getitem is ok
-    #nn is ok I think
-    #nn2d is ok I think
 
     def write_out(self):
         pass
     def write_particle_list(self):
         pass
-    
-
-    
 
 class HOPHaloList(HaloList):
 
@@ -2627,3 +2600,25 @@
             3.28392048e14
         """
         TextHaloList.__init__(self, pf, filename, columns, comment)
+
+LoadTextHalos = LoadTextHaloes
+
+class LoadRockstarHalos(GenericHaloFinder, RockstarHaloList):
+    def __init__(self, pf, filename = None):
+        r"""Load Rockstar halos off disk from Rockstar-output format.
+
+        Parameters
+        ----------
+        filename : str
+            The name of the Rockstar file to read in. Default is
+            "(dataset_name)_rockstar/out_0.list" where (dataset_name)
+            is equal to str(pf).
+
+        Examples
+        --------
+        >>> pf = load("data0005")
+        >>> halos = LoadRockstarHalos(pf, "other_name.out")
+        """
+        if filename is None:
+            filename = str(pf) + '_rockstar/out_0.list'
+        RockstarHaloList.__init__(self, pf, filename)


diff -r a9f28dd0dcb90d1d75ec39844c3d36ecae989ee5 -r 35c2708a4937106a79193858fe315f69a3d88f62 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -47,37 +47,113 @@
         return data_source
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
-    def __init__(self, pf, num_readers = 1, num_writers = None, 
-            outbase=None,particle_mass=-1.0,overwrite=False,
-            left_edge = None, right_edge = None):
+    def __init__(self, ts, num_readers = 1, num_writers = None, 
+            outbase=None,particle_mass=-1.0,dm_type=1):
+        r"""Spawns the Rockstar Halo finder, distributes dark matter
+        particles and finds halos.
+
+        The halo finder requires dark matter particles of a fixed mass.
+        Rockstar has three main processes: reader, writer, and the
+        server, which coordinates the reader/writer processes.
+
+        Parameters
+        ----------
+        ts   : TimeSeriesData, StaticOutput
+            This is the data source containing the DM particles. Because 
+            halo IDs may change from one snapshot to the next, the only
+            way to keep a consistent halo ID across time is to feed 
+            Rockstar a set of snapshots, i.e., via TimeSeriesData.
+        num_readers: int
+            The number of readers can be increased from the default
+            of 1 in the event that a single snapshot is split among
+            many files. This can help in cases where performance is
+            IO-limited. Default is 1.
+        num_writers: int
+            The number of writers determines the number of processing threads
+            as well as the number of threads writing output data.
+            The default is comm.size - num_readers - 1.
+        outbase: str
+            This is where the out*list files that Rockstar makes should be
+            placed. Default is str(pf)+'_rockstar'.
+        particle_mass: float
+            This sets the DM particle mass used in Rockstar.
+        dm_type: int
+            In order to exclude stars and other particle types, define
+            the dm_type. Default is 1, as Enzo has the DM particle type=1.
+
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+        To use the script below you must run it using MPI:
+        mpirun -np 3 python test_rockstar.py --parallel
+
+        test_rockstar.py:
+
+        from mpi4py import MPI
+        from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
+        from yt.mods import *
+        import sys
+
+        files = glob.glob('/u/cmoody3/data/a*')
+        files.sort()
+        ts = TimeSeriesData.from_filenames(files)
+        pm = 7.81769027e+11
+        rh = RockstarHaloFinder(ts, particle_mass=pm)
+        rh.run()
+        """
         ParallelAnalysisInterface.__init__(self)
         # No subvolume support
-        self.pf = pf
-        self.hierarchy = pf.h
+        #we assume that all of the snapshots in the time series
+        #use the same domain info as the first snapshot
+        if not isinstance(ts,TimeSeriesData):
+            ts = TimeSeriesData([ts])
+        self.ts = ts
+        self.dm_type = dm_type
+        if self.comm.size > 1: 
+            self.comm.barrier()            
+        tpf = ts.__iter__().next()
+        def _particle_count(field,data):
+            try:
+                return (data["particle_type"]==dm_type)
+            except KeyError:
+                return np.ones_like(data["particle_position_x"]).astype('bool')
+        add_field("particle_count",function=_particle_count, not_in_all=True,
+        particle_type=True)
+        # Get total_particles in parallel.
+        dd = tpf.h.all_data()
+        self.total_particles = dd.quantities['TotalQuantity']('particle_count')[0]
+        self.hierarchy = tpf.h
+        self.particle_mass = particle_mass 
+        self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
+        data_source = tpf.h.all_data()
+        if outbase is None:
+            outbase = str(tpf)+'_rockstar'
+        self.outbase = outbase        
         if num_writers is None:
             num_writers = self.comm.size - num_readers -1
         self.num_readers = num_readers
         self.num_writers = num_writers
-        self.particle_mass = particle_mass 
-        self.overwrite = overwrite
-        if left_edge is None:
-            left_edge = pf.domain_left_edge
-        if right_edge is None:
-            right_edge = pf.domain_right_edge
-        self.le = left_edge
-        self.re = right_edge
+        self.particle_mass = particle_mass
+        self.le = tpf.domain_left_edge
+        self.re = tpf.domain_right_edge
         if self.num_readers + self.num_writers + 1 != self.comm.size:
             print '%i readers + %i writers != %i mpi'%\
                     (self.num_readers, self.num_writers, self.comm.size)
             raise RuntimeError
-        self.center = (pf.domain_right_edge + pf.domain_left_edge)/2.0
-        data_source = self.pf.h.all_data()
+        self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
+        data_source = tpf.h.all_data()
         self.handler = rockstar_interface.RockstarInterface(
-                self.pf, data_source)
+                ts, data_source)
         if outbase is None:
-            outbase = str(self.pf)+'_rockstar'
+            outbase = str(tpf)+'_rockstar'
         self.outbase = outbase        
 
+    def __del__(self):
+        self.pool.free_all()
+
     def _get_hosts(self):
         if self.comm.size == 1 or self.workgroup.name == "server":
             server_address = socket.gethostname()
@@ -107,14 +183,9 @@
         if block_ratio != 1:
             raise NotImplementedError
         self._get_hosts()
-        #because rockstar *always* writes to exactly the same
-        #out_0.list filename we make a directory for it
-        #to sit inside so it doesn't get accidentally
-        #overwritten 
-        if self.workgroup.name == "server":
-            if not os.path.exists(self.outbase):
-                os.mkdir(self.outbase)
         self.handler.setup_rockstar(self.server_address, self.port,
+                    len(self.ts), self.total_particles, 
+                    self.dm_type,
                     parallel = self.comm.size > 1,
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,
@@ -123,6 +194,13 @@
                     outbase = self.outbase,
                     particle_mass = float(self.particle_mass),
                     **kwargs)
+        #because rockstar *always* writes to exactly the same
+        #out_0.list filename we make a directory for it
+        #to sit inside so it doesn't get accidentally
+        #overwritten 
+        if self.workgroup.name == "server":
+            if not os.path.exists(self.outbase):
+                os.mkdir(self.outbase)
         if self.comm.size == 1:
             self.handler.call_rockstar()
         else:


diff -r a9f28dd0dcb90d1d75ec39844c3d36ecae989ee5 -r 35c2708a4937106a79193858fe315f69a3d88f62 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -237,32 +237,38 @@
     print "SINGLE_SNAP =", SINGLE_SNAP
 
 cdef class RockstarInterface
-
 cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
-    print 'reading from particle filename %s'%filename # should print ./inline.0
+    global SCALE_NOW
+    pf = rh.tsl.next()
+    print 'reading from particle filename %s: %s'%(filename,pf.basename)
     cdef np.float64_t conv[6], left_edge[6]
     cdef np.ndarray[np.int64_t, ndim=1] arri
     cdef np.ndarray[np.float64_t, ndim=1] arr
     block = int(str(filename).rsplit(".")[-1])
+    
 
     # Now we want to grab data from only a subset of the grids.
     n = rh.block_ratio
-    dd = rh.pf.h.all_data()
+    dd = pf.h.all_data()
+    SCALE_NOW = 1.0/(pf.current_redshift+1.0)
     grids = np.array_split(dd._grids, NUM_BLOCKS)[block]
-    tnpart = 0
-    for g in grids:
-        tnpart += dd._get_data_from_grid(g, "particle_index").size
+    tnpart = TOTAL_PARTICLES
+    print "Loading indices: size = ", tnpart
     p[0] = <particle *> malloc(sizeof(particle) * tnpart)
-    #print "Loading indices: size = ", tnpart
-    conv[0] = conv[1] = conv[2] = rh.pf["mpchcm"]
+    conv[0] = conv[1] = conv[2] = pf["mpchcm"]
     conv[3] = conv[4] = conv[5] = 1e-5
-    left_edge[0] = rh.pf.domain_left_edge[0]
-    left_edge[1] = rh.pf.domain_left_edge[1]
-    left_edge[2] = rh.pf.domain_left_edge[2]
+    left_edge[0] = pf.domain_left_edge[0]
+    left_edge[1] = pf.domain_left_edge[1]
+    left_edge[2] = pf.domain_left_edge[2]
     left_edge[3] = left_edge[4] = left_edge[5] = 0.0
     pi = 0
     for g in grids:
+        try:
+            iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
+        except KeyError:
+            iddm = np.ones_like(dd._get_data_from_grid(g, "particle_index")).astype('bool')
         arri = dd._get_data_from_grid(g, "particle_index").astype("int64")
+        arri = arri[iddm] #pick only DM
         npart = arri.size
         for i in range(npart):
             p[0][i+pi].id = arri[i]
@@ -272,38 +278,54 @@
                       "particle_velocity_x", "particle_velocity_y",
                       "particle_velocity_z"]:
             arr = dd._get_data_from_grid(g, field).astype("float64")
+            arr = arr[iddm] #pick DM
             for i in range(npart):
                 p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
             fi += 1
-        pi += npart
+        print pi
     num_p[0] = tnpart
-    print "Block #%i | Particles %i | Grids %i"%\
-            ( block, pi, len(grids))
+    #print 'first particle coordinates'
+    #for i in range(3):
+    #    print p[0][0].pos[i],
+    #print ""
+    #print 'last particle coordinates'
+    #for i in range(3):
+    #    print p[0][tnpart-1].pos[i],
+    #print ""
+    print "done"
 
 cdef class RockstarInterface:
 
-    cdef public object pf
     cdef public object data_source
+    cdef public object ts
+    cdef public object tsl
     cdef int rank
     cdef int size
     cdef public int block_ratio
+    cdef public int dm_type
+    cdef public int total_particles
 
-    def __cinit__(self, pf, data_source):
-        self.pf = pf
+    def __cinit__(self, ts, data_source):
+        self.ts = ts
+        self.tsl = ts.__iter__() #timeseries generator used by read
         self.data_source = data_source
 
     def setup_rockstar(self, char *server_address, char *server_port,
+                       int num_snaps, np.int64_t total_particles,
+                       int dm_type,
                        np.float64_t particle_mass = -1.0,
                        int parallel = False, int num_readers = 1,
                        int num_writers = 1,
                        int writing_port = -1, int block_ratio = 1,
-                       int periodic = 1, int num_snaps = 1,
+                       int periodic = 1, 
                        int min_halo_size = 25, outbase = "None"):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
         global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
+        global OVERLAP_LENGTH, TOTAL_PARTICLES
+        OVERLAP_LENGTH = 0.0
         if parallel:
             PARALLEL_IO = 1
             PARALLEL_IO_SERVER_ADDRESS = server_address
@@ -319,29 +341,28 @@
         OUTPUT_FORMAT = "ASCII"
         NUM_SNAPS = num_snaps
         NUM_READERS = num_readers
-        NUM_SNAPS = 1
         NUM_WRITERS = num_writers
         NUM_BLOCKS = num_readers
         MIN_HALO_OUTPUT_SIZE=min_halo_size
+        TOTAL_PARTICLES = total_particles
         self.block_ratio = block_ratio
-
-        h0 = self.pf.hubble_constant
-        Ol = self.pf.omega_lambda
-        Om = self.pf.omega_matter
-        SCALE_NOW = 1.0/(self.pf.current_redshift+1.0)
+        
+        tpf = self.ts[0]
+        h0 = tpf.hubble_constant
+        Ol = tpf.omega_lambda
+        Om = tpf.omega_matter
+        SCALE_NOW = 1.0/(tpf.current_redshift+1.0)
         if not outbase =='None'.decode('UTF-8'):
             #output directory. since we can't change the output filenames
             #workaround is to make a new directory
-            print 'using %s as outbase'%outbase
             OUTBASE = outbase 
 
         if particle_mass < 0:
-            print "Assuming single-mass particle."
-            particle_mass = self.pf.h.grids[0]["ParticleMassMsun"][0] / h0
+            particle_mass = tpf.h.grids[0]["ParticleMassMsun"][0] / h0
         PARTICLE_MASS = particle_mass
         PERIODIC = periodic
-        BOX_SIZE = (self.pf.domain_right_edge[0] -
-                    self.pf.domain_left_edge[0]) * self.pf['mpchcm']
+        BOX_SIZE = (tpf.domain_right_edge[0] -
+                    tpf.domain_left_edge[0]) * tpf['mpchcm']
         setup_config()
         rh = self
         cdef LPG func = rh_read_particles



https://bitbucket.org/yt_analysis/yt/changeset/f54ceec283f5/
changeset:   f54ceec283f5
branch:      yt
user:        sskory
date:        2012-11-05 22:55:20
summary:     Can now write Rockstar halos to the common text output format.
affected #:  2 files
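
With the write_out stub removed below, RockstarHaloList falls through to the
generic HaloList.write_out, so Rockstar halos can be saved in the same text
format as the other finders. A usage sketch, continuing the one from the
first changeset (the output filename is illustrative):

    halos = LoadRockstarHalos(pf)
    halos.write_out("RockstarAnalysis.out")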

diff -r 35c2708a4937106a79193858fe315f69a3d88f62 -r f54ceec283f565cb0764eeada5d683a046bd3e9d yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1305,6 +1305,7 @@
     #Still, we inherit from HaloList because in the future
     #we might implement halo-particle affiliations
     def __init__(self,pf,out_list):
+        ParallelAnalysisInterface.__init__(self)
         mylog.info("Initializing Rockstar List")
         self._data_source = None
         self._groups = []
@@ -1377,8 +1378,6 @@
             halo = RockstarHalo(self, i, *args)
             self._groups.append(halo)
 
-    def write_out(self):
-        pass
     def write_particle_list(self):
         pass
 


diff -r 35c2708a4937106a79193858fe315f69a3d88f62 -r f54ceec283f565cb0764eeada5d683a046bd3e9d yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -282,7 +282,7 @@
             for i in range(npart):
                 p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
             fi += 1
-        print pi
+        pi += npart
     num_p[0] = tnpart
     #print 'first particle coordinates'
     #for i in range(3):



https://bitbucket.org/yt_analysis/yt/changeset/123f2d4f33b0/
changeset:   123f2d4f33b0
branch:      yt
user:        sskory
date:        2012-11-06 17:48:07
summary:     Rockstar halos are now read in from the binary outputs, not the
text files, which is more accurate. Particles are not read in yet.
affected #:  1 file
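
The patch below parses the halos_*.bin files directly. A standalone sketch of
the same parsing step, with header_dt and halo_dt copied verbatim from the
patch (the reader function and its name are illustrative):

    import numpy as np

    # Dtypes mirror io_internal.h and halo.h in Rockstar.
    BINARY_HEADER_SIZE = 256
    header_dt = np.dtype([('magic', np.uint64), ('snap', np.int64),
        ('chunk', np.int64), ('scale', np.float32), ('Om', np.float32),
        ('Ol', np.float32), ('h0', np.float32),
        ('bounds', (np.float32, 6)), ('num_halos', np.int64),
        ('num_particles', np.int64), ('box_size', np.float32),
        ('particle_mass', np.float32), ('particle_type', np.int64),
        ('unused', (np.byte, BINARY_HEADER_SIZE - 4*12 - 8*6))])
    halo_dt = np.dtype([('id', np.int64), ('pos', (np.float32, 6)),
        ('corevel', (np.float32, 3)), ('bulkvel', (np.float32, 3)),
        ('m', np.float32), ('r', np.float32), ('child_r', np.float32),
        ('mgrav', np.float32), ('vmax', np.float32),
        ('rvmax', np.float32), ('rs', np.float32),
        ('vrms', np.float32), ('J', (np.float32, 3)),
        ('energy', np.float32), ('spin', np.float32),
        ('padding1', np.float32), ('num_p', np.int64),
        ('num_child_particles', np.int64), ('p_start', np.int64),
        ('desc', np.int64), ('flags', np.int64), ('n_core', np.int64),
        ('min_pos_err', np.float32), ('min_vel_err', np.float32),
        ('min_bulkvel_err', np.float32), ('padding2', np.float32)])

    def read_rockstar_halos(fname):
        # One halos_*.bin file is a fixed-size header followed by
        # header['num_halos'] packed halo records.
        fp = open(fname, 'rb')
        header = np.fromfile(fp, dtype=header_dt, count=1)
        halos = np.fromfile(fp, dtype=halo_dt,
            count=int(header['num_halos']))
        fp.close()
        return header, halos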

diff -r f54ceec283f565cb0764eeada5d683a046bd3e9d -r 123f2d4f33b0cac4ed7f187088eb55309eea70e5 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -34,6 +34,8 @@
 import numpy as np
 import random
 import sys
+import glob
+import os
 import os.path as path
 from collections import defaultdict
 
@@ -540,43 +542,7 @@
             e0_vector[2], tilt)
 
 class RockstarHalo(Halo):
-    def __init__(self,halo_list,index,ID, DescID, Mvir, Vmax, Vrms, Rvir, Rs, Np, 
-                  X, Y, Z, VX, VY, VZ, JX, JY, JZ, Spin):
-        """Implement the properties reported by Rockstar: ID, Descendant ID,
-           Mvir, Vmax, Vrms, Rvir, Rs, Np, XYZ, VXYZ, JXYZ, and spin.
-           Most defaults are removed since we don't read in which halos
-           the particles belong to.
-        """
-        self.ID = ID #from rockstar
-        self.id = index #index in the halo list
-        self.pf = halo_list.pf
-
-        self.DescID = DescID
-        self.Mvir = Mvir
-        self.Vmax = Vmax
-        self.Vrms = Vrms
-        self.Rvir = Rvir
-        self.Rs   = Rs
-        self.Np   = Np
-        self.X    = X
-        self.Y    = Y
-        self.Z    = Z
-        self.VX   = VX
-        self.VY   = VY
-        self.VZ   = VZ
-        self.JX   = JX
-        self.JY   = JY
-        self.JZ   = JZ
-        self.Spin = Spin
-
-        #Halo.__init__(self,halo_list,index,
-        self.size=Np 
-        self.CoM=np.array([X,Y,Z])
-        self.max_dens_point=-1
-        self.group_total_mass=Mvir
-        self.max_radius=Rvir
-        self.bulk_vel=np.array([VX,VY,VZ])*1e5
-        self.rms_vel=Vrms
+    _name = "RockstarHalo"
     
     def maximum_density(self):
         r"""Not implemented."""
@@ -592,11 +558,11 @@
 
     def virial_mass(self):
         r"""Virial mass in Msun/h"""
-        return self.Mvir
+        return self.supp['m']
 
     def virial_radius(self):
         r"""Virial radius in Mpc/h comoving"""
-        return self.Rvir
+        return self.supp['r']
 
     def __getitem__(self,key):
         r"""Not implemented"""
@@ -1304,18 +1270,48 @@
     #a fixed mass, so we don't allow stars at all
     #Still, we inherit from HaloList because in the future
     #we might implement halo-particle affiliations
-    def __init__(self,pf,out_list):
+    def __init__(self, pf, out_list):
         ParallelAnalysisInterface.__init__(self)
         mylog.info("Initializing Rockstar List")
+        self._setup()
         self._data_source = None
         self._groups = []
         self._max_dens = -1
         self.pf = pf
         self.out_list = out_list
+        self._data_source = pf.h.all_data()
         mylog.info("Parsing Rockstar halo list")
-        self._parse_output(out_list)
+        self._parse_output()
         mylog.info("Finished %s"%out_list)
 
+    def _setup(self):
+        """ A few things we'll need later."""
+        # see io_internal.h in Rockstar.
+        BINARY_HEADER_SIZE=256
+        self.header_dt = np.dtype([('magic', np.uint64), ('snap', np.int64),
+            ('chunk', np.int64), ('scale', np.float32), ('Om', np.float32),
+            ('Ol', np.float32), ('h0', np.float32),
+            ('bounds', (np.float32, 6)), ('num_halos', np.int64),
+            ('num_particles', np.int64), ('box_size', np.float32),
+            ('particle_mass', np.float32), ('particle_type', np.int64),
+            ('unused', (np.byte, BINARY_HEADER_SIZE - 4*12 - 8*6))])
+        # see halo.h.
+        self.halo_dt = np.dtype([('id', np.int64), ('pos', (np.float32, 6)),
+            ('corevel', (np.float32, 3)), ('bulkvel', (np.float32, 3)),
+            ('m', np.float32), ('r', np.float32), ('child_r', np.float32),
+            ('mgrav', np.float32), ('vmax', np.float32),
+            ('rvmax', np.float32), ('rs', np.float32),
+            ('vrms', np.float32), ('J', (np.float32, 3)),
+            ('energy', np.float32), ('spin', np.float32),
+            ('padding1', np.float32), ('num_p', np.int64),
+            ('num_child_particles', np.int64), ('p_start', np.int64),
+            ('desc', np.int64), ('flags', np.int64), ('n_core', np.int64),
+            ('min_pos_err', np.float32), ('min_vel_err', np.float32),
+            ('min_bulkvel_err', np.float32), ('padding2', np.float32),])
+        # Above, padding1&2 are due to c byte ordering which pads between
+        # 4 and 8 byte values in the struct as to not overlap memory registers.
+        self.tocleanup = ['padding1', 'padding2']
+
     def _run_finder(self):
         pass
 
@@ -1325,57 +1321,67 @@
     def _get_dm_indices(self):
         pass
 
-    def _parse_output(self,out_list=None):
+    def _get_halos_binary(self, files):
+        """
+        Parse the binary files to get information about halos in higher
+        precision than the text file.
+        """
+        halos = None
+        self.halo_to_fname = {}
+        for file in files:
+            fp = open(file, 'rb')
+            # read the header
+            header = np.fromfile(fp, dtype=self.header_dt, count=1)
+            # read the halo information
+            new_halos = np.fromfile(fp, dtype=self.halo_dt,
+                count=header['num_halos'])
+            # Record which binary file holds these halos.
+            for halo in new_halos['id']:
+                self.halo_to_fname[halo] = file
+            # Add to existing.
+            if halos is not None:
+                halos = np.concatenate((new_halos, halos))
+            else:
+                halos = new_halos.copy()
+            fp.close()
+        # Sort them by mass.
+        halos.sort(order='m')
+        halos = np.flipud(halos)
+        return halos
+
+    def _parse_output(self):
         """
         Read the out_*.list text file produced
         by Rockstar into memory."""
         
         pf = self.pf
-
-        if out_list is None:
-            out_list = self.out_list
-
-        lines = open(out_list).readlines()
-        names = []
-        formats = []
-        
-        #find the variables names from the first defining line
-        names = lines[0].replace('#','').split(' ')
-        for j,line in enumerate(lines):
-            if not line.startswith('#'): break
-        
-        #find out the table datatypes by evaluating the first data line
-        splits = filter(lambda x: len(x.strip()) > 0 ,line.split(' '))
-        for num in splits:
-            if 'nan' not in num:
-                formats += np.array(eval(num)).dtype,
-            else:
-                formats += np.dtype('float'),
-        assert len(formats) == len(names)
+        out_list = self.out_list
+        # We need to also get the binary files.
+        basedir = os.path.dirname(out_list)
+        files = glob.glob(basedir + '/halos*bin')
+        halos = self._get_halos_binary(files)
         
         #Jc = 1.98892e33/pf['mpchcm']*1e5
         Jc = 1.0
-        conv = dict(X=1.0/pf['mpchcm'],
-                    Y=1.0/pf['mpchcm'],
-                    Z=1.0/pf['mpchcm'], #to unitary
-                    VX=1e0, VY=1e0, VZ=1e0, #to km/s
-                    Mvir=1.0, #Msun/h
-                    Vmax=1e0,Vrms=1e0,
-                    Rvir=1.0/pf['kpchcm'],
-                    Rs=1.0/pf['kpchcm'],
-                    JX=Jc, JY=Jc, JZ=Jc)
-        dtype = {'names':names,'formats':formats}
-        halo_table = np.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            
-        # Sort halos by Mvir.
-        halo_table.sort(order='Mvir')
-        halo_table = np.flipud(halo_table)
-        #convert position units
-        for name in names:
-            halo_table[name]=halo_table[name]*conv.get(name,1)
+        length = 1.0 / pf['mpchcm']
+        conv = dict(pos = np.array([length, length, length,
+                                    1, 1, 1]), # to unitary
+                    r=1.0/pf['kpchcm'], # to unitary
+                    rs=1.0/pf['kpchcm'], # to unitary
+                    )
+        #convert units
+        for name in self.halo_dt.names:
+            halos[name]=halos[name]*conv.get(name,1)
         # Store the halos in the halo list.
-        for i, row in enumerate(halo_table):
-            args = tuple([val for val in row])
-            halo = RockstarHalo(self, i, *args)
+        for i, row in enumerate(halos):
+            supp = {name:row[name] for name in self.halo_dt.names}
+            # Delete the padding columns. 'supp' below will contain
+            # repeated information, but that's OK.
+            for item in self.tocleanup: del supp[item]
+            halo = RockstarHalo(self, i, size=row['num_p'],
+                CoM=row['pos'][0:3], group_total_mass=row['m'],
+                max_radius=row['r'], bulk_vel=row['bulkvel'],
+                rms_vel=row['vrms'], supp=supp)
             self._groups.append(halo)
 
     def write_particle_list(self):



https://bitbucket.org/yt_analysis/yt/changeset/1a856afd60f7/
changeset:   1a856afd60f7
branch:      yt
user:        sskory
date:        2012-11-06 22:56:44
summary:     Can now pull particle data out of Rockstar binary files.
affected #:  1 file
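
The __getitem__ added below matches a halo's on-disk particle indices against
a yt sphere using argsort plus np.in1d, which yields a boolean membership
mask (np.searchsorted, previously used for LoadedHalo, returns insertion
positions rather than membership, which is only correct when every index is
guaranteed present). A toy sketch of the matching step, with made-up index
arrays:

    import numpy as np

    # Made-up indices: pid from the Rockstar binary file, sp_pid from an
    # AMR sphere around the halo.
    pid = np.array([11, 42, 7])
    sp_pid = np.array([3, 7, 42, 9, 11, 5])

    pid = pid[pid.argsort()]      # sort the on-disk indices
    ds_sort = sp_pid.argsort()    # remember the sphere's sort order
    sp_pid = sp_pid[ds_sort]
    mask = np.in1d(sp_pid, pid)   # True where a sphere particle is in the halo
    # Any sphere field is then selected as ds[key][ds_sort][mask].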

diff -r 123f2d4f33b0cac4ed7f187088eb55309eea70e5 -r 1a856afd60f7dc48065c3debaadb9b1950f555e3 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -81,6 +81,7 @@
     def __init__(self, halo_list, id, indices=None, size=None, CoM=None,
         max_dens_point=None, group_total_mass=None, max_radius=None,
         bulk_vel=None, tasks=None, rms_vel=None, supp=None):
+        self.halo_list = halo_list
         self._max_dens = halo_list._max_dens
         self.id = id
         self.data = halo_list._data_source
@@ -108,6 +109,10 @@
             self.supp = {}
         else:
             self.supp = supp
+        # Used for Loaded and RockstarHalos
+        self.saved_fields = {}
+        self.particle_mask = None
+        self.ds_sort = None
 
     def center_of_mass(self):
         r"""Calculate and return the center of mass.
@@ -564,10 +569,62 @@
         r"""Virial radius in Mpc/h comoving"""
         return self.supp['r']
 
-    def __getitem__(self,key):
-        r"""Not implemented"""
-        return None
+    def __getitem__(self, key):
+        # This function will try to get particle data in one of three ways,
+        # in descending preference.
+        # 1. From saved_fields, e.g. we've already got it.
+        # 2. From the halo binary files off disk.
+        # 3. Use the unique particle indexes of the halo to select a missing
+        # field from an AMR Sphere.
+        try:
+            # We've already got it.
+            return self.saved_fields[key]
+        except KeyError:
+            # Gotta go get it from the Rockstar binary file.
+            if key == 'particle_index':
+                IDs = self._get_particle_data(self.supp['id'],
+                    self.halo_list.halo_to_fname, self.size, key)
+                IDs = IDs[IDs.argsort()]
+                self.saved_fields[key] = IDs
+                return self.saved_fields[key]
+            else:
+                # Dynamically create the masking array for particles, and get
+                # the data using standard yt methods. The factor of 4 (> 1)
+                # accounts for the fact that 'r' for Rockstar halos is the
+                # virial radius, not the maximum radius; 4 may be a bit
+                # too generous, but it errs on the safe side.
+                ds = self.pf.h.sphere(self.CoM, 4 * self.max_radius)
+                if self.particle_mask is None:
+                    # This is from disk.
+                    pid = self.__getitem__('particle_index')
+                    # This is from the sphere.
+                    sp_pid = ds['particle_index']
+                    self.ds_sort = sp_pid.argsort()
+                    sp_pid = sp_pid[self.ds_sort]
+                    # This matches them up.
+                    self.particle_mask = np.in1d(sp_pid, pid)
+                # We won't store this field below in saved_fields because
+                # that would mean keeping two copies of it, one in the yt
+                # machinery and one here.
+                return ds[key][self.ds_sort][self.particle_mask]
 
+    def _get_particle_data(self, halo, fnames, size, field):
+        # Given a list of file names, a halo, its size, and the desired field,
+        # this returns the particle indices for that halo.
+        file = fnames[halo]
+        mylog.info("Getting %d particles from Rockstar binary file %s." % (self.supp['num_p'], file))
+        fp = open(file, 'rb')
+        # We need to skip past the header and all the halos.
+        fp.seek(self.halo_list.header_dt.itemsize + \
+            self.halo_list.fname_halos[file] * \
+            self.halo_list.halo_dt.itemsize, os.SEEK_CUR)
+        # Now we skip ahead to where this halo's particles begin.
+        fp.seek(self.supp['p_start'] * 8, os.SEEK_CUR)
+        # And finally, read in the ids.
+        IDs = np.fromfile(fp, dtype=np.int64, count=self.supp['num_p'])
+        fp.close()
+        print IDs.shape
+        return IDs
 
     def get_ellipsoid_parameters(self):
         r"""Calculate the parameters that describe the ellipsoid of
@@ -772,9 +829,6 @@
         self.fnames = fnames
         self.bin_count = None
         self.overdensity = None
-        self.saved_fields = {}
-        self.particle_mask = None
-        self.ds_sort = None
         self.indices = np.array([])  # Never used for a LoadedHalo.
         # A supplementary data dict.
         if supp is None:
@@ -796,8 +850,9 @@
             # Gotta go get it from the halo h5 files.
             field_data = self._get_particle_data(self.id, self.fnames,
                 self.size, key)
-            #if key == 'particle_position_x': field_data = None
             if field_data is not None:
+                if key == 'particle_index':
+                    field_data = field_data[field_data.argsort()]
                 self.saved_fields[key] = field_data
                 return self.saved_fields[key]
             else:
@@ -812,10 +867,8 @@
                     sp_pid = ds['particle_index']
                     self.ds_sort = sp_pid.argsort()
                     sp_pid = sp_pid[self.ds_sort]
-                    # The result of searchsorted is an array with the positions
-                    # of the indexes in pid as they are in sp_pid. This is
-                    # because each element of pid is in sp_pid only once.
-                    self.particle_mask = np.searchsorted(sp_pid, pid)
+                    # This matches them up.
+                    self.particle_mask = np.in1d(sp_pid, pid)
                 # We won't store this field below in saved_fields because
                 # that would mean keeping two copies of it, one in the yt
                 # machinery and one here.
@@ -1328,6 +1381,7 @@
         """
         halos = None
         self.halo_to_fname = {}
+        self.fname_halos = {}
         for file in files:
             fp = open(file, 'rb')
             # read the header
@@ -1338,6 +1392,8 @@
             # Record which binary file holds these halos.
             for halo in new_halos['id']:
                 self.halo_to_fname[halo] = file
+            # Record how many halos are stored in each binary file.
+            self.fname_halos[file] = header['num_halos']
             # Add to existing.
             if halos is not None:
                 halos = np.concatenate((new_halos, halos))



https://bitbucket.org/yt_analysis/yt/changeset/131448599aa4/
changeset:   131448599aa4
branch:      yt
user:        sskory
date:        2012-11-07 23:53:20
summary:     Modifying how halos are read into yt.
affected #:  3 files
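
Part of the change below maps an out_N.list file back to the halos_N.*.bin
files written for the same output. A sketch of that filename logic, mirroring
the patch (the function name is illustrative; rstrip('.list') strips a
character set, which is safe for names like out_3.list):

    import glob
    import os.path as path

    def binary_files_for(out_list):
        # e.g. 'rockstar_halos/out_3.list' -> 'rockstar_halos/halos_3.*.bin'
        basedir = path.dirname(out_list)
        n = int(out_list.split('_')[-1].rstrip('.list'))
        return glob.glob(path.join(basedir, 'halos_%d.*.bin' % n))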

diff -r 1a856afd60f7dc48065c3debaadb9b1950f555e3 -r 131448599aa431659301377206951583df0b34f9 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1317,12 +1317,7 @@
 class RockstarHaloList(HaloList):
     _name = "Rockstar"
     _halo_class = RockstarHalo
-    #because we don't yet know halo-particle affiliations
-    #most of the halo list methods are not implemented
-    #furthermore, Rockstar only accepts DM particles of
-    #a fixed mass, so we don't allow stars at all
-    #Still, we inherit from HaloList because in the future
-    #we might implement halo-particle affiliations
+
     def __init__(self, pf, out_list):
         ParallelAnalysisInterface.__init__(self)
         mylog.info("Initializing Rockstar List")
@@ -1411,12 +1406,15 @@
         by Rockstar into memory."""
         
         pf = self.pf
-        out_list = self.out_list
-        # We need to also get the binary files.
-        basedir = os.path.dirname(out_list)
-        files = glob.glob(basedir + '/halos*bin')
+        # In order to read the binary data, we need to figure out which 
+        # binary files belong to this output.
+        basedir = os.path.dirname(self.out_list)
+        s = self.out_list.split('_')[-1]
+        s = s.rstrip('.list')
+        n = int(s)
+        fglob = path.join(basedir, 'halos_%d.*.bin' % n)
+        files = glob.glob(fglob)
         halos = self._get_halos_binary(files)
-        
         #Jc = 1.98892e33/pf['mpchcm']*1e5
         Jc = 1.0
         length = 1.0 / pf['mpchcm']


diff -r 1a856afd60f7dc48065c3debaadb9b1950f555e3 -r 131448599aa431659301377206951583df0b34f9 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -26,6 +26,7 @@
 from yt.mods import *
 from os import environ
 from os import mkdir
+from os import path
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, ProcessorPool, Communicator
 
@@ -74,7 +75,7 @@
             The default is comm.size - num_readers - 1.
         outbase: str
             This is where the out*list files that Rockstar makes should be
-            placed. Default is str(pf)+'_rockstar'.
+            placed. Default is 'rockstar_halos'.
         particle_mass: float
             This sets the DM particle mass used in Rockstar.
         dm_type: 1
@@ -112,26 +113,24 @@
             ts = TimeSeriesData([ts])
         self.ts = ts
         self.dm_type = dm_type
-        if self.comm.size > 1: 
-            self.comm.barrier()            
         tpf = ts.__iter__().next()
-        def _particle_count(field,data):
+        def _particle_count(field, data):
             try:
-                return (data["particle_type"]==dm_type)
+                return (data["particle_type"]==dm_type).sum()
             except KeyError:
-                return np.ones_like(data["particle_position_x"]).astype('bool')
+                return np.prod(data["particle_position_x"].shape)
         add_field("particle_count",function=_particle_count, not_in_all=True,
-        particle_type=True)
+            particle_type=True)
         # Get total_particles in parallel.
         dd = tpf.h.all_data()
-        self.total_particles = dd.quantities['TotalQuantity']('particle_count')[0]
+        self.total_particles = int(dd.quantities['TotalQuantity']('particle_count')[0])
         self.hierarchy = tpf.h
         self.particle_mass = particle_mass 
         self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
         data_source = tpf.h.all_data()
         if outbase is None:
-            outbase = str(tpf)+'_rockstar'
-        self.outbase = outbase        
+            outbase = 'rockstar_halos'
+        self.outbase = outbase
         if num_writers is None:
             num_writers = self.comm.size - num_readers -1
         self.num_readers = num_readers
@@ -147,9 +146,6 @@
         data_source = tpf.h.all_data()
         self.handler = rockstar_interface.RockstarInterface(
                 ts, data_source)
-        if outbase is None:
-            outbase = str(tpf)+'_rockstar'
-        self.outbase = outbase        
 
     def __del__(self):
         self.pool.free_all()
@@ -201,6 +197,15 @@
         if self.workgroup.name == "server":
             if not os.path.exists(self.outbase):
                 os.mkdir(self.outbase)
+            # Make a record of which dataset corresponds to which set of
+            # output files because it will be easy to lose this connection.
+            fp = open(self.outbase + '/pfs.txt', 'w')
+            fp.write("# pfname\tindex\n")
+            for i, pf in enumerate(self.ts):
+                pfloc = path.join(path.relpath(pf.fullpath), pf.basename)
+                line = "%s\t%d\n" % (pfloc, i)
+                fp.write(line)
+            fp.close()
         if self.comm.size == 1:
             self.handler.call_rockstar()
         else:
@@ -215,7 +220,6 @@
                 self.handler.start_client()
             self.pool.free_all()
         self.comm.barrier()
-        #quickly rename the out_0.list 
     
     def halo_list(self,file_name='out_0.list'):
         """


diff -r 1a856afd60f7dc48065c3debaadb9b1950f555e3 -r 131448599aa431659301377206951583df0b34f9 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -365,6 +365,7 @@
                     tpf.domain_left_edge[0]) * tpf['mpchcm']
         setup_config()
         rh = self
+        rh.dm_type = dm_type
         cdef LPG func = rh_read_particles
         set_load_particles_generic(func)
 



https://bitbucket.org/yt_analysis/yt/changeset/99609dc959d1/
changeset:   99609dc959d1
branch:      yt
user:        sskory
date:        2012-11-08 00:31:40
summary:     Added tag rockstar fixes for changeset 131448599aa4
affected #:  1 file

diff -r 131448599aa431659301377206951583df0b34f9 -r 99609dc959d15dff397166737a9f39a1ad5e369b .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5152,3 +5152,4 @@
 0000000000000000000000000000000000000000 svn.993
 fff7118f00e25731ccf37cba3082b8fcb73cf90e svn.371
 0000000000000000000000000000000000000000 svn.371
+131448599aa431659301377206951583df0b34f9 rockstar fixes



https://bitbucket.org/yt_analysis/yt/changeset/913166f0d5fa/
changeset:   913166f0d5fa
branch:      yt
user:        sskory
date:        2012-11-08 00:34:30
summary:     Removed tag rockstar fixes
affected #:  1 file

diff -r 99609dc959d15dff397166737a9f39a1ad5e369b -r 913166f0d5fa8fcb355ce8ff044aff461767559c .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5153,3 +5153,5 @@
 fff7118f00e25731ccf37cba3082b8fcb73cf90e svn.371
 0000000000000000000000000000000000000000 svn.371
 131448599aa431659301377206951583df0b34f9 rockstar fixes
+131448599aa431659301377206951583df0b34f9 rockstar fixes
+0000000000000000000000000000000000000000 rockstar fixes



https://bitbucket.org/yt_analysis/yt/changeset/2edcaa422d6d/
changeset:   2edcaa422d6d
branch:      yt
user:        sskory
date:        2012-11-08 00:34:37
summary:     Removed tag hop callback
affected #:  1 file

diff -r 913166f0d5fa8fcb355ce8ff044aff461767559c -r 2edcaa422d6dad62d144b77926688b33817b0ac3 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5155,3 +5155,5 @@
 131448599aa431659301377206951583df0b34f9 rockstar fixes
 131448599aa431659301377206951583df0b34f9 rockstar fixes
 0000000000000000000000000000000000000000 rockstar fixes
+f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
+0000000000000000000000000000000000000000 hop callback



https://bitbucket.org/yt_analysis/yt/changeset/d0ed284a43e1/
changeset:   d0ed284a43e1
branch:      yt
user:        sskory
date:        2012-11-08 17:38:33
summary:     Adding force_res as a parameter to Rockstar.
affected #:  2 files
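
A short usage sketch of the new parameter, continuing the earlier
test_rockstar.py example (ts and pm as defined there; the 0.0024 value is
illustrative):

    # force_res is in Mpc/h; the value here is illustrative.
    rh = RockstarHaloFinder(ts, particle_mass=pm, force_res=0.0024)
    rh.run()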

diff -r 2edcaa422d6dad62d144b77926688b33817b0ac3 -r d0ed284a43e1b9217c6828fc4dc5279f4bac0f5d yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -49,7 +49,7 @@
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
     def __init__(self, ts, num_readers = 1, num_writers = None, 
-            outbase=None,particle_mass=-1.0,dm_type=1):
+            outbase=None,particle_mass=-1.0,dm_type=1,force_res=None):
         r"""Spawns the Rockstar Halo finder, distributes dark matter
         particles and finds halos.
 
@@ -81,6 +81,9 @@
         dm_type: int
             In order to exclude stars and other particle types, define
             the dm_type. Default is 1, as Enzo has the DM particle type=1.
+        force_res: None
+            The default force resolution is 0.0012 comoving Mpc/h.
+            This overrides Rockstar's default.
 
         Returns
         -------
@@ -136,6 +139,7 @@
         self.num_readers = num_readers
         self.num_writers = num_writers
         self.particle_mass = particle_mass
+        self.force_res = force_res
         self.le = tpf.domain_left_edge
         self.re = tpf.domain_right_edge
         if self.num_readers + self.num_writers + 1 != self.comm.size:
@@ -145,7 +149,7 @@
         self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
         data_source = tpf.h.all_data()
         self.handler = rockstar_interface.RockstarInterface(
-                ts, data_source)
+                self.ts, data_source)
 
     def __del__(self):
         self.pool.free_all()
@@ -188,6 +192,7 @@
                     writing_port = -1,
                     block_ratio = block_ratio,
                     outbase = self.outbase,
+                    force_res=self.force_res,
                     particle_mass = float(self.particle_mass),
                     **kwargs)
         #because rockstar *always* write to exactly the same
@@ -220,6 +225,7 @@
                 self.handler.start_client()
             self.pool.free_all()
         self.comm.barrier()
+        self.pool.free_all()
     
     def halo_list(self,file_name='out_0.list'):
         """


diff -r 2edcaa422d6dad62d144b77926688b33817b0ac3 -r d0ed284a43e1b9217c6828fc4dc5279f4bac0f5d yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -292,7 +292,6 @@
     #for i in range(3):
     #    print p[0][tnpart-1].pos[i],
     #print ""
-    print "done"
 
 cdef class RockstarInterface:
 
@@ -317,14 +316,17 @@
                        int parallel = False, int num_readers = 1,
                        int num_writers = 1,
                        int writing_port = -1, int block_ratio = 1,
-                       int periodic = 1, 
+                       int periodic = 1, force_res=None,
                        int min_halo_size = 25, outbase = "None"):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
         global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
-        global OVERLAP_LENGTH, TOTAL_PARTICLES
+        global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
+        if force_res is not None:
+            FORCE_RES=np.float64(force_res)
+            print "set force res to ",FORCE_RES
         OVERLAP_LENGTH = 0.0
         if parallel:
             PARALLEL_IO = 1



https://bitbucket.org/yt_analysis/yt/changeset/d7d8bb037880/
changeset:   d7d8bb037880
branch:      yt
user:        sskory
date:        2012-11-10 00:45:27
summary:     This fixes the duplicate halo problem. Readers now allocate
only enough space for the particles they will actually read in, not
TOTAL_PARTICLES. Also, grids are now assigned to readers cyclically
rather than by block, which prevents one reader from being assigned a
disproportionate share of the root-level grids (in Enzo at least; I
doubt that this affects other codes negatively).
affected #:  1 file
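
The difference between block and cyclic assignment, in a toy example (grid
ordering follows the hierarchy, so the first entries are the root-level
grids):

    import numpy as np

    grids = np.arange(10)          # stand-in for dd._grids
    NUM_BLOCKS = 3
    # Old: contiguous blocks; reader 0 gets the first chunk, which is
    # dominated by root-level grids.
    block0 = np.array_split(grids, NUM_BLOCKS)[0]   # [0 1 2 3]
    # New: cyclic (round-robin) assignment spreads them evenly.
    cyclic0 = grids[0::NUM_BLOCKS]                  # [0 3 6 9]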

diff -r d0ed284a43e1b9217c6828fc4dc5279f4bac0f5d -r d7d8bb0378809c83b725aeb4e4112bedeb109936 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -247,14 +247,25 @@
     block = int(str(filename).rsplit(".")[-1])
     
 
-    # Now we want to grab data from only a subset of the grids.
+    # Now we want to grab data from only a subset of the grids for each reader.
     n = rh.block_ratio
     dd = pf.h.all_data()
     SCALE_NOW = 1.0/(pf.current_redshift+1.0)
-    grids = np.array_split(dd._grids, NUM_BLOCKS)[block]
-    tnpart = TOTAL_PARTICLES
-    print "Loading indices: size = ", tnpart
-    p[0] = <particle *> malloc(sizeof(particle) * tnpart)
+    grids = dd._grids[block::NUM_BLOCKS]
+
+    # First we need to find out how many this reader is going to read in.
+    local_parts = 0
+    for g in grids:
+        try:
+            iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
+        except KeyError:
+            iddm = np.ones_like(dd._get_data_from_grid(g, "particle_index")).astype('bool')
+        arri = dd._get_data_from_grid(g, "particle_index").astype("int64")
+        arri = arri[iddm] #pick only DM
+        local_parts += arri.size
+    print "Loading indices: size = ", local_parts
+    p[0] = <particle *> malloc(sizeof(particle) * local_parts)
+
     conv[0] = conv[1] = conv[2] = pf["mpchcm"]
     conv[3] = conv[4] = conv[5] = 1e-5
     left_edge[0] = pf.domain_left_edge[0]
@@ -283,15 +294,8 @@
                 p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
             fi += 1
         pi += npart
-    num_p[0] = tnpart
-    #print 'first particle coordinates'
-    #for i in range(3):
-    #    print p[0][0].pos[i],
-    #print ""
-    #print 'last particle coordinates'
-    #for i in range(3):
-    #    print p[0][tnpart-1].pos[i],
-    #print ""
+    num_p[0] = local_parts
+    print "Done loading particles."
 
 cdef class RockstarInterface:
 



https://bitbucket.org/yt_analysis/yt/changeset/3766fdf792f6/
changeset:   3766fdf792f6
branch:      yt
user:        sskory
date:        2012-11-10 01:03:08
summary:     Making readers with NUM_BLOCKS=1 revert to TOTAL_PARTICLES
and not read grid data twice.
affected #:  1 file

diff -r d7d8bb0378809c83b725aeb4e4112bedeb109936 -r 3766fdf792f65f9e39f46a310462eecd988cdce2 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -253,17 +253,22 @@
     SCALE_NOW = 1.0/(pf.current_redshift+1.0)
     grids = dd._grids[block::NUM_BLOCKS]
 
-    # First we need to find out how many this reader is going to read in.
-    local_parts = 0
-    for g in grids:
-        try:
-            iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
-        except KeyError:
-            iddm = np.ones_like(dd._get_data_from_grid(g, "particle_index")).astype('bool')
-        arri = dd._get_data_from_grid(g, "particle_index").astype("int64")
-        arri = arri[iddm] #pick only DM
-        local_parts += arri.size
-    print "Loading indices: size = ", local_parts
+    # First we need to find out how many this reader is going to read in
+    # if the number of readers > 1.
+    if NUM_BLOCKS > 1:
+        local_parts = 0
+        for g in grids:
+            try:
+                iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
+            except KeyError:
+                iddm = np.ones_like(dd._get_data_from_grid(g, "particle_index")).astype('bool')
+            arri = dd._get_data_from_grid(g, "particle_index").astype("int64")
+            arri = arri[iddm] #pick only DM
+            local_parts += arri.size
+    else:
+        local_parts = TOTAL_PARTICLES
+
+    print "Loading particles: size = ", local_parts
     p[0] = <particle *> malloc(sizeof(particle) * local_parts)
 
     conv[0] = conv[1] = conv[2] = pf["mpchcm"]



https://bitbucket.org/yt_analysis/yt/changeset/326ed6a5545c/
changeset:   326ed6a5545c
branch:      yt
user:        sskory
date:        2012-11-12 20:46:23
summary:     Auto-setting the force resolution for Rockstar.
affected #:  2 files
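
How the default is derived, as a sketch (note that this changeset writes
pf['mpch'] where it means ts[-1]['mpch']; the "Small typo" changeset that
follows corrects it, and the corrected form is shown here):

    # Default force resolution: the smallest cell width of the latest
    # snapshot in the time series, in Mpc/h.
    pf_last = ts[-1]
    force_res = pf_last.h.get_smallest_dx() * pf_last['mpch']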

diff -r 3766fdf792f65f9e39f46a310462eecd988cdce2 -r 326ed6a5545c342f523e979970938b9b5c3e9ef1 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -81,10 +81,15 @@
         dm_type: int
             In order to exclude stars and other particle types, define
             the dm_type. Default is 1, as Enzo has the DM particle type=1.
-        force_res: None
-            The default force resolution is 0.0012 comoving Mpc/h.
-            This overrides Rockstar's default.
-
+        force_res: float
+            This parameter specifies the force resolution that Rockstar uses
+            in units of Mpc/h.
+            If no value is provided, this parameter is automatically set to
+            the width of the smallest grid element in the simulation from the
+            last data snapshot (i.e. the one where time has evolved the
+            longest) in the time series:
+            ``pf_last.h.get_smallest_dx() * pf_last['mpch']``.
+            
         Returns
         -------
         None
@@ -139,7 +144,10 @@
         self.num_readers = num_readers
         self.num_writers = num_writers
         self.particle_mass = particle_mass
-        self.force_res = force_res
+        if force_res is None:
+            self.force_res = ts[-1].h.get_smallest_dx() * pf['mpch']
+        else:
+            self.force_res = force_res
         self.le = tpf.domain_left_edge
         self.re = tpf.domain_right_edge
         if self.num_readers + self.num_writers + 1 != self.comm.size:


diff -r 3766fdf792f65f9e39f46a310462eecd988cdce2 -r 326ed6a5545c342f523e979970938b9b5c3e9ef1 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -335,7 +335,7 @@
         global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
         if force_res is not None:
             FORCE_RES=np.float64(force_res)
-            print "set force res to ",FORCE_RES
+            #print "set force res to ",FORCE_RES
         OVERLAP_LENGTH = 0.0
         if parallel:
             PARALLEL_IO = 1



https://bitbucket.org/yt_analysis/yt/changeset/814bd453bbfe/
changeset:   814bd453bbfe
branch:      yt
user:        sskory
date:        2012-11-12 20:55:54
summary:     Small typo re: Rockstar.
affected #:  1 file

diff -r 326ed6a5545c342f523e979970938b9b5c3e9ef1 -r 814bd453bbfe641eda1d39e24cbb94794732eb12 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -145,7 +145,7 @@
         self.num_writers = num_writers
         self.particle_mass = particle_mass
         if force_res is None:
-            self.force_res = ts[-1].h.get_smallest_dx() * pf['mpch']
+            self.force_res = ts[-1].h.get_smallest_dx() * ts[-1]['mpch']
         else:
             self.force_res = force_res
         self.le = tpf.domain_left_edge



https://bitbucket.org/yt_analysis/yt/changeset/6607999ff68e/
changeset:   6607999ff68e
branch:      yt
user:        sskory
date:        2012-11-12 21:00:34
summary:     Merge.
affected #:  5 files



diff -r 72fd8d5173d2a76978acf3384a5605f8370001a6 -r 6607999ff68ebedf31377250bc5eadce7250fe7d yt/analysis_modules/halo_finding/api.py
--- a/yt/analysis_modules/halo_finding/api.py
+++ b/yt/analysis_modules/halo_finding/api.py
@@ -45,4 +45,8 @@
     FOFHaloFinder, \
     HaloFinder, \
     LoadHaloes, \
-    LoadTextHaloes
+    LoadTextHalos, \
+    LoadTextHaloes, \
+    RockstarHalo, \
+    RockstarHaloList, \
+    LoadRockstarHalos


diff -r 72fd8d5173d2a76978acf3384a5605f8370001a6 -r 6607999ff68ebedf31377250bc5eadce7250fe7d yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -34,6 +34,8 @@
 import numpy as np
 import random
 import sys
+import glob
+import os
 import os.path as path
 from collections import defaultdict
 
@@ -79,6 +81,7 @@
     def __init__(self, halo_list, id, indices=None, size=None, CoM=None,
         max_dens_point=None, group_total_mass=None, max_radius=None,
         bulk_vel=None, tasks=None, rms_vel=None, supp=None):
+        self.halo_list = halo_list
         self._max_dens = halo_list._max_dens
         self.id = id
         self.data = halo_list._data_source
@@ -106,6 +109,10 @@
             self.supp = {}
         else:
             self.supp = supp
+        # Used for Loaded and RockstarHalos
+        self.saved_fields = {}
+        self.particle_mask = None
+        self.ds_sort = None
 
     def center_of_mass(self):
         r"""Calculate and return the center of mass.
@@ -540,45 +547,7 @@
             e0_vector[2], tilt)
 
 class RockstarHalo(Halo):
-    def __init__(self,halo_list,index,ID, DescID, Mvir, Vmax, Vrms, Rvir, Rs, Np, 
-                  X, Y, Z, VX, VY, VZ, JX, JY, JZ, Spin):
-        """Implement the properties reported by Rockstar: ID, Descendant ID,
-           Mvir, Vmax, Vrms, Rvir, Rs, Np, XYZ, VXYZ, JXYZ, and spin.
-           Most defaults are removed since we don't read in which halos
-           particles belong to. 
-        """
-        #we can still use get_sphere!
-        self.ID = ID #from rockstar
-        self.id = index #index in the halo list
-        self.pf = halo_list.pf
-
-        self.DescID = DescID
-        self.Mvir = Mvir
-        self.Vmax = Vmax
-        self.Vrms = Vrms
-        self.Rvir = Rvir
-        self.Rs   = Rs
-        self.Np   = Np
-        self.X    = X
-        self.Y    = Y
-        self.Z    = Z
-        self.VX   = VX
-        self.VY   = VY
-        self.VZ   = VZ
-        self.JX   = JX
-        self.JY   = JY
-        self.JZ   = JZ
-        self.Spin = Spin
-
-        #Halo.__init__(self,halo_list,index,
-        self.size=Np 
-        self.CoM=np.array([X,Y,Z])
-        self.max_dens_point=-1
-        self.group_total_mass=-1
-        self.max_radius=Rvir
-        self.bulk_vel=np.array([VX,VY,VZ])*1e5
-        self.rms_vel=-1
-        self.group_total_mass = -1 #not implemented 
+    _name = "RockstarHalo"
     
     def maximum_density(self):
         r"""Not implemented."""
@@ -588,42 +557,74 @@
         r"""Not implemented."""
         return self.center_of_mass()
 
-    def total_mass(self):
-        r"""Not implemented."""
-        return -1
-
-    def get_size(self):
-        r"""Return the number of particles belonging to the halo."""
-        return self.Np
-
     def write_particle_list(self,handle):
         r"""Not implemented."""
         return -1
 
     def virial_mass(self):
         r"""Virial mass in Msun/h"""
-        return self.Mvir
+        return self.supp['m']
 
     def virial_radius(self):
         r"""Virial radius in Mpc/h comoving"""
-        return self.Rvir
+        return self.supp['r']
 
-    def virial_bin(self):
-        r"""Not implemented"""
-        return -1
+    def __getitem__(self, key):
+        # This function will try to get particle data in one of three ways,
+        # in descending preference.
+        # 1. From saved_fields, e.g. we've already got it.
+        # 2. From the halo binary files off disk.
+        # 3. Use the unique particle indexes of the halo to select a missing
+        # field from an AMR Sphere.
+        try:
+            # We've already got it.
+            return self.saved_fields[key]
+        except KeyError:
+            # Gotta go get it from the Rockstar binary file.
+            if key == 'particle_index':
+                IDs = self._get_particle_data(self.supp['id'],
+                    self.halo_list.halo_to_fname, self.size, key)
+                IDs = IDs[IDs.argsort()]
+                self.saved_fields[key] = IDs
+                return self.saved_fields[key]
+            else:
+                # Dynamically create the masking array for particles, and get
+                # the data using standard yt methods. The factor of 4 (> 1)
+                # accounts for the fact that 'r' for Rockstar halos is the
+                # virial radius, not the maximum radius. 4 might be a bit
+                # too generous, but it's safer this way.
+                ds = self.pf.h.sphere(self.CoM, 4 * self.max_radius)
+                if self.particle_mask is None:
+                    # This is from disk.
+                    pid = self.__getitem__('particle_index')
+                    # This is from the sphere.
+                    sp_pid = ds['particle_index']
+                    self.ds_sort = sp_pid.argsort()
+                    sp_pid = sp_pid[self.ds_sort]
+                    # This matches them up.
+                    self.particle_mask = np.in1d(sp_pid, pid)
+                # We won't store this field below in saved_fields because
+                # that would mean keeping two copies of it, one in the yt
+                # machinery and one here.
+                return ds[key][self.ds_sort][self.particle_mask]
 
-    def virial_density(self):
-        r"""Not implemented """
-        return -1
-
-    def virial_info(self):
-        r"""Not implemented"""
-        return -1 
-
-    def __getitem__(self,key):
-        r"""Not implemented"""
-        return None
-
+    def _get_particle_data(self, halo, fnames, size, field):
+        # Given a list of file names, a halo, its size, and the desired field,
+        # this returns the particle indices for that halo.
+        file = fnames[halo]
+        mylog.info("Getting %d particles from Rockstar binary file %s." % (self.supp['num_p'], file))
+        fp = open(file, 'rb')
+        # We need to skip past the header and all the halos.
+        fp.seek(self.halo_list.header_dt.itemsize + \
+            self.halo_list.fname_halos[file] * \
+            self.halo_list.halo_dt.itemsize, os.SEEK_CUR)
+        # Now we skip ahead to where this halo's particles begin.
+        fp.seek(self.supp['p_start'] * 8, os.SEEK_CUR)
+        # And finally, read in the ids.
+        IDs = np.fromfile(fp, dtype=np.int64, count=self.supp['num_p'])
+        fp.close()
+        print IDs.shape
+        return IDs
 
     def get_ellipsoid_parameters(self):
         r"""Calculate the parameters that describe the ellipsoid of
@@ -828,9 +829,6 @@
         self.fnames = fnames
         self.bin_count = None
         self.overdensity = None
-        self.saved_fields = {}
-        self.particle_mask = None
-        self.ds_sort = None
         self.indices = np.array([])  # Never used for a LoadedHalo.
         # A supplementary data dict.
         if supp is None:
@@ -852,8 +850,9 @@
             # Gotta go get it from the halo h5 files.
             field_data = self._get_particle_data(self.id, self.fnames,
                 self.size, key)
-            #if key == 'particle_position_x': field_data = None
             if field_data is not None:
+                if key == 'particle_index':
+                    field_data = field_data[field_data.argsort()]
                 self.saved_fields[key] = field_data
                 return self.saved_fields[key]
             else:
@@ -868,10 +867,8 @@
                     sp_pid = ds['particle_index']
                     self.ds_sort = sp_pid.argsort()
                     sp_pid = sp_pid[self.ds_sort]
-                    # The result of searchsorted is an array with the positions
-                    # of the indexes in pid as they are in sp_pid. This is
-                    # because each element of pid is in sp_pid only once.
-                    self.particle_mask = np.searchsorted(sp_pid, pid)
+                    # This matches them up.
+                    self.particle_mask = np.in1d(sp_pid, pid)
                 # We won't store this field below in saved_fields because
                 # that would mean keeping two copies of it, one in the yt
                 # machinery and one here.
@@ -1318,23 +1315,51 @@
         f.close()
 
 class RockstarHaloList(HaloList):
-    #because we don't yet no halo-particle affiliations
-    #most of the halo list methods are not implemented
-    #furthermore, Rockstar only accepts DM particles of
-    #a fixed mass, so we don't allow stars at all
-    #Still, we inherit from HaloList because in the future
-    #we might implement halo-particle affiliations
-    def __init__(self,pf,out_list):
+    _name = "Rockstar"
+    _halo_class = RockstarHalo
+
+    def __init__(self, pf, out_list):
+        ParallelAnalysisInterface.__init__(self)
         mylog.info("Initializing Rockstar List")
+        self._setup()
         self._data_source = None
         self._groups = []
         self._max_dens = -1
         self.pf = pf
         self.out_list = out_list
+        self._data_source = pf.h.all_data()
         mylog.info("Parsing Rockstar halo list")
-        self._parse_output(out_list)
+        self._parse_output()
         mylog.info("Finished %s"%out_list)
 
+    def _setup(self):
+        """ A few things we'll need later."""
+        # see io_internal.h in Rockstar.
+        BINARY_HEADER_SIZE=256
+        self.header_dt = np.dtype([('magic', np.uint64), ('snap', np.int64),
+            ('chunk', np.int64), ('scale', np.float32), ('Om', np.float32),
+            ('Ol', np.float32), ('h0', np.float32),
+            ('bounds', (np.float32, 6)), ('num_halos', np.int64),
+            ('num_particles', np.int64), ('box_size', np.float32),
+            ('particle_mass', np.float32), ('particle_type', np.int64),
+            ('unused', (np.byte, BINARY_HEADER_SIZE - 4*12 - 8*6))])
+        # see halo.h.
+        self.halo_dt = np.dtype([('id', np.int64), ('pos', (np.float32, 6)),
+            ('corevel', (np.float32, 3)), ('bulkvel', (np.float32, 3)),
+            ('m', np.float32), ('r', np.float32), ('child_r', np.float32),
+            ('mgrav', np.float32), ('vmax', np.float32),
+            ('rvmax', np.float32), ('rs', np.float32),
+            ('vrms', np.float32), ('J', (np.float32, 3)),
+            ('energy', np.float32), ('spin', np.float32),
+            ('padding1', np.float32), ('num_p', np.int64),
+            ('num_child_particles', np.int64), ('p_start', np.int64),
+            ('desc', np.int64), ('flags', np.int64), ('n_core', np.int64),
+            ('min_pos_err', np.float32), ('min_vel_err', np.float32),
+            ('min_bulkvel_err', np.float32), ('padding2', np.float32),])
+        # Above, padding1 & padding2 are due to C struct alignment, which
+        # pads between 4- and 8-byte values so that fields do not overlap.
+        self.tocleanup = ['padding1', 'padding2']
+
     def _run_finder(self):
         pass
 
@@ -1344,70 +1369,77 @@
     def _get_dm_indices(self):
         pass
 
-    def _parse_output(self,out_list=None):
+    def _get_halos_binary(self, files):
+        """
+        Parse the binary files to get information about halos in higher
+        precision than the text file.
+        """
+        halos = None
+        self.halo_to_fname = {}
+        self.fname_halos = {}
+        for file in files:
+            fp = open(file, 'rb')
+            # read the header
+            header = np.fromfile(fp, dtype=self.header_dt, count=1)
+            # read the halo information
+            new_halos = np.fromfile(fp, dtype=self.halo_dt,
+                count=header['num_halos'])
+            # Record which binary file holds these halos.
+            for halo in new_halos['id']:
+                self.halo_to_fname[halo] = file
+            # Record how many halos are stored in each binary file.
+            self.fname_halos[file] = header['num_halos']
+            # Add to existing.
+            if halos is not None:
+                halos = np.concatenate((new_halos, halos))
+            else:
+                halos = new_halos.copy()
+            fp.close()
+        # Sort them by mass.
+        halos.sort(order='m')
+        halos = np.flipud(halos)
+        return halos
+
+    def _parse_output(self):
         """
         Read the out_*.list text file produced
         by Rockstar into memory."""
         
         pf = self.pf
-
-        if out_list is None:
-            out_list = self.out_list
-
-        lines = open(out_list).readlines()
-        names = []
-        formats = []
-        
-        #find the variables names from the first defining line
-        names = lines[0].replace('#','').split(' ')
-        for j,line in enumerate(lines):
-            if not line.startswith('#'): break
-
-        #find out the table datatypes but evaluating the first data line
-        splits = filter(lambda x: len(x.strip()) > 0 ,line.split(' '))
-        for num in splits:
-            if 'nan' not in num:
-                formats += np.array(eval(num)).dtype,
-            else:
-                formats += np.dtype('float'),
-        assert len(formats) == len(names)
-
+        # In order to read the binary data, we need to figure out which 
+        # binary files belong to this output.
+        basedir = os.path.dirname(self.out_list)
+        s = self.out_list.split('_')[-1]
+        s = s.rstrip('.list')
+        n = int(s)
+        fglob = path.join(basedir, 'halos_%d.*.bin' % n)
+        files = glob.glob(fglob)
+        halos = self._get_halos_binary(files)
         #Jc = 1.98892e33/pf['mpchcm']*1e5
         Jc = 1.0
-        conv = dict(X=1.0/pf['mpchcm'],
-                    Y=1.0/pf['mpchcm'],
-                    Z=1.0/pf['mpchcm'], #to unitary
-                    VX=1e0,VY=1e0,VZ=1e0, #to km/s
-                    Mvir=1.0, #Msun/h
-                    Vmax=1e0,Vrms=1e0,
-                    Rvir=1.0/pf['kpchcm'],
-                    Rs=1.0/pf['kpchcm'],
-                    JX=Jc,JY=Jc,JZ=Jc)
-        dtype = {'names':names,'formats':formats}
-        halo_table = np.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            
-        #convert position units  
-        for name in names:
-            halo_table[name]=halo_table[name]*conv.get(name,1)
-        
-        for k,row in enumerate(halo_table):
-            args = tuple([val for val in row])
-            halo = RockstarHalo(self,k,*args)
+        length = 1.0 / pf['mpchcm']
+        conv = dict(pos = np.array([length, length, length,
+                                    1, 1, 1]), # to unitary
+                    r=1.0/pf['kpchcm'], # to unitary
+                    rs=1.0/pf['kpchcm'], # to unitary
+                    )
+        #convert units
+        for name in self.halo_dt.names:
+            halos[name]=halos[name]*conv.get(name,1)
+        # Store the halos in the halo list.
+        for i, row in enumerate(halos):
+            supp = {name:row[name] for name in self.halo_dt.names}
+            # Delete the padding columns. 'supp' below will contain
+            # repeated information, but that's OK.
+            for item in self.tocleanup: del supp[item]
+            halo = RockstarHalo(self, i, size=row['num_p'],
+                CoM=row['pos'][0:3], group_total_mass=row['m'],
+                max_radius=row['r'], bulk_vel=row['bulkvel'],
+                rms_vel=row['vrms'], supp=supp)
             self._groups.append(halo)
-    
 
-    #len is ok
-    #iter is OK
-    #getitem is ok
-    #nn is ok I think
-    #nn2d is ok I think
-
-    def write_out(self):
-        pass
     def write_particle_list(self):
         pass
-    
-
-    
 
 class HOPHaloList(HaloList):
 
@@ -2627,3 +2659,25 @@
             3.28392048e14
         """
         TextHaloList.__init__(self, pf, filename, columns, comment)
+
+LoadTextHalos = LoadTextHaloes
+
+class LoadRockstarHalos(GenericHaloFinder, RockstarHaloList):
+    def __init__(self, pf, filename = None):
+        r"""Load Rockstar halos off disk from Rockstar-output format.
+
+        Parameters
+        ----------
+        filename : str
+            The name of the Rockstar file to read in. Default = 
+            "(dataset_name)_rockstar/out_0.list' where (dataset_name)
+            is equal to str(pf).
+
+        Examples
+        --------
+        >>> pf = load("data0005")
+        >>> halos = LoadRockstarHalos(pf, "other_name.out")
+        """
+        if filename is None:
+            filename = str(pf) + '_rockstar/out_0.list'
+        RockstarHaloList.__init__(self, pf, filename)

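The header_dt and halo_dt layouts defined in _setup make the Rockstar binary outputs easy to inspect on their own. A minimal sketch of reading a single halos_*.bin file, mirroring the np.fromfile usage in _get_halos_binary above:

    import numpy as np

    def read_rockstar_binary(fname, header_dt, halo_dt):
        # One fixed-size header record, then num_halos packed halo records.
        fp = open(fname, 'rb')
        header = np.fromfile(fp, dtype=header_dt, count=1)
        halos = np.fromfile(fp, dtype=halo_dt,
            count=int(header['num_halos']))
        fp.close()
        return header, halos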

diff -r 72fd8d5173d2a76978acf3384a5605f8370001a6 -r 6607999ff68ebedf31377250bc5eadce7250fe7d yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -26,6 +26,7 @@
 from yt.mods import *
 from os import environ
 from os import mkdir
+from os import path
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, ProcessorPool, Communicator
 
@@ -47,36 +48,119 @@
         return data_source
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
-    def __init__(self, pf, num_readers = 1, num_writers = None, 
-            outbase=None,particle_mass=-1.0,overwrite=False,
-            left_edge = None, right_edge = None):
+    def __init__(self, ts, num_readers = 1, num_writers = None, 
+            outbase=None,particle_mass=-1.0,dm_type=1,force_res=None):
+        r"""Spawns the Rockstar Halo finder, distributes dark matter
+        particles and finds halos.
+
+        The halo finder requires dark matter particles of a fixed mass.
+        Rockstar has three main processes: reader, writer, and the 
+        server, which coordinates the reader and writer processes.
+
+        Parameters
+        ----------
+        ts : TimeSeriesData or StaticOutput
+            This is the data source containing the DM particles. Because 
+            halo IDs may change from one snapshot to the next, the only
+            way to keep a consistent halo ID across time is to feed 
+            Rockstar a set of snapshots, i.e., via TimeSeriesData.
+        num_readers: int
+            The number of readers can be increased from the default
+            of 1 in the event that a single snapshot is split among
+            many files. This can help in cases where performance is
+            IO-limited.
+        num_writers: int
+            The number of writers determines the number of processing threads
+            as well as the number of threads writing output data.
+            The default is comm.size - num_readers - 1.
+        outbase: str
+            This is where the out_*.list files that Rockstar makes should be
+            placed. Default is 'rockstar_halos'.
+        particle_mass: float
+            This sets the DM particle mass used in Rockstar.
+        dm_type: int
+            In order to exclude stars and other particle types, define
+            the dm_type. Default is 1, as Enzo has the DM particle type=1.
+        force_res: float
+            This parameter specifies the force resolution that Rockstar uses
+            in units of Mpc/h.
+            If no value is provided, this parameter is automatically set to
+            the width of the smallest grid element in the simulation from the
+            last data snapshot (i.e. the one where time has evolved the
+            longest) in the time series:
+            ``pf_last.h.get_smallest_dx() * pf_last['mpch']``.
+            
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+        To use the script below you must run it using MPI:
+        mpirun -np 3 python test_rockstar.py --parallel
+
+        test_rockstar.py:
+
+        from mpi4py import MPI
+        from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
+        from yt.mods import *
+        import sys
+
+        files = glob.glob('/u/cmoody3/data/a*')
+        files.sort()
+        ts = TimeSeriesData.from_filenames(files)
+        pm = 7.81769027e+11
+        rh = RockstarHaloFinder(ts, particle_mass=pm)
+        rh.run()
+        """
         ParallelAnalysisInterface.__init__(self)
         # No subvolume support
-        self.pf = pf
-        self.hierarchy = pf.h
+        #we assume that all of the snapshots in the time series
+        #use the same domain info as the first snapshot
+        if not isinstance(ts,TimeSeriesData):
+            ts = TimeSeriesData([ts])
+        self.ts = ts
+        self.dm_type = dm_type
+        tpf = ts.__iter__().next()
+        def _particle_count(field, data):
+            try:
+                return (data["particle_type"]==dm_type).sum()
+            except KeyError:
+                return np.prod(data["particle_position_x"].shape)
+        add_field("particle_count",function=_particle_count, not_in_all=True,
+            particle_type=True)
+        # Get total_particles in parallel.
+        dd = tpf.h.all_data()
+        self.total_particles = int(dd.quantities['TotalQuantity']('particle_count')[0])
+        self.hierarchy = tpf.h
+        self.particle_mass = particle_mass 
+        self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
+        data_source = tpf.h.all_data()
+        if outbase is None:
+            outbase = 'rockstar_halos'
+        self.outbase = outbase
         if num_writers is None:
             num_writers = self.comm.size - num_readers -1
         self.num_readers = num_readers
         self.num_writers = num_writers
-        self.particle_mass = particle_mass 
-        self.overwrite = overwrite
-        if left_edge is None:
-            left_edge = pf.domain_left_edge
-        if right_edge is None:
-            right_edge = pf.domain_right_edge
-        self.le = left_edge
-        self.re = right_edge
+        self.particle_mass = particle_mass
+        if force_res is None:
+            self.force_res = ts[-1].h.get_smallest_dx() * ts[-1]['mpch']
+        else:
+            self.force_res = force_res
+        self.le = tpf.domain_left_edge
+        self.re = tpf.domain_right_edge
         if self.num_readers + self.num_writers + 1 != self.comm.size:
             print '%i reader + %i writers != %i mpi'%\
                     (self.num_readers, self.num_writers, self.comm.size)
             raise RuntimeError
-        self.center = (pf.domain_right_edge + pf.domain_left_edge)/2.0
-        data_source = self.pf.h.all_data()
+        self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
+        data_source = tpf.h.all_data()
         self.handler = rockstar_interface.RockstarInterface(
-                self.pf, data_source)
-        if outbase is None:
-            outbase = str(self.pf)+'_rockstar'
-        self.outbase = outbase        
+                self.ts, data_source)
+
+    def __del__(self):
+        self.pool.free_all()
 
     def _get_hosts(self):
         if self.comm.size == 1 or self.workgroup.name == "server":
@@ -107,6 +191,18 @@
         if block_ratio != 1:
             raise NotImplementedError
         self._get_hosts()
+        self.handler.setup_rockstar(self.server_address, self.port,
+                    len(self.ts), self.total_particles, 
+                    self.dm_type,
+                    parallel = self.comm.size > 1,
+                    num_readers = self.num_readers,
+                    num_writers = self.num_writers,
+                    writing_port = -1,
+                    block_ratio = block_ratio,
+                    outbase = self.outbase,
+                    force_res=self.force_res,
+                    particle_mass = float(self.particle_mass),
+                    **kwargs)
         #because rockstar *always* writes to exactly the same
         #out_0.list filename we make a directory for it
         #to sit inside so it doesn't get accidentally
@@ -114,15 +210,15 @@
         if self.workgroup.name == "server":
             if not os.path.exists(self.outbase):
                 os.mkdir(self.outbase)
-        self.handler.setup_rockstar(self.server_address, self.port,
-                    parallel = self.comm.size > 1,
-                    num_readers = self.num_readers,
-                    num_writers = self.num_writers,
-                    writing_port = -1,
-                    block_ratio = block_ratio,
-                    outbase = self.outbase,
-                    particle_mass = float(self.particle_mass),
-                    **kwargs)
+            # Make a record of which dataset corresponds to which set of
+            # output files because it will be easy to lose this connection.
+            fp = open(self.outbase + '/pfs.txt', 'w')
+            fp.write("# pfname\tindex\n")
+            for i, pf in enumerate(self.ts):
+                pfloc = path.join(path.relpath(pf.fullpath), pf.basename)
+                line = "%s\t%d\n" % (pfloc, i)
+                fp.write(line)
+            fp.close()
         if self.comm.size == 1:
             self.handler.call_rockstar()
         else:
@@ -137,7 +233,7 @@
                 self.handler.start_client()
             self.pool.free_all()
         self.comm.barrier()
-        #quickly rename the out_0.list 
+        self.pool.free_all()
     
     def halo_list(self,file_name='out_0.list'):
         """

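The layout enforced in __init__ above reserves one MPI task for the Rockstar server, so num_readers + num_writers + 1 must equal comm.size. A small standalone sketch of that bookkeeping (not part of the commit):

    def pool_layout(comm_size, num_readers=1, num_writers=None):
        # One task is the server; the rest are split between readers
        # and writers, defaulting the writer count as the docstring says.
        if num_writers is None:
            num_writers = comm_size - num_readers - 1
        if num_readers + num_writers + 1 != comm_size:
            raise RuntimeError('%i readers + %i writers + 1 server != %i'
                % (num_readers, num_writers, comm_size))
        return num_readers, num_writers

For example, the mpirun -np 3 invocation in the docstring leaves exactly one reader and one writer alongside the server.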

diff -r 72fd8d5173d2a76978acf3384a5605f8370001a6 -r 6607999ff68ebedf31377250bc5eadce7250fe7d yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -237,32 +237,54 @@
     print "SINGLE_SNAP =", SINGLE_SNAP
 
 cdef class RockstarInterface
-
 cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
-    print 'reading from particle filename %s'%filename # should print ./inline.0
+    global SCALE_NOW
+    pf = rh.tsl.next()
+    print 'reading from particle filename %s: %s'%(filename,pf.basename)
     cdef np.float64_t conv[6], left_edge[6]
     cdef np.ndarray[np.int64_t, ndim=1] arri
     cdef np.ndarray[np.float64_t, ndim=1] arr
     block = int(str(filename).rsplit(".")[-1])
+    
 
-    # Now we want to grab data from only a subset of the grids.
+    # Now we want to grab data from only a subset of the grids for each reader.
     n = rh.block_ratio
-    dd = rh.pf.h.all_data()
-    grids = np.array_split(dd._grids, NUM_BLOCKS)[block]
-    tnpart = 0
-    for g in grids:
-        tnpart += dd._get_data_from_grid(g, "particle_index").size
-    p[0] = <particle *> malloc(sizeof(particle) * tnpart)
-    #print "Loading indices: size = ", tnpart
-    conv[0] = conv[1] = conv[2] = rh.pf["mpchcm"]
+    dd = pf.h.all_data()
+    SCALE_NOW = 1.0/(pf.current_redshift+1.0)
+    grids = dd._grids[block::NUM_BLOCKS]
+
+    # First we need to find out how many particles this reader is going to
+    # read in if the number of readers > 1.
+    if NUM_BLOCKS > 1:
+        local_parts = 0
+        for g in grids:
+            try:
+                iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
+            except KeyError:
+                iddm = np.ones_like(dd._get_data_from_grid(g, "particle_index")).astype('bool')
+            arri = dd._get_data_from_grid(g, "particle_index").astype("int64")
+            arri = arri[iddm] #pick only DM
+            local_parts += arri.size
+    else:
+        local_parts = TOTAL_PARTICLES
+
+    print "Loading particles: size = ", local_parts
+    p[0] = <particle *> malloc(sizeof(particle) * local_parts)
+
+    conv[0] = conv[1] = conv[2] = pf["mpchcm"]
     conv[3] = conv[4] = conv[5] = 1e-5
-    left_edge[0] = rh.pf.domain_left_edge[0]
-    left_edge[1] = rh.pf.domain_left_edge[1]
-    left_edge[2] = rh.pf.domain_left_edge[2]
+    left_edge[0] = pf.domain_left_edge[0]
+    left_edge[1] = pf.domain_left_edge[1]
+    left_edge[2] = pf.domain_left_edge[2]
     left_edge[3] = left_edge[4] = left_edge[5] = 0.0
     pi = 0
     for g in grids:
+        try:
+            iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
+        except KeyError:
+            iddm = np.ones_like(dd._get_data_from_grid(g, "particle_index")).astype('bool')
         arri = dd._get_data_from_grid(g, "particle_index").astype("int64")
+        arri = arri[iddm] #pick only DM
         npart = arri.size
         for i in range(npart):
             p[0][i+pi].id = arri[i]
@@ -272,38 +294,49 @@
                       "particle_velocity_x", "particle_velocity_y",
                       "particle_velocity_z"]:
             arr = dd._get_data_from_grid(g, field).astype("float64")
+            arr = arr[iddm] #pick DM
             for i in range(npart):
                 p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
             fi += 1
         pi += npart
-    num_p[0] = tnpart
-    print "Block #%i | Particles %i | Grids %i"%\
-            ( block, pi, len(grids))
+    num_p[0] = local_parts
+    print "Done loading particles."
 
 cdef class RockstarInterface:
 
-    cdef public object pf
     cdef public object data_source
+    cdef public object ts
+    cdef public object tsl
     cdef int rank
     cdef int size
     cdef public int block_ratio
+    cdef public int dm_type
+    cdef public int total_particles
 
-    def __cinit__(self, pf, data_source):
-        self.pf = pf
+    def __cinit__(self, ts, data_source):
+        self.ts = ts
+        self.tsl = ts.__iter__() #timeseries generator used by read
         self.data_source = data_source
 
     def setup_rockstar(self, char *server_address, char *server_port,
+                       int num_snaps, np.int64_t total_particles,
+                       int dm_type,
                        np.float64_t particle_mass = -1.0,
                        int parallel = False, int num_readers = 1,
                        int num_writers = 1,
                        int writing_port = -1, int block_ratio = 1,
-                       int periodic = 1, int num_snaps = 1,
+                       int periodic = 1, force_res=None,
                        int min_halo_size = 25, outbase = "None"):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
         global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
+        global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
+        if force_res is not None:
+            FORCE_RES=np.float64(force_res)
+            #print "set force res to ",FORCE_RES
+        OVERLAP_LENGTH = 0.0
         if parallel:
             PARALLEL_IO = 1
             PARALLEL_IO_SERVER_ADDRESS = server_address
@@ -319,31 +352,31 @@
         OUTPUT_FORMAT = "ASCII"
         NUM_SNAPS = num_snaps
         NUM_READERS = num_readers
-        NUM_SNAPS = 1
         NUM_WRITERS = num_writers
         NUM_BLOCKS = num_readers
         MIN_HALO_OUTPUT_SIZE=min_halo_size
+        TOTAL_PARTICLES = total_particles
         self.block_ratio = block_ratio
-
-        h0 = self.pf.hubble_constant
-        Ol = self.pf.omega_lambda
-        Om = self.pf.omega_matter
-        SCALE_NOW = 1.0/(self.pf.current_redshift+1.0)
+        
+        tpf = self.ts[0]
+        h0 = tpf.hubble_constant
+        Ol = tpf.omega_lambda
+        Om = tpf.omega_matter
+        SCALE_NOW = 1.0/(tpf.current_redshift+1.0)
         if not outbase =='None'.decode('UTF-8'):
             #output directory. since we can't change the output filenames
             #workaround is to make a new directory
-            print 'using %s as outbase'%outbase
             OUTBASE = outbase 
 
         if particle_mass < 0:
-            print "Assuming single-mass particle."
-            particle_mass = self.pf.h.grids[0]["ParticleMassMsun"][0] / h0
+            particle_mass = tpf.h.grids[0]["ParticleMassMsun"][0] / h0
         PARTICLE_MASS = particle_mass
         PERIODIC = periodic
-        BOX_SIZE = (self.pf.domain_right_edge[0] -
-                    self.pf.domain_left_edge[0]) * self.pf['mpchcm']
+        BOX_SIZE = (tpf.domain_right_edge[0] -
+                    tpf.domain_left_edge[0]) * tpf['mpchcm']
         setup_config()
         rh = self
+        rh.dm_type = dm_type
         cdef LPG func = rh_read_particles
         set_load_particles_generic(func)
 

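In rh_read_particles above each reader takes every NUM_BLOCKS-th grid and keeps only dark-matter particles, falling back to all particles when no particle_type field exists. A minimal Python sketch of that selection, using the same private yt calls as the diff:

    import numpy as np

    def reader_dm_ids(dd, block, num_blocks, dm_type):
        # Round-robin grid assignment for this reader.
        grids = dd._grids[block::num_blocks]
        ids = []
        for g in grids:
            try:
                iddm = dd._get_data_from_grid(g, "particle_type") == dm_type
            except KeyError:
                # No particle_type field: treat everything as dark matter.
                iddm = np.ones_like(
                    dd._get_data_from_grid(g, "particle_index")).astype('bool')
            arri = dd._get_data_from_grid(g, "particle_index").astype("int64")
            ids.append(arri[iddm])  # pick only DM
        return np.concatenate(ids)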


https://bitbucket.org/yt_analysis/yt/changeset/ccaac7809283/
changeset:   ccaac7809283
branch:      yt
user:        sskory
date:        2012-11-12 21:11:45
summary:     Small change to LoadRockstarHalos.
affected #:  1 file

diff -r 6607999ff68ebedf31377250bc5eadce7250fe7d -r ccaac78092835363e8217cc9a118bab15a7cd14a yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -2670,8 +2670,7 @@
         ----------
         filename : str
             The name of the Rockstar file to read in. Default = 
-            "(dataset_name)_rockstar/out_0.list' where (dataset_name)
-            is equal to str(pf).
+            "rockstar_halos/out_0.list'.
 
         Examples
         --------
@@ -2679,5 +2678,5 @@
         >>> halos = LoadRockstarHalos(pf, "other_name.out")
         """
         if filename is None:
-            filename = str(pf) + '_rockstar/out_0.list'
+            filename = 'rockstar_halos/out_0.list'
         RockstarHaloList.__init__(self, pf, filename)



https://bitbucket.org/yt_analysis/yt/changeset/f3996425874c/
changeset:   f3996425874c
branch:      yt
user:        sskory
date:        2012-11-13 23:36:50
summary:     More changes to Rockstar re: Matt's comments.
affected #:  3 files

diff -r ccaac78092835363e8217cc9a118bab15a7cd14a -r f3996425874cd44f0ac978db1d42c7de5ea8ce08 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -62,9 +62,6 @@
 
 TINY = 1.e-40
 
-# Ellipsoid funtions.
-# Rotation Matrixes should already be imported at top
-
 class Halo(object):
     """
     A data source that returns particle information about the members of a
@@ -78,6 +75,11 @@
     dont_wrap = ["get_sphere", "write_particle_list"]
     extra_wrap = ["__getitem__"]
 
+    # Used for Loaded and RockstarHalos
+    _saved_fields = {}
+    _ds_sort = None
+    _particle_mask = None
+
     def __init__(self, halo_list, id, indices=None, size=None, CoM=None,
         max_dens_point=None, group_total_mass=None, max_radius=None,
         bulk_vel=None, tasks=None, rms_vel=None, supp=None):
@@ -109,10 +111,26 @@
             self.supp = {}
         else:
             self.supp = supp
-        # Used for Loaded and RockstarHalos
-        self.saved_fields = {}
-        self.particle_mask = None
-        self.ds_sort = None
+
+    @property
+    def particle_mask(self):
+        # Dynamically create the masking array for particles, and get
+        # the data using standard yt methods.
+        if self._particle_mask is not None:
+            return self._particle_mask
+        # This is from disk.
+        pid = self.__getitem__('particle_index')
+        # This is from the sphere.
+        if self._name is "RockstarHalo":
+            ds = self.pf.h.sphere(self.CoM, self._radjust * self.max_radius)
+        elif self._name is "LoadedHalo":
+            ds = self.pf.h.sphere(self.CoM, self._radjust * self.max_radius)
+        sp_pid = ds['particle_index']
+        self._ds_sort = sp_pid.argsort()
+        sp_pid = sp_pid[self._ds_sort]
+        # This matches them up.
+        self._particle_mask = np.in1d(sp_pid, pid)
+        return self._particle_mask
 
     def center_of_mass(self):
         r"""Calculate and return the center of mass.
@@ -548,6 +566,8 @@
 
 class RockstarHalo(Halo):
     _name = "RockstarHalo"
+    # See particle_mask
+    _radjust = 4.
     
     def maximum_density(self):
         r"""Not implemented."""
@@ -576,54 +596,37 @@
         # 2. From the halo binary files off disk.
         # 3. Use the unique particle indexes of the halo to select a missing
         # field from an AMR Sphere.
-        try:
+        if key in self._saved_fields:
             # We've already got it.
-            return self.saved_fields[key]
-        except KeyError:
-            # Gotta go get it from the Rockstar binary file.
-            if key == 'particle_index':
-                IDs = self._get_particle_data(self.supp['id'],
-                    self.halo_list.halo_to_fname, self.size, key)
-                IDs = IDs[IDs.argsort()]
-                self.saved_fields[key] = IDs
-                return self.saved_fields[key]
-            else:
-                # Dynamically create the masking array for particles, and get
-                # the data using standard yt methods. The factor of 4 (> 1)
-                # accounts for the fact that 'r' for Rockstar halos is the
-                # virial radius, not the maximum radius. 4 might be a bit
-                # too generous, but it's safer this way.
-                ds = self.pf.h.sphere(self.CoM, 4 * self.max_radius)
-                if self.particle_mask is None:
-                    # This is from disk.
-                    pid = self.__getitem__('particle_index')
-                    # This is from the sphere.
-                    sp_pid = ds['particle_index']
-                    self.ds_sort = sp_pid.argsort()
-                    sp_pid = sp_pid[self.ds_sort]
-                    # This matches them up.
-                    self.particle_mask = np.in1d(sp_pid, pid)
-                # We won't store this field below in saved_fields because
-                # that would mean keeping two copies of it, one in the yt
-                # machinery and one here.
-                return ds[key][self.ds_sort][self.particle_mask]
+            return self._saved_fields[key]
+        # Gotta go get it from the Rockstar binary file.
+        if key == 'particle_index':
+            IDs = self._get_particle_data(self.supp['id'],
+                self.halo_list.halo_to_fname, self.size, key)
+            IDs = IDs[IDs.argsort()]
+            self._saved_fields[key] = IDs
+            return self._saved_fields[key]
+        # We won't store this field below in saved_fields because
+        # that would mean keeping two copies of it, one in the yt
+        # machinery and one here.
+        ds = self.pf.h.sphere(self.CoM, 4 * self.max_radius)
+        return np.take(ds[key][self._ds_sort], self.particle_mask)
 
     def _get_particle_data(self, halo, fnames, size, field):
         # Given a list of file names, a halo, its size, and the desired field,
         # this returns the particle indices for that halo.
         file = fnames[halo]
-        mylog.info("Getting %d particles from Rockstar binary file %s." % (self.supp['num_p'], file))
+        mylog.info("Getting %d particles from Rockstar binary file %s.", self.supp['num_p'], file)
         fp = open(file, 'rb')
         # We need to skip past the header and all the halos.
-        fp.seek(self.halo_list.header_dt.itemsize + \
+        fp.seek(self.halo_list._header_dt.itemsize + \
             self.halo_list.fname_halos[file] * \
-            self.halo_list.halo_dt.itemsize, os.SEEK_CUR)
+            self.halo_list._halo_dt.itemsize, os.SEEK_CUR)
         # Now we skip ahead to where this halo's particles begin.
         fp.seek(self.supp['p_start'] * 8, os.SEEK_CUR)
         # And finally, read in the ids.
         IDs = np.fromfile(fp, dtype=np.int64, count=self.supp['num_p'])
         fp.close()
-        print IDs.shape
         return IDs
 
     def get_ellipsoid_parameters(self):
@@ -803,6 +806,10 @@
 
 
 class LoadedHalo(Halo):
+    _name = "LoadedHalo"
+    # See particle_mask
+    _radjust = 1.05
+
     def __init__(self, pf, id, size=None, CoM=None,
 
         max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
@@ -843,36 +850,22 @@
         # 2. From the halo h5 files off disk.
         # 3. Use the unique particle indexes of the halo to select a missing
         # field from an AMR Sphere.
-        try:
+        if key in self._saved_fields:
             # We've already got it.
-            return self.saved_fields[key]
-        except KeyError:
-            # Gotta go get it from the halo h5 files.
-            field_data = self._get_particle_data(self.id, self.fnames,
-                self.size, key)
-            if field_data is not None:
-                if key == 'particle_index':
-                    field_data = field_data[field_data.argsort()]
-                self.saved_fields[key] = field_data
-                return self.saved_fields[key]
-            else:
-                # Dynamically create the masking array for particles, and get
-                # the data using standard yt methods. The 1.05 is there to
-                # account for possible silliness having to do with whether
-                # the maximum density or center of mass was used to calculate
-                # the maximum radius.
-                ds = self.pf.h.sphere(self.CoM, 1.05 * self.max_radius)
-                if self.particle_mask is None:
-                    pid = self.__getitem__('particle_index')
-                    sp_pid = ds['particle_index']
-                    self.ds_sort = sp_pid.argsort()
-                    sp_pid = sp_pid[self.ds_sort]
-                    # This matches them up.
-                    self.particle_mask = np.in1d(sp_pid, pid)
-                # We won't store this field below in saved_fields because
-                # that would mean keeping two copies of it, one in the yt
-                # machinery and one here.
-                return ds[key][self.ds_sort][self.particle_mask]
+            return self._saved_fields[key]
+        # Gotta go get it from the halo h5 files.
+        field_data = self._get_particle_data(self.id, self.fnames,
+            self.size, key)
+        if field_data is not None:
+            if key == 'particle_index':
+                field_data = field_data[field_data.argsort()]
+            self._saved_fields[key] = field_data
+            return self._saved_fields[key]
+        # We won't store this field below in saved_fields because
+        # that would mean keeping two copies of it, one in the yt
+        # machinery and one here.
+        ds = self.pf.h.sphere(self.CoM, 1.05 * self.max_radius)
+        return np.take(ds[key][self._ds_sort], self.particle_mask)
 
     def _get_particle_data(self, halo, fnames, size, field):
         # Given a list of file names, a halo, its size, and the desired field,
@@ -1317,11 +1310,35 @@
 class RockstarHaloList(HaloList):
     _name = "Rockstar"
     _halo_class = RockstarHalo
+    # see io_internal.h in Rockstar.
+    BINARY_HEADER_SIZE=256
+    _header_dt = np.dtype([('magic', np.uint64), ('snap', np.int64),
+        ('chunk', np.int64), ('scale', np.float32), ('Om', np.float32),
+        ('Ol', np.float32), ('h0', np.float32),
+        ('bounds', (np.float32, 6)), ('num_halos', np.int64),
+        ('num_particles', np.int64), ('box_size', np.float32),
+        ('particle_mass', np.float32), ('particle_type', np.int64),
+        ('unused', (np.byte, BINARY_HEADER_SIZE - 4*12 - 8*6))])
+    # see halo.h.
+    _halo_dt = np.dtype([('id', np.int64), ('pos', (np.float32, 6)),
+        ('corevel', (np.float32, 3)), ('bulkvel', (np.float32, 3)),
+        ('m', np.float32), ('r', np.float32), ('child_r', np.float32),
+        ('mgrav', np.float32), ('vmax', np.float32),
+        ('rvmax', np.float32), ('rs', np.float32),
+        ('vrms', np.float32), ('J', (np.float32, 3)),
+        ('energy', np.float32), ('spin', np.float32),
+        ('padding1', np.float32), ('num_p', np.int64),
+        ('num_child_particles', np.int64), ('p_start', np.int64),
+        ('desc', np.int64), ('flags', np.int64), ('n_core', np.int64),
+        ('min_pos_err', np.float32), ('min_vel_err', np.float32),
+        ('min_bulkvel_err', np.float32), ('padding2', np.float32),])
+    # Above, padding1 & padding2 are due to C struct alignment, which
+    # pads between 4- and 8-byte values so that fields do not overlap.
+    _tocleanup = ['padding1', 'padding2']
 
     def __init__(self, pf, out_list):
         ParallelAnalysisInterface.__init__(self)
         mylog.info("Initializing Rockstar List")
-        self._setup()
         self._data_source = None
         self._groups = []
         self._max_dens = -1
@@ -1332,34 +1349,6 @@
         self._parse_output()
         mylog.info("Finished %s"%out_list)
 
-    def _setup(self):
-        """ A few things we'll need later."""
-        # see io_internal.h in Rockstar.
-        BINARY_HEADER_SIZE=256
-        self.header_dt = np.dtype([('magic', np.uint64), ('snap', np.int64),
-            ('chunk', np.int64), ('scale', np.float32), ('Om', np.float32),
-            ('Ol', np.float32), ('h0', np.float32),
-            ('bounds', (np.float32, 6)), ('num_halos', np.int64),
-            ('num_particles', np.int64), ('box_size', np.float32),
-            ('particle_mass', np.float32), ('particle_type', np.int64),
-            ('unused', (np.byte, BINARY_HEADER_SIZE - 4*12 - 8*6))])
-        # see halo.h.
-        self.halo_dt = np.dtype([('id', np.int64), ('pos', (np.float32, 6)),
-            ('corevel', (np.float32, 3)), ('bulkvel', (np.float32, 3)),
-            ('m', np.float32), ('r', np.float32), ('child_r', np.float32),
-            ('mgrav', np.float32), ('vmax', np.float32),
-            ('rvmax', np.float32), ('rs', np.float32),
-            ('vrms', np.float32), ('J', (np.float32, 3)),
-            ('energy', np.float32), ('spin', np.float32),
-            ('padding1', np.float32), ('num_p', np.int64),
-            ('num_child_particles', np.int64), ('p_start', np.int64),
-            ('desc', np.int64), ('flags', np.int64), ('n_core', np.int64),
-            ('min_pos_err', np.float32), ('min_vel_err', np.float32),
-            ('min_bulkvel_err', np.float32), ('padding2', np.float32),])
-        # Above, padding1 & padding2 are due to C struct alignment, which
-        # pads between 4- and 8-byte values so that fields do not overlap.
-        self.tocleanup = ['padding1', 'padding2']
-
     def _run_finder(self):
         pass
 
@@ -1380,9 +1369,9 @@
         for file in files:
             fp = open(file, 'rb')
             # read the header
-            header = np.fromfile(fp, dtype=self.header_dt, count=1)
+            header = np.fromfile(fp, dtype=self._header_dt, count=1)
             # read the halo information
-            new_halos = np.fromfile(fp, dtype=self.halo_dt,
+            new_halos = np.fromfile(fp, dtype=self._halo_dt,
                 count=header['num_halos'])
             # Record which binary file holds these halos.
             for halo in new_halos['id']:
@@ -1424,14 +1413,14 @@
                     rs=1.0/pf['kpchcm'], # to unitary
                     )
         #convert units
-        for name in self.halo_dt.names:
+        for name in self._halo_dt.names:
             halos[name]=halos[name]*conv.get(name,1)
         # Store the halos in the halo list.
         for i, row in enumerate(halos):
-            supp = {name:row[name] for name in self.halo_dt.names}
+            supp = {name:row[name] for name in self._halo_dt.names}
             # Delete the padding columns. 'supp' below will contain
             # repeated information, but that's OK.
-            for item in self.tocleanup: del supp[item]
+            for item in self._tocleanup: del supp[item]
             halo = RockstarHalo(self, i, size=row['num_p'],
                 CoM=row['pos'][0:3], group_total_mass=row['m'],
                 max_radius=row['r'], bulk_vel=row['bulkvel'],

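The particle_mask property introduced above caches a sorted-index match between the halo's on-disk particle IDs and the IDs found inside the enclosing sphere. The core matching step as a standalone sketch:

    import numpy as np

    def match_particles(halo_pids, sphere_pids):
        # Sort the sphere IDs once; np.in1d then flags which sorted
        # sphere particles appear among the (already sorted) halo IDs.
        ds_sort = sphere_pids.argsort()
        mask = np.in1d(sphere_pids[ds_sort], halo_pids)
        return ds_sort, mask

Any sphere field can then be subset consistently, e.g. ds[key][ds_sort][mask].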

diff -r ccaac78092835363e8217cc9a118bab15a7cd14a -r f3996425874cd44f0ac978db1d42c7de5ea8ce08 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -135,7 +135,6 @@
         self.hierarchy = tpf.h
         self.particle_mass = particle_mass 
         self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
-        data_source = tpf.h.all_data()
         if outbase is None:
             outbase = 'rockstar_halos'
         self.outbase = outbase
@@ -148,16 +147,15 @@
             self.force_res = ts[-1].h.get_smallest_dx() * ts[-1]['mpch']
         else:
             self.force_res = force_res
-        self.le = tpf.domain_left_edge
-        self.re = tpf.domain_right_edge
+        self.left_edge = tpf.domain_left_edge
+        self.right_edge = tpf.domain_right_edge
         if self.num_readers + self.num_writers + 1 != self.comm.size:
             print '%i reader + %i writers != %i mpi'%\
                     (self.num_readers, self.num_writers, self.comm.size)
             raise RuntimeError
         self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
-        data_source = tpf.h.all_data()
         self.handler = rockstar_interface.RockstarInterface(
-                self.ts, data_source)
+                self.ts, dd)
 
     def __del__(self):
         self.pool.free_all()


diff -r ccaac78092835363e8217cc9a118bab15a7cd14a -r f3996425874cd44f0ac978db1d42c7de5ea8ce08 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -25,6 +25,7 @@
 
 import numpy as np
 import os, sys
+from yt.config import ytcfg
 cimport numpy as np
 cimport cython
 from libc.stdlib cimport malloc
@@ -245,30 +246,48 @@
     cdef np.ndarray[np.int64_t, ndim=1] arri
     cdef np.ndarray[np.float64_t, ndim=1] arr
     block = int(str(filename).rsplit(".")[-1])
+    n = rh.block_ratio
+
+    all_grids = pf.h.grids
+    SCALE_NOW = 1.0/(pf.current_redshift+1.0)
+    # Now we want to grab data from only a subset of the grids for each reader.
+    # We will want to select on filename or proc_num depending on
+    # which is available.
+    if NUM_BLOCKS == 1:
+        grids = all_grids #dd._grids
+    else:
+        if hasattr(all_grids[0], 'proc_num'):
+            # This should happen when we're running inline.
+            # We select by cpu. There should be a 1:1 relationship of
+            # myid:block.
+            myid = ytcfg.getint('yt','__topcomm_parallel_rank')
+            grids = [g for g in all_grids if g.proc_num == myid]
+        else:
+            # This should happen when we're reading off disk.
+            # We select by filename.
+            fnames = np.array([g.filename for g in all_grids])
+            sort = fnames.argsort()
+            grids = np.array_split(all_grids[sort], NUM_BLOCKS)[block]
     
-
-    # Now we want to grab data from only a subset of the grids for each reader.
-    n = rh.block_ratio
-    dd = pf.h.all_data()
-    SCALE_NOW = 1.0/(pf.current_redshift+1.0)
-    grids = dd._grids[block::NUM_BLOCKS]
+    all_fields = set(pf.h.derived_field_list + pf.h.field_list)
 
     # First we need to find out how many particles this reader is going to
     # read in if the number of readers > 1.
     if NUM_BLOCKS > 1:
         local_parts = 0
         for g in grids:
-            try:
-                iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
-            except KeyError:
-                iddm = np.ones_like(dd._get_data_from_grid(g, "particle_index")).astype('bool')
-            arri = dd._get_data_from_grid(g, "particle_index").astype("int64")
+            if g.NumberOfParticles == 0: continue
+            if "particle_type" in all_fields:
+                #iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
+                iddm = g["particle_type"] == rh.dm_type
+            else:
+                iddm = Ellipsis
+            arri = g["particle_index"].astype("int64")
             arri = arri[iddm] #pick only DM
             local_parts += arri.size
     else:
         local_parts = TOTAL_PARTICLES
 
-    print "Loading particles: size = ", local_parts
     p[0] = <particle *> malloc(sizeof(particle) * local_parts)
 
     conv[0] = conv[1] = conv[2] = pf["mpchcm"]
@@ -279,11 +298,12 @@
     left_edge[3] = left_edge[4] = left_edge[5] = 0.0
     pi = 0
     for g in grids:
-        try:
-            iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
-        except KeyError:
-            iddm = np.ones_like(dd._get_data_from_grid(g, "particle_index")).astype('bool')
-        arri = dd._get_data_from_grid(g, "particle_index").astype("int64")
+        if g.NumberOfParticles == 0: continue
+        if "particle_type" in all_fields:
+            iddm = g["particle_type"] == rh.dm_type
+        else:
+            iddm = Ellipsis
+        arri = g["particle_index"].astype("int64")
         arri = arri[iddm] #pick only DM
         npart = arri.size
         for i in range(npart):
@@ -293,14 +313,13 @@
                       "particle_position_z",
                       "particle_velocity_x", "particle_velocity_y",
                       "particle_velocity_z"]:
-            arr = dd._get_data_from_grid(g, field).astype("float64")
+            arr = g[field].astype("float64")
             arr = arr[iddm] #pick DM
             for i in range(npart):
                 p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
             fi += 1
         pi += npart
     num_p[0] = local_parts
-    print "Done loading particles."
 
 cdef class RockstarInterface:
 



https://bitbucket.org/yt_analysis/yt/changeset/faa73cec6e0f/
changeset:   faa73cec6e0f
branch:      yt
user:        sskory
date:        2012-11-14 01:16:26
summary:     Removing some stuff that is unused or that I'm unsure about in Rockstar.
affected #:  2 files

diff -r f3996425874cd44f0ac978db1d42c7de5ea8ce08 -r faa73cec6e0f36d3d8e3c61196c855fac7f08e9c yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -35,18 +35,6 @@
 import socket
 import time
 
-class DomainDecomposer(ParallelAnalysisInterface):
-    def __init__(self, pf, comm):
-        ParallelAnalysisInterface.__init__(self, comm=comm)
-        self.pf = pf
-        self.hierarchy = pf.h
-        self.center = (pf.domain_left_edge + pf.domain_right_edge)/2.0
-
-    def decompose(self):
-        dd = self.pf.h.all_data()
-        check, LE, RE, data_source = self.partition_hierarchy_3d(dd)
-        return data_source
-
 class RockstarHaloFinder(ParallelAnalysisInterface):
     def __init__(self, ts, num_readers = 1, num_writers = None, 
             outbase=None,particle_mass=-1.0,dm_type=1,force_res=None):


diff -r f3996425874cd44f0ac978db1d42c7de5ea8ce08 -r faa73cec6e0f36d3d8e3c61196c855fac7f08e9c yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -25,7 +25,6 @@
 
 import numpy as np
 import os, sys
-from yt.config import ytcfg
 cimport numpy as np
 cimport cython
 from libc.stdlib cimport malloc
@@ -251,23 +250,12 @@
     all_grids = pf.h.grids
     SCALE_NOW = 1.0/(pf.current_redshift+1.0)
     # Now we want to grab data from only a subset of the grids for each reader.
-    # We will want to select on filename or proc_num depending on
-    # which is available.
     if NUM_BLOCKS == 1:
         grids = all_grids #dd._grids
     else:
-        if hasattr(all_grids[0], 'proc_num'):
-            # This should happen when we're running inline.
-            # We select by cpu. There should be a 1:1 relationship of
-            # myid:block.
-            myid = ytcfg.getint('yt','__topcomm_parallel_rank')
-            grids = [g for g in all_grids if g.proc_num == myid]
-        else:
-            # This should happen when we're reading off disk.
-            # We select by filename.
-            fnames = np.array([g.filename for g in all_grids])
-            sort = fnames.argsort()
-            grids = np.array_split(all_grids[sort], NUM_BLOCKS)[block]
+        fnames = np.array([g.filename for g in all_grids])
+        sort = fnames.argsort()
+        grids = np.array_split(all_grids[sort], NUM_BLOCKS)[block]
     
     all_fields = set(pf.h.derived_field_list + pf.h.field_list)
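
With the proc_num branch gone, every reader now partitions the grids the same way: sort by filename, then split evenly across NUM_BLOCKS readers. A standalone sketch of that partitioning (the filenames are made up):

    import numpy as np

    fnames = np.array(["data.0003", "data.0001", "data.0002", "data.0000"])
    NUM_BLOCKS = 2

    sort = fnames.argsort()
    for block in range(NUM_BLOCKS):
        # Each reader (block) gets one contiguous chunk of the sorted list.
        print block, np.array_split(fnames[sort], NUM_BLOCKS)[block]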
 



https://bitbucket.org/yt_analysis/yt/changeset/9842a9ed5144/
changeset:   9842a9ed5144
branch:      yt
user:        sskory
date:        2012-11-14 20:45:22
summary:     Merge.
affected #:  5 files

diff -r faa73cec6e0f36d3d8e3c61196c855fac7f08e9c -r 9842a9ed5144bcfd3fb7ff468b20460f31f859a3 yt/frontends/enzo/answer_testing_support.py
--- a/yt/frontends/enzo/answer_testing_support.py
+++ b/yt/frontends/enzo/answer_testing_support.py
@@ -59,18 +59,17 @@
 def standard_small_simulation(pf_fn, fields):
     if not can_run_pf(pf_fn): return
     dso = [None]
-    yield GridHierarchyTest(pf_fn)
-    yield ParentageRelationshipsTest(pf_fn)
     for field in fields:
         yield GridValuesTest(pf_fn, field)
-        for axis in [0, 1, 2]:
-            for ds in dso:
+        if 'particle' in field: continue
+        for ds in dso:
+            for axis in [0, 1, 2]:
                 for weight_field in [None, "Density"]:
                     yield ProjectionValuesTest(
                         pf_fn, axis, field, weight_field,
-                        ds)
-                yield FieldValuesTest(
-                        pf_fn, field, ds)
+                        ds, decimals=3)
+            yield FieldValuesTest(
+                    pf_fn, field, ds, decimals=3)
                     
 class ShockTubeTest(object):
     def __init__(self, data_file, solution_file, fields, 


diff -r faa73cec6e0f36d3d8e3c61196c855fac7f08e9c -r 9842a9ed5144bcfd3fb7ff468b20460f31f859a3 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -274,6 +274,7 @@
             self.conversion_factors["Time"] = 1.0
         for unit in mpc_conversion.keys():
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+            self.units[unit+"cm"] = self.units[unit]
             self.units[unit] /= (1.0+self.current_redshift)
             
     def _setup_cgs_units(self):


diff -r faa73cec6e0f36d3d8e3c61196c855fac7f08e9c -r 9842a9ed5144bcfd3fb7ff468b20460f31f859a3 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -29,7 +29,7 @@
     assert_array_less, assert_string_equal, assert_array_almost_equal_nulp,\
     assert_allclose
 
-def assert_rel_equal(a1, a2, decimals, err_msg=''):
+def assert_rel_equal(a1, a2, decimals, err_msg='', verbose=True):
     # We have nan checks in here because occasionally we have fields that get
     # weighted without non-zero weights.  I'm looking at you, particle fields!
     if isinstance(a1, np.ndarray):
@@ -39,7 +39,8 @@
         a2[np.isnan(a2)] = 1.0
     elif np.isnan(a1) and np.isnan(a2):
         return True
-    return assert_almost_equal(a1/a2, 1.0, decimals, err_msg=err_msg)
+    return assert_almost_equal(a1/a2, 1.0, decimals, err_msg=err_msg,
+                               verbose=verbose)
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 


diff -r faa73cec6e0f36d3d8e3c61196c855fac7f08e9c -r 9842a9ed5144bcfd3fb7ff468b20460f31f859a3 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -309,7 +309,7 @@
             assert_equal(new_result, old_result, 
                          err_msg=err_msg, verbose=True)
         else:
-            assert_rel_equal(new_result, old_result, self.decimals,
+            assert_allclose(new_result, old_result, 10.**(-self.decimals),
                              err_msg=err_msg, verbose=True)
 
 class AllFieldValuesTest(AnswerTestingTest):
@@ -370,8 +370,8 @@
                 assert_equal(new_result[k], old_result[k],
                              err_msg=err_msg)
             else:
-                assert_rel_equal(new_result[k], old_result[k], 
-                                 self.decimals, err_msg=err_msg)
+                assert_allclose(new_result[k], old_result[k], 
+                                 10.**-(self.decimals), err_msg=err_msg)
 
 class PixelizedProjectionValuesTest(AnswerTestingTest):
     _type_name = "PixelizedProjectionValues"


diff -r faa73cec6e0f36d3d8e3c61196c855fac7f08e9c -r 9842a9ed5144bcfd3fb7ff468b20460f31f859a3 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -20,6 +20,7 @@
 boltzmann_constant_cgs = 1.3806504e-16  # erg K^-1
 gravitational_constant_cgs  = 6.67428e-8  # cm^3 g^-1 s^-2
 planck_constant_cgs   = 6.62606896e-27  # erg s
+stefan_boltzmann_constant_cgs = 5.670373e-5 # erg cm^-2 s^-1 K^-4
 rho_crit_now = 1.8788e-29  # g times h^2 (critical mass for closure, Cosmology)
 
 # Misc. Approximations
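
The new Stefan-Boltzmann value can be cross-checked against the other constants in this file via sigma = 2 pi^5 k^4 / (15 h^3 c^2); the speed-of-light value below is assumed, since it does not appear in this hunk:

    from math import pi

    boltzmann_constant_cgs = 1.3806504e-16  # erg K^-1
    planck_constant_cgs = 6.62606896e-27    # erg s
    speed_of_light_cgs = 2.99792458e10      # cm s^-1 (assumed)

    sigma = 2 * pi**5 * boltzmann_constant_cgs**4 / \
        (15 * planck_constant_cgs**3 * speed_of_light_cgs**2)
    print sigma  # ~5.6704e-5, matching stefan_boltzmann_constant_cgs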



https://bitbucket.org/yt_analysis/yt/changeset/d861b2ff9431/
changeset:   d861b2ff9431
branch:      yt
user:        sskory
date:        2012-11-15 21:12:13
summary:     Fixed the top communicator issue in Rockstar. Rockstar also works inline with Enzo now.
affected #:  3 files

diff -r 9842a9ed5144bcfd3fb7ff468b20460f31f859a3 -r d861b2ff9431519edec170d636e68fbe3be88506 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -24,16 +24,134 @@
 """
 
 from yt.mods import *
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    ParallelAnalysisInterface, ProcessorPool, Communicator
+from yt.analysis_modules.halo_finding.halo_objects import * #Halos & HaloLists
+from yt.config import ytcfg
+
+import rockstar_interface
+
+import socket
+import time
+import threading
+import signal
+import os
 from os import environ
 from os import mkdir
 from os import path
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface, ProcessorPool, Communicator
 
-from yt.analysis_modules.halo_finding.halo_objects import * #Halos & HaloLists
-import rockstar_interface
-import socket
-import time
+# Get some definitions from Rockstar directly.
+ROCKSTAR_DIR = environ['ROCKSTAR_DIR']
+lines = file(path.join(ROCKSTAR_DIR, 'server.h'))
+READER_TYPE = None
+WRITER_TYPE = None
+for line in lines:
+    if "READER_TYPE" in line:
+        line = line.split()
+        READER_TYPE = int(line[-1])
+    if "WRITER_TYPE" in line:
+        line = line.split()
+        WRITER_TYPE = int(line[-1])
+    if READER_TYPE is not None and WRITER_TYPE is not None:
+        break
+lines.close()
+
+class InlineRunner(ParallelAnalysisInterface):
+    def __init__(self, num_writers):
+        # If this is being run inline, num_readers == comm.size, always.
+        self.num_readers = ytcfg.getint("yt", "__global_parallel_size")
+        if num_writers is None:
+            self.num_writers =  ytcfg.getint("yt", "__global_parallel_size")
+        else:
+            self.num_writers = min(num_writers,
+                ytcfg.getint("yt", "__global_parallel_size"))
+
+    def split_work(self, pool):
+        avail = range(pool.comm.size)
+        self.writers = []
+        self.readers = []
+        # If we're inline, everyone is a reader.
+        self.readers = avail[:]
+        if self.num_writers == pool.comm.size:
+            # And everyone is a writer!
+            self.writers = avail[:]
+        else:
+            # Not everyone is a writer.
+            # Cyclically assign writers which should approximate
+            # memory load balancing (depending on the mpirun call,
+            # but this should do it in most cases).
+            stride = int(np.ceil(float(pool.comm.size) / self.num_writers))
+            while len(self.writers) < self.num_writers:
+                new_writers = avail[::stride]
+                self.writers.extend(new_writers)
+                # Remove the newly assigned writers from the pool.
+                for r in new_writers:
+                    avail.pop(avail.index(r))
+
+    def run(self, handler, pool):
+        # If inline, we use forks.
+        server_pid = 0
+        # Start a server on only one machine/fork.
+        if pool.comm.rank == 0:
+            server_pid = os.fork()
+            if server_pid == 0:
+                handler.start_server()
+                os._exit(0)
+        # Start writers.
+        writer_pid = 0
+        if pool.comm.rank in self.writers:
+            time.sleep(0.1 + pool.comm.rank/10.0)
+            writer_pid = os.fork()
+            if writer_pid == 0:
+                handler.start_client(WRITER_TYPE)
+                os._exit(0)
+        # Start readers, not forked.
+        if pool.comm.rank in self.readers:
+            time.sleep(0.1 + pool.comm.rank/10.0)
+            handler.start_client(READER_TYPE)
+        # Make sure the forks are done, which they should be.
+        if writer_pid != 0:
+            os.waitpid(writer_pid, 0)
+        if server_pid != 0:
+            os.waitpid(server_pid, 0)
+
+class StandardRunner(ParallelAnalysisInterface):
+    def __init__(self, num_readers, num_writers):
+        self.num_readers = num_readers
+        if num_writers is None:
+            self.num_writers = ytcfg.getint("yt", "__global_parallel_size") \
+                - num_readers - 1
+        else:
+            self.num_writers = min(num_writers,
+                ytcfg.getint("yt", "__global_parallel_size"))
+        if self.num_readers + self.num_writers + 1 != ytcfg.getint("yt", \
+                "__global_parallel_size"):
+            mylog.error('%i readers + %i writers != %i MPI ranks',
+                    self.num_readers, self.num_writers,
+                    ytcfg.getint("yt", "__global_parallel_size"))
+            raise RuntimeError
+    
+    def split_work(self, pool):
+        # Who is going to do what.
+        avail = range(pool.comm.size)
+        self.writers = []
+        self.readers = []
+        # If we're not running inline, rank 0 should be removed immediately.
+        avail.pop(0)
+        # Now we assign the rest.
+        for i in range(self.num_readers):
+            self.readers.append(avail.pop(0))
+        for i in range(self.num_writers):
+            self.writers.append(avail.pop(0))
+    
+    def run(self, handler, pool):
+        # Not inline so we just launch them directly from our MPI threads.
+        if pool.comm.rank == 0:
+            handler.start_server()
+        if pool.comm.rank in self.readers:
+            time.sleep(0.1 + pool.comm.rank/10.0)
+            handler.start_client(READER_TYPE)
+        if pool.comm.rank in self.writers:
+            time.sleep(0.2 + pool.comm.rank/10.0)
+            handler.start_client(WRITER_TYPE)
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
     def __init__(self, ts, num_readers = 1, num_writers = None, 
@@ -56,11 +174,13 @@
             The number of readers can be increased from the default
             of 1 in the event that a single snapshot is split among
             many files. This can help in cases where performance is
-            IO-limited. Default is 1.
+            IO-limited. Default is 1. If run inline, it is
+            equal to the number of MPI threads.
         num_writers: int
             The number of writers determines the number of processing threads
             as well as the number of threads writing output data.
-            The default is set comm.size-num_readers-1.
+            The default is set to comm.size-num_readers-1. If run inline,
+            the default is equal to the number of MPI threads.
         outbase: str
             This is where the out*list files that Rockstar makes should be
             placed. Default is 'rockstar_halos'.
@@ -89,7 +209,6 @@
 
         test_rockstar.py:
 
-        from mpi4py import MPI
         from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
         from yt.mods import *
         import sys
@@ -101,10 +220,18 @@
         rh = RockstarHaloFinder(ts, particle_mass=pm)
         rh.run()
         """
-        ParallelAnalysisInterface.__init__(self)
-        # No subvolume support
-        #we assume that all of the snapshots in the time series
-        #use the same domain info as the first snapshots
+        # Decide how we're working.
+        if ytcfg.getboolean("yt", "inline"):
+            self.runner = InlineRunner(num_writers)
+        else:
+            self.runner = StandardRunner(num_readers, num_writers)
+        self.num_readers = self.runner.num_readers
+        self.num_writers = self.runner.num_writers
+        mylog.info("Rockstar is using %d readers and %d writers",
+            self.num_readers, self.num_writers)
+        # Note that Rockstar does not support subvolumes.
+        # We assume that all of the snapshots in the time series
+        # use the same domain info as the first snapshots.
         if not isinstance(ts,TimeSeriesData):
             ts = TimeSeriesData([ts])
         self.ts = ts
@@ -126,10 +253,6 @@
         if outbase is None:
             outbase = 'rockstar_halos'
         self.outbase = outbase
-        if num_writers is None:
-            num_writers = self.comm.size - num_readers -1
-        self.num_readers = num_readers
-        self.num_writers = num_writers
         self.particle_mass = particle_mass
         if force_res is None:
             self.force_res = ts[-1].h.get_smallest_dx() * ts[-1]['mpch']
@@ -137,11 +260,16 @@
             self.force_res = force_res
         self.left_edge = tpf.domain_left_edge
         self.right_edge = tpf.domain_right_edge
-        if self.num_readers + self.num_writers + 1 != self.comm.size:
-            print '%i reader + %i writers != %i mpi'%\
-                    (self.num_readers, self.num_writers, self.comm.size)
-            raise RuntimeError
         self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
+        # We set up the workgroups *before* initializing
+        # ParallelAnalysisInterface. Everyone is their own workgroup!
+        self.pool = ProcessorPool()
+        for i in range(ytcfg.getint("yt", "__global_parallel_size")):
+             self.pool.add_workgroup(size=1)
+        ParallelAnalysisInterface.__init__(self)
+        for wg in self.pool.workgroups:
+            if self.pool.comm.rank in wg.ranks:
+                self.workgroup = wg
         self.handler = rockstar_interface.RockstarInterface(
                 self.ts, dd)
 
@@ -149,7 +277,7 @@
         self.pool.free_all()
 
     def _get_hosts(self):
-        if self.comm.size == 1 or self.workgroup.name == "server":
+        if self.pool.comm.size == 1 or self.pool.comm.rank == 0:
             server_address = socket.gethostname()
             sock = socket.socket()
             sock.bind(('', 0))
@@ -157,7 +285,7 @@
             del sock
         else:
             server_address, port = None, None
-        self.server_address, self.port = self.comm.mpi_bcast(
+        self.server_address, self.port = self.pool.comm.mpi_bcast(
             (server_address, port))
         self.port = str(self.port)
 
@@ -165,22 +293,13 @@
         """
         
         """
-        if self.comm.size > 1:
-            self.pool = ProcessorPool()
-            mylog.debug("Num Writers = %s Num Readers = %s",
-                        self.num_writers, self.num_readers)
-            self.pool.add_workgroup(1, name = "server")
-            self.pool.add_workgroup(self.num_readers, name = "readers")
-            self.pool.add_workgroup(self.num_writers, name = "writers")
-            for wg in self.pool.workgroups:
-                if self.comm.rank in wg.ranks: self.workgroup = wg
         if block_ratio != 1:
             raise NotImplementedError
         self._get_hosts()
         self.handler.setup_rockstar(self.server_address, self.port,
                     len(self.ts), self.total_particles, 
                     self.dm_type,
-                    parallel = self.comm.size > 1,
+                    parallel = self.pool.comm.size > 1,
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,
                     writing_port = -1,
@@ -189,11 +308,8 @@
                     force_res=self.force_res,
                     particle_mass = float(self.particle_mass),
                     **kwargs)
-        #because rockstar *always* write to exactly the same
-        #out_0.list filename we make a directory for it
-        #to sit inside so it doesn't get accidentally
-        #overwritten 
-        if self.workgroup.name == "server":
+        # Make the directory to store the halo lists in.
+        if self.pool.comm.rank == 0:
             if not os.path.exists(self.outbase):
                 os.mkdir(self.outbase)
             # Make a record of which dataset corresponds to which set of
@@ -205,20 +321,16 @@
                 line = "%s\t%d\n" % (pfloc, i)
                 fp.write(line)
             fp.close()
-        if self.comm.size == 1:
+        # This barrier makes sure the directory exists before it might be used.
+        self.pool.comm.barrier()
+        if self.pool.comm.size == 1:
             self.handler.call_rockstar()
         else:
-            self.comm.barrier()
-            if self.workgroup.name == "server":
-                self.handler.start_server()
-            elif self.workgroup.name == "readers":
-                time.sleep(0.1 + self.workgroup.comm.rank/10.0)
-                self.handler.start_client()
-            elif self.workgroup.name == "writers":
-                time.sleep(0.2 + self.workgroup.comm.rank/10.0)
-                self.handler.start_client()
-            self.pool.free_all()
-        self.comm.barrier()
+            # Split up the work.
+            self.runner.split_work(self.pool)
+            # And run it!
+            self.runner.run(self.handler, self.pool)
+        self.pool.comm.barrier()
         self.pool.free_all()
     
     def halo_list(self,file_name='out_0.list'):
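
For the non-inline case, the rank layout set up by StandardRunner.split_work above is deterministic: rank 0 serves, the next num_readers ranks read, and the next num_writers ranks write. The same bookkeeping, sketched without MPI:

    comm_size = 8
    num_readers = 2
    num_writers = comm_size - num_readers - 1  # the default used above

    avail = range(comm_size)
    avail.pop(0)  # rank 0 is the server
    readers = [avail.pop(0) for i in range(num_readers)]
    writers = [avail.pop(0) for i in range(num_writers)]

    print "server: 0, readers:", readers, "writers:", writers
    # server: 0, readers: [1, 2] writers: [3, 4, 5, 6, 7]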


diff -r 9842a9ed5144bcfd3fb7ff468b20460f31f859a3 -r d861b2ff9431519edec170d636e68fbe3be88506 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -29,6 +29,8 @@
 cimport cython
 from libc.stdlib cimport malloc
 
+from yt.config import ytcfg
+
 cdef import from "particle.h":
     struct particle:
         np.int64_t id
@@ -44,11 +46,11 @@
 cdef import from "config.h":
     void setup_config()
 
-cdef import from "server.h":
+cdef import from "server.h" nogil:
     int server()
 
-cdef import from "client.h":
-    void client()
+cdef import from "client.h" nogil:
+    void client(np.int64_t in_type)
 
 cdef import from "meta_io.h":
     void read_particles(char *filename)
@@ -237,13 +239,13 @@
     print "SINGLE_SNAP =", SINGLE_SNAP
 
 cdef class RockstarInterface
-cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
+cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p) with gil:
     global SCALE_NOW
-    pf = rh.tsl.next()
-    print 'reading from particle filename %s: %s'%(filename,pf.basename)
     cdef np.float64_t conv[6], left_edge[6]
     cdef np.ndarray[np.int64_t, ndim=1] arri
     cdef np.ndarray[np.float64_t, ndim=1] arr
+    pf = rh.tsl.next()
+    print 'reading from particle filename %s: %s'%(filename,pf.basename)
     block = int(str(filename).rsplit(".")[-1])
     n = rh.block_ratio
 
@@ -251,11 +253,16 @@
     SCALE_NOW = 1.0/(pf.current_redshift+1.0)
     # Now we want to grab data from only a subset of the grids for each reader.
     if NUM_BLOCKS == 1:
-        grids = all_grids #dd._grids
+        grids = all_grids
     else:
-        fnames = np.array([g.filename for g in all_grids])
-        sort = fnames.argsort()
-        grids = np.array_split(all_grids[sort], NUM_BLOCKS)[block]
+        if not ytcfg.getboolean("yt", "inline"):
+            fnames = np.array([g.filename for g in all_grids])
+            sort = fnames.argsort()
+            grids = np.array_split(all_grids[sort], NUM_BLOCKS)[block]
+        else:
+            # We must be inline; grab only the local grids.
+            grids  = [g for g in all_grids if g.proc_num ==
+                          ytcfg.getint('yt','__topcomm_parallel_rank')]
     
     all_fields = set(pf.h.derived_field_list + pf.h.field_list)
 
@@ -276,6 +283,8 @@
     else:
         local_parts = TOTAL_PARTICLES
 
+    #print "local_parts", local_parts
+
     p[0] = <particle *> malloc(sizeof(particle) * local_parts)
 
     conv[0] = conv[1] = conv[2] = pf["mpchcm"]
@@ -393,7 +402,9 @@
         output_and_free_halos(0, 0, 0, NULL)
 
     def start_server(self):
-        server()
+        with nogil:
+            server()
 
-    def start_client(self):
-        client()
+    def start_client(self, in_type):
+        in_type = np.int64(in_type)
+        client(in_type)


diff -r 9842a9ed5144bcfd3fb7ff468b20460f31f859a3 -r d861b2ff9431519edec170d636e68fbe3be88506 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -290,13 +290,14 @@
         if size is None:
             size = len(self.available_ranks)
         if len(self.available_ranks) < size:
-            print 'Not enough resources available', size, self.available_ranks
+            mylog.error('Not enough resources available, asked for %d have %d',
+                size, self.available_ranks)
             raise RuntimeError
         if ranks is None:
             ranks = [self.available_ranks.pop(0) for i in range(size)]
         # Default name to the workgroup number.
         if name is None: 
-            name = string(len(self.workgroups))
+            name = str(len(self.workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:



https://bitbucket.org/yt_analysis/yt/changeset/ba01a9998070/
changeset:   ba01a9998070
branch:      yt
user:        sskory
date:        2012-11-15 21:22:47
summary:     Forgot this small change.
affected #:  1 file

diff -r d861b2ff9431519edec170d636e68fbe3be88506 -r ba01a9998070649a0c57fda1112be3470c9dddc3 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -121,9 +121,9 @@
         # This is from disk.
         pid = self.__getitem__('particle_index')
         # This is from the sphere.
-        if self._name is "RockstarHalo":
+        if self._name == "RockstarHalo":
             ds = self.pf.h.sphere(self.CoM, self._radjust * self.max_radius)
-        elif self._name is "LoadedHalo":
+        elif self._name == "LoadedHalo":
             ds = self.pf.h.sphere(self.CoM, self._radjust * self.max_radius)
         sp_pid = ds['particle_index']
         self._ds_sort = sp_pid.argsort()
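
The `is` -> `==` fix above matters because `is` tests object identity, not string contents; CPython happens to intern many short literals, so identity checks can pass in testing and then fail on strings built at runtime. For example:

    a = "RockstarHalo"
    b = "".join(["Rockstar", "Halo"])  # equal contents, distinct object

    print a == b  # True  -- the correct test
    print a is b  # False -- identity is not guaranteed for equal strings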



https://bitbucket.org/yt_analysis/yt/changeset/b4e8a1ce6fad/
changeset:   b4e8a1ce6fad
branch:      yt
user:        sskory
date:        2012-11-15 21:23:06
summary:     Merge.
affected #:  12 files



diff -r ba01a9998070649a0c57fda1112be3470c9dddc3 -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -326,7 +326,12 @@
         refine_by = None
         if refine_by is None: refine_by = 2
         self.refine_by = refine_by
-        self.dimensionality = 3
+        dimensionality = 3
+        if grid['dimensions'][2] == 1 :
+            dimensionality = 2
+        if grid['dimensions'][1] == 1 :
+            dimensionality = 1
+        self.dimensionality = dimensionality
         self.current_time = grid["time"]
         self.unique_identifier = self._handle.__hash__()
         self.cosmological_simulation = False
@@ -334,7 +339,8 @@
         self.field_ordering = 'fortran'
         self.boundary_conditions = [1]*6
 
-        self.nvtk = int(np.product(self.domain_dimensions/(grid['dimensions']-1)))
+        ND = self.dimensionality
+        self.nvtk = int(np.product(self.domain_dimensions[:ND]/(grid['dimensions'][:ND]-1)))
 
         self.current_redshift = self.omega_lambda = self.omega_matter = \
             self.hubble_constant = self.cosmological_simulation = 0.0


diff -r ba01a9998070649a0c57fda1112be3470c9dddc3 -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -26,6 +26,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import numpy as np
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \
@@ -36,6 +37,8 @@
     ValidateGridType, \
     NullFunc, \
     TranslationFunc
+from yt.utilities.physical_constants import \
+    kboltz,mh
 import yt.data_objects.universal_fields
 
 log_translation_dict = {}
@@ -44,10 +47,7 @@
                     "Pressure": "pressure",
                     "x-velocity": "velocity_x",
                     "y-velocity": "velocity_y",
-                    "z-velocity": "velocity_z",
-                    "mag_field_x": "cell_centered_B_x ",
-                    "mag_field_y": "cell_centered_B_y ",
-                    "mag_field_z": "cell_centered_B_z "}
+                    "z-velocity": "velocity_z"}
 
 AthenaFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = AthenaFieldInfo.add_field
@@ -86,3 +86,30 @@
 for f,v in translation_dict.items():
     add_field(f, TranslationFunc(v), take_log=False)
 
+def _Temperature(fields, data):
+    if data.has_field_parameter("mu") :
+        mu = data.get_field_parameter("mu")
+    else:
+        mu = 0.6
+    return mu*mh*data["Pressure"]/data["Density"]/kboltz
+add_field("Temperature", function=_Temperature, take_log=False,
+          units=r"\rm{K}")
+
+def _Bx(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_x']*factor
+add_field("Bx", function=_Bx, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_x")
+
+def _By(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_y']*factor
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_y")
+
+def _Bz(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_z']*factor
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_z")
+
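
The sqrt(4 pi) factor in the new B-field definitions converts from the code's magnetic-field convention (magnetic pressure = B^2/2) to Gauss (magnetic pressure = B^2/8pi). A quick consistency check with a made-up field value:

    import numpy as np

    B_code = 0.05                         # hypothetical code-unit value
    B_gauss = B_code * np.sqrt(4. * np.pi)

    # Magnetic pressure agrees between the two conventions:
    print np.allclose(B_code**2 / 2., B_gauss**2 / (8. * np.pi))  # True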


diff -r ba01a9998070649a0c57fda1112be3470c9dddc3 -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -69,7 +69,8 @@
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
         self._handle = pf._handle
-
+        self._particle_handle = pf._particle_handle
+        
         self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
 
@@ -79,9 +80,9 @@
     def _detect_fields(self):
         ncomp = self._handle["/unknown names"].shape[0]
         self.field_list = [s for s in self._handle["/unknown names"][:].flat]
-        if ("/particle names" in self._handle) :
+        if ("/particle names" in self._particle_handle) :
             self.field_list += ["particle_" + s[0].strip() for s
-                                in self._handle["/particle names"][:]]
+                                in self._particle_handle["/particle names"][:]]
     
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
@@ -98,6 +99,7 @@
     def _parse_hierarchy(self):
         f = self._handle # shortcut
         pf = self.parameter_file # shortcut
+        f_part = self._particle_handle # shortcut
         
         # Initialize to the domain left / domain right
         ND = self.parameter_file.dimensionality
@@ -120,7 +122,7 @@
                               for ax in 'xyz']
         self.grid_dimensions[:] *= (nxb, nyb, nzb)
         try:
-            self.grid_particle_count[:] = f["/localnp"][:][:,None]
+            self.grid_particle_count[:] = f_part["/localnp"][:][:,None]
         except KeyError:
             self.grid_particle_count[:] = 0.0
         self._particle_indices = np.zeros(self.num_grids + 1, dtype='int64')
@@ -209,6 +211,7 @@
     
     def __init__(self, filename, data_style='flash_hdf5',
                  storage_filename = None,
+                 particle_filename = None, 
                  conversion_override = None):
 
         if self._handle is not None: return
@@ -216,6 +219,16 @@
         if conversion_override is None: conversion_override = {}
         self._conversion_override = conversion_override
 
+        self.particle_filename = particle_filename
+
+        if self.particle_filename is None :
+            self._particle_handle = self._handle
+        else :
+            try :
+                self._particle_handle = h5py.File(self.particle_filename, "r")
+            except :
+                raise IOError(self.particle_filename)
+                                                                
         StaticOutput.__init__(self, filename, data_style)
         self.storage_filename = storage_filename
 
@@ -397,9 +410,15 @@
             if dimensionality < 3:
                 mylog.warning("Guessing dimensionality as %s", dimensionality)
 
-        nblockx = self.parameters["nblockx"]
-        nblocky = self.parameters["nblocky"]
-        nblockz = self.parameters["nblockz"]
+        if 'lrefine_min' in self.parameters.keys() : # PARAMESH
+            nblockx = self.parameters["nblockx"]
+            nblocky = self.parameters["nblocky"]
+            nblockz = self.parameters["nblockz"]
+        else : # Uniform Grid
+            nblockx = self.parameters["iprocs"]
+            nblocky = self.parameters["jprocs"]
+            nblockz = self.parameters["kprocs"]
+                        
         self.dimensionality = dimensionality
         self.domain_dimensions = \
             np.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
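
With the new particle_filename keyword, a FLASH plot file and a separately written particle file can be paired at load time. A hedged usage sketch (the file names are placeholders, and `load` is assumed to forward keyword arguments to the frontend):

    from yt.mods import load

    pf = load("flash_hdf5_plt_cnt_0010",
              particle_filename="flash_hdf5_part_0010")
    # particle_* fields are now read from the separate particle file.
    print [f for f in pf.h.field_list if f.startswith("particle_")]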


diff -r ba01a9998070649a0c57fda1112be3470c9dddc3 -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -36,7 +36,7 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
-    kboltz
+    kboltz, mh
 KnownFLASHFields = FieldInfoContainer()
 add_flash_field = KnownFLASHFields.add_field
 


diff -r ba01a9998070649a0c57fda1112be3470c9dddc3 -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -39,9 +39,11 @@
         # Now we cache the particle fields
         self.pf = pf
         self._handle = pf._handle
+        self._particle_handle = pf._particle_handle
+        
         try :
             particle_fields = [s[0].strip() for s in
-                               self._handle["/particle names"][:]]
+                               self._particle_handle["/particle names"][:]]
             self._particle_fields = dict([("particle_" + s, i) for i, s in
                                           enumerate(particle_fields)])
         except KeyError:
@@ -53,12 +55,13 @@
 
     def _read_data_set(self, grid, field):
         f = self._handle
+        f_part = self._particle_handle
         if field in self._particle_fields:
             if grid.NumberOfParticles == 0: return np.array([], dtype='float64')
             start = self.pf.h._particle_indices[grid.id - grid._id_offset]
             end = self.pf.h._particle_indices[grid.id - grid._id_offset + 1]
             fi = self._particle_fields[field]
-            tr = f["/tracer particles"][start:end, fi]
+            tr = f_part["/tracer particles"][start:end, fi]
         else:
             tr = f["/%s" % field][grid.id - grid._id_offset,:,:,:].transpose()
         return tr.astype("float64")


diff -r ba01a9998070649a0c57fda1112be3470c9dddc3 -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -567,3 +567,7 @@
         
 def fix_axis(axis):
     return inv_axis_names.get(axis, axis)
+
+def get_image_suffix(name):
+    suffix = os.path.splitext(name)[1]
+    return suffix if suffix in ['.png', '.eps', '.ps', '.pdf'] else ''
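
The helper's contract (with the dot-prefixed suffixes fixed above): recognized suffixes come back with their leading dot, and anything else collapses to an empty string so callers can fall back to a default. For instance:

    from yt.funcs import get_image_suffix

    print get_image_suffix("slice.png")   # '.png'
    print get_image_suffix("slice.jpeg")  # ''  (unrecognized)
    print get_image_suffix("slice")       # ''  (no extension)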


diff -r ba01a9998070649a0c57fda1112be3470c9dddc3 -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -49,6 +49,7 @@
 
 class AnswerTesting(Plugin):
     name = "answer-testing"
+    _my_version = None
 
     def options(self, parser, env=os.environ):
         super(AnswerTesting, self).options(parser, env=env)
@@ -65,16 +66,25 @@
         parser.add_option("--local-store", dest="store_local_results",
             default=False, action="store_true", help="Store/Load local results?")
 
+    @property
+    def my_version(self, version=None):
+        if self._my_version is not None:
+            return self._my_version
+        if version is None:
+            try:
+                version = get_yt_version()
+            except:
+                version = "UNKNOWN%s" % (time.time())
+        self._my_version = version
+        return self._my_version
+
     def configure(self, options, conf):
         super(AnswerTesting, self).configure(options, conf)
         if not self.enabled:
             return
         disable_stream_logging()
-        try:
-            my_hash = get_yt_version()
-        except:
-            my_hash = "UNKNOWN%s" % (time.time())
-        if options.this_name is None: options.this_name = my_hash
+        if options.this_name is None: 
+            options.this_name = self.my_version
         from yt.config import ytcfg
         ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = \
@@ -84,19 +94,28 @@
         elif options.compare_name == "latest":
             options.compare_name = _latest
 
-        # We only either store or test.
+        # Local/Cloud storage 
         if options.store_local_results:
+            storage_class = AnswerTestLocalStorage
+            # Fix up filename for local storage 
             if options.compare_name is not None:
-                options.compare_name = "%s/%s" % \
-                        (os.path.realpath(options.output_dir), 
-                         options.compare_name)
-            AnswerTestingTest.reference_storage = \
-                self.storage = \
-                    AnswerTestLocalStorage(options.compare_name, 
-                                           not options.store_results)
+                options.compare_name = "%s/%s/%s" % \
+                    (os.path.realpath(options.output_dir), options.compare_name, 
+                     options.compare_name)
+            if options.this_name is not None:
+                name_dir_path = "%s/%s" % \
+                    (os.path.realpath(options.output_dir), 
+                    options.this_name)
+                if not os.path.isdir(name_dir_path):
+                    os.mkdir(name_dir_path)
+                options.this_name= "%s/%s" % \
+                        (name_dir_path, options.this_name)
         else:
-            AnswerTestingTest.reference_storage = \
-                self.storage = AnswerTestCloudStorage(options.compare_name, not options.store_results)
+            storage_class = AnswerTestCloudStorage
+
+        # Initialize answer/reference storage
+        AnswerTestingTest.reference_storage = self.storage = \
+                storage_class(options.compare_name, options.this_name)
 
         self.store_results = options.store_results
         self.store_local_results = options.store_local_results
@@ -108,10 +127,10 @@
         self.storage.dump(self.result_storage)        
 
 class AnswerTestStorage(object):
-    def __init__(self, reference_name, read=True):
+    def __init__(self, reference_name=None, answer_name=None):
         self.reference_name = reference_name
+        self.answer_name = answer_name
         self.cache = {}
-        self.read = read
     def dump(self, result_storage, result):
         raise NotImplementedError 
     def get(self, pf_name, default=None):
@@ -119,7 +138,7 @@
 
 class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
-        if not self.read: return default
+        if self.reference_name is None: return default
         if pf_name in self.cache: return self.cache[pf_name]
         url = _url_path % (self.reference_name, pf_name)
         try:
@@ -135,7 +154,7 @@
         return rv
 
     def dump(self, result_storage):
-        if self.read: return
+        if self.answer_name is None: return
         # This is where we dump our result storage up to Amazon, if we are able
         # to.
         import boto
@@ -144,18 +163,18 @@
         bucket = c.get_bucket("yt-answer-tests")
         for pf_name in result_storage:
             rs = cPickle.dumps(result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.reference_name, pf_name)) 
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
             if tk is not None: tk.delete()
             k = Key(bucket)
-            k.key = "%s_%s" % (self.reference_name, pf_name)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
             k.set_contents_from_string(rs)
             k.set_acl("public-read")
 
 class AnswerTestLocalStorage(AnswerTestStorage):
     def dump(self, result_storage):
-        if self.read: return 
+        if self.answer_name is None: return
         # Store data using shelve
-        ds = shelve.open(self.reference_name, protocol=-1)
+        ds = shelve.open(self.answer_name, protocol=-1)
         for pf_name in result_storage:
             answer_name = "%s" % pf_name
             if answer_name in ds:
@@ -164,7 +183,7 @@
         ds.close()
 
     def get(self, pf_name, default=None):
-        if not self.read: return default
+        if self.reference_name is None: return default
         # Read data using shelve
         answer_name = "%s" % pf_name
         ds = shelve.open(self.reference_name, protocol=-1)
@@ -224,8 +243,7 @@
 
     def __call__(self):
         nv = self.run()
-        if self.reference_storage.read and \
-           self.reference_storage.reference_name is not None:
+        if self.reference_storage.reference_name is not None:
             dd = self.reference_storage.get(self.storage_name)
             if dd is None: raise YTNoOldAnswer(self.storage_name)
             ov = dd[self.description]
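
The storage split above makes the read and write sides independent: reference_name controls what get() compares against, answer_name controls what dump() writes, and either may be None to disable that side. A minimal sketch of the local shelve round trip under those assumptions (names are made up):

    import shelve

    # dump(): write this run's results under the answer name.
    ds = shelve.open("my_answers", protocol=-1)
    ds["galaxy0030"] = {"FieldValues_Density": 1.0}
    ds.close()

    # get(): later runs open the same file as the reference.
    ds = shelve.open("my_answers", protocol=-1)
    print ds.get("galaxy0030", None)
    ds.close()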


diff -r ba01a9998070649a0c57fda1112be3470c9dddc3 -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -989,9 +989,9 @@
             tmax[i] = 1e60
     # We have to jumpstart our calculation
     for i in range(3):
-        if cur_ind[i] == vc.dims[i] and step[i] == 1:
+        if cur_ind[i] == vc.dims[i] and step[i] >= 0:
             return 0
-        if cur_ind[i] == -1 and step[i] == -1:
+        if cur_ind[i] == -1 and step[i] <= -1:
             return 0
     enter_t = intersect_t
     hit = 0


diff -r ba01a9998070649a0c57fda1112be3470c9dddc3 -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -420,7 +420,7 @@
     else:
         dpi = None
 
-    suffix = os.path.splitext(filename)[1]
+    suffix = get_image_suffix(filename)
 
     if suffix == '':
         suffix = '.png'


diff -r ba01a9998070649a0c57fda1112be3470c9dddc3 -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -146,53 +146,35 @@
 log_transform = FieldTransform('log10', np.log10, LogLocator())
 linear_transform = FieldTransform('linear', lambda x: x, LinearLocator())
 
-def GetBoundsAndCenter(axis, center, width, pf, unit='1'):
-    if width == None:
-        width = (pf.domain_width[x_dict[axis]],
-                 pf.domain_width[y_dict[axis]])
+def StandardWidth(axis, width, depth, pf):
+    if width is None:
+        # Default to code units
+        if not iterable(axis):
+            width = ((pf.domain_width[x_dict[axis]], '1'),
+                     (pf.domain_width[y_dict[axis]], '1'))
+        else:
+            # axis is actually the normal vector
+            # for an off-axis data object.
+            width = ((pf.domain_width.min(), '1'),
+                     (pf.domain_width.min(), '1'))
     elif iterable(width): 
-        if isinstance(width[1],str):
-            w,unit = width
-            width = (w, w)
-        elif isinstance(width[1],tuple):
-            wx,unitx = width[0]
-            wy,unity = width[1]
-            width = (wx/pf[unitx],wy/pf[unity])
+        if isinstance(width[1], str):
+            width = (width, width)
+        elif isinstance(width[1], tuple):
+            pass
+        else:
+            # Two bare numbers: separate x and y widths in code units.
+            width = ((width[0], '1'), (width[1], '1'))
     else:
-        width = (width, width)
-    Wx, Wy = width
-    width = (Wx/pf[unit], Wy/pf[unit])
-    if isinstance(center,str):
-        if center.lower() == 'm' or center.lower() == 'max':
-            v, center = pf.h.find_max("Density")
-        elif center.lower() == "center" or center.lower() == "c":
-            center = (pf.domain_right_edge + pf.domain_left_edge)/2.0
-        else:
-            raise RuntimeError('center keyword \"%s\" not recognized'%center)
-    bounds = [center[x_dict[axis]]-width[0]/2,
-              center[x_dict[axis]]+width[0]/2,
-              center[y_dict[axis]]-width[1]/2,
-              center[y_dict[axis]]+width[1]/2]
-    return (bounds,center)
-
-def GetOffAxisBoundsAndCenter(normal, center, width, pf, unit='1',depth=None):
-    if width == None:
-        width = (pf.domain_width.min(),
-                 pf.domain_width.min())
-    elif iterable(width) and isinstance(width[1],str):
-        w,unit = width
-        width = w
-    if not iterable(width):
-        width = (width, width)
-    Wx, Wy = width
-    width = np.array((Wx/pf[unit], Wy/pf[unit]))
-    if depth != None:
-        if iterable(depth) and isinstance(depth[1],str):
-            d,unit = depth
-            depth = d/pf[unit]
+        width = ((width, '1'), (width, '1'))
+    if depth is not None:
+        if iterable(depth) and isinstance(depth[1], str):
+            depth = (depth,)
         elif iterable(depth):
             raise RuntimeError("Depth must be a float or a (width,\"unit\") tuple")
-        width = np.append(width,depth)
+        else:
+            depth = ((depth, '1'),)
+        width += depth
+    return width
+
+def StandardCenter(center, pf):
     if isinstance(center,str):
         if center.lower() == 'm' or center.lower() == 'max':
             v, center = pf.h.find_max("Density")
@@ -200,21 +182,39 @@
             center = (pf.domain_left_edge + pf.domain_right_edge) / 2
         else:
             raise RuntimeError('center keyword \"%s\" not recognized'%center)
+    return center
 
-    if width.shape == (2,):
+def GetWindowParameters(axis, center, width, pf):
+    width = StandardWidth(axis, width, None, pf)
+    center = StandardCenter(center, pf)
+    units = (width[0][1], width[1][1])
+    bounds = (center[x_dict[axis]]-width[0][0]/pf[units[0]]/2,  
+              center[x_dict[axis]]+width[0][0]/pf[units[0]]/2, 
+              center[y_dict[axis]]-width[1][0]/pf[units[1]]/2, 
+              center[y_dict[axis]]+width[1][0]/pf[units[1]]/2)
+    return (bounds, center, units)
+
+def GetObliqueWindowParameters(normal, center, width, pf, depth=None):
+    width = StandardWidth(normal, width, depth, pf)
+    center = StandardCenter(center, pf)
+
+    if len(width) == 2:
         # Transforming to the cutting plane coordinate system
         center = np.array(center)
         center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
         (normal,perp1,perp2) = ortho_find(normal)
         mat = np.transpose(np.column_stack((perp1,perp2,normal)))
         center = np.dot(mat,center)
-        width = width
     
-        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
+        units = (width[0][1], width[1][1])
+        bounds = (-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2, 
+                  -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2)
     else:
-        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2, -width[2]/2, width[2]/2]
-
-    return (bounds,center)
+        units = (width[0][1], width[1][1], width[2][1])
+        bounds = (-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2, 
+                  -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2, 
+                  -width[2][0]/pf[units[2]]/2, width[2][0]/pf[units[2]]/2)
+    return (bounds, center, units)
 
 class PlotWindow(object):
     _plot_valid = False
@@ -264,7 +264,7 @@
         self.oblique = oblique
         self.data_source = data_source
         self.buff_size = buff_size
-        self.antialias = True
+        self.antialias = antialias
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
         self.fontsize = fontsize
@@ -435,42 +435,43 @@
              wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
              that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
              the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
-             in code units.  the width of the image.
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+             in code units.  If units are provided the resulting plot axis labels will  
+             use the supplied units.
         unit : str
-            the unit the width has been specified in.
-            defaults to code units.  If width is a tuple this 
-            argument is ignored
+             the unit the width has been specified in.
+             defaults to code units.  If width is a tuple this 
+             argument is ignored
 
         """
-        if iterable(width): 
-            if isinstance(width[1],str):
-                w, unit = width
-                width = (w, w)
-            elif isinstance(width[1], tuple):
-                wx,unitx = width[0]
-                wy,unity = width[1]
-                width = (wx/self.pf[unitx],wy/self.pf[unity])
+        if width is not None:
+            set_axes_unit = True
         else:
-            width = (width, width)
-        Wx, Wy = width
-        width = (Wx,Wy)
-        width = [w / self.pf[unit] for w in width]
+            set_axes_unit = False
+
+        width = StandardWidth(self._frb.axis, width, None, self.pf)
 
         centerx = (self.xlim[1] + self.xlim[0])/2.
         centery = (self.ylim[1] + self.ylim[0])/2. 
         
-        self.xlim = (centerx - width[0]/2.,
-                     centerx + width[0]/2.)
-        self.ylim = (centery - width[1]/2.,
-                     centery + width[1]/2.)
+        units = (width[0][1], width[1][1])
+
+        if set_axes_unit:
+            self._axes_unit_names = units
+        else:
+            self._axes_unit_names = None
+
+        self.xlim = (centerx - width[0][0]/self.pf[units[0]]/2.,
+                     centerx + width[0][0]/self.pf[units[0]]/2.)
+        self.ylim = (centery - width[1][0]/self.pf[units[1]]/2.,
+                     centery + width[1][0]/self.pf[units[1]]/2.)
         
         if hasattr(self,'zlim'):
             centerz = (self.zlim[1] + self.zlim[0])/2.
-            mw = max(width)
+            mw = max([width[0][0], width[1][0]])
             self.zlim = (centerz - mw/2.,
                          centerz + mw/2.)
-        
+
     @invalidate_data
     def set_center(self, new_center, unit = '1'):
         """Sets a new center for the plot window
@@ -527,7 +528,7 @@
     def __init__(self, *args,**kwargs):
         setup = kwargs.pop("setup", True)
         PlotWindow.__init__(self, *args,**kwargs)
-        self._unit = None
+        self._axes_unit_names = None
         self._callbacks = []
         self._field_transform = {}
         self._colormaps = defaultdict(lambda: 'algae')
@@ -654,12 +655,14 @@
 
         Parameters
         ----------
-        unit_name : string
+        unit_name : string or two element tuple of strings
             A unit, available for conversion in the parameter file, that the
             image extents will be displayed in.  If set to None, any previous
             units will be reset.  If the unit is None, the default is chosen.
             If unit_name is '1', 'u', or 'unitary', it will not display the 
-            units, and only show the axes name.
+            units, and only show the axes name. If unit_name is a tuple, the first
+            element is assumed to be the unit for the x axis and the second element
+            the unit for the y axis.
 
         Raises
         ------
@@ -677,12 +680,13 @@
         >>> p.show()
         """
         # blind except because it could be in conversion_factors or units
-        try:
-            self.pf[unit_name]
-        except KeyError: 
-            if unit_name is not None:
-                raise YTUnitNotRecognized(unit_name)
-        self._unit = unit_name
+        if unit_name is not None:
+            if isinstance(unit_name, str):
+                # A single string applies to both axes.
+                unit_name = (unit_name, unit_name)
+            for un in unit_name:
+                try:
+                    self.pf[un]
+                except KeyError: 
+                    raise YTUnitNotRecognized(un)
+        self._axes_unit_names = unit_name
 
     def get_metadata(self, field, strip_mathml = True, return_string = True):
         fval = self._frb[field]
@@ -690,10 +694,11 @@
         ma = fval.max()
         x_width = self.xlim[1] - self.xlim[0]
         y_width = self.ylim[1] - self.ylim[0]
-        if self._unit is None:
+        if self._axes_unit_names is None:
             unit = get_smallest_appropriate_unit(x_width, self.pf)
+            unit = (unit, unit)
         else:
-            unit = self._unit
+            unit = self._axes_unit_names
         units = self.get_field_units(field, strip_mathml)
         center = getattr(self._frb.data_source, "center", None)
         if center is None or self._frb.axis == 4:
@@ -707,16 +712,16 @@
         if return_string:
             md = _metadata_template % dict(
                 pf = self.pf,
-                x_width = x_width*self.pf[unit],
-                y_width = y_width*self.pf[unit],
-                unit = unit, units = units, mi = mi, ma = ma,
-                xc = xc, yc = yc, zc = zc)
+                x_width = x_width*self.pf[unit[0]],
+                y_width = y_width*self.pf[unit[1]],
+                axes_unit_names = unit[0], colorbar_unit = units, 
+                mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
         else:
             md = dict(pf = self.pf,
-                      x_width = x_width*self.pf[unit],
-                      y_width = y_width*self.pf[unit],
-                      unit = unit, units = units, mi = mi, ma = ma,
-                      xc = xc, yc = yc, zc = zc)
+                      x_width = x_width*self.pf[unit[0]],
+                      y_width = y_width*self.pf[unit[1]],
+                      axes_unit_names = unit, colorbar_unit = units, 
+                      mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
         return md
 
     def get_field_units(self, field, strip_mathml = True):
@@ -745,9 +750,9 @@
     _plot_type = None
 
     def __init__(self, *args, **kwargs):
-        if self._frb_generator == None:
+        if self._frb_generator is None:
             self._frb_generator = kwargs.pop("frb_generator")
-        if self._plot_type == None:
+        if self._plot_type is None:
             self._plot_type = kwargs.pop("plot_type")
         PWViewer.__init__(self, *args, **kwargs)
 
@@ -776,17 +781,20 @@
                 raise RuntimeError(
                     'origin keyword: \"%(k)s\" not recognized' % {'k': self.origin})
 
-            extent = [self.xlim[i] - xc for i in (0,1)]
-            extent.extend([self.ylim[i] - yc for i in (0,1)])
-            extent = [el*self.pf[md['unit']] for el in extent]
+            (unit_x, unit_y) = md['axes_unit_names']
+
+            extentx = [(self.xlim[i] - xc) * self.pf[unit_x] for i in (0,1)]
+            extenty = [(self.ylim[i] - yc) * self.pf[unit_y] for i in (0,1)]
+
+            extent = extentx + extenty
 
             if f in self.plots.keys():
-                zlim = (self.plots[f].zmin,self.plots[f].zmax)
+                zlim = (self.plots[f].zmin, self.plots[f].zmax)
             else:
-                zlim = (None,None)
+                zlim = (None, None)
 
-            aspect = (self.xlim[1] - self.xlim[0])/(self.ylim[1]-self.ylim[0])
-
+            aspect = (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
+            
             # This sets the size of the figure, and defaults to making one of the dimensions smaller.
             # This should protect against giant images in the case of a very large aspect ratio.
             norm_size = 10.0
@@ -796,22 +804,26 @@
             else:
                 size = (aspect*norm_size*(1.+cbar_frac), norm_size)
 
-            self.plots[f] = WindowPlotMPL(self._frb[f], extent, self._field_transform[f], 
+            # Correct the aspect ratio in case unit_x and unit_y are different
+            aspect *= self.pf[unit_x]/self.pf[unit_y]
+
+            self.plots[f] = WindowPlotMPL(self._frb[f], extent, aspect, self._field_transform[f], 
                                           self._colormaps[f], size, zlim)
+
             self.plots[f].cb = self.plots[f].figure.colorbar(
                 self.plots[f].image, cax = self.plots[f].cax)
 
-            if not md['unit'] in ['1', 'u', 'unitary']:
-                axes_unit_label = '\/\/('+md['unit']+')'
+            axes_unit_labels = ['', '']
+            for i, un in enumerate((unit_x, unit_y)):
+                if un not in ['1', 'u', 'unitary']:
+                    axes_unit_labels[i] = '\/\/('+un+')'
+                    
+            if self.oblique:
+                labels = [r'$\rm{Image\/x'+axes_unit_labels[0]+'}$',
+                          r'$\rm{Image\/y'+axes_unit_labels[1]+'}$']
             else:
-                axes_unit_label = ''
-
-            if self.oblique == False:
                 labels = [r'$\rm{'+axis_labels[axis_index][i]+
-                        axes_unit_label + r'}$' for i in (0,1)]
-            else:
-                labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
-                          r'$\rm{Image\/y'+axes_unit_label+'}$']
+                          axes_unit_labels[i] + r'}$' for i in (0,1)]
 
             self.plots[f].axes.set_xlabel(labels[0],fontsize=self.fontsize)
             self.plots[f].axes.set_ylabel(labels[1],fontsize=self.fontsize)
@@ -831,14 +843,14 @@
             except ParseFatalException, err:
                 raise YTCannotParseFieldDisplayName(f,field_name,str(err))
 
-            if md['units'] == None or md['units'] == '':
+            if md['colorbar_unit'] is None or md['colorbar_unit'] == '':
                 label = field_name
             else:
                 try:
-                    parser.parse(r'$'+md['units']+r'$')
+                    parser.parse(r'$'+md['colorbar_unit']+r'$')
                 except ParseFatalException, err:
-                    raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
-                label = field_name+r'$\/\/('+md['units']+r')$'
+                    raise YTCannotParseUnitDisplayName(f, md['colorbar_unit'],str(err))
+                label = field_name+r'$\/\/('+md['colorbar_unit']+r')$'
 
             self.plots[f].cb.set_label(label,fontsize=self.fontsize)
 
@@ -905,9 +917,9 @@
         """
         names = []
         if mpl_kwargs is None: mpl_kwargs = {}
-        if name == None:
+        if name is None:
             name = str(self.pf)
-        suffix = os.path.splitext(name)[1]
+        suffix = get_image_suffix(name)
         if suffix != '':
             for k, v in self.plots.iteritems():
                 names.append(v.save(name,mpl_kwargs))
@@ -1012,13 +1024,14 @@
              wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
              that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
              the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
-             in code units.  
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+             in code units.  If units are provided the resulting plot axis labels will
+             use the supplied units.
         axes_unit : A string
-            The name of the unit for the tick labels on the x and y axes.  
-            Defaults to None, which automatically picks an appropriate unit.
-            If axes_unit is '1', 'u', or 'unitary', it will not display the 
-            units, and only show the axes name.
+             The name of the unit for the tick labels on the x and y axes.  
+             Defaults to None, which automatically picks an appropriate unit.
+             If axes_unit is '1', 'u', or 'unitary', it will not display the 
+             units, and only show the axes name.
         origin : string
              The location of the origin of the plot coordinate system.
              Currently, can be set to three options: 'left-domain', corresponding
@@ -1038,12 +1051,14 @@
         >>> p.save('sliceplot')
         
         """
-        # tHis will handle time series data and controllers
+        # this will handle time series data and controllers
         ts = self._initialize_dataset(pf) 
         self.ts = ts
         pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
+        (bounds, center, units) = GetWindowParameters(axis, center, width, pf)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units
         slc = pf.h.slice(axis, center[axis], fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1096,23 +1111,24 @@
              wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
              that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
              the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
-             in code units.
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+             in code units.  If units are provided the resulting plot axis labels will 
+             use the supplied units.
         axes_unit : A string
-            The name of the unit for the tick labels on the x and y axes.  
-            Defaults to None, which automatically picks an appropriate unit.
-            If axes_unit is '1', 'u', or 'unitary', it will not display the 
-            units, and only show the axes name.
+             The name of the unit for the tick labels on the x and y axes.  
+             Defaults to None, which automatically picks an appropriate unit.
+             If axes_unit is '1', 'u', or 'unitary', it will not display the 
+             units, and only show the axes name.
         origin : A string
-            The location of the origin of the plot coordinate system.
-            Currently, can be set to three options: 'left-domain', corresponding
-            to the bottom-left hand corner of the simulation domain, 'center-domain',
-            corresponding the center of the simulation domain, or 'center-window' for 
-            the center of the plot window.
+             The location of the origin of the plot coordinate system.
+             Currently, can be set to three options: 'left-domain', corresponding
+             to the bottom-left hand corner of the simulation domain, 'center-domain',
+             corresponding to the center of the simulation domain, or 'center-window' for 
+             the center of the plot window.
         weight_field : string
-            The name of the weighting field.  Set to None for no weight.
+             The name of the weighting field.  Set to None for no weight.
         max_level: int
-            The maximum level to project to.
+             The maximum level to project to.
         fontsize : integer
              The size of the fonts for the axis, colorbar, and tick labels.
         
@@ -1130,7 +1146,9 @@
         self.ts = ts
         pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
+        (bounds, center, units) = GetWindowParameters(axis, center, width, pf)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units
         proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1139,7 +1157,7 @@
     _plot_type = 'OffAxisSlice'
     _frb_generator = ObliqueFixedResolutionBuffer
 
-    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
+    def __init__(self, pf, normal, fields, center='c', width=None, 
                  axes_unit=None, north_vector=None, fontsize=15):
         r"""Creates an off axis slice plot from a parameter file
 
@@ -1180,8 +1198,10 @@
         fontsize : integer
              The size of the fonts for the axis, colorbar, and tick labels.
         """
-        (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
-        cutting = pf.h.cutting(normal,center,fields=fields,north_vector=north_vector)
+        (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units
+        cutting = pf.h.cutting(normal, center, fields=fields, north_vector=north_vector)
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
         PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)
@@ -1214,8 +1234,8 @@
     _plot_type = 'OffAxisProjection'
     _frb_generator = OffAxisProjectionFixedResolutionBuffer
 
-    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
-                 depth=(1,'unitary'), axes_unit=None, weight_field=None, 
+    def __init__(self, pf, normal, fields, center='c', width=None, 
+                 depth=(1, '1'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 
                  le=None, re=None, interpolated=False, fontsize=15):
         r"""Creates an off axis projection plot from a parameter file
@@ -1264,8 +1284,9 @@
             set, an arbitrary grid-aligned north-vector is chosen.
 
         """
-        (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf,depth=depth)
-        # Hard-coding the resolution for now
+        (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf,depth=depth)
+        if axes_unit is None and units != ('1', '1', '1'):
+            axes_unit = units[:2]
         fields = ensure_list(fields)[:]
         width = np.array((bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]))
         OffAxisProj = OffAxisProjectionDummyDataSource(center_rot, pf, normal, width, fields, interpolated,
@@ -1279,9 +1300,9 @@
 _metadata_template = """
 %(pf)s<br><br>
-Field of View:  %(x_width)0.3f %(unit)s<br>
-Minimum Value:  %(mi)0.3e %(units)s<br>
-Maximum Value:  %(ma)0.3e %(units)s<br>
+Field of View:  %(x_width)0.3f %(axes_unit_names)s<br>
+Minimum Value:  %(mi)0.3e %(colorbar_unit)s<br>
+Maximum Value:  %(ma)0.3e %(colorbar_unit)s<br>
 Central Point:  (data coords)<br>
    %(xc)0.14f<br>
    %(yc)0.14f<br>
@@ -1473,7 +1494,7 @@
             self.cax = self.figure.add_axes(caxrect)
             
     def save(self, name, mpl_kwargs, canvas = None):
-        suffix = os.path.splitext(name)[1]
+        suffix = get_image_suffix(name)
         
         if suffix == '':
             suffix = '.png'
@@ -1539,17 +1560,17 @@
         return f.read()
 
 class WindowPlotMPL(PlotMPL):
-    def __init__(self, data, extent, field_transform, cmap, size, zlim):
+    def __init__(self, data, extent, aspect, field_transform, cmap, size, zlim):
         self.zmin, self.zmax = zlim
         PlotMPL.__init__(self, data, size)
-        self.__init_image(data, extent, field_transform, cmap)
+        self.__init_image(data, extent, aspect, field_transform, cmap)
 
-    def __init_image(self, data, extent, field_transform, cmap):
+    def __init_image(self, data, extent, aspect, field_transform, cmap):
         if (field_transform.name == 'log10'):
             norm = matplotlib.colors.LogNorm()
         elif (field_transform.name == 'linear'):
             norm = matplotlib.colors.Normalize()
-        self.image = self.axes.imshow(data, origin='lower', extent = extent,
-                                      norm = norm, vmin = self.zmin, 
-                                      vmax = self.zmax, cmap = cmap)
-        self.image.axes.ticklabel_format(scilimits=(-4,3))
+        self.image = self.axes.imshow(data, origin='lower', extent=extent,
+                                      norm=norm, vmin=self.zmin, aspect=aspect, 
+                                      vmax=self.zmax, cmap=cmap)
+        self.image.axes.ticklabel_format(scilimits=(-2,3))





https://bitbucket.org/yt_analysis/yt/changeset/1a1455873e16/
changeset:   1a1455873e16
branch:      yt
user:        sskory
date:        2012-11-20 23:34:42
summary:     Merging.
affected #:  18 files

diff -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 -r 1a1455873e16c0f407fca38c29b134e22e489d16 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,4 +6,4 @@
 detailed-errors=1
 where=yt
 exclude=answer_testing
-with-xunit=1
+with-xunit=1
\ No newline at end of file


diff -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 -r 1a1455873e16c0f407fca38c29b134e22e489d16 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -242,6 +242,23 @@
         cond = np.logical_and(cond, self.LeftEdge[y] <= RE[:,y])
         return cond
 
+    def is_in_grid(self, x, y, z) :
+        """
+        Generate a mask that shows which points in *x*, *y*, and *z*
+        fall within this grid's boundaries.
+        """
+        xcond = np.logical_and(x >= self.LeftEdge[0],
+                               x < self.RightEdge[0])
+        ycond = np.logical_and(y >= self.LeftEdge[1],
+                               y < self.RightEdge[1])
+        zcond = np.logical_and(z >= self.LeftEdge[2],
+                               z < self.RightEdge[2])
+
+        cond = np.logical_and(xcond, ycond)
+        cond = np.logical_and(zcond, cond)
+
+        return cond
+        
     def __repr__(self):
         return "AMRGridPatch_%04i" % (self.id)
 

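The is_in_grid helper added above is a vectorized point-in-box test, inclusive on the
left edges and exclusive on the right. The same mask as a self-contained NumPy snippet:

    import numpy as np

    # Inclusive on the left edge, exclusive on the right, per the code above.
    left_edge = np.array([0.0, 0.0, 0.0])
    right_edge = np.array([0.5, 0.5, 0.5])
    x, y, z = np.random.random((3, 1000))

    cond = np.logical_and(x >= left_edge[0], x < right_edge[0])
    cond &= np.logical_and(y >= left_edge[1], y < right_edge[1])
    cond &= np.logical_and(z >= left_edge[2], z < right_edge[2])
    print(cond.sum(), "of", cond.size, "points fall inside this grid")
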

diff -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 -r 1a1455873e16c0f407fca38c29b134e22e489d16 yt/data_objects/object_finding_mixin.py
--- a/yt/data_objects/object_finding_mixin.py
+++ b/yt/data_objects/object_finding_mixin.py
@@ -29,8 +29,11 @@
 from yt.utilities.lib import \
     get_box_grids_level, \
     get_box_grids_below_level
+from yt.utilities.lib import \
+    MatchPointsToGrids, \
+    GridTree
 
-class ObjectFindingMixin(object):
+class ObjectFindingMixin(object) :
 
     def find_ray_grids(self, coord, axis):
         """
@@ -110,6 +113,16 @@
         ind = np.where(mask == 1)
         return self.grids[ind], ind
 
+    def find_points(self, x, y, z) :
+        """
+        Returns the (objects, indices) of leaf grids containing a number of (x,y,z) points
+        """
+        num_points = len(x)
+        grid_tree = self.get_grid_tree()
+        pts = MatchPointsToGrids(grid_tree,num_points,x,y,z)
+        ind = pts.find_points_in_tree() 
+        return self.grids[ind], ind
+    
     def find_field_value_at_point(self, fields, coord):
         r"""Find the value of fields at a point.
         
@@ -239,3 +252,24 @@
                     mask[gi] = True
         return self.grids[mask], np.where(mask)
 
+    def get_grid_tree(self) :
+
+        left_edge = np.zeros((self.num_grids, 3))
+        right_edge = np.zeros((self.num_grids, 3))
+        level = np.zeros((self.num_grids), dtype='int64')
+        parent_ind = np.zeros((self.num_grids), dtype='int64')
+        num_children = np.zeros((self.num_grids), dtype='int64')
+
+        for i, grid in enumerate(self.grids) :
+
+            left_edge[i,:] = grid.LeftEdge
+            right_edge[i,:] = grid.RightEdge
+            level[i] = grid.Level
+            if grid.Parent is None :
+                parent_ind[i] = -1
+            else :
+                parent_ind[i] = grid.Parent.id - grid.Parent._id_offset
+            num_children[i] = np.int64(len(grid.Children))
+
+        return GridTree(self.num_grids, left_edge, right_edge, parent_ind,
+                        level, num_children)
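
Taken together, get_grid_tree flattens the hierarchy into parallel arrays and
find_points matches arbitrary coordinates to leaf grids. A hedged usage sketch (the
load call and dataset name are placeholders, not part of this diff):

    import numpy as np
    # pf = load("my_dataset")   # placeholder; any gridded parameter file
    x, y, z = np.random.random((3, 100))
    grids, indices = pf.h.find_points(x, y, z)
    # indices[i] names the finest grid whose [LeftEdge, RightEdge) box
    # contains point i; grids is the matching array of grid objects.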


diff -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 -r 1a1455873e16c0f407fca38c29b134e22e489d16 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -30,6 +30,7 @@
 import h5py
 import numpy as np
 import weakref
+import glob #ST 9/12
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
            AMRGridPatch
@@ -112,6 +113,8 @@
         grid['read_field'] = field
         grid['read_type'] = 'vector'
 
+
+
 class AthenaHierarchy(AMRHierarchy):
 
     grid = AthenaGrid
@@ -215,34 +218,94 @@
                   (np.prod(grid['dimensions']), grid['ncells']))
             raise TypeError
 
+        # Need to determine how many grids: self.num_grids
+        dname = self.hierarchy_filename
+        gridlistread = glob.glob('id*/%s-id*%s' % (dname[4:-9],dname[-9:] ))
+        gridlistread.insert(0,self.hierarchy_filename)
+        self.num_grids = len(gridlistread)
         dxs=[]
         self.grids = np.empty(self.num_grids, dtype='object')
         levels = np.zeros(self.num_grids, dtype='int32')
-        single_grid_width = grid['dds']*grid['dimensions']
-        grids_per_dim = (self.parameter_file.domain_width/single_grid_width).astype('int32')
-        glis = np.empty((self.num_grids,3), dtype='int64')
-        for i in range(self.num_grids):
-            procz = i/(grids_per_dim[0]*grids_per_dim[1])
-            procy = (i - procz*(grids_per_dim[0]*grids_per_dim[1]))/grids_per_dim[0]
-            procx = i - procz*(grids_per_dim[0]*grids_per_dim[1]) - procy*grids_per_dim[0]
-            glis[i, 0] = procx*grid['dimensions'][0]
-            glis[i, 1] = procy*grid['dimensions'][1]
-            glis[i, 2] = procz*grid['dimensions'][2]
+        glis = np.empty((self.num_grids,3), dtype='float64')
+        gdds = np.empty((self.num_grids,3), dtype='float64')
         gdims = np.ones_like(glis)
-        gdims[:] = grid['dimensions']
+        j = 0
+        while j < (self.num_grids):
+            f = open(gridlistread[j],'rb')
+            f.close()
+            if j == 0:
+                f = open(dname,'rb')
+            if j != 0:
+                f = open('id%i/%s-id%i%s' % (j, dname[4:-9],j, dname[-9:]),'rb')
+            gridread = {}
+            gridread['read_field'] = None
+            gridread['read_type'] = None
+            table_read=False
+            line = f.readline()
+            while gridread['read_field'] is None:
+                parse_line(line, gridread)
+                if "SCALAR" in line.strip().split():
+                    break
+                if "VECTOR" in line.strip().split():
+                    break 
+                if 'TABLE' in line.strip().split():
+                    break
+                if len(line) == 0: break
+                line = f.readline()
+            f.close()
+            glis[j,0] = gridread['left_edge'][0]
+            glis[j,1] = gridread['left_edge'][1]
+            glis[j,2] = gridread['left_edge'][2]
+            # It seems some datasets have a mismatch between ncells and 
+            # the actual grid dimensions.
+            if np.prod(gridread['dimensions']) != gridread['ncells']:
+                gridread['dimensions'] -= 1
+                gridread['dimensions'][gridread['dimensions']==0]=1
+            if np.prod(gridread['dimensions']) != gridread['ncells']:
+                mylog.error('product of dimensions %i not equal to number of cells %i' % 
+                      (np.prod(gridread['dimensions']), gridread['ncells']))
+                raise TypeError
+            gdims[j,0] = gridread['dimensions'][0]
+            gdims[j,1] = gridread['dimensions'][1]
+            gdims[j,2] = gridread['dimensions'][2]
+            # Setting dds=1 for non-active dimensions in 1D/2D datasets
+            gridread['dds'][gridread['dimensions']==1] = 1.
+            gdds[j,:] = gridread['dds']
+            
+            j=j+1
+
+        gres = glis + gdims*gdds
+        # Now we convert the glis, which were left edges (floats), to indices 
+        # from the domain left edge.  Then we do a bunch of fixing now that we
+        # know the extent of all the grids. 
+        glis = np.round((glis - self.parameter_file.domain_left_edge)/gdds).astype('int')
+        new_dre = np.max(gres,axis=0)
+        self.parameter_file.domain_right_edge = np.round(new_dre, decimals=6)
+        self.parameter_file.domain_width = \
+                (self.parameter_file.domain_right_edge - 
+                 self.parameter_file.domain_left_edge)
+        self.parameter_file.domain_center = \
+                0.5*(self.parameter_file.domain_left_edge + 
+                     self.parameter_file.domain_right_edge)
+        self.parameter_file.domain_dimensions = \
+                np.round(self.parameter_file.domain_width/gdds[0]).astype('int')
+        if self.parameter_file.dimensionality <= 2 :
+            self.parameter_file.domain_dimensions[2] = np.int(1)
+        if self.parameter_file.dimensionality == 1 :
+            self.parameter_file.domain_dimensions[1] = np.int(1)
         for i in range(levels.shape[0]):
-            self.grids[i] = self.grid(i, self, levels[i],
+            self.grids[i] = self.grid(i,self,levels[i],
                                       glis[i],
                                       gdims[i])
-
             dx = (self.parameter_file.domain_right_edge-
                   self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
             dx = dx/self.parameter_file.refine_by**(levels[i])
-            dxs.append(grid['dds'])
+            dxs.append(dx)
+        
         dx = np.array(dxs)
-        self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
+        self.grid_left_edge = np.round(self.parameter_file.domain_left_edge + dx*glis, decimals=6)
         self.grid_dimensions = gdims.astype("int32")
-        self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
+        self.grid_right_edge = np.round(self.grid_left_edge + dx*self.grid_dimensions, decimals=6)
         self.grid_particle_count = np.zeros([self.num_grids, 1], dtype='int64')
 
     def _populate_grid_objects(self):
@@ -256,9 +319,6 @@
                 g1.Parent.append(g)
         self.max_level = self.grid_levels.max()
 
-#     def _setup_derived_fields(self):
-#         self.derived_field_list = []
-
     def _get_grid_children(self, grid):
         mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
@@ -277,6 +337,11 @@
         StaticOutput.__init__(self, filename, data_style)
         self.filename = filename
         self.storage_filename = filename[4:-4]
+        
+        # Unfortunately we now have to mandate that the hierarchy gets 
+        # instantiated so that we can make sure we have the correct left 
+        # and right domain edges.
+        self.h
 
     def _set_units(self):
         """
@@ -315,14 +380,11 @@
             line = self._handle.readline()
 
         self.domain_left_edge = grid['left_edge']
-        if 'domain_right_edge' in self.specified_parameters:
-            self.domain_right_edge = np.array(self.specified_parameters['domain_right_edge'])
-        else:
-            mylog.info("Please set 'domain_right_edge' in parameters dictionary argument " +
-                    "if it is not equal to -domain_left_edge.")
-            self.domain_right_edge = -self.domain_left_edge
+        mylog.info("Temporarily setting domain_right_edge = -domain_left_edge."+
+                  " This will be corrected automatically if it is not the case.")
+        self.domain_right_edge = -self.domain_left_edge
         self.domain_width = self.domain_right_edge-self.domain_left_edge
-        self.domain_dimensions = self.domain_width/grid['dds']
+        self.domain_dimensions = np.round(self.domain_width/grid['dds']).astype('int32')
         refine_by = None
         if refine_by is None: refine_by = 2
         self.refine_by = refine_by
@@ -331,6 +393,8 @@
             dimensionality = 2
         if grid['dimensions'][1] == 1 :
             dimensionality = 1
+        if dimensionality <= 2 : self.domain_dimensions[2] = np.int32(1)
+        if dimensionality == 1 : self.domain_dimensions[1] = np.int32(1)
         self.dimensionality = dimensionality
         self.current_time = grid["time"]
         self.unique_identifier = self._handle.__hash__()
@@ -339,8 +403,9 @@
         self.field_ordering = 'fortran'
         self.boundary_conditions = [1]*6
 
-        ND = self.dimensionality
-        self.nvtk = int(np.product(self.domain_dimensions[:ND]/(grid['dimensions'][:ND]-1)))
+        dname = self.parameter_filename
+        gridlistread = glob.glob('id*/%s-id*%s' % (dname[4:-9],dname[-9:] ))
+        self.nvtk = len(gridlistread)+1 
 
         self.current_redshift = self.omega_lambda = self.omega_matter = \
             self.hubble_constant = self.cosmological_simulation = 0.0
@@ -348,6 +413,7 @@
         self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
         self._handle.close()
 
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:
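
The glob above reconstructs per-processor filenames from the joined dump's name. A
worked example with a hypothetical MPI Athena filename (the slicing assumes an 'id0/'
prefix and a '.NNNN.vtk' suffix, which is what the [4:-9] indices imply):

    # Hypothetical filename; not taken from the diff.
    dname = "id0/Blast.0000.vtk"
    stem = dname[4:-9]                  # "Blast"  (strips "id0/" and ".0000.vtk")
    step = dname[-9:]                   # ".0000.vtk"
    pattern = 'id*/%s-id*%s' % (stem, step)
    print(pattern)                      # id*/Blast-id*.0000.vtk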


diff -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 -r 1a1455873e16c0f407fca38c29b134e22e489d16 yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -72,13 +72,13 @@
           units=r"")
 
 add_athena_field("cell_centered_B_x", function=NullFunc, take_log=False,
-          units=r"")
+          units=r"", display_name=r"$\rm{cell\ centered\ B_x}$")
 
 add_athena_field("cell_centered_B_y", function=NullFunc, take_log=False,
-          units=r"")
+          units=r"", display_name=r"$\rm{cell\ centered\ B_y}$")
 
 add_athena_field("cell_centered_B_z", function=NullFunc, take_log=False,
-          units=r"")
+          units=r"", display_name=r"$\rm{cell\ centered\ B_z}$")
 
 for f,v in log_translation_dict.items():
     add_field(f, TranslationFunc(v), take_log=True)


diff -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 -r 1a1455873e16c0f407fca38c29b134e22e489d16 yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -45,10 +45,15 @@
 
     def _read_data_set(self,grid,field):
         f = file(grid.filename, 'rb')
-        dtype, offset = grid.hierarchy._field_map[field]
+        dtype, offsetr = grid.hierarchy._field_map[field]
         grid_ncells = np.prod(grid.ActiveDimensions)
         grid_dims = grid.ActiveDimensions
+        grid0_ncells = np.prod(grid.hierarchy.grid_dimensions[0,:])
         read_table_offset = get_read_table_offset(f)
+        if grid_ncells != grid0_ncells:
+            offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
+        if grid_ncells == grid0_ncells:
+            offset = offsetr
         f.seek(read_table_offset+offset)
         if dtype == 'scalar':
             data = np.fromfile(f, dtype='>f4',
@@ -74,10 +79,15 @@
             sl.reverse()
 
         f = file(grid.filename, 'rb')
-        dtype, offset = grid.hierarchy._field_map[field]
+        dtype, offsetr = grid.hierarchy._field_map[field]
         grid_ncells = np.prod(grid.ActiveDimensions)
-
+        grid_dims = grid.ActiveDimensions
+        grid0_ncells = np.prod(grid.hierarchy.grid_dimensions[0,:])
         read_table_offset = get_read_table_offset(f)
+        if grid_ncells != grid0_ncells:
+            offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
+        if grid_ncells == grid0_ncells:
+            offset = offsetr
         f.seek(read_table_offset+offset)
         if dtype == 'scalar':
             data = np.fromfile(f, dtype='>f4', 
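
The offset correction above rescales offsets that were recorded for grid 0 when another
grid has a different cell count: each preceding field block shrinks or grows
proportionally. An arithmetic sketch (cell counts are made-up, and any per-record
header bytes are ignored here):

    grid0_ncells = 16 ** 3            # hypothetical reference grid
    grid_ncells = 8 ** 3              # hypothetical smaller grid
    offsetr = 3 * grid0_ncells        # e.g. recorded after three scalar fields
    offset = offsetr + (grid_ncells - grid0_ncells) * (offsetr // grid0_ncells)
    assert offset == 3 * grid_ncells  # same three fields, rescaled block size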


diff -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 -r 1a1455873e16c0f407fca38c29b134e22e489d16 yt/frontends/enzo/answer_testing_support.py
--- a/yt/frontends/enzo/answer_testing_support.py
+++ b/yt/frontends/enzo/answer_testing_support.py
@@ -35,7 +35,8 @@
      GridValuesTest, \
      ProjectionValuesTest, \
      ParentageRelationshipsTest, \
-     temp_cwd
+     temp_cwd, \
+     AssertWrapper
 
 def requires_outputlog(path = ".", prefix = ""):
     def ffalse(func):
@@ -94,9 +95,10 @@
             for xmin, xmax in zip(self.left_edges, self.right_edges):
                 mask = (position >= xmin)*(position <= xmax)
                 exact_field = np.interp(position[mask], exact['pos'], exact[k]) 
+                myname = "ShockTubeTest_%s" % k
                 # yield test vs analytical solution 
-                yield assert_allclose, field[mask], exact_field, \
-                    self.rtol, self.atol
+                yield AssertWrapper(myname, assert_allclose, field[mask], 
+                                    exact_field, self.rtol, self.atol)
 
     def get_analytical_solution(self):
         # Reads in from file 


diff -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 -r 1a1455873e16c0f407fca38c29b134e22e489d16 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -126,8 +126,11 @@
         except KeyError:
             self.grid_particle_count[:] = 0.0
         self._particle_indices = np.zeros(self.num_grids + 1, dtype='int64')
-        np.add.accumulate(self.grid_particle_count.squeeze(),
-                          out=self._particle_indices[1:])
+        if self.num_grids > 1 :
+            np.add.accumulate(self.grid_particle_count.squeeze(),
+                              out=self._particle_indices[1:])
+        else :
+            self._particle_indices[1] = self.grid_particle_count.squeeze()
         # This will become redundant, as _prepare_grid will reset it to its
         # current value.  Note that FLASH uses 1-based indexing for refinement
         # levels, but we do not, so we reduce the level by 1.
@@ -418,7 +421,11 @@
             nblockx = self.parameters["iprocs"]
             nblocky = self.parameters["jprocs"]
             nblockz = self.parameters["kprocs"]
-                        
+
+        # In case the user wasn't careful
+        if dimensionality <= 2 : nblockz = 1
+        if dimensionality == 1 : nblocky = 1
+
         self.dimensionality = dimensionality
         self.domain_dimensions = \
             np.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
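
The accumulation above builds cumulative particle offsets with one leading zero, so
consecutive entries bound each grid's slice; the single-grid branch is presumably needed
because squeeze() on a one-element array yields a 0-d array that np.add.accumulate
cannot consume. A standalone sketch:

    import numpy as np

    grid_particle_count = np.array([5, 0, 3, 2])
    particle_indices = np.zeros(len(grid_particle_count) + 1, dtype='int64')
    np.add.accumulate(grid_particle_count, out=particle_indices[1:])
    print(particle_indices)           # [ 0  5  5  8 10]
    # grid i's particles live in particle_indices[i]:particle_indices[i+1]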


diff -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 -r 1a1455873e16c0f407fca38c29b134e22e489d16 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -266,12 +266,12 @@
 def GetMagRescalingFactor(pf):
     if pf['unitsystem'].lower() == "cgs":
          factor = 1
-    if pf['unitsystem'].lower() == "si":
+    elif pf['unitsystem'].lower() == "si":
          factor = np.sqrt(4*np.pi/1e7)
-    if pf['unitsystem'].lower() == "none":
+    elif pf['unitsystem'].lower() == "none":
          factor = np.sqrt(4*np.pi)
     else:
-        raise RuntimeError("Runtime parameter unitsystem with"
+        raise RuntimeError("Runtime parameter unitsystem with "
                            "value %s is unrecognized" % pf['unitsystem'])
     return factor
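
The elif fix above matters because, with bare ifs, the trailing else pairs only with the
last test, so every unit system other than "none" fell through to the RuntimeError. A
stripped-down reproduction (factor values simplified):

    def factor_buggy(unitsystem):
        if unitsystem == "cgs":
            factor = 1
        if unitsystem == "si":
            factor = 2
        if unitsystem == "none":
            factor = 3
        else:
            raise RuntimeError("unrecognized")  # hit even for "cgs" and "si"
        return factor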
 


diff -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 -r 1a1455873e16c0f407fca38c29b134e22e489d16 yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -126,7 +126,7 @@
         data["x-velocity"]**2.0
         + data["y-velocity"]**2.0
         + data["z-velocity"]**2.0 )
-add_orion_field("ThermalEnergy", function=_ThermalEnergy,
+add_field("ThermalEnergy", function=_ThermalEnergy,
                 units=r"\rm{ergs}/\rm{cm^3}")
 
 def _Pressure(field,data):
@@ -134,11 +134,11 @@
        NB: this will need to be modified for radiation
     """
     return (data.pf["Gamma"] - 1.0)*data["ThermalEnergy"]
-add_orion_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
+add_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
 
 def _Temperature(field,data):
     return (data.pf["Gamma"]-1.0)*data.pf["mu"]*mh*data["ThermalEnergy"]/(kboltz*data["Density"])
-add_orion_field("Temperature",function=_Temperature,units=r"\rm{Kelvin}",take_log=False)
+add_field("Temperature",function=_Temperature,units=r"\rm{Kelvin}",take_log=False)
 
 # particle fields
 


diff -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 -r 1a1455873e16c0f407fca38c29b134e22e489d16 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -29,6 +29,7 @@
 import contextlib
 import urllib2
 import cPickle
+import sys
 
 from nose.plugins import Plugin
 from yt.testing import *
@@ -53,16 +54,14 @@
 
     def options(self, parser, env=os.environ):
         super(AnswerTesting, self).options(parser, env=env)
-        parser.add_option("--answer-compare", dest="compare_name",
-            default=_latest, help="The name against which we will compare")
+        parser.add_option("--answer-compare-name", dest="compare_name", metavar='str',
+            default=_latest, help="The name of tests against which we will compare")
         parser.add_option("--answer-big-data", dest="big_data",
             default=False, help="Should we run against big data, too?",
             action="store_true")
-        parser.add_option("--answer-name", dest="this_name",
+        parser.add_option("--answer-store-name", dest="store_name", metavar='str',
             default=None,
             help="The name we'll call this set of tests")
-        parser.add_option("--answer-store", dest="store_results",
-            default=False, action="store_true")
         parser.add_option("--local-store", dest="store_local_results",
             default=False, action="store_true", help="Store/Load local results?")
 
@@ -83,8 +82,14 @@
         if not self.enabled:
             return
         disable_stream_logging()
-        if options.this_name is None: 
-            options.this_name = self.my_version
+        if options.store_name is not None:
+            self.store_results = True
+        # If the user sets the storage_name, then it means they are storing and
+        # not comparing, even if they set the compare_name (since it is set by default)
+            options.compare_name = None
+        else: 
+            self.store_results = False
+            options.store_name = self.my_version
         from yt.config import ytcfg
         ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = \
@@ -93,7 +98,7 @@
             options.compare_name = None
         elif options.compare_name == "latest":
             options.compare_name = _latest
-
+            
         # Local/Cloud storage 
         if options.store_local_results:
             storage_class = AnswerTestLocalStorage
@@ -102,22 +107,21 @@
                 options.compare_name = "%s/%s/%s" % \
                     (os.path.realpath(options.output_dir), options.compare_name, 
                      options.compare_name)
-            if options.this_name is not None:
+            if options.store_name is not None:
                 name_dir_path = "%s/%s" % \
                     (os.path.realpath(options.output_dir), 
-                    options.this_name)
+                    options.store_name)
                 if not os.path.isdir(name_dir_path):
-                    os.mkdir(name_dir_path)
-                options.this_name= "%s/%s" % \
-                        (name_dir_path, options.this_name)
+                    os.makedirs(name_dir_path)
+                options.store_name= "%s/%s" % \
+                        (name_dir_path, options.store_name)
         else:
             storage_class = AnswerTestCloudStorage
 
         # Initialize answer/reference storage
         AnswerTestingTest.reference_storage = self.storage = \
-                storage_class(options.compare_name, options.this_name)
+                storage_class(options.compare_name, options.store_name)
 
-        self.store_results = options.store_results
         self.store_local_results = options.store_local_results
         global run_big_data
         run_big_data = options.big_data
@@ -216,9 +220,9 @@
 
 def data_dir_load(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if isinstance(pf_fn, StaticOutput): return pf_fn
     if not os.path.isdir(path):
         return False
-    if isinstance(pf_fn, StaticOutput): return pf_fn
     with temp_cwd(path):
         pf = load(pf_fn)
         pf.h
@@ -550,3 +554,18 @@
                     yield PixelizedProjectionValuesTest(
                         pf_fn, axis, field, weight_field,
                         ds)
+
+class AssertWrapper(object):
+    """
+    Used to wrap a numpy testing assertion, in order to provide a useful name
+    for a given assertion test.
+    """
+    def __init__(self, description, *args):
+        # The key here is to add a description attribute, which nose will pick
+        # up.
+        self.args = args
+        self.description = description
+
+    def __call__(self):
+        self.args[0](*self.args[1:])
+
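AssertWrapper exists so nose reports a meaningful name for each yielded assertion: nose
calls the yielded object and picks up its description attribute. A sketch of the
consumption pattern, mirroring the ShockTubeTest change earlier in this changeset:

    import numpy as np
    from numpy.testing import assert_allclose
    from yt.utilities.answer_testing.framework import AssertWrapper

    def test_shock_tube_fields():
        field = np.array([1.0, 2.0, 3.0])
        exact_field = np.array([1.0, 2.0, 3.0])
        # nose invokes the wrapper; its description names the sub-test.
        yield AssertWrapper("ShockTubeTest_Density", assert_allclose,
                            field, exact_field, 1e-7, 1e-7)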






diff -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 -r 1a1455873e16c0f407fca38c29b134e22e489d16 yt/utilities/lib/GridTree.pyx
--- /dev/null
+++ b/yt/utilities/lib/GridTree.pyx
@@ -0,0 +1,273 @@
+"""
+Matching points on the grid to specific grids
+
+Author: John ZuHone <jzuhone at gmail.com>
+Affiliation: NASA/Goddard Space Flight Center
+Homepage: http://yt-project.org/
+License:
+Copyright (C) 2012 John ZuHone.  All Rights Reserved.
+
+This file is part of yt.
+
+yt is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+cimport numpy as np
+cimport cython
+
+from libc.stdlib cimport malloc, free
+
+cdef struct GridTreeNode :
+    int num_children
+    int level
+    int index
+    np.float64_t left_edge[3]
+    np.float64_t right_edge[3]
+    GridTreeNode **children
+                
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef GridTreeNode Grid_initialize(np.ndarray[np.float64_t, ndim=1] le,
+                                  np.ndarray[np.float64_t, ndim=1] re,
+                                  int num_children, int level, int index) :
+
+    cdef GridTreeNode node
+    cdef int i
+
+    node.index = index
+    node.level = level
+    for i in range(3) :
+        node.left_edge[i] = le[i]
+        node.right_edge[i] = re[i]
+    node.num_children = num_children
+    
+    if num_children > 0:
+        node.children = <GridTreeNode **> malloc(sizeof(GridTreeNode *) *
+                                                 num_children)
+        for i in range(num_children) :
+            node.children[i] = NULL
+    else :
+        node.children = NULL
+
+    return node
+
+cdef class GridTree :
+
+    cdef GridTreeNode *grids
+    cdef GridTreeNode *root_grids
+    cdef int num_grids
+    cdef int num_root_grids
+    cdef int num_leaf_grids
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def __cinit__(self, int num_grids, 
+                  np.ndarray[np.float64_t, ndim=2] left_edge,
+                  np.ndarray[np.float64_t, ndim=2] right_edge,
+                  np.ndarray[np.int64_t, ndim=1] parent_ind,
+                  np.ndarray[np.int64_t, ndim=1] level,
+                  np.ndarray[np.int64_t, ndim=1] num_children) :
+
+        cdef int i, j, k
+        cdef np.ndarray[np.int64_t, ndim=1] child_ptr
+
+        child_ptr = np.zeros(num_grids, dtype='int64')
+
+        self.num_grids = num_grids
+        self.num_root_grids = 0
+        self.num_leaf_grids = 0
+        
+        self.grids = <GridTreeNode *> malloc(sizeof(GridTreeNode) *
+                                             num_grids)
+                
+        for i in range(num_grids) :
+
+            self.grids[i] = Grid_initialize(left_edge[i,:],
+                                            right_edge[i,:],
+                                            num_children[i],
+                                            level[i], i)
+            if level[i] == 0 :
+                self.num_root_grids += 1
+
+            if num_children[i] == 0 : self.num_leaf_grids += 1
+
+        self.root_grids = <GridTreeNode *> malloc(sizeof(GridTreeNode) *
+                                                  self.num_root_grids)
+                
+        k = 0
+        
+        for i in range(num_grids) :
+
+            j = parent_ind[i]
+            
+            if j >= 0:
+                
+                self.grids[j].children[child_ptr[j]] = &self.grids[i]
+
+                child_ptr[j] += 1
+
+            else :
+
+                self.root_grids[k] = self.grids[i] 
+                
+                k = k + 1
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def return_tree_info(self) :
+
+        cdef int i, j
+        
+        levels = []
+        indices = []
+        nchild = []
+        children = []
+        
+        for i in range(self.num_grids) : 
+
+            childs = []
+            
+            levels.append(self.grids[i].level)
+            indices.append(self.grids[i].index)
+            nchild.append(self.grids[i].num_children)
+            for j in range(self.grids[i].num_children) :
+                childs.append(self.grids[i].children[j].index)
+            children.append(childs)
+
+        return indices, levels, nchild, children
+    
+cdef class MatchPointsToGrids :
+
+    cdef int num_points
+    cdef np.float64_t * xp
+    cdef np.float64_t * yp
+    cdef np.float64_t * zp
+    cdef GridTree tree
+    cdef np.int64_t * point_grids
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def __cinit__(self, GridTree tree,
+                  int num_points, 
+                  np.ndarray[np.float64_t, ndim=1] x,
+                  np.ndarray[np.float64_t, ndim=1] y,
+                  np.ndarray[np.float64_t, ndim=1] z) :
+
+        cdef int i
+        
+        self.num_points = num_points
+
+        self.xp = <np.float64_t *> malloc(sizeof(np.float64_t) *
+                                          num_points)
+        self.yp = <np.float64_t *> malloc(sizeof(np.float64_t) *
+                                          num_points)
+        self.zp = <np.float64_t *> malloc(sizeof(np.float64_t) *
+                                          num_points)
+        self.point_grids = <np.int64_t *> malloc(sizeof(np.int64_t) *
+                                              num_points)
+        
+        for i in range(num_points) :
+            self.xp[i] = x[i]
+            self.yp[i] = y[i]
+            self.zp[i] = z[i]
+            self.point_grids[i] = -1
+            
+        self.tree = tree
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def find_points_in_tree(self) :
+
+        cdef np.ndarray[np.int64_t, ndim=1] pt_grids
+        cdef int i, j
+        cdef np.uint8_t in_grid
+        
+        pt_grids = np.zeros(self.num_points, dtype='int64')
+
+        for i in range(self.num_points) :
+
+            in_grid = 0
+            
+            for j in range(self.tree.num_root_grids) :
+
+                if not in_grid : 
+                    in_grid = self.check_position(i, self.xp[i], self.yp[i], self.zp[i],
+                                                  &self.tree.root_grids[j])
+
+        for i in range(self.num_points) :
+            pt_grids[i] = self.point_grids[i]
+        
+        return pt_grids
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef np.uint8_t check_position(self,
+                                   np.int64_t pt_index, 
+                                   np.float64_t x,
+                                   np.float64_t y,
+                                   np.float64_t z,
+                                   GridTreeNode * grid) :
+
+        cdef int i
+        cdef np.uint8_t in_grid
+	
+        in_grid = self.is_in_grid(x, y, z, grid)
+
+        if in_grid :
+
+            if grid.num_children > 0 :
+
+                in_grid = 0
+                
+                for i in range(grid.num_children) :
+
+                    if not in_grid :
+
+                        in_grid = self.check_position(pt_index, x, y, z, grid.children[i])
+
+                if not in_grid :
+                    self.point_grids[pt_index] = grid.index
+                    in_grid = 1
+                    
+            else :
+
+                self.point_grids[pt_index] = grid.index
+                in_grid = 1
+                
+        return in_grid
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef np.uint8_t is_in_grid(self,
+			 np.float64_t x,
+			 np.float64_t y,
+			 np.float64_t z,
+			 GridTreeNode * grid) :
+
+        cdef np.uint8_t xcond, ycond, zcond, cond
+            
+        xcond = x >= grid.left_edge[0] and x < grid.right_edge[0]
+        ycond = y >= grid.left_edge[1] and y < grid.right_edge[1]
+        zcond = z >= grid.left_edge[2] and z < grid.right_edge[2]
+	
+        cond = xcond and ycond
+        cond = cond and zcond
+
+        return cond
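
check_position above descends depth-first: a point is assigned to the deepest grid that
contains it and that no child also contains. The same logic in plain Python, as a sketch
of the algorithm only, with none of the Cython machinery:

    class Node:
        def __init__(self, index, left, right, children=()):
            self.index, self.left, self.right = index, left, right
            self.children = list(children)

        def contains(self, x, y, z):
            return all(l <= c < r for l, c, r in
                       zip(self.left, (x, y, z), self.right))

    def assign(node, x, y, z):
        """Index of the finest grid containing (x, y, z), or -1."""
        if not node.contains(x, y, z):
            return -1
        for child in node.children:
            idx = assign(child, x, y, z)
            if idx >= 0:
                return idx
        return node.index          # no child claimed the point

    root = Node(0, (0.0, 0.0, 0.0), (1.0, 1.0, 1.0),
                [Node(1, (0.25, 0.25, 0.25), (0.75, 0.75, 0.75))])
    print(assign(root, 0.5, 0.5, 0.5))   # 1: the refined child wins
    print(assign(root, 0.1, 0.1, 0.1))   # 0: only the root contains it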


diff -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 -r 1a1455873e16c0f407fca38c29b134e22e489d16 yt/utilities/lib/__init__.py
--- a/yt/utilities/lib/__init__.py
+++ b/yt/utilities/lib/__init__.py
@@ -38,3 +38,4 @@
 from .RayIntegrators import *
 from .grid_traversal import *
 from .marching_cubes import *
+from .GridTree import *


diff -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 -r 1a1455873e16c0f407fca38c29b134e22e489d16 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -204,6 +204,10 @@
                           "yt/utilities/lib/field_interpolation_tables.pxd",
                           ]
           )
+    config.add_extension("GridTree", 
+    ["yt/utilities/lib/GridTree.pyx"],
+        libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+
     if os.environ.get("GPERFTOOLS", "no").upper() != "NO":
         gpd = os.environ["GPERFTOOLS"]
         idir = os.path.join(gpd, "include")


diff -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 -r 1a1455873e16c0f407fca38c29b134e22e489d16 yt/utilities/lib/tests/test_grid_tree.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_grid_tree.py
@@ -0,0 +1,75 @@
+import numpy as np
+
+from yt.testing import *
+from yt.frontends.stream.api import load_amr_grids
+
+def setup():
+
+    global pf
+    
+    grid_data = [
+        dict(left_edge = [0.0, 0.0, 0.0], right_edge = [1.0, 1.0, 1.],
+             level = 0, dimensions = [16, 16, 16]),
+        dict(left_edge = [0.25, 0.25, 0.25], right_edge = [0.75, 0.75, 0.75],
+             level = 1, dimensions = [16, 16, 16]),
+        dict(left_edge = [0.25, 0.25, 0.375], right_edge = [0.5, 0.5, 0.625],
+             level = 2, dimensions = [16, 16, 16]),
+        dict(left_edge = [0.5, 0.5, 0.375], right_edge = [0.75, 0.75, 0.625],
+             level = 2, dimensions = [16, 16, 16]),
+        dict(left_edge = [0.3125, 0.3125, 0.4375], right_edge = [0.4375, 0.4375, 0.5625],
+             level = 3, dimensions = [16, 16, 16]),
+        dict(left_edge = [0.5625, 0.5625, 0.4375], right_edge = [0.6875, 0.6875, 0.5625],
+             level = 3, dimensions = [16, 16, 16])
+        ]
+
+    for g in grid_data: g["Density"] = np.random.random(g["dimensions"]) * 2**g["level"]
+    pf = load_amr_grids(grid_data, [16, 16, 16], 1.0)
+
+def test_grid_tree() :
+
+    grid_tree = pf.h.get_grid_tree()
+    indices, levels, nchild, children = grid_tree.return_tree_info()
+
+    grid_levels = [grid.Level for grid in pf.h.grids]
+    grid_indices = [grid.id-grid._id_offset for grid in pf.h.grids]
+    grid_nchild = [len(grid.Children) for grid in pf.h.grids]
+
+    print levels, grid_levels
+    assert_equal(levels, grid_levels)
+    assert_equal(indices, grid_indices)
+    assert_equal(nchild, grid_nchild)
+
+    for i, grid in enumerate(pf.h.grids) :
+        if grid_nchild[i] > 0:
+            grid_children = np.array([child.id-child._id_offset
+                                      for child in grid.Children])
+            assert_equal(grid_children, children[i])
+
+def test_find_points() :
+    
+    num_points = 100
+
+    x = np.random.uniform(low=pf.domain_left_edge[0],
+                          high=pf.domain_right_edge[0], size=num_points)
+    y = np.random.uniform(low=pf.domain_left_edge[1],
+                          high=pf.domain_right_edge[1], size=num_points)
+    z = np.random.uniform(low=pf.domain_left_edge[2],
+                          high=pf.domain_right_edge[2], size=num_points)
+
+    point_grids, point_grid_inds = pf.h.find_points(x,y,z)
+
+    grid_inds = np.zeros((num_points), dtype='int64')
+
+    for i, xx, yy, zz in zip(range(num_points), x, y, z) :
+
+        pt_level = -1
+        
+        for grid in pf.h.grids:
+
+            if grid.is_in_grid(xx, yy, zz) :
+            
+                if grid.Level > pt_level :
+                    pt_level = grid.Level
+                    grid_inds[i] = grid.id-grid._id_offset
+                    
+    assert_equal(point_grid_inds, grid_inds)


diff -r b4e8a1ce6fadf4195b3343ab3b5bf4ded4f78583 -r 1a1455873e16c0f407fca38c29b134e22e489d16 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -793,20 +793,20 @@
             else:
                 zlim = (None, None)
 
-            aspect = (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
+            plot_aspect = (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
             
             # This sets the size of the figure, and defaults to making one of the dimensions smaller.
             # This should protect against giant images in the case of a very large aspect ratio.
             norm_size = 10.0
             cbar_frac = 0.0
-            if aspect > 1.0:
-                size = (norm_size*(1.+cbar_frac), norm_size/aspect)
+            if plot_aspect > 1.0:
+                size = (norm_size*(1.+cbar_frac), norm_size/plot_aspect)
             else:
-                size = (aspect*norm_size*(1.+cbar_frac), norm_size)
+                size = (plot_aspect*norm_size*(1.+cbar_frac), norm_size)
 
             # Correct the aspect ratio in case unit_x and unit_y are different
-            aspect *= self.pf[unit_x]/self.pf[unit_y]
-
+            aspect = self.pf[unit_x]/self.pf[unit_y]
+            
             self.plots[f] = WindowPlotMPL(self._frb[f], extent, aspect, self._field_transform[f], 
                                           self._colormaps[f], size, zlim)
 

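This follow-up separates the two roles the old aspect variable was playing: plot_aspect
(the data-extent ratio) sizes the figure, while the aspect handed to imshow now carries
only the unit ratio. In outline (the unit factors here are illustrative, not yt's
internal values):

    xlim, ylim = (0.0, 2.0), (0.0, 1.0)
    unit_x, unit_y = 3.0857e21, 3.0857e24  # e.g. kpc and Mpc in cm (assumed)
    plot_aspect = (xlim[1] - xlim[0]) / (ylim[1] - ylim[0])  # sizes the figure
    aspect = unit_x / unit_y                                 # squares the pixels
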


https://bitbucket.org/yt_analysis/yt/changeset/3151dc581d9a/
changeset:   3151dc581d9a
branch:      yt
user:        sskory
date:        2012-11-25 17:05:12
summary:     Merging.
affected #:  1 file

diff -r 1a1455873e16c0f407fca38c29b134e22e489d16 -r 3151dc581d9aa37105740a276c3e6ef7905f55d1 yt/visualization/tests/test_plotwindow.py
--- /dev/null
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -0,0 +1,28 @@
+from yt.testing import *
+from yt.mods import SlicePlot, ProjectionPlot, \
+    OffAxisSlicePlot, OffAxisProjectionPlot
+import glob, os
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def teardown_func(fns):
+    for fn in fns:
+        os.remove(fn)
+
+def test_plotwindow():
+    pf = fake_random_pf(64)
+    fns = []
+    for dim in [0,1,2]:
+        slc = SlicePlot(pf, dim, 'Density')
+        fns.append(slc.save()[0])
+        prj = ProjectionPlot(pf, dim, 'Density')
+        fns.append(prj.save()[0])
+    normal = [1,1,1]
+    oaslc = OffAxisSlicePlot(pf, normal, 'Density')
+    fns.append(oaslc.save()[0])
+    oaprj = OffAxisProjectionPlot(pf, normal, 'Density')
+    fns.append(oaprj.save()[0])
+    teardown_func(fns)
+    



https://bitbucket.org/yt_analysis/yt/changeset/4db3f718f0cc/
changeset:   4db3f718f0cc
branch:      yt
user:        sskory
date:        2012-11-25 17:50:40
summary:     Adding Rockstar to the install script.
affected #:  3 files

diff -r 3151dc581d9aa37105740a276c3e6ef7905f55d1 -r 4db3f718f0cc1e87ba92156a402407fa302feabb doc/activate
--- a/doc/activate
+++ b/doc/activate
@@ -32,6 +32,11 @@
         export LD_LIBRARY_PATH
         unset _OLD_VIRTUAL_LD_LIBRARY_PATH
     fi
+    if [ -n "$_OLD_VIRTUAL_ROCKSTAR_DIR"] ; then
+        ROCKSTAR_DIR="$_OLD_VIRTUAL_ROCKSTAR_DIR"
+        export ROCKSTAR_DIR
+        unset _OLD_VIRTUAL_ROCKSTAR_DIR
+    fi
     ### End extra yt vars
 
     # This should detect bash and zsh, which have a hash command that must
@@ -76,6 +81,10 @@
 _OLD_VIRTUAL_LD_LIBRARY_PATH="$LD_LIBRARY_PATH"
 LD_LIBRARY_PATH="$VIRTUAL_ENV/lib:$LD_LIBRARY_PATH"
 export LD_LIBRARY_PATH
+
+_OLD_VIRTUAL_ROCKSTAR_DIR="$ROCKSTAR_DIR"
+ROCKSTAR_DIR="$VIRTUAL_ENV/src/Rockstar-0.99"
+export ROCKSTAR_DIR
 ### End extra env vars for yt
 
 # unset PYTHONHOME if set


diff -r 3151dc581d9aa37105740a276c3e6ef7905f55d1 -r 4db3f718f0cc1e87ba92156a402407fa302feabb doc/activate.csh
--- a/doc/activate.csh
+++ b/doc/activate.csh
@@ -2,7 +2,7 @@
 # You cannot run it directly.
 # Created by Davide Di Blasi <davidedb at gmail.com>.
 
-alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; test $?_OLD_VIRTUAL_YT_DEST != 0 && setenv YT_DEST "$_OLD_VIRTUAL_YT_DEST" && unset _OLD_VIRTUAL_YT_DEST; test $?_OLD_VIRTUAL_PYTHONPATH != 0 && setenv PYTHONPATH "$_OLD_VIRTUAL_PYTHONPATH" && unset _OLD_VIRTUAL_PYTHONPATH; test $?_OLD_VIRTUAL_LD_LIBRARY_PATH != 0 && setenv LD_LIBRARY_PATH "$_OLD_VIRTUAL_LD_LIBRARY_PATH" && unset _OLD_VIRTUAL_LD_LIBRARY_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate'
+alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; test $?_OLD_VIRTUAL_YT_DEST != 0 && setenv YT_DEST "$_OLD_VIRTUAL_YT_DEST" && unset _OLD_VIRTUAL_YT_DEST; test $?_OLD_VIRTUAL_PYTHONPATH != 0 && setenv PYTHONPATH "$_OLD_VIRTUAL_PYTHONPATH" && unset _OLD_VIRTUAL_PYTHONPATH; test $?_OLD_VIRTUAL_LD_LIBRARY_PATH != 0 && setenv LD_LIBRARY_PATH "$_OLD_VIRTUAL_LD_LIBRARY_PATH" && unset _OLD_VIRTUAL_LD_LIBRARY_PATH; test $?_OLD_VIRTUAL_ROCKSTAR_DIR != 0 && setenv ROCKSTAR_DIR "$_OLD_VIRTUAL_ROCKSTAR_DIR" && unset _OLD_VIRTUAL_ROCKSTAR_DIR; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate'
 
 # Unset irrelavent variables.
 deactivate nondestructive
@@ -33,6 +33,12 @@
 endif
 set _OLD_VIRTUAL_LD_LIBRARY_PATH="$LD_LIBRARY_PATH"
 setenv LD_LIBRARY_PATH "${VIRTUAL_ENV}/lib:${LD_LIBRARY_PATH}"
+
+if ($?ROCKSTAR_DIR == 0) then
+    setenv ROCKSTAR_DIR
+endif
+set _OLD_VIRTUAL_ROCKSTAR_DIR="$ROCKSTAR_DIR"
+setenv ROCKSTAR_DIR "${VIRTUAL_ENV}/src/Rockstar-0.99"
 ### End extra yt vars
 
 set _OLD_VIRTUAL_PROMPT="$prompt"


diff -r 3151dc581d9aa37105740a276c3e6ef7905f55d1 -r 4db3f718f0cc1e87ba92156a402407fa302feabb doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -433,6 +433,7 @@
 echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
 echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
 echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
+echo 'd91fa23d8f86c5f5b3df210731c14c92a2e3d999979ef2c4c67be3dda19a8a8d2363f8cd23611957280c17f61653b0e66f59d3f5515b6d443b22cddeaef86ab6  Rockstar-0.99.tar.gz' > Rockstar-0.99.tar.gz.sha512
 
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
@@ -457,6 +458,7 @@
 get_ytproject nose-1.2.1.tar.gz 
 get_ytproject python-hglib-0.2.tar.gz
 get_ytproject sympy-0.7.2.tar.gz
+get_ytproject Rockstar-0.99.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e bzip2-1.0.5/done ]
@@ -699,6 +701,18 @@
 do_setup_py sympy-0.7.2
 [ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
+# Now we build Rockstar and set its environment variable.
+if [ ! -e Rockstar-0.99/done ]
+then
+    [ ! -e Rockstar-0.99 ] && tar xfz Rockstar-0.99.tar.gz
+    echo "Building Rockstar"
+    cd Rockstar-0.99
+    ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
+    export ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar-0.99
+    touch done
+    cd ..
+fi
+
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"
 MY_PWD=`pwd`
 cd $YT_DIR



https://bitbucket.org/yt_analysis/yt/changeset/8ff6de25b99e/
changeset:   8ff6de25b99e
branch:      yt
user:        sskory
date:        2012-11-25 23:23:49
summary:     Making the Rockstar build optional at install time (default: enabled).
affected #:  1 file

diff -r 4db3f718f0cc1e87ba92156a402407fa302feabb -r 8ff6de25b99ed0ea5631853cacbb0a7604609d51 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -43,6 +43,7 @@
 INST_PYX=0      # Install PyX?  Sometimes PyX can be problematic without a
                 # working TeX installation.
 INST_0MQ=1      # Install 0mq (for IPython) and affiliated bindings?
+INST_ROCKSTAR=1 # Install the Rockstar halo finder?
 
 # If you've got YT some other place, set this to point to it.
 YT_DIR=""
@@ -702,15 +703,18 @@
 [ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
 # Now we build Rockstar and set its environment variable.
-if [ ! -e Rockstar-0.99/done ]
+if [ $INST_ROCKSTAR -eq 1 ]
 then
-    [ ! -e Rockstar-0.99 ] && tar xfz Rockstar-0.99.tar.gz
-    echo "Building Rockstar"
-    cd Rockstar-0.99
-    ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
-    export ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar-0.99
-    touch done
-    cd ..
+    if [ ! -e Rockstar-0.99/done ]
+    then
+        [ ! -e Rockstar-0.99 ] && tar xfz Rockstar-0.99.tar.gz
+        echo "Building Rockstar"
+        cd Rockstar-0.99
+        ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        export ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar-0.99
+        touch done
+        cd ..
+    fi
 fi
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"



https://bitbucket.org/yt_analysis/yt/changeset/9ff211d7f9a5/
changeset:   9ff211d7f9a5
branch:      yt
user:        sskory
date:        2012-11-26 17:06:31
summary:     Setting up a rockstar.cfg file.
affected #:  6 files

diff -r 8ff6de25b99ed0ea5631853cacbb0a7604609d51 -r 9ff211d7f9a5febbd70372acc7f8cfbc5031e8ec doc/activate
--- a/doc/activate
+++ b/doc/activate
@@ -32,11 +32,6 @@
         export LD_LIBRARY_PATH
         unset _OLD_VIRTUAL_LD_LIBRARY_PATH
     fi
-    if [ -n "$_OLD_VIRTUAL_ROCKSTAR_DIR"] ; then
-        ROCKSTAR_DIR="$_OLD_VIRTUAL_ROCKSTAR_DIR"
-        export ROCKSTAR_DIR
-        unset _OLD_VIRTUAL_ROCKSTAR_DIR
-    fi
     ### End extra yt vars
 
     # This should detect bash and zsh, which have a hash command that must
@@ -81,10 +76,6 @@
 _OLD_VIRTUAL_LD_LIBRARY_PATH="$LD_LIBRARY_PATH"
 LD_LIBRARY_PATH="$VIRTUAL_ENV/lib:$LD_LIBRARY_PATH"
 export LD_LIBRARY_PATH
-
-_OLD_VIRTUAL_ROCKSTAR_DIR="$ROCKSTAR_DIR"
-ROCKSTAR_DIR="$VIRTUAL_ENV/src/Rockstar-0.99"
-export ROCKSTAR_DIR
 ### End extra env vars for yt
 
 # unset PYTHONHOME if set


diff -r 8ff6de25b99ed0ea5631853cacbb0a7604609d51 -r 9ff211d7f9a5febbd70372acc7f8cfbc5031e8ec doc/activate.csh
--- a/doc/activate.csh
+++ b/doc/activate.csh
@@ -2,7 +2,7 @@
 # You cannot run it directly.
 # Created by Davide Di Blasi <davidedb at gmail.com>.
 
-alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; test $?_OLD_VIRTUAL_YT_DEST != 0 && setenv YT_DEST "$_OLD_VIRTUAL_YT_DEST" && unset _OLD_VIRTUAL_YT_DEST; test $?_OLD_VIRTUAL_PYTHONPATH != 0 && setenv PYTHONPATH "$_OLD_VIRTUAL_PYTHONPATH" && unset _OLD_VIRTUAL_PYTHONPATH; test $?_OLD_VIRTUAL_LD_LIBRARY_PATH != 0 && setenv LD_LIBRARY_PATH "$_OLD_VIRTUAL_LD_LIBRARY_PATH" && unset _OLD_VIRTUAL_LD_LIBRARY_PATH; test $?_OLD_VIRTUAL_ROCKSTAR_DIR != 0 && setenv ROCKSTAR_DIR "$_OLD_VIRTUAL_ROCKSTAR_DIR" && unset _OLD_VIRTUAL_ROCKSTAR_DIR; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate'
+alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; test $?_OLD_VIRTUAL_YT_DEST != 0 && setenv YT_DEST "$_OLD_VIRTUAL_YT_DEST" && unset _OLD_VIRTUAL_YT_DEST; test $?_OLD_VIRTUAL_PYTHONPATH != 0 && setenv PYTHONPATH "$_OLD_VIRTUAL_PYTHONPATH" && unset _OLD_VIRTUAL_PYTHONPATH; test $?_OLD_VIRTUAL_LD_LIBRARY_PATH != 0 && setenv LD_LIBRARY_PATH "$_OLD_VIRTUAL_LD_LIBRARY_PATH" && unset _OLD_VIRTUAL_LD_LIBRARY_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate'
 
 # Unset irrelevant variables.
 deactivate nondestructive
@@ -33,12 +33,6 @@
 endif
 set _OLD_VIRTUAL_LD_LIBRARY_PATH="$LD_LIBRARY_PATH"
 setenv LD_LIBRARY_PATH "${VIRTUAL_ENV}/lib:${LD_LIBRARY_PATH}"
-
-if ($?ROCKSTAR_DIR == 0) then
-    setenv ROCKSTAR_DIR
-endif
-set _OLD_VIRTUAL_ROCKSTAR_DIR="$ROCKSTAR_DIR"
-setenv ROCKSTAR_DIR "${VIRTUAL_ENV}/src/Rockstar-0.99"
 ### End extra yt vars
 
 set _OLD_VIRTUAL_PROMPT="$prompt"


diff -r 8ff6de25b99ed0ea5631853cacbb0a7604609d51 -r 9ff211d7f9a5febbd70372acc7f8cfbc5031e8ec doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -712,6 +712,7 @@
         cd Rockstar-0.99
         ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         export ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar-0.99
+        echo $ROCKSTAR_DIR > rockstar.cfg
         touch done
         cd ..
     fi


diff -r 8ff6de25b99ed0ea5631853cacbb0a7604609d51 -r 9ff211d7f9a5febbd70372acc7f8cfbc5031e8ec yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -41,7 +41,16 @@
 from os import path
 
 # Get some definitions from Rockstar directly.
-ROCKSTAR_DIR = environ['ROCKSTAR_DIR']
+if "ROCKSTAR_DIR" in os.environ:
+    ROCKSTAR_DIR = os.environ["ROCKSTAR_DIR"]
+elif os.path.exists("rockstar.cfg"):
+    ROCKSTAR_DIR = open("rockstar.cfg").read().strip()
+else:
+    print "Reading Rockstar location from rockstar.cfg failed."
+    print "Please place the base directory of your"
+    print "Rockstar install in rockstar.cfg and restart."
+    print "(ex: \"echo '/path/to/Rockstar-0.99' > rockstar.cfg\" )"
+    sys.exit(1)
 lines = file(path.join(ROCKSTAR_DIR, 'server.h'))
 READER_TYPE = None
 WRITER_TYPE = None


diff -r 8ff6de25b99ed0ea5631853cacbb0a7604609d51 -r 9ff211d7f9a5febbd70372acc7f8cfbc5031e8ec yt/analysis_modules/halo_finding/rockstar/setup.py
--- a/yt/analysis_modules/halo_finding/rockstar/setup.py
+++ b/yt/analysis_modules/halo_finding/rockstar/setup.py
@@ -9,7 +9,16 @@
     config = Configuration('rockstar',parent_package,top_path)
     config.make_config_py() # installs __config__.py
     #config.make_svn_version_py()
-    rd = os.environ["ROCKSTAR_DIR"]
+    if "ROCKSTAR_DIR" in os.environ:
+        rd = os.environ["ROCKSTAR_DIR"]
+    elif os.path.exists("rockstar.cfg"):
+        rd = open("rockstar.cfg").read().strip()
+    else:
+        print "Reading Rockstar location from rockstar.cfg failed."
+        print "Please place the base directory of your"
+        print "Rockstar install in rockstar.cfg and restart."
+        print "(ex: \"echo '/path/to/Rockstar-0.99' > rockstar.cfg\" )"
+        sys.exit(1)
     config.add_extension("rockstar_interface",
                          "yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx",
                          library_dirs=[rd],


diff -r 8ff6de25b99ed0ea5631853cacbb0a7604609d51 -r 9ff211d7f9a5febbd70372acc7f8cfbc5031e8ec yt/analysis_modules/halo_finding/setup.py
--- a/yt/analysis_modules/halo_finding/setup.py
+++ b/yt/analysis_modules/halo_finding/setup.py
@@ -12,6 +12,8 @@
     config.add_subpackage("parallel_hop")
     if "ROCKSTAR_DIR" in os.environ:
         config.add_subpackage("rockstar")
+    elif os.path.exists("rockstar.cfg"):
+        config.add_subpackage("rockstar")
     config.make_config_py() # installs __config__.py
     #config.make_svn_version_py()
     return config


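One wrinkle in the rockstar.py hunk above: the new fallback calls
os.environ, os.path, and sys.exit, but the surrounding file imports
only "from os import path", so the block needs "import os" and
"import sys" to actually run. A self-contained version of the same
lookup (a sketch, not the committed code):

    import os
    import sys

    # Prefer the environment variable; fall back to a rockstar.cfg in
    # the current directory, exactly as in the diff above.
    if "ROCKSTAR_DIR" in os.environ:
        ROCKSTAR_DIR = os.environ["ROCKSTAR_DIR"]
    elif os.path.exists("rockstar.cfg"):
        ROCKSTAR_DIR = open("rockstar.cfg").read().strip()
    else:
        sys.stderr.write(
            "Reading Rockstar location from rockstar.cfg failed.\n"
            "Place the base directory of your Rockstar install in\n"
            "rockstar.cfg, e.g.: echo '/path/to/Rockstar-0.99' > rockstar.cfg\n")
        sys.exit(1)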

https://bitbucket.org/yt_analysis/yt/changeset/999745ab95c6/
changeset:   999745ab95c6
branch:      yt
user:        sskory
date:        2012-11-26 17:14:42
summary:     Small changes to Rockstar setup stuff.
affected #:  3 files

diff -r 9ff211d7f9a5febbd70372acc7f8cfbc5031e8ec -r 999745ab95c6f80e6f87f22008d3c2ab5d7bbd4d doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -711,7 +711,7 @@
         echo "Building Rockstar"
         cd Rockstar-0.99
         ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        export ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar-0.99
+        ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar-0.99
         echo $ROCKSTAR_DIR > rockstar.cfg
         touch done
         cd ..


diff -r 9ff211d7f9a5febbd70372acc7f8cfbc5031e8ec -r 999745ab95c6f80e6f87f22008d3c2ab5d7bbd4d yt/analysis_modules/halo_finding/rockstar/setup.py
--- a/yt/analysis_modules/halo_finding/rockstar/setup.py
+++ b/yt/analysis_modules/halo_finding/rockstar/setup.py
@@ -9,11 +9,9 @@
     config = Configuration('rockstar',parent_package,top_path)
     config.make_config_py() # installs __config__.py
     #config.make_svn_version_py()
-    if "ROCKSTAR_DIR" in os.environ:
-        rd = os.environ["ROCKSTAR_DIR"]
-    elif os.path.exists("rockstar.cfg"):
+    try:
         rd = open("rockstar.cfg").read().strip()
-    else:
+    except IOError:
         print "Reading Rockstar location from rockstar.cfg failed."
         print "Please place the base directory of your"
         print "Rockstar install in rockstar.cfg and restart."


diff -r 9ff211d7f9a5febbd70372acc7f8cfbc5031e8ec -r 999745ab95c6f80e6f87f22008d3c2ab5d7bbd4d yt/analysis_modules/halo_finding/setup.py
--- a/yt/analysis_modules/halo_finding/setup.py
+++ b/yt/analysis_modules/halo_finding/setup.py
@@ -10,9 +10,7 @@
     config.add_subpackage("fof")
     config.add_subpackage("hop")
     config.add_subpackage("parallel_hop")
-    if "ROCKSTAR_DIR" in os.environ:
-        config.add_subpackage("rockstar")
-    elif os.path.exists("rockstar.cfg"):
+    if os.path.exists("rockstar.cfg"):
         config.add_subpackage("rockstar")
     config.make_config_py() # installs __config__.py
     #config.make_svn_version_py()


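After this commit setup.py consults only rockstar.cfg, and the file is
read relative to whatever directory the build runs from. A small
hypothetical helper with the same behavior, plus an abspath so the
stored location survives later chdir calls:

    import os
    import sys

    def read_rockstar_dir(cfg="rockstar.cfg"):
        # Hypothetical helper mirroring the try/except IOError above.
        try:
            rd = open(cfg).read().strip()
        except IOError:
            sys.stderr.write("Reading Rockstar location from %s failed.\n"
                             % cfg)
            sys.exit(1)
        return os.path.abspath(rd)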

https://bitbucket.org/yt_analysis/yt/changeset/9f7c233dfb07/
changeset:   9f7c233dfb07
branch:      yt
user:        sskory
date:        2012-11-26 17:30:12
summary:     Rockstar setup.
affected #:  1 file

diff -r 999745ab95c6f80e6f87f22008d3c2ab5d7bbd4d -r 9f7c233dfb077777e34275f6f70f53b3003c8e6b doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -703,7 +703,7 @@
 [ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
 # Now we build Rockstar and set its environment variable.
-if [ $INST_ROCKSTAR -eq 1]
+if [ $INST_ROCKSTAR -eq 1 ]
 then
     if [ ! -e Rockstar-0.99/done ]
     then
@@ -712,7 +712,7 @@
         cd Rockstar-0.99
         ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar-0.99
-        echo $ROCKSTAR_DIR > rockstar.cfg
+        echo $ROCKSTAR_DIR > ${YT_DIR}/rockstar.cfg
         touch done
         cd ..
     fi

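With rockstar.cfg now written into ${YT_DIR}, a quick post-install
sanity check might look like the sketch below. Both names are
assumptions: yt_dir stands for whatever ${YT_DIR} pointed at during
the install, and librockstar.so is the artifact Rockstar's "make lib"
target is expected to produce.

    import os

    yt_dir = "/path/to/yt"  # assumption: wherever ${YT_DIR} pointed
    cfg = os.path.join(yt_dir, "rockstar.cfg")
    rockstar_dir = open(cfg).read().strip()
    lib = os.path.join(rockstar_dir, "librockstar.so")
    assert os.path.exists(lib), "Rockstar library not found: %s" % lib
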
Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
it because you have the commit notification service enabled for this
repository.


