[yt-svn] commit/yt: 253 new changesets

commits-noreply at bitbucket.org
Wed Aug 21 11:50:29 PDT 2013


253 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/20a074192f92/
Changeset:   20a074192f92
Branch:      yt-3.0
User:        sleitner
Date:        2013-04-08 05:32:11
Summary:     some hacks and fixes for star formation histories
Affected #:  4 files

diff -r 382704af8a3f86e440e3d6a3bd25d8e4671e3412 -r 20a074192f9257cc0774b09d67a6921e04a9bcac yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -105,16 +105,19 @@
         """
         # Pick out the stars.
         if self.mode == 'data_source':
-            ct = self._data_source["creation_time"]
+            ct = self._data_source["stars","particle_age"]
+            if ct == None :
+                print 'data source must have particle_age!'
+                sys.exit(1)
             ct_stars = ct[ct > 0]
-            mass_stars = self._data_source["ParticleMassMsun"][ct > 0]
+            mass_stars = self._data_source["stars", "ParticleMassMsun"][ct > 0]
         elif self.mode == 'provided':
             ct_stars = self.star_creation_time
             mass_stars = self.star_mass
         # Find the oldest stars in units of code time.
         tmin= min(ct_stars)
         # Multiply the end to prevent numerical issues.
-        self.time_bins = np.linspace(tmin*0.99, self._pf.current_time,
+        self.time_bins = np.linspace(tmin*1.01, self._pf.current_time,
             num = self.bin_count + 1)
         # Figure out which bins the stars go into.
         inds = np.digitize(ct_stars, self.time_bins) - 1
@@ -127,7 +130,7 @@
         for index in xrange(self.bin_count):
             self.cum_mass_bins[index+1] += self.cum_mass_bins[index]
         # We will want the time taken between bins.
-        self.time_bins_dt = self.time_bins[1:] - self.time_bins[:-1]
+        self.time_bins_dt = self.time_bins[:-1] - self.time_bins[1:]
     
     def attach_arrays(self):
         """
@@ -143,7 +146,7 @@
                 vol = ds.volume('mpc')
         elif self.mode == 'provided':
             vol = self.volume
-        tc = self._pf["Time"]
+        tc = self._pf["Time"] #time to seconds?
         self.time = []
         self.lookback_time = []
         self.redshift = []
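
Aside: the binning above amounts to a cumulative star-formation history: creation times are digitized into bins and the per-bin stellar mass is divided by the bin width. A minimal, self-contained sketch of that logic (plain NumPy; the function and argument names are illustrative, not yt API):

    import numpy as np

    def binned_sfr(creation_time, star_mass, n_bins=100):
        # Keep only real stars (positive creation time).
        sel = creation_time > 0
        ct, mass = creation_time[sel], star_mass[sel]
        # Pad the lower edge slightly so the oldest star lands in bin 0.
        bins = np.linspace(ct.min() * 0.99, ct.max(), n_bins + 1)
        inds = np.clip(np.digitize(ct, bins) - 1, 0, n_bins - 1)
        mass_per_bin = np.bincount(inds, weights=mass, minlength=n_bins)
        dt = bins[1:] - bins[:-1]      # positive bin widths
        return mass_per_bin / dt       # mass formed per unit time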

diff -r 382704af8a3f86e440e3d6a3bd25d8e4671e3412 -r 20a074192f9257cc0774b09d67a6921e04a9bcac yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -962,8 +962,10 @@
         the container, so this may vary very slightly
         from what might be expected from the geometric volume.
         """
-        return self.quantities["TotalQuantity"]("CellVolume")[0] * \
-            (self.pf[unit] / self.pf['cm']) ** 3.0
+#        return self.quantities["TotalQuantity"]("CellVolume")[0] * \
+#            (self.pf[unit] / self.pf['cm']) ** 3.0
+        print 'data_containers.py bad fix to volume to prevent data_size like issue for ARTIO'
+        return 1
 
 def _reconstruct_object(*args, **kwargs):
     pfid = args[0]
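
The hack above replaces a unit-converted total cell volume with a constant. The intended arithmetic in the commented-out lines is just a cubed length-unit ratio; schematically (the constants below are illustrative, not yt API):

    # Convert a total cell volume from cm**3 to Mpc**3.
    cm_per_mpc = 3.0857e24                 # cm per Mpc
    vol_cm3 = 8.0e73                       # hypothetical TotalQuantity result
    vol_mpc3 = vol_cm3 * (1.0 / cm_per_mpc) ** 3.0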

diff -r 382704af8a3f86e440e3d6a3bd25d8e4671e3412 -r 20a074192f9257cc0774b09d67a6921e04a9bcac yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -341,7 +341,7 @@
                             if selected_mass[ispec] :
                                 count = len(data[selected_mass[ispec]])
                                 data[selected_mass[ispec]].resize(count+1)
-                                data[selected_mass[ispec]][count] = self.parameters["particle_species_mass"]
+                                data[selected_mass[ispec]][count] = self.parameters["particle_species_mass"][0]
                         
                     status = artio_particle_read_species_end( self.handle )
                     check_artio_status(status)

diff -r 382704af8a3f86e440e3d6a3bd25d8e4671e3412 -r 20a074192f9257cc0774b09d67a6921e04a9bcac yt/frontends/artio/fields.py
--- a/yt/frontends/artio/fields.py
+++ b/yt/frontends/artio/fields.py
@@ -295,12 +295,12 @@
 
 #add_artio_field("creation_time", function=NullFunc, particle_type=True)
 def _particle_age(field, data):
-    pa = b2t(data['creation_time'])
+    pa = b2t(data['stars','creation_time'])*1e9*31556926
 #    tr = np.zeros(pa.shape,dtype='float')-1.0
 #    tr[pa>0] = pa[pa>0]
     tr = pa
     return tr
-add_field("particle_age", function=_particle_age, units=r"\rm{s}",
+add_field(("stars","particle_age"), function=_particle_age, units=r"\rm{s}",
           particle_type=True)
 
 
@@ -416,9 +416,9 @@
 
 def b2t(tb, n=1e2, logger=None, **kwargs):
     tb = np.array(tb)
-    if isinstance(tb, 1.1):
-        return a2t(b2a(tb))
     if tb.shape == ():
+        return None 
+    if len(tb) == 1: 
         return a2t(b2a(tb))
     if len(tb) < n:
         n = len(tb)
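
The shape checks added here distinguish 0-d NumPy arrays (what you get from scalar input) from length-1 arrays, which behave differently; a quick illustration of the distinction (plain NumPy):

    import numpy as np

    a = np.array(5.0)     # 0-d: a.shape == (), len(a) raises TypeError
    b = np.array([5.0])   # 1-d, length 1: b.shape == (1,)
    assert a.shape == () and b.shape == (1,)
    # np.atleast_1d gives a length-1 view of a 0-d array,
    # letting one code path serve both cases.
    assert len(np.atleast_1d(a)) == 1

The follow-up changeset ec3681a46d72 below switches to np.atleast_1d so that true scalars are converted rather than returning None.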


https://bitbucket.org/yt_analysis/yt/commits/2314f9d71cc9/
Changeset:   2314f9d71cc9
Branch:      yt-3.0
User:        sleitner
Date:        2013-04-08 05:36:49
Summary:     increase max hash table size (for isolated disk)
Affected #:  2 files

diff -r 20a074192f9257cc0774b09d67a6921e04a9bcac -r 2314f9d71cc900225418a03b4ec6a3842149d6de yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -2504,7 +2504,7 @@
             if dm_only:
                 select = self._get_dm_indices()
                 total_mass = \
-                    self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
+                    self.comm.mpi_allreduce((self._data_source['all', "ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
             else:
                 total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("ParticleMassMsun")[0], op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles

diff -r 20a074192f9257cc0774b09d67a6921e04a9bcac -r 2314f9d71cc900225418a03b4ec6a3842149d6de yt/analysis_modules/halo_finding/hop/hop_hop.c
--- a/yt/analysis_modules/halo_finding/hop/hop_hop.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_hop.c
@@ -443,7 +443,7 @@
 	    /* Else, this slot was full, go to the next one */
 	    hp++;
 	    if (hp>=smx->hash+smx->nHashLength) hp = smx->hash;
-	    if (++count>1000) {
+	    if (++count>1000000) {
 		fprintf(stderr,"Hash Table is too full.\n");
 		exit(1);
 	    }
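
The hop change raises the give-up threshold in the linear-probing loop of an open-addressed hash table, i.e. how many consecutive full slots are tolerated before bailing out. The probe pattern, reduced to a toy sketch (Python, illustrative only; table holds None or (key, value) pairs):

    def probe(table, key, max_probes=1000000):
        """Linear probing with wrap-around and a probe cap."""
        n = len(table)
        hp = hash(key) % n
        for _ in range(max_probes):
            if table[hp] is None or table[hp][0] == key:
                return hp              # empty slot or matching key
            hp = (hp + 1) % n          # slot full: try the next one
        raise RuntimeError("Hash Table is too full.")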


https://bitbucket.org/yt_analysis/yt/commits/c8df14c9eaea/
Changeset:   c8df14c9eaea
Branch:      yt-3.0
User:        sleitner
Date:        2013-04-15 16:02:33
Summary:     Merged yt_analysis/yt-3.0 into yt-3.0
Affected #:  3 files

diff -r 2314f9d71cc900225418a03b4ec6a3842149d6de -r c8df14c9eaea914a66426ec2063672bda16b5d3a yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -55,7 +55,7 @@
         ptypes = list(set([ftype for ftype, fname in fields]))
         fields = list(set(fields))
         if len(ptypes) > 1: raise NotImplementedError
-        pfields = [(ptypes[0], "position_%s" % ax) for ax in 'xyz']
+        pfields = [(ptypes[0], "particle_position_%s" % ax) for ax in 'xyz']
         size = 0
         for chunk in chunks:
             data = self._read_chunk_data(chunk, pfields, 'active', 

diff -r 2314f9d71cc900225418a03b4ec6a3842149d6de -r c8df14c9eaea914a66426ec2063672bda16b5d3a yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -116,6 +116,9 @@
             self.particle_field_offsets = {}
             return
         f = open(self.part_fn, "rb")
+        f.seek(0, os.SEEK_END)
+        flen = f.tell()
+        f.seek(0)
         hvals = {}
         attrs = ( ('ncpu', 1, 'I'),
                   ('ndim', 1, 'I'),
@@ -143,12 +146,15 @@
         if hvals["nstar_tot"] > 0:
             particle_fields += [("particle_age", "d"),
                                 ("particle_metallicity", "d")]
-        field_offsets = {particle_fields[0][0]: f.tell()}
-        for field, vtype in particle_fields[1:]:
+        field_offsets = {}
+        _pfields = {}
+        for field, vtype in particle_fields:
+            if f.tell() >= flen: break
+            field_offsets[field] = f.tell()
+            _pfields[field] = vtype
             fpu.skip(f, 1)
-            field_offsets[field] = f.tell()
         self.particle_field_offsets = field_offsets
-        self.particle_field_types = dict(particle_fields)
+        self.particle_field_types = _pfields
 
     def _read_amr_header(self):
         hvals = {}

diff -r 2314f9d71cc900225418a03b4ec6a3842149d6de -r c8df14c9eaea914a66426ec2063672bda16b5d3a yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -91,6 +91,8 @@
     "particle_mass",
     "particle_identifier",
     "particle_refinement_level",
+    "particle_age",
+    "particle_metallicity",
 ]
 
 for f in known_ramses_particle_fields:
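
A note on the RAMSES reader change above: each particle field's byte offset is recorded before its record is skipped, and indexing stops early if the file ends, so outputs without the optional age/metallicity records no longer get bogus offsets. The pattern, schematically (skip_record is a stand-in for yt's fpu.skip, and the header read is omitted):

    import os

    def index_particle_fields(f, fields, skip_record):
        """Map field name -> byte offset; tolerate missing trailing records."""
        f.seek(0, os.SEEK_END)
        flen = f.tell()                  # total file length
        f.seek(0)
        offsets, types = {}, {}
        for name, vtype in fields:
            if f.tell() >= flen:         # fewer records than expected
                break
            offsets[name] = f.tell()
            types[name] = vtype
            skip_record(f)               # advance past one Fortran record
        return offsets, types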


https://bitbucket.org/yt_analysis/yt/commits/5154fec5bf32/
Changeset:   5154fec5bf32
Branch:      yt-3.0
User:        sleitner
Date:        2013-04-15 16:20:32
Summary:     current_time, smallest_dx, b2t fixes. b2t now in seconds
Affected #:  1 file

diff -r 2314f9d71cc900225418a03b4ec6a3842149d6de -r 5154fec5bf3280048fd25d5be4f094a0a03681d0 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -32,7 +32,7 @@
     artio_is_valid, artio_fileset
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
-from .fields import ARTIOFieldInfo, KnownARTIOFields
+from .fields import ARTIOFieldInfo, KnownARTIOFields, b2t
 
 from yt.funcs import *
 from yt.geometry.geometry_handler import \
@@ -145,7 +145,7 @@
         """
         Returns (in code units) the smallest cell size in the simulation.
         """
-        return (self.parameter_file.domain_width/(2**self.max_level)).min()
+        return  1.0/(2**self.max_level)
 
     def convert(self, unit):
         return self.parameter_file.conversion_factors[unit]
@@ -391,7 +391,7 @@
             list(set(art_to_yt[s] for s in
                      self.artio_parameters["particle_species_labels"])))
 
-        self.current_time = self.artio_parameters["tl"][0]
+        self.current_time = b2t(self.artio_parameters["tl"][0])
 
         # detect cosmology
         if "abox" in self.artio_parameters:


https://bitbucket.org/yt_analysis/yt/commits/ec3681a46d72/
Changeset:   ec3681a46d72
Branch:      yt-3.0
User:        sleitner
Date:        2013-04-15 16:25:52
Summary:     oops b2t fix
Affected #:  1 file

diff -r 5154fec5bf3280048fd25d5be4f094a0a03681d0 -r ec3681a46d72d80cf2b296032f292608dcc02052 yt/frontends/artio/fields.py
--- a/yt/frontends/artio/fields.py
+++ b/yt/frontends/artio/fields.py
@@ -295,7 +295,7 @@
 
 #add_artio_field("creation_time", function=NullFunc, particle_type=True)
 def _particle_age(field, data):
-    pa = b2t(data['stars','creation_time'])*1e9*31556926
+    pa = b2t(data['stars','creation_time'])
 #    tr = np.zeros(pa.shape,dtype='float')-1.0
 #    tr[pa>0] = pa[pa>0]
     tr = pa
@@ -416,10 +416,10 @@
 
 def b2t(tb, n=1e2, logger=None, **kwargs):
     tb = np.array(tb)
+    if len(np.atleast_1d(tb)) == 1: 
+        return a2t(b2a(tb))
     if tb.shape == ():
         return None 
-    if len(tb) == 1: 
-        return a2t(b2a(tb))
     if len(tb) < n:
         n = len(tb)
     age_min = a2t(b2a(tb.max(), **kwargs), **kwargs)
@@ -434,7 +434,7 @@
     ages = np.array(ages)
     fb2t = np.interp(tb, tbs, ages)
     #fb2t = interp1d(tbs,ages)
-    return fb2t
+    return fb2t*1e9*31556926
 
 
 def spread_ages(ages, logger=None, spread=.0e7*365*24*3600):
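
With this commit b2t returns seconds: it samples the conformal-time range, evaluates the exact age at each sample, interpolates with np.interp, and multiplies by 1e9 * 31556926 (seconds per Gyr). The table-plus-interpolation idea, as a minimal sketch (a2t and b2a stand in for the real cosmology helpers):

    import numpy as np

    SEC_PER_GYR = 1e9 * 31556926

    def b2t_sketch(tb, a2t, b2a, n=100):
        # Assumes tb spans a range; length-1 input is special-cased upstream.
        tb = np.asarray(tb, dtype="float64")
        tbs = np.linspace(tb.min(), tb.max(), n)       # sample points
        ages = np.array([a2t(b2a(t)) for t in tbs])    # exact ages (Gyr)
        return np.interp(tb, tbs, ages) * SEC_PER_GYR  # ages in seconds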


https://bitbucket.org/yt_analysis/yt/commits/94eeca505250/
Changeset:   94eeca505250
Branch:      yt-3.0
User:        sleitner
Date:        2013-04-15 17:10:01
Summary:     merge
Affected #:  3 files

diff -r ec3681a46d72d80cf2b296032f292608dcc02052 -r 94eeca5052506364a8db0afcc76d53ed2f36ff7b yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -55,7 +55,7 @@
         ptypes = list(set([ftype for ftype, fname in fields]))
         fields = list(set(fields))
         if len(ptypes) > 1: raise NotImplementedError
-        pfields = [(ptypes[0], "position_%s" % ax) for ax in 'xyz']
+        pfields = [(ptypes[0], "particle_position_%s" % ax) for ax in 'xyz']
         size = 0
         for chunk in chunks:
             data = self._read_chunk_data(chunk, pfields, 'active', 

diff -r ec3681a46d72d80cf2b296032f292608dcc02052 -r 94eeca5052506364a8db0afcc76d53ed2f36ff7b yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -116,6 +116,9 @@
             self.particle_field_offsets = {}
             return
         f = open(self.part_fn, "rb")
+        f.seek(0, os.SEEK_END)
+        flen = f.tell()
+        f.seek(0)
         hvals = {}
         attrs = ( ('ncpu', 1, 'I'),
                   ('ndim', 1, 'I'),
@@ -143,12 +146,15 @@
         if hvals["nstar_tot"] > 0:
             particle_fields += [("particle_age", "d"),
                                 ("particle_metallicity", "d")]
-        field_offsets = {particle_fields[0][0]: f.tell()}
-        for field, vtype in particle_fields[1:]:
+        field_offsets = {}
+        _pfields = {}
+        for field, vtype in particle_fields:
+            if f.tell() >= flen: break
+            field_offsets[field] = f.tell()
+            _pfields[field] = vtype
             fpu.skip(f, 1)
-            field_offsets[field] = f.tell()
         self.particle_field_offsets = field_offsets
-        self.particle_field_types = dict(particle_fields)
+        self.particle_field_types = _pfields
 
     def _read_amr_header(self):
         hvals = {}

diff -r ec3681a46d72d80cf2b296032f292608dcc02052 -r 94eeca5052506364a8db0afcc76d53ed2f36ff7b yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -91,6 +91,8 @@
     "particle_mass",
     "particle_identifier",
     "particle_refinement_level",
+    "particle_age",
+    "particle_metallicity",
 ]
 
 for f in known_ramses_particle_fields:


https://bitbucket.org/yt_analysis/yt/commits/b75275862000/
Changeset:   b75275862000
Branch:      yt-3.0
User:        drudd
Date:        2013-03-19 03:26:28
Summary:     Removed size and shape parameters from YTSelectionContainer, made size a property, and stopped caching data_size
Affected #:  1 file

diff -r dba8e90381b5be3b028790fc00201cca02fd936c -r b752758620000799b9c38818ddc8be4301088aa0 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -424,8 +424,6 @@
     _sort_by = None
     _selector = None
     _current_chunk = None
-    size = None
-    shape = None
 
     def __init__(self, *args, **kwargs):
         super(YTSelectionContainer, self).__init__(*args, **kwargs)
@@ -522,17 +520,19 @@
         # There are several items that need to be swapped out
         # field_data, size, shape
         old_field_data, self.field_data = self.field_data, YTFieldData()
-        old_size, self.size = self.size, chunk.data_size
         old_chunk, self._current_chunk = self._current_chunk, chunk
         old_locked, self._locked = self._locked, False
-        #self.shape = (self.size,)
         yield
         self.field_data = old_field_data
-        self.size = old_size
-        #self.shape = (old_size,)
         self._current_chunk = old_chunk
         self._locked = old_locked
 
+    @property   
+    def size(self) :
+        if self._current_chunk is None :
+            self.hierarchy._identify_base_chunk(self)
+        return self_current_chunk.data_size
+
     @property
     def icoords(self):
         if self._current_chunk is None:
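
As committed, the new property references self_current_chunk (note the missing dot) and is commented out again in the next changeset; the lazy-evaluation pattern it aims at would read (a sketch, not the final yt code):

    @property
    def size(self):
        # Build the base chunk on first access, then report its size.
        if self._current_chunk is None:
            self.hierarchy._identify_base_chunk(self)
        return self._current_chunk.data_size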


https://bitbucket.org/yt_analysis/yt/commits/fb14dd338289/
Changeset:   fb14dd338289
Branch:      yt-3.0
User:        drudd
Date:        2013-03-19 06:16:02
Summary:     Trying to remove accesses to data.shape, data.size
Affected #:  2 files

diff -r b752758620000799b9c38818ddc8be4301088aa0 -r fb14dd338289ba004f2d42bfe4f5e7f5160809c7 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -527,11 +527,12 @@
         self._current_chunk = old_chunk
         self._locked = old_locked
 
-    @property   
-    def size(self) :
-        if self._current_chunk is None :
-            self.hierarchy._identify_base_chunk(self)
-        return self_current_chunk.data_size
+#    @property   
+#    def size(self) :
+#        if self._current_chunk is None :
+##            self.hierarchy._identify_base_chunk(self)
+#            return 0
+#        return self._current_chunk.data_size
 
     @property
     def icoords(self):

diff -r b752758620000799b9c38818ddc8be4301088aa0 -r fb14dd338289ba004f2d42bfe4f5e7f5160809c7 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -95,7 +95,7 @@
           display_field = False)
 
 def _Ones(field, data):
-    return np.ones(data.shape, dtype='float64')
+    return np.ones(data.ActiveDimensions, dtype='float64')
 add_field("Ones", function=_Ones,
           projection_conversion="unitary",
           display_field = False)


https://bitbucket.org/yt_analysis/yt/commits/3a1a2a954578/
Changeset:   3a1a2a954578
Branch:      yt-3.0
User:        drudd
Date:        2013-04-04 22:43:48
Summary:     Merged yt_analysis/yt-3.0 into yt-3.0
Affected #:  25 files

diff -r fb14dd338289ba004f2d42bfe4f5e7f5160809c7 -r 3a1a2a95457855b229396b14b67aeba15d2a3f27 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -5,11 +5,16 @@
 hdf5.cfg
 png.cfg
 yt_updater.log
+yt/frontends/artio/_artio_caller.c
 yt/frontends/ramses/_ramses_reader.cpp
+yt/frontends/sph/smoothing_kernel.c
+yt/geometry/oct_container.c
+yt/geometry/selection_routines.c
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h
 yt/utilities/libconfig_wrapper.c
 yt/utilities/spatial/ckdtree.c
+yt/utilities/lib/alt_ray_tracers.c
 yt/utilities/lib/CICDeposit.c
 yt/utilities/lib/ContourFinding.c
 yt/utilities/lib/DepthFirstOctree.c

diff -r fb14dd338289ba004f2d42bfe4f5e7f5160809c7 -r 3a1a2a95457855b229396b14b67aeba15d2a3f27 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1059,7 +1059,7 @@
 
     _fields = ["particle_position_%s" % ax for ax in 'xyz']
 
-    def __init__(self, data_source, dm_only=True):
+    def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
         *dm_only* is set, only run it on the dark matter particles, otherwise

diff -r fb14dd338289ba004f2d42bfe4f5e7f5160809c7 -r 3a1a2a95457855b229396b14b67aeba15d2a3f27 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -210,6 +210,8 @@
         """
         Deletes a field
         """
+        if key  not in self.field_data:
+            key = self._determine_fields(key)[0]
         del self.field_data[key]
 
     def _generate_field(self, field):

diff -r fb14dd338289ba004f2d42bfe4f5e7f5160809c7 -r 3a1a2a95457855b229396b14b67aeba15d2a3f27 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -678,3 +678,37 @@
     return [np.sum(totals[:,i]) for i in range(n_fields)]
 add_quantity("TotalQuantity", function=_TotalQuantity,
                 combine_function=_combTotalQuantity, n_ret=2)
+
+def _ParticleDensityCenter(data,nbins=3,particle_type="all"):
+    """
+    Find the center of the particle density
+    by histogramming the particles iteratively.
+    """
+    pos = [data[(particle_type,"particle_position_%s"%ax)] for ax in "xyz"]
+    pos = np.array(pos).T
+    mas = data[(particle_type,"particle_mass")]
+    calc_radius= lambda x,y:np.sqrt(np.sum((x-y)**2.0,axis=1))
+    density = 0
+    if pos.shape[0]==0:
+        return -1.0,[-1.,-1.,-1.]
+    while pos.shape[0] > 1:
+        table,bins=np.histogramdd(pos,bins=nbins, weights=mas)
+        bin_size = min((np.max(bins,axis=1)-np.min(bins,axis=1))/nbins)
+        centeridx = np.where(table==table.max())
+        le = np.array([bins[0][centeridx[0][0]],
+                       bins[1][centeridx[1][0]],
+                       bins[2][centeridx[2][0]]])
+        re = np.array([bins[0][centeridx[0][0]+1],
+                       bins[1][centeridx[1][0]+1],
+                       bins[2][centeridx[2][0]+1]])
+        center = 0.5*(le+re)
+        idx = calc_radius(pos,center)<bin_size
+        pos, mas = pos[idx],mas[idx]
+        density = max(density,mas.sum()/bin_size**3.0)
+    return density, center
+def _combParticleDensityCenter(data,densities,centers):
+    i = np.argmax(densities)
+    return densities[i],centers[i]
+
+add_quantity("ParticleDensityCenter",function=_ParticleDensityCenter,
+             combine_function=_combParticleDensityCenter,n_ret=2)
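
ParticleDensityCenter works by iterative re-histogramming: each pass bins the particles, finds the densest bin, keeps only the particles within one bin width of that bin's center, and repeats until a single particle remains, tracking the highest density seen. Hypothetical usage against a yt-3.0-era data object dd:

    # Returns (peak density, [x, y, z] center) in code units.
    density, center = dd.quantities["ParticleDensityCenter"](nbins=3)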

diff -r fb14dd338289ba004f2d42bfe4f5e7f5160809c7 -r 3a1a2a95457855b229396b14b67aeba15d2a3f27 yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -33,6 +33,7 @@
     pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
     pf.current_redshift = 0.0001
     pf.hubble_constant = 0.7
+    pf.omega_matter = 0.27
     for unit in mpc_conversion:
         pf.units[unit+'h'] = pf.units[unit]
         pf.units[unit+'cm'] = pf.units[unit]
@@ -72,7 +73,7 @@
         if not field.particle_type:
             assert_equal(v1, dd1["gas", self.field_name])
         if not needs_spatial:
-            assert_equal(v1, conv*field._function(field, dd2))
+            assert_array_almost_equal_nulp(v1, conv*field._function(field, dd2), 4)
         if not skip_grids:
             for g in pf.h.grids:
                 g.field_parameters.update(_sample_parameters)
@@ -80,7 +81,7 @@
                 v1 = g[self.field_name]
                 g.clear_data()
                 g.field_parameters.update(_sample_parameters)
-                assert_equal(v1, conv*field._function(field, g))
+                assert_array_almost_equal_nulp(v1, conv*field._function(field, g), 4)
 
 def test_all_fields():
     for field in FieldInfo:

diff -r fb14dd338289ba004f2d42bfe4f5e7f5160809c7 -r 3a1a2a95457855b229396b14b67aeba15d2a3f27 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -1,4 +1,5 @@
 """
+
 The basic field info container resides here.  These classes, code specific and
 universal, are the means by which we access fields across YT, both derived and
 native.
@@ -754,8 +755,9 @@
     for i, ax in enumerate('xyz'):
         np.subtract(data["%s%s" % (field_prefix, ax)], center[i], r)
         if data.pf.periodicity[i] == True:
-            np.subtract(DW[i], r, rdw)
             np.abs(r, r)
+            np.subtract(r, DW[i], rdw)
+            np.abs(rdw, rdw)
             np.minimum(r, rdw, r)
         np.power(r, 2.0, r)
         np.add(radius, r, radius)
@@ -946,7 +948,7 @@
                  data["particle_position_x"].size,
                  blank, np.array(data.LeftEdge).astype(np.float64),
                  np.array(data.ActiveDimensions).astype(np.int32),
-                 np.float64(data['dx']))
+                 just_one(data['dx']))
     return blank
 add_field("particle_density", function=_pdensity,
           validators=[ValidateGridType()], convert_function=_convertDensity,

diff -r fb14dd338289ba004f2d42bfe4f5e7f5160809c7 -r 3a1a2a95457855b229396b14b67aeba15d2a3f27 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -30,6 +30,8 @@
 import stat
 import weakref
 import cStringIO
+import difflib
+import glob
 
 from yt.funcs import *
 from yt.geometry.oct_geometry_handler import \
@@ -37,9 +39,9 @@
 from yt.geometry.geometry_handler import \
     GeometryHandler, YTDataChunk
 from yt.data_objects.static_output import \
-      StaticOutput
+    StaticOutput
 from yt.geometry.oct_container import \
-    RAMSESOctreeContainer
+    ARTOctreeContainer
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, NullFunc
 from .fields import \
@@ -52,20 +54,15 @@
     get_box_grids_level
 import yt.utilities.lib as amr_utils
 
-from .definitions import *
-from .io import _read_frecord
-from .io import _read_record
-from .io import _read_struct
+from yt.frontends.art.definitions import *
+from yt.utilities.fortran_utils import *
 from .io import _read_art_level_info
 from .io import _read_child_mask_level
 from .io import _read_child_level
 from .io import _read_root_level
-from .io import _read_record_size
-from .io import _skip_record
 from .io import _count_art_octs
 from .io import b2t
 
-
 import yt.frontends.ramses._ramses_reader as _ramses_reader
 
 from .fields import ARTFieldInfo, KnownARTFields
@@ -80,13 +77,9 @@
 from yt.utilities.physical_constants import \
     mass_hydrogen_cgs, sec_per_Gyr
 
+
 class ARTGeometryHandler(OctreeGeometryHandler):
-    def __init__(self,pf,data_style="art"):
-        """
-        Life is made simpler because we only have one AMR file
-        and one domain. However, we are matching to the RAMSES
-        multi-domain architecture.
-        """
+    def __init__(self, pf, data_style="art"):
         self.fluid_field_list = fluid_fields
         self.data_style = data_style
         self.parameter_file = weakref.proxy(pf)
@@ -94,7 +87,16 @@
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.max_level = pf.max_level
         self.float_type = np.float64
-        super(ARTGeometryHandler,self).__init__(pf,data_style)
+        super(ARTGeometryHandler, self).__init__(pf, data_style)
+
+    def get_smallest_dx(self):
+        """
+        Returns (in code units) the smallest cell size in the simulation.
+        """
+        # Overloaded
+        pf = self.parameter_file
+        return (1.0/pf.domain_dimensions.astype('f8') /
+                (2**self.max_level)).min()
 
     def _initialize_oct_handler(self):
         """
@@ -102,23 +104,37 @@
         allocate the requisite memory in the oct tree
         """
         nv = len(self.fluid_field_list)
-        self.domains = [ARTDomainFile(self.parameter_file,1,nv)]
+        self.domains = [ARTDomainFile(self.parameter_file, l+1, nv, l)
+                        for l in range(self.pf.max_level)]
         self.octs_per_domain = [dom.level_count.sum() for dom in self.domains]
         self.total_octs = sum(self.octs_per_domain)
-        self.oct_handler = RAMSESOctreeContainer(
-            self.parameter_file.domain_dimensions/2, #dd is # of root cells
+        self.oct_handler = ARTOctreeContainer(
+            self.parameter_file.domain_dimensions/2,  # dd is # of root cells
             self.parameter_file.domain_left_edge,
             self.parameter_file.domain_right_edge)
         mylog.debug("Allocating %s octs", self.total_octs)
         self.oct_handler.allocate_domains(self.octs_per_domain)
         for domain in self.domains:
-            domain._read_amr(self.oct_handler)
+            if domain.domain_level == 0:
+                domain._read_amr_root(self.oct_handler)
+            else:
+                domain._read_amr_level(self.oct_handler)
 
     def _detect_fields(self):
         self.particle_field_list = particle_fields
-        self.field_list = set(fluid_fields + particle_fields + particle_star_fields)
+        self.field_list = set(fluid_fields + particle_fields +
+                              particle_star_fields)
         self.field_list = list(self.field_list)
-    
+        # now generate all of the possible particle fields
+        if "wspecies" in self.parameter_file.parameters.keys():
+            wspecies = self.parameter_file.parameters['wspecies']
+            nspecies = len(wspecies)
+            self.parameter_file.particle_types = ["all", "darkmatter", "stars"]
+            for specie in range(nspecies):
+                self.parameter_file.particle_types.append("specie%i" % specie)
+        else:
+            self.parameter_file.particle_types = []
+
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         super(ARTGeometryHandler, self)._setup_classes(dd)
@@ -127,19 +143,23 @@
     def _identify_base_chunk(self, dobj):
         """
         Take the passed in data source dobj, and use its embedded selector
-        to calculate the domain mask, build the reduced domain 
+        to calculate the domain mask, build the reduced domain
         subsets and oct counts. Attach this information to dobj.
         """
         if getattr(dobj, "_chunk_info", None) is None:
-            #Get all octs within this oct handler
+            # Get all octs within this oct handler
             mask = dobj.selector.select_octs(self.oct_handler)
-            if mask.sum()==0:
+            if mask.sum() == 0:
                 mylog.debug("Warning: selected zero octs")
             counts = self.oct_handler.count_cells(dobj.selector, mask)
-            #For all domains, figure out how many counts we have 
-            #and build a subset=mask of domains 
-            subsets = [ARTDomainSubset(d, mask, c)
-                       for d, c in zip(self.domains, counts) if c > 0]
+            # For all domains, figure out how many counts we have
+            # and build a subset=mask of domains
+            subsets = []
+            for d, c in zip(self.domains, counts):
+                if c < 1:
+                    continue
+                subset = ARTDomainSubset(d, mask, c, d.domain_level)
+                subsets.append(subset)
             dobj._chunk_info = subsets
             dobj.size = sum(counts)
             dobj.shape = (dobj.size,)
@@ -147,8 +167,8 @@
 
     def _chunk_all(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        #We pass the chunk both the current chunk and list of chunks,
-        #as well as the referring data source
+        # We pass the chunk both the current chunk and list of chunks,
+        # as well as the referring data source
         yield YTDataChunk(dobj, "all", oobjs, dobj.size)
 
     def _chunk_spatial(self, dobj, ngz):
@@ -157,7 +177,7 @@
     def _chunk_io(self, dobj):
         """
         Since subsets are calculated per domain,
-        i.e. per file, yield each domain at a time to 
+        i.e. per file, yield each domain at a time to
         organize by IO. We will eventually chunk out NMSU ART
         to be level-by-level.
         """
@@ -165,77 +185,66 @@
         for subset in oobjs:
             yield YTDataChunk(dobj, "io", [subset], subset.cell_count)
 
+
 class ARTStaticOutput(StaticOutput):
     _hierarchy_class = ARTGeometryHandler
     _fieldinfo_fallback = ARTFieldInfo
     _fieldinfo_known = KnownARTFields
 
-    def __init__(self,filename,data_style='art',
-                 fields = None, storage_filename = None,
-                 skip_particles=False,skip_stars=False,
-                 limit_level=None,spread_age=True):
+    def __init__(self, filename, data_style='art',
+                 fields=None, storage_filename=None,
+                 skip_particles=False, skip_stars=False,
+                 limit_level=None, spread_age=True,
+                 force_max_level=None, file_particle_header=None,
+                 file_particle_data=None, file_particle_stars=None):
         if fields is None:
             fields = fluid_fields
         filename = os.path.abspath(filename)
         self._fields_in_file = fields
+        self._file_amr = filename
+        self._file_particle_header = file_particle_header
+        self._file_particle_data = file_particle_data
+        self._file_particle_stars = file_particle_stars
         self._find_files(filename)
-        self.file_amr = filename
         self.parameter_filename = filename
         self.skip_particles = skip_particles
         self.skip_stars = skip_stars
         self.limit_level = limit_level
         self.max_level = limit_level
+        self.force_max_level = force_max_level
         self.spread_age = spread_age
-        self.domain_left_edge = np.zeros(3,dtype='float')
-        self.domain_right_edge = np.zeros(3,dtype='float')+1.0
-        StaticOutput.__init__(self,filename,data_style)
+        self.domain_left_edge = np.zeros(3, dtype='float')
+        self.domain_right_edge = np.zeros(3, dtype='float')+1.0
+        StaticOutput.__init__(self, filename, data_style)
         self.storage_filename = storage_filename
 
-    def _find_files(self,file_amr):
+    def _find_files(self, file_amr):
         """
         Given the AMR base filename, attempt to find the
         particle header, star files, etc.
         """
-        prefix,suffix = filename_pattern['amr'].split('%s')
-        affix = os.path.basename(file_amr).replace(prefix,'')
-        affix = affix.replace(suffix,'')
-        affix = affix.replace('_','')
-        full_affix = affix
-        affix = affix[1:-1]
-        dirname = os.path.dirname(file_amr)
-        for fp in (filename_pattern_hf,filename_pattern):
-            for filetype, pattern in fp.items():
-                #if this attribute is already set skip it
-                if getattr(self,"file_"+filetype,None) is not None:
-                    continue
-                #sometimes the affix is surrounded by an extraneous _
-                #so check for an extra character on either side
-                check_filename = dirname+'/'+pattern%('?%s?'%affix)
-                filenames = glob.glob(check_filename)
-                if len(filenames)>1:
-                    check_filename_strict = \
-                            dirname+'/'+pattern%('?%s'%full_affix[1:])
-                    filenames = glob.glob(check_filename_strict)
-                
-                if len(filenames)==1:
-                    setattr(self,"file_"+filetype,filenames[0])
-                    mylog.info('discovered %s:%s',filetype,filenames[0])
-                elif len(filenames)>1:
-                    setattr(self,"file_"+filetype,None)
-                    mylog.info("Ambiguous number of files found for %s",
-                            check_filename)
-                    for fn in filenames:
-                        faffix = float(affix)
-                else:
-                    setattr(self,"file_"+filetype,None)
+        base_prefix, base_suffix = filename_pattern['amr']
+        possibles = glob.glob(os.path.dirname(file_amr)+"/*")
+        for filetype, (prefix, suffix) in filename_pattern.iteritems():
+            # if this attribute is already set skip it
+            if getattr(self, "_file_"+filetype, None) is not None:
+                continue
+            stripped = file_amr.replace(base_prefix, prefix)
+            stripped = stripped.replace(base_suffix, suffix)
+            match, = difflib.get_close_matches(stripped, possibles, 1, 0.6)
+            if match is not None:
+                mylog.info('discovered %s:%s', filetype, match)
+                setattr(self, "_file_"+filetype, match)
+            else:
+                setattr(self, "_file_"+filetype, None)
 
     def __repr__(self):
-        return self.file_amr.rsplit(".",1)[0]
+        return self._file_amr.split('/')[-1]
 
     def _set_units(self):
         """
-        Generates the conversion to various physical units based 
-		on the parameters from the header
+        Generates the conversion to various physical units based
+                on the parameters from the header
         """
         self.units = {}
         self.time_units = {}
@@ -243,9 +252,9 @@
         self.units['1'] = 1.0
         self.units['unitary'] = 1.0
 
-        #spatial units
-        z   = self.current_redshift
-        h   = self.hubble_constant
+        # spatial units
+        z = self.current_redshift
+        h = self.hubble_constant
         boxcm_cal = self.parameters["boxh"]
         boxcm_uncal = boxcm_cal / h
         box_proper = boxcm_uncal/(1+z)
@@ -256,55 +265,59 @@
             self.units[unit+'cm'] = mpc_conversion[unit] * boxcm_uncal
             self.units[unit+'hcm'] = mpc_conversion[unit] * boxcm_cal
 
-        #all other units
+        # all other units
         wmu = self.parameters["wmu"]
         Om0 = self.parameters['Om0']
-        ng  = self.parameters['ng']
+        ng = self.parameters['ng']
         wmu = self.parameters["wmu"]
-        boxh   = self.parameters['boxh'] 
-        aexpn  = self.parameters["aexpn"]
+        boxh = self.parameters['boxh']
+        aexpn = self.parameters["aexpn"]
         hubble = self.parameters['hubble']
 
         cf = defaultdict(lambda: 1.0)
         r0 = boxh/ng
-        P0= 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
-        T_0 = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
+        P0 = 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
+        T_0 = 3.03e5 * r0**2.0 * wmu * Om0  # [K]
         S_0 = 52.077 * wmu**(5.0/3.0)
         S_0 *= hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
-        #v0 =  r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
+        # v0 =  r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
         v0 = 50.0*r0*np.sqrt(Om0)
         t0 = r0/v0
         rho1 = 1.8791e-29 * hubble**2.0 * self.omega_matter
         rho0 = 2.776e11 * hubble**2.0 * Om0
-        tr = 2./3. *(3.03e5*r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
+        tr = 2./3. * (3.03e5*r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))
         aM0 = rho0 * (boxh/hubble)**3.0 / ng**3.0
-        cf['r0']=r0
-        cf['P0']=P0
-        cf['T_0']=T_0
-        cf['S_0']=S_0
-        cf['v0']=v0
-        cf['t0']=t0
-        cf['rho0']=rho0
-        cf['rho1']=rho1
-        cf['tr']=tr
-        cf['aM0']=aM0
+        cf['r0'] = r0
+        cf['P0'] = P0
+        cf['T_0'] = T_0
+        cf['S_0'] = S_0
+        cf['v0'] = v0
+        cf['t0'] = t0
+        cf['rho0'] = rho0
+        cf['rho1'] = rho1
+        cf['tr'] = tr
+        cf['aM0'] = aM0
 
-        #factors to multiply the native code units to CGS
-        cf['Pressure'] = P0 #already cgs
-        cf['Velocity'] = v0/aexpn*1.0e5 #proper cm/s
+        # factors to multiply the native code units to CGS
+        cf['Pressure'] = P0  # already cgs
+        cf['Velocity'] = v0/aexpn*1.0e5  # proper cm/s
         cf["Mass"] = aM0 * 1.98892e33
         cf["Density"] = rho1*(aexpn**-3.0)
         cf["GasEnergy"] = rho0*v0**2*(aexpn**-5.0)
         cf["Potential"] = 1.0
         cf["Entropy"] = S_0
         cf["Temperature"] = tr
+        cf["Time"] = 1.0
+        cf["particle_mass"] = cf['Mass']
+        cf["particle_mass_initial"] = cf['Mass']
         self.cosmological_simulation = True
         self.conversion_factors = cf
-        
-        for particle_field in particle_fields:
-            self.conversion_factors[particle_field] =  1.0
+
         for ax in 'xyz':
             self.conversion_factors["%s-velocity" % ax] = 1.0
+        for pt in particle_fields:
+            if pt not in self.conversion_factors.keys():
+                self.conversion_factors[pt] = 1.0
         for unit in sec_conversion.keys():
             self.time_units[unit] = 1.0 / sec_conversion[unit]
 
@@ -320,72 +333,89 @@
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
         self.parameters.update(constants)
-        #read the amr header
-        with open(self.file_amr,'rb') as f:
-            amr_header_vals = _read_struct(f,amr_header_struct)
-            for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
-                _skip_record(f)
-            (self.ncell,) = struct.unpack('>l', _read_record(f))
+        self.parameters['Time'] = 1.0
+        # read the amr header
+        with open(self._file_amr, 'rb') as f:
+            amr_header_vals = read_attrs(f, amr_header_struct, '>')
+            for to_skip in ['tl', 'dtl', 'tlold', 'dtlold', 'iSO']:
+                skipped = skip(f, endian='>')
+            (self.ncell) = read_vector(f, 'i', '>')[0]
             # Try to figure out the root grid dimensions
             est = int(np.rint(self.ncell**(1.0/3.0)))
             # Note here: this is the number of *cells* on the root grid.
             # This is not the same as the number of Octs.
-            #domain dimensions is the number of root *cells*
+            # domain dimensions is the number of root *cells*
             self.domain_dimensions = np.ones(3, dtype='int64')*est
             self.root_grid_mask_offset = f.tell()
             self.root_nocts = self.domain_dimensions.prod()/8
             self.root_ncells = self.root_nocts*8
-            mylog.debug("Estimating %i cells on a root grid side,"+ \
-                        "%i root octs",est,self.root_nocts)
-            self.root_iOctCh = _read_frecord(f,'>i')[:self.root_ncells]
+            mylog.debug("Estimating %i cells on a root grid side," +
+                        "%i root octs", est, self.root_nocts)
+            self.root_iOctCh = read_vector(f, 'i', '>')[:self.root_ncells]
             self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,
-                 order='F')
+                                                        order='F')
             self.root_grid_offset = f.tell()
-            #_skip_record(f) # hvar
-            #_skip_record(f) # var
-            self.root_nhvar = _read_frecord(f,'>f',size_only=True)
-            self.root_nvar  = _read_frecord(f,'>f',size_only=True)
-            #make sure that the number of root variables is a multiple of rootcells
-            assert self.root_nhvar%self.root_ncells==0
-            assert self.root_nvar%self.root_ncells==0
-            self.nhydro_variables = ((self.root_nhvar+self.root_nvar)/ 
-                                    self.root_ncells)
-            self.iOctFree, self.nOct = struct.unpack('>ii', _read_record(f))
+            self.root_nhvar = skip(f, endian='>')
+            self.root_nvar = skip(f, endian='>')
+            # make sure that the number of root variables is a multiple of
+            # rootcells
+            assert self.root_nhvar % self.root_ncells == 0
+            assert self.root_nvar % self.root_ncells == 0
+            self.nhydro_variables = ((self.root_nhvar+self.root_nvar) /
+                                     self.root_ncells)
+            self.iOctFree, self.nOct = read_vector(f, 'i', '>')
             self.child_grid_offset = f.tell()
             self.parameters.update(amr_header_vals)
             self.parameters['ncell0'] = self.parameters['ng']**3
-        #read the particle header
-        if not self.skip_particles and self.file_particle_header:
-            with open(self.file_particle_header,"rb") as fh:
-                particle_header_vals = _read_struct(fh,particle_header_struct)
+            # estimate the root level
+            float_center, fl, iocts, nocts, root_level = _read_art_level_info(
+                f,
+                [0, self.child_grid_offset], 1,
+                coarse_grid=self.domain_dimensions[0])
+            del float_center, fl, iocts, nocts
+            self.root_level = root_level
+            mylog.info("Using root level of %02i", self.root_level)
+        # read the particle header
+        if not self.skip_particles and self._file_particle_header:
+            with open(self._file_particle_header, "rb") as fh:
+                particle_header_vals = read_attrs(
+                    fh, particle_header_struct, '>')
                 fh.seek(seek_extras)
                 n = particle_header_vals['Nspecies']
-                wspecies = np.fromfile(fh,dtype='>f',count=10)
-                lspecies = np.fromfile(fh,dtype='>i',count=10)
+                wspecies = np.fromfile(fh, dtype='>f', count=10)
+                lspecies = np.fromfile(fh, dtype='>i', count=10)
             self.parameters['wspecies'] = wspecies[:n]
             self.parameters['lspecies'] = lspecies[:n]
             ls_nonzero = np.diff(lspecies)[:n-1]
-            mylog.info("Discovered %i species of particles",len(ls_nonzero))
+            self.star_type = len(ls_nonzero)
+            mylog.info("Discovered %i species of particles", len(ls_nonzero))
             mylog.info("Particle populations: "+'%1.1e '*len(ls_nonzero),
-                *ls_nonzero)
-            for k,v in particle_header_vals.items():
+                       *ls_nonzero)
+            for k, v in particle_header_vals.items():
                 if k in self.parameters.keys():
                     if not self.parameters[k] == v:
-                        mylog.info("Inconsistent parameter %s %1.1e  %1.1e",k,v,
-                                   self.parameters[k])
+                        mylog.info(
+                            "Inconsistent parameter %s %1.1e  %1.1e", k, v,
+                            self.parameters[k])
                 else:
-                    self.parameters[k]=v
+                    self.parameters[k] = v
             self.parameters_particles = particle_header_vals
-    
-        #setup standard simulation params yt expects to see
+
+        # setup standard simulation params yt expects to see
         self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
         self.omega_lambda = amr_header_vals['Oml0']
         self.omega_matter = amr_header_vals['Om0']
         self.hubble_constant = amr_header_vals['hubble']
         self.min_level = amr_header_vals['min_level']
         self.max_level = amr_header_vals['max_level']
-        self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
+        if self.limit_level is not None:
+            self.max_level = min(
+                self.limit_level, amr_header_vals['max_level'])
+        if self.force_max_level is not None:
+            self.max_level = self.force_max_level
+        self.hubble_time = 1.0/(self.hubble_constant*100/3.08568025e19)
         self.current_time = b2t(self.parameters['t']) * sec_per_Gyr
+        mylog.info("Max level is %02i", self.max_level)
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
@@ -393,20 +423,24 @@
         Defined for the NMSU file naming scheme.
         This could differ for other formats.
         """
-        fn = ("%s" % (os.path.basename(args[0])))
         f = ("%s" % args[0])
-        prefix, suffix = filename_pattern['amr'].split('%s')
-        if fn.endswith(suffix) and fn.startswith(prefix) and\
-                os.path.exists(f): 
+        prefix, suffix = filename_pattern['amr']
+        with open(f, 'rb') as fh:
+            try:
+                amr_header_vals = read_attrs(fh, amr_header_struct, '>')
                 return True
+            except AssertionError:
+                return False
         return False
 
+
 class ARTDomainSubset(object):
-    def __init__(self, domain, mask, cell_count):
+    def __init__(self, domain, mask, cell_count, domain_level):
         self.mask = mask
         self.domain = domain
         self.oct_handler = domain.pf.h.oct_handler
         self.cell_count = cell_count
+        self.domain_level = domain_level
         level_counts = self.oct_handler.count_levels(
             self.domain.pf.max_level, self.domain.domain_id, mask)
         assert(level_counts.sum() == cell_count)
@@ -432,12 +466,12 @@
     def select_fwidth(self, dobj):
         base_dx = 1.0/self.domain.pf.domain_dimensions
         widths = np.empty((self.cell_count, 3), dtype="float64")
-        dds = (2**self.ires(dobj))
+        dds = (2**self.select_ires(dobj))
         for i in range(3):
-            widths[:,i] = base_dx[i] / dds
+            widths[:, i] = base_dx[i] / dds
         return widths
 
-    def fill(self, content, fields):
+    def fill_root(self, content, ftfields):
         """
         This is called from IOHandler. It takes content
         which is a binary stream, reads the requested field
@@ -446,135 +480,153 @@
         the order they are in in the octhandler.
         """
         oct_handler = self.oct_handler
-        all_fields  = self.domain.pf.h.fluid_field_list
-        fields = [f for ft, f in fields]
-        dest= {}
-        filled = pos = level_offset = 0
+        all_fields = self.domain.pf.h.fluid_field_list
+        fields = [f for ft, f in ftfields]
+        level_offset = 0
         field_idxs = [all_fields.index(f) for f in fields]
+        dest = {}
         for field in fields:
-            dest[field] = np.zeros(self.cell_count, 'float64')
-        for level, offset in enumerate(self.domain.level_offsets):
-            no = self.domain.level_count[level]
-            if level==0:
-                data = _read_root_level(content,self.domain.level_child_offsets,
-                                       self.domain.level_count)
-                data = data[field_idxs,:]
-            else:
-                data = _read_child_level(content,self.domain.level_child_offsets,
-                                         self.domain.level_offsets,
-                                         self.domain.level_count,level,fields,
-                                         self.domain.pf.domain_dimensions,
-                                         self.domain.pf.parameters['ncell0'])
-            source= {}
-            for i,field in enumerate(fields):
-                source[field] = np.empty((no, 8), dtype="float64")
-                source[field][:,:] = np.reshape(data[i,:],(no,8))
-            level_offset += oct_handler.fill_level(self.domain.domain_id, 
-                                   level, dest, source, self.mask, level_offset)
+            dest[field] = np.zeros(self.cell_count, 'float64')-1.
+        level = self.domain_level
+        source = {}
+        data = _read_root_level(content, self.domain.level_child_offsets,
+                                self.domain.level_count)
+        for field, i in zip(fields, field_idxs):
+            temp = np.reshape(data[i, :], self.domain.pf.domain_dimensions,
+                              order='F').astype('float64').T
+            source[field] = temp
+        level_offset += oct_handler.fill_level_from_grid(
+            self.domain.domain_id,
+            level, dest, source, self.mask, level_offset)
         return dest
 
+    def fill_level(self, content, ftfields):
+        oct_handler = self.oct_handler
+        fields = [f for ft, f in ftfields]
+        level_offset = 0
+        dest = {}
+        for field in fields:
+            dest[field] = np.zeros(self.cell_count, 'float64')-1.
+        level = self.domain_level
+        no = self.domain.level_count[level]
+        noct_range = [0, no]
+        source = _read_child_level(
+            content, self.domain.level_child_offsets,
+            self.domain.level_offsets,
+            self.domain.level_count, level, fields,
+            self.domain.pf.domain_dimensions,
+            self.domain.pf.parameters['ncell0'],
+            noct_range=noct_range)
+        nocts_filling = noct_range[1]-noct_range[0]
+        level_offset += oct_handler.fill_level(self.domain.domain_id,
+                                               level, dest, source,
+                                               self.mask, level_offset,
+                                               noct_range[0],
+                                               nocts_filling)
+        return dest
+
+
 class ARTDomainFile(object):
     """
     Read in the AMR, left/right edges, fill out the octhandler
     """
-    #We already read in the header in static output,
-    #and since these headers are defined in only a single file it's
-    #best to leave them in the static output
+    # We already read in the header in static output,
+    # and since these headers are defined in only a single file it's
+    # best to leave them in the static output
     _last_mask = None
     _last_seletor_id = None
 
-    def __init__(self,pf,domain_id,nvar):
+    def __init__(self, pf, domain_id, nvar, level):
         self.nvar = nvar
         self.pf = pf
         self.domain_id = domain_id
+        self.domain_level = level
         self._level_count = None
         self._level_oct_offsets = None
         self._level_child_offsets = None
 
     @property
     def level_count(self):
-        #this is number of *octs*
-        if self._level_count is not None: return self._level_count
+        # this is number of *octs*
+        if self._level_count is not None:
+            return self._level_count
         self.level_offsets
-        return self._level_count
+        return self._level_count[self.domain_level]
 
     @property
     def level_child_offsets(self):
-        if self._level_count is not None: return self._level_child_offsets
+        if self._level_count is not None:
+            return self._level_child_offsets
         self.level_offsets
         return self._level_child_offsets
 
     @property
-    def level_offsets(self): 
-        #this is used by the IO operations to find the file offset,
-        #and then start reading to fill values
-        #note that this is called hydro_offset in ramses
-        if self._level_oct_offsets is not None: 
+    def level_offsets(self):
+        # this is used by the IO operations to find the file offset,
+        # and then start reading to fill values
+        # note that this is called hydro_offset in ramses
+        if self._level_oct_offsets is not None:
             return self._level_oct_offsets
         # We now have to open the file and calculate it
-        f = open(self.pf.file_amr, "rb")
+        f = open(self.pf._file_amr, "rb")
         nhydrovars, inoll, _level_oct_offsets, _level_child_offsets = \
             _count_art_octs(f,  self.pf.child_grid_offset, self.pf.min_level,
                             self.pf.max_level)
-        #remember that the root grid is by itself; manually add it back in
+        # remember that the root grid is by itself; manually add it back in
         inoll[0] = self.pf.domain_dimensions.prod()/8
         _level_child_offsets[0] = self.pf.root_grid_offset
         self.nhydrovars = nhydrovars
-        self.inoll = inoll #number of octs
+        self.inoll = inoll  # number of octs
         self._level_oct_offsets = _level_oct_offsets
         self._level_child_offsets = _level_child_offsets
         self._level_count = inoll
         return self._level_oct_offsets
-    
-    def _read_amr(self, oct_handler):
+
+    def _read_amr_level(self, oct_handler):
         """Open the oct file, read in octs level-by-level.
-           For each oct, only the position, index, level and domain 
+           For each oct, only the position, index, level and domain
            are needed - its position in the octree is found automatically.
            The most important is finding all the information to feed
            oct_handler.add
         """
-        #on the root level we typically have 64^3 octs
-        #giving rise to 128^3 cells
-        #but on level 1 instead of 128^3 octs, we have 256^3 octs
-        #leave this code here instead of static output - it's memory intensive
         self.level_offsets
-        f = open(self.pf.file_amr, "rb")
-        #add the root *cell* not *oct* mesh
+        f = open(self.pf._file_amr, "rb")
+        level = self.domain_level
+        unitary_center, fl, iocts, nocts, root_level = _read_art_level_info(
+            f,
+            self._level_oct_offsets, level,
+            coarse_grid=self.pf.domain_dimensions[0],
+            root_level=self.pf.root_level)
+        nocts_check = oct_handler.add(self.domain_id, level, nocts,
+                                      unitary_center, self.domain_id)
+        assert(nocts_check == nocts)
+        mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
+                    nocts, level, oct_handler.nocts)
+
+    def _read_amr_root(self, oct_handler):
+        self.level_offsets
+        f = open(self.pf._file_amr, "rb")
+        # add the root *cell* not *oct* mesh
+        level = self.domain_level
         root_octs_side = self.pf.domain_dimensions[0]/2
         NX = np.ones(3)*root_octs_side
+        octs_side = NX*2**level
         LE = np.array([0.0, 0.0, 0.0], dtype='float64')
         RE = np.array([1.0, 1.0, 1.0], dtype='float64')
         root_dx = (RE - LE) / NX
         LL = LE + root_dx/2.0
         RL = RE - root_dx/2.0
-        #compute floating point centers of root octs
-        root_fc= np.mgrid[LL[0]:RL[0]:NX[0]*1j,
-                          LL[1]:RL[1]:NX[1]*1j,
-                          LL[2]:RL[2]:NX[2]*1j ]
-        root_fc= np.vstack([p.ravel() for p in root_fc]).T
-        nocts_check = oct_handler.add(1, 0, root_octs_side**3,
+        # compute floating point centers of root octs
+        root_fc = np.mgrid[LL[0]:RL[0]:NX[0]*1j,
+                           LL[1]:RL[1]:NX[1]*1j,
+                           LL[2]:RL[2]:NX[2]*1j]
+        root_fc = np.vstack([p.ravel() for p in root_fc]).T
+        nocts_check = oct_handler.add(self.domain_id, level,
+                                      root_octs_side**3,
                                       root_fc, self.domain_id)
         assert(oct_handler.nocts == root_fc.shape[0])
-        nocts_added = root_fc.shape[0]
         mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
-                    root_octs_side**3, 0,nocts_added)
-        for level in xrange(1, self.pf.max_level+1):
-            left_index, fl, iocts, nocts,root_level = _read_art_level_info(f, 
-                self._level_oct_offsets,level,
-                coarse_grid=self.pf.domain_dimensions[0])
-            left_index/=2
-            #at least one of the indices should be odd
-            #assert np.sum(left_index[:,0]%2==1)>0
-            octs_side = NX*2**level
-            float_left_edge = left_index.astype("float64") / octs_side
-            float_center = float_left_edge + 0.5*1.0/octs_side
-            #all floatin unitary positions should fit inside the domain
-            assert np.all(float_center<1.0)
-            nocts_check = oct_handler.add(1,level, nocts, float_left_edge, self.domain_id)
-            nocts_added += nocts
-            assert(oct_handler.nocts == nocts_added)
-            mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
-                        nocts, level,nocts_added)
+                    root_octs_side**3, 0, oct_handler.nocts)
 
     def select(self, selector):
         if id(selector) == self._last_selector_id:
@@ -585,8 +637,8 @@
 
     def count(self, selector):
         if id(selector) == self._last_selector_id:
-            if self._last_mask is None: return 0
+            if self._last_mask is None:
+                return 0
             return self._last_mask.sum()
         self.select(selector)
         return self.count(selector)
-
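
The new _read_amr_root above seeds the oct handler with one oct per 2^3
block of root cells, computing unitary oct centers with np.mgrid.  A
standalone sketch of just that center computation, shrunk to a
hypothetical 4^3-cell (2^3-oct) root grid:

    import numpy as np

    NX = np.ones(3) * 2              # 2 root octs per edge (hypothetical)
    LE = np.array([0.0, 0.0, 0.0])
    RE = np.array([1.0, 1.0, 1.0])
    root_dx = (RE - LE) / NX         # width of one root oct
    LL = LE + root_dx / 2.0          # first oct center on each axis
    RL = RE - root_dx / 2.0          # last oct center on each axis
    # complex step counts make mgrid include both endpoints
    root_fc = np.mgrid[LL[0]:RL[0]:NX[0]*1j,
                       LL[1]:RL[1]:NX[1]*1j,
                       LL[2]:RL[2]:NX[2]*1j]
    root_fc = np.vstack([p.ravel() for p in root_fc]).T
    print(root_fc.shape)             # (8, 3): one row per root oct

Each row is then passed to oct_handler.add together with the level and
domain id, as in the diff.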

diff -r fb14dd338289ba004f2d42bfe4f5e7f5160809c7 -r 3a1a2a95457855b229396b14b67aeba15d2a3f27 yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -25,7 +25,10 @@
 
 """
 
-fluid_fields= [ 
+# If not otherwise specified, we are big endian
+endian = '>'
+
+fluid_fields = [
     'Density',
     'TotalEnergy',
     'XMomentumDensity',
@@ -40,32 +43,29 @@
     'PotentialOld'
 ]
 
-hydro_struct = [('pad1','>i'),('idc','>i'),('iOctCh','>i')]
+hydro_struct = [('pad1', '>i'), ('idc', '>i'), ('iOctCh', '>i')]
 for field in fluid_fields:
-    hydro_struct += (field,'>f'),
-hydro_struct += ('pad2','>i'),
+    hydro_struct += (field, '>f'),
+hydro_struct += ('pad2', '>i'),
 
-particle_fields= [
-    'particle_age',
+particle_fields = [
+    'particle_mass',  # stars have variable mass
     'particle_index',
-    'particle_mass',
-    'particle_mass_initial',
-    'particle_creation_time',
-    'particle_metallicity1',
-    'particle_metallicity2',
-    'particle_metallicity',
+    'particle_type',
     'particle_position_x',
     'particle_position_y',
     'particle_position_z',
     'particle_velocity_x',
     'particle_velocity_y',
     'particle_velocity_z',
-    'particle_type',
-    'particle_index'
+    'particle_mass_initial',
+    'particle_creation_time',
+    'particle_metallicity1',
+    'particle_metallicity2',
+    'particle_metallicity',
 ]
 
 particle_star_fields = [
-    'particle_age',
     'particle_mass',
     'particle_mass_initial',
     'particle_creation_time',
@@ -74,110 +74,65 @@
     'particle_metallicity',
 ]
 
-filename_pattern = {				
-	'amr':'10MpcBox_csf512_%s.d',
-	'particle_header':'PMcrd%s.DAT',
-	'particle_data':'PMcrs0%s.DAT',
-	'particle_stars':'stars_%s.dat'
-}
 
-filename_pattern_hf = {				
-	'particle_header':'PMcrd_%s.DAT',
-	'particle_data':'PMcrs0_%s.DAT',
+filename_pattern = {
+    'amr': ['10MpcBox_', '.d'],
+    'particle_header': ['PMcrd', '.DAT'],
+    'particle_data': ['PMcrs', '.DAT'],
+    'particle_stars': ['stars', '.dat']
 }
 
 amr_header_struct = [
-    ('>i','pad byte'),
-    ('>256s','jname'),
-    ('>i','pad byte'),
-    ('>i','pad byte'),
-    ('>i','istep'),
-    ('>d','t'),
-    ('>d','dt'),
-    ('>f','aexpn'),
-    ('>f','ainit'),
-    ('>i','pad byte'),
-    ('>i','pad byte'),
-    ('>f','boxh'),
-    ('>f','Om0'),
-    ('>f','Oml0'),
-    ('>f','Omb0'),
-    ('>f','hubble'),
-    ('>i','pad byte'),
-    ('>i','pad byte'),
-    ('>i','nextras'),
-    ('>i','pad byte'),
-    ('>i','pad byte'),
-    ('>f','extra1'),
-    ('>f','extra2'),
-    ('>i','pad byte'),
-    ('>i','pad byte'),
-    ('>256s','lextra'),
-    ('>256s','lextra'),
-    ('>i','pad byte'),
-    ('>i', 'pad byte'),
-    ('>i', 'min_level'),
-    ('>i', 'max_level'),
-    ('>i', 'pad byte'),
+    ('jname', 1, '256s'),
+    (('istep', 't', 'dt', 'aexpn', 'ainit'), 1, 'iddff'),
+    (('boxh', 'Om0', 'Oml0', 'Omb0', 'hubble'), 5, 'f'),
+    ('nextras', 1, 'i'),
+    (('extra1', 'extra2'), 2, 'f'),
+    ('lextra', 1, '512s'),
+    (('min_level', 'max_level'), 2, 'i')
 ]
 
-particle_header_struct =[
-    ('>i','pad'),
-    ('45s','header'), 
-    ('>f','aexpn'),
-    ('>f','aexp0'),
-    ('>f','amplt'),
-    ('>f','astep'),
-    ('>i','istep'),
-    ('>f','partw'),
-    ('>f','tintg'),
-    ('>f','Ekin'),
-    ('>f','Ekin1'),
-    ('>f','Ekin2'),
-    ('>f','au0'),
-    ('>f','aeu0'),
-    ('>i','Nrow'),
-    ('>i','Ngridc'),
-    ('>i','Nspecies'),
-    ('>i','Nseed'),
-    ('>f','Om0'),
-    ('>f','Oml0'),
-    ('>f','hubble'),
-    ('>f','Wp5'),
-    ('>f','Ocurv'),
-    ('>f','Omb0'),
-    ('>%ds'%(396),'extras'),
-    ('>f','unknown'),
-    ('>i','pad')
+particle_header_struct = [
+    (('header',
+     'aexpn', 'aexp0', 'amplt', 'astep',
+     'istep',
+     'partw', 'tintg',
+     'Ekin', 'Ekin1', 'Ekin2',
+     'au0', 'aeu0',
+     'Nrow', 'Ngridc', 'Nspecies', 'Nseed',
+     'Om0', 'Oml0', 'hubble', 'Wp5', 'Ocurv', 'Omb0',
+     'extras', 'unknown'),
+     1,
+     '45sffffi'+'fffffff'+'iiii'+'ffffff'+'396s'+'f')
 ]
 
 star_struct = [
-        ('>d',('tdum','adum')),
-        ('>i','nstars'),
-        ('>d',('ws_old','ws_oldi')),
-        ('>f','mass'),
-        ('>f','imass'),
-        ('>f','tbirth'),
-        ('>f','metallicity1'),
-        ('>f','metallicity2')
-        ]
+    ('>d', ('tdum', 'adum')),
+    ('>i', 'nstars'),
+    ('>d', ('ws_old', 'ws_oldi')),
+    ('>f', 'particle_mass'),
+    ('>f', 'particle_mass_initial'),
+    ('>f', 'particle_creation_time'),
+    ('>f', 'particle_metallicity1'),
+    ('>f', 'particle_metallicity2')
+]
 
 star_name_map = {
-        'particle_mass':'mass',
-        'particle_mass_initial':'imass',
-        'particle_age':'tbirth',
-        'particle_metallicity1':'metallicity1',
-        'particle_metallicity2':'metallicity2',
-        'particle_metallicity':'metallicity',
-        }
+    'particle_mass': 'mass',
+    'particle_mass_initial': 'imass',
+    'particle_creation_time': 'tbirth',
+    'particle_metallicity1': 'metallicity1',
+    'particle_metallicity2': 'metallicity2',
+    'particle_metallicity': 'metallicity',
+}
 
 constants = {
-    "Y_p":0.245,
-    "gamma":5./3.,
-    "T_CMB0":2.726,
-    "T_min":300.,
-    "ng":128,
-    "wmu":4.0/(8.0-5.0*0.245)
+    "Y_p": 0.245,
+    "gamma": 5./3.,
+    "T_CMB0": 2.726,
+    "T_min": 300.,
+    "ng": 128,
+    "wmu": 4.0/(8.0-5.0*0.245)
 }
 
 seek_extras = 137
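
Note that filename_pattern changes here from full printf-style templates
to [prefix, suffix] pairs, so the frontend can recognize output files
whose middle portion (box label, snapshot tag) varies.  A hedged sketch
of matching such pairs against a directory listing -- the helper and the
directory layout are illustrative, not part of this commit:

    import os

    filename_pattern = {
        'amr': ['10MpcBox_', '.d'],
        'particle_header': ['PMcrd', '.DAT'],
        'particle_data': ['PMcrs', '.DAT'],
        'particle_stars': ['stars', '.dat'],
    }

    def match_art_files(dirname):
        # keep the first file of each kind whose name matches its
        # prefix/suffix pair
        found = {}
        for fn in sorted(os.listdir(dirname)):
            for kind, (prefix, suffix) in filename_pattern.items():
                if fn.startswith(prefix) and fn.endswith(suffix):
                    found.setdefault(kind, os.path.join(dirname, fn))
        return found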

diff -r fb14dd338289ba004f2d42bfe4f5e7f5160809c7 -r 3a1a2a95457855b229396b14b67aeba15d2a3f27 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -3,6 +3,8 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: UCSD
+Author: Chris Moody <matthewturk at gmail.com>
+Affiliation: UCSC
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
@@ -22,7 +24,7 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
+import numpy as np
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \
@@ -35,210 +37,221 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 import yt.utilities.lib as amr_utils
+from yt.utilities.physical_constants import mass_sun_cgs
+from yt.frontends.art.definitions import *
 
 KnownARTFields = FieldInfoContainer()
 add_art_field = KnownARTFields.add_field
-
 ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = ARTFieldInfo.add_field
 
-import numpy as np
+for f in fluid_fields:
+    add_art_field(f, function=NullFunc, take_log=True,
+                  validators=[ValidateDataField(f)])
 
-#these are just the hydro fields
-known_art_fields = [ 'Density','TotalEnergy',
-                     'XMomentumDensity','YMomentumDensity','ZMomentumDensity',
-                     'Pressure','Gamma','GasEnergy',
-                     'MetalDensitySNII', 'MetalDensitySNIa',
-                     'PotentialNew','PotentialOld']
-
-#Add the fields, then later we'll individually defined units and names
-for f in known_art_fields:
+for f in particle_fields:
     add_art_field(f, function=NullFunc, take_log=True,
-              validators = [ValidateDataField(f)])
-
-#Hydro Fields that are verified to be OK unit-wise:
-#Density
-#Temperature
-#metallicities
-#MetalDensity SNII + SNia
-
-#Hydro Fields that need to be tested:
-#TotalEnergy
-#XYZMomentum
-#Pressure
-#Gamma
-#GasEnergy
-#Potentials
-#xyzvelocity
-
-#Particle fields that are tested:
-#particle_position_xyz
-#particle_type
-#particle_index
-#particle_mass
-#particle_mass_initial
-#particle_age
-#particle_velocity
-#particle_metallicity12
-
-#Particle fields that are untested:
-#NONE
-
-#Other checks:
-#CellMassMsun == Density * CellVolume
+                  validators=[ValidateDataField(f)],
+                  particle_type=True)
+add_art_field("particle_mass", function=NullFunc, take_log=True,
+              validators=[ValidateDataField(f)],
+              particle_type=True,
+              convert_function=lambda x: x.convert("particle_mass"))
+add_art_field("particle_mass_initial", function=NullFunc, take_log=True,
+              validators=[ValidateDataField(f)],
+              particle_type=True,
+              convert_function=lambda x: x.convert("particle_mass"))
 
 def _convertDensity(data):
     return data.convert("Density")
 KnownARTFields["Density"]._units = r"\rm{g}/\rm{cm}^3"
 KnownARTFields["Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["Density"]._convert_function=_convertDensity
+KnownARTFields["Density"]._convert_function = _convertDensity
 
 def _convertTotalEnergy(data):
     return data.convert("GasEnergy")
-KnownARTFields["TotalEnergy"]._units = r"\rm{g}/\rm{cm}^3"
-KnownARTFields["TotalEnergy"]._projected_units = r"\rm{K}"
-KnownARTFields["TotalEnergy"]._convert_function=_convertTotalEnergy
+KnownARTFields["TotalEnergy"]._units = r"\rm{g}\rm{cm}^2/\rm{s}^2"
+KnownARTFields["TotalEnergy"]._projected_units = r"\rm{g}\rm{cm}^3/\rm{s}^2"
+KnownARTFields["TotalEnergy"]._convert_function = _convertTotalEnergy
 
 def _convertXMomentumDensity(data):
-    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr = data.convert("Mass")*data.convert("Velocity")
     tr *= (data.convert("Density")/data.convert("Mass"))
     return tr
-KnownARTFields["XMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
-KnownARTFields["XMomentumDensity"]._projected_units = r"\rm{K}"
-KnownARTFields["XMomentumDensity"]._convert_function=_convertXMomentumDensity
+KnownARTFields["XMomentumDensity"]._units = r"\rm{g}/\rm{s}/\rm{cm}^3"
+KnownARTFields["XMomentumDensity"]._projected_units = r"\rm{g}/\rm{s}/\rm{cm}^2"
+KnownARTFields["XMomentumDensity"]._convert_function = _convertXMomentumDensity
 
 def _convertYMomentumDensity(data):
-    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr = data.convert("Mass")*data.convert("Velocity")
     tr *= (data.convert("Density")/data.convert("Mass"))
     return tr
-KnownARTFields["YMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
-KnownARTFields["YMomentumDensity"]._projected_units = r"\rm{K}"
-KnownARTFields["YMomentumDensity"]._convert_function=_convertYMomentumDensity
+KnownARTFields["YMomentumDensity"]._units = r"\rm{g}/\rm{s}/\rm{cm}^3"
+KnownARTFields["YMomentumDensity"]._projected_units = r"\rm{g}/\rm{s}/\rm{cm}^2"
+KnownARTFields["YMomentumDensity"]._convert_function = _convertYMomentumDensity
 
 def _convertZMomentumDensity(data):
-    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr = data.convert("Mass")*data.convert("Velocity")
     tr *= (data.convert("Density")/data.convert("Mass"))
     return tr
-KnownARTFields["ZMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
-KnownARTFields["ZMomentumDensity"]._projected_units = r"\rm{K}"
-KnownARTFields["ZMomentumDensity"]._convert_function=_convertZMomentumDensity
+KnownARTFields["ZMomentumDensity"]._units = r"\rm{g}/\rm{s}/\rm{cm}^3"
+KnownARTFields["ZMomentumDensity"]._projected_units = r"\rm{g}/\rm{s}/\rm{cm}^2"
+KnownARTFields["ZMomentumDensity"]._convert_function = _convertZMomentumDensity
 
 def _convertPressure(data):
     return data.convert("Pressure")
-KnownARTFields["Pressure"]._units = r"\rm{g}/\rm{cm}/\rm{s}^2"
+KnownARTFields["Pressure"]._units = r"\rm{g}/\rm{s}^2/\rm{cm}^1"
 KnownARTFields["Pressure"]._projected_units = r"\rm{g}/\rm{s}^2"
-KnownARTFields["Pressure"]._convert_function=_convertPressure
+KnownARTFields["Pressure"]._convert_function = _convertPressure
 
 def _convertGamma(data):
     return 1.0
 KnownARTFields["Gamma"]._units = r""
 KnownARTFields["Gamma"]._projected_units = r""
-KnownARTFields["Gamma"]._convert_function=_convertGamma
+KnownARTFields["Gamma"]._convert_function = _convertGamma
 
 def _convertGasEnergy(data):
     return data.convert("GasEnergy")
-KnownARTFields["GasEnergy"]._units = r"\rm{ergs}/\rm{g}"
-KnownARTFields["GasEnergy"]._projected_units = r""
-KnownARTFields["GasEnergy"]._convert_function=_convertGasEnergy
+KnownARTFields["GasEnergy"]._units = r"\rm{g}\rm{cm}^2/\rm{s}^2"
+KnownARTFields["GasEnergy"]._projected_units = r"\rm{g}\rm{cm}^3/\rm{s}^2"
+KnownARTFields["GasEnergy"]._convert_function = _convertGasEnergy
 
 def _convertMetalDensitySNII(data):
     return data.convert('Density')
 KnownARTFields["MetalDensitySNII"]._units = r"\rm{g}/\rm{cm}^3"
 KnownARTFields["MetalDensitySNII"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["MetalDensitySNII"]._convert_function=_convertMetalDensitySNII
+KnownARTFields["MetalDensitySNII"]._convert_function = _convertMetalDensitySNII
 
 def _convertMetalDensitySNIa(data):
     return data.convert('Density')
 KnownARTFields["MetalDensitySNIa"]._units = r"\rm{g}/\rm{cm}^3"
 KnownARTFields["MetalDensitySNIa"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["MetalDensitySNIa"]._convert_function=_convertMetalDensitySNIa
+KnownARTFields["MetalDensitySNIa"]._convert_function = _convertMetalDensitySNIa
 
 def _convertPotentialNew(data):
     return data.convert("Potential")
-KnownARTFields["PotentialNew"]._units = r"\rm{g}/\rm{cm}^3"
-KnownARTFields["PotentialNew"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["PotentialNew"]._convert_function=_convertPotentialNew
+KnownARTFields["PotentialNew"]._units = r"\rm{g}\rm{cm}^2/\rm{s}^2"
+KnownARTFields["PotentialNew"]._projected_units = r"\rm{g}\rm{cm}^3/\rm{s}^2"
+KnownARTFields["PotentialNew"]._convert_function = _convertPotentialNew
 
 def _convertPotentialOld(data):
     return data.convert("Potential")
-KnownARTFields["PotentialOld"]._units = r"\rm{g}/\rm{cm}^3"
-KnownARTFields["PotentialOld"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["PotentialOld"]._convert_function=_convertPotentialOld
+KnownARTFields["PotentialOld"]._units = r"\rm{g}\rm{cm}^2/\rm{s}^2"
+KnownARTFields["PotentialOld"]._projected_units = r"\rm{g}\rm{cm}^3/\rm{s}^2"
+KnownARTFields["PotentialOld"]._convert_function = _convertPotentialOld
 
 ####### Derived fields
+def _temperature(field, data):
+    tr = data["GasEnergy"]/data["Density"]
+    tr /= data.pf.conversion_factors["GasEnergy"]
+    tr *= data.pf.conversion_factors["Density"]
+    tr *= data.pf.conversion_factors['tr']
+    return tr
 
-def _temperature(field, data):
-    dg = data["GasEnergy"] #.astype('float64')
-    dg /= data.pf.conversion_factors["GasEnergy"]
-    dd = data["Density"] #.astype('float64')
-    dd /= data.pf.conversion_factors["Density"]
-    tr = dg/dd*data.pf.conversion_factors['tr']
-    #ghost cells have zero density?
-    tr[np.isnan(tr)] = 0.0
-    #dd[di] = -1.0
-    #if data.id==460:
-    #tr[di] = -1.0 #replace the zero-density points with zero temp
-    #print tr.min()
-    #assert np.all(np.isfinite(tr))
-    return tr
 def _converttemperature(data):
-    #x = data.pf.conversion_factors["Temperature"]
-    x = 1.0
-    return x
-add_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
+    return 1.0
+add_field("Temperature", function=_temperature,
+          units=r"\mathrm{K}", take_log=True)
 ARTFieldInfo["Temperature"]._units = r"\mathrm{K}"
 ARTFieldInfo["Temperature"]._projected_units = r"\mathrm{K}"
-#ARTFieldInfo["Temperature"]._convert_function=_converttemperature
 
 def _metallicity_snII(field, data):
-    tr  = data["MetalDensitySNII"] / data["Density"]
+    tr = data["MetalDensitySNII"] / data["Density"]
     return tr
-add_field("Metallicity_SNII", function=_metallicity_snII, units = r"\mathrm{K}",take_log=True)
+add_field("Metallicity_SNII", function=_metallicity_snII,
+          units=r"\mathrm{K}", take_log=True)
 ARTFieldInfo["Metallicity_SNII"]._units = r""
 ARTFieldInfo["Metallicity_SNII"]._projected_units = r""
 
 def _metallicity_snIa(field, data):
-    tr  = data["MetalDensitySNIa"] / data["Density"]
+    tr = data["MetalDensitySNIa"] / data["Density"]
     return tr
-add_field("Metallicity_SNIa", function=_metallicity_snIa, units = r"\mathrm{K}",take_log=True)
+add_field("Metallicity_SNIa", function=_metallicity_snIa,
+          units=r"\mathrm{K}", take_log=True)
 ARTFieldInfo["Metallicity_SNIa"]._units = r""
 ARTFieldInfo["Metallicity_SNIa"]._projected_units = r""
 
 def _metallicity(field, data):
-    tr  = data["Metal_Density"] / data["Density"]
+    tr = data["Metal_Density"] / data["Density"]
     return tr
-add_field("Metallicity", function=_metallicity, units = r"\mathrm{K}",take_log=True)
+add_field("Metallicity", function=_metallicity,
+          units=r"\mathrm{K}", take_log=True)
 ARTFieldInfo["Metallicity"]._units = r""
 ARTFieldInfo["Metallicity"]._projected_units = r""
 
-def _x_velocity(field,data):
-    tr  = data["XMomentumDensity"]/data["Density"]
+def _x_velocity(field, data):
+    tr = data["XMomentumDensity"]/data["Density"]
     return tr
-add_field("x-velocity", function=_x_velocity, units = r"\mathrm{cm/s}",take_log=False)
+add_field("x-velocity", function=_x_velocity,
+          units=r"\mathrm{cm/s}", take_log=False)
 ARTFieldInfo["x-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["x-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _y_velocity(field,data):
-    tr  = data["YMomentumDensity"]/data["Density"]
+def _y_velocity(field, data):
+    tr = data["YMomentumDensity"]/data["Density"]
     return tr
-add_field("y-velocity", function=_y_velocity, units = r"\mathrm{cm/s}",take_log=False)
+add_field("y-velocity", function=_y_velocity,
+          units=r"\mathrm{cm/s}", take_log=False)
 ARTFieldInfo["y-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["y-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _z_velocity(field,data):
-    tr  = data["ZMomentumDensity"]/data["Density"]
+def _z_velocity(field, data):
+    tr = data["ZMomentumDensity"]/data["Density"]
     return tr
-add_field("z-velocity", function=_z_velocity, units = r"\mathrm{cm/s}",take_log=False)
+add_field("z-velocity", function=_z_velocity,
+          units=r"\mathrm{cm/s}", take_log=False)
 ARTFieldInfo["z-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["z-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
 def _metal_density(field, data):
-    tr  = data["MetalDensitySNIa"]
+    tr = data["MetalDensitySNIa"]
     tr += data["MetalDensitySNII"]
     return tr
-add_field("Metal_Density", function=_metal_density, units = r"\mathrm{K}",take_log=True)
-ARTFieldInfo["Metal_Density"]._units = r""
-ARTFieldInfo["Metal_Density"]._projected_units = r""
+add_field("Metal_Density", function=_metal_density,
+          units=r"\mathrm{K}", take_log=True)
+ARTFieldInfo["Metal_Density"]._units = r"\rm{g}/\rm{cm}^3"
+ARTFieldInfo["Metal_Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
 
+# Particle fields
+def _particle_age(field, data):
+    tr = data["particle_creation_time"]
+    return data.pf.current_time - tr
+add_field("particle_age", function=_particle_age, units=r"\mathrm{s}",
+          take_log=True, particle_type=True)
 
-#Particle fields
+def spread_ages(ages, spread=1.0e7*365*24*3600):
+    # stars are formed in lumps; spread out the ages linearly
+    da = np.diff(ages)
+    assert np.all(da <= 0)
+    # ages should always be decreasing, and ordered so
+    agesd = np.zeros(ages.shape)
+    idx, = np.where(da < 0)
+    idx += 1  # mark the right edges
+    # spread this age evenly out to the next age
+    lidx = 0
+    lage = 0
+    for i in idx:
+        n = i-lidx  # n stars affected
+        rage = ages[i]
+        lage = max(rage-spread, 0.0)
+        agesd[lidx:i] = np.linspace(lage, rage, n)
+        lidx = i
+        # lage=rage
+    # we didn't get the last iter
+    n = agesd.shape[0]-lidx
+    rage = ages[-1]
+    lage = max(rage-spread, 0.0)
+    agesd[lidx:] = np.linspace(lage, rage, n)
+    return agesd
+
+def _particle_age_spread(field, data):
+    tr = data["particle_creation_time"]
+    return spread_ages(data.pf.current_time - tr)
+
+add_field("particle_age_spread", function=_particle_age_spread,
+          particle_type=True, take_log=True, units=r"\rm{s}")
+
+def _ParticleMassMsun(field, data):
+    return data["particle_mass"]/mass_sun_cgs
+add_field("ParticleMassMsun", function=_ParticleMassMsun, particle_type=True,
+          take_log=True, units=r"\rm{Msun}")

This diff is so big that we needed to truncate the remainder.
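
One piece of the fields.py diff above worth spelling out is spread_ages:
ART writes many stars formed in the same event with an identical
creation time, and the helper replaces each such run with a linear ramp
no wider than spread seconds, so the ages become distinct.  A small
demonstration with hypothetical ages, assuming the spread_ages
definition from the diff is in scope; per its assert, the input must be
non-increasing:

    import numpy as np

    yr = 365 * 24 * 3600.0
    # three stars from one formation event, two from a later one
    # (ages in seconds, sorted oldest first)
    ages = np.array([5.0e8 * yr] * 3 + [2.0e8 * yr] * 2)
    smoothed = spread_ages(ages, spread=1.0e7 * yr)
    assert smoothed.shape == ages.shape   # same stars, distinct ages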

https://bitbucket.org/yt_analysis/yt/commits/b521b6bb599c/
Changeset:   b521b6bb599c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-15 19:30:28
Summary:     Merging in Doug's removal of shape and size.
Affected #:  2 files

diff -r 94eeca5052506364a8db0afcc76d53ed2f36ff7b -r b521b6bb599c1e8bfd36c85f88ca303a9b667546 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -426,8 +426,6 @@
     _sort_by = None
     _selector = None
     _current_chunk = None
-    size = None
-    shape = None
 
     def __init__(self, *args, **kwargs):
         super(YTSelectionContainer, self).__init__(*args, **kwargs)
@@ -524,17 +522,20 @@
         # There are several items that need to be swapped out
         # field_data, size, shape
         old_field_data, self.field_data = self.field_data, YTFieldData()
-        old_size, self.size = self.size, chunk.data_size
         old_chunk, self._current_chunk = self._current_chunk, chunk
         old_locked, self._locked = self._locked, False
-        #self.shape = (self.size,)
         yield
         self.field_data = old_field_data
-        self.size = old_size
-        #self.shape = (old_size,)
         self._current_chunk = old_chunk
         self._locked = old_locked
 
+#    @property   
+#    def size(self) :
+#        if self._current_chunk is None :
+##            self.hierarchy._identify_base_chunk(self)
+#            return 0
+#        return self._current_chunk.data_size
+
     @property
     def icoords(self):
         if self._current_chunk is None:

diff -r 94eeca5052506364a8db0afcc76d53ed2f36ff7b -r b521b6bb599c1e8bfd36c85f88ca303a9b667546 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -96,7 +96,7 @@
           display_field = False)
 
 def _Ones(field, data):
-    return np.ones(data.shape, dtype='float64')
+    return np.ones(data.ActiveDimensions, dtype='float64')
 add_field("Ones", function=_Ones,
           projection_conversion="unitary",
           display_field = False)
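
With size and shape removed from YTSelectionContainer in this changeset,
a field like Ones can no longer size its output from data.shape; on
grids it reads ActiveDimensions instead.  A tiny illustration with a
stand-in grid object (hypothetical, only to show where the shape now
comes from):

    import numpy as np

    class FakeGrid(object):
        ActiveDimensions = np.array([4, 4, 4])

    def _Ones(field, data):
        return np.ones(data.ActiveDimensions, dtype='float64')

    print(_Ones(None, FakeGrid()).shape)   # (4, 4, 4)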


https://bitbucket.org/yt_analysis/yt/commits/1cc8469fd8f1/
Changeset:   1cc8469fd8f1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-04 22:03:29
Summary:     Merging from mainline yt dev.

Note that 10 tests fail.  A subsequent commit will disable them, as they
are not yet appropriate -- nine of them exercise mixed particle/fluid
fields, which fake_random_pf cannot yet support, and the other one uses
boolean regions, which we also do not yet support.
Affected #:  88 files

diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -4,8 +4,10 @@
 freetype.cfg
 hdf5.cfg
 png.cfg
+rockstar.cfg
 yt_updater.log
 yt/frontends/artio/_artio_caller.c
+yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/frontends/sph/smoothing_kernel.c
 yt/geometry/oct_container.c
@@ -35,6 +37,7 @@
 yt/utilities/lib/GridTree.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
+yt/utilities/lib/write_array.c
 syntax: glob
 *.pyc
 .*.swp

diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -10,14 +10,15 @@
 # subversion checkout of yt, you can set YT_DIR, too.  (It'll already
 # check the current directory and one up.
 #
-# And, feel free to drop me a line: matthewturk at gmail.com
+# If you experience problems, please visit the Help section at 
+# http://yt-project.org.
 #
 
 DEST_SUFFIX="yt-`uname -m`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
 BRANCH="yt-3.0" # This is the branch to which we will forcibly update.
 
-if [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
+if [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
 then
     DEST_DIR=${YT_DEST}
 fi
@@ -34,7 +35,7 @@
 
 INST_HG=1       # Install Mercurial or not?  If hg is not already
                 # installed, yt cannot be installed.
-INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with 
+INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with
                 # the system zlib, which is compiled statically.
                 # If need be, you can turn this off.
 INST_BZLIB=1    # On some systems, libbzip2 is missing.  This can
@@ -76,7 +77,7 @@
    echo "the script to re-enable root-level installation.  Sorry!"
    exit 1
 fi
-if [[ ${DEST_DIR%/} == /usr/local ]] 
+if [[ ${DEST_DIR%/} == /usr/local ]]
 then
    echo "******************************************************"
    echo "*                                                    *"
@@ -97,6 +98,48 @@
 
 LOG_FILE="${DEST_DIR}/yt_install.log"
 
+function write_config
+{
+    CONFIG_FILE=${DEST_DIR}/.yt_config
+
+    echo INST_HG=${INST_HG} > ${CONFIG_FILE}
+    echo INST_ZLIB=${INST_ZLIB} >> ${CONFIG_FILE}
+    echo INST_BZLIB=${INST_BZLIB} >> ${CONFIG_FILE}
+    echo INST_PNG=${INST_PNG} >> ${CONFIG_FILE}
+    echo INST_FTYPE=${INST_FTYPE} >> ${CONFIG_FILE}
+    echo INST_ENZO=${INST_ENZO} >> ${CONFIG_FILE}
+    echo INST_SQLITE3=${INST_SQLITE3} >> ${CONFIG_FILE}
+    echo INST_PYX=${INST_PYX} >> ${CONFIG_FILE}
+    echo INST_0MQ=${INST_0MQ} >> ${CONFIG_FILE}
+    echo INST_ROCKSTAR=${INST_ROCKSTAR} >> ${CONFIG_FILE}
+    echo INST_SCIPY=${INST_SCIPY} >> ${CONFIG_FILE}
+    echo YT_DIR=${YT_DIR} >> ${CONFIG_FILE}
+    echo MPL_SUPP_LDFLAGS=${MPL_SUPP_LDFLAGS} >> ${CONFIG_FILE}
+    echo MPL_SUPP_CFLAGS=${MPL_SUPP_CFLAGS} >> ${CONFIG_FILE}
+    echo MPL_SUPP_CXXFLAGS=${MPL_SUPP_CXXFLAGS} >> ${CONFIG_FILE}
+    echo MAKE_PROCS=${MAKE_PROCS} >> ${CONFIG_FILE}
+    if [ ${HDF5_DIR} ]
+    then
+        echo ${HDF5_DIR} >> ${CONFIG_FILE}
+    fi
+    if [ ${NUMPY_ARGS} ]
+    then
+        echo ${NUMPY_ARGS} >> ${CONFIG_FILE}
+    fi
+}
+
+# Write config settings to file.
+CONFIG_FILE=${DEST_DIR}/.yt_config
+mkdir -p ${DEST_DIR}
+if [ -z ${REINST_YT} ] || [ ${REINST_YT} -neq 1 ]
+then
+    write_config
+elif [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -f ${CONFIG_FILE} ]
+then
+    USED_CONFIG=1
+    source ${CONFIG_FILE}
+fi
+
 function get_willwont
 {
     if [ $1 -eq 1 ]
@@ -170,6 +213,19 @@
         echo "   $ module load gcc"
         echo
     fi
+    if [ "${MYHOST##midway}" != "${MYHOST}" ]
+    then
+        echo "Looks like you're on Midway."
+        echo
+        echo " ******************************************"
+        echo " * It may be better to use the yt module! *"
+        echo " *                                        *"
+        echo " *   $ module load yt                     *"
+        echo " *                                        *"
+        echo " ******************************************"
+        echo
+        return
+    fi
     if [ "${MYOS##Darwin}" != "${MYOS}" ]
     then
         echo "Looks like you're running on Mac OSX."
@@ -181,7 +237,7 @@
 	echo "must register for an account on the apple developer tools"
 	echo "website: https://developer.apple.com/downloads to obtain the"
 	echo "download link."
-	echo 
+	echo
 	echo "We have gathered some additional instructions for each"
 	echo "version of OS X below. If you have trouble installing yt"
 	echo "after following these instructions, don't hesitate to contact"
@@ -192,15 +248,15 @@
 	echo "menu bar.  We're assuming that you've installed all operating"
 	echo "system updates; if you have an older version, we suggest"
 	echo "running software update and installing all available updates."
-	echo 
-        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the" 
+	echo
+        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
 	echo "Apple developer tools website."
         echo
         echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
 	echo "developer tools website.  You can either download the"
 	echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
-	echo "Software Update to update to XCode 3.2.6 or" 
-	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK" 
+	echo "Software Update to update to XCode 3.2.6 or"
+	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
 	echo "bundle (4.1 GB)."
         echo
         echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
@@ -208,20 +264,20 @@
         echo "Alternatively, download the Xcode command line tools from"
         echo "the Apple developer tools website."
         echo
-	echo "OS X 10.8.2: download Xcode 4.6 from the mac app store."
+	echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
 	echo "(search for Xcode)."
 	echo "Additionally, you will have to manually install the Xcode"
-	echo "command line tools, see:" 
+	echo "command line tools, see:"
 	echo "http://stackoverflow.com/questions/9353444"
 	echo "Alternatively, download the Xcode command line tools from"
 	echo "the Apple developer tools website."
 	echo
-        echo "NOTE: It's possible that the installation will fail, if so," 
-	echo "please set the following environment variables, remove any" 
+        echo "NOTE: It's possible that the installation will fail, if so,"
+	echo "please set the following environment variables, remove any"
 	echo "broken installation tree, and re-run this script verbatim."
         echo
-        echo "$ export CC=gcc-4.2"
-        echo "$ export CXX=g++-4.2"
+        echo "$ export CC=gcc"
+        echo "$ export CXX=g++"
 	echo
         OSX_VERSION=`sw_vers -productVersion`
         if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
@@ -278,7 +334,7 @@
         echo
         echo " INST_ZLIB=0"
         echo " INST_FTYPE=0"
-        echo 
+        echo
         echo " to avoid conflicts with other command-line programs "
         echo " (like eog and evince, for example)."
     fi
@@ -362,6 +418,10 @@
 get_willwont ${INST_0MQ}
 echo "be installing ZeroMQ"
 
+printf "%-15s = %s so I " "INST_ROCKSTAR" "${INST_ROCKSTAR}"
+get_willwont ${INST_0MQ}
+echo "be installing Rockstar"
+
 echo
 
 if [ -z "$HDF5_DIR" ]
@@ -383,6 +443,12 @@
 echo "hit Ctrl-C."
 echo
 host_specific
+if [ ${USED_CONFIG} ]
+then
+    echo "Settings were loaded from ${CONFIG_FILE}."
+    echo "Remove this file if you wish to return to the default settings."
+    echo
+fi
 echo "========================================================================"
 echo
 read -p "[hit enter] "
@@ -424,7 +490,7 @@
     cd ..
 }
 
-if type -P wget &>/dev/null 
+if type -P wget &>/dev/null
 then
     echo "Using wget"
     export GETFILE="wget -nv"
@@ -486,28 +552,27 @@
 cd ${DEST_DIR}/src
 
 # Now we dump all our SHA512 files out.
-
-echo 'eda1b8090e5e21e7e039ef4dd03de186a7b416df9d5a4e4422abeeb4d51383b9a6858e1ac4902d8e5010f661b295bbb2452c43c8738be668379b4eb4835d0f61  Cython-0.17.1.tar.gz' > Cython-0.17.1.tar.gz.sha512
-echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
-echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
-echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
+echo 'fb85d71bb4f80b35f0d0f1735c650dd75c5f84b05635ddf91d6241ff103b5a49158c5b851a20c15e05425f6dde32a4971b35fcbd7445f61865b4d61ffd1fbfa1  Cython-0.18.tar.gz' > Cython-0.18.tar.gz.sha512
+echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
+echo '3349152c47ed2b63c5c9aabcfa92b8497ea9d71ca551fd721e827fcb8f91ff9fbbee6bba8f8cb2dea185701b8798878b4b2435c1496b63d4b4a37c624a625299  Python-2.7.4.tgz' > Python-2.7.4.tgz.sha512
+echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo 'b3290c498191684781ca5286ab454eb1bd045e8d894f5b86fb86beb88f174e22ac3ab008fb02d6562051d9fa6a9593920cab433223f6d5473999913223b8e183  h5py-2.1.0.tar.gz' > h5py-2.1.0.tar.gz.sha512
+echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3  freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
+echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3  h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
 echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
-echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
-echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
-echo '91693ca5f34934956a7c2c98bb69a5648b2a5660afd2ecf4a05035c5420450d42c194eeef0606d7683e267e4eaaaab414df23f30b34c88219bdd5c1a0f1f66ed  mercurial-2.5.1.tar.gz' > mercurial-2.5.1.tar.gz.sha512
-echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
-echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
-echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
+echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554  libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
+echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208  matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
+echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2  mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
+echo 'a485daa556f6c76003de1dbb3e42b3daeee0a320c69c81b31a7d2ebbc2cf8ab8e96c214a4758e5e7bf814295dc1d6aa563092b714db7e719678d8462135861a8  numpy-1.7.0.tar.gz' > numpy-1.7.0.tar.gz.sha512
+echo '293d78d14a9347cb83e1a644e5f3e4447ed6fc21642c51683e5495dda08d2312194a73d1fc3c1d78287e33ed065aa251ecbaa7c0ea9189456c1702e96d78becd  sqlite-autoconf-3071601.tar.gz' > sqlite-autoconf-3071601.tar.gz.sha512
+echo 'b1c073ad26684e354f7c522c14655840592e03872bc0a94690f89cae2ff88f146fce1dad252ff27a889dac4a32ff9f8ab63ba940671f9da89e9ba3e19f1bf58d  zlib-1.2.7.tar.gz' > zlib-1.2.7.tar.gz.sha512
 echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
-echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
-echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
-echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
-echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
+echo 'b9d061ca49e54ea917e0aed2b2a48faef33061dbf6d17eae7f8c3fff0b35ca883e7324f6cb24bda542443f669dcd5748037a5f2309f4c359d68adef520894865  zeromq-3.2.2.tar.gz' > zeromq-3.2.2.tar.gz.sha512
+echo '852fce8a8308c4e1e4b19c77add2b2055ca2ba570b28e8364888df490af92b860c72e860adfb075b3405a9ceb62f343889f20a8711c9353a7d9059adee910f83  pyzmq-13.0.2.tar.gz' > pyzmq-13.0.2.tar.gz.sha512
+echo '303bd3fbea22be57fddf7df78ddf5a783d355a0c8071b1363250daafc20232ddd28eedc44aa1194f4a7afd82f9396628c5bb06819e02b065b6a1b1ae8a7c19e1  tornado-3.0.tar.gz' > tornado-3.0.tar.gz.sha512
+echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
-echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
+echo 'd67de9567256e6f1649e4f3f7dfee63371d5f00fd3fd4f92426198f862e97c57f70e827d19f4e5e1929ad85ef2ce7aa5a0596b101cafdac71672e97dc115b397  python-hglib-0.3.tar.gz' > python-hglib-0.3.tar.gz.sha512
 echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa  scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
@@ -515,50 +580,50 @@
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.5.12.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.4.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3070500.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.11.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
+[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.11.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3071601.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.12.1.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-3.2.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-13.0.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject tornado-3.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
-get_ytproject Python-2.7.3.tgz
-get_ytproject numpy-1.6.1.tar.gz
-get_ytproject matplotlib-1.2.0.tar.gz
-get_ytproject mercurial-2.5.1.tar.gz
+get_ytproject Python-2.7.4.tgz
+get_ytproject numpy-1.7.0.tar.gz
+get_ytproject matplotlib-1.2.1.tar.gz
+get_ytproject mercurial-2.5.4.tar.gz
 get_ytproject ipython-0.13.1.tar.gz
-get_ytproject h5py-2.1.0.tar.gz
-get_ytproject Cython-0.17.1.tar.gz
+get_ytproject h5py-2.1.2.tar.gz
+get_ytproject Cython-0.18.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject Forthon-0.8.10.tar.gz
-get_ytproject nose-1.2.1.tar.gz 
-get_ytproject python-hglib-0.2.tar.gz
+get_ytproject Forthon-0.8.11.tar.gz
+get_ytproject nose-1.2.1.tar.gz
+get_ytproject python-hglib-0.3.tar.gz
 get_ytproject sympy-0.7.2.tar.gz
 get_ytproject rockstar-0.99.6.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
-    if [ ! -e bzip2-1.0.5/done ]
+    if [ ! -e bzip2-1.0.6/done ]
     then
-        [ ! -e bzip2-1.0.5 ] && tar xfz bzip2-1.0.5.tar.gz
+        [ ! -e bzip2-1.0.6 ] && tar xfz bzip2-1.0.6.tar.gz
         echo "Installing BZLIB"
-        cd bzip2-1.0.5
-        if [ `uname` = "Darwin" ] 
+        cd bzip2-1.0.6
+        if [ `uname` = "Darwin" ]
         then
-            if [ -z "${CC}" ] 
+            if [ -z "${CC}" ]
             then
                 sed -i.bak 's/soname/install_name/' Makefile-libbz2_so
             else
-                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so 
+                sed -i.bak -e 's/soname/install_name/' -e "s|CC=gcc|CC=${CC}|" Makefile-libbz2_so
             fi
         fi
         ( make install CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make -f Makefile-libbz2_so CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( cp -v libbz2.so.1.0.4 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( cp -v libbz2.so.1.0.6 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -569,11 +634,11 @@
 
 if [ $INST_ZLIB -eq 1 ]
 then
-    if [ ! -e zlib-1.2.3/done ]
+    if [ ! -e zlib-1.2.7/done ]
     then
-        [ ! -e zlib-1.2.3 ] && tar xfj zlib-1.2.3.tar.bz2
+        [ ! -e zlib-1.2.7 ] && tar xfz zlib-1.2.7.tar.gz
         echo "Installing ZLIB"
-        cd zlib-1.2.3
+        cd zlib-1.2.7
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -587,11 +652,11 @@
 
 if [ $INST_PNG -eq 1 ]
 then
-    if [ ! -e libpng-1.5.12/done ]
+    if [ ! -e libpng-1.6.1/done ]
     then
-        [ ! -e libpng-1.5.12 ] && tar xfz libpng-1.5.12.tar.gz
+        [ ! -e libpng-1.6.1 ] && tar xfz libpng-1.6.1.tar.gz
         echo "Installing PNG"
-        cd libpng-1.5.12
+        cd libpng-1.6.1
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -605,11 +670,11 @@
 
 if [ $INST_FTYPE -eq 1 ]
 then
-    if [ ! -e freetype-2.4.4/done ]
+    if [ ! -e freetype-2.4.11/done ]
     then
-        [ ! -e freetype-2.4.4 ] && tar xfz freetype-2.4.4.tar.gz
+        [ ! -e freetype-2.4.11 ] && tar xfz freetype-2.4.11.tar.gz
         echo "Installing FreeType2"
-        cd freetype-2.4.4
+        cd freetype-2.4.11
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -642,11 +707,11 @@
 
 if [ $INST_SQLITE3 -eq 1 ]
 then
-    if [ ! -e sqlite-autoconf-3070500/done ]
+    if [ ! -e sqlite-autoconf-3071601/done ]
     then
-        [ ! -e sqlite-autoconf-3070500 ] && tar xfz sqlite-autoconf-3070500.tar.gz
+        [ ! -e sqlite-autoconf-3071601 ] && tar xfz sqlite-autoconf-3071601.tar.gz
         echo "Installing SQLite3"
-        cd sqlite-autoconf-3070500
+        cd sqlite-autoconf-3071601
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -655,11 +720,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.3/done ]
+if [ ! -e Python-2.7.4/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
-    cd Python-2.7.3
+    [ ! -e Python-2.7.4 ] && tar xfz Python-2.7.4.tgz
+    cd Python-2.7.4
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -674,12 +739,11 @@
 
 if [ $INST_HG -eq 1 ]
 then
-    echo "Installing Mercurial."
-    do_setup_py mercurial-2.5.1
+    do_setup_py mercurial-2.5.4
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
-    if type -P hg &>/dev/null 
+    if type -P hg &>/dev/null
     then
         export HG_EXEC=hg
     else
@@ -696,14 +760,14 @@
     elif [ -e $ORIG_PWD/../yt/mods.py ]
     then
         YT_DIR=`dirname $ORIG_PWD`
-    elif [ ! -e yt-3.0-hg ] 
+    elif [ ! -e yt-hg ]
     then
-        YT_DIR="$PWD/yt-3.0-hg/"
+        YT_DIR="$PWD/yt-hg/"
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
         # Recently the hg server has had some issues with timeouts.  In lieu of
         # a new webserver, we are now moving to a three-stage process.
         # First we clone the repo, but only up to r0.
-        ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-3.0/ ./yt-3.0-hg 2>&1 ) 1>> ${LOG_FILE}
+        ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
         # Now we update to the branch we're interested in.
         ( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
     elif [ -e yt-3.0-hg ] 
@@ -714,7 +778,7 @@
 fi
 
 # This fixes problems with gfortran linking.
-unset LDFLAGS 
+unset LDFLAGS
 
 echo "Installing distribute"
 ( ${DEST_DIR}/bin/python2.7 ${YT_DIR}/distribute_setup.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -724,7 +788,7 @@
 
 if [ $INST_SCIPY -eq 0 ]
 then
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
 else
     if [ ! -e scipy-0.11.0/done ]
     then
@@ -752,8 +816,8 @@
 	fi
     fi
     export BLAS=$PWD/BLAS/libfblas.a
-    export LAPACK=$PWD/lapack-3.4.2/liblapack.a    
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    export LAPACK=$PWD/lapack-3.4.2/liblapack.a
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
     do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
 fi
 
@@ -776,10 +840,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.2.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-do_setup_py matplotlib-1.2.0
+mkdir -p ${DEST_DIR}/src/matplotlib-1.2.1
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+do_setup_py matplotlib-1.2.1
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -791,29 +855,29 @@
 # Now we do our IPython installation, which has two optional dependencies.
 if [ $INST_0MQ -eq 1 ]
 then
-    if [ ! -e zeromq-2.2.0/done ]
+    if [ ! -e zeromq-3.2.2/done ]
     then
-        [ ! -e zeromq-2.2.0 ] && tar xfz zeromq-2.2.0.tar.gz
+        [ ! -e zeromq-3.2.2 ] && tar xfz zeromq-3.2.2.tar.gz
         echo "Installing ZeroMQ"
-        cd zeromq-2.2.0
+        cd zeromq-3.2.2
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
-    do_setup_py pyzmq-2.1.11 --zmq=${DEST_DIR}
-    do_setup_py tornado-2.2
+    do_setup_py pyzmq-13.0.2 --zmq=${DEST_DIR}
+    do_setup_py tornado-3.0
 fi
 
 do_setup_py ipython-0.13.1
-do_setup_py h5py-2.1.0
-do_setup_py Cython-0.17.1
-do_setup_py Forthon-0.8.10
+do_setup_py h5py-2.1.2
+do_setup_py Cython-0.18
+do_setup_py Forthon-0.8.11
 do_setup_py nose-1.2.1
-do_setup_py python-hglib-0.2
+do_setup_py python-hglib-0.3
 do_setup_py sympy-0.7.2
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
+[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.12.1
 
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]

diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,6 +18,9 @@
 from distutils.core import Command
 from distutils.spawn import find_executable
 
+def find_fortran_deps():
+    return (find_executable("Forthon"),
+            find_executable("gfortran"))
 
 class BuildForthon(Command):
 
@@ -41,9 +44,7 @@
     def run(self):
 
         """runner"""
-        Forthon_exe = find_executable("Forthon")
-        gfortran_exe = find_executable("gfortran")
-
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
         if None in (Forthon_exe, gfortran_exe):
             sys.stderr.write(
                 "fKDpy.so won't be built due to missing Forthon/gfortran\n"
@@ -193,9 +194,13 @@
 
 class my_install_data(np_install_data.install_data):
     def run(self):
-        self.distribution.data_files.append(
-            ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
-        )
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
+        if None in (Forthon_exe, gfortran_exe):
+            pass
+        else:
+            self.distribution.data_files.append(
+                ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
+                )
         np_install_data.install_data.run(self)
 
 class my_build_py(build_py):
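
The setup.py hunk above factors the executable probing into
find_fortran_deps so that BuildForthon and my_install_data apply the
same test: if either Forthon or gfortran is missing, the Fortran kD-tree
module is skipped rather than breaking the install.  A minimal sketch of
that gating pattern on its own (the message string is illustrative):

    from distutils.spawn import find_executable

    def find_fortran_deps():
        return (find_executable("Forthon"),
                find_executable("gfortran"))

    (Forthon_exe, gfortran_exe) = find_fortran_deps()
    if None in (Forthon_exe, gfortran_exe):
        # mirror setup.py: skip building/installing fKDpy.so
        print("fKDpy.so won't be built due to missing Forthon/gfortran")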

diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -117,3 +117,6 @@
 from .two_point_functions.api import \
     TwoPointFunctions, \
     FcnSet
+
+from .radmc3d_export.api import \
+    RadMC3DWriter

diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -108,6 +108,7 @@
         self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
 
         self.light_ray_solution = []
+        self.halo_lists = {}
         self._data = {}
 
         # Get list of datasets for light ray solution.
@@ -192,6 +193,7 @@
                        get_los_velocity=False,
                        get_nearest_halo=False,
                        nearest_halo_fields=None,
+                       halo_list_file=None,
                        halo_profiler_parameters=None,
                        njobs=1, dynamic=False):
         """
@@ -229,6 +231,10 @@
             A list of fields to be calculated for the halos nearest to
             every lixel in the ray.
             Default: None.
+        halo_list_file : str
+            Filename containing a list of halo properties to be used 
+            for getting the nearest halos to absorbers.
+            Default: None.
         halo_profiler_parameters: dict
             A dictionary of parameters to be passed to the HaloProfiler
             to create the appropriate data used to get properties for
@@ -287,7 +293,7 @@
         >>> # Make the profiles.
         >>> halo_profiler_actions.append({'function': make_profiles,
         ...                           'args': None,
-        ...                           'kwargs': {'filename': 'VirializedHalos.out'}})
+        ...                           'kwargs': {'filename': 'VirializedHalos.h5'}})
         ...
         >>> halo_list = 'filtered'
         >>> halo_profiler_parameters = dict(halo_profiler_kwargs=halo_profiler_kwargs,
@@ -305,6 +311,7 @@
         ...                   get_nearest_halo=True,
         ...                   nearest_halo_fields=['TotalMassMsun_100',
         ...                                        'RadiusMpc_100'],
+        ...                   halo_list_file='VirializedHalos.h5',
         ...                   halo_profiler_parameters=halo_profiler_parameters,
         ...                   get_los_velocity=True)
         
@@ -321,17 +328,18 @@
         # Initialize data structures.
         self._data = {}
         if fields is None: fields = []
-        all_fields = [field for field in fields]
+        data_fields = fields[:]
+        all_fields = fields[:]
         all_fields.extend(['dl', 'dredshift', 'redshift'])
         if get_nearest_halo:
             all_fields.extend(['x', 'y', 'z', 'nearest_halo'])
             all_fields.extend(['nearest_halo_%s' % field \
                                for field in nearest_halo_fields])
-            fields.extend(['x', 'y', 'z'])
+            data_fields.extend(['x', 'y', 'z'])
         if get_los_velocity:
             all_fields.extend(['x-velocity', 'y-velocity',
                                'z-velocity', 'los_velocity'])
-            fields.extend(['x-velocity', 'y-velocity', 'z-velocity'])
+            data_fields.extend(['x-velocity', 'y-velocity', 'z-velocity'])
 
         all_ray_storage = {}
         for my_storage, my_segment in parallel_objects(self.light_ray_solution,
@@ -348,10 +356,6 @@
                        (my_segment['redshift'], my_segment['start'],
                         my_segment['end']))
 
-            if get_nearest_halo:
-                halo_list = self._get_halo_list(my_segment['filename'],
-                                                **halo_profiler_parameters)
-
             # Load dataset for segment.
             pf = load(my_segment['filename'])
 
@@ -373,7 +377,7 @@
                                                  (sub_ray['dts'] *
                                                   vector_length(sub_segment[0],
                                                                 sub_segment[1]))])
-                for field in fields:
+                for field in data_fields:
                     sub_data[field] = np.concatenate([sub_data[field],
                                                       (sub_ray[field])])
 
@@ -400,6 +404,9 @@
 
             # Calculate distance to nearest object on halo list for each lixel.
             if get_nearest_halo:
+                halo_list = self._get_halo_list(pf, fields=nearest_halo_fields,
+                                                filename=halo_list_file,
+                                                **halo_profiler_parameters)
                 sub_data.update(self._get_nearest_halo_properties(sub_data, halo_list,
                                 fields=nearest_halo_fields))
                 sub_data['nearest_halo'] *= pf.units['mpccm']
@@ -434,58 +441,92 @@
         self._data = all_data
         return all_data
 
-    def _get_halo_list(self, dataset, halo_profiler_kwargs=None,
+    def _get_halo_list(self, pf, fields=None, filename=None, 
+                       halo_profiler_kwargs=None,
                        halo_profiler_actions=None, halo_list='all'):
-        "Load a list of halos for the dataset."
+        "Load a list of halos for the pf."
+
+        if str(pf) in self.halo_lists:
+            return self.halo_lists[str(pf)]
+
+        if fields is None: fields = []
+
+        if filename is not None and \
+                os.path.exists(os.path.join(pf.fullpath, filename)):
+
+            my_filename = os.path.join(pf.fullpath, filename)
+            mylog.info("Loading halo list from %s." % my_filename)
+            my_list = {}
+            in_file = h5py.File(my_filename, 'r')
+            for field in fields + ['center']:
+                my_list[field] = in_file[field][:]
+            in_file.close()
+
+        else:
+            my_list = self._halo_profiler_list(pf, fields=fields,
+                                               halo_profiler_kwargs=halo_profiler_kwargs,
+                                               halo_profiler_actions=halo_profiler_actions,
+                                               halo_list=halo_list)
+
+        self.halo_lists[str(pf)] = my_list
+        return self.halo_lists[str(pf)]
+
+    def _halo_profiler_list(self, pf, fields=None, 
+                            halo_profiler_kwargs=None,
+                            halo_profiler_actions=None, halo_list='all'):
+        "Run the HaloProfiler to get the halo list."
 
         if halo_profiler_kwargs is None: halo_profiler_kwargs = {}
         if halo_profiler_actions is None: halo_profiler_actions = []
 
-        hp = HaloProfiler(dataset, **halo_profiler_kwargs)
+        hp = HaloProfiler(pf, **halo_profiler_kwargs)
         for action in halo_profiler_actions:
             if not action.has_key('args'): action['args'] = ()
             if not action.has_key('kwargs'): action['kwargs'] = {}
             action['function'](hp, *action['args'], **action['kwargs'])
 
         if halo_list == 'all':
-            return_list = copy.deepcopy(hp.all_halos)
+            hp_list = copy.deepcopy(hp.all_halos)
         elif halo_list == 'filtered':
-            return_list = copy.deepcopy(hp.filtered_halos)
+            hp_list = copy.deepcopy(hp.filtered_halos)
         else:
             mylog.error("Keyword, halo_list, must be either 'all' or 'filtered'.")
-            return_list = None
+            hp_list = None
 
         del hp
+
+        # Create position array from halo list.
+        return_list = dict([(field, []) for field in fields + ['center']])
+        for halo in hp_list:
+            for field in fields + ['center']:
+                return_list[field].append(halo[field])
+        for field in fields + ['center']:
+            return_list[field] = np.array(return_list[field])
         return return_list
-
+        
     def _get_nearest_halo_properties(self, data, halo_list, fields=None):
         """
         Calculate distance to nearest object in halo list for each lixel in data.
-        Return list of distances and masses of nearest objects.
+        Return list of distances and other properties of nearest objects.
         """
 
         if fields is None: fields = []
+        field_data = dict([(field, np.zeros_like(data['x'])) \
+                           for field in fields])
+        nearest_distance = np.zeros_like(data['x'])
 
-        # Create position array from halo list.
-        halo_centers = np.array(map(lambda halo: halo['center'], halo_list))
-        halo_field_values = dict([(field, np.array(map(lambda halo: halo[field],
-                                                       halo_list))) \
-                                  for field in fields])
-
-        nearest_distance = np.zeros(data['x'].shape)
-        field_data = dict([(field, np.zeros(data['x'].shape)) \
-                           for field in fields])
-        for index in xrange(nearest_distance.size):
-            nearest = np.argmin(periodic_distance(np.array([data['x'][index],
-                                                            data['y'][index],
-                                                            data['z'][index]]),
-                                                  halo_centers))
-            nearest_distance[index] = periodic_distance(np.array([data['x'][index],
-                                                                  data['y'][index],
-                                                                  data['z'][index]]),
-                                                        halo_centers[nearest])
-            for field in fields:
-                field_data[field][index] = halo_field_values[field][nearest]
+        if halo_list['center'].size > 0:
+            for index in xrange(nearest_distance.size):
+                nearest = np.argmin(periodic_distance(np.array([data['x'][index],
+                                                                data['y'][index],
+                                                                data['z'][index]]),
+                                                      halo_list['center']))
+                nearest_distance[index] = periodic_distance(np.array([data['x'][index],
+                                                                      data['y'][index],
+                                                                      data['z'][index]]),
+                                                            halo_list['center'][nearest])
+                for field in fields:
+                    field_data[field][index] = halo_list[field][nearest]
 
         return_data = {'nearest_halo': nearest_distance}
         for field in fields:

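The new fast path in _get_halo_list reads pre-computed halo properties straight from an HDF5 file and caches the result per parameter file. A minimal standalone sketch of that read, assuming only that the file stores one array per property (the filename and field names below are taken from the docstring example above, not requirements):

    import h5py

    def load_halo_list(filename, fields):
        # Read each requested halo property, plus the halo centers, into arrays.
        halo_list = {}
        with h5py.File(filename, 'r') as f:
            for field in fields + ['center']:
                halo_list[field] = f[field][:]
        return halo_list

    # halos = load_halo_list('VirializedHalos.h5',
    #                        ['TotalMassMsun_100', 'RadiusMpc_100'])
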
diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -165,7 +165,7 @@
             com.append(c[i])
         com = np.array(com)
         c = (com * pm).sum(axis=1) / pm.sum()
-        return c % self.pf.domain_width + self.pf.domain_left_edge
+        return c%self.pf.domain_width + self.pf.domain_left_edge
 
     def maximum_density(self):
         r"""Return the HOP-identified maximum density. Not applicable to
@@ -1062,7 +1062,7 @@
     def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
-        *dm_only* is set, only run it on the dark matter particles, otherwise
+        *dm_only* is True (default), only run it on the dark matter particles, otherwise
         on all particles.  Returns an iterable collection of *HopGroup* items.
         """
         self._data_source = data_source
@@ -1097,7 +1097,7 @@
     def _get_dm_indices(self):
         if 'creation_time' in self._data_source.hierarchy.field_list:
             mylog.debug("Differentiating based on creation time")
-            return (self._data_source["creation_time"] < 0)
+            return (self._data_source["creation_time"] <= 0)
         elif 'particle_type' in self._data_source.hierarchy.field_list:
             mylog.debug("Differentiating based on particle type")
             return (self._data_source["particle_type"] == 1)
@@ -1367,6 +1367,7 @@
         self._groups = []
         self._max_dens = -1
         self.pf = pf
+        self.redshift = pf.current_redshift
         self.out_list = out_list
         self._data_source = pf.h.all_data()
         mylog.info("Parsing Rockstar halo list")
@@ -1457,7 +1458,7 @@
 class HOPHaloList(HaloList):
     """
     Run hop on *data_source* with a given density *threshold*.  If
-    *dm_only* is set, only run it on the dark matter particles, otherwise
+    *dm_only* is True (default), only run it on the dark matter particles, otherwise
     on all particles.  Returns an iterable collection of *HopGroup* items.
     """
     _name = "HOP"
@@ -1656,7 +1657,7 @@
 class parallelHOPHaloList(HaloList, ParallelAnalysisInterface):
     """
     Run hop on *data_source* with a given density *threshold*.  If
-    *dm_only* is set, only run it on the dark matter particles, otherwise
+    *dm_only* is True (default), only run it on the dark matter particles, otherwise
     on all particles.  Returns an iterable collection of *HopGroup* items.
     """
     _name = "parallelHOP"
@@ -2008,13 +2009,11 @@
         --------
         >>> halos.write_out("HopAnalysis.out")
         """
-        # if path denoted in filename, assure path exists
-        if len(filename.split('/')) > 1:
-            mkdir_rec('/'.join(filename.split('/')[:-1]))
-
+        ensure_dir_exists(filename)
         f = self.comm.write_on_root(filename)
         HaloList.write_out(self, f, ellipsoid_data)
 
+
     def write_particle_lists_txt(self, prefix):
         r"""Write out the names of the HDF5 files containing halo particle data
         to a text file.
@@ -2031,13 +2030,11 @@
         --------
         >>> halos.write_particle_lists_txt("halo-parts")
         """
-        # if path denoted in prefix, assure path exists
-        if len(prefix.split('/')) > 1:
-            mkdir_rec('/'.join(prefix.split('/')[:-1]))
-
+        ensure_dir_exists(prefix)
         f = self.comm.write_on_root("%s.txt" % prefix)
         HaloList.write_particle_lists_txt(self, prefix, fp=f)
 
+
     @parallel_blocking_call
     def write_particle_lists(self, prefix):
         r"""Write out the particle data for halos to HDF5 files.
@@ -2058,10 +2055,7 @@
         --------
         >>> halos.write_particle_lists("halo-parts")
         """
-        # if path denoted in prefix, assure path exists
-        if len(prefix.split('/')) > 1:
-            mkdir_rec('/'.join(prefix.split('/')[:-1]))
-
+        ensure_dir_exists(prefix)
         fn = "%s.h5" % self.comm.get_filename(prefix)
         f = h5py.File(fn, "w")
         for halo in self._groups:
@@ -2090,15 +2084,12 @@
         ellipsoid_data : bool.
             Whether to save the ellipsoidal information to the files.
             Default = False.
-        
+
         Examples
         --------
         >>> halos.dump("MyHalos")
         """
-        # if path denoted in basename, assure path exists
-        if len(basename.split('/')) > 1:
-            mkdir_rec('/'.join(basename.split('/')[:-1]))
-
+        ensure_dir_exists(basename)
         self.write_out("%s.out" % basename, ellipsoid_data)
         self.write_particle_lists(basename)
         self.write_particle_lists_txt(basename)
@@ -2131,7 +2122,7 @@
         The density threshold used when building halos. Default = 160.0.
     dm_only : bool
         If True, only dark matter particles are used when building halos.
-        Default = False.
+        Default = True.
     resize : bool
         Turns load-balancing on or off. Default = True.
     kdtree : string
@@ -2460,7 +2451,7 @@
         The density threshold used when building halos. Default = 160.0.
     dm_only : bool
         If True, only dark matter particles are used when building halos.
-        Default = False.
+        Default = True.
     padding : float
         When run in parallel, the finder needs to surround each subvolume
         with duplicated particles for halo finidng to work. This number
@@ -2565,7 +2556,7 @@
         applied.  Default = 0.2.
     dm_only : bool
         If True, only dark matter particles are used when building halos.
-        Default = False.
+        Default = True.
     padding : float
         When run in parallel, the finder needs to surround each subvolume
        with duplicated particles for halo finding to work. This number

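The three copies of the mkdir_rec string-splitting logic above collapse into a single ensure_dir_exists helper. Its definition is not part of this diff; a minimal sketch of what such a helper presumably does:

    import os

    def ensure_dir_exists(filename):
        # Create the parent directory of filename if it does not already exist.
        dirname = os.path.dirname(filename)
        if dirname and not os.path.isdir(dirname):
            os.makedirs(dirname)
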
diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -238,6 +238,7 @@
         tpf = ts[0]
 
         def _particle_count(field, data):
+            if data.NumberOfParticles == 0: return 0
             try:
                 data["particle_type"]
                 has_particle_type=True
@@ -337,6 +338,8 @@
                     hires_only = (self.hires_dm_mass is not None),
                     **kwargs)
         # Make the directory to store the halo lists in.
+        if not self.outbase:
+            self.outbase = os.getcwd()
         if self.comm.rank == 0:
             if not os.path.exists(self.outbase):
                 os.makedirs(self.outbase)

diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -217,7 +217,7 @@
             dis[self.num_sigma_bins-i-3] += dis[self.num_sigma_bins-i-2]
             if i == (self.num_sigma_bins - 3): break
 
-        self.dis = dis  / self.pf['CosmologyComovingBoxSize']**3.0 * self.hubble0**3.0
+        self.dis = dis  / (self.pf.domain_width * self.pf.units["mpccm"]).prod()
 
     def sigmaM(self):
         """

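The new normalization derives the comoving box volume directly from the domain width instead of the CosmologyComovingBoxSize parameter and hubble0. A quick sketch of the arithmetic with made-up numbers (1 code unit = 128 comoving Mpc here is purely illustrative):

    import numpy as np

    domain_width = np.array([1.0, 1.0, 1.0])   # box size in code units
    mpccm = 128.0                              # hypothetical code-unit -> comoving Mpc factor

    volume = (domain_width * mpccm).prod()     # 128**3 comoving Mpc**3
    print volume
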
diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -143,7 +143,7 @@
         Note that this is not a string, so no quotes. Default = HaloFinder.
     halo_finder_threshold : Float
         If using HaloFinder or parallelHF, the value of the density threshold
-        used when halo finding. Default = 80.0.
+        used when halo finding. Default = 160.0.
     FOF_link_length : Float
         If using FOFHaloFinder, the linking length between particles.
         Default = 0.2.
@@ -169,7 +169,7 @@
     ... halo_finder_function=parallelHF)
     """
     def __init__(self, restart_files=[], database='halos.db',
-            halo_finder_function=HaloFinder, halo_finder_threshold=80.0,
+            halo_finder_function=HaloFinder, halo_finder_threshold=160.0,
             FOF_link_length=0.2, dm_only=False, refresh=False,
             index=True):
         ParallelAnalysisInterface.__init__(self)

diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/analysis_modules/halo_profiler/halo_filters.py
--- a/yt/analysis_modules/halo_profiler/halo_filters.py
+++ b/yt/analysis_modules/halo_profiler/halo_filters.py
@@ -105,7 +105,8 @@
 
     if use_log:
         for field in temp_profile.keys():
-            temp_profile[field] = np.log10(temp_profile[field])
+            temp_profile[field] = np.log10(np.clip(temp_profile[field], 1e-90, 
+                                                   max(temp_profile[field])))
 
     virial = dict((field, 0.0) for field in fields)
 

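The clip above guards against profile bins that are exactly zero (or negative from roundoff), which np.log10 would otherwise map to -inf or nan and poison the later interpolation. A quick illustration:

    import numpy as np

    profile = np.array([0.0, 1e-30, 2.5])
    print np.log10(profile)                                # [-inf -30.  0.39794001], with a warning
    print np.log10(np.clip(profile, 1e-90, max(profile)))  # [-90. -30.  0.39794001]
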
diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import gc
 import numpy as np
 import os
 import h5py
@@ -586,7 +587,7 @@
 
             r_min = 2 * self.pf.h.get_smallest_dx() * self.pf['mpc']
             if (halo['r_max'] / r_min < PROFILE_RADIUS_THRESHOLD):
-                mylog.error("Skipping halo with r_max / r_min = %f." % (halo['r_max']/r_min))
+                mylog.debug("Skipping halo with r_max / r_min = %f." % (halo['r_max']/r_min))
                 return None
 
             # get a sphere object to profile
@@ -632,6 +633,10 @@
                 g.clear_data()
             sphere.clear_data()
             del sphere
+            # Currently, this seems to be the only way to prevent large 
+            # halo profiling runs from running out of RAM.
+            # It would be good to track down the real cause at some point.
+            gc.collect()
 
         return profile
 

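The explicit gc.collect() is the usual workaround when per-iteration temporaries participate in reference cycles that CPython's collector does not reclaim promptly. A hedged sketch of the pattern (the per-halo work here is a stand-in):

    import gc

    def profile_one(halo):
        # Stand-in for the real profiling, which builds large temporary structures.
        return [halo] * 100000

    def profile_all(halos):
        for halo in halos:
            profile_one(halo)
            # Force a collection each iteration so cyclic garbage is freed now,
            # instead of accumulating across a large run.
            gc.collect()

    profile_all(range(10))
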
diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/analysis_modules/halo_profiler/standard_analysis.py
--- a/yt/analysis_modules/halo_profiler/standard_analysis.py
+++ b/yt/analysis_modules/halo_profiler/standard_analysis.py
@@ -67,8 +67,10 @@
         self.prof = prof
 
     def plot_everything(self, dirname = None):
-        if dirname is None: dirname = "%s_profile_plots/" % (self.pf)
-        if not os.path.isdir(dirname): os.makedirs(dirname)
+        if not dirname:
+            dirname = "%s_profile_plots/" % (self.pf)
+        if not os.path.isdir(dirname):
+            os.makedirs(dirname)
         import matplotlib; matplotlib.use("Agg")
         import pylab
         for field in self.prof.keys():

diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- /dev/null
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -0,0 +1,334 @@
+"""
+Code to export from yt to RadMC3D
+
+Author: Andrew Myers <atmyers2 at gmail.com>
+Affiliation: UCB
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Andrew Myers.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from yt.mods import *
+from yt.utilities.lib.write_array import \
+    write_3D_array, write_3D_vector_array
+
+class RadMC3DLayer:
+    '''
+
+    This class represents an AMR "layer" of the style described in
+    the radmc3d manual. Unlike yt grids, layers may not have more
+    than one parent, so level L grids will need to be split up
+    if they straddle two or more level L - 1 grids. 
+
+    '''
+    def __init__(self, level, parent, unique_id, LE, RE, dim):
+        self.level = level
+        self.parent = parent
+        self.LeftEdge = LE
+        self.RightEdge = RE
+        self.ActiveDimensions = dim
+        self.id = unique_id
+
+    def get_overlap_with(self, grid):
+        '''
+
+        Returns the overlapping region between two Layers,
+        or a layer and a grid. RE < LE in any direction
+        means no overlap.
+
+        '''
+        LE = np.maximum(self.LeftEdge,  grid.LeftEdge)
+        RE = np.minimum(self.RightEdge, grid.RightEdge)
+        return LE, RE
+
+    def overlaps(self, grid):
+        '''
+
+        Returns whether or not this layer overlaps a given grid
+        
+        '''
+        LE, RE = self.get_overlap_with(grid)
+        if np.any(RE <= LE):
+            return False
+        else:
+            return True
+
+class RadMC3DWriter:
+    '''
+
+    This class provides a mechanism for writing out data files in a format
+    readable by radmc3d. Currently, only the ASCII, "Layer" style file format
+    is supported. For more information please see the radmc3d manual at:
+    http://www.ita.uni-heidelberg.de/~dullemond/software/radmc-3d
+
+    Parameters
+    ----------
+
+    pf : `StaticOutput`
+        This is the parameter file object corresponding to the
+        simulation output to be written out.
+
+    max_level : int
+        An int corresponding to the maximum number of levels of refinement
+        to include in the output. Often, this does not need to be very large
+        as information on very high levels is frequently unobservable.
+        Default = 2. 
+
+    Examples
+    --------
+
+    This will create a field called "DustDensity" and write it out to the
+    file "dust_density.inp" in a form readable by radmc3d. It will also write
+    a "dust_temperature.inp" file with everything set to 10.0 K: 
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.radmc3d_export.api import *
+
+    >>> dust_to_gas = 0.01
+    >>> def _DustDensity(field, data):
+    ...     return dust_to_gas*data["Density"]
+    >>> add_field("DustDensity", function=_DustDensity)
+
+    >>> def _DustTemperature(field, data):
+    ...     return 10.0*data["Ones"]
+    >>> add_field("DustTemperature", function=_DustTemperature)
+    
+    >>> pf = load("galaxy0030/galaxy0030")
+    >>> writer = RadMC3DWriter(pf)
+    
+    >>> writer.write_amr_grid()
+    >>> writer.write_dust_file("DustDensity", "dust_density.inp")
+    >>> writer.write_dust_file("DustTemperature", "dust_temperature.inp")
+
+    This will create a field called "NumberDensityCO" and write it out to
+    the file "numberdens_co.inp". It will also write out information about
+    the gas velocity to "gas_velocity.inp" so that this broadening may be
+    included in the radiative transfer calculation by radmc3d:
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.radmc3d_export.api import *
+
+    >>> x_co = 1.0e-4
+    >>> mu_h = 2.34e-24
+    >>> def _NumberDensityCO(field, data):
+    ...     return (x_co/mu_h)*data["Density"]
+    >>> add_field("NumberDensityCO", function=_NumberDensityCO)
+    
+    >>> pf = load("galaxy0030/galaxy0030")
+    >>> writer = RadMC3DWriter(pf)
+    
+    >>> writer.write_amr_grid()
+    >>> writer.write_line_file("NumberDensityCO", "numberdens_co.inp")
+    >>> velocity_fields = ["x-velocity", "y-velocity", "z-velocity"]
+    >>> writer.write_line_file(velocity_fields, "gas_velocity.inp") 
+
+    '''
+
+    def __init__(self, pf, max_level=2):
+        self.max_level = max_level
+        self.cell_count = 0 
+        self.layers = []
+        self.domain_dimensions = pf.domain_dimensions
+        self.domain_left_edge  = pf.domain_left_edge
+        self.domain_right_edge = pf.domain_right_edge
+        self.grid_filename = "amr_grid.inp"
+        self.pf = pf
+
+        base_layer = RadMC3DLayer(0, None, 0, \
+                                  self.domain_left_edge, \
+                                  self.domain_right_edge, \
+                                  self.domain_dimensions)
+
+        self.layers.append(base_layer)
+        self.cell_count += np.product(pf.domain_dimensions)
+
+        for grid in pf.h.grids:
+            if grid.Level <= self.max_level:
+                self._add_grid_to_layers(grid)
+
+    def _get_parents(self, grid):
+        parents = []  
+        for potential_parent in self.layers:
+            if potential_parent.level == grid.Level - 1:
+                if potential_parent.overlaps(grid):
+                    parents.append(potential_parent)
+        return parents
+
+    def _add_grid_to_layers(self, grid):
+        parents = self._get_parents(grid)
+        for parent in parents:
+            LE, RE = parent.get_overlap_with(grid)
+            N = (RE - LE) / grid.dds
+            N = np.array([int(n + 0.5) for n in N])
+            new_layer = RadMC3DLayer(grid.Level, parent.id, \
+                                     len(self.layers), \
+                                     LE, RE, N)
+            self.layers.append(new_layer)
+            self.cell_count += np.product(N)
+            
+    def write_amr_grid(self):
+        '''
+        This routine writes the "amr_grid.inp" file that describes the mesh
+        radmc3d will use.
+
+        '''
+        dims = self.domain_dimensions
+        LE   = self.domain_left_edge
+        RE   = self.domain_right_edge
+
+        # calculate cell wall positions
+        xs = [str(x) for x in np.linspace(LE[0], RE[0], dims[0]+1)]
+        ys = [str(y) for y in np.linspace(LE[1], RE[1], dims[1]+1)]
+        zs = [str(z) for z in np.linspace(LE[2], RE[2], dims[2]+1)]
+
+        # write file header
+        grid_file = open(self.grid_filename, 'w')
+        grid_file.write('1 \n') # iformat is always 1
+        if self.max_level == 0:
+            grid_file.write('0 \n')
+        else:
+            grid_file.write('10 \n') # only layer-style AMR files are supported
+        grid_file.write('1 \n') # only cartesian coordinates are supported
+        grid_file.write('0 \n') 
+        grid_file.write('{}    {}    {} \n'.format(1, 1, 1)) # assume 3D
+        grid_file.write('{}    {}    {} \n'.format(dims[0], dims[1], dims[2]))
+        if self.max_level != 0:
+            s = str(self.max_level) + '    ' + str(len(self.layers)-1) + '\n'
+            grid_file.write(s)
+
+        # write base grid cell wall positions
+        for x in xs:
+            grid_file.write(x + '    ')
+        grid_file.write('\n')
+
+        for y in ys:
+            grid_file.write(y + '    ')
+        grid_file.write('\n')
+
+        for z in zs:
+            grid_file.write(z + '    ')
+        grid_file.write('\n')
+
+        # write information about fine layers, skipping the base layer:
+        for layer in self.layers[1:]:
+            p = layer.parent
+            dds = (layer.RightEdge - layer.LeftEdge) / (layer.ActiveDimensions)
+            if p == 0:
+                ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
+            else:
+                LE = np.zeros(3)
+                for potential_parent in self.layers:
+                    if potential_parent.id == p:
+                        LE = potential_parent.LeftEdge
+                ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
+            ix  = int(ind[0]+0.5)
+            iy  = int(ind[1]+0.5)
+            iz  = int(ind[2]+0.5)
+            nx, ny, nz = layer.ActiveDimensions / 2
+            s = '{}    {}    {}    {}    {}    {}    {} \n'
+            s = s.format(p, ix, iy, iz, nx, ny, nz)
+            grid_file.write(s)
+
+        grid_file.close()
+
+    def _write_layer_data_to_file(self, fhandle, field, level, LE, dim):
+        cg = self.pf.h.covering_grid(level, LE, dim, num_ghost_zones=1)
+        if isinstance(field, list):
+            data_x = cg[field[0]]
+            data_y = cg[field[1]]
+            data_z = cg[field[2]]
+            write_3D_vector_array(data_x, data_y, data_z, fhandle)
+        else:
+            data = cg[field]
+            write_3D_array(data, fhandle)
+
+    def write_dust_file(self, field, filename):
+        '''
+        This method writes out fields in the format radmc3d needs to compute
+        thermal dust emission. In particular, if you have a field called
+        "DustDensity", you can write out a dust_density.inp file.
+
+        Parameters
+        ----------
+
+        field : string
+            The name of the field to be written out
+        filename : string
+            The name of the file to write the data to. The filenames radmc3d
+            expects for its various modes of operations are described in the
+            radmc3d manual.
+
+        '''
+        fhandle = open(filename, 'w')
+
+        # write header
+        fhandle.write('1 \n')
+        fhandle.write(str(self.cell_count) + ' \n')
+        fhandle.write('1 \n')
+
+        # now write fine layers:
+        for layer in self.layers:
+            lev = layer.level
+            if lev == 0:
+                LE = self.domain_left_edge
+                N  = self.domain_dimensions
+            else:
+                LE = layer.LeftEdge
+                N  = layer.ActiveDimensions
+
+            self._write_layer_data_to_file(fhandle, field, lev, LE, N)
+            
+        fhandle.close()
+
+    def write_line_file(self, field, filename):
+        '''
+        This method writes out fields in the format radmc3d needs to compute
+        line emission.
+
+        Parameters
+        ----------
+
+        field : string or list of 3 strings
+            If a string, the name of the field to be written out. If a list,
+            three fields that will be written to the file as a vector quantity.
+        filename : string
+            The name of the file to write the data to. The filenames radmc3d
+            expects for its various modes of operation are described in the
+            radmc3d manual.
+
+        '''
+        fhandle = open(filename, 'w')
+
+        # write header
+        fhandle.write('1 \n')
+        fhandle.write(str(self.cell_count) + ' \n')
+
+        # now write fine layers:
+        for layer in self.layers:
+            lev = layer.level
+            if lev == 0:
+                LE = self.domain_left_edge
+                N  = self.domain_dimensions
+            else:
+                LE = layer.LeftEdge
+                N  = layer.ActiveDimensions
+
+            self._write_layer_data_to_file(fhandle, field, lev, LE, N)
+
+        fhandle.close()

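The layer bookkeeping in RadMC3DLayer reduces to axis-aligned box intersection: take the maximum of the left edges and the minimum of the right edges, and the regions are disjoint whenever that leaves RE <= LE on any axis. A standalone sketch:

    import numpy as np

    def box_overlap(le1, re1, le2, re2):
        # Overlapping region of two axis-aligned boxes; empty if RE <= LE on any axis.
        LE = np.maximum(le1, le2)
        RE = np.minimum(re1, re2)
        return LE, RE, bool(np.all(RE > LE))

    LE, RE, overlaps = box_overlap(np.zeros(3), np.ones(3),
                                   np.array([0.5] * 3), np.array([2.0] * 3))
    print LE, RE, overlaps   # [ 0.5  0.5  0.5] [ 1.  1.  1.] True
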
diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/analysis_modules/radmc3d_export/api.py
--- /dev/null
+++ b/yt/analysis_modules/radmc3d_export/api.py
@@ -0,0 +1,30 @@
+"""
+API for RadMC3D Export code
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Author: Andrew Myers <atmyers2 at gmail.com>
+Affiliation: UCB
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .RadMC3DInterface import \
+    RadMC3DWriter

diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -20,4 +20,5 @@
     config.add_subpackage("spectral_integrator")
     config.add_subpackage("star_analysis")
     config.add_subpackage("two_point_functions")
+    config.add_subpackage("radmc3d_export")
     return config

diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,7 +62,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold006',
+    gold_standard_filename = 'gold008',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None',
     thread_field_detection = 'False'

diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -33,6 +33,8 @@
 import shelve
 from exceptions import ValueError, KeyError
 from functools import wraps
+import fileinput
+from re import finditer
 
 from yt.funcs import *
 from yt.utilities.logger import ytLogger
@@ -779,6 +781,240 @@
         return march_cubes_grid_flux(self.field_value, vals, xv, yv, zv,
                     ff, mask, grid.LeftEdge, grid.dds)
 
+    @property
+    def triangles(self):
+        if self.vertices is None:
+            self.get_data()
+        vv = np.empty((self.vertices.shape[1]/3, 3, 3), dtype="float64")
+        for i in range(3):
+            for j in range(3):
+                vv[:,i,j] = self.vertices[j,i::3]
+        return vv
+ 
+    def export_obj(self, filename, transparency = 1.0, dist_fac = None,
+                   color_field = None, emit_field = None, color_map = "algae", 
+                   color_log = True, emit_log = True, plot_index = None, 
+                   color_field_max = None, color_field_min = None, 
+                   emit_field_max = None, emit_field_min = None):
+        r"""This exports the surface to the OBJ format, suitable for visualization
+        in many different programs (e.g., Blender).  NOTE: this exports an .obj file 
+        and an .mtl file, both with the general 'filename' as a prefix.  
+        The .obj file points to the .mtl file in its header, so if you move the two 
+        files, make sure you update the .obj header to account for this. ALSO NOTE: 
+        the emit_field needs to be a combination of the other two fields used, so 
+        that the emissivity tracks with the color.
+
+        Parameters
+        ----------
+        filename : string
+            The file this will be exported to.  This cannot be a file-like object.
+            Note - there are no file extensions included - both obj & mtl files 
+            are created.
+        transparency : float
+            This gives the transparency of the output surface plot.  Values
+            from 0.0 (invisible) to 1.0 (opaque).
+        dist_fac : float
+            Divide the axes distances by this amount.
+        color_field : string
+            Should a field be sampled and colormapped?
+        emit_field : string
+            Should we track the emissivity of a field?
+              NOTE: this should be a combination of the other 2 fields being used.
+        color_map : string
+            Which color map should be applied?
+        color_log : bool
+            Should the color field be logged before being mapped?
+        emit_log : bool
+            Should the emitting field be logged before being mapped?
+        plot_index : integer
+            Index of plot for multiple plots.  If None, then only one plot.
+        color_field_max : float
+            Maximum value of the color field across all surfaces.
+        color_field_min : float
+            Minimum value of the color field across all surfaces.
+        emit_field_max : float
+            Maximum value of the emitting field across all surfaces.
+        emit_field_min : float
+            Minimum value of the emitting field across all surfaces.
+
+        Examples
+        --------
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> trans = 1.0
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> surf = pf.h.surface(sp, "Density", 5e-27)
+        >>> surf.export_obj("my_galaxy", transparency=trans, dist_fac = distf)
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> mi, ma = sp.quantities['Extrema']('Temperature')[0]
+        >>> rhos = [1e-24, 1e-25]
+        >>> trans = [0.5, 1.0]
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> for i, r in enumerate(rhos):
+        ...     surf = pf.h.surface(sp,'Density',r)
+        ...     surf.export_obj("my_galaxy", transparency=trans[i], 
+        ...                      color_field='Temperature', dist_fac = distf, 
+        ...                      plot_index = i, color_field_max = ma, 
+        ...                      color_field_min = mi)
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> rhos = [1e-24, 1e-25]
+        >>> trans = [0.5, 1.0]
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> def _Emissivity(field, data):
+        ...     return (data['Density']*data['Density']*np.sqrt(data['Temperature']))
+        >>> add_field("Emissivity", function=_Emissivity, units=r"\rm{g K}/\rm{cm}^{6}")
+        >>> for i, r in enumerate(rhos):
+        ...     surf = pf.h.surface(sp,'Density',r)
+        ...     surf.export_obj("my_galaxy", transparency=trans[i], 
+        ...                      color_field='Temperature', emit_field = 'Emissivity', 
+        ...                      dist_fac = distf, plot_index = i)
+
+        """
+        if self.vertices is None:
+            self.get_data(color_field,"face")
+        elif color_field is not None:
+            if color_field not in self.field_data:
+                self[color_field]
+        if emit_field is not None:
+            if emit_field not in self.field_data:
+                self[emit_field]
+        only_on_root(self._export_obj, filename, transparency, dist_fac, color_field, emit_field, 
+                             color_map, color_log, emit_log, plot_index, color_field_max, 
+                             color_field_min, emit_field_max, emit_field_min)
+
+    def _color_samples_obj(self, cs, em, color_log, emit_log, color_map, arr, 
+                           color_field_max, color_field_min, 
+                           emit_field_max, emit_field_min): # this now holds for obj files
+        if color_log: cs = np.log10(cs)
+        if emit_log: em = np.log10(em)
+        if color_field_min is None:
+            mi = cs.min()
+        else:
+            mi = color_field_min
+            if color_log: mi = np.log10(mi)
+        if color_field_max is None:
+            ma = cs.max()
+        else:
+            ma = color_field_max
+            if color_log: ma = np.log10(ma)
+        cs = (cs - mi) / (ma - mi)
+        # to get color indices for OBJ formatting
+        from yt.visualization._colormap_data import color_map_luts
+        lut = color_map_luts[color_map]
+        x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
+        arr["cind"][:] = (np.interp(cs,x,x)*(lut[0].shape[0]-1)).astype("uint8")
+        # now, get emission
+        if emit_field_min is None:
+            emi = em.min()
+        else:
+            emi = emit_field_min
+            if emit_log: emi = np.log10(emi)
+        if emit_field_max is None:
+            ema = em.max()
+        else:
+            ema = emit_field_max
+            if emit_log: ema = np.log10(ema)
+        em = (em - emi)/(ema - emi)
+        x = np.mgrid[0.0:255.0:2j] # assume 1 emissivity per color
+        arr["emit"][:] = (np.interp(em,x,x))*2.0 # for some reason, max emiss = 2
+
+    @parallel_root_only
+    def _export_obj(self, filename, transparency, dist_fac = None, 
+                    color_field = None, emit_field = None, color_map = "algae", 
+                    color_log = True, emit_log = True, plot_index = None, 
+                    color_field_max = None, color_field_min = None, 
+                    emit_field_max = None, emit_field_min = None):
+        if plot_index is None:
+            plot_index = 0
+        if isinstance(filename, file):
+            fobj = filename + '.obj'
+            fmtl = filename + '.mtl'
+        else:
+            if plot_index == 0:
+                fobj = open(filename + '.obj', "w")
+                fmtl = open(filename + '.mtl', 'w')
+                cc = 1
+            else:
+                # read in last vertex
+                linesave = ''
+                for line in fileinput.input(filename + '.obj'):
+                    if line[0] == 'f':
+                        linesave = line
+                p = [m.start() for m in finditer(' ', linesave)]
+                cc = int(linesave[p[len(p)-1]:])+1
+                fobj = open(filename + '.obj', "a")
+                fmtl = open(filename + '.mtl', 'a')
+        ftype = [("cind", "uint8"), ("emit", "float")]
+        vtype = [("x","float"),("y","float"), ("z","float")]
+        if plot_index == 0:
+            fobj.write("# yt OBJ file\n")
+            fobj.write("# www.yt-project.com\n")
+            fobj.write("mtllib " + filename + '.mtl\n\n')  # use this material file for the faces
+            fmtl.write("# yt MLT file\n")
+            fmtl.write("# www.yt-project.com\n\n")
+        #(0) formulate vertices
+        nv = self.vertices.shape[1] # number of groups of vertices
+        f = np.empty(nv/self.vertices.shape[0], dtype=ftype) # store sets of face colors
+        v = np.empty(nv, dtype=vtype) # stores vertices
+        if color_field is not None:
+            cs = self[color_field]
+        else:
+            cs = np.empty(self.vertices.shape[1]/self.vertices.shape[0])
+        if emit_field is not None:
+            em = self[emit_field]
+        else:
+            em = np.empty(self.vertices.shape[1]/self.vertices.shape[0])            
+        self._color_samples_obj(cs, em, color_log, emit_log, color_map, f, 
+                                color_field_max, color_field_min, 
+                                emit_field_max, emit_field_min) # map color values to color scheme
+        from yt.visualization._colormap_data import color_map_luts # import colors for mtl file
+        lut = color_map_luts[color_map] # enumerate colors
+        # interpolate emissivity to enumerated colors
+        emiss = np.interp(np.mgrid[0:lut[0].shape[0]],np.mgrid[0:len(cs)],f["emit"][:])
+        if dist_fac is None: # then normalize by bounds
+            DLE = self.pf.domain_left_edge
+            DRE = self.pf.domain_right_edge
+            bounds = [(DLE[i], DRE[i]) for i in range(3)]
+            for i, ax in enumerate("xyz"):
+                # Do the bounds first since we cast to f32
+                tmp = self.vertices[i,:]
+                np.subtract(tmp, bounds[i][0], tmp)
+                w = bounds[i][1] - bounds[i][0]
+                np.divide(tmp, w, tmp)
+                np.subtract(tmp, 0.5, tmp) # Center at origin.
+                v[ax][:] = tmp   
+        else:
+            for i, ax in enumerate("xyz"):
+                tmp = self.vertices[i,:]
+                np.divide(tmp, dist_fac, tmp)
+                v[ax][:] = tmp
+        #(1) write all colors per surface to mtl file
+        for i in range(0,lut[0].shape[0]): 
+            omname = "material_" + str(i) + '_' + str(plot_index)  # name of the material
+            fmtl.write("newmtl " + omname +'\n') # the specific material (color) for this face
+            fmtl.write("Ka %.6f %.6f %.6f\n" %(0.0, 0.0, 0.0)) # ambient color, keep off
+            fmtl.write("Kd %.6f %.6f %.6f\n" %(lut[0][i], lut[1][i], lut[2][i])) # color of face
+            fmtl.write("Ks %.6f %.6f %.6f\n" %(0.0, 0.0, 0.0)) # specular color, keep off
+            fmtl.write("d %.6f\n" %(transparency))  # transparency
+            fmtl.write("em %.6f\n" %(emiss[i])) # emissivity per color
+            fmtl.write("illum 2\n") # not relevant, 2 means highlights on?
+            fmtl.write("Ns %.6f\n\n" %(0.0)) #keep off, some other specular thing
+        #(2) write vertices
+        for i in range(0,self.vertices.shape[1]):
+            fobj.write("v %.6f %.6f %.6f\n" %(v["x"][i], v["y"][i], v["z"][i]))    
+        fobj.write("#done defining vertices\n\n")
+        #(3) define faces and materials for each face
+        for i in range(0,self.triangles.shape[0]):
+            omname = 'material_' + str(f["cind"][i]) + '_' + str(plot_index) # which color to use
+            fobj.write("usemtl " + omname + '\n') # which material to use for this face (color)
+            fobj.write("f " + str(cc) + ' ' + str(cc+1) + ' ' + str(cc+2) + '\n\n') # vertices to color
+            cc = cc+3
+        fmtl.close()
+        fobj.close()
+
+
     def export_ply(self, filename, bounds = None, color_field = None,
                    color_map = "algae", color_log = True, sample_type = "face"):
         r"""This exports the surface to the PLY format, suitable for visualization
@@ -832,16 +1068,6 @@
             arr["green"][:] = cs[0,:,1]
             arr["blue"][:] = cs[0,:,2]
 
-    @property
-    def triangles(self):
-        if self.vertices is None:
-            self.get_data()
-        vv = np.empty((self.vertices.shape[1]/3, 3, 3), dtype="float64")
-        for i in range(3):
-            for j in range(3):
-                vv[:,i,j] = self.vertices[j,i::3]
-        return vv
- 
     @parallel_root_only
     def _export_ply(self, filename, bounds = None, color_field = None,
                    color_map = "algae", color_log = True, sample_type = "face"):

diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -366,10 +366,9 @@
                      [self.field_parameters])
         return (_reconstruct_object, args)
 
-    def __repr__(self, clean = False):
+    def __repr__(self):
         # We'll do this the slow way to be clear what's going on
-        if clean: s = "%s: " % (self.__class__.__name__)
-        else: s = "%s (%s): " % (self.__class__.__name__, self.pf)
+        s = "%s (%s): " % (self.__class__.__name__, self.pf)
         s += ", ".join(["%s=%s" % (i, getattr(self,i))
                        for i in self._con_args])
         return s
@@ -979,25 +978,49 @@
         return self.quantities["TotalQuantity"]("CellVolume")[0] * \
             (self.pf[unit] / self.pf['cm']) ** 3.0
 
+# Many of these items are set up specifically to ensure that
+# we are not breaking old pickle files.  This means we must only call the
+# _reconstruct_object and that we cannot mandate any additional arguments to
+# the reconstruction function.
+#
+# In the future, this would be better off being set up to more directly
+# reference objects or retain state, perhaps with a context manager.
+#
+# One final detail: time series or multiple parameter files in a single pickle
+# seems problematic.
+
+class ReconstructedObject(tuple):
+    pass
+
+def _check_nested_args(arg, ref_pf):
+    if not isinstance(arg, (tuple, list, ReconstructedObject)):
+        return arg
+    elif isinstance(arg, ReconstructedObject) and ref_pf == arg[0]:
+        return arg[1]
+    narg = [_check_nested_args(a, ref_pf) for a in arg]
+    return narg
+
+def _get_pf_by_hash(hash):
+    from yt.data_objects.static_output import _cached_pfs
+    for pf in _cached_pfs.values():
+        if pf._hash() == hash: return pf
+    return None
+
 def _reconstruct_object(*args, **kwargs):
     pfid = args[0]
     dtype = args[1]
+    pf = _get_pf_by_hash(pfid)
+    if not pf:
+        pfs = ParameterFileStore()
+        pf = pfs.get_pf_hash(pfid)
     field_parameters = args[-1]
     # will be much nicer when we can do pfid, *a, fp = args
-    args, new_args = args[2:-1], []
-    for arg in args:
-        if iterable(arg) and len(arg) == 2 \
-           and not isinstance(arg, types.DictType) \
-           and isinstance(arg[1], YTDataContainer):
-            new_args.append(arg[1])
-        else: new_args.append(arg)
-    pfs = ParameterFileStore()
-    pf = pfs.get_pf_hash(pfid)
+    args = args[2:-1]
+    new_args = [_check_nested_args(a, pf) for a in args]
     cls = getattr(pf.h, dtype)
     obj = cls(*new_args)
     obj.field_parameters.update(field_parameters)
-    return pf, obj
-
+    return ReconstructedObject((pf, obj))
 
 class YTSelectedIndicesBase(YTSelectionContainer3D):
     """An arbitrarily defined data container that allows for selection
@@ -1254,7 +1277,7 @@
             if region in ["OR", "AND", "NOT", "(", ")"]:
                 s += region
             else:
-                s += region.__repr__(clean = True)
+                s += region.__repr__()
             if i < (len(self.regions) - 1): s += ", "
         s += "]"
         return s

diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -120,8 +120,12 @@
     particle masses in the object.
     """
     baryon_mass = data["CellMassMsun"].sum()
-    particle_mass = data["ParticleMassMsun"].sum()
-    return [baryon_mass + particle_mass]
+    try:
+        particle_mass = data["ParticleMassMsun"].sum()
+        total_mass = baryon_mass + particle_mass
+    except KeyError:
+        total_mass = baryon_mass
+    return [total_mass]
 def _combTotalMass(data, total_mass):
     return total_mass.sum()
 add_quantity("TotalMass", function=_TotalMass,

diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -302,7 +302,9 @@
             self.requested.append(item)
             return self[item]
         self.requested.append(item)
-        return defaultdict.__missing__(self, item)
+        if item not in self:
+            self[item] = self._read_data(item)
+        return self[item]
 
     def deposit(self, *args, **kwargs):
         return np.random.random((self.nd, self.nd, self.nd))
@@ -310,7 +312,7 @@
     def _read_data(self, field_name):
         self.requested.append(field_name)
         FI = getattr(self.pf, "field_info", FieldInfo)
-        if FI.has_key(field_name) and FI[field_name].particle_type:
+        if field_name in FI and FI[field_name].particle_type:
             self.requested.append(field_name)
             return np.ones(self.NumberOfParticles)
         return defaultdict.__missing__(self, field_name)

diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -58,7 +58,8 @@
 
     def get_data(self, fields):
         fields = ensure_list(fields)
-        rvs = self.source.get_data(fields, force_particle_read=True)
+        self.source.get_data(fields, force_particle_read=True)
+        rvs = [self.source[field] for field in fields]
         if len(fields) == 1: return rvs[0]
         return rvs
 

diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -989,12 +989,12 @@
         given the tilt about the x axis when e0 was aligned 
         to x after t1, t2 rotations about z, y
         """
-        RX = get_rotation_matrix(-tilt, (1,0,0)).transpose()
-        RY = get_rotation_matrix(-t2,   (0,1,0)).transpose()
-        RZ = get_rotation_matrix(-t1,   (0,0,1)).transpose()
-        e1 = ((0, 1, 0) * RX).sum(axis = 1)
-        e1 = (e1 * RY).sum(axis = 1)
-        e1 = (e1 * RZ).sum(axis = 1)
+        RX = get_rotation_matrix(-tilt, (1, 0, 0)).transpose()
+        RY = get_rotation_matrix(-t2,   (0, 1, 0)).transpose()
+        RZ = get_rotation_matrix(-t1,   (0, 0, 1)).transpose()
+        e1 = ((0, 1, 0) * RX).sum(axis=1)
+        e1 = (e1 * RY).sum(axis=1)
+        e1 = (e1 * RZ).sum(axis=1)
         e2 = np.cross(e0, e1)
 
         self._e1 = e1

diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -179,7 +179,7 @@
     def get_smallest_appropriate_unit(self, v):
         max_nu = 1e30
         good_u = None
-        for unit in ['mpc', 'kpc', 'pc', 'au', 'rsun', 'cm']:
+        for unit in ['mpc', 'kpc', 'pc', 'au', 'rsun', 'km', 'cm']:
             vv = v*self[unit]
             if vv < max_nu and vv > 1.0:
                 good_u = unit

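With 'km' in the list, get_smallest_appropriate_unit still works the same way: walk the units from largest to smallest and keep the one whose converted value is the smallest number still greater than 1, i.e. the most compact human-readable representation. A standalone sketch with approximate cm-based conversion factors (the exact values here are illustrative):

    def smallest_appropriate_unit(v_cm, conversions):
        # conversions: (name, units-per-cm) pairs, ordered largest unit first.
        good_u = None
        max_nu = 1e30
        for unit, factor in conversions:
            vv = v_cm * factor
            if 1.0 < vv < max_nu:
                good_u = unit
                max_nu = vv
        return good_u

    conversions = [('mpc', 1.0/3.0857e24), ('kpc', 1.0/3.0857e21),
                   ('pc', 1.0/3.0857e18), ('au', 1.0/1.496e13),
                   ('km', 1.0e-5), ('cm', 1.0)]
    print smallest_appropriate_unit(4.0e13, conversions)   # 'au' (~2.7 au)
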
diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b yt/data_objects/tests/test_pickle.py
--- /dev/null
+++ b/yt/data_objects/tests/test_pickle.py
@@ -0,0 +1,69 @@
+"""
+Testsuite for pickling yt objects.
+
+Author: Elizabeth Tasker <tasker at astro1.sci.hokudai.ac.jp>
+Affiliation: Hokkaido University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Elizabeth Tasker. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+import cPickle
+import os
+import tempfile
+from yt.testing \
+    import fake_random_pf, assert_equal
+
+
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+
+def test_save_load_pickle():
+    """Main test for loading pickled objects"""
+    test_pf = fake_random_pf(64)
+
+    # create extracted region from boolean (fairly complex object)
+    center = (test_pf.domain_left_edge + test_pf.domain_right_edge) / 2
+    sp_outer = test_pf.h.sphere(center, test_pf.domain_width[0])
+    sp_inner = test_pf.h.sphere(center, test_pf.domain_width[0] / 10.0)
+    sp_boolean = test_pf.h.boolean([sp_outer, "NOT", sp_inner])
+
+    minv, maxv = sp_boolean.quantities["Extrema"]("Density")[0]
+    contour_threshold = min(minv * 10.0, 0.9 * maxv)
+
+    contours = sp_boolean.extract_connected_sets(
+        "Density", 1, contour_threshold, maxv + 1, log_space=True, cache=True)
+
+    # save object
+    cpklfile = tempfile.NamedTemporaryFile(delete=False)
+    cPickle.dump(contours[1][0], cpklfile)
+    cpklfile.close()
+
+    # load object
+    test_load = cPickle.load(open(cpklfile.name, "rb"))
+
+    assert_equal.description = \
+        "%s: File was pickle-loaded succesfully" % __name__
+    yield assert_equal, test_load is not None, True
+    assert_equal.description = \
+        "%s: Length of pickle-loaded connected set object" % __name__
+    yield assert_equal, len(contours[1][0]), len(test_load)
+
+    os.remove(cpklfile.name)

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/5e745eda811b/
Changeset:   5e745eda811b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-04 22:14:27
Summary:     Disabling tests that we cannot yet run.

These fall into two categories:

 * A mixed particle/fluid field, which does not currently work for fake_random_pf, as the field dependencies won't be correctly calculated
 * Boolean object tests

All tests that are expected to pass now pass.
Affected #:  2 files

diff -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b -r 5e745eda811bc188d30587bafa6ca9fa01878835 yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -89,6 +89,9 @@
         if field.startswith("particle"): continue
         if field.startswith("CIC"): continue
         if field.startswith("WeakLensingConvergence"): continue
+        if field.startswith("DensityPerturbation"): continue
+        if field.startswith("Matter_Density"): continue
+        if field.startswith("Overdensity"): continue
         if FieldInfo[field].particle_type: continue
         for nproc in [1, 4, 8]:
             yield TestFieldAccess(field, nproc)

diff -r 1cc8469fd8f1cf2acf9ba93cbed5d0518bfd101b -r 5e745eda811bc188d30587bafa6ca9fa01878835 yt/data_objects/tests/test_pickle.py
--- a/yt/data_objects/tests/test_pickle.py
+++ b/yt/data_objects/tests/test_pickle.py
@@ -37,6 +37,7 @@
 
 def test_save_load_pickle():
     """Main test for loading pickled objects"""
+    return # Until boolean regions are implemented we can't test this
     test_pf = fake_random_pf(64)
 
     # create extracted region from boolean (fairly complex object)
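
For reference, the skip-by-prefix pattern used in test_fields.py distills to
a small filter. A sketch with made-up field names, not the actual FieldInfo
registry:

    SKIP_PREFIXES = ("particle", "CIC", "WeakLensingConvergence",
                     "DensityPerturbation", "Matter_Density", "Overdensity")

    def testable_fields(field_names):
        """Yield only the fields the fake dataset can currently handle."""
        for field in field_names:
            # str.startswith accepts a tuple, so one test covers all prefixes.
            if field.startswith(SKIP_PREFIXES):
                continue
            yield field

    # e.g. list(testable_fields(["Density", "Overdensity"])) -> ["Density"]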


https://bitbucket.org/yt_analysis/yt/commits/7b55808f11cf/
Changeset:   7b55808f11cf
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-04 23:28:54
Summary:     Minor fix to get __repr__ for data_collection to work properly.
Affected #:  1 file

diff -r 5e745eda811bc188d30587bafa6ca9fa01878835 -r 7b55808f11cfd8cdee936fdd95bc547b70be4482 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -894,7 +894,7 @@
     Child cells are not returned.
     """
     _type_name = "data_collection"
-    _con_args = ("obj_list",)
+    _con_args = ("_obj_list",)
     def __init__(self, center, obj_list, pf = None, field_parameters = None):
         YTSelectionContainer3D.__init__(self, center, pf, field_parameters)
         self._obj_ids = np.array([o.id - o._id_offset for o in obj_list],
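
The reason this one-character change matters: the container's __repr__ is
built by looking up each name in _con_args on the instance, so the names must
match attributes that actually exist. A toy sketch of that mechanism (not the
exact yt implementation):

    class DataCollectionSketch(object):
        # _con_args must name real instance attributes; the stored
        # attribute here is _obj_list, not obj_list.
        _con_args = ("_obj_list",)

        def __init__(self, obj_list):
            self._obj_list = obj_list

        def __repr__(self):
            args = ", ".join("%s=%s" % (a, getattr(self, a))
                             for a in self._con_args)
            return "%s (%s)" % (self.__class__.__name__, args)

    # repr(DataCollectionSketch([1, 2])) -> "DataCollectionSketch (_obj_list=[1, 2])"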


https://bitbucket.org/yt_analysis/yt/commits/cf67430914e7/
Changeset:   cf67430914e7
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-05 00:16:27
Summary:     This change enables deferral of fields that cannot be generated.

The next step will be refactoring the "can I generate" code into its own
routine and attempting to speed it up.  After that we can address the idea of
IO-oriented chunks preloading spatial chunks.
Affected #:  1 file

diff -r 7b55808f11cfd8cdee936fdd95bc547b70be4482 -r cf67430914e72b7fb9892980f856f029122983da yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -477,8 +477,23 @@
         if fields is None: return
         fields = self._determine_fields(fields)
         # Now we collect all our fields
-        fields_to_get = [f for f in fields if f not in self.field_data]
-        if len(fields_to_get) == 0:
+        # Here is where we need to perform a validation step, so that if we
+        # have a field requested that we actually *can't* yet get, we put it
+        # off until the end.  This prevents double-reading fields that will
+        # need to be used in spatial fields later on.
+        fields_to_get = []
+        # This will be pre-populated with spatial fields
+        fields_to_generate = [] 
+        for field in self._determine_fields(fields):
+            if field in self.field_data: continue
+            finfo = self.pf._get_field_info(*field)
+            try:
+                finfo.check_available(self)
+            except NeedsGridType:
+                fields_to_generate.append(field)
+                continue
+            fields_to_get.append(field)
+        if len(fields_to_get) == 0 and len(fields_to_generate) == 0:
             return
         elif self._locked == True:
             raise GenerationInProgress(fields)
@@ -502,12 +517,18 @@
         read_particles, gen_particles = self.hierarchy._read_particle_fields(
                                         particles, self, self._current_chunk)
         self.field_data.update(read_particles)
-        fields_to_generate = gen_fluids + gen_particles
+        fields_to_generate += gen_fluids + gen_particles
         self._generate_fields(fields_to_generate)
 
     def _generate_fields(self, fields_to_generate):
         index = 0
         with self._field_lock():
+            # At this point, we assume that any fields that are necessary to
+            # *generate* a field are in fact already available to us.  Note
+            # that we do not make any assumption about whether or not the
+            # fields have a spatial requirement.  This will be checked inside
+            # _generate_field, at which point additional dependencies may
+            # actually be noted.
             while any(f not in self.field_data for f in fields_to_generate):
                 field = fields_to_generate[index % len(fields_to_generate)]
                 index += 1
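
Stripped of the chunking machinery, the validation step introduced above is a
two-bucket split. A sketch with hypothetical names, where check_available
stands in for the per-field validation:

    class NeedsGridType(Exception):
        """Raised when a field needs spatial (grid-shaped) data."""

    def partition_fields(requested, cached, check_available):
        """Split fields into those readable now and those deferred
        until spatial chunks are available."""
        fields_to_get, fields_to_generate = [], []
        for field in requested:
            if field in cached:
                continue
            try:
                check_available(field)
            except NeedsGridType:
                fields_to_generate.append(field)
            else:
                fields_to_get.append(field)
        return fields_to_get, fields_to_generate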


https://bitbucket.org/yt_analysis/yt/commits/cc638afa6942/
Changeset:   cc638afa6942
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-05 13:58:29
Summary:     Using TINY instead of 1e-90.

As Britton noted, this was brought in because TINY was not available in yt 2.5.
Affected #:  1 file

diff -r 5e745eda811bc188d30587bafa6ca9fa01878835 -r cc638afa69428aef4f8de538d72b4614fb1c950a yt/analysis_modules/halo_profiler/halo_filters.py
--- a/yt/analysis_modules/halo_profiler/halo_filters.py
+++ b/yt/analysis_modules/halo_profiler/halo_filters.py
@@ -27,6 +27,7 @@
 import numpy as np
 
 from yt.funcs import *
+from yt.utilities.physical_constants import TINY
 
 def VirialFilter(profile, overdensity_field='ActualOverdensity',
                  virial_overdensity=200., must_be_virialized=True,
@@ -105,7 +106,7 @@
 
     if use_log:
         for field in temp_profile.keys():
-            temp_profile[field] = np.log10(np.clip(temp_profile[field], 1e-90, 
+            temp_profile[field] = np.log10(np.clip(temp_profile[field], TINY,
                                                    max(temp_profile[field])))
 
     virial = dict((field, 0.0) for field in fields)
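
The same floor-then-log idiom in isolation. The TINY value below is an
assumed illustrative constant; yt defines its own in
yt.utilities.physical_constants:

    import numpy as np

    TINY = 1e-40  # assumed illustrative value

    def safe_log10(values):
        """Clip to a small positive floor so log10 never sees zeros."""
        values = np.asarray(values, dtype="float64")
        return np.log10(np.clip(values, TINY, values.max()))

    # safe_log10([0.0, 1.0, 100.0]) -> array([-40., 0., 2.])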


https://bitbucket.org/yt_analysis/yt/commits/f824c732862d/
Changeset:   f824c732862d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-05 13:59:01
Summary:     Fixing AMRHierarchy -> GridGeometryHandler.

thanks, Kacper!
Affected #:  1 file

diff -r cc638afa69428aef4f8de538d72b4614fb1c950a -r f824c732862d69f94f79240b8041b571edb8a9fa yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -96,7 +96,7 @@
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
         self._fhandle = h5py.File(self.hierarchy_filename,'r')
-        AMRHierarchy.__init__(self,pf,data_style)
+        GridGeometryHandler.__init__(self,pf,data_style)
 
         self._fhandle.close()
 


https://bitbucket.org/yt_analysis/yt/commits/443f359e2598/
Changeset:   443f359e2598
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-05 19:43:52
Summary:     Fixing projected_units for particle deposition fields in Enzo and SPH.
Affected #:  2 files

diff -r f824c732862d69f94f79240b8041b571edb8a9fa -r 443f359e25980fe2574652a4c873b5ca9a702169 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -694,5 +694,5 @@
          validators = [ValidateSpatial()],
          display_name = "\\mathrm{%s Density}" % "all",
          units = r"\mathrm{g}/\mathrm{cm}^{3}",
-         projected_units = r"\mathrm{g}/\mathrm{cm}^{-2}",
+         projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
          projection_conversion = 'cm')

diff -r f824c732862d69f94f79240b8041b571edb8a9fa -r 443f359e25980fe2574652a4c873b5ca9a702169 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -93,7 +93,7 @@
              validators = [ValidateSpatial()],
              display_name = "\\mathrm{%s Density}" % ptype,
              units = r"\mathrm{g}/\mathrm{cm}^{3}",
-             projected_units = r"\mathrm{g}/\mathrm{cm}^{-2}",
+             projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
              projection_conversion = 'cm')
 
     # Now some translation functions.
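
A quick dimensional check of the fix: a projection integrates a volume
density along one length, so

    \mathrm{g\,cm^{-3}} \times \mathrm{cm} = \mathrm{g\,cm^{-2}}

i.e. g/cm^2. The old string \mathrm{g}/\mathrm{cm}^{-2} reads as
g/(cm^{-2}) = g cm^2, off by four powers of length.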


https://bitbucket.org/yt_analysis/yt/commits/10c24e37517f/
Changeset:   10c24e37517f
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-05 20:55:17
Summary:     Moving the OWLSStaticOutput below Gadget, subclassing, and setting up fields.

This brings OWLS mostly to feature parity with Gadget, although I noticed that
I had to make some odd changes to the cm units to make it work the same way.
Affected #:  2 files

diff -r 443f359e25980fe2574652a4c873b5ca9a702169 -r 10c24e37517f0d437f700786f50e5068bc1ad78d yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -163,71 +163,6 @@
         for subset in oobjs:
             yield YTDataChunk(dobj, "io", [subset], subset.cell_count)
 
-class OWLSStaticOutput(StaticOutput):
-    _hierarchy_class = ParticleGeometryHandler
-    _domain_class = ParticleDomainFile
-    _fieldinfo_fallback = OWLSFieldInfo
-    _fieldinfo_known = KnownOWLSFields
-
-    def __init__(self, filename, data_style="OWLS", root_dimensions = 64):
-        self._root_dimensions = root_dimensions
-        # Set up the template for domain files
-        self.storage_filename = None
-        super(OWLSStaticOutput, self).__init__(filename, data_style)
-
-    def __repr__(self):
-        return os.path.basename(self.parameter_filename).split(".")[0]
-
-    def _set_units(self):
-        self.units = {}
-        self.time_units = {}
-        self.conversion_factors = {}
-        DW = self.domain_right_edge - self.domain_left_edge
-        self.units["unitary"] = 1.0 / DW.max()
-
-    def _parse_parameter_file(self):
-        handle = h5py.File(self.parameter_filename)
-        hvals = {}
-        hvals.update(handle["/Header"].attrs)
-
-        self.dimensionality = 3
-        self.refine_by = 2
-        self.parameters["HydroMethod"] = "sph"
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        # Set standard values
-        self.current_time = hvals["Time_GYR"] * sec_conversion["Gyr"]
-        self.domain_left_edge = np.zeros(3, "float64")
-        self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
-        self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
-        self.cosmological_simulation = 1
-        self.periodicity = (True, True, True)
-        self.current_redshift = hvals["Redshift"]
-        self.omega_lambda = hvals["OmegaLambda"]
-        self.omega_matter = hvals["Omega0"]
-        self.hubble_constant = hvals["HubbleParam"]
-        self.parameters = hvals
-
-        prefix = self.parameter_filename.split(".", 1)[0]
-        suffix = self.parameter_filename.rsplit(".", 1)[-1]
-        self.domain_template = "%s.%%(num)i.%s" % (prefix, suffix)
-        self.domain_count = hvals["NumFilesPerSnapshot"]
-
-        handle.close()
-
-    @classmethod
-    def _is_valid(self, *args, **kwargs):
-        try:
-            fileh = h5py.File(args[0],'r')
-            if "Constants" in fileh["/"].keys() and \
-               "Header" in fileh["/"].keys():
-                fileh.close()
-                return True
-            fileh.close()
-        except:
-            pass
-        return False
-
 class GadgetBinaryDomainFile(ParticleDomainFile):
     def __init__(self, pf, io, domain_filename, domain_id):
         with open(domain_filename, "rb") as f:
@@ -404,6 +339,74 @@
         # We do not allow load() of these files.
         return False
 
+class OWLSStaticOutput(GadgetStaticOutput):
+    _hierarchy_class = ParticleGeometryHandler
+    _domain_class = ParticleDomainFile
+    _fieldinfo_fallback = OWLSFieldInfo # For now we have separate from Gadget
+    _fieldinfo_known = KnownOWLSFields
+    _header_spec = None # Override so that there's no confusion
+
+    def __init__(self, filename, data_style="OWLS", root_dimensions = 64):
+        self._root_dimensions = root_dimensions
+        # Set up the template for domain files
+        self.storage_filename = None
+        super(OWLSStaticOutput, self).__init__(filename, data_style,
+                                               root_dimensions,
+                                               unit_base = None)
+
+    def __repr__(self):
+        return os.path.basename(self.parameter_filename).split(".")[0]
+
+    def _parse_parameter_file(self):
+        handle = h5py.File(self.parameter_filename)
+        hvals = {}
+        hvals.update((str(k), v) for k, v in handle["/Header"].attrs.items())
+
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.parameters["HydroMethod"] = "sph"
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+
+        # Set standard values
+        self.current_time = hvals["Time_GYR"] * sec_conversion["Gyr"]
+        self.domain_left_edge = np.zeros(3, "float64")
+        self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
+        self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
+        self.cosmological_simulation = 1
+        self.periodicity = (True, True, True)
+        self.current_redshift = hvals["Redshift"]
+        self.omega_lambda = hvals["OmegaLambda"]
+        self.omega_matter = hvals["Omega0"]
+        self.hubble_constant = hvals["HubbleParam"]
+        self.parameters = hvals
+
+        prefix = self.parameter_filename.split(".", 1)[0]
+        suffix = self.parameter_filename.rsplit(".", 1)[-1]
+        self.domain_template = "%s.%%(num)i.%s" % (prefix, suffix)
+        self.domain_count = hvals["NumFilesPerSnapshot"]
+
+        # To avoid having to open files twice
+        self._unit_base = {}
+        self._unit_base.update((str(k), v) for k, v in handle["/Units"].attrs.items())
+        # Comoving cm is given in the Units
+        self._unit_base['cmcm'] = 1.0 / self._unit_base["UnitLength_in_cm"]
+
+        handle.close()
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            fileh = h5py.File(args[0],'r')
+            if "Constants" in fileh["/"].keys() and \
+               "Header" in fileh["/"].keys():
+                fileh.close()
+                return True
+            fileh.close()
+        except:
+            pass
+        return False
+
 class TipsyDomainFile(ParticleDomainFile):
 
     def _calculate_offsets(self, field_list):

diff -r 443f359e25980fe2574652a4c873b5ca9a702169 -r 10c24e37517f0d437f700786f50e5068bc1ad78d yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -247,3 +247,60 @@
         GadgetFieldInfo.add_field(("all", oname + ax), function=func,
                 particle_type = True)
 
+# OWLS
+# ====
+
+# I am optimistic that some day we will be able to get rid of much of this, and
+# make OWLS a subclass of Gadget fields.
+
+_owls_ptypes = ("PartType0", "PartType1", "PartType2", "PartType3",
+                "PartType4")
+
+for fname in ["Coordinates", "Velocities", "ParticleIDs",
+              # Note: Mass, not Masses
+              "Mass"]:
+    func = _field_concat(fname)
+    OWLSFieldInfo.add_field(("all", fname), function=func,
+            particle_type = True)
+
+def _owls_particle_fields(ptype):
+    def _Mass(field, data):
+        pind = _owls_ptypes.index(ptype)
+        if data.pf["MassTable"][pind] == 0.0:
+            raise RuntimeError
+        mass = np.ones(data[ptype, "ParticleIDs"].shape[0], dtype="float64")
+        # Note that this is an alias, which is why we need to apply conversion
+        # here.  Otherwise we'd have an asymmetry.
+        mass *= data.pf["MassTable"][pind] 
+        return mass
+    OWLSFieldInfo.add_field((ptype, "Mass"), function=_Mass,
+                            convert_function = _get_conv("mass"),
+                            particle_type = True)
+
+for ptype in _owls_ptypes:
+    # Note that this adds a "Known" Mass field and a "Derived" Mass field.
+    # This way the "Known" will get used, and if it's not there, it will use
+    # the derived.
+    KnownOWLSFields.add_field((ptype, "Mass"), function=NullFunc,
+        particle_type = True,
+        convert_function=_get_conv("mass"),
+        units = r"\mathrm{g}")
+    _owls_particle_fields(ptype)
+    KnownOWLSFields.add_field((ptype, "Velocities"), function=NullFunc,
+        particle_type = True,
+        convert_function=_get_conv("velocity"),
+        units = r"\mathrm{cm}/\mathrm{s}")
+    _particle_functions(ptype, "Coordinates", "Mass", OWLSFieldInfo)
+    KnownOWLSFields.add_field((ptype, "Coordinates"), function=NullFunc,
+        particle_type = True)
+_particle_functions("all", "Coordinates", "Mass", OWLSFieldInfo)
+
+# Now we have to manually apply the splits for "all", since we don't want to
+# use the splits defined above.
+
+for iname, oname in [("Coordinates", "particle_position_"),
+                     ("Velocities", "particle_velocity_")]:
+    for axi, ax in enumerate("xyz"):
+        func = _field_concat_slice(iname, axi)
+        OWLSFieldInfo.add_field(("all", oname + ax), function=func,
+                particle_type = True)
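
The _Mass fallback above distills to the Gadget/OWLS convention: when the
snapshot header's MassTable entry for a species is nonzero, every particle of
that species shares it, and per-particle masses are only read from disk when
the entry is zero. A sketch with hypothetical names:

    import numpy as np

    def species_mass(mass_table, pind, n_particles):
        """MassTable[pind] == 0 means masses are stored per particle;
        nonzero means one shared value for the whole species."""
        if mass_table[pind] == 0.0:
            raise RuntimeError("masses for this species live on disk")
        return np.ones(n_particles, dtype="float64") * mass_table[pind]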


https://bitbucket.org/yt_analysis/yt/commits/2dd46f58a1f4/
Changeset:   2dd46f58a1f4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-05 21:42:54
Summary:     Fixing "MaxValue" and "MaxPosition" naming for fields that are tuples.
Affected #:  1 file

diff -r 10c24e37517f0d437f700786f50e5068bc1ad78d -r 2dd46f58a1f4e08f360680cb83954ce207873328 yt/geometry/oct_geometry_handler.py
--- a/yt/geometry/oct_geometry_handler.py
+++ b/yt/geometry/oct_geometry_handler.py
@@ -78,7 +78,7 @@
             source.quantities["MaxLocation"](field)
         mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f", 
               max_val, mx, my, mz)
-        self.pf.parameters["Max%sValue" % (field)] = max_val
-        self.pf.parameters["Max%sPos" % (field)] = "%s" % ((mx,my,mz),)
+        self.pf.parameters["Max%sValue" % (field,)] = max_val
+        self.pf.parameters["Max%sPos" % (field,)] = "%s" % ((mx,my,mz),)
         return max_val, np.array((mx,my,mz), dtype='float64')
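
The trailing comma is the whole fix: with %-formatting, a bare tuple is
unpacked into multiple arguments, while (field,) passes the tuple as a single
one. In isolation:

    field = ("gas", "Density")

    "Max%sValue" % (field,)  # "Max('gas', 'Density')Value"
    "Max%sValue" % (field)   # TypeError: not all arguments converted
                             # during string formatting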
 


https://bitbucket.org/yt_analysis/yt/commits/8ee5a448456b/
Changeset:   8ee5a448456b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-05 22:26:38
Summary:     Enable deposition into covering grids.

This removes the deposit() method on DataContainers and moves it to the
CoveringGrid.  I've added a LeftEdge property as well for inside the
particle_deposit routine.  Particle fields can now be obtained for
CoveringGrids, and deposition can be conducted as well.
Affected #:  2 files

diff -r 2dd46f58a1f4e08f360680cb83954ce207873328 -r 8ee5a448456bea82a56021dc11992d3c3d6dd6e0 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -53,6 +53,7 @@
     MinimalProjectionData
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects, parallel_root_only, ParallelAnalysisInterface
+import yt.geometry.particle_deposit as particle_deposit
 
 from .field_info_container import\
     NeedsGridType,\
@@ -433,20 +434,31 @@
         fields_to_get = [f for f in fields if f not in self.field_data]
         fields_to_get = self._identify_dependencies(fields_to_get)
         if len(fields_to_get) == 0: return
-        fill, gen = self._split_fields(fields_to_get)
+        fill, gen, part = self._split_fields(fields_to_get)
+        if len(part) > 0: self._fill_particles(part)
         if len(fill) > 0: self._fill_fields(fill)
         if len(gen) > 0: self._generate_fields(gen)
 
     def _split_fields(self, fields_to_get):
         fill, gen = self.pf.h._split_fields(fields_to_get)
+        particles = []
         for field in gen:
             finfo = self.pf._get_field_info(*field)
             try:
                 finfo.check_available(self)
             except NeedsOriginalGrid:
                 fill.append(field)
+        for field in fill:
+            finfo = self.pf._get_field_info(*field)
+            if finfo.particle_type:
+                particles.append(field)
         gen = [f for f in gen if f not in fill]
-        return fill, gen
+        fill = [f for f in fill if f not in particles]
+        return fill, gen, particles
+
+    def _fill_particles(self, part):
+        for p in part:
+            self[p] = self._data_source[p]
 
     def _fill_fields(self, fields):
         output_fields = [np.zeros(self.ActiveDimensions, dtype="float64")
@@ -485,6 +497,20 @@
             raise KeyError(field)
         return rv
 
+    @property
+    def LeftEdge(self):
+        return self.left_edge
+
+    def deposit(self, positions, fields = None, method = None):
+        cls = getattr(particle_deposit, "deposit_%s" % method, None)
+        if cls is None:
+            raise YTParticleDepositionNotImplemented(method)
+        op = cls(self.ActiveDimensions.prod()) # We allocate number of zones, not number of octs
+        op.initialize()
+        op.process_grid(self, positions, fields)
+        vals = op.finalize()
+        return vals.reshape(self.ActiveDimensions, order="F")
+
 class LevelState(object):
     current_dx = None
     current_dims = None

diff -r 2dd46f58a1f4e08f360680cb83954ce207873328 -r 8ee5a448456bea82a56021dc11992d3c3d6dd6e0 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -519,11 +519,6 @@
                         if f not in fields_to_generate:
                             fields_to_generate.append(f)
 
-    def deposit(self, positions, fields, op):
-        assert(self._current_chunk.chunk_type == "spatial")
-        fields = ensure_list(fields)
-        self.hierarchy._deposit_particle_fields(self, positions, fields, op)
-
     @contextmanager
     def _field_lock(self):
         self._locked = True
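
As a plain-Python picture of what a covering-grid deposition does, here is a
toy nearest-grid-point count. The real code runs in Cython and reshapes the
result with order="F"; everything below is illustrative, not yt API:

    import numpy as np

    def deposit_count(positions, left_edge, dds, dims):
        """Count particles per cell of a regular grid."""
        left_edge = np.asarray(left_edge, dtype="float64")
        dds = np.asarray(dds, dtype="float64")
        counts = np.zeros(dims, dtype="float64")
        for pos in positions:
            # Cell index of this particle relative to the grid origin.
            idx = np.floor((np.asarray(pos) - left_edge) / dds).astype("int64")
            if np.all(idx >= 0) and np.all(idx < dims):
                counts[tuple(idx)] += 1.0
        return counts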


https://bitbucket.org/yt_analysis/yt/commits/eb2f5ab4af3f/
Changeset:   eb2f5ab4af3f
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-05 22:39:39
Summary:     First attempt at porting Bobby's smoothing kernel to particle_deposit.

Note that combined with the covering_grid fixes, one can now apply smoothing
kernels to particles that are collected from diverse regions.  I've tested this
for SPH kernels applied to full-domain SPH simulations, and even at relatively
small particle counts it is unmanageably slow.
Affected #:  2 files

diff -r 8ee5a448456bea82a56021dc11992d3c3d6dd6e0 -r eb2f5ab4af3fe683977390a2fc10943883b158f5 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -29,6 +29,7 @@
 import numpy as np
 from libc.stdlib cimport malloc, free
 cimport cython
+from libc.math cimport sqrt
 
 from fp_utils cimport *
 from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
@@ -39,6 +40,21 @@
 cdef inline int gind(int i, int j, int k, int dims[3]):
     return ((k*dims[1])+j)*dims[0]+i
 
+
+####################################################
+# Standard SPH kernel for use with the Grid method #
+####################################################
+
+cdef inline np.float64_t sph_kernel(np.float64_t x) nogil:
+    cdef np.float64_t kernel
+    if x <= 0.5:
+        kernel = 1.-6.*x*x*(1.-x)
+    elif x>0.5 and x<=1.0:
+        kernel = 2.*(1.-x)*(1.-x)*(1.-x)
+    else:
+        kernel = 0.
+    return kernel
+
 cdef class ParticleDepositOperation:
     # We assume each will allocate and define their own temporary storage
     cdef np.int64_t nvals

diff -r 8ee5a448456bea82a56021dc11992d3c3d6dd6e0 -r eb2f5ab4af3fe683977390a2fc10943883b158f5 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -29,6 +29,7 @@
 import numpy as np
 from libc.stdlib cimport malloc, free
 cimport cython
+from libc.math cimport sqrt
 
 from fp_utils cimport *
 from oct_container cimport Oct, OctAllocationContainer, \
@@ -104,6 +105,7 @@
             left_edge[i] = gobj.LeftEdge[i]
             dims[i] = gobj.ActiveDimensions[i]
         for i in range(positions.shape[0]):
+            if i % 10000 == 0: print i, positions.shape[0]
             # Now we process
             for j in range(nf):
                 field_vals[j] = field_pointers[j][i]
@@ -145,6 +147,68 @@
 
 deposit_count = CountParticles
 
+cdef class SimpleSmooth(ParticleDepositOperation):
+    # Note that this does nothing at the edges.  So it will give a poor
+    # estimate there, and since Octrees are mostly edges, this will be a very
+    # poor SPH kernel.
+    cdef np.float64_t *data
+    cdef public object odata
+    cdef np.float64_t *temp
+    cdef public object otemp
+
+    def initialize(self):
+        self.odata = np.zeros(self.nvals, dtype="float64")
+        cdef np.ndarray arr = self.odata
+        self.data = <np.float64_t*> arr.data
+        self.otemp = np.zeros(self.nvals, dtype="float64")
+        arr = self.otemp
+        self.temp = <np.float64_t*> arr.data
+
+    @cython.cdivision(True)
+    cdef void process(self, int dim[3],
+                      np.float64_t left_edge[3],
+                      np.float64_t dds[3],
+                      np.int64_t offset,
+                      np.float64_t ppos[3],
+                      np.float64_t *fields
+                      ):
+        cdef int ii[3], half_len, ib0[3], ib1[3]
+        cdef int i, j, k
+        cdef np.float64_t idist[3], kernel_sum, dist
+        # Smoothing length is fields[0]
+        kernel_sum = 0.0
+        for i in range(3):
+            ii[i] = <int>((ppos[i] - left_edge[i])/dds[i])
+            half_len = <int>(fields[0]/dds[i]) + 1
+            ib0[i] = ii[i] - half_len
+            ib1[i] = ii[i] + half_len
+            if ib0[i] >= dim[i] or ib1[i] <0:
+                return
+            ib0[i] = iclip(ib0[i], 0, dim[i] - 1)
+            ib1[i] = iclip(ib1[i], 0, dim[i] - 1)
+        for i from ib0[0] <= i <= ib1[0]:
+            idist[0] = (ii[0] - i) * (ii[0] - i) * dds[0]
+            for j from ib0[1] <= j <= ib1[1]:
+                idist[1] = (ii[1] - j) * (ii[1] - j) * dds[1] 
+                for k from ib0[2] <= k <= ib1[2]:
+                    idist[2] = (ii[2] - k) * (ii[2] - k) * dds[2]
+                    dist = idist[0] + idist[1] + idist[2]
+                    # Calculate distance in multiples of the smoothing length
+                    dist = sqrt(dist) / fields[0]
+                    self.temp[gind(i,j,k,dim) + offset] = sph_kernel(dist)
+                    kernel_sum += self.temp[gind(i,j,k,dim) + offset]
+        # Having found the kernel, deposit accordingly into gdata
+        for i from ib0[0] <= i <= ib1[0]:
+            for j from ib0[1] <= j <= ib1[1]:
+                for k from ib0[2] <= k <= ib1[2]:
+                    dist = self.temp[gind(i,j,k,dim) + offset] / kernel_sum
+                    self.data[gind(i,j,k,dim) + offset] += fields[1] * dist
+        
+    def finalize(self):
+        return self.odata
+
+deposit_simple_smooth = SimpleSmooth
+
 cdef class SumParticleField(ParticleDepositOperation):
     cdef np.float64_t *sum
     cdef public object osum
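
For reference, sph_kernel above is the standard cubic spline written in terms
of q = r/h, the distance in smoothing lengths, with the normalization
constant omitted just as in the code:

    W(q) = 1 - 6q^2 (1 - q)    for 0 <= q <= 1/2
    W(q) = 2 (1 - q)^3         for 1/2 < q <= 1
    W(q) = 0                   for q > 1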


https://bitbucket.org/yt_analysis/yt/commits/8c8ef2d6c645/
Changeset:   8c8ef2d6c645
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-05 22:48:42
Summary:     Removed print statement.
Affected #:  1 file

diff -r eb2f5ab4af3fe683977390a2fc10943883b158f5 -r 8c8ef2d6c64544d46670ce506b8e8a5d07a004f3 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -105,7 +105,6 @@
             left_edge[i] = gobj.LeftEdge[i]
             dims[i] = gobj.ActiveDimensions[i]
         for i in range(positions.shape[0]):
-            if i % 10000 == 0: print i, positions.shape[0]
             # Now we process
             for j in range(nf):
                 field_vals[j] = field_pointers[j][i]


https://bitbucket.org/yt_analysis/yt/commits/ce7fd214cdcd/
Changeset:   ce7fd214cdcd
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-06 13:38:02
Summary:     Removing unused OpenMP constructs from selection_routines.pyx and setup.py.
Affected #:  2 files

diff -r 443f359e25980fe2574652a4c873b5ca9a702169 -r ce7fd214cdcd2355cfffc5a81496cda31a044105 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -28,7 +28,6 @@
 cimport cython
 from libc.stdlib cimport malloc, free
 from fp_utils cimport fclip, iclip
-from cython.parallel import prange, parallel, threadid
 from selection_routines cimport SelectorObject
 from oct_container cimport OctreeContainer, OctAllocationContainer, Oct
 #from geometry_utils cimport point_to_hilbert

diff -r 443f359e25980fe2574652a4c873b5ca9a702169 -r ce7fd214cdcd2355cfffc5a81496cda31a044105 yt/geometry/setup.py
--- a/yt/geometry/setup.py
+++ b/yt/geometry/setup.py
@@ -16,8 +16,6 @@
                          "yt/geometry/selection_routines.pxd"])
     config.add_extension("selection_routines", 
                 ["yt/geometry/selection_routines.pyx"],
-                extra_compile_args=['-fopenmp'],
-                extra_link_args=['-fopenmp'],
                 include_dirs=["yt/utilities/lib/"],
                 libraries=["m"],
                 depends=["yt/utilities/lib/fp_utils.pxd",


https://bitbucket.org/yt_analysis/yt/commits/c8bb677dde4d/
Changeset:   c8bb677dde4d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-06 19:47:02
Summary:     Adding fix_axis to axis specification for 2D objects.

Fixes #585 .
Affected #:  1 file

diff -r ce7fd214cdcd2355cfffc5a81496cda31a044105 -r c8bb677dde4d65fd7723357d02d3fe509e9cb16b yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -590,7 +590,7 @@
     _spatial = False
     def __init__(self, axis, pf, field_parameters):
         ParallelAnalysisInterface.__init__(self)
-        self.axis = axis
+        self.axis = fix_axis(axis)
         super(YTSelectionContainer2D, self).__init__(
             pf, field_parameters)
         self.set_field_parameter("axis", axis)
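
fix_axis normalizes an axis specification so that both 0/1/2 and "x"/"y"/"z"
are accepted. A sketch of the behavior, not the exact yt.funcs
implementation:

    def fix_axis(axis):
        """Map "x"/"y"/"z" to 0/1/2; pass integer axes through."""
        if isinstance(axis, str):
            return "xyz".index(axis.lower())
        return int(axis)

    # fix_axis("z") -> 2; fix_axis(1) -> 1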


https://bitbucket.org/yt_analysis/yt/commits/f7317e3cdd3d/
Changeset:   f7317e3cdd3d
Branch:      yt-3.0
User:        samskillman
Date:        2013-06-03 23:13:12
Summary:     Sometimes FLASH datasets can have 0 grids on a level but still have grids on higher levels. Also put the mask back in now that the get_vcd function is a bit smarter, and don't rely on the data source having left and right edges.
Affected #:  1 file

diff -r f2995289fe1af1a07a7d2d12802410e374f2a873 -r f7317e3cdd3dc3dbbaece1d394c99bba3eae4c4b yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -91,8 +91,8 @@
         self.max_level = max_level
         self.comm_rank = comm_rank
         self.comm_size = comm_size
-        left_edge = self.data_source.left_edge
-        right_edge= self.data_source.right_edge
+        left_edge = np.array([-np.inf]*3)
+        right_edge = np.array([np.inf]*3)
         self.trunk = Node(None, None, None,
                 left_edge, right_edge, None, 1)
         self.build()
@@ -109,7 +109,7 @@
         for lvl in lvl_range:
             #grids = self.data_source.select_grids(lvl)
             grids = np.array([b for b, mask in self.data_source.blocks if b.Level == lvl])
-            if len(grids) == 0: break
+            if len(grids) == 0: continue 
             self.add_grids(grids)
 
     def check_tree(self):
@@ -274,12 +274,12 @@
             dds = self.current_vcds[self.current_saved_grids.index(grid)]
         else:
             dds = []
-            #mask = make_vcd(grid.child_mask)
-            #mask = np.clip(mask, 0.0, 1.0)
-            #mask[mask<1.0] = np.inf
+            mask = make_vcd(grid.child_mask)
+            mask = np.clip(mask, 0.0, 1.0)
+            mask[mask<0.5] = np.inf
             for i,field in enumerate(self.fields):
                 vcd = make_vcd(grid[field], log=self.log_fields[i])
-                #vcd *= mask
+                vcd *= mask
                 if self.log_fields[i]: vcd = np.log10(vcd)
                 dds.append(vcd)
                 self.current_saved_grids.append(grid)
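
The break-to-continue change matters exactly when one level is empty but
deeper levels are not. A self-contained illustration:

    levels = {0: ["g0"], 1: [], 2: ["g2a", "g2b"]}  # level 1 has no grids

    added = []
    for lvl in sorted(levels):
        grids = levels[lvl]
        if len(grids) == 0:
            continue  # `break` here would drop level 2's grids entirely
        added.extend(grids)

    assert added == ["g0", "g2a", "g2b"]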


https://bitbucket.org/yt_analysis/yt/commits/c28cee1987ea/
Changeset:   c28cee1987ea
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-04 22:21:14
Summary:     Merged in samskillman/yt-3.0 (pull request #42)

Bring back data_source renders, bugfix for FLASH renders.
Affected #:  1 file

diff -r 479b20ef010f5f444c2b35754a30576736cd8b75 -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -91,8 +91,8 @@
         self.max_level = max_level
         self.comm_rank = comm_rank
         self.comm_size = comm_size
-        left_edge = self.data_source.left_edge
-        right_edge= self.data_source.right_edge
+        left_edge = np.array([-np.inf]*3)
+        right_edge = np.array([np.inf]*3)
         self.trunk = Node(None, None, None,
                 left_edge, right_edge, None, 1)
         self.build()
@@ -109,7 +109,7 @@
         for lvl in lvl_range:
             #grids = self.data_source.select_grids(lvl)
             grids = np.array([b for b, mask in self.data_source.blocks if b.Level == lvl])
-            if len(grids) == 0: break
+            if len(grids) == 0: continue 
             self.add_grids(grids)
 
     def check_tree(self):
@@ -274,12 +274,12 @@
             dds = self.current_vcds[self.current_saved_grids.index(grid)]
         else:
             dds = []
-            #mask = make_vcd(grid.child_mask)
-            #mask = np.clip(mask, 0.0, 1.0)
-            #mask[mask<1.0] = np.inf
+            mask = make_vcd(grid.child_mask)
+            mask = np.clip(mask, 0.0, 1.0)
+            mask[mask<0.5] = np.inf
             for i,field in enumerate(self.fields):
                 vcd = make_vcd(grid[field], log=self.log_fields[i])
-                #vcd *= mask
+                vcd *= mask
                 if self.log_fields[i]: vcd = np.log10(vcd)
                 dds.append(vcd)
                 self.current_saved_grids.append(grid)


https://bitbucket.org/yt_analysis/yt/commits/fd8ca5d271fb/
Changeset:   fd8ca5d271fb
Branch:      yt-3.0
User:        samskillman
Date:        2013-06-06 19:57:08
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #43)

Merging with mainline yt tip
Affected #:  92 files

diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -4,8 +4,10 @@
 freetype.cfg
 hdf5.cfg
 png.cfg
+rockstar.cfg
 yt_updater.log
 yt/frontends/artio/_artio_caller.c
+yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/frontends/sph/smoothing_kernel.c
 yt/geometry/oct_container.c
@@ -35,6 +37,7 @@
 yt/utilities/lib/GridTree.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
+yt/utilities/lib/write_array.c
 syntax: glob
 *.pyc
 .*.swp

diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -10,14 +10,15 @@
 # subversion checkout of yt, you can set YT_DIR, too.  (It'll already
 # check the current directory and one up.
 #
-# And, feel free to drop me a line: matthewturk at gmail.com
+# If you experience problems, please visit the Help section at 
+# http://yt-project.org.
 #
 
 DEST_SUFFIX="yt-`uname -m`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
 BRANCH="yt-3.0" # This is the branch to which we will forcibly update.
 
-if [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
+if [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
 then
     DEST_DIR=${YT_DEST}
 fi
@@ -34,7 +35,7 @@
 
 INST_HG=1       # Install Mercurial or not?  If hg is not already
                 # installed, yt cannot be installed.
-INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with 
+INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with
                 # the system zlib, which is compiled statically.
                 # If need be, you can turn this off.
 INST_BZLIB=1    # On some systems, libbzip2 is missing.  This can
@@ -76,7 +77,7 @@
    echo "the script to re-enable root-level installation.  Sorry!"
    exit 1
 fi
-if [[ ${DEST_DIR%/} == /usr/local ]] 
+if [[ ${DEST_DIR%/} == /usr/local ]]
 then
    echo "******************************************************"
    echo "*                                                    *"
@@ -97,6 +98,48 @@
 
 LOG_FILE="${DEST_DIR}/yt_install.log"
 
+function write_config
+{
+    CONFIG_FILE=${DEST_DIR}/.yt_config
+
+    echo INST_HG=${INST_HG} > ${CONFIG_FILE}
+    echo INST_ZLIB=${INST_ZLIB} >> ${CONFIG_FILE}
+    echo INST_BZLIB=${INST_BZLIB} >> ${CONFIG_FILE}
+    echo INST_PNG=${INST_PNG} >> ${CONFIG_FILE}
+    echo INST_FTYPE=${INST_FTYPE} >> ${CONFIG_FILE}
+    echo INST_ENZO=${INST_ENZO} >> ${CONFIG_FILE}
+    echo INST_SQLITE3=${INST_SQLITE3} >> ${CONFIG_FILE}
+    echo INST_PYX=${INST_PYX} >> ${CONFIG_FILE}
+    echo INST_0MQ=${INST_0MQ} >> ${CONFIG_FILE}
+    echo INST_ROCKSTAR=${INST_ROCKSTAR} >> ${CONFIG_FILE}
+    echo INST_SCIPY=${INST_SCIPY} >> ${CONFIG_FILE}
+    echo YT_DIR=${YT_DIR} >> ${CONFIG_FILE}
+    echo MPL_SUPP_LDFLAGS=${MPL_SUPP_LDFLAGS} >> ${CONFIG_FILE}
+    echo MPL_SUPP_CFLAGS=${MPL_SUPP_CFLAGS} >> ${CONFIG_FILE}
+    echo MPL_SUPP_CXXFLAGS=${MPL_SUPP_CXXFLAGS} >> ${CONFIG_FILE}
+    echo MAKE_PROCS=${MAKE_PROCS} >> ${CONFIG_FILE}
+    if [ ${HDF5_DIR} ]
+    then
+        echo ${HDF5_DIR} >> ${CONFIG_FILE}
+    fi
+    if [ ${NUMPY_ARGS} ]
+    then
+        echo ${NUMPY_ARGS} >> ${CONFIG_FILE}
+    fi
+}
+
+# Write config settings to file.
+CONFIG_FILE=${DEST_DIR}/.yt_config
+mkdir -p ${DEST_DIR}
+if [ -z ${REINST_YT} ] || [ ${REINST_YT} -neq 1 ]
+then
+    write_config
+elif [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -f ${CONFIG_FILE} ]
+then
+    USED_CONFIG=1
+    source ${CONFIG_FILE}
+fi
+
 function get_willwont
 {
     if [ $1 -eq 1 ]
@@ -170,6 +213,19 @@
         echo "   $ module load gcc"
         echo
     fi
+    if [ "${MYHOST##midway}" != "${MYHOST}" ]
+    then
+        echo "Looks like you're on Midway."
+        echo
+        echo " ******************************************"
+        echo " * It may be better to use the yt module! *"
+        echo " *                                        *"
+        echo " *   $ module load yt                     *"
+        echo " *                                        *"
+        echo " ******************************************"
+        echo
+        return
+    fi
     if [ "${MYOS##Darwin}" != "${MYOS}" ]
     then
         echo "Looks like you're running on Mac OSX."
@@ -181,7 +237,7 @@
 	echo "must register for an account on the apple developer tools"
 	echo "website: https://developer.apple.com/downloads to obtain the"
 	echo "download link."
-	echo 
+	echo
 	echo "We have gathered some additional instructions for each"
 	echo "version of OS X below. If you have trouble installing yt"
 	echo "after following these instructions, don't hesitate to contact"
@@ -192,15 +248,15 @@
 	echo "menu bar.  We're assuming that you've installed all operating"
 	echo "system updates; if you have an older version, we suggest"
 	echo "running software update and installing all available updates."
-	echo 
-        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the" 
+	echo
+        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
 	echo "Apple developer tools website."
         echo
         echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
 	echo "developer tools website.  You can either download the"
 	echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
-	echo "Software Update to update to XCode 3.2.6 or" 
-	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK" 
+	echo "Software Update to update to XCode 3.2.6 or"
+	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
 	echo "bundle (4.1 GB)."
         echo
         echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
@@ -208,20 +264,20 @@
         echo "Alternatively, download the Xcode command line tools from"
         echo "the Apple developer tools website."
         echo
-	echo "OS X 10.8.2: download Xcode 4.6 from the mac app store."
+	echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
 	echo "(search for Xcode)."
 	echo "Additionally, you will have to manually install the Xcode"
-	echo "command line tools, see:" 
+	echo "command line tools, see:"
 	echo "http://stackoverflow.com/questions/9353444"
 	echo "Alternatively, download the Xcode command line tools from"
 	echo "the Apple developer tools website."
 	echo
-        echo "NOTE: It's possible that the installation will fail, if so," 
-	echo "please set the following environment variables, remove any" 
+        echo "NOTE: It's possible that the installation will fail, if so,"
+	echo "please set the following environment variables, remove any"
 	echo "broken installation tree, and re-run this script verbatim."
         echo
-        echo "$ export CC=gcc-4.2"
-        echo "$ export CXX=g++-4.2"
+        echo "$ export CC=gcc"
+        echo "$ export CXX=g++"
 	echo
         OSX_VERSION=`sw_vers -productVersion`
         if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
@@ -278,7 +334,7 @@
         echo
         echo " INST_ZLIB=0"
         echo " INST_FTYPE=0"
-        echo 
+        echo
         echo " to avoid conflicts with other command-line programs "
         echo " (like eog and evince, for example)."
     fi
@@ -362,6 +418,10 @@
 get_willwont ${INST_0MQ}
 echo "be installing ZeroMQ"
 
+printf "%-15s = %s so I " "INST_ROCKSTAR" "${INST_ROCKSTAR}"
+get_willwont ${INST_0MQ}
+echo "be installing Rockstar"
+
 echo
 
 if [ -z "$HDF5_DIR" ]
@@ -383,6 +443,12 @@
 echo "hit Ctrl-C."
 echo
 host_specific
+if [ ${USED_CONFIG} ]
+then
+    echo "Settings were loaded from ${CONFIG_FILE}."
+    echo "Remove this file if you wish to return to the default settings."
+    echo
+fi
 echo "========================================================================"
 echo
 read -p "[hit enter] "
@@ -424,7 +490,7 @@
     cd ..
 }
 
-if type -P wget &>/dev/null 
+if type -P wget &>/dev/null
 then
     echo "Using wget"
     export GETFILE="wget -nv"
@@ -486,28 +552,27 @@
 cd ${DEST_DIR}/src
 
 # Now we dump all our SHA512 files out.
-
-echo 'eda1b8090e5e21e7e039ef4dd03de186a7b416df9d5a4e4422abeeb4d51383b9a6858e1ac4902d8e5010f661b295bbb2452c43c8738be668379b4eb4835d0f61  Cython-0.17.1.tar.gz' > Cython-0.17.1.tar.gz.sha512
-echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
-echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
-echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
+echo 'fb85d71bb4f80b35f0d0f1735c650dd75c5f84b05635ddf91d6241ff103b5a49158c5b851a20c15e05425f6dde32a4971b35fcbd7445f61865b4d61ffd1fbfa1  Cython-0.18.tar.gz' > Cython-0.18.tar.gz.sha512
+echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
+echo '3349152c47ed2b63c5c9aabcfa92b8497ea9d71ca551fd721e827fcb8f91ff9fbbee6bba8f8cb2dea185701b8798878b4b2435c1496b63d4b4a37c624a625299  Python-2.7.4.tgz' > Python-2.7.4.tgz.sha512
+echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo 'b3290c498191684781ca5286ab454eb1bd045e8d894f5b86fb86beb88f174e22ac3ab008fb02d6562051d9fa6a9593920cab433223f6d5473999913223b8e183  h5py-2.1.0.tar.gz' > h5py-2.1.0.tar.gz.sha512
+echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3  freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
+echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3  h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
 echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
-echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
-echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
-echo '91693ca5f34934956a7c2c98bb69a5648b2a5660afd2ecf4a05035c5420450d42c194eeef0606d7683e267e4eaaaab414df23f30b34c88219bdd5c1a0f1f66ed  mercurial-2.5.1.tar.gz' > mercurial-2.5.1.tar.gz.sha512
-echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
-echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
-echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
+echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554  libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
+echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208  matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
+echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2  mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
+echo 'a485daa556f6c76003de1dbb3e42b3daeee0a320c69c81b31a7d2ebbc2cf8ab8e96c214a4758e5e7bf814295dc1d6aa563092b714db7e719678d8462135861a8  numpy-1.7.0.tar.gz' > numpy-1.7.0.tar.gz.sha512
+echo '293d78d14a9347cb83e1a644e5f3e4447ed6fc21642c51683e5495dda08d2312194a73d1fc3c1d78287e33ed065aa251ecbaa7c0ea9189456c1702e96d78becd  sqlite-autoconf-3071601.tar.gz' > sqlite-autoconf-3071601.tar.gz.sha512
+echo 'b1c073ad26684e354f7c522c14655840592e03872bc0a94690f89cae2ff88f146fce1dad252ff27a889dac4a32ff9f8ab63ba940671f9da89e9ba3e19f1bf58d  zlib-1.2.7.tar.gz' > zlib-1.2.7.tar.gz.sha512
 echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
-echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
-echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
-echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
-echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
+echo 'b9d061ca49e54ea917e0aed2b2a48faef33061dbf6d17eae7f8c3fff0b35ca883e7324f6cb24bda542443f669dcd5748037a5f2309f4c359d68adef520894865  zeromq-3.2.2.tar.gz' > zeromq-3.2.2.tar.gz.sha512
+echo '852fce8a8308c4e1e4b19c77add2b2055ca2ba570b28e8364888df490af92b860c72e860adfb075b3405a9ceb62f343889f20a8711c9353a7d9059adee910f83  pyzmq-13.0.2.tar.gz' > pyzmq-13.0.2.tar.gz.sha512
+echo '303bd3fbea22be57fddf7df78ddf5a783d355a0c8071b1363250daafc20232ddd28eedc44aa1194f4a7afd82f9396628c5bb06819e02b065b6a1b1ae8a7c19e1  tornado-3.0.tar.gz' > tornado-3.0.tar.gz.sha512
+echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
-echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
+echo 'd67de9567256e6f1649e4f3f7dfee63371d5f00fd3fd4f92426198f862e97c57f70e827d19f4e5e1929ad85ef2ce7aa5a0596b101cafdac71672e97dc115b397  python-hglib-0.3.tar.gz' > python-hglib-0.3.tar.gz.sha512
 echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa  scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
@@ -515,50 +580,50 @@
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.5.12.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.4.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3070500.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.11.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
+[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.11.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3071601.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.12.1.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-3.2.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-13.0.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject tornado-3.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
-get_ytproject Python-2.7.3.tgz
-get_ytproject numpy-1.6.1.tar.gz
-get_ytproject matplotlib-1.2.0.tar.gz
-get_ytproject mercurial-2.5.1.tar.gz
+get_ytproject Python-2.7.4.tgz
+get_ytproject numpy-1.7.0.tar.gz
+get_ytproject matplotlib-1.2.1.tar.gz
+get_ytproject mercurial-2.5.4.tar.gz
 get_ytproject ipython-0.13.1.tar.gz
-get_ytproject h5py-2.1.0.tar.gz
-get_ytproject Cython-0.17.1.tar.gz
+get_ytproject h5py-2.1.2.tar.gz
+get_ytproject Cython-0.18.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject Forthon-0.8.10.tar.gz
-get_ytproject nose-1.2.1.tar.gz 
-get_ytproject python-hglib-0.2.tar.gz
+get_ytproject Forthon-0.8.11.tar.gz
+get_ytproject nose-1.2.1.tar.gz
+get_ytproject python-hglib-0.3.tar.gz
 get_ytproject sympy-0.7.2.tar.gz
 get_ytproject rockstar-0.99.6.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
-    if [ ! -e bzip2-1.0.5/done ]
+    if [ ! -e bzip2-1.0.6/done ]
     then
-        [ ! -e bzip2-1.0.5 ] && tar xfz bzip2-1.0.5.tar.gz
+        [ ! -e bzip2-1.0.6 ] && tar xfz bzip2-1.0.6.tar.gz
         echo "Installing BZLIB"
-        cd bzip2-1.0.5
-        if [ `uname` = "Darwin" ] 
+        cd bzip2-1.0.6
+        if [ `uname` = "Darwin" ]
         then
-            if [ -z "${CC}" ] 
+            if [ -z "${CC}" ]
             then
                 sed -i.bak 's/soname/install_name/' Makefile-libbz2_so
             else
-                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so 
+                sed -i.bak -e 's/soname/install_name/' -e "s|CC=gcc|CC=${CC}|" Makefile-libbz2_so
             fi
         fi
         ( make install CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make -f Makefile-libbz2_so CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( cp -v libbz2.so.1.0.4 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( cp -v libbz2.so.1.0.6 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -569,11 +634,11 @@
 
 if [ $INST_ZLIB -eq 1 ]
 then
-    if [ ! -e zlib-1.2.3/done ]
+    if [ ! -e zlib-1.2.7/done ]
     then
-        [ ! -e zlib-1.2.3 ] && tar xfj zlib-1.2.3.tar.bz2
+        [ ! -e zlib-1.2.7 ] && tar xfz zlib-1.2.7.tar.gz
         echo "Installing ZLIB"
-        cd zlib-1.2.3
+        cd zlib-1.2.7
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -587,11 +652,11 @@
 
 if [ $INST_PNG -eq 1 ]
 then
-    if [ ! -e libpng-1.5.12/done ]
+    if [ ! -e libpng-1.6.1/done ]
     then
-        [ ! -e libpng-1.5.12 ] && tar xfz libpng-1.5.12.tar.gz
+        [ ! -e libpng-1.6.1 ] && tar xfz libpng-1.6.1.tar.gz
         echo "Installing PNG"
-        cd libpng-1.5.12
+        cd libpng-1.6.1
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -605,11 +670,11 @@
 
 if [ $INST_FTYPE -eq 1 ]
 then
-    if [ ! -e freetype-2.4.4/done ]
+    if [ ! -e freetype-2.4.11/done ]
     then
-        [ ! -e freetype-2.4.4 ] && tar xfz freetype-2.4.4.tar.gz
+        [ ! -e freetype-2.4.11 ] && tar xfz freetype-2.4.11.tar.gz
         echo "Installing FreeType2"
-        cd freetype-2.4.4
+        cd freetype-2.4.11
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -642,11 +707,11 @@
 
 if [ $INST_SQLITE3 -eq 1 ]
 then
-    if [ ! -e sqlite-autoconf-3070500/done ]
+    if [ ! -e sqlite-autoconf-3071601/done ]
     then
-        [ ! -e sqlite-autoconf-3070500 ] && tar xfz sqlite-autoconf-3070500.tar.gz
+        [ ! -e sqlite-autoconf-3071601 ] && tar xfz sqlite-autoconf-3071601.tar.gz
         echo "Installing SQLite3"
-        cd sqlite-autoconf-3070500
+        cd sqlite-autoconf-3071601
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -655,11 +720,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.3/done ]
+if [ ! -e Python-2.7.4/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
-    cd Python-2.7.3
+    [ ! -e Python-2.7.4 ] && tar xfz Python-2.7.4.tgz
+    cd Python-2.7.4
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -674,12 +739,11 @@
 
 if [ $INST_HG -eq 1 ]
 then
-    echo "Installing Mercurial."
-    do_setup_py mercurial-2.5.1
+    do_setup_py mercurial-2.5.4
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
-    if type -P hg &>/dev/null 
+    if type -P hg &>/dev/null
     then
         export HG_EXEC=hg
     else
@@ -696,14 +760,14 @@
     elif [ -e $ORIG_PWD/../yt/mods.py ]
     then
         YT_DIR=`dirname $ORIG_PWD`
-    elif [ ! -e yt-3.0-hg ] 
+    elif [ ! -e yt-hg ]
     then
-        YT_DIR="$PWD/yt-3.0-hg/"
+        YT_DIR="$PWD/yt-hg/"
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
         # Recently the hg server has had some issues with timeouts.  In lieu of
         # a new webserver, we are now moving to a three-stage process.
         # First we clone the repo, but only up to r0.
-        ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-3.0/ ./yt-3.0-hg 2>&1 ) 1>> ${LOG_FILE}
+        ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
         # Now we update to the branch we're interested in.
         ( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
     elif [ -e yt-3.0-hg ] 
@@ -714,7 +778,7 @@
 fi
 
 # This fixes problems with gfortran linking.
-unset LDFLAGS 
+unset LDFLAGS
 
 echo "Installing distribute"
 ( ${DEST_DIR}/bin/python2.7 ${YT_DIR}/distribute_setup.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -724,7 +788,7 @@
 
 if [ $INST_SCIPY -eq 0 ]
 then
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
 else
     if [ ! -e scipy-0.11.0/done ]
     then
@@ -752,8 +816,8 @@
 	fi
     fi
     export BLAS=$PWD/BLAS/libfblas.a
-    export LAPACK=$PWD/lapack-3.4.2/liblapack.a    
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    export LAPACK=$PWD/lapack-3.4.2/liblapack.a
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
     do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
 fi
 
@@ -776,10 +840,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.2.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-do_setup_py matplotlib-1.2.0
+mkdir -p ${DEST_DIR}/src/matplotlib-1.2.1
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+do_setup_py matplotlib-1.2.1
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -791,29 +855,29 @@
 # Now we do our IPython installation, which has two optional dependencies.
 if [ $INST_0MQ -eq 1 ]
 then
-    if [ ! -e zeromq-2.2.0/done ]
+    if [ ! -e zeromq-3.2.2/done ]
     then
-        [ ! -e zeromq-2.2.0 ] && tar xfz zeromq-2.2.0.tar.gz
+        [ ! -e zeromq-3.2.2 ] && tar xfz zeromq-3.2.2.tar.gz
         echo "Installing ZeroMQ"
-        cd zeromq-2.2.0
+        cd zeromq-3.2.2
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
-    do_setup_py pyzmq-2.1.11 --zmq=${DEST_DIR}
-    do_setup_py tornado-2.2
+    do_setup_py pyzmq-13.0.2 --zmq=${DEST_DIR}
+    do_setup_py tornado-3.0
 fi
 
 do_setup_py ipython-0.13.1
-do_setup_py h5py-2.1.0
-do_setup_py Cython-0.17.1
-do_setup_py Forthon-0.8.10
+do_setup_py h5py-2.1.2
+do_setup_py Cython-0.18
+do_setup_py Forthon-0.8.11
 do_setup_py nose-1.2.1
-do_setup_py python-hglib-0.2
+do_setup_py python-hglib-0.3
 do_setup_py sympy-0.7.2
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
+[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.12.1
 
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]

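Each dependency block in the install script follows the same idempotent
recipe: skip if a "done" marker exists, otherwise unpack, build, and
"touch done".  A minimal Python sketch of that pattern (build_once is a
hypothetical helper, not part of the script):

    import os
    import subprocess

    def build_once(srcdir, commands):
        # mirrors the "if [ ! -e <pkg>/done ]" guard in the script
        marker = os.path.join(srcdir, "done")
        if os.path.exists(marker):
            return
        for cmd in commands:
            # each step aborts on failure, like "|| do_exit" above
            subprocess.check_call(cmd, shell=True, cwd=srcdir)
        # equivalent of "touch done": re-runs skip this package
        open(marker, "w").close()
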
diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,6 +18,9 @@
 from distutils.core import Command
 from distutils.spawn import find_executable
 
+def find_fortran_deps():
+    return (find_executable("Forthon"),
+            find_executable("gfortran"))
 
 class BuildForthon(Command):
 
@@ -41,9 +44,7 @@
     def run(self):
 
         """runner"""
-        Forthon_exe = find_executable("Forthon")
-        gfortran_exe = find_executable("gfortran")
-
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
         if None in (Forthon_exe, gfortran_exe):
             sys.stderr.write(
                 "fKDpy.so won't be built due to missing Forthon/gfortran\n"
@@ -193,9 +194,13 @@
 
 class my_install_data(np_install_data.install_data):
     def run(self):
-        self.distribution.data_files.append(
-            ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
-        )
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
+        if None in (Forthon_exe, gfortran_exe):
+            pass
+        else:
+            self.distribution.data_files.append(
+                ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
+                )
         np_install_data.install_data.run(self)
 
 class my_build_py(build_py):

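The net effect of the two setup.py hunks above: the Forthon build step and
the install_data step now consult one helper, so fKDpy.so is only
registered for installation when the toolchain that builds it is present.
A condensed sketch of the shared guard:

    from distutils.spawn import find_executable

    def find_fortran_deps():
        # single source of truth for the Fortran toolchain check
        return (find_executable("Forthon"), find_executable("gfortran"))

    Forthon_exe, gfortran_exe = find_fortran_deps()
    if None in (Forthon_exe, gfortran_exe):
        pass  # fKDpy.so is neither built nor added to data_files
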
diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -117,3 +117,6 @@
 from .two_point_functions.api import \
     TwoPointFunctions, \
     FcnSet
+
+from .radmc3d_export.api import \
+    RadMC3DWriter

diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -108,6 +108,7 @@
         self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
 
         self.light_ray_solution = []
+        self.halo_lists = {}
         self._data = {}
 
         # Get list of datasets for light ray solution.
@@ -192,6 +193,7 @@
                        get_los_velocity=False,
                        get_nearest_halo=False,
                        nearest_halo_fields=None,
+                       halo_list_file=None,
                        halo_profiler_parameters=None,
                        njobs=1, dynamic=False):
         """
@@ -229,6 +231,10 @@
             A list of fields to be calculated for the halos nearest to
             every lixel in the ray.
             Default: None.
+        halo_list_file : str
+            Filename containing a list of halo properties to be used 
+            for getting the nearest halos to absorbers.
+            Default: None.
         halo_profiler_parameters: dict
             A dictionary of parameters to be passed to the HaloProfiler
             to create the appropriate data used to get properties for
@@ -287,7 +293,7 @@
         >>> # Make the profiles.
         >>> halo_profiler_actions.append({'function': make_profiles,
         ...                           'args': None,
-        ...                           'kwargs': {'filename': 'VirializedHalos.out'}})
+        ...                           'kwargs': {'filename': 'VirializedHalos.h5'}})
         ...
         >>> halo_list = 'filtered'
         >>> halo_profiler_parameters = dict(halo_profiler_kwargs=halo_profiler_kwargs,
@@ -305,6 +311,7 @@
         ...                   get_nearest_halo=True,
         ...                   nearest_halo_fields=['TotalMassMsun_100',
         ...                                        'RadiusMpc_100'],
+        ...                   halo_list_file='VirializedHalos.h5',
         ...                   halo_profiler_parameters=halo_profiler_parameters,
         ...                   get_los_velocity=True)
         
@@ -321,17 +328,18 @@
         # Initialize data structures.
         self._data = {}
         if fields is None: fields = []
-        all_fields = [field for field in fields]
+        data_fields = fields[:]
+        all_fields = fields[:]
         all_fields.extend(['dl', 'dredshift', 'redshift'])
         if get_nearest_halo:
             all_fields.extend(['x', 'y', 'z', 'nearest_halo'])
             all_fields.extend(['nearest_halo_%s' % field \
                                for field in nearest_halo_fields])
-            fields.extend(['x', 'y', 'z'])
+            data_fields.extend(['x', 'y', 'z'])
         if get_los_velocity:
             all_fields.extend(['x-velocity', 'y-velocity',
                                'z-velocity', 'los_velocity'])
-            fields.extend(['x-velocity', 'y-velocity', 'z-velocity'])
+            data_fields.extend(['x-velocity', 'y-velocity', 'z-velocity'])
 
         all_ray_storage = {}
         for my_storage, my_segment in parallel_objects(self.light_ray_solution,
@@ -348,10 +356,6 @@
                        (my_segment['redshift'], my_segment['start'],
                         my_segment['end']))
 
-            if get_nearest_halo:
-                halo_list = self._get_halo_list(my_segment['filename'],
-                                                **halo_profiler_parameters)
-
             # Load dataset for segment.
             pf = load(my_segment['filename'])
 
@@ -373,7 +377,7 @@
                                                  (sub_ray['dts'] *
                                                   vector_length(sub_segment[0],
                                                                 sub_segment[1]))])
-                for field in fields:
+                for field in data_fields:
                     sub_data[field] = np.concatenate([sub_data[field],
                                                       (sub_ray[field])])
 
@@ -400,6 +404,9 @@
 
             # Calculate distance to nearest object on halo list for each lixel.
             if get_nearest_halo:
+                halo_list = self._get_halo_list(pf, fields=nearest_halo_fields,
+                                                filename=halo_list_file,
+                                                **halo_profiler_parameters)
                 sub_data.update(self._get_nearest_halo_properties(sub_data, halo_list,
                                 fields=nearest_halo_fields))
                 sub_data['nearest_halo'] *= pf.units['mpccm']
@@ -434,58 +441,92 @@
         self._data = all_data
         return all_data
 
-    def _get_halo_list(self, dataset, halo_profiler_kwargs=None,
+    def _get_halo_list(self, pf, fields=None, filename=None, 
+                       halo_profiler_kwargs=None,
                        halo_profiler_actions=None, halo_list='all'):
-        "Load a list of halos for the dataset."
+        "Load a list of halos for the pf."
+
+        if str(pf) in self.halo_lists:
+            return self.halo_lists[str(pf)]
+
+        if fields is None: fields = []
+
+        if filename is not None and \
+                os.path.exists(os.path.join(pf.fullpath, filename)):
+
+            my_filename = os.path.join(pf.fullpath, filename)
+            mylog.info("Loading halo list from %s." % my_filename)
+            my_list = {}
+            in_file = h5py.File(my_filename, 'r')
+            for field in fields + ['center']:
+                my_list[field] = in_file[field][:]
+            in_file.close()
+
+        else:
+            my_list = self._halo_profiler_list(pf, fields=fields,
+                                               halo_profiler_kwargs=halo_profiler_kwargs,
+                                               halo_profiler_actions=halo_profiler_actions,
+                                               halo_list=halo_list)
+
+        self.halo_lists[str(pf)] = my_list
+        return self.halo_lists[str(pf)]
+
+    def _halo_profiler_list(self, pf, fields=None, 
+                            halo_profiler_kwargs=None,
+                            halo_profiler_actions=None, halo_list='all'):
+        "Run the HaloProfiler to get the halo list."
 
         if halo_profiler_kwargs is None: halo_profiler_kwargs = {}
         if halo_profiler_actions is None: halo_profiler_actions = []
 
-        hp = HaloProfiler(dataset, **halo_profiler_kwargs)
+        hp = HaloProfiler(pf, **halo_profiler_kwargs)
         for action in halo_profiler_actions:
             if not action.has_key('args'): action['args'] = ()
             if not action.has_key('kwargs'): action['kwargs'] = {}
             action['function'](hp, *action['args'], **action['kwargs'])
 
         if halo_list == 'all':
-            return_list = copy.deepcopy(hp.all_halos)
+            hp_list = copy.deepcopy(hp.all_halos)
         elif halo_list == 'filtered':
-            return_list = copy.deepcopy(hp.filtered_halos)
+            hp_list = copy.deepcopy(hp.filtered_halos)
         else:
             mylog.error("Keyword, halo_list, must be either 'all' or 'filtered'.")
-            return_list = None
+            hp_list = None
 
         del hp
+
+        # Create position array from halo list.
+        return_list = dict([(field, []) for field in fields + ['center']])
+        for halo in hp_list:
+            for field in fields + ['center']:
+                return_list[field].append(halo[field])
+        for field in fields + ['center']:
+            return_list[field] = np.array(return_list[field])
         return return_list
-
+        
     def _get_nearest_halo_properties(self, data, halo_list, fields=None):
         """
         Calculate distance to nearest object in halo list for each lixel in data.
-        Return list of distances and masses of nearest objects.
+        Return list of distances and other properties of nearest objects.
         """
 
         if fields is None: fields = []
+        field_data = dict([(field, np.zeros_like(data['x'])) \
+                           for field in fields])
+        nearest_distance = np.zeros_like(data['x'])
 
-        # Create position array from halo list.
-        halo_centers = np.array(map(lambda halo: halo['center'], halo_list))
-        halo_field_values = dict([(field, np.array(map(lambda halo: halo[field],
-                                                       halo_list))) \
-                                  for field in fields])
-
-        nearest_distance = np.zeros(data['x'].shape)
-        field_data = dict([(field, np.zeros(data['x'].shape)) \
-                           for field in fields])
-        for index in xrange(nearest_distance.size):
-            nearest = np.argmin(periodic_distance(np.array([data['x'][index],
-                                                            data['y'][index],
-                                                            data['z'][index]]),
-                                                  halo_centers))
-            nearest_distance[index] = periodic_distance(np.array([data['x'][index],
-                                                                  data['y'][index],
-                                                                  data['z'][index]]),
-                                                        halo_centers[nearest])
-            for field in fields:
-                field_data[field][index] = halo_field_values[field][nearest]
+        if halo_list['center'].size > 0:
+            for index in xrange(nearest_distance.size):
+                nearest = np.argmin(periodic_distance(np.array([data['x'][index],
+                                                                data['y'][index],
+                                                                data['z'][index]]),
+                                                      halo_list['center']))
+                nearest_distance[index] = periodic_distance(np.array([data['x'][index],
+                                                                      data['y'][index],
+                                                                      data['z'][index]]),
+                                                            halo_list['center'][nearest])
+                for field in fields:
+                    field_data[field][index] = halo_list[field][nearest]
 
         return_data = {'nearest_halo': nearest_distance}
         for field in fields:

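The loop in _get_nearest_halo_properties is a nearest-neighbor search
under periodic boundary conditions.  A self-contained sketch with an
inline periodic distance (a unit box is assumed here; yt's own
periodic_distance handles the real domain):

    import numpy as np

    def periodic_distance(point, centers, period=1.0):
        # component-wise separation, wrapped across the box boundary
        d = np.abs(centers - point)
        d = np.minimum(d, period - d)
        return np.sqrt((d * d).sum(axis=-1))

    halo_centers = np.random.random((100, 3))  # stand-in halo list
    lixels = np.random.random((500, 3))        # stand-in ray positions
    nearest = np.array([np.argmin(periodic_distance(p, halo_centers))
                        for p in lixels])
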
diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -165,7 +165,7 @@
             com.append(c[i])
         com = np.array(com)
         c = (com * pm).sum(axis=1) / pm.sum()
-        return c % self.pf.domain_width + self.pf.domain_left_edge
+        return c%self.pf.domain_width + self.pf.domain_left_edge
 
     def maximum_density(self):
         r"""Return the HOP-identified maximum density. Not applicable to
@@ -1062,7 +1062,7 @@
     def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
-        *dm_only* is set, only run it on the dark matter particles, otherwise
+        *dm_only* is True (default), only run it on the dark matter particles, otherwise
         on all particles.  Returns an iterable collection of *HopGroup* items.
         """
         self._data_source = data_source
@@ -1097,7 +1097,7 @@
     def _get_dm_indices(self):
         if 'creation_time' in self._data_source.hierarchy.field_list:
             mylog.debug("Differentiating based on creation time")
-            return (self._data_source["creation_time"] < 0)
+            return (self._data_source["creation_time"] <= 0)
         elif 'particle_type' in self._data_source.hierarchy.field_list:
             mylog.debug("Differentiating based on particle type")
             return (self._data_source["particle_type"] == 1)
@@ -1367,6 +1367,7 @@
         self._groups = []
         self._max_dens = -1
         self.pf = pf
+        self.redshift = pf.current_redshift
         self.out_list = out_list
         self._data_source = pf.h.all_data()
         mylog.info("Parsing Rockstar halo list")
@@ -1457,7 +1458,7 @@
 class HOPHaloList(HaloList):
     """
     Run hop on *data_source* with a given density *threshold*.  If
-    *dm_only* is set, only run it on the dark matter particles, otherwise
+    *dm_only* is True (default), only run it on the dark matter particles, otherwise
     on all particles.  Returns an iterable collection of *HopGroup* items.
     """
     _name = "HOP"
@@ -1656,7 +1657,7 @@
 class parallelHOPHaloList(HaloList, ParallelAnalysisInterface):
     """
     Run hop on *data_source* with a given density *threshold*.  If
-    *dm_only* is set, only run it on the dark matter particles, otherwise
+    *dm_only* is True (default), only run it on the dark matter particles, otherwise
     on all particles.  Returns an iterable collection of *HopGroup* items.
     """
     _name = "parallelHOP"
@@ -2008,13 +2009,11 @@
         --------
         >>> halos.write_out("HopAnalysis.out")
         """
-        # if path denoted in filename, assure path exists
-        if len(filename.split('/')) > 1:
-            mkdir_rec('/'.join(filename.split('/')[:-1]))
-
+        ensure_dir_exists(filename)
         f = self.comm.write_on_root(filename)
         HaloList.write_out(self, f, ellipsoid_data)
 
+
     def write_particle_lists_txt(self, prefix):
         r"""Write out the names of the HDF5 files containing halo particle data
         to a text file.
@@ -2031,13 +2030,11 @@
         --------
         >>> halos.write_particle_lists_txt("halo-parts")
         """
-        # if path denoted in prefix, assure path exists
-        if len(prefix.split('/')) > 1:
-            mkdir_rec('/'.join(prefix.split('/')[:-1]))
-
+        ensure_dir_exists(prefix)
         f = self.comm.write_on_root("%s.txt" % prefix)
         HaloList.write_particle_lists_txt(self, prefix, fp=f)
 
+
     @parallel_blocking_call
     def write_particle_lists(self, prefix):
         r"""Write out the particle data for halos to HDF5 files.
@@ -2058,10 +2055,7 @@
         --------
         >>> halos.write_particle_lists("halo-parts")
         """
-        # if path denoted in prefix, assure path exists
-        if len(prefix.split('/')) > 1:
-            mkdir_rec('/'.join(prefix.split('/')[:-1]))
-
+        ensure_dir_exists(prefix)
         fn = "%s.h5" % self.comm.get_filename(prefix)
         f = h5py.File(fn, "w")
         for halo in self._groups:
@@ -2090,15 +2084,12 @@
         ellipsoid_data : bool.
             Whether to save the ellipsoidal information to the files.
             Default = False.
-        
+
         Examples
         --------
         >>> halos.dump("MyHalos")
         """
-        # if path denoted in basename, assure path exists
-        if len(basename.split('/')) > 1:
-            mkdir_rec('/'.join(basename.split('/')[:-1]))
-
+        ensure_dir_exists(basename)
         self.write_out("%s.out" % basename, ellipsoid_data)
         self.write_particle_lists(basename)
         self.write_particle_lists_txt(basename)
@@ -2131,7 +2122,7 @@
         The density threshold used when building halos. Default = 160.0.
     dm_only : bool
         If True, only dark matter particles are used when building halos.
-        Default = False.
+        Default = True.
     resize : bool
         Turns load-balancing on or off. Default = True.
     kdtree : string
@@ -2460,7 +2451,7 @@
         The density threshold used when building halos. Default = 160.0.
     dm_only : bool
         If True, only dark matter particles are used when building halos.
-        Default = False.
+        Default = True.
     padding : float
         When run in parallel, the finder needs to surround each subvolume
         with duplicated particles for halo finding to work. This number
@@ -2565,7 +2556,7 @@
         applied.  Default = 0.2.
     dm_only : bool
         If True, only dark matter particles are used when building halos.
-        Default = False.
+        Default = True.
     padding : float
         When run in parallel, the finder needs to surround each subvolume
         with duplicated particles for halo finding to work. This number

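The three write_* methods above now share one helper instead of repeating
the split('/') logic.  ensure_dir_exists is assumed to behave roughly like
this sketch:

    import os

    def ensure_dir_exists(filename):
        # create the parent directory of a target file path, if any
        dirname = os.path.dirname(filename)
        if dirname and not os.path.isdir(dirname):
            os.makedirs(dirname)
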
diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -238,6 +238,7 @@
         tpf = ts[0]
 
         def _particle_count(field, data):
+            if data.NumberOfParticles == 0: return 0
             try:
                 data["particle_type"]
                 has_particle_type=True
@@ -337,6 +338,8 @@
                     hires_only = (self.hires_dm_mass is not None),
                     **kwargs)
         # Make the directory to store the halo lists in.
+        if not self.outbase:
+            self.outbase = os.getcwd()
         if self.comm.rank == 0:
             if not os.path.exists(self.outbase):
                 os.makedirs(self.outbase)

diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -217,7 +217,7 @@
             dis[self.num_sigma_bins-i-3] += dis[self.num_sigma_bins-i-2]
             if i == (self.num_sigma_bins - 3): break
 
-        self.dis = dis  / self.pf['CosmologyComovingBoxSize']**3.0 * self.hubble0**3.0
+        self.dis = dis  / (self.pf.domain_width * self.pf.units["mpccm"]).prod()
 
     def sigmaM(self):
         """

diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -143,7 +143,7 @@
         Note that this is not a string, so no quotes. Default = HaloFinder.
     halo_finder_threshold : Float
         If using HaloFinder or parallelHF, the value of the density threshold
-        used when halo finding. Default = 80.0.
+        used when halo finding. Default = 160.0.
     FOF_link_length : Float
         If using FOFHaloFinder, the linking length between particles.
         Default = 0.2.
@@ -169,7 +169,7 @@
     ... halo_finder_function=parallelHF)
     """
     def __init__(self, restart_files=[], database='halos.db',
-            halo_finder_function=HaloFinder, halo_finder_threshold=80.0,
+            halo_finder_function=HaloFinder, halo_finder_threshold=160.0,
             FOF_link_length=0.2, dm_only=False, refresh=False,
             index=True):
         ParallelAnalysisInterface.__init__(self)

diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/analysis_modules/halo_profiler/halo_filters.py
--- a/yt/analysis_modules/halo_profiler/halo_filters.py
+++ b/yt/analysis_modules/halo_profiler/halo_filters.py
@@ -27,6 +27,7 @@
 import numpy as np
 
 from yt.funcs import *
+from yt.utilities.physical_constants import TINY
 
 def VirialFilter(profile, overdensity_field='ActualOverdensity',
                  virial_overdensity=200., must_be_virialized=True,
@@ -105,7 +106,8 @@
 
     if use_log:
         for field in temp_profile.keys():
-            temp_profile[field] = np.log10(temp_profile[field])
+            temp_profile[field] = np.log10(np.clip(temp_profile[field], TINY,
+                                                   max(temp_profile[field])))
 
     virial = dict((field, 0.0) for field in fields)
 

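The clip guards the log: a zero (or negative) profile bin would become
-inf/nan under log10 and poison the virial interpolation downstream.  In
isolation (the magnitude of TINY is an assumption here; the real value is
imported from yt.utilities.physical_constants):

    import numpy as np

    TINY = 1e-40  # assumed magnitude
    profile = np.array([0.0, 1e-30, 1e-2, 3.0])
    safe = np.log10(np.clip(profile, TINY, profile.max()))  # no -inf left
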
diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import gc
 import numpy as np
 import os
 import h5py
@@ -586,7 +587,7 @@
 
             r_min = 2 * self.pf.h.get_smallest_dx() * self.pf['mpc']
             if (halo['r_max'] / r_min < PROFILE_RADIUS_THRESHOLD):
-                mylog.error("Skipping halo with r_max / r_min = %f." % (halo['r_max']/r_min))
+                mylog.debug("Skipping halo with r_max / r_min = %f." % (halo['r_max']/r_min))
                 return None
 
             # get a sphere object to profile
@@ -632,6 +633,10 @@
                 g.clear_data()
             sphere.clear_data()
             del sphere
+            # Currently, this seems to be the only way to prevent large 
+            # halo profiling runs from running out of RAM.
+            # It would be good to track down the real cause at some point.
+            gc.collect()
 
         return profile
 

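The explicit collection runs once per profiled halo; a runnable sketch of
the pattern (the halo list and profiling step are stand-ins):

    import gc

    def profile_halo(halo):
        return {"id": halo}  # stand-in for the real profiling step

    for halo in range(3):    # stand-in halo list
        profile = profile_halo(halo)
        # reclaim reference cycles left behind by sphere/grid data so
        # long runs do not accumulate memory
        gc.collect()
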
diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/analysis_modules/halo_profiler/standard_analysis.py
--- a/yt/analysis_modules/halo_profiler/standard_analysis.py
+++ b/yt/analysis_modules/halo_profiler/standard_analysis.py
@@ -67,8 +67,10 @@
         self.prof = prof
 
     def plot_everything(self, dirname = None):
-        if dirname is None: dirname = "%s_profile_plots/" % (self.pf)
-        if not os.path.isdir(dirname): os.makedirs(dirname)
+        if not dirname:
+            dirname = "%s_profile_plots/" % (self.pf)
+        if not os.path.isdir(dirname):
+            os.makedirs(dirname)
         import matplotlib; matplotlib.use("Agg")
         import pylab
         for field in self.prof.keys():

diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- /dev/null
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -0,0 +1,334 @@
+"""
+Code to export from yt to RadMC3D
+
+Author: Andrew Myers <atmyers2 at gmail.com>
+Affiliation: UCB
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Andrew Myers.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from yt.mods import *
+from yt.utilities.lib.write_array import \
+    write_3D_array, write_3D_vector_array
+
+class RadMC3DLayer:
+    '''
+
+    This class represents an AMR "layer" of the style described in
+    the radmc3d manual. Unlike yt grids, layers may not have more
+    than one parent, so level L grids will need to be split up
+    if they straddle two or more level L - 1 grids. 
+
+    '''
+    def __init__(self, level, parent, unique_id, LE, RE, dim):
+        self.level = level
+        self.parent = parent
+        self.LeftEdge = LE
+        self.RightEdge = RE
+        self.ActiveDimensions = dim
+        self.id = unique_id
+
+    def get_overlap_with(self, grid):
+        '''
+
+        Returns the overlapping region between two Layers,
+        or a layer and a grid. RE < LE means in any direction
+        means no overlap.
+
+        '''
+        LE = np.maximum(self.LeftEdge,  grid.LeftEdge)
+        RE = np.minimum(self.RightEdge, grid.RightEdge)
+        return LE, RE
+
+    def overlaps(self, grid):
+        '''
+
+        Returns whether or not this layer overlaps a given grid
+        
+        '''
+        LE, RE = self.get_overlap_with(grid)
+        if np.any(RE <= LE):
+            return False
+        else:
+            return True
+
+class RadMC3DWriter:
+    '''
+
+    This class provides a mechanism for writing out data files in a format
+    readable by radmc3d. Currently, only the ASCII, "Layer" style file format
+    is supported. For more information please see the radmc3d manual at:
+    http://www.ita.uni-heidelberg.de/~dullemond/software/radmc-3d
+
+    Parameters
+    ----------
+
+    pf : `StaticOutput`
+        This is the parameter file object corresponding to the
+        simulation output to be written out.
+
+    max_level : int
+        An int corresponding to the maximum number of levels of refinement
+        to include in the output. Often, this does not need to be very large
+        as information on very high levels is frequently unobservable.
+        Default = 2. 
+
+    Examples
+    --------
+
+    This will create a field called "DustDensity" and write it out to the
+    file "dust_density.inp" in a form readable by radmc3d. It will also write
+    a "dust_temperature.inp" file with everything set to 10.0 K: 
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.radmc3d_export.api import *
+
+    >>> dust_to_gas = 0.01
+    >>> def _DustDensity(field, data):
+    ...     return dust_to_gas*data["Density"]
+    >>> add_field("DustDensity", function=_DustDensity)
+
+    >>> def _DustTemperature(field, data):
+    ...     return 10.0*data["Ones"]
+    >>> add_field("DustTemperature", function=_DustTemperature)
+    
+    >>> pf = load("galaxy0030/galaxy0030")
+    >>> writer = RadMC3DWriter(pf)
+    
+    >>> writer.write_amr_grid()
+    >>> writer.write_dust_file("DustDensity", "dust_density.inp")
+    >>> writer.write_dust_file("DustTemperature", "dust_temperature.inp")
+
+    This will create a field called "NumberDensityCO" and write it out to
+    the file "numberdens_co.inp". It will also write out information about
+    the gas velocity to "gas_velocity.inp" so that this broadening may be
+    included in the radiative transfer calculation by radmc3d:
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.radmc3d_export.api import *
+
+    >>> x_co = 1.0e-4
+    >>> mu_h = 2.34e-24
+    >>> def _NumberDensityCO(field, data):
+    ...     return (x_co/mu_h)*data["Density"]
+    >>> add_field("NumberDensityCO", function=_NumberDensityCO)
+    
+    >>> pf = load("galaxy0030/galaxy0030")
+    >>> writer = RadMC3DWriter(pf)
+    
+    >>> writer.write_amr_grid()
+    >>> writer.write_line_file("NumberDensityCO", "numberdens_co.inp")
+    >>> velocity_fields = ["x-velocity", "y-velocity", "z-velocity"]
+    >>> writer.write_line_file(velocity_fields, "gas_velocity.inp") 
+
+    '''
+
+    def __init__(self, pf, max_level=2):
+        self.max_level = max_level
+        self.cell_count = 0 
+        self.layers = []
+        self.domain_dimensions = pf.domain_dimensions
+        self.domain_left_edge  = pf.domain_left_edge
+        self.domain_right_edge = pf.domain_right_edge
+        self.grid_filename = "amr_grid.inp"
+        self.pf = pf
+
+        base_layer = RadMC3DLayer(0, None, 0, \
+                                  self.domain_left_edge, \
+                                  self.domain_right_edge, \
+                                  self.domain_dimensions)
+
+        self.layers.append(base_layer)
+        self.cell_count += np.product(pf.domain_dimensions)
+
+        for grid in pf.h.grids:
+            if grid.Level <= self.max_level:
+                self._add_grid_to_layers(grid)
+
+    def _get_parents(self, grid):
+        parents = []  
+        for potential_parent in self.layers:
+            if potential_parent.level == grid.Level - 1:
+                if potential_parent.overlaps(grid):
+                    parents.append(potential_parent)
+        return parents
+
+    def _add_grid_to_layers(self, grid):
+        parents = self._get_parents(grid)
+        for parent in parents:
+            LE, RE = parent.get_overlap_with(grid)
+            N = (RE - LE) / grid.dds
+            N = np.array([int(n + 0.5) for n in N])
+            new_layer = RadMC3DLayer(grid.Level, parent.id, \
+                                     len(self.layers), \
+                                     LE, RE, N)
+            self.layers.append(new_layer)
+            self.cell_count += np.product(N)
+            
+    def write_amr_grid(self):
+        '''
+        This routine writes the "amr_grid.inp" file that describes the mesh
+        radmc3d will use.
+
+        '''
+        dims = self.domain_dimensions
+        LE   = self.domain_left_edge
+        RE   = self.domain_right_edge
+
+        # calculate cell wall positions
+        xs = [str(x) for x in np.linspace(LE[0], RE[0], dims[0]+1)]
+        ys = [str(y) for y in np.linspace(LE[1], RE[1], dims[1]+1)]
+        zs = [str(z) for z in np.linspace(LE[2], RE[2], dims[2]+1)]
+
+        # write file header
+        grid_file = open(self.grid_filename, 'w')
+        grid_file.write('1 \n') # iformat is always 1
+        if self.max_level == 0:
+            grid_file.write('0 \n')
+        else:
+            grid_file.write('10 \n') # only layer-style AMR files are supported
+        grid_file.write('1 \n') # only cartesian coordinates are supported
+        grid_file.write('0 \n') 
+        grid_file.write('{}    {}    {} \n'.format(1, 1, 1)) # assume 3D
+        grid_file.write('{}    {}    {} \n'.format(dims[0], dims[1], dims[2]))
+        if self.max_level != 0:
+            s = str(self.max_level) + '    ' + str(len(self.layers)-1) + '\n'
+            grid_file.write(s)
+
+        # write base grid cell wall positions
+        for x in xs:
+            grid_file.write(x + '    ')
+        grid_file.write('\n')
+
+        for y in ys:
+            grid_file.write(y + '    ')
+        grid_file.write('\n')
+
+        for z in zs:
+            grid_file.write(z + '    ')
+        grid_file.write('\n')
+
+        # write information about fine layers, skipping the base layer:
+        for layer in self.layers[1:]:
+            p = layer.parent
+            dds = (layer.RightEdge - layer.LeftEdge) / (layer.ActiveDimensions)
+            if p == 0:
+                ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
+            else:
+                LE = np.zeros(3)
+                for potential_parent in self.layers:
+                    if potential_parent.id == p:
+                        LE = potential_parent.LeftEdge
+                ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
+            ix  = int(ind[0]+0.5)
+            iy  = int(ind[1]+0.5)
+            iz  = int(ind[2]+0.5)
+            nx, ny, nz = layer.ActiveDimensions / 2
+            s = '{}    {}    {}    {}    {}    {}    {} \n'
+            s = s.format(p, ix, iy, iz, nx, ny, nz)
+            grid_file.write(s)
+
+        grid_file.close()
+
+    def _write_layer_data_to_file(self, fhandle, field, level, LE, dim):
+        cg = self.pf.h.covering_grid(level, LE, dim, num_ghost_zones=1)
+        if isinstance(field, list):
+            data_x = cg[field[0]]
+            data_y = cg[field[1]]
+            data_z = cg[field[2]]
+            write_3D_vector_array(data_x, data_y, data_z, fhandle)
+        else:
+            data = cg[field]
+            write_3D_array(data, fhandle)
+
+    def write_dust_file(self, field, filename):
+        '''
+        This method writes out fields in the format radmc3d needs to compute
+        thermal dust emission. In particular, if you have a field called
+        "DustDensity", you can write out a dust_density.inp file.
+
+        Parameters
+        ----------
+
+        field : string
+            The name of the field to be written out
+        filename : string
+            The name of the file to write the data to. The filenames radmc3d
+            expects for its various modes of operations are described in the
+            radmc3d manual.
+
+        '''
+        fhandle = open(filename, 'w')
+
+        # write header
+        fhandle.write('1 \n')
+        fhandle.write(str(self.cell_count) + ' \n')
+        fhandle.write('1 \n')
+
+        # now write fine layers:
+        for layer in self.layers:
+            lev = layer.level
+            if lev == 0:
+                LE = self.domain_left_edge
+                N  = self.domain_dimensions
+            else:
+                LE = layer.LeftEdge
+                N  = layer.ActiveDimensions
+
+            self._write_layer_data_to_file(fhandle, field, lev, LE, N)
+            
+        fhandle.close()
+
+    def write_line_file(self, field, filename):
+        '''
+        This method writes out fields in the format radmc3d needs to compute
+        line emission.
+
+        Parameters
+        ----------
+
+        field : string or list of 3 strings
+            If a string, the name of the field to be written out. If a list,
+            three fields that will be written to the file as a vector quantity.
+        filename : string
+            The name of the file to write the data to. The filenames radmc3d
+            expects for its various modes of operation are described in the
+            radmc3d manual.
+
+        '''
+        fhandle = open(filename, 'w')
+
+        # write header
+        fhandle.write('1 \n')
+        fhandle.write(str(self.cell_count) + ' \n')
+
+        # now write fine layers:
+        for layer in self.layers:
+            lev = layer.level
+            if lev == 0:
+                LE = self.domain_left_edge
+                N  = self.domain_dimensions
+            else:
+                LE = layer.LeftEdge
+                N  = layer.ActiveDimensions
+
+            self._write_layer_data_to_file(fhandle, field, lev, LE, N)
+
+        fhandle.close()

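The layer/grid overlap test above is a plain axis-aligned box
intersection; a standalone version:

    import numpy as np

    def overlap(le1, re1, le2, re2):
        # intersect two axis-aligned boxes; any axis with RE <= LE
        # means no overlap
        LE = np.maximum(le1, le2)
        RE = np.minimum(re1, re2)
        if np.any(RE <= LE):
            return None
        return LE, RE

    print(overlap(np.zeros(3), np.ones(3),
                  np.array([0.5, 0.5, 0.5]), np.array([2.0, 2.0, 2.0])))
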
diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/analysis_modules/radmc3d_export/api.py
--- /dev/null
+++ b/yt/analysis_modules/radmc3d_export/api.py
@@ -0,0 +1,30 @@
+"""
+API for RadMC3D Export code
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Author: Andrew Myers <atmyers2 at gmail.com>
+Affiliation: UCB
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .RadMC3DInterface import \
+    RadMC3DWriter

diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -20,4 +20,5 @@
     config.add_subpackage("spectral_integrator")
     config.add_subpackage("star_analysis")
     config.add_subpackage("two_point_functions")
+    config.add_subpackage("radmc3d_export")
     return config

diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,7 +62,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold006',
+    gold_standard_filename = 'gold008',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None',
     thread_field_detection = 'False'

diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -33,6 +33,8 @@
 import shelve
 from exceptions import ValueError, KeyError
 from functools import wraps
+import fileinput
+from re import finditer
 
 from yt.funcs import *
 from yt.utilities.logger import ytLogger
@@ -779,6 +781,240 @@
         return march_cubes_grid_flux(self.field_value, vals, xv, yv, zv,
                     ff, mask, grid.LeftEdge, grid.dds)
 
+    @property
+    def triangles(self):
+        if self.vertices is None:
+            self.get_data()
+        vv = np.empty((self.vertices.shape[1]/3, 3, 3), dtype="float64")
+        for i in range(3):
+            for j in range(3):
+                vv[:,i,j] = self.vertices[j,i::3]
+        return vv
+ 
+    def export_obj(self, filename, transparency = 1.0, dist_fac = None,
+                   color_field = None, emit_field = None, color_map = "algae", 
+                   color_log = True, emit_log = True, plot_index = None, 
+                   color_field_max = None, color_field_min = None, 
+                   emit_field_max = None, emit_field_min = None):
+        r"""This exports the surface to the OBJ format, suitable for visualization
+        in many different programs (e.g., Blender).  NOTE: this exports both
+        an .obj file and an .mtl file, using 'filename' as the prefix for
+        each.  The .obj file points to the .mtl file in its header, so if you
+        move the two files, update the .obj header to match.  ALSO NOTE:
+        emit_field should be a combination of the other two fields, so that
+        the emissivity tracks the color.
+
+        Parameters
+        ----------
+        filename : string
+            The file this will be exported to.  This cannot be a file-like object.
+            Note - no file extensions are included - both .obj and .mtl
+            files are created.
+        transparency : float
+            This gives the transparency of the output surface plot.  Values
+            from 0.0 (invisible) to 1.0 (opaque).
+        dist_fac : float
+            Divide the axes distances by this amount.
+        color_field : string
+            Should a field be sampled and colormapped?
+        emit_field : string
+            Should we track the emissivity of a field?
+              NOTE: this should be a combination of the other two fields being used.
+        color_map : string
+            Which color map should be applied?
+        color_log : bool
+            Should the color field be logged before being mapped?
+        emit_log : bool
+            Should the emitting field be logged before being mapped?
+        plot_index : integer
+            Index of plot for multiple plots.  If None, only one plot is made.
+        color_field_max : float
+            Maximum value of the color field across all surfaces.
+        color_field_min : float
+            Minimum value of the color field across all surfaces.
+        emit_field_max : float
+            Maximum value of the emitting field across all surfaces.
+        emit_field_min : float
+            Minimum value of the emitting field across all surfaces.
+
+        Examples
+        --------
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> trans = 1.0
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> surf = pf.h.surface(sp, "Density", 5e-27)
+        >>> surf.export_obj("my_galaxy", transparency=trans, dist_fac = distf)
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> mi, ma = sp.quantities['Extrema']('Temperature')[0]
+        >>> rhos = [1e-24, 1e-25]
+        >>> trans = [0.5, 1.0]
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> for i, r in enumerate(rhos):
+        ...     surf = pf.h.surface(sp,'Density',r)
+        ...     surf.export_obj("my_galaxy", transparency=trans[i], 
+        ...                      color_field='Temperature', dist_fac = distf, 
+        ...                      plot_index = i, color_field_max = ma, 
+        ...                      color_field_min = mi)
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> rhos = [1e-24, 1e-25]
+        >>> trans = [0.5, 1.0]
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> def _Emissivity(field, data):
+        ...     return (data['Density']*data['Density']*np.sqrt(data['Temperature']))
+        >>> add_field("Emissivity", function=_Emissivity, units=r"\rm{g K}/\rm{cm}^{6}")
+        >>> for i, r in enumerate(rhos):
+        ...     surf = pf.h.surface(sp,'Density',r)
+        ...     surf.export_obj("my_galaxy", transparency=trans[i], 
+        ...                      color_field='Temperature', emit_field = 'Emissivity', 
+        ...                      dist_fac = distf, plot_index = i)
+
+        """
+        if self.vertices is None:
+            self.get_data(color_field,"face")
+        elif color_field is not None:
+            if color_field not in self.field_data:
+                self[color_field]
+        if emit_field is not None:
+            if emit_field not in self.field_data:
+                self[emit_field]
+        only_on_root(self._export_obj, filename, transparency, dist_fac, color_field, emit_field, 
+                             color_map, color_log, emit_log, plot_index, color_field_max, 
+                             color_field_min, emit_field_max, emit_field_min)
+
+    def _color_samples_obj(self, cs, em, color_log, emit_log, color_map, arr, 
+                           color_field_max, color_field_min, 
+                           emit_field_max, emit_field_min): # this now holds for obj files
+        if color_log: cs = np.log10(cs)
+        if emit_log: em = np.log10(em)
+        if color_field_min is None:
+            mi = cs.min()
+        else:
+            mi = color_field_min
+            if color_log: mi = np.log10(mi)
+        if color_field_max is None:
+            ma = cs.max()
+        else:
+            ma = color_field_max
+            if color_log: ma = np.log10(ma)
+        cs = (cs - mi) / (ma - mi)
+        # to get color indices for OBJ formatting
+        from yt.visualization._colormap_data import color_map_luts
+        lut = color_map_luts[color_map]
+        x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
+        arr["cind"][:] = (np.interp(cs,x,x)*(lut[0].shape[0]-1)).astype("uint8")
+        # now, get emission
+        if emit_field_min is None:
+            emi = em.min()
+        else:
+            emi = emit_field_min
+            if emit_log: emi = np.log10(emi)
+        if emit_field_max is None:
+            ema = em.max()
+        else:
+            ema = emit_field_max
+            if emit_log: ema = np.log10(ema)
+        em = (em - emi)/(ema - emi)
+        x = np.mgrid[0.0:255.0:2j] # assume 1 emissivity per color
+        arr["emit"][:] = (np.interp(em,x,x))*2.0 # for some reason, max emiss = 2
+
+    @parallel_root_only
+    def _export_obj(self, filename, transparency, dist_fac = None, 
+                    color_field = None, emit_field = None, color_map = "algae", 
+                    color_log = True, emit_log = True, plot_index = None, 
+                    color_field_max = None, color_field_min = None, 
+                    emit_field_max = None, emit_field_min = None):
+        if plot_index is None:
+            plot_index = 0
+        if isinstance(filename, file):
+            fobj = filename + '.obj'
+            fmtl = filename + '.mtl'
+        else:
+            if plot_index == 0:
+                fobj = open(filename + '.obj', "w")
+                fmtl = open(filename + '.mtl', 'w')
+                cc = 1
+            else:
+                # read in last vertex
+                linesave = ''
+                for line in fileinput.input(filename + '.obj'):
+                    if line[0] == 'f':
+                        linesave = line
+                p = [m.start() for m in finditer(' ', linesave)]
+                cc = int(linesave[p[len(p)-1]:])+1
+                fobj = open(filename + '.obj', "a")
+                fmtl = open(filename + '.mtl', 'a')
+        ftype = [("cind", "uint8"), ("emit", "float")]
+        vtype = [("x","float"),("y","float"), ("z","float")]
+        if plot_index == 0:
+            fobj.write("# yt OBJ file\n")
+            fobj.write("# www.yt-project.com\n")
+            fobj.write("mtllib " + filename + '.mtl\n\n')  # use this material file for the faces
+            fmtl.write("# yt MLT file\n")
+            fmtl.write("# www.yt-project.com\n\n")
+        #(0) formulate vertices
+        nv = self.vertices.shape[1] # number of groups of vertices
+        f = np.empty(nv/self.vertices.shape[0], dtype=ftype) # store sets of face colors
+        v = np.empty(nv, dtype=vtype) # stores vertices
+        if color_field is not None:
+            cs = self[color_field]
+        else:
+            cs = np.empty(self.vertices.shape[1]/self.vertices.shape[0])
+        if emit_field is not None:
+            em = self[emit_field]
+        else:
+            em = np.empty(self.vertices.shape[1]/self.vertices.shape[0])            
+        self._color_samples_obj(cs, em, color_log, emit_log, color_map, f, 
+                                color_field_max, color_field_min, 
+                                emit_field_max, emit_field_min) # map color values to color scheme
+        from yt.visualization._colormap_data import color_map_luts # import colors for mtl file
+        lut = color_map_luts[color_map] # enumerate colors
+        # interpolate emissivity to enumerated colors
+        emiss = np.interp(np.mgrid[0:lut[0].shape[0]],np.mgrid[0:len(cs)],f["emit"][:])
+        if dist_fac is None: # then normalize by bounds
+            DLE = self.pf.domain_left_edge
+            DRE = self.pf.domain_right_edge
+            bounds = [(DLE[i], DRE[i]) for i in range(3)]
+            for i, ax in enumerate("xyz"):
+                # Do the bounds first since we cast to f32
+                tmp = self.vertices[i,:]
+                np.subtract(tmp, bounds[i][0], tmp)
+                w = bounds[i][1] - bounds[i][0]
+                np.divide(tmp, w, tmp)
+                np.subtract(tmp, 0.5, tmp) # Center at origin.
+                v[ax][:] = tmp   
+        else:
+            for i, ax in enumerate("xyz"):
+                tmp = self.vertices[i,:]
+                np.divide(tmp, dist_fac, tmp)
+                v[ax][:] = tmp
+        #(1) write all colors per surface to mtl file
+        for i in range(0,lut[0].shape[0]): 
+            omname = "material_" + str(i) + '_' + str(plot_index)  # name of the material
+            fmtl.write("newmtl " + omname +'\n') # the specific material (color) for this face
+            fmtl.write("Ka %.6f %.6f %.6f\n" %(0.0, 0.0, 0.0)) # ambient color, keep off
+            fmtl.write("Kd %.6f %.6f %.6f\n" %(lut[0][i], lut[1][i], lut[2][i])) # color of face
+            fmtl.write("Ks %.6f %.6f %.6f\n" %(0.0, 0.0, 0.0)) # specular color, keep off
+            fmtl.write("d %.6f\n" %(transparency))  # transparency
+            fmtl.write("em %.6f\n" %(emiss[i])) # emissivity per color
+            fmtl.write("illum 2\n") # not relevant, 2 means highlights on?
+            fmtl.write("Ns %.6f\n\n" %(0.0)) #keep off, some other specular thing
+        #(2) write vertices
+        for i in range(0,self.vertices.shape[1]):
+            fobj.write("v %.6f %.6f %.6f\n" %(v["x"][i], v["y"][i], v["z"][i]))    
+        fobj.write("#done defining vertices\n\n")
+        #(3) define faces and materials for each face
+        for i in range(0,self.triangles.shape[0]):
+            omname = 'material_' + str(f["cind"][i]) + '_' + str(plot_index) # which color to use
+            fobj.write("usemtl " + omname + '\n') # which material to use for this face (color)
+            fobj.write("f " + str(cc) + ' ' + str(cc+1) + ' ' + str(cc+2) + '\n\n') # vertices to color
+            cc = cc+3
+        fmtl.close()
+        fobj.close()
+
+
     def export_ply(self, filename, bounds = None, color_field = None,
                    color_map = "algae", color_log = True, sample_type = "face"):
         r"""This exports the surface to the PLY format, suitable for visualization
@@ -832,16 +1068,6 @@
             arr["green"][:] = cs[0,:,1]
             arr["blue"][:] = cs[0,:,2]
 
-    @property
-    def triangles(self):
-        if self.vertices is None:
-            self.get_data()
-        vv = np.empty((self.vertices.shape[1]/3, 3, 3), dtype="float64")
-        for i in range(3):
-            for j in range(3):
-                vv[:,i,j] = self.vertices[j,i::3]
-        return vv
- 
     @parallel_root_only
     def _export_ply(self, filename, bounds = None, color_field = None,
                    color_map = "algae", color_log = True, sample_type = "face"):

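The triangles property (moved above export_obj so both exporters can use
it) regroups the flat (3, 3N) vertex array into one (N, 3, 3) entry per
triangle; a toy illustration:

    import numpy as np

    vertices = np.arange(18, dtype="float64").reshape(3, 6)  # 2 triangles
    ntri = vertices.shape[1] // 3
    vv = np.empty((ntri, 3, 3))
    for i in range(3):       # vertex within the triangle
        for j in range(3):   # coordinate (x, y, z)
            vv[:, i, j] = vertices[j, i::3]
    # vv[n, i, j] is coordinate j of vertex i of triangle n
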
diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -366,10 +366,9 @@
                      [self.field_parameters])
         return (_reconstruct_object, args)
 
-    def __repr__(self, clean = False):
+    def __repr__(self):
         # We'll do this the slow way to be clear what's going on
-        if clean: s = "%s: " % (self.__class__.__name__)
-        else: s = "%s (%s): " % (self.__class__.__name__, self.pf)
+        s = "%s (%s): " % (self.__class__.__name__, self.pf)
         s += ", ".join(["%s=%s" % (i, getattr(self,i))
                        for i in self._con_args])
         return s
@@ -979,25 +978,49 @@
         return self.quantities["TotalQuantity"]("CellVolume")[0] * \
             (self.pf[unit] / self.pf['cm']) ** 3.0
 
+# Many of these items are set up specifically to ensure that
+# we are not breaking old pickle files.  This means we must only call
+# _reconstruct_object and cannot mandate any additional arguments to
+# the reconstruction function.
+#
+# In the future, this would be better off being set up to more directly
+# reference objects or retain state, perhaps with a context manager.
+#
+# One final detail: time series or multiple parameter files in a single pickle
+# seem problematic.
+
+class ReconstructedObject(tuple):
+    pass
+
+def _check_nested_args(arg, ref_pf):
+    if not isinstance(arg, (tuple, list, ReconstructedObject)):
+        return arg
+    elif isinstance(arg, ReconstructedObject) and ref_pf == arg[0]:
+        return arg[1]
+    narg = [_check_nested_args(a, ref_pf) for a in arg]
+    return narg
+
+def _get_pf_by_hash(hash):
+    from yt.data_objects.static_output import _cached_pfs
+    for pf in _cached_pfs.values():
+        if pf._hash() == hash: return pf
+    return None
+
 def _reconstruct_object(*args, **kwargs):
     pfid = args[0]
     dtype = args[1]
+    pf = _get_pf_by_hash(pfid)
+    if not pf:
+        pfs = ParameterFileStore()
+        pf = pfs.get_pf_hash(pfid)
     field_parameters = args[-1]
     # will be much nicer when we can do pfid, *a, fp = args
-    args, new_args = args[2:-1], []
-    for arg in args:
-        if iterable(arg) and len(arg) == 2 \
-           and not isinstance(arg, types.DictType) \
-           and isinstance(arg[1], YTDataContainer):
-            new_args.append(arg[1])
-        else: new_args.append(arg)
-    pfs = ParameterFileStore()
-    pf = pfs.get_pf_hash(pfid)
+    args = args[2:-1]
+    new_args = [_check_nested_args(a, pf) for a in args]
     cls = getattr(pf.h, dtype)
     obj = cls(*new_args)
     obj.field_parameters.update(field_parameters)
-    return pf, obj
-
+    return ReconstructedObject((pf, obj))
 
 class YTSelectedIndicesBase(YTSelectionContainer3D):
     """An arbitrarily defined data container that allows for selection
@@ -1254,7 +1277,7 @@
             if region in ["OR", "AND", "NOT", "(", ")"]:
                 s += region
             else:
-                s += region.__repr__(clean = True)
+                s += region.__repr__()
             if i < (len(self.regions) - 1): s += ", "
         s += "]"
         return s
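
The net effect of the reconstruction changes above: unpickling now returns a ReconstructedObject (a tuple subclass) rather than a bare (pf, obj) tuple, and any nested argument that was itself a pickled container is unwrapped against the matching parameter file. A standalone sketch of that unwrapping (the strings stand in for real pf and container objects):

    class ReconstructedObject(tuple):
        pass

    def _check_nested_args(arg, ref_pf):
        if not isinstance(arg, (tuple, list, ReconstructedObject)):
            return arg
        elif isinstance(arg, ReconstructedObject) and ref_pf == arg[0]:
            return arg[1]  # unwrap to the bare data object
        return [_check_nested_args(a, ref_pf) for a in arg]

    pf = "pf_hash_1234"
    sphere = "a pickled sphere"
    args = (0.5, ReconstructedObject((pf, sphere)), [1, 2])
    print(_check_nested_args(args, pf))
    # [0.5, 'a pickled sphere', [1, 2]]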

diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -120,8 +120,12 @@
     particle masses in the object.
     """
     baryon_mass = data["CellMassMsun"].sum()
-    particle_mass = data["ParticleMassMsun"].sum()
-    return [baryon_mass + particle_mass]
+    try:
+        particle_mass = data["ParticleMassMsun"].sum()
+        total_mass = baryon_mass + particle_mass
+    except KeyError:
+        total_mass = baryon_mass
+    return [total_mass]
 def _combTotalMass(data, total_mass):
     return total_mass.sum()
 add_quantity("TotalMass", function=_TotalMass,
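
This change makes TotalMass usable on datasets with no particles at all: the KeyError raised by the missing particle field is caught and the quantity falls back to baryonic mass alone. In miniature:

    data = {"CellMassMsun": [1.0, 2.0]}  # hypothetical dataset, no particles
    baryon_mass = sum(data["CellMassMsun"])
    try:
        total_mass = baryon_mass + sum(data["ParticleMassMsun"])
    except KeyError:
        total_mass = baryon_mass
    print(total_mass)  # 3.0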

diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -302,7 +302,9 @@
             self.requested.append(item)
             return self[item]
         self.requested.append(item)
-        return defaultdict.__missing__(self, item)
+        if item not in self:
+            self[item] = self._read_data(item)
+        return self[item]
 
     def deposit(self, *args, **kwargs):
         return np.random.random((self.nd, self.nd, self.nd))
@@ -310,7 +312,7 @@
     def _read_data(self, field_name):
         self.requested.append(field_name)
         FI = getattr(self.pf, "field_info", FieldInfo)
-        if FI.has_key(field_name) and FI[field_name].particle_type:
+        if field_name in FI and FI[field_name].particle_type:
             self.requested.append(field_name)
             return np.ones(self.NumberOfParticles)
         return defaultdict.__missing__(self, field_name)
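
The __missing__ change above turns the fake field container into a compute-and-cache mapping: a missed key is filled once via _read_data and served straight from the dict afterwards. The pattern, reduced to its essentials:

    from collections import defaultdict
    import numpy as np

    class CachingContainer(defaultdict):
        def _read_data(self, item):
            return np.ones(4)  # stand-in for a real field read
        def __missing__(self, item):
            if item not in self:
                self[item] = self._read_data(item)
            return self[item]

    fd = CachingContainer()
    first = fd["Density"]
    print(fd["Density"] is first)  # True: cached, not re-read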

diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -58,7 +58,8 @@
 
     def get_data(self, fields):
         fields = ensure_list(fields)
-        rvs = self.source.get_data(fields, force_particle_read=True)
+        self.source.get_data(fields, force_particle_read=True)
+        rvs = [self.source[field] for field in fields]
         if len(fields) == 1: return rvs[0]
         return rvs
 

diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -989,12 +989,12 @@
         given the tilt about the x axis when e0 was aligned 
         to x after t1, t2 rotations about z, y
         """
-        RX = get_rotation_matrix(-tilt, (1,0,0)).transpose()
-        RY = get_rotation_matrix(-t2,   (0,1,0)).transpose()
-        RZ = get_rotation_matrix(-t1,   (0,0,1)).transpose()
-        e1 = ((0, 1, 0) * RX).sum(axis = 1)
-        e1 = (e1 * RY).sum(axis = 1)
-        e1 = (e1 * RZ).sum(axis = 1)
+        RX = get_rotation_matrix(-tilt, (1, 0, 0)).transpose()
+        RY = get_rotation_matrix(-t2,   (0, 1, 0)).transpose()
+        RZ = get_rotation_matrix(-t1,   (0, 0, 1)).transpose()
+        e1 = ((0, 1, 0) * RX).sum(axis=1)
+        e1 = (e1 * RY).sum(axis=1)
+        e1 = (e1 * RZ).sum(axis=1)
         e2 = np.cross(e0, e1)
 
         self._e1 = e1
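
The whitespace cleanup above leaves the math unchanged, which is worth spelling out: e1 starts as the y unit vector and is carried back through the tilt, t2, t1 rotations by applying the transposed (inverse) matrices in turn. A NumPy sketch, with a Rodrigues-form helper standing in for yt's get_rotation_matrix:

    import numpy as np

    def rotation_matrix(theta, axis):
        # Rotation by theta about the given axis (Rodrigues / quaternion form).
        axis = np.asarray(axis, dtype="float64")
        axis /= np.sqrt(np.dot(axis, axis))
        a = np.cos(theta / 2.0)
        b, c, d = -axis * np.sin(theta / 2.0)
        return np.array([[a*a+b*b-c*c-d*d, 2*(b*c+a*d),     2*(b*d-a*c)],
                         [2*(b*c-a*d),     a*a+c*c-b*b-d*d, 2*(c*d+a*b)],
                         [2*(b*d+a*c),     2*(c*d-a*b),     a*a+d*d-b*b-c*c]])

    t1, t2, tilt = 0.3, 0.2, 0.1  # hypothetical angles
    RX = rotation_matrix(-tilt, (1, 0, 0)).transpose()
    RY = rotation_matrix(-t2,   (0, 1, 0)).transpose()
    RZ = rotation_matrix(-t1,   (0, 0, 1)).transpose()
    e1 = ((0, 1, 0) * RX).sum(axis=1)
    e1 = (e1 * RY).sum(axis=1)
    e1 = (e1 * RZ).sum(axis=1)
    print(np.isclose(np.linalg.norm(e1), 1.0))  # True: rotations preserve length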

diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -179,7 +179,7 @@
     def get_smallest_appropriate_unit(self, v):
         max_nu = 1e30
         good_u = None
-        for unit in ['mpc', 'kpc', 'pc', 'au', 'rsun', 'cm']:
+        for unit in ['mpc', 'kpc', 'pc', 'au', 'rsun', 'km', 'cm']:
             vv = v*self[unit]
             if vv < max_nu and vv > 1.0:
                 good_u = unit
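
Adding 'km' gives get_smallest_appropriate_unit one more rung between rsun and cm. The loop keeps whichever unit leaves the smallest value that is still greater than 1; a sketch with hypothetical cm-based conversion factors (the real method uses the dataset's own unit table):

    conversions = {'mpc': 3.24e-25, 'kpc': 3.24e-22, 'pc': 3.24e-19,
                   'au': 6.68e-14, 'rsun': 1.44e-11, 'km': 1e-5, 'cm': 1.0}

    def smallest_appropriate_unit(v_cm):
        max_nu, good_u = 1e30, None
        for unit in ['mpc', 'kpc', 'pc', 'au', 'rsun', 'km', 'cm']:
            vv = v_cm * conversions[unit]
            if 1.0 < vv < max_nu:
                good_u, max_nu = unit, vv
        return good_u

    print(smallest_appropriate_unit(2.0e7))  # 2.0e7 cm = 200 km -> 'km'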

diff -r c28cee1987ea6649290a1261f9ad0f471d5bcc00 -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -89,6 +89,9 @@
         if field.startswith("particle"): continue
         if field.startswith("CIC"): continue
         if field.startswith("WeakLensingConvergence"): continue
+        if field.startswith("DensityPerturbation"): continue
+        if field.startswith("Matter_Density"): continue
+        if field.startswith("Overdensity"): continue
         if FieldInfo[field].particle_type: continue
         for nproc in [1, 4, 8]:
             yield TestFieldAccess(field, nproc)

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/a392d0b00fa3/
Changeset:   a392d0b00fa3
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-06-06 20:27:53
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #44)

Adding fix_axis to axis specification for 2D objects.
Affected #:  1 file

diff -r fd8ca5d271fb9d21890b4f4fe61ecb416503c4e7 -r a392d0b00fa31a49aeb9e81c8e629ca64cdf8ca1 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -590,7 +590,7 @@
     _spatial = False
     def __init__(self, axis, pf, field_parameters):
         ParallelAnalysisInterface.__init__(self)
-        self.axis = axis
+        self.axis = fix_axis(axis)
         super(YTSelectionContainer2D, self).__init__(
             pf, field_parameters)
         self.set_field_parameter("axis", axis)
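
A minimal sketch of the behavior relied on here: fix_axis accepts "x"/"y"/"z" or 0/1/2 and always hands back an integer axis index (the real helper lives in yt.funcs):

    def fix_axis(axis):
        if isinstance(axis, str):
            return "xyz".index(axis.lower())
        return int(axis)

    print(fix_axis("z"), fix_axis(1))  # 2 1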


https://bitbucket.org/yt_analysis/yt/commits/5260b868fa0c/
Changeset:   5260b868fa0c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-07 16:09:18
Summary:     Merging in some misc changes.
Affected #:  2 files

diff -r a392d0b00fa31a49aeb9e81c8e629ca64cdf8ca1 -r 5260b868fa0c432905808d6180aee828fd4bdbe8 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -477,8 +477,23 @@
         if fields is None: return
         fields = self._determine_fields(fields)
         # Now we collect all our fields
-        fields_to_get = [f for f in fields if f not in self.field_data]
-        if len(fields_to_get) == 0:
+        # Here is where we need to perform a validation step, so that if we
+        # have a field requested that we actually *can't* yet get, we put it
+        # off until the end.  This prevents double-reading fields that will
+        # need to be used in spatial fields later on.
+        fields_to_get = []
+        # This will be pre-populated with spatial fields
+        fields_to_generate = [] 
+        for field in self._determine_fields(fields):
+            if field in self.field_data: continue
+            finfo = self.pf._get_field_info(*field)
+            try:
+                finfo.check_available(self)
+            except NeedsGridType:
+                fields_to_generate.append(field)
+                continue
+            fields_to_get.append(field)
+        if len(fields_to_get) == 0 and len(fields_to_generate) == 0:
             return
         elif self._locked == True:
             raise GenerationInProgress(fields)
@@ -502,12 +517,18 @@
         read_particles, gen_particles = self.hierarchy._read_particle_fields(
                                         particles, self, self._current_chunk)
         self.field_data.update(read_particles)
-        fields_to_generate = gen_fluids + gen_particles
+        fields_to_generate += gen_fluids + gen_particles
         self._generate_fields(fields_to_generate)
 
     def _generate_fields(self, fields_to_generate):
         index = 0
         with self._field_lock():
+            # At this point, we assume that any fields that are necessary to
+            # *generate* a field are in fact already available to us.  Note
+            # that we do not make any assumption about whether or not the
+            # fields have a spatial requirement.  This will be checked inside
+            # _generate_field, at which point additional dependencies may
+            # actually be noted.
             while any(f not in self.field_data for f in fields_to_generate):
                 field = fields_to_generate[index % len(fields_to_generate)]
                 index += 1
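
The validation step above is a partition-by-exception: each requested field is probed with check_available, and anything that raises NeedsGridType is deferred to the generate list rather than being read twice. The shape of that logic, isolated (the naming convention in check_available is a hypothetical stand-in; real validators inspect the field's requirements):

    class NeedsGridType(Exception):
        pass

    def check_available(field):
        if field.startswith("spatial_"):
            raise NeedsGridType(field)

    requested = ["Density", "spatial_DivV", "Temperature"]
    fields_to_get, fields_to_generate = [], []
    for field in requested:
        try:
            check_available(field)
        except NeedsGridType:
            fields_to_generate.append(field)
            continue
        fields_to_get.append(field)
    print(fields_to_get, fields_to_generate)
    # ['Density', 'Temperature'] ['spatial_DivV']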

diff -r a392d0b00fa31a49aeb9e81c8e629ca64cdf8ca1 -r 5260b868fa0c432905808d6180aee828fd4bdbe8 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -894,7 +894,7 @@
     Child cells are not returned.
     """
     _type_name = "data_collection"
-    _con_args = ("obj_list",)
+    _con_args = ("_obj_list",)
     def __init__(self, center, obj_list, pf = None, field_parameters = None):
         YTSelectionContainer3D.__init__(self, center, pf, field_parameters)
         self._obj_ids = np.array([o.id - o._id_offset for o in obj_list],


https://bitbucket.org/yt_analysis/yt/commits/ef8a2cfed946/
Changeset:   ef8a2cfed946
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-07 16:41:18
Summary:     Merging in changes to the OWLS frontend and simple_smooth.
Affected #:  7 files

diff -r 5260b868fa0c432905808d6180aee828fd4bdbe8 -r ef8a2cfed946999667f5e45eb7cea5d4634d36ab yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -53,6 +53,7 @@
     MinimalProjectionData
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects, parallel_root_only, ParallelAnalysisInterface
+import yt.geometry.particle_deposit as particle_deposit
 
 from .field_info_container import\
     NeedsGridType,\
@@ -433,20 +434,31 @@
         fields_to_get = [f for f in fields if f not in self.field_data]
         fields_to_get = self._identify_dependencies(fields_to_get)
         if len(fields_to_get) == 0: return
-        fill, gen = self._split_fields(fields_to_get)
+        fill, gen, part = self._split_fields(fields_to_get)
+        if len(part) > 0: self._fill_particles(part)
         if len(fill) > 0: self._fill_fields(fill)
         if len(gen) > 0: self._generate_fields(gen)
 
     def _split_fields(self, fields_to_get):
         fill, gen = self.pf.h._split_fields(fields_to_get)
+        particles = []
         for field in gen:
             finfo = self.pf._get_field_info(*field)
             try:
                 finfo.check_available(self)
             except NeedsOriginalGrid:
                 fill.append(field)
+        for field in fill:
+            finfo = self.pf._get_field_info(*field)
+            if finfo.particle_type:
+                particles.append(field)
         gen = [f for f in gen if f not in fill]
-        return fill, gen
+        fill = [f for f in fill if f not in particles]
+        return fill, gen, particles
+
+    def _fill_particles(self, part):
+        for p in part:
+            self[p] = self._data_source[p]
 
     def _fill_fields(self, fields):
         output_fields = [np.zeros(self.ActiveDimensions, dtype="float64")
@@ -485,6 +497,20 @@
             raise KeyError(field)
         return rv
 
+    @property
+    def LeftEdge(self):
+        return self.left_edge
+
+    def deposit(self, positions, fields = None, method = None):
+        cls = getattr(particle_deposit, "deposit_%s" % method, None)
+        if cls is None:
+            raise YTParticleDepositionNotImplemented(method)
+        op = cls(self.ActiveDimensions.prod()) # We allocate number of zones, not number of octs
+        op.initialize()
+        op.process_grid(self, positions, fields)
+        vals = op.finalize()
+        return vals.reshape(self.ActiveDimensions, order="F")
+
 class LevelState(object):
     current_dx = None
     current_dims = None
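
The new deposit() method dispatches by name: deposit_<method> is looked up as a module-level attribute of yt.geometry.particle_deposit, so registering a new operation is just assigning an alias (deposit_count, deposit_simple_smooth, ...). The lookup, in miniature:

    import sys

    class CountParticles(object):
        # Stand-in for a ParticleDepositOperation subclass.
        def __init__(self, nvals):
            self.nvals = nvals

    deposit_count = CountParticles  # the registration step

    this_module = sys.modules[__name__]
    cls = getattr(this_module, "deposit_%s" % "count", None)
    if cls is None:
        raise NotImplementedError("no such deposit method")
    print(cls(64).nvals)  # 64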

diff -r 5260b868fa0c432905808d6180aee828fd4bdbe8 -r ef8a2cfed946999667f5e45eb7cea5d4634d36ab yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -540,11 +540,6 @@
                         if f not in fields_to_generate:
                             fields_to_generate.append(f)
 
-    def deposit(self, positions, fields, op):
-        assert(self._current_chunk.chunk_type == "spatial")
-        fields = ensure_list(fields)
-        self.hierarchy._deposit_particle_fields(self, positions, fields, op)
-
     @contextmanager
     def _field_lock(self):
         self._locked = True

diff -r 5260b868fa0c432905808d6180aee828fd4bdbe8 -r ef8a2cfed946999667f5e45eb7cea5d4634d36ab yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -163,71 +163,6 @@
         for subset in oobjs:
             yield YTDataChunk(dobj, "io", [subset], subset.cell_count)
 
-class OWLSStaticOutput(StaticOutput):
-    _hierarchy_class = ParticleGeometryHandler
-    _domain_class = ParticleDomainFile
-    _fieldinfo_fallback = OWLSFieldInfo
-    _fieldinfo_known = KnownOWLSFields
-
-    def __init__(self, filename, data_style="OWLS", root_dimensions = 64):
-        self._root_dimensions = root_dimensions
-        # Set up the template for domain files
-        self.storage_filename = None
-        super(OWLSStaticOutput, self).__init__(filename, data_style)
-
-    def __repr__(self):
-        return os.path.basename(self.parameter_filename).split(".")[0]
-
-    def _set_units(self):
-        self.units = {}
-        self.time_units = {}
-        self.conversion_factors = {}
-        DW = self.domain_right_edge - self.domain_left_edge
-        self.units["unitary"] = 1.0 / DW.max()
-
-    def _parse_parameter_file(self):
-        handle = h5py.File(self.parameter_filename)
-        hvals = {}
-        hvals.update(handle["/Header"].attrs)
-
-        self.dimensionality = 3
-        self.refine_by = 2
-        self.parameters["HydroMethod"] = "sph"
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        # Set standard values
-        self.current_time = hvals["Time_GYR"] * sec_conversion["Gyr"]
-        self.domain_left_edge = np.zeros(3, "float64")
-        self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
-        self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
-        self.cosmological_simulation = 1
-        self.periodicity = (True, True, True)
-        self.current_redshift = hvals["Redshift"]
-        self.omega_lambda = hvals["OmegaLambda"]
-        self.omega_matter = hvals["Omega0"]
-        self.hubble_constant = hvals["HubbleParam"]
-        self.parameters = hvals
-
-        prefix = self.parameter_filename.split(".", 1)[0]
-        suffix = self.parameter_filename.rsplit(".", 1)[-1]
-        self.domain_template = "%s.%%(num)i.%s" % (prefix, suffix)
-        self.domain_count = hvals["NumFilesPerSnapshot"]
-
-        handle.close()
-
-    @classmethod
-    def _is_valid(self, *args, **kwargs):
-        try:
-            fileh = h5py.File(args[0],'r')
-            if "Constants" in fileh["/"].keys() and \
-               "Header" in fileh["/"].keys():
-                fileh.close()
-                return True
-            fileh.close()
-        except:
-            pass
-        return False
-
 class GadgetBinaryDomainFile(ParticleDomainFile):
     def __init__(self, pf, io, domain_filename, domain_id):
         with open(domain_filename, "rb") as f:
@@ -404,6 +339,74 @@
         # We do not allow load() of these files.
         return False
 
+class OWLSStaticOutput(GadgetStaticOutput):
+    _hierarchy_class = ParticleGeometryHandler
+    _domain_class = ParticleDomainFile
+    _fieldinfo_fallback = OWLSFieldInfo # For now this is kept separate from Gadget
+    _fieldinfo_known = KnownOWLSFields
+    _header_spec = None # Override so that there's no confusion
+
+    def __init__(self, filename, data_style="OWLS", root_dimensions = 64):
+        self._root_dimensions = root_dimensions
+        # Set up the template for domain files
+        self.storage_filename = None
+        super(OWLSStaticOutput, self).__init__(filename, data_style,
+                                               root_dimensions,
+                                               unit_base = None)
+
+    def __repr__(self):
+        return os.path.basename(self.parameter_filename).split(".")[0]
+
+    def _parse_parameter_file(self):
+        handle = h5py.File(self.parameter_filename)
+        hvals = {}
+        hvals.update((str(k), v) for k, v in handle["/Header"].attrs.items())
+
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.parameters["HydroMethod"] = "sph"
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+
+        # Set standard values
+        self.current_time = hvals["Time_GYR"] * sec_conversion["Gyr"]
+        self.domain_left_edge = np.zeros(3, "float64")
+        self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
+        self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
+        self.cosmological_simulation = 1
+        self.periodicity = (True, True, True)
+        self.current_redshift = hvals["Redshift"]
+        self.omega_lambda = hvals["OmegaLambda"]
+        self.omega_matter = hvals["Omega0"]
+        self.hubble_constant = hvals["HubbleParam"]
+        self.parameters = hvals
+
+        prefix = self.parameter_filename.split(".", 1)[0]
+        suffix = self.parameter_filename.rsplit(".", 1)[-1]
+        self.domain_template = "%s.%%(num)i.%s" % (prefix, suffix)
+        self.domain_count = hvals["NumFilesPerSnapshot"]
+
+        # To avoid having to open files twice
+        self._unit_base = {}
+        self._unit_base.update((str(k), v) for k, v in handle["/Units"].attrs.items())
+        # Comoving cm is given in the Units
+        self._unit_base['cmcm'] = 1.0 / self._unit_base["UnitLength_in_cm"]
+
+        handle.close()
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            fileh = h5py.File(args[0],'r')
+            if "Constants" in fileh["/"].keys() and \
+               "Header" in fileh["/"].keys():
+                fileh.close()
+                return True
+            fileh.close()
+        except:
+            pass
+        return False
+
 class TipsyDomainFile(ParticleDomainFile):
 
     def _calculate_offsets(self, field_list):

diff -r 5260b868fa0c432905808d6180aee828fd4bdbe8 -r ef8a2cfed946999667f5e45eb7cea5d4634d36ab yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -247,3 +247,60 @@
         GadgetFieldInfo.add_field(("all", oname + ax), function=func,
                 particle_type = True)
 
+# OWLS
+# ====
+
+# I am optimistic that some day we will be able to get rid of much of this, and
+# make OWLS a subclass of Gadget fields.
+
+_owls_ptypes = ("PartType0", "PartType1", "PartType2", "PartType3",
+                "PartType4")
+
+for fname in ["Coordinates", "Velocities", "ParticleIDs",
+              # Note: Mass, not Masses
+              "Mass"]:
+    func = _field_concat(fname)
+    OWLSFieldInfo.add_field(("all", fname), function=func,
+            particle_type = True)
+
+def _owls_particle_fields(ptype):
+    def _Mass(field, data):
+        pind = _owls_ptypes.index(ptype)
+        if data.pf["MassTable"][pind] == 0.0:
+            raise RuntimeError
+        mass = np.ones(data[ptype, "ParticleIDs"].shape[0], dtype="float64")
+        # Note that this is an alias, which is why we need to apply conversion
+        # here.  Otherwise we'd have an asymmetry.
+        mass *= data.pf["MassTable"][pind] 
+        return mass
+    OWLSFieldInfo.add_field((ptype, "Mass"), function=_Mass,
+                            convert_function = _get_conv("mass"),
+                            particle_type = True)
+
+for ptype in _owls_ptypes:
+    # Note that this adds a "Known" Mass field and a "Derived" Mass field.
+    # This way the "Known" will get used, and if it's not there, it will use
+    # the derived.
+    KnownOWLSFields.add_field((ptype, "Mass"), function=NullFunc,
+        particle_type = True,
+        convert_function=_get_conv("mass"),
+        units = r"\mathrm{g}")
+    _owls_particle_fields(ptype)
+    KnownOWLSFields.add_field((ptype, "Velocities"), function=NullFunc,
+        particle_type = True,
+        convert_function=_get_conv("velocity"),
+        units = r"\mathrm{cm}/\mathrm{s}")
+    _particle_functions(ptype, "Coordinates", "Mass", OWLSFieldInfo)
+    KnownOWLSFields.add_field((ptype, "Coordinates"), function=NullFunc,
+        particle_type = True)
+_particle_functions("all", "Coordinates", "Mass", OWLSFieldInfo)
+
+# Now we have to manually apply the splits for "all", since we don't want to
+# use the splits defined above.
+
+for iname, oname in [("Coordinates", "particle_position_"),
+                     ("Velocities", "particle_velocity_")]:
+    for axi, ax in enumerate("xyz"):
+        func = _field_concat_slice(iname, axi)
+        OWLSFieldInfo.add_field(("all", oname + ax), function=func,
+                particle_type = True)

diff -r 5260b868fa0c432905808d6180aee828fd4bdbe8 -r ef8a2cfed946999667f5e45eb7cea5d4634d36ab yt/geometry/oct_geometry_handler.py
--- a/yt/geometry/oct_geometry_handler.py
+++ b/yt/geometry/oct_geometry_handler.py
@@ -78,7 +78,7 @@
             source.quantities["MaxLocation"](field)
         mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f", 
               max_val, mx, my, mz)
-        self.pf.parameters["Max%sValue" % (field)] = max_val
-        self.pf.parameters["Max%sPos" % (field)] = "%s" % ((mx,my,mz),)
+        self.pf.parameters["Max%sValue" % (field,)] = max_val
+        self.pf.parameters["Max%sPos" % (field,)] = "%s" % ((mx,my,mz),)
         return max_val, np.array((mx,my,mz), dtype='float64')
 

diff -r 5260b868fa0c432905808d6180aee828fd4bdbe8 -r ef8a2cfed946999667f5e45eb7cea5d4634d36ab yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -29,6 +29,7 @@
 import numpy as np
 from libc.stdlib cimport malloc, free
 cimport cython
+from libc.math cimport sqrt
 
 from fp_utils cimport *
 from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
@@ -39,6 +40,21 @@
 cdef inline int gind(int i, int j, int k, int dims[3]):
     return ((k*dims[1])+j)*dims[0]+i
 
+
+####################################################
+# Standard SPH kernel for use with the Grid method #
+####################################################
+
+cdef inline np.float64_t sph_kernel(np.float64_t x) nogil:
+    cdef np.float64_t kernel
+    if x <= 0.5:
+        kernel = 1.-6.*x*x*(1.-x)
+    elif x>0.5 and x<=1.0:
+        kernel = 2.*(1.-x)*(1.-x)*(1.-x)
+    else:
+        kernel = 0.
+    return kernel
+
 cdef class ParticleDepositOperation:
     # We assume each will allocate and define their own temporary storage
     cdef np.int64_t nvals
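
The inline kernel above is the standard (unnormalized) cubic spline in x = r/h. A NumPy translation is handy for sanity-checking values:

    import numpy as np

    def sph_kernel(x):
        x = np.asarray(x, dtype="float64")
        kernel = np.zeros_like(x)
        lo = x <= 0.5
        hi = (x > 0.5) & (x <= 1.0)
        kernel[lo] = 1. - 6. * x[lo] * x[lo] * (1. - x[lo])
        kernel[hi] = 2. * (1. - x[hi]) ** 3
        return kernel

    print(sph_kernel([0.0, 0.25, 0.5, 0.75, 1.0]))
    # values: 1.0, 0.71875, 0.25, 0.03125, 0.0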

diff -r 5260b868fa0c432905808d6180aee828fd4bdbe8 -r ef8a2cfed946999667f5e45eb7cea5d4634d36ab yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -29,6 +29,7 @@
 import numpy as np
 from libc.stdlib cimport malloc, free
 cimport cython
+from libc.math cimport sqrt
 
 from fp_utils cimport *
 from oct_container cimport Oct, OctAllocationContainer, \
@@ -145,6 +146,68 @@
 
 deposit_count = CountParticles
 
+cdef class SimpleSmooth(ParticleDepositOperation):
+    # Note that this does nothing at the edges.  So it will give a poor
+    # estimate there, and since Octrees are mostly edges, this will be a very
+    # poor SPH kernel.
+    cdef np.float64_t *data
+    cdef public object odata
+    cdef np.float64_t *temp
+    cdef public object otemp
+
+    def initialize(self):
+        self.odata = np.zeros(self.nvals, dtype="float64")
+        cdef np.ndarray arr = self.odata
+        self.data = <np.float64_t*> arr.data
+        self.otemp = np.zeros(self.nvals, dtype="float64")
+        arr = self.otemp
+        self.temp = <np.float64_t*> arr.data
+
+    @cython.cdivision(True)
+    cdef void process(self, int dim[3],
+                      np.float64_t left_edge[3],
+                      np.float64_t dds[3],
+                      np.int64_t offset,
+                      np.float64_t ppos[3],
+                      np.float64_t *fields
+                      ):
+        cdef int ii[3], half_len, ib0[3], ib1[3]
+        cdef int i, j, k
+        cdef np.float64_t idist[3], kernel_sum, dist
+        # Smoothing length is fields[0]
+        kernel_sum = 0.0
+        for i in range(3):
+            ii[i] = <int>((ppos[i] - left_edge[i])/dds[i])
+            half_len = <int>(fields[0]/dds[i]) + 1
+            ib0[i] = ii[i] - half_len
+            ib1[i] = ii[i] + half_len
+            if ib0[i] >= dim[i] or ib1[i] < 0:
+                return
+            ib0[i] = iclip(ib0[i], 0, dim[i] - 1)
+            ib1[i] = iclip(ib1[i], 0, dim[i] - 1)
+        for i from ib0[0] <= i <= ib1[0]:
+            idist[0] = (ii[0] - i) * (ii[0] - i) * dds[0]
+            for j from ib0[1] <= j <= ib1[1]:
+                idist[1] = (ii[1] - j) * (ii[1] - j) * dds[1] 
+                for k from ib0[2] <= k <= ib1[2]:
+                    idist[2] = (ii[2] - k) * (ii[2] - k) * dds[2]
+                    dist = idist[0] + idist[1] + idist[2]
+                    # Calculate distance in multiples of the smoothing length
+                    dist = sqrt(dist) / fields[0]
+                    self.temp[gind(i,j,k,dim) + offset] = sph_kernel(dist)
+                    kernel_sum += self.temp[gind(i,j,k,dim) + offset]
+        # Having found the kernel, deposit accordingly into gdata
+        for i from ib0[0] <= i <= ib1[0]:
+            for j from ib0[1] <= j <= ib1[1]:
+                for k from ib0[2] <= k <= ib1[2]:
+                    dist = self.temp[gind(i,j,k,dim) + offset] / kernel_sum
+                    self.data[gind(i,j,k,dim) + offset] += fields[1] * dist
+        
+    def finalize(self):
+        return self.odata
+
+deposit_simple_smooth = SimpleSmooth
+
 cdef class SumParticleField(ParticleDepositOperation):
     cdef np.float64_t *sum
     cdef public object osum
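
Beyond the edge caveat noted in the comments, the key idea in SimpleSmooth is the two-pass normalization: kernel weights are evaluated over every covered cell first, then the particle's field value is deposited in proportion to weight / kernel_sum, so the cell sums reproduce the particle total exactly. A one-dimensional sketch:

    import numpy as np

    def sph_kernel(x):
        # Scalar cubic spline, matching the inline Cython kernel.
        if x <= 0.5:
            return 1. - 6. * x * x * (1. - x)
        elif x <= 1.0:
            return 2. * (1. - x) ** 3
        return 0.

    cells = np.arange(5, dtype="float64")   # 1D cell-center coordinates
    r = np.abs(cells - 2.2) / 2.0           # distance / smoothing length
    w = np.array([sph_kernel(x) for x in r])
    mass = 3.0
    deposited = mass * w / w.sum()
    print(deposited.sum())  # 3.0: the particle's mass is conserved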


https://bitbucket.org/yt_analysis/yt/commits/570136cac0ee/
Changeset:   570136cac0ee
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-07 18:45:55
Summary:     Adding arbitrary_grid object.

This object will return particles as a .region would, and will also be able to
deposit fields from those particles.  It will not return fluid fields
that would require any fields on disk.
Affected #:  1 file

diff -r ef8a2cfed946999667f5e45eb7cea5d4634d36ab -r 570136cac0ee3abd07d3310f09b41819e38e844a yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -511,6 +511,53 @@
         vals = op.finalize()
         return vals.reshape(self.ActiveDimensions, order="F")
 
+class YTArbitraryGridBase(YTCoveringGridBase):
+    """A 3D region with arbitrary bounds and dimensions.
+
+    In contrast to the Covering Grid, this object accepts a left edge, a right
+    edge, and dimensions.  This allows it to be used for creating 3D particle
+    deposition fields that are independent of the underlying mesh, whether that
+    is yt-generated or from the simulation data.  For example, arbitrary boxes
+    around particles can be drawn and particle deposition fields can be
+    created.  This object will refuse to generate any fluid fields.
+    
+    Parameters
+    ----------
+    left_edge : array_like
+        The left edge of the region to be extracted
+    right_edge : array_like
+        The right edge of the region to be extracted
+    dims : array_like
+        Number of cells along each axis of resulting grid.
+
+    Examples
+    --------
+    >>> obj = pf.h.arbitrary_grid([0.0, 0.0, 0.0], [0.99, 0.99, 0.99],
+    ...                          dims=[128, 128, 128])
+    """
+    _spatial = True
+    _type_name = "arbitrary_grid"
+    _con_args = ('left_edge', 'right_edge', 'ActiveDimensions')
+    _container_fields = ("dx", "dy", "dz", "x", "y", "z")
+    def __init__(self, left_edge, right_edge, dims,
+                 pf = None, field_parameters = None):
+        if field_parameters is None:
+            center = None
+        else:
+            center = field_parameters.get("center", None)
+        YTSelectionContainer3D.__init__(self, center, pf, field_parameters)
+        self.left_edge = np.array(left_edge)
+        self.right_edge = np.array(right_edge)
+        self.ActiveDimensions = np.array(dims, dtype='int32')
+        if self.ActiveDimensions.size == 1:
+            self.ActiveDimensions = np.array([dims, dims, dims], dtype="int32")
+        self.dds = (self.right_edge - self.left_edge)/self.ActiveDimensions
+        self.level = 99
+        self._setup_data_source()
+
+    def _fill_fields(self, fields):
+        raise NotImplementedError
+
 class LevelState(object):
     current_dx = None
     current_dims = None


https://bitbucket.org/yt_analysis/yt/commits/5e472283912f/
Changeset:   5e472283912f
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-07 18:46:27
Summary:     Adding CIC deposit method to particle_deposit.

This gives bitwise identical results to particle_density in those frontends
that support it.
Affected #:  3 files

diff -r 570136cac0ee3abd07d3310f09b41819e38e844a -r 5e472283912f5343de5a875e2a3f68d7f907ef6c yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -696,3 +696,15 @@
          units = r"\mathrm{g}/\mathrm{cm}^{3}",
          projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
          projection_conversion = 'cm')
+
+def particle_cic(field, data):
+    pos = np.column_stack([data["particle_position_%s" % ax] for ax in 'xyz'])
+    d = data.deposit(pos, [data["ParticleMass"]], method="cic")
+    d /= data["CellVolume"]
+    return d
+add_field(("deposit", "all_cic"), function=particle_cic,
+          validators=[ValidateSpatial()],
+          display_name = "\\mathrm{All CIC Density}",
+          units = r"\mathrm{g}/\mathrm{cm}^{3}",
+          projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+          projection_conversion = 'cm')

diff -r 570136cac0ee3abd07d3310f09b41819e38e844a -r 5e472283912f5343de5a875e2a3f68d7f907ef6c yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -96,6 +96,20 @@
              projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
              projection_conversion = 'cm')
 
+    def particle_cic(field, data):
+        pos = data[ptype, coord_name]
+        d = data.deposit(pos, [data[ptype, mass_name]], method = "cic")
+        d /= data["CellVolume"]
+        return d
+
+    registry.add_field(("deposit", "%s_cic" % ptype),
+             function = particle_cic,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s CIC Density}" % ptype,
+             units = r"\mathrm{g}/\mathrm{cm}^{3}",
+             projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+             projection_conversion = 'cm')
+
     # Now some translation functions.
 
     registry.add_field((ptype, "ParticleMass"),

diff -r 570136cac0ee3abd07d3310f09b41819e38e844a -r 5e472283912f5343de5a875e2a3f68d7f907ef6c yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -295,3 +295,44 @@
 
 deposit_std = StdParticleField
 
+cdef class CICDeposit(ParticleDepositOperation):
+    cdef np.float64_t *field
+    cdef public object ofield
+    def initialize(self):
+        self.ofield = np.zeros(self.nvals, dtype="float64")
+        cdef np.ndarray arr = self.ofield
+        self.field = <np.float64_t *> arr.data
+
+    cdef void process(self, int dim[3],
+                      np.float64_t left_edge[3],
+                      np.float64_t dds[3],
+                      np.int64_t offset, # offset into IO field
+                      np.float64_t ppos[3], # this particle's position
+                      np.float64_t *fields # any other fields we need
+                      ):
+        
+        cdef int i, j, k, ind[3], ii
+        cdef np.float64_t rpos[3], rdds[3][2]
+        cdef np.float64_t fact, edge0, edge1, edge2
+        cdef np.float64_t le0, le1, le2
+        cdef np.float64_t dx, dy, dz, dx2, dy2, dz2
+
+        # Compute the position of the central cell
+        for i in range(3):
+            rpos[i] = (ppos[i]-left_edge[i])/dds[i]
+            rpos[i] = fclip(rpos[i], 0.5001, dim[i]-0.5001)
+            ind[i] = <int> (rpos[i] + 0.5)
+            # Note these are 1, then 0
+            rdds[i][1] = (<np.float64_t> ind[i]) + 0.5 - rpos[i]
+            rdds[i][0] = 1.0 - rdds[i][1]
+
+        for i in range(2):
+            for j in range(2):
+                for k in range(2):
+                    ii = gind(ind[0] - i, ind[1] - j, ind[2] - k, dim) + offset
+                    self.field[ii] += fields[0]*rdds[0][i]*rdds[1][j]*rdds[2][k]
+
+    def finalize(self):
+        return self.ofield
+
+deposit_cic = CICDeposit
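
For a picture of what CICDeposit computes: in each dimension the particle's contribution splits between the two nearest cell centers in proportion to overlap, giving eight weighted targets in 3D. The one-dimensional version:

    import numpy as np

    def cic_1d(positions, masses, n_cells, dx=1.0):
        # Cell centers sit at (i + 0.5) * dx; interior particles assumed.
        field = np.zeros(n_cells)
        for x, m in zip(positions, masses):
            rpos = x / dx - 0.5       # position in cell-center coordinates
            i = int(np.floor(rpos))
            w_right = rpos - i        # fraction handed to cell i + 1
            field[i] += m * (1.0 - w_right)
            field[i + 1] += m * w_right
        return field

    print(cic_1d([1.25], [1.0], 4))  # cell masses: [0.25, 0.75, 0.0, 0.0]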


https://bitbucket.org/yt_analysis/yt/commits/81b64de38898/
Changeset:   81b64de38898
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-06-07 03:21:13
Summary:     Adding some autogenerated c routines to hgignore.
Affected #:  1 file

diff -r 443f359e25980fe2574652a4c873b5ca9a702169 -r 81b64de38898d9a05f08f5fd110da51c511316f1 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -10,7 +10,9 @@
 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/frontends/sph/smoothing_kernel.c
+yt/geometry/fake_octree.c
 yt/geometry/oct_container.c
+yt/geometry/particle_deposit.c
 yt/geometry/selection_routines.c
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h


https://bitbucket.org/yt_analysis/yt/commits/9066c985be06/
Changeset:   9066c985be06
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-06-07 03:21:42
Summary:     Patching the off axis velocity callback.  Closes #587.
Affected #:  1 file

diff -r 81b64de38898d9a05f08f5fd110da51c511316f1 -r 9066c985be063b09e5691e94ba08155985c9f093 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -884,9 +884,9 @@
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
         bulk_velocity = np.zeros(3)
-    v_vec = np.array([data["%s-velocity" % ax] for ax in 'xyz']) \
-                - bulk_velocity[...,np.newaxis]
-    return np.dot(x_vec, v_vec)
+    v_vec = np.array([data["%s-velocity" % ax] for ax in 'xyz']).transpose() \
+                - bulk_velocity[np.newaxis,...]
+    return np.sum(x_vec * v_vec, axis=-1)
 add_field("CuttingPlaneVelocityX", 
           function=_CuttingPlaneVelocityX,
           validators=[ValidateParameter("cp_%s_vec" % ax)
@@ -897,9 +897,9 @@
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
         bulk_velocity = np.zeros(3)
-    v_vec = np.array([data["%s-velocity" % ax] for ax in 'xyz']) \
-                - bulk_velocity[...,np.newaxis]
-    return np.dot(y_vec, v_vec)
+    v_vec = np.array([data["%s-velocity" % ax] for ax in 'xyz']).transpose() \
+                - bulk_velocity[np.newaxis,...]
+    return np.sum(y_vec * v_vec, axis=-1)
 add_field("CuttingPlaneVelocityY", 
           function=_CuttingPlaneVelocityY,
           validators=[ValidateParameter("cp_%s_vec" % ax)


https://bitbucket.org/yt_analysis/yt/commits/c24c2883dd31/
Changeset:   c24c2883dd31
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-06-07 20:49:45
Summary:     Adjusting the velocity vector calculation to avoid a transpose.
Affected #:  1 file

diff -r 9066c985be063b09e5691e94ba08155985c9f093 -r c24c2883dd316ee2d125ca176dff1263aa5492fc yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -884,8 +884,8 @@
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
         bulk_velocity = np.zeros(3)
-    v_vec = np.array([data["%s-velocity" % ax] for ax in 'xyz']).transpose() \
-                - bulk_velocity[np.newaxis,...]
+    v_vec = np.array([data["%s-velocity" % ax] - bv \
+                for ax, bv in zip('xyz', bulk_velocity)])
     return np.sum(x_vec * v_vec, axis=-1)
 add_field("CuttingPlaneVelocityX", 
           function=_CuttingPlaneVelocityX,
@@ -897,8 +897,8 @@
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
         bulk_velocity = np.zeros(3)
-    v_vec = np.array([data["%s-velocity" % ax] for ax in 'xyz']).transpose() \
-                - bulk_velocity[np.newaxis,...]
+    v_vec = np.array([data["%s-velocity" % ax] - bv \
+                for ax, bv in zip('xyz', bulk_velocity)])
     return np.sum(y_vec * v_vec, axis=-1)
 add_field("CuttingPlaneVelocityY", 
           function=_CuttingPlaneVelocityY,


https://bitbucket.org/yt_analysis/yt/commits/790d2eccfd06/
Changeset:   790d2eccfd06
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-10 16:52:29
Summary:     Merged in ngoldbaum/yt-3.0 (pull request #45)

Patching the off axis velocity callback.
Affected #:  2 files

diff -r a392d0b00fa31a49aeb9e81c8e629ca64cdf8ca1 -r 790d2eccfd06764fe7f2e6ca5b749ff246da4390 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -10,7 +10,9 @@
 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/frontends/sph/smoothing_kernel.c
+yt/geometry/fake_octree.c
 yt/geometry/oct_container.c
+yt/geometry/particle_deposit.c
 yt/geometry/selection_routines.c
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h

diff -r a392d0b00fa31a49aeb9e81c8e629ca64cdf8ca1 -r 790d2eccfd06764fe7f2e6ca5b749ff246da4390 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -884,9 +884,9 @@
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
         bulk_velocity = np.zeros(3)
-    v_vec = np.array([data["%s-velocity" % ax] for ax in 'xyz']) \
-                - bulk_velocity[...,np.newaxis]
-    return np.dot(x_vec, v_vec)
+    v_vec = np.array([data["%s-velocity" % ax] - bv \
+                for ax, bv in zip('xyz', bulk_velocity)])
+    return np.sum(x_vec * v_vec, axis=-1)
 add_field("CuttingPlaneVelocityX", 
           function=_CuttingPlaneVelocityX,
           validators=[ValidateParameter("cp_%s_vec" % ax)
@@ -897,9 +897,9 @@
     bulk_velocity = data.get_field_parameter("bulk_velocity")
     if bulk_velocity == None:
         bulk_velocity = np.zeros(3)
-    v_vec = np.array([data["%s-velocity" % ax] for ax in 'xyz']) \
-                - bulk_velocity[...,np.newaxis]
-    return np.dot(y_vec, v_vec)
+    v_vec = np.array([data["%s-velocity" % ax] - bv \
+                for ax, bv in zip('xyz', bulk_velocity)])
+    return np.sum(y_vec * v_vec, axis=-1)
 add_field("CuttingPlaneVelocityY", 
           function=_CuttingPlaneVelocityY,
           validators=[ValidateParameter("cp_%s_vec" % ax)


https://bitbucket.org/yt_analysis/yt/commits/7a79dd9c7ebe/
Changeset:   7a79dd9c7ebe
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-06-10 21:03:12
Summary:     Fixing the off axis velocity callback. (Really, I swear).
Affected #:  1 file

diff -r 790d2eccfd06764fe7f2e6ca5b749ff246da4390 -r 7a79dd9c7ebefddba48dfe0440a47ca3fddd6b98 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -886,6 +886,7 @@
         bulk_velocity = np.zeros(3)
     v_vec = np.array([data["%s-velocity" % ax] - bv \
                 for ax, bv in zip('xyz', bulk_velocity)])
+    v_vec = np.rollaxis(v_vec, 0, len(v_vec.shape))
     return np.sum(x_vec * v_vec, axis=-1)
 add_field("CuttingPlaneVelocityX", 
           function=_CuttingPlaneVelocityX,
@@ -899,6 +900,7 @@
         bulk_velocity = np.zeros(3)
     v_vec = np.array([data["%s-velocity" % ax] - bv \
                 for ax, bv in zip('xyz', bulk_velocity)])
+    v_vec = np.rollaxis(v_vec, 0, len(v_vec.shape))
     return np.sum(y_vec * v_vec, axis=-1)
 add_field("CuttingPlaneVelocityY", 
           function=_CuttingPlaneVelocityY,
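
The rollaxis makes the component axis the last one, so the cutting-plane unit vector can broadcast against data of any shape and sum(axis=-1) yields a per-point dot product. In miniature:

    import numpy as np

    N = 5
    v_vec = np.array([np.arange(N, dtype="float64") for ax in "xyz"])  # (3, N)
    v_vec = np.rollaxis(v_vec, 0, len(v_vec.shape))                    # (N, 3)
    x_vec = np.array([1.0, 0.0, 0.0])  # hypothetical cp_x_vec
    print(np.sum(x_vec * v_vec, axis=-1))  # [0. 1. 2. 3. 4.]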


https://bitbucket.org/yt_analysis/yt/commits/e9af6a7b7fa2/
Changeset:   e9af6a7b7fa2
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-11 15:39:59
Summary:     Fixing 1.0/mass_sun_cgs in Enzo frontend.
Affected #:  1 file

diff -r 7a79dd9c7ebefddba48dfe0440a47ca3fddd6b98 -r e9af6a7b7fa22cd4eb45dff093a56776640035de yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -37,7 +37,8 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
-    mh
+    mh, \
+    mass_sun_cgs
 from yt.funcs import *
 
 import yt.utilities.lib as amr_utils
@@ -81,7 +82,7 @@
     return data[sp] / _speciesMass[species]
 
 def _convertCellMassMsun(data):
-    return 5.027854e-34 # g^-1
+    return 1.0/mass_sun_cgs # g^-1
 def _ConvertNumberDensity(data):
     return 1.0/mh
 


https://bitbucket.org/yt_analysis/yt/commits/2deb3327354a/
Changeset:   2deb3327354a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-11 15:42:22
Summary:     Adding Enzo-style species fields to RAMSES
Affected #:  1 file

diff -r e9af6a7b7fa22cd4eb45dff093a56776640035de -r 2deb3327354a362a9692147299fc989b9891303d yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -178,3 +178,71 @@
          units = r"\mathrm{g}/\mathrm{cm}^{3}",
          projected_units = r"\mathrm{g}/\mathrm{cm}^{-2}",
          projection_conversion = 'cm')
+
+# We'll add a bunch of species fields here.  In the not too distant future,
+# we'll be moving all of these to a unified field location, so they can be
+# shared between various frontends.
+
+# NOTE: No Electron field here, because I don't know how RAMSES handles
+# electrons, or whether it handles them differently than Enzo does (where they
+# are scaled to mh).
+
+_speciesList = ["HI", "HII",
+                "HeI", "HeII", "HeIII",
+                "H2I", "H2II", "HM",
+                "DI", "DII", "HDI"]
+_speciesMass = {"HI": 1.0, "HII": 1.0,
+                "HeI": 4.0, "HeII": 4.0, "HeIII": 4.0,
+                "H2I": 2.0, "H2II": 2.0, "HM": 1.0,
+                "DI": 2.0, "DII": 2.0, "HDI": 3.0}
+
+def _SpeciesComovingDensity(field, data):
+    sp = field.name.split("_")[0] + "_Density"
+    ef = (1.0 + data.pf.current_redshift)**3.0
+    return data[sp] / ef
+
+def _SpeciesFraction(field, data):
+    sp = field.name.split("_")[0] + "_Density"
+    return data[sp] / data["Density"]
+
+def _SpeciesMass(field, data):
+    sp = field.name.split("_")[0] + "_Density"
+    return data[sp] * data["CellVolume"]
+
+def _SpeciesNumberDensity(field, data):
+    species = field.name.split("_")[0]
+    sp = field.name.split("_")[0] + "_Density"
+    return data[sp] / _speciesMass[species]
+
+def _convertCellMassMsun(data):
+    return 1.0/mass_sun_cgs # g^-1
+def _ConvertNumberDensity(data):
+    return 1.0/mh
+
+for species in _speciesList:
+    add_ramses_field("%s_Density" % species,
+             function = NullFunc,
+             display_name = "%s\/Density" % species,
+             units = r"\rm{g}/\rm{cm}^3",
+             projected_units = r"\rm{g}/\rm{cm}^2")
+    add_field("%s_Fraction" % species,
+             function=_SpeciesFraction,
+             validators=ValidateDataField("%s_Density" % species),
+             display_name="%s\/Fraction" % species)
+    add_field("Comoving_%s_Density" % species,
+             function=_SpeciesComovingDensity,
+             validators=ValidateDataField("%s_Density" % species),
+             display_name="Comoving\/%s\/Density" % species)
+    add_field("%s_Mass" % species, units=r"\rm{g}", 
+              function=_SpeciesMass, 
+              validators=ValidateDataField("%s_Density" % species),
+              display_name="%s\/Mass" % species)
+    add_field("%s_MassMsun" % species, units=r"M_{\odot}", 
+              function=_SpeciesMass, 
+              convert_function=_convertCellMassMsun,
+              validators=ValidateDataField("%s_Density" % species),
+              display_name="%s\/Mass" % species)
+    if _speciesMass.has_key(species):
+        add_field("%s_NumberDensity" % species,
+                  function=_SpeciesNumberDensity,
+                  convert_function=_ConvertNumberDensity,
+                  validators=ValidateDataField("%s_Density" % species))
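
All of these closures lean on one naming convention: a derived field recovers its species by splitting its own name on "_", so a single function body serves the whole species list. Isolated, for the number-density case (the real field also multiplies by the 1/mh convert function):

    _speciesMass = {"HI": 1.0, "HeI": 4.0, "H2I": 2.0}  # excerpt

    def species_from_field(field_name):
        return field_name.split("_")[0]

    name = "HeI_NumberDensity"
    density = 4.0e-24  # hypothetical g/cm^3
    print(density / _speciesMass[species_from_field(name)])  # 1e-24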


https://bitbucket.org/yt_analysis/yt/commits/3565d03875fe/
Changeset:   3565d03875fe
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-11 15:44:02
Summary:     We need a convert function for the density fields in RAMSES.
Affected #:  1 file

diff -r 2deb3327354a362a9692147299fc989b9891303d -r 3565d03875fed7118299145e6c2b9480a7e90ecb yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -222,6 +222,7 @@
     add_ramses_field("%s_Density" % species,
              function = NullFunc,
              display_name = "%s\/Density" % species,
+             convert_function = _convertDensity,
              units = r"\rm{g}/\rm{cm}^3",
              projected_units = r"\rm{g}/\rm{cm}^2")
     add_field("%s_Fraction" % species,


https://bitbucket.org/yt_analysis/yt/commits/ea5291e8fa1b/
Changeset:   ea5291e8fa1b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-11 15:32:40
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #46)

OWLS, kernel smoothing and arbitrary grids
Affected #:  9 files

diff -r 7a79dd9c7ebefddba48dfe0440a47ca3fddd6b98 -r ea5291e8fa1bf7647c3dbdd0052dcb68444fb260 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -53,6 +53,7 @@
     MinimalProjectionData
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects, parallel_root_only, ParallelAnalysisInterface
+import yt.geometry.particle_deposit as particle_deposit
 
 from .field_info_container import\
     NeedsGridType,\
@@ -433,20 +434,31 @@
         fields_to_get = [f for f in fields if f not in self.field_data]
         fields_to_get = self._identify_dependencies(fields_to_get)
         if len(fields_to_get) == 0: return
-        fill, gen = self._split_fields(fields_to_get)
+        fill, gen, part = self._split_fields(fields_to_get)
+        if len(part) > 0: self._fill_particles(part)
         if len(fill) > 0: self._fill_fields(fill)
         if len(gen) > 0: self._generate_fields(gen)
 
     def _split_fields(self, fields_to_get):
         fill, gen = self.pf.h._split_fields(fields_to_get)
+        particles = []
         for field in gen:
             finfo = self.pf._get_field_info(*field)
             try:
                 finfo.check_available(self)
             except NeedsOriginalGrid:
                 fill.append(field)
+        for field in fill:
+            finfo = self.pf._get_field_info(*field)
+            if finfo.particle_type:
+                particles.append(field)
         gen = [f for f in gen if f not in fill]
-        return fill, gen
+        fill = [f for f in fill if f not in particles]
+        return fill, gen, particles
+
+    def _fill_particles(self, part):
+        for p in part:
+            self[p] = self._data_source[p]
 
     def _fill_fields(self, fields):
         output_fields = [np.zeros(self.ActiveDimensions, dtype="float64")
@@ -485,6 +497,67 @@
             raise KeyError(field)
         return rv
 
+    @property
+    def LeftEdge(self):
+        return self.left_edge
+
+    def deposit(self, positions, fields = None, method = None):
+        cls = getattr(particle_deposit, "deposit_%s" % method, None)
+        if cls is None:
+            raise YTParticleDepositionNotImplemented(method)
+        op = cls(self.ActiveDimensions.prod()) # We allocate number of zones, not number of octs
+        op.initialize()
+        op.process_grid(self, positions, fields)
+        vals = op.finalize()
+        return vals.reshape(self.ActiveDimensions, order="F")
+
+class YTArbitraryGridBase(YTCoveringGridBase):
+    """A 3D region with arbitrary bounds and dimensions.
+
+    In contrast to the Covering Grid, this object accepts a left edge, a right
+    edge, and dimensions.  This allows it to be used for creating 3D particle
+    deposition fields that are independent of the underlying mesh, whether that
+    is yt-generated or from the simulation data.  For example, arbitrary boxes
+    around particles can be drawn and particle deposition fields can be
+    created.  This object will refuse to generate any fluid fields.
+    
+    Parameters
+    ----------
+    left_edge : array_like
+        The left edge of the region to be extracted
+    right_edge : array_like
+        The right edge of the region to be extracted
+    dims : array_like
+        Number of cells along each axis of resulting grid.
+
+    Examples
+    --------
+    >>> obj = pf.h.arbitrary_grid([0.0, 0.0, 0.0], [0.99, 0.99, 0.99],
+    ...                          dims=[128, 128, 128])
+    """
+    _spatial = True
+    _type_name = "arbitrary_grid"
+    _con_args = ('left_edge', 'right_edge', 'ActiveDimensions')
+    _container_fields = ("dx", "dy", "dz", "x", "y", "z")
+    def __init__(self, left_edge, right_edge, dims,
+                 pf = None, field_parameters = None):
+        if field_parameters is None:
+            center = None
+        else:
+            center = field_parameters.get("center", None)
+        YTSelectionContainer3D.__init__(self, center, pf, field_parameters)
+        self.left_edge = np.array(left_edge)
+        self.right_edge = np.array(right_edge)
+        self.ActiveDimensions = np.array(dims, dtype='int32')
+        if self.ActiveDimensions.size == 1:
+            self.ActiveDimensions = np.array([dims, dims, dims], dtype="int32")
+        self.dds = (self.right_edge - self.left_edge)/self.ActiveDimensions
+        self.level = 99
+        self._setup_data_source()
+
+    def _fill_fields(self, fields):
+        raise NotImplementedError
+
 class LevelState(object):
     current_dx = None
     current_dims = None

diff -r 7a79dd9c7ebefddba48dfe0440a47ca3fddd6b98 -r ea5291e8fa1bf7647c3dbdd0052dcb68444fb260 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -477,8 +477,23 @@
         if fields is None: return
         fields = self._determine_fields(fields)
         # Now we collect all our fields
-        fields_to_get = [f for f in fields if f not in self.field_data]
-        if len(fields_to_get) == 0:
+        # Here is where we need to perform a validation step, so that if we
+        # have a field requested that we actually *can't* yet get, we put it
+        # off until the end.  This prevents double-reading fields that will
+        # need to be used in spatial fields later on.
+        fields_to_get = []
+        # This will be pre-populated with spatial fields
+        fields_to_generate = [] 
+        for field in self._determine_fields(fields):
+            if field in self.field_data: continue
+            finfo = self.pf._get_field_info(*field)
+            try:
+                finfo.check_available(self)
+            except NeedsGridType:
+                fields_to_generate.append(field)
+                continue
+            fields_to_get.append(field)
+        if len(fields_to_get) == 0 and len(fields_to_generate) == 0:
             return
         elif self._locked == True:
             raise GenerationInProgress(fields)
@@ -502,12 +517,18 @@
         read_particles, gen_particles = self.hierarchy._read_particle_fields(
                                         particles, self, self._current_chunk)
         self.field_data.update(read_particles)
-        fields_to_generate = gen_fluids + gen_particles
+        fields_to_generate += gen_fluids + gen_particles
         self._generate_fields(fields_to_generate)
 
     def _generate_fields(self, fields_to_generate):
         index = 0
         with self._field_lock():
+            # At this point, we assume that any fields that are necessary to
+            # *generate* a field are in fact already available to us.  Note
+            # that we do not make any assumption about whether or not the
+            # fields have a spatial requirement.  This will be checked inside
+            # _generate_field, at which point additional dependencies may
+            # actually be noted.
             while any(f not in self.field_data for f in fields_to_generate):
                 field = fields_to_generate[index % len(fields_to_generate)]
                 index += 1
@@ -519,11 +540,6 @@
                         if f not in fields_to_generate:
                             fields_to_generate.append(f)
 
-    def deposit(self, positions, fields, op):
-        assert(self._current_chunk.chunk_type == "spatial")
-        fields = ensure_list(fields)
-        self.hierarchy._deposit_particle_fields(self, positions, fields, op)
-
     @contextmanager
     def _field_lock(self):
         self._locked = True

diff -r 7a79dd9c7ebefddba48dfe0440a47ca3fddd6b98 -r ea5291e8fa1bf7647c3dbdd0052dcb68444fb260 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -894,7 +894,7 @@
     Child cells are not returned.
     """
     _type_name = "data_collection"
-    _con_args = ("obj_list",)
+    _con_args = ("_obj_list",)
     def __init__(self, center, obj_list, pf = None, field_parameters = None):
         YTSelectionContainer3D.__init__(self, center, pf, field_parameters)
         self._obj_ids = np.array([o.id - o._id_offset for o in obj_list],

diff -r 7a79dd9c7ebefddba48dfe0440a47ca3fddd6b98 -r ea5291e8fa1bf7647c3dbdd0052dcb68444fb260 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -696,3 +696,15 @@
          units = r"\mathrm{g}/\mathrm{cm}^{3}",
          projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
          projection_conversion = 'cm')
+
+def particle_cic(field, data):
+    pos = np.column_stack([data["particle_position_%s" % ax] for ax in 'xyz'])
+    d = data.deposit(pos, [data["ParticleMass"]], method="cic")
+    d /= data["CellVolume"]
+    return d
+add_field(("deposit", "all_cic"), function=particle_cic,
+          validators=[ValidateSpatial()],
+          display_name = "\\mathrm{All CIC Density}",
+          units = r"\mathrm{g}/\mathrm{cm}^{3}",
+          projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+          projection_conversion = 'cm')

diff -r 7a79dd9c7ebefddba48dfe0440a47ca3fddd6b98 -r ea5291e8fa1bf7647c3dbdd0052dcb68444fb260 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -163,71 +163,6 @@
         for subset in oobjs:
             yield YTDataChunk(dobj, "io", [subset], subset.cell_count)
 
-class OWLSStaticOutput(StaticOutput):
-    _hierarchy_class = ParticleGeometryHandler
-    _domain_class = ParticleDomainFile
-    _fieldinfo_fallback = OWLSFieldInfo
-    _fieldinfo_known = KnownOWLSFields
-
-    def __init__(self, filename, data_style="OWLS", root_dimensions = 64):
-        self._root_dimensions = root_dimensions
-        # Set up the template for domain files
-        self.storage_filename = None
-        super(OWLSStaticOutput, self).__init__(filename, data_style)
-
-    def __repr__(self):
-        return os.path.basename(self.parameter_filename).split(".")[0]
-
-    def _set_units(self):
-        self.units = {}
-        self.time_units = {}
-        self.conversion_factors = {}
-        DW = self.domain_right_edge - self.domain_left_edge
-        self.units["unitary"] = 1.0 / DW.max()
-
-    def _parse_parameter_file(self):
-        handle = h5py.File(self.parameter_filename)
-        hvals = {}
-        hvals.update(handle["/Header"].attrs)
-
-        self.dimensionality = 3
-        self.refine_by = 2
-        self.parameters["HydroMethod"] = "sph"
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        # Set standard values
-        self.current_time = hvals["Time_GYR"] * sec_conversion["Gyr"]
-        self.domain_left_edge = np.zeros(3, "float64")
-        self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
-        self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
-        self.cosmological_simulation = 1
-        self.periodicity = (True, True, True)
-        self.current_redshift = hvals["Redshift"]
-        self.omega_lambda = hvals["OmegaLambda"]
-        self.omega_matter = hvals["Omega0"]
-        self.hubble_constant = hvals["HubbleParam"]
-        self.parameters = hvals
-
-        prefix = self.parameter_filename.split(".", 1)[0]
-        suffix = self.parameter_filename.rsplit(".", 1)[-1]
-        self.domain_template = "%s.%%(num)i.%s" % (prefix, suffix)
-        self.domain_count = hvals["NumFilesPerSnapshot"]
-
-        handle.close()
-
-    @classmethod
-    def _is_valid(self, *args, **kwargs):
-        try:
-            fileh = h5py.File(args[0],'r')
-            if "Constants" in fileh["/"].keys() and \
-               "Header" in fileh["/"].keys():
-                fileh.close()
-                return True
-            fileh.close()
-        except:
-            pass
-        return False
-
 class GadgetBinaryDomainFile(ParticleDomainFile):
     def __init__(self, pf, io, domain_filename, domain_id):
         with open(domain_filename, "rb") as f:
@@ -404,6 +339,74 @@
         # We do not allow load() of these files.
         return False
 
+class OWLSStaticOutput(GadgetStaticOutput):
+    _hierarchy_class = ParticleGeometryHandler
+    _domain_class = ParticleDomainFile
+    _fieldinfo_fallback = OWLSFieldInfo # For now we have separate from Gadget
+    _fieldinfo_known = KnownOWLSFields
+    _header_spec = None # Override so that there's no confusion
+
+    def __init__(self, filename, data_style="OWLS", root_dimensions = 64):
+        self._root_dimensions = root_dimensions
+        # Set up the template for domain files
+        self.storage_filename = None
+        super(OWLSStaticOutput, self).__init__(filename, data_style,
+                                               root_dimensions,
+                                               unit_base = None)
+
+    def __repr__(self):
+        return os.path.basename(self.parameter_filename).split(".")[0]
+
+    def _parse_parameter_file(self):
+        handle = h5py.File(self.parameter_filename)
+        hvals = {}
+        hvals.update((str(k), v) for k, v in handle["/Header"].attrs.items())
+
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.parameters["HydroMethod"] = "sph"
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+
+        # Set standard values
+        self.current_time = hvals["Time_GYR"] * sec_conversion["Gyr"]
+        self.domain_left_edge = np.zeros(3, "float64")
+        self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
+        self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
+        self.cosmological_simulation = 1
+        self.periodicity = (True, True, True)
+        self.current_redshift = hvals["Redshift"]
+        self.omega_lambda = hvals["OmegaLambda"]
+        self.omega_matter = hvals["Omega0"]
+        self.hubble_constant = hvals["HubbleParam"]
+        self.parameters = hvals
+
+        prefix = self.parameter_filename.split(".", 1)[0]
+        suffix = self.parameter_filename.rsplit(".", 1)[-1]
+        self.domain_template = "%s.%%(num)i.%s" % (prefix, suffix)
+        self.domain_count = hvals["NumFilesPerSnapshot"]
+
+        # To avoid having to open files twice
+        self._unit_base = {}
+        self._unit_base.update((str(k), v) for k, v in handle["/Units"].attrs.items())
+        # Comoving cm is given in the Units
+        self._unit_base['cmcm'] = 1.0 / self._unit_base["UnitLength_in_cm"]
+
+        handle.close()
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            fileh = h5py.File(args[0],'r')
+            if "Constants" in fileh["/"].keys() and \
+               "Header" in fileh["/"].keys():
+                fileh.close()
+                return True
+            fileh.close()
+        except:
+            pass
+        return False
+
 class TipsyDomainFile(ParticleDomainFile):
 
     def _calculate_offsets(self, field_list):

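The domain_template built above derives per-file names from the parameter
file name.  A worked example with a hypothetical filename "snap_033.0.hdf5":

    prefix = "snap_033.0.hdf5".split(".", 1)[0]     # "snap_033"
    suffix = "snap_033.0.hdf5".rsplit(".", 1)[-1]   # "hdf5"
    template = "%s.%%(num)i.%s" % (prefix, suffix)  # "snap_033.%(num)i.hdf5"
    template % {"num": 2}                           # "snap_033.2.hdf5"
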
diff -r 7a79dd9c7ebefddba48dfe0440a47ca3fddd6b98 -r ea5291e8fa1bf7647c3dbdd0052dcb68444fb260 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -96,6 +96,20 @@
              projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
              projection_conversion = 'cm')
 
+    def particle_cic(field, data):
+        pos = data[ptype, coord_name]
+        d = data.deposit(pos, [data[ptype, mass_name]], method = "cic")
+        d /= data["CellVolume"]
+        return d
+
+    registry.add_field(("deposit", "%s_cic" % ptype),
+             function = particle_cic,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s CIC Density}" % ptype,
+             units = r"\mathrm{g}/\mathrm{cm}^{3}",
+             projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+             projection_conversion = 'cm')
+
     # Now some translation functions.
 
     registry.add_field((ptype, "ParticleMass"),
@@ -247,3 +261,60 @@
         GadgetFieldInfo.add_field(("all", oname + ax), function=func,
                 particle_type = True)
 
+# OWLS
+# ====
+
+# I am optimistic that some day we will be able to get rid of much of this, and
+# make OWLS a subclass of Gadget fields.
+
+_owls_ptypes = ("PartType0", "PartType1", "PartType2", "PartType3",
+                "PartType4")
+
+for fname in ["Coordinates", "Velocities", "ParticleIDs",
+              # Note: Mass, not Masses
+              "Mass"]:
+    func = _field_concat(fname)
+    OWLSFieldInfo.add_field(("all", fname), function=func,
+            particle_type = True)
+
+def _owls_particle_fields(ptype):
+    def _Mass(field, data):
+        pind = _owls_ptypes.index(ptype)
+        if data.pf["MassTable"][pind] == 0.0:
+            raise RuntimeError
+        mass = np.ones(data[ptype, "ParticleIDs"].shape[0], dtype="float64")
+        # Note that this is an alias, which is why we need to apply conversion
+        # here.  Otherwise we'd have an asymmetry.
+        mass *= data.pf["MassTable"][pind] 
+        return mass
+    OWLSFieldInfo.add_field((ptype, "Mass"), function=_Mass,
+                            convert_function = _get_conv("mass"),
+                            particle_type = True)
+
+for ptype in _owls_ptypes:
+    # Note that this adds a "Known" Mass field and a "Derived" Mass field.
+    # This way the "Known" will get used, and if it's not there, it will use
+    # the derived.
+    KnownOWLSFields.add_field((ptype, "Mass"), function=NullFunc,
+        particle_type = True,
+        convert_function=_get_conv("mass"),
+        units = r"\mathrm{g}")
+    _owls_particle_fields(ptype)
+    KnownOWLSFields.add_field((ptype, "Velocities"), function=NullFunc,
+        particle_type = True,
+        convert_function=_get_conv("velocity"),
+        units = r"\mathrm{cm}/\mathrm{s}")
+    _particle_functions(ptype, "Coordinates", "Mass", OWLSFieldInfo)
+    KnownOWLSFields.add_field((ptype, "Coordinates"), function=NullFunc,
+        particle_type = True)
+_particle_functions("all", "Coordinates", "Mass", OWLSFieldInfo)
+
+# Now we have to manually apply the splits for "all", since we don't want to
+# use the splits defined above.
+
+for iname, oname in [("Coordinates", "particle_position_"),
+                     ("Velocities", "particle_velocity_")]:
+    for axi, ax in enumerate("xyz"):
+        func = _field_concat_slice(iname, axi)
+        OWLSFieldInfo.add_field(("all", oname + ax), function=func,
+                particle_type = True)

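The _Mass fallback above only applies when the snapshot stores a constant
per-type mass; a worked sketch of the lookup it performs (the MassTable
values are hypothetical):

    MassTable = [0.0, 1.5e-5, 0.0, 0.0, 0.0]    # one entry per PartType
    pind = _owls_ptypes.index("PartType1")      # -> 1
    # MassTable[1] != 0.0, so every PartType1 particle is assigned 1.5e-5
    # (code units; _get_conv("mass") supplies the conversion to grams).
    # A zero entry means masses are stored per particle, and the
    # RuntimeError above signals that the constant-mass fallback is invalid.
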
diff -r 7a79dd9c7ebefddba48dfe0440a47ca3fddd6b98 -r ea5291e8fa1bf7647c3dbdd0052dcb68444fb260 yt/geometry/oct_geometry_handler.py
--- a/yt/geometry/oct_geometry_handler.py
+++ b/yt/geometry/oct_geometry_handler.py
@@ -78,7 +78,7 @@
             source.quantities["MaxLocation"](field)
         mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f", 
               max_val, mx, my, mz)
-        self.pf.parameters["Max%sValue" % (field)] = max_val
-        self.pf.parameters["Max%sPos" % (field)] = "%s" % ((mx,my,mz),)
+        self.pf.parameters["Max%sValue" % (field,)] = max_val
+        self.pf.parameters["Max%sPos" % (field,)] = "%s" % ((mx,my,mz),)
         return max_val, np.array((mx,my,mz), dtype='float64')
 
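The trailing commas added above matter once field names are tuples: with
field = ("deposit", "all_cic"), the old "Max%sValue" % (field) unpacks the
tuple as two format arguments and raises, while % (field,) formats the tuple
itself.  A quick demonstration:

    field = ("deposit", "all_cic")
    "Max%sValue" % (field,)   # -> "Max('deposit', 'all_cic')Value"
    "Max%sValue" % (field)    # TypeError: not all arguments converted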

diff -r 7a79dd9c7ebefddba48dfe0440a47ca3fddd6b98 -r ea5291e8fa1bf7647c3dbdd0052dcb68444fb260 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -29,6 +29,7 @@
 import numpy as np
 from libc.stdlib cimport malloc, free
 cimport cython
+from libc.math cimport sqrt
 
 from fp_utils cimport *
 from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
@@ -39,6 +40,21 @@
 cdef inline int gind(int i, int j, int k, int dims[3]):
     return ((k*dims[1])+j)*dims[0]+i
 
+
+####################################################
+# Standard SPH kernel for use with the Grid method #
+####################################################
+
+cdef inline np.float64_t sph_kernel(np.float64_t x) nogil:
+    cdef np.float64_t kernel
+    if x <= 0.5:
+        kernel = 1.-6.*x*x*(1.-x)
+    elif x>0.5 and x<=1.0:
+        kernel = 2.*(1.-x)*(1.-x)*(1.-x)
+    else:
+        kernel = 0.
+    return kernel
+
 cdef class ParticleDepositOperation:
     # We assume each will allocate and define their own temporary storage
     cdef np.int64_t nvals

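With x = r/h (distance in units of the smoothing length), the kernel coded
above is the standard cubic spline, written here unnormalized:

    W(x) = \begin{cases}
             1 - 6x^2(1 - x), & 0 \le x \le 1/2 \\
             2(1 - x)^3,      & 1/2 < x \le 1   \\
             0,               & x > 1
           \end{cases}

The usual 8/(\pi h^3) normalization is omitted; the SimpleSmooth deposit in
the matching .pyx divides by the accumulated kernel_sum, so any constant
prefactor cancels.
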
diff -r 7a79dd9c7ebefddba48dfe0440a47ca3fddd6b98 -r ea5291e8fa1bf7647c3dbdd0052dcb68444fb260 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -29,6 +29,7 @@
 import numpy as np
 from libc.stdlib cimport malloc, free
 cimport cython
+from libc.math cimport sqrt
 
 from fp_utils cimport *
 from oct_container cimport Oct, OctAllocationContainer, \
@@ -145,6 +146,68 @@
 
 deposit_count = CountParticles
 
+cdef class SimpleSmooth(ParticleDepositOperation):
+    # Note that this does nothing at the edges.  So it will give a poor
+    # estimate there, and since Octrees are mostly edges, this will be a very
+    # poor SPH kernel.
+    cdef np.float64_t *data
+    cdef public object odata
+    cdef np.float64_t *temp
+    cdef public object otemp
+
+    def initialize(self):
+        self.odata = np.zeros(self.nvals, dtype="float64")
+        cdef np.ndarray arr = self.odata
+        self.data = <np.float64_t*> arr.data
+        self.otemp = np.zeros(self.nvals, dtype="float64")
+        arr = self.otemp
+        self.temp = <np.float64_t*> arr.data
+
+    @cython.cdivision(True)
+    cdef void process(self, int dim[3],
+                      np.float64_t left_edge[3],
+                      np.float64_t dds[3],
+                      np.int64_t offset,
+                      np.float64_t ppos[3],
+                      np.float64_t *fields
+                      ):
+        cdef int ii[3], half_len, ib0[3], ib1[3]
+        cdef int i, j, k
+        cdef np.float64_t idist[3], kernel_sum, dist
+        # Smoothing length is fields[0]
+        kernel_sum = 0.0
+        for i in range(3):
+            ii[i] = <int>((ppos[i] - left_edge[i])/dds[i])
+            half_len = <int>(fields[0]/dds[i]) + 1
+            ib0[i] = ii[i] - half_len
+            ib1[i] = ii[i] + half_len
+            if ib0[i] >= dim[i] or ib1[i] <0:
+                return
+            ib0[i] = iclip(ib0[i], 0, dim[i] - 1)
+            ib1[i] = iclip(ib1[i], 0, dim[i] - 1)
+        for i from ib0[0] <= i <= ib1[0]:
+            idist[0] = (ii[0] - i) * (ii[0] - i) * dds[0]
+            for j from ib0[1] <= j <= ib1[1]:
+                idist[1] = (ii[1] - j) * (ii[1] - j) * dds[1] 
+                for k from ib0[2] <= k <= ib1[2]:
+                    idist[2] = (ii[2] - k) * (ii[2] - k) * dds[2]
+                    dist = idist[0] + idist[1] + idist[2]
+                    # Calculate distance in multiples of the smoothing length
+                    dist = sqrt(dist) / fields[0]
+                    self.temp[gind(i,j,k,dim) + offset] = sph_kernel(dist)
+                    kernel_sum += self.temp[gind(i,j,k,dim) + offset]
+        # Having found the kernel, deposit accordingly into gdata
+        for i from ib0[0] <= i <= ib1[0]:
+            for j from ib0[1] <= j <= ib1[1]:
+                for k from ib0[2] <= k <= ib1[2]:
+                    dist = self.temp[gind(i,j,k,dim) + offset] / kernel_sum
+                    self.data[gind(i,j,k,dim) + offset] += fields[1] * dist
+        
+    def finalize(self):
+        return self.odata
+
+deposit_simple_smooth = SimpleSmooth
+
 cdef class SumParticleField(ParticleDepositOperation):
     cdef np.float64_t *sum
     cdef public object osum
@@ -232,3 +295,44 @@
 
 deposit_std = StdParticleField
 
+cdef class CICDeposit(ParticleDepositOperation):
+    cdef np.float64_t *field
+    cdef public object ofield
+    def initialize(self):
+        self.ofield = np.zeros(self.nvals, dtype="float64")
+        cdef np.ndarray arr = self.ofield
+        self.field = <np.float64_t *> arr.data
+
+    cdef void process(self, int dim[3],
+                      np.float64_t left_edge[3],
+                      np.float64_t dds[3],
+                      np.int64_t offset, # offset into IO field
+                      np.float64_t ppos[3], # this particle's position
+                      np.float64_t *fields # any other fields we need
+                      ):
+        
+        cdef int i, j, k, ind[3], ii
+        cdef np.float64_t rpos[3], rdds[3][2]
+        cdef np.float64_t fact, edge0, edge1, edge2
+        cdef np.float64_t le0, le1, le2
+        cdef np.float64_t dx, dy, dz, dx2, dy2, dz2
+
+        # Compute the position of the central cell
+        for i in range(3):
+            rpos[i] = (ppos[i]-left_edge[i])/dds[i]
+            rpos[i] = fclip(rpos[i], 0.5001, dim[i]-0.5001)
+            ind[i] = <int> (rpos[i] + 0.5)
+            # Note these are 1, then 0
+            rdds[i][1] = (<np.float64_t> ind[i]) + 0.5 - rpos[i]
+            rdds[i][0] = 1.0 - rdds[i][1]
+
+        for i in range(2):
+            for j in range(2):
+                for k in range(2):
+                    ii = gind(ind[0] - i, ind[1] - j, ind[2] - k, dim) + offset
+                    self.field[ii] += fields[0]*rdds[0][i]*rdds[1][j]*rdds[2][k]
+
+    def finalize(self):
+        return self.ofield
+
+deposit_cic = CICDeposit

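CICDeposit splits each particle's mass between the two nearest cells along
each axis using the linear weights computed in process().  A minimal 1D
NumPy sketch of the same weighting (standalone, not the yt API):

    import numpy as np

    def cic_1d(pos, mass, nx):
        # pos is in cell units (cell k spans [k, k+1)), mirroring rpos above.
        rpos = np.clip(pos, 0.5001, nx - 0.5001)
        ind = (rpos + 0.5).astype(int)
        w_left = ind + 0.5 - rpos       # rdds[i][1]: weight for cell ind - 1
        w_right = 1.0 - w_left          # rdds[i][0]: weight for cell ind
        field = np.zeros(nx)
        np.add.at(field, ind, mass * w_right)
        np.add.at(field, ind - 1, mass * w_left)
        return field

    # A unit-mass particle at x = 2.3 deposits 0.8 into cell 2 and 0.2 into
    # cell 1, matching the two-point loop in CICDeposit.process.
    print(cic_1d(np.array([2.3]), np.array([1.0]), 8))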

https://bitbucket.org/yt_analysis/yt/commits/4fb9244f4260/
Changeset:   4fb9244f4260
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-11 15:50:05
Summary:     Merging with OWLS and CIC deposition changes.
Affected #:  9 files

diff -r 3565d03875fed7118299145e6c2b9480a7e90ecb -r 4fb9244f42602d769afa0d55bbd903a49e592ee5 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -53,6 +53,7 @@
     MinimalProjectionData
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects, parallel_root_only, ParallelAnalysisInterface
+import yt.geometry.particle_deposit as particle_deposit
 
 from .field_info_container import\
     NeedsGridType,\
@@ -433,20 +434,31 @@
         fields_to_get = [f for f in fields if f not in self.field_data]
         fields_to_get = self._identify_dependencies(fields_to_get)
         if len(fields_to_get) == 0: return
-        fill, gen = self._split_fields(fields_to_get)
+        fill, gen, part = self._split_fields(fields_to_get)
+        if len(part) > 0: self._fill_particles(part)
         if len(fill) > 0: self._fill_fields(fill)
         if len(gen) > 0: self._generate_fields(gen)
 
     def _split_fields(self, fields_to_get):
         fill, gen = self.pf.h._split_fields(fields_to_get)
+        particles = []
         for field in gen:
             finfo = self.pf._get_field_info(*field)
             try:
                 finfo.check_available(self)
             except NeedsOriginalGrid:
                 fill.append(field)
+        for field in fill:
+            finfo = self.pf._get_field_info(*field)
+            if finfo.particle_type:
+                particles.append(field)
         gen = [f for f in gen if f not in fill]
-        return fill, gen
+        fill = [f for f in fill if f not in particles]
+        return fill, gen, particles
+
+    def _fill_particles(self, part):
+        for p in part:
+            self[p] = self._data_source[p]
 
     def _fill_fields(self, fields):
         output_fields = [np.zeros(self.ActiveDimensions, dtype="float64")
@@ -485,6 +497,67 @@
             raise KeyError(field)
         return rv
 
+    @property
+    def LeftEdge(self):
+        return self.left_edge
+
+    def deposit(self, positions, fields = None, method = None):
+        cls = getattr(particle_deposit, "deposit_%s" % method, None)
+        if cls is None:
+            raise YTParticleDepositionNotImplemented(method)
+        op = cls(self.ActiveDimensions.prod()) # We allocate number of zones, not number of octs
+        op.initialize()
+        op.process_grid(self, positions, fields)
+        vals = op.finalize()
+        return vals.reshape(self.ActiveDimensions, order="F")
+
+class YTArbitraryGridBase(YTCoveringGridBase):
+    """A 3D region with arbitrary bounds and dimensions.
+
+    In contrast to the Covering Grid, this object accepts a left edge, a right
+    edge, and dimensions.  This allows it to be used for creating 3D particle
+    deposition fields that are independent of the underlying mesh, whether that
+    is yt-generated or from the simulation data.  For example, arbitrary boxes
+    around particles can be drawn and particle deposition fields can be
+    created.  This object will refuse to generate any fluid fields.
+    
+    Parameters
+    ----------
+    left_edge : array_like
+        The left edge of the region to be extracted
+    right_edge : array_like
+        The right edge of the region to be extracted
+    dims : array_like
+        Number of cells along each axis of resulting grid.
+
+    Examples
+    --------
+    >>> obj = pf.h.arbitrary_grid([0.0, 0.0, 0.0], [0.99, 0.99, 0.99],
+    ...                          dims=[128, 128, 128])
+    """
+    _spatial = True
+    _type_name = "arbitrary_grid"
+    _con_args = ('left_edge', 'right_edge', 'ActiveDimensions')
+    _container_fields = ("dx", "dy", "dz", "x", "y", "z")
+    def __init__(self, left_edge, right_edge, dims,
+                 pf = None, field_parameters = None):
+        if field_parameters is None:
+            center = None
+        else:
+            center = field_parameters.get("center", None)
+        YTSelectionContainer3D.__init__(self, center, pf, field_parameters)
+        self.left_edge = np.array(left_edge)
+        self.right_edge = np.array(right_edge)
+        self.ActiveDimensions = np.array(dims, dtype='int32')
+        if self.ActiveDimensions.size == 1:
+            self.ActiveDimensions = np.array([dims, dims, dims], dtype="int32")
+        self.dds = (self.right_edge - self.left_edge)/self.ActiveDimensions
+        self.level = 99
+        self._setup_data_source()
+
+    def _fill_fields(self, fields):
+        raise NotImplementedError
+
 class LevelState(object):
     current_dx = None
     current_dims = None

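A hypothetical follow-on to the docstring example above, depositing
particles onto the mesh-independent grid (the "all_density" field name is
assumed to be registered by the frontend):

    ag = pf.h.arbitrary_grid([0.0, 0.0, 0.0], [0.99, 0.99, 0.99],
                             dims=[128, 128, 128])
    rho = ag["deposit", "all_density"]  # a (128, 128, 128) array; asking
                                        # for a fluid field raises instead
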
diff -r 3565d03875fed7118299145e6c2b9480a7e90ecb -r 4fb9244f42602d769afa0d55bbd903a49e592ee5 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -477,8 +477,23 @@
         if fields is None: return
         fields = self._determine_fields(fields)
         # Now we collect all our fields
-        fields_to_get = [f for f in fields if f not in self.field_data]
-        if len(fields_to_get) == 0:
+        # Here is where we need to perform a validation step, so that if we
+        # have a field requested that we actually *can't* yet get, we put it
+        # off until the end.  This prevents double-reading fields that will
+        # need to be used in spatial fields later on.
+        fields_to_get = []
+        # This will be pre-populated with spatial fields
+        fields_to_generate = [] 
+        for field in self._determine_fields(fields):
+            if field in self.field_data: continue
+            finfo = self.pf._get_field_info(*field)
+            try:
+                finfo.check_available(self)
+            except NeedsGridType:
+                fields_to_generate.append(field)
+                continue
+            fields_to_get.append(field)
+        if len(fields_to_get) == 0 and len(fields_to_generate) == 0:
             return
         elif self._locked == True:
             raise GenerationInProgress(fields)
@@ -502,12 +517,18 @@
         read_particles, gen_particles = self.hierarchy._read_particle_fields(
                                         particles, self, self._current_chunk)
         self.field_data.update(read_particles)
-        fields_to_generate = gen_fluids + gen_particles
+        fields_to_generate += gen_fluids + gen_particles
         self._generate_fields(fields_to_generate)
 
     def _generate_fields(self, fields_to_generate):
         index = 0
         with self._field_lock():
+            # At this point, we assume that any fields that are necessary to
+            # *generate* a field are in fact already available to us.  Note
+            # that we do not make any assumption about whether or not the
+            # fields have a spatial requirement.  This will be checked inside
+            # _generate_field, at which point additional dependencies may
+            # actually be noted.
             while any(f not in self.field_data for f in fields_to_generate):
                 field = fields_to_generate[index % len(fields_to_generate)]
                 index += 1
@@ -519,11 +540,6 @@
                         if f not in fields_to_generate:
                             fields_to_generate.append(f)
 
-    def deposit(self, positions, fields, op):
-        assert(self._current_chunk.chunk_type == "spatial")
-        fields = ensure_list(fields)
-        self.hierarchy._deposit_particle_fields(self, positions, fields, op)
-
     @contextmanager
     def _field_lock(self):
         self._locked = True

diff -r 3565d03875fed7118299145e6c2b9480a7e90ecb -r 4fb9244f42602d769afa0d55bbd903a49e592ee5 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -894,7 +894,7 @@
     Child cells are not returned.
     """
     _type_name = "data_collection"
-    _con_args = ("obj_list",)
+    _con_args = ("_obj_list",)
     def __init__(self, center, obj_list, pf = None, field_parameters = None):
         YTSelectionContainer3D.__init__(self, center, pf, field_parameters)
         self._obj_ids = np.array([o.id - o._id_offset for o in obj_list],

diff -r 3565d03875fed7118299145e6c2b9480a7e90ecb -r 4fb9244f42602d769afa0d55bbd903a49e592ee5 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -697,3 +697,15 @@
          units = r"\mathrm{g}/\mathrm{cm}^{3}",
          projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
          projection_conversion = 'cm')
+
+def particle_cic(field, data):
+    pos = np.column_stack([data["particle_position_%s" % ax] for ax in 'xyz'])
+    d = data.deposit(pos, [data["ParticleMass"]], method="cic")
+    d /= data["CellVolume"]
+    return d
+add_field(("deposit", "all_cic"), function=particle_cic,
+          validators=[ValidateSpatial()],
+          display_name = "\\mathrm{All CIC Density}",
+          units = r"\mathrm{g}/\mathrm{cm}^{3}",
+          projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+          projection_conversion = 'cm')

diff -r 3565d03875fed7118299145e6c2b9480a7e90ecb -r 4fb9244f42602d769afa0d55bbd903a49e592ee5 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -163,71 +163,6 @@
         for subset in oobjs:
             yield YTDataChunk(dobj, "io", [subset], subset.cell_count)
 
-class OWLSStaticOutput(StaticOutput):
-    _hierarchy_class = ParticleGeometryHandler
-    _domain_class = ParticleDomainFile
-    _fieldinfo_fallback = OWLSFieldInfo
-    _fieldinfo_known = KnownOWLSFields
-
-    def __init__(self, filename, data_style="OWLS", root_dimensions = 64):
-        self._root_dimensions = root_dimensions
-        # Set up the template for domain files
-        self.storage_filename = None
-        super(OWLSStaticOutput, self).__init__(filename, data_style)
-
-    def __repr__(self):
-        return os.path.basename(self.parameter_filename).split(".")[0]
-
-    def _set_units(self):
-        self.units = {}
-        self.time_units = {}
-        self.conversion_factors = {}
-        DW = self.domain_right_edge - self.domain_left_edge
-        self.units["unitary"] = 1.0 / DW.max()
-
-    def _parse_parameter_file(self):
-        handle = h5py.File(self.parameter_filename)
-        hvals = {}
-        hvals.update(handle["/Header"].attrs)
-
-        self.dimensionality = 3
-        self.refine_by = 2
-        self.parameters["HydroMethod"] = "sph"
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        # Set standard values
-        self.current_time = hvals["Time_GYR"] * sec_conversion["Gyr"]
-        self.domain_left_edge = np.zeros(3, "float64")
-        self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
-        self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
-        self.cosmological_simulation = 1
-        self.periodicity = (True, True, True)
-        self.current_redshift = hvals["Redshift"]
-        self.omega_lambda = hvals["OmegaLambda"]
-        self.omega_matter = hvals["Omega0"]
-        self.hubble_constant = hvals["HubbleParam"]
-        self.parameters = hvals
-
-        prefix = self.parameter_filename.split(".", 1)[0]
-        suffix = self.parameter_filename.rsplit(".", 1)[-1]
-        self.domain_template = "%s.%%(num)i.%s" % (prefix, suffix)
-        self.domain_count = hvals["NumFilesPerSnapshot"]
-
-        handle.close()
-
-    @classmethod
-    def _is_valid(self, *args, **kwargs):
-        try:
-            fileh = h5py.File(args[0],'r')
-            if "Constants" in fileh["/"].keys() and \
-               "Header" in fileh["/"].keys():
-                fileh.close()
-                return True
-            fileh.close()
-        except:
-            pass
-        return False
-
 class GadgetBinaryDomainFile(ParticleDomainFile):
     def __init__(self, pf, io, domain_filename, domain_id):
         with open(domain_filename, "rb") as f:
@@ -404,6 +339,74 @@
         # We do not allow load() of these files.
         return False
 
+class OWLSStaticOutput(GadgetStaticOutput):
+    _hierarchy_class = ParticleGeometryHandler
+    _domain_class = ParticleDomainFile
+    _fieldinfo_fallback = OWLSFieldInfo # For now we have separate from Gadget
+    _fieldinfo_known = KnownOWLSFields
+    _header_spec = None # Override so that there's no confusion
+
+    def __init__(self, filename, data_style="OWLS", root_dimensions = 64):
+        self._root_dimensions = root_dimensions
+        # Set up the template for domain files
+        self.storage_filename = None
+        super(OWLSStaticOutput, self).__init__(filename, data_style,
+                                               root_dimensions,
+                                               unit_base = None)
+
+    def __repr__(self):
+        return os.path.basename(self.parameter_filename).split(".")[0]
+
+    def _parse_parameter_file(self):
+        handle = h5py.File(self.parameter_filename)
+        hvals = {}
+        hvals.update((str(k), v) for k, v in handle["/Header"].attrs.items())
+
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.parameters["HydroMethod"] = "sph"
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+
+        # Set standard values
+        self.current_time = hvals["Time_GYR"] * sec_conversion["Gyr"]
+        self.domain_left_edge = np.zeros(3, "float64")
+        self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
+        self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
+        self.cosmological_simulation = 1
+        self.periodicity = (True, True, True)
+        self.current_redshift = hvals["Redshift"]
+        self.omega_lambda = hvals["OmegaLambda"]
+        self.omega_matter = hvals["Omega0"]
+        self.hubble_constant = hvals["HubbleParam"]
+        self.parameters = hvals
+
+        prefix = self.parameter_filename.split(".", 1)[0]
+        suffix = self.parameter_filename.rsplit(".", 1)[-1]
+        self.domain_template = "%s.%%(num)i.%s" % (prefix, suffix)
+        self.domain_count = hvals["NumFilesPerSnapshot"]
+
+        # To avoid having to open files twice
+        self._unit_base = {}
+        self._unit_base.update((str(k), v) for k, v in handle["/Units"].attrs.items())
+        # Comoving cm is given in the Units
+        self._unit_base['cmcm'] = 1.0 / self._unit_base["UnitLength_in_cm"]
+
+        handle.close()
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            fileh = h5py.File(args[0],'r')
+            if "Constants" in fileh["/"].keys() and \
+               "Header" in fileh["/"].keys():
+                fileh.close()
+                return True
+            fileh.close()
+        except:
+            pass
+        return False
+
 class TipsyDomainFile(ParticleDomainFile):
 
     def _calculate_offsets(self, field_list):

diff -r 3565d03875fed7118299145e6c2b9480a7e90ecb -r 4fb9244f42602d769afa0d55bbd903a49e592ee5 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -96,6 +96,20 @@
              projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
              projection_conversion = 'cm')
 
+    def particle_cic(field, data):
+        pos = data[ptype, coord_name]
+        d = data.deposit(pos, [data[ptype, mass_name]], method = "cic")
+        d /= data["CellVolume"]
+        return d
+
+    registry.add_field(("deposit", "%s_cic" % ptype),
+             function = particle_cic,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s CIC Density}" % ptype,
+             units = r"\mathrm{g}/\mathrm{cm}^{3}",
+             projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+             projection_conversion = 'cm')
+
     # Now some translation functions.
 
     registry.add_field((ptype, "ParticleMass"),
@@ -247,3 +261,60 @@
         GadgetFieldInfo.add_field(("all", oname + ax), function=func,
                 particle_type = True)
 
+# OWLS
+# ====
+
+# I am optimistic that some day we will be able to get rid of much of this, and
+# make OWLS a subclass of Gadget fields.
+
+_owls_ptypes = ("PartType0", "PartType1", "PartType2", "PartType3",
+                "PartType4")
+
+for fname in ["Coordinates", "Velocities", "ParticleIDs",
+              # Note: Mass, not Masses
+              "Mass"]:
+    func = _field_concat(fname)
+    OWLSFieldInfo.add_field(("all", fname), function=func,
+            particle_type = True)
+
+def _owls_particle_fields(ptype):
+    def _Mass(field, data):
+        pind = _owls_ptypes.index(ptype)
+        if data.pf["MassTable"][pind] == 0.0:
+            raise RuntimeError
+        mass = np.ones(data[ptype, "ParticleIDs"].shape[0], dtype="float64")
+        # Note that this is an alias, which is why we need to apply conversion
+        # here.  Otherwise we'd have an asymmetry.
+        mass *= data.pf["MassTable"][pind] 
+        return mass
+    OWLSFieldInfo.add_field((ptype, "Mass"), function=_Mass,
+                            convert_function = _get_conv("mass"),
+                            particle_type = True)
+
+for ptype in _owls_ptypes:
+    # Note that this adds a "Known" Mass field and a "Derived" Mass field.
+    # This way the "Known" will get used, and if it's not there, it will use
+    # the derived.
+    KnownOWLSFields.add_field((ptype, "Mass"), function=NullFunc,
+        particle_type = True,
+        convert_function=_get_conv("mass"),
+        units = r"\mathrm{g}")
+    _owls_particle_fields(ptype)
+    KnownOWLSFields.add_field((ptype, "Velocities"), function=NullFunc,
+        particle_type = True,
+        convert_function=_get_conv("velocity"),
+        units = r"\mathrm{cm}/\mathrm{s}")
+    _particle_functions(ptype, "Coordinates", "Mass", OWLSFieldInfo)
+    KnownOWLSFields.add_field((ptype, "Coordinates"), function=NullFunc,
+        particle_type = True)
+_particle_functions("all", "Coordinates", "Mass", OWLSFieldInfo)
+
+# Now we have to manually apply the splits for "all", since we don't want to
+# use the splits defined above.
+
+for iname, oname in [("Coordinates", "particle_position_"),
+                     ("Velocities", "particle_velocity_")]:
+    for axi, ax in enumerate("xyz"):
+        func = _field_concat_slice(iname, axi)
+        OWLSFieldInfo.add_field(("all", oname + ax), function=func,
+                particle_type = True)

diff -r 3565d03875fed7118299145e6c2b9480a7e90ecb -r 4fb9244f42602d769afa0d55bbd903a49e592ee5 yt/geometry/oct_geometry_handler.py
--- a/yt/geometry/oct_geometry_handler.py
+++ b/yt/geometry/oct_geometry_handler.py
@@ -78,7 +78,7 @@
             source.quantities["MaxLocation"](field)
         mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f", 
               max_val, mx, my, mz)
-        self.pf.parameters["Max%sValue" % (field)] = max_val
-        self.pf.parameters["Max%sPos" % (field)] = "%s" % ((mx,my,mz),)
+        self.pf.parameters["Max%sValue" % (field,)] = max_val
+        self.pf.parameters["Max%sPos" % (field,)] = "%s" % ((mx,my,mz),)
         return max_val, np.array((mx,my,mz), dtype='float64')
 

diff -r 3565d03875fed7118299145e6c2b9480a7e90ecb -r 4fb9244f42602d769afa0d55bbd903a49e592ee5 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -29,6 +29,7 @@
 import numpy as np
 from libc.stdlib cimport malloc, free
 cimport cython
+from libc.math cimport sqrt
 
 from fp_utils cimport *
 from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
@@ -39,6 +40,21 @@
 cdef inline int gind(int i, int j, int k, int dims[3]):
     return ((k*dims[1])+j)*dims[0]+i
 
+
+####################################################
+# Standard SPH kernel for use with the Grid method #
+####################################################
+
+cdef inline np.float64_t sph_kernel(np.float64_t x) nogil:
+    cdef np.float64_t kernel
+    if x <= 0.5:
+        kernel = 1.-6.*x*x*(1.-x)
+    elif x>0.5 and x<=1.0:
+        kernel = 2.*(1.-x)*(1.-x)*(1.-x)
+    else:
+        kernel = 0.
+    return kernel
+
 cdef class ParticleDepositOperation:
     # We assume each will allocate and define their own temporary storage
     cdef np.int64_t nvals

diff -r 3565d03875fed7118299145e6c2b9480a7e90ecb -r 4fb9244f42602d769afa0d55bbd903a49e592ee5 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -29,6 +29,7 @@
 import numpy as np
 from libc.stdlib cimport malloc, free
 cimport cython
+from libc.math cimport sqrt
 
 from fp_utils cimport *
 from oct_container cimport Oct, OctAllocationContainer, \
@@ -145,6 +146,68 @@
 
 deposit_count = CountParticles
 
+cdef class SimpleSmooth(ParticleDepositOperation):
+    # Note that this does nothing at the edges.  So it will give a poor
+    # estimate there, and since Octrees are mostly edges, this will be a very
+    # poor SPH kernel.
+    cdef np.float64_t *data
+    cdef public object odata
+    cdef np.float64_t *temp
+    cdef public object otemp
+
+    def initialize(self):
+        self.odata = np.zeros(self.nvals, dtype="float64")
+        cdef np.ndarray arr = self.odata
+        self.data = <np.float64_t*> arr.data
+        self.otemp = np.zeros(self.nvals, dtype="float64")
+        arr = self.otemp
+        self.temp = <np.float64_t*> arr.data
+
+    @cython.cdivision(True)
+    cdef void process(self, int dim[3],
+                      np.float64_t left_edge[3],
+                      np.float64_t dds[3],
+                      np.int64_t offset,
+                      np.float64_t ppos[3],
+                      np.float64_t *fields
+                      ):
+        cdef int ii[3], half_len, ib0[3], ib1[3]
+        cdef int i, j, k
+        cdef np.float64_t idist[3], kernel_sum, dist
+        # Smoothing length is fields[0]
+        kernel_sum = 0.0
+        for i in range(3):
+            ii[i] = <int>((ppos[i] - left_edge[i])/dds[i])
+            half_len = <int>(fields[0]/dds[i]) + 1
+            ib0[i] = ii[i] - half_len
+            ib1[i] = ii[i] + half_len
+            if ib0[i] >= dim[i] or ib1[i] <0:
+                return
+            ib0[i] = iclip(ib0[i], 0, dim[i] - 1)
+            ib1[i] = iclip(ib1[i], 0, dim[i] - 1)
+        for i from ib0[0] <= i <= ib1[0]:
+            idist[0] = (ii[0] - i) * (ii[0] - i) * dds[0]
+            for j from ib0[1] <= j <= ib1[1]:
+                idist[1] = (ii[1] - j) * (ii[1] - j) * dds[1] 
+                for k from ib0[2] <= k <= ib1[2]:
+                    idist[2] = (ii[2] - k) * (ii[2] - k) * dds[2]
+                    dist = idist[0] + idist[1] + idist[2]
+                    # Calculate distance in multiples of the smoothing length
+                    dist = sqrt(dist) / fields[0]
+                    self.temp[gind(i,j,k,dim) + offset] = sph_kernel(dist)
+                    kernel_sum += self.temp[gind(i,j,k,dim) + offset]
+        # Having found the kernel, deposit accordingly into gdata
+        for i from ib0[0] <= i <= ib1[0]:
+            for j from ib0[1] <= j <= ib1[1]:
+                for k from ib0[2] <= k <= ib1[2]:
+                    dist = self.temp[gind(i,j,k,dim) + offset] / kernel_sum
+                    self.data[gind(i,j,k,dim) + offset] += fields[1] * dist
+        
+    def finalize(self):
+        return self.odata
+
+deposit_simple_smooth = SimpleSmooth
+
 cdef class SumParticleField(ParticleDepositOperation):
     cdef np.float64_t *sum
     cdef public object osum
@@ -232,3 +295,44 @@
 
 deposit_std = StdParticleField
 
+cdef class CICDeposit(ParticleDepositOperation):
+    cdef np.float64_t *field
+    cdef public object ofield
+    def initialize(self):
+        self.ofield = np.zeros(self.nvals, dtype="float64")
+        cdef np.ndarray arr = self.ofield
+        self.field = <np.float64_t *> arr.data
+
+    cdef void process(self, int dim[3],
+                      np.float64_t left_edge[3],
+                      np.float64_t dds[3],
+                      np.int64_t offset, # offset into IO field
+                      np.float64_t ppos[3], # this particle's position
+                      np.float64_t *fields # any other fields we need
+                      ):
+        
+        cdef int i, j, k, ind[3], ii
+        cdef np.float64_t rpos[3], rdds[3][2]
+        cdef np.float64_t fact, edge0, edge1, edge2
+        cdef np.float64_t le0, le1, le2
+        cdef np.float64_t dx, dy, dz, dx2, dy2, dz2
+
+        # Compute the position of the central cell
+        for i in range(3):
+            rpos[i] = (ppos[i]-left_edge[i])/dds[i]
+            rpos[i] = fclip(rpos[i], 0.5001, dim[i]-0.5001)
+            ind[i] = <int> (rpos[i] + 0.5)
+            # Note these are 1, then 0
+            rdds[i][1] = (<np.float64_t> ind[i]) + 0.5 - rpos[i]
+            rdds[i][0] = 1.0 - rdds[i][1]
+
+        for i in range(2):
+            for j in range(2):
+                for k in range(2):
+                    ii = gind(ind[0] - i, ind[1] - j, ind[2] - k, dim) + offset
+                    self.field[ii] += fields[0]*rdds[0][i]*rdds[1][j]*rdds[2][k]
+
+    def finalize(self):
+        return self.ofield
+
+deposit_cic = CICDeposit


https://bitbucket.org/yt_analysis/yt/commits/a4bbc055e03e/
Changeset:   a4bbc055e03e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-11 17:07:39
Summary:     Adding a particle fields registry.
Affected #:  1 file

diff -r 4fb9244f42602d769afa0d55bbd903a49e592ee5 -r a4bbc055e03edf2c8eccdf0988b7c463a27656e9 yt/data_objects/particle_fields.py
--- /dev/null
+++ b/yt/data_objects/particle_fields.py
@@ -0,0 +1,147 @@
+"""
+These are common particle deposition fields.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+from yt.funcs import *
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType, \
+    NullFunc, \
+    TranslationFunc
+from yt.utilities.physical_constants import \
+    mass_hydrogen_cgs, \
+    mass_sun_cgs, \
+    mh
+
+def particle_deposition_functions(ptype, coord_name, mass_name, registry):
+    def particle_count(field, data):
+        pos = data[ptype, coord_name]
+        d = data.deposit(pos, method = "count")
+        return d
+    registry.add_field(("deposit", "%s_count" % ptype),
+             function = particle_count,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s Count}" % ptype,
+             projection_conversion = '1')
+
+    def particle_mass(field, data):
+        pos = data[ptype, coord_name]
+        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
+        return d
+
+    registry.add_field(("deposit", "%s_mass" % ptype),
+             function = particle_mass,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s Mass}" % ptype,
+             units = r"\mathrm{g}",
+             projected_units = r"\mathrm{g}\/\mathrm{cm}",
+             projection_conversion = 'cm')
+
+    def particle_density(field, data):
+        pos = data[ptype, coord_name]
+        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
+        d /= data["CellVolume"]
+        return d
+
+    registry.add_field(("deposit", "%s_density" % ptype),
+             function = particle_density,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s Density}" % ptype,
+             units = r"\mathrm{g}/\mathrm{cm}^{3}",
+             projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+             projection_conversion = 'cm')
+
+    def particle_cic(field, data):
+        pos = data[ptype, coord_name]
+        d = data.deposit(pos, [data[ptype, mass_name]], method = "cic")
+        d /= data["CellVolume"]
+        return d
+
+    registry.add_field(("deposit", "%s_cic" % ptype),
+             function = particle_cic,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s CIC Density}" % ptype,
+             units = r"\mathrm{g}/\mathrm{cm}^{3}",
+             projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+             projection_conversion = 'cm')
+
+    # Now some translation functions.
+
+    registry.add_field((ptype, "ParticleMass"),
+            function = TranslationFunc((ptype, mass_name)),
+            particle_type = True,
+            units = r"\mathrm{g}")
+
+    def _ParticleMassMsun(field, data):
+        return data[ptype, mass_name].copy()
+    def _conv_Msun(data):
+        return 1.0/mass_sun_cgs
+
+    registry.add_field((ptype, "ParticleMassMsun"),
+            function = _ParticleMassMsun,
+            convert_function = _conv_Msun,
+            particle_type = True,
+            units = r"\mathrm{M}_\odot")
+
+def particle_scalar_functions(ptype, coord_name, vel_name, registry):
+
+    # Now we have to set up the various velocity and coordinate things.  In the
+    # future, we'll actually invert this and use the 3-component items
+    # elsewhere, and stop using these.
+    
+    # Note that we pass in _ptype here so that it's defined inside the closure.
+    def _get_coord_funcs(axi, _ptype):
+        def _particle_velocity(field, data):
+            return data[_ptype, vel_name][:,axi]
+        def _particle_position(field, data):
+            return data[_ptype, coord_name][:,axi]
+        return _particle_velocity, _particle_position
+    for axi, ax in enumerate("xyz"):
+        v, p = _get_coord_funcs(axi, ptype)
+        registry.add_field((ptype, "particle_velocity_%s" % ax),
+            particle_type = True, function = v)
+        registry.add_field((ptype, "particle_position_%s" % ax),
+            particle_type = True, function = p)
+
+def particle_vector_functions(ptype, coord_names, vel_names, registry):
+
+    # This will column_stack a set of scalars to create vector fields.
+
+    def _get_vec_func(_ptype, names):
+        def particle_vectors(field, data):
+            return np.column_stack([data[_ptype, name] for name in names])
+        return particle_vectors
+    registry.add_field((ptype, "Coordinates"),
+                       function=_get_vec_func(ptype, coord_names),
+                       particle_type=True)
+    registry.add_field((ptype, "Velocities"),
+                       function=_get_vec_func(ptype, vel_names),
+                       particle_type=True)

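A frontend hooks into this registry with two calls; a minimal sketch using
the names the Enzo and RAMSES conversions below adopt (MyFieldInfo stands in
for a frontend's FieldInfoContainer):

    from yt.data_objects.particle_fields import \
        particle_deposition_functions, particle_vector_functions

    particle_vector_functions("all",
        ["particle_position_%s" % ax for ax in 'xyz'],
        ["particle_velocity_%s" % ax for ax in 'xyz'],
        MyFieldInfo)
    particle_deposition_functions("all", "Coordinates", "particle_mass",
                                  MyFieldInfo)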

https://bitbucket.org/yt_analysis/yt/commits/27f5b715b5cf/
Changeset:   27f5b715b5cf
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-11 17:09:29
Summary:     This converts Enzo to use the particle field registry.

Note that I also change a couple of the ways fields are detected, so that
fields which are found in the HDF5 files but are in fact particle fields will
(I believe) be correctly identified as such.  We now also explicitly add
("all", "whatever") fields.
Affected #:  2 files

diff -r a4bbc055e03edf2c8eccdf0988b7c463a27656e9 -r 27f5b715b5cf07575fe4c42bef402e9f9039f798 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -491,7 +491,13 @@
         else:
             field_list = None
         field_list = self.comm.mpi_bcast(field_list)
-        self.field_list = list(field_list)
+        self.field_list = []
+        # Now we rebuild the field list, avoiding the problem of particle
+        # types not having names.
+        for field in field_list:
+            if ("all", field) in KnownEnzoFields:
+                self.field_list.append(("all", field))
+            else:
+                self.field_list.append(field)
 
     def _generate_random_grids(self):
         if self.num_grids > 40:

diff -r a4bbc055e03edf2c8eccdf0988b7c463a27656e9 -r 27f5b715b5cf07575fe4c42bef402e9f9039f798 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -36,6 +36,9 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
+from yt.data_objects.particle_fields import \
+    particle_deposition_functions, \
+    particle_vector_functions
 from yt.utilities.physical_constants import \
     mh, \
     mass_sun_cgs
@@ -528,11 +531,11 @@
 
 for pf in ["type", "mass"] + \
           ["position_%s" % ax for ax in 'xyz']:
-    add_enzo_field("particle_%s" % pf, NullFunc, particle_type=True)
+    add_enzo_field(("all", "particle_%s" % pf), NullFunc, particle_type=True)
     
 def _convRetainInt(data):
     return 1
-add_enzo_field("particle_index", function=NullFunc,
+add_enzo_field(("all", "particle_index"), function=NullFunc,
           particle_type=True, convert_function=_convRetainInt)
 
 def _get_vel_convert(ax):
@@ -542,11 +545,11 @@
 for ax in 'xyz':
     pf = "particle_velocity_%s" % ax
     cfunc = _get_vel_convert(ax)
-    add_enzo_field(pf, function=NullFunc, convert_function=cfunc,
+    add_enzo_field(("all", pf), function=NullFunc, convert_function=cfunc,
               particle_type=True)
 
 for pf in ["creation_time", "dynamical_time", "metallicity_fraction"]:
-    add_enzo_field(pf, function=NullFunc,
+    add_enzo_field(("all", pf), function=NullFunc,
               validators = [ValidateDataField(pf)],
               particle_type=True)
 add_field("particle_mass", function=NullFunc, particle_type=True)
@@ -661,51 +664,8 @@
                function=TranslationFunc(("CenOstriker","position_%s" % ax)),
                particle_type = True)
 
-def particle_count(field, data):
-    pos = np.column_stack([data["particle_position_%s" % ax] for ax in 'xyz'])
-    d = data.deposit(pos, method = "count")
-    return d
-EnzoFieldInfo.add_field(("deposit", "%s_count" % "all"),
-         function = particle_count,
-         validators = [ValidateSpatial()],
-         display_name = "\\mathrm{%s Count}" % "all",
-         projection_conversion = '1')
-
-def particle_mass(field, data):
-    pos = np.column_stack([data["particle_position_%s" % ax] for ax in 'xyz'])
-    d = data.deposit(pos, [data["ParticleMass"]], method = "sum")
-    return d
-
-EnzoFieldInfo.add_field(("deposit", "%s_mass" % "all"),
-         function = particle_mass,
-         validators = [ValidateSpatial()],
-         display_name = "\\mathrm{%s Mass}" % "all",
-         units = r"\mathrm{g}",
-         projected_units = r"\mathrm{g}\/\mathrm{cm}",
-         projection_conversion = 'cm')
-
-def particle_density(field, data):
-    pos = np.column_stack([data["particle_position_%s" % ax] for ax in 'xyz'])
-    d = data.deposit(pos, [data["ParticleMass"]], method = "sum")
-    d /= data["CellVolume"]
-    return d
-
-EnzoFieldInfo.add_field(("deposit", "%s_density" % "all"),
-         function = particle_density,
-         validators = [ValidateSpatial()],
-         display_name = "\\mathrm{%s Density}" % "all",
-         units = r"\mathrm{g}/\mathrm{cm}^{3}",
-         projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
-         projection_conversion = 'cm')
-
-def particle_cic(field, data):
-    pos = np.column_stack([data["particle_position_%s" % ax] for ax in 'xyz'])
-    d = data.deposit(pos, [data["ParticleMass"]], method="cic")
-    d /= data["CellVolume"]
-    return d
-add_field(("deposit", "all_cic"), function=particle_cic,
-          validators=[ValidateSpatial()],
-          display_name = "\\mathrm{All CIC Density}",
-          units = r"\mathrm{g}/\mathrm{cm}^{3}",
-          projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
-          projection_conversion = 'cm')
+particle_vector_functions("all", ["particle_position_%s" % ax for ax in 'xyz'],
+                                 ["particle_velocity_%s" % ax for ax in 'xyz'],
+                          EnzoFieldInfo)
+particle_deposition_functions("all", "Coordinates", "ParticleMass",
+                               EnzoFieldInfo)

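For ptype "all", those two registry calls regenerate everything the deleted
blocks defined by hand (per particle_fields.py above): ("deposit",
"all_count"), ("deposit", "all_mass"), ("deposit", "all_density"),
("deposit", "all_cic"), the ("all", "ParticleMass") and ("all",
"ParticleMassMsun") translations, and the stacked ("all", "Coordinates") /
("all", "Velocities") vector fields.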

https://bitbucket.org/yt_analysis/yt/commits/358f9ca48941/
Changeset:   358f9ca48941
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-11 17:10:14
Summary:     Convert RAMSES to use ("all", "field_name") for particle fields and the
particle field registry.
Affected #:  3 files

diff -r 27f5b715b5cf07575fe4c42bef402e9f9039f798 -r 358f9ca489417c9a0800ec302e6729be9ee34c76 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -152,8 +152,8 @@
         _pfields = {}
         for field, vtype in particle_fields:
             if f.tell() >= flen: break
-            field_offsets[field] = f.tell()
-            _pfields[field] = vtype
+            field_offsets["all", field] = f.tell()
+            _pfields["all", field] = vtype
             fpu.skip(f, 1)
         self.particle_field_offsets = field_offsets
         self.particle_field_types = _pfields

diff -r 27f5b715b5cf07575fe4c42bef402e9f9039f798 -r 358f9ca489417c9a0800ec302e6729be9ee34c76 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -34,10 +34,14 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
+from yt.data_objects.particle_fields import \
+    particle_deposition_functions, \
+    particle_vector_functions
 from yt.utilities.physical_constants import \
     boltzmann_constant_cgs, \
     mass_hydrogen_cgs, \
-    mass_sun_cgs
+    mass_sun_cgs, \
+    mh
 import numpy as np
 
 RAMSESFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo, "RFI")
@@ -106,29 +110,19 @@
 ]
 
 for f in known_ramses_particle_fields:
-    if f not in KnownRAMSESFields:
-        add_ramses_field(f, function=NullFunc, take_log=True,
-                  validators = [ValidateDataField(f)],
-                  particle_type = True)
+    add_ramses_field(("all", f), function=NullFunc, take_log=True,
+              particle_type = True)
 
 for ax in 'xyz':
-    KnownRAMSESFields["particle_velocity_%s" % ax]._convert_function = \
+    KnownRAMSESFields["all", "particle_velocity_%s" % ax]._convert_function = \
         _convertVelocity
 
 def _convertParticleMass(data):
     return data.convert("mass")
 
-KnownRAMSESFields["particle_mass"]._convert_function = \
+KnownRAMSESFields["all", "particle_mass"]._convert_function = \
         _convertParticleMass
-KnownRAMSESFields["particle_mass"]._units = r"\mathrm{g}"
-
-def _convertParticleMassMsun(data):
-    return 1.0/mass_sun_cgs
-add_field("ParticleMass", function=TranslationFunc("particle_mass"), 
-          particle_type=True)
-add_field("ParticleMassMsun",
-          function=TranslationFunc("particle_mass"), 
-          particle_type=True, convert_function=_convertParticleMassMsun)
+KnownRAMSESFields["all", "particle_mass"]._units = r"\mathrm{g}"
 
 def _Temperature(field, data):
     rv = data["Pressure"]/data["Density"]
@@ -136,49 +130,6 @@
     return rv
 add_field("Temperature", function=_Temperature, units=r"\rm{K}")
 
-
-# We now set up a couple particle fields.  This should eventually be abstracted
-# into a single particle field function that adds them all on and is used
-# across frontends, but that will need to wait until moving to using
-# Coordinates, or vector fields.
-
-def particle_count(field, data):
-    pos = np.column_stack([data["particle_position_%s" % ax] for ax in 'xyz'])
-    d = data.deposit(pos, method = "count")
-    return d
-RAMSESFieldInfo.add_field(("deposit", "%s_count" % "all"),
-         function = particle_count,
-         validators = [ValidateSpatial()],
-         display_name = "\\mathrm{%s Count}" % "all",
-         projection_conversion = '1')
-
-def particle_mass(field, data):
-    pos = np.column_stack([data["particle_position_%s" % ax] for ax in 'xyz'])
-    d = data.deposit(pos, [data["ParticleMass"]], method = "sum")
-    return d
-
-RAMSESFieldInfo.add_field(("deposit", "%s_mass" % "all"),
-         function = particle_mass,
-         validators = [ValidateSpatial()],
-         display_name = "\\mathrm{%s Mass}" % "all",
-         units = r"\mathrm{g}",
-         projected_units = r"\mathrm{g}\/\mathrm{cm}",
-         projection_conversion = 'cm')
-
-def particle_density(field, data):
-    pos = np.column_stack([data["particle_position_%s" % ax] for ax in 'xyz'])
-    d = data.deposit(pos, [data["ParticleMass"]], method = "sum")
-    d /= data["CellVolume"]
-    return d
-
-RAMSESFieldInfo.add_field(("deposit", "%s_density" % "all"),
-         function = particle_density,
-         validators = [ValidateSpatial()],
-         display_name = "\\mathrm{%s Density}" % "all",
-         units = r"\mathrm{g}/\mathrm{cm}^{3}",
-         projected_units = r"\mathrm{g}/\mathrm{cm}^{-2}",
-         projection_conversion = 'cm')
-
 # We'll add a bunch of species fields here.  In the not too distant future,
 # we'll be moving all of these to a unified field location, so they can be
 # shared between various frontends.
@@ -247,3 +198,10 @@
                   function=_SpeciesNumberDensity,
                   convert_function=_ConvertNumberDensity,
                   validators=ValidateDataField("%s_Density" % species))
+
+# PARTICLE FIELDS
+particle_vector_functions("all", ["particle_position_%s" % ax for ax in 'xyz'],
+                                 ["particle_velocity_%s" % ax for ax in 'xyz'],
+                          RAMSESFieldInfo)
+particle_deposition_functions("all", "Coordinates", "particle_mass",
+                               RAMSESFieldInfo)

diff -r 27f5b715b5cf07575fe4c42bef402e9f9039f798 -r 358f9ca489417c9a0800ec302e6729be9ee34c76 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -86,7 +86,7 @@
                 for field in fields:
                     ti = selection.pop(field)[mask]
                     if field not in tr:
-                        dt = subset.domain.particle_field_types[field[1]]
+                        dt = subset.domain.particle_field_types[field]
                         tr[field] = np.empty(size, dt)
                     tr[field][pos:pos+count] = ti
                 pos += count
@@ -98,7 +98,7 @@
         tr = {}
         #for field in sorted(fields, key=lambda a:foffsets[a]):
         for field in fields:
-            f.seek(foffsets[field[1]])
-            dt = subset.domain.particle_field_types[field[1]]
+            f.seek(foffsets[field])
+            dt = subset.domain.particle_field_types[field]
             tr[field] = fpu.read_vector(f, dt)
         return tr
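
The hunks above finish moving RAMSES particle IO to keys of the form (particle type, field name): offsets and dtypes are stored per ("all", field) tuple, so the reader can index with the full field tuple instead of unpacking field[1]. A minimal sketch of the convention (the offsets and dtypes here are placeholders, not real RAMSES values):

    # Offsets and dtypes keyed by the ("all", field) tuple.
    field_offsets = {}
    field_types = {}
    for name, dtype in [("particle_mass", "d"), ("particle_velocity_x", "d")]:
        field_offsets["all", name] = 0   # placeholder file offset
        field_types["all", name] = dtype

    field = ("all", "particle_mass")
    print(field_offsets[field])  # looked up with the tuple directly
    print(field_types[field])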


https://bitbucket.org/yt_analysis/yt/commits/00a361746464/
Changeset:   00a361746464
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-11 17:10:28
Summary:     Convert SPH to use the particle field registry.
Affected #:  1 file

diff -r 358f9ca489417c9a0800ec302e6729be9ee34c76 -r 00a361746464f7adf59927eb04fbbc49bb145d45 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -37,6 +37,9 @@
     NullFunc, \
     TranslationFunc
 import yt.data_objects.universal_fields
+from yt.data_objects.particle_fields import \
+    particle_deposition_functions, \
+    particle_scalar_functions
 from yt.utilities.physical_constants import \
     mass_sun_cgs
 
@@ -58,98 +61,6 @@
 KnownTipsyFields = FieldInfoContainer()
 add_tipsy_field = KnownTipsyFields.add_field
 
-def _particle_functions(ptype, coord_name, mass_name, registry):
-    def particle_count(field, data):
-        pos = data[ptype, coord_name]
-        d = data.deposit(pos, method = "count")
-        return d
-    registry.add_field(("deposit", "%s_count" % ptype),
-             function = particle_count,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Count}" % ptype,
-             projection_conversion = '1')
-
-    def particle_mass(field, data):
-        pos = data[ptype, coord_name]
-        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
-        return d
-
-    registry.add_field(("deposit", "%s_mass" % ptype),
-             function = particle_mass,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Mass}" % ptype,
-             units = r"\mathrm{g}",
-             projected_units = r"\mathrm{g}\/\mathrm{cm}",
-             projection_conversion = 'cm')
-
-    def particle_density(field, data):
-        pos = data[ptype, coord_name]
-        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
-        d /= data["CellVolume"]
-        return d
-
-    registry.add_field(("deposit", "%s_density" % ptype),
-             function = particle_density,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Density}" % ptype,
-             units = r"\mathrm{g}/\mathrm{cm}^{3}",
-             projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
-             projection_conversion = 'cm')
-
-    def particle_cic(field, data):
-        pos = data[ptype, coord_name]
-        d = data.deposit(pos, [data[ptype, mass_name]], method = "cic")
-        d /= data["CellVolume"]
-        return d
-
-    registry.add_field(("deposit", "%s_cic" % ptype),
-             function = particle_cic,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s CIC Density}" % ptype,
-             units = r"\mathrm{g}/\mathrm{cm}^{3}",
-             projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
-             projection_conversion = 'cm')
-
-    # Now some translation functions.
-
-    registry.add_field((ptype, "ParticleMass"),
-            function = TranslationFunc((ptype, mass_name)),
-            particle_type = True,
-            units = r"\mathrm{g}")
-
-    def _ParticleMassMsun(field, data):
-        return data[ptype, mass_name].copy()
-    def _conv_Msun(data):
-        return 1.0/mass_sun_cgs
-
-    registry.add_field((ptype, "ParticleMassMsun"),
-            function = _ParticleMassMsun,
-            convert_function = _conv_Msun,
-            particle_type = True,
-            units = r"\mathrm{M}_\odot")
-
-    # For 'all', which is a special field, we skip adding a few types.
-    
-    if ptype == "all": return
-
-    # Now we have to set up the various velocity and coordinate things.  In the
-    # future, we'll actually invert this and use the 3-component items
-    # elsewhere, and stop using these.
-    
-    # Note that we pass in _ptype here so that it's defined inside the closure.
-    def _get_coord_funcs(axi, _ptype):
-        def _particle_velocity(field, data):
-            return data[_ptype, "Velocities"][:,axi]
-        def _particle_position(field, data):
-            return data[_ptype, "Coordinates"][:,axi]
-        return _particle_velocity, _particle_position
-    for axi, ax in enumerate("xyz"):
-        v, p = _get_coord_funcs(axi, ptype)
-        registry.add_field((ptype, "particle_velocity_%s" % ax),
-            particle_type = True, function = v)
-        registry.add_field((ptype, "particle_position_%s" % ax),
-            particle_type = True, function = p)
-
 # Here are helper functions for things like vector fields and so on.
 
 def _get_conv(cf):
@@ -191,8 +102,11 @@
         units = r"\mathrm{cm}/\mathrm{s}")
     # Note that we have to do this last so that TranslationFunc operates
     # correctly.
-    _particle_functions(ptype, "Coordinates", "Mass", TipsyFieldInfo)
-_particle_functions("all", "Coordinates", "Mass", TipsyFieldInfo)
+    particle_deposition_functions(ptype, "Coordinates", "Mass",
+                                  TipsyFieldInfo)
+    particle_scalar_functions(ptype, "Coordinates", "Velocities",
+                              TipsyFieldInfo)
+particle_deposition_functions("all", "Coordinates", "Mass", TipsyFieldInfo)
 
 for fname in ["Coordinates", "Velocities", "ParticleIDs", "Mass",
               "Epsilon", "Phi"]:
@@ -246,10 +160,11 @@
         particle_type = True,
         convert_function=_get_conv("velocity"),
         units = r"\mathrm{cm}/\mathrm{s}")
-    _particle_functions(ptype, "Coordinates", "Mass", GadgetFieldInfo)
+    particle_deposition_functions(ptype, "Coordinates", "Mass", GadgetFieldInfo)
+    particle_scalar_functions(ptype, "Coordinates", "Velocities", GadgetFieldInfo)
     KnownGadgetFields.add_field((ptype, "Coordinates"), function=NullFunc,
         particle_type = True)
-_particle_functions("all", "Coordinates", "Mass", GadgetFieldInfo)
+particle_deposition_functions("all", "Coordinates", "Mass", GadgetFieldInfo)
 
 # Now we have to manually apply the splits for "all", since we don't want to
 # use the splits defined above.
@@ -304,10 +219,11 @@
         particle_type = True,
         convert_function=_get_conv("velocity"),
         units = r"\mathrm{cm}/\mathrm{s}")
-    _particle_functions(ptype, "Coordinates", "Mass", OWLSFieldInfo)
+    particle_deposition_functions(ptype, "Coordinates", "Mass", OWLSFieldInfo)
+    particle_scalar_functions(ptype, "Coordinates", "Velocities", OWLSFieldInfo)
     KnownOWLSFields.add_field((ptype, "Coordinates"), function=NullFunc,
         particle_type = True)
-_particle_functions("all", "Coordinates", "Mass", OWLSFieldInfo)
+particle_deposition_functions("all", "Coordinates", "Mass", OWLSFieldInfo)
 
 # Now we have to manually apply the splits for "all", since we don't want to
 # use the splits defined above.
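
The removed _particle_functions helper now lives in yt.data_objects.particle_fields as particle_deposition_functions and particle_scalar_functions, shared across frontends. A toy sketch of the closure pattern those helpers rely on; the plain dict here stands in for the real FieldInfoContainer registry, so treat the names as illustrative:

    def make_deposit_fields(ptype, coord_name, mass_name, registry):
        # Each generated function closes over ptype, so a single helper can
        # stamp out per-type deposit fields for every particle type.
        def particle_count(field, data):
            pos = data[ptype, coord_name]
            return data.deposit(pos, method="count")
        registry[("deposit", "%s_count" % ptype)] = particle_count

        def particle_mass(field, data):
            pos = data[ptype, coord_name]
            return data.deposit(pos, [data[ptype, mass_name]], method="sum")
        registry[("deposit", "%s_mass" % ptype)] = particle_mass

    registry = {}
    for ptype in ("Gas", "Stars", "all"):
        make_deposit_fields(ptype, "Coordinates", "Mass", registry)
    print(sorted(registry))  # one count and one mass deposit field per type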


https://bitbucket.org/yt_analysis/yt/commits/bf2d02dc1963/
Changeset:   bf2d02dc1963
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-11 17:14:56
Summary:     Missed mh import in RAMSES fields.
Affected #:  1 file

diff -r 3565d03875fed7118299145e6c2b9480a7e90ecb -r bf2d02dc1963e41d795e85f594949619fed44101 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -37,7 +37,8 @@
 from yt.utilities.physical_constants import \
     boltzmann_constant_cgs, \
     mass_hydrogen_cgs, \
-    mass_sun_cgs
+    mass_sun_cgs, \
+    mh
 import numpy as np
 
 RAMSESFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo, "RFI")


https://bitbucket.org/yt_analysis/yt/commits/1547cd15d3f4/
Changeset:   1547cd15d3f4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-11 19:56:27
Summary:     Adding first pass at parallel_ring iterator.
Affected #:  1 file

diff -r 00a361746464f7adf59927eb04fbbc49bb145d45 -r 1547cd15d3f47456071fcd44d62b61adec86338a yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -458,6 +458,44 @@
     if barrier:
         my_communicator.barrier()
 
+def parallel_ring(objects, generator_func, mutable = False):
+    # Mutable governs whether or not we will be modifying the arrays, or
+    # whether they are identical to generator_func(obj)
+    if mutable: raise NotImplementedError
+    my_comm = communication_system.communicators[-1]
+    my_size = my_comm.size
+    my_rank = my_comm.rank # This will also be the first object we access
+    if not parallel_capable and not mutable:
+        for obj in objects:
+            yield generator_func(obj)
+        return
+    if len(objects) != my_size or mutable:
+        raise NotImplementedError
+    generate_endpoints = len(objects) > my_size
+    # Now we need to do pairwise sends
+    source = (my_rank - 1) % my_size
+    dest = (my_rank + 1) % my_size
+    oiter = itertools.islice(itertools.cycle(objects),
+                             my_rank, my_rank+len(objects))
+    idata = None
+    isize = np.zeros((1,), dtype="int64")
+    osize = np.zeros((1,), dtype="int64")
+    for obj in oiter:
+        if generate_endpoints and my_rank in (0, my_size) or idata is None:
+            idata = generator_func(obj)
+        yield obj, idata
+        # We first send to the previous processor
+        osize[0] = idata.size
+        t1 = my_comm.mpi_nonblocking_recv(isize, source)
+        t2 = my_comm.mpi_nonblocking_send(osize, dest)
+        my_comm.mpi_Request_Waitall([t1, t2])
+        odata = idata
+        idata = np.empty(isize[0], dtype=odata.dtype)
+        t3 = my_comm.mpi_nonblocking_send(odata, dest, dtype=odata.dtype)
+        t4 = my_comm.mpi_nonblocking_recv(idata, source, dtype=odata.dtype)
+        my_comm.mpi_Request_Waitall([t3, t4])
+        del odata
+
 class CommunicationSystem(object):
     communicators = []
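
For reference, the ring passes each payload one rank forward per iteration, so after my_size - 1 exchanges every rank has seen every payload exactly once without re-reading it from disk. The neighbor arithmetic, restated standalone:

    def ring_neighbors(my_rank, my_size):
        source = (my_rank - 1) % my_size  # rank we receive from
        dest = (my_rank + 1) % my_size    # rank we send to
        return source, dest

    for rank in range(4):
        print(rank, ring_neighbors(rank, 4))  # e.g. rank 0 recvs from 3, sends to 1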
 


https://bitbucket.org/yt_analysis/yt/commits/29f50d773524/
Changeset:   29f50d773524
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-11 20:08:45
Summary:     Enable parallel_ring to use non-standard dtypes.
Affected #:  1 file

diff -r 1547cd15d3f47456071fcd44d62b61adec86338a -r 29f50d773524887654280e1b0f6cfa572bd71c35 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -60,7 +60,8 @@
             float32 = MPI.FLOAT,
             float64 = MPI.DOUBLE,
             int32   = MPI.INT,
-            int64   = MPI.LONG
+            int64   = MPI.LONG,
+            c       = MPI.CHAR,
     )
     op_names = dict(
         sum = MPI.SUM,
@@ -73,7 +74,8 @@
             float32 = "MPI.FLOAT",
             float64 = "MPI.DOUBLE",
             int32   = "MPI.INT",
-            int64   = "MPI.LONG"
+            int64   = "MPI.LONG",
+            c       = "MPI.CHAR",
     )
     op_names = dict(
             sum = "MPI.SUM",
@@ -483,6 +485,10 @@
     for obj in oiter:
         if generate_endpoints and my_rank in (0, my_size) or idata is None:
             idata = generator_func(obj)
+            idtype = odtype = get_mpi_type(idata.dtype)
+            if idtype is None:
+                idtype = 'c'
+                odtype = idata.dtype
         yield obj, idata
         # We first send to the previous processor
         osize[0] = idata.size
@@ -490,9 +496,9 @@
         t2 = my_comm.mpi_nonblocking_send(osize, dest)
         my_comm.mpi_Request_Waitall([t1, t2])
         odata = idata
-        idata = np.empty(isize[0], dtype=odata.dtype)
-        t3 = my_comm.mpi_nonblocking_send(odata, dest, dtype=odata.dtype)
-        t4 = my_comm.mpi_nonblocking_recv(idata, source, dtype=odata.dtype)
+        idata = np.empty(isize[0], dtype=odtype)
+        t3 = my_comm.mpi_nonblocking_send(odata.view(idtype), dest, dtype=idtype)
+        t4 = my_comm.mpi_nonblocking_recv(idata.view(idtype), source, dtype=idtype)
         my_comm.mpi_Request_Waitall([t3, t4])
         del odata
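
The trick being added: a dtype MPI does not know about is shipped as raw MPI.CHAR bytes and reinterpreted on arrival. A sketch of the numpy view round-trip the send/recv above relies on, outside of MPI (assuming an unpadded structured dtype):

    import numpy as np

    dt = np.dtype([('x', 'float64'), ('y', 'float64'), ('z', 'float64')])
    arr = np.zeros(4, dtype=dt)
    raw = arr.view('c')        # same buffer, reinterpreted as char-sized items
    restored = raw.view(dt)    # the receiver views the bytes back as dt
    assert restored.dtype == dt and restored.size == arr.size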
 


https://bitbucket.org/yt_analysis/yt/commits/5c72cf16a741/
Changeset:   5c72cf16a741
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-11 20:14:22
Summary:     Few more dtype check fixes.
Affected #:  1 file

diff -r 29f50d773524887654280e1b0f6cfa572bd71c35 -r 5c72cf16a741c271f1bc357390d227d573dc5e0e yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -485,10 +485,9 @@
     for obj in oiter:
         if generate_endpoints and my_rank in (0, my_size) or idata is None:
             idata = generator_func(obj)
-            idtype = odtype = get_mpi_type(idata.dtype)
-            if idtype is None:
+            idtype = odtype = idata.dtype
+            if get_mpi_type(idtype) is None:
                 idtype = 'c'
-                odtype = idata.dtype
         yield obj, idata
         # We first send to the previous processor
         osize[0] = idata.size


https://bitbucket.org/yt_analysis/yt/commits/9e49ef42526e/
Changeset:   9e49ef42526e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-11 20:36:58
Summary:     Implementing non-mutable storage for nobjects != my_size.
Affected #:  1 file

diff -r 5c72cf16a741c271f1bc357390d227d573dc5e0e -r 9e49ef42526eac7ee233c6161996328219a9f4a9 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -471,9 +471,11 @@
         for obj in objects:
             yield generator_func(obj)
         return
-    if len(objects) != my_size or mutable:
+    generate_endpoints = len(objects) != my_size
+    if generate_endpoints and mutable:
         raise NotImplementedError
-    generate_endpoints = len(objects) > my_size
+    gforw = generate_endpoints and my_rank == 0
+    gback = generate_endpoints and my_rank == my_size - 1
     # Now we need to do pairwise sends
     source = (my_rank - 1) % my_size
     dest = (my_rank + 1) % my_size
@@ -483,22 +485,30 @@
     isize = np.zeros((1,), dtype="int64")
     osize = np.zeros((1,), dtype="int64")
     for obj in oiter:
-        if generate_endpoints and my_rank in (0, my_size) or idata is None:
+        if idata is None or gforw:
             idata = generator_func(obj)
             idtype = odtype = idata.dtype
             if get_mpi_type(idtype) is None:
                 idtype = 'c'
         yield obj, idata
         # We first send to the previous processor
-        osize[0] = idata.size
-        t1 = my_comm.mpi_nonblocking_recv(isize, source)
-        t2 = my_comm.mpi_nonblocking_send(osize, dest)
-        my_comm.mpi_Request_Waitall([t1, t2])
+        tags = []
+        if not gforw:
+            tags.append(my_comm.mpi_nonblocking_recv(isize, source))
+        if not gback:
+            osize[0] = idata.size
+            tags.append(my_comm.mpi_nonblocking_send(osize, dest))
+        my_comm.mpi_Request_Waitall(tags)
         odata = idata
-        idata = np.empty(isize[0], dtype=odtype)
-        t3 = my_comm.mpi_nonblocking_send(odata.view(idtype), dest, dtype=idtype)
-        t4 = my_comm.mpi_nonblocking_recv(idata.view(idtype), source, dtype=idtype)
-        my_comm.mpi_Request_Waitall([t3, t4])
+        tags = []
+        if not gforw:
+            idata = np.empty(isize[0], dtype=odtype)
+            tags.append(my_comm.mpi_nonblocking_recv(
+                          idata.view(idtype), source, dtype=idtype))
+        if not gback:
+            tags.append(my_comm.mpi_nonblocking_send(
+                          odata.view(idtype), dest, dtype=idtype))
+        my_comm.mpi_Request_Waitall(tags)
         del odata
 
 class CommunicationSystem(object):
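
When the object count differs from the communicator size, the ring opens into a chain: rank 0 regenerates each payload rather than receiving one (gforw), and the last rank drops its payload rather than forwarding it (gback). The bookkeeping, restated on its own:

    def endpoint_flags(my_rank, my_size, n_objects):
        generate_endpoints = n_objects != my_size
        gforw = generate_endpoints and my_rank == 0            # regenerate, skip recv
        gback = generate_endpoints and my_rank == my_size - 1  # drop, skip send
        return gforw, gback

    print(endpoint_flags(0, 4, 6))  # (True, False)
    print(endpoint_flags(3, 4, 6))  # (False, True)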


https://bitbucket.org/yt_analysis/yt/commits/b8b862125837/
Changeset:   b8b862125837
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-11 20:43:18
Summary:     Adding a docstring to parallel_ring and fixing a bug in non-parallel calls.
Affected #:  1 file

diff -r 9e49ef42526eac7ee233c6161996328219a9f4a9 -r b8b8621258376d8a8e694fb58358f5d381639382 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -461,15 +461,65 @@
         my_communicator.barrier()
 
 def parallel_ring(objects, generator_func, mutable = False):
-    # Mutable governs whether or not we will be modifying the arrays, or
-    # whether they are identical to generator_func(obj)
+    r"""This function loops in a ring around a set of objects, yielding the
+    results of generator_func and passing from one processor to another to
+    avoid IO or expensive computation.
+
+    This function is designed to operate in sequence on a set of objects, where
+    the creation of those objects might be expensive.  For instance, this could
+    be a set of particles that are costly to read from disk.  Processor N will
+    run generator_func on an object, and the results of that will both be
+    yielded and passed to processor N-1.  If the length of the objects is not
+    equal to the number of processors, then the final processor in the top
+    communicator will re-generate the data as needed.
+
+    In all likelihood, this function will only be useful internally to yt.
+
+    Parameters
+    ----------
+    objects : iterable
+        The list of objects to operate on.
+    generator_func : callable
+        This function will be called on each object, and the results yielded.
+        It must return a single NumPy array; for multiple values, it needs to
+        have a custom dtype.
+    mutable : bool
+        Should the arrays be considered mutable?  Currently, this will only
+        work if the number of processors equals the number of objects.
+
+
+    Examples
+    --------
+    Here is a simple example of a ring loop around a set of integers, with a
+    custom dtype.
+
+    >>> dt = numpy.dtype([('x', 'float64'), ('y', 'float64'), ('z', 'float64')])
+    >>> def gfunc(o):
+    ...     numpy.random.seed(o)
+    ...     rv = numpy.empty(1000, dtype=dt)
+    ...     rv['x'] = numpy.random.random(1000)
+    ...     rv['y'] = numpy.random.random(1000)
+    ...     rv['z'] = numpy.random.random(1000)
+    ...     return rv
+    ...
+    >>> obj = range(8)
+    >>> for obj, arr in parallel_ring(obj, gfunc):
+    ...     print arr['x'].sum(), arr['y'].sum(), arr['z'].sum()
+    ...
+
+    """
     if mutable: raise NotImplementedError
     my_comm = communication_system.communicators[-1]
     my_size = my_comm.size
     my_rank = my_comm.rank # This will also be the first object we access
     if not parallel_capable and not mutable:
         for obj in objects:
-            yield generator_func(obj)
+            yield obj, generator_func(obj)
         return
     generate_endpoints = len(objects) != my_size
     if generate_endpoints and mutable:


https://bitbucket.org/yt_analysis/yt/commits/d07242c7c90b/
Changeset:   d07242c7c90b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-12 00:17:16
Summary:     Adding a better comment and fixing a typo.
Affected #:  1 file

diff -r 00a361746464f7adf59927eb04fbbc49bb145d45 -r d07242c7c90beb454362e7779ce2ae0790f7a69b yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -492,7 +492,11 @@
             field_list = None
         field_list = self.comm.mpi_bcast(field_list)
         self.field_list = []
-        # Now we will, avoiding the problem of particle types not having names.
+        # Now we will iterate over all fields, trying to avoid the problem of
+        # particle types not having names.  This should convert all known
+        # particle fields that exist in Enzo outputs into the construction
+        # ("all", field) and should not otherwise affect ActiveParticle
+        # simulations.
         for field in field_list:
             if ("all", field) in KnownEnzoFields:
                 self.field_list.append(("all", field))


https://bitbucket.org/yt_analysis/yt/commits/61b473b27a55/
Changeset:   61b473b27a55
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-12 04:30:31
Summary:     Adding Morton indices function.
Affected #:  1 file

diff -r b8b8621258376d8a8e694fb58358f5d381639382 -r 61b473b27a558774070d4e00a450a2492c6e1398 yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -300,6 +300,40 @@
             positions[i, j] = p[j]
     return positions
 
+cdef np.uint64_t _const20 = 0x000001FFC00003FF
+cdef np.uint64_t _const10 = 0x0007E007C00F801F
+cdef np.uint64_t _const04 = 0x00786070C0E181C3
+cdef np.uint64_t _const2a = 0x0199219243248649
+cdef np.uint64_t _const2b = 0x0649249249249249
+cdef np.uint64_t _const2c = 0x1249249249249249
+
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef inline np.uint64_t spread_bits(np.uint64_t x):
+    # This magic comes from http://stackoverflow.com/questions/1024754/how-to-compute-a-3d-morton-number-interleave-the-bits-of-3-ints
+    x=(x|(x<<20))&_const20
+    x=(x|(x<<10))&_const10
+    x=(x|(x<<4))&_const04
+    x=(x|(x<<2))&_const2a
+    x=(x|(x<<2))&_const2b
+    x=(x|(x<<2))&_const2c
+    return x
+
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def get_morton_indices(np.ndarray[np.int64_t, ndim=2] left_index):
+    cdef np.int64_t i, mi
+    cdef np.ndarray[np.uint64_t, ndim=1] morton_indices
+    morton_indices = np.zeros(left_index.shape[0], 'uint64')
+    for i in range(left_index.shape[0]):
+        mi = 0
+        mi |= spread_bits(left_index[i,0])<<0
+        mi |= spread_bits(left_index[i,1])<<1
+        mi |= spread_bits(left_index[i,2])<<2
+        morton_indices[i] = mi
+    return morton_indices
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
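
A pure-Python transcription of the interleave may make the bit magic easier to follow: spread_bits spaces the low bits of one coordinate three positions apart, and the Morton index ORs the three spread coordinates into successive bits of each triple. Written for illustration only; the compiled Cython above is the real implementation:

    def spread_bits(x):
        # Same magic constants as the Cython version.
        x = (x | (x << 20)) & 0x000001FFC00003FF
        x = (x | (x << 10)) & 0x0007E007C00F801F
        x = (x | (x << 4))  & 0x00786070C0E181C3
        x = (x | (x << 2))  & 0x0199219243248649
        x = (x | (x << 2))  & 0x0649249249249249
        x = (x | (x << 2))  & 0x1249249249249249
        return x

    def morton_index(i, j, k):
        return spread_bits(i) | (spread_bits(j) << 1) | (spread_bits(k) << 2)

    print(morton_index(1, 0, 0), morton_index(0, 1, 0), morton_index(0, 0, 1))
    # -> 1 2 4: each coordinate owns one bit of every 3-bit triple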


https://bitbucket.org/yt_analysis/yt/commits/67ee5eb29734/
Changeset:   67ee5eb29734
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-12 22:15:18
Summary:     Removing ParticleArrays object.  Converting ParticleOctrees to use Morton indices.

This is a major change, and one that is not fully baked; additional changes
will need to be made to the particle frontends themselves, where we need to
convert to generating the Morton index, using a Barnes-Hut type indexing for
the particles themselves, and then doing IO in a different way based on that.

The octree generated for the OWLS dataset is identical to the one generated
previously.
Affected #:  2 files

diff -r 61b473b27a558774070d4e00a450a2492c6e1398 -r 67ee5eb297349e6e874cac0db007020dfa1aab08 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -26,8 +26,6 @@
 cimport numpy as np
 from fp_utils cimport *
 
-cdef struct ParticleArrays
-
 cdef struct Oct
 cdef struct Oct:
     np.int64_t file_ind     # index with respect to the order in which it was
@@ -39,7 +37,6 @@
     np.int64_t domain       # (opt) addl int index
     np.int64_t pos[3]       # position in ints
     np.int8_t level
-    ParticleArrays *sd
     Oct *children[2][2][2]
     Oct *parent
 
@@ -73,9 +70,3 @@
     cdef OctAllocationContainer **domains
     cdef Oct *next_root(self, int domain_id, int ind[3])
     cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
-
-cdef struct ParticleArrays:
-    Oct *oct
-    ParticleArrays *next
-    np.float64_t **pos
-    np.int64_t np

diff -r 61b473b27a558774070d4e00a450a2492c6e1398 -r 67ee5eb297349e6e874cac0db007020dfa1aab08 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -33,6 +33,8 @@
 from selection_routines cimport SelectorObject
 cimport cython
 
+DEF ORDER_MAX=20
+
 cdef extern from "stdlib.h":
     # NOTE that size_t might not be int
     void *alloca(int)
@@ -815,11 +817,6 @@
     elif o1.domain > o2.domain: return 1
 
 cdef class ParticleOctreeContainer(OctreeContainer):
-    #Each ParticleArrays contains an Oct
-    #a reference to the next ParticleArrays
-    #its index and the number of particles 
-    cdef ParticleArrays *first_sd
-    cdef ParticleArrays *last_sd
     cdef Oct** oct_list
     #The starting oct index of each domain
     cdef np.int64_t *dom_offsets 
@@ -860,11 +857,6 @@
                 for k in range(2):
                     if o.children[i][j][k] == NULL: continue
                     self.visit_free(o.children[i][j][k])
-        if o.sd.np >= 0:
-            if o.sd.pos != NULL:
-                for i in range(3):
-                    free(o.sd.pos[i])
-                free(o.sd.pos)
         free(o)
 
     def __iter__(self):
@@ -984,36 +976,29 @@
         #every domain
         cdef int max_level = 0
         self.oct_list = <Oct**> malloc(sizeof(Oct*)*self.nocts)
-        cdef np.int64_t i = 0
-        cdef np.int64_t dom_ind
-        cdef ParticleArrays *c = self.first_sd
-        while c != NULL:
-            self.oct_list[i] = c.oct
-            max_level = imax(max_level, c.oct.level)
-            if c.np >= 0:
-                for j in range(3):
-                    free(c.pos[j])
-                free(c.pos)
-                c.pos = NULL
-            c = c.next
-            i += 1
+        cdef np.int64_t i = 0, lpos = 0
         self.max_level = max_level
-        qsort(self.oct_list, self.nocts, sizeof(Oct*), &compare_octs)
         cdef int cur_dom = -1
         # We always need at least 2, and if max_domain is 0, we need 3.
-        self.dom_offsets = <np.int64_t *>malloc(sizeof(np.int64_t) *
-                                                (self.max_domain + 3))
-        self.dom_offsets[0] = 0
-        dom_ind = 0
+        for i in range(self.nn[0]):
+            for j in range(self.nn[1]):
+                for k in range(self.nn[2]):
+                    self.visit_assign(self.root_mesh[i][j][k], &lpos)
+        assert(lpos == self.nocts)
         for i in range(self.nocts):
             self.oct_list[i].domain_ind = i
-            self.oct_list[i].file_ind = dom_ind
-            dom_ind += 1
-            if self.oct_list[i].domain > cur_dom:
-                cur_dom = self.oct_list[i].domain
-                self.dom_offsets[cur_dom + 1] = i
-                dom_ind = 0
-        self.dom_offsets[cur_dom + 2] = self.nocts
+            self.oct_list[i].file_ind = -1
+
+    cdef visit_assign(self, Oct *o, np.int64_t *lpos):
+        cdef int i, j, k
+        self.oct_list[lpos[0]] = o
+        lpos[0] += 1
+        for i in range(2):
+            for j in range(2):
+                for k in range(2):
+                    if o.children[i][j][k] != NULL:
+                        self.visit_assign(o.children[i][j][k], lpos)
+        return
 
     cdef np.int64_t get_domain_offset(self, int domain_id):
         return self.dom_offsets[domain_id + 1]
@@ -1024,45 +1009,19 @@
         #track of how many are used with np initially 0
         self.nocts += 1
         cdef Oct *my_oct = <Oct*> malloc(sizeof(Oct))
-        cdef ParticleArrays *sd = <ParticleArrays*> \
-            malloc(sizeof(ParticleArrays))
         cdef int i, j, k
-        my_oct.file_ind = my_oct.domain = -1
+        my_oct.domain = -1
+        my_oct.file_ind = 0
         my_oct.domain_ind = self.nocts - 1
         my_oct.pos[0] = my_oct.pos[1] = my_oct.pos[2] = -1
         my_oct.level = -1
-        my_oct.sd = sd
         for i in range(2):
             for j in range(2):
                 for k in range(2):
                     my_oct.children[i][j][k] = NULL
         my_oct.parent = NULL
-        if self.first_sd == NULL:
-            self.first_sd = sd
-        if self.last_sd != NULL:
-            self.last_sd.next = sd
-        self.last_sd = sd
-        sd.oct = my_oct
-        sd.next = NULL
-        sd.pos = <np.float64_t **> malloc(sizeof(np.float64_t*) * 3)
-        for i in range(3):
-            sd.pos[i] = <np.float64_t *> malloc(sizeof(np.float64_t) * self.n_ref)
-        for i in range(self.n_ref):
-            sd.pos[0][i] = sd.pos[1][i] = sd.pos[2][i] = 0.0
-        sd.np = 0
         return my_oct
 
-    def linearly_count(self):
-        #Without visiting oct and cells
-        #jump from particle arrays to the next one
-        #counting the total # of particles en route
-        cdef np.int64_t total = 0
-        cdef ParticleArrays *c = self.first_sd
-        while c != NULL:
-            total += 1
-            c = c.next
-        return total
-
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -1083,105 +1042,73 @@
                 level_count[o.level] += 1
         return level_count
 
-    def add(self, np.ndarray[np.float64_t, ndim=2] pos, np.int64_t domain_id):
+    def add(self, np.ndarray[np.uint64_t, ndim=1] indices, np.int64_t domain_id):
         #Add this particle to the root oct
         #Then if that oct has children, add it to them recursively
         #If the child needs to be refined because of max particles, do so
-        cdef int no = pos.shape[0]
-        cdef int p, i, level
-        cdef np.float64_t dds[3], cp[3], pp[3]
-        cdef int ind[3]
-        self.max_domain = max(self.max_domain, domain_id)
-        cdef int mid, mad
+        cdef np.int64_t no = indices.shape[0], p, index
+        cdef int i, level, ind[3]
         if self.root_mesh[0][0][0] == NULL: self.allocate_root()
+        cdef np.uint64_t *data = <np.uint64_t *> indices.data
         for p in range(no):
+            # We have morton indices, which means we choose left and right by
+            # looking at (MAX_ORDER - level) & with the values 1, 2, 8.
             level = 0
+            index = indices[p]
             for i in range(3):
-                #PP Calculate the unitary position, 
-                #DDS Domain dimensions
-                #IND Corresponding integer index on the root octs
-                #CP Center  point of that oct
-                pp[i] = pos[p, i]
-                dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
-                ind[i] = <np.int64_t> ((pp[i] - self.DLE[i])/dds[i])
-                cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
+                ind[i] = (index >> ((ORDER_MAX - level)*3 + (2 - i))) & 1
+                assert(ind[i] < self.nn[i])
             cur = self.root_mesh[ind[0]][ind[1]][ind[2]]
             if cur == NULL:
                 raise RuntimeError
-            if self._check_refine(cur, cp, domain_id) == 1:
-                self.refine_oct(cur, cp)
-            while cur.sd.np < 0:
-                if level > 100:
-                    raise RuntimeError
+            while cur.file_ind >= self.n_ref:
+                if level >= ORDER_MAX: break # Just dump it here.
+                level += 1
                 for i in range(3):
-                    dds[i] = dds[i] / 2.0
-                    if cp[i] > pp[i]:
-                        ind[i] = 0
-                        cp[i] -= dds[i] / 2.0
-                    else:
-                        ind[i] = 1
-                        cp[i] += dds[i]/2.0
+                    ind[i] = (index >> ((ORDER_MAX - level)*3 + (2 - i))) & 1
+                if cur.children[ind[0]][ind[1]][ind[2]] == NULL:
+                    self.refine_oct(cur, data, p)
                 cur = cur.children[ind[0]][ind[1]][ind[2]]
-                level += 1
-                if self._check_refine(cur, cp, domain_id) == 1:
-                    self.refine_oct(cur, cp)
-            # Now we copy in our particle 
-            cur.level = level
-            for i in range(3):
-                cur.sd.pos[i][cur.sd.np] = pp[i]
-            cur.domain = domain_id
-            cur.sd.np += 1
+            cur.file_ind += 1
 
-    cdef int _check_refine(self, Oct *cur, np.float64_t cp[3], int domain_id):
-        #Answers: should we refine this oct?
-        #False if refined, 
-        #False if not refined, but doesn't need refinement
-        #True if particles need refinement, 
-        #True if not in domain
-        if cur.children[0][0][0] != NULL:
-            return 0
-        elif cur.sd.np == 0:
-            return 0
-        elif cur.sd.np >= self.n_ref:
-            return 1
-        elif cur.domain >= 0 and cur.domain != domain_id:
-            return 1
-        return 0
-
-    cdef void refine_oct(self, Oct *o, np.float64_t pos[3]):
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void refine_oct(self, Oct *o, np.uint64_t *data, np.int64_t p):
         #Allocate and initialize child octs
         #Attach particles to child octs
         #Remove particles from this oct entirely
         cdef int i, j, k, m, ind[3]
         cdef Oct *noct
+        cdef np.uint64_t prefix2
         for i in range(2):
             for j in range(2):
                 for k in range(2):
                     noct = self.allocate_oct()
                     noct.domain = o.domain
+                    noct.file_ind = 0
                     noct.level = o.level + 1
                     noct.pos[0] = (o.pos[0] << 1) + i
                     noct.pos[1] = (o.pos[1] << 1) + j
                     noct.pos[2] = (o.pos[2] << 1) + k
                     noct.parent = o
                     o.children[i][j][k] = noct
-        for m in range(o.sd.np):
-            for i in range(3):
-                if o.sd.pos[i][m] < pos[i]:
-                    ind[i] = 0
-                else:
-                    ind[i] = 1
-            noct = o.children[ind[0]][ind[1]][ind[2]]
-            k = noct.sd.np
-            for i in range(3):
-                noct.sd.pos[i][k] = o.sd.pos[i][m]
-            noct.domain = o.domain
-            noct.sd.np += 1
-        o.sd.np = -1
-        o.domain = -1
+        o.file_ind = self.n_ref + 1
         for i in range(3):
-            free(o.sd.pos[i])
-        free(o.sd.pos)
+            ind[i] = (data[p] >> ((ORDER_MAX - (o.level + 1))*3 + (2 - i))) & 1
+        noct = o.children[ind[0]][ind[1]][ind[2]]
+        # Now we look at the last nref particles to decide where they go.
+        n = imin(p, self.n_ref)
+        cdef np.uint64_t *arr = data + imax(p - self.n_ref, 0)
+        # Now we figure out our prefix, which is the oct address at this level.
+        # As long as we're actually in Morton order, we do not need to worry
+        # about *any* of the other children of the oct.
+        prefix1 = data[p] >> (ORDER_MAX - noct.level)*3
+        for i in range(n):
+            prefix2 = arr[i] >> (ORDER_MAX - noct.level)*3
+            if (prefix1 == prefix2):
+                noct.file_ind += 1
+        #print ind[0], ind[1], ind[2], noct.file_ind, noct.level
 
     def recursively_count(self):
         #Visit every cell, accumulate the # of cells per level
@@ -1220,7 +1147,7 @@
         for oi in range(self.nocts):
             m = 0
             o = self.oct_list[oi]
-            if o.sd.np <= 0 or o.domain == -1: continue
+            #if o.sd.np <= 0 or o.domain == -1: continue
             for i in range(8):
                 if mask[oi, i] == 1:
                     m = 1
@@ -1232,24 +1159,6 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def count_neighbor_particles(self, oppos):
-        #How many particles are in my neighborhood
-        cdef int i, ni, dl, tnp
-        cdef np.float64_t ppos[3]
-        for i in range(3):
-            ppos[i] = oppos[i]
-        cdef Oct *main = self.get(ppos)
-        cdef Oct* neighbors[27]
-        self.neighbors(main, neighbors)
-        tnp = 0
-        for i in range(27):
-            if neighbors[i].sd != NULL:
-                tnp += neighbors[i].sd.np
-        return tnp
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
     def count_cells(self, SelectorObject selector,
               np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
         #Count how many cells per level there are
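
With Morton indices, descending the octree needs no floating-point midpoint tests: at each level the next 3-bit triple of the index names the child oct, which is exactly the shift-and-mask in add() and refine_oct() above. Restated standalone:

    ORDER_MAX = 20  # matches the DEF in oct_container.pyx

    def child_index(morton, level):
        # Same bit selection as the Cython code: one bit per dimension.
        shift = (ORDER_MAX - level) * 3
        return tuple((morton >> (shift + (2 - i))) & 1 for i in range(3))

    # An index whose level-1 triple is 0b101 descends into child (1, 0, 1).
    print(child_index(0b101 << 57, level=1))  # (1, 0, 1)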


https://bitbucket.org/yt_analysis/yt/commits/e77db2f9573c/
Changeset:   e77db2f9573c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-12 23:33:09
Summary:     Adding function to help with testing parallelism strategies.
Affected #:  1 file

diff -r 67ee5eb297349e6e874cac0db007020dfa1aab08 -r e77db2f9573cce9efe444e0d94a61190e7e769af yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -859,6 +859,23 @@
                     self.visit_free(o.children[i][j][k])
         free(o)
 
+    def clear_fileind(self):
+        cdef int i, j, k
+        for i in range(self.nn[0]):
+            for j in range(self.nn[1]):
+                for k in range(self.nn[2]):
+                    self.visit_clear(self.root_mesh[i][j][k])
+
+    cdef void visit_clear(self, Oct *o):
+        #Reset file_ind on this oct and its children recursively
+        cdef int i, j, k
+        o.file_ind = 0
+        for i in range(2):
+            for j in range(2):
+                for k in range(2):
+                    if o.children[i][j][k] == NULL: continue
+                    self.visit_clear(o.children[i][j][k])
+
     def __iter__(self):
         #Get the next oct, will traverse domains
         #Note that oct containers can be sorted 


https://bitbucket.org/yt_analysis/yt/commits/528783b895c8/
Changeset:   528783b895c8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-13 03:24:41
Summary:     Minor optimizations.
Affected #:  1 file

diff -r e77db2f9573cce9efe444e0d94a61190e7e769af -r 528783b895c83b00ae9a515722e4a5c2edfef15d yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -1059,6 +1059,9 @@
                 level_count[o.level] += 1
         return level_count
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def add(self, np.ndarray[np.uint64_t, ndim=1] indices, np.int64_t domain_id):
         #Add this particle to the root oct
         #Then if that oct has children, add it to them recursively
@@ -1069,12 +1072,11 @@
         cdef np.uint64_t *data = <np.uint64_t *> indices.data
         for p in range(no):
             # We have morton indices, which means we choose left and right by
-            # looking at (MAX_ORDER - level) & with the values 1, 2, 8.
+            # looking at (MAX_ORDER - level) & with the values 1, 2, 4.
             level = 0
             index = indices[p]
             for i in range(3):
                 ind[i] = (index >> ((ORDER_MAX - level)*3 + (2 - i))) & 1
-                assert(ind[i] < self.nn[i])
             cur = self.root_mesh[ind[0]][ind[1]][ind[2]]
             if cur == NULL:
                 raise RuntimeError
@@ -1095,9 +1097,9 @@
         #Allocate and initialize child octs
         #Attach particles to child octs
         #Remove particles from this oct entirely
-        cdef int i, j, k, m, ind[3]
+        cdef int i, j, k, m, n, ind[3]
         cdef Oct *noct
-        cdef np.uint64_t prefix2
+        cdef np.uint64_t prefix1, prefix2
         for i in range(2):
             for j in range(2):
                 for k in range(2):


https://bitbucket.org/yt_analysis/yt/commits/960000d1afa9/
Changeset:   960000d1afa9
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-13 03:55:04
Summary:     Resetting particle filtering now lets this work with NREF buffer particles.
Affected #:  1 file

diff -r 528783b895c83b00ae9a515722e4a5c2edfef15d -r 960000d1afa968b92400bb198f05d851416e8692 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -1080,20 +1080,26 @@
             cur = self.root_mesh[ind[0]][ind[1]][ind[2]]
             if cur == NULL:
                 raise RuntimeError
-            while cur.file_ind >= self.n_ref:
+            while (cur.file_ind + 1) > self.n_ref:
                 if level >= ORDER_MAX: break # Just dump it here.
                 level += 1
                 for i in range(3):
                     ind[i] = (index >> ((ORDER_MAX - level)*3 + (2 - i))) & 1
                 if cur.children[ind[0]][ind[1]][ind[2]] == NULL:
-                    self.refine_oct(cur, data, p)
-                cur = cur.children[ind[0]][ind[1]][ind[2]]
+                    cur = self.refine_oct(cur, index)
+                    self.filter_particles(cur, data, p)
+                else:
+                    cur = cur.children[ind[0]][ind[1]][ind[2]]
+                    if domain_id > 0:
+                        cur.file_ind = 0
+                        self.filter_particles(cur, data, p)
+            if p >= self.n_ref: domain_id = 0
             cur.file_ind += 1
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef void refine_oct(self, Oct *o, np.uint64_t *data, np.int64_t p):
+    cdef Oct *refine_oct(self, Oct *o, np.uint64_t index):
         #Allocate and initialize child octs
         #Attach particles to child octs
         #Remove particles from this oct entirely
@@ -1114,20 +1120,23 @@
                     o.children[i][j][k] = noct
         o.file_ind = self.n_ref + 1
         for i in range(3):
-            ind[i] = (data[p] >> ((ORDER_MAX - (o.level + 1))*3 + (2 - i))) & 1
+            ind[i] = (index >> ((ORDER_MAX - (o.level + 1))*3 + (2 - i))) & 1
         noct = o.children[ind[0]][ind[1]][ind[2]]
+        return noct
+
+    cdef void filter_particles(self, Oct *o, np.uint64_t *data, np.int64_t p):
         # Now we look at the last nref particles to decide where they go.
-        n = imin(p, self.n_ref)
+        cdef int n = imin(p, self.n_ref)
         cdef np.uint64_t *arr = data + imax(p - self.n_ref, 0)
         # Now we figure out our prefix, which is the oct address at this level.
         # As long as we're actually in Morton order, we do not need to worry
         # about *any* of the other children of the oct.
-        prefix1 = data[p] >> (ORDER_MAX - noct.level)*3
+        prefix1 = data[p] >> (ORDER_MAX - o.level)*3
         for i in range(n):
-            prefix2 = arr[i] >> (ORDER_MAX - noct.level)*3
+            prefix2 = arr[i] >> (ORDER_MAX - o.level)*3
             if (prefix1 == prefix2):
-                noct.file_ind += 1
-        #print ind[0], ind[1], ind[2], noct.file_ind, noct.level
+                o.file_ind += 1
+        #print ind[0], ind[1], ind[2], o.file_ind, o.level
 
     def recursively_count(self):
         #Visit every cell, accumulate the # of cells per level
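
filter_particles() leans on a property of Morton ordering: two particles occupy the same oct at a given level exactly when their indices agree above the low (ORDER_MAX - level)*3 bits, so only the trailing window of n_ref neighbors ever needs rechecking. The predicate on its own:

    ORDER_MAX = 20

    def same_oct(m1, m2, level):
        # Equal prefixes above the per-level bits means the same oct.
        shift = (ORDER_MAX - level) * 3
        return (m1 >> shift) == (m2 >> shift)

    m = 0b101 << 57                                 # some level-1 triple
    print(same_oct(m, m | (0b011 << 54), level=1))  # True: differs deeper down
    print(same_oct(m, 0b100 << 57, level=1))        # False: different level-1 oct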


https://bitbucket.org/yt_analysis/yt/commits/6f22f08f086a/
Changeset:   6f22f08f086a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-12 20:53:04
Summary:     Adding more info about skipping fields and whatnot inside RAMSES.
Affected #:  2 files

diff -r d07242c7c90beb454362e7779ce2ae0790f7a69b -r 6f22f08f086adad2954c0efecd766713008bc66e yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -91,6 +91,7 @@
         hydro_offset = np.zeros(n_levels, dtype='int64')
         hydro_offset -= 1
         level_count = np.zeros(n_levels, dtype='int64')
+        skipped = []
         for level in range(self.amr_header['nlevelmax']):
             for cpu in range(self.amr_header['nboundary'] +
                              self.amr_header['ncpu']):
@@ -101,13 +102,15 @@
                 except AssertionError:
                     print "You are running with the wrong number of fields."
                     print "Please specify these in the load command."
+                    print "We are looking for %s fields." % self.nvar
+                    print "The last set of field sizes was: %s" % skipped
                     raise
                 if hvals['file_ncache'] == 0: continue
                 assert(hvals['file_ilevel'] == level+1)
                 if cpu + 1 == self.domain_id and level >= min_level:
                     hydro_offset[level - min_level] = f.tell()
                     level_count[level - min_level] = hvals['file_ncache']
-                fpu.skip(f, 8 * self.nvar)
+                skipped = fpu.skip(f, 8 * self.nvar)
         self._hydro_offset = hydro_offset
         self._level_count = level_count
         return self._hydro_offset

diff -r d07242c7c90beb454362e7779ce2ae0790f7a69b -r 6f22f08f086adad2954c0efecd766713008bc66e yt/utilities/fortran_utils.py
--- a/yt/utilities/fortran_utils.py
+++ b/yt/utilities/fortran_utils.py
@@ -158,7 +158,7 @@
     >>> f = open("fort.3", "rb")
     >>> skip(f, 3)
     """
-    skipped = 0
+    skipped = []
     pos = f.tell()
     for i in range(n):
         fmt = endian+"I"
@@ -167,7 +167,7 @@
         f.seek(s1+ struct.calcsize(fmt), os.SEEK_CUR)
         s2= struct.unpack(fmt, size)[0]
         assert s1==s2 
-        skipped += s1/struct.calcsize(fmt)
+        skipped.append(s1/struct.calcsize(fmt))
     return skipped
 
 def peek_record_size(f,endian='='):
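
For context, fpu.skip() walks Fortran unformatted records, each framed by a 4-byte size marker before and after the payload; the change above has it return the per-record word counts so the RAMSES error path can report what was skipped. A hedged single-record sketch of that framing (not the yt function itself):

    import struct

    def skip_record(f, endian="="):
        fmt = endian + "I"
        s1, = struct.unpack(fmt, f.read(4))  # leading record-size marker
        f.seek(s1, 1)                        # skip the payload itself
        s2, = struct.unpack(fmt, f.read(4))  # trailing marker must match
        assert s1 == s2
        return s1 // struct.calcsize(fmt)    # payload length in 4-byte words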


https://bitbucket.org/yt_analysis/yt/commits/f78e2f4c831a/
Changeset:   f78e2f4c831a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-13 19:43:17
Summary:     Adding particle_position_* to universal_fields.py to make them recognizable as
particle fields.  Fixes #592.
Affected #:  1 file

diff -r 6f22f08f086adad2954c0efecd766713008bc66e -r f78e2f4c831aaa9707f2d71ab18963b9ac53cb47 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -940,6 +940,13 @@
 add_field("JeansMassMsun",function=_JeansMassMsun,
           units=r"\rm{M_{\odot}}")
 
+# We add these fields so that the field detector can use them
+for field in ["particle_position_%s" % ax for ax in "xyz"]:
+    # This marker should let everyone know not to use the fields, but NullFunc
+    # should do that, too.
+    add_field(field, function=NullFunc, particle_type = True,
+        units=r"UNDEFINED")
+
 def _pdensity(field, data):
     blank = np.zeros(data.ActiveDimensions, dtype='float64')
     if data["particle_position_x"].size == 0: return blank


https://bitbucket.org/yt_analysis/yt/commits/958e5e2c321e/
Changeset:   958e5e2c321e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-13 19:52:39
Summary:     Switching TotalMass to use particle_density.
Affected #:  1 file

diff -r f78e2f4c831aaa9707f2d71ab18963b9ac53cb47 -r 958e5e2c321e930877cd3b554a979ec16213742e yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -395,7 +395,7 @@
           convert_function=_convertCellMassCode)
 
 def _TotalMass(field,data):
-    return (data["Density"]+data["Dark_Matter_Density"]) * data["CellVolume"]
+    return (data["Density"]+data["particle_density"]) * data["CellVolume"]
 add_field("TotalMass", function=_TotalMass, units=r"\rm{g}")
 add_field("TotalMassMsun", units=r"M_{\odot}",
           function=_TotalMass,


https://bitbucket.org/yt_analysis/yt/commits/266b7da460ae/
Changeset:   266b7da460ae
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-13 21:20:23
Summary:     A few minor fixes, including the most important one of how the QuadProj is
initialized on a chunk-by-chunk basis.
Affected #:  1 file

diff -r 958e5e2c321e930877cd3b554a979ec16213742e -r 266b7da460ae2be60f36d5713ec6676ce46e9c7c yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -281,7 +281,7 @@
             chunk_fields.append(self.weight_field)
         tree = self._get_tree(len(fields))
         # We do this once
-        for chunk in self.data_source.chunks(None, "io"):
+        for chunk in self.data_source.chunks([], "io"):
             self._initialize_chunk(chunk, tree)
         # This needs to be parallel_objects-ified
         for chunk in parallel_objects(self.data_source.chunks(
@@ -335,8 +335,8 @@
 
     def _initialize_chunk(self, chunk, tree):
         icoords = chunk.icoords
-        i1 = icoords[:,0]
-        i2 = icoords[:,1]
+        i1 = icoords[:,x_dict[self.axis]]
+        i2 = icoords[:,y_dict[self.axis]]
         ilevel = chunk.ires
         tree.initialize_chunk(i1, i2, ilevel)
 
@@ -430,6 +430,7 @@
         self._data_source.max_level = self.level
 
     def get_data(self, fields = None):
+        if fields is None: return
         fields = self._determine_fields(ensure_list(fields))
         fields_to_get = [f for f in fields if f not in self.field_data]
         fields_to_get = self._identify_dependencies(fields_to_get)
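
The icoords fix matters because a projection's pixel coordinates are the two axes perpendicular to the projection axis, found via yt's x_dict/y_dict convention rather than always columns 0 and 1. An illustration; the mapping values below are my reading of that convention and should be treated as an assumption:

    # Assumed mapping: for each projection axis, which coordinate columns
    # span the image plane (x_dict gives the first, y_dict the second).
    x_dict = {0: 1, 1: 0, 2: 0}
    y_dict = {0: 2, 1: 2, 2: 1}

    axis = 1  # project along y: the plane is spanned by axes 0 and 2
    print(x_dict[axis], y_dict[axis])  # 0 2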


https://bitbucket.org/yt_analysis/yt/commits/8f9c4f153c91/
Changeset:   8f9c4f153c91
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-13 22:55:06
Summary:     First set of steps to rework particle handlers.

This has begun the process of moving away from Octree subsets.
Affected #:  5 files

diff -r 960000d1afa968b92400bb198f05d851416e8692 -r 8f9c4f153c91be9d4e4291d73ed8f9fe798b161a yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -32,10 +32,8 @@
 
 from yt.utilities.fortran_utils import read_record
 from yt.funcs import *
-from yt.geometry.oct_geometry_handler import \
-    OctreeGeometryHandler
-from yt.geometry.oct_container import \
-    ParticleOctreeContainer
+from yt.geometry.particle_geometry_handler import \
+    ParticleGeometryHandler
 from yt.geometry.geometry_handler import \
     GeometryHandler, YTDataChunk
 from yt.data_objects.static_output import \
@@ -61,12 +59,12 @@
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, NullFunc
 
-class ParticleDomainFile(object):
-    def __init__(self, pf, io, domain_filename, domain_id):
+class ParticleFile(object):
+    def __init__(self, pf, io, filename, file_id):
         self.pf = pf
         self.io = weakref.proxy(io)
-        self.domain_filename = domain_filename
-        self.domain_id = domain_id
+        self.filename = filename
+        self.file_id = file_id
         self.total_particles = self.io._count_particles(self)
 
     def select(self, selector):
@@ -78,99 +76,14 @@
     def _calculate_offsets(self, fields):
         pass
 
-class ParticleDomainSubset(OctreeSubset):
-    pass
-
-class ParticleGeometryHandler(OctreeGeometryHandler):
-
-    def __init__(self, pf, data_style):
-        self.data_style = data_style
-        self.parameter_file = weakref.proxy(pf)
-        # for now, the hierarchy file is the parameter file!
-        self.hierarchy_filename = self.parameter_file.parameter_filename
-        self.directory = os.path.dirname(self.hierarchy_filename)
-        self.float_type = np.float64
-        super(ParticleGeometryHandler, self).__init__(pf, data_style)
-        
-    def _initialize_oct_handler(self):
-        self._setup_data_io()
-        template = self.parameter_file.domain_template
-        ndoms = self.parameter_file.domain_count
-        cls = self.parameter_file._domain_class
-        self.domains = [cls(self.parameter_file, self.io, template % {'num':i}, i)
-                        for i in range(ndoms)]
-        total_particles = sum(sum(d.total_particles.values())
-                              for d in self.domains)
-        self.oct_handler = ParticleOctreeContainer(
-            self.parameter_file.domain_dimensions/2,
-            self.parameter_file.domain_left_edge,
-            self.parameter_file.domain_right_edge)
-        self.oct_handler.n_ref = 64
-        mylog.info("Allocating for %0.3e particles", total_particles)
-        for dom in self.domains:
-            self.io._initialize_octree(dom, self.oct_handler)
-        self.oct_handler.finalize()
-        self.max_level = self.oct_handler.max_level
-        tot = self.oct_handler.linearly_count()
-        mylog.info("Identified %0.3e octs", tot)
-
-    def _detect_fields(self):
-        # TODO: Add additional fields
-        pfl = []
-        for dom in self.domains:
-            fl = self.io._identify_fields(dom)
-            dom._calculate_offsets(fl)
-            for f in fl:
-                if f not in pfl: pfl.append(f)
-        self.field_list = pfl
-        pf = self.parameter_file
-        pf.particle_types = tuple(set(pt for pt, pf in pfl))
-        pf.particle_types += ('all',)
-    
-    def _setup_classes(self):
-        dd = self._get_data_reader_dict()
-        super(ParticleGeometryHandler, self)._setup_classes(dd)
-        self.object_types.sort()
-
-    def _identify_base_chunk(self, dobj):
-        if getattr(dobj, "_chunk_info", None) is None:
-            mask = dobj.selector.select_octs(self.oct_handler)
-            counts = self.oct_handler.count_cells(dobj.selector, mask)
-            subsets = [ParticleDomainSubset(d, mask, c)
-                       for d, c in zip(self.domains, counts) if c > 0]
-            dobj._chunk_info = subsets
-            dobj.size = sum(counts)
-            dobj.shape = (dobj.size,)
-        dobj._current_chunk = list(self._chunk_all(dobj))[0]
-
-    def _chunk_all(self, dobj):
-        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        yield YTDataChunk(dobj, "all", oobjs, dobj.size)
-
-    def _chunk_spatial(self, dobj, ngz, sort = None):
-        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        for i,og in enumerate(sobjs):
-            if ngz > 0:
-                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
-            else:
-                g = og
-            size = og.cell_count
-            if size == 0: continue
-            yield YTDataChunk(dobj, "spatial", [g], size)
-
-    def _chunk_io(self, dobj):
-        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], subset.cell_count)
-
-class GadgetBinaryDomainFile(ParticleDomainFile):
-    def __init__(self, pf, io, domain_filename, domain_id):
-        with open(domain_filename, "rb") as f:
+class GadgetBinaryFile(ParticleFile):
+    def __init__(self, pf, io, filename, file_id):
+        with open(filename, "rb") as f:
             self.header = read_record(f, pf._header_spec)
             self._position_offset = f.tell()
 
-        super(GadgetBinaryDomainFile, self).__init__(pf, io,
-                domain_filename, domain_id)
+        super(GadgetBinaryFile, self).__init__(pf, io,
+                filename, file_id)
 
     def _calculate_offsets(self, field_list):
         self.field_offsets = self.io._calculate_field_offsets(
@@ -210,7 +123,7 @@
 
 class GadgetStaticOutput(ParticleStaticOutput):
     _hierarchy_class = ParticleGeometryHandler
-    _domain_class = GadgetBinaryDomainFile
+    _file_class = GadgetBinaryFile
     _fieldinfo_fallback = GadgetFieldInfo
     _fieldinfo_known = KnownGadgetFields
     _header_spec = (('Npart', 6, 'i'),
@@ -235,7 +148,6 @@
                  additional_fields = (), root_dimensions = 64,
                  unit_base = None):
         self._root_dimensions = root_dimensions
-        # Set up the template for domain files
         self.storage_filename = None
         if unit_base is not None and "UnitLength_in_cm" in unit_base:
             # We assume this is comoving, because in the absence of comoving
@@ -306,11 +218,11 @@
         suffix = self.parameter_filename.rsplit(".", 1)[-1]
 
         if hvals["NumFiles"] > 1:
-            self.domain_template = "%s.%%(num)s" % (prefix)
+            self.filename_template = "%s.%%(num)s" % (prefix)
         else:
-            self.domain_template = self.parameter_filename
+            self.filename_template = self.parameter_filename
 
-        self.domain_count = hvals["NumFiles"]
+        self.file_count = hvals["NumFiles"]
 
         f.close()
 
@@ -341,14 +253,13 @@
 
 class OWLSStaticOutput(GadgetStaticOutput):
     _hierarchy_class = ParticleGeometryHandler
-    _domain_class = ParticleDomainFile
+    _file_class = ParticleFile
     _fieldinfo_fallback = OWLSFieldInfo # For now we have separate from Gadget
     _fieldinfo_known = KnownOWLSFields
     _header_spec = None # Override so that there's no confusion
 
     def __init__(self, filename, data_style="OWLS", root_dimensions = 64):
         self._root_dimensions = root_dimensions
-        # Set up the template for domain files
         self.storage_filename = None
         super(OWLSStaticOutput, self).__init__(filename, data_style,
                                                root_dimensions,
@@ -383,8 +294,8 @@
 
         prefix = self.parameter_filename.split(".", 1)[0]
         suffix = self.parameter_filename.rsplit(".", 1)[-1]
-        self.domain_template = "%s.%%(num)i.%s" % (prefix, suffix)
-        self.domain_count = hvals["NumFilesPerSnapshot"]
+        self.filename_template = "%s.%%(num)i.%s" % (prefix, suffix)
+        self.file_count = hvals["NumFilesPerSnapshot"]
 
         # To avoid having to open files twice
         self._unit_base = {}
@@ -407,23 +318,23 @@
             pass
         return False
 
-class TipsyDomainFile(ParticleDomainFile):
+class TipsyFile(ParticleFile):
 
     def _calculate_offsets(self, field_list):
         self.field_offsets = self.io._calculate_particle_offsets(self)
 
-    def __init__(self, pf, io, domain_filename, domain_id):
+    def __init__(self, pf, io, filename, file_id):
         # To go above 1 domain, we need to include an indexing step in the
         # IOHandler, rather than simply reading from a single file.
-        assert domain_id == 0 
-        super(TipsyDomainFile, self).__init__(pf, io,
-                domain_filename, domain_id)
+        assert file_id == 0
+        super(TipsyFile, self).__init__(pf, io,
+                filename, file_id)
         io._create_dtypes(self)
 
 
 class TipsyStaticOutput(ParticleStaticOutput):
     _hierarchy_class = ParticleGeometryHandler
-    _domain_class = TipsyDomainFile
+    _file_class = TipsyFile
     _fieldinfo_fallback = TipsyFieldInfo
     _fieldinfo_known = KnownTipsyFields
     _header_spec = (('time',    'd'),
@@ -443,7 +354,6 @@
                  cosmology_parameters = None):
         self.endian = endian
         self._root_dimensions = root_dimensions
-        # Set up the template for domain files
         self.storage_filename = None
         if domain_left_edge is None:
             domain_left_edge = np.zeros(3, "float64") - 0.5
@@ -506,8 +416,8 @@
 
         self.parameters = hvals
 
-        self.domain_template = self.parameter_filename
-        self.domain_count = 1
+        self.filename_template = self.parameter_filename
+        self.file_count = 1
 
         f.close()
 

diff -r 960000d1afa968b92400bb198f05d851416e8692 -r 8f9c4f153c91be9d4e4291d73ed8f9fe798b161a yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -32,6 +32,9 @@
     BaseIOHandler
 
 from yt.utilities.fortran_utils import read_record
+from yt.utilities.lib.geometry_utils import get_morton_indices
+
+from yt.geometry.oct_container import _ORDER_MAX as ORDER_MAX
 
 _vector_fields = ("Coordinates", "Velocity", "Velocities")
 
@@ -373,34 +376,44 @@
                 f.close()
         return rv
 
-    def _initialize_octree(self, domain, octree):
-        pf = domain.pf
-        with open(domain.domain_filename, "rb") as f:
-            f.seek(domain.pf._header_offset)
+    def _initialize_octree(self, data_file, octree):
+        pf = data_file.pf
+        morton = np.empty(sum(data_file.total_particles.values()),
+                          dtype="uint64")
+        ind = 0
+        DLE, DRE = pf.domain_right_edge, pf.domain_left_edge
+        dx = (DRE - DLE) / (2**ORDER_MAX)
+        with open(data_file.filename, "rb") as f:
+            f.seek(pf._header_offset)
             for ptype in self._ptypes:
                 # We'll just add the individual types separately
-                count = domain.total_particles[ptype]
+                count = data_file.total_particles[ptype]
                 if count == 0: continue
                 pp = np.fromfile(f, dtype = self._pdtypes[ptype],
                                  count = count)
-                pos = np.empty((count, 3), dtype="float64")
-                mylog.info("Adding %0.3e %s particles", count, ptype)
-                pos[:,0] = pp['Coordinates']['x']
-                pos[:,1] = pp['Coordinates']['y']
-                pos[:,2] = pp['Coordinates']['z']
-                mylog.debug("Spanning: %0.3e .. %0.3e in x",
-                            pos[:,0].min(), pos[:,0].max())
-                mylog.debug("Spanning: %0.3e .. %0.3e in y",
-                            pos[:,1].min(), pos[:,1].max())
-                mylog.debug("Spanning: %0.3e .. %0.3e in z",
-                            pos[:,2].min(), pos[:,2].max())
-                if np.any(pos.min(axis=0) < pf.domain_left_edge) or \
-                   np.any(pos.max(axis=0) > pf.domain_right_edge):
-                    raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
+                mis = np.empty(3, dtype="float64")
+                mas = np.empty(3, dtype="float64")
+                for axi, ax in enumerate('xyz'):
+                    mi = pp["Coordinates"][ax].min()
+                    ma = pp["Coordinates"][ax].max()
+                    mylog.debug("Spanning: %0.3e .. %0.3e in %s", mi, ma, ax)
+                    mis[axi] = mi
+                    mas[axi] = ma
+                if np.any(mis < pf.domain_left_edge) or \
+                   np.any(mas > pf.domain_right_edge):
+                    raise YTDomainOverflow(mis, mas,
                                            pf.domain_left_edge,
                                            pf.domain_right_edge)
+                pos = np.empty((count, 3), dtype="uint64")
+                mylog.info("Adding %0.3e %s particles", count, ptype)
+                pos[:,0] = np.floor((pp['Coordinates']['x'] - DLE[0])/dx[0])
+                pos[:,1] = np.floor((pp['Coordinates']['y'] - DLE[1])/dx[1])
+                pos[:,2] = np.floor((pp['Coordinates']['z'] - DLE[2])/dx[2])
                 del pp
-                octree.add(pos, domain.domain_id)
+                morton[ind:ind+count] = get_morton_indices(pos)
+        morton.sort()
+        octree.add(morton, data_file.file_id)
+        print octree.recursively_count()
 
     def _count_particles(self, domain):
         npart = {

diff -r 960000d1afa968b92400bb198f05d851416e8692 -r 8f9c4f153c91be9d4e4291d73ed8f9fe798b161a yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -35,6 +35,8 @@
 
 DEF ORDER_MAX=20
 
+_ORDER_MAX = ORDER_MAX
+
 cdef extern from "stdlib.h":
     # NOTE that size_t might not be int
     void *alloca(int)

diff -r 960000d1afa968b92400bb198f05d851416e8692 -r 8f9c4f153c91be9d4e4291d73ed8f9fe798b161a yt/geometry/particle_geometry_handler.py
--- /dev/null
+++ b/yt/geometry/particle_geometry_handler.py
@@ -0,0 +1,143 @@
+"""
+Particle-only geometry handler
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import h5py
+import numpy as na
+import string, re, gc, time, cPickle
+import weakref
+
+from itertools import chain, izip
+
+from yt.funcs import *
+from yt.utilities.logger import ytLogger as mylog
+from yt.arraytypes import blankRecordArray
+from yt.config import ytcfg
+from yt.data_objects.field_info_container import NullFunc
+from yt.geometry.geometry_handler import GeometryHandler, YTDataChunk
+from yt.geometry.oct_container import \
+    ParticleOctreeContainer
+from yt.utilities.definitions import MAXLEVEL
+from yt.utilities.io_handler import io_registry
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    ParallelAnalysisInterface, parallel_splitter
+
+from yt.data_objects.data_containers import data_object_registry
+
+class ParticleGeometryHandler(GeometryHandler):
+
+    def __init__(self, pf, data_style):
+        self.data_style = data_style
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = self.parameter_file.parameter_filename
+        self.directory = os.path.dirname(self.hierarchy_filename)
+        self.float_type = np.float64
+        super(ParticleGeometryHandler, self).__init__(pf, data_style)
+
+    def _setup_geometry(self):
+        mylog.debug("Initializing Particle Geometry Handler.")
+        self._initialize_particle_handler()
+
+
+    def get_smallest_dx(self):
+        """
+        Returns (in code units) the smallest cell size in the simulation.
+        """
+        raise NotImplementedError
+
+    def convert(self, unit):
+        return self.parameter_file.conversion_factors[unit]
+
+    def _initialize_particle_handler(self):
+        self._setup_data_io()
+        template = self.parameter_file.filename_template
+        ndoms = self.parameter_file.file_count
+        cls = self.parameter_file._file_class
+        self.data_files = [cls(self.parameter_file, self.io, template % {'num':i}, i)
+                           for i in range(ndoms)]
+        total_particles = sum(sum(d.total_particles.values())
+                              for d in self.data_files)
+        self.oct_handler = ParticleOctreeContainer(
+            [1, 1, 1],
+            self.parameter_file.domain_left_edge,
+            self.parameter_file.domain_right_edge)
+        self.oct_handler.n_ref = 64
+        mylog.info("Allocating for %0.3e particles", total_particles)
+        for dom in self.data_files:
+            self.io._initialize_octree(dom, self.oct_handler)
+        self.oct_handler.finalize()
+        self.max_level = self.oct_handler.max_level
+        tot = self.oct_handler.linearly_count()
+        mylog.info("Identified %0.3e octs", tot)
+
+    def _detect_fields(self):
+        # TODO: Add additional fields
+        pfl = []
+        for dom in self.data_files:
+            fl = self.io._identify_fields(dom)
+            dom._calculate_offsets(fl)
+            for f in fl:
+                if f not in pfl: pfl.append(f)
+        self.field_list = pfl
+        pf = self.parameter_file
+        pf.particle_types = tuple(set(pt for pt, pf in pfl))
+        pf.particle_types += ('all',)
+
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        super(ParticleGeometryHandler, self)._setup_classes(dd)
+        self.object_types.sort()
+
+    def _identify_base_chunk(self, dobj):
+        if getattr(dobj, "_chunk_info", None) is None:
+            mask = dobj.selector.select_octs(self.oct_handler)
+            counts = self.oct_handler.count_cells(dobj.selector, mask)
+            subsets = [ParticleDomainSubset(d, mask, c)
+                       for d, c in zip(self.domains, counts) if c > 0]
+            dobj._chunk_info = subsets
+            dobj.size = sum(counts)
+            dobj.shape = (dobj.size,)
+        dobj._current_chunk = list(self._chunk_all(dobj))[0]
+
+    def _chunk_all(self, dobj):
+        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        yield YTDataChunk(dobj, "all", oobjs, dobj.size)
+
+    def _chunk_spatial(self, dobj, ngz, sort = None):
+        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for i,og in enumerate(sobjs):
+            if ngz > 0:
+                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+            else:
+                g = og
+            size = og.cell_count
+            if size == 0: continue
+            yield YTDataChunk(dobj, "spatial", [g], size)
+
+    def _chunk_io(self, dobj):
+        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for subset in oobjs:
+            yield YTDataChunk(dobj, "io", [subset], subset.cell_count)
+

diff -r 960000d1afa968b92400bb198f05d851416e8692 -r 8f9c4f153c91be9d4e4291d73ed8f9fe798b161a yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -323,7 +323,7 @@
 @cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def get_morton_indices(np.ndarray[np.int64_t, ndim=2] left_index):
+def get_morton_indices(np.ndarray[np.uint64_t, ndim=2] left_index):
     cdef np.int64_t i, mi
     cdef np.ndarray[np.uint64_t, ndim=1] morton_indices
     morton_indices = np.zeros(left_index.shape[0], 'uint64')

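The indexing introduced above boils down to quantizing each particle position onto a
2**ORDER_MAX lattice and interleaving the coordinate bits into a single Morton key;
sorting the keys then groups spatially nearby particles together. A minimal pure-NumPy
sketch of that idea (interleave_bits is a hypothetical stand-in for the Cython
get_morton_indices, and the exact bit ordering here is an assumption):

    import numpy as np

    ORDER_MAX = 20  # mirrors DEF ORDER_MAX=20 in oct_container.pyx

    def interleave_bits(ix, iy, iz):
        # Hypothetical stand-in for get_morton_indices: interleave the bits
        # of three integer coordinate arrays into one 64-bit key each.
        key = np.zeros(ix.shape, dtype=np.uint64)
        for bit in range(ORDER_MAX):
            key |= ((ix >> bit) & 1) << (3 * bit + 2)
            key |= ((iy >> bit) & 1) << (3 * bit + 1)
            key |= ((iz >> bit) & 1) << (3 * bit)
        return key

    def morton_keys(pos, DLE, DRE):
        # Quantize float positions onto the 2**ORDER_MAX lattice, then interleave.
        dx = (DRE - DLE) / (2 ** ORDER_MAX)
        ipos = np.floor((pos - DLE) / dx).astype("uint64")
        return interleave_bits(ipos[:, 0], ipos[:, 1], ipos[:, 2])
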

https://bitbucket.org/yt_analysis/yt/commits/61f889ee4959/
Changeset:   61f889ee4959
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-13 23:33:38
Summary:     Correct calculation of DLE, DRE, and coordinates.
Affected #:  1 file

diff -r 8f9c4f153c91be9d4e4291d73ed8f9fe798b161a -r 61f889ee49592863806236960e27175571c43c78 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -381,7 +381,7 @@
         morton = np.empty(sum(data_file.total_particles.values()),
                           dtype="uint64")
         ind = 0
-        DLE, DRE = pf.domain_right_edge, pf.domain_left_edge
+        DLE, DRE = pf.domain_left_edge, pf.domain_right_edge
         dx = (DRE - DLE) / (2**ORDER_MAX)
         with open(data_file.filename, "rb") as f:
             f.seek(pf._header_offset)
@@ -406,14 +406,14 @@
                                            pf.domain_right_edge)
                 pos = np.empty((count, 3), dtype="uint64")
                 mylog.info("Adding %0.3e %s particles", count, ptype)
-                pos[:,0] = np.floor((pp['Coordinates']['x'] - DLE[0])/dx[0])
-                pos[:,1] = np.floor((pp['Coordinates']['y'] - DLE[1])/dx[1])
-                pos[:,2] = np.floor((pp['Coordinates']['z'] - DLE[2])/dx[2])
-                del pp
+                for axi, ax in enumerate("xyz"):
+                    coords = pp['Coordinates'][ax].astype("float64")
+                    coords = np.floor((coords - DLE[axi])/dx[axi])
+                    pos[:,axi] = coords
                 morton[ind:ind+count] = get_morton_indices(pos)
+                del pp, pos
         morton.sort()
         octree.add(morton, data_file.file_id)
-        print octree.recursively_count()
 
     def _count_particles(self, domain):
         npart = {


https://bitbucket.org/yt_analysis/yt/commits/fbed8ce438cc/
Changeset:   fbed8ce438cc
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-13 23:41:45
Summary:     Continuing to convert Tipsy.
Affected #:  2 files

diff -r 61f889ee49592863806236960e27175571c43c78 -r fbed8ce438cc9bc6a6a56f3017a6500a4b4a5f9e yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -405,13 +405,13 @@
                                            pf.domain_left_edge,
                                            pf.domain_right_edge)
                 pos = np.empty((count, 3), dtype="uint64")
-                mylog.info("Adding %0.3e %s particles", count, ptype)
                 for axi, ax in enumerate("xyz"):
                     coords = pp['Coordinates'][ax].astype("float64")
                     coords = np.floor((coords - DLE[axi])/dx[axi])
                     pos[:,axi] = coords
                 morton[ind:ind+count] = get_morton_indices(pos)
                 del pp, pos
+        mylog.info("Adding %0.3e particles", morton.size)
         morton.sort()
         octree.add(morton, data_file.file_id)
 

diff -r 61f889ee49592863806236960e27175571c43c78 -r fbed8ce438cc9bc6a6a56f3017a6500a4b4a5f9e yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -89,7 +89,7 @@
             self.io._initialize_octree(dom, self.oct_handler)
         self.oct_handler.finalize()
         self.max_level = self.oct_handler.max_level
-        tot = self.oct_handler.linearly_count()
+        tot = sum(self.oct_handler.recursively_count().values())
         mylog.info("Identified %0.3e octs", tot)
 
     def _detect_fields(self):


https://bitbucket.org/yt_analysis/yt/commits/8dcd631cef53/
Changeset:   8dcd631cef53
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-14 00:30:37
Summary:     Initial implementation of coarse particle region identifier.
Affected #:  2 files

diff -r fbed8ce438cc9bc6a6a56f3017a6500a4b4a5f9e -r 8dcd631cef53f6e80852d4668adda1d859c5647f yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -34,7 +34,7 @@
 from yt.utilities.fortran_utils import read_record
 from yt.utilities.lib.geometry_utils import get_morton_indices
 
-from yt.geometry.oct_container import _ORDER_MAX as ORDER_MAX
+from yt.geometry.oct_container import _ORDER_MAX, ParticleRegions
 
 _vector_fields = ("Coordinates", "Velocity", "Velocities")
 
@@ -382,10 +382,13 @@
                           dtype="uint64")
         ind = 0
         DLE, DRE = pf.domain_left_edge, pf.domain_right_edge
-        dx = (DRE - DLE) / (2**ORDER_MAX)
+        dx = (DRE - DLE) / (2**_ORDER_MAX)
+        self.regions = ParticleRegions(
+                pf.domain_left_edge, pf.domain_right_edge,
+                [64, 64, 64], len(self._ptypes))
         with open(data_file.filename, "rb") as f:
             f.seek(pf._header_offset)
-            for ptype in self._ptypes:
+            for iptype, ptype in enumerate(self._ptypes):
                 # We'll just add the individual types separately
                 count = data_file.total_particles[ptype]
                 if count == 0: continue
@@ -404,6 +407,12 @@
                     raise YTDomainOverflow(mis, mas,
                                            pf.domain_left_edge,
                                            pf.domain_right_edge)
+                fpos = np.empty((count, 3), dtype="float64")
+                fpos[:,0] = pp["Coordinates"]["x"]
+                fpos[:,1] = pp["Coordinates"]["y"]
+                fpos[:,2] = pp["Coordinates"]["z"]
+                self.regions.add_data_file(fpos, iptype)
+                del fpos
                 pos = np.empty((count, 3), dtype="uint64")
                 for axi, ax in enumerate("xyz"):
                     coords = pp['Coordinates'][ax].astype("float64")

diff -r fbed8ce438cc9bc6a6a56f3017a6500a4b4a5f9e -r 8dcd631cef53f6e80852d4668adda1d859c5647f yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -1286,3 +1286,40 @@
                 ind[oi] = nm
             nm += use
         return ind
+
+cdef class ParticleRegions:
+    cdef np.float64_t left_edge[3]
+    cdef np.float64_t dds[3]
+    cdef np.float64_t idds[3]
+    cdef np.int32_t dims[3]
+    cdef public int nfiles
+    cdef public object masks
+
+    def __init__(self, left_edge, right_edge, dims, nfiles):
+        cdef int i
+        self.nfiles = nfiles
+        for i in range(3):
+            self.left_edge[i] = left_edge[i]
+            self.dims[i] = dims[i]
+            self.dds[i] = (right_edge[i] - left_edge[i])/dims[i]
+            self.idds[i] = 1.0/self.dds[i]
+        # We use 64-bit masks
+        self.masks = []
+        for i in range(nfiles/64 + 1):
+            self.masks.append(np.zeros(dims, dtype="uint64"))
+
+    def add_data_file(self, np.ndarray[np.float64_t, ndim=2] pos, int file_id):
+        cdef np.int64_t no = pos.shape[0]
+        cdef np.int64_t p
+        cdef int ind[3], i
+        cdef np.ndarray[np.uint64_t, ndim=3] mask
+        mask = self.masks[file_id/64]
+        val = 1 << (file_id - (file_id/64)*64)
+        for p in range(no):
+            # Now we locate the particle
+            for i in range(3):
+                ind[i] = <int> ((pos[p, i] - self.left_edge[i])*self.idds[i])
+            mask[ind[0],ind[1],ind[2]] |= val
+
+    def identify_data_files(self, SelectorObject selector):
+        pass

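ParticleRegions is, in effect, a coarse uint64 grid with one bit per file. A minimal
NumPy sketch of the add_data_file step, assuming at most 64 files so a single mask
array suffices (names are illustrative, not the class's API):

    import numpy as np

    def make_regions(dims):
        # One uint64 per coarse cell; bit i set means file i has particles here.
        return np.zeros(dims, dtype="uint64")

    def add_data_file(mask, pos, file_id, left_edge, dds):
        # Locate each particle's coarse cell and set this file's bit there.
        ind = ((pos - left_edge) / dds).astype("int64")
        mask[ind[:, 0], ind[:, 1], ind[:, 2]] |= np.uint64(1 << file_id)

Selection then only needs to OR together the masks of the cells a selector touches,
which is what the identify_data_files stub above is for.
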

https://bitbucket.org/yt_analysis/yt/commits/b26e0a320e6f/
Changeset:   b26e0a320e6f
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-14 03:20:57
Summary:     Extending ParticleRegions.  Now lives on the geometry handler and can identify
coarse data file inclusion.
Affected #:  3 files

diff -r 8dcd631cef53f6e80852d4668adda1d859c5647f -r b26e0a320e6ff4a019f4447fb16ba034c0d5e91a yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -92,7 +92,7 @@
                 f.close()
         return rv
 
-    def _initialize_octree(self, domain, octree):
+    def _initialize_index(self, domain, octree):
         f = h5py.File(domain.domain_filename, "r")
         for key in f.keys():
             if not key.startswith("PartType"): continue
@@ -236,7 +236,7 @@
             arr = arr.reshape((count/3, 3), order="C")
         return arr.astype("float64")
 
-    def _initialize_octree(self, domain, octree):
+    def _initialize_index(self, domain, octree):
         count = sum(domain.total_particles.values())
         dt = [("px", "float32"), ("py", "float32"), ("pz", "float32")]
         with open(domain.domain_filename, "rb") as f:
@@ -376,16 +376,13 @@
                 f.close()
         return rv
 
-    def _initialize_octree(self, data_file, octree):
+    def _initialize_index(self, data_file, octree, regions):
         pf = data_file.pf
         morton = np.empty(sum(data_file.total_particles.values()),
                           dtype="uint64")
         ind = 0
         DLE, DRE = pf.domain_left_edge, pf.domain_right_edge
         dx = (DRE - DLE) / (2**_ORDER_MAX)
-        self.regions = ParticleRegions(
-                pf.domain_left_edge, pf.domain_right_edge,
-                [64, 64, 64], len(self._ptypes))
         with open(data_file.filename, "rb") as f:
             f.seek(pf._header_offset)
             for iptype, ptype in enumerate(self._ptypes):
@@ -411,7 +408,7 @@
                 fpos[:,0] = pp["Coordinates"]["x"]
                 fpos[:,1] = pp["Coordinates"]["y"]
                 fpos[:,2] = pp["Coordinates"]["z"]
-                self.regions.add_data_file(fpos, iptype)
+                regions.add_data_file(fpos, data_file.file_id)
                 del fpos
                 pos = np.empty((count, 3), dtype="uint64")
                 for axi, ax in enumerate("xyz"):

diff -r 8dcd631cef53f6e80852d4668adda1d859c5647f -r b26e0a320e6ff4a019f4447fb16ba034c0d5e91a yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -1322,4 +1322,34 @@
             mask[ind[0],ind[1],ind[2]] |= val
 
     def identify_data_files(self, SelectorObject selector):
-        pass
+        # This is relatively cheap to iterate over.
+        cdef int i, j, k, n
+        cdef np.uint64_t fmask, offset
+        cdef np.float64_t LE[3], RE[3]
+        cdef np.ndarray[np.uint64_t, ndim=3] mask
+        files = []
+        for n in range(len(self.masks)):
+            fmask = 0
+            mask = self.masks[n]
+            LE[0] = self.left_edge[0]
+            RE[0] = LE[0] + self.dds[0]
+            for i in range(self.dims[0]):
+                LE[1] = self.left_edge[1]
+                RE[1] = LE[1] + self.dds[1]
+                for j in range(self.dims[1]):
+                    LE[2] = self.left_edge[2]
+                    RE[2] = LE[2] + self.dds[2]
+                    for k in range(self.dims[2]):
+                        if selector.select_grid(LE, RE, 0) == 0: continue
+                        fmask |= mask[i,j,k]
+                        LE[2] += self.dds[2]
+                        RE[2] += self.dds[2]
+                    LE[1] += self.dds[1]
+                    RE[1] += self.dds[1]
+                LE[0] += self.dds[0]
+                RE[0] += self.dds[0]
+            # Now we iterate through...
+            for i in range(64):
+                if ((fmask >> i) & 1) == 1:
+                    files.append(i + n * 64)
+        return files

diff -r 8dcd631cef53f6e80852d4668adda1d859c5647f -r b26e0a320e6ff4a019f4447fb16ba034c0d5e91a yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -37,7 +37,7 @@
 from yt.data_objects.field_info_container import NullFunc
 from yt.geometry.geometry_handler import GeometryHandler, YTDataChunk
 from yt.geometry.oct_container import \
-    ParticleOctreeContainer
+    ParticleOctreeContainer, ParticleRegions
 from yt.utilities.definitions import MAXLEVEL
 from yt.utilities.io_handler import io_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -79,14 +79,17 @@
                            for i in range(ndoms)]
         total_particles = sum(sum(d.total_particles.values())
                               for d in self.data_files)
+        pf = self.parameter_file
         self.oct_handler = ParticleOctreeContainer(
-            [1, 1, 1],
-            self.parameter_file.domain_left_edge,
-            self.parameter_file.domain_right_edge)
+            [1, 1, 1], pf.domain_left_edge, pf.domain_right_edge)
         self.oct_handler.n_ref = 64
         mylog.info("Allocating for %0.3e particles", total_particles)
+        N = len(self.data_files)
+        self.regions = ParticleRegions(
+                pf.domain_left_edge, pf.domain_right_edge,
+                [N, N, N], N)
         for dom in self.data_files:
-            self.io._initialize_octree(dom, self.oct_handler)
+            self.io._initialize_index(dom, self.oct_handler, self.regions)
         self.oct_handler.finalize()
         self.max_level = self.oct_handler.max_level
         tot = sum(self.oct_handler.recursively_count().values())
@@ -113,12 +116,10 @@
     def _identify_base_chunk(self, dobj):
         if getattr(dobj, "_chunk_info", None) is None:
             mask = dobj.selector.select_octs(self.oct_handler)
-            counts = self.oct_handler.count_cells(dobj.selector, mask)
-            subsets = [ParticleDomainSubset(d, mask, c)
-                       for d, c in zip(self.domains, counts) if c > 0]
-            dobj._chunk_info = subsets
-            dobj.size = sum(counts)
-            dobj.shape = (dobj.size,)
+            file_ids = self.regions.identify_data_files(dobj.selector)
+            dobj._chunk_info = [self.data_files[i] for i in file_ids]
+            #dobj.size = sum(counts)
+            #dobj.shape = (dobj.size,)
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
 
     def _chunk_all(self, dobj):

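Decoding the accumulated per-mask fmask values back into file ids is just a bit scan.
In plain Python, mirroring the loop at the end of identify_data_files above:

    def files_from_fmasks(fmasks):
        # fmasks: one accumulated 64-bit value per mask array (per 64 files).
        files = []
        for n, fmask in enumerate(fmasks):
            for i in range(64):
                if (int(fmask) >> i) & 1:
                    files.append(i + n * 64)
        return files
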

https://bitbucket.org/yt_analysis/yt/commits/d6198c6c1a9a/
Changeset:   d6198c6c1a9a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-14 03:32:16
Summary:     Starting to convert the OWLS format, which will test multi-file support.
Affected #:  1 file

diff -r b26e0a320e6ff4a019f4447fb16ba034c0d5e91a -r d6198c6c1a9a79e05acece5a7f4ae12d45b15a66 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -92,23 +92,33 @@
                 f.close()
         return rv
 
-    def _initialize_index(self, domain, octree):
-        f = h5py.File(domain.domain_filename, "r")
+    def _initialize_index(self, data_file, octree, regions):
+        f = h5py.File(data_file.filename, "r")
+        pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
+        morton = np.empty(pcount, dtype='uint64')
+        DLE = data_file.pf.domain_left_edge
+        DRE = data_file.pf.domain_right_edge
+        dx = (DRE - DLE) / 2**_ORDER_MAX
+        ind = 0
         for key in f.keys():
             if not key.startswith("PartType"): continue
             pos = f[key]["Coordinates"][:].astype("float64")
-            octree.add(pos, domain.domain_id)
+            regions.add_data_file(pos, data_file.file_id)
+            pos = np.floor((pos - DLE)/dx).astype("uint64")
+            morton[ind:ind+pos.shape[0]] = get_morton_indices(pos)
         f.close()
+        morton.sort()
+        #octree.add(morton, data_file.file_id)
 
-    def _count_particles(self, domain):
-        f = h5py.File(domain.domain_filename, "r")
-        np = f["/Header"].attrs["NumPart_ThisFile"][:]
+    def _count_particles(self, data_file):
+        f = h5py.File(data_file.filename, "r")
+        pcount = f["/Header"].attrs["NumPart_ThisFile"][:]
         f.close()
-        npart = dict(("PartType%s" % (i), v) for i, v in enumerate(np)) 
+        npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount)) 
         return npart
 
-    def _identify_fields(self, domain):
-        f = h5py.File(domain.domain_filename, "r")
+    def _identify_fields(self, data_file):
+        f = h5py.File(data_file.filename, "r")
         fields = []
         for key in f.keys():
             if not key.startswith("PartType"): continue


https://bitbucket.org/yt_analysis/yt/commits/bd419ea73d70/
Changeset:   bd419ea73d70
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-14 03:40:07
Summary:     Converted some OWLS; Tipsy and OWLS now generate regions correctly.
Affected #:  3 files

diff -r d6198c6c1a9a79e05acece5a7f4ae12d45b15a66 -r bd419ea73d701a908c3e4583afff894f8da568f7 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -92,7 +92,7 @@
                 f.close()
         return rv
 
-    def _initialize_index(self, data_file, octree, regions):
+    def _initialize_index(self, data_file, regions):
         f = h5py.File(data_file.filename, "r")
         pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
         morton = np.empty(pcount, dtype='uint64')
@@ -107,8 +107,7 @@
             pos = np.floor((pos - DLE)/dx).astype("uint64")
             morton[ind:ind+pos.shape[0]] = get_morton_indices(pos)
         f.close()
-        morton.sort()
-        #octree.add(morton, data_file.file_id)
+        return morton
 
     def _count_particles(self, data_file):
         f = h5py.File(data_file.filename, "r")
@@ -386,7 +385,7 @@
                 f.close()
         return rv
 
-    def _initialize_index(self, data_file, octree, regions):
+    def _initialize_index(self, data_file, regions):
         pf = data_file.pf
         morton = np.empty(sum(data_file.total_particles.values()),
                           dtype="uint64")
@@ -428,8 +427,7 @@
                 morton[ind:ind+count] = get_morton_indices(pos)
                 del pp, pos
         mylog.info("Adding %0.3e particles", morton.size)
-        morton.sort()
-        octree.add(morton, data_file.file_id)
+        return morton
 
     def _count_particles(self, domain):
         npart = {

diff -r d6198c6c1a9a79e05acece5a7f4ae12d45b15a66 -r bd419ea73d701a908c3e4583afff894f8da568f7 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -1064,7 +1064,7 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def add(self, np.ndarray[np.uint64_t, ndim=1] indices, np.int64_t domain_id):
+    def add(self, np.ndarray[np.uint64_t, ndim=1] indices):
         #Add this particle to the root oct
         #Then if that oct has children, add it to them recursively
         #If the child needs to be refined because of max particles, do so
@@ -1092,10 +1092,6 @@
                     self.filter_particles(cur, data, p)
                 else:
                     cur = cur.children[ind[0]][ind[1]][ind[2]]
-                    if domain_id > 0:
-                        cur.file_ind = 0
-                        self.filter_particles(cur, data, p)
-            if p >= self.n_ref: domain_id = 0
             cur.file_ind += 1
 
     @cython.boundscheck(False)

diff -r d6198c6c1a9a79e05acece5a7f4ae12d45b15a66 -r bd419ea73d701a908c3e4583afff894f8da568f7 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -77,24 +77,45 @@
         cls = self.parameter_file._file_class
         self.data_files = [cls(self.parameter_file, self.io, template % {'num':i}, i)
                            for i in range(ndoms)]
-        total_particles = sum(sum(d.total_particles.values())
-                              for d in self.data_files)
+        self.total_particles = sum(
+                sum(d.total_particles.values()) for d in self.data_files)
         pf = self.parameter_file
         self.oct_handler = ParticleOctreeContainer(
             [1, 1, 1], pf.domain_left_edge, pf.domain_right_edge)
         self.oct_handler.n_ref = 64
-        mylog.info("Allocating for %0.3e particles", total_particles)
+        mylog.info("Allocating for %0.3e particles", self.total_particles)
         N = len(self.data_files)
         self.regions = ParticleRegions(
                 pf.domain_left_edge, pf.domain_right_edge,
                 [N, N, N], N)
-        for dom in self.data_files:
-            self.io._initialize_index(dom, self.oct_handler, self.regions)
+        self._initialize_indices()
         self.oct_handler.finalize()
         self.max_level = self.oct_handler.max_level
         tot = sum(self.oct_handler.recursively_count().values())
         mylog.info("Identified %0.3e octs", tot)
 
+    def _initialize_indices(self):
+        # This will be replaced with a parallel-aware iteration step.
+        # Roughly outlined, what we will do is:
+        #   * Generate Morton indices on each set of files that belong to
+        #     an individual processor
+        #   * Create a global, accumulated histogram
+        #   * Cut based on estimated load balancing
+        #   * Pass particles to specific processors, along with NREF buffer
+        #   * Broadcast back a serialized octree to join
+        #
+        # For now we will do this in serial.
+        morton = np.empty(self.total_particles, dtype="uint64")
+        ind = 0
+        for data_file in self.data_files:
+            npart = sum(data_file.total_particles.values())
+            morton[ind:ind + npart] = \
+                self.io._initialize_index(data_file, self.regions)
+        morton.sort()
+        # Now we add them all at once.
+        self.oct_handler.add(morton)
+
+
     def _detect_fields(self):
         # TODO: Add additional fields
         pfl = []

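The serial plan in the comment above amounts to: collect each file's Morton keys into
one array, sort once, and add them to the octree in a single call. A minimal sketch,
assuming the same attribute names as the diff; note that the running offset has to
advance by each file's particle count so the per-file key ranges do not overwrite
each other:

    import numpy as np

    def build_particle_index(data_files, io, regions, oct_handler, total_particles):
        morton = np.empty(total_particles, dtype="uint64")
        ind = 0
        for data_file in data_files:
            npart = sum(data_file.total_particles.values())
            morton[ind:ind + npart] = io._initialize_index(data_file, regions)
            ind += npart  # advance so the next file's keys land after these
        morton.sort()
        oct_handler.add(morton)
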

https://bitbucket.org/yt_analysis/yt/commits/e011bb3d32d2/
Changeset:   e011bb3d32d2
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-14 17:14:07
Summary:     Critical error -- need to increment LE/RE regardless of selector.
Affected #:  1 file

diff -r bd419ea73d701a908c3e4583afff894f8da568f7 -r e011bb3d32d21379a4a7bc6c82767eba69263c62 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -1336,8 +1336,8 @@
                     LE[2] = self.left_edge[2]
                     RE[2] = LE[2] + self.dds[2]
                     for k in range(self.dims[2]):
-                        if selector.select_grid(LE, RE, 0) == 0: continue
-                        fmask |= mask[i,j,k]
+                        if selector.select_grid(LE, RE, 0) == 1:
+                            fmask |= mask[i,j,k]
                         LE[2] += self.dds[2]
                         RE[2] += self.dds[2]
                     LE[1] += self.dds[1]

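The bug class fixed here is worth a one-line illustration: the cell edges are loop
bookkeeping, so a `continue` on a failed selection also skips their increments, and
every later cell is tested against stale edges. A minimal 1-D sketch of the corrected
shape (`inside` is a hypothetical stand-in for select_grid):

    def accumulate_fmask(mask_1d, left_edge, dds, inside):
        fmask = 0
        LE, RE = left_edge, left_edge + dds
        for k in range(len(mask_1d)):
            if inside(LE, RE):      # gate only the accumulation...
                fmask |= mask_1d[k]
            LE += dds               # ...but advance the edges every iteration
            RE += dds
        return fmask
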

https://bitbucket.org/yt_analysis/yt/commits/31ed51b8ccb0/
Changeset:   31ed51b8ccb0
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-14 17:25:02
Summary:     Region selection works for OWLS.
Affected #:  2 files

diff -r e011bb3d32d21379a4a7bc6c82767eba69263c62 -r 31ed51b8ccb0f32a4f66bc542e48a9a623d8850a yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -53,8 +53,8 @@
         for ftype, fname in fields:
             ptf[ftype].append(fname)
         for chunk in chunks: # Will be OWLS domains
-            for subset in chunk.objs:
-                f = h5py.File(subset.domain.domain_filename, "r")
+            for data_file in chunk.objs:
+                f = h5py.File(data_file.filename, "r")
                 # This double-reads
                 for ptype, field_list in sorted(ptf.items()):
                     coords = f["/%s/Coordinates" % ptype][:].astype("float64")
@@ -73,8 +73,8 @@
             rv[field] = np.empty(shape, dtype="float64")
             ind[field] = 0
         for chunk in chunks: # Will be OWLS domains
-            for subset in chunk.objs:
-                f = h5py.File(subset.domain.domain_filename, "r")
+            for data_file in chunk.objs:
+                f = h5py.File(data_file.filename, "r")
                 for ptype, field_list in sorted(ptf.items()):
                     g = f["/%s" % ptype]
                     coords = g["Coordinates"][:].astype("float64")

diff -r e011bb3d32d21379a4a7bc6c82767eba69263c62 -r 31ed51b8ccb0f32a4f66bc542e48a9a623d8850a yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -84,10 +84,11 @@
             [1, 1, 1], pf.domain_left_edge, pf.domain_right_edge)
         self.oct_handler.n_ref = 64
         mylog.info("Allocating for %0.3e particles", self.total_particles)
-        N = len(self.data_files)
+        # No more than 256^3 in the region finder.
+        N = min(len(self.data_files), 256) 
         self.regions = ParticleRegions(
                 pf.domain_left_edge, pf.domain_right_edge,
-                [N, N, N], N)
+                [N, N, N], len(self.data_files))
         self._initialize_indices()
         self.oct_handler.finalize()
         self.max_level = self.oct_handler.max_level
@@ -139,8 +140,6 @@
             mask = dobj.selector.select_octs(self.oct_handler)
             file_ids = self.regions.identify_data_files(dobj.selector)
             dobj._chunk_info = [self.data_files[i] for i in file_ids]
-            #dobj.size = sum(counts)
-            #dobj.shape = (dobj.size,)
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
 
     def _chunk_all(self, dobj):
@@ -161,5 +160,5 @@
     def _chunk_io(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], subset.cell_count)
+            yield YTDataChunk(dobj, "io", [subset], -1)
 


https://bitbucket.org/yt_analysis/yt/commits/52cc6544d609/
Changeset:   52cc6544d609
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-14 18:36:46
Summary:     Merging in noglobalmesh, to start removing size and shape.
Affected #:  8 files

diff -r 31ed51b8ccb0f32a4f66bc542e48a9a623d8850a -r 52cc6544d609a36cf117a5bde14206e7ced903da yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -2495,7 +2495,7 @@
             if dm_only:
                 select = self._get_dm_indices()
                 total_mass = \
-                    self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
+                    self.comm.mpi_allreduce((self._data_source['all', "ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
             else:
                 total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("ParticleMassMsun")[0], op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles

diff -r 31ed51b8ccb0f32a4f66bc542e48a9a623d8850a -r 52cc6544d609a36cf117a5bde14206e7ced903da yt/analysis_modules/halo_finding/hop/hop_hop.c
--- a/yt/analysis_modules/halo_finding/hop/hop_hop.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_hop.c
@@ -443,7 +443,7 @@
 	    /* Else, this slot was full, go to the next one */
 	    hp++;
 	    if (hp>=smx->hash+smx->nHashLength) hp = smx->hash;
-	    if (++count>1000) {
+	    if (++count>1000000) {
 		fprintf(stderr,"Hash Table is too full.\n");
 		exit(1);
 	    }

diff -r 31ed51b8ccb0f32a4f66bc542e48a9a623d8850a -r 52cc6544d609a36cf117a5bde14206e7ced903da yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -109,16 +109,19 @@
         """
         # Pick out the stars.
         if self.mode == 'data_source':
-            ct = self._data_source["creation_time"]
+            ct = self._data_source["stars","particle_age"]
+            if ct == None :
+                print 'data source must have particle_age!'
+                sys.exit(1)
             ct_stars = ct[ct > 0]
-            mass_stars = self._data_source["ParticleMassMsun"][ct > 0]
+            mass_stars = self._data_source["stars", "ParticleMassMsun"][ct > 0]
         elif self.mode == 'provided':
             ct_stars = self.star_creation_time
             mass_stars = self.star_mass
         # Find the oldest stars in units of code time.
         tmin= min(ct_stars)
         # Multiply the end to prevent numerical issues.
-        self.time_bins = np.linspace(tmin*0.99, self._pf.current_time,
+        self.time_bins = np.linspace(tmin*1.01, self._pf.current_time,
             num = self.bin_count + 1)
         # Figure out which bins the stars go into.
         inds = np.digitize(ct_stars, self.time_bins) - 1
@@ -131,7 +134,7 @@
         for index in xrange(self.bin_count):
             self.cum_mass_bins[index+1] += self.cum_mass_bins[index]
         # We will want the time taken between bins.
-        self.time_bins_dt = self.time_bins[1:] - self.time_bins[:-1]
+        self.time_bins_dt = self.time_bins[:-1] - self.time_bins[1:]
     
     def attach_arrays(self):
         """
@@ -147,7 +150,7 @@
                 vol = ds.volume('mpc')
         elif self.mode == 'provided':
             vol = self.volume
-        tc = self._pf["Time"]
+        tc = self._pf["Time"] #time to seconds?
         self.time = []
         self.lookback_time = []
         self.redshift = []

diff -r 31ed51b8ccb0f32a4f66bc542e48a9a623d8850a -r 52cc6544d609a36cf117a5bde14206e7ced903da yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -432,8 +432,6 @@
     _sort_by = None
     _selector = None
     _current_chunk = None
-    size = None
-    shape = None
 
     def __init__(self, *args, **kwargs):
         super(YTSelectionContainer, self).__init__(*args, **kwargs)
@@ -551,16 +549,10 @@
         # There are several items that need to be swapped out
         # field_data, size, shape
         old_field_data, self.field_data = self.field_data, YTFieldData()
-        old_size, self.size = self.size, chunk.data_size
         old_chunk, self._current_chunk = self._current_chunk, chunk
         old_locked, self._locked = self._locked, False
-        if not self._spatial:
-            self.shape = (self.size,)
         yield
         self.field_data = old_field_data
-        self.size = old_size
-        if not self._spatial:
-            self.shape = (old_size,)
         self._current_chunk = old_chunk
         self._locked = old_locked
 

diff -r 31ed51b8ccb0f32a4f66bc542e48a9a623d8850a -r 52cc6544d609a36cf117a5bde14206e7ced903da yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -96,7 +96,7 @@
           display_field = False)
 
 def _Ones(field, data):
-    return np.ones(data.shape, dtype='float64')
+    return np.ones(data.ActiveDimensions, dtype='float64')
 add_field("Ones", function=_Ones,
           projection_conversion="unitary",
           display_field = False)

diff -r 31ed51b8ccb0f32a4f66bc542e48a9a623d8850a -r 52cc6544d609a36cf117a5bde14206e7ced903da yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -341,7 +341,7 @@
                             if selected_mass[ispec] :
                                 count = len(data[selected_mass[ispec]])
                                 data[selected_mass[ispec]].resize(count+1)
-                                data[selected_mass[ispec]][count] = self.parameters["particle_species_mass"]
+                                data[selected_mass[ispec]][count] = self.parameters["particle_species_mass"][0]
                         
                     status = artio_particle_read_species_end( self.handle )
                     check_artio_status(status)

diff -r 31ed51b8ccb0f32a4f66bc542e48a9a623d8850a -r 52cc6544d609a36cf117a5bde14206e7ced903da yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -32,7 +32,7 @@
     artio_is_valid, artio_fileset
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
-from .fields import ARTIOFieldInfo, KnownARTIOFields
+from .fields import ARTIOFieldInfo, KnownARTIOFields, b2t
 
 from yt.funcs import *
 from yt.geometry.geometry_handler import \
@@ -145,7 +145,7 @@
         """
         Returns (in code units) the smallest cell size in the simulation.
         """
-        return (self.parameter_file.domain_width/(2**self.max_level)).min()
+        return  1.0/(2**self.max_level)
 
     def convert(self, unit):
         return self.parameter_file.conversion_factors[unit]
@@ -391,7 +391,7 @@
             list(set(art_to_yt[s] for s in
                      self.artio_parameters["particle_species_labels"])))
 
-        self.current_time = self.artio_parameters["tl"][0]
+        self.current_time = b2t(self.artio_parameters["tl"][0])
 
         # detect cosmology
         if "abox" in self.artio_parameters:

diff -r 31ed51b8ccb0f32a4f66bc542e48a9a623d8850a -r 52cc6544d609a36cf117a5bde14206e7ced903da yt/frontends/artio/fields.py
--- a/yt/frontends/artio/fields.py
+++ b/yt/frontends/artio/fields.py
@@ -295,12 +295,12 @@
 
 #add_artio_field("creation_time", function=NullFunc, particle_type=True)
 def _particle_age(field, data):
-    pa = b2t(data['creation_time'])
+    pa = b2t(data['stars','creation_time'])
 #    tr = np.zeros(pa.shape,dtype='float')-1.0
 #    tr[pa>0] = pa[pa>0]
     tr = pa
     return tr
-add_field("particle_age", function=_particle_age, units=r"\rm{s}",
+add_field(("stars","particle_age"), function=_particle_age, units=r"\rm{s}",
           particle_type=True)
 
 
@@ -416,10 +416,10 @@
 
 def b2t(tb, n=1e2, logger=None, **kwargs):
     tb = np.array(tb)
-    if isinstance(tb, 1.1):
+    if len(np.atleast_1d(tb)) == 1: 
         return a2t(b2a(tb))
     if tb.shape == ():
-        return a2t(b2a(tb))
+        return None 
     if len(tb) < n:
         n = len(tb)
     age_min = a2t(b2a(tb.max(), **kwargs), **kwargs)
@@ -434,7 +434,7 @@
     ages = np.array(ages)
     fb2t = np.interp(tb, tbs, ages)
     #fb2t = interp1d(tbs,ages)
-    return fb2t
+    return fb2t*1e9*31556926
 
 
 def spread_ages(ages, logger=None, spread=.0e7*365*24*3600):

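The sfr_spectrum changes carried along in this merge are, at heart, a mass histogram
over star creation times. A minimal NumPy sketch of the digitize-and-accumulate
pattern, using conventional positive bin widths (names are illustrative, not the
module's API):

    import numpy as np

    def bin_star_masses(creation_time, mass, n_bins, t_end):
        # Bin edges span the oldest star through the current time.
        bins = np.linspace(creation_time.min(), t_end, n_bins + 1)
        inds = np.clip(np.digitize(creation_time, bins) - 1, 0, n_bins - 1)
        mass_bins = np.bincount(inds, weights=mass, minlength=n_bins)
        cum_mass = np.cumsum(mass_bins)   # cumulative stellar mass per bin
        dt = np.diff(bins)                # positive widths: bins[1:] - bins[:-1]
        return bins, mass_bins, cum_mass, dt
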

https://bitbucket.org/yt_analysis/yt/commits/a23b3276b87d/
Changeset:   a23b3276b87d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-14 19:16:29
Summary:     Starting process of removing global mesh.

Note that this turns off the global mesh for particle codes, which we may revert:
they do in fact have a global mesh, so this change may not be applicable to them.
It also sets up the proposed change to on-demand iteration over coordinates.
Affected #:  3 files

diff -r 52cc6544d609a36cf117a5bde14206e7ced903da -r a23b3276b87d63bf7d4f39965fc645f97f22355e yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -281,8 +281,9 @@
             chunk_fields.append(self.weight_field)
         tree = self._get_tree(len(fields))
         # We do this once
-        for chunk in self.data_source.chunks(None, "io"):
-            self._initialize_chunk(chunk, tree)
+        if self.pf.h._global_mesh:
+            for chunk in self.data_source.chunks(None, "io"):
+                self._initialize_chunk(chunk, tree)
         # This needs to be parallel_objects-ified
         for chunk in parallel_objects(self.data_source.chunks(
                 chunk_fields, "io")): 

diff -r 52cc6544d609a36cf117a5bde14206e7ced903da -r a23b3276b87d63bf7d4f39965fc645f97f22355e yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -44,6 +44,7 @@
     ParallelAnalysisInterface, parallel_splitter
 
 class GeometryHandler(ParallelAnalysisInterface):
+    _global_mesh = True
 
     def __init__(self, pf, data_style):
         ParallelAnalysisInterface.__init__(self)
@@ -449,7 +450,7 @@
 
 class YTDataChunk(object):
 
-    def __init__(self, dobj, chunk_type, objs, data_size, field_type = None):
+    def __init__(self, dobj, chunk_type, objs, data_size = None, field_type = None):
         self.dobj = dobj
         self.chunk_type = chunk_type
         self.objs = objs
@@ -462,9 +463,23 @@
             self._data_size = self._data_size(self.dobj, self.objs)
         return self._data_size
 
+    def _accumulate_values(self, method):
+        # We call this generically.  It's somewhat slower, since we're doing
+        # costly getattr functions, but this allows us to generalize.
+        mname = "select_%s" % method
+        arrs = []
+        for obj in self.objs:
+            f = getattr(obj, mname)
+            arrs.append(f(self.dobj))
+        arrs = np.concatenate(arrs)
+        self._data_size = arrs.shape[0]
+        return arrs
+
     _fcoords = None
     @property
     def fcoords(self):
+        if self.data_size is None:
+            self._fcoords = self._accumulate_values("fcoords")
         if self._fcoords is not None: return self._fcoords
         ci = np.empty((self.data_size, 3), dtype='float64')
         self._fcoords = ci
@@ -480,6 +495,8 @@
     _icoords = None
     @property
     def icoords(self):
+        if self.data_size is None:
+            self._icoords = self._accumulate_values("icoords")
         if self._icoords is not None: return self._icoords
         ci = np.empty((self.data_size, 3), dtype='int64')
         self._icoords = ci
@@ -495,6 +512,8 @@
     _fwidth = None
     @property
     def fwidth(self):
+        if self.data_size is None:
+            self._fwidth = self._accumulate_values("fwidth")
         if self._fwidth is not None: return self._fwidth
         ci = np.empty((self.data_size, 3), dtype='float64')
         self._fwidth = ci
@@ -510,6 +529,8 @@
     _ires = None
     @property
     def ires(self):
+        if self.data_size is None:
+            self._ires = self._accumulate_values("ires")
         if self._ires is not None: return self._ires
         ci = np.empty(self.data_size, dtype='int64')
         self._ires = ci

diff -r 52cc6544d609a36cf117a5bde14206e7ced903da -r a23b3276b87d63bf7d4f39965fc645f97f22355e yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -46,6 +46,7 @@
 from yt.data_objects.data_containers import data_object_registry
 
 class ParticleGeometryHandler(GeometryHandler):
+    _global_mesh = False
 
     def __init__(self, pf, data_style):
         self.data_style = data_style
@@ -144,7 +145,7 @@
 
     def _chunk_all(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        yield YTDataChunk(dobj, "all", oobjs, dobj.size)
+        yield YTDataChunk(dobj, "all", oobjs, None)
 
     def _chunk_spatial(self, dobj, ngz, sort = None):
         sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
@@ -160,5 +161,5 @@
     def _chunk_io(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], -1)
+            yield YTDataChunk(dobj, "io", [subset], None)
 

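The YTDataChunk change above makes data_size optional: when it is None, the first
access to a coordinate property concatenates the per-object selections and records
the size as a side effect. A minimal sketch of that lazy pattern, with illustrative
names and a simplified select_* signature:

    import numpy as np

    class LazyChunk(object):
        def __init__(self, objs):
            self.objs = objs
            self._data_size = None
            self._fcoords = None

        def _accumulate_values(self, method):
            # Generic, slightly slower path: gather each object's selection.
            arrs = [getattr(obj, "select_" + method)() for obj in self.objs]
            arrs = np.concatenate(arrs)
            self._data_size = arrs.shape[0]  # size discovered on demand
            return arrs

        @property
        def fcoords(self):
            if self._fcoords is None:
                self._fcoords = self._accumulate_values("fcoords")
            return self._fcoords
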

https://bitbucket.org/yt_analysis/yt/commits/777c8f708205/
Changeset:   777c8f708205
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-14 19:54:24
Summary:     Splitting up the oct_container module, starting work on particle selection.
Affected #:  8 files

diff -r a23b3276b87d63bf7d4f39965fc645f97f22355e -r 777c8f7082052eb202f9bd61e119c0520e98b19a yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -243,7 +243,7 @@
         return rv
 
     def _generate_spatial_fluid(self, field, ngz):
-        rv = np.empty(self.size, dtype="float64")
+        rv = np.empty(self.ires.size, dtype="float64")
         ind = 0
         if ngz == 0:
             for io_chunk in self.chunks([], "io"):

diff -r a23b3276b87d63bf7d4f39965fc645f97f22355e -r 777c8f7082052eb202f9bd61e119c0520e98b19a yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -59,6 +59,10 @@
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, NullFunc
 
+class ParticleOctreeSubset(object):
+    def __init__(self):
+        pass
+
 class ParticleFile(object):
     def __init__(self, pf, io, filename, file_id):
         self.pf = pf

diff -r a23b3276b87d63bf7d4f39965fc645f97f22355e -r 777c8f7082052eb202f9bd61e119c0520e98b19a yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -34,7 +34,7 @@
 from yt.utilities.fortran_utils import read_record
 from yt.utilities.lib.geometry_utils import get_morton_indices
 
-from yt.geometry.oct_container import _ORDER_MAX, ParticleRegions
+from yt.geometry.oct_container import _ORDER_MAX
 
 _vector_fields = ("Coordinates", "Velocity", "Velocities")
 

diff -r a23b3276b87d63bf7d4f39965fc645f97f22355e -r 777c8f7082052eb202f9bd61e119c0520e98b19a yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -26,6 +26,8 @@
 cimport numpy as np
 from fp_utils cimport *
 
+cdef int ORDER_MAX=20
+
 cdef struct Oct
 cdef struct Oct:
     np.int64_t file_ind     # index with respect to the order in which it was

diff -r a23b3276b87d63bf7d4f39965fc645f97f22355e -r 777c8f7082052eb202f9bd61e119c0520e98b19a yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -29,12 +29,11 @@
 from libc.math cimport floor
 cimport numpy as np
 import numpy as np
-from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+from oct_container cimport Oct, OctAllocationContainer, \
+    OctreeContainer, ORDER_MAX
 from selection_routines cimport SelectorObject
 cimport cython
 
-DEF ORDER_MAX=20
-
 _ORDER_MAX = ORDER_MAX
 
 cdef extern from "stdlib.h":
@@ -818,534 +817,3 @@
         else: return 0
     elif o1.domain > o2.domain: return 1
 
-cdef class ParticleOctreeContainer(OctreeContainer):
-    cdef Oct** oct_list
-    #The starting oct index of each domain
-    cdef np.int64_t *dom_offsets 
-    cdef public int max_level
-    #How many particles do we keep before refining
-    cdef public int n_ref
-
-    def allocate_root(self):
-        cdef int i, j, k
-        cdef Oct *cur
-        for i in range(self.nn[0]):
-            for j in range(self.nn[1]):
-                for k in range(self.nn[2]):
-                    cur = self.allocate_oct()
-                    cur.level = 0
-                    cur.pos[0] = i
-                    cur.pos[1] = j
-                    cur.pos[2] = k
-                    cur.parent = NULL
-                    self.root_mesh[i][j][k] = cur
-
-    def __dealloc__(self):
-        #Call the freemem ops on every oct
-        #of the root mesh recursively
-        cdef i, j, k
-        for i in range(self.nn[0]):
-            for j in range(self.nn[1]):
-                for k in range(self.nn[2]):
-                    self.visit_free(self.root_mesh[i][j][k])
-        free(self.oct_list)
-        free(self.dom_offsets)
-
-    cdef void visit_free(self, Oct *o):
-        #Free the memory for this oct recursively
-        cdef int i, j, k
-        for i in range(2):
-            for j in range(2):
-                for k in range(2):
-                    if o.children[i][j][k] == NULL: continue
-                    self.visit_free(o.children[i][j][k])
-        free(o)
-
-    def clear_fileind(self):
-        cdef i, j, k
-        for i in range(self.nn[0]):
-            for j in range(self.nn[1]):
-                for k in range(self.nn[2]):
-                    self.visit_clear(self.root_mesh[i][j][k])
-
-    cdef void visit_clear(self, Oct *o):
-        #Clear the file_ind of this oct recursively
-        cdef int i, j, k
-        o.file_ind = 0
-        for i in range(2):
-            for j in range(2):
-                for k in range(2):
-                    if o.children[i][j][k] == NULL: continue
-                    self.visit_clear(o.children[i][j][k])
-
-    def __iter__(self):
-        #Get the next oct, will traverse domains
-        #Note that oct containers can be sorted 
-        #so that consecutive octs are on the same domain
-        cdef int oi
-        cdef Oct *o
-        for oi in range(self.nocts):
-            o = self.oct_list[oi]
-            yield (o.file_ind, o.domain_ind, o.domain)
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def icoords(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
-        #Return the integer positions of the cells
-        #Limited to this domain and within the mask
-        #Positions are binary; aside from the root mesh
-        #to each digit we just add a << 1 and a 0 or 1 
-        #for each child recursively
-        cdef np.ndarray[np.int64_t, ndim=2] coords
-        coords = np.empty((cell_count, 3), dtype="int64")
-        cdef int oi, i, ci, ii
-        ci = 0
-        for oi in range(self.nocts):
-            o = self.oct_list[oi]
-            if o.domain != domain_id: continue
-            for i in range(2):
-                for j in range(2):
-                    for k in range(2):
-                        ii = ((k*2)+j)*2+i
-                        if mask[oi, ii] == 1:
-                            coords[ci, 0] = (o.pos[0] << 1) + i
-                            coords[ci, 1] = (o.pos[1] << 1) + j
-                            coords[ci, 2] = (o.pos[2] << 1) + k
-                            ci += 1
-        return coords
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def ires(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
-        #Return the 'resolution' of each cell; i.e. the level
-        cdef np.ndarray[np.int64_t, ndim=1] res
-        res = np.empty(cell_count, dtype="int64")
-        cdef int oi, i, ci
-        ci = 0
-        for oi in range(self.nocts):
-            o = self.oct_list[oi]
-            if o.domain != domain_id: continue
-            for i in range(8):
-                if mask[oi, i] == 1:
-                    res[ci] = o.level
-                    ci += 1
-        return res
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def fcoords(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
-        #Return the floating point unitary position of every cell
-        cdef np.ndarray[np.float64_t, ndim=2] coords
-        coords = np.empty((cell_count, 3), dtype="float64")
-        cdef int oi, i, ci
-        cdef np.float64_t base_dx[3], dx[3], pos[3]
-        for i in range(3):
-            # This is the base_dx, but not the base distance from the center
-            # position.  Note that the positions will also all be offset by
-            # dx/2.0.  This is also for *oct grids*, not cells.
-            base_dx[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
-        ci = 0
-        cdef int proc
-        for oi in range(self.nocts):
-            proc = 0
-            for i in range(8):
-                if mask[oi, i] == 1:
-                    proc = 1
-                    break
-            if proc == 0: continue
-            o = self.oct_list[oi]
-            if o.domain != domain_id: continue
-            for i in range(3):
-                # This gives the *grid* width for this level
-                dx[i] = base_dx[i] / (1 << o.level)
-                # o.pos is the *grid* index, so pos[i] is the center of the
-                # first cell in the grid
-                pos[i] = self.DLE[i] + o.pos[i]*dx[i] + dx[i]/4.0
-                dx[i] = dx[i] / 2.0 # This is now the *offset* 
-            for i in range(2):
-                for j in range(2):
-                    for k in range(2):
-                        ii = ((k*2)+j)*2+i
-                        if mask[oi, ii] == 0: continue
-                        coords[ci, 0] = pos[0] + dx[0] * i
-                        coords[ci, 1] = pos[1] + dx[1] * j
-                        coords[ci, 2] = pos[2] + dx[2] * k
-                        ci += 1
-        return coords
-
-    def allocate_domains(self, domain_counts):
-        pass
-
-    def finalize(self):
-        #This will sort the octs in the oct list
-        #so that domains appear consecutively
-        #And then find the oct index/offset for
-        #every domain
-        cdef int max_level = 0
-        self.oct_list = <Oct**> malloc(sizeof(Oct*)*self.nocts)
-        cdef np.int64_t i = 0, lpos = 0
-        self.max_level = max_level
-        cdef int cur_dom = -1
-        # We always need at least 2, and if max_domain is 0, we need 3.
-        for i in range(self.nn[0]):
-            for j in range(self.nn[1]):
-                for k in range(self.nn[2]):
-                    self.visit_assign(self.root_mesh[i][j][k], &lpos)
-        assert(lpos == self.nocts)
-        for i in range(self.nocts):
-            self.oct_list[i].domain_ind = i
-            self.oct_list[i].file_ind = -1
-
-    cdef visit_assign(self, Oct *o, np.int64_t *lpos):
-        cdef int i, j, k
-        self.oct_list[lpos[0]] = o
-        lpos[0] += 1
-        for i in range(2):
-            for j in range(2):
-                for k in range(2):
-                    if o.children[i][j][k] != NULL:
-                        self.visit_assign(o.children[i][j][k], lpos)
-        return
-
-    cdef np.int64_t get_domain_offset(self, int domain_id):
-        return self.dom_offsets[domain_id + 1]
-
-    cdef Oct* allocate_oct(self):
-        #Allocate the memory, set to NULL or -1
-        #We reserve space for n_ref particles, but keep
-        #track of how many are used with np initially 0
-        self.nocts += 1
-        cdef Oct *my_oct = <Oct*> malloc(sizeof(Oct))
-        cdef int i, j, k
-        my_oct.domain = -1
-        my_oct.file_ind = 0
-        my_oct.domain_ind = self.nocts - 1
-        my_oct.pos[0] = my_oct.pos[1] = my_oct.pos[2] = -1
-        my_oct.level = -1
-        for i in range(2):
-            for j in range(2):
-                for k in range(2):
-                    my_oct.children[i][j][k] = NULL
-        my_oct.parent = NULL
-        return my_oct
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def count_levels(self, int max_level, int domain_id,
-                     np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
-        cdef np.ndarray[np.int64_t, ndim=1] level_count
-        cdef Oct *o
-        cdef int oi, i
-        level_count = np.zeros(max_level+1, 'int64')
-        cdef np.int64_t ndo, doff
-        ndo = self.dom_offsets[domain_id + 2] \
-            - self.dom_offsets[domain_id + 1]
-        doff = self.dom_offsets[domain_id + 1]
-        for oi in range(ndo):
-            o = self.oct_list[oi + doff]
-            for i in range(8):
-                if mask[o.domain_ind, i] == 0: continue
-                level_count[o.level] += 1
-        return level_count
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def add(self, np.ndarray[np.uint64_t, ndim=1] indices):
-        #Add this particle to the root oct
-        #Then if that oct has children, add it to them recursively
-        #If the child needs to be refined because of max particles, do so
-        cdef np.int64_t no = indices.shape[0], p, index
-        cdef int i, level, ind[3]
-        if self.root_mesh[0][0][0] == NULL: self.allocate_root()
-        cdef np.uint64_t *data = <np.uint64_t *> indices.data
-        for p in range(no):
-            # We have morton indices, which means we choose left and right by
-            # looking at (ORDER_MAX - level) & with the values 1, 2, 4.
-            level = 0
-            index = indices[p]
-            for i in range(3):
-                ind[i] = (index >> ((ORDER_MAX - level)*3 + (2 - i))) & 1
-            cur = self.root_mesh[ind[0]][ind[1]][ind[2]]
-            if cur == NULL:
-                raise RuntimeError
-            while (cur.file_ind + 1) > self.n_ref:
-                if level >= ORDER_MAX: break # Just dump it here.
-                level += 1
-                for i in range(3):
-                    ind[i] = (index >> ((ORDER_MAX - level)*3 + (2 - i))) & 1
-                if cur.children[ind[0]][ind[1]][ind[2]] == NULL:
-                    cur = self.refine_oct(cur, index)
-                    self.filter_particles(cur, data, p)
-                else:
-                    cur = cur.children[ind[0]][ind[1]][ind[2]]
-            cur.file_ind += 1
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef Oct *refine_oct(self, Oct *o, np.uint64_t index):
-        #Allocate and initialize child octs
-        #Attach particles to child octs
-        #Remove particles from this oct entirely
-        cdef int i, j, k, m, n, ind[3]
-        cdef Oct *noct
-        cdef np.uint64_t prefix1, prefix2
-        for i in range(2):
-            for j in range(2):
-                for k in range(2):
-                    noct = self.allocate_oct()
-                    noct.domain = o.domain
-                    noct.file_ind = 0
-                    noct.level = o.level + 1
-                    noct.pos[0] = (o.pos[0] << 1) + i
-                    noct.pos[1] = (o.pos[1] << 1) + j
-                    noct.pos[2] = (o.pos[2] << 1) + k
-                    noct.parent = o
-                    o.children[i][j][k] = noct
-        o.file_ind = self.n_ref + 1
-        for i in range(3):
-            ind[i] = (index >> ((ORDER_MAX - (o.level + 1))*3 + (2 - i))) & 1
-        noct = o.children[ind[0]][ind[1]][ind[2]]
-        return noct
-
-    cdef void filter_particles(self, Oct *o, np.uint64_t *data, np.int64_t p):
-        # Now we look at the last nref particles to decide where they go.
-        cdef int n = imin(p, self.n_ref)
-        cdef np.uint64_t *arr = data + imax(p - self.n_ref, 0)
-        # Now we figure out our prefix, which is the oct address at this level.
-        # As long as we're actually in Morton order, we do not need to worry
-        # about *any* of the other children of the oct.
-        prefix1 = data[p] >> (ORDER_MAX - o.level)*3
-        for i in range(n):
-            prefix2 = arr[i] >> (ORDER_MAX - o.level)*3
-            if (prefix1 == prefix2):
-                o.file_ind += 1
-        #print ind[0], ind[1], ind[2], o.file_ind, o.level
-
-    def recursively_count(self):
-        #Visit every cell, accumulate the # of cells per level
-        cdef int i, j, k
-        cdef np.int64_t counts[128]
-        for i in range(128): counts[i] = 0
-        for i in range(self.nn[0]):
-            for j in range(self.nn[1]):
-                for k in range(self.nn[2]):
-                    if self.root_mesh[i][j][k] != NULL:
-                        self.visit(self.root_mesh[i][j][k], counts)
-        level_counts = {}
-        for i in range(128):
-            if counts[i] == 0: break
-            level_counts[i] = counts[i]
-        return level_counts
-        
-    cdef visit(self, Oct *o, np.int64_t *counts, level = 0):
-        cdef int i, j, k
-        counts[level] += 1
-        for i in range(2):
-            for j in range(2):
-                for k in range(2):
-                    if o.children[i][j][k] != NULL:
-                        self.visit(o.children[i][j][k], counts, level + 1)
-        return
-
-    def domain_identify(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
-        #Return an array of length # of domains
-        #Every element is True if there is at least one
-        #fully refined *cell* in that domain that isn't masked out
-        cdef int i, oi, m
-        cdef Oct *o
-        cdef np.ndarray[np.uint8_t, ndim=1, cast=True] dmask
-        dmask = np.zeros(self.max_domain+1, dtype='uint8')
-        for oi in range(self.nocts):
-            m = 0
-            o = self.oct_list[oi]
-            #if o.sd.np <= 0 or o.domain == -1: continue
-            for i in range(8):
-                if mask[oi, i] == 1:
-                    m = 1
-                    break
-            if m == 0: continue
-            dmask[o.domain] = 1
-        return dmask.astype("bool")
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def count_cells(self, SelectorObject selector,
-              np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
-        #Count how many cells per level there are
-        cdef int i, j, k, oi
-        # pos here is CELL center, not OCT center.
-        cdef np.float64_t pos[3]
-        cdef int n = mask.shape[0]
-        cdef int eterm[3]
-        cdef np.ndarray[np.int64_t, ndim=1] count
-        count = np.zeros(self.max_domain + 1, 'int64')
-        for oi in range(n):
-            o = self.oct_list[oi]
-            if o.domain == -1: continue
-            for i in range(8):
-                count[o.domain] += mask[oi,i]
-        return count
-
-    def domain_and(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                   int domain_id):
-        cdef np.int64_t i, oi, n, use
-        cdef Oct *o
-        cdef np.ndarray[np.uint8_t, ndim=2] m2 = \
-                np.zeros((mask.shape[0], 8), 'uint8')
-        n = mask.shape[0]
-        for oi in range(n):
-            o = self.oct_list[oi]
-            if o.domain != domain_id: continue
-            use = 0
-            for i in range(8):
-                m2[o.domain_ind, i] = mask[o.domain_ind, i]
-        return m2
-
-    def domain_mask(self,
-                    # mask is the base selector's *global* mask
-                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                    int domain_id):
-        # What distinguishes this one from domain_and is that we have a mask,
-        # which covers the whole domain, but our output will only be of a much
-        # smaller subset of octs that belong to a given domain *and* the mask.
-        # Note also that typically when something calls domain_and, they will 
-        # use a logical_any along the oct axis.  Here we don't do that.
-        # Note also that we change the shape of the returned array.
-        cdef np.int64_t i, j, k, oi, n, nm, use
-        cdef Oct *o
-        n = mask.shape[0]
-        nm = 0
-        # This could perhaps be faster if we 
-        for oi in range(n):
-            o = self.oct_list[oi]
-            if o.domain != domain_id: continue
-            use = 0
-            for i in range(8):
-                if mask[o.domain_ind, i] == 1: use = 1
-            nm += use
-        cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
-                np.zeros((2, 2, 2, nm), 'uint8')
-        nm = 0
-        for oi in range(n):
-            o = self.oct_list[oi]
-            if o.domain != domain_id: continue
-            use = 0
-            for i in range(2):
-                for j in range(2):
-                    for k in range(2):
-                        ii = ((k*2)+j)*2+i
-                        if mask[o.domain_ind, ii] == 0: continue
-                        use = m2[i, j, k, nm] = 1
-            nm += use
-        return m2.astype("bool")
-
-    def domain_ind(self,
-                    # mask is the base selector's *global* mask
-                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                    int domain_id):
-        # Here we once again do something similar to the other functions.  We
-        # need a set of indices into the final reduced, masked values.  The
-        # indices will be domain.n long, and will be of type int64.  This way,
-        # we can get the Oct through a .get() call, then use Oct.file_ind as an
-        # index into this newly created array, then finally use the returned
-        # index into the domain subset array for deposition.
-        cdef np.int64_t i, j, k, oi, noct, n, nm, use, offset
-        cdef Oct *o
-        # For particle octrees, domain 0 is special and means non-leaf nodes.
-        offset = self.dom_offsets[domain_id + 1]
-        noct = self.dom_offsets[domain_id + 2] - offset
-        cdef np.ndarray[np.int64_t, ndim=1] ind = np.zeros(noct, 'int64')
-        nm = 0
-        for oi in range(noct):
-            ind[oi] = -1
-            o = self.oct_list[oi + offset]
-            use = 0
-            for i in range(8):
-                if mask[o.domain_ind, i] == 1: use = 1
-            if use == 1:
-                ind[oi] = nm
-            nm += use
-        return ind
-
-cdef class ParticleRegions:
-    cdef np.float64_t left_edge[3]
-    cdef np.float64_t dds[3]
-    cdef np.float64_t idds[3]
-    cdef np.int32_t dims[3]
-    cdef public int nfiles
-    cdef public object masks
-
-    def __init__(self, left_edge, right_edge, dims, nfiles):
-        cdef int i
-        self.nfiles = nfiles
-        for i in range(3):
-            self.left_edge[i] = left_edge[i]
-            self.dims[i] = dims[i]
-            self.dds[i] = (right_edge[i] - left_edge[i])/dims[i]
-            self.idds[i] = 1.0/self.dds[i]
-        # We use 64-bit masks
-        self.masks = []
-        for i in range(nfiles/64 + 1):
-            self.masks.append(np.zeros(dims, dtype="uint64"))
-
-    def add_data_file(self, np.ndarray[np.float64_t, ndim=2] pos, int file_id):
-        cdef np.int64_t no = pos.shape[0]
-        cdef np.int64_t p
-        cdef int ind[3], i
-        cdef np.ndarray[np.uint64_t, ndim=3] mask
-        mask = self.masks[file_id/64]
-        val = 1 << (file_id - (file_id/64)*64)
-        for p in range(no):
-            # Now we locate the particle
-            for i in range(3):
-                ind[i] = <int> ((pos[p, i] - self.left_edge[i])*self.idds[i])
-            mask[ind[0],ind[1],ind[2]] |= val
-
-    def identify_data_files(self, SelectorObject selector):
-        # This is relatively cheap to iterate over.
-        cdef int i, j, k, n
-        cdef np.uint64_t fmask, offset
-        cdef np.float64_t LE[3], RE[3]
-        cdef np.ndarray[np.uint64_t, ndim=3] mask
-        files = []
-        for n in range(len(self.masks)):
-            fmask = 0
-            mask = self.masks[n]
-            LE[0] = self.left_edge[0]
-            RE[0] = LE[0] + self.dds[0]
-            for i in range(self.dims[0]):
-                LE[1] = self.left_edge[1]
-                RE[1] = LE[1] + self.dds[1]
-                for j in range(self.dims[1]):
-                    LE[2] = self.left_edge[2]
-                    RE[2] = LE[2] + self.dds[2]
-                    for k in range(self.dims[2]):
-                        if selector.select_grid(LE, RE, 0) == 1:
-                            fmask |= mask[i,j,k]
-                        LE[2] += self.dds[2]
-                        RE[2] += self.dds[2]
-                    LE[1] += self.dds[1]
-                    RE[1] += self.dds[1]
-                LE[0] += self.dds[0]
-                RE[0] += self.dds[0]
-            # Now we iterate through...
-            for i in range(64):
-                if ((fmask >> i) & 1) == 1:
-                    files.append(i + n * 64)
-        return files

diff -r a23b3276b87d63bf7d4f39965fc645f97f22355e -r 777c8f7082052eb202f9bd61e119c0520e98b19a yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -36,7 +36,7 @@
 from yt.config import ytcfg
 from yt.data_objects.field_info_container import NullFunc
 from yt.geometry.geometry_handler import GeometryHandler, YTDataChunk
-from yt.geometry.oct_container import \
+from yt.geometry.particle_oct_container import \
     ParticleOctreeContainer, ParticleRegions
 from yt.utilities.definitions import MAXLEVEL
 from yt.utilities.io_handler import io_registry
@@ -145,7 +145,7 @@
 
     def _chunk_all(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        yield YTDataChunk(dobj, "all", oobjs, None)
+        yield ParticleDataChunk(self.oct_handler, self.regions, dobj, "all", oobjs, None)
 
     def _chunk_spatial(self, dobj, ngz, sort = None):
         sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
@@ -156,10 +156,21 @@
                 g = og
             size = og.cell_count
             if size == 0: continue
-            yield YTDataChunk(dobj, "spatial", [g], size)
+            yield ParticleDataChunk(self.oct_handler, self.regions,
+                                    dobj, "spatial", [g], size)
 
     def _chunk_io(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], None)
+            yield ParticleDataChunk(self.oct_handler, self.regions,
+                                    dobj, "io", [subset], None)
 
+class ParticleDataChunk(YTDataChunk):
+    def __init__(self, oct_handler, regions, *args, **kwargs):
+        self.oct_handler = oct_handler
+        self.regions = regions
+        super(ParticleDataChunk, self).__init__(*args, **kwargs)
+
+    def _accumulate_values(self, method):
+        mfunc = getattr(self.oct_handler, "select_%s" % method)
+        return mfunc(self.dobj)
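
A hedged usage sketch of the dispatch above: asking a ParticleDataChunk
to accumulate a method name resolves to a select_* method on the oct
handler (assuming such methods exist there):

    chunk = ParticleDataChunk(oct_handler, regions, dobj, "io", [subset], None)
    fcoords = chunk._accumulate_values("fcoords")
    # equivalent to oct_handler.select_fcoords(dobj)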

diff -r a23b3276b87d63bf7d4f39965fc645f97f22355e -r 777c8f7082052eb202f9bd61e119c0520e98b19a yt/geometry/particle_oct_container.pyx
--- /dev/null
+++ b/yt/geometry/particle_oct_container.pyx
@@ -0,0 +1,569 @@
+"""
+Oct container tuned for Particles
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Author: Christopher Moody <chris.e.moody at gmail.com>
+Affiliation: UC Santa Cruz
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from oct_container cimport OctreeContainer, Oct, OctInfo
+from libc.stdlib cimport malloc, free, qsort
+from libc.math cimport floor
+from fp_utils cimport *
+cimport numpy as np
+import numpy as np
+from oct_container cimport Oct, OctAllocationContainer, \
+    OctreeContainer, ORDER_MAX
+from selection_routines cimport SelectorObject
+cimport cython
+
+cdef class ParticleOctreeContainer(OctreeContainer):
+    cdef Oct** oct_list
+    #The starting oct index of each domain
+    cdef np.int64_t *dom_offsets 
+    cdef public int max_level
+    #How many particles do we keep before refining
+    cdef public int n_ref
+
+    def allocate_root(self):
+        cdef int i, j, k
+        cdef Oct *cur
+        for i in range(self.nn[0]):
+            for j in range(self.nn[1]):
+                for k in range(self.nn[2]):
+                    cur = self.allocate_oct()
+                    cur.level = 0
+                    cur.pos[0] = i
+                    cur.pos[1] = j
+                    cur.pos[2] = k
+                    cur.parent = NULL
+                    self.root_mesh[i][j][k] = cur
+
+    def __dealloc__(self):
+        #Call the freemem ops on every oct
+        #of the root mesh recursively
+        cdef i, j, k
+        for i in range(self.nn[0]):
+            for j in range(self.nn[1]):
+                for k in range(self.nn[2]):
+                    self.visit_free(self.root_mesh[i][j][k])
+        free(self.oct_list)
+        free(self.dom_offsets)
+
+    cdef void visit_free(self, Oct *o):
+        #Free the memory for this oct recursively
+        cdef int i, j, k
+        for i in range(2):
+            for j in range(2):
+                for k in range(2):
+                    if o.children[i][j][k] == NULL: continue
+                    self.visit_free(o.children[i][j][k])
+        free(o)
+
+    def clear_fileind(self):
+        cdef i, j, k
+        for i in range(self.nn[0]):
+            for j in range(self.nn[1]):
+                for k in range(self.nn[2]):
+                    self.visit_clear(self.root_mesh[i][j][k])
+
+    cdef void visit_clear(self, Oct *o):
+        #Clear the file_ind of this oct recursively
+        cdef int i, j, k
+        o.file_ind = 0
+        for i in range(2):
+            for j in range(2):
+                for k in range(2):
+                    if o.children[i][j][k] == NULL: continue
+                    self.visit_clear(o.children[i][j][k])
+
+    def __iter__(self):
+        #Get the next oct, will traverse domains
+        #Note that oct containers can be sorted 
+        #so that consecutive octs are on the same domain
+        cdef int oi
+        cdef Oct *o
+        for oi in range(self.nocts):
+            o = self.oct_list[oi]
+            yield (o.file_ind, o.domain_ind, o.domain)
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def icoords(self, int domain_id,
+                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                np.int64_t cell_count,
+                np.ndarray[np.int64_t, ndim=1] level_counts):
+        #Return the integer positions of the cells
+        #Limited to this domain and within the mask
+        #Positions are binary; aside from the root mesh
+        #to each digit we just add a << 1 and a 0 or 1 
+        #for each child recursively
+        cdef np.ndarray[np.int64_t, ndim=2] coords
+        coords = np.empty((cell_count, 3), dtype="int64")
+        cdef int oi, i, ci, ii
+        ci = 0
+        for oi in range(self.nocts):
+            o = self.oct_list[oi]
+            if o.domain != domain_id: continue
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        ii = ((k*2)+j)*2+i
+                        if mask[oi, ii] == 1:
+                            coords[ci, 0] = (o.pos[0] << 1) + i
+                            coords[ci, 1] = (o.pos[1] << 1) + j
+                            coords[ci, 2] = (o.pos[2] << 1) + k
+                            ci += 1
+        return coords
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def ires(self, int domain_id,
+                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                np.int64_t cell_count,
+                np.ndarray[np.int64_t, ndim=1] level_counts):
+        #Return the 'resolution' of each cell; i.e. the level
+        cdef np.ndarray[np.int64_t, ndim=1] res
+        res = np.empty(cell_count, dtype="int64")
+        cdef int oi, i, ci
+        ci = 0
+        for oi in range(self.nocts):
+            o = self.oct_list[oi]
+            if o.domain != domain_id: continue
+            for i in range(8):
+                if mask[oi, i] == 1:
+                    res[ci] = o.level
+                    ci += 1
+        return res
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def fcoords(self, int domain_id,
+                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                np.int64_t cell_count,
+                np.ndarray[np.int64_t, ndim=1] level_counts):
+        #Return the floating point unitary position of every cell
+        cdef np.ndarray[np.float64_t, ndim=2] coords
+        coords = np.empty((cell_count, 3), dtype="float64")
+        cdef int oi, i, ci
+        cdef np.float64_t base_dx[3], dx[3], pos[3]
+        for i in range(3):
+            # This is the base_dx, but not the base distance from the center
+            # position.  Note that the positions will also all be offset by
+            # dx/2.0.  This is also for *oct grids*, not cells.
+            base_dx[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
+        ci = 0
+        cdef int proc
+        for oi in range(self.nocts):
+            proc = 0
+            for i in range(8):
+                if mask[oi, i] == 1:
+                    proc = 1
+                    break
+            if proc == 0: continue
+            o = self.oct_list[oi]
+            if o.domain != domain_id: continue
+            for i in range(3):
+                # This gives the *grid* width for this level
+                dx[i] = base_dx[i] / (1 << o.level)
+                # o.pos is the *grid* index, so pos[i] is the center of the
+                # first cell in the grid
+                pos[i] = self.DLE[i] + o.pos[i]*dx[i] + dx[i]/4.0
+                dx[i] = dx[i] / 2.0 # This is now the *offset* 
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        ii = ((k*2)+j)*2+i
+                        if mask[oi, ii] == 0: continue
+                        coords[ci, 0] = pos[0] + dx[0] * i
+                        coords[ci, 1] = pos[1] + dx[1] * j
+                        coords[ci, 2] = pos[2] + dx[2] * k
+                        ci += 1
+        return coords
+
+    def allocate_domains(self, domain_counts):
+        pass
+
+    def finalize(self):
+        #This will sort the octs in the oct list
+        #so that domains appear consecutively
+        #And then find the oct index/offset for
+        #every domain
+        cdef int max_level = 0
+        self.oct_list = <Oct**> malloc(sizeof(Oct*)*self.nocts)
+        cdef np.int64_t i = 0, lpos = 0
+        self.max_level = max_level
+        cdef int cur_dom = -1
+        # We always need at least 2, and if max_domain is 0, we need 3.
+        for i in range(self.nn[0]):
+            for j in range(self.nn[1]):
+                for k in range(self.nn[2]):
+                    self.visit_assign(self.root_mesh[i][j][k], &lpos)
+        assert(lpos == self.nocts)
+        for i in range(self.nocts):
+            self.oct_list[i].domain_ind = i
+            self.oct_list[i].file_ind = -1
+
+    cdef visit_assign(self, Oct *o, np.int64_t *lpos):
+        cdef int i, j, k
+        self.oct_list[lpos[0]] = o
+        lpos[0] += 1
+        for i in range(2):
+            for j in range(2):
+                for k in range(2):
+                    if o.children[i][j][k] != NULL:
+                        self.visit_assign(o.children[i][j][k], lpos)
+        return
+
+    cdef np.int64_t get_domain_offset(self, int domain_id):
+        return self.dom_offsets[domain_id + 1]
+
+    cdef Oct* allocate_oct(self):
+        #Allocate the memory, set to NULL or -1
+        #We reserve space for n_ref particles, but keep
+        #track of how many are used with np initially 0
+        self.nocts += 1
+        cdef Oct *my_oct = <Oct*> malloc(sizeof(Oct))
+        cdef int i, j, k
+        my_oct.domain = -1
+        my_oct.file_ind = 0
+        my_oct.domain_ind = self.nocts - 1
+        my_oct.pos[0] = my_oct.pos[1] = my_oct.pos[2] = -1
+        my_oct.level = -1
+        for i in range(2):
+            for j in range(2):
+                for k in range(2):
+                    my_oct.children[i][j][k] = NULL
+        my_oct.parent = NULL
+        return my_oct
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def count_levels(self, int max_level, int domain_id,
+                     np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
+        cdef np.ndarray[np.int64_t, ndim=1] level_count
+        cdef Oct *o
+        cdef int oi, i
+        level_count = np.zeros(max_level+1, 'int64')
+        cdef np.int64_t ndo, doff
+        ndo = self.dom_offsets[domain_id + 2] \
+            - self.dom_offsets[domain_id + 1]
+        doff = self.dom_offsets[domain_id + 1]
+        for oi in range(ndo):
+            o = self.oct_list[oi + doff]
+            for i in range(8):
+                if mask[o.domain_ind, i] == 0: continue
+                level_count[o.level] += 1
+        return level_count
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def add(self, np.ndarray[np.uint64_t, ndim=1] indices):
+        #Add this particle to the root oct
+        #Then if that oct has children, add it to them recursively
+        #If the child needs to be refined because of max particles, do so
+        cdef np.int64_t no = indices.shape[0], p, index
+        cdef int i, level, ind[3]
+        if self.root_mesh[0][0][0] == NULL: self.allocate_root()
+        cdef np.uint64_t *data = <np.uint64_t *> indices.data
+        for p in range(no):
+            # We have morton indices, which means we choose left and right by
+            # looking at (ORDER_MAX - level) & with the values 1, 2, 4.
+            level = 0
+            index = indices[p]
+            for i in range(3):
+                ind[i] = (index >> ((ORDER_MAX - level)*3 + (2 - i))) & 1
+            cur = self.root_mesh[ind[0]][ind[1]][ind[2]]
+            if cur == NULL:
+                raise RuntimeError
+            while (cur.file_ind + 1) > self.n_ref:
+                if level >= ORDER_MAX: break # Just dump it here.
+                level += 1
+                for i in range(3):
+                    ind[i] = (index >> ((ORDER_MAX - level)*3 + (2 - i))) & 1
+                if cur.children[ind[0]][ind[1]][ind[2]] == NULL:
+                    cur = self.refine_oct(cur, index)
+                    self.filter_particles(cur, data, p)
+                else:
+                    cur = cur.children[ind[0]][ind[1]][ind[2]]
+            cur.file_ind += 1
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef Oct *refine_oct(self, Oct *o, np.uint64_t index):
+        #Allocate and initialize child octs
+        #Attach particles to child octs
+        #Remove particles from this oct entirely
+        cdef int i, j, k, m, n, ind[3]
+        cdef Oct *noct
+        cdef np.uint64_t prefix1, prefix2
+        for i in range(2):
+            for j in range(2):
+                for k in range(2):
+                    noct = self.allocate_oct()
+                    noct.domain = o.domain
+                    noct.file_ind = 0
+                    noct.level = o.level + 1
+                    noct.pos[0] = (o.pos[0] << 1) + i
+                    noct.pos[1] = (o.pos[1] << 1) + j
+                    noct.pos[2] = (o.pos[2] << 1) + k
+                    noct.parent = o
+                    o.children[i][j][k] = noct
+        o.file_ind = self.n_ref + 1
+        for i in range(3):
+            ind[i] = (index >> ((ORDER_MAX - (o.level + 1))*3 + (2 - i))) & 1
+        noct = o.children[ind[0]][ind[1]][ind[2]]
+        return noct
+
+    cdef void filter_particles(self, Oct *o, np.uint64_t *data, np.int64_t p):
+        # Now we look at the last nref particles to decide where they go.
+        cdef int n = imin(p, self.n_ref)
+        cdef np.uint64_t *arr = data + imax(p - self.n_ref, 0)
+        # Now we figure out our prefix, which is the oct address at this level.
+        # As long as we're actually in Morton order, we do not need to worry
+        # about *any* of the other children of the oct.
+        prefix1 = data[p] >> (ORDER_MAX - o.level)*3
+        for i in range(n):
+            prefix2 = arr[i] >> (ORDER_MAX - o.level)*3
+            if (prefix1 == prefix2):
+                o.file_ind += 1
+        #print ind[0], ind[1], ind[2], o.file_ind, o.level
+
+    def recursively_count(self):
+        #Visit every cell, accumulate the # of cells per level
+        cdef int i, j, k
+        cdef np.int64_t counts[128]
+        for i in range(128): counts[i] = 0
+        for i in range(self.nn[0]):
+            for j in range(self.nn[1]):
+                for k in range(self.nn[2]):
+                    if self.root_mesh[i][j][k] != NULL:
+                        self.visit(self.root_mesh[i][j][k], counts)
+        level_counts = {}
+        for i in range(128):
+            if counts[i] == 0: break
+            level_counts[i] = counts[i]
+        return level_counts
+        
+    cdef visit(self, Oct *o, np.int64_t *counts, level = 0):
+        cdef int i, j, k
+        counts[level] += 1
+        for i in range(2):
+            for j in range(2):
+                for k in range(2):
+                    if o.children[i][j][k] != NULL:
+                        self.visit(o.children[i][j][k], counts, level + 1)
+        return
+
+    def domain_identify(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
+        #Return an array of length # of domains
+        #Every element is True if there is at least one
+        #fully refined *cell* in that domain that isn't masked out
+        cdef int i, oi, m
+        cdef Oct *o
+        cdef np.ndarray[np.uint8_t, ndim=1, cast=True] dmask
+        dmask = np.zeros(self.max_domain+1, dtype='uint8')
+        for oi in range(self.nocts):
+            m = 0
+            o = self.oct_list[oi]
+            #if o.sd.np <= 0 or o.domain == -1: continue
+            for i in range(8):
+                if mask[oi, i] == 1:
+                    m = 1
+                    break
+            if m == 0: continue
+            dmask[o.domain] = 1
+        return dmask.astype("bool")
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def count_cells(self, SelectorObject selector,
+              np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
+        #Count how many cells per level there are
+        cdef int i, j, k, oi
+        # pos here is CELL center, not OCT center.
+        cdef np.float64_t pos[3]
+        cdef int n = mask.shape[0]
+        cdef int eterm[3]
+        cdef np.ndarray[np.int64_t, ndim=1] count
+        count = np.zeros(self.max_domain + 1, 'int64')
+        for oi in range(n):
+            o = self.oct_list[oi]
+            if o.domain == -1: continue
+            for i in range(8):
+                count[o.domain] += mask[oi,i]
+        return count
+
+    def domain_and(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                   int domain_id):
+        cdef np.int64_t i, oi, n, use
+        cdef Oct *o
+        cdef np.ndarray[np.uint8_t, ndim=2] m2 = \
+                np.zeros((mask.shape[0], 8), 'uint8')
+        n = mask.shape[0]
+        for oi in range(n):
+            o = self.oct_list[oi]
+            if o.domain != domain_id: continue
+            use = 0
+            for i in range(8):
+                m2[o.domain_ind, i] = mask[o.domain_ind, i]
+        return m2
+
+    def domain_mask(self,
+                    # mask is the base selector's *global* mask
+                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                    int domain_id):
+        # What distinguishes this one from domain_and is that we have a mask,
+        # which covers the whole domain, but our output will only be of a much
+        # smaller subset of octs that belong to a given domain *and* the mask.
+        # Note also that typically when something calls domain_and, they will 
+        # use a logical_any along the oct axis.  Here we don't do that.
+        # Note also that we change the shape of the returned array.
+        cdef np.int64_t i, j, k, oi, n, nm, use
+        cdef Oct *o
+        n = mask.shape[0]
+        nm = 0
+        # This could perhaps be faster if we 
+        for oi in range(n):
+            o = self.oct_list[oi]
+            if o.domain != domain_id: continue
+            use = 0
+            for i in range(8):
+                if mask[o.domain_ind, i] == 1: use = 1
+            nm += use
+        cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
+                np.zeros((2, 2, 2, nm), 'uint8')
+        nm = 0
+        for oi in range(n):
+            o = self.oct_list[oi]
+            if o.domain != domain_id: continue
+            use = 0
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        ii = ((k*2)+j)*2+i
+                        if mask[o.domain_ind, ii] == 0: continue
+                        use = m2[i, j, k, nm] = 1
+            nm += use
+        return m2.astype("bool")
+
+    def domain_ind(self,
+                    # mask is the base selector's *global* mask
+                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                    int domain_id):
+        # Here we once again do something similar to the other functions.  We
+        # need a set of indices into the final reduced, masked values.  The
+        # indices will be domain.n long, and will be of type int64.  This way,
+        # we can get the Oct through a .get() call, then use Oct.file_ind as an
+        # index into this newly created array, then finally use the returned
+        # index into the domain subset array for deposition.
+        cdef np.int64_t i, j, k, oi, noct, n, nm, use, offset
+        cdef Oct *o
+        # For particle octrees, domain 0 is special and means non-leaf nodes.
+        offset = self.dom_offsets[domain_id + 1]
+        noct = self.dom_offsets[domain_id + 2] - offset
+        cdef np.ndarray[np.int64_t, ndim=1] ind = np.zeros(noct, 'int64')
+        nm = 0
+        for oi in range(noct):
+            ind[oi] = -1
+            o = self.oct_list[oi + offset]
+            use = 0
+            for i in range(8):
+                if mask[o.domain_ind, i] == 1: use = 1
+            if use == 1:
+                ind[oi] = nm
+            nm += use
+        return ind
+
+cdef class ParticleRegions:
+    cdef np.float64_t left_edge[3]
+    cdef np.float64_t dds[3]
+    cdef np.float64_t idds[3]
+    cdef np.int32_t dims[3]
+    cdef public int nfiles
+    cdef public object masks
+
+    def __init__(self, left_edge, right_edge, dims, nfiles):
+        cdef int i
+        self.nfiles = nfiles
+        for i in range(3):
+            self.left_edge[i] = left_edge[i]
+            self.dims[i] = dims[i]
+            self.dds[i] = (right_edge[i] - left_edge[i])/dims[i]
+            self.idds[i] = 1.0/self.dds[i]
+        # We use 64-bit masks
+        self.masks = []
+        for i in range(nfiles/64 + 1):
+            self.masks.append(np.zeros(dims, dtype="uint64"))
+
+    def add_data_file(self, np.ndarray[np.float64_t, ndim=2] pos, int file_id):
+        cdef np.int64_t no = pos.shape[0]
+        cdef np.int64_t p
+        cdef int ind[3], i
+        cdef np.ndarray[np.uint64_t, ndim=3] mask
+        mask = self.masks[file_id/64]
+        val = 1 << (file_id - (file_id/64)*64)
+        for p in range(no):
+            # Now we locate the particle
+            for i in range(3):
+                ind[i] = <int> ((pos[p, i] - self.left_edge[i])*self.idds[i])
+            mask[ind[0],ind[1],ind[2]] |= val
+
+    def identify_data_files(self, SelectorObject selector):
+        # This is relatively cheap to iterate over.
+        cdef int i, j, k, n
+        cdef np.uint64_t fmask, offset
+        cdef np.float64_t LE[3], RE[3]
+        cdef np.ndarray[np.uint64_t, ndim=3] mask
+        files = []
+        for n in range(len(self.masks)):
+            fmask = 0
+            mask = self.masks[n]
+            LE[0] = self.left_edge[0]
+            RE[0] = LE[0] + self.dds[0]
+            for i in range(self.dims[0]):
+                LE[1] = self.left_edge[1]
+                RE[1] = LE[1] + self.dds[1]
+                for j in range(self.dims[1]):
+                    LE[2] = self.left_edge[2]
+                    RE[2] = LE[2] + self.dds[2]
+                    for k in range(self.dims[2]):
+                        if selector.select_grid(LE, RE, 0) == 1:
+                            fmask |= mask[i,j,k]
+                        LE[2] += self.dds[2]
+                        RE[2] += self.dds[2]
+                    LE[1] += self.dds[1]
+                    RE[1] += self.dds[1]
+                LE[0] += self.dds[0]
+                RE[0] += self.dds[0]
+            # Now we iterate through...
+            for i in range(64):
+                if ((fmask >> i) & 1) == 1:
+                    files.append(i + n * 64)
+        return files
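
The add() method above peels three bits per level off a Morton key to
pick one of the eight children at each step.  A pure-Python sketch of
the bit interleaving involved (illustrative only; yt builds these keys
with yt.utilities.lib.geometry_utils.get_morton_indices):

    ORDER_MAX = 20

    def morton_encode(ix, iy, iz, order=ORDER_MAX):
        # Interleave the bits of three integer coordinates.  Within each
        # bit triple, x takes the highest position, matching the
        # (2 - i) shift used in add() and refine_oct().
        key = 0
        for b in range(order):
            key |= ((ix >> b) & 1) << (3*b + 2)
            key |= ((iy >> b) & 1) << (3*b + 1)
            key |= ((iz >> b) & 1) << (3*b + 0)
        return key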

diff -r a23b3276b87d63bf7d4f39965fc645f97f22355e -r 777c8f7082052eb202f9bd61e119c0520e98b19a yt/geometry/setup.py
--- a/yt/geometry/setup.py
+++ b/yt/geometry/setup.py
@@ -14,6 +14,13 @@
                 depends=["yt/utilities/lib/fp_utils.pxd",
                          "yt/geometry/oct_container.pxd",
                          "yt/geometry/selection_routines.pxd"])
+    config.add_extension("particle_oct_container", 
+                ["yt/geometry/particle_oct_container.pyx"],
+                include_dirs=["yt/utilities/lib/"],
+                libraries=["m"],
+                depends=["yt/utilities/lib/fp_utils.pxd",
+                         "yt/geometry/oct_container.pxd",
+                         "yt/geometry/selection_routines.pxd"])
     config.add_extension("selection_routines", 
                 ["yt/geometry/selection_routines.pyx"],
                 include_dirs=["yt/utilities/lib/"],


https://bitbucket.org/yt_analysis/yt/commits/120a06e24932/
Changeset:   120a06e24932
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-14 20:25:46
Summary:     Adding get_smallest_dx() to particle geometry handler.
Affected #:  3 files

diff -r 777c8f7082052eb202f9bd61e119c0520e98b19a -r 120a06e24932cdf28c6538774a88d03924403107 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -26,7 +26,7 @@
 cimport numpy as np
 from fp_utils cimport *
 
-cdef int ORDER_MAX=20
+cdef int ORDER_MAX
 
 cdef struct Oct
 cdef struct Oct:

diff -r 777c8f7082052eb202f9bd61e119c0520e98b19a -r 120a06e24932cdf28c6538774a88d03924403107 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -34,6 +34,7 @@
 from selection_routines cimport SelectorObject
 cimport cython
 
+ORDER_MAX = 20
 _ORDER_MAX = ORDER_MAX
 
 cdef extern from "stdlib.h":
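
This completes a standard Cython split: the previous changeset's
initializer in the shared .pxd is replaced by a bare declaration, with
the single assignment living in the .pyx, so particle_oct_container can
cimport the same ORDER_MAX.  Schematically:

    # oct_container.pxd -- declaration only
    cdef int ORDER_MAX

    # oct_container.pyx -- one definition
    ORDER_MAX = 20

    # particle_oct_container.pyx -- shared via cimport
    from oct_container cimport ORDER_MAX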

diff -r 777c8f7082052eb202f9bd61e119c0520e98b19a -r 120a06e24932cdf28c6538774a88d03924403107 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -66,7 +66,10 @@
         """
         Returns (in code units) the smallest cell size in the simulation.
         """
-        raise NotImplementedError
+        dx = 1.0/(2**self.oct_handler.max_level)
+        dx *= (self.parameter_file.domain_right_edge -
+               self.parameter_file.domain_left_edge)
+        return dx.min()
 
     def convert(self, unit):
         return self.parameter_file.conversion_factors[unit]
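
A quick worked example of the new method, assuming a unit-cube domain
and an octree refined down to level 20:

    dx = (1.0 / 2**20) * (1.0 - 0.0)   # right edge minus left edge
    # dx == 9.5367431640625e-07 in code units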


https://bitbucket.org/yt_analysis/yt/commits/3c28775ee450/
Changeset:   3c28775ee450
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-14 20:30:26
Summary:     Adding an oct counting function.  This is just a stopgap for now, as we will
want to provide bounds or some method of cutting into sub-sequences.
Affected #:  2 files

diff -r 120a06e24932cdf28c6538774a88d03924403107 -r 3c28775ee450fe57f7a3c15d51f023bde3d186d8 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -35,6 +35,9 @@
                         np.float64_t pos[3], np.float64_t dds[3],
                         np.ndarray[np.uint8_t, ndim=2] mask,
                         int level = ?)
+    cdef void recursively_count_octs(self, Oct *root,
+                        np.float64_t pos[3], np.float64_t dds[3],
+                        int level, np.int64_t *count)
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],
                                np.int32_t level) nogil

diff -r 120a06e24932cdf28c6538774a88d03924403107 -r 3c28775ee450fe57f7a3c15d51f023bde3d186d8 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -226,6 +226,79 @@
                 spos[1] += sdds[1]
             spos[0] += sdds[0]
 
+    def count_octs(self, OctreeContainer octree):
+        cdef int i, j, k, n
+        cdef np.float64_t pos[3], dds[3]
+        cdef np.int64_t count = 0
+        # This dds is the oct-width
+        for i in range(3):
+            dds[i] = (octree.DRE[i] - octree.DLE[i]) / octree.nn[i]
+        # Pos is the center of the octs
+        pos[0] = octree.DLE[0] + dds[0]/2.0
+        for i in range(octree.nn[0]):
+            pos[1] = octree.DLE[1] + dds[1]/2.0
+            for j in range(octree.nn[1]):
+                pos[2] = octree.DLE[2] + dds[2]/2.0
+                for k in range(octree.nn[2]):
+                    if octree.root_mesh[i][j][k] == NULL: continue
+                    self.recursively_count_octs(
+                        octree.root_mesh[i][j][k],
+                        pos, dds, 0, &count) 
+                    pos[2] += dds[2]
+                pos[1] += dds[1]
+            pos[0] += dds[0]
+        return count
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void recursively_count_octs(self, Oct *root,
+                        np.float64_t pos[3], np.float64_t dds[3],
+                        int level, np.int64_t *count):
+        cdef np.float64_t LE[3], RE[3], sdds[3], spos[3]
+        cdef int i, j, k, res, ii
+        cdef Oct *ch
+        # Remember that pos is the *center* of the oct, and dds is the oct
+        # width.  So to get to the edges, we add/subtract half of dds.
+        for i in range(3):
+            # sdds is the cell width
+            sdds[i] = dds[i]/2.0
+            LE[i] = pos[i] - dds[i]/2.0
+            RE[i] = pos[i] + dds[i]/2.0
+        #print LE[0], RE[0], LE[1], RE[1], LE[2], RE[2]
+        res = self.select_grid(LE, RE, level)
+        cdef int eterm[3] 
+        eterm[0] = eterm[1] = eterm[2] = 0
+        cdef int next_level, this_level
+        # next_level: an int that says whether or not we can progress to children
+        # this_level: an int that says whether or not we can select from this
+        # level
+        next_level = this_level = 1
+        if level == self.max_level:
+            next_level = 0
+        if level < self.min_level or level > self.max_level:
+            this_level = 0
+        if res == 0 and this_level == 1:
+            return
+        # Now we visit all our children.  We subtract off sdds for the first
+        # pass because we center it on the first cell.
+        spos[0] = pos[0] - sdds[0]/2.0
+        for i in range(2):
+            spos[1] = pos[1] - sdds[1]/2.0
+            for j in range(2):
+                spos[2] = pos[2] - sdds[2]/2.0
+                for k in range(2):
+                    ii = ((k*2)+j)*2+i
+                    ch = root.children[i][j][k]
+                    if next_level == 1 and ch != NULL:
+                        self.recursively_count_octs(
+                            ch, spos, sdds, level + 1, count)
+                    elif this_level == 1:
+                        count[0] += self.select_cell(spos, sdds, eterm)
+                    spos[2] += sdds[2]
+                spos[1] += sdds[1]
+            spos[0] += sdds[0]
+
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],
                                np.int32_t level) nogil:
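
A hedged pure-Python sketch of the traversal in recursively_count_octs,
with simplified stand-ins for the Cython pieces (the min_level/max_level
window handling is elided here):

    def count_octs_py(oct, pos, dds, level, selector):
        # pos is the oct center and dds the oct width, as above.
        LE = [p - d/2.0 for p, d in zip(pos, dds)]
        RE = [p + d/2.0 for p, d in zip(pos, dds)]
        if not selector.select_grid(LE, RE, level):
            return 0
        half = [d/2.0 for d in dds]   # cell width at this level
        count = 0
        for i in range(2):
            for j in range(2):
                for k in range(2):
                    # Center of child cell (i, j, k).
                    spos = [pos[0] + (i - 0.5)*half[0],
                            pos[1] + (j - 0.5)*half[1],
                            pos[2] + (k - 0.5)*half[2]]
                    child = oct.children[i][j][k]
                    if child is not None:
                        count += count_octs_py(child, spos, half,
                                               level + 1, selector)
                    else:
                        count += selector.select_cell(spos, half)
        return count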


https://bitbucket.org/yt_analysis/yt/commits/f73078da0b91/
Changeset:   f73078da0b91
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-14 20:58:19
Summary:     Fix up oct icoords.  Will soon change to a visit function.
Affected #:  2 files

diff -r 3c28775ee450fe57f7a3c15d51f023bde3d186d8 -r f73078da0b9129fd7c8e9a599fedffd59808fdca yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -120,7 +120,6 @@
         # Now we add them all at once.
         self.oct_handler.add(morton)
 
-
     def _detect_fields(self):
         # TODO: Add additional fields
         pfl = []

diff -r 3c28775ee450fe57f7a3c15d51f023bde3d186d8 -r f73078da0b9129fd7c8e9a599fedffd59808fdca yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -106,34 +106,41 @@
             o = self.oct_list[oi]
             yield (o.file_ind, o.domain_ind, o.domain)
 
-    @cython.boundscheck(False)
+    #@cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def icoords(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
-        #Return the integer positions of the cells
-        #Limited to this domain and within the mask
-        #Positions are binary; aside from the root mesh
-        #to each digit we just add a << 1 and a 0 or 1 
-        #for each child recursively
+    def icoords(self, SelectorObject selector, np.uint64_t num_cells = -1):
+        if num_cells == -1:
+            num_cells = selector.count_octs(self)
         cdef np.ndarray[np.int64_t, ndim=2] coords
-        coords = np.empty((cell_count, 3), dtype="int64")
-        cdef int oi, i, ci, ii
+        coords = np.empty((num_cells, 3), dtype="int64")
+        cdef int oi, i, ci, ii, eterm[3]
         ci = 0
+        cdef np.float64_t left_edge[3], right_edge[3], dds[3]
         for oi in range(self.nocts):
             o = self.oct_list[oi]
-            if o.domain != domain_id: continue
+            #if o.domain != domain_id: continue
+            if o.children[0][0][0] != NULL: continue
+            self.oct_bounds(o, left_edge, dds)
+            for i in range(3): right_edge[i] = left_edge[i] + dds[i]
+            if not selector.select_grid(left_edge, right_edge, o.level):
+                continue
+            for i in range(3): # Set up cell info
+                dds[i] /= 2.0
+            right_edge[0] = left_edge[0] + dds[0]/2.0
             for i in range(2):
+                right_edge[1] = left_edge[1] + dds[1]/2.0
                 for j in range(2):
+                    right_edge[2] = left_edge[2] + dds[2]/2.0
                     for k in range(2):
-                        ii = ((k*2)+j)*2+i
-                        if mask[oi, ii] == 1:
+                        if selector.select_cell(right_edge, dds, eterm) == 1:
                             coords[ci, 0] = (o.pos[0] << 1) + i
                             coords[ci, 1] = (o.pos[1] << 1) + j
                             coords[ci, 2] = (o.pos[2] << 1) + k
                             ci += 1
+                        right_edge[2] += dds[2]
+                    right_edge[1] += dds[1]
+                right_edge[0] += dds[0]
         return coords
 
     @cython.boundscheck(False)
@@ -214,7 +221,6 @@
         cdef int max_level = 0
         self.oct_list = <Oct**> malloc(sizeof(Oct*)*self.nocts)
         cdef np.int64_t i = 0, lpos = 0
-        self.max_level = max_level
         cdef int cur_dom = -1
         # We always need at least 2, and if max_domain is 0, we need 3.
         for i in range(self.nn[0]):
@@ -225,6 +231,8 @@
         for i in range(self.nocts):
             self.oct_list[i].domain_ind = i
             self.oct_list[i].file_ind = -1
+            max_level = imax(max_level, self.oct_list[i].level)
+        self.max_level = max_level
 
     cdef visit_assign(self, Oct *o, np.int64_t *lpos):
         cdef int i, j, k
@@ -400,26 +408,6 @@
             dmask[o.domain] = 1
         return dmask.astype("bool")
 
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def count_cells(self, SelectorObject selector,
-              np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
-        #Count how many cells per level there are
-        cdef int i, j, k, oi
-        # pos here is CELL center, not OCT center.
-        cdef np.float64_t pos[3]
-        cdef int n = mask.shape[0]
-        cdef int eterm[3]
-        cdef np.ndarray[np.int64_t, ndim=1] count
-        count = np.zeros(self.max_domain + 1, 'int64')
-        for oi in range(n):
-            o = self.oct_list[oi]
-            if o.domain == -1: continue
-            for i in range(8):
-                count[o.domain] += mask[oi,i]
-        return count
-
     def domain_and(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
                    int domain_id):
         cdef np.int64_t i, oi, n, use

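The icoords rewrite above keeps the binary position encoding described in the deleted comments: each refinement doubles the integer grid, so a child cell's index is the oct's position shifted left by one plus a 0/1 offset. A sketch, assuming oct_pos is the oct's integer position on its own level:

    def cell_icoords(oct_pos, i, j, k):
        # Refinement doubles the grid; shift left and add the 0/1 child
        # offset to land on the cell-resolution integer grid.
        x, y, z = oct_pos
        return ((x << 1) + i, (y << 1) + j, (z << 1) + k)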

https://bitbucket.org/yt_analysis/yt/commits/fc0e492c0265/
Changeset:   fc0e492c0265
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-14 21:09:03
Summary:     Convert generic counting function into visitor function.
Affected #:  2 files

diff -r f73078da0b9129fd7c8e9a599fedffd59808fdca -r fc0e492c026567482fc6d20f786bfbe0255db89e yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -26,6 +26,11 @@
 cimport numpy as np
 
 cdef struct Oct
+cdef struct OctVisitorData:
+    np.uint64_t index
+    void *array
+
+ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor)
 
 cdef class SelectorObject:
     cdef public np.int32_t min_level
@@ -35,9 +40,11 @@
                         np.float64_t pos[3], np.float64_t dds[3],
                         np.ndarray[np.uint8_t, ndim=2] mask,
                         int level = ?)
-    cdef void recursively_count_octs(self, Oct *root,
+    cdef void recursively_visit_octs(self, Oct *root,
                         np.float64_t pos[3], np.float64_t dds[3],
-                        int level, np.int64_t *count)
+                        int level,
+                        oct_visitor_function *func,
+                        OctVisitorData *data)
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],
                                np.int32_t level) nogil
@@ -46,3 +53,4 @@
     cdef void set_bounds(self,
                          np.float64_t left_edge[3], np.float64_t right_edge[3],
                          np.float64_t dds[3], int ind[3][2], int *check)
+

diff -r f73078da0b9129fd7c8e9a599fedffd59808fdca -r fc0e492c026567482fc6d20f786bfbe0255db89e yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -113,7 +113,10 @@
     else:
         raise RuntimeError
 
-# Inclined Box
+# Now our visitor functions
+
+cdef void visit_count_octs(Oct *o, OctVisitorData *data):
+    data.index += 1
 
 cdef class SelectorObject:
 
@@ -229,11 +232,12 @@
     def count_octs(self, OctreeContainer octree):
         cdef int i, j, k, n
         cdef np.float64_t pos[3], dds[3]
-        cdef np.int64_t count = 0
         # This dds is the oct-width
         for i in range(3):
             dds[i] = (octree.DRE[i] - octree.DLE[i]) / octree.nn[i]
         # Pos is the center of the octs
+        cdef OctVisitorData data
+        data.index = 0
         pos[0] = octree.DLE[0] + dds[0]/2.0
         for i in range(octree.nn[0]):
             pos[1] = octree.DLE[1] + dds[1]/2.0
@@ -241,20 +245,22 @@
                 pos[2] = octree.DLE[2] + dds[2]/2.0
                 for k in range(octree.nn[2]):
                     if octree.root_mesh[i][j][k] == NULL: continue
-                    self.recursively_count_octs(
+                    self.recursively_visit_octs(
                         octree.root_mesh[i][j][k],
-                        pos, dds, 0, &count) 
+                        pos, dds, 0, visit_count_octs, &data)
                     pos[2] += dds[2]
                 pos[1] += dds[1]
             pos[0] += dds[0]
-        return count
+        return data.index
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef void recursively_count_octs(self, Oct *root,
+    cdef void recursively_visit_octs(self, Oct *root,
                         np.float64_t pos[3], np.float64_t dds[3],
-                        int level, np.int64_t *count):
+                        int level, 
+                        oct_visitor_function *func,
+                        OctVisitorData *data):
         cdef np.float64_t LE[3], RE[3], sdds[3], spos[3]
         cdef int i, j, k, res, ii
         cdef Oct *ch
@@ -291,10 +297,11 @@
                     ii = ((k*2)+j)*2+i
                     ch = root.children[i][j][k]
                     if next_level == 1 and ch != NULL:
-                        self.recursively_count_octs(
-                            ch, spos, sdds, level + 1, count)
-                    elif this_level == 1:
-                        count[0] += self.select_cell(spos, sdds, eterm)
+                        self.recursively_visit_octs(
+                            ch, spos, sdds, level + 1, func, data)
+                    elif this_level == 1 and self.select_cell(
+                                    spos, sdds, eterm):
+                        func(ch, data)
                     spos[2] += sdds[2]
                 spos[1] += sdds[1]
             spos[0] += sdds[0]

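The point of this changeset is decoupling: the octree walk no longer knows it is counting, and instead hands every selected cell to an oct_visitor_function along with an OctVisitorData scratch struct. A rough pure-Python analogue of the pattern, with illustrative names rather than the yt API:

    class OctVisitorData(object):
        # Stand-in for the Cython struct: a running index plus an
        # opaque payload for visitors that fill output arrays.
        def __init__(self):
            self.index = 0
            self.array = None

    def visit_count_octs(o, data):
        # Counting is the degenerate visitor: bump the index per cell.
        data.index += 1

    def visit_selected(octs, selector, func, data):
        # Selection logic lives in the walk; per-cell work lives in
        # func, so a new query needs a new visitor, not a new traversal.
        for o in octs:
            if selector.select_cell(o):
                func(o, data)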

https://bitbucket.org/yt_analysis/yt/commits/c29ef8057b0d/
Changeset:   c29ef8057b0d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-14 21:33:06
Summary:     Convert icoords to visitor function.
Affected #:  5 files

diff -r fc0e492c026567482fc6d20f786bfbe0255db89e -r c29ef8057b0def006df90b6d629a2d27cc0baca8 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -25,6 +25,8 @@
 
 cimport numpy as np
 from fp_utils cimport *
+from selection_routines cimport SelectorObject, \
+    OctVisitorData, oct_visitor_function
 
 cdef int ORDER_MAX
 
@@ -67,8 +69,15 @@
     # This function must return the offset from global-to-local domains; i.e.,
     # OctAllocationContainer.offset if such a thing exists.
     cdef np.int64_t get_domain_offset(self, int domain_id)
+    cdef void visit_all_octs(self, SelectorObject selector,
+                        oct_visitor_function *func,
+                        OctVisitorData *data)
 
 cdef class RAMSESOctreeContainer(OctreeContainer):
     cdef OctAllocationContainer **domains
     cdef Oct *next_root(self, int domain_id, int ind[3])
     cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
+
+# Now some visitor functions
+
+cdef void visit_icoords_octs(Oct *o, OctVisitorData *data)

diff -r fc0e492c026567482fc6d20f786bfbe0255db89e -r c29ef8057b0def006df90b6d629a2d27cc0baca8 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -31,7 +31,8 @@
 import numpy as np
 from oct_container cimport Oct, OctAllocationContainer, \
     OctreeContainer, ORDER_MAX
-from selection_routines cimport SelectorObject
+from selection_routines cimport SelectorObject, \
+    OctVisitorData, oct_visitor_function
 cimport cython
 
 ORDER_MAX = 20
@@ -137,6 +138,29 @@
                 yield (this.file_ind, this.domain_ind, this.domain)
             cur = cur.next
 
+    cdef void visit_all_octs(self, SelectorObject selector,
+                        oct_visitor_function *func,
+                        OctVisitorData *data):
+        cdef int i, j, k, n
+        cdef np.float64_t pos[3], dds[3]
+        # This dds is the oct-width
+        for i in range(3):
+            dds[i] = (self.DRE[i] - self.DLE[i]) / self.nn[i]
+        # Pos is the center of the octs
+        pos[0] = self.DLE[0] + dds[0]/2.0
+        for i in range(self.nn[0]):
+            pos[1] = self.DLE[1] + dds[1]/2.0
+            for j in range(self.nn[1]):
+                pos[2] = self.DLE[2] + dds[2]/2.0
+                for k in range(self.nn[2]):
+                    if self.root_mesh[i][j][k] == NULL: continue
+                    selector.recursively_visit_octs(
+                        self.root_mesh[i][j][k],
+                        pos, dds, 0, func, data)
+                    pos[2] += dds[2]
+                pos[1] += dds[1]
+            pos[0] += dds[0]
+
     cdef void oct_bounds(self, Oct *o, np.float64_t *corner, np.float64_t *size):
         cdef int i
         for i in range(3):
@@ -804,17 +828,11 @@
                             local_filled += 1
         return local_filled
 
+# Now some visitor functions
 
-cdef int compare_octs(void *vo1, void *vo2) nogil:
-    #This only compares if the octs live on the
-    #domain, not if they are actually equal
-    #Used to sort octs into consecutive domains
-    cdef Oct *o1 = (<Oct**> vo1)[0]
-    cdef Oct *o2 = (<Oct**> vo2)[0]
-    if o1.domain < o2.domain: return -1
-    elif o1.domain == o2.domain:
-        if o1.level < o2.level: return -1
-        if o1.level > o2.level: return 1
-        else: return 0
-    elif o1.domain > o2.domain: return 1
-
+cdef void visit_icoords_octs(Oct *o, OctVisitorData *data):
+    cdef np.int64_t *coords = <np.int64_t*> data.array
+    cdef int i
+    for i in range(3):
+        coords[data.index * 3 + i] = (o.pos[i] << 1) + data.ind[i]
+    data.index += 1

diff -r fc0e492c026567482fc6d20f786bfbe0255db89e -r c29ef8057b0def006df90b6d629a2d27cc0baca8 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -25,15 +25,15 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-from oct_container cimport OctreeContainer, Oct, OctInfo
+from oct_container cimport OctreeContainer, Oct, OctInfo, \
+    visit_icoords_octs, ORDER_MAX
 from libc.stdlib cimport malloc, free, qsort
 from libc.math cimport floor
 from fp_utils cimport *
 cimport numpy as np
 import numpy as np
-from oct_container cimport Oct, OctAllocationContainer, \
-    OctreeContainer, ORDER_MAX
-from selection_routines cimport SelectorObject
+from selection_routines cimport SelectorObject, \
+    OctVisitorData, oct_visitor_function
 cimport cython
 
 cdef class ParticleOctreeContainer(OctreeContainer):
@@ -114,33 +114,11 @@
             num_cells = selector.count_octs(self)
         cdef np.ndarray[np.int64_t, ndim=2] coords
         coords = np.empty((num_cells, 3), dtype="int64")
-        cdef int oi, i, ci, ii, eterm[3]
-        ci = 0
+        cdef OctVisitorData data
+        data.array = <void *> coords.data
+        data.index = 0
         cdef np.float64_t left_edge[3], right_edge[3], dds[3]
-        for oi in range(self.nocts):
-            o = self.oct_list[oi]
-            #if o.domain != domain_id: continue
-            if o.children[0][0][0] != NULL: continue
-            self.oct_bounds(o, left_edge, dds)
-            for i in range(3): right_edge[i] = left_edge[i] + dds[i]
-            if not selector.select_grid(left_edge, right_edge, o.level):
-                continue
-            for i in range(3): # Set up cell info
-                dds[i] /= 2.0
-            right_edge[0] = left_edge[0] + dds[0]/2.0
-            for i in range(2):
-                right_edge[1] = left_edge[1] + dds[1]/2.0
-                for j in range(2):
-                    right_edge[2] = left_edge[2] + dds[2]/2.0
-                    for k in range(2):
-                        if selector.select_cell(right_edge, dds, eterm) == 1:
-                            coords[ci, 0] = (o.pos[0] << 1) + i
-                            coords[ci, 1] = (o.pos[1] << 1) + j
-                            coords[ci, 2] = (o.pos[2] << 1) + k
-                            ci += 1
-                        right_edge[2] += dds[2]
-                    right_edge[1] += dds[1]
-                right_edge[0] += dds[0]
+        self.visit_all_octs(selector, visit_icoords_octs, &data)
         return coords
 
     @cython.boundscheck(False)

diff -r fc0e492c026567482fc6d20f786bfbe0255db89e -r c29ef8057b0def006df90b6d629a2d27cc0baca8 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -28,6 +28,7 @@
 cdef struct Oct
 cdef struct OctVisitorData:
     np.uint64_t index
+    int ind[3]
     void *array
 
 ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor)

diff -r fc0e492c026567482fc6d20f786bfbe0255db89e -r c29ef8057b0def006df90b6d629a2d27cc0baca8 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -230,27 +230,9 @@
             spos[0] += sdds[0]
 
     def count_octs(self, OctreeContainer octree):
-        cdef int i, j, k, n
-        cdef np.float64_t pos[3], dds[3]
-        # This dds is the oct-width
-        for i in range(3):
-            dds[i] = (octree.DRE[i] - octree.DLE[i]) / octree.nn[i]
-        # Pos is the center of the octs
         cdef OctVisitorData data
         data.index = 0
-        pos[0] = octree.DLE[0] + dds[0]/2.0
-        for i in range(octree.nn[0]):
-            pos[1] = octree.DLE[1] + dds[1]/2.0
-            for j in range(octree.nn[1]):
-                pos[2] = octree.DLE[2] + dds[2]/2.0
-                for k in range(octree.nn[2]):
-                    if octree.root_mesh[i][j][k] == NULL: continue
-                    self.recursively_visit_octs(
-                        octree.root_mesh[i][j][k],
-                        pos, dds, 0, visit_count_octs, &data)
-                    pos[2] += dds[2]
-                pos[1] += dds[1]
-            pos[0] += dds[0]
+        octree.visit_all_octs(self, visit_count_octs, &data)
         return data.index
 
     @cython.boundscheck(False)
@@ -301,7 +283,10 @@
                             ch, spos, sdds, level + 1, func, data)
                     elif this_level == 1 and self.select_cell(
                                     spos, sdds, eterm):
-                        func(ch, data)
+                        data.ind[0] = i
+                        data.ind[1] = j
+                        data.ind[2] = k
+                        func(root, data)
                     spos[2] += sdds[2]
                 spos[1] += sdds[1]
             spos[0] += sdds[0]

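One subtlety in the selection_routines.pyx hunk above: leaf cells have no Oct of their own, so the walk stores the child offsets (i, j, k) in data.ind and passes the parent oct to the visitor (func(root, data)). A sketch of how visit_icoords_octs consumes that, assuming data.array is a flat view of the (N, 3) int64 output array:

    def visit_icoords(o, data):
        # o is the parent oct; data.ind carries the cell's (i, j, k)
        # offsets, set by the traversal just before this call.
        for i in range(3):
            data.array[data.index * 3 + i] = (o.pos[i] << 1) + data.ind[i]
        data.index += 1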

https://bitbucket.org/yt_analysis/yt/commits/8181e28e4064/
Changeset:   8181e28e4064
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-14 22:05:21
Summary:     Converted ires and fcoords to visitor functions.
Affected #:  5 files

diff -r c29ef8057b0def006df90b6d629a2d27cc0baca8 -r 8181e28e4064a64a688e345e469f842d6423e646 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -106,6 +106,7 @@
             regions.add_data_file(pos, data_file.file_id)
             pos = np.floor((pos - DLE)/dx).astype("uint64")
             morton[ind:ind+pos.shape[0]] = get_morton_indices(pos)
+            ind += pos.shape[0]
         f.close()
         return morton
 

diff -r c29ef8057b0def006df90b6d629a2d27cc0baca8 -r 8181e28e4064a64a688e345e469f842d6423e646 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -81,3 +81,5 @@
 # Now some visitor functions
 
 cdef void visit_icoords_octs(Oct *o, OctVisitorData *data)
+cdef void visit_ires_octs(Oct *o, OctVisitorData *data)
+cdef void visit_fcoords_octs(Oct *o, OctVisitorData *data)

diff -r c29ef8057b0def006df90b6d629a2d27cc0baca8 -r 8181e28e4064a64a688e345e469f842d6423e646 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -836,3 +836,21 @@
     for i in range(3):
         coords[data.index * 3 + i] = (o.pos[i] << 1) + data.ind[i]
     data.index += 1
+
+cdef void visit_ires_octs(Oct *o, OctVisitorData *data):
+    cdef np.int64_t *ires = <np.int64_t*> data.array
+    ires[data.index] = o.level
+    data.index += 1
+
+cdef void visit_fcoords_octs(Oct *o, OctVisitorData *data):
+    # Note that this does not actually give the correct floating point
+    # coordinates.  It gives them in some unit system where the domain is 1.0
+    # in all directions, and assumes that they will be scaled later.
+    cdef np.float64_t *fcoords = <np.float64_t*> data.array
+    cdef int i
+    cdef np.float64_t c, dx 
+    dx = 1.0 / (2 << o.level)
+    for i in range(3):
+        c = <np.float64_t> ((o.pos[i] << 1 ) + data.ind[i]) 
+        fcoords[data.index * 3 + i] = (c + 0.5) * dx
+    data.index += 1

diff -r c29ef8057b0def006df90b6d629a2d27cc0baca8 -r 8181e28e4064a64a688e345e469f842d6423e646 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -116,6 +116,7 @@
             npart = sum(data_file.total_particles.values())
             morton[ind:ind + npart] = \
                 self.io._initialize_index(data_file, self.regions)
+            ind += npart
         morton.sort()
         # Now we add them all at once.
         self.oct_handler.add(morton)

diff -r c29ef8057b0def006df90b6d629a2d27cc0baca8 -r 8181e28e4064a64a688e345e469f842d6423e646 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -25,8 +25,8 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-from oct_container cimport OctreeContainer, Oct, OctInfo, \
-    visit_icoords_octs, ORDER_MAX
+from oct_container cimport OctreeContainer, Oct, OctInfo, ORDER_MAX, \
+    visit_icoords_octs, visit_ires_octs, visit_fcoords_octs
 from libc.stdlib cimport malloc, free, qsort
 from libc.math cimport floor
 from fp_utils cimport *
@@ -117,75 +117,43 @@
         cdef OctVisitorData data
         data.array = <void *> coords.data
         data.index = 0
-        cdef np.float64_t left_edge[3], right_edge[3], dds[3]
         self.visit_all_octs(selector, visit_icoords_octs, &data)
         return coords
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def ires(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
+    def ires(self, SelectorObject selector, np.uint64_t num_cells = -1):
+        if num_cells == -1:
+            num_cells = selector.count_octs(self)
         #Return the 'resolution' of each cell; ie the level
         cdef np.ndarray[np.int64_t, ndim=1] res
-        res = np.empty(cell_count, dtype="int64")
-        cdef int oi, i, ci
-        ci = 0
-        for oi in range(self.nocts):
-            o = self.oct_list[oi]
-            if o.domain != domain_id: continue
-            for i in range(8):
-                if mask[oi, i] == 1:
-                    res[ci] = o.level
-                    ci += 1
+        res = np.empty(num_cells, dtype="int64")
+        cdef OctVisitorData data
+        data.array = <void *> res.data
+        data.index = 0
+        self.visit_all_octs(selector, visit_ires_octs, &data)
         return res
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def fcoords(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
+    def fcoords(self, SelectorObject selector, np.uint64_t num_cells = -1):
+        if num_cells == -1:
+            num_cells = selector.count_octs(self)
         #Return the floating point unitary position of every cell
         cdef np.ndarray[np.float64_t, ndim=2] coords
-        coords = np.empty((cell_count, 3), dtype="float64")
-        cdef int oi, i, ci
-        cdef np.float64_t base_dx[3], dx[3], pos[3]
+        coords = np.empty((num_cells, 3), dtype="float64")
+        cdef OctVisitorData data
+        data.array = <void *> coords.data
+        data.index = 0
+        self.visit_all_octs(selector, visit_fcoords_octs, &data)
+        cdef int i
+        cdef np.float64_t base_dx
         for i in range(3):
-            # This is the base_dx, but not the base distance from the center
-            # position.  Note that the positions will also all be offset by
-            # dx/2.0.  This is also for *oct grids*, not cells.
-            base_dx[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
-        ci = 0
-        cdef int proc
-        for oi in range(self.nocts):
-            proc = 0
-            for i in range(8):
-                if mask[oi, i] == 1:
-                    proc = 1
-                    break
-            if proc == 0: continue
-            o = self.oct_list[oi]
-            if o.domain != domain_id: continue
-            for i in range(3):
-                # This gives the *grid* width for this level
-                dx[i] = base_dx[i] / (1 << o.level)
-                # o.pos is the *grid* index, so pos[i] is the center of the
-                # first cell in the grid
-                pos[i] = self.DLE[i] + o.pos[i]*dx[i] + dx[i]/4.0
-                dx[i] = dx[i] / 2.0 # This is now the *offset* 
-            for i in range(2):
-                for j in range(2):
-                    for k in range(2):
-                        ii = ((k*2)+j)*2+i
-                        if mask[oi, ii] == 0: continue
-                        coords[ci, 0] = pos[0] + dx[0] * i
-                        coords[ci, 1] = pos[1] + dx[1] * j
-                        coords[ci, 2] = pos[2] + dx[2] * k
-                        ci += 1
+            base_dx = (self.DRE[i] - self.DLE[i])/self.nn[i]
+            coords[:,i] *= base_dx
+            coords[:,i] += self.DLE[i]
         return coords
 
     def allocate_domains(self, domain_counts):

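As the comment in visit_fcoords_octs notes, the visitor emits positions in a scaled intermediate system (dx = 1 / 2**(level + 1), which in effect gives each root oct unit width), leaving the conversion to domain units for later; the tail of fcoords above performs that conversion as a per-axis scale and shift. Roughly:

    import numpy as np

    def rescale_fcoords(coords, DLE, DRE, nn):
        # coords: unitary positions from the visitor; DLE/DRE: domain
        # edges; nn: number of root octs per axis.
        for i in range(3):
            base_dx = (DRE[i] - DLE[i]) / nn[i]  # width of one root oct
            coords[:, i] = coords[:, i] * base_dx + DLE[i]
        return coords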

https://bitbucket.org/yt_analysis/yt/commits/e4f6828237f1/
Changeset:   e4f6828237f1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-14 22:09:34
Summary:     Remove currently-unused functions.
Affected #:  1 file

diff -r 8181e28e4064a64a688e345e469f842d6423e646 -r e4f6828237f1dc49c3331b7415e4df0099d89af7 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -216,26 +216,6 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def count_levels(self, int max_level, int domain_id,
-                     np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
-        cdef np.ndarray[np.int64_t, ndim=1] level_count
-        cdef Oct *o
-        cdef int oi, i
-        level_count = np.zeros(max_level+1, 'int64')
-        cdef np.int64_t ndo, doff
-        ndo = self.dom_offsets[domain_id + 2] \
-            - self.dom_offsets[domain_id + 1]
-        doff = self.dom_offsets[domain_id + 1]
-        for oi in range(ndo):
-            o = self.oct_list[oi + doff]
-            for i in range(8):
-                if mask[o.domain_ind, i] == 0: continue
-                level_count[o.level] += 1
-        return level_count
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
     def add(self, np.ndarray[np.uint64_t, ndim=1] indices):
         #Add this particle to the root oct
         #Then if that oct has children, add it to them recursively
@@ -334,107 +314,6 @@
                         self.visit(o.children[i][j][k], counts, level + 1)
         return
 
-    def domain_identify(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
-        #Return an array of length # of domains
-        #Every element is True if there is at least one
-        #fully refined *cell* in that domain that isn't masked out
-        cdef int i, oi, m
-        cdef Oct *o
-        cdef np.ndarray[np.uint8_t, ndim=1, cast=True] dmask
-        dmask = np.zeros(self.max_domain+1, dtype='uint8')
-        for oi in range(self.nocts):
-            m = 0
-            o = self.oct_list[oi]
-            #if o.sd.np <= 0 or o.domain == -1: continue
-            for i in range(8):
-                if mask[oi, i] == 1:
-                    m = 1
-                    break
-            if m == 0: continue
-            dmask[o.domain] = 1
-        return dmask.astype("bool")
-
-    def domain_and(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                   int domain_id):
-        cdef np.int64_t i, oi, n, use
-        cdef Oct *o
-        cdef np.ndarray[np.uint8_t, ndim=2] m2 = \
-                np.zeros((mask.shape[0], 8), 'uint8')
-        n = mask.shape[0]
-        for oi in range(n):
-            o = self.oct_list[oi]
-            if o.domain != domain_id: continue
-            use = 0
-            for i in range(8):
-                m2[o.domain_ind, i] = mask[o.domain_ind, i]
-        return m2
-
-    def domain_mask(self,
-                    # mask is the base selector's *global* mask
-                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                    int domain_id):
-        # What distinguishes this one from domain_and is that we have a mask,
-        # which covers the whole domain, but our output will only be of a much
-        # smaller subset of octs that belong to a given domain *and* the mask.
-        # Note also that typically when something calls domain_and, they will 
-        # use a logical_any along the oct axis.  Here we don't do that.
-        # Note also that we change the shape of the returned array.
-        cdef np.int64_t i, j, k, oi, n, nm, use
-        cdef Oct *o
-        n = mask.shape[0]
-        nm = 0
-        # This could perhaps be faster if we 
-        for oi in range(n):
-            o = self.oct_list[oi]
-            if o.domain != domain_id: continue
-            use = 0
-            for i in range(8):
-                if mask[o.domain_ind, i] == 1: use = 1
-            nm += use
-        cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
-                np.zeros((2, 2, 2, nm), 'uint8')
-        nm = 0
-        for oi in range(n):
-            o = self.oct_list[oi]
-            if o.domain != domain_id: continue
-            use = 0
-            for i in range(2):
-                for j in range(2):
-                    for k in range(2):
-                        ii = ((k*2)+j)*2+i
-                        if mask[o.domain_ind, ii] == 0: continue
-                        use = m2[i, j, k, nm] = 1
-            nm += use
-        return m2.astype("bool")
-
-    def domain_ind(self,
-                    # mask is the base selector's *global* mask
-                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                    int domain_id):
-        # Here we once again do something similar to the other functions.  We
-        # need a set of indices into the final reduced, masked values.  The
-        # indices will be domain.n long, and will be of type int64.  This way,
-        # we can get the Oct through a .get() call, then use Oct.file_ind as an
-        # index into this newly created array, then finally use the returned
-        # index into the domain subset array for deposition.
-        cdef np.int64_t i, j, k, oi, noct, n, nm, use, offset
-        cdef Oct *o
-        # For particle octrees, domain 0 is special and means non-leaf nodes.
-        offset = self.dom_offsets[domain_id + 1]
-        noct = self.dom_offsets[domain_id + 2] - offset
-        cdef np.ndarray[np.int64_t, ndim=1] ind = np.zeros(noct, 'int64')
-        nm = 0
-        for oi in range(noct):
-            ind[oi] = -1
-            o = self.oct_list[oi + offset]
-            use = 0
-            for i in range(8):
-                if mask[o.domain_ind, i] == 1: use = 1
-            if use == 1:
-                ind[oi] = nm
-            nm += use
-        return ind
-
 cdef class ParticleRegions:
     cdef np.float64_t left_edge[3]
     cdef np.float64_t dds[3]

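The mask-based helpers removed above all performed variants of the same bookkeeping: map each oct that has at least one selected cell to a compact index in the reduced output, and every other oct to -1 (domain_ind returns in this form in a later commit in this series). The core operation, sketched with NumPy:

    import numpy as np

    def compact_indices(used):
        # used: one boolean per oct (any selected cell in its mask).
        ind = np.full(len(used), -1, dtype="int64")
        ind[used] = np.arange(used.sum())
        return ind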

https://bitbucket.org/yt_analysis/yt/commits/db7f3802fc2e/
Changeset:   db7f3802fc2e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-14 23:41:57
Summary:     Starting process of creating ParticleOctreeSubset selector.

This is the start of re-enabling spatial fields for particle codes.
Affected #:  6 files

diff -r e4f6828237f1dc49c3331b7415e4df0099d89af7 -r db7f3802fc2eafc45bbd99b1b0f3095ff05d42ee yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -168,3 +168,74 @@
     def select_particles(self, selector, x, y, z):
         mask = selector.select_points(x,y,z)
         return mask
+
+class ParticleOctreeSubset(OctreeSubset):
+    # Subclassing OctreeSubset is somewhat dubious.
+    # This is some subset of an octree.  Note that the sum of subsets of an
+    # octree may multiply include data files.  While we can attempt to mitigate
+    # this, it's unavoidable for many types of data storage on disk.
+    _type_name = 'particle_octree_subset'
+    _con_args = ('data_files', 'pf', 'min_ind', 'max_ind')
+    def __init__(self, data_files, pf, min_ind = 0, max_ind = 0):
+        # The first attempt at this will not work in parallel.
+        self.data_files = data_files
+        self.field_data = YTFieldData()
+        self.field_parameters = {}
+        self.pf = pf
+        self.hierarchy = self.pf.hierarchy
+        self.oct_handler = pf.h.oct_handler
+        self.min_ind = min_ind
+        self.max_ind = max_ind
+        if max_ind == 0: max_ind = (1 << 63)
+        self._last_mask = None
+        self._last_selector_id = None
+        self._current_particle_type = 'all'
+        self._current_fluid_type = self.pf.default_fluid_type
+
+    def select_icoords(self, dobj):
+        return self.oct_handler.icoords(dobj)
+
+    def select_fcoords(self, dobj):
+        return self.oct_handler.fcoords(dobj)
+
+    def select_fwidth(self, dobj):
+        # Recall domain_dimensions is the number of cells, not octs
+        base_dx = (self.domain.pf.domain_width /
+                   self.domain.pf.domain_dimensions)
+        widths = np.empty((self.cell_count, 3), dtype="float64")
+        dds = (2**self.select_ires(dobj))
+        for i in range(3):
+            widths[:,i] = base_dx[i] / dds
+        return widths
+
+    def select_ires(self, dobj):
+        return self.oct_handler.ires(dobj)
+
+    def select(self, selector):
+        if id(selector) == self._last_selector_id:
+            return self._last_mask
+        m1 = self.selector.select_octs(self.oct_handler)
+        m2 = selector.select_octs(self.oct_handler)
+        np.logical_and(m1, m2, m1)
+        del m2
+        self._last_mask = m1
+        if self._last_mask.sum() == 0: return None
+        self._last_selector_id = id(selector)
+        return self._last_mask
+
+    def count(self, selector):
+        if id(selector) == self._last_selector_id:
+            if self._last_mask is None: return 0
+            return self._last_mask.sum()
+        self.select(selector)
+        return self.count(selector)
+
+    def count_particles(self, selector, x, y, z):
+        # We don't cache the selector results
+        count = selector.count_points(x,y,z)
+        return count
+
+    def select_particles(self, selector, x, y, z):
+        mask = selector.select_points(x,y,z)
+        return mask
+

diff -r e4f6828237f1dc49c3331b7415e4df0099d89af7 -r db7f3802fc2eafc45bbd99b1b0f3095ff05d42ee yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -96,7 +96,7 @@
           display_field = False)
 
 def _Ones(field, data):
-    return np.ones(data.ActiveDimensions, dtype='float64')
+    return np.ones(data.ires.shape, dtype='float64')
 add_field("Ones", function=_Ones,
           projection_conversion="unitary",
           display_field = False)

diff -r e4f6828237f1dc49c3331b7415e4df0099d89af7 -r db7f3802fc2eafc45bbd99b1b0f3095ff05d42ee yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -59,10 +59,6 @@
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, NullFunc
 
-class ParticleOctreeSubset(object):
-    def __init__(self):
-        pass
-
 class ParticleFile(object):
     def __init__(self, pf, io, filename, file_id):
         self.pf = pf

diff -r e4f6828237f1dc49c3331b7415e4df0099d89af7 -r db7f3802fc2eafc45bbd99b1b0f3095ff05d42ee yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -52,16 +52,24 @@
         chunks = list(chunks)
         for ftype, fname in fields:
             ptf[ftype].append(fname)
-        for chunk in chunks: # Will be OWLS domains
-            for data_file in chunk.objs:
-                f = h5py.File(data_file.filename, "r")
-                # This double-reads
-                for ptype, field_list in sorted(ptf.items()):
-                    coords = f["/%s/Coordinates" % ptype][:].astype("float64")
-                    psize[ptype] += selector.count_points(
-                        coords[:,0], coords[:,1], coords[:,2])
-                    del coords
-                f.close()
+        # For this type of file, we actually have something slightly different.
+        # We are given a list of ParticleDataChunks, which is composed of
+        # individual ParticleOctreeSubsets.  The data_files attribute on these
+        # may in fact overlap.  So we will iterate over a union of all the
+        # data_files.
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in data_files:
+            f = h5py.File(data_file.filename, "r")
+            # This double-reads
+            for ptype, field_list in sorted(ptf.items()):
+                coords = f["/%s/Coordinates" % ptype][:].astype("float64")
+                psize[ptype] += selector.count_points(
+                    coords[:,0], coords[:,1], coords[:,2])
+                del coords
+            f.close()
         # Now we have all the sizes, and we can allocate
         ind = {}
         for field in fields:
@@ -72,24 +80,23 @@
                 shape = psize[field[0]]
             rv[field] = np.empty(shape, dtype="float64")
             ind[field] = 0
-        for chunk in chunks: # Will be OWLS domains
-            for data_file in chunk.objs:
-                f = h5py.File(data_file.filename, "r")
-                for ptype, field_list in sorted(ptf.items()):
-                    g = f["/%s" % ptype]
-                    coords = g["Coordinates"][:].astype("float64")
-                    mask = selector.select_points(
-                                coords[:,0], coords[:,1], coords[:,2])
-                    del coords
-                    if mask is None: continue
-                    for field in field_list:
-                        data = g[field][:][mask,...]
-                        my_ind = ind[ptype, field]
-                        mylog.debug("Filling from %s to %s with %s",
-                            my_ind, my_ind+data.shape[0], field)
-                        rv[ptype, field][my_ind:my_ind + data.shape[0],...] = data
-                        ind[ptype, field] += data.shape[0]
-                f.close()
+        for data_file in data_files:
+            f = h5py.File(data_file.filename, "r")
+            for ptype, field_list in sorted(ptf.items()):
+                g = f["/%s" % ptype]
+                coords = g["Coordinates"][:].astype("float64")
+                mask = selector.select_points(
+                            coords[:,0], coords[:,1], coords[:,2])
+                del coords
+                if mask is None: continue
+                for field in field_list:
+                    data = g[field][:][mask,...]
+                    my_ind = ind[ptype, field]
+                    mylog.debug("Filling from %s to %s with %s",
+                        my_ind, my_ind+data.shape[0], field)
+                    rv[ptype, field][my_ind:my_ind + data.shape[0],...] = data
+                    ind[ptype, field] += data.shape[0]
+            f.close()
         return rv
 
     def _initialize_index(self, data_file, regions):

diff -r e4f6828237f1dc49c3331b7415e4df0099d89af7 -r db7f3802fc2eafc45bbd99b1b0f3095ff05d42ee yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -44,6 +44,7 @@
     ParallelAnalysisInterface, parallel_splitter
 
 from yt.data_objects.data_containers import data_object_registry
+from yt.data_objects.octree_subset import ParticleOctreeSubset
 
 class ParticleGeometryHandler(GeometryHandler):
     _global_mesh = False
@@ -141,9 +142,10 @@
 
     def _identify_base_chunk(self, dobj):
         if getattr(dobj, "_chunk_info", None) is None:
-            mask = dobj.selector.select_octs(self.oct_handler)
             file_ids = self.regions.identify_data_files(dobj.selector)
-            dobj._chunk_info = [self.data_files[i] for i in file_ids]
+            subset = [ParticleOctreeSubset([self.data_files[i] for i in file_ids],
+                                          self.parameter_file)]
+            dobj._chunk_info = subset
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
 
     def _chunk_all(self, dobj):
@@ -152,15 +154,17 @@
 
     def _chunk_spatial(self, dobj, ngz, sort = None):
         sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        for i,og in enumerate(sobjs):
-            if ngz > 0:
-                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
-            else:
-                g = og
-            size = og.cell_count
-            if size == 0: continue
-            yield ParticleDataChunk(self.oct_handler, self.regions,
-                                    dobj, "spatial", [g], size)
+        # We actually do not really use the data files except as input to the
+        # ParticleOctreeSubset.
+        # This is where we will perform cutting of the Octree and
+        # load-balancing.  That may require a specialized selector object to
+        # cut based on some space-filling curve index.
+        osubset = ParticleOctreeSubset(sobjs, self.parameter_file)
+                                       
+        if ngz > 0:
+            raise NotImplementedError
+        yield ParticleDataChunk(self.oct_handler, self.regions,
+                                dobj, "spatial", [osubset])
 
     def _chunk_io(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
@@ -175,5 +179,6 @@
         super(ParticleDataChunk, self).__init__(*args, **kwargs)
 
     def _accumulate_values(self, method):
-        mfunc = getattr(self.oct_handler, "select_%s" % method)
-        return mfunc(self.dobj)
+        mfunc = getattr(self.oct_handler, method)
+        return mfunc(self.dobj.selector)
+

diff -r e4f6828237f1dc49c3331b7415e4df0099d89af7 -r db7f3802fc2eafc45bbd99b1b0f3095ff05d42ee yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1213,3 +1213,50 @@
 
 octree_subset_selector = OctreeSubsetSelector
 
+cdef class ParticleOctreeSubsetSelector(SelectorObject):
+    # This is a numpy array, which will be a bool of ndim 1
+    cdef np.uint64_t min_ind
+    cdef np.uint64_t max_ind
+
+    def __init__(self, dobj):
+        self.min_ind = dobj.min_ind
+        self.max_ind = dobj.max_ind
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def select_octs(self, OctreeContainer octree):
+        # There has to be a better way to do this.
+        cdef np.ndarray[np.uint8_t, ndim=2, cast=True] m2
+        m2 = np.ones((octree.nocts, 8), dtype="uint8")
+        # This is where we'll -- in the future -- cut up based on indices of
+        # the octs.
+        return m2.astype("bool")
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void set_bounds(self,
+                         np.float64_t left_edge[3], np.float64_t right_edge[3],
+                         np.float64_t dds[3], int ind[3][2], int *check):
+        check[0] = 0
+        return
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def select_grids(self,
+                     np.ndarray[np.float64_t, ndim=2] left_edges,
+                     np.ndarray[np.float64_t, ndim=2] right_edges,
+                     np.ndarray[np.int32_t, ndim=2] levels):
+        raise RuntimeError
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3],
+                         int eterm[3]) nogil:
+        return 1
+
+particle_octree_subset_selector = ParticleOctreeSubsetSelector
+

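The I/O rework above is the key fix for overlapping subsets: because the data_files attributes of different ParticleOctreeSubsets may reference the same file on disk, the reader unions them into a set so each file is opened and scanned exactly once. The pattern, restated on its own:

    def gather_data_files(chunks):
        # Union of data files across all chunk objects; duplicates
        # collapse, so each file on disk is read exactly once.
        data_files = set()
        for chunk in chunks:
            for obj in chunk.objs:
                data_files.update(obj.data_files)
        return data_files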

https://bitbucket.org/yt_analysis/yt/commits/7ce103b71f24/
Changeset:   7ce103b71f24
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-15 00:02:11
Summary:     Getting closer -- but data.ires in Ones is breaking.
Affected #:  1 file

diff -r db7f3802fc2eafc45bbd99b1b0f3095ff05d42ee -r 7ce103b71f24ae606469fc050269296fee81a599 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -142,9 +142,8 @@
 
     def _identify_base_chunk(self, dobj):
         if getattr(dobj, "_chunk_info", None) is None:
-            file_ids = self.regions.identify_data_files(dobj.selector)
-            subset = [ParticleOctreeSubset([self.data_files[i] for i in file_ids],
-                                          self.parameter_file)]
+            data_files = getattr(dobj, "data_files", self.data_files)
+            subset = [ParticleOctreeSubset(data_files, self.parameter_file)]
             dobj._chunk_info = subset
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
 
@@ -159,12 +158,13 @@
         # This is where we will perform cutting of the Octree and
         # load-balancing.  That may require a specialized selector object to
         # cut based on some space-filling curve index.
-        osubset = ParticleOctreeSubset(sobjs, self.parameter_file)
-                                       
-        if ngz > 0:
-            raise NotImplementedError
-        yield ParticleDataChunk(self.oct_handler, self.regions,
-                                dobj, "spatial", [osubset])
+        for i,og in enumerate(sobjs):
+            if ngz > 0:
+                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+            else:
+                g = og
+            yield ParticleDataChunk(self.oct_handler, self.regions, dobj,
+                                    "spatial", [g])
 
     def _chunk_io(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)

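This commit reverts _chunk_spatial to yielding one chunk per subset and restores the ghost-zone branch. In outline:

    def chunk_spatial(subsets, ngz=0):
        # One spatial chunk per octree subset; ghost zones, when
        # requested, wrap the subset before it is yielded.
        for og in subsets:
            if ngz > 0:
                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
            else:
                g = og
            yield g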

https://bitbucket.org/yt_analysis/yt/commits/209dd30d499e/
Changeset:   209dd30d499e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-15 00:12:24
Summary:     Adding an "fwidth" function.
Affected #:  3 files

diff -r 7ce103b71f24ae606469fc050269296fee81a599 -r 209dd30d499e9c59bd06e3c2f924591a575c9fa7 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -83,3 +83,4 @@
 cdef void visit_icoords_octs(Oct *o, OctVisitorData *data)
 cdef void visit_ires_octs(Oct *o, OctVisitorData *data)
 cdef void visit_fcoords_octs(Oct *o, OctVisitorData *data)
+cdef void visit_fwidth_octs(Oct *o, OctVisitorData *data)

diff -r 7ce103b71f24ae606469fc050269296fee81a599 -r 209dd30d499e9c59bd06e3c2f924591a575c9fa7 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -854,3 +854,15 @@
         c = <np.float64_t> ((o.pos[i] << 1 ) + data.ind[i]) 
         fcoords[data.index * 3 + i] = (c + 0.5) * dx
     data.index += 1
+
+cdef void visit_fwidth_octs(Oct *o, OctVisitorData *data):
+    # Note that this does not actually give the correct floating point
+    # coordinates.  It gives them in some unit system where the domain is 1.0
+    # in all directions, and assumes that they will be scaled later.
+    cdef np.float64_t *fwidth = <np.float64_t*> data.array
+    cdef int i
+    cdef np.float64_t dx 
+    dx = 1.0 / (2 << o.level)
+    for i in range(3):
+        fwidth[data.index * 3 + i] = dx
+    data.index += 1

diff -r 7ce103b71f24ae606469fc050269296fee81a599 -r 209dd30d499e9c59bd06e3c2f924591a575c9fa7 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -26,7 +26,8 @@
 """
 
 from oct_container cimport OctreeContainer, Oct, OctInfo, ORDER_MAX, \
-    visit_icoords_octs, visit_ires_octs, visit_fcoords_octs
+    visit_icoords_octs, visit_ires_octs, \
+    visit_fcoords_octs, visit_fwidth_octs
 from libc.stdlib cimport malloc, free, qsort
 from libc.math cimport floor
 from fp_utils cimport *
@@ -138,6 +139,24 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
+    def fwidth(self, SelectorObject selector, np.uint64_t num_cells = -1):
+        if num_cells == -1:
+            num_cells = selector.count_octs(self)
+        cdef np.ndarray[np.float64_t, ndim=2] fwidth
+        fwidth = np.empty((num_cells, 3), dtype="float64")
+        cdef OctVisitorData data
+        data.array = <void *> fwidth.data
+        data.index = 0
+        self.visit_all_octs(selector, visit_fwidth_octs, &data)
+        cdef np.float64_t base_dx
+        for i in range(3):
+            base_dx = (self.DRE[i] - self.DLE[i])/self.nn[i]
+            fwidth[:,i] *= base_dx
+        return fwidth
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def fcoords(self, SelectorObject selector, np.uint64_t num_cells = -1):
         if num_cells == -1:
             num_cells = selector.count_octs(self)

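fwidth follows the same two-step pattern as fcoords: the visitor records the unitary width 1 / 2**(level + 1) (written as 2 << o.level in the Cython), and the Python layer rescales by the root-oct width per axis. The net cell width it produces is, in effect:

    def cell_fwidth(level, DLE, DRE, nn):
        # Cell width per axis at a given oct level: one root oct spans
        # base_dx and is 2**(level + 1) cells across at that level.
        return [(DRE[i] - DLE[i]) / nn[i] / (2 << level) for i in range(3)]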

https://bitbucket.org/yt_analysis/yt/commits/ca46ea9c4089/
Changeset:   ca46ea9c4089
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-15 00:20:54
Summary:     Adding a select_grid function and fixing accumulate_values.

This nearly gets spatial chunking working for particle codes.  A few
lingering issues still remain, for instance how to get the Octs selected
correctly now that the visit_oct function only looks at the extents.
Affected #:  2 files

diff -r 209dd30d499e9c59bd06e3c2f924591a575c9fa7 -r ca46ea9c40892e92d9999a365e03ead8c8075dcc yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -180,5 +180,7 @@
 
     def _accumulate_values(self, method):
         mfunc = getattr(self.oct_handler, method)
-        return mfunc(self.dobj.selector)
+        rv = mfunc(self.dobj.selector)
+        self._data_size = rv.shape[0]
+        return rv
 

diff -r 209dd30d499e9c59bd06e3c2f924591a575c9fa7 -r ca46ea9c40892e92d9999a365e03ead8c8075dcc yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1258,5 +1258,10 @@
                          int eterm[3]) nogil:
         return 1
 
+    cdef int select_grid(self, np.float64_t left_edge[3],
+                         np.float64_t right_edge[3], np.int32_t level) nogil:
+        # This is where we'll want to add a check for the min/max index.
+        return 1
+
 particle_octree_subset_selector = ParticleOctreeSubsetSelector
 

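select_grid here is a deliberate pass-through; the comment marks where the min/max index test will eventually go. A hypothetical version of that future check (not part of this commit), using the half-open index range that ParticleOctreeSubset carries:

    def select_grid_by_index(grid_ind, min_ind, max_ind):
        # Hypothetical: keep a grid only when its space-filling-curve
        # index falls inside [min_ind, max_ind) for this subset.
        if min_ind <= grid_ind < max_ind:
            return 1
        return 0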

https://bitbucket.org/yt_analysis/yt/commits/5e52753237df/
Changeset:   5e52753237df
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-15 03:53:09
Summary:     Spatial fields now generate, but are all zeros.
Affected #:  8 files

diff -r ca46ea9c40892e92d9999a365e03ead8c8075dcc -r 5e52753237df033ac87c2acb04b21e489ca86779 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -287,7 +287,7 @@
         # This needs to be parallel_objects-ified
         for chunk in parallel_objects(self.data_source.chunks(
                 chunk_fields, "io")): 
-            mylog.debug("Adding chunk (%s) to tree", chunk.size)
+            mylog.debug("Adding chunk (%s) to tree", chunk.ires.size)
             self._handle_chunk(chunk, fields, tree)
         # Note that this will briefly double RAM usage
         if self.proj_style == "mip":
@@ -346,7 +346,7 @@
             dl = 1.0
         else:
             dl = chunk.fwidth[:, self.axis]
-        v = np.empty((chunk.size, len(fields)), dtype="float64")
+        v = np.empty((chunk.ires.size, len(fields)), dtype="float64")
         for i in range(len(fields)):
             v[:,i] = chunk[fields[i]] * dl
         if self.weight_field is not None:
@@ -354,7 +354,7 @@
             np.multiply(v, w[:,None], v)
             np.multiply(w, dl, w)
         else:
-            w = np.ones(chunk.size, dtype="float64")
+            w = np.ones(chunk.ires.size, dtype="float64")
         icoords = chunk.icoords
         i1 = icoords[:,x_dict[self.axis]]
         i2 = icoords[:,y_dict[self.axis]]

diff -r ca46ea9c40892e92d9999a365e03ead8c8075dcc -r 5e52753237df033ac87c2acb04b21e489ca86779 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -176,7 +176,7 @@
     # this, it's unavoidable for many types of data storage on disk.
     _type_name = 'particle_octree_subset'
     _con_args = ('data_files', 'pf', 'min_ind', 'max_ind')
-    def __init__(self, data_files, pf, min_ind = 0, max_ind = 0):
+    def __init__(self, base_selector, data_files, pf, min_ind = 0, max_ind = 0):
         # The first attempt at this will not work in parallel.
         self.data_files = data_files
         self.field_data = YTFieldData()
@@ -191,34 +191,54 @@
         self._last_selector_id = None
         self._current_particle_type = 'all'
         self._current_fluid_type = self.pf.default_fluid_type
+        self.base_selector = base_selector
+    
+    _domain_ind = None
 
+    @property
+    def domain_ind(self):
+        if self._domain_ind is None:
+            mask = self.selector.select_octs(self.oct_handler)
+            di = self.oct_handler.domain_ind(mask)
+            self._domain_ind = di
+        return self._domain_ind
+
+    def deposit(self, positions, fields = None, method = None):
+        # Here we perform our particle deposition.
+        cls = getattr(particle_deposit, "deposit_%s" % method, None)
+        if cls is None:
+            raise YTParticleDepositionNotImplemented(method)
+        nvals = (self.domain_ind >= 0).sum() * 8
+        op = cls(nvals) # We allocate number of zones, not number of octs
+        op.initialize()
+        op.process_octree(self.oct_handler, self.domain_ind, positions, fields, 0)
+        vals = op.finalize()
+        return self._reshape_vals(vals)
+    
     def select_icoords(self, dobj):
-        return self.oct_handler.icoords(dobj)
+        return self.oct_handler.icoords(dobj.selector)
 
     def select_fcoords(self, dobj):
-        return self.oct_handler.fcoords(dobj)
+        return self.oct_handler.fcoords(dobj.selector)
 
     def select_fwidth(self, dobj):
         # Recall domain_dimensions is the number of cells, not octs
-        base_dx = (self.domain.pf.domain_width /
-                   self.domain.pf.domain_dimensions)
-        widths = np.empty((self.cell_count, 3), dtype="float64")
+        base_dx = (self.pf.domain_width /
+                   self.pf.domain_dimensions)
         dds = (2**self.select_ires(dobj))
+        widths = np.empty((dds.shape[0], 3), dtype="float64")
         for i in range(3):
             widths[:,i] = base_dx[i] / dds
         return widths
 
     def select_ires(self, dobj):
-        return self.oct_handler.ires(dobj)
+        return self.oct_handler.ires(dobj.selector)
 
     def select(self, selector):
         if id(selector) == self._last_selector_id:
             return self._last_mask
-        m1 = self.selector.select_octs(self.oct_handler)
-        m2 = selector.select_octs(self.oct_handler)
-        np.logical_and(m1, m2, m1)
-        del m2
-        self._last_mask = m1
+        self._last_mask = self.oct_handler.domain_mask(
+                self.selector)
         if self._last_mask.sum() == 0: return None
         self._last_selector_id = id(selector)
         return self._last_mask

diff -r ca46ea9c40892e92d9999a365e03ead8c8075dcc -r 5e52753237df033ac87c2acb04b21e489ca86779 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -80,6 +80,9 @@
 
 # Now some visitor functions
 
+cdef void visit_count_octs(Oct *o, OctVisitorData *data)
+cdef void visit_count_total_octs(Oct *o, OctVisitorData *data)
+cdef void visit_mark_octs(Oct *o, OctVisitorData *data)
 cdef void visit_icoords_octs(Oct *o, OctVisitorData *data)
 cdef void visit_ires_octs(Oct *o, OctVisitorData *data)
 cdef void visit_fcoords_octs(Oct *o, OctVisitorData *data)

diff -r ca46ea9c40892e92d9999a365e03ead8c8075dcc -r 5e52753237df033ac87c2acb04b21e489ca86779 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -830,6 +830,26 @@
 
 # Now some visitor functions
 
+cdef void visit_count_octs(Oct *o, OctVisitorData *data):
+    # Number of cells visited
+    data.index += 1
+
+cdef void visit_count_total_octs(Oct *o, OctVisitorData *data):
+    # Number of *octs* visited.
+    if data.last != o.domain_ind:
+        data.index += 1
+        data.last = o.domain_ind
+
+cdef void visit_mark_octs(Oct *o, OctVisitorData *data):
+    cdef int i
+    cdef np.uint8_t *arr
+    if data.last != o.domain_ind:
+        data.last = o.domain_ind
+        arr = <np.uint8_t *> data.array
+        for i in range(8):
+            arr[data.index * 8 + i] = 1
+        data.index += 1
+
 cdef void visit_icoords_octs(Oct *o, OctVisitorData *data):
     cdef np.int64_t *coords = <np.int64_t*> data.array
     cdef int i

diff -r ca46ea9c40892e92d9999a365e03ead8c8075dcc -r 5e52753237df033ac87c2acb04b21e489ca86779 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -142,8 +142,12 @@
 
     def _identify_base_chunk(self, dobj):
         if getattr(dobj, "_chunk_info", None) is None:
-            data_files = getattr(dobj, "data_files", self.data_files)
-            subset = [ParticleOctreeSubset(data_files, self.parameter_file)]
+            data_files = getattr(dobj, "data_files", None)
+            if data_files is None:
+                data_files = [self.data_files[i] for i in
+                              self.regions.identify_data_files(dobj.selector)]
+            subset = [ParticleOctreeSubset(dobj.selector, data_files, 
+                        self.parameter_file)]
             dobj._chunk_info = subset
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
 

diff -r ca46ea9c40892e92d9999a365e03ead8c8075dcc -r 5e52753237df033ac87c2acb04b21e489ca86779 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -27,7 +27,9 @@
 
 from oct_container cimport OctreeContainer, Oct, OctInfo, ORDER_MAX, \
     visit_icoords_octs, visit_ires_octs, \
-    visit_fcoords_octs, visit_fwidth_octs
+    visit_fcoords_octs, visit_fwidth_octs, \
+    visit_count_octs, visit_count_total_octs, \
+    visit_mark_octs
 from libc.stdlib cimport malloc, free, qsort
 from libc.math cimport floor
 from fp_utils cimport *
@@ -187,7 +189,8 @@
         self.oct_list = <Oct**> malloc(sizeof(Oct*)*self.nocts)
         cdef np.int64_t i = 0, lpos = 0
         cdef int cur_dom = -1
-        # We always need at least 2, and if max_domain is 0, we need 3.
+        # Note that we now assign them in the same order they will be visited
+        # by recursive visitors.
         for i in range(self.nn[0]):
             for j in range(self.nn[1]):
                 for k in range(self.nn[2]):
@@ -195,6 +198,8 @@
         assert(lpos == self.nocts)
         for i in range(self.nocts):
             self.oct_list[i].domain_ind = i
+            if self.oct_list[i].children[0][0][0] != NULL:
+                self.oct_list[i].domain = -1
             self.oct_list[i].file_ind = -1
             max_level = imax(max_level, self.oct_list[i].level)
         self.max_level = max_level
@@ -211,7 +216,7 @@
         return
 
     cdef np.int64_t get_domain_offset(self, int domain_id):
-        return self.dom_offsets[domain_id + 1]
+        return 0
 
     cdef Oct* allocate_oct(self):
         #Allocate the memory, set to NULL or -1
@@ -333,6 +338,34 @@
                         self.visit(o.children[i][j][k], counts, level + 1)
         return
 
+    def domain_ind(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
+        cdef np.ndarray[np.int64_t, ndim=1] ind
+        ind = np.empty(mask.shape[0], 'int64')
+        # Here's where we grab the masked items.
+        nm = 0
+        for oi in range(mask.shape[0]):
+            ind[oi] = -1
+            use = 0
+            for i in range(8):
+                if mask[oi, i] == 1: use = 1
+            if use == 1:
+                ind[oi] = nm
+            nm += use
+        return ind
+
+    def domain_mask(self, SelectorObject selector):
+        cdef OctVisitorData data
+        data.index = 0
+        data.last = -1
+        self.visit_all_octs(selector, visit_count_total_octs, &data)
+        cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
+                np.zeros((2, 2, 2, data.index), 'uint8')
+        data.index = 0
+        data.last = -1
+        data.array = m2.data
+        self.visit_all_octs(selector, visit_mark_octs, &data)
+        return m2.astype("bool")
+
 cdef class ParticleRegions:
     cdef np.float64_t left_edge[3]
     cdef np.float64_t dds[3]
@@ -399,3 +432,4 @@
                 if ((fmask >> i) & 1) == 1:
                     files.append(i + n * 64)
         return files
+
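
For reference, domain_mask above uses a two-phase traversal: one pass with
visit_count_total_octs to size the output array, then a second pass with
visit_mark_octs to fill it.  A minimal pure-Python sketch of that pattern
(visit_all and the oct list are hypothetical stand-ins, not the Cython API):

    import numpy as np

    def visit_all(octs, func, data):
        # Stand-in for visit_all_octs: apply func to every visited oct.
        for o in octs:
            func(o, data)

    def count_octs(o, data):
        data["index"] += 1

    def mark_octs(o, data):
        data["array"][..., data["index"]] = 1   # mark all 8 cells of this oct
        data["index"] += 1

    octs = range(4)
    data = {"index": 0}
    visit_all(octs, count_octs, data)                 # phase 1: count
    m2 = np.zeros((2, 2, 2, data["index"]), "uint8")
    data = {"index": 0, "array": m2}
    visit_all(octs, mark_octs, data)                  # phase 2: mark
    assert m2.sum() == 8 * 4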

diff -r ca46ea9c40892e92d9999a365e03ead8c8075dcc -r 5e52753237df033ac87c2acb04b21e489ca86779 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -28,6 +28,7 @@
 cdef struct Oct
 cdef struct OctVisitorData:
     np.uint64_t index
+    np.uint64_t last
     int ind[3]
     void *array
 

diff -r ca46ea9c40892e92d9999a365e03ead8c8075dcc -r 5e52753237df033ac87c2acb04b21e489ca86779 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1217,10 +1217,12 @@
     # This is a numpy array, which will be a bool of ndim 1
     cdef np.uint64_t min_ind
     cdef np.uint64_t max_ind
+    cdef SelectorObject base_selector
 
     def __init__(self, dobj):
         self.min_ind = dobj.min_ind
         self.max_ind = dobj.max_ind
+        self.base_selector = dobj.base_selector
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -1228,9 +1230,18 @@
     def select_octs(self, OctreeContainer octree):
         # There has to be a better way to do this.
         cdef np.ndarray[np.uint8_t, ndim=2, cast=True] m2
-        m2 = np.ones((octree.nocts, 8), dtype="uint8")
+        m2 = self.base_selector.select_octs(octree)
         # This is where we'll -- in the future -- cut up based on indices of
         # the octs.
+        cdef np.int64_t nm, i
+        cdef np.uint8_t use, k
+        nm = m2.shape[0]
+        for i in range(nm):
+            use = 0
+            for k in range(8):
+                if m2[i,k] == 1: use = 1
+            for k in range(8):
+                m2[i,k] = use
         return m2.astype("bool")
 
     @cython.boundscheck(False)


https://bitbucket.org/yt_analysis/yt/commits/d17da4886813/
Changeset:   d17da4886813
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-15 04:03:08
Summary:     Uncertain why this change is necessary to get non-zero results.
Affected #:  3 files

diff -r 5e52753237df033ac87c2acb04b21e489ca86779 -r d17da48868139c69d0f5ee3e86c0952e2ce0f912 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -223,13 +223,7 @@
 
     def select_fwidth(self, dobj):
         # Recall domain_dimensions is the number of cells, not octs
-        base_dx = (self.pf.domain_width /
-                   self.pf.domain_dimensions)
-        dds = (2**self.select_ires(dobj))
-        widths = np.empty((dds.shape[0], 3), dtype="float64")
-        for i in range(3):
-            widths[:,i] = base_dx[i] / dds
-        return widths
+        return self.oct_handler.fwidth(dobj.selector)
 
     def select_ires(self, dobj):
         return self.oct_handler.ires(dobj.selector)

diff -r 5e52753237df033ac87c2acb04b21e489ca86779 -r d17da48868139c69d0f5ee3e86c0952e2ce0f912 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -75,10 +75,11 @@
             oct = octree.get(pos, &oi)
             # This next line is unfortunate.  Basically it says, sometimes we
             # might have particles that belong to octs outside our domain.
+            #print oct.domain, domain_id
             if oct.domain != domain_id: continue
-            #print domain_id, oct.local_ind, oct.ind, oct.domain, oct.pos[0], oct.pos[1], oct.pos[2]
             # Note that this has to be our local index, not our in-file index.
             offset = dom_ind[oct.domain_ind - moff] * 8
+            #print domain_id, offset, oct.domain_ind, oct.file_ind, oct.domain, oct.pos[0], oct.pos[1], oct.pos[2]
             if offset < 0: continue
             # Check that we found the oct ...
             self.process(dims, oi.left_edge, oi.dds,

diff -r 5e52753237df033ac87c2acb04b21e489ca86779 -r d17da48868139c69d0f5ee3e86c0952e2ce0f912 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -199,7 +199,9 @@
         for i in range(self.nocts):
             self.oct_list[i].domain_ind = i
             if self.oct_list[i].children[0][0][0] != NULL:
-                self.oct_list[i].domain = -1
+                self.oct_list[i].domain = 0
+            else:
+                self.oct_list[i].domain = 0
             self.oct_list[i].file_ind = -1
             max_level = imax(max_level, self.oct_list[i].level)
         self.max_level = max_level


https://bitbucket.org/yt_analysis/yt/commits/d8b0bf0e6a73/
Changeset:   d8b0bf0e6a73
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-15 16:33:11
Summary:     Spatial for particle datasets now works.

The Morton ordering was backwards, causing octs to be missed.  Additionally,
the root_dimensions argument (now removed, as it is no longer relevant, though
it will return for the ParticleRegions) was shrinking the projection tree so
that it occupied only a tiny portion of one corner of the domain.
Affected #:  4 files
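
For reference, a minimal pure-Python sketch (not part of the commit) of the
bit interleaving this changeset corrects.  spread_bits here is a stand-in for
the Cython helper of the same name, assuming 21 bits per coordinate so the
interleaved key fits in 64 bits; after the fix, x lands in the most
significant interleaved bit and z in the least:

    def spread_bits(x):
        # Insert two zero bits between each of the low 21 bits of x.
        result = 0
        for bit in range(21):
            result |= ((x >> bit) & 1) << (3 * bit)
        return result

    def morton_index(ix, iy, iz):
        # z at offset 0, y at offset 1, x at offset 2, matching the
        # corrected get_morton_indices ordering.
        return spread_bits(iz) | (spread_bits(iy) << 1) | (spread_bits(ix) << 2)

    # Sorting by this key sweeps z fastest and x slowest.
    assert morton_index(0, 0, 1) == 1
    assert morton_index(1, 0, 0) == 4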

diff -r d17da48868139c69d0f5ee3e86c0952e2ce0f912 -r d8b0bf0e6a737bd8a236cd71169c42a4474e2764 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -311,7 +311,6 @@
         np.multiply(py, self.pf.domain_width[y_dict[self.axis]], py)
         np.add(py, oy, py)
         np.multiply(pdy, self.pf.domain_width[y_dict[self.axis]], pdy)
-
         if self.weight_field is not None:
             np.divide(nvals, nwvals[:,None], nvals)
         if self.weight_field is None:
@@ -336,8 +335,8 @@
 
     def _initialize_chunk(self, chunk, tree):
         icoords = chunk.icoords
-        i1 = icoords[:,0]
-        i2 = icoords[:,1]
+        i1 = icoords[:,x_dict[self.axis]]
+        i2 = icoords[:,y_dict[self.axis]]
         ilevel = chunk.ires
         tree.initialize_chunk(i1, i2, ilevel)
 

diff -r d17da48868139c69d0f5ee3e86c0952e2ce0f912 -r d8b0bf0e6a737bd8a236cd71169c42a4474e2764 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -145,9 +145,8 @@
                     ('unused', 16, 'i') )
 
     def __init__(self, filename, data_style="gadget_binary",
-                 additional_fields = (), root_dimensions = 64,
+                 additional_fields = (),
                  unit_base = None):
-        self._root_dimensions = root_dimensions
         self.storage_filename = None
         if unit_base is not None and "UnitLength_in_cm" in unit_base:
             # We assume this is comoving, because in the absence of comoving
@@ -180,7 +179,7 @@
 
         self.domain_left_edge = np.zeros(3, "float64")
         self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
-        self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
+        self.domain_dimensions = np.ones(3, "int32") * 2
         self.periodicity = (True, True, True)
 
         self.cosmological_simulation = 1
@@ -258,11 +257,9 @@
     _fieldinfo_known = KnownOWLSFields
     _header_spec = None # Override so that there's no confusion
 
-    def __init__(self, filename, data_style="OWLS", root_dimensions = 64):
-        self._root_dimensions = root_dimensions
+    def __init__(self, filename, data_style="OWLS"):
         self.storage_filename = None
         super(OWLSStaticOutput, self).__init__(filename, data_style,
-                                               root_dimensions,
                                                unit_base = None)
 
     def __repr__(self):
@@ -283,7 +280,7 @@
         self.current_time = hvals["Time_GYR"] * sec_conversion["Gyr"]
         self.domain_left_edge = np.zeros(3, "float64")
         self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
-        self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
+        self.domain_dimensions = np.ones(3, "int32") * 2
         self.cosmological_simulation = 1
         self.periodicity = (True, True, True)
         self.current_redshift = hvals["Redshift"]
@@ -346,14 +343,13 @@
                     ('dummy',   'i'))
 
     def __init__(self, filename, data_style="tipsy",
-                 root_dimensions = 64, endian = ">",
+                 endian = ">",
                  field_dtypes = None,
                  domain_left_edge = None,
                  domain_right_edge = None,
                  unit_base = None,
                  cosmology_parameters = None):
         self.endian = endian
-        self._root_dimensions = root_dimensions
         self.storage_filename = None
         if domain_left_edge is None:
             domain_left_edge = np.zeros(3, "float64") - 0.5
@@ -399,7 +395,7 @@
         # NOTE: These are now set in the main initializer.
         #self.domain_left_edge = np.zeros(3, "float64") - 0.5
         #self.domain_right_edge = np.ones(3, "float64") + 0.5
-        self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
+        self.domain_dimensions = np.ones(3, "int32") * 2
         self.periodicity = (True, True, True)
 
         self.cosmological_simulation = 1

diff -r d17da48868139c69d0f5ee3e86c0952e2ce0f912 -r d8b0bf0e6a737bd8a236cd71169c42a4474e2764 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -198,10 +198,7 @@
         assert(lpos == self.nocts)
         for i in range(self.nocts):
             self.oct_list[i].domain_ind = i
-            if self.oct_list[i].children[0][0][0] != NULL:
-                self.oct_list[i].domain = 0
-            else:
-                self.oct_list[i].domain = 0
+            self.oct_list[i].domain = 0
             self.oct_list[i].file_ind = -1
             max_level = imax(max_level, self.oct_list[i].level)
         self.max_level = max_level

diff -r d17da48868139c69d0f5ee3e86c0952e2ce0f912 -r d8b0bf0e6a737bd8a236cd71169c42a4474e2764 yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -329,9 +329,9 @@
     morton_indices = np.zeros(left_index.shape[0], 'uint64')
     for i in range(left_index.shape[0]):
         mi = 0
-        mi |= spread_bits(left_index[i,0])<<0
+        mi |= spread_bits(left_index[i,2])<<0
         mi |= spread_bits(left_index[i,1])<<1
-        mi |= spread_bits(left_index[i,2])<<2
+        mi |= spread_bits(left_index[i,0])<<2
         morton_indices[i] = mi
     return morton_indices
 


https://bitbucket.org/yt_analysis/yt/commits/8880d16e1caf/
Changeset:   8880d16e1caf
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-15 16:57:53
Summary:     Adding some comments about how to move forward.
Affected #:  1 file

diff -r d8b0bf0e6a737bd8a236cd71169c42a4474e2764 -r 8880d16e1caf0b86c7fb0885a874f7bb5193ea42 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -72,14 +72,18 @@
                 field_vals[j] = field_pointers[j][i]
             for j in range(3):
                 pos[j] = positions[i, j]
+            # This line should be modified to have it return the index into an
+            # array based on whatever cutting of the domain we have done.  This
+            # may or may not include the domain indices that we have
+            # previously generated.  This way we can support not knowing the
+            # full octree structure.  All we *really* care about is some
+            # arbitrary offset into a field value for deposition.
             oct = octree.get(pos, &oi)
             # This next line is unfortunate.  Basically it says, sometimes we
             # might have particles that belong to octs outside our domain.
-            #print oct.domain, domain_id
             if oct.domain != domain_id: continue
             # Note that this has to be our local index, not our in-file index.
             offset = dom_ind[oct.domain_ind - moff] * 8
-            #print domain_id, offset, oct.domain_ind, oct.file_ind, oct.domain, oct.pos[0], oct.pos[1], oct.pos[2]
             if offset < 0: continue
             # Check that we found the oct ...
             self.process(dims, oi.left_edge, oi.dds,


https://bitbucket.org/yt_analysis/yt/commits/f6c7b1949be8/
Changeset:   f6c7b1949be8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-17 16:07:27
Summary:     Shorthand commit for a smoothing kernel test field.
Affected #:  1 file

diff -r 8880d16e1caf0b86c7fb0885a874f7bb5193ea42 -r f6c7b1949be82e3bbd735b843fc0049856d0d8a0 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -234,3 +234,13 @@
         func = _field_concat_slice(iname, axi)
         OWLSFieldInfo.add_field(("all", oname + ax), function=func,
                 particle_type = True)
+
+def SmoothedGas(field, data):
+    pos = data["PartType0", "Coordinates"]
+    sml = data["PartType0", "SmoothingLength"]
+    dens = data["PartType0", "Density"]
+    rv = data.deposit(pos, [sml, dens], method="simple_smooth")
+    return rv
+OWLSFieldInfo.add_field(("deposit", "PartType0_simple_smooth"),
+                function = SmoothedGas, validators = [ValidateSpatial()])
+
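
For reference, a field registered this way would be accessed like any other
derived field through a data object.  A hypothetical usage sketch (the
snapshot path is invented; the interface shown is the yt-3.0-era pf.h API):

    from yt.mods import load

    pf = load("snapshot_000")        # hypothetical OWLS snapshot
    dd = pf.h.all_data()
    # The deposited field lives under the "deposit" field type added above.
    smoothed = dd["deposit", "PartType0_simple_smooth"]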


https://bitbucket.org/yt_analysis/yt/commits/c58d372d1e36/
Changeset:   c58d372d1e36
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-17 16:28:56
Summary:     Beginning update of Gadget IO.
Affected #:  2 files
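
For reference, the reader restructured below follows a two-pass pattern:
count how many particles the selector accepts per type, allocate the output
arrays once, then re-read and fill them.  A minimal sketch under stated
assumptions (read_positions and read_field are hypothetical callables; the
selector's count_points/select_points match the API used in the diff):

    import numpy as np
    from collections import defaultdict

    def read_particle_fields(data_files, selector, fields,
                             read_positions, read_field):
        psize = defaultdict(int)
        # Pass 1: count accepted particles per particle type.
        for df in data_files:
            for ptype in set(pt for pt, _ in fields):
                pos = read_positions(df, ptype)
                psize[ptype] += selector.count_points(
                    pos[:, 0], pos[:, 1], pos[:, 2])
        # Allocate outputs once, now that sizes are known.
        rv = dict(((pt, fn), np.empty(psize[pt], dtype="float64"))
                  for pt, fn in fields)
        ind = defaultdict(int)
        # Pass 2: re-read, mask with the selector, and copy into place.
        for df in data_files:
            for ptype, fname in fields:
                pos = read_positions(df, ptype)
                mask = selector.select_points(pos[:, 0], pos[:, 1], pos[:, 2])
                if mask is None:
                    continue
                data = read_field(df, ptype, fname)[mask]
                i0 = ind[ptype, fname]
                rv[ptype, fname][i0:i0 + len(data)] = data
                ind[ptype, fname] += len(data)
        return rv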

diff -r f6c7b1949be82e3bbd735b843fc0049856d0d8a0 -r c58d372d1e36c4ea6b360ee5e0a8f579b4d0180f yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -32,7 +32,8 @@
     BaseIOHandler
 
 from yt.utilities.fortran_utils import read_record
-from yt.utilities.lib.geometry_utils import get_morton_indices
+from yt.utilities.lib.geometry_utils import get_morton_indices, \
+    get_morton_indices_unravel
 
 from yt.geometry.oct_container import _ORDER_MAX
 
@@ -186,26 +187,28 @@
         ptall = []
         psize = defaultdict(lambda: 0)
         chunks = list(chunks)
-        pf = chunks[0].objs[0].domain.pf
         ptypes = set()
         for ftype, fname in fields:
             ptf[ftype].append(fname)
             ptypes.add(ftype)
         ptypes = list(ptypes)
         ptypes.sort(key = lambda a: self._ptypes.index(a))
+        data_files = set([])
         for chunk in chunks:
-            for subset in chunk.objs:
-                poff = subset.domain.field_offsets
-                tp = subset.domain.total_particles
-                f = open(subset.domain.domain_filename, "rb")
-                for ptype in ptypes:
-                    f.seek(poff[ptype, "Coordinates"], os.SEEK_SET)
-                    pos = self._read_field_from_file(f,
-                                tp[ptype], "Coordinates")
-                    psize[ptype] += selector.count_points(
-                        pos[:,0], pos[:,1], pos[:,2])
-                    del pos
-                f.close()
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in data_files:
+            poff = data_file.field_offsets
+            tp = data_file.total_particles
+            f = open(data_file.filename, "rb")
+            for ptype in ptypes:
+                f.seek(poff[ptype, "Coordinates"], os.SEEK_SET)
+                pos = self._read_field_from_file(f,
+                            tp[ptype], "Coordinates")
+                psize[ptype] += selector.count_points(
+                    pos[:,0], pos[:,1], pos[:,2])
+                del pos
+            f.close()
         ind = {}
         for field in fields:
             mylog.debug("Allocating %s values for %s", psize[field[0]], field)
@@ -215,29 +218,28 @@
                 shape = psize[field[0]]
             rv[field] = np.empty(shape, dtype="float64")
             ind[field] = 0
-        for chunk in chunks: 
-            for subset in chunk.objs:
-                poff = subset.domain.field_offsets
-                tp = subset.domain.total_particles
-                f = open(subset.domain.domain_filename, "rb")
-                for ptype, field_list in sorted(ptf.items()):
-                    f.seek(poff[ptype, "Coordinates"], os.SEEK_SET)
-                    pos = self._read_field_from_file(f,
-                                tp[ptype], "Coordinates")
-                    mask = selector.select_points(
-                        pos[:,0], pos[:,1], pos[:,2])
-                    del pos
-                    if mask is None: continue
-                    for field in field_list:
-                        f.seek(poff[ptype, field], os.SEEK_SET)
-                        data = self._read_field_from_file(f, tp[ptype], field)
-                        data = data[mask]
-                        my_ind = ind[ptype, field]
-                        mylog.debug("Filling from %s to %s with %s",
-                            my_ind, my_ind+data.shape[0], field)
-                        rv[ptype, field][my_ind:my_ind + data.shape[0],...] = data
-                        ind[ptype, field] += data.shape[0]
-                f.close()
+        for data_file in data_files:
+            poff = data_file.field_offsets
+            tp = data_file.total_particles
+            f = open(data_file.filename, "rb")
+            for ptype, field_list in sorted(ptf.items()):
+                f.seek(poff[ptype, "Coordinates"], os.SEEK_SET)
+                pos = self._read_field_from_file(f,
+                            tp[ptype], "Coordinates")
+                mask = selector.select_points(
+                    pos[:,0], pos[:,1], pos[:,2])
+                del pos
+                if mask is None: continue
+                for field in field_list:
+                    f.seek(poff[ptype, field], os.SEEK_SET)
+                    data = self._read_field_from_file(f, tp[ptype], field)
+                    data = data[mask]
+                    my_ind = ind[ptype, field]
+                    mylog.debug("Filling from %s to %s with %s",
+                        my_ind, my_ind+data.shape[0], field)
+                    rv[ptype, field][my_ind:my_ind + data.shape[0],...] = data
+                    ind[ptype, field] += data.shape[0]
+            f.close()
         return rv
 
     def _read_field_from_file(self, f, count, name):
@@ -253,19 +255,27 @@
             arr = arr.reshape((count/3, 3), order="C")
         return arr.astype("float64")
 
-    def _initialize_index(self, domain, octree):
-        count = sum(domain.total_particles.values())
+    def _initialize_index(self, data_file, regions):
+        count = sum(data_file.total_particles.values())
         dt = [("px", "float32"), ("py", "float32"), ("pz", "float32")]
-        with open(domain.domain_filename, "rb") as f:
+        DLE = data_file.pf.domain_left_edge
+        DRE = data_file.pf.domain_right_edge
+        dx = (DRE - DLE) / 2**_ORDER_MAX
+        with open(data_file.filename, "rb") as f:
             f.seek(self._header_offset)
             # The first total_particles * 3 values are positions
             pp = np.fromfile(f, dtype = dt, count = count)
-        pos = np.empty((count, 3), dtype="float64")
-        pos[:,0] = pp['px']
-        pos[:,1] = pp['py']
-        pos[:,2] = pp['pz']
+        pos = np.column_stack([pp['px'], pp['py'], pp['pz']]).astype("float64")
         del pp
-        octree.add(pos, domain.domain_id)
+        regions.add_data_file(pos, data_file.file_id)
+        lx = np.floor((pos[:,0] - DLE[0])/dx[0]).astype("uint64")
+        ly = np.floor((pos[:,1] - DLE[1])/dx[1]).astype("uint64")
+        lz = np.floor((pos[:,2] - DLE[2])/dx[2]).astype("uint64")
+        del pos
+        morton = get_morton_indices_unravel(lx, ly, lz)
+        del lx, ly, lz
+        return morton
+        
 
     def _count_particles(self, domain):
         npart = dict((self._ptypes[i], v)

diff -r f6c7b1949be82e3bbd735b843fc0049856d0d8a0 -r c58d372d1e36c4ea6b360ee5e0a8f579b4d0180f yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -335,6 +335,23 @@
         morton_indices[i] = mi
     return morton_indices
 
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def get_morton_indices_unravel(np.ndarray[np.uint64_t, ndim=1] left_x,
+                               np.ndarray[np.uint64_t, ndim=1] left_y,
+                               np.ndarray[np.uint64_t, ndim=1] left_z,):
+    cdef np.int64_t i, mi
+    cdef np.ndarray[np.uint64_t, ndim=1] morton_indices
+    morton_indices = np.zeros(left_x.shape[0], 'uint64')
+    for i in range(left_x.shape[0]):
+        mi = 0
+        mi |= spread_bits(left_z[i])<<0
+        mi |= spread_bits(left_y[i])<<1
+        mi |= spread_bits(left_x[i])<<2
+        morton_indices[i] = mi
+    return morton_indices
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)


https://bitbucket.org/yt_analysis/yt/commits/4a332ffbe8a9/
Changeset:   4a332ffbe8a9
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-17 18:26:29
Summary:     Eliminate final mask usage.  Grid intersection seems somewhat off and
sub-region selection is currently broken.
Affected #:  5 files

diff -r c58d372d1e36c4ea6b360ee5e0a8f579b4d0180f -r 4a332ffbe8a9c8acce22ce7c78342666c99b8f5b yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -198,8 +198,7 @@
     @property
     def domain_ind(self):
         if self._domain_ind is None:
-            mask = self.selector.select_octs(self.oct_handler)
-            di = self.oct_handler.domain_ind(mask)
+            di = self.oct_handler.domain_ind(self.base_selector)
             self._domain_ind = di
         return self._domain_ind
 
@@ -214,7 +213,7 @@
         op.process_octree(self.oct_handler, self.domain_ind, positions, fields, 0)
         vals = op.finalize()
         return self._reshape_vals(vals)
-    
+
     def select_icoords(self, dobj):
         return self.oct_handler.icoords(dobj.selector)
 

diff -r c58d372d1e36c4ea6b360ee5e0a8f579b4d0180f -r 4a332ffbe8a9c8acce22ce7c78342666c99b8f5b yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -83,6 +83,7 @@
 cdef void visit_count_octs(Oct *o, OctVisitorData *data)
 cdef void visit_count_total_octs(Oct *o, OctVisitorData *data)
 cdef void visit_mark_octs(Oct *o, OctVisitorData *data)
+cdef void visit_index_octs(Oct *o, OctVisitorData *data)
 cdef void visit_icoords_octs(Oct *o, OctVisitorData *data)
 cdef void visit_ires_octs(Oct *o, OctVisitorData *data)
 cdef void visit_fcoords_octs(Oct *o, OctVisitorData *data)

diff -r c58d372d1e36c4ea6b360ee5e0a8f579b4d0180f -r 4a332ffbe8a9c8acce22ce7c78342666c99b8f5b yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -850,6 +850,15 @@
             arr[data.index * 8 + i] = 1
         data.index += 1
 
+cdef void visit_index_octs(Oct *o, OctVisitorData *data):
+    cdef int i
+    cdef np.int64_t *arr
+    if data.last != o.domain_ind:
+        data.last = o.domain_ind
+        arr = <np.int64_t *> data.array
+        arr[o.domain_ind] = data.index
+        data.index += 1
+
 cdef void visit_icoords_octs(Oct *o, OctVisitorData *data):
     cdef np.int64_t *coords = <np.int64_t*> data.array
     cdef int i

diff -r c58d372d1e36c4ea6b360ee5e0a8f579b4d0180f -r 4a332ffbe8a9c8acce22ce7c78342666c99b8f5b yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -29,7 +29,7 @@
     visit_icoords_octs, visit_ires_octs, \
     visit_fcoords_octs, visit_fwidth_octs, \
     visit_count_octs, visit_count_total_octs, \
-    visit_mark_octs
+    visit_mark_octs, visit_index_octs
 from libc.stdlib cimport malloc, free, qsort
 from libc.math cimport floor
 from fp_utils cimport *
@@ -337,19 +337,15 @@
                         self.visit(o.children[i][j][k], counts, level + 1)
         return
 
-    def domain_ind(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
+    def domain_ind(self, selector):
         cdef np.ndarray[np.int64_t, ndim=1] ind
-        ind = np.empty(mask.shape[0], 'int64')
         # Here's where we grab the masked items.
-        nm = 0
-        for oi in range(mask.shape[0]):
-            ind[oi] = -1
-            use = 0
-            for i in range(8):
-                if mask[oi, i] == 1: use = 1
-            if use == 1:
-                ind[oi] = nm
-            nm += use
+        ind = np.zeros(self.nocts, 'int64') - 1
+        cdef OctVisitorData data
+        data.array = ind.data
+        data.last = -1
+        data.index = 0
+        self.visit_all_octs(selector, visit_index_octs, &data)
         return ind
 
     def domain_mask(self, SelectorObject selector):

diff -r c58d372d1e36c4ea6b360ee5e0a8f579b4d0180f -r 4a332ffbe8a9c8acce22ce7c78342666c99b8f5b yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1271,8 +1271,9 @@
 
     cdef int select_grid(self, np.float64_t left_edge[3],
                          np.float64_t right_edge[3], np.int32_t level) nogil:
-        # This is where we'll want to add a check for the min/max index.
-        return 1
+        # Because visitors now use select_grid, we should be explicitly
+        # checking this.
+        return self.base_selector.select_grid(left_edge, right_edge, level)
 
 particle_octree_subset_selector = ParticleOctreeSubsetSelector
 


https://bitbucket.org/yt_analysis/yt/commits/10befbee6f52/
Changeset:   10befbee6f52
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-17 18:45:08
Summary:     Use traversal everywhere and fix selection.
Affected #:  3 files

diff -r 4a332ffbe8a9c8acce22ce7c78342666c99b8f5b -r 10befbee6f52858268c7f4c4310a403bc15eeb23 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -198,7 +198,7 @@
     @property
     def domain_ind(self):
         if self._domain_ind is None:
-            di = self.oct_handler.domain_ind(self.base_selector)
+            di = self.oct_handler.domain_ind(self.selector)
             self._domain_ind = di
         return self._domain_ind
 
@@ -231,7 +231,7 @@
         if id(selector) == self._last_selector_id:
             return self._last_mask
         self._last_mask = self.oct_handler.domain_mask(
-                self.selector)
+                self.base_selector)
         if self._last_mask.sum() == 0: return None
         self._last_selector_id = id(selector)
         return self._last_mask

diff -r 4a332ffbe8a9c8acce22ce7c78342666c99b8f5b -r 10befbee6f52858268c7f4c4310a403bc15eeb23 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -842,13 +842,13 @@
 
 cdef void visit_mark_octs(Oct *o, OctVisitorData *data):
     cdef int i
-    cdef np.uint8_t *arr
+    cdef np.uint8_t *arr = <np.uint8_t *> data.array
     if data.last != o.domain_ind:
         data.last = o.domain_ind
-        arr = <np.uint8_t *> data.array
-        for i in range(8):
-            arr[data.index * 8 + i] = 1
         data.index += 1
+    cdef np.int64_t index = data.index * 8
+    index += ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] 
+    arr[index] = 1
 
 cdef void visit_index_octs(Oct *o, OctVisitorData *data):
     cdef int i

diff -r 4a332ffbe8a9c8acce22ce7c78342666c99b8f5b -r 10befbee6f52858268c7f4c4310a403bc15eeb23 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -355,7 +355,7 @@
         self.visit_all_octs(selector, visit_count_total_octs, &data)
         cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
                 np.zeros((2, 2, 2, data.index), 'uint8')
-        data.index = 0
+        data.index = -1
         data.last = -1
         data.array = m2.data
         self.visit_all_octs(selector, visit_mark_octs, &data)


https://bitbucket.org/yt_analysis/yt/commits/52a4eaee2343/
Changeset:   52a4eaee2343
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-18 16:50:18
Summary:     A variety of changes.

 * Change the ParticleOctree to use base_region.
 * Add a new masking visitor (its within-oct cell indexing is sketched below).
 * Change how select_octs works.
Affected #:  7 files
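
For reference, the masking visitors in this changeset flatten a cell's
(i, j, k) position within its 2x2x2 oct as ((k*2)+j)*2+i, so x varies
fastest.  A small sketch:

    def cell_index(i, j, k):
        # Flatten within-oct cell coordinates, matching the visitors'
        # ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] expression.
        return ((k * 2) + j) * 2 + i

    assert [cell_index(i, j, k)
            for k in (0, 1) for j in (0, 1) for i in (0, 1)] == list(range(8))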

diff -r 10befbee6f52858268c7f4c4310a403bc15eeb23 -r 52a4eaee2343e6e716a7f2fc210d47e2265077c0 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -176,7 +176,7 @@
     # this, it's unavoidable for many types of data storage on disk.
     _type_name = 'particle_octree_subset'
     _con_args = ('data_files', 'pf', 'min_ind', 'max_ind')
-    def __init__(self, base_selector, data_files, pf, min_ind = 0, max_ind = 0):
+    def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0):
         # The first attempt at this will not work in parallel.
         self.data_files = data_files
         self.field_data = YTFieldData()
@@ -191,8 +191,9 @@
         self._last_selector_id = None
         self._current_particle_type = 'all'
         self._current_fluid_type = self.pf.default_fluid_type
-        self.base_selector = base_selector
-    
+        self.base_region = base_region
+        self.base_selector = base_region.selector
+
     _domain_ind = None
 
     @property
@@ -230,6 +231,8 @@
     def select(self, selector):
         if id(selector) == self._last_selector_id:
             return self._last_mask
+        # This is where things get confused.  I believe the data is differently
+        # ordered than the mask.
         self._last_mask = self.oct_handler.domain_mask(
                 self.base_selector)
         if self._last_mask.sum() == 0: return None

diff -r 10befbee6f52858268c7f4c4310a403bc15eeb23 -r 52a4eaee2343e6e716a7f2fc210d47e2265077c0 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -83,6 +83,7 @@
 cdef void visit_count_octs(Oct *o, OctVisitorData *data)
 cdef void visit_count_total_octs(Oct *o, OctVisitorData *data)
 cdef void visit_mark_octs(Oct *o, OctVisitorData *data)
+cdef void visit_mask_octs(Oct *o, OctVisitorData *data)
 cdef void visit_index_octs(Oct *o, OctVisitorData *data)
 cdef void visit_icoords_octs(Oct *o, OctVisitorData *data)
 cdef void visit_ires_octs(Oct *o, OctVisitorData *data)

diff -r 10befbee6f52858268c7f4c4310a403bc15eeb23 -r 52a4eaee2343e6e716a7f2fc210d47e2265077c0 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -142,6 +142,7 @@
                         oct_visitor_function *func,
                         OctVisitorData *data):
         cdef int i, j, k, n
+        data.global_index = -1
         cdef np.float64_t pos[3], dds[3]
         # This dds is the oct-width
         for i in range(3):
@@ -850,6 +851,13 @@
     index += ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] 
     arr[index] = 1
 
+cdef void visit_mask_octs(Oct *o, OctVisitorData *data):
+    cdef int i
+    cdef np.uint8_t *arr = <np.uint8_t *> data.array
+    cdef np.int64_t index = data.index * 8
+    index += ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] 
+    arr[index] = 1
+
 cdef void visit_index_octs(Oct *o, OctVisitorData *data):
     cdef int i
     cdef np.int64_t *arr

diff -r 10befbee6f52858268c7f4c4310a403bc15eeb23 -r 52a4eaee2343e6e716a7f2fc210d47e2265077c0 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -146,7 +146,7 @@
             if data_files is None:
                 data_files = [self.data_files[i] for i in
                               self.regions.identify_data_files(dobj.selector)]
-            subset = [ParticleOctreeSubset(dobj.selector, data_files, 
+            subset = [ParticleOctreeSubset(dobj, data_files, 
                         self.parameter_file)]
             dobj._chunk_info = subset
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
@@ -181,10 +181,3 @@
         self.oct_handler = oct_handler
         self.regions = regions
         super(ParticleDataChunk, self).__init__(*args, **kwargs)
-
-    def _accumulate_values(self, method):
-        mfunc = getattr(self.oct_handler, method)
-        rv = mfunc(self.dobj.selector)
-        self._data_size = rv.shape[0]
-        return rv
-

diff -r 10befbee6f52858268c7f4c4310a403bc15eeb23 -r 52a4eaee2343e6e716a7f2fc210d47e2265077c0 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -343,18 +343,21 @@
         ind = np.zeros(self.nocts, 'int64') - 1
         cdef OctVisitorData data
         data.array = ind.data
+        data.index = 0
         data.last = -1
-        data.index = 0
         self.visit_all_octs(selector, visit_index_octs, &data)
         return ind
 
     def domain_mask(self, SelectorObject selector):
+        # This is actually not correct.  The hard part is that we need to
+        # iterate the same way visit_all_octs does, but we need to track the
+        # number of octs total visited.
         cdef OctVisitorData data
         data.index = 0
         data.last = -1
         self.visit_all_octs(selector, visit_count_total_octs, &data)
         cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
-                np.zeros((2, 2, 2, data.index), 'uint8')
+                np.zeros((2, 2, 2, data.index), 'uint8', order='F')
         data.index = -1
         data.last = -1
         data.array = m2.data

diff -r 10befbee6f52858268c7f4c4310a403bc15eeb23 -r 52a4eaee2343e6e716a7f2fc210d47e2265077c0 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -29,6 +29,7 @@
 cdef struct OctVisitorData:
     np.uint64_t index
     np.uint64_t last
+    np.uint64_t global_index
     int ind[3]
     void *array
 

diff -r 10befbee6f52858268c7f4c4310a403bc15eeb23 -r 52a4eaee2343e6e716a7f2fc210d47e2265077c0 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -29,7 +29,8 @@
 from libc.stdlib cimport malloc, free
 from fp_utils cimport fclip, iclip
 from selection_routines cimport SelectorObject
-from oct_container cimport OctreeContainer, OctAllocationContainer, Oct
+from oct_container cimport OctreeContainer, OctAllocationContainer, Oct, \
+    visit_mark_octs, visit_count_total_octs
 #from geometry_utils cimport point_to_hilbert
 from yt.utilities.lib.grid_traversal cimport \
     VolumeContainer, sample_function, walk_volume
@@ -178,6 +179,7 @@
                         np.float64_t pos[3], np.float64_t dds[3],
                         np.ndarray[np.uint8_t, ndim=2] mask,
                         int level = 0):
+
         cdef np.float64_t LE[3], RE[3], sdds[3], spos[3]
         cdef int i, j, k, res, ii
         cdef Oct *ch
@@ -268,6 +270,7 @@
             this_level = 0
         if res == 0 and this_level == 1:
             return
+        data.global_index += 1
         # Now we visit all our children.  We subtract off sdds for the first
         # pass because we center it on the first cell.
         spos[0] = pos[0] - sdds[0]/2.0
@@ -1229,19 +1232,18 @@
     @cython.cdivision(True)
     def select_octs(self, OctreeContainer octree):
         # There has to be a better way to do this.
-        cdef np.ndarray[np.uint8_t, ndim=2, cast=True] m2
-        m2 = self.base_selector.select_octs(octree)
+        cdef OctVisitorData data
+        data.index = 0
+        data.last = -1
+        octree.visit_all_octs(self, visit_count_total_octs, &data)
+        cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
+                np.zeros((2, 2, 2, data.index), 'uint8', order='C')
         # This is where we'll -- in the future -- cut up based on indices of
         # the octs.
-        cdef np.int64_t nm, i
-        cdef np.uint8_t use, k
-        nm = m2.shape[0]
-        for i in range(nm):
-            use = 0
-            for k in range(8):
-                if m2[i,k] == 1: use = 1
-            for k in range(8):
-                m2[i,k] = use
+        data.index = -1
+        data.last = -1
+        data.array = m2.data
+        octree.visit_all_octs(self, visit_mark_octs, &data)
         return m2.astype("bool")
 
     @cython.boundscheck(False)


https://bitbucket.org/yt_analysis/yt/commits/e3a9b69f4b0c/
Changeset:   e3a9b69f4b0c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-18 19:21:40
Summary:     Attempting to push selection deeper, to avoid a few more mask constructions.

Spatial/non-spatial ordering is still incorrect.
Affected #:  6 files

diff -r 52a4eaee2343e6e716a7f2fc210d47e2265077c0 -r e3a9b69f4b0c766138f7249fe05c70576b044907 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -248,28 +248,19 @@
         if ngz == 0:
             for io_chunk in self.chunks([], "io"):
                 for i,chunk in enumerate(self.chunks(field, "spatial", ngz = 0)):
-                    mask = self._current_chunk.objs[0].select(self.selector)
-                    if mask is None: continue
-                    data = self[field]
-                    if len(data.shape) == 4:
-                        # This is how we keep it consistent between oct ordering
-                        # and grid ordering.
-                        data = data.T[mask.T]
-                    else:
-                        data = data[mask]
-                    rv[ind:ind+data.size] = data
-                    ind += data.size
+                    print self.selector
+                    ind += self._current_chunk.objs[0].select(
+                            self.selector, self[field], rv, ind)
         else:
             chunks = self.hierarchy._chunk(self, "spatial", ngz = ngz)
             for i, chunk in enumerate(chunks):
                 with self._chunked_read(chunk):
                     gz = self._current_chunk.objs[0]
                     wogz = gz._base_grid
-                    mask = wogz.select(self.selector)
-                    if mask is None: continue
-                    data = gz[field][ngz:-ngz, ngz:-ngz, ngz:-ngz][mask]
-                    rv[ind:ind+data.size] = data
-                    ind += data.size
+                    ind += wogz.select(
+                        self.selector,
+                        gz[field][ngz:-ngz, ngz:-ngz, ngz:-ngz],
+                        rv, ind)
         return rv
 
     def _generate_particle_field(self, field):

diff -r 52a4eaee2343e6e716a7f2fc210d47e2265077c0 -r e3a9b69f4b0c766138f7249fe05c70576b044907 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -75,6 +75,8 @@
             return self._current_chunk.fwidth[:,1]
         elif field == "dz":
             return self._current_chunk.fwidth[:,2]
+        else:
+            raise RuntimeError
 
     def select_icoords(self, dobj):
         return self.oct_handler.icoords(self.domain.domain_id, self.mask,
@@ -117,9 +119,10 @@
         return tr
 
     def _reshape_vals(self, arr):
+        if len(arr.shape) == 4: return arr
         nz = self._num_zones + 2*self._num_ghost_zones
         n_oct = arr.shape[0] / (nz**3.0)
-        arr = arr.reshape((nz, nz, nz, n_oct), order="F")
+        arr = arr.reshape((nz, nz, nz, n_oct), order="C")
         return arr
 
     _domain_ind = None
@@ -185,8 +188,8 @@
         self.hierarchy = self.pf.hierarchy
         self.oct_handler = pf.h.oct_handler
         self.min_ind = min_ind
+        if max_ind == 0: max_ind = (1 << 63)
         self.max_ind = max_ind
-        if max_ind == 0: max_ind = (1 << 63)
         self._last_mask = None
         self._last_selector_id = None
         self._current_particle_type = 'all'
@@ -228,16 +231,9 @@
     def select_ires(self, dobj):
         return self.oct_handler.ires(dobj.selector)
 
-    def select(self, selector):
-        if id(selector) == self._last_selector_id:
-            return self._last_mask
-        # This is where things get confused.  I believe the data is differently
-        # ordered than the mask.
-        self._last_mask = self.oct_handler.domain_mask(
-                self.base_selector)
-        if self._last_mask.sum() == 0: return None
-        self._last_selector_id = id(selector)
-        return self._last_mask
+    def select(self, selector, source, dest, offset):
+        n = self.oct_handler.selector_fill(selector, source, dest, offset)
+        return n
 
     def count(self, selector):
         if id(selector) == self._last_selector_id:
@@ -254,4 +250,3 @@
     def select_particles(self, selector, x, y, z):
         mask = selector.select_points(x,y,z)
         return mask
-

diff -r 52a4eaee2343e6e716a7f2fc210d47e2265077c0 -r e3a9b69f4b0c766138f7249fe05c70576b044907 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -80,12 +80,13 @@
 
 # Now some visitor functions
 
-cdef void visit_count_octs(Oct *o, OctVisitorData *data)
-cdef void visit_count_total_octs(Oct *o, OctVisitorData *data)
-cdef void visit_mark_octs(Oct *o, OctVisitorData *data)
-cdef void visit_mask_octs(Oct *o, OctVisitorData *data)
-cdef void visit_index_octs(Oct *o, OctVisitorData *data)
-cdef void visit_icoords_octs(Oct *o, OctVisitorData *data)
-cdef void visit_ires_octs(Oct *o, OctVisitorData *data)
-cdef void visit_fcoords_octs(Oct *o, OctVisitorData *data)
-cdef void visit_fwidth_octs(Oct *o, OctVisitorData *data)
+cdef oct_visitor_function visit_count_octs
+cdef oct_visitor_function visit_count_total_octs
+cdef oct_visitor_function visit_mark_octs
+cdef oct_visitor_function visit_mask_octs
+cdef oct_visitor_function visit_index_octs
+cdef oct_visitor_function visit_icoords_octs
+cdef oct_visitor_function visit_ires_octs
+cdef oct_visitor_function visit_fcoords_octs
+cdef oct_visitor_function visit_fwidth_octs
+cdef oct_visitor_function visit_copy_array

diff -r 52a4eaee2343e6e716a7f2fc210d47e2265077c0 -r e3a9b69f4b0c766138f7249fe05c70576b044907 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -831,6 +831,14 @@
 
 # Now some visitor functions
 
+cdef void visit_copy_array(Oct *o, OctVisitorData *data):
+    # We should always have global_index less than our source.
+    cdef np.int64_t index = data.global_index * 8
+    cdef np.float64_t **p = <np.float64_t**> data.array
+    index += ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] 
+    p[1][data.index] = p[0][index]
+    data.index += 1
+
 cdef void visit_count_octs(Oct *o, OctVisitorData *data):
     # Number of cells visited
     data.index += 1

diff -r 52a4eaee2343e6e716a7f2fc210d47e2265077c0 -r e3a9b69f4b0c766138f7249fe05c70576b044907 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -38,7 +38,7 @@
     void *alloca(int)
 
 cdef inline int gind(int i, int j, int k, int dims[3]):
-    return ((k*dims[1])+j)*dims[0]+i
+    return ((i*dims[1])+j)*dims[2]+k
 
 
 ####################################################

diff -r 52a4eaee2343e6e716a7f2fc210d47e2265077c0 -r e3a9b69f4b0c766138f7249fe05c70576b044907 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -29,7 +29,7 @@
     visit_icoords_octs, visit_ires_octs, \
     visit_fcoords_octs, visit_fwidth_octs, \
     visit_count_octs, visit_count_total_octs, \
-    visit_mark_octs, visit_index_octs
+    visit_mark_octs, visit_index_octs, visit_copy_array
 from libc.stdlib cimport malloc, free, qsort
 from libc.math cimport floor
 from fp_utils cimport *
@@ -348,21 +348,23 @@
         self.visit_all_octs(selector, visit_index_octs, &data)
         return ind
 
-    def domain_mask(self, SelectorObject selector):
+    def selector_fill(self, SelectorObject selector,
+                      np.ndarray[np.float64_t, ndim=4] source,
+                      np.ndarray[np.float64_t, ndim=1] dest,
+                      np.int64_t offset):
         # This is actually not correct.  The hard part is that we need to
         # iterate the same way visit_all_octs does, but we need to track the
         # number of octs total visited.
         cdef OctVisitorData data
-        data.index = 0
+        data.index = offset
+        # We only need this so we can continue calculating the offset
         data.last = -1
-        self.visit_all_octs(selector, visit_count_total_octs, &data)
-        cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
-                np.zeros((2, 2, 2, data.index), 'uint8', order='F')
-        data.index = -1
-        data.last = -1
-        data.array = m2.data
-        self.visit_all_octs(selector, visit_mark_octs, &data)
-        return m2.astype("bool")
+        cdef void *p[2]
+        p[0] = source.data
+        p[1] = dest.data
+        data.array = &p
+        self.visit_all_octs(selector, visit_copy_array, &data)
+        return data.index - offset
 
 cdef class ParticleRegions:
     cdef np.float64_t left_edge[3]


https://bitbucket.org/yt_analysis/yt/commits/14f5ff9a4421/
Changeset:   14f5ff9a4421
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-18 23:07:59
Summary:     Some functions need to be called even if no cells are selected.

This also brings the spatial fields back in line with the non-spatial fields
for particle octrees.

However, I think it breaks most of the other Octree frontends.
Affected #:  6 files
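
For reference, a pure-Python analogue (not the Cython code) of the new
visitor signature, showing why the counting visitor must run even for
unselected cells while per-cell visitors return early:

    def visit_count_total_octs(oct_ind, data, selected):
        # Counts octs regardless of selection, keeping offsets consistent.
        if data["last"] != oct_ind:
            data["index"] += 1
            data["last"] = oct_ind

    def visit_count_cells(oct_ind, data, selected):
        # Only selected cells contribute to the cell count.
        data["index"] += selected

    data = {"index": 0, "last": -1}
    for oct_ind, selected in [(0, 0), (0, 1), (1, 0)]:
        visit_count_total_octs(oct_ind, data, selected)
    assert data["index"] == 2   # two distinct octs, selection ignored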

diff -r e3a9b69f4b0c766138f7249fe05c70576b044907 -r 14f5ff9a442112c858c84f8274bd72cef19fc66c yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -122,7 +122,8 @@
         if len(arr.shape) == 4: return arr
         nz = self._num_zones + 2*self._num_ghost_zones
         n_oct = arr.shape[0] / (nz**3.0)
-        arr = arr.reshape((nz, nz, nz, n_oct), order="C")
+        arr = arr.reshape((nz, nz, nz, n_oct), order="F")
+        arr = np.asfortranarray(arr)
         return arr
 
     _domain_ind = None

diff -r e3a9b69f4b0c766138f7249fe05c70576b044907 -r 14f5ff9a442112c858c84f8274bd72cef19fc66c yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -80,7 +80,6 @@
 
 # Now some visitor functions
 
-cdef oct_visitor_function visit_count_octs
 cdef oct_visitor_function visit_count_total_octs
 cdef oct_visitor_function visit_mark_octs
 cdef oct_visitor_function visit_mask_octs

diff -r e3a9b69f4b0c766138f7249fe05c70576b044907 -r 14f5ff9a442112c858c84f8274bd72cef19fc66c yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -831,25 +831,24 @@
 
 # Now some visitor functions
 
-cdef void visit_copy_array(Oct *o, OctVisitorData *data):
+cdef void visit_copy_array(Oct *o, OctVisitorData *data, np.uint8_t selected):
     # We should always have global_index less than our source.
+    if selected == 0: return
     cdef np.int64_t index = data.global_index * 8
     cdef np.float64_t **p = <np.float64_t**> data.array
     index += ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] 
     p[1][data.index] = p[0][index]
     data.index += 1
 
-cdef void visit_count_octs(Oct *o, OctVisitorData *data):
-    # Number of cells visited
-    data.index += 1
-
-cdef void visit_count_total_octs(Oct *o, OctVisitorData *data):
+cdef void visit_count_total_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    # Count even if not selected.
     # Number of *octs* visited.
     if data.last != o.domain_ind:
         data.index += 1
         data.last = o.domain_ind
 
-cdef void visit_mark_octs(Oct *o, OctVisitorData *data):
+cdef void visit_mark_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    # We mark them even if they are not selected
     cdef int i
     cdef np.uint8_t *arr = <np.uint8_t *> data.array
     if data.last != o.domain_ind:
@@ -859,14 +858,16 @@
     index += ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] 
     arr[index] = 1
 
-cdef void visit_mask_octs(Oct *o, OctVisitorData *data):
+cdef void visit_mask_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    if selected == 0: return
     cdef int i
     cdef np.uint8_t *arr = <np.uint8_t *> data.array
-    cdef np.int64_t index = data.index * 8
+    cdef np.int64_t index = data.global_index * 8
     index += ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] 
     arr[index] = 1
 
-cdef void visit_index_octs(Oct *o, OctVisitorData *data):
+cdef void visit_index_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    # Note that we provide an index even if the cell is not selected.
     cdef int i
     cdef np.int64_t *arr
     if data.last != o.domain_ind:
@@ -875,22 +876,25 @@
         arr[o.domain_ind] = data.index
         data.index += 1
 
-cdef void visit_icoords_octs(Oct *o, OctVisitorData *data):
+cdef void visit_icoords_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    if selected == 0: return
     cdef np.int64_t *coords = <np.int64_t*> data.array
     cdef int i
     for i in range(3):
         coords[data.index * 3 + i] = (o.pos[i] << 1) + data.ind[i]
     data.index += 1
 
-cdef void visit_ires_octs(Oct *o, OctVisitorData *data):
+cdef void visit_ires_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    if selected == 0: return
     cdef np.int64_t *ires = <np.int64_t*> data.array
     ires[data.index] = o.level
     data.index += 1
 
-cdef void visit_fcoords_octs(Oct *o, OctVisitorData *data):
+cdef void visit_fcoords_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
     # Note that this does not actually give the correct floating point
     # coordinates.  It gives them in some unit system where the domain is 1.0
     # in all directions, and assumes that they will be scaled later.
+    if selected == 0: return
     cdef np.float64_t *fcoords = <np.float64_t*> data.array
     cdef int i
     cdef np.float64_t c, dx 
@@ -900,10 +904,11 @@
         fcoords[data.index * 3 + i] = (c + 0.5) * dx
     data.index += 1
 
-cdef void visit_fwidth_octs(Oct *o, OctVisitorData *data):
+cdef void visit_fwidth_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
     # Note that this does not actually give the correct floating point
     # coordinates.  It gives them in some unit system where the domain is 1.0
     # in all directions, and assumes that they will be scaled later.
+    if selected == 0: return
     cdef np.float64_t *fwidth = <np.float64_t*> data.array
     cdef int i
     cdef np.float64_t dx 

diff -r e3a9b69f4b0c766138f7249fe05c70576b044907 -r 14f5ff9a442112c858c84f8274bd72cef19fc66c yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -28,7 +28,7 @@
 from oct_container cimport OctreeContainer, Oct, OctInfo, ORDER_MAX, \
     visit_icoords_octs, visit_ires_octs, \
     visit_fcoords_octs, visit_fwidth_octs, \
-    visit_count_octs, visit_count_total_octs, \
+    visit_count_total_octs, \
     visit_mark_octs, visit_index_octs, visit_copy_array
 from libc.stdlib cimport malloc, free, qsort
 from libc.math cimport floor

diff -r e3a9b69f4b0c766138f7249fe05c70576b044907 -r 14f5ff9a442112c858c84f8274bd72cef19fc66c yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -33,7 +33,8 @@
     int ind[3]
     void *array
 
-ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor)
+ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor,
+                                   np.uint8_t selected)
 
 cdef class SelectorObject:
     cdef public np.int32_t min_level

diff -r e3a9b69f4b0c766138f7249fe05c70576b044907 -r 14f5ff9a442112c858c84f8274bd72cef19fc66c yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -30,7 +30,7 @@
 from fp_utils cimport fclip, iclip
 from selection_routines cimport SelectorObject
 from oct_container cimport OctreeContainer, OctAllocationContainer, Oct, \
-    visit_mark_octs, visit_count_total_octs
+    visit_mark_octs, visit_count_total_octs, visit_mask_octs
 #from geometry_utils cimport point_to_hilbert
 from yt.utilities.lib.grid_traversal cimport \
     VolumeContainer, sample_function, walk_volume
@@ -116,8 +116,8 @@
 
 # Now our visitor functions
 
-cdef void visit_count_octs(Oct *o, OctVisitorData *data):
-    data.index += 1
+cdef void visit_count_cells(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    data.index += selected
 
 cdef class SelectorObject:
 
@@ -150,27 +150,21 @@
     @cython.wraparound(False)
     @cython.cdivision(True)
     def select_octs(self, OctreeContainer octree):
-        cdef int i, j, k, n
-        cdef np.ndarray[np.uint8_t, ndim=2] mask = np.zeros((octree.nocts, 8), dtype='uint8')
-        cdef np.float64_t pos[3], dds[3]
-        # This dds is the oct-width
-        for i in range(3):
-            dds[i] = (octree.DRE[i] - octree.DLE[i]) / octree.nn[i]
-        # Pos is the center of the octs
-        pos[0] = octree.DLE[0] + dds[0]/2.0
-        for i in range(octree.nn[0]):
-            pos[1] = octree.DLE[1] + dds[1]/2.0
-            for j in range(octree.nn[1]):
-                pos[2] = octree.DLE[2] + dds[2]/2.0
-                for k in range(octree.nn[2]):
-                    if octree.root_mesh[i][j][k] == NULL: continue
-                    self.recursively_select_octs(
-                        octree.root_mesh[i][j][k],
-                        pos, dds, mask, 0) 
-                    pos[2] += dds[2]
-                pos[1] += dds[1]
-            pos[0] += dds[0]
-        return mask.astype("bool")
+        # There has to be a better way to do this.
+        cdef OctVisitorData data
+        data.index = 0
+        data.last = -1
+        data.global_index = -1
+        octree.visit_all_octs(self, visit_count_total_octs, &data)
+        cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
+                np.zeros((2, 2, 2, data.index), 'uint8', order='C')
+        # This is where we'll -- in the future -- cut up based on indices of
+        # the octs.
+        data.index = -1
+        data.last = -1
+        data.array = m2.data
+        octree.visit_all_octs(self, visit_mask_octs, &data)
+        return m2.astype("bool")
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -234,7 +228,7 @@
     def count_octs(self, OctreeContainer octree):
         cdef OctVisitorData data
         data.index = 0
-        octree.visit_all_octs(self, visit_count_octs, &data)
+        octree.visit_all_octs(self, visit_count_cells, &data)
         return data.index
 
     @cython.boundscheck(False)
@@ -248,6 +242,7 @@
         cdef np.float64_t LE[3], RE[3], sdds[3], spos[3]
         cdef int i, j, k, res, ii
         cdef Oct *ch
+        cdef np.uint8_t selected
         # Remember that pos is the *center* of the oct, and dds is the oct
         # width.  So to get to the edges, we add/subtract half of dds.
         for i in range(3):
@@ -270,7 +265,7 @@
             this_level = 0
         if res == 0 and this_level == 1:
             return
-        data.global_index += 1
+        cdef int increment = 1
         # Now we visit all our children.  We subtract off sdds for the first
         # pass because we center it on the first cell.
         spos[0] = pos[0] - sdds[0]/2.0
@@ -284,12 +279,14 @@
                     if next_level == 1 and ch != NULL:
                         self.recursively_visit_octs(
                             ch, spos, sdds, level + 1, func, data)
-                    elif this_level == 1 and self.select_cell(
-                                    spos, sdds, eterm):
+                    elif this_level == 1:
+                        data.global_index += increment
+                        increment = 0
+                        selected = self.select_cell(spos, sdds, eterm)
                         data.ind[0] = i
                         data.ind[1] = j
                         data.ind[2] = k
-                        func(root, data)
+                        func(root, data, selected)
                     spos[2] += sdds[2]
                 spos[1] += sdds[1]
             spos[0] += sdds[0]
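
A minimal Python sketch of the pattern this changeset introduces: the traversal itself decides selection and hands every visitor a 0/1 flag, and global_index is bumped lazily on the first cell of each oct. Names here (traverse, count_cells, the dict-based state) are illustrative stand-ins for the Cython machinery, not yt API.

    def count_cells(state, selected):
        # Counting visitors add the 0/1 flag instead of branching on it.
        state["index"] += selected

    def traverse(cells, select_cell, visitor, state):
        increment = 1  # global_index advances once per oct, on its first cell
        for cell in cells:
            state["global_index"] += increment
            increment = 0
            visitor(state, 1 if select_cell(cell) else 0)

    state = {"index": 0, "global_index": -1}
    traverse(range(8), lambda c: c % 2 == 0, count_cells, state)
    print(state["index"])         # 4 cells selected in this oct
    print(state["global_index"])  # 0: one oct visited

The payoff is a single signature shared by visitors that must run on unselected cells (counting, marking) and visitors that act only on selected ones.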


https://bitbucket.org/yt_analysis/yt/commits/3ffaeb736bef/
Changeset:   3ffaeb736bef
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-18 23:23:37
Summary:     Removing print statement.
Affected #:  1 file

diff -r 14f5ff9a442112c858c84f8274bd72cef19fc66c -r 3ffaeb736bef058752f2b4d9f15f03a171b76ebf yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -248,7 +248,6 @@
         if ngz == 0:
             for io_chunk in self.chunks([], "io"):
                 for i,chunk in enumerate(self.chunks(field, "spatial", ngz = 0)):
-                    print self.selector
                     ind += self._current_chunk.objs[0].select(
                             self.selector, self[field], rv, ind)
         else:


https://bitbucket.org/yt_analysis/yt/commits/c051307ad745/
Changeset:   c051307ad745
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-18 23:45:28
Summary:     A few more changes to try to get chunking working properly.
Affected #:  2 files

diff -r 3ffaeb736bef058752f2b4d9f15f03a171b76ebf -r c051307ad74525a6e6a5f48713c79cb42c72c925 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -146,7 +146,8 @@
             if data_files is None:
                 data_files = [self.data_files[i] for i in
                               self.regions.identify_data_files(dobj.selector)]
-            subset = [ParticleOctreeSubset(dobj, data_files, 
+            base_region = getattr(dobj, "base_region", dobj)
+            subset = [ParticleOctreeSubset(base_region, data_files, 
                         self.parameter_file)]
             dobj._chunk_info = subset
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
@@ -181,3 +182,11 @@
         self.oct_handler = oct_handler
         self.regions = regions
         super(ParticleDataChunk, self).__init__(*args, **kwargs)
+
+    def _accumulate_values(self, method):
+        mfunc = getattr(self.oct_handler, method)
+        rv = mfunc(self.dobj.selector)
+        self._data_size = rv.shape[0]
+        return rv
+
+

diff -r 3ffaeb736bef058752f2b4d9f15f03a171b76ebf -r c051307ad74525a6e6a5f48713c79cb42c72c925 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1267,6 +1267,7 @@
     cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3],
                          int eterm[3]) nogil:
         return 1
+        return self.base_selector.select_cell(pos, dds, eterm)
 
     cdef int select_grid(self, np.float64_t left_edge[3],
                          np.float64_t right_edge[3], np.int32_t level) nogil:
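
As a rough model of what _accumulate_values is doing (illustrative classes, not yt's real ones): the method name picks the oct-handler routine via getattr, and the returned array's row count is cached as the chunk's data size.

    import numpy as np
    from types import SimpleNamespace

    class FakeOctHandler:
        def icoords(self, selector):
            # Stand-in for the real Cython routine.
            return np.zeros((16, 3), dtype="int64")

    class FakeChunk:
        def __init__(self, oct_handler, dobj):
            self.oct_handler = oct_handler
            self.dobj = dobj

        def _accumulate_values(self, method):
            mfunc = getattr(self.oct_handler, method)  # e.g. .icoords
            rv = mfunc(self.dobj.selector)
            self._data_size = rv.shape[0]              # rows = cells returned
            return rv

    chunk = FakeChunk(FakeOctHandler(), SimpleNamespace(selector=None))
    coords = chunk._accumulate_values("icoords")
    print(chunk._data_size)  # 16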


https://bitbucket.org/yt_analysis/yt/commits/725fe2106c30/
Changeset:   725fe2106c30
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-19 00:44:00
Summary:     Refactoring oct visitor functions into their own module.
Affected #:  7 files

diff -r c051307ad74525a6e6a5f48713c79cb42c72c925 -r 725fe2106c30074adcbf993e89081186d717c110 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -27,6 +27,7 @@
 from fp_utils cimport *
 from selection_routines cimport SelectorObject, \
     OctVisitorData, oct_visitor_function
+from oct_visitors cimport *
 
 cdef int ORDER_MAX
 
@@ -78,14 +79,3 @@
     cdef Oct *next_root(self, int domain_id, int ind[3])
     cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
 
-# Now some visitor functions
-
-cdef oct_visitor_function visit_count_total_octs
-cdef oct_visitor_function visit_mark_octs
-cdef oct_visitor_function visit_mask_octs
-cdef oct_visitor_function visit_index_octs
-cdef oct_visitor_function visit_icoords_octs
-cdef oct_visitor_function visit_ires_octs
-cdef oct_visitor_function visit_fcoords_octs
-cdef oct_visitor_function visit_fwidth_octs
-cdef oct_visitor_function visit_copy_array

diff -r c051307ad74525a6e6a5f48713c79cb42c72c925 -r 725fe2106c30074adcbf993e89081186d717c110 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -31,8 +31,7 @@
 import numpy as np
 from oct_container cimport Oct, OctAllocationContainer, \
     OctreeContainer, ORDER_MAX
-from selection_routines cimport SelectorObject, \
-    OctVisitorData, oct_visitor_function
+cimport oct_visitors
 cimport cython
 
 ORDER_MAX = 20
@@ -829,90 +828,3 @@
                             local_filled += 1
         return local_filled
 
-# Now some visitor functions
-
-cdef void visit_copy_array(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # We should always have global_index less than our source.
-    if selected == 0: return
-    cdef np.int64_t index = data.global_index * 8
-    cdef np.float64_t **p = <np.float64_t**> data.array
-    index += ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] 
-    p[1][data.index] = p[0][index]
-    data.index += 1
-
-cdef void visit_count_total_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # Count even if not selected.
-    # Number of *octs* visited.
-    if data.last != o.domain_ind:
-        data.index += 1
-        data.last = o.domain_ind
-
-cdef void visit_mark_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # We mark them even if they are not selected
-    cdef int i
-    cdef np.uint8_t *arr = <np.uint8_t *> data.array
-    if data.last != o.domain_ind:
-        data.last = o.domain_ind
-        data.index += 1
-    cdef np.int64_t index = data.index * 8
-    index += ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] 
-    arr[index] = 1
-
-cdef void visit_mask_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    if selected == 0: return
-    cdef int i
-    cdef np.uint8_t *arr = <np.uint8_t *> data.array
-    cdef np.int64_t index = data.global_index * 8
-    index += ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] 
-    arr[index] = 1
-
-cdef void visit_index_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # Note that we provide an index even if the cell is not selected.
-    cdef int i
-    cdef np.int64_t *arr
-    if data.last != o.domain_ind:
-        data.last = o.domain_ind
-        arr = <np.int64_t *> data.array
-        arr[o.domain_ind] = data.index
-        data.index += 1
-
-cdef void visit_icoords_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    if selected == 0: return
-    cdef np.int64_t *coords = <np.int64_t*> data.array
-    cdef int i
-    for i in range(3):
-        coords[data.index * 3 + i] = (o.pos[i] << 1) + data.ind[i]
-    data.index += 1
-
-cdef void visit_ires_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    if selected == 0: return
-    cdef np.int64_t *ires = <np.int64_t*> data.array
-    ires[data.index] = o.level
-    data.index += 1
-
-cdef void visit_fcoords_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # Note that this does not actually give the correct floating point
-    # coordinates.  It gives them in some unit system where the domain is 1.0
-    # in all directions, and assumes that they will be scaled later.
-    if selected == 0: return
-    cdef np.float64_t *fcoords = <np.float64_t*> data.array
-    cdef int i
-    cdef np.float64_t c, dx 
-    dx = 1.0 / (2 << o.level)
-    for i in range(3):
-        c = <np.float64_t> ((o.pos[i] << 1 ) + data.ind[i]) 
-        fcoords[data.index * 3 + i] = (c + 0.5) * dx
-    data.index += 1
-
-cdef void visit_fwidth_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # Note that this does not actually give the correct floating point
-    # coordinates.  It gives them in some unit system where the domain is 1.0
-    # in all directions, and assumes that they will be scaled later.
-    if selected == 0: return
-    cdef np.float64_t *fwidth = <np.float64_t*> data.array
-    cdef int i
-    cdef np.float64_t dx 
-    dx = 1.0 / (2 << o.level)
-    for i in range(3):
-        fwidth[data.index * 3 + i] = dx
-    data.index += 1

diff -r c051307ad74525a6e6a5f48713c79cb42c72c925 -r 725fe2106c30074adcbf993e89081186d717c110 yt/geometry/oct_visitors.pxd
--- /dev/null
+++ b/yt/geometry/oct_visitors.pxd
@@ -0,0 +1,42 @@
+"""
+Oct visitor definitions file
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+from selection_routines cimport \
+    OctVisitorData, oct_visitor_function
+from oct_container cimport \
+    Oct
+
+cdef oct_visitor_function count_total_octs
+cdef oct_visitor_function count_total_cells
+cdef oct_visitor_function mark_octs
+cdef oct_visitor_function mask_octs
+cdef oct_visitor_function index_octs
+cdef oct_visitor_function icoords_octs
+cdef oct_visitor_function ires_octs
+cdef oct_visitor_function fcoords_octs
+cdef oct_visitor_function fwidth_octs
+cdef oct_visitor_function copy_array_f64
+cdef oct_visitor_function copy_array_i64

diff -r c051307ad74525a6e6a5f48713c79cb42c72c925 -r 725fe2106c30074adcbf993e89081186d717c110 yt/geometry/oct_visitors.pyx
--- /dev/null
+++ b/yt/geometry/oct_visitors.pyx
@@ -0,0 +1,134 @@
+"""
+Oct visitor functions
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Author: Christopher Moody <chris.e.moody at gmail.com>
+Affiliation: UC Santa Cruz
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy
+import numpy
+from fp_utils cimport *
+
+# Now some visitor functions
+
+cdef void copy_array_f64(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    # We should always have global_index less than our source.
+    # "last" here tells us the dimensionality of the array.
+    if selected == 0: return
+    cdef np.int64_t index = (data.global_index * 8)*data.last
+    cdef np.float64_t **p = <np.float64_t**> data.array
+    index += ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] 
+    p[1][data.index] = p[0][index]
+    data.index += 1
+
+cdef void copy_array_i64(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    # We should always have global_index less than our source.
+    # "last" here tells us the dimensionality of the array.
+    if selected == 0: return
+    cdef np.int64_t index = (data.global_index * 8)*data.last
+    cdef np.int64_t **p = <np.int64_t**> data.array
+    index += ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] 
+    p[1][data.index] = p[0][index]
+    data.index += 1
+
+cdef void count_total_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    # Count even if not selected.
+    # Number of *octs* visited.
+    if data.last != o.domain_ind:
+        data.index += 1
+        data.last = o.domain_ind
+
+cdef void count_total_cells(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    # Counts selected *cells*: the 0/1 flag means unselected
+    # cells contribute nothing.

+    data.index += selected
+
+cdef void mark_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    # We mark them even if they are not selected
+    cdef int i
+    cdef np.uint8_t *arr = <np.uint8_t *> data.array
+    if data.last != o.domain_ind:
+        data.last = o.domain_ind
+        data.index += 1
+    cdef np.int64_t index = data.index * 8
+    index += ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] 
+    arr[index] = 1
+
+cdef void mask_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    if selected == 0: return
+    cdef int i
+    cdef np.uint8_t *arr = <np.uint8_t *> data.array
+    cdef np.int64_t index = data.global_index * 8
+    index += ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] 
+    arr[index] = 1
+
+cdef void index_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    # Note that we provide an index even if the cell is not selected.
+    cdef int i
+    cdef np.int64_t *arr
+    if data.last != o.domain_ind:
+        data.last = o.domain_ind
+        arr = <np.int64_t *> data.array
+        arr[o.domain_ind] = data.index
+        data.index += 1
+
+cdef void icoords_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    if selected == 0: return
+    cdef np.int64_t *coords = <np.int64_t*> data.array
+    cdef int i
+    for i in range(3):
+        coords[data.index * 3 + i] = (o.pos[i] << 1) + data.ind[i]
+    data.index += 1
+
+cdef void ires_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    if selected == 0: return
+    cdef np.int64_t *ires = <np.int64_t*> data.array
+    ires[data.index] = o.level
+    data.index += 1
+
+cdef void fcoords_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    # Note that this does not actually give the correct floating point
+    # coordinates.  It gives them in some unit system where the domain is 1.0
+    # in all directions, and assumes that they will be scaled later.
+    if selected == 0: return
+    cdef np.float64_t *fcoords = <np.float64_t*> data.array
+    cdef int i
+    cdef np.float64_t c, dx 
+    dx = 1.0 / (2 << o.level)
+    for i in range(3):
+        c = <np.float64_t> ((o.pos[i] << 1 ) + data.ind[i]) 
+        fcoords[data.index * 3 + i] = (c + 0.5) * dx
+    data.index += 1
+
+cdef void fwidth_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    # Note that this does not actually give the correct floating point
+    # coordinates.  It gives them in some unit system where the domain is 1.0
+    # in all directions, and assumes that they will be scaled later.
+    if selected == 0: return
+    cdef np.float64_t *fwidth = <np.float64_t*> data.array
+    cdef int i
+    cdef np.float64_t dx 
+    dx = 1.0 / (2 << o.level)
+    for i in range(3):
+        fwidth[data.index * 3 + i] = dx
+    data.index += 1

diff -r c051307ad74525a6e6a5f48713c79cb42c72c925 -r 725fe2106c30074adcbf993e89081186d717c110 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -25,11 +25,8 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-from oct_container cimport OctreeContainer, Oct, OctInfo, ORDER_MAX, \
-    visit_icoords_octs, visit_ires_octs, \
-    visit_fcoords_octs, visit_fwidth_octs, \
-    visit_count_total_octs, \
-    visit_mark_octs, visit_index_octs, visit_copy_array
+from oct_container cimport OctreeContainer, Oct, OctInfo, ORDER_MAX
+cimport oct_visitors
 from libc.stdlib cimport malloc, free, qsort
 from libc.math cimport floor
 from fp_utils cimport *
@@ -120,7 +117,7 @@
         cdef OctVisitorData data
         data.array = <void *> coords.data
         data.index = 0
-        self.visit_all_octs(selector, visit_icoords_octs, &data)
+        self.visit_all_octs(selector, oct_visitors.icoords_octs, &data)
         return coords
 
     @cython.boundscheck(False)
@@ -135,7 +132,7 @@
         cdef OctVisitorData data
         data.array = <void *> res.data
         data.index = 0
-        self.visit_all_octs(selector, visit_ires_octs, &data)
+        self.visit_all_octs(selector, oct_visitors.ires_octs, &data)
         return res
 
     @cython.boundscheck(False)
@@ -149,7 +146,7 @@
         cdef OctVisitorData data
         data.array = <void *> fwidth.data
         data.index = 0
-        self.visit_all_octs(selector, visit_fwidth_octs, &data)
+        self.visit_all_octs(selector, oct_visitors.fwidth_octs, &data)
         cdef np.float64_t base_dx
         for i in range(3):
             base_dx = (self.DRE[i] - self.DLE[i])/self.nn[i]
@@ -168,7 +165,7 @@
         cdef OctVisitorData data
         data.array = <void *> coords.data
         data.index = 0
-        self.visit_all_octs(selector, visit_fcoords_octs, &data)
+        self.visit_all_octs(selector, oct_visitors.fcoords_octs, &data)
         cdef int i
         cdef np.float64_t base_dx
         for i in range(3):
@@ -345,25 +342,34 @@
         data.array = ind.data
         data.index = 0
         data.last = -1
-        self.visit_all_octs(selector, visit_index_octs, &data)
+        self.visit_all_octs(selector, oct_visitors.index_octs, &data)
         return ind
 
     def selector_fill(self, SelectorObject selector,
-                      np.ndarray[np.float64_t, ndim=4] source,
-                      np.ndarray[np.float64_t, ndim=1] dest,
-                      np.int64_t offset):
+                      np.ndarray source,
+                      np.ndarray dest = None,
+                      np.int64_t offset = 0, int dims = 1):
         # This is actually not correct.  The hard part is that we need to
         # iterate the same way visit_all_octs does, but we need to track the
         # number of octs total visited.
         cdef OctVisitorData data
         data.index = offset
         # We only need this so we can continue calculating the offset
-        data.last = -1
+        data.last = dims
         cdef void *p[2]
         p[0] = source.data
         p[1] = dest.data
         data.array = &p
-        self.visit_all_octs(selector, visit_copy_array, &data)
+        cdef oct_visitor_function *func
+        if source.dtype != dest.dtype:
+            raise RuntimeError
+        if source.dtype == np.int64:
+            func = oct_visitors.copy_array_i64
+        elif source.dtype == np.float64:
+            func = oct_visitors.copy_array_f64
+        else:
+            raise NotImplementedError
+        self.visit_all_octs(selector, func, &data)
         return data.index - offset
 
 cdef class ParticleRegions:

diff -r c051307ad74525a6e6a5f48713c79cb42c72c925 -r 725fe2106c30074adcbf993e89081186d717c110 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -29,8 +29,8 @@
 from libc.stdlib cimport malloc, free
 from fp_utils cimport fclip, iclip
 from selection_routines cimport SelectorObject
-from oct_container cimport OctreeContainer, OctAllocationContainer, Oct, \
-    visit_mark_octs, visit_count_total_octs, visit_mask_octs
+from oct_container cimport OctreeContainer, OctAllocationContainer, Oct
+cimport oct_visitors
 #from geometry_utils cimport point_to_hilbert
 from yt.utilities.lib.grid_traversal cimport \
     VolumeContainer, sample_function, walk_volume
@@ -114,11 +114,6 @@
     else:
         raise RuntimeError
 
-# Now our visitor functions
-
-cdef void visit_count_cells(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    data.index += selected
-
 cdef class SelectorObject:
 
     def __cinit__(self, dobj):
@@ -155,7 +150,7 @@
         data.index = 0
         data.last = -1
         data.global_index = -1
-        octree.visit_all_octs(self, visit_count_total_octs, &data)
+        octree.visit_all_octs(self, oct_visitors.count_total_octs, &data)
         cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
                 np.zeros((2, 2, 2, data.index), 'uint8', order='C')
         # This is where we'll -- in the future -- cut up based on indices of
@@ -163,7 +158,7 @@
         data.index = -1
         data.last = -1
         data.array = m2.data
-        octree.visit_all_octs(self, visit_mask_octs, &data)
+        octree.visit_all_octs(self, oct_visitors.mask_octs, &data)
         return m2.astype("bool")
 
     @cython.boundscheck(False)
@@ -228,7 +223,7 @@
     def count_octs(self, OctreeContainer octree):
         cdef OctVisitorData data
         data.index = 0
-        octree.visit_all_octs(self, visit_count_cells, &data)
+        octree.visit_all_octs(self, oct_visitors.count_total_cells, &data)
         return data.index
 
     @cython.boundscheck(False)
@@ -1232,7 +1227,7 @@
         cdef OctVisitorData data
         data.index = 0
         data.last = -1
-        octree.visit_all_octs(self, visit_count_total_octs, &data)
+        octree.visit_all_octs(self, oct_visitors.count_total_octs, &data)
         cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
                 np.zeros((2, 2, 2, data.index), 'uint8', order='C')
         # This is where we'll -- in the future -- cut up based on indices of
@@ -1240,7 +1235,7 @@
         data.index = -1
         data.last = -1
         data.array = m2.data
-        octree.visit_all_octs(self, visit_mark_octs, &data)
+        octree.visit_all_octs(self, oct_visitors.mark_octs, &data)
         return m2.astype("bool")
 
     @cython.boundscheck(False)

diff -r c051307ad74525a6e6a5f48713c79cb42c72c925 -r 725fe2106c30074adcbf993e89081186d717c110 yt/geometry/setup.py
--- a/yt/geometry/setup.py
+++ b/yt/geometry/setup.py
@@ -14,6 +14,13 @@
                 depends=["yt/utilities/lib/fp_utils.pxd",
                          "yt/geometry/oct_container.pxd",
                          "yt/geometry/selection_routines.pxd"])
+    config.add_extension("oct_visitors", 
+                ["yt/geometry/oct_visitors.pyx"],
+                include_dirs=["yt/utilities/lib/"],
+                libraries=["m"],
+                depends=["yt/utilities/lib/fp_utils.pxd",
+                         "yt/geometry/oct_container.pxd",
+                         "yt/geometry/selection_routines.pxd"])
     config.add_extension("particle_oct_container", 
                 ["yt/geometry/particle_oct_container.pyx"],
                 include_dirs=["yt/utilities/lib/"],


https://bitbucket.org/yt_analysis/yt/commits/1d2a9b997cf8/
Changeset:   1d2a9b997cf8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-19 04:23:16
Summary:     Initial attempt at implementing multi-dim coord selection.
Affected #:  4 files

diff -r 725fe2106c30074adcbf993e89081186d717c110 -r 1d2a9b997cf8df9e2e636fc536fc8754ec56f552 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -220,17 +220,25 @@
         return self._reshape_vals(vals)
 
     def select_icoords(self, dobj):
-        return self.oct_handler.icoords(dobj.selector)
+        d = self.oct_handler.icoords(self.selector)
+        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3)
+        return tr
 
     def select_fcoords(self, dobj):
-        return self.oct_handler.fcoords(dobj.selector)
+        d = self.oct_handler.fcoords(self.selector)
+        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3)
+        return tr
 
     def select_fwidth(self, dobj):
         # Recall domain_dimensions is the number of cells, not octs
-        return self.oct_handler.fwidth(dobj.selector)
+        d = self.oct_handler.fwidth(self.selector)
+        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3)
+        return tr
 
     def select_ires(self, dobj):
-        return self.oct_handler.ires(dobj.selector)
+        d = self.oct_handler.ires(self.selector)
+        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 1)
+        return tr
 
     def select(self, selector, source, dest, offset):
         n = self.oct_handler.selector_fill(selector, source, dest, offset)

diff -r 725fe2106c30074adcbf993e89081186d717c110 -r 1d2a9b997cf8df9e2e636fc536fc8754ec56f552 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -35,21 +35,25 @@
     # We should always have global_index less than our source.
     # "last" here tells us the dimensionality of the array.
     if selected == 0: return
+    cdef int i
     cdef np.int64_t index = (data.global_index * 8)*data.last
     cdef np.float64_t **p = <np.float64_t**> data.array
     index += ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] 
-    p[1][data.index] = p[0][index]
-    data.index += 1
+    for i in range(data.last):
+        p[1][data.index + i] = p[0][index + i]
+    data.index += data.last
 
 cdef void copy_array_i64(Oct *o, OctVisitorData *data, np.uint8_t selected):
     # We should always have global_index less than our source.
     # "last" here tells us the dimensionality of the array.
     if selected == 0: return
+    cdef int i
     cdef np.int64_t index = (data.global_index * 8)*data.last
     cdef np.int64_t **p = <np.int64_t**> data.array
     index += ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] 
-    p[1][data.index] = p[0][index]
-    data.index += 1
+    for i in range(data.last):
+        p[1][data.index + i] = p[0][index + i]
+    data.index += data.last
 
 cdef void count_total_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
     # Count even if not selected.

diff -r 725fe2106c30074adcbf993e89081186d717c110 -r 1d2a9b997cf8df9e2e636fc536fc8754ec56f552 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -182,11 +182,3 @@
         self.oct_handler = oct_handler
         self.regions = regions
         super(ParticleDataChunk, self).__init__(*args, **kwargs)
-
-    def _accumulate_values(self, method):
-        mfunc = getattr(self.oct_handler, method)
-        rv = mfunc(self.dobj.selector)
-        self._data_size = rv.shape[0]
-        return rv
-
-

diff -r 725fe2106c30074adcbf993e89081186d717c110 -r 1d2a9b997cf8df9e2e636fc536fc8754ec56f552 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -352,6 +352,15 @@
         # This is actually not correct.  The hard part is that we need to
         # iterate the same way visit_all_octs does, but we need to track the
         # number of octs total visited.
+        cdef np.int64_t num_cells = -1
+        if dest is None:
+            num_cells = selector.count_octs(self)
+            if dims > 1:
+                dest = np.zeros((num_cells, dims), dtype=source.dtype,
+                    order='C')
+            else:
+                dest = np.zeros(num_cells, dtype=source.dtype, order='C')
+            dest = dest - 10000
         cdef OctVisitorData data
         data.index = offset
         # We only need this so we can continue calculating the offset
@@ -370,6 +379,8 @@
         else:
             raise NotImplementedError
         self.visit_all_octs(selector, func, &data)
+        if num_cells >= 0:
+            return dest
         return data.index - offset
 
 cdef class ParticleRegions:
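
A sketch of the allocation branch selector_fill gains here (shapes and the -10000 priming follow the diff; count_octs is the assumed source of num_cells): when no dest is supplied, one row per selected cell is allocated and primed with a sentinel so unfilled slots are conspicuous.

    import numpy as np

    def allocate_dest(num_cells, dims, dtype):
        if dims > 1:
            dest = np.zeros((num_cells, dims), dtype=dtype, order="C")
        else:
            dest = np.zeros(num_cells, dtype=dtype, order="C")
        return dest - 10000  # anything still -10000 afterwards was never filled

    print(allocate_dest(4, 3, "int64").shape)   # (4, 3)
    print(allocate_dest(4, 1, "float64")[0])    # -10000.0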


https://bitbucket.org/yt_analysis/yt/commits/886547e04cbc/
Changeset:   886547e04cbc
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-19 05:03:51
Summary:     Need to increment by 'data.last' between copies.
Affected #:  2 files

diff -r 1d2a9b997cf8df9e2e636fc536fc8754ec56f552 -r 886547e04cbc4537419fa1f1689c1d378195a0f0 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -230,7 +230,6 @@
         return tr
 
     def select_fwidth(self, dobj):
-        # Recall domain_dimensions is the number of cells, not octs
         d = self.oct_handler.fwidth(self.selector)
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3)
         return tr

diff -r 1d2a9b997cf8df9e2e636fc536fc8754ec56f552 -r 886547e04cbc4537419fa1f1689c1d378195a0f0 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -38,7 +38,7 @@
     cdef int i
     cdef np.int64_t index = (data.global_index * 8)*data.last
     cdef np.float64_t **p = <np.float64_t**> data.array
-    index += ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] 
+    index += (((data.ind[2]*2)+data.ind[1])*2+data.ind[0])*data.last
     for i in range(data.last):
         p[1][data.index + i] = p[0][index + i]
     data.index += data.last
@@ -50,7 +50,7 @@
     cdef int i
     cdef np.int64_t index = (data.global_index * 8)*data.last
     cdef np.int64_t **p = <np.int64_t**> data.array
-    index += ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] 
+    index += (((data.ind[2]*2)+data.ind[1])*2+data.ind[0])*data.last
     for i in range(data.last):
         p[1][data.index + i] = p[0][index + i]
     data.index += data.last
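
Why the cursor must advance by data.last between copies, in plain numpy terms (dims plays the role of data.last; purely illustrative): a C-ordered (n_cells, dims) source keeps the dims components of one cell contiguous, so both the per-cell base offset and the write cursor are scaled by dims.

    import numpy as np

    dims, n_cells = 3, 16
    source = np.arange(n_cells * dims, dtype="float64")  # flat view of (16, 3)
    dest = np.empty_like(source)
    cursor = 0
    for cell in range(n_cells):
        base = cell * dims        # cf. (global_index*8 + oind(data))*data.last
        for i in range(dims):
            dest[cursor + i] = source[base + i]
        cursor += dims            # increment by 'data.last' between copies
    assert (dest == source).all()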


https://bitbucket.org/yt_analysis/yt/commits/c78f757d2bb4/
Changeset:   c78f757d2bb4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-19 05:06:18
Summary:     Ordering fix.

Note that we still seem to be getting some spurious cell/oct selection that is
visible in projections using spherical data sources.
Affected #:  1 file

diff -r 886547e04cbc4537419fa1f1689c1d378195a0f0 -r c78f757d2bb415f5d00648421f4e021f882b4ccc yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -38,7 +38,7 @@
     cdef int i
     cdef np.int64_t index = (data.global_index * 8)*data.last
     cdef np.float64_t **p = <np.float64_t**> data.array
-    index += (((data.ind[2]*2)+data.ind[1])*2+data.ind[0])*data.last
+    index += (((data.ind[0]*2)+data.ind[1])*2+data.ind[2])*data.last
     for i in range(data.last):
         p[1][data.index + i] = p[0][index + i]
     data.index += data.last
@@ -50,7 +50,7 @@
     cdef int i
     cdef np.int64_t index = (data.global_index * 8)*data.last
     cdef np.int64_t **p = <np.int64_t**> data.array
-    index += (((data.ind[2]*2)+data.ind[1])*2+data.ind[0])*data.last
+    index += (((data.ind[0]*2)+data.ind[1])*2+data.ind[2])*data.last
     for i in range(data.last):
         p[1][data.index + i] = p[0][index + i]
     data.index += data.last
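
The two flattenings being swapped here differ only in which of i, j, k varies fastest; numpy confirms that ((i*2)+j)*2+k is the C-order raveling of a (2, 2, 2) block while ((k*2)+j)*2+i is the Fortran-order raveling (a quick sanity check, not yt code):

    import numpy as np

    for (i, j, k) in np.ndindex(2, 2, 2):
        c_idx = ((i * 2) + j) * 2 + k
        f_idx = ((k * 2) + j) * 2 + i
        assert c_idx == np.ravel_multi_index((i, j, k), (2, 2, 2), order="C")
        assert f_idx == np.ravel_multi_index((i, j, k), (2, 2, 2), order="F")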


https://bitbucket.org/yt_analysis/yt/commits/049580841736/
Changeset:   049580841736
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-19 23:39:02
Summary:     Some refactoring of indexing functions.

Also allowing nvals to be a tuple.
Affected #:  4 files

diff -r c78f757d2bb415f5d00648421f4e021f882b4ccc -r 049580841736fe75201f1acbafd7b147b9eb381b yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -40,3 +40,10 @@
 cdef oct_visitor_function fwidth_octs
 cdef oct_visitor_function copy_array_f64
 cdef oct_visitor_function copy_array_i64
+
+cdef inline int oind(OctVisitorData *data):
+    return (((data.ind[0]*2)+data.ind[1])*2+data.ind[2])
+
+cdef inline int rind(OctVisitorData *data):
+    #return (((data.ind[0]*2)+data.ind[1])*2+data.ind[2])
+    return (((data.ind[2]*2)+data.ind[1])*2+data.ind[0])

diff -r c78f757d2bb415f5d00648421f4e021f882b4ccc -r 049580841736fe75201f1acbafd7b147b9eb381b yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -38,7 +38,7 @@
     cdef int i
     cdef np.int64_t index = (data.global_index * 8)*data.last
     cdef np.float64_t **p = <np.float64_t**> data.array
-    index += (((data.ind[0]*2)+data.ind[1])*2+data.ind[2])*data.last
+    index += oind(data)*data.last
     for i in range(data.last):
         p[1][data.index + i] = p[0][index + i]
     data.index += data.last
@@ -50,7 +50,7 @@
     cdef int i
     cdef np.int64_t index = (data.global_index * 8)*data.last
     cdef np.int64_t **p = <np.int64_t**> data.array
-    index += (((data.ind[0]*2)+data.ind[1])*2+data.ind[2])*data.last
+    index += oind(data)*data.last
     for i in range(data.last):
         p[1][data.index + i] = p[0][index + i]
     data.index += data.last
@@ -75,7 +75,7 @@
         data.last = o.domain_ind
         data.index += 1
     cdef np.int64_t index = data.index * 8
-    index += ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] 
+    index += oind(data)
     arr[index] = 1
 
 cdef void mask_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
@@ -83,11 +83,12 @@
     cdef int i
     cdef np.uint8_t *arr = <np.uint8_t *> data.array
     cdef np.int64_t index = data.global_index * 8
-    index += ((data.ind[2]*2)+data.ind[1])*2+data.ind[0] 
+    index += oind(data)
     arr[index] = 1
 
 cdef void index_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
     # Note that we provide an index even if the cell is not selected.
+    if selected == 0: return
     cdef int i
     cdef np.int64_t *arr
     if data.last != o.domain_ind:

diff -r c78f757d2bb415f5d00648421f4e021f882b4ccc -r 049580841736fe75201f1acbafd7b147b9eb381b yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -39,6 +39,7 @@
 
 cdef inline int gind(int i, int j, int k, int dims[3]):
     return ((i*dims[1])+j)*dims[2]+k
+    #return ((k*dims[1])+j)*dims[0]+i
 
 
 ####################################################
@@ -57,7 +58,8 @@
 
 cdef class ParticleDepositOperation:
     # We assume each will allocate and define their own temporary storage
-    cdef np.int64_t nvals
+    cdef public object nvals
+    cdef public int bad_indices
     cdef void process(self, int dim[3], np.float64_t left_edge[3],
                       np.float64_t dds[3], np.int64_t offset,
                       np.float64_t ppos[3], np.float64_t *fields)

diff -r c78f757d2bb415f5d00648421f4e021f882b4ccc -r 049580841736fe75201f1acbafd7b147b9eb381b yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -50,6 +50,7 @@
                      np.ndarray[np.float64_t, ndim=2] positions,
                      fields = None, int domain_id = -1):
         cdef int nf, i, j
+        self.bad_indices = 0
         if fields is None:
             fields = []
         nf = len(fields)
@@ -127,7 +128,7 @@
     cdef public object ocount
     def initialize(self):
         # Create a numpy array accessible to python
-        self.ocount = np.zeros(self.nvals, dtype="int64")
+        self.ocount = np.zeros(self.nvals, dtype="int64", order='F')
         cdef np.ndarray arr = self.ocount
         # alias the C-view for use in cython
         self.count = <np.int64_t*> arr.data
@@ -161,10 +162,10 @@
     cdef public object otemp
 
     def initialize(self):
-        self.odata = np.zeros(self.nvals, dtype="float64")
+        self.odata = np.zeros(self.nvals, dtype="float64", order='F')
         cdef np.ndarray arr = self.odata
         self.data = <np.float64_t*> arr.data
-        self.otemp = np.zeros(self.nvals, dtype="float64")
+        self.otemp = np.zeros(self.nvals, dtype="float64", order='F')
         arr = self.otemp
         self.temp = <np.float64_t*> arr.data
 
@@ -217,7 +218,7 @@
     cdef np.float64_t *sum
     cdef public object osum
     def initialize(self):
-        self.osum = np.zeros(self.nvals, dtype="float64")
+        self.osum = np.zeros(self.nvals, dtype="float64", order='F')
         cdef np.ndarray arr = self.osum
         self.sum = <np.float64_t*> arr.data
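
The switch to order='F' in the deposit buffers changes the raw memory layout that the C-level pointer (<np.int64_t*> arr.data) walks, while Python-side indexing is unaffected. A quick way to see the difference:

    import numpy as np

    a_c = np.arange(8).reshape(2, 2, 2)   # C (row-major) layout
    a_f = np.asfortranarray(a_c)          # same values, column-major layout
    print(a_c.ravel(order="K"))  # [0 1 2 3 4 5 6 7]   memory order
    print(a_f.ravel(order="K"))  # [0 4 2 6 1 5 3 7]   i now varies fastest
    print(a_c[1, 0, 1] == a_f[1, 0, 1])   # True: indexing is layout-agnostic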
 


https://bitbucket.org/yt_analysis/yt/commits/847fd9cd34ff/
Changeset:   847fd9cd34ff
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-20 12:53:21
Summary:     A few more attempted fixes.

The failure of spheres to project correctly suggests to me that what is
happening is a mis-copying during the selector_fill operation.  I am unable to
determine where this is going wrong; I initially thought that having the i,j,k
indices backwards could cause this, but this no longer seems to be the case to
me.
Affected #:  4 files

diff -r 049580841736fe75201f1acbafd7b147b9eb381b -r 847fd9cd34ff2ec1bb34d9ba34301d1e1433f05e yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -212,12 +212,12 @@
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
-        nvals = (self.domain_ind >= 0).sum() * 8
+        nvals = (2, 2, 2, (self.domain_ind >= 0).sum())
         op = cls(nvals) # We allocate number of zones, not number of octs
         op.initialize()
         op.process_octree(self.oct_handler, self.domain_ind, positions, fields, 0)
         vals = op.finalize()
-        return self._reshape_vals(vals)
+        return np.asfortranarray(vals)
 
     def select_icoords(self, dobj):
         d = self.oct_handler.icoords(self.selector)

diff -r 049580841736fe75201f1acbafd7b147b9eb381b -r 847fd9cd34ff2ec1bb34d9ba34301d1e1433f05e yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -45,5 +45,4 @@
     return (((data.ind[0]*2)+data.ind[1])*2+data.ind[2])
 
 cdef inline int rind(OctVisitorData *data):
-    #return (((data.ind[0]*2)+data.ind[1])*2+data.ind[2])
     return (((data.ind[2]*2)+data.ind[1])*2+data.ind[0])

diff -r 049580841736fe75201f1acbafd7b147b9eb381b -r 847fd9cd34ff2ec1bb34d9ba34301d1e1433f05e yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -36,6 +36,7 @@
     # "last" here tells us the dimensionality of the array.
     if selected == 0: return
     cdef int i
+    # There are this many records between "octs"
     cdef np.int64_t index = (data.global_index * 8)*data.last
     cdef np.float64_t **p = <np.float64_t**> data.array
     index += oind(data)*data.last
@@ -88,7 +89,6 @@
 
 cdef void index_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
     # Note that we provide an index even if the cell is not selected.
-    if selected == 0: return
     cdef int i
     cdef np.int64_t *arr
     if data.last != o.domain_ind:

diff -r 049580841736fe75201f1acbafd7b147b9eb381b -r 847fd9cd34ff2ec1bb34d9ba34301d1e1433f05e yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -234,6 +234,7 @@
         for i in range(3):
             ii[i] = <int>((ppos[i] - left_edge[i]) / dds[i])
         self.sum[gind(ii[0], ii[1], ii[2], dim) + offset] += fields[0]
+        return
         
     def finalize(self):
         return self.osum
@@ -305,10 +306,11 @@
     cdef np.float64_t *field
     cdef public object ofield
     def initialize(self):
-        self.ofield = np.zeros(self.nvals, dtype="float64")
+        self.ofield = np.zeros(self.nvals, dtype="float64", order='F')
         cdef np.ndarray arr = self.ofield
         self.field = <np.float64_t *> arr.data
 
+    @cython.cdivision(True)
     cdef void process(self, int dim[3],
                       np.float64_t left_edge[3],
                       np.float64_t dds[3],


https://bitbucket.org/yt_analysis/yt/commits/abf193b22ae2/
Changeset:   abf193b22ae2
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-20 13:26:37
Summary:     Remove deprecated routine recursively_select_octs.
Affected #:  2 files

diff -r 847fd9cd34ff2ec1bb34d9ba34301d1e1433f05e -r abf193b22ae2cc50255264b13d7b4ee3d2924e6d yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -40,10 +40,6 @@
     cdef public np.int32_t min_level
     cdef public np.int32_t max_level
 
-    cdef void recursively_select_octs(self, Oct *root,
-                        np.float64_t pos[3], np.float64_t dds[3],
-                        np.ndarray[np.uint8_t, ndim=2] mask,
-                        int level = ?)
     cdef void recursively_visit_octs(self, Oct *root,
                         np.float64_t pos[3], np.float64_t dds[3],
                         int level,

diff -r 847fd9cd34ff2ec1bb34d9ba34301d1e1433f05e -r abf193b22ae2cc50255264b13d7b4ee3d2924e6d yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -161,65 +161,6 @@
         octree.visit_all_octs(self, oct_visitors.mask_octs, &data)
         return m2.astype("bool")
 
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef void recursively_select_octs(self, Oct *root,
-                        np.float64_t pos[3], np.float64_t dds[3],
-                        np.ndarray[np.uint8_t, ndim=2] mask,
-                        int level = 0):
-
-        cdef np.float64_t LE[3], RE[3], sdds[3], spos[3]
-        cdef int i, j, k, res, ii
-        cdef Oct *ch
-        # Remember that pos is the *center* of the oct, and dds is the oct
-        # width.  So to get to the edges, we add/subtract half of dds.
-        for i in range(3):
-            # sdds is the cell width
-            sdds[i] = dds[i]/2.0
-            LE[i] = pos[i] - dds[i]/2.0
-            RE[i] = pos[i] + dds[i]/2.0
-        #print LE[0], RE[0], LE[1], RE[1], LE[2], RE[2]
-        res = self.select_grid(LE, RE, level)
-        cdef int eterm[3] 
-        eterm[0] = eterm[1] = eterm[2] = 0
-        cdef int next_level, this_level
-        # next_level: an int that says whether or not we can progress to children
-        # this_level: an int that says whether or not we can select from this
-        # level
-        next_level = this_level = 1
-        if level == self.max_level:
-            next_level = 0
-        if level < self.min_level or level > self.max_level:
-            this_level = 0
-        if res == 0:
-            for i in range(8):
-                mask[root.domain_ind,i] = 0
-            # If this level *is* being selected (i.e., no early termination)
-            # then we know no child zones will be selected.
-            if this_level == 1:
-                return
-        # Now we visit all our children.  We subtract off sdds for the first
-        # pass because we center it on the first cell.
-        spos[0] = pos[0] - sdds[0]/2.0
-        for i in range(2):
-            spos[1] = pos[1] - sdds[1]/2.0
-            for j in range(2):
-                spos[2] = pos[2] - sdds[2]/2.0
-                for k in range(2):
-                    ii = ((k*2)+j)*2+i
-                    ch = root.children[i][j][k]
-                    if next_level == 1 and ch != NULL:
-                        mask[root.domain_ind, ii] = 0
-                        self.recursively_select_octs(
-                            ch, spos, sdds, mask, level + 1)
-                    elif this_level == 1:
-                        mask[root.domain_ind, ii] = \
-                            self.select_cell(spos, sdds, eterm)
-                    spos[2] += sdds[2]
-                spos[1] += sdds[1]
-            spos[0] += sdds[0]
-
     def count_octs(self, OctreeContainer octree):
         cdef OctVisitorData data
         data.index = 0


https://bitbucket.org/yt_analysis/yt/commits/bd162c69f9a4/
Changeset:   bd162c69f9a4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-20 14:42:26
Summary:     Mark out a few more potential problems.
Affected #:  2 files

diff -r abf193b22ae2cc50255264b13d7b4ee3d2924e6d -r bd162c69f9a44ee62e6252d0dccd9bade282ef18 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -379,6 +379,8 @@
         else:
             raise NotImplementedError
         self.visit_all_octs(selector, func, &data)
+        assert ((data.global_index + 1)*8*dims == source.size)
+        assert (dest.size == data.index)
         if num_cells >= 0:
             return dest
         return data.index - offset

diff -r abf193b22ae2cc50255264b13d7b4ee3d2924e6d -r bd162c69f9a44ee62e6252d0dccd9bade282ef18 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1165,19 +1165,7 @@
     @cython.cdivision(True)
     def select_octs(self, OctreeContainer octree):
         # There has to be a better way to do this.
-        cdef OctVisitorData data
-        data.index = 0
-        data.last = -1
-        octree.visit_all_octs(self, oct_visitors.count_total_octs, &data)
-        cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
-                np.zeros((2, 2, 2, data.index), 'uint8', order='C')
-        # This is where we'll -- in the future -- cut up based on indices of
-        # the octs.
-        data.index = -1
-        data.last = -1
-        data.array = m2.data
-        octree.visit_all_octs(self, oct_visitors.mark_octs, &data)
-        return m2.astype("bool")
+        raise RuntimeError
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -1203,7 +1191,6 @@
     cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3],
                          int eterm[3]) nogil:
         return 1
-        return self.base_selector.select_cell(pos, dds, eterm)
 
     cdef int select_grid(self, np.float64_t left_edge[3],
                          np.float64_t right_edge[3], np.int32_t level) nogil:


https://bitbucket.org/yt_analysis/yt/commits/8585c00bb2d9/
Changeset:   8585c00bb2d9
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-21 00:33:24
Summary:     This small change to how buffers are filled fixes a terrible bug.

The bug is not terrible in that it causes really bad, horrible, incorrect
answers -- no, it's bad in that it took me four days to track it down and it
was relatively simple, but I tore apart most of the code trying to search for
it.  This included looking at i,j,k,n ordering, examining hundreds of thousands
of possible Oct combinations, and so on.  But the real issue was that during
deposition of a particle field, sometimes we'd simply overrun the precision
that we can rely upon during the traversal not of the *octree*, but of the
*quadtree* that builds our projections.  So rather than adding/subtracting
(where the error built up) I created a new temporary variable, on the stack,
that stores the necessary information to reconstruct the previous state of the
node.

I have to say, this entire process of tracking this bug down was fun in that it
was extremely satisfying, but also a bit of a wild goose chase.

Anyway, this is the tiny commit that will be merged in immediately as I prepare
the rest of the new Octree stuff for inclusion.
Affected #:  1 file

diff -r 6f22f08f086adad2954c0efecd766713008bc66e -r 8585c00bb2d9ddebf026fa19cdc65cb35e1b6da2 yt/utilities/lib/QuadTree.pyx
--- a/yt/utilities/lib/QuadTree.pyx
+++ b/yt/utilities/lib/QuadTree.pyx
@@ -342,6 +342,7 @@
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.cdivision(True)
     def get_all(self, int count_only = 0, int style = 1):
         cdef int i, j, vi
         cdef int total = 0
@@ -391,6 +392,7 @@
                 count += self.count(node.children[i][j])
         return count
 
+    @cython.cdivision(True)
     cdef int fill(self, QuadTreeNode *node, 
                         np.int64_t curpos,
                         np.float64_t *px,
@@ -403,6 +405,8 @@
                         np.float64_t wtoadd,
                         np.int64_t level):
         cdef int i, j, n
+        cdef np.float64_t *vorig
+        vorig = <np.float64_t *> alloca(sizeof(np.float64_t) * self.nvals)
         if node.children[0][0] == NULL:
             if self.merged == -1:
                 for i in range(self.nvals):
@@ -422,6 +426,7 @@
         cdef np.int64_t added = 0
         if self.merged == 1:
             for i in range(self.nvals):
+                vorig[i] = vtoadd[i]
                 vtoadd[i] += node.val[i]
             wtoadd += node.weight_val
         elif self.merged == -1:
@@ -437,7 +442,7 @@
                         vtoadd, wtoadd, level + 1)
         if self.merged == 1:
             for i in range(self.nvals):
-                vtoadd[i] -= node.val[i]
+                vtoadd[i] = vorig[i]
             wtoadd -= node.weight_val
         return added
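
A minimal demonstration of the failure mode described above (pure IEEE-double arithmetic, not yt code): adding a much larger value and then subtracting it again need not recover the original, which is exactly why the fix stashes the pre-addition value in vorig rather than undoing the addition arithmetically.

    vtoadd = 1.0        # running value carried down the tree
    node_val = 1e16     # a node contribution at much larger magnitude
    vtoadd += node_val  # descend: fold in the node's value
    vtoadd -= node_val  # ascend: try to undo it (the old code path)
    print(vtoadd)       # 0.0 -- the original 1.0 was rounded away

    vorig = 1.0         # the fix: save the original on the way down...
    print(vorig)        # ...and restore it verbatim on the way up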
 


https://bitbucket.org/yt_analysis/yt/commits/b769f0f4ca5c/
Changeset:   b769f0f4ca5c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-21 00:35:57
Summary:     Merging with other line of development, including QuadTree bugfix.
Affected #:  4 files

diff -r bd162c69f9a44ee62e6252d0dccd9bade282ef18 -r b769f0f4ca5c20ff636a68576a02fdd260ab4b23 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -492,7 +492,11 @@
             field_list = None
         field_list = self.comm.mpi_bcast(field_list)
         self.field_list = []
-        # Now we will, avoiding the problem of particle types not having names.
+        # Now we will iterate over all fields, trying to avoid the problem of
+        # particle types not having names.  This should convert all known
+        # particle fields that exist in Enzo outputs into the construction
+        # ("all", field) and should not otherwise affect ActiveParticle
+        # simulations.
         for field in field_list:
             if ("all", field) in KnownEnzoFields:
                 self.field_list.append(("all", field))

diff -r bd162c69f9a44ee62e6252d0dccd9bade282ef18 -r b769f0f4ca5c20ff636a68576a02fdd260ab4b23 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -91,6 +91,7 @@
         hydro_offset = np.zeros(n_levels, dtype='int64')
         hydro_offset -= 1
         level_count = np.zeros(n_levels, dtype='int64')
+        skipped = []
         for level in range(self.amr_header['nlevelmax']):
             for cpu in range(self.amr_header['nboundary'] +
                              self.amr_header['ncpu']):
@@ -101,13 +102,15 @@
                 except AssertionError:
                     print "You are running with the wrong number of fields."
                     print "Please specify these in the load command."
+                    print "We are looking for %s fields." % self.nvar
+                    print "The last set of field sizes was: %s" % skipped
                     raise
                 if hvals['file_ncache'] == 0: continue
                 assert(hvals['file_ilevel'] == level+1)
                 if cpu + 1 == self.domain_id and level >= min_level:
                     hydro_offset[level - min_level] = f.tell()
                     level_count[level - min_level] = hvals['file_ncache']
-                fpu.skip(f, 8 * self.nvar)
+                skipped = fpu.skip(f, 8 * self.nvar)
         self._hydro_offset = hydro_offset
         self._level_count = level_count
         return self._hydro_offset

diff -r bd162c69f9a44ee62e6252d0dccd9bade282ef18 -r b769f0f4ca5c20ff636a68576a02fdd260ab4b23 yt/utilities/fortran_utils.py
--- a/yt/utilities/fortran_utils.py
+++ b/yt/utilities/fortran_utils.py
@@ -158,7 +158,7 @@
     >>> f = open("fort.3", "rb")
     >>> skip(f, 3)
     """
-    skipped = 0
+    skipped = []
     pos = f.tell()
     for i in range(n):
         fmt = endian+"I"
@@ -167,7 +167,7 @@
         f.seek(s1+ struct.calcsize(fmt), os.SEEK_CUR)
         s2= struct.unpack(fmt, size)[0]
         assert s1==s2 
-        skipped += s1/struct.calcsize(fmt)
+        skipped.append(s1/struct.calcsize(fmt))
     return skipped
 
 def peek_record_size(f,endian='='):

diff -r bd162c69f9a44ee62e6252d0dccd9bade282ef18 -r b769f0f4ca5c20ff636a68576a02fdd260ab4b23 yt/utilities/lib/QuadTree.pyx
--- a/yt/utilities/lib/QuadTree.pyx
+++ b/yt/utilities/lib/QuadTree.pyx
@@ -342,6 +342,7 @@
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.cdivision(True)
     def get_all(self, int count_only = 0, int style = 1):
         cdef int i, j, vi
         cdef int total = 0
@@ -391,6 +392,7 @@
                 count += self.count(node.children[i][j])
         return count
 
+    @cython.cdivision(True)
     cdef int fill(self, QuadTreeNode *node, 
                         np.int64_t curpos,
                         np.float64_t *px,
@@ -403,6 +405,8 @@
                         np.float64_t wtoadd,
                         np.int64_t level):
         cdef int i, j, n
+        cdef np.float64_t *vorig
+        vorig = <np.float64_t *> alloca(sizeof(np.float64_t) * self.nvals)
         if node.children[0][0] == NULL:
             if self.merged == -1:
                 for i in range(self.nvals):
@@ -422,6 +426,7 @@
         cdef np.int64_t added = 0
         if self.merged == 1:
             for i in range(self.nvals):
+                vorig[i] = vtoadd[i]
                 vtoadd[i] += node.val[i]
             wtoadd += node.weight_val
         elif self.merged == -1:
@@ -437,7 +442,7 @@
                         vtoadd, wtoadd, level + 1)
         if self.merged == 1:
             for i in range(self.nvals):
-                vtoadd[i] -= node.val[i]
+                vtoadd[i] = vorig[i]
             wtoadd -= node.weight_val
         return added
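
The reason skip can report record sizes essentially for free: each Fortran unformatted record is framed by its byte length, written once before and once after the payload. A hedged re-rendering of the loop (4-byte length markers assumed, matching the fmt above):

    import struct

    def skip_records(f, n, endian="="):
        fmt = endian + "I"
        width = struct.calcsize(fmt)
        sizes = []
        for _ in range(n):
            s1 = struct.unpack(fmt, f.read(width))[0]  # leading length marker
            f.seek(s1, 1)                              # skip the payload
            s2 = struct.unpack(fmt, f.read(width))[0]  # trailing marker
            assert s1 == s2, "corrupt record framing"
            sizes.append(s1 // width)                  # size in 4-byte words
        return sizes

Returning the per-record sizes, rather than a running total, is what lets the RAMSES frontend print the last set of field sizes when the field count is wrong.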
 


https://bitbucket.org/yt_analysis/yt/commits/ac97077f2e47/
Changeset:   ac97077f2e47
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-21 00:51:37
Summary:     Adding a brief note about the index order in particle_deposit.pxd.
Affected #:  1 file

diff -r b769f0f4ca5c20ff636a68576a02fdd260ab4b23 -r ac97077f2e471c40a2ac3dae0d31bb94543d4b83 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -38,8 +38,11 @@
     void *alloca(int)
 
 cdef inline int gind(int i, int j, int k, int dims[3]):
+    # The ordering is such that we want i to vary the slowest in this instance,
+    # even though in other instances it varies the fastest.  To see this in
+    # action, try looking at the results of an n_ref=256 particle CIC plot,
+    # which shows it the most clearly.
     return ((i*dims[1])+j)*dims[2]+k
-    #return ((k*dims[1])+j)*dims[0]+i
 
 
 ####################################################

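The index-order note is easy to verify: gind() is exactly NumPy's C-order (row-major) flattening, with i varying slowest and k fastest:

    import numpy as np

    def gind(i, j, k, dims):
        # i varies slowest, k fastest: C (row-major) ordering.
        return ((i * dims[1]) + j) * dims[2] + k

    dims = (4, 3, 2)
    a = np.arange(np.prod(dims)).reshape(dims, order="C")
    for idx in np.ndindex(*dims):
        assert a[idx] == gind(idx[0], idx[1], idx[2], dims)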

https://bitbucket.org/yt_analysis/yt/commits/e3cc788c3d2f/
Changeset:   e3cc788c3d2f
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-21 16:15:38
Summary:     Updated Tipsy IO for new Morton-ordering.
Affected #:  2 files

diff -r ac97077f2e471c40a2ac3dae0d31bb94543d4b83 -r e3cc788c3d2f00c52c8c55b134a8547e4656b585 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -275,11 +275,10 @@
         morton = get_morton_indices_unravel(lx, ly, lz)
         del lx, ly, lz
         return morton
-        
 
-    def _count_particles(self, domain):
+    def _count_particles(self, data_file):
         npart = dict((self._ptypes[i], v)
-            for i, v in enumerate(domain.header["Npart"])) 
+            for i, v in enumerate(data_file.header["Npart"])) 
         return npart
 
     _header_offset = 256
@@ -384,23 +383,26 @@
             ptypes.add(ftype)
         ptypes = list(ptypes)
         ptypes.sort(key = lambda a: self._ptypes.index(a))
+        data_files = set([])
         for chunk in chunks:
-            for subset in chunk.objs:
-                poff = subset.domain.field_offsets
-                tp = subset.domain.total_particles
-                f = open(subset.domain.domain_filename, "rb")
-                for ptype in ptypes:
-                    f.seek(poff[ptype], os.SEEK_SET)
-                    p = np.fromfile(f, self._pdtypes[ptype], count=tp[ptype])
-                    mask = selector.select_points(
-                        p['Coordinates']['x'].astype("float64"),
-                        p['Coordinates']['y'].astype("float64"),
-                        p['Coordinates']['z'].astype("float64"))
-                    tf = self._fill_fields(ptf[ptype], p, mask)
-                    for field in tf:
-                        rv[ptype, field] = tf[field]
-                    del p, tf
-                f.close()
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in data_files:
+            poff = data_file.field_offsets
+            tp = data_file.total_particles
+            f = open(data_file.filename, "rb")
+            for ptype in ptypes:
+                f.seek(poff[ptype], os.SEEK_SET)
+                p = np.fromfile(f, self._pdtypes[ptype], count=tp[ptype])
+                mask = selector.select_points(
+                    p['Coordinates']['x'].astype("float64"),
+                    p['Coordinates']['y'].astype("float64"),
+                    p['Coordinates']['z'].astype("float64"))
+                tf = self._fill_fields(ptf[ptype], p, mask)
+                for field in tf:
+                    rv[ptype, field] = tf[field]
+                del p, tf
+            f.close()
         return rv
 
     def _initialize_index(self, data_file, regions):
@@ -447,26 +449,26 @@
         mylog.info("Adding %0.3e particles", morton.size)
         return morton
 
-    def _count_particles(self, domain):
+    def _count_particles(self, data_file):
         npart = {
-            "Gas": domain.pf.parameters['nsph'],
-            "Stars": domain.pf.parameters['nstar'],
-            "DarkMatter": domain.pf.parameters['ndark']
+            "Gas": data_file.pf.parameters['nsph'],
+            "Stars": data_file.pf.parameters['nstar'],
+            "DarkMatter": data_file.pf.parameters['ndark']
         }
         return npart
 
-    def _create_dtypes(self, domain):
+    def _create_dtypes(self, data_file):
         # We can just look at the particle counts.
-        self._header_offset = domain.pf._header_offset
+        self._header_offset = data_file.pf._header_offset
         self._pdtypes = {}
         pds = {}
         field_list = []
-        tp = domain.total_particles
+        tp = data_file.total_particles
         for ptype, field in self._fields:
             pfields = []
             if tp[ptype] == 0: continue
-            dtbase = domain.pf._field_dtypes.get(field, 'f')
-            ff = "%s%s" % (domain.pf.endian, dtbase)
+            dtbase = data_file.pf._field_dtypes.get(field, 'f')
+            ff = "%s%s" % (data_file.pf.endian, dtbase)
             if field in _vector_fields:
                 dt = (field, [('x', ff), ('y', ff), ('z', ff)])
             else:
@@ -478,15 +480,15 @@
         self._field_list = field_list
         return self._field_list
 
-    def _identify_fields(self, domain):
+    def _identify_fields(self, data_file):
         return self._field_list
 
-    def _calculate_particle_offsets(self, domain):
+    def _calculate_particle_offsets(self, data_file):
         field_offsets = {}
-        pos = domain.pf._header_offset
+        pos = data_file.pf._header_offset
         for ptype in self._ptypes:
             field_offsets[ptype] = pos
-            if domain.total_particles[ptype] == 0: continue
+            if data_file.total_particles[ptype] == 0: continue
             size = self._pdtypes[ptype].itemsize
-            pos += domain.total_particles[ptype] * size
+            pos += data_file.total_particles[ptype] * size
         return field_offsets

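The offset bookkeeping above works because Tipsy stores its particle blocks back to back after the header, so each type's offset is just the running byte total. A minimal sketch with illustrative counts and record sizes (the real sizes come from the _pdtypes structs):

    def particle_offsets(header_offset, ptypes, counts, itemsize):
        # Each particle type starts where the previous block ended.
        offsets = {}
        pos = header_offset
        for ptype in ptypes:
            offsets[ptype] = pos
            pos += counts[ptype] * itemsize[ptype]
        return offsets

    # e.g. a 256-byte header followed by Gas, DarkMatter, Stars blocks:
    particle_offsets(256, ["Gas", "DarkMatter", "Stars"],
                     {"Gas": 1000, "DarkMatter": 2000, "Stars": 10},
                     {"Gas": 48, "DarkMatter": 36, "Stars": 44})
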
diff -r ac97077f2e471c40a2ac3dae0d31bb94543d4b83 -r e3cc788c3d2f00c52c8c55b134a8547e4656b585 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -412,7 +412,7 @@
         cdef int ind[3], i
         cdef np.ndarray[np.uint64_t, ndim=3] mask
         mask = self.masks[file_id/64]
-        val = 1 << (file_id - (file_id/64)*64)
+        cdef np.int64_t val = 1 << (file_id - (file_id/64)*64)
         for p in range(no):
             # Now we locate the particle
             for i in range(3):

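For readers unfamiliar with the Morton (Z-order) indexing this changeset targets: the key interleaves the bits of the three integer cell coordinates, so sorting by key keeps spatially nearby cells close together in the file. A pure-Python stand-in for get_morton_indices_unravel (which axis occupies the low bit is an assumption here):

    def morton_key(ix, iy, iz, order=20):
        # Interleave bits: bit b of ix/iy/iz lands at 3*b, 3*b+1, 3*b+2.
        key = 0
        for b in range(order):
            key |= ((ix >> b) & 1) << (3 * b)
            key |= ((iy >> b) & 1) << (3 * b + 1)
            key |= ((iz >> b) & 1) << (3 * b + 2)
        return key

Relatedly, the mask arithmetic in particle_oct_container.pyx, 1 << (file_id - (file_id/64)*64), is just 1 << (file_id % 64): it selects the file's bit within the 64-bit mask word indexed by file_id/64.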

https://bitbucket.org/yt_analysis/yt/commits/2c7066b27099/
Changeset:   2c7066b27099
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-21 16:32:48
Summary:     Profiling revealed a few instances of easy Cython speedups.

We're now dominated by IO and casting (via astype) from float32 to float64.
Eventually we may be able to eliminate this by either using 32-bit floats
internally or figuring out a better IO system for Tipsy/Gadget.
Affected #:  3 files

diff -r e3cc788c3d2f00c52c8c55b134a8547e4656b585 -r 2c7066b270997d000070dd7636e37d6343083df1 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -137,6 +137,7 @@
                 yield (this.file_ind, this.domain_ind, this.domain)
             cur = cur.next
 
+    @cython.cdivision(True)
     cdef void visit_all_octs(self, SelectorObject selector,
                         oct_visitor_function *func,
                         OctVisitorData *data):

diff -r e3cc788c3d2f00c52c8c55b134a8547e4656b585 -r 2c7066b270997d000070dd7636e37d6343083df1 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -25,6 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+cimport cython
 cimport numpy
 import numpy
 from fp_utils cimport *
@@ -111,6 +112,7 @@
     ires[data.index] = o.level
     data.index += 1
 
+@cython.cdivision(True)
 cdef void fcoords_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
     # Note that this does not actually give the correct floating point
     # coordinates.  It gives them in some unit system where the domain is 1.0
@@ -125,6 +127,7 @@
         fcoords[data.index * 3 + i] = (c + 0.5) * dx
     data.index += 1
 
+@cython.cdivision(True)
 cdef void fwidth_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
     # Note that this does not actually give the correct floating point
     # coordinates.  It gives them in some unit system where the domain is 1.0

diff -r e3cc788c3d2f00c52c8c55b134a8547e4656b585 -r 2c7066b270997d000070dd7636e37d6343083df1 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -45,6 +45,8 @@
     def finalize(self, *args):
         raise NotImplementedError
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
     def process_octree(self, OctreeContainer octree,
                      np.ndarray[np.int64_t, ndim=1] dom_ind,
                      np.ndarray[np.float64_t, ndim=2] positions,
@@ -90,6 +92,8 @@
             self.process(dims, oi.left_edge, oi.dds,
                          offset, pos, field_vals)
         
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
     def process_grid(self, gobj,
                      np.ndarray[np.float64_t, ndim=2] positions,
                      fields = None):

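The decorators being added here are standard Cython compiler directives: boundscheck(False) and wraparound(False) drop per-index safety checks, and cdivision(True) emits bare C division (no ZeroDivisionError, C semantics for negative operands). A generic Cython sketch, not yt code:

    # sketch.pyx
    cimport cython

    @cython.boundscheck(False)   # skip bounds checks on indexing
    @cython.wraparound(False)    # disallow negative-index wrapping
    @cython.cdivision(True)      # raw C division, no zero check
    def mean(double[:] x):
        cdef Py_ssize_t i
        cdef double total = 0.0
        for i in range(x.shape[0]):
            total += x[i]
        return total / x.shape[0]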

https://bitbucket.org/yt_analysis/yt/commits/299fb9fa8915/
Changeset:   299fb9fa8915
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-21 18:12:19
Summary:     Update grid class with new select() function call.
Affected #:  1 file

diff -r 2c7066b270997d000070dd7636e37d6343083df1 -r 299fb9fa8915f54575ca3841ce1caf92aebaef87 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -441,14 +441,14 @@
         return new_field
 
     def select_icoords(self, dobj):
-        mask = self.select(dobj.selector)
+        mask = self._get_selector_mask(dobj.selector)
         if mask is None: return np.empty((0,3), dtype='int64')
         coords = convert_mask_to_indices(mask, mask.sum())
         coords += self.get_global_startindex()[None, :]
         return coords
 
     def select_fcoords(self, dobj):
-        mask = self.select(dobj.selector)
+        mask = self._get_selector_mask(dobj.selector)
         if mask is None: return np.empty((0,3), dtype='float64')
         coords = convert_mask_to_indices(mask, mask.sum()).astype("float64")
         coords += 0.5
@@ -457,15 +457,15 @@
         return coords
 
     def select_fwidth(self, dobj):
-        mask = self.select(dobj.selector)
-        if mask is None: return np.empty((0,3), dtype='float64')
-        coords = np.empty((mask.sum(), 3), dtype='float64')
+        count = self.count(dobj.selector)
+        if count == 0: return np.empty((0,3), dtype='float64')
+        coords = np.empty((count, 3), dtype='float64')
         for axis in range(3):
             coords[:,axis] = self.dds[axis]
         return coords
 
     def select_ires(self, dobj):
-        mask = self.select(dobj.selector)
+        mask = self._get_selector_mask(dobj.selector)
         if mask is None: return np.empty(0, dtype='int64')
         coords = np.empty(mask.sum(), dtype='int64')
         coords[:] = self.Level
@@ -484,21 +484,27 @@
         op.initialize()
         op.process_grid(self, positions, fields)
         vals = op.finalize()
-        return vals.reshape(self.ActiveDimensions, order="F")
+        return vals.reshape(self.ActiveDimensions, order="C")
 
-    def select(self, selector):
+    def _get_selector_mask(self, selector):
         if id(selector) == self._last_selector_id:
-            return self._last_mask
-        self._last_mask = selector.fill_mask(self)
-        self._last_selector_id = id(selector)
-        return self._last_mask
+            mask = self._last_mask
+        else:
+            self._last_mask = mask = selector.fill_mask(self)
+            self._last_selector_id = id(selector)
+        return mask
+
+    def select(self, selector, source, dest, offset):
+        mask = self._get_selector_mask(selector)
+        count = self.count(selector)
+        if count == 0: return
+        dest[offset:offset+count] = source[mask]
+        return count
 
     def count(self, selector):
-        if id(selector) == self._last_selector_id:
-            if self._last_mask is None: return 0
-            return self._last_mask.sum()
-        self.select(selector)
-        return self.count(selector)
+        mask = self._get_selector_mask(selector)
+        if mask is None: return 0
+        return mask.sum()
 
     def count_particles(self, selector, x, y, z):
         # We don't cache the selector results

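The pattern above, in miniature: cache the most recent selector's boolean mask keyed on id(selector), then build count() and the new copying select() on top of it. A sketch (fill_mask() is assumed to return a boolean array or None):

    class CachedSelection(object):
        _last_selector_id = None
        _last_mask = None

        def _get_selector_mask(self, selector):
            # Recompute only when a different selector comes through.
            if id(selector) != self._last_selector_id:
                self._last_mask = selector.fill_mask(self)
                self._last_selector_id = id(selector)
            return self._last_mask

        def count(self, selector):
            mask = self._get_selector_mask(selector)
            return 0 if mask is None else mask.sum()

        def select(self, selector, source, dest, offset):
            # Pack the selected cells of source into dest at offset.
            mask = self._get_selector_mask(selector)
            count = self.count(selector)
            if count == 0:
                return 0
            dest[offset:offset + count] = source[mask]
            return count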

https://bitbucket.org/yt_analysis/yt/commits/c7b78242f38c/
Changeset:   c7b78242f38c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-21 20:43:19
Summary:     Beginning consolidation of Oct/ParticleOct code.  Breaks currently.
Affected #:  10 files

diff -r 299fb9fa8915f54575ca3841ce1caf92aebaef87 -r c7b78242f38ceced42d4c05d8eb95a254a67daf8 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -281,9 +281,8 @@
             chunk_fields.append(self.weight_field)
         tree = self._get_tree(len(fields))
         # We do this once
-        if self.pf.h._global_mesh:
-            for chunk in self.data_source.chunks(None, "io"):
-                self._initialize_chunk(chunk, tree)
+        for chunk in self.data_source.chunks(None, "io"):
+            self._initialize_chunk(chunk, tree)
         # This needs to be parallel_objects-ified
         for chunk in parallel_objects(self.data_source.chunks(
                 chunk_fields, "io")): 

diff -r 299fb9fa8915f54575ca3841ce1caf92aebaef87 -r c7b78242f38ceced42d4c05d8eb95a254a67daf8 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -43,28 +43,23 @@
     _num_zones = 2
     _type_name = 'octree_subset'
     _skip_add = True
-    _con_args = ('domain', 'mask', 'cell_count')
+    _con_args = ('base_region', 'domain', 'pf')
     _container_fields = ("dx", "dy", "dz")
 
-    def __init__(self, domain, mask, cell_count):
+    def __init__(self, base_region, domain, pf):
         self.field_data = YTFieldData()
         self.field_parameters = {}
-        self.mask = mask
         self.domain = domain
+        self.domain_id = domain.domain_id
         self.pf = domain.pf
         self.hierarchy = self.pf.hierarchy
         self.oct_handler = domain.pf.h.oct_handler
-        self.cell_count = cell_count
-        level_counts = self.oct_handler.count_levels(
-            self.domain.pf.max_level, self.domain.domain_id, mask)
-        assert(level_counts.sum() == cell_count)
-        level_counts[1:] = level_counts[:-1]
-        level_counts[0] = 0
-        self.level_counts = np.add.accumulate(level_counts)
         self._last_mask = None
         self._last_selector_id = None
         self._current_particle_type = 'all'
         self._current_fluid_type = self.pf.default_fluid_type
+        self.base_region = base_region
+        self.base_selector = base_region.selector
 
     def _generate_container_field(self, field):
         if self._current_chunk is None:
@@ -78,31 +73,6 @@
         else:
             raise RuntimeError
 
-    def select_icoords(self, dobj):
-        return self.oct_handler.icoords(self.domain.domain_id, self.mask,
-                                        self.cell_count,
-                                        self.level_counts.copy())
-
-    def select_fcoords(self, dobj):
-        return self.oct_handler.fcoords(self.domain.domain_id, self.mask,
-                                        self.cell_count,
-                                        self.level_counts.copy())
-
-    def select_fwidth(self, dobj):
-        # Recall domain_dimensions is the number of cells, not octs
-        base_dx = (self.domain.pf.domain_width /
-                   self.domain.pf.domain_dimensions)
-        widths = np.empty((self.cell_count, 3), dtype="float64")
-        dds = (2**self.select_ires(dobj))
-        for i in range(3):
-            widths[:,i] = base_dx[i] / dds
-        return widths
-
-    def select_ires(self, dobj):
-        return self.oct_handler.ires(self.domain.domain_id, self.mask,
-                                     self.cell_count,
-                                     self.level_counts.copy())
-
     def __getitem__(self, key):
         tr = super(OctreeSubset, self).__getitem__(key)
         try:
@@ -140,22 +110,41 @@
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
-        nvals = (self.domain_ind >= 0).sum() * 8
+        nvals = (2, 2, 2, (self.domain_ind >= 0).sum())
         op = cls(nvals) # We allocate number of zones, not number of octs
         op.initialize()
-        op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
-                          self.domain.domain_id)
+        op.process_octree(self.oct_handler, self.domain_ind, positions, fields, 0)
         vals = op.finalize()
-        return self._reshape_vals(vals)
+        return np.asfortranarray(vals)
 
-    def select(self, selector):
-        if id(selector) == self._last_selector_id:
-            return self._last_mask
-        self._last_mask = self.oct_handler.domain_mask(
-                self.mask, self.domain.domain_id)
-        if self._last_mask.sum() == 0: return None
-        self._last_selector_id = id(selector)
-        return self._last_mask
+    def select_icoords(self, dobj):
+        d = self.oct_handler.icoords(self.selector)
+        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
+                                            domain_id = self.domain_id)
+        return tr
+
+    def select_fcoords(self, dobj):
+        d = self.oct_handler.fcoords(self.selector)
+        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
+                                            domain_id = self.domain_id)
+        return tr
+
+    def select_fwidth(self, dobj):
+        d = self.oct_handler.fwidth(self.selector)
+        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
+                                            domain_id = self.domain_id)
+        return tr
+
+    def select_ires(self, dobj):
+        d = self.oct_handler.ires(self.selector)
+        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 1,
+                                            domain_id = self.domain_id)
+        return tr
+
+    def select(self, selector, source, dest, offset):
+        n = self.oct_handler.selector_fill(selector, source, dest, offset,
+                                           domain_id = self.domain_id)
+        return n
 
     def count(self, selector):
         if id(selector) == self._last_selector_id:
@@ -180,6 +169,7 @@
     # this, it's unavoidable for many types of data storage on disk.
     _type_name = 'particle_octree_subset'
     _con_args = ('data_files', 'pf', 'min_ind', 'max_ind')
+    domain_id = -1
     def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0):
         # The first attempt at this will not work in parallel.
         self.data_files = data_files
@@ -206,55 +196,3 @@
             di = self.oct_handler.domain_ind(self.selector)
             self._domain_ind = di
         return self._domain_ind
-
-    def deposit(self, positions, fields = None, method = None):
-        # Here we perform our particle deposition.
-        cls = getattr(particle_deposit, "deposit_%s" % method, None)
-        if cls is None:
-            raise YTParticleDepositionNotImplemented(method)
-        nvals = (2, 2, 2, (self.domain_ind >= 0).sum())
-        op = cls(nvals) # We allocate number of zones, not number of octs
-        op.initialize()
-        op.process_octree(self.oct_handler, self.domain_ind, positions, fields, 0)
-        vals = op.finalize()
-        return np.asfortranarray(vals)
-
-    def select_icoords(self, dobj):
-        d = self.oct_handler.icoords(self.selector)
-        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3)
-        return tr
-
-    def select_fcoords(self, dobj):
-        d = self.oct_handler.fcoords(self.selector)
-        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3)
-        return tr
-
-    def select_fwidth(self, dobj):
-        d = self.oct_handler.fwidth(self.selector)
-        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3)
-        return tr
-
-    def select_ires(self, dobj):
-        d = self.oct_handler.ires(self.selector)
-        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 1)
-        return tr
-
-    def select(self, selector, source, dest, offset):
-        n = self.oct_handler.selector_fill(selector, source, dest, offset)
-        return n
-
-    def count(self, selector):
-        if id(selector) == self._last_selector_id:
-            if self._last_mask is None: return 0
-            return self._last_mask.sum()
-        self.select(selector)
-        return self.count(selector)
-
-    def count_particles(self, selector, x, y, z):
-        # We don't cache the selector results
-        count = selector.count_points(x,y,z)
-        return count
-
-    def select_particles(self, selector, x, y, z):
-        mask = selector.select_points(x,y,z)
-        return mask

diff -r 299fb9fa8915f54575ca3841ce1caf92aebaef87 -r c7b78242f38ceced42d4c05d8eb95a254a67daf8 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -346,17 +346,18 @@
     def _identify_base_chunk(self, dobj):
         if getattr(dobj, "_chunk_info", None) is None:
             mask = dobj.selector.select_octs(self.oct_handler)
-            counts = self.oct_handler.count_cells(dobj.selector, mask)
-            subsets = [RAMSESDomainSubset(d, mask, c)
-                       for d, c in zip(self.domains, counts) if c > 0]
+            base_region = getattr(dobj, "base_region", dobj)
+            # Note that domain_ids will be ONE INDEXED
+            domain_ids = self.oct_handler.domain_identify(dobj.selector)
+            subsets = [RAMSESDomainSubset(base_region, self.domains[did - 1],
+                                          self.parameter_file)
+                       for did in domain_ids]
             dobj._chunk_info = subsets
-            dobj.size = sum(counts)
-            dobj.shape = (dobj.size,)
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
 
     def _chunk_all(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        yield YTDataChunk(dobj, "all", oobjs, dobj.size)
+        yield YTDataChunk(dobj, "all", oobjs, None)
 
     def _chunk_spatial(self, dobj, ngz, sort = None):
         sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
@@ -372,7 +373,7 @@
     def _chunk_io(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], subset.cell_count)
+            yield YTDataChunk(dobj, "io", [subset], None)
 
 class RAMSESStaticOutput(StaticOutput):
     _hierarchy_class = RAMSESGeometryHandler

diff -r 299fb9fa8915f54575ca3841ce1caf92aebaef87 -r c7b78242f38ceced42d4c05d8eb95a254a67daf8 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -31,6 +31,8 @@
 import numpy as np
 from oct_container cimport Oct, OctAllocationContainer, \
     OctreeContainer, ORDER_MAX
+from selection_routines cimport SelectorObject, \
+    OctVisitorData, oct_visitor_function
 cimport oct_visitors
 cimport cython
 
@@ -215,61 +217,6 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def count_cells(self, SelectorObject selector,
-              np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
-        cdef int i, j, k
-        cdef np.int64_t oi
-        # pos here is CELL center, not OCT center.
-        cdef np.float64_t pos[3]
-        cdef int n = mask.shape[0]
-        cdef np.ndarray[np.int64_t, ndim=1] count
-        count = np.zeros(self.max_domain, 'int64')
-        # 
-        cur = self.cont
-        for oi in range(n):
-            if oi - cur.offset >= cur.n_assigned:
-                cur = cur.next
-            o = &cur.my_octs[oi - cur.offset]
-            for i in range(8):
-                count[o.domain - 1] += mask[o.domain_ind,i]
-        return count
-
-    @cython.boundscheck(True)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def count_leaves(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
-        # Modified to work when not all octs are assigned
-        cdef int i, j, k, ii
-        cdef np.int64_t oi
-        # pos here is CELL center, not OCT center.
-        cdef np.float64_t pos[3]
-        cdef int n = mask.shape[0]
-        cdef np.ndarray[np.int64_t, ndim=1] count
-        count = np.zeros(self.max_domain, 'int64')
-        # 
-        cur = self.cont
-        for oi in range(n):
-            if oi - cur.offset >= cur.n_assigned:
-                cur = cur.next
-                if cur == NULL:
-                    break
-            o = &cur.my_octs[oi - cur.offset]
-            # skip if unassigned
-            if o == NULL:
-                continue
-            if o.domain == -1: 
-                continue
-            for i in range(2):
-                for j in range(2):
-                    for k in range(2):
-                        if o.children[i][j][k] == NULL:
-                            ii = ((k*2)+j)*2+i
-                            count[o.domain - 1] += mask[o.domain_ind,ii]
-        return count
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
     cdef void neighbors(self, Oct* o, Oct* neighbors[27]):
         #Get 3x3x3 neighbors, although the 1,1,1 oct is the
         #central one. 
@@ -357,8 +304,129 @@
                 bounds[i, 3+ii] = size[ii]
         return bounds
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def icoords(self, SelectorObject selector, np.uint64_t num_cells = -1):
+        if num_cells == -1:
+            num_cells = selector.count_octs(self)
+        cdef np.ndarray[np.int64_t, ndim=2] coords
+        coords = np.empty((num_cells, 3), dtype="int64")
+        cdef OctVisitorData data
+        data.array = <void *> coords.data
+        data.index = 0
+        self.visit_all_octs(selector, oct_visitors.icoords_octs, &data)
+        return coords
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def ires(self, SelectorObject selector, np.uint64_t num_cells = -1):
+        if num_cells == -1:
+            num_cells = selector.count_octs(self)
+        #Return the 'resolution' of each cell; ie the level
+        cdef np.ndarray[np.int64_t, ndim=1] res
+        res = np.empty(num_cells, dtype="int64")
+        cdef OctVisitorData data
+        data.array = <void *> res.data
+        data.index = 0
+        self.visit_all_octs(selector, oct_visitors.ires_octs, &data)
+        return res
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def fwidth(self, SelectorObject selector, np.uint64_t num_cells = -1):
+        if num_cells == -1:
+            num_cells = selector.count_octs(self)
+        cdef np.ndarray[np.float64_t, ndim=2] fwidth
+        fwidth = np.empty((num_cells, 3), dtype="float64")
+        cdef OctVisitorData data
+        data.array = <void *> fwidth.data
+        data.index = 0
+        self.visit_all_octs(selector, oct_visitors.fwidth_octs, &data)
+        cdef np.float64_t base_dx
+        for i in range(3):
+            base_dx = (self.DRE[i] - self.DLE[i])/self.nn[i]
+            fwidth[:,i] *= base_dx
+        return fwidth
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def fcoords(self, SelectorObject selector, np.uint64_t num_cells = -1):
+        if num_cells == -1:
+            num_cells = selector.count_octs(self)
+        #Return the floating point unitary position of every cell
+        cdef np.ndarray[np.float64_t, ndim=2] coords
+        coords = np.empty((num_cells, 3), dtype="float64")
+        cdef OctVisitorData data
+        data.array = <void *> coords.data
+        data.index = 0
+        self.visit_all_octs(selector, oct_visitors.fcoords_octs, &data)
+        cdef int i
+        cdef np.float64_t base_dx
+        for i in range(3):
+            base_dx = (self.DRE[i] - self.DLE[i])/self.nn[i]
+            coords[:,i] *= base_dx
+            coords[:,i] += self.DLE[i]
+        return coords
+
+    def selector_fill(self, SelectorObject selector,
+                      np.ndarray source,
+                      np.ndarray dest = None,
+                      np.int64_t offset = 0, int dims = 1,
+                      int domain_id = -1):
+        # This is actually not correct.  The hard part is that we need to
+        # iterate the same way visit_all_octs does, but we need to track the
+        # number of octs total visited.
+        cdef np.int64_t num_cells = -1
+        if dest is None:
+            num_cells = selector.count_octs(self)
+            if dims > 1:
+                dest = np.zeros((num_cells, dims), dtype=source.dtype,
+                    order='C')
+            else:
+                dest = np.zeros(num_cells, dtype=source.dtype, order='C')
+        cdef OctVisitorData data
+        data.index = offset
+        data.domain = domain_id
+        # We only need this so we can continue calculating the offset
+        data.dims = dims
+        cdef void *p[2]
+        p[0] = source.data
+        p[1] = dest.data
+        data.array = &p
+        cdef oct_visitor_function *func
+        if source.dtype != dest.dtype:
+            raise RuntimeError
+        if source.dtype == np.int64:
+            func = oct_visitors.copy_array_i64
+        elif source.dtype == np.float64:
+            func = oct_visitors.copy_array_f64
+        else:
+            raise NotImplementedError
+        self.visit_all_octs(selector, func, &data)
+        if num_cells >= 0:
+            return dest
+        return data.index - offset
+
 cdef class RAMSESOctreeContainer(OctreeContainer):
 
+    def domain_identify(self, SelectorObject selector):
+        cdef np.ndarray[np.uint8_t, ndim=1] domain_mask
+        domain_mask = np.zeros(self.max_domain, dtype="uint8")
+        cdef OctVisitorData data
+        data.array = domain_mask.data
+        self.visit_all_octs(selector, oct_visitors.identify_octs, &data)
+        cdef int i
+        domain_ids = []
+        for i in range(self.max_domain):
+            if domain_mask[i] == 1:
+                domain_ids.append(i+1)
+        return domain_ids
+
+
     cdef np.int64_t get_domain_offset(self, int domain_id):
         cdef OctAllocationContainer *cont = self.domains[domain_id - 1]
         return cont.offset
@@ -413,149 +481,6 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def count(self, np.ndarray[np.uint8_t, ndim=1, cast=True] mask,
-                     split = False):
-        cdef int n = mask.shape[0]
-        cdef int i, dom
-        cdef OctAllocationContainer *cur
-        cdef np.ndarray[np.int64_t, ndim=1] count
-        count = np.zeros(self.max_domain, 'int64')
-        # This is the idiom for iterating over many containers.
-        cur = self.cont
-        for i in range(n):
-            if i - cur.offset >= cur.n_assigned: cur = cur.next
-            if mask[i] == 1:
-                count[cur.my_octs[i - cur.offset].domain - 1] += 1
-        return count
-
-    def domain_and(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                   int domain_id):
-        cdef np.int64_t i, oi, n,  use
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        cdef np.ndarray[np.uint8_t, ndim=2] m2 = \
-                np.zeros((mask.shape[0], 8), 'uint8')
-        n = mask.shape[0]
-        for oi in range(cur.n_assigned):
-            o = &cur.my_octs[oi]
-            use = 0
-            for i in range(8):
-                m2[o.domain_ind, i] = mask[o.domain_ind, i]
-        return m2 # NOTE: This is uint8_t
-
-    def domain_mask(self,
-                    # mask is the base selector's *global* mask
-                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                    int domain_id):
-        # What distinguishes this one from domain_and is that we have a mask,
-        # which covers the whole domain, but our output will only be of a much
-        # smaller subset of octs that belong to a given domain *and* the mask.
-        # Note also that typically when something calls domain_and, they will 
-        # use a logical_any along the oct axis.  Here we don't do that.
-        # Note also that we change the shape of the returned array.
-        cdef np.int64_t i, j, k, oi, n, nm, use
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        n = mask.shape[0]
-        nm = 0
-        for oi in range(cur.n_assigned):
-            o = &cur.my_octs[oi]
-            use = 0
-            for i in range(8):
-                if mask[o.domain_ind, i] == 1: use = 1
-            nm += use
-        cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
-                np.zeros((2, 2, 2, nm), 'uint8')
-        nm = 0
-        for oi in range(cur.n_assigned):
-            o = &cur.my_octs[oi]
-            use = 0
-            for i in range(2):
-                for j in range(2):
-                    for k in range(2):
-                        ii = ((k*2)+j)*2+i
-                        if mask[o.domain_ind, ii] == 0: continue
-                        use = m2[i, j, k, nm] = 1
-            nm += use
-        return m2.astype("bool")
-
-    def domain_ind(self,
-                    # mask is the base selector's *global* mask
-                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                    int domain_id):
-        cdef np.int64_t i, j, k, oi, noct, n, nm, use, offset
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        cdef np.ndarray[np.int64_t, ndim=1] ind = np.zeros(cur.n, 'int64') - 1
-        nm = 0
-        for oi in range(cur.n):
-            o = &cur.my_octs[oi]
-            use = 0
-            for i in range(8):
-                if mask[o.domain_ind, i] == 1: use = 1
-            if use == 1:
-                ind[o.domain_ind - cur.offset] = nm
-            nm += use
-        return ind
-
-    def check(self, int curdom, int print_all = 0):
-        cdef int dind, pi
-        cdef Oct oct
-        cdef OctAllocationContainer *cont = self.domains[curdom - 1]
-        cdef int nbad = 0
-        cdef int nmissed = 0
-        cdef int unassigned = 0
-        for pi in range(cont.n_assigned):
-            oct = cont.my_octs[pi]
-            if print_all==1:
-                print pi, oct.level, oct.domain,
-                print oct.pos[0],oct.pos[1],oct.pos[2]
-            for i in range(2):
-                for j in range(2):
-                    for k in range(2):
-                        if oct.children[i][j][k] != NULL and \
-                           oct.children[i][j][k].level != oct.level + 1:
-                            nbad += 1
-                        if oct.domain != curdom:
-                            print curdom, oct.domain
-                            nmissed += 1
-                        if oct.domain == -1:
-                            unassigned += 1
-        print "DOMAIN % 3i HAS % 9i BAD OCTS (%s / %s / %s)" % (curdom, nbad, 
-            cont.n - cont.n_assigned, cont.n_assigned, cont.n)
-        print "DOMAIN % 3i HAS % 9i MISSED OCTS" % (curdom, nmissed)
-        print "DOMAIN % 3i HAS % 9i UNASSIGNED OCTS" % (curdom, unassigned)
-
-    def check_refinement(self, int curdom):
-        cdef int pi, i, j, k, some_refined, some_unrefined
-        cdef Oct *oct
-        cdef int bad = 0
-        cdef OctAllocationContainer *cont = self.domains[curdom - 1]
-        for pi in range(cont.n_assigned):
-            oct = &cont.my_octs[pi]
-            some_unrefined = 0
-            some_refined = 0
-            for i in range(2):
-                for j in range(2):
-                    for k in range(2):
-                        if oct.children[i][j][k] == NULL:
-                            some_unrefined = 1
-                        else:
-                            some_refined = 1
-            if some_unrefined == some_refined == 1:
-                #print "BAD", oct.file_ind, oct.domain_ind
-                bad += 1
-                if curdom == 10 or curdom == 72:
-                    for i in range(2):
-                        for j in range(2):
-                            for k in range(2):
-                                print (oct.children[i][j][k] == NULL),
-                    print
-        print "BAD TOTAL", curdom, bad, cont.n_assigned
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
     def add(self, int curdom, int curlevel, int ng,
             np.ndarray[np.float64_t, ndim=2] pos,
             int local_domain, int skip_boundary = 1):
@@ -608,116 +533,6 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def icoords(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
-        # Wham, bam, it's a scam
-        cdef np.int64_t i, j, k, oi, ci, n, ii, level
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        n = mask.shape[0]
-        cdef np.ndarray[np.int64_t, ndim=2] coords
-        coords = np.empty((cell_count, 3), dtype="int64")
-        ci = 0
-        for oi in range(cur.n_assigned):
-            o = &cur.my_octs[oi]
-            for i in range(2):
-                for j in range(2):
-                    for k in range(2):
-                        ii = ((k*2)+j)*2+i
-                        if mask[o.domain_ind, ii] == 0: continue
-                        coords[ci, 0] = (o.pos[0] << 1) + i
-                        coords[ci, 1] = (o.pos[1] << 1) + j
-                        coords[ci, 2] = (o.pos[2] << 1) + k
-                        ci += 1
-        return coords
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def ires(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
-        # Wham, bam, it's a scam
-        cdef np.int64_t i, j, k, oi, ci, n
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        n = mask.shape[0]
-        cdef np.ndarray[np.int64_t, ndim=1] levels
-        levels = np.empty(cell_count, dtype="int64")
-        ci = 0
-        for oi in range(cur.n):
-            o = &cur.my_octs[oi]
-            for i in range(8):
-                if mask[oi + cur.offset, i] == 0: continue
-                levels[ci] = o.level
-                ci += 1
-        return levels
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def count_levels(self, int max_level, int domain_id,
-                     np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
-        cdef np.ndarray[np.int64_t, ndim=1] level_count
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        cdef int oi, i
-        level_count = np.zeros(max_level+1, 'int64')
-        for oi in range(cur.n_assigned):
-            o = &cur.my_octs[oi]
-            for i in range(8):
-                if mask[o.domain_ind, i] == 0: continue
-                level_count[o.level] += 1
-        return level_count
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def fcoords(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
-        # Wham, bam, it's a scam
-        cdef np.int64_t i, j, k, oi, ci, n, ii
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        cdef np.float64_t pos[3]
-        cdef np.float64_t base_dx[3], dx[3]
-        n = mask.shape[0]
-        cdef np.ndarray[np.float64_t, ndim=2] coords
-        coords = np.empty((cell_count, 3), dtype="float64")
-        for i in range(3):
-            # This is the base_dx, but not the base distance from the center
-            # position.  Note that the positions will also all be offset by
-            # dx/2.0.  This is also for *oct grids*, not cells.
-            base_dx[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
-        ci = 0
-        for oi in range(cur.n):
-            o = &cur.my_octs[oi]
-            for i in range(3):
-                # This gives the *grid* width for this level
-                dx[i] = base_dx[i] / (1 << o.level)
-                # o.pos is the *grid* index, so pos[i] is the center of the
-                # first cell in the grid
-                pos[i] = self.DLE[i] + o.pos[i]*dx[i] + dx[i]/4.0
-                dx[i] = dx[i] / 2.0 # This is now the *offset* 
-            for i in range(2):
-                for j in range(2):
-                    for k in range(2):
-                        ii = ((k*2)+j)*2+i
-                        if mask[o.domain_ind, ii] == 0: continue
-                        coords[ci, 0] = pos[0] + dx[0] * i
-                        coords[ci, 1] = pos[1] + dx[1] * j
-                        coords[ci, 2] = pos[2] + dx[2] * k
-                        ci += 1
-        return coords
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
     def fill_level(self, int domain, int level, dest_fields, source_fields,
                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask, int offset):
         cdef np.ndarray[np.float64_t, ndim=2] source
@@ -745,6 +560,17 @@
                             local_filled += 1
         return local_filled
 
+    def domain_ind(self, selector):
+        cdef np.ndarray[np.int64_t, ndim=1] ind
+        # Here's where we grab the masked items.
+        ind = np.zeros(self.nocts, 'int64') - 1
+        cdef OctVisitorData data
+        data.array = ind.data
+        data.index = 0
+        data.last = -1
+        self.visit_all_octs(selector, oct_visitors.index_octs, &data)
+        return ind
+
 cdef class ARTOctreeContainer(RAMSESOctreeContainer):
 
     @cython.boundscheck(True)

diff -r 299fb9fa8915f54575ca3841ce1caf92aebaef87 -r c7b78242f38ceced42d4c05d8eb95a254a67daf8 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -40,6 +40,7 @@
 cdef oct_visitor_function fwidth_octs
 cdef oct_visitor_function copy_array_f64
 cdef oct_visitor_function copy_array_i64
+cdef oct_visitor_function identify_octs
 
 cdef inline int oind(OctVisitorData *data):
     return (((data.ind[0]*2)+data.ind[1])*2+data.ind[2])

diff -r 299fb9fa8915f54575ca3841ce1caf92aebaef87 -r c7b78242f38ceced42d4c05d8eb95a254a67daf8 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -36,26 +36,28 @@
     # We should always have global_index less than our source.
     # "last" here tells us the dimensionality of the array.
     if selected == 0: return
+    if data.domain > 0 and o.domain != data.domain: return
     cdef int i
     # There are this many records between "octs"
-    cdef np.int64_t index = (data.global_index * 8)*data.last
+    cdef np.int64_t index = (data.global_index * 8)*data.dims
     cdef np.float64_t **p = <np.float64_t**> data.array
-    index += oind(data)*data.last
-    for i in range(data.last):
+    index += oind(data)*data.dims
+    for i in range(data.dims):
         p[1][data.index + i] = p[0][index + i]
-    data.index += data.last
+    data.index += data.dims
 
 cdef void copy_array_i64(Oct *o, OctVisitorData *data, np.uint8_t selected):
     # We should always have global_index less than our source.
     # "last" here tells us the dimensionality of the array.
     if selected == 0: return
+    if data.domain > 0 and o.domain != data.domain: return
     cdef int i
-    cdef np.int64_t index = (data.global_index * 8)*data.last
+    cdef np.int64_t index = (data.global_index * 8)*data.dims
     cdef np.int64_t **p = <np.int64_t**> data.array
-    index += oind(data)*data.last
-    for i in range(data.last):
+    index += oind(data)*data.dims
+    for i in range(data.dims):
         p[1][data.index + i] = p[0][index + i]
-    data.index += data.last
+    data.index += data.dims
 
 cdef void count_total_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
     # Count even if not selected.
@@ -140,3 +142,11 @@
     for i in range(3):
         fwidth[data.index * 3 + i] = dx
     data.index += 1
+
+cdef void identify_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    # We assume that our domain has *already* been selected, which means
+    # we'll get all cells within the domain for a by-domain selector and all
+    # cells within the domain *and* selector for the selector itself.
+    if selected == 0: return
+    cdef np.uint8_t *arr = <np.uint8_t *> data.array
+    arr[o.domain - 1] = 1

diff -r 299fb9fa8915f54575ca3841ce1caf92aebaef87 -r c7b78242f38ceced42d4c05d8eb95a254a67daf8 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -154,7 +154,7 @@
 
     def _chunk_all(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        yield ParticleDataChunk(self.oct_handler, self.regions, dobj, "all", oobjs, None)
+        yield YTDataChunk(dobj, "all", oobjs, None)
 
     def _chunk_spatial(self, dobj, ngz, sort = None):
         sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
@@ -168,14 +168,12 @@
                 g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
             else:
                 g = og
-            yield ParticleDataChunk(self.oct_handler, self.regions, dobj,
-                                    "spatial", [g])
+            yield YTDataChunk(dobj, "spatial", [g])
 
     def _chunk_io(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield ParticleDataChunk(self.oct_handler, self.regions,
-                                    dobj, "io", [subset], None)
+            yield YTDataChunk(dobj, "io", [subset], None)
 
 class ParticleDataChunk(YTDataChunk):
     def __init__(self, oct_handler, regions, *args, **kwargs):

diff -r 299fb9fa8915f54575ca3841ce1caf92aebaef87 -r c7b78242f38ceced42d4c05d8eb95a254a67daf8 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -106,74 +106,6 @@
             o = self.oct_list[oi]
             yield (o.file_ind, o.domain_ind, o.domain)
 
-    #@cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def icoords(self, SelectorObject selector, np.uint64_t num_cells = -1):
-        if num_cells == -1:
-            num_cells = selector.count_octs(self)
-        cdef np.ndarray[np.int64_t, ndim=2] coords
-        coords = np.empty((num_cells, 3), dtype="int64")
-        cdef OctVisitorData data
-        data.array = <void *> coords.data
-        data.index = 0
-        self.visit_all_octs(selector, oct_visitors.icoords_octs, &data)
-        return coords
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def ires(self, SelectorObject selector, np.uint64_t num_cells = -1):
-        if num_cells == -1:
-            num_cells = selector.count_octs(self)
-        #Return the 'resolution' of each cell; ie the level
-        cdef np.ndarray[np.int64_t, ndim=1] res
-        res = np.empty(num_cells, dtype="int64")
-        cdef OctVisitorData data
-        data.array = <void *> res.data
-        data.index = 0
-        self.visit_all_octs(selector, oct_visitors.ires_octs, &data)
-        return res
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def fwidth(self, SelectorObject selector, np.uint64_t num_cells = -1):
-        if num_cells == -1:
-            num_cells = selector.count_octs(self)
-        cdef np.ndarray[np.float64_t, ndim=2] fwidth
-        fwidth = np.empty((num_cells, 3), dtype="float64")
-        cdef OctVisitorData data
-        data.array = <void *> fwidth.data
-        data.index = 0
-        self.visit_all_octs(selector, oct_visitors.fwidth_octs, &data)
-        cdef np.float64_t base_dx
-        for i in range(3):
-            base_dx = (self.DRE[i] - self.DLE[i])/self.nn[i]
-            fwidth[:,i] *= base_dx
-        return fwidth
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def fcoords(self, SelectorObject selector, np.uint64_t num_cells = -1):
-        if num_cells == -1:
-            num_cells = selector.count_octs(self)
-        #Return the floating point unitary position of every cell
-        cdef np.ndarray[np.float64_t, ndim=2] coords
-        coords = np.empty((num_cells, 3), dtype="float64")
-        cdef OctVisitorData data
-        data.array = <void *> coords.data
-        data.index = 0
-        self.visit_all_octs(selector, oct_visitors.fcoords_octs, &data)
-        cdef int i
-        cdef np.float64_t base_dx
-        for i in range(3):
-            base_dx = (self.DRE[i] - self.DLE[i])/self.nn[i]
-            coords[:,i] *= base_dx
-            coords[:,i] += self.DLE[i]
-        return coords
-
     def allocate_domains(self, domain_counts):
         pass
 
@@ -334,57 +266,6 @@
                         self.visit(o.children[i][j][k], counts, level + 1)
         return
 
-    def domain_ind(self, selector):
-        cdef np.ndarray[np.int64_t, ndim=1] ind
-        # Here's where we grab the masked items.
-        ind = np.zeros(self.nocts, 'int64') - 1
-        cdef OctVisitorData data
-        data.array = ind.data
-        data.index = 0
-        data.last = -1
-        self.visit_all_octs(selector, oct_visitors.index_octs, &data)
-        return ind
-
-    def selector_fill(self, SelectorObject selector,
-                      np.ndarray source,
-                      np.ndarray dest = None,
-                      np.int64_t offset = 0, int dims = 1):
-        # This is actually not correct.  The hard part is that we need to
-        # iterate the same way visit_all_octs does, but we need to track the
-        # number of octs total visited.
-        cdef np.int64_t num_cells = -1
-        if dest is None:
-            num_cells = selector.count_octs(self)
-            if dims > 1:
-                dest = np.zeros((num_cells, dims), dtype=source.dtype,
-                    order='C')
-            else:
-                dest = np.zeros(num_cells, dtype=source.dtype, order='C')
-            dest = dest - 10000
-        cdef OctVisitorData data
-        data.index = offset
-        # We only need this so we can continue calculating the offset
-        data.last = dims
-        cdef void *p[2]
-        p[0] = source.data
-        p[1] = dest.data
-        data.array = &p
-        cdef oct_visitor_function *func
-        if source.dtype != dest.dtype:
-            raise RuntimeError
-        if source.dtype == np.int64:
-            func = oct_visitors.copy_array_i64
-        elif source.dtype == np.float64:
-            func = oct_visitors.copy_array_f64
-        else:
-            raise NotImplementedError
-        self.visit_all_octs(selector, func, &data)
-        assert ((data.global_index + 1)*8*dims == source.size)
-        assert (dest.size == data.index)
-        if num_cells >= 0:
-            return dest
-        return data.index - offset
-
 cdef class ParticleRegions:
     cdef np.float64_t left_edge[3]
     cdef np.float64_t dds[3]

diff -r 299fb9fa8915f54575ca3841ce1caf92aebaef87 -r c7b78242f38ceced42d4c05d8eb95a254a67daf8 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -32,6 +32,8 @@
     np.uint64_t global_index
     int ind[3]
     void *array
+    int dims
+    int domain
 
 ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor,
                                    np.uint8_t selected)
@@ -47,7 +49,7 @@
                         OctVisitorData *data)
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],
-                               np.int32_t level) nogil
+                               np.int32_t level, Oct *o = ?) nogil
     cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3],
                          int eterm[3]) nogil
     cdef void set_bounds(self,

diff -r 299fb9fa8915f54575ca3841ce1caf92aebaef87 -r c7b78242f38ceced42d4c05d8eb95a254a67daf8 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -187,7 +187,7 @@
             LE[i] = pos[i] - dds[i]/2.0
             RE[i] = pos[i] + dds[i]/2.0
         #print LE[0], RE[0], LE[1], RE[1], LE[2], RE[2]
-        res = self.select_grid(LE, RE, level)
+        res = self.select_grid(LE, RE, level, root)
         cdef int eterm[3] 
         eterm[0] = eterm[1] = eterm[2] = 0
         cdef int next_level, this_level
@@ -201,7 +201,15 @@
             this_level = 0
         if res == 0 and this_level == 1:
             return
+        if res == -1: 
+            # This happens when we do domain selection but the oct has
+            # children.  This would allow an oct to pass to its children but
+            # not get accessed itself.
+            next_level = 1
+            this_level = 0
         cdef int increment = 1
+        if data.domain > 0 and root.domain != data.domain:
+            increment = 0
         # Now we visit all our children.  We subtract off sdds for the first
         # pass because we center it on the first cell.
         spos[0] = pos[0] - sdds[0]/2.0
@@ -229,7 +237,7 @@
 
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],
-                               np.int32_t level) nogil:
+                               np.int32_t level, Oct *o = NULL) nogil:
         return 0
     
     cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3],
@@ -425,7 +433,7 @@
     @cython.cdivision(True)
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],
-                               np.int32_t level) nogil:
+                               np.int32_t level, Oct *o = NULL) nogil:
         cdef np.float64_t box_center, relcenter, closest, dist, edge
         cdef int i
         if (left_edge[0] <= self.center[0] <= right_edge[0] and
@@ -491,7 +499,7 @@
     @cython.cdivision(True)
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],
-                               np.int32_t level) nogil:
+                               np.int32_t level, Oct *o = NULL) nogil:
         if level < self.min_level or level > self.max_level: return 0
         for i in range(3):
             if left_edge[i] >= self.right_edge[i]: return 0
@@ -562,7 +570,7 @@
     @cython.cdivision(True)
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],
-                               np.int32_t level) nogil:
+                               np.int32_t level, Oct *o = NULL) nogil:
         cdef np.float64_t *arr[2]
         cdef np.float64_t pos[3], H, D, R2, temp
         cdef int i, j, k, n
@@ -630,7 +638,7 @@
     @cython.cdivision(True)
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],
-                               np.int32_t level) nogil:
+                               np.int32_t level, Oct *o = NULL) nogil:
         cdef int i, j, k, n
         cdef np.float64_t *arr[2]
         cdef np.float64_t pos[3]
@@ -698,7 +706,7 @@
     @cython.cdivision(True)
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],
-                               np.int32_t level) nogil:
+                               np.int32_t level, Oct *o = NULL) nogil:
         if right_edge[self.axis] > self.coord \
            and left_edge[self.axis] <= self.coord:
             return 1
@@ -736,7 +744,7 @@
     @cython.cdivision(True)
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],
-                               np.int32_t level) nogil:
+                               np.int32_t level, Oct *o = NULL) nogil:
         if (    (self.px >= left_edge[self.px_ax])
             and (self.px < right_edge[self.px_ax])
             and (self.py >= left_edge[self.py_ax])
@@ -815,7 +823,7 @@
     @cython.cdivision(True)
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],
-                               np.int32_t level) nogil:
+                               np.int32_t level, Oct *o = NULL) nogil:
         cdef int i, ax
         cdef int i1, i2
         cdef np.float64_t vs[3], t, v[3]
@@ -1006,7 +1014,7 @@
     @cython.cdivision(True)
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],
-                               np.int32_t level) nogil:
+                               np.int32_t level, Oct *o = NULL) nogil:
         # This is the sphere selection
         cdef np.float64_t radius2, box_center, relcenter, closest, dist, edge
         return 1
@@ -1099,28 +1107,18 @@
 grid_selector = GridSelector
 
 cdef class OctreeSubsetSelector(SelectorObject):
-    # This is a numpy array, which will be a bool of ndim 1
-    cdef object oct_mask
     cdef int domain_id
+    cdef SelectorObject base_selector
 
     def __init__(self, dobj):
-        self.oct_mask = dobj.mask
         self.domain_id = dobj.domain.domain_id
+        self.base_selector = dobj.base_selector
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
     def select_octs(self, OctreeContainer octree):
-        cdef np.ndarray[np.uint8_t, ndim=2] m2
-        m2 = octree.domain_and(self.oct_mask, self.domain_id)
-        cdef int oi, i, a
-        for oi in range(m2.shape[0]):
-            a = 0
-            for i in range(8):
-                if m2[oi, i] == 1: a = 1
-            for i in range(8):
-                m2[oi, i] = a
-        return m2.astype("bool")
+        raise RuntimeError
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -1147,6 +1145,20 @@
                          int eterm[3]) nogil:
         return 1
 
+    cdef int select_grid(self, np.float64_t left_edge[3],
+                         np.float64_t right_edge[3], np.int32_t level,
+                         Oct *o = NULL) nogil:
+        # Because visitors now use select_grid, we should be explicitly
+        # checking this.
+        cdef int any_children = 0
+        cdef int i, j, k
+        if o == NULL:
+            return 0
+        cdef int res
+        res = self.base_selector.select_grid(left_edge, right_edge, level, o)
+        if res == 1 and o.domain != self.domain_id: return -1
+        return 1
+
 octree_subset_selector = OctreeSubsetSelector
 
 cdef class ParticleOctreeSubsetSelector(SelectorObject):
@@ -1193,10 +1205,11 @@
         return 1
 
     cdef int select_grid(self, np.float64_t left_edge[3],
-                         np.float64_t right_edge[3], np.int32_t level) nogil:
+                         np.float64_t right_edge[3], np.int32_t level,
+                         Oct *o = NULL) nogil:
         # Because visitors now use select_grid, we should be explicitly
         # checking this.
-        return self.base_selector.select_grid(left_edge, right_edge, level)
+        return self.base_selector.select_grid(left_edge, right_edge, level, o)
 
 particle_octree_subset_selector = ParticleOctreeSubsetSelector
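
The tri-state select_grid convention this changeset introduces is worth restating: 0 rejects an oct and its whole subtree, 1 selects it, and -1 (geometry matches, but the oct is owned by another domain) skips the oct itself while still descending to its children. A minimal Python sketch of that traversal logic, with all names hypothetical and the min/max level window bookkeeping omitted:

    from dataclasses import dataclass, field
    from typing import List

    @dataclass
    class Oct:
        domain: int
        selected_by_geometry: bool
        children: List["Oct"] = field(default_factory=list)

    def visit(oct, domain_id, visited):
        res = 1 if oct.selected_by_geometry else 0
        if res == 1 and domain_id > 0 and oct.domain != domain_id:
            res = -1                    # pass through to children only
        if res == 0:
            return                      # prune the whole subtree
        if res == 1:
            visited.append(oct.domain)  # this oct's cells are accessed
        for child in oct.children:      # res in (1, -1): recurse either way
            visit(child, domain_id, visited)

    root = Oct(domain=1, selected_by_geometry=True,
               children=[Oct(domain=2, selected_by_geometry=True)])
    visited = []
    visit(root, domain_id=2, visited=visited)
    print(visited)  # [2] -- the root passes traversal through, unread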
 


https://bitbucket.org/yt_analysis/yt/commits/e9ab67bfd569/
Changeset:   e9ab67bfd569
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-21 20:57:37
Summary:     Attempting to speed up traversal.
Affected #:  2 files

diff -r c7b78242f38ceced42d4c05d8eb95a254a67daf8 -r e9ab67bfd5695e0f0b27f320388854120c4ced7c yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -69,6 +69,7 @@
 cdef void count_total_cells(Oct *o, OctVisitorData *data, np.uint8_t selected):
     # Count even if not selected.
     # Number of *octs* visited.
+    if data.domain > 0 and o.domain != data.domain: return
     data.index += selected
 
 cdef void mark_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
@@ -94,6 +95,7 @@
     # Note that we provide an index even if the cell is not selected.
     cdef int i
     cdef np.int64_t *arr
+    if data.domain > 0 and data.domain != o.domain: return
     if data.last != o.domain_ind:
         data.last = o.domain_ind
         arr = <np.int64_t *> data.array

diff -r c7b78242f38ceced42d4c05d8eb95a254a67daf8 -r e9ab67bfd5695e0f0b27f320388854120c4ced7c yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1152,8 +1152,7 @@
         # checking this.
         cdef int any_children = 0
         cdef int i, j, k
-        if o == NULL:
-            return 0
+        if o == NULL: return 0
         cdef int res
         res = self.base_selector.select_grid(left_edge, right_edge, level, o)
         if res == 1 and o.domain != self.domain_id: return -1
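
The guard added to the visitors here is the whole optimization: when a positive domain is requested, a visitor bails out before doing any work on octs owned elsewhere, while data.domain <= 0 preserves the old count-everything behavior. A runnable sketch under those assumptions:

    def count_total_cells(oct_domain, data, selected):
        # data["domain"] <= 0 means "no domain filtering"
        if data["domain"] > 0 and oct_domain != data["domain"]:
            return
        data["index"] += selected

    data = {"domain": 2, "index": 0}
    for oct_domain, selected in [(1, 1), (2, 1), (2, 0), (3, 1)]:
        count_total_cells(oct_domain, data, selected)
    print(data["index"])  # 1 -- only the selected oct owned by domain 2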


https://bitbucket.org/yt_analysis/yt/commits/3959497fa6a3/
Changeset:   3959497fa6a3
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-21 22:32:18
Summary:     Attempting to thread domain info through the selectors.

This (hopefully) will unify the oct containers and make it viable to distribute
the RAMSES oct container over multiple processors.
Affected #:  5 files

diff -r e9ab67bfd5695e0f0b27f320388854120c4ced7c -r 3959497fa6a3f90e117788dcd5f570296387ad7e yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -118,25 +118,25 @@
         return np.asfortranarray(vals)
 
     def select_icoords(self, dobj):
-        d = self.oct_handler.icoords(self.selector)
+        d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id)
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
                                             domain_id = self.domain_id)
         return tr
 
     def select_fcoords(self, dobj):
-        d = self.oct_handler.fcoords(self.selector)
+        d = self.oct_handler.fcoords(self.selector, domain_id = self.domain_id)
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
                                             domain_id = self.domain_id)
         return tr
 
     def select_fwidth(self, dobj):
-        d = self.oct_handler.fwidth(self.selector)
+        d = self.oct_handler.fwidth(self.selector, domain_id = self.domain_id)
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
                                             domain_id = self.domain_id)
         return tr
 
     def select_ires(self, dobj):
-        d = self.oct_handler.ires(self.selector)
+        d = self.oct_handler.ires(self.selector, domain_id = self.domain_id)
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 1,
                                             domain_id = self.domain_id)
         return tr

diff -r e9ab67bfd5695e0f0b27f320388854120c4ced7c -r 3959497fa6a3f90e117788dcd5f570296387ad7e yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -345,10 +345,10 @@
 
     def _identify_base_chunk(self, dobj):
         if getattr(dobj, "_chunk_info", None) is None:
-            mask = dobj.selector.select_octs(self.oct_handler)
             base_region = getattr(dobj, "base_region", dobj)
             # Note that domain_ids will be ONE INDEXED
             domain_ids = self.oct_handler.domain_identify(dobj.selector)
+            mylog.debug("Identified %s intersecting domains", len(domain_ids))
             subsets = [RAMSESDomainSubset(base_region, self.domains[did - 1],
                                           self.parameter_file)
                        for did in domain_ids]

diff -r e9ab67bfd5695e0f0b27f320388854120c4ced7c -r 3959497fa6a3f90e117788dcd5f570296387ad7e yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -307,43 +307,49 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def icoords(self, SelectorObject selector, np.uint64_t num_cells = -1):
+    def icoords(self, SelectorObject selector, np.uint64_t num_cells = -1,
+                int domain_id = -1):
         if num_cells == -1:
-            num_cells = selector.count_octs(self)
+            num_cells = selector.count_octs(self, domain_id)
         cdef np.ndarray[np.int64_t, ndim=2] coords
         coords = np.empty((num_cells, 3), dtype="int64")
         cdef OctVisitorData data
         data.array = <void *> coords.data
         data.index = 0
+        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.icoords_octs, &data)
         return coords
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def ires(self, SelectorObject selector, np.uint64_t num_cells = -1):
+    def ires(self, SelectorObject selector, np.uint64_t num_cells = -1,
+                int domain_id = -1):
         if num_cells == -1:
-            num_cells = selector.count_octs(self)
+            num_cells = selector.count_octs(self, domain_id)
         #Return the 'resolution' of each cell; ie the level
         cdef np.ndarray[np.int64_t, ndim=1] res
         res = np.empty(num_cells, dtype="int64")
         cdef OctVisitorData data
         data.array = <void *> res.data
         data.index = 0
+        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.ires_octs, &data)
         return res
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def fwidth(self, SelectorObject selector, np.uint64_t num_cells = -1):
+    def fwidth(self, SelectorObject selector, np.uint64_t num_cells = -1,
+                int domain_id = -1):
         if num_cells == -1:
-            num_cells = selector.count_octs(self)
+            num_cells = selector.count_octs(self, domain_id)
         cdef np.ndarray[np.float64_t, ndim=2] fwidth
         fwidth = np.empty((num_cells, 3), dtype="float64")
         cdef OctVisitorData data
         data.array = <void *> fwidth.data
         data.index = 0
+        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.fwidth_octs, &data)
         cdef np.float64_t base_dx
         for i in range(3):
@@ -354,15 +360,17 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def fcoords(self, SelectorObject selector, np.uint64_t num_cells = -1):
+    def fcoords(self, SelectorObject selector, np.uint64_t num_cells = -1,
+                int domain_id = -1):
         if num_cells == -1:
-            num_cells = selector.count_octs(self)
+            num_cells = selector.count_octs(self, domain_id)
         #Return the floating point unitary position of every cell
         cdef np.ndarray[np.float64_t, ndim=2] coords
         coords = np.empty((num_cells, 3), dtype="float64")
         cdef OctVisitorData data
         data.array = <void *> coords.data
         data.index = 0
+        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.fcoords_octs, &data)
         cdef int i
         cdef np.float64_t base_dx
@@ -382,7 +390,7 @@
         # number of octs total visited.
         cdef np.int64_t num_cells = -1
         if dest is None:
-            num_cells = selector.count_octs(self)
+            num_cells = selector.count_octs(self, domain_id)
             if dims > 1:
                 dest = np.zeros((num_cells, dims), dtype=source.dtype,
                     order='C')
@@ -407,6 +415,14 @@
         else:
             raise NotImplementedError
         self.visit_all_octs(selector, func, &data)
+        if (data.global_index + 1) * 8 * data.dims > source.size:
+            print "GLOBAL INDEX RAN AHEAD.",
+            print (data.global_index + 1) * 8 * data.dims - source.size
+            raise RuntimeError
+        if data.index > dest.size:
+            print "DEST INDEX RAN AHEAD.",
+            print data.index - dest.size
+            raise RuntimeError
         if num_cells >= 0:
             return dest
         return data.index - offset
@@ -418,6 +434,7 @@
         domain_mask = np.zeros(self.max_domain, dtype="uint8")
         cdef OctVisitorData data
         data.array = domain_mask.data
+        data.domain = -1
         self.visit_all_octs(selector, oct_visitors.identify_octs, &data)
         cdef int i
         domain_ids = []
@@ -426,7 +443,6 @@
                 domain_ids.append(i+1)
         return domain_ids
 
-
     cdef np.int64_t get_domain_offset(self, int domain_id):
         cdef OctAllocationContainer *cont = self.domains[domain_id - 1]
         return cont.offset
@@ -560,11 +576,12 @@
                             local_filled += 1
         return local_filled
 
-    def domain_ind(self, selector):
+    def domain_ind(self, selector, int domain_id = -1):
         cdef np.ndarray[np.int64_t, ndim=1] ind
         # Here's where we grab the masked items.
         ind = np.zeros(self.nocts, 'int64') - 1
         cdef OctVisitorData data
+        data.domain = domain_id
         data.array = ind.data
         data.index = 0
         data.last = -1

diff -r e9ab67bfd5695e0f0b27f320388854120c4ced7c -r 3959497fa6a3f90e117788dcd5f570296387ad7e yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -62,6 +62,7 @@
 cdef void count_total_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
     # Count even if not selected.
     # Number of *octs* visited.
+    if data.domain > 0 and o.domain != data.domain: return
     if data.last != o.domain_ind:
         data.index += 1
         data.last = o.domain_ind

diff -r e9ab67bfd5695e0f0b27f320388854120c4ced7c -r 3959497fa6a3f90e117788dcd5f570296387ad7e yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -146,6 +146,7 @@
     @cython.cdivision(True)
     def select_octs(self, OctreeContainer octree):
         # There has to be a better way to do this.
+        raise RuntimeError
         cdef OctVisitorData data
         data.index = 0
         data.last = -1
@@ -161,9 +162,10 @@
         octree.visit_all_octs(self, oct_visitors.mask_octs, &data)
         return m2.astype("bool")
 
-    def count_octs(self, OctreeContainer octree):
+    def count_octs(self, OctreeContainer octree, int domain_id = -1):
         cdef OctVisitorData data
         data.index = 0
+        data.domain = domain_id
         octree.visit_all_octs(self, oct_visitors.count_total_cells, &data)
         return data.index
 
@@ -188,28 +190,29 @@
             RE[i] = pos[i] + dds[i]/2.0
         #print LE[0], RE[0], LE[1], RE[1], LE[2], RE[2]
         res = self.select_grid(LE, RE, level, root)
+        if res == 1 and data.domain > 0 and root.domain != data.domain:
+            res = -1
         cdef int eterm[3] 
+        cdef int increment = 1
         eterm[0] = eterm[1] = eterm[2] = 0
         cdef int next_level, this_level
         # next_level: an int that says whether or not we can progress to children
         # this_level: an int that says whether or not we can select from this
         # level
         next_level = this_level = 1
+        if res == -1:
+            # This happens when we do domain selection but the oct has
+            # children.  This would allow an oct to pass to its children but
+            # not get accessed itself.
+            next_level = 1
+            this_level = 0
+            increment = 0
         if level == self.max_level:
             next_level = 0
         if level < self.min_level or level > self.max_level:
             this_level = 0
         if res == 0 and this_level == 1:
             return
-        if res == -1: 
-            # This happens when we do domain selection but the oct has
-            # children.  This would allow an oct to pass to its children but
-            # not get accessed itself.
-            next_level = 1
-            this_level = 0
-        cdef int increment = 1
-        if data.domain > 0 and root.domain != data.domain:
-            increment = 0
         # Now we visit all our children.  We subtract off sdds for the first
         # pass because we center it on the first cell.
         spos[0] = pos[0] - sdds[0]/2.0
@@ -1150,13 +1153,12 @@
                          Oct *o = NULL) nogil:
         # Because visitors now use select_grid, we should be explicitly
         # checking this.
-        cdef int any_children = 0
-        cdef int i, j, k
+        cdef int res
         if o == NULL: return 0
-        cdef int res
         res = self.base_selector.select_grid(left_edge, right_edge, level, o)
-        if res == 1 and o.domain != self.domain_id: return -1
-        return 1
+        if res != 0 and o.domain != self.domain_id:
+            res = -1
+        return res
 
 octree_subset_selector = OctreeSubsetSelector
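
The overrun checks added to selector_fill encode the sizing arithmetic for oct data: each visited oct contributes 8 cells, and each cell contributes `dims` values, so both the source and destination indices can be bounds-checked after visiting. A standalone sketch of those two checks (names hypothetical):

    import numpy as np

    def check_fill_bounds(global_index, index, dims, source, dest):
        # (global_index + 1) octs visited, 8 cells each, dims values per cell
        consumed = (global_index + 1) * 8 * dims
        if consumed > source.size:
            raise RuntimeError("global index ran ahead by %d"
                               % (consumed - source.size))
        if index > dest.size:
            raise RuntimeError("dest index ran ahead by %d"
                               % (index - dest.size))

    source = np.zeros(8 * 3 * 4)   # 4 octs, 8 cells each, dims == 3
    dest = np.zeros(10)
    check_fill_bounds(global_index=3, index=10, dims=3,
                      source=source, dest=dest)  # fine: exactly at the limits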
 


https://bitbucket.org/yt_analysis/yt/commits/ff8d85b3c294/
Changeset:   ff8d85b3c294
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-21 23:50:22
Summary:     Beginning conversion of RAMSES to distributed memory.
Affected #:  4 files

diff -r 3959497fa6a3f90e117788dcd5f570296387ad7e -r ff8d85b3c294be269af13380e3aa881c1df0adcc yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -53,7 +53,7 @@
         self.domain_id = domain.domain_id
         self.pf = domain.pf
         self.hierarchy = self.pf.hierarchy
-        self.oct_handler = domain.pf.h.oct_handler
+        self.oct_handler = domain.oct_handler
         self._last_mask = None
         self._last_selector_id = None
         self._current_particle_type = 'all'

diff -r 3959497fa6a3f90e117788dcd5f570296387ad7e -r ff8d85b3c294be269af13380e3aa881c1df0adcc yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -69,6 +69,7 @@
             setattr(self, "%s_fn" % t, basename % t)
         self._read_amr_header()
         self._read_particle_header()
+        self._read_amr()
 
     _hydro_offset = None
     _level_count = None
@@ -183,21 +184,26 @@
         self.amr_header = hvals
         self.amr_offset = f.tell()
         self.local_oct_count = hvals['numbl'][self.pf.min_level:, self.domain_id - 1].sum()
+        self.total_oct_count = hvals['numbl'][self.pf.min_level:,:].sum(axis=0)
 
-    def _read_amr(self, oct_handler):
+    def _read_amr(self):
         """Open the oct file, read in octs level-by-level.
            For each oct, only the position, index, level and domain 
            are needed - its position in the octree is found automatically.
            The most important is finding all the information to feed
            oct_handler.add
         """
+        self.oct_handler = RAMSESOctreeContainer(self.pf.domain_dimensions/2,
+                self.pf.domain_left_edge, self.pf.domain_right_edge)
+        root_nodes = self.amr_header['numbl'][self.pf.min_level,:].sum()
+        self.oct_handler.allocate_domains(self.total_oct_count, root_nodes)
         fb = open(self.amr_fn, "rb")
         fb.seek(self.amr_offset)
         f = cStringIO.StringIO()
         f.write(fb.read())
         f.seek(0)
         mylog.debug("Reading domain AMR % 4i (%0.3e, %0.3e)",
-            self.domain_id, self.local_oct_count, self.ngridbound.sum())
+            self.domain_id, self.total_oct_count.sum(), self.ngridbound.sum())
         def _ng(c, l):
             if c < self.amr_header['ncpu']:
                 ng = self.amr_header['numbl'][l, c]
@@ -236,26 +242,18 @@
                 #    rmap[:,i] = fpu.read_vector(f, "I")
                 # We don't want duplicate grids.
                 # Note that we're adding *grids*, not individual cells.
-                if level >= min_level and cpu + 1 >= self.domain_id: 
+                if level >= min_level:
                     assert(pos.shape[0] == ng)
                     if cpu + 1 == self.domain_id:
                         total += ng
-                    oct_handler.add(cpu + 1, level - min_level, ng, pos, 
-                                    self.domain_id)
+                    self.oct_handler.add(cpu + 1, level - min_level,
+                                         pos, self.domain_id)
+        self.oct_handler.finalize()
+        #raise RuntimeError
 
-    def select(self, selector):
-        if id(selector) == self._last_selector_id:
-            return self._last_mask
-        self._last_mask = selector.fill_mask(self)
-        self._last_selector_id = id(selector)
-        return self._last_mask
-
-    def count(self, selector):
-        if id(selector) == self._last_selector_id:
-            if self._last_mask is None: return 0
-            return self._last_mask.sum()
-        self.select(selector)
-        return self.count(selector)
+    def included(self, selector):
+        domain_ids = self.oct_handler.domain_identify(selector)
+        return self.domain_id in domain_ids
 
 class RAMSESDomainSubset(OctreeSubset):
 
@@ -314,21 +312,6 @@
         total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
                          for dom in self.domains)
         self.num_grids = total_octs
-        #this merely allocates space for the oct tree
-        #and nothing else
-        self.oct_handler = RAMSESOctreeContainer(
-            self.parameter_file.domain_dimensions/2,
-            self.parameter_file.domain_left_edge,
-            self.parameter_file.domain_right_edge)
-        mylog.debug("Allocating %s octs", total_octs)
-        self.oct_handler.allocate_domains(
-            [dom.local_oct_count #+ dom.ngridbound.sum()
-             for dom in self.domains])
-        #this actually reads every oct and loads it into the octree
-        for dom in self.domains:
-            dom._read_amr(self.oct_handler)
-        #for dom in self.domains:
-        #    self.oct_handler.check(dom.domain_id)
 
     def _detect_fields(self):
         # TODO: Add additional fields
@@ -347,11 +330,11 @@
         if getattr(dobj, "_chunk_info", None) is None:
             base_region = getattr(dobj, "base_region", dobj)
             # Note that domain_ids will be ONE INDEXED
-            domain_ids = self.oct_handler.domain_identify(dobj.selector)
+            domains = [dom for dom in self.domains if
+                       dom.included(dobj.selector)]
             mylog.debug("Identified %s intersecting domains", len(domain_ids))
-            subsets = [RAMSESDomainSubset(base_region, self.domains[did - 1],
-                                          self.parameter_file)
-                       for did in domain_ids]
+            subsets = [RAMSESDomainSubset(base_region, domain, self.parameter_file)
+                       for domain in domains]
             dobj._chunk_info = subsets
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
 

diff -r 3959497fa6a3f90e117788dcd5f570296387ad7e -r ff8d85b3c294be269af13380e3aa881c1df0adcc yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -28,6 +28,7 @@
 from selection_routines cimport SelectorObject, \
     OctVisitorData, oct_visitor_function
 from oct_visitors cimport *
+from libc.stdlib cimport bsearch, qsort
 
 cdef int ORDER_MAX
 
@@ -65,6 +66,7 @@
     cdef public int nocts
     cdef public int max_domain
     cdef Oct* get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
+    cdef Oct *get_root(self, int ind[3])
     cdef void neighbors(self, Oct *, Oct **)
     cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
     # This function must return the offset from global-to-local domains; i.e.,
@@ -76,6 +78,8 @@
 
 cdef class RAMSESOctreeContainer(OctreeContainer):
     cdef OctAllocationContainer **domains
+    cdef Oct **root_nodes
+    cdef int num_root
     cdef Oct *next_root(self, int domain_id, int ind[3])
     cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
 

diff -r 3959497fa6a3f90e117788dcd5f570296387ad7e -r ff8d85b3c294be269af13380e3aa881c1df0adcc yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -173,13 +173,16 @@
     cdef np.int64_t get_domain_offset(self, int domain_id):
         return 0
 
+    cdef Oct *get_root(self, int ind[3]):
+        return self.root_mesh[ind[0]][ind[1]][ind[2]]
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
     cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = NULL):
         #Given a floating point position, retrieve the most
         #refined oct at that time
-        cdef np.int64_t ind[3]
+        cdef int ind[3]
         cdef np.float64_t dds[3], cp[3], pp[3]
         cdef Oct *cur
         cdef int i
@@ -187,7 +190,7 @@
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
             ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
             cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
-        next = self.root_mesh[ind[0]][ind[1]][ind[2]]
+        next = self.get_root(ind)
         # We want to stop recursing when there's nowhere else to go
         while next != NULL:
             cur = next
@@ -427,8 +430,86 @@
             return dest
         return data.index - offset
 
+cdef int root_key_compare(void *key, void *member) nogil:
+    cdef int i
+    cdef np.int64_t vkey = 0
+    cdef Oct *o = <Oct*>member
+    cdef np.int64_t fkey = (<np.int64_t *>key)[0]
+    for i in range(3):
+        vkey |= (o.pos[i] << 20 * (2 - i))
+    if vkey < fkey:
+        return -1
+    elif vkey == fkey:
+        return 0
+    else:
+        return 1
+
+cdef int root_node_compare(void *a, void *b) nogil:
+    cdef int i
+    cdef np.int64_t akey, bkey
+    cdef Oct *ao, *bo
+    ao = <Oct *>a
+    bo = <Oct *>b
+    for i in range(3):
+        akey |= (ao.pos[i] << 20 * (2 - i))
+        bkey |= (bo.pos[i] << 20 * (2 - i))
+    if akey < bkey:
+        return -1
+    elif akey == bkey:
+        return 0
+    else:
+        return 1
+
 cdef class RAMSESOctreeContainer(OctreeContainer):
 
+    def __init__(self, domain_dimensions, domain_left_edge, domain_right_edge):
+        cdef int i, j, k, p
+        for i in range(3):
+            self.nn[i] = domain_dimensions[i]
+        self.max_domain = -1
+        self.nocts = 0 # Increment when initialized
+        self.root_mesh = NULL
+        self.root_nodes = NULL
+        self.num_root = 0
+        # We don't initialize the octs yet
+        for i in range(3):
+            self.DLE[i] = domain_left_edge[i] #0
+            self.DRE[i] = domain_right_edge[i] #num_grid
+
+    def __dealloc__(self):
+        free_octs(self.cont)
+
+    def finalize(self):
+        return
+
+    cdef Oct *get_root(self, int ind[3]):
+        cdef int i
+        cdef np.int64_t key = 0
+        for i in range(3):
+            key |= (ind[i] << 20 * (2 - i))
+        cdef Oct *o = <Oct *> bsearch(&key, self.root_nodes[0],
+                self.num_root, sizeof(Oct *), root_key_compare)
+        return o
+
+    @cython.cdivision(True)
+    cdef void visit_all_octs(self, SelectorObject selector,
+                        oct_visitor_function *func,
+                        OctVisitorData *data):
+        cdef int i, j, k, n
+        data.global_index = -1
+        cdef np.float64_t pos[3], dds[3]
+        # This dds is the oct-width
+        for i in range(3):
+            dds[i] = (self.DRE[i] - self.DLE[i]) / self.nn[i]
+        # Pos is the center of the octs
+        cdef Oct *o
+        for i in range(self.num_root):
+            o = self.root_nodes[i]
+            for j in range(3):
+                pos[0] = self.DLE[0] + (o.pos[0] + 0.5) * dds[0]
+                selector.recursively_visit_octs(
+                    o, pos, dds, 0, func, data)
+
     def domain_identify(self, SelectorObject selector):
         cdef np.ndarray[np.uint8_t, ndim=1] domain_mask
         domain_mask = np.zeros(self.max_domain, dtype="uint8")
@@ -448,18 +529,24 @@
         return cont.offset
 
     cdef Oct* next_root(self, int domain_id, int ind[3]):
-        cdef Oct *next = self.root_mesh[ind[0]][ind[1]][ind[2]]
+        # We assume that 20 bits is enough for each index.
+        cdef int i
+        next = self.get_root(ind)
         if next != NULL: return next
+        # Otherwise, we'll have to insert and then qsort
         cdef OctAllocationContainer *cont = self.domains[domain_id - 1]
         if cont.n_assigned >= cont.n: raise RuntimeError
         next = &cont.my_octs[cont.n_assigned]
         cont.n_assigned += 1
-        self.root_mesh[ind[0]][ind[1]][ind[2]] = next
+        self.root_nodes[self.num_root] = next
+        self.num_root += 1
         next.parent = NULL
         next.level = 0
         for i in range(3):
             next.pos[i] = ind[i]
         self.nocts += 1
+        qsort(self.root_nodes, self.num_root, sizeof(Oct *),
+              root_node_compare)
         return next
 
     cdef Oct* next_child(self, int domain_id, int ind[3], Oct *parent):
@@ -477,7 +564,7 @@
         self.nocts += 1
         return next
 
-    def allocate_domains(self, domain_counts):
+    def allocate_domains(self, domain_counts, int root_nodes):
         cdef int count, i
         cdef OctAllocationContainer *cur = self.cont
         assert(cur == NULL)
@@ -488,20 +575,23 @@
             cur = allocate_octs(count, cur)
             if self.cont == NULL: self.cont = cur
             self.domains[i] = cur
+        self.root_nodes = <Oct**> malloc(sizeof(Oct*) * root_nodes)
+        for i in range(root_nodes):
+            self.root_nodes[i] = NULL
         
     def __dealloc__(self):
         # This gets called BEFORE the superclass deallocation.  But, both get
         # called.
+        if self.root_nodes != NULL: free(self.root_nodes)
         if self.domains != NULL: free(self.domains)
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def add(self, int curdom, int curlevel, int ng,
+    def add(self, int curdom, int curlevel,
             np.ndarray[np.float64_t, ndim=2] pos,
-            int local_domain, int skip_boundary = 1):
+            int skip_boundary = 1):
         cdef int level, no, p, i, j, k, ind[3]
-        cdef int local = (local_domain == curdom)
         cdef Oct *cur, *next = NULL
         cdef np.float64_t pp[3], cp[3], dds[3]
         no = pos.shape[0] #number of octs
@@ -541,8 +631,7 @@
                 cur = self.next_child(curdom, ind, cur)
             # Now we should be at the right level
             cur.domain = curdom
-            if local == 1:
-                cur.file_ind = p
+            cur.file_ind = p
             cur.level = curlevel
         return cont.n_assigned - initial
 


https://bitbucket.org/yt_analysis/yt/commits/eafad7983797/
Changeset:   eafad7983797
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-22 02:27:43
Summary:     This allows the RAMSES oct container to run.
Affected #:  1 file

diff -r ff8d85b3c294be269af13380e3aa881c1df0adcc -r eafad7983797c4f5f7a1733b6e2a53f54dad0e60 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -212,7 +212,6 @@
                                 self.amr_header['nboundary']*l]
             return ng
         min_level = self.pf.min_level
-        total = 0
         nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
         for level in range(self.amr_header['nlevelmax']):
             # Easier if do this 1-indexed
@@ -244,10 +243,7 @@
                 # Note that we're adding *grids*, not individual cells.
                 if level >= min_level:
                     assert(pos.shape[0] == ng)
-                    if cpu + 1 == self.domain_id:
-                        total += ng
-                    self.oct_handler.add(cpu + 1, level - min_level,
-                                         pos, self.domain_id)
+                    self.oct_handler.add(cpu + 1, level - min_level, pos)
         self.oct_handler.finalize()
         #raise RuntimeError
 


https://bitbucket.org/yt_analysis/yt/commits/01540ae0e5fd/
Changeset:   01540ae0e5fd
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-22 05:21:20
Summary:     Switching to using tsearch.  Still lingering issues.
Affected #:  3 files

diff -r eafad7983797c4f5f7a1733b6e2a53f54dad0e60 -r 01540ae0e5fd94ff886d88616ade01c5e0f34f84 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -245,7 +245,6 @@
                     assert(pos.shape[0] == ng)
                     self.oct_handler.add(cpu + 1, level - min_level, pos)
         self.oct_handler.finalize()
-        #raise RuntimeError
 
     def included(self, selector):
         domain_ids = self.oct_handler.domain_identify(selector)
@@ -459,7 +458,7 @@
         self.omega_lambda = rheader["omega_l"]
         self.omega_matter = rheader["omega_m"]
         self.hubble_constant = rheader["H0"] / 100.0 # This is H100
-        self.max_level = rheader['levelmax'] - rheader['levelmin']
+        self.max_level = rheader['levelmax'] - self.min_level
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

diff -r eafad7983797c4f5f7a1733b6e2a53f54dad0e60 -r 01540ae0e5fd94ff886d88616ade01c5e0f34f84 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -46,6 +46,10 @@
     Oct *children[2][2][2]
     Oct *parent
 
+cdef struct OctKey:
+    np.int64_t key
+    Oct *node
+
 cdef struct OctInfo:
     np.float64_t left_edge[3]
     np.float64_t dds[3]
@@ -65,8 +69,8 @@
     cdef np.float64_t DLE[3], DRE[3]
     cdef public int nocts
     cdef public int max_domain
-    cdef Oct* get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
-    cdef Oct *get_root(self, int ind[3])
+    cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
+    cdef int get_root(self, int ind[3], Oct **o)
     cdef void neighbors(self, Oct *, Oct **)
     cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
     # This function must return the offset from global-to-local domains; i.e.,
@@ -78,8 +82,25 @@
 
 cdef class RAMSESOctreeContainer(OctreeContainer):
     cdef OctAllocationContainer **domains
-    cdef Oct **root_nodes
+    cdef OctKey *root_nodes
+    cdef void *tree_root
     cdef int num_root
+    cdef int max_root
     cdef Oct *next_root(self, int domain_id, int ind[3])
     cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
 
+cdef extern from "search.h" nogil:
+    void *tsearch(const void *key, void **rootp,
+                    int (*compar)(const void *, const void *))
+    void *tfind(const void *key, const void **rootp,
+                    int (*compar)(const void *, const void *))
+    void *tdelete(const void *key, void **rootp,
+                    int (*compar)(const void *, const void *))
+
+cdef inline np.int64_t oct_key(Oct *o):
+    cdef int i
+    if o.level != 0: return -1
+    cdef np.int64_t key = 0
+    for i in range(3):
+        key |= (o.pos[i] << 20 * (2 - i))
+    return key

diff -r eafad7983797c4f5f7a1733b6e2a53f54dad0e60 -r 01540ae0e5fd94ff886d88616ade01c5e0f34f84 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -173,8 +173,9 @@
     cdef np.int64_t get_domain_offset(self, int domain_id):
         return 0
 
-    cdef Oct *get_root(self, int ind[3]):
-        return self.root_mesh[ind[0]][ind[1]][ind[2]]
+    cdef int get_root(self, int ind[3], Oct **o):
+        o[0] = self.root_mesh[ind[0]][ind[1]][ind[2]]
+        return 1
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -184,13 +185,13 @@
         #refined oct at that time
         cdef int ind[3]
         cdef np.float64_t dds[3], cp[3], pp[3]
-        cdef Oct *cur
+        cdef Oct *cur, *next
         cdef int i
         for i in range(3):
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
             ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
             cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
-        next = self.get_root(ind)
+        self.get_root(ind, &next)
         # We want to stop recursing when there's nowhere else to go
         while next != NULL:
             cur = next
@@ -430,32 +431,13 @@
             return dest
         return data.index - offset
 
-cdef int root_key_compare(void *key, void *member) nogil:
-    cdef int i
-    cdef np.int64_t vkey = 0
-    cdef Oct *o = <Oct*>member
-    cdef np.int64_t fkey = (<np.int64_t *>key)[0]
-    for i in range(3):
-        vkey |= (o.pos[i] << 20 * (2 - i))
-    if vkey < fkey:
+cdef int root_node_compare(void *a, void *b) nogil:
+    cdef OctKey *ao, *bo
+    ao = <OctKey *>a
+    bo = <OctKey *>b
+    if ao.key < bo.key:
         return -1
-    elif vkey == fkey:
-        return 0
-    else:
-        return 1
-
-cdef int root_node_compare(void *a, void *b) nogil:
-    cdef int i
-    cdef np.int64_t akey, bkey
-    cdef Oct *ao, *bo
-    ao = <Oct *>a
-    bo = <Oct *>b
-    for i in range(3):
-        akey |= (ao.pos[i] << 20 * (2 - i))
-        bkey |= (bo.pos[i] << 20 * (2 - i))
-    if akey < bkey:
-        return -1
-    elif akey == bkey:
+    elif ao.key == bo.key:
         return 0
     else:
         return 1
@@ -470,26 +452,29 @@
         self.nocts = 0 # Increment when initialized
         self.root_mesh = NULL
         self.root_nodes = NULL
+        self.tree_root = NULL
         self.num_root = 0
         # We don't initialize the octs yet
         for i in range(3):
             self.DLE[i] = domain_left_edge[i] #0
             self.DRE[i] = domain_right_edge[i] #num_grid
 
-    def __dealloc__(self):
-        free_octs(self.cont)
-
     def finalize(self):
         return
 
-    cdef Oct *get_root(self, int ind[3]):
+    cdef int get_root(self, int ind[3], Oct **o):
+        o[0] = NULL
         cdef int i
         cdef np.int64_t key = 0
         for i in range(3):
             key |= (ind[i] << 20 * (2 - i))
-        cdef Oct *o = <Oct *> bsearch(&key, self.root_nodes[0],
-                self.num_root, sizeof(Oct *), root_key_compare)
-        return o
+        cdef OctKey okey, *oresult
+        okey.key = key
+        okey.node = NULL
+        oresult = <OctKey *> tfind(<void*>&okey,
+            &self.tree_root, root_node_compare)
+        if oresult != NULL:
+            o[0] = oresult.node
 
     @cython.cdivision(True)
     cdef void visit_all_octs(self, SelectorObject selector,
@@ -504,7 +489,7 @@
         # Pos is the center of the octs
         cdef Oct *o
         for i in range(self.num_root):
-            o = self.root_nodes[i]
+            o = self.root_nodes[i].node
             for j in range(3):
                 pos[0] = self.DLE[0] + (o.pos[0] + 0.5) * dds[0]
                 selector.recursively_visit_octs(
@@ -531,22 +516,23 @@
     cdef Oct* next_root(self, int domain_id, int ind[3]):
         # We assume that 20 bits is enough for each index.
         cdef int i
-        next = self.get_root(ind)
+        cdef Oct *next
+        self.get_root(ind, &next)
         if next != NULL: return next
-        # Otherwise, we'll have to insert and then qsort
         cdef OctAllocationContainer *cont = self.domains[domain_id - 1]
-        if cont.n_assigned >= cont.n: raise RuntimeError
+        if cont.n_assigned >= cont.n: return NULL
         next = &cont.my_octs[cont.n_assigned]
         cont.n_assigned += 1
-        self.root_nodes[self.num_root] = next
-        self.num_root += 1
         next.parent = NULL
         next.level = 0
         for i in range(3):
             next.pos[i] = ind[i]
         self.nocts += 1
-        qsort(self.root_nodes, self.num_root, sizeof(Oct *),
-              root_node_compare)
+        self.root_nodes[self.num_root].key = oct_key(next)
+        self.root_nodes[self.num_root].node = next
+        tsearch(<void*>&self.root_nodes[self.num_root], &self.tree_root,
+                root_node_compare)
+        self.num_root += 1
         return next
 
     cdef Oct* next_child(self, int domain_id, int ind[3], Oct *parent):
@@ -575,13 +561,16 @@
             cur = allocate_octs(count, cur)
             if self.cont == NULL: self.cont = cur
             self.domains[i] = cur
-        self.root_nodes = <Oct**> malloc(sizeof(Oct*) * root_nodes)
+        self.root_nodes = <OctKey*> malloc(sizeof(OctKey) * root_nodes)
+        self.max_root = root_nodes
         for i in range(root_nodes):
-            self.root_nodes[i] = NULL
+            self.root_nodes[i].key = -1
+            self.root_nodes[i].node = NULL
         
     def __dealloc__(self):
         # This gets called BEFORE the superclass deallocation.  But, both get
         # called.
+        free_octs(self.cont)
         if self.root_nodes != NULL: free(self.root_nodes)
         if self.domains != NULL: free(self.domains)
 
@@ -613,6 +602,7 @@
                     in_boundary = 1
             if skip_boundary == in_boundary == 1: continue
             cur = self.next_root(curdom, ind)
+            if cur == NULL: raise RuntimeError
             # Now we find the location we want
+            # Note that RAMSES, I think, uses 1-based level indexing, but we don't.
             for level in range(curlevel):
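
tsearch/tfind replace the earlier append-then-qsort scheme with a single tree insert per new root, while a flat preallocated array is kept alongside for iteration. A Python analogue of next_root's lookup-or-insert logic, with a dict standing in for the C search tree and all structure names hypothetical:

    def pack_key(ind):
        # same 20-bits-per-axis packing the Cython code uses
        return (ind[0] << 40) | (ind[1] << 20) | ind[2]

    def next_root(container, ind):
        key = pack_key(ind)
        node = container["tree"].get(key)      # tfind: lookup
        if node is not None:
            return node
        if container["num_root"] >= container["max_root"]:
            return None                        # preallocated slots exhausted
        node = {"pos": list(ind), "level": 0}  # stand-in for a fresh Oct
        container["tree"][key] = node          # tsearch: insert
        container["roots"].append(node)        # flat array kept for iteration
        container["num_root"] += 1
        return node

    container = {"tree": {}, "roots": [], "num_root": 0, "max_root": 8}
    a = next_root(container, (1, 2, 3))
    b = next_root(container, (1, 2, 3))
    print(a is b, container["num_root"])  # True 1 -- second call is a lookup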


https://bitbucket.org/yt_analysis/yt/commits/fc1946874090/
Changeset:   fc1946874090
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-22 05:58:11
Summary:     Inserting and sorting now work for disconnected RAMSES octree subsets.
Affected #:  3 files

diff -r 01540ae0e5fd94ff886d88616ade01c5e0f34f84 -r fc1946874090f5c420f98c457e83d80977c03ee4 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -327,7 +327,7 @@
             # Note that domain_ids will be ONE INDEXED
             domains = [dom for dom in self.domains if
                        dom.included(dobj.selector)]
-            mylog.debug("Identified %s intersecting domains", len(domain_ids))
+            mylog.debug("Identified %s intersecting domains", len(domains))
             subsets = [RAMSESDomainSubset(base_region, domain, self.parameter_file)
                        for domain in domains]
             dobj._chunk_info = subsets

diff -r 01540ae0e5fd94ff886d88616ade01c5e0f34f84 -r fc1946874090f5c420f98c457e83d80977c03ee4 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -96,11 +96,3 @@
                     int (*compar)(const void *, const void *))
     void *tdelete(const void *key, void **rootp,
                     int (*compar)(const void *, const void *))
-
-cdef inline np.int64_t oct_key(Oct *o):
-    cdef int i
-    if o.level != 0: return -1
-    cdef np.int64_t key = 0
-    for i in range(3):
-        key |= (o.pos[i] << 20 * (2 - i))
-    return key

diff -r 01540ae0e5fd94ff886d88616ade01c5e0f34f84 -r fc1946874090f5c420f98c457e83d80977c03ee4 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -117,13 +117,13 @@
 
     def __dealloc__(self):
         free_octs(self.cont)
+        if self.root_mesh == NULL: return
         for i in range(self.nn[0]):
             for j in range(self.nn[1]):
                 if self.root_mesh[i][j] == NULL: continue
                 free(self.root_mesh[i][j])
             if self.root_mesh[i] == NULL: continue
             free(self.root_mesh[i])
-        if self.root_mesh == NULL: return
         free(self.root_mesh)
 
     def __iter__(self):
@@ -468,13 +468,13 @@
         cdef np.int64_t key = 0
         for i in range(3):
             key |= (ind[i] << 20 * (2 - i))
-        cdef OctKey okey, *oresult
+        cdef OctKey okey, **oresult
         okey.key = key
         okey.node = NULL
-        oresult = <OctKey *> tfind(<void*>&okey,
+        oresult = <OctKey **> tfind(<void*>&okey,
             &self.tree_root, root_node_compare)
         if oresult != NULL:
-            o[0] = oresult.node
+            o[0] = oresult[0].node
 
     @cython.cdivision(True)
     cdef void visit_all_octs(self, SelectorObject selector,
@@ -521,18 +521,22 @@
         if next != NULL: return next
         cdef OctAllocationContainer *cont = self.domains[domain_id - 1]
         if cont.n_assigned >= cont.n: return NULL
+        if self.num_root >= self.max_root:
+            return NULL
         next = &cont.my_octs[cont.n_assigned]
         cont.n_assigned += 1
         next.parent = NULL
         next.level = 0
+        cdef np.int64_t key = 0
+        cdef OctKey *ikey = &self.root_nodes[self.num_root]
         for i in range(3):
             next.pos[i] = ind[i]
+            key |= (ind[i] << 20 * (2 - i))
+        self.root_nodes[self.num_root].key = key
+        self.root_nodes[self.num_root].node = next
+        tsearch(<void*>ikey, &self.tree_root, root_node_compare)
+        self.num_root += 1
         self.nocts += 1
-        self.root_nodes[self.num_root].key = oct_key(next)
-        self.root_nodes[self.num_root].node = next
-        tsearch(<void*>&self.root_nodes[self.num_root], &self.tree_root,
-                root_node_compare)
-        self.num_root += 1
         return next
 
     cdef Oct* next_child(self, int domain_id, int ind[3], Oct *parent):
@@ -570,7 +574,6 @@
     def __dealloc__(self):
         # This gets called BEFORE the superclass deallocation.  But, both get
         # called.
-        free_octs(self.cont)
         if self.root_nodes != NULL: free(self.root_nodes)
         if self.domains != NULL: free(self.domains)
 


https://bitbucket.org/yt_analysis/yt/commits/b0f74baf53c3/
Changeset:   b0f74baf53c3
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-22 06:02:29
Summary:     Minor fix to grid selection.
Affected #:  1 file

diff -r fc1946874090f5c420f98c457e83d80977c03ee4 -r b0f74baf53c3a667957cd09dbce56bf032362cd4 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -491,9 +491,9 @@
         for i in range(self.num_root):
             o = self.root_nodes[i].node
             for j in range(3):
-                pos[0] = self.DLE[0] + (o.pos[0] + 0.5) * dds[0]
-                selector.recursively_visit_octs(
-                    o, pos, dds, 0, func, data)
+                pos[j] = self.DLE[j] + (o.pos[j] + 0.5) * dds[j]
+            selector.recursively_visit_octs(
+                o, pos, dds, 0, func, data)
 
     def domain_identify(self, SelectorObject selector):
         cdef np.ndarray[np.uint8_t, ndim=1] domain_mask


https://bitbucket.org/yt_analysis/yt/commits/f20de60b7af3/
Changeset:   f20de60b7af3
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-22 15:22:11
Summary:     This change fixes things for -O2 and above in dist-mem RAMSES.

Copying arrays is still broken, which is unfortunate since it's the exact thing
I was trying to address when I started down the path of breaking up the Octree
containers!
Affected #:  1 file

diff -r b0f74baf53c3a667957cd09dbce56bf032362cd4 -r f20de60b7af3f9a0c716187292450fccd72d7d07 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -467,7 +467,7 @@
         cdef int i
         cdef np.int64_t key = 0
         for i in range(3):
-            key |= (ind[i] << 20 * (2 - i))
+            key |= ((<np.int64_t>ind[i]) << 20 * (2 - i))
         cdef OctKey okey, **oresult
         okey.key = key
         okey.node = NULL
@@ -520,8 +520,11 @@
         self.get_root(ind, &next)
         if next != NULL: return next
         cdef OctAllocationContainer *cont = self.domains[domain_id - 1]
-        if cont.n_assigned >= cont.n: return NULL
+        if cont.n_assigned >= cont.n:
+            print "Too many assigned."
+            return NULL
         if self.num_root >= self.max_root:
+            print "Too many roots."
             return NULL
         next = &cont.my_octs[cont.n_assigned]
         cont.n_assigned += 1
@@ -531,7 +534,7 @@
         cdef OctKey *ikey = &self.root_nodes[self.num_root]
         for i in range(3):
             next.pos[i] = ind[i]
-            key |= (ind[i] << 20 * (2 - i))
+            key |= ((<np.int64_t>ind[i]) << 20 * (2 - i))
         self.root_nodes[self.num_root].key = key
         self.root_nodes[self.num_root].node = next
         tsearch(<void*>ikey, &self.tree_root, root_node_compare)
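
The casts here matter because in C a 32-bit int shifted left by 20 to 40 bits overflows (formally undefined behavior) before the result can ever be widened, so the index has to become 64-bit first. A pure-Python illustration that emulates the 32-bit truncation with a mask:

    def shift_as_32_bit(value, bits):
        # what an unwidened C int computes at best: bits above 32 are lost
        return (value << bits) & 0xFFFFFFFF

    ind = 3
    print(shift_as_32_bit(ind, 40))  # 0 -- the key's high bits vanish
    print(ind << 40)                 # 3298534883328 -- the intended 64-bit key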


https://bitbucket.org/yt_analysis/yt/commits/01bc11c244dd/
Changeset:   01bc11c244dd
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-22 16:10:55
Summary:     This should be signed so we can have it be -1 without trouble.
Affected #:  1 file

diff -r f20de60b7af3f9a0c716187292450fccd72d7d07 -r 01bc11c244ddcd4a8013219a4f9e6dbd63727ffa yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -29,7 +29,7 @@
 cdef struct OctVisitorData:
     np.uint64_t index
     np.uint64_t last
-    np.uint64_t global_index
+    np.int64_t global_index
     int ind[3]
     void *array
     int dims
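
The type change in miniature: global_index starts at -1 as a "nothing visited yet" sentinel, which an unsigned 64-bit field silently turns into a huge positive number. A two-line demonstration via ctypes:

    import ctypes

    print(ctypes.c_uint64(-1).value)  # 18446744073709551615 -- no sentinel
    print(ctypes.c_int64(-1).value)   # -1 -- hence np.int64_t for global_index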


https://bitbucket.org/yt_analysis/yt/commits/c8f4e92a99e5/
Changeset:   c8f4e92a99e5
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-23 20:58:29
Summary:     Domain checking should only show up in the visitation function.
Affected #:  2 files

diff -r 01bc11c244ddcd4a8013219a4f9e6dbd63727ffa -r c8f4e92a99e5c981e6cbf32e08486de472d20b75 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -36,7 +36,6 @@
     # We should always have global_index less than our source.
     # "last" here tells us the dimensionality of the array.
     if selected == 0: return
-    if data.domain > 0 and o.domain != data.domain: return
     cdef int i
     # There are this many records between "octs"
     cdef np.int64_t index = (data.global_index * 8)*data.dims
@@ -50,7 +49,6 @@
     # We should always have global_index less than our source.
     # "last" here tells us the dimensionality of the array.
     if selected == 0: return
-    if data.domain > 0 and o.domain != data.domain: return
     cdef int i
     cdef np.int64_t index = (data.global_index * 8)*data.dims
     cdef np.int64_t **p = <np.int64_t**> data.array
@@ -62,7 +60,6 @@
 cdef void count_total_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
     # Count even if not selected.
     # Number of *octs* visited.
-    if data.domain > 0 and o.domain != data.domain: return
     if data.last != o.domain_ind:
         data.index += 1
         data.last = o.domain_ind
@@ -70,7 +67,6 @@
 cdef void count_total_cells(Oct *o, OctVisitorData *data, np.uint8_t selected):
     # Count even if not selected.
     # Number of *octs* visited.
-    if data.domain > 0 and o.domain != data.domain: return
     data.index += selected
 
 cdef void mark_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
@@ -96,7 +92,6 @@
     # Note that we provide an index even if the cell is not selected.
     cdef int i
     cdef np.int64_t *arr
-    if data.domain > 0 and data.domain != o.domain: return
     if data.last != o.domain_ind:
         data.last = o.domain_ind
         arr = <np.int64_t *> data.array

diff -r 01bc11c244ddcd4a8013219a4f9e6dbd63727ffa -r c8f4e92a99e5c981e6cbf32e08486de472d20b75 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -207,9 +207,9 @@
             next_level = 1
             this_level = 0
             increment = 0
-        if level == self.max_level:
+        elif level == self.max_level:
             next_level = 0
-        if level < self.min_level or level > self.max_level:
+        elif level < self.min_level or level > self.max_level:
             this_level = 0
         if res == 0 and this_level == 1:
             return
@@ -227,9 +227,9 @@
                         self.recursively_visit_octs(
                             ch, spos, sdds, level + 1, func, data)
                     elif this_level == 1:
+                        selected = self.select_cell(spos, sdds, eterm)
                         data.global_index += increment
                         increment = 0
-                        selected = self.select_cell(spos, sdds, eterm)
                         data.ind[0] = i
                         data.ind[1] = j
                         data.ind[2] = k
@@ -1110,11 +1110,9 @@
 grid_selector = GridSelector
 
 cdef class OctreeSubsetSelector(SelectorObject):
-    cdef int domain_id
     cdef SelectorObject base_selector
 
     def __init__(self, dobj):
-        self.domain_id = dobj.domain.domain_id
         self.base_selector = dobj.base_selector
 
     @cython.boundscheck(False)
@@ -1153,12 +1151,7 @@
                          Oct *o = NULL) nogil:
         # Because visitors now use select_grid, we should be explicitly
         # checking this.
-        cdef int res
-        if o == NULL: return 0
-        res = self.base_selector.select_grid(left_edge, right_edge, level, o)
-        if res != 0 and o.domain != self.domain_id:
-            res = -1
-        return res
+        return self.base_selector.select_grid(left_edge, right_edge, level, o)
 
 octree_subset_selector = OctreeSubsetSelector
 


https://bitbucket.org/yt_analysis/yt/commits/c841f83d89f4/
Changeset:   c841f83d89f4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-24 16:00:38
Summary:     Ditch the 'size' for spatial chunking.
Affected #:  1 file

diff -r c8f4e92a99e5c981e6cbf32e08486de472d20b75 -r c841f83d89f4b0964d879e1f4487a541571b1a0b yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -344,9 +344,7 @@
                 g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
             else:
                 g = og
-            size = og.cell_count
-            if size == 0: continue
-            yield YTDataChunk(dobj, "spatial", [g], size)
+            yield YTDataChunk(dobj, "spatial", [g], None)
 
     def _chunk_io(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)


https://bitbucket.org/yt_analysis/yt/commits/2089e4129601/
Changeset:   2089e4129601
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-24 16:53:57
Summary:     First step toward partial coverage of domains/octs for RAMSES.
Affected #:  7 files

diff -r c841f83d89f4b0964d879e1f4487a541571b1a0b -r 2089e412960103a0ca76966220d285eac65a1f32 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -101,7 +101,7 @@
     @property
     def domain_ind(self):
         if self._domain_ind is None:
-            di = self.oct_handler.domain_ind(self.mask, self.domain.domain_id)
+            di = self.oct_handler.domain_ind(self.selector)
             self._domain_ind = di
         return self._domain_ind
 
@@ -188,11 +188,3 @@
         self.base_region = base_region
         self.base_selector = base_region.selector
 
-    _domain_ind = None
-
-    @property
-    def domain_ind(self):
-        if self._domain_ind is None:
-            di = self.oct_handler.domain_ind(self.selector)
-            self._domain_ind = di
-        return self._domain_ind

diff -r c841f83d89f4b0964d879e1f4487a541571b1a0b -r 2089e412960103a0ca76966220d285eac65a1f32 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -243,7 +243,8 @@
                 # Note that we're adding *grids*, not individual cells.
                 if level >= min_level:
                     assert(pos.shape[0] == ng)
-                    self.oct_handler.add(cpu + 1, level - min_level, pos)
+                    n = self.oct_handler.add(cpu + 1, level - min_level, pos)
+                    assert(n == ng)
         self.oct_handler.finalize()
 
     def included(self, selector):

diff -r c841f83d89f4b0964d879e1f4487a541571b1a0b -r 2089e412960103a0ca76966220d285eac65a1f32 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -65,6 +65,7 @@
 cdef class OctreeContainer:
     cdef OctAllocationContainer *cont
     cdef Oct ****root_mesh
+    cdef int partial_coverage
     cdef int nn[3]
     cdef np.float64_t DLE[3], DRE[3]
     cdef public int nocts

diff -r c841f83d89f4b0964d879e1f4487a541571b1a0b -r 2089e412960103a0ca76966220d285eac65a1f32 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -97,6 +97,7 @@
 
     def __init__(self, oct_domain_dimensions, domain_left_edge, domain_right_edge):
         # This will just initialize the root mesh octs
+        self.partial_coverage = 0
         cdef int i, j, k, p
         for i in range(3):
             self.nn[i] = oct_domain_dimensions[i]
@@ -143,7 +144,8 @@
     cdef void visit_all_octs(self, SelectorObject selector,
                         oct_visitor_function *func,
                         OctVisitorData *data):
-        cdef int i, j, k, n
+        cdef int i, j, k, n, vc
+        vc = self.partial_coverage
         data.global_index = -1
         cdef np.float64_t pos[3], dds[3]
         # This dds is the oct-width
@@ -159,7 +161,7 @@
                     if self.root_mesh[i][j][k] == NULL: continue
                     selector.recursively_visit_octs(
                         self.root_mesh[i][j][k],
-                        pos, dds, 0, func, data)
+                        pos, dds, 0, func, data, vc)
                     pos[2] += dds[2]
                 pos[1] += dds[1]
             pos[0] += dds[0]
@@ -314,7 +316,7 @@
     def icoords(self, SelectorObject selector, np.uint64_t num_cells = -1,
                 int domain_id = -1):
         if num_cells == -1:
-            num_cells = selector.count_octs(self, domain_id)
+            num_cells = selector.count_oct_cells(self, domain_id)
         cdef np.ndarray[np.int64_t, ndim=2] coords
         coords = np.empty((num_cells, 3), dtype="int64")
         cdef OctVisitorData data
@@ -330,7 +332,7 @@
     def ires(self, SelectorObject selector, np.uint64_t num_cells = -1,
                 int domain_id = -1):
         if num_cells == -1:
-            num_cells = selector.count_octs(self, domain_id)
+            num_cells = selector.count_octs(self, domain_id) * 8
         #Return the 'resolution' of each cell; ie the level
         cdef np.ndarray[np.int64_t, ndim=1] res
         res = np.empty(num_cells, dtype="int64")
@@ -347,7 +349,7 @@
     def fwidth(self, SelectorObject selector, np.uint64_t num_cells = -1,
                 int domain_id = -1):
         if num_cells == -1:
-            num_cells = selector.count_octs(self, domain_id)
+            num_cells = selector.count_octs(self, domain_id) * 8
         cdef np.ndarray[np.float64_t, ndim=2] fwidth
         fwidth = np.empty((num_cells, 3), dtype="float64")
         cdef OctVisitorData data
@@ -367,7 +369,7 @@
     def fcoords(self, SelectorObject selector, np.uint64_t num_cells = -1,
                 int domain_id = -1):
         if num_cells == -1:
-            num_cells = selector.count_octs(self, domain_id)
+            num_cells = selector.count_octs(self, domain_id) * 8
         #Return the floating point unitary position of every cell
         cdef np.ndarray[np.float64_t, ndim=2] coords
         coords = np.empty((num_cells, 3), dtype="float64")
@@ -392,14 +394,17 @@
         # This is actually not correct.  The hard part is that we need to
         # iterate the same way visit_all_octs does, but we need to track the
         # number of octs total visited.
-        cdef np.int64_t num_cells = -1
+        cdef np.int64_t num_octs = -1
         if dest is None:
-            num_cells = selector.count_octs(self, domain_id)
+            # Note that RAMSES can have partial refinement inside an Oct.  This
+            # means we actually do want the number of Octs, not the number of
+            # cells.
+            num_cells = selector.count_oct_cells(self, domain_id)
             if dims > 1:
                 dest = np.zeros((num_cells, dims), dtype=source.dtype,
                     order='C')
             else:
-                dest = np.zeros(num_cells, dtype=source.dtype, order='C')
+                dest = np.zeros(num_cells * 8, dtype=source.dtype, order='C')
         cdef OctVisitorData data
         data.index = offset
         data.domain = domain_id
@@ -422,6 +427,7 @@
         if (data.global_index + 1) * 8 * data.dims > source.size:
             print "GLOBAL INDEX RAN AHEAD.",
             print (data.global_index + 1) * 8 * data.dims - source.size
+            print dest.size, source.size, num_cells
             raise RuntimeError
         if data.index > dest.size:
             print "DEST INDEX RAN AHEAD.",
@@ -446,6 +452,7 @@
 
     def __init__(self, domain_dimensions, domain_left_edge, domain_right_edge):
         cdef int i, j, k, p
+        self.partial_coverage = 1
         for i in range(3):
             self.nn[i] = domain_dimensions[i]
         self.max_domain = -1
@@ -480,8 +487,9 @@
     cdef void visit_all_octs(self, SelectorObject selector,
                         oct_visitor_function *func,
                         OctVisitorData *data):
-        cdef int i, j, k, n
+        cdef int i, j, k, n, vc
         data.global_index = -1
+        vc = self.partial_coverage
         cdef np.float64_t pos[3], dds[3]
         # This dds is the oct-width
         for i in range(3):

diff -r c841f83d89f4b0964d879e1f4487a541571b1a0b -r 2089e412960103a0ca76966220d285eac65a1f32 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -65,8 +65,7 @@
         data.last = o.domain_ind
 
 cdef void count_total_cells(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # Count even if not selected.
-    # Number of *octs* visited.
+    # Number of *cells* visited and selected.
     data.index += selected
 
 cdef void mark_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):

diff -r c841f83d89f4b0964d879e1f4487a541571b1a0b -r 2089e412960103a0ca76966220d285eac65a1f32 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -46,7 +46,8 @@
                         np.float64_t pos[3], np.float64_t dds[3],
                         int level,
                         oct_visitor_function *func,
-                        OctVisitorData *data)
+                        OctVisitorData *data,
+                        int visit_covered = ?)
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],
                                np.int32_t level, Oct *o = ?) nogil

diff -r c841f83d89f4b0964d879e1f4487a541571b1a0b -r 2089e412960103a0ca76966220d285eac65a1f32 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -166,6 +166,13 @@
         cdef OctVisitorData data
         data.index = 0
         data.domain = domain_id
+        octree.visit_all_octs(self, oct_visitors.count_total_octs, &data)
+        return data.index
+
+    def count_oct_cells(self, OctreeContainer octree, int domain_id = -1):
+        cdef OctVisitorData data
+        data.index = 0
+        data.domain = domain_id
         octree.visit_all_octs(self, oct_visitors.count_total_cells, &data)
         return data.index
 
@@ -176,7 +183,8 @@
                         np.float64_t pos[3], np.float64_t dds[3],
                         int level, 
                         oct_visitor_function *func,
-                        OctVisitorData *data):
+                        OctVisitorData *data,
+                        int visit_covered = 0):
         cdef np.float64_t LE[3], RE[3], sdds[3], spos[3]
         cdef int i, j, k, res, ii
         cdef Oct *ch
@@ -206,7 +214,6 @@
             # not get accessed itself.
             next_level = 1
             this_level = 0
-            increment = 0
         elif level == self.max_level:
             next_level = 0
         elif level < self.min_level or level > self.max_level:
@@ -223,7 +230,7 @@
                 for k in range(2):
                     ii = ((k*2)+j)*2+i
                     ch = root.children[i][j][k]
-                    if next_level == 1 and ch != NULL:
+                    if visit_covered == 0 and next_level == 1 and ch != NULL:
                         self.recursively_visit_octs(
                             ch, spos, sdds, level + 1, func, data)
                     elif this_level == 1:
@@ -237,6 +244,11 @@
                     spos[2] += sdds[2]
                 spos[1] += sdds[1]
             spos[0] += sdds[0]
+        if visit_covered == 1:
+            # On our first pass through, we always only look at the current
+            # level.  So we make a second pass to go downwards.
+            self.recursively_visit_octs(root, pos, dds, level, func,
+                                        data, 0)
 
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],

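The new count_octs/count_oct_cells split matters because a partially covered
octree can visit a coarse oct's cells even where child octs exist, so the cell
count is no longer always 8 times the oct count. A toy sketch of the two
counts, using hypothetical Python structures in place of the Cython Oct:

    class ToyOct:
        def __init__(self, children=None):
            self.children = children or {}  # (i, j, k) -> ToyOct

    def count_octs(node):
        return 1 + sum(count_octs(c) for c in node.children.values())

    def count_cells(node, visit_covered):
        if visit_covered:
            # Partial coverage: all 8 cells of this oct count, and so
            # do the cells of any children (two-pass visiting).
            return 8 + sum(count_cells(c, True)
                           for c in node.children.values())
        # Full coverage: a cell counts only on the finest oct covering it.
        total = 0
        for i in range(2):
            for j in range(2):
                for k in range(2):
                    child = node.children.get((i, j, k))
                    total += count_cells(child, False) if child else 1
        return total

    root = ToyOct({(0, 0, 0): ToyOct()})
    assert count_octs(root) == 2
    assert count_cells(root, False) == 15  # 7 coarse + 8 fine cells
    assert count_cells(root, True) == 16   # every cell of both octs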

https://bitbucket.org/yt_analysis/yt/commits/0421a6418d61/
Changeset:   0421a6418d61
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-24 20:23:10
Summary:     Try another tactic for filling partially-covered Octrees.
Affected #:  2 files

diff -r 2089e412960103a0ca76966220d285eac65a1f32 -r 0421a6418d618c59f9e065a6666376b4d1eb4a4d yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -316,7 +316,7 @@
     def icoords(self, SelectorObject selector, np.uint64_t num_cells = -1,
                 int domain_id = -1):
         if num_cells == -1:
-            num_cells = selector.count_oct_cells(self, domain_id)
+            num_cells = selector.count_octs(self, domain_id) * 8
         cdef np.ndarray[np.int64_t, ndim=2] coords
         coords = np.empty((num_cells, 3), dtype="int64")
         cdef OctVisitorData data
@@ -404,7 +404,7 @@
                 dest = np.zeros((num_cells, dims), dtype=source.dtype,
                     order='C')
             else:
-                dest = np.zeros(num_cells * 8, dtype=source.dtype, order='C')
+                dest = np.zeros(num_cells, dtype=source.dtype, order='C')
         cdef OctVisitorData data
         data.index = offset
         data.domain = domain_id
@@ -501,7 +501,7 @@
             for j in range(3):
                 pos[j] = self.DLE[j] + (o.pos[j] + 0.5) * dds[j]
             selector.recursively_visit_octs(
-                o, pos, dds, 0, func, data)
+                o, pos, dds, 0, func, data, vc)
 
     def domain_identify(self, SelectorObject selector):
         cdef np.ndarray[np.uint8_t, ndim=1] domain_mask

diff -r 2089e412960103a0ca76966220d285eac65a1f32 -r 0421a6418d618c59f9e065a6666376b4d1eb4a4d yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -185,6 +185,10 @@
                         oct_visitor_function *func,
                         OctVisitorData *data,
                         int visit_covered = 0):
+        # visit_covered tells us whether this octree supports partial
+        # refinement.  If it does, we need to handle this specially -- first
+        # we visit *this* oct, then we make a second pass to check any child
+        # octs.
         cdef np.float64_t LE[3], RE[3], sdds[3], spos[3]
         cdef int i, j, k, res, ii
         cdef Oct *ch
@@ -222,33 +226,33 @@
             return
         # Now we visit all our children.  We subtract off sdds for the first
         # pass because we center it on the first cell.
-        spos[0] = pos[0] - sdds[0]/2.0
-        for i in range(2):
-            spos[1] = pos[1] - sdds[1]/2.0
-            for j in range(2):
-                spos[2] = pos[2] - sdds[2]/2.0
-                for k in range(2):
-                    ii = ((k*2)+j)*2+i
-                    ch = root.children[i][j][k]
-                    if visit_covered == 0 and next_level == 1 and ch != NULL:
-                        self.recursively_visit_octs(
-                            ch, spos, sdds, level + 1, func, data)
-                    elif this_level == 1:
-                        selected = self.select_cell(spos, sdds, eterm)
-                        data.global_index += increment
-                        increment = 0
-                        data.ind[0] = i
-                        data.ind[1] = j
-                        data.ind[2] = k
-                        func(root, data, selected)
-                    spos[2] += sdds[2]
-                spos[1] += sdds[1]
-            spos[0] += sdds[0]
-        if visit_covered == 1:
-            # On our first pass through, we always only look at the current
-            # level.  So we make a second pass to go downwards.
-            self.recursively_visit_octs(root, pos, dds, level, func,
-                                        data, 0)
+        cdef int iter = 1 - visit_covered # two passes if visit_covered == 1, one if 0.

+        while iter < 2:
+            spos[0] = pos[0] - sdds[0]/2.0
+            for i in range(2):
+                spos[1] = pos[1] - sdds[1]/2.0
+                for j in range(2):
+                    spos[2] = pos[2] - sdds[2]/2.0
+                    for k in range(2):
+                        ii = ((k*2)+j)*2+i
+                        ch = root.children[i][j][k]
+                        if iter == 1 and next_level == 1 and ch != NULL:
+                            self.recursively_visit_octs(
+                                ch, spos, sdds, level + 1, func, data,
+                                visit_covered)
+                        elif this_level == 1:
+                            selected = self.select_cell(spos, sdds, eterm)
+                            data.global_index += increment
+                            increment = 0
+                            data.ind[0] = i
+                            data.ind[1] = j
+                            data.ind[2] = k
+                            func(root, data, selected)
+                        spos[2] += sdds[2]
+                    spos[1] += sdds[1]
+                spos[0] += sdds[0]
+            this_level = 0 # We turn this off for the second pass.
+            iter += 1
 
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],

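The rewrite above turns the "visit this level, then recurse" tail call into an
explicit two-iteration loop: when visit_covered is set, the first pass emits
this oct's cells and the second descends into children. A simplified
control-flow sketch (plain Python, toy structures; the real code also tracks
next_level and the level bounds):

    def visit(node, level, visit_covered, visit_cell, children_of):
        iteration = 1 - visit_covered  # 0 -> two passes, 1 -> one pass
        this_level = True
        while iteration < 2:
            for idx, child in enumerate(children_of(node)):
                if iteration == 1 and child is not None:
                    visit(child, level + 1, visit_covered,
                          visit_cell, children_of)
                elif this_level:
                    visit_cell(node, level, idx)
            this_level = False  # cells are emitted on the first pass only
            iteration += 1

    visited = []
    kids = lambda n: n["children"]
    tree = {"children": [{"children": [None] * 8}] + [None] * 7}
    visit(tree, 0, 1, lambda n, lvl, i: visited.append((lvl, i)), kids)
    assert len(visited) == 16  # 8 coarse cells + 8 fine cells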

https://bitbucket.org/yt_analysis/yt/commits/6a4182f19d47/
Changeset:   6a4182f19d47
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-24 20:31:03
Summary:     Adding a _domain_offset, like _id_offset for grids.
Affected #:  3 files

diff -r 0421a6418d618c59f9e065a6666376b4d1eb4a4d -r 6a4182f19d47a1fdd4f977d7490773851587d8d5 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -45,6 +45,7 @@
     _skip_add = True
     _con_args = ('base_region', 'domain', 'pf')
     _container_fields = ("dx", "dy", "dz")
+    _domain_offset = 0
 
     def __init__(self, base_region, domain, pf):
         self.field_data = YTFieldData()
@@ -113,7 +114,8 @@
         nvals = (2, 2, 2, (self.domain_ind >= 0).sum())
         op = cls(nvals) # We allocate number of zones, not number of octs
         op.initialize()
-        op.process_octree(self.oct_handler, self.domain_ind, positions, fields, 0)
+        op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
+            self.domain_id, self._domain_offset)
         vals = op.finalize()
         return np.asfortranarray(vals)
 

diff -r 0421a6418d618c59f9e065a6666376b4d1eb4a4d -r 6a4182f19d47a1fdd4f977d7490773851587d8d5 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -253,6 +253,8 @@
 
 class RAMSESDomainSubset(OctreeSubset):
 
+    _domain_offset = 1
+
     def fill(self, content, fields):
         # Here we get a copy of the file, which we skip through and read the
         # bits we want.

diff -r 0421a6418d618c59f9e065a6666376b4d1eb4a4d -r 6a4182f19d47a1fdd4f977d7490773851587d8d5 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -50,7 +50,8 @@
     def process_octree(self, OctreeContainer octree,
                      np.ndarray[np.int64_t, ndim=1] dom_ind,
                      np.ndarray[np.float64_t, ndim=2] positions,
-                     fields = None, int domain_id = -1):
+                     fields = None, int domain_id = -1,
+                     int domain_offset = 0):
         cdef int nf, i, j
         self.bad_indices = 0
         if fields is None:
@@ -68,7 +69,7 @@
         cdef OctInfo oi
         cdef np.int64_t offset, moff
         cdef Oct *oct
-        moff = octree.get_domain_offset(domain_id)
+        moff = octree.get_domain_offset(domain_id + domain_offset)
         for i in range(positions.shape[0]):
             # We should check if particle remains inside the Oct here
             for j in range(nf):

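The extra parameter lets a frontend whose on-disk domains are one-indexed
(RAMSES sets _domain_offset = 1 above) shift its domain_id before asking the
octree for the domain's starting offset. A minimal sketch with hypothetical
names:

    class ToyOctree:
        def __init__(self, offsets):
            self._offsets = offsets  # domain id -> first global oct index

        def get_domain_offset(self, domain_id):
            return self._offsets[domain_id]

    def deposit_offset(octree, domain_id, domain_offset=0):
        # Mirrors the moff lookup in process_octree above.
        return octree.get_domain_offset(domain_id + domain_offset)

    octree = ToyOctree({1: 0, 2: 128})
    # A subset with domain_id = 1 and _domain_offset = 1 resolves to the
    # octree's domain 2.
    assert deposit_offset(octree, 1, domain_offset=1) == 128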

https://bitbucket.org/yt_analysis/yt/commits/5dcf99cf0493/
Changeset:   5dcf99cf0493
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-24 20:55:24
Summary:     Adding AlwaysSelector and fixing a few things from the multi/single domain
transition.
Affected #:  6 files

diff -r 6a4182f19d47a1fdd4f977d7490773851587d8d5 -r 5dcf99cf04933f874b840409288051360cdd5899 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -33,6 +33,7 @@
     OctreeContainer, ORDER_MAX
 from selection_routines cimport SelectorObject, \
     OctVisitorData, oct_visitor_function
+import selection_routines
 cimport oct_visitors
 cimport cython
 
@@ -188,6 +189,7 @@
         cdef int ind[3]
         cdef np.float64_t dds[3], cp[3], pp[3]
         cdef Oct *cur, *next
+        cur = next = NULL
         cdef int i
         for i in range(3):
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
@@ -467,7 +469,12 @@
             self.DRE[i] = domain_right_edge[i] #num_grid
 
     def finalize(self):
-        return
+        cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
+        cdef OctVisitorData data
+        data.index = 0
+
+        self.visit_all_octs(selector, oct_visitors.assign_domain_ind, &data)
+        assert ((data.global_index+1)*8 == data.index)
 
     cdef int get_root(self, int ind[3], Oct **o):
         o[0] = NULL
@@ -518,8 +525,7 @@
         return domain_ids
 
     cdef np.int64_t get_domain_offset(self, int domain_id):
-        cdef OctAllocationContainer *cont = self.domains[domain_id - 1]
-        return cont.offset
+        return 0 # We no longer have a domain offset.
 
     cdef Oct* next_root(self, int domain_id, int ind[3]):
         # We assume that 20 bits is enough for each index.

diff -r 6a4182f19d47a1fdd4f977d7490773851587d8d5 -r 5dcf99cf04933f874b840409288051360cdd5899 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -41,6 +41,7 @@
 cdef oct_visitor_function copy_array_f64
 cdef oct_visitor_function copy_array_i64
 cdef oct_visitor_function identify_octs
+cdef oct_visitor_function assign_domain_ind
 
 cdef inline int oind(OctVisitorData *data):
     return (((data.ind[0]*2)+data.ind[1])*2+data.ind[2])

diff -r 6a4182f19d47a1fdd4f977d7490773851587d8d5 -r 5dcf99cf04933f874b840409288051360cdd5899 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -147,3 +147,7 @@
     if selected == 0: return
     cdef np.uint8_t *arr = <np.uint8_t *> data.array
     arr[o.domain - 1] = 1
+
+cdef void assign_domain_ind(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    o.domain_ind = data.global_index
+    data.index += 1

diff -r 6a4182f19d47a1fdd4f977d7490773851587d8d5 -r 5dcf99cf04933f874b840409288051360cdd5899 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -67,8 +67,9 @@
         cdef int dims[3]
         dims[0] = dims[1] = dims[2] = 2
         cdef OctInfo oi
-        cdef np.int64_t offset, moff
+        cdef np.int64_t offset, moff, missed = 0
         cdef Oct *oct
+        cdef np.int64_t numpart = positions.shape[0]
         moff = octree.get_domain_offset(domain_id + domain_offset)
         for i in range(positions.shape[0]):
             # We should check if particle remains inside the Oct here
@@ -83,6 +84,9 @@
             # full octree structure.  All we *really* care about is some
             # arbitrary offset into a field value for deposition.
             oct = octree.get(pos, &oi)
+            if oct == NULL:
+                missed += 1
+                continue
             # This next line is unfortunate.  Basically it says, sometimes we
             # might have particles that belong to octs outside our domain.
             if oct.domain != domain_id: continue
@@ -92,6 +96,9 @@
             # Check that we found the oct ...
             self.process(dims, oi.left_edge, oi.dds,
                          offset, pos, field_vals)
+        if missed > 0:
+            print "MISSED %s out of %s on domain %s" % (
+                missed, positions.shape[0], domain_id)
         
     @cython.boundscheck(False)
     @cython.wraparound(False)

diff -r 6a4182f19d47a1fdd4f977d7490773851587d8d5 -r 5dcf99cf04933f874b840409288051360cdd5899 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -56,4 +56,3 @@
     cdef void set_bounds(self,
                          np.float64_t left_edge[3], np.float64_t right_edge[3],
                          np.float64_t dds[3], int ind[3][2], int *check)
-

diff -r 6a4182f19d47a1fdd4f977d7490773851587d8d5 -r 5dcf99cf04933f874b840409288051360cdd5899 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1223,3 +1223,47 @@
 
 particle_octree_subset_selector = ParticleOctreeSubsetSelector
 
+cdef class AlwaysSelector(SelectorObject):
+
+    def __init__(self, dobj):
+        pass
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def select_octs(self, OctreeContainer octree):
+        raise RuntimeError
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void set_bounds(self,
+                         np.float64_t left_edge[3], np.float64_t right_edge[3],
+                         np.float64_t dds[3], int ind[3][2], int *check):
+        check[0] = 0
+        return
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def select_grids(self,
+                     np.ndarray[np.float64_t, ndim=2] left_edges,
+                     np.ndarray[np.float64_t, ndim=2] right_edges,
+                     np.ndarray[np.int32_t, ndim=2] levels):
+        cdef int ng = left_edges.shape[0]
+        cdef np.ndarray[np.uint8_t, ndim=1] gridi = np.ones(ng, dtype='uint8')
+        return gridi.astype("bool")
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3],
+                         int eterm[3]) nogil:
+        return 1
+
+    cdef int select_grid(self, np.float64_t left_edge[3],
+                         np.float64_t right_edge[3], np.int32_t level,
+                         Oct *o = NULL) nogil:
+        return 1
+
+always_selector = AlwaysSelector

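AlwaysSelector's contract is simply to select everything, which is what lets
finalize() above walk the entire octree to assign each oct a global
domain_ind. A pure-Python sketch of that contract (illustrative, not the
Cython class):

    import numpy as np

    class AlwaysSelectorSketch:
        def __init__(self, dobj=None):
            pass

        def select_grids(self, left_edges, right_edges, levels):
            return np.ones(left_edges.shape[0], dtype=bool)

        def select_grid(self, left_edge, right_edge, level, o=None):
            return 1

        def select_cell(self, pos, dds):
            return 1

    sel = AlwaysSelectorSketch()
    edges = np.zeros((4, 3))
    assert sel.select_grids(edges, edges,
                            np.zeros((4, 1), dtype="int32")).all()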

https://bitbucket.org/yt_analysis/yt/commits/cdf4fdbb839d/
Changeset:   cdf4fdbb839d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-24 21:37:54
Summary:     Enable sub-domain on sub-domain selection.

RAMSES is now working as before for particle deposition.  Baryon field
deposition will require further work.
Affected #:  5 files

diff -r 5dcf99cf04933f874b840409288051360cdd5899 -r cdf4fdbb839d408c05f7c80b17b583c32f5f3ae4 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -36,6 +36,7 @@
     NeedsProperty, \
     NeedsParameter
 import yt.geometry.particle_deposit as particle_deposit
+from yt.funcs import *
 
 class OctreeSubset(YTSelectionContainer):
     _spatial = True
@@ -114,6 +115,8 @@
         nvals = (2, 2, 2, (self.domain_ind >= 0).sum())
         op = cls(nvals) # We allocate number of zones, not number of octs
         op.initialize()
+        mylog.debug("Depositing %s particles into %s Octs",
+            positions.shape[0], nvals[-1])
         op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
             self.domain_id, self._domain_offset)
         vals = op.finalize()

diff -r 5dcf99cf04933f874b840409288051360cdd5899 -r cdf4fdbb839d408c05f7c80b17b583c32f5f3ae4 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -248,6 +248,8 @@
         self.oct_handler.finalize()
 
     def included(self, selector):
+        if getattr(selector, "domain_id", -1) == self.domain_id:
+            return [self.domain_id]
         domain_ids = self.oct_handler.domain_identify(selector)
         return self.domain_id in domain_ids
 
@@ -326,11 +328,11 @@
 
     def _identify_base_chunk(self, dobj):
         if getattr(dobj, "_chunk_info", None) is None:
-            base_region = getattr(dobj, "base_region", dobj)
-            # Note that domain_ids will be ONE INDEXED
             domains = [dom for dom in self.domains if
                        dom.included(dobj.selector)]
-            mylog.debug("Identified %s intersecting domains", len(domains))
+            base_region = getattr(dobj, "base_region", dobj)
+            if len(domains) > 1:
+                mylog.debug("Identified %s intersecting domains", len(domains))
             subsets = [RAMSESDomainSubset(base_region, domain, self.parameter_file)
                        for domain in domains]
             dobj._chunk_info = subsets

diff -r 5dcf99cf04933f874b840409288051360cdd5899 -r cdf4fdbb839d408c05f7c80b17b583c32f5f3ae4 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -396,7 +396,7 @@
         # This is actually not correct.  The hard part is that we need to
         # iterate the same way visit_all_octs does, but we need to track the
         # number of octs total visited.
-        cdef np.int64_t num_octs = -1
+        cdef np.int64_t num_cells = -1
         if dest is None:
             # Note that RAMSES can have partial refinement inside an Oct.  This
             # means we actually do want the number of Octs, not the number of

diff -r 5dcf99cf04933f874b840409288051360cdd5899 -r cdf4fdbb839d408c05f7c80b17b583c32f5f3ae4 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -67,7 +67,7 @@
         cdef int dims[3]
         dims[0] = dims[1] = dims[2] = 2
         cdef OctInfo oi
-        cdef np.int64_t offset, moff, missed = 0
+        cdef np.int64_t offset, moff
         cdef Oct *oct
         cdef np.int64_t numpart = positions.shape[0]
         moff = octree.get_domain_offset(domain_id + domain_offset)
@@ -84,21 +84,23 @@
             # full octree structure.  All we *really* care about is some
             # arbitrary offset into a field value for deposition.
             oct = octree.get(pos, &oi)
-            if oct == NULL:
-                missed += 1
-                continue
             # This next line is unfortunate.  Basically it says, sometimes we
             # might have particles that belong to octs outside our domain.
-            if oct.domain != domain_id: continue
+            # For the distributed-memory octrees, this will manifest as a NULL
+            # oct.  For the non-distributed memory octrees, we'll simply see
+            # this as a domain_id that is not the current domain id.  Note that
+            # this relies on the idea that all the particles in a region are
+            # all fed to sequential domain subsets, which will not be true with
+            # RAMSES, where we *will* miss particles that live in ghost
+            # regions on other processors.  Addressing this is on the TODO
+            # list.
+            if oct == NULL or oct.domain != domain_id: continue
             # Note that this has to be our local index, not our in-file index.
             offset = dom_ind[oct.domain_ind - moff] * 8
             if offset < 0: continue
             # Check that we found the oct ...
             self.process(dims, oi.left_edge, oi.dds,
                          offset, pos, field_vals)
-        if missed > 0:
-            print "MISSED %s out of %s on domain %s" % (
-                missed, positions.shape[0], domain_id)
         
     @cython.boundscheck(False)
     @cython.wraparound(False)

diff -r 5dcf99cf04933f874b840409288051360cdd5899 -r cdf4fdbb839d408c05f7c80b17b583c32f5f3ae4 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1127,9 +1127,11 @@
 
 cdef class OctreeSubsetSelector(SelectorObject):
     cdef SelectorObject base_selector
+    cdef public np.int64_t domain_id
 
     def __init__(self, dobj):
         self.base_selector = dobj.base_selector
+        self.domain_id = dobj.domain_id
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -1167,7 +1169,11 @@
                          Oct *o = NULL) nogil:
         # Because visitors now use select_grid, we should be explicitly
         # checking this.
-        return self.base_selector.select_grid(left_edge, right_edge, level, o)
+        cdef int res
+        res = self.base_selector.select_grid(left_edge, right_edge, level, o)
+        if res == 1 and o != NULL and o.domain != self.domain_id:
+            return -1
+        return res
 
 octree_subset_selector = OctreeSubsetSelector
 

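The select_grid change above makes the subset selector tri-state: 1 for a
selected oct, 0 when the base geometry rejects it, and -1 when the geometry
accepts it but the oct belongs to a different domain. A small sketch of that
decision, with an illustrative Oct stand-in:

    class Oct:
        def __init__(self, domain):
            self.domain = domain

    def select_grid(base_result, o, domain_id):
        if base_result == 1 and o is not None and o.domain != domain_id:
            return -1  # inside the region, but owned by another domain
        return base_result

    assert select_grid(1, Oct(domain=2), domain_id=2) == 1
    assert select_grid(1, Oct(domain=3), domain_id=2) == -1
    assert select_grid(0, Oct(domain=3), domain_id=2) == 0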

https://bitbucket.org/yt_analysis/yt/commits/0f5a5c53607b/
Changeset:   0f5a5c53607b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-24 21:43:11
Summary:     This line, which was *incorrect*, actually doubled runtime.

It's now fixed to be correct *and* faster.
Affected #:  1 file

diff -r cdf4fdbb839d408c05f7c80b17b583c32f5f3ae4 -r 0f5a5c53607b7e1d221551c0e000bf2027e2589b yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -248,8 +248,8 @@
         self.oct_handler.finalize()
 
     def included(self, selector):
-        if getattr(selector, "domain_id", -1) == self.domain_id:
-            return [self.domain_id]
+        if getattr(selector, "domain_id", None) is not None:
+            return selector.domain_id == self.domain_id
         domain_ids = self.oct_handler.domain_identify(selector)
         return self.domain_id in domain_ids
 

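The earlier version only short-circuited when the selector's domain_id
matched; a non-matching domain-aware selector still fell through to the
expensive domain_identify walk (and the matching case returned a list rather
than a boolean). The corrected fast path answers any domain-aware selector
with a cheap comparison. A sketch with hypothetical objects:

    def included(subset_domain_id, selector, identify=None):
        sel_dom = getattr(selector, "domain_id", None)
        if sel_dom is not None:
            return sel_dom == subset_domain_id  # no octree walk needed
        return subset_domain_id in identify(selector)

    class DomainSelector:
        domain_id = 3

    class GeometricSelector:
        pass

    assert included(3, DomainSelector()) is True
    assert included(2, DomainSelector()) is False
    assert included(2, GeometricSelector(),
                    identify=lambda s: [1, 2]) is True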

https://bitbucket.org/yt_analysis/yt/commits/4d5fcc4699c5/
Changeset:   4d5fcc4699c5
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-24 21:47:12
Summary:     Starting to remove obsolete routines.
Affected #:  1 file

diff -r 0f5a5c53607b7e1d221551c0e000bf2027e2589b -r 4d5fcc4699c538125574d861c913557d0fb8102c yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -141,27 +141,6 @@
                 gridi[n] = self.select_grid(LE, RE, levels[n,0])
         return gridi.astype("bool")
 
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def select_octs(self, OctreeContainer octree):
-        # There has to be a better way to do this.
-        raise RuntimeError
-        cdef OctVisitorData data
-        data.index = 0
-        data.last = -1
-        data.global_index = -1
-        octree.visit_all_octs(self, oct_visitors.count_total_octs, &data)
-        cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
-                np.zeros((2, 2, 2, data.index), 'uint8', order='C')
-        # This is where we'll -- in the future -- cut up based on indices of
-        # the octs.
-        data.index = -1
-        data.last = -1
-        data.array = m2.data
-        octree.visit_all_octs(self, oct_visitors.mask_octs, &data)
-        return m2.astype("bool")
-
     def count_octs(self, OctreeContainer octree, int domain_id = -1):
         cdef OctVisitorData data
         data.index = 0
@@ -1136,12 +1115,6 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def select_octs(self, OctreeContainer octree):
-        raise RuntimeError
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
     cdef void set_bounds(self,
                          np.float64_t left_edge[3], np.float64_t right_edge[3],
                          np.float64_t dds[3], int ind[3][2], int *check):
@@ -1191,13 +1164,6 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def select_octs(self, OctreeContainer octree):
-        # There has to be a better way to do this.
-        raise RuntimeError
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
     cdef void set_bounds(self,
                          np.float64_t left_edge[3], np.float64_t right_edge[3],
                          np.float64_t dds[3], int ind[3][2], int *check):
@@ -1237,12 +1203,6 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def select_octs(self, OctreeContainer octree):
-        raise RuntimeError
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
     cdef void set_bounds(self,
                          np.float64_t left_edge[3], np.float64_t right_edge[3],
                          np.float64_t dds[3], int ind[3][2], int *check):


https://bitbucket.org/yt_analysis/yt/commits/40184b22d3e4/
Changeset:   40184b22d3e4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-24 22:06:26
Summary:     Change static allocation of 8 pointers to dynamic allocation on Octs.
Affected #:  6 files

diff -r 4d5fcc4699c538125574d861c913557d0fb8102c -r 40184b22d3e4e15cee3a0b1efa82c495d426f900 yt/geometry/fake_octree.pyx
--- a/yt/geometry/fake_octree.pyx
+++ b/yt/geometry/fake_octree.pyx
@@ -27,6 +27,7 @@
 
 from libc.stdlib cimport malloc, free, rand, RAND_MAX
 cimport numpy as np
+from oct_visitors cimport cind
 import numpy as np
 cimport cython
 
@@ -68,7 +69,7 @@
                     long max_noct, long max_level, float fsubdivide,
                     np.ndarray[np.uint8_t, ndim=2] mask):
     print "child", parent.file_ind, ind[0], ind[1], ind[2], cur_leaf, cur_level
-    cdef int ddr[3]
+    cdef int ddr[3], ii
     cdef long i,j,k
     cdef float rf #random float from 0-1
     if cur_level >= max_level: 
@@ -80,7 +81,8 @@
         ddr[i] = 2
     rf = rand() * 1.0 / RAND_MAX
     if rf > fsubdivide:
-        if parent.children[ind[0]][ind[1]][ind[2]] == NULL:
+        ii = cind(ind[0], ind[1], ind[2])
+        if parent.children[ii] == NULL:
             cur_leaf += 7 
         oct = oct_handler.next_child(1, ind, parent)
         oct.domain = 1

diff -r 4d5fcc4699c538125574d861c913557d0fb8102c -r 40184b22d3e4e15cee3a0b1efa82c495d426f900 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -43,7 +43,7 @@
     np.int64_t domain       # (opt) addl int index
     np.int64_t pos[3]       # position in ints
     np.int8_t level
-    Oct *children[2][2][2]
+    Oct **children          # Up to 8 long
     Oct *parent
 
 cdef struct OctKey:

diff -r 4d5fcc4699c538125574d861c913557d0fb8102c -r 40184b22d3e4e15cee3a0b1efa82c495d426f900 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -35,6 +35,7 @@
     OctVisitorData, oct_visitor_function
 import selection_routines
 cimport oct_visitors
+from oct_visitors cimport cind
 cimport cython
 
 ORDER_MAX = 20
@@ -66,10 +67,7 @@
         oct.file_ind = oct.domain = -1
         oct.domain_ind = n + n_cont.offset
         oct.level = -1
-        for i in range(2):
-            for j in range(2):
-                for k in range(2):
-                    oct.children[i][j][k] = NULL
+        oct.children = NULL
     if prev != NULL:
         prev.next = n_cont
     n_cont.next = NULL
@@ -80,6 +78,9 @@
     cdef OctAllocationContainer *cur
     while first != NULL:
         cur = first
+        for i in range(cur.n):
+            if cur.my_octs[i].children != NULL:
+                free(cur.my_octs[i].children)
         free(first.my_octs)
         first = cur.next
         free(cur)
@@ -207,7 +208,10 @@
                 else:
                     ind[i] = 1
                     cp[i] += dds[i]/2.0
-            next = cur.children[ind[0]][ind[1]][ind[2]]
+            if cur.children != NULL:
+                next = cur.children[cind(ind[0],ind[1],ind[2])]
+            else:
+                next = NULL
         if oinfo == NULL: return cur
         for i in range(3):
             # This will happen *after* we quit out, so we need to back out the
@@ -285,8 +289,10 @@
                         dl = o.level - (candidate.level + 1)
                         for i in range(3):
                             ind[i] = (npos[i] >> dl) & 1
-                        if candidate.children[0][0][0] == NULL: break
-                        candidate = candidate.children[ind[0]][ind[1]][ind[2]]
+                        if candidate.children[cind(ind[0],ind[1],ind[2])] \
+                                == NULL:
+                            break
+                        candidate = candidate.children[cind(ind[0],ind[1],ind[2])]
                     neighbors[nn] = candidate
                     nn += 1
 
@@ -557,13 +563,20 @@
         return next
 
     cdef Oct* next_child(self, int domain_id, int ind[3], Oct *parent):
-        cdef Oct *next = parent.children[ind[0]][ind[1]][ind[2]]
+        cdef int i
+        cdef Oct *next = NULL
+        if parent.children != NULL:
+            next = parent.children[cind(ind[0],ind[1],ind[2])]
+        else:
+            parent.children = <Oct **> malloc(sizeof(Oct *) * 8)
+            for i in range(8):
+                parent.children[i] = NULL
         if next != NULL: return next
         cdef OctAllocationContainer *cont = self.domains[domain_id - 1]
         if cont.n_assigned >= cont.n: raise RuntimeError
         next = &cont.my_octs[cont.n_assigned]
         cont.n_assigned += 1
-        parent.children[ind[0]][ind[1]][ind[2]] = next
+        parent.children[cind(ind[0],ind[1],ind[2])] = next
         next.parent = parent
         next.level = parent.level + 1
         for i in range(3):

diff -r 4d5fcc4699c538125574d861c913557d0fb8102c -r 40184b22d3e4e15cee3a0b1efa82c495d426f900 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -43,6 +43,9 @@
 cdef oct_visitor_function identify_octs
 cdef oct_visitor_function assign_domain_ind
 
+cdef inline int cind(int i, int j, int k):
+    return (((i*2)+j)*2+k)
+
 cdef inline int oind(OctVisitorData *data):
     return (((data.ind[0]*2)+data.ind[1])*2+data.ind[2])
 

diff -r 4d5fcc4699c538125574d861c913557d0fb8102c -r 40184b22d3e4e15cee3a0b1efa82c495d426f900 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -27,6 +27,7 @@
 
 from oct_container cimport OctreeContainer, Oct, OctInfo, ORDER_MAX
 cimport oct_visitors
+from oct_visitors cimport cind
 from libc.stdlib cimport malloc, free, qsort
 from libc.math cimport floor
 from fp_utils cimport *
@@ -75,8 +76,10 @@
         for i in range(2):
             for j in range(2):
                 for k in range(2):
-                    if o.children[i][j][k] == NULL: continue
-                    self.visit_free(o.children[i][j][k])
+                    if o.children != NULL \
+                       and o.children[cind(i,j,k)] != NULL:
+                        self.visit_free(o.children[cind(i,j,k)])
+        free(o.children)
         free(o)
 
     def clear_fileind(self):
@@ -93,8 +96,9 @@
         for i in range(2):
             for j in range(2):
                 for k in range(2):
-                    if o.children[i][j][k] == NULL: continue
-                    self.visit_clear(o.children[i][j][k])
+                    if o.children != NULL \
+                       and o.children[cind(i,j,k)] != NULL:
+                        self.visit_clear(o.children[cind(i,j,k)])
 
     def __iter__(self):
         #Get the next oct, will traverse domains
@@ -139,8 +143,9 @@
         for i in range(2):
             for j in range(2):
                 for k in range(2):
-                    if o.children[i][j][k] != NULL:
-                        self.visit_assign(o.children[i][j][k], lpos)
+                    if o.children != NULL \
+                       and o.children[cind(i,j,k)] != NULL:
+                        self.visit_assign(o.children[cind(i,j,k)], lpos)
         return
 
     cdef np.int64_t get_domain_offset(self, int domain_id):
@@ -158,10 +163,7 @@
         my_oct.domain_ind = self.nocts - 1
         my_oct.pos[0] = my_oct.pos[1] = my_oct.pos[2] = -1
         my_oct.level = -1
-        for i in range(2):
-            for j in range(2):
-                for k in range(2):
-                    my_oct.children[i][j][k] = NULL
+        my_oct.children = NULL
         my_oct.parent = NULL
         return my_oct
 
@@ -191,11 +193,12 @@
                 level += 1
                 for i in range(3):
                     ind[i] = (index >> ((ORDER_MAX - level)*3 + (2 - i))) & 1
-                if cur.children[ind[0]][ind[1]][ind[2]] == NULL:
+                if cur.children == NULL or \
+                   cur.children[cind(ind[0],ind[1],ind[2])] == NULL:
                     cur = self.refine_oct(cur, index)
                     self.filter_particles(cur, data, p)
                 else:
-                    cur = cur.children[ind[0]][ind[1]][ind[2]]
+                    cur = cur.children[cind(ind[0],ind[1],ind[2])]
             cur.file_ind += 1
 
     @cython.boundscheck(False)
@@ -208,6 +211,7 @@
         cdef int i, j, k, m, n, ind[3]
         cdef Oct *noct
         cdef np.uint64_t prefix1, prefix2
+        o.children = <Oct **> malloc(sizeof(Oct *)*8)
         for i in range(2):
             for j in range(2):
                 for k in range(2):
@@ -219,11 +223,11 @@
                     noct.pos[1] = (o.pos[1] << 1) + j
                     noct.pos[2] = (o.pos[2] << 1) + k
                     noct.parent = o
-                    o.children[i][j][k] = noct
+                    o.children[cind(i,j,k)] = noct
         o.file_ind = self.n_ref + 1
         for i in range(3):
             ind[i] = (index >> ((ORDER_MAX - (o.level + 1))*3 + (2 - i))) & 1
-        noct = o.children[ind[0]][ind[1]][ind[2]]
+        noct = o.children[cind(ind[0],ind[1],ind[2])]
         return noct
 
     cdef void filter_particles(self, Oct *o, np.uint64_t *data, np.int64_t p):
@@ -262,8 +266,9 @@
         for i in range(2):
             for j in range(2):
                 for k in range(2):
-                    if o.children[i][j][k] != NULL:
-                        self.visit(o.children[i][j][k], counts, level + 1)
+                    if o.children != NULL \
+                       and o.children[cind(i,j,k)] != NULL:
+                        self.visit(o.children[cind(i,j,k)], counts, level + 1)
         return
 
 cdef class ParticleRegions:

diff -r 4d5fcc4699c538125574d861c913557d0fb8102c -r 40184b22d3e4e15cee3a0b1efa82c495d426f900 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -31,6 +31,7 @@
 from selection_routines cimport SelectorObject
 from oct_container cimport OctreeContainer, OctAllocationContainer, Oct
 cimport oct_visitors
+from oct_visitors cimport cind
 #from geometry_utils cimport point_to_hilbert
 from yt.utilities.lib.grid_traversal cimport \
     VolumeContainer, sample_function, walk_volume
@@ -214,7 +215,9 @@
                     spos[2] = pos[2] - sdds[2]/2.0
                     for k in range(2):
                         ii = ((k*2)+j)*2+i
-                        ch = root.children[i][j][k]
+                        ch = NULL
+                        if root.children != NULL:
+                            ch = root.children[cind(i,j,k)]
                         if iter == 1 and next_level == 1 and ch != NULL:
                             self.recursively_visit_octs(
                                 ch, spos, sdds, level + 1, func, data,

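Replacing the fixed Oct *children[2][2][2] with a lazily malloc'd flat block
of eight pointers means leaf octs carry no child storage at all, at the cost
of a NULL check plus the cind(i, j, k) flattening everywhere children are
touched. A Python stand-in for the new layout:

    def cind(i, j, k):
        # Same flattening as in oct_visitors.pxd above.
        return ((i * 2) + j) * 2 + k

    class ToyOct:
        __slots__ = ("children",)

        def __init__(self):
            self.children = None  # allocated only when a child appears

    def next_child(parent, i, j, k):
        if parent.children is None:
            parent.children = [None] * 8  # the malloc(sizeof(Oct *) * 8)
        if parent.children[cind(i, j, k)] is None:
            parent.children[cind(i, j, k)] = ToyOct()
        return parent.children[cind(i, j, k)]

    root = ToyOct()
    assert root.children is None  # leaves stay pointer-free
    child = next_child(root, 1, 0, 1)
    assert root.children[cind(1, 0, 1)] is child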

https://bitbucket.org/yt_analysis/yt/commits/ae57aa843411/
Changeset:   ae57aa843411
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-24 22:32:05
Summary:     Removing the 'parent' attribute; recovering neighbors will now require cleverness.
Affected #:  3 files

diff -r 40184b22d3e4e15cee3a0b1efa82c495d426f900 -r ae57aa843411ef00c7f66f5c2d3508d388cfced8 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -37,14 +37,11 @@
     np.int64_t file_ind     # index with respect to the order in which it was
                             # added
     np.int64_t domain_ind   # index within the global set of domains
-                            # note that moving to a local index will require
-                            # moving to split-up masks, which is part of a
-                            # bigger refactor
     np.int64_t domain       # (opt) addl int index
     np.int64_t pos[3]       # position in ints
     np.int8_t level
     Oct **children          # Up to 8 long
-    Oct *parent
+    #Oct *parent
 
 cdef struct OctKey:
     np.int64_t key

diff -r 40184b22d3e4e15cee3a0b1efa82c495d426f900 -r ae57aa843411ef00c7f66f5c2d3508d388cfced8 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -63,7 +63,7 @@
     n_cont.n_assigned = 0
     for n in range(n_octs):
         oct = &n_cont.my_octs[n]
-        oct.parent = NULL
+        #oct.parent = NULL
         oct.file_ind = oct.domain = -1
         oct.domain_ind = n + n_cont.offset
         oct.level = -1
@@ -278,7 +278,10 @@
                             curnpos[i] = (curnpos[i] >> 1)
                         # Now we update to the candidate's parent, which should
                         # have a matching position to curopos[]
-                        candidate = candidate.parent
+                        # TODO: This has not survived the transition to
+                        # mostly-stateless Octs!
+                        raise RuntimeError
+                        #candidate = candidate.parent
                     if candidate == NULL:
                         # Worst case scenario
                         for i in range(3):
@@ -548,7 +551,7 @@
             return NULL
         next = &cont.my_octs[cont.n_assigned]
         cont.n_assigned += 1
-        next.parent = NULL
+        #next.parent = NULL
         next.level = 0
         cdef np.int64_t key = 0
         cdef OctKey *ikey = &self.root_nodes[self.num_root]
@@ -577,7 +580,7 @@
         next = &cont.my_octs[cont.n_assigned]
         cont.n_assigned += 1
         parent.children[cind(ind[0],ind[1],ind[2])] = next
-        next.parent = parent
+        #next.parent = parent
         next.level = parent.level + 1
         for i in range(3):
             next.pos[i] = ind[i] + (parent.pos[i] << 1)

diff -r 40184b22d3e4e15cee3a0b1efa82c495d426f900 -r ae57aa843411ef00c7f66f5c2d3508d388cfced8 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -56,7 +56,7 @@
                     cur.pos[0] = i
                     cur.pos[1] = j
                     cur.pos[2] = k
-                    cur.parent = NULL
+                    #cur.parent = NULL
                     self.root_mesh[i][j][k] = cur
 
     def __dealloc__(self):
@@ -164,7 +164,7 @@
         my_oct.pos[0] = my_oct.pos[1] = my_oct.pos[2] = -1
         my_oct.level = -1
         my_oct.children = NULL
-        my_oct.parent = NULL
+        #my_oct.parent = NULL
         return my_oct
 
     @cython.boundscheck(False)
@@ -222,7 +222,7 @@
                     noct.pos[0] = (o.pos[0] << 1) + i
                     noct.pos[1] = (o.pos[1] << 1) + j
                     noct.pos[2] = (o.pos[2] << 1) + k
-                    noct.parent = o
+                    #noct.parent = o
                     o.children[cind(i,j,k)] = noct
         o.file_ind = self.n_ref + 1
         for i in range(3):

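Without parent pointers an oct can no longer pop up a level during the
neighbor walk, which is why the candidate search above now hits a raise
RuntimeError placeholder. The changeset below starts recovering root
positions from bit-packed keys instead (key |= ind[i] << 20 * (2 - i)); a
sketch of that packing:

    def pack_key(i, j, k):
        # Three root indices, 20 bits apart in one 64-bit key.
        return (i << 40) | (j << 20) | k

    def unpack_key(key):
        mask = (1 << 20) - 1  # the ukey built in the following diff
        return (key >> 40) & mask, (key >> 20) & mask, key & mask

    key = pack_key(5, 9, 130)
    assert unpack_key(key) == (5, 9, 130)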

https://bitbucket.org/yt_analysis/yt/commits/00d2f58f5f57/
Changeset:   00d2f58f5f57
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-24 22:57:02
Summary:     Removing pos, level and parent pointers.
Affected #:  6 files

diff -r ae57aa843411ef00c7f66f5c2d3508d388cfced8 -r 00d2f58f5f57f7e9d9b73bc37376e5f64d697be5 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -38,10 +38,7 @@
                             # added
     np.int64_t domain_ind   # index within the global set of domains
     np.int64_t domain       # (opt) addl int index
-    np.int64_t pos[3]       # position in ints
-    np.int8_t level
     Oct **children          # Up to 8 long
-    #Oct *parent
 
 cdef struct OctKey:
     np.int64_t key

diff -r ae57aa843411ef00c7f66f5c2d3508d388cfced8 -r 00d2f58f5f57f7e9d9b73bc37376e5f64d697be5 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -63,10 +63,8 @@
     n_cont.n_assigned = 0
     for n in range(n_octs):
         oct = &n_cont.my_octs[n]
-        #oct.parent = NULL
         oct.file_ind = oct.domain = -1
         oct.domain_ind = n + n_cont.offset
-        oct.level = -1
         oct.children = NULL
     if prev != NULL:
         prev.next = n_cont
@@ -149,6 +147,7 @@
         cdef int i, j, k, n, vc
         vc = self.partial_coverage
         data.global_index = -1
+        data.level = 0
         cdef np.float64_t pos[3], dds[3]
         # This dds is the oct-width
         for i in range(3):
@@ -161,6 +160,9 @@
                 pos[2] = self.DLE[2] + dds[2]/2.0
                 for k in range(self.nn[2]):
                     if self.root_mesh[i][j][k] == NULL: continue
+                    data.pos[0] = i
+                    data.pos[1] = j
+                    data.pos[2] = k
                     selector.recursively_visit_octs(
                         self.root_mesh[i][j][k],
                         pos, dds, 0, func, data, vc)
@@ -170,9 +172,9 @@
 
     cdef void oct_bounds(self, Oct *o, np.float64_t *corner, np.float64_t *size):
         cdef int i
-        for i in range(3):
-            size[i] = (self.DRE[i] - self.DLE[i]) / (self.nn[i] << o.level)
-            corner[i] = o.pos[i] * size[i] + self.DLE[i]
+        #for i in range(3):
+        #    size[i] = (self.DRE[i] - self.DLE[i]) / (self.nn[i] << o.level)
+        #    corner[i] = o.pos[i] * size[i] + self.DLE[i]
 
     cdef np.int64_t get_domain_offset(self, int domain_id):
         return 0
@@ -241,63 +243,64 @@
         cdef Oct* candidate
         for i in range(27): neighbors[i] = NULL
         nn = 0
-        for ni in range(3):
-            for nj in range(3):
-                for nk in range(3):
-                    if ni == nj == nk == 1:
-                        neighbors[nn] = o
-                        nn += 1
-                        continue
-                    npos[0] = o.pos[0] + (ni - 1)
-                    npos[1] = o.pos[1] + (nj - 1)
-                    npos[2] = o.pos[2] + (nk - 1)
-                    for i in range(3):
-                        # Periodicity
-                        if npos[i] == -1:
-                            npos[i] = (self.nn[i]  << o.level) - 1
-                        elif npos[i] == (self.nn[i] << o.level):
-                            npos[i] = 0
-                        curopos[i] = o.pos[i]
-                        curnpos[i] = npos[i] 
-                    # Now we have our neighbor position and a safe place to
-                    # keep it.  curnpos will be the root index of the neighbor
-                    # at a given level, and npos will be constant.  curopos is
-                    # the candidate root at a level.
-                    candidate = o
-                    while candidate != NULL:
-                        if ((curopos[0] == curnpos[0]) and 
-                            (curopos[1] == curnpos[1]) and
-                            (curopos[2] == curnpos[2])):
-                            break
-                        # This one doesn't meet it, so we pop up a level.
-                        # First we update our positions, then we update our
-                        # candidate.
-                        for i in range(3):
-                            # We strip a digit off the right
-                            curopos[i] = (curopos[i] >> 1)
-                            curnpos[i] = (curnpos[i] >> 1)
-                        # Now we update to the candidate's parent, which should
-                        # have a matching position to curopos[]
-                        # TODO: This has not survived the transition to
-                        # mostly-stateless Octs!
-                        raise RuntimeError
-                        #candidate = candidate.parent
-                    if candidate == NULL:
-                        # Worst case scenario
-                        for i in range(3):
-                            ind[i] = (npos[i] >> (o.level))
-                        candidate = self.root_mesh[ind[0]][ind[1]][ind[2]]
-                    # Now we have the common root, which may be NULL
-                    while candidate.level < o.level:
-                        dl = o.level - (candidate.level + 1)
-                        for i in range(3):
-                            ind[i] = (npos[i] >> dl) & 1
-                        if candidate.children[cind(ind[0],ind[1],ind[2])] \
-                                == NULL:
-                            break
-                        candidate = candidate.children[cind(ind[0],ind[1],ind[2])]
-                    neighbors[nn] = candidate
-                    nn += 1
+        raise RuntimeError
+        #for ni in range(3):
+        #    for nj in range(3):
+        #        for nk in range(3):
+        #            if ni == nj == nk == 1:
+        #                neighbors[nn] = o
+        #                nn += 1
+        #                continue
+        #            npos[0] = o.pos[0] + (ni - 1)
+        #            npos[1] = o.pos[1] + (nj - 1)
+        #            npos[2] = o.pos[2] + (nk - 1)
+        #            for i in range(3):
+        #                # Periodicity
+        #                if npos[i] == -1:
+        #                    npos[i] = (self.nn[i]  << o.level) - 1
+        #                elif npos[i] == (self.nn[i] << o.level):
+        #                    npos[i] = 0
+        #                curopos[i] = o.pos[i]
+        #                curnpos[i] = npos[i] 
+        #            # Now we have our neighbor position and a safe place to
+        #            # keep it.  curnpos will be the root index of the neighbor
+        #            # at a given level, and npos will be constant.  curopos is
+        #            # the candidate root at a level.
+        #            candidate = o
+        #            while candidate != NULL:
+        #                if ((curopos[0] == curnpos[0]) and 
+        #                    (curopos[1] == curnpos[1]) and
+        #                    (curopos[2] == curnpos[2])):
+        #                    break
+        #                # This one doesn't meet it, so we pop up a level.
+        #                # First we update our positions, then we update our
+        #                # candidate.
+        #                for i in range(3):
+        #                    # We strip a digit off the right
+        #                    curopos[i] = (curopos[i] >> 1)
+        #                    curnpos[i] = (curnpos[i] >> 1)
+        #                # Now we update to the candidate's parent, which should
+        #                # have a matching position to curopos[]
+        #                # TODO: This has not survived the transition to
+        #                # mostly-stateless Octs!
+        #                raise RuntimeError
+        #                candidate = candidate.parent
+        #            if candidate == NULL:
+        #                # Worst case scenario
+        #                for i in range(3):
+        #                    ind[i] = (npos[i] >> (o.level))
+        #                candidate = self.root_mesh[ind[0]][ind[1]][ind[2]]
+        #            # Now we have the common root, which may be NULL
+        #            while candidate.level < o.level:
+        #                dl = o.level - (candidate.level + 1)
+        #                for i in range(3):
+        #                    ind[i] = (npos[i] >> dl) & 1
+        #                if candidate.children[cind(ind[0],ind[1],ind[2])] \
+        #                        == NULL:
+        #                    break
+        #                candidate = candidate.children[cind(ind[0],ind[1],ind[2])]
+        #            neighbors[nn] = candidate
+        #            nn += 1
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -314,6 +317,7 @@
         cdef np.float64_t corner[3], size[3]
         bounds = np.zeros((27,6), dtype="float64")
         tnp = 0
+        raise RuntimeError
         for i in range(27):
             self.oct_bounds(neighbors[i], corner, size)
             for ii in range(3):
@@ -504,7 +508,9 @@
                         oct_visitor_function *func,
                         OctVisitorData *data):
         cdef int i, j, k, n, vc
+        cdef np.int64_t key, ukey
         data.global_index = -1
+        data.level = 0
         vc = self.partial_coverage
         cdef np.float64_t pos[3], dds[3]
         # This dds is the oct-width
@@ -512,10 +518,15 @@
             dds[i] = (self.DRE[i] - self.DLE[i]) / self.nn[i]
         # Pos is the center of the octs
         cdef Oct *o
+        ukey = 0
+        for i in range(20):
+            ukey |= (1 << i)
         for i in range(self.num_root):
             o = self.root_nodes[i].node
+            key = self.root_nodes[i].key
             for j in range(3):
-                pos[j] = self.DLE[j] + (o.pos[j] + 0.5) * dds[j]
+                pos[j] = self.DLE[j] + \
+                    ((key >> 20 * (2 - j) & ukey) + 0.5) * dds[j]
             selector.recursively_visit_octs(
                 o, pos, dds, 0, func, data, vc)
 
@@ -551,12 +562,9 @@
             return NULL
         next = &cont.my_octs[cont.n_assigned]
         cont.n_assigned += 1
-        #next.parent = NULL
-        next.level = 0
         cdef np.int64_t key = 0
         cdef OctKey *ikey = &self.root_nodes[self.num_root]
         for i in range(3):
-            next.pos[i] = ind[i]
             key |= ((<np.int64_t>ind[i]) << 20 * (2 - i))
         self.root_nodes[self.num_root].key = key
         self.root_nodes[self.num_root].node = next
@@ -580,10 +588,6 @@
         next = &cont.my_octs[cont.n_assigned]
         cont.n_assigned += 1
         parent.children[cind(ind[0],ind[1],ind[2])] = next
-        #next.parent = parent
-        next.level = parent.level + 1
-        for i in range(3):
-            next.pos[i] = ind[i] + (parent.pos[i] << 1)
         self.nocts += 1
         return next
 
@@ -658,7 +662,6 @@
             # Now we should be at the right level
             cur.domain = curdom
             cur.file_ind = p
-            cur.level = curlevel
         return cont.n_assigned - initial
 
     @cython.boundscheck(False)
@@ -685,9 +688,10 @@
                         for k in range(2):
                             ii = ((k*2)+j)*2+i
                             if mask[o.domain_ind, ii] == 0: continue
-                            if o.level == level:
-                                dest[local_filled] = \
-                                    source[o.file_ind, ii]
+                            # TODO: Uncomment this!
+                            #if o.level == level:
+                            #    dest[local_filled] = \
+                            #        source[o.file_ind, ii]
                             local_filled += 1
         return local_filled
 
@@ -729,7 +733,8 @@
             for n in range(dom.n):
                 o = &dom.my_octs[n]
                 index = o.file_ind-subchunk_offset
-                if o.level != level: continue
+                # TODO: Uncomment this!
+                #if o.level != level: continue
                 if index < 0: continue
                 if index >= subchunk_max: 
                     #if we hit the end of the array,
@@ -759,6 +764,9 @@
         #As a result, source is 3D grid with 8 times as many
         #elements as the number of octs on this level in this domain
         #and with the shape of an equal-sided cube
+        #
+        # TODO: Convert to a recursive function.
+        # Note that the .pos[0] etc calls need to be uncommented.
         cdef np.ndarray[np.float64_t, ndim=3] source
         cdef np.ndarray[np.float64_t, ndim=1] dest
         cdef OctAllocationContainer *dom = self.domains[domain - 1]
@@ -774,15 +782,16 @@
             source = source_fields[key]
             for n in range(dom.n):
                 o = &dom.my_octs[n]
-                if o.level != level: continue
+                # TODO: Uncomment this!
+                #if o.level != level: continue
                 for i in range(2):
                     for j in range(2):
                         for k in range(2):
                             ii = ((k*2)+j)*2+i
                             if mask[o.domain_ind, ii] == 0: continue
-                            ox = (o.pos[0] << 1) + i
-                            oy = (o.pos[1] << 1) + j
-                            oz = (o.pos[2] << 1) + k
+                            #ox = (o.pos[0] << 1) + i
+                            #oy = (o.pos[1] << 1) + j
+                            #oz = (o.pos[2] << 1) + k
                             dest[local_filled + offset] = source[ox,oy,oz]
                             local_filled += 1
         return local_filled
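
The commented-out neighbor walk in this file wraps neighbor indices periodically at each refinement level: an index of -1 wraps to the last slot, and one past the end wraps back to 0. A minimal pure-Python sketch of that wrap, assuming nn root octs per axis (the function name is illustrative):

    def wrap_periodic(npos, nn, level):
        # The valid index range at `level` is nn << level; wrap just outside it.
        extent = nn << level
        if npos == -1:
            return extent - 1
        if npos == extent:
            return 0
        return npos

    assert wrap_periodic(-1, 4, 2) == 15
    assert wrap_periodic(16, 4, 2) == 0
    assert wrap_periodic(7, 4, 2) == 7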

diff -r ae57aa843411ef00c7f66f5c2d3508d388cfced8 -r 00d2f58f5f57f7e9d9b73bc37376e5f64d697be5 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -102,13 +102,13 @@
     cdef np.int64_t *coords = <np.int64_t*> data.array
     cdef int i
     for i in range(3):
-        coords[data.index * 3 + i] = (o.pos[i] << 1) + data.ind[i]
+        coords[data.index * 3 + i] = (data.pos[i] << 1) + data.ind[i]
     data.index += 1
 
 cdef void ires_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
     if selected == 0: return
     cdef np.int64_t *ires = <np.int64_t*> data.array
-    ires[data.index] = o.level
+    ires[data.index] = data.level
     data.index += 1
 
 @cython.cdivision(True)
@@ -120,9 +120,9 @@
     cdef np.float64_t *fcoords = <np.float64_t*> data.array
     cdef int i
     cdef np.float64_t c, dx 
-    dx = 1.0 / (2 << o.level)
+    dx = 1.0 / (2 << data.level)
     for i in range(3):
-        c = <np.float64_t> ((o.pos[i] << 1 ) + data.ind[i]) 
+        c = <np.float64_t> ((data.pos[i] << 1 ) + data.ind[i]) 
         fcoords[data.index * 3 + i] = (c + 0.5) * dx
     data.index += 1
 
@@ -135,7 +135,7 @@
     cdef np.float64_t *fwidth = <np.float64_t*> data.array
     cdef int i
     cdef np.float64_t dx 
-    dx = 1.0 / (2 << o.level)
+    dx = 1.0 / (2 << data.level)
     for i in range(3):
         fwidth[data.index * 3 + i] = dx
     data.index += 1
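
The visitor changes above read the level and integer position from OctVisitorData instead of from the Oct itself. A pure-Python sketch of the coordinate arithmetic the fcoords visitor now performs (names here are illustrative, not the yt API):

    def cell_fcoords(oct_pos, cell_ind, level):
        # Each oct holds two cells per axis, so the unitary cell width at
        # this level is 1 / (2 << level), matching dx in the visitor.
        dx = 1.0 / (2 << level)
        return tuple(((p << 1) + i + 0.5) * dx
                     for p, i in zip(oct_pos, cell_ind))

    # A level-0 oct at integer position (0, 0, 0), cell index (1, 0, 1):
    assert cell_fcoords((0, 0, 0), (1, 0, 1), 0) == (0.75, 0.25, 0.75)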

diff -r ae57aa843411ef00c7f66f5c2d3508d388cfced8 -r 00d2f58f5f57f7e9d9b73bc37376e5f64d697be5 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -52,11 +52,6 @@
             for j in range(self.nn[1]):
                 for k in range(self.nn[2]):
                     cur = self.allocate_oct()
-                    cur.level = 0
-                    cur.pos[0] = i
-                    cur.pos[1] = j
-                    cur.pos[2] = k
-                    #cur.parent = NULL
                     self.root_mesh[i][j][k] = cur
 
     def __dealloc__(self):
@@ -127,25 +122,27 @@
         for i in range(self.nn[0]):
             for j in range(self.nn[1]):
                 for k in range(self.nn[2]):
-                    self.visit_assign(self.root_mesh[i][j][k], &lpos)
+                    self.visit_assign(self.root_mesh[i][j][k], &lpos,
+                                      0, &max_level)
         assert(lpos == self.nocts)
         for i in range(self.nocts):
             self.oct_list[i].domain_ind = i
             self.oct_list[i].domain = 0
             self.oct_list[i].file_ind = -1
-            max_level = imax(max_level, self.oct_list[i].level)
         self.max_level = max_level
 
-    cdef visit_assign(self, Oct *o, np.int64_t *lpos):
+    cdef visit_assign(self, Oct *o, np.int64_t *lpos, int level, int *max_level):
         cdef int i, j, k
         self.oct_list[lpos[0]] = o
         lpos[0] += 1
+        max_level[0] = imax(max_level[0], level)
         for i in range(2):
             for j in range(2):
                 for k in range(2):
                     if o.children != NULL \
                        and o.children[cind(i,j,k)] != NULL:
-                        self.visit_assign(o.children[cind(i,j,k)], lpos)
+                        self.visit_assign(o.children[cind(i,j,k)], lpos,
+                                level + 1, max_level)
         return
 
     cdef np.int64_t get_domain_offset(self, int domain_id):
@@ -161,10 +158,7 @@
         my_oct.domain = -1
         my_oct.file_ind = 0
         my_oct.domain_ind = self.nocts - 1
-        my_oct.pos[0] = my_oct.pos[1] = my_oct.pos[2] = -1
-        my_oct.level = -1
         my_oct.children = NULL
-        #my_oct.parent = NULL
         return my_oct
 
     @cython.boundscheck(False)
@@ -195,8 +189,8 @@
                     ind[i] = (index >> ((ORDER_MAX - level)*3 + (2 - i))) & 1
                 if cur.children == NULL or \
                    cur.children[cind(ind[0],ind[1],ind[2])] == NULL:
-                    cur = self.refine_oct(cur, index)
-                    self.filter_particles(cur, data, p)
+                    cur = self.refine_oct(cur, index, level)
+                    self.filter_particles(cur, data, p, level + 1)
                 else:
                     cur = cur.children[cind(ind[0],ind[1],ind[2])]
             cur.file_ind += 1
@@ -204,7 +198,7 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef Oct *refine_oct(self, Oct *o, np.uint64_t index):
+    cdef Oct *refine_oct(self, Oct *o, np.uint64_t index, int level):
         #Allocate and initialize child octs
         #Attach particles to child octs
         #Remove particles from this oct entirely
@@ -218,31 +212,27 @@
                     noct = self.allocate_oct()
                     noct.domain = o.domain
                     noct.file_ind = 0
-                    noct.level = o.level + 1
-                    noct.pos[0] = (o.pos[0] << 1) + i
-                    noct.pos[1] = (o.pos[1] << 1) + j
-                    noct.pos[2] = (o.pos[2] << 1) + k
-                    #noct.parent = o
                     o.children[cind(i,j,k)] = noct
         o.file_ind = self.n_ref + 1
         for i in range(3):
-            ind[i] = (index >> ((ORDER_MAX - (o.level + 1))*3 + (2 - i))) & 1
+            ind[i] = (index >> ((ORDER_MAX - (level + 1))*3 + (2 - i))) & 1
         noct = o.children[cind(ind[0],ind[1],ind[2])]
         return noct
 
-    cdef void filter_particles(self, Oct *o, np.uint64_t *data, np.int64_t p):
+    cdef void filter_particles(self, Oct *o, np.uint64_t *data, np.int64_t p,
+                               int level):
         # Now we look at the last nref particles to decide where they go.
         cdef int n = imin(p, self.n_ref)
         cdef np.uint64_t *arr = data + imax(p - self.n_ref, 0)
         # Now we figure out our prefix, which is the oct address at this level.
         # As long as we're actually in Morton order, we do not need to worry
         # about *any* of the other children of the oct.
-        prefix1 = data[p] >> (ORDER_MAX - o.level)*3
+        prefix1 = data[p] >> (ORDER_MAX - level)*3
         for i in range(n):
-            prefix2 = arr[i] >> (ORDER_MAX - o.level)*3
+            prefix2 = arr[i] >> (ORDER_MAX - level)*3
             if (prefix1 == prefix2):
                 o.file_ind += 1
-        #print ind[0], ind[1], ind[2], o.file_ind, o.level
+        #print ind[0], ind[1], ind[2], o.file_ind, level
 
     def recursively_count(self):
         #Visit every cell, accumulate the # of cells per level
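
filter_particles now counts how many of the trailing n_ref particles share the oct's Morton prefix at the given level; each level consumes three bits of the Morton key (one per axis). A pure-Python sketch of the same prefix test (the keys below are arbitrary; ORDER_MAX mirrors the container's constant):

    ORDER_MAX = 20  # Morton bits per axis

    def count_prefix_matches(keys, p, n_ref, level):
        # Strip everything below `level`: 3 bits per level.
        shift = (ORDER_MAX - level) * 3
        prefix = keys[p] >> shift
        return sum(1 for k in keys[max(p - n_ref, 0):p]
                   if (k >> shift) == prefix)

    assert count_prefix_matches([0b111000, 0b111001, 0b000111], 1, 2, 19) == 1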

diff -r ae57aa843411ef00c7f66f5c2d3508d388cfced8 -r 00d2f58f5f57f7e9d9b73bc37376e5f64d697be5 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -30,10 +30,12 @@
     np.uint64_t index
     np.uint64_t last
     np.int64_t global_index
-    int ind[3]
+    np.int64_t pos[3]       # position in ints
+    np.uint8_t ind[3]              # cell position
     void *array
     int dims
-    int domain
+    np.int32_t domain
+    np.int8_t level
 
 ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor,
                                    np.uint8_t selected)

diff -r ae57aa843411ef00c7f66f5c2d3508d388cfced8 -r 00d2f58f5f57f7e9d9b73bc37376e5f64d697be5 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -170,7 +170,7 @@
         # we visit *this* oct, then we make a second pass to check any child
         # octs.
         cdef np.float64_t LE[3], RE[3], sdds[3], spos[3]
-        cdef int i, j, k, res, ii
+        cdef int i, j, k, res, ii, mi
         cdef Oct *ch
         cdef np.uint8_t selected
         # Remember that pos is the *center* of the oct, and dds is the oct
@@ -219,9 +219,17 @@
                         if root.children != NULL:
                             ch = root.children[cind(i,j,k)]
                         if iter == 1 and next_level == 1 and ch != NULL:
+                            data.pos[0] = (data.pos[0] << 1) + i
+                            data.pos[1] = (data.pos[1] << 1) + j
+                            data.pos[2] = (data.pos[2] << 1) + k
+                            data.level += 1
                             self.recursively_visit_octs(
                                 ch, spos, sdds, level + 1, func, data,
                                 visit_covered)
+                            data.pos[0] = (data.pos[0] >> 1)
+                            data.pos[1] = (data.pos[1] >> 1)
+                            data.pos[2] = (data.pos[2] >> 1)
+                            data.level -= 1
                         elif this_level == 1:
                             selected = self.select_cell(spos, sdds, eterm)
                             data.global_index += increment
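
With positions gone from the Oct struct, the selector threads an integer position through the recursion: it shifts the child index in before descending and shifts it back out afterwards, exactly as the paired << 1 / >> 1 lines above do. A sketch of the same push/pop discipline with explicit mutable state (illustrative, not the Cython signature):

    def visit_octs(node, state, visitor):
        visitor(node, tuple(state["pos"]), state["level"])
        if node.get("children") is None:
            return
        for i in range(2):
            for j in range(2):
                for k in range(2):
                    child = node["children"].get((i, j, k))
                    if child is None:
                        continue
                    # Push: one more bit of position per axis.
                    for ax, b in enumerate((i, j, k)):
                        state["pos"][ax] = (state["pos"][ax] << 1) + b
                    state["level"] += 1
                    visit_octs(child, state, visitor)
                    # Pop: restore the parent's position and level.
                    for ax in range(3):
                        state["pos"][ax] >>= 1
                    state["level"] -= 1

    root = {"children": {(1, 0, 1): {"children": None}}}
    visit_octs(root, {"pos": [0, 0, 0], "level": 0},
               lambda o, pos, lvl: print(pos, lvl))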


https://bitbucket.org/yt_analysis/yt/commits/aef64c0bb95c/
Changeset:   aef64c0bb95c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-25 19:37:11
Summary:     Switching order of key bit shifting.
Affected #:  1 file

diff -r 00d2f58f5f57f7e9d9b73bc37376e5f64d697be5 -r aef64c0bb95c9725e36da890bedceb5d79972691 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -525,8 +525,10 @@
             o = self.root_nodes[i].node
             key = self.root_nodes[i].key
             for j in range(3):
-                pos[j] = self.DLE[j] + \
-                    ((key >> 20 * (2 - j) & ukey) + 0.5) * dds[j]
+                data.pos[2 - j] = (key & ukey)
+                key = key >> 20
+            for j in range(3):
+                pos[j] = self.DLE[j] + (data.pos[j] + 0.5) * dds[j]
             selector.recursively_visit_octs(
                 o, pos, dds, 0, func, data, vc)
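
The root key packs the three integer root indices into one 64-bit value, 20 bits per axis; this change unpacks it by repeatedly masking the low 20 bits instead of computing a per-axis shift. A standalone sketch of the matching pack/unpack pair:

    BITS = 20
    MASK = (1 << BITS) - 1  # the `ukey` mask the container builds bit by bit

    def pack_key(ind):
        key = 0
        for i in range(3):
            key |= ind[i] << (BITS * (2 - i))
        return key

    def unpack_key(key):
        pos = [0, 0, 0]
        for j in range(3):
            pos[2 - j] = key & MASK  # the low bits hold the last axis
            key >>= BITS
        return pos

    assert unpack_key(pack_key([5, 9, 1])) == [5, 9, 1]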
 


https://bitbucket.org/yt_analysis/yt/commits/c693384fdeb0/
Changeset:   c693384fdeb0
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-25 19:53:03
Summary:     Re-arranging Oct definitions.
Affected #:  4 files

diff -r aef64c0bb95c9725e36da890bedceb5d79972691 -r c693384fdeb083faf9429d687b928c371f970477 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -25,20 +25,13 @@
 
 cimport numpy as np
 from fp_utils cimport *
-from selection_routines cimport SelectorObject, \
-    OctVisitorData, oct_visitor_function
-from oct_visitors cimport *
+from selection_routines cimport SelectorObject
+from oct_visitors cimport \
+    OctVisitorData, oct_visitor_function, Oct
 from libc.stdlib cimport bsearch, qsort
 
 cdef int ORDER_MAX
 
-cdef struct Oct
-cdef struct Oct:
-    np.int64_t file_ind     # index with respect to the order in which it was
-                            # added
-    np.int64_t domain_ind   # index within the global set of domains
-    np.int64_t domain       # (opt) addl int index
-    Oct **children          # Up to 8 long
 
 cdef struct OctKey:
     np.int64_t key

diff -r aef64c0bb95c9725e36da890bedceb5d79972691 -r c693384fdeb083faf9429d687b928c371f970477 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -24,10 +24,28 @@
 """
 
 cimport numpy as np
-from selection_routines cimport \
-    OctVisitorData, oct_visitor_function
-from oct_container cimport \
-    Oct
+
+cdef struct Oct
+cdef struct Oct:
+    np.int64_t file_ind     # index with respect to the order in which it was
+                            # added
+    np.int64_t domain_ind   # index within the global set of domains
+    np.int64_t domain       # (opt) addl int index
+    Oct **children          # Up to 8 long
+
+cdef struct OctVisitorData:
+    np.uint64_t index
+    np.uint64_t last
+    np.int64_t global_index
+    np.int64_t pos[3]       # position in ints
+    np.uint8_t ind[3]              # cell position
+    void *array
+    int dims
+    np.int32_t domain
+    np.int8_t level
+
+ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor,
+                                   np.uint8_t selected)
 
 cdef oct_visitor_function count_total_octs
 cdef oct_visitor_function count_total_cells

diff -r aef64c0bb95c9725e36da890bedceb5d79972691 -r c693384fdeb083faf9429d687b928c371f970477 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -24,21 +24,8 @@
 """
 
 cimport numpy as np
-
-cdef struct Oct
-cdef struct OctVisitorData:
-    np.uint64_t index
-    np.uint64_t last
-    np.int64_t global_index
-    np.int64_t pos[3]       # position in ints
-    np.uint8_t ind[3]              # cell position
-    void *array
-    int dims
-    np.int32_t domain
-    np.int8_t level
-
-ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor,
-                                   np.uint8_t selected)
+from oct_visitors cimport Oct, OctVisitorData, \
+    oct_visitor_function
 
 cdef class SelectorObject:
     cdef public np.int32_t min_level

diff -r aef64c0bb95c9725e36da890bedceb5d79972691 -r c693384fdeb083faf9429d687b928c371f970477 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -170,7 +170,7 @@
         # we visit *this* oct, then we make a second pass to check any child
         # octs.
         cdef np.float64_t LE[3], RE[3], sdds[3], spos[3]
-        cdef int i, j, k, res, ii, mi
+        cdef int i, j, k, res, mi
         cdef Oct *ch
         cdef np.uint8_t selected
         # Remember that pos is the *center* of the oct, and dds is the oct
@@ -214,7 +214,6 @@
                 for j in range(2):
                     spos[2] = pos[2] - sdds[2]/2.0
                     for k in range(2):
-                        ii = ((k*2)+j)*2+i
                         ch = NULL
                         if root.children != NULL:
                             ch = root.children[cind(i,j,k)]


https://bitbucket.org/yt_analysis/yt/commits/d7db8bf1f2a1/
Changeset:   d7db8bf1f2a1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-25 21:32:51
Summary:     This fixes an error that showed up at -O0 but not at -O3: data.last was uninitialized.
Affected #:  2 files

diff -r c693384fdeb083faf9429d687b928c371f970477 -r d7db8bf1f2a13d7ec9550662789d7e677048c18b yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -331,7 +331,7 @@
     def icoords(self, SelectorObject selector, np.uint64_t num_cells = -1,
                 int domain_id = -1):
         if num_cells == -1:
-            num_cells = selector.count_octs(self, domain_id) * 8
+            num_cells = selector.count_octs(self, domain_id)
         cdef np.ndarray[np.int64_t, ndim=2] coords
         coords = np.empty((num_cells, 3), dtype="int64")
         cdef OctVisitorData data
@@ -347,7 +347,7 @@
     def ires(self, SelectorObject selector, np.uint64_t num_cells = -1,
                 int domain_id = -1):
         if num_cells == -1:
-            num_cells = selector.count_octs(self, domain_id) * 8
+            num_cells = selector.count_octs(self, domain_id)
         #Return the 'resolution' of each cell; ie the level
         cdef np.ndarray[np.int64_t, ndim=1] res
         res = np.empty(num_cells, dtype="int64")
@@ -364,7 +364,7 @@
     def fwidth(self, SelectorObject selector, np.uint64_t num_cells = -1,
                 int domain_id = -1):
         if num_cells == -1:
-            num_cells = selector.count_octs(self, domain_id) * 8
+            num_cells = selector.count_octs(self, domain_id)
         cdef np.ndarray[np.float64_t, ndim=2] fwidth
         fwidth = np.empty((num_cells, 3), dtype="float64")
         cdef OctVisitorData data
@@ -384,7 +384,7 @@
     def fcoords(self, SelectorObject selector, np.uint64_t num_cells = -1,
                 int domain_id = -1):
         if num_cells == -1:
-            num_cells = selector.count_octs(self, domain_id) * 8
+            num_cells = selector.count_octs(self, domain_id)
         #Return the floating point unitary position of every cell
         cdef np.ndarray[np.float64_t, ndim=2] coords
         coords = np.empty((num_cells, 3), dtype="float64")

diff -r c693384fdeb083faf9429d687b928c371f970477 -r d7db8bf1f2a13d7ec9550662789d7e677048c18b yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -145,9 +145,10 @@
     def count_octs(self, OctreeContainer octree, int domain_id = -1):
         cdef OctVisitorData data
         data.index = 0
+        data.last = -1
         data.domain = domain_id
         octree.visit_all_octs(self, oct_visitors.count_total_octs, &data)
-        return data.index
+        return data.index * 8
 
     def count_oct_cells(self, OctreeContainer octree, int domain_id = -1):
         cdef OctVisitorData data


https://bitbucket.org/yt_analysis/yt/commits/96ff7315a44a/
Changeset:   96ff7315a44a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-25 21:41:01
Summary:     Add oct-count caching.
Affected #:  2 files

diff -r d7db8bf1f2a13d7ec9550662789d7e677048c18b -r 96ff7315a44a131c07cabb0580a212ff198fe556 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -47,6 +47,7 @@
     _con_args = ('base_region', 'domain', 'pf')
     _container_fields = ("dx", "dy", "dz")
     _domain_offset = 0
+    _num_octs = -1
 
     def __init__(self, base_region, domain, pf):
         self.field_data = YTFieldData()
@@ -123,25 +124,33 @@
         return np.asfortranarray(vals)
 
     def select_icoords(self, dobj):
-        d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id)
+        d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id,
+                                     num_octs = self._num_octs)
+        self._num_octs = d.shape[0]
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
                                             domain_id = self.domain_id)
         return tr
 
     def select_fcoords(self, dobj):
-        d = self.oct_handler.fcoords(self.selector, domain_id = self.domain_id)
+        d = self.oct_handler.fcoords(self.selector, domain_id = self.domain_id,
+                                     num_octs = self._num_octs)
+        self._num_octs = d.shape[0]
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
                                             domain_id = self.domain_id)
         return tr
 
     def select_fwidth(self, dobj):
-        d = self.oct_handler.fwidth(self.selector, domain_id = self.domain_id)
+        d = self.oct_handler.fwidth(self.selector, domain_id = self.domain_id,
+                                  num_octs = self._num_octs)
+        self._num_octs = d.shape[0]
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
                                             domain_id = self.domain_id)
         return tr
 
     def select_ires(self, dobj):
-        d = self.oct_handler.ires(self.selector, domain_id = self.domain_id)
+        d = self.oct_handler.ires(self.selector, domain_id = self.domain_id,
+                                  num_octs = self._num_octs)
+        self._num_octs = d.shape[0]
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 1,
                                             domain_id = self.domain_id)
         return tr

diff -r d7db8bf1f2a13d7ec9550662789d7e677048c18b -r 96ff7315a44a131c07cabb0580a212ff198fe556 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -328,12 +328,12 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def icoords(self, SelectorObject selector, np.uint64_t num_cells = -1,
+    def icoords(self, SelectorObject selector, np.int64_t num_octs = -1,
                 int domain_id = -1):
-        if num_cells == -1:
-            num_cells = selector.count_octs(self, domain_id)
+        if num_octs == -1:
+            num_octs = selector.count_octs(self, domain_id)
         cdef np.ndarray[np.int64_t, ndim=2] coords
-        coords = np.empty((num_cells, 3), dtype="int64")
+        coords = np.empty((num_octs, 3), dtype="int64")
         cdef OctVisitorData data
         data.array = <void *> coords.data
         data.index = 0
@@ -344,13 +344,13 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def ires(self, SelectorObject selector, np.uint64_t num_cells = -1,
+    def ires(self, SelectorObject selector, np.int64_t num_octs = -1,
                 int domain_id = -1):
-        if num_cells == -1:
-            num_cells = selector.count_octs(self, domain_id)
+        if num_octs == -1:
+            num_octs = selector.count_octs(self, domain_id)
         #Return the 'resolution' of each cell; ie the level
         cdef np.ndarray[np.int64_t, ndim=1] res
-        res = np.empty(num_cells, dtype="int64")
+        res = np.empty(num_octs, dtype="int64")
         cdef OctVisitorData data
         data.array = <void *> res.data
         data.index = 0
@@ -361,12 +361,12 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def fwidth(self, SelectorObject selector, np.uint64_t num_cells = -1,
+    def fwidth(self, SelectorObject selector, np.int64_t num_octs = -1,
                 int domain_id = -1):
-        if num_cells == -1:
-            num_cells = selector.count_octs(self, domain_id)
+        if num_octs == -1:
+            num_octs = selector.count_octs(self, domain_id)
         cdef np.ndarray[np.float64_t, ndim=2] fwidth
-        fwidth = np.empty((num_cells, 3), dtype="float64")
+        fwidth = np.empty((num_octs, 3), dtype="float64")
         cdef OctVisitorData data
         data.array = <void *> fwidth.data
         data.index = 0
@@ -381,13 +381,13 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def fcoords(self, SelectorObject selector, np.uint64_t num_cells = -1,
+    def fcoords(self, SelectorObject selector, np.int64_t num_octs = -1,
                 int domain_id = -1):
-        if num_cells == -1:
-            num_cells = selector.count_octs(self, domain_id)
+        if num_octs == -1:
+            num_octs = selector.count_octs(self, domain_id)
         #Return the floating point unitary position of every cell
         cdef np.ndarray[np.float64_t, ndim=2] coords
-        coords = np.empty((num_cells, 3), dtype="float64")
+        coords = np.empty((num_octs, 3), dtype="float64")
         cdef OctVisitorData data
         data.array = <void *> coords.data
         data.index = 0
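
The caching added here means count_octs runs once per subset rather than once per geometry query: the first call records the result array's size, and later calls pass it back in to skip the count. The shape of the memoization, as a sketch (illustrative names, not the yt API):

    import numpy as np

    class CachedCount:
        def __init__(self, count_fn):
            self.count_fn = count_fn
            self._num_octs = -1          # sentinel: not yet counted

        def icoords(self):
            if self._num_octs == -1:     # only the first call pays
                self._num_octs = self.count_fn()
            return np.empty((self._num_octs, 3), dtype="int64")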


https://bitbucket.org/yt_analysis/yt/commits/d17386ccee37/
Changeset:   d17386ccee37
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-26 14:41:22
Summary:     Switching count_octs to mean what it says and moving domain_ind into the base class.
Affected #:  3 files

diff -r 96ff7315a44a131c07cabb0580a212ff198fe556 -r d17386ccee37a9818ecd6ce25b04f66bb80983f3 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -126,7 +126,7 @@
     def select_icoords(self, dobj):
         d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id,
                                      num_octs = self._num_octs)
-        self._num_octs = d.shape[0]
+        self._num_octs = d.shape[0] / 8
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
                                             domain_id = self.domain_id)
         return tr
@@ -134,7 +134,7 @@
     def select_fcoords(self, dobj):
         d = self.oct_handler.fcoords(self.selector, domain_id = self.domain_id,
                                      num_octs = self._num_octs)
-        self._num_octs = d.shape[0]
+        self._num_octs = d.shape[0] / 8
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
                                             domain_id = self.domain_id)
         return tr
@@ -142,7 +142,7 @@
     def select_fwidth(self, dobj):
         d = self.oct_handler.fwidth(self.selector, domain_id = self.domain_id,
                                   num_octs = self._num_octs)
-        self._num_octs = d.shape[0]
+        self._num_octs = d.shape[0] / 8
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
                                             domain_id = self.domain_id)
         return tr
@@ -150,7 +150,7 @@
     def select_ires(self, dobj):
         d = self.oct_handler.ires(self.selector, domain_id = self.domain_id,
                                   num_octs = self._num_octs)
-        self._num_octs = d.shape[0]
+        self._num_octs = d.shape[0] / 8
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 1,
                                             domain_id = self.domain_id)
         return tr

diff -r 96ff7315a44a131c07cabb0580a212ff198fe556 -r d17386ccee37a9818ecd6ce25b04f66bb80983f3 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -333,7 +333,7 @@
         if num_octs == -1:
             num_octs = selector.count_octs(self, domain_id)
         cdef np.ndarray[np.int64_t, ndim=2] coords
-        coords = np.empty((num_octs, 3), dtype="int64")
+        coords = np.empty((num_octs * 8, 3), dtype="int64")
         cdef OctVisitorData data
         data.array = <void *> coords.data
         data.index = 0
@@ -350,7 +350,7 @@
             num_octs = selector.count_octs(self, domain_id)
         #Return the 'resolution' of each cell; ie the level
         cdef np.ndarray[np.int64_t, ndim=1] res
-        res = np.empty(num_octs, dtype="int64")
+        res = np.empty(num_octs * 8, dtype="int64")
         cdef OctVisitorData data
         data.array = <void *> res.data
         data.index = 0
@@ -366,7 +366,7 @@
         if num_octs == -1:
             num_octs = selector.count_octs(self, domain_id)
         cdef np.ndarray[np.float64_t, ndim=2] fwidth
-        fwidth = np.empty((num_octs, 3), dtype="float64")
+        fwidth = np.empty((num_octs * 8, 3), dtype="float64")
         cdef OctVisitorData data
         data.array = <void *> fwidth.data
         data.index = 0
@@ -387,7 +387,7 @@
             num_octs = selector.count_octs(self, domain_id)
         #Return the floating point unitary position of every cell
         cdef np.ndarray[np.float64_t, ndim=2] coords
-        coords = np.empty((num_octs, 3), dtype="float64")
+        coords = np.empty((num_octs * 8, 3), dtype="float64")
         cdef OctVisitorData data
         data.array = <void *> coords.data
         data.index = 0
@@ -452,6 +452,18 @@
             return dest
         return data.index - offset
 
+    def domain_ind(self, selector, int domain_id = -1):
+        cdef np.ndarray[np.int64_t, ndim=1] ind
+        # Here's where we grab the masked items.
+        ind = np.zeros(self.nocts, 'int64') - 1
+        cdef OctVisitorData data
+        data.domain = domain_id
+        data.array = ind.data
+        data.index = 0
+        data.last = -1
+        self.visit_all_octs(selector, oct_visitors.index_octs, &data)
+        return ind
+
 cdef int root_node_compare(void *a, void *b) nogil:
     cdef OctKey *ao, *bo
     ao = <OctKey *>a
@@ -697,18 +709,6 @@
                             local_filled += 1
         return local_filled
 
-    def domain_ind(self, selector, int domain_id = -1):
-        cdef np.ndarray[np.int64_t, ndim=1] ind
-        # Here's where we grab the masked items.
-        ind = np.zeros(self.nocts, 'int64') - 1
-        cdef OctVisitorData data
-        data.domain = domain_id
-        data.array = ind.data
-        data.index = 0
-        data.last = -1
-        self.visit_all_octs(selector, oct_visitors.index_octs, &data)
-        return ind
-
 cdef class ARTOctreeContainer(RAMSESOctreeContainer):
 
     @cython.boundscheck(True)

diff -r 96ff7315a44a131c07cabb0580a212ff198fe556 -r d17386ccee37a9818ecd6ce25b04f66bb80983f3 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -148,7 +148,7 @@
         data.last = -1
         data.domain = domain_id
         octree.visit_all_octs(self, oct_visitors.count_total_octs, &data)
-        return data.index * 8
+        return data.index
 
     def count_oct_cells(self, OctreeContainer octree, int domain_id = -1):
         cdef OctVisitorData data
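
After this change the accounting is consistent: count_octs counts octs, the allocators multiply by 8 to size the per-cell arrays, and select_icoords divides the returned shape by 8 to recover the oct count it caches. In miniature:

    num_octs = 12
    num_cells = num_octs * 8           # 2**3 cells per oct
    assert num_cells // 8 == num_octs  # what the subset caches back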


https://bitbucket.org/yt_analysis/yt/commits/08d4ff8c0204/
Changeset:   08d4ff8c0204
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-26 14:47:15
Summary:     This fixes problems with particle code depositions.
Affected #:  1 file

diff -r d17386ccee37a9818ecd6ce25b04f66bb80983f3 -r 08d4ff8c02043994d944f8bcd36f03c1d38aca45 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -94,7 +94,8 @@
             # RAMSES, where we *will* miss particles that live in ghost
             # regions on other processors.  Addressing this is on the TODO
             # list.
-            if oct == NULL or oct.domain != domain_id: continue
+            if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
+                continue
             # Note that this has to be our local index, not our in-file index.
             offset = dom_ind[oct.domain_ind - moff] * 8
             if offset < 0: continue
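
The new guard treats a non-positive domain_id as a wildcard: only a positive id filters octs by domain, which is what lets particle codes deposit across all domains. In sketch form:

    def accept_oct(oct_domain, domain_id):
        # domain_id <= 0 matches everything; a positive id must match exactly.
        return not (domain_id > 0 and oct_domain != domain_id)

    assert accept_oct(3, -1) and accept_oct(3, 3) and not accept_oct(3, 4)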


https://bitbucket.org/yt_analysis/yt/commits/8ced18e7c9ac/
Changeset:   8ced18e7c9ac
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-06-14 08:30:15
Summary:     Taking a whack at #589.  Still needs a little bit of work to interface with the cosmology fields.
Affected #:  3 files

diff -r 266b7da460ae2be60f36d5713ec6676ce46e9c7c -r 8ced18e7c9aca80531343bf5f2cdf6b196899bc7 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -420,8 +420,8 @@
 def _Convert_Overdensity(data):
     return 1.0 / (rho_crit_now * data.pf.hubble_constant**2 * 
                 (1+data.pf.current_redshift)**3)
-add_field("Overdensity",function=_Matter_Density,
-          convert_function=_Convert_Overdensity, units=r"")
+#add_field("Overdensity",function=_Matter_Density,
+#          convert_function=_Convert_Overdensity, units=r"")
 
 # This is (rho_total - <rho_total>) / <rho_total>.
 def _DensityPerturbation(field, data):
@@ -463,9 +463,9 @@
     return (((DL * DLS) / DS) * (1.5e14 * data.pf.omega_matter * 
                                 (data.pf.hubble_constant / speed_of_light_cgs)**2 *
                                 (1 + data.pf.current_redshift)))
-add_field("WeakLensingConvergence", function=_DensityPerturbation, 
-          convert_function=_convertConvergence, 
-          projection_conversion='mpccm')
+#add_field("WeakLensingConvergence", function=_DensityPerturbation, 
+#          convert_function=_convertConvergence, 
+#          projection_conversion='mpccm')
 
 def _CellVolume(field, data):
     if data['dx'].size == 1:
@@ -950,10 +950,10 @@
 def _pdensity(field, data):
     blank = np.zeros(data.ActiveDimensions, dtype='float64')
     if data["particle_position_x"].size == 0: return blank
-    CICDeposit_3(data["particle_position_x"].astype(np.float64),
-                 data["particle_position_y"].astype(np.float64),
-                 data["particle_position_z"].astype(np.float64),
-                 data["ParticleMass"],
+    CICDeposit_3(data["particle_position_x"].astype(np.float64).ravel(),
+                 data["particle_position_y"].astype(np.float64).ravel(),
+                 data["particle_position_z"].astype(np.float64).ravel(),
+                 data["ParticleMass"].ravel(),
                  data["particle_position_x"].size,
                  blank, np.array(data.LeftEdge).astype(np.float64),
                  np.array(data.ActiveDimensions).astype(np.int32),

diff -r 266b7da460ae2be60f36d5713ec6676ce46e9c7c -r 8ced18e7c9aca80531343bf5f2cdf6b196899bc7 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -365,9 +365,9 @@
     if data["particle_position_x"].size == 0: return blank
     filter = data['creation_time'] > 0.0
     if not filter.any(): return blank
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
-                           data["particle_position_y"][filter].astype(np.float64),
-                           data["particle_position_z"][filter].astype(np.float64),
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64).ravel(),
+                           data["particle_position_y"][filter].astype(np.float64).ravel(),
+                           data["particle_position_z"][filter].astype(np.float64).ravel(),
                            data["particle_mass"][filter],
                            np.int64(np.where(filter)[0].size),
                            blank, np.array(data.LeftEdge).astype(np.float64),
@@ -387,10 +387,10 @@
     else:
         filter = Ellipsis
         num = data["particle_position_x"].size
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
-                           data["particle_position_y"][filter].astype(np.float64),
-                           data["particle_position_z"][filter].astype(np.float64),
-                           data["particle_mass"][filter],
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64).ravel(),
+                           data["particle_position_y"][filter].astype(np.float64).ravel(),
+                           data["particle_position_z"][filter].astype(np.float64).ravel(),
+                           data["particle_mass"][filter].ravel(),
                            num,
                            blank, np.array(data.LeftEdge).astype(np.float64),
                            np.array(data.ActiveDimensions).astype(np.int32), 
@@ -408,10 +408,10 @@
     top = np.zeros(data.ActiveDimensions, dtype='float64')
     if data["particle_position_x"].size == 0: return top
     particle_field_data = data[particle_field] * data['particle_mass']
-    amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
-                           data["particle_position_y"].astype(np.float64),
-                           data["particle_position_z"].astype(np.float64),
-                           particle_field_data,
+    amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64).ravel(),
+                           data["particle_position_y"].astype(np.float64).ravel(),
+                           data["particle_position_z"].astype(np.float64).ravel(),
+                           particle_field_data.ravel(),
                            data["particle_position_x"].size,
                            top, np.array(data.LeftEdge).astype(np.float64),
                            np.array(data.ActiveDimensions).astype(np.int32), 
@@ -419,11 +419,11 @@
     del particle_field_data
 
     bottom = np.zeros(data.ActiveDimensions, dtype='float64')
-    amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
-                           data["particle_position_y"].astype(np.float64),
-                           data["particle_position_z"].astype(np.float64),
-                           data["particle_mass"],
-                           data["particle_position_x"].size,
+    amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64).ravel(),
+                           data["particle_position_y"].astype(np.float64).ravel(),
+                           data["particle_position_z"].astype(np.float64).ravel(),
+                           data["particle_mass"].ravel(),
+                           data["particle_position_x"].ravel().size,
                            bottom, np.array(data.LeftEdge).astype(np.float64),
                            np.array(data.ActiveDimensions).astype(np.int32), 
                            just_one(data['dx']))
@@ -446,12 +446,13 @@
     particle_field = field.name[5:]
     top = np.zeros(data.ActiveDimensions, dtype='float64')
     if data["particle_position_x"].size == 0: return top
-    filter = data['creation_time'] > 0.0
-    if not filter.any(): return top
-    particle_field_data = data[particle_field][filter] * data['particle_mass'][filter]
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
-                          data["particle_position_y"][filter].astype(np.float64),
-                          data["particle_position_z"][filter].astype(np.float64),
+    filter = np.nonzero(data['creation_time'])
+    if True in [f == np.array([]) for f in filter]: return top
+    particle_field_data = data[particle_field][filter] * \
+        data['particle_mass'][filter]
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64).ravel(),
+                          data["particle_position_y"][filter].astype(np.float64).ravel(),
+                          data["particle_position_z"][filter].astype(np.float64).ravel(),
                           particle_field_data,
                           np.int64(np.where(filter)[0].size),
                           top, np.array(data.LeftEdge).astype(np.float64),
@@ -460,9 +461,9 @@
     del particle_field_data
 
     bottom = np.zeros(data.ActiveDimensions, dtype='float64')
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
-                          data["particle_position_y"][filter].astype(np.float64),
-                          data["particle_position_z"][filter].astype(np.float64),
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64).ravel(),
+                          data["particle_position_y"][filter].astype(np.float64).ravel(),
+                          data["particle_position_z"][filter].astype(np.float64).ravel(),
                           data["particle_mass"][filter],
                           np.int64(np.where(filter)[0].size),
                           bottom, np.array(data.LeftEdge).astype(np.float64),
@@ -531,8 +532,8 @@
 
 for pf in ["type", "mass"] + \
           ["position_%s" % ax for ax in 'xyz']:
-    add_enzo_field(("all", "particle_%s" % pf), NullFunc, particle_type=True)
-    
+    add_field(("all", "particle_%s" % pf), NullFunc, particle_type=True)
+
 def _convRetainInt(data):
     return 1
 add_enzo_field(("all", "particle_index"), function=NullFunc,
@@ -552,7 +553,7 @@
     add_enzo_field(("all", pf), function=NullFunc,
               validators = [ValidateDataField(pf)],
               particle_type=True)
-add_field("particle_mass", function=NullFunc, particle_type=True)
+add_field(('all', "particle_mass"), function=NullFunc, particle_type=True)
 
 def _ParticleAge(field, data):
     current_time = data.pf.current_time

diff -r 266b7da460ae2be60f36d5713ec6676ce46e9c7c -r 8ced18e7c9aca80531343bf5f2cdf6b196899bc7 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -42,6 +42,7 @@
 from yt.utilities.logger import ytLogger as mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_splitter
+from yt.utilities.exceptions import YTFieldNotFound
 
 class GeometryHandler(ParallelAnalysisInterface):
 
@@ -187,7 +188,7 @@
         for field in fields_to_check:
             try:
                 fd = fi[field].get_dependencies(pf = self.parameter_file)
-            except Exception as e:
+            except YTFieldNotFound as e:
                 continue
             missing = False
             # This next bit checks that we can't somehow generate everything.
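
Several hunks in this changeset append .ravel() so positions and masses reach CICDeposit_3 as flat, contiguous 1-D arrays even when the source fields come back with shape (N, 1). A small numpy illustration:

    import numpy as np

    pos_x = np.arange(6, dtype=np.float64).reshape(6, 1)  # (N, 1) column
    flat = pos_x.astype(np.float64).ravel()               # contiguous (N,)
    assert flat.shape == (6,)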


https://bitbucket.org/yt_analysis/yt/commits/3f7c2391771a/
Changeset:   3f7c2391771a
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-06-14 08:46:03
Summary:     Adding dummy parameters for the stream frontend.

This is necessary for field detection to work correctly during the unit tests.
Affected #:  1 file

diff -r 8ced18e7c9aca80531343bf5f2cdf6b196899bc7 -r 3f7c2391771a03b590913854829eb3b74048d78b yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -285,6 +285,8 @@
         self.periodicity = self.stream_handler.periodicity
         self.domain_dimensions = self.stream_handler.domain_dimensions
         self.current_time = self.stream_handler.simulation_time
+        self.parameters['Gamma'] = 5/3
+        self.parameters['EOSType'] = -1
         if self.stream_handler.cosmology_simulation:
             self.cosmological_simulation = 1
             self.current_redshift = self.stream_handler.current_redshift


https://bitbucket.org/yt_analysis/yt/commits/c91712d5ab99/
Changeset:   c91712d5ab99
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-06-15 00:26:20
Summary:     Switching CICDeposit_3 fields to use the new "deposit"-style fields.
Affected #:  2 files

diff -r 3f7c2391771a03b590913854829eb3b74048d78b -r c91712d5ab99fae87d2dc5315afef2ae64ede886 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -32,7 +32,7 @@
 
 from yt.funcs import *
 
-from yt.utilities.lib import CICDeposit_3, obtain_rvec, obtain_rv_vec
+from yt.utilities.lib import obtain_rvec, obtain_rv_vec
 from yt.utilities.cosmology import Cosmology
 from field_info_container import \
     add_field, \
@@ -395,7 +395,7 @@
           convert_function=_convertCellMassCode)
 
 def _TotalMass(field,data):
-    return (data["Density"]+data["particle_density"]) * data["CellVolume"]
+    return (data["Density"]+data[("deposit", "particle_density")]) * data["CellVolume"]
 add_field("TotalMass", function=_TotalMass, units=r"\rm{g}")
 add_field("TotalMassMsun", units=r"M_{\odot}",
           function=_TotalMass,
@@ -948,18 +948,9 @@
         units=r"UNDEFINED")
 
 def _pdensity(field, data):
-    blank = np.zeros(data.ActiveDimensions, dtype='float64')
-    if data["particle_position_x"].size == 0: return blank
-    CICDeposit_3(data["particle_position_x"].astype(np.float64).ravel(),
-                 data["particle_position_y"].astype(np.float64).ravel(),
-                 data["particle_position_z"].astype(np.float64).ravel(),
-                 data["ParticleMass"].ravel(),
-                 data["particle_position_x"].size,
-                 blank, np.array(data.LeftEdge).astype(np.float64),
-                 np.array(data.ActiveDimensions).astype(np.int32),
-                 just_one(data['dx']))
-    np.divide(blank, data["CellVolume"], blank)
-    return blank
+    pmass = data[('deposit','all_mass')]
+    np.divide(pmass, data["CellVolume"], pmass)
+    return pmass
 add_field("particle_density", function=_pdensity,
           validators=[ValidateGridType()],
           display_name=r"\mathrm{Particle}\/\mathrm{Density}")

diff -r 3f7c2391771a03b590913854829eb3b74048d78b -r c91712d5ab99fae87d2dc5315afef2ae64ede886 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -361,41 +361,23 @@
     f.take_log = False
 
 def _spdensity(field, data):
-    blank = np.zeros(data.ActiveDimensions, dtype='float64')
-    if data["particle_position_x"].size == 0: return blank
     filter = data['creation_time'] > 0.0
-    if not filter.any(): return blank
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64).ravel(),
-                           data["particle_position_y"][filter].astype(np.float64).ravel(),
-                           data["particle_position_z"][filter].astype(np.float64).ravel(),
-                           data["particle_mass"][filter],
-                           np.int64(np.where(filter)[0].size),
-                           blank, np.array(data.LeftEdge).astype(np.float64),
-                           np.array(data.ActiveDimensions).astype(np.int32), 
-                           just_one(data['dx']))
-    return blank
+    pos = data["all", "Coordinates"][filter, :]
+    d = data.deposit(pos, [data['all', 'Mass'][filter]], method='sum')
+    d /= data['CellVolume']
+    return d
 add_field("star_density", function=_spdensity,
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
 
 def _dmpdensity(field, data):
-    blank = np.zeros(data.ActiveDimensions, dtype='float64')
-    if data["particle_position_x"].size == 0: return blank
     if 'creation_time' in data.pf.field_info:
         filter = data['creation_time'] <= 0.0
-        if not filter.any(): return blank
-        num = filter.sum()
     else:
         filter = Ellipsis
-        num = data["particle_position_x"].size
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64).ravel(),
-                           data["particle_position_y"][filter].astype(np.float64).ravel(),
-                           data["particle_position_z"][filter].astype(np.float64).ravel(),
-                           data["particle_mass"][filter].ravel(),
-                           num,
-                           blank, np.array(data.LeftEdge).astype(np.float64),
-                           np.array(data.ActiveDimensions).astype(np.int32), 
-                           just_one(data['dx']))
-    return blank
+    pos = data["all", "Coordinates"][filter, :]
+    d = data.deposit(pos, [data['all', 'Mass'][filter]], method='sum')
+    d /= data['CellVolume']
+    return d
 add_field("dm_density", function=_dmpdensity,
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
 
@@ -405,28 +387,17 @@
     using cloud-in-cell deposit.
     """
     particle_field = field.name[4:]
-    top = np.zeros(data.ActiveDimensions, dtype='float64')
-    if data["particle_position_x"].size == 0: return top
-    particle_field_data = data[particle_field] * data['particle_mass']
-    amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64).ravel(),
-                           data["particle_position_y"].astype(np.float64).ravel(),
-                           data["particle_position_z"].astype(np.float64).ravel(),
-                           particle_field_data.ravel(),
-                           data["particle_position_x"].size,
-                           top, np.array(data.LeftEdge).astype(np.float64),
-                           np.array(data.ActiveDimensions).astype(np.int32), 
-                           just_one(data['dx']))
-    del particle_field_data
-
-    bottom = np.zeros(data.ActiveDimensions, dtype='float64')
-    amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64).ravel(),
-                           data["particle_position_y"].astype(np.float64).ravel(),
-                           data["particle_position_z"].astype(np.float64).ravel(),
-                           data["particle_mass"].ravel(),
-                           data["particle_position_x"].ravel().size,
-                           bottom, np.array(data.LeftEdge).astype(np.float64),
-                           np.array(data.ActiveDimensions).astype(np.int32), 
-                           just_one(data['dx']))
+    pos = data[('all', 'Coordinates')]
+    top = data.deposit(
+        pos,
+        [data[('all', particle_field)]*data[('all', 'particle_mass')]],
+        method = 'cic'
+        )
+    bottom = data.deposit(
+        pos,
+        [data[('all', 'particle_mass')]],
+        method = 'cic'
+        )
     top[bottom == 0] = 0.0
     bnz = bottom.nonzero()
     top[bnz] /= bottom[bnz]
@@ -444,31 +415,18 @@
     Create a grid field for star quantities, weighted by star mass.
     """
     particle_field = field.name[5:]
-    top = np.zeros(data.ActiveDimensions, dtype='float64')
-    if data["particle_position_x"].size == 0: return top
-    filter = np.nonzero(data['creation_time'])
-    if True in [f == np.array([]) for f in filter]: return top
-    particle_field_data = data[particle_field][filter] * \
-        data['particle_mass'][filter]
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64).ravel(),
-                          data["particle_position_y"][filter].astype(np.float64).ravel(),
-                          data["particle_position_z"][filter].astype(np.float64).ravel(),
-                          particle_field_data,
-                          np.int64(np.where(filter)[0].size),
-                          top, np.array(data.LeftEdge).astype(np.float64),
-                          np.array(data.ActiveDimensions).astype(np.int32), 
-                          just_one(data['dx']))
-    del particle_field_data
-
-    bottom = np.zeros(data.ActiveDimensions, dtype='float64')
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64).ravel(),
-                          data["particle_position_y"][filter].astype(np.float64).ravel(),
-                          data["particle_position_z"][filter].astype(np.float64).ravel(),
-                          data["particle_mass"][filter],
-                          np.int64(np.where(filter)[0].size),
-                          bottom, np.array(data.LeftEdge).astype(np.float64),
-                          np.array(data.ActiveDimensions).astype(np.int32), 
-                          just_one(data['dx']))
+    filter = data['creation_time'] > 0.0
+    pos = data['all', 'Coordinates'][filter, :]
+    top = data.deposit(
+        pos,
+        [data['all', particle_field][filter]*data['all', 'Mass'][filter]],
+        method='sum'
+        )
+    bottom = data.deposit(
+        pos,
+        [data['all', 'Mass'][filter]],
+        method='sum'
+        )
     top[bottom == 0] = 0.0
     bnz = bottom.nonzero()
     top[bnz] /= bottom[bnz]
@@ -532,7 +490,7 @@
 
 for pf in ["type", "mass"] + \
           ["position_%s" % ax for ax in 'xyz']:
-    add_field(("all", "particle_%s" % pf), NullFunc, particle_type=True)
+    add_enzo_field(('all',"particle_%s" % pf), NullFunc, particle_type=True)
 
 def _convRetainInt(data):
     return 1
@@ -550,7 +508,7 @@
               particle_type=True)
 
 for pf in ["creation_time", "dynamical_time", "metallicity_fraction"]:
-    add_enzo_field(("all", pf), function=NullFunc,
+    add_enzo_field(pf, function=NullFunc,
               validators = [ValidateDataField(pf)],
               particle_type=True)
 add_field(('all', "particle_mass"), function=NullFunc, particle_type=True)
@@ -565,7 +523,7 @@
           particle_type=True, convert_function=_convertParticleAge)
 
 def _ParticleMass(field, data):
-    particles = data["particle_mass"].astype('float64') * \
+    particles = data['all', "particle_mass"].astype('float64') * \
                 just_one(data["CellVolumeCode"].ravel())
     # Note that we mandate grid-type here, so this is okay
     return particles
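
The rewritten particle fields all follow a deposit-then-normalize pattern: deposit field times mass and mass separately, then divide only where the mass deposit is nonzero so empty cells stay zero instead of producing NaNs. A numpy sketch of that guard (the inputs stand in for yt's data.deposit results):

    import numpy as np

    def mass_weighted(top, bottom):
        top = top.copy()
        top[bottom == 0] = 0.0       # no mass deposited: leave the cell zero
        bnz = bottom.nonzero()
        top[bnz] /= bottom[bnz]
        return top

    out = mass_weighted(np.array([4.0, 0.0, 9.0]), np.array([2.0, 0.0, 3.0]))
    assert (out == np.array([2.0, 0.0, 3.0])).all()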


https://bitbucket.org/yt_analysis/yt/commits/1382f43c0c56/
Changeset:   1382f43c0c56
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-06-15 00:27:59
Summary:     These are hacks that I needed to add to get the tests to pass.  There might be a better way to do this.
Affected #:  2 files

diff -r c91712d5ab99fae87d2dc5315afef2ae64ede886 -r 1382f43c0c562d55cdfef31c4bfe3f1cc83af9c4 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -54,7 +54,7 @@
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         chunks = list(chunks)
-        if any((ftype != "gas" for ftype, fname in fields)):
+        if any((ftype not in ("gas", "deposit") for ftype, fname in fields)):
             raise NotImplementedError
         rv = {}
         for field in fields:
@@ -65,6 +65,8 @@
                     size, [f2 for f1, f2 in fields], ng)
         for field in fields:
             ftype, fname = field
+            if ftype == 'deposit':
+                fname = field
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:

diff -r c91712d5ab99fae87d2dc5315afef2ae64ede886 -r 1382f43c0c562d55cdfef31c4bfe3f1cc83af9c4 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -148,7 +148,7 @@
         fields = getattr(self.data_source, "fields", [])
         fields += getattr(self.data_source, "field_data", {}).keys()
         for f in fields:
-            if f not in exclude:
+            if f not in exclude and f[0] not in self.data_source.pf.particle_types:
                 self[f]
 
     def _get_info(self, item):
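
The fixed_resolution change above keeps FRB preloading from tripping over particle-typed fields by checking the first element of the field tuple against the dataset's particle types. A toy version of that filter (the particle types and field list are made up for the example):

    particle_types = ("io", "all", "stars")
    fields = [("gas", "Density"), ("stars", "particle_mass"),
              ("gas", "Temperature")]
    exclude = []
    keep = [f for f in fields
            if f not in exclude and f[0] not in particle_types]
    # keep -> [('gas', 'Density'), ('gas', 'Temperature')]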


https://bitbucket.org/yt_analysis/yt/commits/5077106a5979/
Changeset:   5077106a5979
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-06-15 00:28:25
Summary:     Reverting to catching the generic exception, but at least now we record that there was an exception in the first place.
Affected #:  1 file

diff -r 1382f43c0c562d55cdfef31c4bfe3f1cc83af9c4 -r 5077106a597949a6939289f7fb0aa9418464fcdc yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -188,7 +188,10 @@
         for field in fields_to_check:
             try:
                 fd = fi[field].get_dependencies(pf = self.parameter_file)
-            except YTFieldNotFound as e:
+            except Exception as e:
+                if type(e) != YTFieldNotFound:
+                    mylog.debug("Exception %s raised during field detection" %
+                                str(type(e)))
                 continue
             missing = False
             # This next bit checks that we can't somehow generate everything.
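
The pattern above, catching Exception during field detection but emitting a debug line for anything other than the expected YTFieldNotFound, can be sketched standalone as follows (the exception class is compared by name only because this sketch does not import yt):

    import logging
    mylog = logging.getLogger("yt")

    def usable_fields(fields_to_check, get_dependencies):
        found = []
        for field in fields_to_check:
            try:
                get_dependencies(field)
            except Exception as e:
                # Swallow everything, but record the unexpected cases.
                if type(e).__name__ != "YTFieldNotFound":
                    mylog.debug("Exception %s raised during field detection",
                                type(e))
                continue
            found.append(field)
        return found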


https://bitbucket.org/yt_analysis/yt/commits/718e8f89a721/
Changeset:   718e8f89a721
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-06-15 00:30:52
Summary:     Reverting the changes to Overdensity and WeakLensingConvergence.
Affected #:  1 file

diff -r 5077106a597949a6939289f7fb0aa9418464fcdc -r 718e8f89a721b916b7b5d3ce0f05b00138e10d32 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -420,8 +420,8 @@
 def _Convert_Overdensity(data):
     return 1.0 / (rho_crit_now * data.pf.hubble_constant**2 * 
                 (1+data.pf.current_redshift)**3)
-#add_field("Overdensity",function=_Matter_Density,
-#          convert_function=_Convert_Overdensity, units=r"")
+add_field("Overdensity",function=_Matter_Density,
+          convert_function=_Convert_Overdensity, units=r"")
 
 # This is (rho_total - <rho_total>) / <rho_total>.
 def _DensityPerturbation(field, data):
@@ -463,9 +463,9 @@
     return (((DL * DLS) / DS) * (1.5e14 * data.pf.omega_matter * 
                                 (data.pf.hubble_constant / speed_of_light_cgs)**2 *
                                 (1 + data.pf.current_redshift)))
-#add_field("WeakLensingConvergence", function=_DensityPerturbation, 
-#          convert_function=_convertConvergence, 
-#          projection_conversion='mpccm')
+add_field("WeakLensingConvergence", function=_DensityPerturbation, 
+          convert_function=_convertConvergence, 
+          projection_conversion='mpccm')
 
 def _CellVolume(field, data):
     if data['dx'].size == 1:
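
The conversion re-enabled above divides a matter density by the comoving critical density, rho_crit(z) = rho_crit_now * h**2 * (1 + z)**3, which makes Overdensity dimensionless. With illustrative numbers (the rho_crit_now value for h = 1 is assumed here):

    rho_crit_now = 1.8788e-29          # g/cm^3 at z = 0 for h = 1 (assumed)
    h, z = 0.7, 2.0
    conv = 1.0 / (rho_crit_now * h**2 * (1 + z)**3)
    overdensity = 1.0e-28 * conv       # density in g/cm^3 -> dimensionless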


https://bitbucket.org/yt_analysis/yt/commits/1a59d021ddb8/
Changeset:   1a59d021ddb8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-18 19:44:57
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #47)

Add a few RAMSES fields
Affected #:  1 file



https://bitbucket.org/yt_analysis/yt/commits/ef93e65cb9db/
Changeset:   ef93e65cb9db
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-23 22:09:39
Summary:     added standard particle deposit fields to ART
Affected #:  1 file

diff -r 440a76cf232df87a3e27304dfd8507f4f965d3f9 -r ef93e65cb9dbb35dcbb7deed9c1e5ad2566fc2f8 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -218,6 +218,7 @@
               particle_type=True,
               convert_function=lambda x: x.convert("particle_mass"))
 
+
 def _particle_age(field, data):
     tr = data["particle_creation_time"]
     return data.pf.current_time - tr
@@ -260,3 +261,108 @@
     return data["particle_mass"]/mass_sun_cgs
 add_field("ParticleMassMsun", function=_ParticleMassMsun, particle_type=True,
           take_log=True, units=r"\rm{Msun}")
+
+# Particle Deposition Fields
+ptypes = ["all", "darkmatter", "stars"]
+names  = ["Particle", "Dark Matter", "Stellar"]
+
+# Particle Mass Density Fields
+for ptype, name in zip(ptypes, names):
+    def particle_density(field, data):
+        vol = data["CellVolume"]
+        pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+                               for ax in 'xyz'])
+        pmass = dd[(ptype, "particle_mass")]
+        mass = data.deposit(pos, [pmass], method = "sum")
+        return mass / vol
+    add_field("%s_mass_density_deposit" % ptype, function=particle_density, 
+              particle_type=False, take_log=True, units=r'g/cm^{3}',
+              display_name="%s Density" % name, 
+              validators=[ValidateSpatial()], projection_conversion='1')
+
+# Particle Mass Fields
+for ptype, name in zip(ptypes, names):
+    def particle_count(field, data):
+        pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+                               for ax in 'xyz'])
+        mass = data.deposit(pos, method = "sum")
+        return mass
+    add_field("%s_mass_deposit" % ptype, function=particle_density, 
+              particle_type=False, take_log=True, units=r'1/cm^{3}',
+              display_name="%s Mass Density" % name, 
+              validators=[ValidateSpatial()], projection_conversion='1')
+
+# Particle Number Density Fields
+for ptype, name in zip(ptypes, names):
+    def particle_count(field, data):
+        vol = data["CellVolume"]
+        pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+                               for ax in 'xyz'])
+        count = data.deposit(pos, method = "count")
+        return count / vol
+    add_field("%s_number_density_deposit" % ptype, function=particle_density, 
+              particle_type=False, take_log=True, units=r'1/cm^{3}',
+              display_name="%s Number Density" % name, 
+              validators=[ValidateSpatial()], projection_conversion='1')
+
+# Particle Number Fields
+for ptype, name in zip(ptypes, names):
+    def particle_count(field, data):
+        pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+                               for ax in 'xyz'])
+        count = data.deposit(pos, method = "count")
+        return count 
+    add_field("%s_number_deposit" % ptype, function=particle_density, 
+              particle_type=False, take_log=True, units=r'1/cm^{3}',
+              display_name="%s Number" % name, 
+              validators=[ValidateSpatial()], projection_conversion='1')
+
+# Particle Velocity Fields
+for ptype, name in zip(ptypes, names):
+    for axis in 'xyz':
+        def particle_velocity(field, data):
+            pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+                                   for ax in 'xyz'])
+            vel = data[(ptype, "particle_velocity_%s" % axis)]
+            vel_deposit = data.deposit(vel, method = "sum")
+            return vel_deposit
+        add_field("%s_velocity_%s_deposit" % (ptype, axis), 
+                  function=particle_velocity, 
+                  particle_type=False, take_log=False, units=r'cm/s',
+                  display_name="%s Velocity %s" % (name, axis.upper()), 
+                  validators=[ValidateSpatial()], projection_conversion='1')
+
+# Particle Mass-weighted Velocity Fields
+for ptype, name in zip(ptypes, names):
+    for axis in 'xyz':
+        def particle_velocity_weighted(field, data):
+            pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+                                   for ax in 'xyz'])
+            vel  = data[(ptype, "particle_velocity_%s" % axis)]
+            mass = data[(ptype, "particle_mass")]
+            vel_deposit = data.deposit(vel * mass, method = "sum")
+            norm = data.deposit(mass, method = "sum")
+            return vel_deposit / norm
+        add_field("%s_weighted_velocity_%s_deposit" % (ptype, axis), 
+                  function=particle_velocity, 
+                  particle_type=False, take_log=False, units=r'cm/s',
+                  display_name="%s Velocity %s" % (name, axis.upper()), 
+                  validators=[ValidateSpatial()], projection_conversion='1')
+
+# Particle Mass-weighted Velocity Magnitude Fields
+for ptype, name in zip(ptypes, names):
+    def particle_velocity_weighted(field, data):
+        pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+                               for ax in 'xyz'])
+        vels = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+                               for ax in 'xyz'])
+        vel = np.sqrt(np.sum(vels, axis=0))
+        mass = data[(ptype, "particle_mass")]
+        vel_deposit = data.deposit(vel * mass, method = "sum")
+        norm = data.deposit(mass, method = "sum")
+        return vel_deposit / norm
+    add_field("%s_weighted_velocity_deposit" % (ptype, axis), 
+              function=particle_velocity, 
+              particle_type=False, take_log=False, units=r'cm/s',
+              display_name="%s Velocity" % name, 
+              validators=[ValidateSpatial()], projection_conversion='1')
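
One caveat with the loops above, independent of the dd/data typo fixed in the next changeset: the particle_* closures read ptype when they are called, not when they are defined, so every registered field would see the final loop value. A minimal demonstration of the pitfall and the usual default-argument fix:

    fns = [lambda: ptype for ptype in ["all", "darkmatter", "stars"]]
    print([f() for f in fns])    # ['stars', 'stars', 'stars']

    # Binding the loop variable as a default argument freezes it per closure.
    fns = [lambda ptype=ptype: ptype
           for ptype in ["all", "darkmatter", "stars"]]
    print([f() for f in fns])    # ['all', 'darkmatter', 'stars']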


https://bitbucket.org/yt_analysis/yt/commits/39b6f75e60ed/
Changeset:   39b6f75e60ed
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-24 03:46:36
Summary:     new ART particle deposit fields work
Affected #:  1 file

diff -r ef93e65cb9dbb35dcbb7deed9c1e5ad2566fc2f8 -r 39b6f75e60edbf3e7f2630da2571e19c94148f37 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -270,9 +270,9 @@
 for ptype, name in zip(ptypes, names):
     def particle_density(field, data):
         vol = data["CellVolume"]
-        pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
                                for ax in 'xyz'])
-        pmass = dd[(ptype, "particle_mass")]
+        pmass = data[(ptype, "particle_mass")]
         mass = data.deposit(pos, [pmass], method = "sum")
         return mass / vol
     add_field("%s_mass_density_deposit" % ptype, function=particle_density, 
@@ -283,7 +283,7 @@
 # Particle Mass Fields
 for ptype, name in zip(ptypes, names):
     def particle_count(field, data):
-        pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
                                for ax in 'xyz'])
         mass = data.deposit(pos, method = "sum")
         return mass
@@ -296,7 +296,7 @@
 for ptype, name in zip(ptypes, names):
     def particle_count(field, data):
         vol = data["CellVolume"]
-        pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
                                for ax in 'xyz'])
         count = data.deposit(pos, method = "count")
         return count / vol
@@ -308,7 +308,7 @@
 # Particle Number Fields
 for ptype, name in zip(ptypes, names):
     def particle_count(field, data):
-        pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
                                for ax in 'xyz'])
         count = data.deposit(pos, method = "count")
         return count 
@@ -321,7 +321,7 @@
 for ptype, name in zip(ptypes, names):
     for axis in 'xyz':
         def particle_velocity(field, data):
-            pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+            pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
                                    for ax in 'xyz'])
             vel = data[(ptype, "particle_velocity_%s" % axis)]
             vel_deposit = data.deposit(vel, method = "sum")
@@ -336,7 +336,7 @@
 for ptype, name in zip(ptypes, names):
     for axis in 'xyz':
         def particle_velocity_weighted(field, data):
-            pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+            pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
                                    for ax in 'xyz'])
             vel  = data[(ptype, "particle_velocity_%s" % axis)]
             mass = data[(ptype, "particle_mass")]
@@ -352,16 +352,16 @@
 # Particle Mass-weighted Velocity Magnitude Fields
 for ptype, name in zip(ptypes, names):
     def particle_velocity_weighted(field, data):
-        pos = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
                                for ax in 'xyz'])
-        vels = np.column_stack([dd[(ptype, "particle_position_%s" % ax)]
+        vels = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
                                for ax in 'xyz'])
         vel = np.sqrt(np.sum(vels, axis=0))
         mass = data[(ptype, "particle_mass")]
         vel_deposit = data.deposit(vel * mass, method = "sum")
         norm = data.deposit(mass, method = "sum")
         return vel_deposit / norm
-    add_field("%s_weighted_velocity_deposit" % (ptype, axis), 
+    add_field("%s_weighted_velocity_deposit" % (ptype), 
               function=particle_velocity, 
               particle_type=False, take_log=False, units=r'cm/s',
               display_name="%s Velocity" % name, 


https://bitbucket.org/yt_analysis/yt/commits/92a32f6c8c9f/
Changeset:   92a32f6c8c9f
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-24 08:33:31
Summary:     4x speedup in particle access via caching
Affected #:  1 file

diff -r 39b6f75e60edbf3e7f2630da2571e19c94148f37 -r 92a32f6c8c9f4ebaac7728192ba617c7c9c2f519 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -42,6 +42,8 @@
 class IOHandlerART(BaseIOHandler):
     _data_style = "art"
     tb, ages = None, None
+    cache = {}
+    masks = {}
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         # Chunks in this case will have affiliated domain subset objects
@@ -68,85 +70,102 @@
                 cp += subset.cell_count
         return tr
 
+    def _get_mask(self, selector, ftype):
+        if ftype in self.masks.keys():
+            return self.masks[ftype]
+        pf = self.pf
+        ptmax = self.ws[-1]
+        pbool, idxa, idxb = _determine_field_size(pf, ftype, self.ls, ptmax)
+        rp = lambda ax: read_particles(
+            self.file_particle, self.Nrow, idxa=idxa,
+            idxb=idxb, fields=ax)
+        x, y, z = rp(['x','y','z'])
+        dd = pf.domain_dimensions[0]
+        off = 1.0/dd
+        x, y, z = (t/dd - off for t in (x, y, z))
+        mask = selector.select_points(x, y, z)
+        self.masks[ftype] = mask
+        # save the particle positions if asked
+        for ax in 'xyz':
+            f = (ftype, "particle_position_%s" % ax)
+            self.cache[f] = vars()[ax]
+        return self.masks[ftype]
+
+    def _get_field(self,  mask, field):
+        if field in self.cache.keys():
+            return self.cache[field][mask].astype('f8')
+        tr = {}
+        ftype, fname = field
+        ptmax = self.ws[-1]
+        pbool, idxa, idxb = _determine_field_size(self.pf, ftype, self.ls, ptmax)
+        npa = idxb - idxa
+        sizes = np.diff(np.concatenate(([0], self.ls)))
+        rp = lambda ax: read_particles(
+            self.file_particle, self.Nrow, idxa=idxa,
+            idxb=idxb, fields=ax)
+        for i, ax in enumerate('xyz'):
+            if fname.startswith("particle_position_%s" % ax):
+                tr[field] = rp([ax])
+            if fname.startswith("particle_velocity_%s" % ax):
+                tr[field] = rp(['v'+ax])
+        if fname == "particle_mass":
+            a = 0
+            data = np.zeros(npa, dtype='f8')
+            for ptb, size, m in zip(pbool, sizes, self.ws):
+                if ptb:
+                    data[a:a+size] = m
+                    a += size
+            tr[field] = data
+        elif fname == "particle_index":
+            tr[field] = np.arange(idxa, idxb).astype('int64')
+        elif fname == "particle_type":
+            a = 0
+            data = np.zeros(npa, dtype='int')
+            for i, (ptb, size) in enumerate(zip(pbool, sizes)):
+                if ptb:
+                    data[a: a + size] = i
+                    a += size
+            tr[field] = data
+        if pbool[-1] and fname in particle_star_fields:
+            data = read_star_field(self.file_stars, field=fname)
+            temp = tr.get(field, np.zeros(npa, 'f8'))
+            nstars = self.ls[-1]-self.ls[-2]
+            if nstars > 0:
+                temp[-nstars:] = data
+            tr[field] = temp
+        if fname == "particle_creation_time":
+            self.tb, self.ages, data = interpolate_ages(
+                tr[field][-nstars:],
+                self.file_stars,
+                self.tb,
+                self.ages,
+                self.pf.current_time)
+            temp = tr.get(field, np.zeros(npa, 'f8'))
+            temp[-nstars:] = data
+            tr[field] = temp
+            del data
+        if tr == {}:
+            tr = dict((f, np.array([])) for f in fields)
+        self.cache[field] = tr[field].astype('f8')
+        return self.cache[field][mask]
+
     def _read_particle_selection(self, chunks, selector, fields):
         tr = {}
         fields_read = []
-        for chunk in chunks:
-            level = chunk.objs[0].domain.domain_level
-            pf = chunk.objs[0].domain.pf
-            masks = {}
-            ws, ls = pf.parameters["wspecies"], pf.parameters["lspecies"]
-            sizes = np.diff(np.concatenate(([0], ls)))
-            ptmax = ws[-1]
-            npt = ls[-1]
-            nstars = ls[-1]-ls[-2]
-            file_particle = pf._file_particle_data
-            file_stars = pf._file_particle_stars
-            ftype_old = None
-            for field in fields:
-                if field in fields_read:
-                    continue
-                ftype, fname = field
-                pbool, idxa, idxb = _determine_field_size(pf, ftype, ls, ptmax)
-                npa = idxb-idxa
-                if not ftype_old == ftype:
-                    Nrow = pf.parameters["Nrow"]
-                    rp = lambda ax: read_particles(
-                        file_particle, Nrow, idxa=idxa,
-                        idxb=idxb, field=ax)
-                    x, y, z = (rp(ax) for ax in 'xyz')
-                    dd = pf.domain_dimensions[0]
-                    off = 1.0/dd
-                    x, y, z = (t.astype('f8')/dd - off for t in (x, y, z))
-                    mask = selector.select_points(x, y, z)
-                    size = mask.sum()
-                for i, ax in enumerate('xyz'):
-                    if fname.startswith("particle_position_%s" % ax):
-                        tr[field] = vars()[ax]
-                    if fname.startswith("particle_velocity_%s" % ax):
-                        tr[field] = rp('v'+ax)
-                if fname == "particle_mass":
-                    a = 0
-                    data = np.zeros(npa, dtype='f8')
-                    for ptb, size, m in zip(pbool, sizes, ws):
-                        if ptb:
-                            data[a:a+size] = m
-                            a += size
-                    tr[field] = data
-                elif fname == "particle_index":
-                    tr[field] = np.arange(idxa, idxb).astype('int64')
-                elif fname == "particle_type":
-                    a = 0
-                    data = np.zeros(npa, dtype='int')
-                    for i, (ptb, size) in enumerate(zip(pbool, sizes)):
-                        if ptb:
-                            data[a:a+size] = i
-                            a += size
-                    tr[field] = data
-                if pbool[-1] and fname in particle_star_fields:
-                    data = read_star_field(file_stars, field=fname)
-                    temp = tr.get(field, np.zeros(npa, 'f8'))
-                    if nstars > 0:
-                        temp[-nstars:] = data
-                    tr[field] = temp
-                if fname == "particle_creation_time":
-                    self.tb, self.ages, data = interpolate_ages(
-                        tr[field][-nstars:],
-                        file_stars,
-                        self.tb,
-                        self.ages,
-                        pf.current_time)
-                    temp = tr.get(field, np.zeros(npa, 'f8'))
-                    temp[-nstars:] = data
-                    tr[field] = temp
-                    del data
-                tr[field] = tr[field][mask].astype('f8')
-                ftype_old = ftype
-                fields_read.append(field)
-        if tr == {}:
-            tr = dict((f, np.array([])) for f in fields)
-        return tr
-
+        chunk = [c for c in chunks][0]
+        self.pf = chunk.objs[0].domain.pf
+        self.ws = self.pf.parameters["wspecies"]
+        self.ls = self.pf.parameters["lspecies"]
+        self.file_particle = self.pf._file_particle_data
+        self.file_stars = self.pf._file_particle_stars
+        self.Nrow = self.pf.parameters["Nrow"]
+        data = {}
+        #import pdb; pdb.set_trace()
+        for f in fields:
+            ftype, fname = f
+            mask = self._get_mask(selector, ftype)
+            data[f] =  self._get_field(mask, f)
+        return data
 
 def _determine_field_size(pf, field, lspecies, ptmax):
     pbool = np.zeros(len(lspecies), dtype="bool")
@@ -361,27 +380,29 @@
     return ranges
 
 
-def read_particles(file, Nrow, idxa, idxb, field):
+def read_particles(file, Nrow, idxa, idxb, fields):
     words = 6  # words (reals) per particle: x,y,z,vx,vy,vz
     real_size = 4  # for file_particle_data; not always true?
     np_per_page = Nrow**2  # defined in ART a_setup.h, # of particles/page
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
-    data = np.array([], 'f4')
     fh = open(file, 'r')
     skip, count = idxa, idxb - idxa
     kwargs = dict(words=words, real_size=real_size, 
                   np_per_page=np_per_page, num_pages=num_pages)
-    ranges = get_ranges(skip, count, field, **kwargs)
-    data = None
-    for seek, this_count in ranges:
-        fh.seek(seek)
-        temp = np.fromfile(fh, count=this_count, dtype='>f4')
-        if data is None:
-            data = temp
-        else:
-            data = np.concatenate((data, temp))
+    arrs = []
+    for field in fields:
+        ranges = get_ranges(skip, count, field, **kwargs)
+        data = None
+        for seek, this_count in ranges:
+            fh.seek(seek)
+            temp = np.fromfile(fh, count=this_count, dtype='>f4')
+            if data is None:
+                data = temp
+            else:
+                data = np.concatenate((data, temp))
+        arrs.append(data.astype('f8'))
     fh.close()
-    return data
+    return arrs
 
 
 def read_star_field(file, field=None):
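
The caching that buys the 4x speedup is plain memoization keyed on the (ftype, fname) tuple: read once, reuse for every later selection. Stripped of the ART-specific reading, the shape of it is roughly this (names assumed):

    class ParticleCache:
        def __init__(self, read_field):
            self.read_field = read_field   # callable: field tuple -> ndarray
            self.cache = {}

        def get(self, field):
            # First access pays the file read; later accesses hit the dict.
            if field not in self.cache:
                self.cache[field] = self.read_field(field)
            return self.cache[field]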


https://bitbucket.org/yt_analysis/yt/commits/14da5639265a/
Changeset:   14da5639265a
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-27 22:45:34
Summary:     fixed mask caching with particles & chunks
Affected #:  2 files

diff -r 92a32f6c8c9f4ebaac7728192ba617c7c9c2f519 -r 14da5639265a0cf4e73a639e817aa28abd73146d yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -274,7 +274,8 @@
                                for ax in 'xyz'])
         pmass = data[(ptype, "particle_mass")]
         mass = data.deposit(pos, [pmass], method = "sum")
-        return mass / vol
+        dens = mass / vol
+        return dens
     add_field("%s_mass_density_deposit" % ptype, function=particle_density, 
               particle_type=False, take_log=True, units=r'g/cm^{3}',
               display_name="%s Density" % name, 

diff -r 92a32f6c8c9f4ebaac7728192ba617c7c9c2f519 -r 14da5639265a0cf4e73a639e817aa28abd73146d yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -43,7 +43,6 @@
     _data_style = "art"
     tb, ages = None, None
     cache = {}
-    masks = {}
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         # Chunks in this case will have affiliated domain subset objects
@@ -71,8 +70,6 @@
         return tr
 
     def _get_mask(self, selector, ftype):
-        if ftype in self.masks.keys():
-            return self.masks[ftype]
         pf = self.pf
         ptmax = self.ws[-1]
         pbool, idxa, idxb = _determine_field_size(pf, ftype, self.ls, ptmax)
@@ -84,16 +81,15 @@
         off = 1.0/dd
         x, y, z = (t/dd - off for t in (x, y, z))
         mask = selector.select_points(x, y, z)
-        self.masks[ftype] = mask
         # save the particle positions if asked
         for ax in 'xyz':
             f = (ftype, "particle_position_%s" % ax)
             self.cache[f] = vars()[ax]
-        return self.masks[ftype]
+        return mask
 
-    def _get_field(self,  mask, field):
+    def _get_field(self,  field):
         if field in self.cache.keys():
-            return self.cache[field][mask].astype('f8')
+            return self.cache[field]
         tr = {}
         ftype, fname = field
         ptmax = self.ws[-1]
@@ -117,7 +113,7 @@
                     a += size
             tr[field] = data
         elif fname == "particle_index":
-            tr[field] = np.arange(idxa, idxb).astype('int64')
+            tr[field] = np.arange(idxa, idxb)
         elif fname == "particle_type":
             a = 0
             data = np.zeros(npa, dtype='int')
@@ -146,25 +142,24 @@
             del data
         if tr == {}:
             tr = dict((f, np.array([])) for f in fields)
-        self.cache[field] = tr[field].astype('f8')
-        return self.cache[field][mask]
+        self.cache[field] = tr[field]
+        return self.cache[field]
 
     def _read_particle_selection(self, chunks, selector, fields):
-        tr = {}
-        fields_read = []
-        chunk = [c for c in chunks][0]
-        self.pf = chunk.objs[0].domain.pf
-        self.ws = self.pf.parameters["wspecies"]
-        self.ls = self.pf.parameters["lspecies"]
-        self.file_particle = self.pf._file_particle_data
-        self.file_stars = self.pf._file_particle_stars
-        self.Nrow = self.pf.parameters["Nrow"]
-        data = {}
-        #import pdb; pdb.set_trace()
+        for chunk in chunks:
+            self.pf = chunk.objs[0].domain.pf
+            self.ws = self.pf.parameters["wspecies"]
+            self.ls = self.pf.parameters["lspecies"]
+            self.file_particle = self.pf._file_particle_data
+            self.file_stars = self.pf._file_particle_stars
+            self.Nrow = self.pf.parameters["Nrow"]
+            break
+        data = {f:np.array([]) for f in fields}
         for f in fields:
             ftype, fname = f
             mask = self._get_mask(selector, ftype)
-            data[f] =  self._get_field(mask, f)
+            arr = self._get_field(f)[mask].astype('f8')
+            data[f] = np.concatenate((arr, data[f]))
         return data
 
 def _determine_field_size(pf, field, lspecies, ptmax):
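
The rewritten _read_particle_selection above seeds every requested field with an empty array and concatenates each masked read onto it, so fields that yield nothing still come back well-defined. Roughly (read_one stands in for the masked _get_field call):

    import numpy as np

    def collect(fields, read_one):
        data = {f: np.array([]) for f in fields}
        for f in fields:
            # Prepend the freshly read values onto whatever has accumulated.
            data[f] = np.concatenate((read_one(f), data[f]))
        return data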


https://bitbucket.org/yt_analysis/yt/commits/cf466a241e92/
Changeset:   cf466a241e92
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-27 22:48:25
Summary:     added particle types; fixes dd[('stars', 'particle_mass')] translating into ('all','particle_mass')
Affected #:  1 file

diff -r 14da5639265a0cf4e73a639e817aa28abd73146d -r cf466a241e92016d150cb4b78d1e5df2699d6357 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -136,6 +136,10 @@
                 self.parameter_file.particle_types.append("specie%i" % specie)
         else:
             self.parameter_file.particle_types = []
+        for ptype in self.parameter_file.particle_types:
+            for pfield in self.particle_field_list:
+                pfn = (ptype, pfield)
+                self.field_list.append(pfn)
 
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
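
The loop added above crosses every known particle type with every particle field, so a request like dd[('stars', 'particle_mass')] resolves to its own entry instead of being translated to ('all', 'particle_mass'). In isolation (both lists assumed for the example):

    particle_types = ["all", "darkmatter", "stars"]
    particle_field_list = ["particle_mass", "particle_position_x"]
    field_list = [(ptype, pfield)
                  for ptype in particle_types
                  for pfield in particle_field_list]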


https://bitbucket.org/yt_analysis/yt/commits/effae4caa73a/
Changeset:   effae4caa73a
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-27 23:39:17
Summary:     improved particle caching, down to 45s deposit
Affected #:  1 file

diff -r cf466a241e92016d150cb4b78d1e5df2699d6357 -r effae4caa73acd2ccc4177f275ecb13520327ac6 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -73,27 +73,21 @@
         pf = self.pf
         ptmax = self.ws[-1]
         pbool, idxa, idxb = _determine_field_size(pf, ftype, self.ls, ptmax)
-        rp = lambda ax: read_particles(
-            self.file_particle, self.Nrow, idxa=idxa,
-            idxb=idxb, fields=ax)
-        x, y, z = rp(['x','y','z'])
-        dd = pf.domain_dimensions[0]
-        off = 1.0/dd
-        x, y, z = (t/dd - off for t in (x, y, z))
+        pstr = 'particle_position_%s'
+        x,y,z = [self._get_field((ftype, pstr % ax)) for ax in 'xyz']
         mask = selector.select_points(x, y, z)
-        # save the particle positions if asked
-        for ax in 'xyz':
-            f = (ftype, "particle_position_%s" % ax)
-            self.cache[f] = vars()[ax]
         return mask
 
     def _get_field(self,  field):
         if field in self.cache.keys():
+            mylog.debug("Cached %s", str(field))
             return self.cache[field]
+        mylog.debug("Reading %s", str(field))
         tr = {}
         ftype, fname = field
         ptmax = self.ws[-1]
-        pbool, idxa, idxb = _determine_field_size(self.pf, ftype, self.ls, ptmax)
+        pbool, idxa, idxb = _determine_field_size(self.pf, ftype, 
+                                                  self.ls, ptmax)
         npa = idxb - idxa
         sizes = np.diff(np.concatenate(([0], self.ls)))
         rp = lambda ax: read_particles(
@@ -101,7 +95,9 @@
             idxb=idxb, fields=ax)
         for i, ax in enumerate('xyz'):
             if fname.startswith("particle_position_%s" % ax):
-                tr[field] = rp([ax])
+                dd = self.pf.domain_dimensions[0]
+                off = 1.0/dd
+                tr[field] = rp([ax])[0]/dd - off
             if fname.startswith("particle_velocity_%s" % ax):
                 tr[field] = rp(['v'+ax])
         if fname == "particle_mass":
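
The position branch above now normalizes at read time: ART positions appear to be stored in root-grid units offset by one cell, and dividing by the root-grid dimension then subtracting one cell width maps them into the [0, 1) domain. With illustrative numbers:

    import numpy as np

    dd = 128                              # root-grid dimension (assumed)
    raw = np.array([1.0, 64.5, 128.0])    # positions in grid units
    x = raw / dd - 1.0 / dd               # -> [0.0, 0.49609375, 0.9921875]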


https://bitbucket.org/yt_analysis/yt/commits/a619bc2e3f8a/
Changeset:   a619bc2e3f8a
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-27 23:48:02
Summary:     masks now cached; next optimization will remove particles when deposited
Affected #:  1 file

diff -r effae4caa73acd2ccc4177f275ecb13520327ac6 -r a619bc2e3f8a6b80079be6dc86f7624b2add44aa yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -43,6 +43,7 @@
     _data_style = "art"
     tb, ages = None, None
     cache = {}
+    masks = {}
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         # Chunks in this case will have affiliated domain subset objects
@@ -70,13 +71,17 @@
         return tr
 
     def _get_mask(self, selector, ftype):
+        key = (selector, ftype)
+        if key in self.masks.keys():
+            return self.masks[key]
         pf = self.pf
         ptmax = self.ws[-1]
         pbool, idxa, idxb = _determine_field_size(pf, ftype, self.ls, ptmax)
         pstr = 'particle_position_%s'
         x,y,z = [self._get_field((ftype, pstr % ax)) for ax in 'xyz']
         mask = selector.select_points(x, y, z)
-        return mask
+        self.masks[key] = mask
+        return self.masks[key]
 
     def _get_field(self,  field):
         if field in self.cache.keys():


https://bitbucket.org/yt_analysis/yt/commits/d1ae641d84e2/
Changeset:   d1ae641d84e2
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-28 06:33:23
Summary:     removed the chunking for loop
Affected #:  1 file

diff -r a619bc2e3f8a6b80079be6dc86f7624b2add44aa -r d1ae641d84e2dbd75badebd26b0a06b137ebfdb9 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -147,14 +147,13 @@
         return self.cache[field]
 
     def _read_particle_selection(self, chunks, selector, fields):
-        for chunk in chunks:
-            self.pf = chunk.objs[0].domain.pf
-            self.ws = self.pf.parameters["wspecies"]
-            self.ls = self.pf.parameters["lspecies"]
-            self.file_particle = self.pf._file_particle_data
-            self.file_stars = self.pf._file_particle_stars
-            self.Nrow = self.pf.parameters["Nrow"]
-            break
+        chunk = chunks.next()
+        self.pf = chunk.objs[0].domain.pf
+        self.ws = self.pf.parameters["wspecies"]
+        self.ls = self.pf.parameters["lspecies"]
+        self.file_particle = self.pf._file_particle_data
+        self.file_stars = self.pf._file_particle_stars
+        self.Nrow = self.pf.parameters["Nrow"]
         data = {f:np.array([]) for f in fields}
         for f in fields:
             ftype, fname = f
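
chunks.next() above is the Python 2 spelling of the builtin next(chunks); either way the point is to pull dataset-level parameters from the first chunk only, since they are identical across chunks. Equivalent, with a stand-in iterator:

    chunks = iter(["chunk0", "chunk1", "chunk2"])
    chunk = next(chunks)    # parameters are read from this one alone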


https://bitbucket.org/yt_analysis/yt/commits/fbdf1ecaecef/
Changeset:   fbdf1ecaecef
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-04 23:13:23
Summary:     added weighted mean deposit
Affected #:  1 file

diff -r d1ae641d84e2dbd75badebd26b0a06b137ebfdb9 -r fbdf1ecaecef42ca9857b6e1227cfc9fca68b7fc yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -232,3 +232,39 @@
 
 deposit_std = StdParticleField
 
+
+cdef class WeightedMeanParticleField(ParticleDepositOperation):
+    # Deposit both mass * field and mass into two scalars
+    # then in finalize divide mass * field / mass
+    cdef np.float64_t *wf
+    cdef public object owf
+    cdef np.float64_t *w
+    cdef public object ow
+    def initialize(self):
+        self.owf = np.zeros(self.nvals, dtype='float64')
+        cdef np.ndarray wfarr = self.owf
+        self.wf = <np.float64_t*> wfarr.data
+        
+        self.ow = np.zeros(self.nvals, dtype='float64')
+        cdef np.ndarray wfarr = self.ow
+        self.w = <np.float64_t*> warr.data
+    
+    @cython.cdivision(True)
+    cdef void process(self, int dim[3],
+                      np.float64_t left_edge[3], 
+                      np.float64_t dds[3],
+                      np.int64_t offset, 
+                      np.float64_t ppos[3],
+                      np.float64_t *fields 
+                      ):
+        cdef int ii[3], i
+        for i in range(3):
+            ii[i] = <int>((ppos[i] - left_edge[i]) / dds[i])
+        self.w[ gind(ii[0], ii[1], ii[2], dim) + offset] += fields[1]
+        self.wf[gind(ii[0], ii[1], ii[2], dim) + offset] += fields[0] * fields[1]
+        
+    def finalize(self):
+        return self.owf / self.ow
+
+deposit_weighted_mean= WeightedMeanParticleField
+
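
A pure-NumPy analogue may make the two-accumulator structure of the new operation easier to see: process() adds weight and field*weight per cell, finalize() divides. This is a sketch standing in for the Cython class, not a drop-in replacement (note the duplicated wfarr declaration in the hunk above is corrected in changeset 4fb8998b8dca below):

    import numpy as np

    class WeightedMeanSketch:
        def __init__(self, nvals):
            self.wf = np.zeros(nvals)   # running sum of field * weight
            self.w = np.zeros(nvals)    # running sum of weight

        def process(self, cell, field, weight):
            self.w[cell] += weight
            self.wf[cell] += field * weight

        def finalize(self):
            with np.errstate(invalid="ignore"):
                return self.wf / self.w  # 0/0 -> nan where nothing deposited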


https://bitbucket.org/yt_analysis/yt/commits/4fb8998b8dca/
Changeset:   4fb8998b8dca
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-05 00:05:38
Summary:     fixed up the fields
Affected #:  4 files

diff -r fbdf1ecaecef42ca9857b6e1227cfc9fca68b7fc -r 4fb8998b8dca4254bd25c761241a477addfba81b yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -25,6 +25,8 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 import numpy as np
+
+from yt.funcs import *
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \
@@ -262,108 +264,112 @@
 add_field("ParticleMassMsun", function=_ParticleMassMsun, particle_type=True,
           take_log=True, units=r"\rm{Msun}")
 
+# Modeled after the TIPSY / Gadget frontend particle deposit fields
+def _particle_functions(ptype, pname):
+    mass_name = "particle_mass"
+    def particle_pos(data, axes="xyz"):
+        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]\
+                                    for ax in axes])
+        if len(axes)==1:
+            return pos[0]
+        return pos
+
+    def particle_vel(data, axes="xyz"):
+        pos = np.column_stack([data[(ptype, "particle_velocity_%s" % ax)]\
+                                    for ax in axes])
+        if len(axes)==1:
+            return pos[0]
+        return pos
+
+    def particle_count(field, data):
+        pos = particle_pos(data)
+        d = data.deposit(pos, method = "count")
+        return d
+    
+    add_field("deposit_%s_count" % ptype,
+             function = particle_count,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s Count}" % pname,
+             projection_conversion = '1')
+
+    def particle_mass(field, data):
+        pos = particle_pos(data)
+        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
+        return d
+
+    add_field("deposit_%s_mass" % ptype,
+             function = particle_mass,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s Mass}" % pname,
+             units = r"\mathrm{g}",
+             projected_units = r"\mathrm{g}\/\mathrm{cm}",
+             projection_conversion = 'cm')
+
+    def particle_density(field, data):
+        print data.shape
+        pos = particle_pos(data)
+        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
+        d /= data["CellVolume"]
+        return d
+
+    add_field("deposit_%s_density" % ptype,
+             function = particle_density,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s Density}" % pname,
+             units = r"\mathrm{g}/\mathrm{cm}^{3}",
+             projected_units = r"\mathrm{g}/\mathrm{cm}^{-2}",
+             projection_conversion = 'cm')
+
+    def particle_number_density(field, data):
+        pos = particle_pos(data)
+        d = data.deposit(pos, method = "count")
+        d /= data["CellVolume"]
+        return d
+
+    add_field("deposit_%s_number_density" % ptype,
+             function = particle_density,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s Number Density}" % pname,
+             units = r"\mathrm{1}/\mathrm{cm}^{3}",
+             projected_units = r"\mathrm{1}/\mathrm{cm}^{-2}",
+             projection_conversion = 'cm')
+
+    def particle_mass_velocity(field, data):
+        pos = particle_pos(data)
+        vel = particle_vel(data, ax) 
+        mass = data[ptype, mass_name]
+        d  = data.deposit(pos, [mass, vel], method = "weighted_mean")
+        d /= data.deposit(pos, [mass], method = "sum")
+        return d
+
+    add_field("deposit_%s_weighted_velocity" % ptype,
+             function = particle_mass,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s Mass Weighted Velocity}" % pname,
+             units = r"\mathrm{g}",
+             projected_units = r"\mathrm{g}\/\mathrm{cm}",
+             projection_conversion = 'cm')
+
+    add_field((ptype, "ParticleMass"),
+            function = TranslationFunc((ptype, mass_name)),
+            particle_type = True,
+            units = r"\mathrm{g}")
+
+    def _ParticleMassMsun(field, data):
+        return data[ptype, mass_name].copy()
+    def _conv_Msun(data):
+        return 1.0/mass_sun_cgs
+
+    add_field((ptype, "ParticleMassMsun"),
+            function = _ParticleMassMsun,
+            convert_function = _conv_Msun,
+            particle_type = True,
+            units = r"\mathrm{M}_\odot")
+
 # Particle Deposition Fields
-ptypes = ["all", "darkmatter", "stars"]
-names  = ["Particle", "Dark Matter", "Stellar"]
+_ptypes = ["all", "darkmatter", "stars"]
+_pnames  = ["Particle", "Dark Matter", "Stellar"]
 
-# Particle Mass Density Fields
-for ptype, name in zip(ptypes, names):
-    def particle_density(field, data):
-        vol = data["CellVolume"]
-        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
-                               for ax in 'xyz'])
-        pmass = data[(ptype, "particle_mass")]
-        mass = data.deposit(pos, [pmass], method = "sum")
-        dens = mass / vol
-        return dens
-    add_field("%s_mass_density_deposit" % ptype, function=particle_density, 
-              particle_type=False, take_log=True, units=r'g/cm^{3}',
-              display_name="%s Density" % name, 
-              validators=[ValidateSpatial()], projection_conversion='1')
+for _ptype, _pname in zip(_ptypes, _pnames):
+    _particle_functions(_ptype, _pname)
 
-# Particle Mass Fields
-for ptype, name in zip(ptypes, names):
-    def particle_count(field, data):
-        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
-                               for ax in 'xyz'])
-        mass = data.deposit(pos, method = "sum")
-        return mass
-    add_field("%s_mass_deposit" % ptype, function=particle_density, 
-              particle_type=False, take_log=True, units=r'1/cm^{3}',
-              display_name="%s Mass Density" % name, 
-              validators=[ValidateSpatial()], projection_conversion='1')
-
-# Particle Number Density Fields
-for ptype, name in zip(ptypes, names):
-    def particle_count(field, data):
-        vol = data["CellVolume"]
-        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
-                               for ax in 'xyz'])
-        count = data.deposit(pos, method = "count")
-        return count / vol
-    add_field("%s_number_density_deposit" % ptype, function=particle_density, 
-              particle_type=False, take_log=True, units=r'1/cm^{3}',
-              display_name="%s Number Density" % name, 
-              validators=[ValidateSpatial()], projection_conversion='1')
-
-# Particle Number Fields
-for ptype, name in zip(ptypes, names):
-    def particle_count(field, data):
-        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
-                               for ax in 'xyz'])
-        count = data.deposit(pos, method = "count")
-        return count 
-    add_field("%s_number_deposit" % ptype, function=particle_density, 
-              particle_type=False, take_log=True, units=r'1/cm^{3}',
-              display_name="%s Number" % name, 
-              validators=[ValidateSpatial()], projection_conversion='1')
-
-# Particle Velocity Fields
-for ptype, name in zip(ptypes, names):
-    for axis in 'xyz':
-        def particle_velocity(field, data):
-            pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
-                                   for ax in 'xyz'])
-            vel = data[(ptype, "particle_velocity_%s" % axis)]
-            vel_deposit = data.deposit(vel, method = "sum")
-            return vel_deposit
-        add_field("%s_velocity_%s_deposit" % (ptype, axis), 
-                  function=particle_velocity, 
-                  particle_type=False, take_log=False, units=r'cm/s',
-                  display_name="%s Velocity %s" % (name, axis.upper()), 
-                  validators=[ValidateSpatial()], projection_conversion='1')
-
-# Particle Mass-weighted Velocity Fields
-for ptype, name in zip(ptypes, names):
-    for axis in 'xyz':
-        def particle_velocity_weighted(field, data):
-            pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
-                                   for ax in 'xyz'])
-            vel  = data[(ptype, "particle_velocity_%s" % axis)]
-            mass = data[(ptype, "particle_mass")]
-            vel_deposit = data.deposit(vel * mass, method = "sum")
-            norm = data.deposit(mass, method = "sum")
-            return vel_deposit / norm
-        add_field("%s_weighted_velocity_%s_deposit" % (ptype, axis), 
-                  function=particle_velocity, 
-                  particle_type=False, take_log=False, units=r'cm/s',
-                  display_name="%s Velocity %s" % (name, axis.upper()), 
-                  validators=[ValidateSpatial()], projection_conversion='1')
-
-# Particle Mass-weighted Velocity Magnitude Fields
-for ptype, name in zip(ptypes, names):
-    def particle_velocity_weighted(field, data):
-        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
-                               for ax in 'xyz'])
-        vels = np.column_stack([data[(ptype, "particle_position_%s" % ax)]
-                               for ax in 'xyz'])
-        vel = np.sqrt(np.sum(vels, axis=0))
-        mass = data[(ptype, "particle_mass")]
-        vel_deposit = data.deposit(vel * mass, method = "sum")
-        norm = data.deposit(mass, method = "sum")
-        return vel_deposit / norm
-    add_field("%s_weighted_velocity_deposit" % (ptype), 
-              function=particle_velocity, 
-              particle_type=False, take_log=False, units=r'cm/s',
-              display_name="%s Velocity" % name, 
-              validators=[ValidateSpatial()], projection_conversion='1')

diff -r fbdf1ecaecef42ca9857b6e1227cfc9fca68b7fc -r 4fb8998b8dca4254bd25c761241a477addfba81b yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -44,6 +44,7 @@
     tb, ages = None, None
     cache = {}
     masks = {}
+    caching = False
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         # Chunks in this case will have affiliated domain subset objects
@@ -72,7 +73,7 @@
 
     def _get_mask(self, selector, ftype):
         key = (selector, ftype)
-        if key in self.masks.keys():
+        if key in self.masks.keys() and self.caching:
             return self.masks[key]
         pf = self.pf
         ptmax = self.ws[-1]
@@ -80,11 +81,14 @@
         pstr = 'particle_position_%s'
         x,y,z = [self._get_field((ftype, pstr % ax)) for ax in 'xyz']
         mask = selector.select_points(x, y, z)
-        self.masks[key] = mask
-        return self.masks[key]
+        if self.caching:
+            self.masks[key] = mask
+            return self.masks[key]
+        else:
+            return mask
 
     def _get_field(self,  field):
-        if field in self.cache.keys():
+        if field in self.cache.keys() and self.caching:
             mylog.debug("Cached %s", str(field))
             return self.cache[field]
         mylog.debug("Reading %s", str(field))
@@ -143,8 +147,11 @@
             del data
         if tr == {}:
             tr = dict((f, np.array([])) for f in fields)
-        self.cache[field] = tr[field]
-        return self.cache[field]
+        if self.caching:
+            self.cache[field] = tr[field]
+            return self.cache[field]
+        else:
+            return tr[field]
 
     def _read_particle_selection(self, chunks, selector, fields):
         chunk = chunks.next()

diff -r fbdf1ecaecef42ca9857b6e1227cfc9fca68b7fc -r 4fb8998b8dca4254bd25c761241a477addfba81b yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -246,7 +246,7 @@
         self.wf = <np.float64_t*> wfarr.data
         
         self.ow = np.zeros(self.nvals, dtype='float64')
-        cdef np.ndarray wfarr = self.ow
+        cdef np.ndarray warr = self.ow
         self.w = <np.float64_t*> warr.data
     
     @cython.cdivision(True)

diff -r fbdf1ecaecef42ca9857b6e1227cfc9fca68b7fc -r 4fb8998b8dca4254bd25c761241a477addfba81b yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -142,8 +142,10 @@
 elif exe_name in \
         ["mpi4py", "embed_enzo",
          "python"+sys.version[:3]+"-mpi"] \
-    or '_parallel' in dir(sys) \
-    or any(["ipengine" in arg for arg in sys.argv]):
-    parallel_capable = turn_on_parallelism()
+        or '_parallel' in dir(sys) \
+        or any(["ipengine" in arg for arg in sys.argv]) \
+        or any(["cluster-id" in arg for arg in sys.argv]):
+    #parallel_capable = turn_on_parallelism()
+    pass
 else:
     parallel_capable = False


https://bitbucket.org/yt_analysis/yt/commits/d55ca706eeb6/
Changeset:   d55ca706eeb6
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-05 00:39:10
Summary:     fixing velocity fields return
Affected #:  1 file

diff -r 4fb8998b8dca4254bd25c761241a477addfba81b -r d55ca706eeb69be62b094887d41ec708bd6cb53f yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -108,7 +108,7 @@
                 off = 1.0/dd
                 tr[field] = rp([ax])[0]/dd - off
             if fname.startswith("particle_velocity_%s" % ax):
-                tr[field] = rp(['v'+ax])
+                tr[field], = rp(['v'+ax])
         if fname == "particle_mass":
             a = 0
             data = np.zeros(npa, dtype='f8')
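
The added comma performs single-element unpacking: read_particles now returns a list per requested field, and the trailing comma binds the lone element rather than the enclosing list. For instance:

    vals = [42]
    x, = vals    # x == 42; raises ValueError unless vals has exactly one item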


https://bitbucket.org/yt_analysis/yt/commits/04285b5f9f0b/
Changeset:   04285b5f9f0b
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-05 00:39:32
Summary:     implementing weighted mean deposit
Affected #:  1 file

diff -r d55ca706eeb69be62b094887d41ec708bd6cb53f -r 04285b5f9f0b4922a51586f013661f10095be8b5 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -306,7 +306,6 @@
              projection_conversion = 'cm')
 
     def particle_density(field, data):
-        print data.shape
         pos = particle_pos(data)
         d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
         d /= data["CellVolume"]
@@ -334,21 +333,25 @@
              projected_units = r"\mathrm{1}/\mathrm{cm}^{-2}",
              projection_conversion = 'cm')
 
-    def particle_mass_velocity(field, data):
-        pos = particle_pos(data)
-        vel = particle_vel(data, ax) 
-        mass = data[ptype, mass_name]
-        d  = data.deposit(pos, [mass, vel], method = "weighted_mean")
-        d /= data.deposit(pos, [mass], method = "sum")
-        return d
 
-    add_field("deposit_%s_weighted_velocity" % ptype,
-             function = particle_mass,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Mass Weighted Velocity}" % pname,
-             units = r"\mathrm{g}",
-             projected_units = r"\mathrm{g}\/\mathrm{cm}",
-             projection_conversion = 'cm')
+    for ax in "xyz":
+        def particle_mass_velocity(field, data, ax):
+            pos = particle_pos(data)
+            vel = particle_vel(data, ax) 
+            mass = data[ptype, mass_name]
+            d = data.deposit(pos, [vel, mass], method = "weighted_mean")
+            d[~np.isfinite(d)] = 0.0
+            return d
+
+        add_field("deposit_%s_weighted_velocity_%s" % (ptype, ax),
+                 function = lambda f, d: particle_mass_velocity(f, d, ax),
+                 validators = [ValidateSpatial()],
+                 display_name = "\\mathrm{%s Mass Weighted Velocity %s}" % \
+                                (pname, ax.upper()),
+                 units = r"\mathrm{\mathrm{cm}/\mathrm{s}}",
+                 projected_units = r"\mathrm{\mathrm{cm}/\mathrm{s}}",
+                 projection_conversion = '1',
+                 take_log=False)
 
     add_field((ptype, "ParticleMass"),
             function = TranslationFunc((ptype, mass_name)),
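
The ~np.isfinite guard above handles what the weighted-mean finalize leaves behind: cells no particle touched come out of the 0/0 division as nan. In miniature:

    import numpy as np

    d = np.array([2.5, np.nan, np.inf])
    d[~np.isfinite(d)] = 0.0    # -> [2.5, 0.0, 0.0]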


https://bitbucket.org/yt_analysis/yt/commits/f09af31e835a/
Changeset:   f09af31e835a
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-05 00:49:05
Summary:     removed accidental changes to yt startup_tasks
Affected #:  1 file

diff -r 04285b5f9f0b4922a51586f013661f10095be8b5 -r f09af31e835ae26014d845c12df7f658f8bfdb2c yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -142,10 +142,8 @@
 elif exe_name in \
         ["mpi4py", "embed_enzo",
          "python"+sys.version[:3]+"-mpi"] \
-        or '_parallel' in dir(sys) \
-        or any(["ipengine" in arg for arg in sys.argv]) \
-        or any(["cluster-id" in arg for arg in sys.argv]):
-    #parallel_capable = turn_on_parallelism()
-    pass
+    or '_parallel' in dir(sys) \
+    or any(["ipengine" in arg for arg in sys.argv]):
+    parallel_capable = turn_on_parallelism()
 else:
     parallel_capable = False


https://bitbucket.org/yt_analysis/yt/commits/b9bede66ce55/
Changeset:   b9bede66ce55
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-14 02:00:38
Summary:     Merged particle_deposit.pyx; included weighted average
Affected #:  117 files

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -4,11 +4,15 @@
 freetype.cfg
 hdf5.cfg
 png.cfg
+rockstar.cfg
 yt_updater.log
 yt/frontends/artio/_artio_caller.c
+yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/frontends/sph/smoothing_kernel.c
+yt/geometry/fake_octree.c
 yt/geometry/oct_container.c
+yt/geometry/particle_deposit.c
 yt/geometry/selection_routines.c
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h
@@ -35,6 +39,7 @@
 yt/utilities/lib/GridTree.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
+yt/utilities/lib/write_array.c
 syntax: glob
 *.pyc
 .*.swp

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5156,3 +5156,5 @@
 0000000000000000000000000000000000000000 mpi-opaque
 f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
 0000000000000000000000000000000000000000 hop callback
+a71dffe4bc813fdadc506ccad9efb632e23dc843 yt-3.0a1
+954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -10,14 +10,15 @@
 # subversion checkout of yt, you can set YT_DIR, too.  (It'll already
 # check the current directory and one up.
 #
-# And, feel free to drop me a line: matthewturk at gmail.com
+# If you experience problems, please visit the Help section at 
+# http://yt-project.org.
 #
 
 DEST_SUFFIX="yt-`uname -m`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
 BRANCH="yt-3.0" # This is the branch to which we will forcibly update.
 
-if [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
+if [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
 then
     DEST_DIR=${YT_DEST}
 fi
@@ -34,7 +35,7 @@
 
 INST_HG=1       # Install Mercurial or not?  If hg is not already
                 # installed, yt cannot be installed.
-INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with 
+INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with
                 # the system zlib, which is compiled statically.
                 # If need be, you can turn this off.
 INST_BZLIB=1    # On some systems, libbzip2 is missing.  This can
@@ -76,7 +77,7 @@
    echo "the script to re-enable root-level installation.  Sorry!"
    exit 1
 fi
-if [[ ${DEST_DIR%/} == /usr/local ]] 
+if [[ ${DEST_DIR%/} == /usr/local ]]
 then
    echo "******************************************************"
    echo "*                                                    *"
@@ -97,6 +98,48 @@
 
 LOG_FILE="${DEST_DIR}/yt_install.log"
 
+function write_config
+{
+    CONFIG_FILE=${DEST_DIR}/.yt_config
+
+    echo INST_HG=${INST_HG} > ${CONFIG_FILE}
+    echo INST_ZLIB=${INST_ZLIB} >> ${CONFIG_FILE}
+    echo INST_BZLIB=${INST_BZLIB} >> ${CONFIG_FILE}
+    echo INST_PNG=${INST_PNG} >> ${CONFIG_FILE}
+    echo INST_FTYPE=${INST_FTYPE} >> ${CONFIG_FILE}
+    echo INST_ENZO=${INST_ENZO} >> ${CONFIG_FILE}
+    echo INST_SQLITE3=${INST_SQLITE3} >> ${CONFIG_FILE}
+    echo INST_PYX=${INST_PYX} >> ${CONFIG_FILE}
+    echo INST_0MQ=${INST_0MQ} >> ${CONFIG_FILE}
+    echo INST_ROCKSTAR=${INST_ROCKSTAR} >> ${CONFIG_FILE}
+    echo INST_SCIPY=${INST_SCIPY} >> ${CONFIG_FILE}
+    echo YT_DIR=${YT_DIR} >> ${CONFIG_FILE}
+    echo MPL_SUPP_LDFLAGS=${MPL_SUPP_LDFLAGS} >> ${CONFIG_FILE}
+    echo MPL_SUPP_CFLAGS=${MPL_SUPP_CFLAGS} >> ${CONFIG_FILE}
+    echo MPL_SUPP_CXXFLAGS=${MPL_SUPP_CXXFLAGS} >> ${CONFIG_FILE}
+    echo MAKE_PROCS=${MAKE_PROCS} >> ${CONFIG_FILE}
+    if [ ${HDF5_DIR} ]
+    then
+        echo ${HDF5_DIR} >> ${CONFIG_FILE}
+    fi
+    if [ ${NUMPY_ARGS} ]
+    then
+        echo ${NUMPY_ARGS} >> ${CONFIG_FILE}
+    fi
+}
+
+# Write config settings to file.
+CONFIG_FILE=${DEST_DIR}/.yt_config
+mkdir -p ${DEST_DIR}
+if [ -z ${REINST_YT} ] || [ ${REINST_YT} -ne 1 ]
+then
+    write_config
+elif [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -f ${CONFIG_FILE} ]
+then
+    USED_CONFIG=1
+    source ${CONFIG_FILE}
+fi
+
 function get_willwont
 {
     if [ $1 -eq 1 ]
@@ -170,6 +213,19 @@
         echo "   $ module load gcc"
         echo
     fi
+    if [ "${MYHOST##midway}" != "${MYHOST}" ]
+    then
+        echo "Looks like you're on Midway."
+        echo
+        echo " ******************************************"
+        echo " * It may be better to use the yt module! *"
+        echo " *                                        *"
+        echo " *   $ module load yt                     *"
+        echo " *                                        *"
+        echo " ******************************************"
+        echo
+        return
+    fi
     if [ "${MYOS##Darwin}" != "${MYOS}" ]
     then
         echo "Looks like you're running on Mac OSX."
@@ -181,7 +237,7 @@
 	echo "must register for an account on the apple developer tools"
 	echo "website: https://developer.apple.com/downloads to obtain the"
 	echo "download link."
-	echo 
+	echo
 	echo "We have gathered some additional instructions for each"
 	echo "version of OS X below. If you have trouble installing yt"
 	echo "after following these instructions, don't hesitate to contact"
@@ -192,15 +248,15 @@
 	echo "menu bar.  We're assuming that you've installed all operating"
 	echo "system updates; if you have an older version, we suggest"
 	echo "running software update and installing all available updates."
-	echo 
-        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the" 
+	echo
+        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
 	echo "Apple developer tools website."
         echo
         echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
 	echo "developer tools website.  You can either download the"
 	echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
-	echo "Software Update to update to XCode 3.2.6 or" 
-	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK" 
+	echo "Software Update to update to XCode 3.2.6 or"
+	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
 	echo "bundle (4.1 GB)."
         echo
         echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
@@ -208,20 +264,20 @@
         echo "Alternatively, download the Xcode command line tools from"
         echo "the Apple developer tools website."
         echo
-	echo "OS X 10.8.2: download Xcode 4.6 from the mac app store."
+	echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
 	echo "(search for Xcode)."
 	echo "Additionally, you will have to manually install the Xcode"
-	echo "command line tools, see:" 
+	echo "command line tools, see:"
 	echo "http://stackoverflow.com/questions/9353444"
 	echo "Alternatively, download the Xcode command line tools from"
 	echo "the Apple developer tools website."
 	echo
-        echo "NOTE: It's possible that the installation will fail, if so," 
-	echo "please set the following environment variables, remove any" 
+        echo "NOTE: It's possible that the installation will fail, if so,"
+	echo "please set the following environment variables, remove any"
 	echo "broken installation tree, and re-run this script verbatim."
         echo
-        echo "$ export CC=gcc-4.2"
-        echo "$ export CXX=g++-4.2"
+        echo "$ export CC=gcc"
+        echo "$ export CXX=g++"
 	echo
         OSX_VERSION=`sw_vers -productVersion`
         if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
@@ -278,7 +334,7 @@
         echo
         echo " INST_ZLIB=0"
         echo " INST_FTYPE=0"
-        echo 
+        echo
         echo " to avoid conflicts with other command-line programs "
         echo " (like eog and evince, for example)."
     fi
@@ -362,6 +418,10 @@
 get_willwont ${INST_0MQ}
 echo "be installing ZeroMQ"
 
+printf "%-15s = %s so I " "INST_ROCKSTAR" "${INST_ROCKSTAR}"
+get_willwont ${INST_ROCKSTAR}
+echo "be installing Rockstar"
+
 echo
 
 if [ -z "$HDF5_DIR" ]
@@ -383,6 +443,12 @@
 echo "hit Ctrl-C."
 echo
 host_specific
+if [ ${USED_CONFIG} ]
+then
+    echo "Settings were loaded from ${CONFIG_FILE}."
+    echo "Remove this file if you wish to return to the default settings."
+    echo
+fi
 echo "========================================================================"
 echo
 read -p "[hit enter] "
@@ -424,7 +490,7 @@
     cd ..
 }
 
-if type -P wget &>/dev/null 
+if type -P wget &>/dev/null
 then
     echo "Using wget"
     export GETFILE="wget -nv"
@@ -486,28 +552,27 @@
 cd ${DEST_DIR}/src
 
 # Now we dump all our SHA512 files out.
-
-echo 'eda1b8090e5e21e7e039ef4dd03de186a7b416df9d5a4e4422abeeb4d51383b9a6858e1ac4902d8e5010f661b295bbb2452c43c8738be668379b4eb4835d0f61  Cython-0.17.1.tar.gz' > Cython-0.17.1.tar.gz.sha512
-echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
-echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
-echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
+echo 'fb85d71bb4f80b35f0d0f1735c650dd75c5f84b05635ddf91d6241ff103b5a49158c5b851a20c15e05425f6dde32a4971b35fcbd7445f61865b4d61ffd1fbfa1  Cython-0.18.tar.gz' > Cython-0.18.tar.gz.sha512
+echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
+echo '3349152c47ed2b63c5c9aabcfa92b8497ea9d71ca551fd721e827fcb8f91ff9fbbee6bba8f8cb2dea185701b8798878b4b2435c1496b63d4b4a37c624a625299  Python-2.7.4.tgz' > Python-2.7.4.tgz.sha512
+echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo 'b3290c498191684781ca5286ab454eb1bd045e8d894f5b86fb86beb88f174e22ac3ab008fb02d6562051d9fa6a9593920cab433223f6d5473999913223b8e183  h5py-2.1.0.tar.gz' > h5py-2.1.0.tar.gz.sha512
+echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3  freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
+echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3  h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
 echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
-echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
-echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
-echo '91693ca5f34934956a7c2c98bb69a5648b2a5660afd2ecf4a05035c5420450d42c194eeef0606d7683e267e4eaaaab414df23f30b34c88219bdd5c1a0f1f66ed  mercurial-2.5.1.tar.gz' > mercurial-2.5.1.tar.gz.sha512
-echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
-echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
-echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
+echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554  libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
+echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208  matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
+echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2  mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
+echo 'a485daa556f6c76003de1dbb3e42b3daeee0a320c69c81b31a7d2ebbc2cf8ab8e96c214a4758e5e7bf814295dc1d6aa563092b714db7e719678d8462135861a8  numpy-1.7.0.tar.gz' > numpy-1.7.0.tar.gz.sha512
+echo '293d78d14a9347cb83e1a644e5f3e4447ed6fc21642c51683e5495dda08d2312194a73d1fc3c1d78287e33ed065aa251ecbaa7c0ea9189456c1702e96d78becd  sqlite-autoconf-3071601.tar.gz' > sqlite-autoconf-3071601.tar.gz.sha512
+echo 'b1c073ad26684e354f7c522c14655840592e03872bc0a94690f89cae2ff88f146fce1dad252ff27a889dac4a32ff9f8ab63ba940671f9da89e9ba3e19f1bf58d  zlib-1.2.7.tar.gz' > zlib-1.2.7.tar.gz.sha512
 echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
-echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
-echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
-echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
-echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
+echo 'b9d061ca49e54ea917e0aed2b2a48faef33061dbf6d17eae7f8c3fff0b35ca883e7324f6cb24bda542443f669dcd5748037a5f2309f4c359d68adef520894865  zeromq-3.2.2.tar.gz' > zeromq-3.2.2.tar.gz.sha512
+echo '852fce8a8308c4e1e4b19c77add2b2055ca2ba570b28e8364888df490af92b860c72e860adfb075b3405a9ceb62f343889f20a8711c9353a7d9059adee910f83  pyzmq-13.0.2.tar.gz' > pyzmq-13.0.2.tar.gz.sha512
+echo '303bd3fbea22be57fddf7df78ddf5a783d355a0c8071b1363250daafc20232ddd28eedc44aa1194f4a7afd82f9396628c5bb06819e02b065b6a1b1ae8a7c19e1  tornado-3.0.tar.gz' > tornado-3.0.tar.gz.sha512
+echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
-echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
+echo 'd67de9567256e6f1649e4f3f7dfee63371d5f00fd3fd4f92426198f862e97c57f70e827d19f4e5e1929ad85ef2ce7aa5a0596b101cafdac71672e97dc115b397  python-hglib-0.3.tar.gz' > python-hglib-0.3.tar.gz.sha512
 echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa  scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
@@ -515,50 +580,50 @@
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.5.12.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.4.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3070500.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.11.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
+[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.11.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3071601.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.12.1.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-3.2.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-13.0.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject tornado-3.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
-get_ytproject Python-2.7.3.tgz
-get_ytproject numpy-1.6.1.tar.gz
-get_ytproject matplotlib-1.2.0.tar.gz
-get_ytproject mercurial-2.5.1.tar.gz
+get_ytproject Python-2.7.4.tgz
+get_ytproject numpy-1.7.0.tar.gz
+get_ytproject matplotlib-1.2.1.tar.gz
+get_ytproject mercurial-2.5.4.tar.gz
 get_ytproject ipython-0.13.1.tar.gz
-get_ytproject h5py-2.1.0.tar.gz
-get_ytproject Cython-0.17.1.tar.gz
+get_ytproject h5py-2.1.2.tar.gz
+get_ytproject Cython-0.18.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject Forthon-0.8.10.tar.gz
-get_ytproject nose-1.2.1.tar.gz 
-get_ytproject python-hglib-0.2.tar.gz
+get_ytproject Forthon-0.8.11.tar.gz
+get_ytproject nose-1.2.1.tar.gz
+get_ytproject python-hglib-0.3.tar.gz
 get_ytproject sympy-0.7.2.tar.gz
 get_ytproject rockstar-0.99.6.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
-    if [ ! -e bzip2-1.0.5/done ]
+    if [ ! -e bzip2-1.0.6/done ]
     then
-        [ ! -e bzip2-1.0.5 ] && tar xfz bzip2-1.0.5.tar.gz
+        [ ! -e bzip2-1.0.6 ] && tar xfz bzip2-1.0.6.tar.gz
         echo "Installing BZLIB"
-        cd bzip2-1.0.5
-        if [ `uname` = "Darwin" ] 
+        cd bzip2-1.0.6
+        if [ `uname` = "Darwin" ]
         then
-            if [ -z "${CC}" ] 
+            if [ -z "${CC}" ]
             then
                 sed -i.bak 's/soname/install_name/' Makefile-libbz2_so
             else
-                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so 
+                sed -i.bak -e 's/soname/install_name/' -e "s|CC=gcc|CC=${CC}|" Makefile-libbz2_so
             fi
         fi
         ( make install CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make -f Makefile-libbz2_so CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( cp -v libbz2.so.1.0.4 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( cp -v libbz2.so.1.0.6 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -569,11 +634,11 @@
 
 if [ $INST_ZLIB -eq 1 ]
 then
-    if [ ! -e zlib-1.2.3/done ]
+    if [ ! -e zlib-1.2.7/done ]
     then
-        [ ! -e zlib-1.2.3 ] && tar xfj zlib-1.2.3.tar.bz2
+        [ ! -e zlib-1.2.7 ] && tar xfz zlib-1.2.7.tar.gz
         echo "Installing ZLIB"
-        cd zlib-1.2.3
+        cd zlib-1.2.7
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -587,11 +652,11 @@
 
 if [ $INST_PNG -eq 1 ]
 then
-    if [ ! -e libpng-1.5.12/done ]
+    if [ ! -e libpng-1.6.1/done ]
     then
-        [ ! -e libpng-1.5.12 ] && tar xfz libpng-1.5.12.tar.gz
+        [ ! -e libpng-1.6.1 ] && tar xfz libpng-1.6.1.tar.gz
         echo "Installing PNG"
-        cd libpng-1.5.12
+        cd libpng-1.6.1
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -605,11 +670,11 @@
 
 if [ $INST_FTYPE -eq 1 ]
 then
-    if [ ! -e freetype-2.4.4/done ]
+    if [ ! -e freetype-2.4.11/done ]
     then
-        [ ! -e freetype-2.4.4 ] && tar xfz freetype-2.4.4.tar.gz
+        [ ! -e freetype-2.4.11 ] && tar xfz freetype-2.4.11.tar.gz
         echo "Installing FreeType2"
-        cd freetype-2.4.4
+        cd freetype-2.4.11
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -642,11 +707,11 @@
 
 if [ $INST_SQLITE3 -eq 1 ]
 then
-    if [ ! -e sqlite-autoconf-3070500/done ]
+    if [ ! -e sqlite-autoconf-3071601/done ]
     then
-        [ ! -e sqlite-autoconf-3070500 ] && tar xfz sqlite-autoconf-3070500.tar.gz
+        [ ! -e sqlite-autoconf-3071601 ] && tar xfz sqlite-autoconf-3071601.tar.gz
         echo "Installing SQLite3"
-        cd sqlite-autoconf-3070500
+        cd sqlite-autoconf-3071601
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -655,11 +720,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.3/done ]
+if [ ! -e Python-2.7.4/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
-    cd Python-2.7.3
+    [ ! -e Python-2.7.4 ] && tar xfz Python-2.7.4.tgz
+    cd Python-2.7.4
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -674,12 +739,11 @@
 
 if [ $INST_HG -eq 1 ]
 then
-    echo "Installing Mercurial."
-    do_setup_py mercurial-2.5.1
+    do_setup_py mercurial-2.5.4
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
-    if type -P hg &>/dev/null 
+    if type -P hg &>/dev/null
     then
         export HG_EXEC=hg
     else
@@ -696,14 +760,14 @@
     elif [ -e $ORIG_PWD/../yt/mods.py ]
     then
         YT_DIR=`dirname $ORIG_PWD`
-    elif [ ! -e yt-3.0-hg ] 
+    elif [ ! -e yt-hg ]
     then
-        YT_DIR="$PWD/yt-3.0-hg/"
+        YT_DIR="$PWD/yt-hg/"
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
         # Recently the hg server has had some issues with timeouts.  In lieu of
         # a new webserver, we are now moving to a three-stage process.
         # First we clone the repo, but only up to r0.
-        ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-3.0/ ./yt-3.0-hg 2>&1 ) 1>> ${LOG_FILE}
+        ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
         # Now we update to the branch we're interested in.
         ( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
     elif [ -e yt-3.0-hg ] 
@@ -714,7 +778,7 @@
 fi
 
 # This fixes problems with gfortran linking.
-unset LDFLAGS 
+unset LDFLAGS
 
 echo "Installing distribute"
 ( ${DEST_DIR}/bin/python2.7 ${YT_DIR}/distribute_setup.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -724,7 +788,7 @@
 
 if [ $INST_SCIPY -eq 0 ]
 then
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
 else
     if [ ! -e scipy-0.11.0/done ]
     then
@@ -752,8 +816,8 @@
 	fi
     fi
     export BLAS=$PWD/BLAS/libfblas.a
-    export LAPACK=$PWD/lapack-3.4.2/liblapack.a    
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    export LAPACK=$PWD/lapack-3.4.2/liblapack.a
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
     do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
 fi
 
@@ -776,10 +840,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.2.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-do_setup_py matplotlib-1.2.0
+mkdir -p ${DEST_DIR}/src/matplotlib-1.2.1
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+do_setup_py matplotlib-1.2.1
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -791,29 +855,29 @@
 # Now we do our IPython installation, which has two optional dependencies.
 if [ $INST_0MQ -eq 1 ]
 then
-    if [ ! -e zeromq-2.2.0/done ]
+    if [ ! -e zeromq-3.2.2/done ]
     then
-        [ ! -e zeromq-2.2.0 ] && tar xfz zeromq-2.2.0.tar.gz
+        [ ! -e zeromq-3.2.2 ] && tar xfz zeromq-3.2.2.tar.gz
         echo "Installing ZeroMQ"
-        cd zeromq-2.2.0
+        cd zeromq-3.2.2
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
-    do_setup_py pyzmq-2.1.11 --zmq=${DEST_DIR}
-    do_setup_py tornado-2.2
+    do_setup_py pyzmq-13.0.2 --zmq=${DEST_DIR}
+    do_setup_py tornado-3.0
 fi
 
 do_setup_py ipython-0.13.1
-do_setup_py h5py-2.1.0
-do_setup_py Cython-0.17.1
-do_setup_py Forthon-0.8.10
+do_setup_py h5py-2.1.2
+do_setup_py Cython-0.18
+do_setup_py Forthon-0.8.11
 do_setup_py nose-1.2.1
-do_setup_py python-hglib-0.2
+do_setup_py python-hglib-0.3
 do_setup_py sympy-0.7.2
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
+[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.12.1
 
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]

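The new write_config/source pair above gives the installer a persistence round trip: a fresh run records every INST_* toggle and supplementary flag in ${DEST_DIR}/.yt_config, and a reinstall (REINST_YT=1) sources that file so the previous choices are reused without re-editing the script. A minimal Python paraphrase of the same round trip (the .yt_config name comes from the diff; everything else is illustrative):

    CONFIG_FILE = ".yt_config"  # stands in for ${DEST_DIR}/.yt_config

    def write_config(settings):
        # Fresh install: record each toggle, one KEY=VALUE per line.
        with open(CONFIG_FILE, "w") as f:
            for key, value in settings.items():
                f.write("%s=%s\n" % (key, value))

    def load_config():
        # Reinstall: read the toggles back, as `source` does in the shell.
        settings = {}
        with open(CONFIG_FILE) as f:
            for line in f:
                key, _, value = line.strip().partition("=")
                settings[key] = value
        return settings

    write_config({"INST_HG": "1", "INST_ROCKSTAR": "0"})
    assert load_config()["INST_ROCKSTAR"] == "0"
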
diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,6 +18,9 @@
 from distutils.core import Command
 from distutils.spawn import find_executable
 
+def find_fortran_deps():
+    return (find_executable("Forthon"),
+            find_executable("gfortran"))
 
 class BuildForthon(Command):
 
@@ -41,9 +44,7 @@
     def run(self):
 
         """runner"""
-        Forthon_exe = find_executable("Forthon")
-        gfortran_exe = find_executable("gfortran")
-
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
         if None in (Forthon_exe, gfortran_exe):
             sys.stderr.write(
                 "fKDpy.so won't be built due to missing Forthon/gfortran\n"
@@ -193,9 +194,13 @@
 
 class my_install_data(np_install_data.install_data):
     def run(self):
-        self.distribution.data_files.append(
-            ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
-        )
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
+        if None in (Forthon_exe, gfortran_exe):
+            pass
+        else:
+            self.distribution.data_files.append(
+                ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
+                )
         np_install_data.install_data.run(self)
 
 class my_build_py(build_py):

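Factoring the lookup into find_fortran_deps() lets the build and install steps make the same decision: when Forthon or gfortran is missing, fKDpy.so is neither built nor registered as a data file, so installs no longer fail on an artifact that was never produced. The guard pattern in isolation (the message text is illustrative):

    from distutils.spawn import find_executable

    def find_fortran_deps():
        # Each lookup returns the executable's path, or None if absent.
        return (find_executable("Forthon"), find_executable("gfortran"))

    Forthon_exe, gfortran_exe = find_fortran_deps()
    if None in (Forthon_exe, gfortran_exe):
        print("fKDpy.so will be skipped: Forthon/gfortran not found")
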
diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -24,6 +24,13 @@
 """
 
 import numpy as np
+from yt.utilities.physical_constants import \
+    charge_proton_cgs, \
+    cm_per_km, \
+    km_per_cm, \
+    mass_electron_cgs, \
+    speed_of_light_cgs
+
 
 def voigt(a,u):
     """
@@ -167,10 +174,10 @@
     """
 
     ## constants
-    me = 1.6726231e-24 / 1836.        # grams mass electron 
-    e = 4.8032e-10                    # esu 
-    c = 2.99792456e5                  # km/s
-    ccgs = c * 1.e5                   # cm/s 
+    me = mass_electron_cgs              # electron mass in grams
+    e = charge_proton_cgs               # proton charge in esu
+    c = speed_of_light_cgs * km_per_cm  # km/s
+    ccgs = speed_of_light_cgs           # cm/s
 
     ## shift lam0 by deltav
     if deltav is not None:
@@ -181,7 +188,7 @@
         lam1 = lam0
 
     ## conversions
-    vdop = vkms * 1.e5                # in cm/s
+    vdop = vkms * cm_per_km           # in cm/s
     lam0cgs = lam0 / 1.e8             # rest wavelength in cm
     lam1cgs = lam1 / 1.e8             # line wavelength in cm
     nu1 = ccgs / lam1cgs              # line freq in Hz

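Replacing the inline numbers with named imports keeps every module agreeing on one set of values. A quick sanity check that the old literals match the named constants (the CGS values below are assumed, quoted to the precision the old code used):

    # Assumed values for the named constants:
    mass_electron_cgs = 9.1094e-28      # g
    speed_of_light_cgs = 2.99792458e10  # cm/s
    km_per_cm = 1.0e-5

    # The old hard-coded electron mass was proton mass / 1836:
    assert abs(1.6726231e-24 / 1836. - mass_electron_cgs) < 1e-30
    # The old hard-coded speed of light was 2.99792456e5 km/s:
    assert abs(speed_of_light_cgs * km_per_cm - 2.99792456e5) < 0.1
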
diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -117,3 +117,6 @@
 from .two_point_functions.api import \
     TwoPointFunctions, \
     FcnSet
+
+from .radmc3d_export.api import \
+    RadMC3DWriter

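With this re-export, the new writer becomes importable from the top-level analysis-modules namespace rather than only from its own subpackage:

    # Both paths now resolve; the second is the new convenience import:
    from yt.analysis_modules.radmc3d_export.api import RadMC3DWriter
    from yt.analysis_modules.api import RadMC3DWriter
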
diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -108,6 +108,7 @@
         self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
 
         self.light_ray_solution = []
+        self.halo_lists = {}
         self._data = {}
 
         # Get list of datasets for light ray solution.
@@ -192,6 +193,7 @@
                        get_los_velocity=False,
                        get_nearest_halo=False,
                        nearest_halo_fields=None,
+                       halo_list_file=None,
                        halo_profiler_parameters=None,
                        njobs=1, dynamic=False):
         """
@@ -229,6 +231,10 @@
             A list of fields to be calculated for the halos nearest to
             every lixel in the ray.
             Default: None.
+        halo_list_file : str
+            Filename containing a list of halo properties to be used 
+            for getting the nearest halos to absorbers.
+            Default: None.
         halo_profiler_parameters: dict
             A dictionary of parameters to be passed to the HaloProfiler
             to create the appropriate data used to get properties for
@@ -287,7 +293,7 @@
         >>> # Make the profiles.
         >>> halo_profiler_actions.append({'function': make_profiles,
         ...                           'args': None,
-        ...                           'kwargs': {'filename': 'VirializedHalos.out'}})
+        ...                           'kwargs': {'filename': 'VirializedHalos.h5'}})
         ...
         >>> halo_list = 'filtered'
         >>> halo_profiler_parameters = dict(halo_profiler_kwargs=halo_profiler_kwargs,
@@ -305,6 +311,7 @@
         ...                   get_nearest_halo=True,
         ...                   nearest_halo_fields=['TotalMassMsun_100',
         ...                                        'RadiusMpc_100'],
+        ...                   halo_list_file='VirializedHalos.h5',
         ...                   halo_profiler_parameters=halo_profiler_parameters,
         ...                   get_los_velocity=True)
         
@@ -321,17 +328,18 @@
         # Initialize data structures.
         self._data = {}
         if fields is None: fields = []
-        all_fields = [field for field in fields]
+        data_fields = fields[:]
+        all_fields = fields[:]
         all_fields.extend(['dl', 'dredshift', 'redshift'])
         if get_nearest_halo:
             all_fields.extend(['x', 'y', 'z', 'nearest_halo'])
             all_fields.extend(['nearest_halo_%s' % field \
                                for field in nearest_halo_fields])
-            fields.extend(['x', 'y', 'z'])
+            data_fields.extend(['x', 'y', 'z'])
         if get_los_velocity:
             all_fields.extend(['x-velocity', 'y-velocity',
                                'z-velocity', 'los_velocity'])
-            fields.extend(['x-velocity', 'y-velocity', 'z-velocity'])
+            data_fields.extend(['x-velocity', 'y-velocity', 'z-velocity'])
 
         all_ray_storage = {}
         for my_storage, my_segment in parallel_objects(self.light_ray_solution,
@@ -348,10 +356,6 @@
                        (my_segment['redshift'], my_segment['start'],
                         my_segment['end']))
 
-            if get_nearest_halo:
-                halo_list = self._get_halo_list(my_segment['filename'],
-                                                **halo_profiler_parameters)
-
             # Load dataset for segment.
             pf = load(my_segment['filename'])
 
@@ -373,7 +377,7 @@
                                                  (sub_ray['dts'] *
                                                   vector_length(sub_segment[0],
                                                                 sub_segment[1]))])
-                for field in fields:
+                for field in data_fields:
                     sub_data[field] = np.concatenate([sub_data[field],
                                                       (sub_ray[field])])
 
@@ -400,6 +404,9 @@
 
             # Calculate distance to nearest object on halo list for each lixel.
             if get_nearest_halo:
+                halo_list = self._get_halo_list(pf, fields=nearest_halo_fields,
+                                                filename=halo_list_file,
+                                                **halo_profiler_parameters)
                 sub_data.update(self._get_nearest_halo_properties(sub_data, halo_list,
                                 fields=nearest_halo_fields))
                 sub_data['nearest_halo'] *= pf.units['mpccm']
@@ -434,58 +441,92 @@
         self._data = all_data
         return all_data
 
-    def _get_halo_list(self, dataset, halo_profiler_kwargs=None,
+    def _get_halo_list(self, pf, fields=None, filename=None, 
+                       halo_profiler_kwargs=None,
                        halo_profiler_actions=None, halo_list='all'):
-        "Load a list of halos for the dataset."
+        "Load a list of halos for the pf."
+
+        if str(pf) in self.halo_lists:
+            return self.halo_lists[str(pf)]
+
+        if fields is None: fields = []
+
+        if filename is not None and \
+                os.path.exists(os.path.join(pf.fullpath, filename)):
+
+            my_filename = os.path.join(pf.fullpath, filename)
+            mylog.info("Loading halo list from %s." % my_filename)
+            my_list = {}
+            in_file = h5py.File(my_filename, 'r')
+            for field in fields + ['center']:
+                my_list[field] = in_file[field][:]
+            in_file.close()
+
+        else:
+            my_list = self._halo_profiler_list(pf, fields=fields,
+                                               halo_profiler_kwargs=halo_profiler_kwargs,
+                                               halo_profiler_actions=halo_profiler_actions,
+                                               halo_list=halo_list)
+
+        self.halo_lists[str(pf)] = my_list
+        return self.halo_lists[str(pf)]
+
+    def _halo_profiler_list(self, pf, fields=None, 
+                            halo_profiler_kwargs=None,
+                            halo_profiler_actions=None, halo_list='all'):
+        "Run the HaloProfiler to get the halo list."
 
         if halo_profiler_kwargs is None: halo_profiler_kwargs = {}
         if halo_profiler_actions is None: halo_profiler_actions = []
 
-        hp = HaloProfiler(dataset, **halo_profiler_kwargs)
+        hp = HaloProfiler(pf, **halo_profiler_kwargs)
         for action in halo_profiler_actions:
             if not action.has_key('args'): action['args'] = ()
             if not action.has_key('kwargs'): action['kwargs'] = {}
             action['function'](hp, *action['args'], **action['kwargs'])
 
         if halo_list == 'all':
-            return_list = copy.deepcopy(hp.all_halos)
+            hp_list = copy.deepcopy(hp.all_halos)
         elif halo_list == 'filtered':
-            return_list = copy.deepcopy(hp.filtered_halos)
+            hp_list = copy.deepcopy(hp.filtered_halos)
         else:
             mylog.error("Keyword, halo_list, must be either 'all' or 'filtered'.")
-            return_list = None
+            hp_list = None
 
         del hp
+
+        # Create position array from halo list.
+        return_list = dict([(field, []) for field in fields + ['center']])
+        for halo in hp_list:
+            for field in fields + ['center']:
+                return_list[field].append(halo[field])
+        for field in fields + ['center']:
+            return_list[field] = np.array(return_list[field])
         return return_list
-
+        
     def _get_nearest_halo_properties(self, data, halo_list, fields=None):
         """
         Calculate distance to nearest object in halo list for each lixel in data.
-        Return list of distances and masses of nearest objects.
+        Return list of distances and other properties of nearest objects.
         """
 
         if fields is None: fields = []
+        field_data = dict([(field, np.zeros_like(data['x'])) \
+                           for field in fields])
+        nearest_distance = np.zeros_like(data['x'])
 
-        # Create position array from halo list.
-        halo_centers = np.array(map(lambda halo: halo['center'], halo_list))
-        halo_field_values = dict([(field, np.array(map(lambda halo: halo[field],
-                                                       halo_list))) \
-                                  for field in fields])
-
-        nearest_distance = np.zeros(data['x'].shape)
-        field_data = dict([(field, np.zeros(data['x'].shape)) \
-                           for field in fields])
-        for index in xrange(nearest_distance.size):
-            nearest = np.argmin(periodic_distance(np.array([data['x'][index],
-                                                            data['y'][index],
-                                                            data['z'][index]]),
-                                                  halo_centers))
-            nearest_distance[index] = periodic_distance(np.array([data['x'][index],
-                                                                  data['y'][index],
-                                                                  data['z'][index]]),
-                                                        halo_centers[nearest])
-            for field in fields:
-                field_data[field][index] = halo_field_values[field][nearest]
+        if halo_list['center'].size > 0:
+            for index in xrange(nearest_distance.size):
+                nearest = np.argmin(periodic_distance(np.array([data['x'][index],
+                                                                data['y'][index],
+                                                                data['z'][index]]),
+                                                      halo_list['center']))
+                nearest_distance[index] = periodic_distance(np.array([data['x'][index],
+                                                                      data['y'][index],
+                                                                      data['z'][index]]),
+                                                            halo_list['center'][nearest])
+                for field in fields:
+                    field_data[field][index] = halo_list[field][nearest]
 
         return_data = {'nearest_halo': nearest_distance}
         for field in fields:

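Two structural changes here: the halo list is now built per dataset inside the segment loop and memoized in self.halo_lists (keyed by str(pf)), and it can be read straight from a saved HDF5 file instead of re-running the HaloProfiler every time. A stripped-down sketch of that cache-or-load logic (helper and variable names are illustrative; the h5py access mirrors the diff):

    import os
    import h5py

    _halo_cache = {}  # stands in for self.halo_lists

    def get_halo_list(key, filename, fields):
        if key in _halo_cache:          # memoized per dataset
            return _halo_cache[key]
        my_list = {}
        if filename is not None and os.path.exists(filename):
            in_file = h5py.File(filename, "r")
            for field in fields + ["center"]:
                my_list[field] = in_file[field][:]
            in_file.close()
        # else: fall back to running the HaloProfiler (omitted here)
        _halo_cache[key] = my_list
        return my_list
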
diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -45,7 +45,10 @@
 from yt.utilities.performance_counters import \
     yt_counters, time_function
 from yt.utilities.math_utils import periodic_dist, get_rotation_matrix
-from yt.utilities.physical_constants import rho_crit_now, mass_sun_cgs
+from yt.utilities.physical_constants import \
+    rho_crit_now, \
+    mass_sun_cgs, \
+    TINY
 
 from .hop.EnzoHop import RunHOP
 from .fof.EnzoFOF import RunFOF
@@ -60,8 +63,6 @@
     ParallelAnalysisInterface, \
     parallel_blocking_call
 
-TINY = 1.e-40
-
 class Halo(object):
     """
     A data source that returns particle information about the members of a
@@ -143,10 +144,10 @@
             return self.CoM
         pm = self["ParticleMassMsun"]
         c = {}
-        c[0] = self["particle_position_x"]
-        c[1] = self["particle_position_y"]
-        c[2] = self["particle_position_z"]
-        c_vec = np.zeros(3)
+        # We shift into a box where the origin is the left edge
+        c[0] = self["particle_position_x"] - self.pf.domain_left_edge[0]
+        c[1] = self["particle_position_y"] - self.pf.domain_left_edge[1]
+        c[2] = self["particle_position_z"] - self.pf.domain_left_edge[2]
         com = []
         for i in range(3):
             # A halo is likely periodic around a boundary if the distance 
@@ -159,13 +160,12 @@
                 com.append(c[i])
                 continue
             # Now we want to flip around only those close to the left boundary.
-            d_left = c[i] - self.pf.domain_left_edge[i]
-            sel = (d_left <= (self.pf.domain_width[i]/2))
+            sel = (c[i] <= (self.pf.domain_width[i]/2))
             c[i][sel] += self.pf.domain_width[i]
             com.append(c[i])
         com = np.array(com)
         c = (com * pm).sum(axis=1) / pm.sum()
-        return c%self.pf.domain_width
+        return c%self.pf.domain_width + self.pf.domain_left_edge
 
     def maximum_density(self):
         r"""Return the HOP-identified maximum density. Not applicable to
@@ -1062,7 +1062,7 @@
     def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
-        *dm_only* is set, only run it on the dark matter particles, otherwise
+        *dm_only* is True (default), only run it on the dark matter particles, otherwise
         on all particles.  Returns an iterable collection of *HopGroup* items.
         """
         self._data_source = data_source
@@ -1097,7 +1097,7 @@
     def _get_dm_indices(self):
         if 'creation_time' in self._data_source.hierarchy.field_list:
             mylog.debug("Differentiating based on creation time")
-            return (self._data_source["creation_time"] < 0)
+            return (self._data_source["creation_time"] <= 0)
         elif 'particle_type' in self._data_source.hierarchy.field_list:
             mylog.debug("Differentiating based on particle type")
             return (self._data_source["particle_type"] == 1)
@@ -1367,6 +1367,7 @@
         self._groups = []
         self._max_dens = -1
         self.pf = pf
+        self.redshift = pf.current_redshift
         self.out_list = out_list
         self._data_source = pf.h.all_data()
         mylog.info("Parsing Rockstar halo list")
@@ -1428,7 +1429,7 @@
         fglob = path.join(basedir, 'halos_%d.*.bin' % n)
         files = glob.glob(fglob)
         halos = self._get_halos_binary(files)
-        #Jc = 1.98892e33/pf['mpchcm']*1e5
+        #Jc = mass_sun_cgs/ pf['mpchcm'] * 1e5
         Jc = 1.0
         length = 1.0 / pf['mpchcm']
         conv = dict(pos = np.array([length, length, length,
@@ -1457,7 +1458,7 @@
 class HOPHaloList(HaloList):
     """
     Run hop on *data_source* with a given density *threshold*.  If
-    *dm_only* is set, only run it on the dark matter particles, otherwise
+    *dm_only* is True (default), only run it on the dark matter particles, otherwise
     on all particles.  Returns an iterable collection of *HopGroup* items.
     """
     _name = "HOP"
@@ -1656,7 +1657,7 @@
 class parallelHOPHaloList(HaloList, ParallelAnalysisInterface):
     """
     Run hop on *data_source* with a given density *threshold*.  If
-    *dm_only* is set, only run it on the dark matter particles, otherwise
+    *dm_only* is True (default), only run it on the dark matter particles, otherwise
     on all particles.  Returns an iterable collection of *HopGroup* items.
     """
     _name = "parallelHOP"
@@ -2008,13 +2009,11 @@
         --------
         >>> halos.write_out("HopAnalysis.out")
         """
-        # if path denoted in filename, assure path exists
-        if len(filename.split('/')) > 1:
-            mkdir_rec('/'.join(filename.split('/')[:-1]))
-
+        ensure_dir_exists(filename)
         f = self.comm.write_on_root(filename)
         HaloList.write_out(self, f, ellipsoid_data)
 
+
     def write_particle_lists_txt(self, prefix):
         r"""Write out the names of the HDF5 files containing halo particle data
         to a text file.
@@ -2031,13 +2030,11 @@
         --------
         >>> halos.write_particle_lists_txt("halo-parts")
         """
-        # if path denoted in prefix, assure path exists
-        if len(prefix.split('/')) > 1:
-            mkdir_rec('/'.join(prefix.split('/')[:-1]))
-
+        ensure_dir_exists(prefix)
         f = self.comm.write_on_root("%s.txt" % prefix)
         HaloList.write_particle_lists_txt(self, prefix, fp=f)
 
+
     @parallel_blocking_call
     def write_particle_lists(self, prefix):
         r"""Write out the particle data for halos to HDF5 files.
@@ -2058,10 +2055,7 @@
         --------
         >>> halos.write_particle_lists("halo-parts")
         """
-        # if path denoted in prefix, assure path exists
-        if len(prefix.split('/')) > 1:
-            mkdir_rec('/'.join(prefix.split('/')[:-1]))
-
+        ensure_dir_exists(prefix)
         fn = "%s.h5" % self.comm.get_filename(prefix)
         f = h5py.File(fn, "w")
         for halo in self._groups:
@@ -2090,15 +2084,12 @@
         ellipsoid_data : bool.
             Whether to save the ellipsoidal information to the files.
             Default = False.
-        
+
         Examples
         --------
         >>> halos.dump("MyHalos")
         """
-        # if path denoted in basename, assure path exists
-        if len(basename.split('/')) > 1:
-            mkdir_rec('/'.join(basename.split('/')[:-1]))
-
+        ensure_dir_exists(basename)
         self.write_out("%s.out" % basename, ellipsoid_data)
         self.write_particle_lists(basename)
         self.write_particle_lists_txt(basename)
@@ -2131,7 +2122,7 @@
         The density threshold used when building halos. Default = 160.0.
     dm_only : bool
         If True, only dark matter particles are used when building halos.
-        Default = False.
+        Default = True.
     resize : bool
         Turns load-balancing on or off. Default = True.
     kdtree : string
@@ -2460,7 +2451,7 @@
         The density threshold used when building halos. Default = 160.0.
     dm_only : bool
         If True, only dark matter particles are used when building halos.
-        Default = False.
+        Default = True.
     padding : float
         When run in parallel, the finder needs to surround each subvolume
         with duplicated particles for halo finding to work. This number
@@ -2565,7 +2556,7 @@
         applied.  Default = 0.2.
     dm_only : bool
         If True, only dark matter particles are used when building halos.
-        Default = False.
+        Default = True.
     padding : float
         When run in parallel, the finder needs to surround each subvolume
         with duplicated particles for halo finding to work. This number

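The center-of-mass fix shifts particle positions so the domain's left edge is the origin before doing the periodic flip, then shifts back at the end; without that, domains whose left edge is nonzero computed a wrongly wrapped center. A one-dimensional toy of the corrected logic:

    import numpy as np

    # Domain [-0.5, 0.5), one cluster straddling the periodic boundary.
    left, width = -0.5, 1.0
    x = np.array([-0.48, -0.45, 0.47, 0.49])
    m = np.ones_like(x)

    c = x - left                        # origin at the left edge
    if c.max() - c.min() > width / 2:   # likely wraps the boundary
        c[c <= width / 2] += width      # flip points near the left edge
    com = (c * m).sum() / m.sum()
    print(com % width + left)           # ~ -0.4925, not the naive 0.0075
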
diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -238,6 +238,7 @@
         tpf = ts[0]
 
         def _particle_count(field, data):
+            if data.NumberOfParticles == 0: return 0
             try:
                 data["particle_type"]
                 has_particle_type=True
@@ -337,6 +338,8 @@
                     hires_only = (self.hires_dm_mass is not None),
                     **kwargs)
         # Make the directory to store the halo lists in.
+        if not self.outbase:
+            self.outbase = os.getcwd()
         if self.comm.rank == 0:
             if not os.path.exists(self.outbase):
                 os.makedirs(self.outbase)

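Two small robustness fixes: _particle_count now short-circuits on empty grids, and outbase defaults to the current working directory when left unset, so the later makedirs call always has a real path. The fallback in isolation:

    import os

    outbase = None                 # caller left it unset
    if not outbase:
        outbase = os.getcwd()      # same default the finder now applies
    if not os.path.exists(outbase):
        os.makedirs(outbase)
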
diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -31,6 +31,11 @@
     ParallelDummy, \
     ParallelAnalysisInterface, \
     parallel_blocking_call
+from yt.utilities.physical_constants import \
+    cm_per_mpc, \
+    mass_sun_cgs, \
+    rho_crit_now
+
 
 class HaloMassFcn(ParallelAnalysisInterface):
     """
@@ -212,7 +217,7 @@
             dis[self.num_sigma_bins-i-3] += dis[self.num_sigma_bins-i-2]
             if i == (self.num_sigma_bins - 3): break
 
-        self.dis = dis  / self.pf['CosmologyComovingBoxSize']**3.0 * self.hubble0**3.0
+        self.dis = dis  / (self.pf.domain_width * self.pf.units["mpccm"]).prod()
 
     def sigmaM(self):
         """
@@ -259,7 +264,9 @@
         sigma8_unnorm = math.sqrt(self.sigma_squared_of_R(R));
         sigma_normalization = self.sigma8input / sigma8_unnorm;
 
-        rho0 = self.omega_matter0 * 2.78e+11; # in units of h^2 Msolar/Mpc^3
+        # rho0 in units of h^2 Msolar/Mpc^3
+        rho0 = self.omega_matter0 * \
+                rho_crit_now * cm_per_mpc**3 / mass_sun_cgs
 
         # spacing in mass of our sigma calculation
         dm = (float(self.log_mass_max) - self.log_mass_min)/self.num_sigma_bins;
@@ -294,7 +301,9 @@
     def dndm(self):
         
         # constants - set these before calling any functions!
-        rho0 = self.omega_matter0 * 2.78e+11; # in units of h^2 Msolar/Mpc^3
+        # rho0 in units of h^2 Msolar/Mpc^3
+        rho0 = self.omega_matter0 * \
+                rho_crit_now * cm_per_mpc**3 / mass_sun_cgs
         self.delta_c0 = 1.69;  # critical density for turnaround (Press-Schechter)
         
         nofmz_cum = 0.0;  # keep track of cumulative number density

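The magic number 2.78e+11 was the critical density expressed in h^2 Msolar/Mpc^3; deriving it from rho_crit_now, cm_per_mpc, and mass_sun_cgs makes the units explicit. A numeric check, using the rho_crit_now and mass_sun_cgs values quoted elsewhere in this changeset (the cm_per_mpc value is an assumed standard conversion):

    rho_crit_now = 1.8788e-29   # g cm^-3 (times h^2)
    cm_per_mpc = 3.0857e24      # cm per megaparsec (assumed value)
    mass_sun_cgs = 1.989e33     # g

    rho0_per_h2 = rho_crit_now * cm_per_mpc**3 / mass_sun_cgs
    print(rho0_per_h2)          # ~2.78e+11, matching the replaced literal
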
diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -143,7 +143,7 @@
         Note that this is not a string, so no quotes. Default = HaloFinder.
     halo_finder_threshold : Float
         If using HaloFinder or parallelHF, the value of the density threshold
-        used when halo finding. Default = 80.0.
+        used when halo finding. Default = 160.0.
     FOF_link_length : Float
         If using FOFHaloFinder, the linking length between particles.
         Default = 0.2.
@@ -169,7 +169,7 @@
     ... halo_finder_function=parallelHF)
     """
     def __init__(self, restart_files=[], database='halos.db',
-            halo_finder_function=HaloFinder, halo_finder_threshold=80.0,
+            halo_finder_function=HaloFinder, halo_finder_threshold=160.0,
             FOF_link_length=0.2, dm_only=False, refresh=False,
             index=True):
         ParallelAnalysisInterface.__init__(self)

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/halo_profiler/halo_filters.py
--- a/yt/analysis_modules/halo_profiler/halo_filters.py
+++ b/yt/analysis_modules/halo_profiler/halo_filters.py
@@ -27,6 +27,7 @@
 import numpy as np
 
 from yt.funcs import *
+from yt.utilities.physical_constants import TINY
 
 def VirialFilter(profile, overdensity_field='ActualOverdensity',
                  virial_overdensity=200., must_be_virialized=True,
@@ -105,7 +106,8 @@
 
     if use_log:
         for field in temp_profile.keys():
-            temp_profile[field] = np.log10(temp_profile[field])
+            temp_profile[field] = np.log10(np.clip(temp_profile[field], TINY,
+                                                   max(temp_profile[field])))
 
     virial = dict((field, 0.0) for field in fields)
 

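Clipping to TINY before taking log10 keeps zero-valued profile bins from producing -inf (and runtime warnings) in the virial interpolation. A toy illustration, using the TINY = 1e-40 value this changeset moves into physical_constants:

    import numpy as np

    TINY = 1.0e-40
    profile = np.array([0.0, 1.0e-12, 3.5])
    safe = np.log10(np.clip(profile, TINY, profile.max()))
    print(safe)   # [-40. -12.  0.544...], no -inf entries
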
diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import gc
 import numpy as np
 import os
 import h5py
@@ -52,6 +53,9 @@
     parallel_blocking_call, \
     parallel_root_only, \
     parallel_objects
+from yt.utilities.physical_constants import \
+    mass_sun_cgs, \
+    rho_crit_now
 from yt.visualization.fixed_resolution import \
     FixedResolutionBuffer
 from yt.visualization.image_writer import write_image
@@ -583,7 +587,7 @@
 
             r_min = 2 * self.pf.h.get_smallest_dx() * self.pf['mpc']
             if (halo['r_max'] / r_min < PROFILE_RADIUS_THRESHOLD):
-                mylog.error("Skipping halo with r_max / r_min = %f." % (halo['r_max']/r_min))
+                mylog.debug("Skipping halo with r_max / r_min = %f." % (halo['r_max']/r_min))
                 return None
 
             # get a sphere object to profile
@@ -629,6 +633,10 @@
                 g.clear_data()
             sphere.clear_data()
             del sphere
+            # Currently, this seems to be the only way to prevent large
+            # halo profiling runs from running out of RAM.
+            # It would be good to track down the real cause at some point.
+            gc.collect()
 
         return profile
 
@@ -951,12 +959,11 @@
         if 'ActualOverdensity' in profile.keys():
             return
 
-        rho_crit_now = 1.8788e-29 * self.pf.hubble_constant**2 # g cm^-3
-        Msun2g = 1.989e33
-        rho_crit = rho_crit_now * ((1.0 + self.pf.current_redshift)**3.0)
+        rhocritnow = rho_crit_now * self.pf.hubble_constant**2 # g cm^-3
+        rho_crit = rhocritnow * ((1.0 + self.pf.current_redshift)**3.0)
         if not self.use_critical_density: rho_crit *= self.pf.omega_matter
 
-        profile['ActualOverdensity'] = (Msun2g * profile['TotalMassMsun']) / \
+        profile['ActualOverdensity'] = (mass_sun_cgs * profile['TotalMassMsun']) / \
             profile['CellVolume'] / rho_crit
 
     def _check_for_needed_profile_fields(self):

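The explicit gc.collect() after each halo is an acknowledged workaround: clearing the sphere's data and deleting it is not enough when reference cycles delay collection, so the cycle collector is forced to run between halos. A self-contained toy of the same release-then-collect discipline:

    import gc

    class Sphere(object):
        # Stand-in for a yt sphere holding a large data buffer.
        def __init__(self):
            self.data = [0.0] * 10**6
        def clear_data(self):
            self.data = None

    for _ in range(3):
        sphere = Sphere()
        # ... profile the sphere here ...
        sphere.clear_data()
        del sphere
        gc.collect()  # force the cycle collector between iterations
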
diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/halo_profiler/standard_analysis.py
--- a/yt/analysis_modules/halo_profiler/standard_analysis.py
+++ b/yt/analysis_modules/halo_profiler/standard_analysis.py
@@ -67,8 +67,10 @@
         self.prof = prof
 
     def plot_everything(self, dirname = None):
-        if dirname is None: dirname = "%s_profile_plots/" % (self.pf)
-        if not os.path.isdir(dirname): os.makedirs(dirname)
+        if not dirname:
+            dirname = "%s_profile_plots/" % (self.pf)
+        if not os.path.isdir(dirname):
+            os.makedirs(dirname)
         import matplotlib; matplotlib.use("Agg")
         import pylab
         for field in self.prof.keys():

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- /dev/null
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -0,0 +1,334 @@
+"""
+Code to export from yt to RadMC3D
+
+Author: Andrew Myers <atmyers2 at gmail.com>
+Affiliation: UCB
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Andrew Myers.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from yt.mods import *
+from yt.utilities.lib.write_array import \
+    write_3D_array, write_3D_vector_array
+
+class RadMC3DLayer:
+    '''
+
+    This class represents an AMR "layer" of the style described in
+    the radmc3d manual. Unlike yt grids, layers may not have more
+    than one parent, so level L grids will need to be split up
+    if they straddle two or more level L - 1 grids. 
+
+    '''
+    def __init__(self, level, parent, unique_id, LE, RE, dim):
+        self.level = level
+        self.parent = parent
+        self.LeftEdge = LE
+        self.RightEdge = RE
+        self.ActiveDimensions = dim
+        self.id = unique_id
+
+    def get_overlap_with(self, grid):
+        '''
+
+        Returns the overlapping region between two Layers,
+        or a layer and a grid. RE < LE in any direction
+        means no overlap.
+
+        '''
+        LE = np.maximum(self.LeftEdge,  grid.LeftEdge)
+        RE = np.minimum(self.RightEdge, grid.RightEdge)
+        return LE, RE
+
+    def overlaps(self, grid):
+        '''
+
+        Returns whether or not this layer overlaps a given grid
+        
+        '''
+        LE, RE = self.get_overlap_with(grid)
+        if np.any(RE <= LE):
+            return False
+        else:
+            return True
+
+class RadMC3DWriter:
+    '''
+
+    This class provides a mechanism for writing out data files in a format
+    readable by radmc3d. Currently, only the ASCII, "Layer" style file format
+    is supported. For more information please see the radmc3d manual at:
+    http://www.ita.uni-heidelberg.de/~dullemond/software/radmc-3d
+
+    Parameters
+    ----------
+
+    pf : `StaticOutput`
+        This is the parameter file object corresponding to the
+        simulation output to be written out.
+
+    max_level : int
+        An int corresponding to the maximum number of levels of refinement
+        to include in the output. Often, this does not need to be very large
+        as information on very high levels is frequently unobservable.
+        Default = 2. 
+
+    Examples
+    --------
+
+    This will create a field called "DustDensity" and write it out to the
+    file "dust_density.inp" in a form readable by radmc3d. It will also write
+    a "dust_temperature.inp" file with everything set to 10.0 K: 
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.radmc3d_export.api import *
+
+    >>> dust_to_gas = 0.01
+    >>> def _DustDensity(field, data):
+    ...     return dust_to_gas*data["Density"]
+    >>> add_field("DustDensity", function=_DustDensity)
+
+    >>> def _DustTemperature(field, data):
+    ...     return 10.0*data["Ones"]
+    >>> add_field("DustTemperature", function=_DustTemperature)
+    
+    >>> pf = load("galaxy0030/galaxy0030")
+    >>> writer = RadMC3DWriter(pf)
+    
+    >>> writer.write_amr_grid()
+    >>> writer.write_dust_file("DustDensity", "dust_density.inp")
+    >>> writer.write_dust_file("DustTemperature", "dust_temperature.inp")
+
+    This will create a field called "NumberDensityCO" and write it out to
+    the file "numberdens_co.inp". It will also write out information about
+    the gas velocity to "gas_velocity.inp" so that Doppler broadening from
+    the gas motion may be included in the radiative transfer calculation
+    by radmc3d:
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.radmc3d_export.api import *
+
+    >>> x_co = 1.0e-4
+    >>> mu_h = 2.34e-24
+    >>> def _NumberDensityCO(field, data):
+    ...     return (x_co/mu_h)*data["Density"]
+    >>> add_field("NumberDensityCO", function=_NumberDensityCO)
+    
+    >>> pf = load("galaxy0030/galaxy0030")
+    >>> writer = RadMC3DWriter(pf)
+    
+    >>> writer.write_amr_grid()
+    >>> writer.write_line_file("NumberDensityCO", "numberdens_co.inp")
+    >>> velocity_fields = ["x-velocity", "y-velocity", "z-velocity"]
+    >>> writer.write_line_file(velocity_fields, "gas_velocity.inp") 
+
+    '''
+
+    def __init__(self, pf, max_level=2):
+        self.max_level = max_level
+        self.cell_count = 0 
+        self.layers = []
+        self.domain_dimensions = pf.domain_dimensions
+        self.domain_left_edge  = pf.domain_left_edge
+        self.domain_right_edge = pf.domain_right_edge
+        self.grid_filename = "amr_grid.inp"
+        self.pf = pf
+
+        base_layer = RadMC3DLayer(0, None, 0, \
+                                  self.domain_left_edge, \
+                                  self.domain_right_edge, \
+                                  self.domain_dimensions)
+
+        self.layers.append(base_layer)
+        self.cell_count += np.product(pf.domain_dimensions)
+
+        for grid in pf.h.grids:
+            if grid.Level <= self.max_level:
+                self._add_grid_to_layers(grid)
+
+    def _get_parents(self, grid):
+        parents = []  
+        for potential_parent in self.layers:
+            if potential_parent.level == grid.Level - 1:
+                if potential_parent.overlaps(grid):
+                    parents.append(potential_parent)
+        return parents
+
+    def _add_grid_to_layers(self, grid):
+        parents = self._get_parents(grid)
+        for parent in parents:
+            LE, RE = parent.get_overlap_with(grid)
+            N = (RE - LE) / grid.dds
+            N = np.array([int(n + 0.5) for n in N])
+            new_layer = RadMC3DLayer(grid.Level, parent.id, \
+                                     len(self.layers), \
+                                     LE, RE, N)
+            self.layers.append(new_layer)
+            self.cell_count += np.product(N)
+            
+    def write_amr_grid(self):
+        '''
+        This routine writes the "amr_grid.inp" file that describes the mesh
+        radmc3d will use.
+
+        '''
+        dims = self.domain_dimensions
+        LE   = self.domain_left_edge
+        RE   = self.domain_right_edge
+
+        # calculate cell wall positions
+        xs = [str(x) for x in np.linspace(LE[0], RE[0], dims[0]+1)]
+        ys = [str(y) for y in np.linspace(LE[1], RE[1], dims[1]+1)]
+        zs = [str(z) for z in np.linspace(LE[2], RE[2], dims[2]+1)]
+
+        # write the file header
+        grid_file = open(self.grid_filename, 'w')
+        grid_file.write('1 \n') # iformat is always 1
+        if self.max_level == 0:
+            grid_file.write('0 \n')
+        else:
+            grid_file.write('10 \n') # only layer-style AMR files are supported
+        grid_file.write('1 \n') # only cartesian coordinates are supported
+        grid_file.write('0 \n') # grid info is not included
+        grid_file.write('{}    {}    {} \n'.format(1, 1, 1)) # assume 3D
+        grid_file.write('{}    {}    {} \n'.format(dims[0], dims[1], dims[2]))
+        if self.max_level != 0:
+            s = str(self.max_level) + '    ' + str(len(self.layers)-1) + '\n'
+            grid_file.write(s)
+
+        # write base grid cell wall positions
+        for x in xs:
+            grid_file.write(x + '    ')
+        grid_file.write('\n')
+
+        for y in ys:
+            grid_file.write(y + '    ')
+        grid_file.write('\n')
+
+        for z in zs:
+            grid_file.write(z + '    ')
+        grid_file.write('\n')
+
+        # write information about fine layers, skipping the base layer:
+        for layer in self.layers[1:]:
+            p = layer.parent
+            dds = (layer.RightEdge - layer.LeftEdge) / (layer.ActiveDimensions)
+            # find this layer's parent left edge without clobbering
+            # the domain left edge stored in LE
+            if p == 0:
+                parent_LE = LE
+            else:
+                parent_LE = np.zeros(3)
+                for potential_parent in self.layers:
+                    if potential_parent.id == p:
+                        parent_LE = potential_parent.LeftEdge
+            ind = (layer.LeftEdge - parent_LE) / (2.0*dds) + 1
+            ix  = int(ind[0]+0.5)
+            iy  = int(ind[1]+0.5)
+            iz  = int(ind[2]+0.5)
+            nx, ny, nz = layer.ActiveDimensions / 2
+            s = '{}    {}    {}    {}    {}    {}    {} \n'
+            s = s.format(p, ix, iy, iz, nx, ny, nz)
+            grid_file.write(s)
+
+        grid_file.close()
+
+    def _write_layer_data_to_file(self, fhandle, field, level, LE, dim):
+        cg = self.pf.h.covering_grid(level, LE, dim, num_ghost_zones=1)
+        if isinstance(field, list):
+            data_x = cg[field[0]]
+            data_y = cg[field[1]]
+            data_z = cg[field[2]]
+            write_3D_vector_array(data_x, data_y, data_z, fhandle)
+        else:
+            data = cg[field]
+            write_3D_array(data, fhandle)
+
+    def write_dust_file(self, field, filename):
+        '''
+        This method writes out fields in the format radmc3d needs to compute
+        thermal dust emission. In particular, if you have a field called
+        "DustDensity", you can write out a dust_density.inp file.
+
+        Parameters
+        ----------
+
+        field : string
+            The name of the field to be written out
+        filename : string
+            The name of the file to write the data to. The filenames radmc3d
+            expects for its various modes of operation are described in the
+            radmc3d manual.
+
+        '''
+        fhandle = open(filename, 'w')
+
+        # write header
+        fhandle.write('1 \n')
+        fhandle.write(str(self.cell_count) + ' \n')
+        fhandle.write('1 \n')
+
+        # now write fine layers:
+        for layer in self.layers:
+            lev = layer.level
+            if lev == 0:
+                LE = self.domain_left_edge
+                N  = self.domain_dimensions
+            else:
+                LE = layer.LeftEdge
+                N  = layer.ActiveDimensions
+
+            self._write_layer_data_to_file(fhandle, field, lev, LE, N)
+            
+        fhandle.close()
+
+    def write_line_file(self, field, filename):
+        '''
+        This method writes out fields in the format radmc3d needs to compute
+        line emission.
+
+        Parameters
+        ----------
+
+        field : string or list of 3 strings
+            If a string, the name of the field to be written out. If a list,
+            three fields that will be written to the file as a vector quantity.
+        filename : string
+            The name of the file to write the data to. The filenames radmc3d
+            expects for its various modes of operation are described in the
+            radmc3d manual.
+
+        '''
+        fhandle = open(filename, 'w')
+
+        # write header
+        fhandle.write('1 \n')
+        fhandle.write(str(self.cell_count) + ' \n')
+
+        # now write fine layers:
+        for layer in self.layers:
+            lev = layer.level
+            if lev == 0:
+                LE = self.domain_left_edge
+                N  = self.domain_dimensions
+            else:
+                LE = layer.LeftEdge
+                N  = layer.ActiveDimensions
+
+            self._write_layer_data_to_file(fhandle, field, lev, LE, N)
+
+        fhandle.close()
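
As an aside on the index arithmetic in write_amr_grid above: each fine
layer is located by the 1-based index of the parent cell containing its
left edge, and a parent cell is twice as wide as a fine cell, which is
where the (2.0*dds) and the "+ 1" come from. A minimal sketch with
made-up numbers (illustrative only, not commit code):

    import numpy as np

    domain_LE = np.zeros(3)
    layer_LE = np.array([0.25, 0.25, 0.5])   # hypothetical level-1 layer
    layer_RE = np.array([0.75, 0.75, 1.0])
    dims = np.array([4, 4, 4])               # fine cells along each axis

    dds = (layer_RE - layer_LE) / dims       # fine cell width: 0.125
    # a parent cell is 2*dds wide, and radmc3d indices are 1-based:
    ind = (layer_LE - domain_LE) / (2.0 * dds) + 1
    ix, iy, iz = [int(i + 0.5) for i in ind]
    print(ix, iy, iz)                        # -> 2 2 3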

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/radmc3d_export/api.py
--- /dev/null
+++ b/yt/analysis_modules/radmc3d_export/api.py
@@ -0,0 +1,30 @@
+"""
+API for RadMC3D Export code
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Author: Andrew Myers <atmyers2 at gmail.com>
+Affiliation: UCB
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .RadMC3DInterface import \
+    RadMC3DWriter

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -20,4 +20,5 @@
     config.add_subpackage("spectral_integrator")
     config.add_subpackage("star_analysis")
     config.add_subpackage("two_point_functions")
+    config.add_subpackage("radmc3d_export")
     return config

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -37,6 +37,9 @@
 from yt.utilities.exceptions import YTException
 from yt.utilities.linear_interpolators import \
     BilinearFieldInterpolator
+from yt.utilities.physical_constants import \
+    erg_per_eV, \
+    keV_per_Hz
 
 xray_data_version = 1
 
@@ -101,7 +104,7 @@
                   np.power(10, np.concatenate([self.log_E[:-1] - 0.5 * E_diff,
                                                [self.log_E[-1] - 0.5 * E_diff[-1],
                                                 self.log_E[-1] + 0.5 * E_diff[-1]]]))
-        self.dnu = 2.41799e17 * np.diff(self.E_bins)
+        self.dnu = keV_per_Hz * np.diff(self.E_bins)
 
     def _get_interpolator(self, data, e_min, e_max):
         r"""Create an interpolator for total emissivity in a 
@@ -311,7 +314,7 @@
     """
 
     my_si = EmissivityIntegrator(filename=filename)
-    energy_erg = np.power(10, my_si.log_E) * 1.60217646e-9
+    energy_erg = np.power(10, my_si.log_E) * erg_per_eV
 
     em_0 = my_si._get_interpolator((my_si.emissivity_primordial[..., :] / energy_erg),
                                    e_min, e_max)

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -31,9 +31,13 @@
 from yt.utilities.cosmology import \
     Cosmology, \
     EnzoCosmology
+from yt.utilities.physical_constants import \
+    sec_per_year, \
+    speed_of_light_cgs
 
-YEAR = 3.155693e7 # sec / year
-LIGHT = 2.997925e10 # cm / s
+
+YEAR = sec_per_year # sec / year
+LIGHT = speed_of_light_cgs # cm / s
 
 class StarFormationRate(object):
     r"""Calculates the star formation rate for a given population of

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -35,6 +35,9 @@
 import numpy as np
 from yt.funcs import *
 import yt.utilities.lib as amr_utils
+from yt.utilities.physical_constants import \
+    kpc_per_cm, \
+    sec_per_year
 from yt.data_objects.universal_fields import add_field
 from yt.mods import *
 
@@ -524,7 +527,7 @@
                         for ax in 'xyz']).transpose()
         # Velocity is cm/s, we want it to be kpc/yr
         #vel *= (pf["kpc"]/pf["cm"]) / (365*24*3600.)
-        vel *= 1.02268944e-14 
+        vel *= kpc_per_cm * sec_per_year
     if initial_mass is None:
         #in solar masses
         initial_mass = dd["particle_mass_initial"][idx]*pf['Msun']

diff -r f09af31e835ae26014d845c12df7f658f8bfdb2c -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,9 +62,10 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold006',
+    gold_standard_filename = 'gold008',
     local_standard_filename = 'local001',
-    sketchfab_api_key = 'None'
+    sketchfab_api_key = 'None',
+    thread_field_detection = 'False'
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/fd0cc68d6934/
Changeset:   fd0cc68d6934
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-14 02:02:32
Summary:     Fixed dict init
Affected #:  1 file

diff -r b9bede66ce55d25eeb40a9a6f8e1b5a30f7cea32 -r fd0cc68d6934e930985cac2128ca60d061f7db42 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -42,10 +42,15 @@
 class IOHandlerART(BaseIOHandler):
     _data_style = "art"
     tb, ages = None, None
-    cache = {}
-    masks = {}
+    cache = None
+    masks = None
     caching = False
 
+    def __init__(self):
+        self.cache = {}
+        self.masks = {}
+        super(IOHandlerART, self).__init__()
+
     def _read_fluid_selection(self, chunks, selector, fields, size):
         # Chunks in this case will have affiliated domain subset objects
         # Each domain subset will contain a hydro_offset array, which gives
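
The pitfall fixed here is worth spelling out: a dict defined in the
class body is created once and shared by every instance, so
per-instance state must be created in __init__. A minimal sketch
(illustrative only, not commit code):

    class Shared(object):
        cache = {}              # one dict, shared by all instances

    class PerInstance(object):
        def __init__(self):
            self.cache = {}     # a fresh dict per instance

    a, b = Shared(), Shared()
    a.cache['x'] = 1
    print(b.cache)              # {'x': 1} -- b sees a's entry

    c, d = PerInstance(), PerInstance()
    c.cache['x'] = 1
    print(d.cache)              # {} -- isolated, as intended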


https://bitbucket.org/yt_analysis/yt/commits/87069c899fe6/
Changeset:   87069c899fe6
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-14 02:29:43
Summary:     Switched to standard particle deposit fields
Affected #:  1 file

diff -r fd0cc68d6934e930985cac2128ca60d061f7db42 -r 87069c899fe61f86661a5d78813e8fbc95b1d943 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -42,6 +42,10 @@
 from yt.utilities.physical_constants import mass_sun_cgs
 from yt.frontends.art.definitions import *
 
+from yt.data_objects.particle_fields import \
+    particle_deposition_functions, \
+    particle_vector_functions
+
 KnownARTFields = FieldInfoContainer()
 add_art_field = KnownARTFields.add_field
 ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
@@ -264,115 +268,12 @@
 add_field("ParticleMassMsun", function=_ParticleMassMsun, particle_type=True,
           take_log=True, units=r"\rm{Msun}")
 
-# Modeled after the TIPSY / Gadget frontend particle deposit fields
-def _particle_functions(ptype, pname):
-    mass_name = "particle_mass"
-    def particle_pos(data, axes="xyz"):
-        pos = np.column_stack([data[(ptype, "particle_position_%s" % ax)]\
-                                    for ax in axes])
-        if len(axes)==1:
-            return pos[0]
-        return pos
+# Particle Deposition Fields
+_ptypes = ["all", "darkmatter", "stars", "specie0"]
 
-    def particle_vel(data, axes="xyz"):
-        pos = np.column_stack([data[(ptype, "particle_velocity_%s" % ax)]\
-                                    for ax in axes])
-        if len(axes)==1:
-            return pos[0]
-        return pos
-
-    def particle_count(field, data):
-        pos = particle_pos(data)
-        d = data.deposit(pos, method = "count")
-        return d
-    
-    add_field("deposit_%s_count" % ptype,
-             function = particle_count,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Count}" % pname,
-             projection_conversion = '1')
-
-    def particle_mass(field, data):
-        pos = particle_pos(data)
-        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
-        return d
-
-    add_field("deposit_%s_mass" % ptype,
-             function = particle_mass,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Mass}" % pname,
-             units = r"\mathrm{g}",
-             projected_units = r"\mathrm{g}\/\mathrm{cm}",
-             projection_conversion = 'cm')
-
-    def particle_density(field, data):
-        pos = particle_pos(data)
-        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
-        d /= data["CellVolume"]
-        return d
-
-    add_field("deposit_%s_density" % ptype,
-             function = particle_density,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Density}" % pname,
-             units = r"\mathrm{g}/\mathrm{cm}^{3}",
-             projected_units = r"\mathrm{g}/\mathrm{cm}^{-2}",
-             projection_conversion = 'cm')
-
-    def particle_number_density(field, data):
-        pos = particle_pos(data)
-        d = data.deposit(pos, method = "count")
-        d /= data["CellVolume"]
-        return d
-
-    add_field("deposit_%s_number_density" % ptype,
-             function = particle_density,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Number Density}" % pname,
-             units = r"\mathrm{1}/\mathrm{cm}^{3}",
-             projected_units = r"\mathrm{1}/\mathrm{cm}^{-2}",
-             projection_conversion = 'cm')
-
-
-    for ax in "xyz":
-        def particle_mass_velocity(field, data, ax):
-            pos = particle_pos(data)
-            vel = particle_vel(data, ax) 
-            mass = data[ptype, mass_name]
-            d = data.deposit(pos, [vel, mass], method = "weighted_mean")
-            d[~np.isfinite(d)] = 0.0
-            return d
-
-        add_field("deposit_%s_weighted_velocity_%s" % (ptype, ax),
-                 function = lambda f, d: particle_mass_velocity(f, d, ax),
-                 validators = [ValidateSpatial()],
-                 display_name = "\\mathrm{%s Mass Weighted Velocity %s}" % \
-                                (pname, ax.upper()),
-                 units = r"\mathrm{\mathrm{cm}/\mathrm{s}}",
-                 projected_units = r"\mathrm{\mathrm{cm}/\mathrm{s}}",
-                 projection_conversion = '1',
-                 take_log=False)
-
-    add_field((ptype, "ParticleMass"),
-            function = TranslationFunc((ptype, mass_name)),
-            particle_type = True,
-            units = r"\mathrm{g}")
-
-    def _ParticleMassMsun(field, data):
-        return data[ptype, mass_name].copy()
-    def _conv_Msun(data):
-        return 1.0/mass_sun_cgs
-
-    add_field((ptype, "ParticleMassMsun"),
-            function = _ParticleMassMsun,
-            convert_function = _conv_Msun,
-            particle_type = True,
-            units = r"\mathrm{M}_\odot")
-
-# Particle Deposition Fields
-_ptypes = ["all", "darkmatter", "stars"]
-_pnames  = ["Particle", "Dark Matter", "Stellar"]
-
-for _ptype, _pname in zip(_ptypes, _pnames):
-    _particle_functions(_ptype, _pname)
-
+for _ptype in _ptypes:
+    particle_vector_functions(_ptype, ["particle_position_%s" % ax for ax in 'xyz'],
+                                     ["particle_velocity_%s" % ax for ax in 'xyz'],
+                              ARTFieldInfo)
+    particle_deposition_functions(_ptype, "Coordinates", "particle_mass",
+                                   ARTFieldInfo)
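
For orientation: judging from the particle_fields.py hunks elsewhere in
this message, each call to particle_deposition_functions above registers
a family of ("deposit", "<ptype>_count"), ("deposit", "<ptype>_mass"),
and ("deposit", "<ptype>_density") fields for the given particle type.
A sketch of how they would then be accessed (the dataset path is
hypothetical; illustrative only, not commit code):

    from yt.mods import load

    pf = load("my_art_output/10MpcBox.d")  # hypothetical NMSU-ART output
    dd = pf.h.all_data()
    for ptype in ["all", "darkmatter", "stars", "specie0"]:
        rho = dd["deposit", "%s_density" % ptype]  # deposited g/cm^3
        print(ptype, rho.max())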


https://bitbucket.org/yt_analysis/yt/commits/e68df96c341a/
Changeset:   e68df96c341a
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-14 02:36:46
Summary:     Made the field list more explicit about gas fields
Affected #:  2 files

diff -r 87069c899fe61f86661a5d78813e8fbc95b1d943 -r e68df96c341a0a5f2526c57678121c816ceb8cac yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -46,6 +46,7 @@
         pos = data[ptype, coord_name]
         d = data.deposit(pos, method = "count")
         return d
+
     registry.add_field(("deposit", "%s_count" % ptype),
              function = particle_count,
              validators = [ValidateSpatial()],

diff -r 87069c899fe61f86661a5d78813e8fbc95b1d943 -r e68df96c341a0a5f2526c57678121c816ceb8cac yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -124,9 +124,9 @@
 
     def _detect_fields(self):
         self.particle_field_list = particle_fields
-        self.field_list = set(fluid_fields + particle_fields +
-                              particle_star_fields)
-        self.field_list = list(self.field_list)
+        self.field_list = [("gas", f) for f in fluid_fields]
+        self.field_list += set(particle_fields + particle_star_fields \
+                               + fluid_fields)
         # now generate all of the possible particle fields
         if "wspecies" in self.parameter_file.parameters.keys():
             wspecies = self.parameter_file.parameters['wspecies']


https://bitbucket.org/yt_analysis/yt/commits/d2eb763a76c3/
Changeset:   d2eb763a76c3
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-14 02:49:49
Summary:     Changed particle_mass to have projected units of g. particle_mass_width has units of g-cm.
Affected #:  1 file

diff -r e68df96c341a0a5f2526c57678121c816ceb8cac -r d2eb763a76c30e0e97f835143272048dc5e3d6c9 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -56,13 +56,28 @@
     def particle_mass(field, data):
         pos = data[ptype, coord_name]
         d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
-        return d
+        dx = data["dx"]
+        return d / dx
 
     registry.add_field(("deposit", "%s_mass" % ptype),
              function = particle_mass,
              validators = [ValidateSpatial()],
              display_name = "\\mathrm{%s Mass}" % ptype,
              units = r"\mathrm{g}",
+             projected_units = r"\mathrm{g}",
+             projection_conversion = '1')
+
+
+    def particle_mass_width(field, data):
+        pos = data[ptype, coord_name]
+        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
+        return d
+
+    registry.add_field(("deposit", "%s_mass_width" % ptype),
+             function = particle_mass_width,
+             validators = [ValidateSpatial()],
+             display_name = "\\mathrm{%s Mass}" % ptype,
+             units = r"\mathrm{g}",
              projected_units = r"\mathrm{g}\/\mathrm{cm}",
              projection_conversion = 'cm')
 


https://bitbucket.org/yt_analysis/yt/commits/d2832a1a86d3/
Changeset:   d2832a1a86d3
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-14 18:50:17
Summary:     reversing the confused particle mass changes I made
Affected #:  1 file

diff -r d2eb763a76c30e0e97f835143272048dc5e3d6c9 -r d2832a1a86d3b54e5af5c8c3639cd45726066347 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -56,28 +56,13 @@
     def particle_mass(field, data):
         pos = data[ptype, coord_name]
         d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
-        dx = data["dx"]
-        return d / dx
+        return d
 
     registry.add_field(("deposit", "%s_mass" % ptype),
              function = particle_mass,
              validators = [ValidateSpatial()],
              display_name = "\\mathrm{%s Mass}" % ptype,
              units = r"\mathrm{g}",
-             projected_units = r"\mathrm{g}",
-             projection_conversion = '1')
-
-
-    def particle_mass_width(field, data):
-        pos = data[ptype, coord_name]
-        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
-        return d
-
-    registry.add_field(("deposit", "%s_mass_width" % ptype),
-             function = particle_mass_width,
-             validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Mass}" % ptype,
-             units = r"\mathrm{g}",
              projected_units = r"\mathrm{g}\/\mathrm{cm}",
              projection_conversion = 'cm')
 


https://bitbucket.org/yt_analysis/yt/commits/352fa26ed2ed/
Changeset:   352fa26ed2ed
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-14 19:22:15
Summary:     added mixed fluid-particle fields
Affected #:  1 file

diff -r d2832a1a86d3b54e5af5c8c3639cd45726066347 -r 352fa26ed2edf1052b9faa7dd2a37d99186a1e1a yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -277,3 +277,60 @@
                               ARTFieldInfo)
     particle_deposition_functions(_ptype, "Coordinates", "particle_mass",
                                    ARTFieldInfo)
+
+# Mixed Fluid-Particle Fields
+
+def baryon_density(field, data):
+    pos = np.column_stack([data["stars", "particle_position_%s" % ax]
+        for ax in 'xyz'])
+    pmass = data["stars", "particle_mass"]
+    mass  = data.deposit(pos, [pmass], method = "sum")
+    mass += data["gas", "CellMass"]
+    vol   = data["CellVolume"]
+    return mass / vol
+
+ARTFieldInfo.add_field(("deposit", "baryon_density"),
+         function = baryon_density,
+         validators = [ValidateSpatial()],
+         display_name = "\\mathrm{Baryon Density}",
+         units = r"\mathrm{g}/\mathrm{cm}^{3}",
+         projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+         projection_conversion = 'cm')
+
+def total_density(field, data):
+    ptype = 'specie0'
+    rho = data["deposit", "baryon_density"]
+    pos = np.column_stack([data[ptype, "particle_position_%s" % ax]
+                           for ax in 'xyz'])
+    pmas = data[ptype, "particle_mass"]
+    mass = data.deposit(pos, [pmas], method = "sum")
+    vol  = data["gas", "CellVolume"]
+    return rho + (mass / vol)
+
+ARTFieldInfo.add_field(("deposit", "total_density"),
+         function = total_density,
+         validators = [ValidateSpatial()],
+         display_name = "\\mathrm{Total Density}",
+         units = r"\mathrm{g}/\mathrm{cm}^{3}",
+         projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+         projection_conversion = 'cm')
+
+
+def multimass_density(field, data):
+    ptype = 'darkmatter'
+    rho = data["deposit", "baryon_density"]
+    pos = np.column_stack([data[ptype, "particle_position_%s" % ax]
+                           for ax in 'xyz'])
+    pmas = data[ptype, "particle_mass"]
+    mass = data.deposit(pos, [pmas], method = "sum")
+    vol   = data["gas", "CellVolume"]
+    return rho + mass / vol
+
+ARTFieldInfo.add_field(("deposit", "multimass_density"),
+         function = total_density,
+         validators = [ValidateSpatial()],
+         display_name = "\\mathrm{Multimass Density}",
+         units = r"\mathrm{g}/\mathrm{cm}^{3}",
+         projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+         projection_conversion = 'cm')
+


https://bitbucket.org/yt_analysis/yt/commits/869e903e60c4/
Changeset:   869e903e60c4
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-14 19:22:35
Summary:     making particle_density derived from particle_mass
Affected #:  1 file

diff -r 352fa26ed2edf1052b9faa7dd2a37d99186a1e1a -r 869e903e60c40b9e6939e94d19ddcbf37555f7d8 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -67,8 +67,7 @@
              projection_conversion = 'cm')
 
     def particle_density(field, data):
-        pos = data[ptype, coord_name]
-        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
+        d = data["deposit", "%s_mass" % ptype]
         d /= data["CellVolume"]
         return d
 


https://bitbucket.org/yt_analysis/yt/commits/5f66b8559208/
Changeset:   5f66b8559208
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-14 21:36:38
Summary:     making Density fields explicit in field type
Affected #:  2 files

diff -r 869e903e60c40b9e6939e94d19ddcbf37555f7d8 -r 5f66b8559208fd3aa2f3d15892ec0da91c39b589 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -67,7 +67,8 @@
              projection_conversion = 'cm')
 
     def particle_density(field, data):
-        d = data["deposit", "%s_mass" % ptype]
+        pos = data[ptype, coord_name]
+        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
         d /= data["CellVolume"]
         return d
 

diff -r 869e903e60c40b9e6939e94d19ddcbf37555f7d8 -r 5f66b8559208fd3aa2f3d15892ec0da91c39b589 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -286,7 +286,7 @@
     pmass = data["stars", "particle_mass"]
     mass  = data.deposit(pos, [pmass], method = "sum")
     mass += data["gas", "CellMass"]
-    vol   = data["CellVolume"]
+    vol   = data["gas", "CellVolume"]
     return mass / vol
 
 ARTFieldInfo.add_field(("deposit", "baryon_density"),
@@ -323,11 +323,11 @@
                            for ax in 'xyz'])
     pmas = data[ptype, "particle_mass"]
     mass = data.deposit(pos, [pmas], method = "sum")
-    vol   = data["gas", "CellVolume"]
+    vol   = data["gas","CellVolume"]
     return rho + mass / vol
 
 ARTFieldInfo.add_field(("deposit", "multimass_density"),
-         function = total_density,
+         function = multimass_density,
          validators = [ValidateSpatial()],
          display_name = "\\mathrm{Multimass Density}",
          units = r"\mathrm{g}/\mathrm{cm}^{3}",


https://bitbucket.org/yt_analysis/yt/commits/ba3e6dc5c90b/
Changeset:   ba3e6dc5c90b
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-14 21:52:46
Summary:     mixed densities now derive from individual field type densities
Affected #:  1 file

diff -r 5f66b8559208fd3aa2f3d15892ec0da91c39b589 -r ba3e6dc5c90bbcf47762f4c31da4ac0d7787c385 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -281,13 +281,9 @@
 # Mixed Fluid-Particle Fields
 
 def baryon_density(field, data):
-    pos = np.column_stack([data["stars", "particle_position_%s" % ax]
-        for ax in 'xyz'])
-    pmass = data["stars", "particle_mass"]
-    mass  = data.deposit(pos, [pmass], method = "sum")
-    mass += data["gas", "CellMass"]
-    vol   = data["gas", "CellVolume"]
-    return mass / vol
+    rho = data["deposit", "stars_density"]
+    rho += data["gas", "Density"]
+    return rho
 
 ARTFieldInfo.add_field(("deposit", "baryon_density"),
          function = baryon_density,
@@ -298,14 +294,9 @@
          projection_conversion = 'cm')
 
 def total_density(field, data):
-    ptype = 'specie0'
     rho = data["deposit", "baryon_density"]
-    pos = np.column_stack([data[ptype, "particle_position_%s" % ax]
-                           for ax in 'xyz'])
-    pmas = data[ptype, "particle_mass"]
-    mass = data.deposit(pos, [pmas], method = "sum")
-    vol  = data["gas", "CellVolume"]
-    return rho + (mass / vol)
+    rho += data["deposit", "specie0_density"]
+    return rho
 
 ARTFieldInfo.add_field(("deposit", "total_density"),
          function = total_density,
@@ -315,16 +306,10 @@
          projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
          projection_conversion = 'cm')
 
-
 def multimass_density(field, data):
-    ptype = 'darkmatter'
     rho = data["deposit", "baryon_density"]
-    pos = np.column_stack([data[ptype, "particle_position_%s" % ax]
-                           for ax in 'xyz'])
-    pmas = data[ptype, "particle_mass"]
-    mass = data.deposit(pos, [pmas], method = "sum")
-    vol   = data["gas","CellVolume"]
-    return rho + mass / vol
+    rho += data["deposit", "darkmatter_density"]
+    return rho
 
 ARTFieldInfo.add_field(("deposit", "multimass_density"),
          function = multimass_density,
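
Written out, the dependency chain these three deposit fields now form is:

    baryon_density    = ("gas", "Density") + ("deposit", "stars_density")
    total_density     = baryon_density     + ("deposit", "specie0_density")
    multimass_density = baryon_density     + ("deposit", "darkmatter_density")

so a request for ("deposit", "total_density") transparently triggers the
stars and specie0 particle deposits plus the gas read.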


https://bitbucket.org/yt_analysis/yt/commits/064dc07441c8/
Changeset:   064dc07441c8
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-15 01:48:09
Summary:     Forgot to turn on caching
Affected #:  1 file

diff -r ba3e6dc5c90bbcf47762f4c31da4ac0d7787c385 -r 064dc07441c86efd0a32d616b013fdd77a7a88d1 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -44,7 +44,7 @@
     tb, ages = None, None
     cache = None
     masks = None
-    caching = False
+    caching = True
 
     def __init__(self):
         self.cache = {}


https://bitbucket.org/yt_analysis/yt/commits/cea962acce98/
Changeset:   cea962acce98
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-06-20 00:49:46
Summary:     Merged in juxtaposicion/yt-3.0 (pull request #37)

NMSU-ART Enhancements
Affected #:  6 files

diff -r 1a59d021ddb8f04abba877b8b67e60e4e1d8c868 -r cea962acce9864de0213bbad0b67e03b37fe5ee2 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -46,6 +46,7 @@
         pos = data[ptype, coord_name]
         d = data.deposit(pos, method = "count")
         return d
+
     registry.add_field(("deposit", "%s_count" % ptype),
              function = particle_count,
              validators = [ValidateSpatial()],

diff -r 1a59d021ddb8f04abba877b8b67e60e4e1d8c868 -r cea962acce9864de0213bbad0b67e03b37fe5ee2 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -124,9 +124,9 @@
 
     def _detect_fields(self):
         self.particle_field_list = particle_fields
-        self.field_list = set(fluid_fields + particle_fields +
-                              particle_star_fields)
-        self.field_list = list(self.field_list)
+        self.field_list = [("gas", f) for f in fluid_fields]
+        self.field_list += set(particle_fields + particle_star_fields \
+                               + fluid_fields)
         # now generate all of the possible particle fields
         if "wspecies" in self.parameter_file.parameters.keys():
             wspecies = self.parameter_file.parameters['wspecies']
@@ -136,6 +136,10 @@
                 self.parameter_file.particle_types.append("specie%i" % specie)
         else:
             self.parameter_file.particle_types = []
+        for ptype in self.parameter_file.particle_types:
+            for pfield in self.particle_field_list:
+                pfn = (ptype, pfield)
+                self.field_list.append(pfn)
 
     def _setup_classes(self):
         dd = self._get_data_reader_dict()

diff -r 1a59d021ddb8f04abba877b8b67e60e4e1d8c868 -r cea962acce9864de0213bbad0b67e03b37fe5ee2 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -25,6 +25,8 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 import numpy as np
+
+from yt.funcs import *
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \
@@ -40,6 +42,10 @@
 from yt.utilities.physical_constants import mass_sun_cgs
 from yt.frontends.art.definitions import *
 
+from yt.data_objects.particle_fields import \
+    particle_deposition_functions, \
+    particle_vector_functions
+
 KnownARTFields = FieldInfoContainer()
 add_art_field = KnownARTFields.add_field
 ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
@@ -218,6 +224,7 @@
               particle_type=True,
               convert_function=lambda x: x.convert("particle_mass"))
 
+
 def _particle_age(field, data):
     tr = data["particle_creation_time"]
     return data.pf.current_time - tr
@@ -260,3 +267,55 @@
     return data["particle_mass"]/mass_sun_cgs
 add_field("ParticleMassMsun", function=_ParticleMassMsun, particle_type=True,
           take_log=True, units=r"\rm{Msun}")
+
+# Particle Deposition Fields
+_ptypes = ["all", "darkmatter", "stars", "specie0"]
+
+for _ptype in _ptypes:
+    particle_vector_functions(_ptype, ["particle_position_%s" % ax for ax in 'xyz'],
+                                     ["particle_velocity_%s" % ax for ax in 'xyz'],
+                              ARTFieldInfo)
+    particle_deposition_functions(_ptype, "Coordinates", "particle_mass",
+                                   ARTFieldInfo)
+
+# Mixed Fluid-Particle Fields
+
+def baryon_density(field, data):
+    rho = data["deposit", "stars_density"]
+    rho += data["gas", "Density"]
+    return rho
+
+ARTFieldInfo.add_field(("deposit", "baryon_density"),
+         function = baryon_density,
+         validators = [ValidateSpatial()],
+         display_name = "\\mathrm{Baryon Density}",
+         units = r"\mathrm{g}/\mathrm{cm}^{3}",
+         projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+         projection_conversion = 'cm')
+
+def total_density(field, data):
+    rho = data["deposit", "baryon_density"]
+    rho += data["deposit", "specie0_density"]
+    return rho
+
+ARTFieldInfo.add_field(("deposit", "total_density"),
+         function = total_density,
+         validators = [ValidateSpatial()],
+         display_name = "\\mathrm{Total Density}",
+         units = r"\mathrm{g}/\mathrm{cm}^{3}",
+         projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+         projection_conversion = 'cm')
+
+def multimass_density(field, data):
+    rho = data["deposit", "baryon_density"]
+    rho += data["deposit", "darkmatter_density"]
+    return rho
+
+ARTFieldInfo.add_field(("deposit", "multimass_density"),
+         function = multimass_density,
+         validators = [ValidateSpatial()],
+         display_name = "\\mathrm{Multimass Density}",
+         units = r"\mathrm{g}/\mathrm{cm}^{3}",
+         projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+         projection_conversion = 'cm')
+

diff -r 1a59d021ddb8f04abba877b8b67e60e4e1d8c868 -r cea962acce9864de0213bbad0b67e03b37fe5ee2 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -42,6 +42,14 @@
 class IOHandlerART(BaseIOHandler):
     _data_style = "art"
     tb, ages = None, None
+    cache = None
+    masks = None
+    caching = True
+
+    def __init__(self):
+        self.cache = {}
+        self.masks = {}
+        super(IOHandlerART, self).__init__()
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         # Chunks in this case will have affiliated domain subset objects
@@ -68,85 +76,103 @@
                 cp += subset.cell_count
         return tr
 
-    def _read_particle_selection(self, chunks, selector, fields):
+    def _get_mask(self, selector, ftype):
+        key = (selector, ftype)
+        if self.caching and key in self.masks:
+            return self.masks[key]
+        pf = self.pf
+        ptmax = self.ws[-1]
+        pbool, idxa, idxb = _determine_field_size(pf, ftype, self.ls, ptmax)
+        pstr = 'particle_position_%s'
+        x,y,z = [self._get_field((ftype, pstr % ax)) for ax in 'xyz']
+        mask = selector.select_points(x, y, z)
+        if self.caching:
+            self.masks[key] = mask
+            return self.masks[key]
+        else:
+            return mask
+
+    def _get_field(self, field):
+        if self.caching and field in self.cache:
+            mylog.debug("Cached %s", str(field))
+            return self.cache[field]
+        mylog.debug("Reading %s", str(field))
         tr = {}
-        fields_read = []
-        for chunk in chunks:
-            level = chunk.objs[0].domain.domain_level
-            pf = chunk.objs[0].domain.pf
-            masks = {}
-            ws, ls = pf.parameters["wspecies"], pf.parameters["lspecies"]
-            sizes = np.diff(np.concatenate(([0], ls)))
-            ptmax = ws[-1]
-            npt = ls[-1]
-            nstars = ls[-1]-ls[-2]
-            file_particle = pf._file_particle_data
-            file_stars = pf._file_particle_stars
-            ftype_old = None
-            for field in fields:
-                if field in fields_read:
-                    continue
-                ftype, fname = field
-                pbool, idxa, idxb = _determine_field_size(pf, ftype, ls, ptmax)
-                npa = idxb-idxa
-                if not ftype_old == ftype:
-                    Nrow = pf.parameters["Nrow"]
-                    rp = lambda ax: read_particles(
-                        file_particle, Nrow, idxa=idxa,
-                        idxb=idxb, field=ax)
-                    x, y, z = (rp(ax) for ax in 'xyz')
-                    dd = pf.domain_dimensions[0]
-                    off = 1.0/dd
-                    x, y, z = (t.astype('f8')/dd - off for t in (x, y, z))
-                    mask = selector.select_points(x, y, z)
-                    size = mask.sum()
-                for i, ax in enumerate('xyz'):
-                    if fname.startswith("particle_position_%s" % ax):
-                        tr[field] = vars()[ax]
-                    if fname.startswith("particle_velocity_%s" % ax):
-                        tr[field] = rp('v'+ax)
-                if fname == "particle_mass":
-                    a = 0
-                    data = np.zeros(npa, dtype='f8')
-                    for ptb, size, m in zip(pbool, sizes, ws):
-                        if ptb:
-                            data[a:a+size] = m
-                            a += size
-                    tr[field] = data
-                elif fname == "particle_index":
-                    tr[field] = np.arange(idxa, idxb).astype('int64')
-                elif fname == "particle_type":
-                    a = 0
-                    data = np.zeros(npa, dtype='int')
-                    for i, (ptb, size) in enumerate(zip(pbool, sizes)):
-                        if ptb:
-                            data[a:a+size] = i
-                            a += size
-                    tr[field] = data
-                if pbool[-1] and fname in particle_star_fields:
-                    data = read_star_field(file_stars, field=fname)
-                    temp = tr.get(field, np.zeros(npa, 'f8'))
-                    if nstars > 0:
-                        temp[-nstars:] = data
-                    tr[field] = temp
-                if fname == "particle_creation_time":
-                    self.tb, self.ages, data = interpolate_ages(
-                        tr[field][-nstars:],
-                        file_stars,
-                        self.tb,
-                        self.ages,
-                        pf.current_time)
-                    temp = tr.get(field, np.zeros(npa, 'f8'))
-                    temp[-nstars:] = data
-                    tr[field] = temp
-                    del data
-                tr[field] = tr[field][mask].astype('f8')
-                ftype_old = ftype
-                fields_read.append(field)
+        ftype, fname = field
+        ptmax = self.ws[-1]
+        pbool, idxa, idxb = _determine_field_size(self.pf, ftype, 
+                                                  self.ls, ptmax)
+        npa = idxb - idxa
+        sizes = np.diff(np.concatenate(([0], self.ls)))
+        rp = lambda ax: read_particles(
+            self.file_particle, self.Nrow, idxa=idxa,
+            idxb=idxb, fields=ax)
+        for i, ax in enumerate('xyz'):
+            if fname.startswith("particle_position_%s" % ax):
+                dd = self.pf.domain_dimensions[0]
+                off = 1.0/dd
+                tr[field] = rp([ax])[0]/dd - off
+            if fname.startswith("particle_velocity_%s" % ax):
+                tr[field], = rp(['v'+ax])
+        if fname == "particle_mass":
+            a = 0
+            data = np.zeros(npa, dtype='f8')
+            for ptb, size, m in zip(pbool, sizes, self.ws):
+                if ptb:
+                    data[a:a+size] = m
+                    a += size
+            tr[field] = data
+        elif fname == "particle_index":
+            tr[field] = np.arange(idxa, idxb)
+        elif fname == "particle_type":
+            a = 0
+            data = np.zeros(npa, dtype='int')
+            for i, (ptb, size) in enumerate(zip(pbool, sizes)):
+                if ptb:
+                    data[a: a + size] = i
+                    a += size
+            tr[field] = data
+        if pbool[-1] and fname in particle_star_fields:
+            data = read_star_field(self.file_stars, field=fname)
+            temp = tr.get(field, np.zeros(npa, 'f8'))
+            nstars = self.ls[-1]-self.ls[-2]
+            if nstars > 0:
+                temp[-nstars:] = data
+            tr[field] = temp
+        if fname == "particle_creation_time":
+            self.tb, self.ages, data = interpolate_ages(
+                tr[field][-nstars:],
+                self.file_stars,
+                self.tb,
+                self.ages,
+                self.pf.current_time)
+            temp = tr.get(field, np.zeros(npa, 'f8'))
+            temp[-nstars:] = data
+            tr[field] = temp
+            del data
         if tr == {}:
             tr = dict((f, np.array([])) for f in fields)
-        return tr
+        if self.caching:
+            self.cache[field] = tr[field]
+            return self.cache[field]
+        else:
+            return tr[field]
 
+    def _read_particle_selection(self, chunks, selector, fields):
+        chunk = chunks.next()
+        self.pf = chunk.objs[0].domain.pf
+        self.ws = self.pf.parameters["wspecies"]
+        self.ls = self.pf.parameters["lspecies"]
+        self.file_particle = self.pf._file_particle_data
+        self.file_stars = self.pf._file_particle_stars
+        self.Nrow = self.pf.parameters["Nrow"]
+        data = {f:np.array([]) for f in fields}
+        for f in fields:
+            ftype, fname = f
+            mask = self._get_mask(selector, ftype)
+            arr = self._get_field(f)[mask].astype('f8')
+            data[f] = np.concatenate((arr, data[f]))
+        return data
 
 def _determine_field_size(pf, field, lspecies, ptmax):
     pbool = np.zeros(len(lspecies), dtype="bool")
@@ -361,27 +387,29 @@
     return ranges
 
 
-def read_particles(file, Nrow, idxa, idxb, field):
+def read_particles(file, Nrow, idxa, idxb, fields):
     words = 6  # words (reals) per particle: x,y,z,vx,vy,vz
     real_size = 4  # for file_particle_data; not always true?
     np_per_page = Nrow**2  # defined in ART a_setup.h, # of particles/page
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
-    data = np.array([], 'f4')
     fh = open(file, 'r')
     skip, count = idxa, idxb - idxa
     kwargs = dict(words=words, real_size=real_size, 
                   np_per_page=np_per_page, num_pages=num_pages)
-    ranges = get_ranges(skip, count, field, **kwargs)
-    data = None
-    for seek, this_count in ranges:
-        fh.seek(seek)
-        temp = np.fromfile(fh, count=this_count, dtype='>f4')
-        if data is None:
-            data = temp
-        else:
-            data = np.concatenate((data, temp))
+    arrs = []
+    for field in fields:
+        ranges = get_ranges(skip, count, field, **kwargs)
+        data = None
+        for seek, this_count in ranges:
+            fh.seek(seek)
+            temp = np.fromfile(fh, count=this_count, dtype='>f4')
+            if data is None:
+                data = temp
+            else:
+                data = np.concatenate((data, temp))
+        arrs.append(data.astype('f8'))
     fh.close()
-    return data
+    return arrs
 
 
 def read_star_field(file, field=None):

diff -r 1a59d021ddb8f04abba877b8b67e60e4e1d8c868 -r cea962acce9864de0213bbad0b67e03b37fe5ee2 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -336,3 +336,39 @@
         return self.ofield
 
 deposit_cic = CICDeposit
+
+cdef class WeightedMeanParticleField(ParticleDepositOperation):
+    # Deposit both mass * field and mass into two scalars
+    # then in finalize divide mass * field / mass
+    cdef np.float64_t *wf
+    cdef public object owf
+    cdef np.float64_t *w
+    cdef public object ow
+    def initialize(self):
+        self.owf = np.zeros(self.nvals, dtype='float64')
+        cdef np.ndarray wfarr = self.owf
+        self.wf = <np.float64_t*> wfarr.data
+        
+        self.ow = np.zeros(self.nvals, dtype='float64')
+        cdef np.ndarray warr = self.ow
+        self.w = <np.float64_t*> warr.data
+    
+    @cython.cdivision(True)
+    cdef void process(self, int dim[3],
+                      np.float64_t left_edge[3], 
+                      np.float64_t dds[3],
+                      np.int64_t offset, 
+                      np.float64_t ppos[3],
+                      np.float64_t *fields 
+                      ):
+        cdef int ii[3], i
+        for i in range(3):
+            ii[i] = <int>((ppos[i] - left_edge[i]) / dds[i])
+        self.w[ gind(ii[0], ii[1], ii[2], dim) + offset] += fields[1]
+        self.wf[gind(ii[0], ii[1], ii[2], dim) + offset] += fields[0] * fields[1]
+        
+    def finalize(self):
+        return self.owf / self.ow
+
+deposit_weighted_mean = WeightedMeanParticleField
+
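
The deposition scheme in WeightedMeanParticleField above reduces to two
running sums per cell, divided at the end. The same technique in plain
NumPy (illustrative only, not commit code):

    import numpy as np

    ncells = 4
    cell = np.array([0, 0, 1, 3])            # cell hit by each particle
    field = np.array([1.0, 3.0, 5.0, 7.0])   # e.g. a velocity component
    weight = np.array([2.0, 2.0, 1.0, 4.0])  # e.g. particle mass

    w = np.zeros(ncells)
    wf = np.zeros(ncells)
    np.add.at(w, cell, weight)               # sum of weights per cell
    np.add.at(wf, cell, weight * field)      # sum of weight*field per cell

    with np.errstate(invalid='ignore'):
        mean = wf / w                        # NaN where no particle landed
    print(mean)                              # -> [2., 5., nan, 7.]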


https://bitbucket.org/yt_analysis/yt/commits/0d87f2e70ec6/
Changeset:   0d87f2e70ec6
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-06-21 01:10:10
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #53)

This small change to how buffers are filled fixes a terrible bug.
Affected #:  1 file

diff -r cea962acce9864de0213bbad0b67e03b37fe5ee2 -r 0d87f2e70ec6ddb462527ec5eafb25c41a88d5e9 yt/utilities/lib/QuadTree.pyx
--- a/yt/utilities/lib/QuadTree.pyx
+++ b/yt/utilities/lib/QuadTree.pyx
@@ -342,6 +342,7 @@
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.cdivision(True)
     def get_all(self, int count_only = 0, int style = 1):
         cdef int i, j, vi
         cdef int total = 0
@@ -391,6 +392,7 @@
                 count += self.count(node.children[i][j])
         return count
 
+    @cython.cdivision(True)
     cdef int fill(self, QuadTreeNode *node, 
                         np.int64_t curpos,
                         np.float64_t *px,
@@ -403,6 +405,8 @@
                         np.float64_t wtoadd,
                         np.int64_t level):
         cdef int i, j, n
+        cdef np.float64_t *vorig
+        vorig = <np.float64_t *> alloca(sizeof(np.float64_t) * self.nvals)
         if node.children[0][0] == NULL:
             if self.merged == -1:
                 for i in range(self.nvals):
@@ -422,6 +426,7 @@
         cdef np.int64_t added = 0
         if self.merged == 1:
             for i in range(self.nvals):
+                vorig[i] = vtoadd[i]
                 vtoadd[i] += node.val[i]
             wtoadd += node.weight_val
         elif self.merged == -1:
@@ -437,7 +442,7 @@
                         vtoadd, wtoadd, level + 1)
         if self.merged == 1:
             for i in range(self.nvals):
-                vtoadd[i] -= node.val[i]
+                vtoadd[i] = vorig[i]
             wtoadd -= node.weight_val
         return added
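
The bug and the fix come down to floating-point addition not being
exactly invertible: the old code added node values into vtoadd on the
way down and subtracted them on the way back up, and the rounding
residue compounded across the recursion. Saving and restoring the
buffer, as the vorig lines above do, is exact. A minimal demonstration
(illustrative only, not commit code):

    a = 1e-8
    b = 1.0
    print((a + b) - b == a)   # False: the round trip lost bits of a
    print((a + b) - b)        # ~9.99999994e-09, not 1e-8

    saved = a                 # the QuadTree fix, in miniature
    a += b                    # accumulate for the recursive call
    a = saved                 # restore exactly afterwards
    print(a == 1e-8)          # True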
 


https://bitbucket.org/yt_analysis/yt/commits/3f01744f5cbf/
Changeset:   3f01744f5cbf
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-27 04:27:09
Summary:     NMSU-ART loads up again, does not read data yet.
Affected #:  2 files

diff -r 08d4ff8c02043994d944f8bcd36f03c1d38aca45 -r 3f01744f5cbf9cb07ce0b8ffc05dbc01be7527af yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -20,7 +20,7 @@
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
-.
+
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
@@ -575,8 +575,7 @@
             self._level_oct_offsets, level,
             coarse_grid=self.pf.domain_dimensions[0],
             root_level=self.pf.root_level)
-        nocts_check = oct_handler.add(self.domain_id, level, nocts,
-                                      unitary_center, self.domain_id)
+        nocts_check = oct_handler.add(self.domain_id, level, unitary_center)
         assert(nocts_check == nocts)
         mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
                     nocts, level, oct_handler.nocts)
@@ -600,8 +599,7 @@
                            LL[2]:RL[2]:NX[2]*1j]
         root_fc = np.vstack([p.ravel() for p in root_fc]).T
         nocts_check = oct_handler.add(self.domain_id, level,
-                                      root_octs_side**3,
-                                      root_fc, self.domain_id)
+                                      root_fc)
         assert(oct_handler.nocts == root_fc.shape[0])
         mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
                     root_octs_side**3, 0, oct_handler.nocts)

diff -r 08d4ff8c02043994d944f8bcd36f03c1d38aca45 -r 3f01744f5cbf9cb07ce0b8ffc05dbc01be7527af yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -104,6 +104,12 @@
         self.max_domain = -1
         p = 0
         self.nocts = 0 # Increment when initialized
+        for i in range(3):
+            self.DLE[i] = domain_left_edge[i] #0
+            self.DRE[i] = domain_right_edge[i] #num_grid
+        self._initialize_root_mesh()
+
+    def _initialize_root_mesh(self):
         self.root_mesh = <Oct****> malloc(sizeof(void*) * self.nn[0])
         for i in range(self.nn[0]):
             self.root_mesh[i] = <Oct ***> malloc(sizeof(void*) * self.nn[1])
@@ -111,10 +117,6 @@
                 self.root_mesh[i][j] = <Oct **> malloc(sizeof(void*) * self.nn[2])
                 for k in range(self.nn[2]):
                     self.root_mesh[i][j][k] = NULL
-        # We don't initialize the octs yet
-        for i in range(3):
-            self.DLE[i] = domain_left_edge[i] #0
-            self.DRE[i] = domain_right_edge[i] #num_grid
 
     def __dealloc__(self):
         free_octs(self.cont)
@@ -621,7 +623,7 @@
         for i in range(root_nodes):
             self.root_nodes[i].key = -1
             self.root_nodes[i].node = NULL
-        
+
     def __dealloc__(self):
         # This gets called BEFORE the superclass deallocation.  But, both get
         # called.
@@ -711,6 +713,10 @@
 
 cdef class ARTOctreeContainer(RAMSESOctreeContainer):
 
+    def __init__(self, domain_dimensions, domain_left_edge, domain_right_edge):
+        OctreeContainer.__init__(self, domain_dimensions,
+            domain_left_edge, domain_right_edge)
+
     @cython.boundscheck(True)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -798,3 +804,25 @@
                             local_filled += 1
         return local_filled
 
+    def allocate_domains(self, domain_counts):
+        cdef int count, i
+        cdef OctAllocationContainer *cur = self.cont
+        assert(cur == NULL)
+        self.max_domain = len(domain_counts) # 1-indexed
+        self.domains = <OctAllocationContainer **> malloc(
+            sizeof(OctAllocationContainer *) * len(domain_counts))
+        for i, count in enumerate(domain_counts):
+            cur = allocate_octs(count, cur)
+            if self.cont == NULL: self.cont = cur
+            self.domains[i] = cur
+
+    cdef Oct* next_root(self, int domain_id, int ind[3]):
+        cdef Oct *next = self.root_mesh[ind[0]][ind[1]][ind[2]]
+        if next != NULL: return next
+        cdef OctAllocationContainer *cont = self.domains[domain_id - 1]
+        if cont.n_assigned >= cont.n: raise RuntimeError
+        next = &cont.my_octs[cont.n_assigned]
+        cont.n_assigned += 1
+        self.root_mesh[ind[0]][ind[1]][ind[2]] = next
+        self.nocts += 1
+        return next
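
The next_root helper above implements get-or-allocate against a
preallocated per-domain pool: a root oct is only carved out of the
domain's OctAllocationContainer the first time its mesh cell is
requested. The same pattern in plain Python (illustrative only, not
commit code):

    class DomainPool(object):
        """Stand-in for OctAllocationContainer: a fixed-size slab."""
        def __init__(self, n):
            self.slots = [object() for _ in range(n)]
            self.n_assigned = 0

    root_mesh = {}

    def next_root(pool, ind):
        o = root_mesh.get(ind)
        if o is not None:
            return o                        # already allocated
        if pool.n_assigned >= len(pool.slots):
            raise RuntimeError("pool exhausted")
        o = pool.slots[pool.n_assigned]     # hand out the next free slot
        pool.n_assigned += 1
        root_mesh[ind] = o
        return o

    pool = DomainPool(8)
    first = next_root(pool, (0, 0, 0))
    assert next_root(pool, (0, 0, 0)) is first   # lookup reuses the oct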


https://bitbucket.org/yt_analysis/yt/commits/eadc1ed9c34a/
Changeset:   eadc1ed9c34a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-27 04:56:15
Summary:     Continuing to port NMSU-ART
Affected #:  2 files

diff -r 3f01744f5cbf9cb07ce0b8ffc05dbc01be7527af -r eadc1ed9c34a9069e9121b59d3b70d2b7156f26a yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -106,14 +106,15 @@
         allocate the requisite memory in the oct tree
         """
         nv = len(self.fluid_field_list)
-        self.domains = [ARTDomainFile(self.parameter_file, l+1, nv, l)
-                        for l in range(self.pf.max_level)]
-        self.octs_per_domain = [dom.level_count.sum() for dom in self.domains]
-        self.total_octs = sum(self.octs_per_domain)
         self.oct_handler = ARTOctreeContainer(
             self.parameter_file.domain_dimensions/2,  # dd is # of root cells
             self.parameter_file.domain_left_edge,
             self.parameter_file.domain_right_edge)
+        self.domains = [ARTDomainFile(self.parameter_file, l+1, nv, l,
+                                      self.oct_handler)
+                        for l in range(self.pf.max_level)]
+        self.octs_per_domain = [dom.level_count.sum() for dom in self.domains]
+        self.total_octs = sum(self.octs_per_domain)
         mylog.debug("Allocating %s octs", self.total_octs)
         self.oct_handler.allocate_domains(self.octs_per_domain)
         for domain in self.domains:
@@ -150,28 +151,21 @@
         """
         if getattr(dobj, "_chunk_info", None) is None:
             # Get all octs within this oct handler
-            mask = dobj.selector.select_octs(self.oct_handler)
-            if mask.sum() == 0:
-                mylog.debug("Warning: selected zero octs")
-            counts = self.oct_handler.count_cells(dobj.selector, mask)
-            # For all domains, figure out how many counts we have
-            # and build a subset=mask of domains
-            subsets = []
-            for d, c in zip(self.domains, counts):
-                if c < 1:
-                    continue
-                subset = ARTDomainSubset(d, mask, c, d.domain_level)
-                subsets.append(subset)
+            domains = [dom for dom in self.domains if
+                       dom.included(dobj.selector)]
+            base_region = getattr(dobj, "base_region", dobj)
+            if len(domains) > 1:
+                mylog.debug("Identified %s intersecting domains", len(domains))
+            subsets = [ARTDomainSubset(base_region, domain, self.parameter_file)
+                       for domain in domains]
             dobj._chunk_info = subsets
-            dobj.size = sum(counts)
-            dobj.shape = (dobj.size,)
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
 
     def _chunk_all(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         # We pass the chunk both the current chunk and list of chunks,
         # as well as the referring data source
-        yield YTDataChunk(dobj, "all", oobjs, dobj.size)
+        yield YTDataChunk(dobj, "all", oobjs, None)
 
     def _chunk_spatial(self, dobj, ngz, sort = None):
         sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
@@ -180,9 +174,7 @@
                 g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
             else:
                 g = og
-            size = og.cell_count
-            if size == 0: continue
-            yield YTDataChunk(dobj, "spatial", [g], size)
+            yield YTDataChunk(dobj, "spatial", [g], None)
 
     def _chunk_io(self, dobj):
         """
@@ -193,7 +185,7 @@
         """
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], subset.cell_count)
+            yield YTDataChunk(dobj, "io", [subset], None)
 
 
 class ARTStaticOutput(StaticOutput):
@@ -445,9 +437,9 @@
         return False
 
 class ARTDomainSubset(OctreeSubset):
-    def __init__(self, domain, mask, cell_count, domain_level):
-        super(ARTDomainSubset, self).__init__(domain, mask, cell_count)
-        self.domain_level = domain_level
+    def __init__(self, base_region, domain, pf):
+        super(ARTDomainSubset, self).__init__(base_region, domain, pf)
+        self.domain_level = domain.domain_level
 
     def fill_root(self, content, ftfields):
         """
@@ -503,7 +495,6 @@
                                                nocts_filling)
         return dest
 
-
 class ARTDomainFile(object):
     """
     Read in the AMR, left/right edges, fill out the octhandler
@@ -514,7 +505,7 @@
     _last_mask = None
     _last_seletor_id = None
 
-    def __init__(self, pf, domain_id, nvar, level):
+    def __init__(self, pf, domain_id, nvar, level, oct_handler):
         self.nvar = nvar
         self.pf = pf
         self.domain_id = domain_id
@@ -522,6 +513,7 @@
         self._level_count = None
         self._level_oct_offsets = None
         self._level_child_offsets = None
+        self.oct_handler = oct_handler
 
     @property
     def level_count(self):
@@ -618,3 +610,10 @@
             return self._last_mask.sum()
         self.select(selector)
         return self.count(selector)
+
+    def included(self, selector):
+        return True
+        if getattr(selector, "domain_id", None) is not None:
+            return selector.domain_id == self.domain_id
+        domain_ids = self.pf.h.oct_handler.domain_identify(selector)
+        return self.domain_id in domain_ids

diff -r 3f01744f5cbf9cb07ce0b8ffc05dbc01be7527af -r eadc1ed9c34a9069e9121b59d3b70d2b7156f26a yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -230,6 +230,20 @@
             oinfo.left_edge[i] = cp[i] - dds[i] # Center minus dds
         return cur
 
+    def domain_identify(self, SelectorObject selector):
+        cdef np.ndarray[np.uint8_t, ndim=1] domain_mask
+        domain_mask = np.zeros(self.max_domain, dtype="uint8")
+        cdef OctVisitorData data
+        data.array = domain_mask.data
+        data.domain = -1
+        self.visit_all_octs(selector, oct_visitors.identify_octs, &data)
+        cdef int i
+        domain_ids = []
+        for i in range(self.max_domain):
+            if domain_mask[i] == 1:
+                domain_ids.append(i+1)
+        return domain_ids
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -546,20 +560,6 @@
             selector.recursively_visit_octs(
                 o, pos, dds, 0, func, data, vc)
 
-    def domain_identify(self, SelectorObject selector):
-        cdef np.ndarray[np.uint8_t, ndim=1] domain_mask
-        domain_mask = np.zeros(self.max_domain, dtype="uint8")
-        cdef OctVisitorData data
-        data.array = domain_mask.data
-        data.domain = -1
-        self.visit_all_octs(selector, oct_visitors.identify_octs, &data)
-        cdef int i
-        domain_ids = []
-        for i in range(self.max_domain):
-            if domain_mask[i] == 1:
-                domain_ids.append(i+1)
-        return domain_ids
-
     cdef np.int64_t get_domain_offset(self, int domain_id):
         return 0 # We no longer have a domain offset.
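
Two things worth noting in this changeset: included() short-circuits
with "return True" for now, so the per-domain selector check below it is
dead code, and domain_identify() is hoisted up to the base
OctreeContainer so that any oct container can map a selector onto the
1-indexed domain ids it touches. The mask-to-ids conversion it performs
is equivalent to this NumPy one-liner (a sketch only; the real code
fills the mask with the identify_octs visitor):

    import numpy as np

    def mask_to_domain_ids(domain_mask):
        # domain_mask[i] == 1 means domain i+1 intersects the selector.
        return list(np.nonzero(domain_mask)[0] + 1)

    # e.g. mask_to_domain_ids(np.array([1, 0, 1], "uint8")) -> [1, 3]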
 


https://bitbucket.org/yt_analysis/yt/commits/6f78ec8a3a59/
Changeset:   6f78ec8a3a59
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-27 06:00:32
Summary:     Refactoring to re-enable NMSU-ART.
Affected #:  6 files

diff -r eadc1ed9c34a9069e9121b59d3b70d2b7156f26a -r 6f78ec8a3a59b676d3fe3de5a6f45ef5798e9e37 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -452,22 +452,18 @@
         oct_handler = self.oct_handler
         all_fields = self.domain.pf.h.fluid_field_list
         fields = [f for ft, f in ftfields]
-        level_offset = 0
         field_idxs = [all_fields.index(f) for f in fields]
-        dest = {}
-        for field in fields:
-            dest[field] = np.zeros(self.cell_count, 'float64')-1.
         level = self.domain_level
         source = {}
         data = _read_root_level(content, self.domain.level_child_offsets,
                                 self.domain.level_count)
+
         for field, i in zip(fields, field_idxs):
             temp = np.reshape(data[i, :], self.domain.pf.domain_dimensions,
                               order='F').astype('float64').T
             source[field] = temp
-        level_offset += oct_handler.fill_level_from_grid(
-            self.domain.domain_id,
-            level, dest, source, self.mask, level_offset)
+        dest = oct_handler.fill_level_from_grid(
+            self.selector, self.domain_id, source)
         return dest
 
     def fill_level(self, content, ftfields):
@@ -596,21 +592,6 @@
         mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
                     root_octs_side**3, 0, oct_handler.nocts)
 
-    def select(self, selector):
-        if id(selector) == self._last_selector_id:
-            return self._last_mask
-        self._last_mask = selector.fill_mask(self)
-        self._last_selector_id = id(selector)
-        return self._last_mask
-
-    def count(self, selector):
-        if id(selector) == self._last_selector_id:
-            if self._last_mask is None:
-                return 0
-            return self._last_mask.sum()
-        self.select(selector)
-        return self.count(selector)
-
     def included(self, selector):
         return True
         if getattr(selector, "domain_id", None) is not None:

diff -r eadc1ed9c34a9069e9121b59d3b70d2b7156f26a -r 6f78ec8a3a59b676d3fe3de5a6f45ef5798e9e37 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -30,6 +30,7 @@
 import os
 import os.path
 
+from yt.funcs import *
 from yt.utilities.io_handler import \
     BaseIOHandler
 import yt.utilities.lib as au
@@ -47,7 +48,7 @@
         # Chunks in this case will have affiliated domain subset objects
         # Each domain subset will contain a hydro_offset array, which gives
         # pointers to level-by-level hydro information
-        tr = dict((f, np.empty(size, dtype='float64')) for f in fields)
+        tr = defaultdict(list)
         cp = 0
         for chunk in chunks:
             for subset in chunk.objs:
@@ -60,13 +61,16 @@
                 else:
                     rv = subset.fill_level(f, fields)
                 for ft, f in fields:
+                    d = rv.pop(f)
                     mylog.debug("Filling L%i %s with %s (%0.3e %0.3e) (%s:%s)",
-                                subset.domain_level,
-                                f, subset.cell_count, rv[f].min(), rv[f].max(),
-                                cp, cp+subset.cell_count)
-                    tr[(ft, f)][cp:cp+subset.cell_count] = rv.pop(f)
+                                subset.domain_level, f, d.size, d.min(), d.max(),
+                                cp, cp+d.size)
+                    tr[(ft, f)].append(d)
                 cp += subset.cell_count
-        return tr
+        d = {}
+        for k in tr.keys():
+            d[k] = np.concatenate(tr.pop(k))
+        return d
 
     def _read_particle_selection(self, chunks, selector, fields):
         tr = {}

diff -r eadc1ed9c34a9069e9121b59d3b70d2b7156f26a -r 6f78ec8a3a59b676d3fe3de5a6f45ef5798e9e37 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -51,6 +51,7 @@
 
 cdef class OctreeContainer:
     cdef OctAllocationContainer *cont
+    cdef OctAllocationContainer **domains
     cdef Oct ****root_mesh
     cdef int partial_coverage
     cdef int nn[3]
@@ -67,15 +68,14 @@
     cdef void visit_all_octs(self, SelectorObject selector,
                         oct_visitor_function *func,
                         OctVisitorData *data)
+    cdef Oct *next_root(self, int domain_id, int ind[3])
+    cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
 
 cdef class RAMSESOctreeContainer(OctreeContainer):
-    cdef OctAllocationContainer **domains
     cdef OctKey *root_nodes
     cdef void *tree_root
     cdef int num_root
     cdef int max_root
-    cdef Oct *next_root(self, int domain_id, int ind[3])
-    cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
 
 cdef extern from "search.h" nogil:
     void *tsearch(const void *key, void **rootp,

diff -r eadc1ed9c34a9069e9121b59d3b70d2b7156f26a -r 6f78ec8a3a59b676d3fe3de5a6f45ef5798e9e37 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -480,6 +480,97 @@
         self.visit_all_octs(selector, oct_visitors.index_octs, &data)
         return ind
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def add(self, int curdom, int curlevel,
+            np.ndarray[np.float64_t, ndim=2] pos,
+            int skip_boundary = 1):
+        cdef int level, no, p, i, j, k, ind[3]
+        cdef Oct *cur, *next = NULL
+        cdef np.float64_t pp[3], cp[3], dds[3]
+        no = pos.shape[0] #number of octs
+        if curdom > self.max_domain: return 0
+        cdef OctAllocationContainer *cont = self.domains[curdom - 1]
+        cdef int initial = cont.n_assigned
+        cdef int in_boundary = 0
+        # How do we bootstrap ourselves?
+        for p in range(no):
+            #for every oct we're trying to add, find the
+            #floating point unitary position on this level
+            in_boundary = 0
+            for i in range(3):
+                pp[i] = pos[p, i]
+                dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
+                ind[i] = <np.int64_t> ((pp[i] - self.DLE[i])/dds[i])
+                cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
+                if ind[i] < 0 or ind[i] >= self.nn[i]:
+                    in_boundary = 1
+            if skip_boundary == in_boundary == 1: continue
+            cur = self.next_root(curdom, ind)
+            if cur == NULL: raise RuntimeError
+            # Now we find the location we want
+            # Note that RAMSES, I think, 1-indexes levels, but we don't.
+            for level in range(curlevel):
+                # At every level, find the cell this oct
+                # lives inside
+                for i in range(3):
+                    #as we get deeper, oct size halves
+                    dds[i] = dds[i] / 2.0
+                    if cp[i] > pp[i]: 
+                        ind[i] = 0
+                        cp[i] -= dds[i]/2.0
+                    else:
+                        ind[i] = 1
+                        cp[i] += dds[i]/2.0
+                # Check if it has not been allocated
+                cur = self.next_child(curdom, ind, cur)
+            # Now we should be at the right level
+            cur.domain = curdom
+            cur.file_ind = p
+        return cont.n_assigned - initial
+
+    def allocate_domains(self, domain_counts):
+        cdef int count, i
+        cdef OctAllocationContainer *cur = self.cont
+        assert(cur == NULL)
+        self.max_domain = len(domain_counts) # 1-indexed
+        self.domains = <OctAllocationContainer **> malloc(
+            sizeof(OctAllocationContainer *) * len(domain_counts))
+        for i, count in enumerate(domain_counts):
+            cur = allocate_octs(count, cur)
+            if self.cont == NULL: self.cont = cur
+            self.domains[i] = cur
+
+    cdef Oct* next_root(self, int domain_id, int ind[3]):
+        cdef Oct *next = self.root_mesh[ind[0]][ind[1]][ind[2]]
+        if next != NULL: return next
+        cdef OctAllocationContainer *cont = self.domains[domain_id - 1]
+        if cont.n_assigned >= cont.n: raise RuntimeError
+        next = &cont.my_octs[cont.n_assigned]
+        cont.n_assigned += 1
+        self.root_mesh[ind[0]][ind[1]][ind[2]] = next
+        self.nocts += 1
+        return next
+
+    cdef Oct* next_child(self, int domain_id, int ind[3], Oct *parent):
+        cdef int i
+        cdef Oct *next = NULL
+        if parent.children != NULL:
+            next = parent.children[cind(ind[0],ind[1],ind[2])]
+        else:
+            parent.children = <Oct **> malloc(sizeof(Oct *) * 8)
+            for i in range(8):
+                parent.children[i] = NULL
+        if next != NULL: return next
+        cdef OctAllocationContainer *cont = self.domains[domain_id - 1]
+        if cont.n_assigned >= cont.n: raise RuntimeError
+        next = &cont.my_octs[cont.n_assigned]
+        cont.n_assigned += 1
+        parent.children[cind(ind[0],ind[1],ind[2])] = next
+        self.nocts += 1
+        return next
+
 cdef int root_node_compare(void *a, void *b) nogil:
     cdef OctKey *ao, *bo
     ao = <OctKey *>a
@@ -589,35 +680,8 @@
         self.nocts += 1
         return next
 
-    cdef Oct* next_child(self, int domain_id, int ind[3], Oct *parent):
-        cdef int i
-        cdef Oct *next = NULL
-        if parent.children != NULL:
-            next = parent.children[cind(ind[0],ind[1],ind[2])]
-        else:
-            parent.children = <Oct **> malloc(sizeof(Oct *) * 8)
-            for i in range(8):
-                parent.children[i] = NULL
-        if next != NULL: return next
-        cdef OctAllocationContainer *cont = self.domains[domain_id - 1]
-        if cont.n_assigned >= cont.n: raise RuntimeError
-        next = &cont.my_octs[cont.n_assigned]
-        cont.n_assigned += 1
-        parent.children[cind(ind[0],ind[1],ind[2])] = next
-        self.nocts += 1
-        return next
-
     def allocate_domains(self, domain_counts, int root_nodes):
-        cdef int count, i
-        cdef OctAllocationContainer *cur = self.cont
-        assert(cur == NULL)
-        self.max_domain = len(domain_counts) # 1-indexed
-        self.domains = <OctAllocationContainer **> malloc(
-            sizeof(OctAllocationContainer *) * len(domain_counts))
-        for i, count in enumerate(domain_counts):
-            cur = allocate_octs(count, cur)
-            if self.cont == NULL: self.cont = cur
-            self.domains[i] = cur
+        OctreeContainer.allocate_domains(domain_counts)
         self.root_nodes = <OctKey*> malloc(sizeof(OctKey) * root_nodes)
         self.max_root = root_nodes
         for i in range(root_nodes):
@@ -633,56 +697,6 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def add(self, int curdom, int curlevel,
-            np.ndarray[np.float64_t, ndim=2] pos,
-            int skip_boundary = 1):
-        cdef int level, no, p, i, j, k, ind[3]
-        cdef Oct *cur, *next = NULL
-        cdef np.float64_t pp[3], cp[3], dds[3]
-        no = pos.shape[0] #number of octs
-        if curdom > self.max_domain: return 0
-        cdef OctAllocationContainer *cont = self.domains[curdom - 1]
-        cdef int initial = cont.n_assigned
-        cdef int in_boundary = 0
-        # How do we bootstrap ourselves?
-        for p in range(no):
-            #for every oct we're trying to add, find the
-            #floating point unitary position on this level
-            in_boundary = 0
-            for i in range(3):
-                pp[i] = pos[p, i]
-                dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
-                ind[i] = <np.int64_t> ((pp[i] - self.DLE[i])/dds[i])
-                cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
-                if ind[i] < 0 or ind[i] >= self.nn[i]:
-                    in_boundary = 1
-            if skip_boundary == in_boundary == 1: continue
-            cur = self.next_root(curdom, ind)
-            if cur == NULL: raise RuntimeError
-            # Now we find the location we want
-            # Note that RAMSES, I think, 1-indexes levels, but we don't.
-            for level in range(curlevel):
-                # At every level, find the cell this oct
-                # lives inside
-                for i in range(3):
-                    #as we get deeper, oct size halves
-                    dds[i] = dds[i] / 2.0
-                    if cp[i] > pp[i]: 
-                        ind[i] = 0
-                        cp[i] -= dds[i]/2.0
-                    else:
-                        ind[i] = 1
-                        cp[i] += dds[i]/2.0
-                # Check if it has not been allocated
-                cur = self.next_child(curdom, ind, cur)
-            # Now we should be at the right level
-            cur.domain = curdom
-            cur.file_ind = p
-        return cont.n_assigned - initial
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
     def fill_level(self, int domain, int level, dest_fields, source_fields,
                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask, int offset):
         cdef np.ndarray[np.float64_t, ndim=2] source
@@ -711,11 +725,7 @@
                             local_filled += 1
         return local_filled
 
-cdef class ARTOctreeContainer(RAMSESOctreeContainer):
-
-    def __init__(self, domain_dimensions, domain_left_edge, domain_right_edge):
-        OctreeContainer.__init__(self, domain_dimensions,
-            domain_left_edge, domain_right_edge)
+cdef class ARTOctreeContainer(OctreeContainer):
 
     @cython.boundscheck(True)
     @cython.wraparound(False)
@@ -758,14 +768,11 @@
                             local_filled += 1
         return local_filled
 
-
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def fill_level_from_grid(self, int domain, int level, dest_fields, 
-                             source_fields, 
-                             np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                             int offset):
+    def fill_level_from_grid(self, SelectorObject selector,
+                             int domain_id, source_fields):
         #Fill level, but instead of assuming that the source
         #order is that of the oct order, we look up the oct position
         #and fill its children from the source field
@@ -777,32 +784,21 @@
         # Note that the .pos[0] etc calls need to be uncommented.
         cdef np.ndarray[np.float64_t, ndim=3] source
         cdef np.ndarray[np.float64_t, ndim=1] dest
-        cdef OctAllocationContainer *dom = self.domains[domain - 1]
-        cdef Oct *o
-        cdef int n
-        cdef int i, j, k, ii
-        cdef int local_pos, local_filled
-        cdef np.float64_t val
-        cdef np.int64_t ox,oy,oz
-        for key in dest_fields:
+        cdef OctVisitorData data
+        cdef void *p[2]
+        num_cells = selector.count_oct_cells(self, domain_id)
+        dest_fields = {}
+        for key in source_fields:
+            dest_fields[key] = dest = \
+                np.zeros(num_cells, dtype="float64")
+            data.index = 0
             local_filled = 0
-            dest = dest_fields[key]
             source = source_fields[key]
-            for n in range(dom.n):
-                o = &dom.my_octs[n]
-                # TODO: Uncomment this!
-                #if o.level != level: continue
-                for i in range(2):
-                    for j in range(2):
-                        for k in range(2):
-                            ii = ((k*2)+j)*2+i
-                            if mask[o.domain_ind, ii] == 0: continue
-                            #ox = (o.pos[0] << 1) + i
-                            #oy = (o.pos[1] << 1) + j
-                            #oz = (o.pos[2] << 1) + k
-                            dest[local_filled + offset] = source[ox,oy,oz]
-                            local_filled += 1
-        return local_filled
+            p[0] = source.data
+            p[1] = dest.data
+            data.array = &p
+            self.visit_all_octs(selector, oct_visitors.fill_from_file, &data)
+        return dest_fields
 
     def allocate_domains(self, domain_counts):
         cdef int count, i
@@ -816,13 +812,3 @@
             if self.cont == NULL: self.cont = cur
             self.domains[i] = cur
 
-    cdef Oct* next_root(self, int domain_id, int ind[3]):
-        cdef Oct *next = self.root_mesh[ind[0]][ind[1]][ind[2]]
-        if next != NULL: return next
-        cdef OctAllocationContainer *cont = self.domains[domain_id - 1]
-        if cont.n_assigned >= cont.n: raise RuntimeError
-        next = &cont.my_octs[cont.n_assigned]
-        cont.n_assigned += 1
-        self.root_mesh[ind[0]][ind[1]][ind[2]] = next
-        self.nocts += 1
-        return next

diff -r eadc1ed9c34a9069e9121b59d3b70d2b7156f26a -r 6f78ec8a3a59b676d3fe3de5a6f45ef5798e9e37 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -60,6 +60,7 @@
 cdef oct_visitor_function copy_array_i64
 cdef oct_visitor_function identify_octs
 cdef oct_visitor_function assign_domain_ind
+cdef oct_visitor_function fill_from_file
 
 cdef inline int cind(int i, int j, int k):
     return (((i*2)+j)*2+k)

diff -r eadc1ed9c34a9069e9121b59d3b70d2b7156f26a -r 6f78ec8a3a59b676d3fe3de5a6f45ef5798e9e37 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -151,3 +151,11 @@
 cdef void assign_domain_ind(Oct *o, OctVisitorData *data, np.uint8_t selected):
     o.domain_ind = data.global_index
     data.index += 1
+
+cdef void fill_from_file(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    if selected == 0: return
+    # There are this many records between "octs"
+    cdef np.float64_t **p = <np.float64_t**> data.array
+    p[1][data.index] = p[0][o.file_ind + oind(data)]
+    data.index += 1
+
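
The io.py side of this refactor drops the preallocated np.empty(size)
output buffers, which required knowing the selected cell count before
reading, in favor of per-field lists of chunk arrays that are
concatenated at the end. A sketch of that accumulate-then-concatenate
pattern, with read_chunk standing in for the subset fill calls:

    from collections import defaultdict
    import numpy as np

    def read_fluid_selection(chunks, fields, read_chunk):
        # fields is a list of (ftype, fname) tuples; no global size
        # needs to be known in advance.
        tr = defaultdict(list)
        for chunk in chunks:
            rv = read_chunk(chunk, fields)  # dict: fname -> ndarray
            for ft, f in fields:
                tr[(ft, f)].append(rv.pop(f))
        return dict((k, np.concatenate(v)) for k, v in tr.items())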


https://bitbucket.org/yt_analysis/yt/commits/9e27d3a8e379/
Changeset:   9e27d3a8e379
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-27 06:20:18
Summary:     Specifying min/max level fixes the ART selection.
Affected #:  2 files

diff -r 6f78ec8a3a59b676d3fe3de5a6f45ef5798e9e37 -r 9e27d3a8e379927a92e4787119bad3c29c37b061 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -439,6 +439,8 @@
 class ARTDomainSubset(OctreeSubset):
     def __init__(self, base_region, domain, pf):
         super(ARTDomainSubset, self).__init__(base_region, domain, pf)
+        self.min_level = domain.domain_level
+        self.max_level = domain.domain_level
         self.domain_level = domain.domain_level
 
     def fill_root(self, content, ftfields):

diff -r 6f78ec8a3a59b676d3fe3de5a6f45ef5798e9e37 -r 9e27d3a8e379927a92e4787119bad3c29c37b061 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -727,6 +727,10 @@
 
 cdef class ARTOctreeContainer(OctreeContainer):
 
+    def __init__(self, *args, **kwargs):
+        self.partial_coverage = 1
+        OctreeContainer.__init__(self, *args, **kwargs)
+
     @cython.boundscheck(True)
     @cython.wraparound(False)
     @cython.cdivision(True)
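
Pinning min_level and max_level to the subset's own domain_level works
because SelectorObject.__cinit__ (visible in the selection_routines.pyx
hunk later in this digest) picks both up with getattr, and the recursive
oct visit uses them to bound which levels count as selectable. Reduced
to a predicate, the gate is just:

    def level_selected(level, min_level=0, max_level=99):
        # The window SelectorObject applies during oct traversal; an
        # ART domain subset pins both bounds to its own domain_level.
        return min_level <= level <= max_level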


https://bitbucket.org/yt_analysis/yt/commits/9d53024f1c04/
Changeset:   9d53024f1c04
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-27 14:25:04
Summary:     Beginning unification of NMSU-ART domains.
Affected #:  3 files

diff -r 9e27d3a8e379927a92e4787119bad3c29c37b061 -r 9d53024f1c049fc9e151e068fecc039cf42290c4 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -110,9 +110,8 @@
             self.parameter_file.domain_dimensions/2,  # dd is # of root cells
             self.parameter_file.domain_left_edge,
             self.parameter_file.domain_right_edge)
-        self.domains = [ARTDomainFile(self.parameter_file, l+1, nv, l,
-                                      self.oct_handler)
-                        for l in range(self.pf.max_level)]
+        self.domains = [ARTDomainFile(self.parameter_file, 0, nv, l,
+                                      self.oct_handler)]
         self.octs_per_domain = [dom.level_count.sum() for dom in self.domains]
         self.total_octs = sum(self.octs_per_domain)
         mylog.debug("Allocating %s octs", self.total_octs)
@@ -439,8 +438,6 @@
 class ARTDomainSubset(OctreeSubset):
     def __init__(self, base_region, domain, pf):
         super(ARTDomainSubset, self).__init__(base_region, domain, pf)
-        self.min_level = domain.domain_level
-        self.max_level = domain.domain_level
         self.domain_level = domain.domain_level
 
     def fill_root(self, content, ftfields):
@@ -455,7 +452,6 @@
         all_fields = self.domain.pf.h.fluid_field_list
         fields = [f for ft, f in ftfields]
         field_idxs = [all_fields.index(f) for f in fields]
-        level = self.domain_level
         source = {}
         data = _read_root_level(content, self.domain.level_child_offsets,
                                 self.domain.level_count)
@@ -503,11 +499,10 @@
     _last_mask = None
     _last_seletor_id = None
 
-    def __init__(self, pf, domain_id, nvar, level, oct_handler):
+    def __init__(self, pf, nvar, level, oct_handler):
         self.nvar = nvar
         self.pf = pf
         self.domain_id = domain_id
-        self.domain_level = level
         self._level_count = None
         self._level_oct_offsets = None
         self._level_child_offsets = None
@@ -519,7 +514,7 @@
         if self._level_count is not None:
             return self._level_count
         self.level_offsets
-        return self._level_count[self.domain_level]
+        return self._level_count
 
     @property
     def level_child_offsets(self):
@@ -559,25 +554,25 @@
         """
         self.level_offsets
         f = open(self.pf._file_amr, "rb")
-        level = self.domain_level
-        unitary_center, fl, iocts, nocts, root_level = _read_art_level_info(
-            f,
-            self._level_oct_offsets, level,
-            coarse_grid=self.pf.domain_dimensions[0],
-            root_level=self.pf.root_level)
-        nocts_check = oct_handler.add(self.domain_id, level, unitary_center)
-        assert(nocts_check == nocts)
-        mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
-                    nocts, level, oct_handler.nocts)
+        for level in range(self.pf.max_level + 1):
+            unitary_center, fl, iocts, nocts, root_level = \
+                _read_art_level_info( f,
+                    self._level_oct_offsets, level,
+                    coarse_grid=self.pf.domain_dimensions[0],
+                    root_level=self.pf.root_level)
+            nocts_check = oct_handler.add(self.domain_id, level,
+                                          unitary_center)
+            assert(nocts_check == nocts)
+            mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
+                        nocts, level, oct_handler.nocts)
 
     def _read_amr_root(self, oct_handler):
         self.level_offsets
         f = open(self.pf._file_amr, "rb")
         # add the root *cell* not *oct* mesh
-        level = self.domain_level
         root_octs_side = self.pf.domain_dimensions[0]/2
         NX = np.ones(3)*root_octs_side
-        octs_side = NX*2**level
+        octs_side = NX*2 # Level == 0
         LE = np.array([0.0, 0.0, 0.0], dtype='float64')
         RE = np.array([1.0, 1.0, 1.0], dtype='float64')
         root_dx = (RE - LE) / NX
@@ -588,8 +583,7 @@
                            LL[1]:RL[1]:NX[1]*1j,
                            LL[2]:RL[2]:NX[2]*1j]
         root_fc = np.vstack([p.ravel() for p in root_fc]).T
-        nocts_check = oct_handler.add(self.domain_id, level,
-                                      root_fc)
+        nocts_check = oct_handler.add(self.domain_id, 0, root_fc)
         assert(oct_handler.nocts == root_fc.shape[0])
         mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
                     root_octs_side**3, 0, oct_handler.nocts)

diff -r 9e27d3a8e379927a92e4787119bad3c29c37b061 -r 9d53024f1c049fc9e151e068fecc039cf42290c4 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -66,7 +66,7 @@
                                 subset.domain_level, f, d.size, d.min(), d.max(),
                                 cp, cp+d.size)
                     tr[(ft, f)].append(d)
-                cp += subset.cell_count
+                cp += d.size
         d = {}
         for k in tr.keys():
             d[k] = np.concatenate(tr.pop(k))

diff -r 9e27d3a8e379927a92e4787119bad3c29c37b061 -r 9d53024f1c049fc9e151e068fecc039cf42290c4 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -783,9 +783,6 @@
         #As a result, source is 3D grid with 8 times as many
         #elements as the number of octs on this level in this domain
         #and with the shape of an equal-sided cube
-        #
-        # TODO: Convert to a recursive function.
-        # Note that the .pos[0] etc calls need to be uncommented.
         cdef np.ndarray[np.float64_t, ndim=3] source
         cdef np.ndarray[np.float64_t, ndim=1] dest
         cdef OctVisitorData data
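
With the domains unified, a single ARTDomainFile owns every level and
_read_amr_level loops over them itself. Note that the constructor call
above, ARTDomainFile(self.parameter_file, 0, nv, l, self.oct_handler),
still passes five arguments, references l outside any loop, and leaves
domain_id undefined inside the new four-argument __init__; changeset
e2dbd707505b below settles the signature. The level loop reduces to this
shape (a sketch; read_level_info stands in for _read_art_level_info):

    def read_amr_levels(oct_handler, domain, max_level, read_level_info):
        # Add octs for every level of the single unified domain.
        for level in range(max_level + 1):
            centers, nocts = read_level_info(domain, level)
            added = oct_handler.add(domain.domain_id, level, centers)
            assert added == nocts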


https://bitbucket.org/yt_analysis/yt/commits/642acd1552bd/
Changeset:   642acd1552bd
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-28 05:09:37
Summary:     Attempt at a file-index function.
Affected #:  2 files

diff -r 9d53024f1c049fc9e151e068fecc039cf42290c4 -r 642acd1552bd49a8498995949eefb0b5ccf6262c yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -60,7 +60,7 @@
 cdef oct_visitor_function copy_array_i64
 cdef oct_visitor_function identify_octs
 cdef oct_visitor_function assign_domain_ind
-cdef oct_visitor_function fill_from_file
+cdef oct_visitor_function fill_file_indices
 
 cdef inline int cind(int i, int j, int k):
     return (((i*2)+j)*2+k)

diff -r 9d53024f1c049fc9e151e068fecc039cf42290c4 -r 642acd1552bd49a8498995949eefb0b5ccf6262c yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -152,10 +152,13 @@
     o.domain_ind = data.global_index
     data.index += 1
 
-cdef void fill_from_file(Oct *o, OctVisitorData *data, np.uint8_t selected):
+cdef void fill_file_indices(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    # We fill these arrays, then inside the level filler we use these as
+    # indices as we fill a second array from the data.
     if selected == 0: return
-    # There are this many records between "octs"
-    cdef np.float64_t **p = <np.float64_t**> data.array
-    p[1][data.index] = p[0][o.file_ind + oind(data)]
-    data.index += 1
-
+    cdef void **p = data.array
+    cdef np.uint8_t *level_arr = <np.uint8_t *> p[0]
+    cdef np.int64_t *find_arr = <np.int64_t *> p[1]
+    level_arr[data.index] = data.level
+    level_arr[data.index] = o.file_ind * 8 + oind(data)
+    data.index +=1
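
As committed, fill_file_indices writes both records into level_arr; the
find_arr it declares is never used, so the level entry is immediately
clobbered by o.file_ind * 8 + oind(data). Changeset fe4a5753149b below
corrects this into three parallel arrays (level, file index, cell
index). In NumPy terms the visitor is building:

    import numpy as np

    def build_file_indices(selected_cells, num_cells):
        # One record per selected cell, as in the corrected visitor:
        # which level, which oct record on disk, which cell of the 8.
        levels = np.zeros(num_cells, dtype="uint8")
        file_inds = np.zeros(num_cells, dtype="int64")
        cell_inds = np.zeros(num_cells, dtype="uint8")
        for index, (level, file_ind, cell) in enumerate(selected_cells):
            levels[index] = level
            file_inds[index] = file_ind
            cell_inds[index] = cell
        return levels, file_inds, cell_inds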


https://bitbucket.org/yt_analysis/yt/commits/a63a8aa7dcea/
Changeset:   a63a8aa7dcea
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-27 14:44:37
Summary:     Fix to RAMSES oct container.
Affected #:  1 file

diff -r 9d53024f1c049fc9e151e068fecc039cf42290c4 -r a63a8aa7dcead07db35d0cee61e7c6611ba2fd64 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -681,7 +681,7 @@
         return next
 
     def allocate_domains(self, domain_counts, int root_nodes):
-        OctreeContainer.allocate_domains(domain_counts)
+        OctreeContainer.allocate_domains(self, domain_counts)
         self.root_nodes = <OctKey*> malloc(sizeof(OctKey) * root_nodes)
         self.max_root = root_nodes
         for i in range(root_nodes):
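
The fix here is the classic unbound-method call: invoking a base-class
method through the class object requires passing self explicitly, so
OctreeContainer.allocate_domains(domain_counts) had been binding
domain_counts as self and failing. The same rule in plain Python:

    class Base(object):
        def allocate(self, counts):
            self.counts = counts

    class Derived(Base):
        def allocate(self, counts, extra):
            # Base.allocate(counts) would bind counts as self; the
            # explicit-self form below is what the changeset restores.
            Base.allocate(self, counts)
            self.extra = extra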


https://bitbucket.org/yt_analysis/yt/commits/e8efda0bf32c/
Changeset:   e8efda0bf32c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-28 05:11:16
Summary:     Merging
Affected #:  2 files

diff -r a63a8aa7dcead07db35d0cee61e7c6611ba2fd64 -r e8efda0bf32c8250a1f0ebc06704c3ea3c449b1d yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -60,7 +60,7 @@
 cdef oct_visitor_function copy_array_i64
 cdef oct_visitor_function identify_octs
 cdef oct_visitor_function assign_domain_ind
-cdef oct_visitor_function fill_from_file
+cdef oct_visitor_function fill_file_indices
 
 cdef inline int cind(int i, int j, int k):
     return (((i*2)+j)*2+k)

diff -r a63a8aa7dcead07db35d0cee61e7c6611ba2fd64 -r e8efda0bf32c8250a1f0ebc06704c3ea3c449b1d yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -152,10 +152,13 @@
     o.domain_ind = data.global_index
     data.index += 1
 
-cdef void fill_from_file(Oct *o, OctVisitorData *data, np.uint8_t selected):
+cdef void fill_file_indices(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    # We fill these arrays, then inside the level filler we use these as
+    # indices as we fill a second array from the data.
     if selected == 0: return
-    # There are this many records between "octs"
-    cdef np.float64_t **p = <np.float64_t**> data.array
-    p[1][data.index] = p[0][o.file_ind + oind(data)]
-    data.index += 1
-
+    cdef void **p = data.array
+    cdef np.uint8_t *level_arr = <np.uint8_t *> p[0]
+    cdef np.int64_t *find_arr = <np.int64_t *> p[1]
+    level_arr[data.index] = data.level
+    level_arr[data.index] = o.file_ind * 8 + oind(data)
+    data.index +=1


https://bitbucket.org/yt_analysis/yt/commits/fe4a5753149b/
Changeset:   fe4a5753149b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-28 06:09:51
Summary:     First pass at re-doing RAMSES fluid IO.
Affected #:  4 files

diff -r e8efda0bf32c8250a1f0ebc06704c3ea3c449b1d -r fe4a5753149b223dbf07d03a0d0d6efa6acab2dc yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -257,17 +257,18 @@
 
     _domain_offset = 1
 
-    def fill(self, content, fields):
+    def fill(self, content, fields, selector):
         # Here we get a copy of the file, which we skip through and read the
         # bits we want.
         oct_handler = self.oct_handler
         all_fields = self.domain.pf.h.fluid_field_list
         fields = [f for ft, f in fields]
         tr = {}
-        filled = pos = level_offset = 0
-        min_level = self.domain.pf.min_level
+        cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
+        levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
+            selector, self.domain_id, cell_count)
         for field in fields:
-            tr[field] = np.zeros(self.cell_count, 'float64')
+            tr[field] = np.zeros(cell_count, 'float64')
         for level, offset in enumerate(self.domain.hydro_offset):
             if offset == -1: continue
             content.seek(offset)
@@ -278,17 +279,10 @@
             for i in range(8):
                 for field in all_fields:
                     if field not in fields:
-                        #print "Skipping %s in %s : %s" % (field, level,
-                        #        self.domain.domain_id)
                         fpu.skip(content)
                     else:
-                        #print "Reading %s in %s : %s" % (field, level,
-                        #        self.domain.domain_id)
                         temp[field][:,i] = fpu.read_vector(content, 'd') # cell 1
-            level_offset += oct_handler.fill_level(self.domain.domain_id, level,
-                                   tr, temp, self.mask, level_offset)
-            #print "FILL (%s : %s) %s" % (self.domain.domain_id, level, level_offset)
-        #print "DONE (%s) %s of %s" % (self.domain.domain_id, level_offset, self.cell_count)
+            oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, temp)
         return tr
 
 class RAMSESGeometryHandler(OctreeGeometryHandler):

diff -r e8efda0bf32c8250a1f0ebc06704c3ea3c449b1d -r fe4a5753149b223dbf07d03a0d0d6efa6acab2dc yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -39,7 +39,7 @@
         # Chunks in this case will have affiliated domain subset objects
         # Each domain subset will contain a hydro_offset array, which gives
         # pointers to level-by-level hydro information
-        tr = dict((f, np.empty(size, dtype='float64')) for f in fields)
+        tr = defaultdict(list)
         cp = 0
         for chunk in chunks:
             for subset in chunk.objs:
@@ -48,14 +48,16 @@
                 # This contains the boundary information, so we skim through
                 # and pick off the right vectors
                 content = cStringIO.StringIO(f.read())
-                rv = subset.fill(content, fields)
+                rv = subset.fill(content, fields, selector)
                 for ft, f in fields:
-                    mylog.debug("Filling %s with %s (%0.3e %0.3e) (%s:%s)",
-                        f, subset.cell_count, rv[f].min(), rv[f].max(),
-                        cp, cp+subset.cell_count)
-                    tr[(ft, f)][cp:cp+subset.cell_count] = rv.pop(f)
-                cp += subset.cell_count
-        return tr
+                    d = rv.pop(f)
+                    mylog.debug("Filling %s with %s (%0.3e %0.3e) (%s zones)",
+                        f, d.size, d.min(), d.max(), d.size)
+                    tr[(ft, f)].append(d)
+        d = {}
+        for field in fields:
+            d[field] = np.concatenate(tr.pop(field))
+        return d
 
     def _read_particle_selection(self, chunks, selector, fields):
         size = 0

diff -r e8efda0bf32c8250a1f0ebc06704c3ea3c449b1d -r fe4a5753149b223dbf07d03a0d0d6efa6acab2dc yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -694,36 +694,53 @@
         if self.root_nodes != NULL: free(self.root_nodes)
         if self.domains != NULL: free(self.domains)
 
+    def file_index_octs(self, SelectorObject selector, int domain_id,
+                        num_cells = -1):
+        # We create oct arrays of the correct size
+        cdef np.int64_t i
+        cdef np.ndarray[np.uint8_t, ndim=1] levels
+        cdef np.ndarray[np.uint8_t, ndim=1] cell_inds
+        cdef np.ndarray[np.int64_t, ndim=1] file_inds
+        if num_cells < 0:
+            num_cells = selector.count_oct_cells(self, domain_id)
+        levels = np.zeros(num_cells, dtype="uint8")
+        file_inds = np.zeros(num_cells, dtype="int64")
+        cell_inds = np.zeros(num_cells, dtype="uint8")
+        for i in range(num_cells):
+            levels[i] = 100
+            file_inds[i] = -1
+            cell_inds[i] = 9
+        cdef OctVisitorData data
+        data.index = 0
+        cdef void *p[3]
+        p[0] = levels.data
+        p[1] = file_inds.data
+        p[2] = cell_inds.data
+        data.array = p
+        data.domain = domain_id
+        self.visit_all_octs(selector, oct_visitors.fill_file_indices, &data)
+        return levels, cell_inds, file_inds
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def fill_level(self, int domain, int level, dest_fields, source_fields,
-                   np.ndarray[np.uint8_t, ndim=2, cast=True] mask, int offset):
+    def fill_level(self, int level,
+                   np.ndarray[np.uint8_t, ndim=1] levels,
+                   np.ndarray[np.uint8_t, ndim=1] cell_inds,
+                   np.ndarray[np.int64_t, ndim=1] file_inds,
+                   dest_fields, source_fields):
         cdef np.ndarray[np.float64_t, ndim=2] source
         cdef np.ndarray[np.float64_t, ndim=1] dest
-        cdef OctAllocationContainer *dom = self.domains[domain - 1]
-        cdef Oct *o
         cdef int n
-        cdef int i, j, k, ii
+        cdef int i, di
         cdef int local_pos, local_filled
         cdef np.float64_t val
         for key in dest_fields:
-            local_filled = 0
             dest = dest_fields[key]
             source = source_fields[key]
-            for n in range(dom.n):
-                o = &dom.my_octs[n]
-                for i in range(2):
-                    for j in range(2):
-                        for k in range(2):
-                            ii = ((k*2)+j)*2+i
-                            if mask[o.domain_ind, ii] == 0: continue
-                            # TODO: Uncomment this!
-                            #if o.level == level:
-                            #    dest[local_filled] = \
-                            #        source[o.file_ind, ii]
-                            local_filled += 1
-        return local_filled
+            for i in range(levels.shape[0]):
+                if levels[i] != level: continue
+                dest[i] = source[file_inds[i], cell_inds[i]]
 
 cdef class ARTOctreeContainer(OctreeContainer):
 
@@ -798,7 +815,7 @@
             p[0] = source.data
             p[1] = dest.data
             data.array = &p
-            self.visit_all_octs(selector, oct_visitors.fill_from_file, &data)
+            #self.visit_all_octs(selector, oct_visitors.fill_from_file, &data)
         return dest_fields
 
     def allocate_domains(self, domain_counts):

diff -r e8efda0bf32c8250a1f0ebc06704c3ea3c449b1d -r fe4a5753149b223dbf07d03a0d0d6efa6acab2dc yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -156,9 +156,11 @@
     # We fill these arrays, then inside the level filler we use these as
     # indices as we fill a second array from the data.
     if selected == 0: return
-    cdef void **p = data.array
+    cdef void **p = <void **> data.array
     cdef np.uint8_t *level_arr = <np.uint8_t *> p[0]
     cdef np.int64_t *find_arr = <np.int64_t *> p[1]
+    cdef np.uint8_t *cell_arr = <np.uint8_t *> p[2]
     level_arr[data.index] = data.level
-    level_arr[data.index] = o.file_ind * 8 + oind(data)
+    find_arr[data.index] = o.file_ind
+    cell_arr[data.index] = rind(data)
     data.index +=1
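
The new IO path is two passes. First, file_index_octs seeds per-cell
arrays with out-of-range sentinels (level 100, file index -1, cell
index 9) so unvisited slots stay detectable, then the fill_file_indices
visitor overwrites them with real values. Second, fill_level scatters
one level's disk records into the destination arrays. The scatter is
equivalent to this NumPy sketch:

    import numpy as np

    def fill_level(level, levels, cell_inds, file_inds, dest, source):
        # dest: 1D float64, one slot per selected cell (all levels).
        # source: 2D float64, (octs on disk at this level, 8 cells).
        for i in range(levels.shape[0]):
            if levels[i] != level:
                continue
            dest[i] = source[file_inds[i], cell_inds[i]]

    # Vectorized equivalent:
    #     sel = (levels == level)
    #     dest[sel] = source[file_inds[sel], cell_inds[sel]]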


https://bitbucket.org/yt_analysis/yt/commits/cd025de87f35/
Changeset:   cd025de87f35
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-28 06:32:40
Summary:     This allows overlapping cell descent to be specified.
Affected #:  2 files

diff -r fe4a5753149b223dbf07d03a0d0d6efa6acab2dc -r cd025de87f35468f467633e2afe81f7f666896dd yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -30,6 +30,7 @@
 cdef class SelectorObject:
     cdef public np.int32_t min_level
     cdef public np.int32_t max_level
+    cdef int overlap_cells
 
     cdef void recursively_visit_octs(self, Oct *root,
                         np.float64_t pos[3], np.float64_t dds[3],

diff -r fe4a5753149b223dbf07d03a0d0d6efa6acab2dc -r cd025de87f35468f467633e2afe81f7f666896dd yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -120,6 +120,7 @@
     def __cinit__(self, dobj):
         self.min_level = getattr(dobj, "min_level", 0)
         self.max_level = getattr(dobj, "max_level", 99)
+        self.overlap_cells = 0
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -232,6 +233,8 @@
                             data.level -= 1
                         elif this_level == 1:
                             selected = self.select_cell(spos, sdds, eterm)
+                            if ch != NULL:
+                                selected *= self.overlap_cells
                             data.global_index += increment
                             increment = 0
                             data.ind[0] = i
@@ -1122,6 +1125,7 @@
     def __init__(self, dobj):
         self.base_selector = dobj.base_selector
         self.domain_id = dobj.domain_id
+        self.overlap_cells = 1
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -1209,7 +1213,7 @@
 cdef class AlwaysSelector(SelectorObject):
 
     def __init__(self, dobj):
-        pass
+        self.overlap_cells = 1
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
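
overlap_cells decides whether a cell that also has a refined child still
counts as selected: with the flag at 0 (the default set in __cinit__),
the "selected *= self.overlap_cells" line zeroes out covered cells,
while the subset selector and AlwaysSelector opt in with 1. Reduced to a
predicate:

    def cell_selected(geometrically_selected, has_child, overlap_cells):
        # A cell covered by a refined child is only kept when the
        # selector opts in to overlapping (partial-coverage) cells.
        if has_child:
            return geometrically_selected and bool(overlap_cells)
        return bool(geometrically_selected)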


https://bitbucket.org/yt_analysis/yt/commits/dc1968091184/
Changeset:   dc1968091184
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-28 18:23:05
Summary:     Adding some dependencies, because we changed selection_routines.pxd.
Affected #:  1 file

diff -r cd025de87f35468f467633e2afe81f7f666896dd -r dc1968091184d01910f90916a1c6530617c30480 yt/frontends/artio/setup.py
--- a/yt/frontends/artio/setup.py
+++ b/yt/frontends/artio/setup.py
@@ -16,7 +16,10 @@
                          include_dirs=["yt/frontends/artio/artio_headers/",
                                        "yt/geometry/",
                                        "yt/utilities/lib/"],
-                         depends=artio_sources)
+                         depends=artio_sources + 
+                                 ["yt/utilities/lib/fp_utils.pxd",
+                                  "yt/geometry/oct_container.pxd",
+                                  "yt/geometry/selection_routines.pxd"])
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config
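
Listing the .pxd files under depends= matters because cimported
declarations never appear in sources, so without it numpy.distutils
would not rebuild _artio_caller when selection_routines.pxd changes. The
general shape for any Cython extension with external .pxd dependencies
(a sketch with illustrative names):

    def configuration(parent_package='', top_path=None):
        from numpy.distutils.misc_util import Configuration
        config = Configuration('artio', parent_package, top_path)
        config.add_extension("_artio_caller",
                             ["yt/frontends/artio/_artio_caller.pyx"],
                             include_dirs=["yt/geometry/"],
                             # Touching any of these triggers a rebuild.
                             depends=["yt/geometry/oct_container.pxd",
                                      "yt/geometry/selection_routines.pxd"])
        return config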


https://bitbucket.org/yt_analysis/yt/commits/e2dbd707505b/
Changeset:   e2dbd707505b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-28 19:01:23
Summary:     Further development of NMSU-ART file reading and IO.
Affected #:  3 files

diff -r dc1968091184d01910f90916a1c6530617c30480 -r e2dbd707505b656f4c9e81ca942d81bb80e99f43 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -110,17 +110,16 @@
             self.parameter_file.domain_dimensions/2,  # dd is # of root cells
             self.parameter_file.domain_left_edge,
             self.parameter_file.domain_right_edge)
-        self.domains = [ARTDomainFile(self.parameter_file, 0, nv, l,
-                                      self.oct_handler)]
+        # The 1 here refers to domain_id == 1 always for ARTIO.
+        self.domains = [ARTDomainFile(self.parameter_file, nv, 
+                                      self.oct_handler, 1)]
         self.octs_per_domain = [dom.level_count.sum() for dom in self.domains]
         self.total_octs = sum(self.octs_per_domain)
         mylog.debug("Allocating %s octs", self.total_octs)
         self.oct_handler.allocate_domains(self.octs_per_domain)
-        for domain in self.domains:
-            if domain.domain_level == 0:
-                domain._read_amr_root(self.oct_handler)
-            else:
-                domain._read_amr_level(self.oct_handler)
+        domain = self.domains[0]
+        domain._read_amr_root(self.oct_handler)
+        domain._read_amr_level(self.oct_handler)
 
     def _detect_fields(self):
         self.particle_field_list = particle_fields
@@ -436,11 +435,8 @@
         return False
 
 class ARTDomainSubset(OctreeSubset):
-    def __init__(self, base_region, domain, pf):
-        super(ARTDomainSubset, self).__init__(base_region, domain, pf)
-        self.domain_level = domain.domain_level
 
-    def fill_root(self, content, ftfields):
+    def fill(self, content, ftfields, selector):
         """
         This is called from IOHandler. It takes content
         which is a binary stream, reads the requested field
@@ -452,42 +448,34 @@
         all_fields = self.domain.pf.h.fluid_field_list
         fields = [f for ft, f in ftfields]
         field_idxs = [all_fields.index(f) for f in fields]
-        source = {}
+        tr = {}
+        cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
+        levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
+            selector, self.domain_id, cell_count)
+        for field in fields:
+            tr[field] = np.zeros(cell_count, 'float64')
         data = _read_root_level(content, self.domain.level_child_offsets,
                                 self.domain.level_count)
-
         for field, i in zip(fields, field_idxs):
             temp = np.reshape(data[i, :], self.domain.pf.domain_dimensions,
-                              order='F').astype('float64').T
-            source[field] = temp
-        dest = oct_handler.fill_level_from_grid(
-            self.selector, self.domain_id, source)
-        return dest
-
-    def fill_level(self, content, ftfields):
-        oct_handler = self.oct_handler
-        fields = [f for ft, f in ftfields]
-        level_offset = 0
-        dest = {}
-        for field in fields:
-            dest[field] = np.zeros(self.cell_count, 'float64')-1.
-        level = self.domain_level
-        no = self.domain.level_count[level]
-        noct_range = [0, no]
-        source = _read_child_level(
-            content, self.domain.level_child_offsets,
-            self.domain.level_offsets,
-            self.domain.level_count, level, fields,
-            self.domain.pf.domain_dimensions,
-            self.domain.pf.parameters['ncell0'],
-            noct_range=noct_range)
-        nocts_filling = noct_range[1]-noct_range[0]
-        level_offset += oct_handler.fill_level(self.domain.domain_id,
-                                               level, dest, source,
-                                               self.mask, level_offset,
-                                               noct_range[0],
-                                               nocts_filling)
-        return dest
+                              order='F')
+            # Need it to be ordered correctly; this is expensive, though ...
+            oct_handler.fill_level(0, levels, cell_inds, file_inds, tr, temp)
+        # Now we continue with the additional levels.
+        for level in range(1, self.pf.max_level + 1):
+            no = self.domain.level_count[level]
+            noct_range = [0, no]
+            source = _read_child_level(
+                content, self.domain.level_child_offsets,
+                self.domain.level_offsets,
+                self.domain.level_count, level, fields,
+                self.domain.pf.domain_dimensions,
+                self.domain.pf.parameters['ncell0'],
+                noct_range=noct_range)
+            for field, i in zip(fields, field_idxs):
+                # Need it to be ordered correctly; this is expensive, though ...
+                oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, temp)
+        return tr
 
 class ARTDomainFile(object):
     """
@@ -499,7 +487,7 @@
     _last_mask = None
     _last_seletor_id = None
 
-    def __init__(self, pf, nvar, level, oct_handler):
+    def __init__(self, pf, nvar, oct_handler, domain_id):
         self.nvar = nvar
         self.pf = pf
         self.domain_id = domain_id
@@ -554,7 +542,7 @@
         """
         self.level_offsets
         f = open(self.pf._file_amr, "rb")
-        for level in range(self.pf.max_level + 1):
+        for level in range(1, self.pf.max_level + 1):
             unitary_center, fl, iocts, nocts, root_level = \
                 _read_art_level_info( f,
                     self._level_oct_offsets, level,
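
One thing to watch in the unified fill() above: the refined-level loop
reads each level into source but then hands the stale root-level temp
array to fill_level, and both calls pass a bare ndarray where fill_level
expects a dict of per-field source arrays. A corrected shape for the
child-level pass, assuming _read_child_level returns a dict mapping
field name to a (noct, 8) float64 array (hypothetical helper names, not
the committed code):

    def fill_children(oct_handler, domain, fields, levels, cell_inds,
                      file_inds, tr, read_child_level):
        # read_child_level stands in for _read_child_level.
        for level in range(1, domain.pf.max_level + 1):
            source = read_child_level(domain, level, fields)
            # Pass the freshly read source for this level, not a
            # leftover array from the root-level pass.
            oct_handler.fill_level(level, levels, cell_inds, file_inds,
                                   tr, source)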

diff -r dc1968091184d01910f90916a1c6530617c30480 -r e2dbd707505b656f4c9e81ca942d81bb80e99f43 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -56,10 +56,7 @@
                 f = open(subset.domain.pf._file_amr, "rb")
                 # This contains the boundary information, so we skim through
                 # and pick off the right vectors
-                if subset.domain_level == 0:
-                    rv = subset.fill_root(f, fields)
-                else:
-                    rv = subset.fill_level(f, fields)
+                rv = subset.fill(f, fields, selector)
                 for ft, f in fields:
                     d = rv.pop(f)
                     mylog.debug("Filling L%i %s with %s (%0.3e %0.3e) (%s:%s)",
@@ -68,8 +65,8 @@
                     tr[(ft, f)].append(d)
                 cp += d.size
         d = {}
-        for k in tr.keys():
-            d[k] = np.concatenate(tr.pop(k))
+        for field in fields:
+            d[field] = np.concatenate(tr.pop(field))
         return d
 
     def _read_particle_selection(self, chunks, selector, fields):

diff -r dc1968091184d01910f90916a1c6530617c30480 -r e2dbd707505b656f4c9e81ca942d81bb80e99f43 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -571,6 +571,55 @@
         self.nocts += 1
         return next
 
+    def file_index_octs(self, SelectorObject selector, int domain_id,
+                        num_cells = -1):
+        # We create oct arrays of the correct size
+        cdef np.int64_t i
+        cdef np.ndarray[np.uint8_t, ndim=1] levels
+        cdef np.ndarray[np.uint8_t, ndim=1] cell_inds
+        cdef np.ndarray[np.int64_t, ndim=1] file_inds
+        if num_cells < 0:
+            num_cells = selector.count_oct_cells(self, domain_id)
+        levels = np.zeros(num_cells, dtype="uint8")
+        file_inds = np.zeros(num_cells, dtype="int64")
+        cell_inds = np.zeros(num_cells, dtype="uint8")
+        for i in range(num_cells):
+            levels[i] = 100
+            file_inds[i] = -1
+            cell_inds[i] = 9
+        cdef OctVisitorData data
+        data.index = 0
+        cdef void *p[3]
+        p[0] = levels.data
+        p[1] = file_inds.data
+        p[2] = cell_inds.data
+        data.array = p
+        data.domain = domain_id
+        self.visit_all_octs(selector, oct_visitors.fill_file_indices, &data)
+        return levels, cell_inds, file_inds
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def fill_level(self, int level,
+                   np.ndarray[np.uint8_t, ndim=1] levels,
+                   np.ndarray[np.uint8_t, ndim=1] cell_inds,
+                   np.ndarray[np.int64_t, ndim=1] file_inds,
+                   dest_fields, source_fields):
+        cdef np.ndarray[np.float64_t, ndim=2] source
+        cdef np.ndarray[np.float64_t, ndim=1] dest
+        cdef int n
+        cdef int i, di
+        cdef int local_pos, local_filled
+        cdef np.float64_t val
+        for key in dest_fields:
+            dest = dest_fields[key]
+            source = source_fields[key]
+            for i in range(levels.shape[0]):
+                if levels[i] != level: continue
+                dest[i] = source[file_inds[i], cell_inds[i]]
+
+
 cdef int root_node_compare(void *a, void *b) nogil:
     cdef OctKey *ao, *bo
     ao = <OctKey *>a
@@ -694,130 +743,12 @@
         if self.root_nodes != NULL: free(self.root_nodes)
         if self.domains != NULL: free(self.domains)
 
-    def file_index_octs(self, SelectorObject selector, int domain_id,
-                        num_cells = -1):
-        # We create oct arrays of the correct size
-        cdef np.int64_t i
-        cdef np.ndarray[np.uint8_t, ndim=1] levels
-        cdef np.ndarray[np.uint8_t, ndim=1] cell_inds
-        cdef np.ndarray[np.int64_t, ndim=1] file_inds
-        if num_cells < 0:
-            num_cells = selector.count_oct_cells(self, domain_id)
-        levels = np.zeros(num_cells, dtype="uint8")
-        file_inds = np.zeros(num_cells, dtype="int64")
-        cell_inds = np.zeros(num_cells, dtype="uint8")
-        for i in range(num_cells):
-            levels[i] = 100
-            file_inds[i] = -1
-            cell_inds[i] = 9
-        cdef OctVisitorData data
-        data.index = 0
-        cdef void *p[3]
-        p[0] = levels.data
-        p[1] = file_inds.data
-        p[2] = cell_inds.data
-        data.array = p
-        data.domain = domain_id
-        self.visit_all_octs(selector, oct_visitors.fill_file_indices, &data)
-        return levels, cell_inds, file_inds
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def fill_level(self, int level,
-                   np.ndarray[np.uint8_t, ndim=1] levels,
-                   np.ndarray[np.uint8_t, ndim=1] cell_inds,
-                   np.ndarray[np.int64_t, ndim=1] file_inds,
-                   dest_fields, source_fields):
-        cdef np.ndarray[np.float64_t, ndim=2] source
-        cdef np.ndarray[np.float64_t, ndim=1] dest
-        cdef int n
-        cdef int i, di
-        cdef int local_pos, local_filled
-        cdef np.float64_t val
-        for key in dest_fields:
-            dest = dest_fields[key]
-            source = source_fields[key]
-            for i in range(levels.shape[0]):
-                if levels[i] != level: continue
-                dest[i] = source[file_inds[i], cell_inds[i]]
-
 cdef class ARTOctreeContainer(OctreeContainer):
 
     def __init__(self, *args, **kwargs):
         self.partial_coverage = 1
         OctreeContainer.__init__(self, *args, **kwargs)
 
-    @cython.boundscheck(True)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def fill_level(self, int domain, int level, dest_fields, source_fields,
-                   np.ndarray[np.uint8_t, ndim=2, cast=True] mask, int offset,
-                   np.int64_t subchunk_offset, np.int64_t subchunk_max):
-        #Only slightly different from the RAMSES version
-        #The source array is in chunks, just stop when we hit the end
-        cdef np.ndarray[np.float64_t, ndim=2] source
-        cdef np.ndarray[np.float64_t, ndim=1] dest
-        cdef OctAllocationContainer *dom = self.domains[domain - 1]
-        cdef Oct *o
-        cdef int n
-        cdef int i, j, k, ii
-        cdef int local_pos, local_filled
-        cdef np.float64_t val
-        cdef np.int64_t index
-        for key in dest_fields:
-            local_filled = 0
-            dest = dest_fields[key]
-            source = source_fields[key]
-            for n in range(dom.n):
-                o = &dom.my_octs[n]
-                index = o.file_ind-subchunk_offset
-                # TODO: Uncomment this!
-                #if o.level != level: continue
-                if index < 0: continue
-                if index >= subchunk_max: 
-                    #if we hit the end of the array,
-                    #immediately discontinue
-                    return local_filled
-                for i in range(2):
-                    for j in range(2):
-                        for k in range(2):
-                            ii = ((k*2)+j)*2+i
-                            if mask[o.domain_ind, ii] == 0: continue
-                            dest[local_filled + offset] = \
-                                source[index,ii]
-                            local_filled += 1
-        return local_filled
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def fill_level_from_grid(self, SelectorObject selector,
-                             int domain_id, source_fields):
-        #Fill level, but instead of assuming that the source
-        #order is that of the oct order, we look up the oct position
-        #and fill its children from the source field
-        #As a result, source is 3D grid with 8 times as many
-        #elements as the number of octs on this level in this domain
-        #and with the shape of an equal-sided cube
-        cdef np.ndarray[np.float64_t, ndim=3] source
-        cdef np.ndarray[np.float64_t, ndim=1] dest
-        cdef OctVisitorData data
-        cdef void *p[2]
-        num_cells = selector.count_oct_cells(self, domain_id)
-        dest_fields = {}
-        for key in source_fields:
-            dest_fields[key] = dest = \
-                np.zeros(num_cells, dtype="float64")
-            data.index = 0
-            local_filled = 0
-            source = source_fields[key]
-            p[0] = source.data
-            p[1] = dest.data
-            data.array = &p
-            #self.visit_all_octs(selector, oct_visitors.fill_from_file, &data)
-        return dest_fields
-
     def allocate_domains(self, domain_counts):
         cdef int count, i
         cdef OctAllocationContainer *cur = self.cont
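
file_index_octs, now on the base OctreeContainer, preallocates its three
index arrays with sentinel values (level 100, file index -1, cell index 9)
so that any cell the oct visitor never touches stays recognizably invalid,
and fill_level then scatters source values through those indices into a
flat destination. A rough NumPy equivalent of the scatter step, assuming
the index arrays have already been produced by the visitor:

    import numpy as np

    def fill_level(level, levels, cell_inds, file_inds,
                   dest_fields, source_fields):
        # Copy source[file_ind, cell_ind] into the flat destination for
        # every selected cell on this level; other levels are untouched.
        sel = (levels == level)
        for key in dest_fields:
            dest_fields[key][sel] = \
                source_fields[key][file_inds[sel], cell_inds[sel]]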


https://bitbucket.org/yt_analysis/yt/commits/90fe041ad494/
Changeset:   90fe041ad494
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-28 23:28:15
Summary:     Continuing to convert NMSU-ART
Affected #:  3 files

diff -r e2dbd707505b656f4c9e81ca942d81bb80e99f43 -r 90fe041ad4942ce1e6b82055875d1e5d1f7fe915 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -120,6 +120,7 @@
         domain = self.domains[0]
         domain._read_amr_root(self.oct_handler)
         domain._read_amr_level(self.oct_handler)
+        self.oct_handler.finalize()
 
     def _detect_fields(self):
         self.particle_field_list = particle_fields
@@ -448,7 +449,7 @@
         all_fields = self.domain.pf.h.fluid_field_list
         fields = [f for ft, f in ftfields]
         field_idxs = [all_fields.index(f) for f in fields]
-        tr = {}
+        source, tr = {}, {}
         cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
         levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
             selector, self.domain_id, cell_count)
@@ -456,11 +457,14 @@
             tr[field] = np.zeros(cell_count, 'float64')
         data = _read_root_level(content, self.domain.level_child_offsets,
                                 self.domain.level_count)
+        ns = (8, self.domain.pf.domain_dimensions.prod() / 8)
         for field, i in zip(fields, field_idxs):
-            temp = np.reshape(data[i, :], self.domain.pf.domain_dimensions,
-                              order='F')
+            source[field] = data[i, :]
+            source[field].shape = ns
+            source[field] = np.array(source[field], dtype="float64", order='F')
             # Need it to be ordered correctly; this is expensive, though ...
-            oct_handler.fill_level(0, levels, cell_inds, file_inds, tr, temp)
+        oct_handler.fill_level(0, levels, cell_inds, file_inds, tr, source)
+        del source
         # Now we continue with the additional levels.
         for level in range(1, self.pf.max_level + 1):
             no = self.domain.level_count[level]
@@ -472,9 +476,8 @@
                 self.domain.pf.domain_dimensions,
                 self.domain.pf.parameters['ncell0'],
                 noct_range=noct_range)
-            for field, i in zip(fields, field_idxs):
-                # Need it to be ordered correctly; this is expensive, though ...
-                oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, temp)
+            oct_handler.fill_level(level, levels, cell_inds, file_inds, tr,
+                source)
         return tr
 
 class ARTDomainFile(object):

diff -r e2dbd707505b656f4c9e81ca942d81bb80e99f43 -r 90fe041ad4942ce1e6b82055875d1e5d1f7fe915 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -59,8 +59,8 @@
                 rv = subset.fill(f, fields, selector)
                 for ft, f in fields:
                     d = rv.pop(f)
-                    mylog.debug("Filling L%i %s with %s (%0.3e %0.3e) (%s:%s)",
-                                subset.domain_level, f, d.size, d.min(), d.max(),
+                    mylog.debug("Filling %s with %s (%0.3e %0.3e) (%s:%s)",
+                                f, d.size, d.min(), d.max(),
                                 cp, cp+d.size)
                     tr[(ft, f)].append(d)
                 cp += d.size

diff -r e2dbd707505b656f4c9e81ca942d81bb80e99f43 -r 90fe041ad4942ce1e6b82055875d1e5d1f7fe915 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -161,7 +161,8 @@
             for j in range(self.nn[1]):
                 pos[2] = self.DLE[2] + dds[2]/2.0
                 for k in range(self.nn[2]):
-                    if self.root_mesh[i][j][k] == NULL: continue
+                    if self.root_mesh[i][j][k] == NULL:
+                        raise RuntimeError
                     data.pos[0] = i
                     data.pos[1] = j
                     data.pos[2] = k
@@ -619,6 +620,13 @@
                 if levels[i] != level: continue
                 dest[i] = source[file_inds[i], cell_inds[i]]
 
+    def finalize(self):
+        cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
+        cdef OctVisitorData data
+        data.index = 0
+        data.domain = 1
+        self.visit_all_octs(selector, oct_visitors.assign_domain_ind, &data)
+        assert ((data.global_index+1)*8 == data.index)
 
 cdef int root_node_compare(void *a, void *b) nogil:
     cdef OctKey *ao, *bo
@@ -649,14 +657,6 @@
             self.DLE[i] = domain_left_edge[i] #0
             self.DRE[i] = domain_right_edge[i] #num_grid
 
-    def finalize(self):
-        cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
-        cdef OctVisitorData data
-        data.index = 0
-
-        self.visit_all_octs(selector, oct_visitors.assign_domain_ind, &data)
-        assert ((data.global_index+1)*8 == data.index)
-
     cdef int get_root(self, int ind[3], Oct **o):
         o[0] = NULL
         cdef int i
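
The relocated finalize() checks a simple counting invariant: every oct
holds 2*2*2 = 8 cells, so after assign_domain_ind visits global_index + 1
octs the cell counter must be exactly eight times that. As a sketch:

    def check_oct_cell_invariant(global_index, index):
        # A full traversal counts eight cells per visited oct.
        assert (global_index + 1) * 8 == index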


https://bitbucket.org/yt_analysis/yt/commits/ade63d627a80/
Changeset:   ade63d627a80
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-28 23:50:31
Summary:     Merge
Affected #:  14 files

diff -r 90fe041ad4942ce1e6b82055875d1e5d1f7fe915 -r ade63d627a80d85174235e0bc8af16b71cd74f74 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -281,7 +281,7 @@
             chunk_fields.append(self.weight_field)
         tree = self._get_tree(len(fields))
         # We do this once
-        for chunk in self.data_source.chunks(None, "io"):
+        for chunk in self.data_source.chunks([], "io"):
             self._initialize_chunk(chunk, tree)
         # This needs to be parallel_objects-ified
         for chunk in parallel_objects(self.data_source.chunks(
@@ -429,6 +429,7 @@
         self._data_source.max_level = self.level
 
     def get_data(self, fields = None):
+        if fields is None: return
         fields = self._determine_fields(ensure_list(fields))
         fields_to_get = [f for f in fields if f not in self.field_data]
         fields_to_get = self._identify_dependencies(fields_to_get)

diff -r 90fe041ad4942ce1e6b82055875d1e5d1f7fe915 -r ade63d627a80d85174235e0bc8af16b71cd74f74 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -46,6 +46,7 @@
         pos = data[ptype, coord_name]
         d = data.deposit(pos, method = "count")
         return d
+
     registry.add_field(("deposit", "%s_count" % ptype),
              function = particle_count,
              validators = [ValidateSpatial()],
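
The particle_count field above hands raw positions to data.deposit with
method = "count". In isolation, a count deposition is just a histogram
over the grid cells; a hedged sketch using np.histogramdd (edges and
positions invented for illustration):

    import numpy as np

    def deposit_count(positions, left_edge, right_edge, dims):
        # Count the particles falling into each cell of a regular grid;
        # positions is an (N, 3) array.
        edges = [np.linspace(left_edge[i], right_edge[i], dims[i] + 1)
                 for i in range(3)]
        counts, _ = np.histogramdd(positions, bins=edges)
        return counts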

diff -r 90fe041ad4942ce1e6b82055875d1e5d1f7fe915 -r ade63d627a80d85174235e0bc8af16b71cd74f74 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -32,7 +32,7 @@
 
 from yt.funcs import *
 
-from yt.utilities.lib import CICDeposit_3, obtain_rvec, obtain_rv_vec
+from yt.utilities.lib import obtain_rvec, obtain_rv_vec
 from yt.utilities.cosmology import Cosmology
 from field_info_container import \
     add_field, \
@@ -395,7 +395,7 @@
           convert_function=_convertCellMassCode)
 
 def _TotalMass(field,data):
-    return (data["Density"]+data["Dark_Matter_Density"]) * data["CellVolume"]
+    return (data["Density"]+data[("deposit", "particle_density")]) * data["CellVolume"]
 add_field("TotalMass", function=_TotalMass, units=r"\rm{g}")
 add_field("TotalMassMsun", units=r"M_{\odot}",
           function=_TotalMass,
@@ -940,19 +940,17 @@
 add_field("JeansMassMsun",function=_JeansMassMsun,
           units=r"\rm{M_{\odot}}")
 
+# We add these fields so that the field detector can use them
+for field in ["particle_position_%s" % ax for ax in "xyz"]:
+    # This marker should let everyone know not to use the fields, but NullFunc
+    # should do that, too.
+    add_field(field, function=NullFunc, particle_type = True,
+        units=r"UNDEFINED")
+
 def _pdensity(field, data):
-    blank = np.zeros(data.ActiveDimensions, dtype='float64')
-    if data["particle_position_x"].size == 0: return blank
-    CICDeposit_3(data["particle_position_x"].astype(np.float64),
-                 data["particle_position_y"].astype(np.float64),
-                 data["particle_position_z"].astype(np.float64),
-                 data["ParticleMass"],
-                 data["particle_position_x"].size,
-                 blank, np.array(data.LeftEdge).astype(np.float64),
-                 np.array(data.ActiveDimensions).astype(np.int32),
-                 just_one(data['dx']))
-    np.divide(blank, data["CellVolume"], blank)
-    return blank
+    pmass = data[('deposit','all_mass')]
+    np.divide(pmass, data["CellVolume"], pmass)
+    return pmass
 add_field("particle_density", function=_pdensity,
           validators=[ValidateGridType()],
           display_name=r"\mathrm{Particle}\/\mathrm{Density}")
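
The rewritten _pdensity reuses the ('deposit', 'all_mass') field and
divides by cell volume instead of re-running CICDeposit_3 by hand. Note
that the three-argument np.divide writes the quotient back into pmass in
place, which is only safe if that array is a fresh copy rather than a
shared cache entry. The relationship, sketched with illustrative names:

    import numpy as np

    def particle_density(pmass, cell_volume):
        # Equivalent of np.divide(pmass, data["CellVolume"], pmass):
        # the result overwrites the deposited-mass array.
        np.divide(pmass, cell_volume, pmass)
        return pmass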

diff -r 90fe041ad4942ce1e6b82055875d1e5d1f7fe915 -r ade63d627a80d85174235e0bc8af16b71cd74f74 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -124,9 +124,9 @@
 
     def _detect_fields(self):
         self.particle_field_list = particle_fields
-        self.field_list = set(fluid_fields + particle_fields +
-                              particle_star_fields)
-        self.field_list = list(self.field_list)
+        self.field_list = [("gas", f) for f in fluid_fields]
+        self.field_list += set(particle_fields + particle_star_fields \
+                               + fluid_fields)
         # now generate all of the possible particle fields
         if "wspecies" in self.parameter_file.parameters.keys():
             wspecies = self.parameter_file.parameters['wspecies']
@@ -136,6 +136,10 @@
                 self.parameter_file.particle_types.append("specie%i" % specie)
         else:
             self.parameter_file.particle_types = []
+        for ptype in self.parameter_file.particle_types:
+            for pfield in self.particle_field_list:
+                pfn = (ptype, pfield)
+                self.field_list.append(pfn)
 
     def _setup_classes(self):
         dd = self._get_data_reader_dict()

diff -r 90fe041ad4942ce1e6b82055875d1e5d1f7fe915 -r ade63d627a80d85174235e0bc8af16b71cd74f74 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -25,6 +25,8 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 import numpy as np
+
+from yt.funcs import *
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \
@@ -40,6 +42,10 @@
 from yt.utilities.physical_constants import mass_sun_cgs
 from yt.frontends.art.definitions import *
 
+from yt.data_objects.particle_fields import \
+    particle_deposition_functions, \
+    particle_vector_functions
+
 KnownARTFields = FieldInfoContainer()
 add_art_field = KnownARTFields.add_field
 ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
@@ -218,6 +224,7 @@
               particle_type=True,
               convert_function=lambda x: x.convert("particle_mass"))
 
+
 def _particle_age(field, data):
     tr = data["particle_creation_time"]
     return data.pf.current_time - tr
@@ -260,3 +267,55 @@
     return data["particle_mass"]/mass_sun_cgs
 add_field("ParticleMassMsun", function=_ParticleMassMsun, particle_type=True,
           take_log=True, units=r"\rm{Msun}")
+
+# Particle Deposition Fields
+_ptypes = ["all", "darkmatter", "stars", "specie0"]
+
+for _ptype in _ptypes:
+    particle_vector_functions(_ptype, ["particle_position_%s" % ax for ax in 'xyz'],
+                                     ["particle_velocity_%s" % ax for ax in 'xyz'],
+                              ARTFieldInfo)
+    particle_deposition_functions(_ptype, "Coordinates", "particle_mass",
+                                   ARTFieldInfo)
+
+# Mixed Fluid-Particle Fields
+
+def baryon_density(field, data):
+    rho = data["deposit", "stars_density"]
+    rho += data["gas", "Density"]
+    return rho
+
+ARTFieldInfo.add_field(("deposit", "baryon_density"),
+         function = baryon_density,
+         validators = [ValidateSpatial()],
+         display_name = "\\mathrm{Baryon Density}",
+         units = r"\mathrm{g}/\mathrm{cm}^{3}",
+         projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+         projection_conversion = 'cm')
+
+def total_density(field, data):
+    rho = data["deposit", "baryon_density"]
+    rho += data["deposit", "specie0_density"]
+    return rho
+
+ARTFieldInfo.add_field(("deposit", "total_density"),
+         function = total_density,
+         validators = [ValidateSpatial()],
+         display_name = "\\mathrm{Total Density}",
+         units = r"\mathrm{g}/\mathrm{cm}^{3}",
+         projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+         projection_conversion = 'cm')
+
+def multimass_density(field, data):
+    rho = data["deposit", "baryon_density"]
+    rho += data["deposit", "darkmatter_density"]
+    return rho
+
+ARTFieldInfo.add_field(("deposit", "multimass_density"),
+         function = multimass_density,
+         validators = [ValidateSpatial()],
+         display_name = "\\mathrm{Multimass Density}",
+         units = r"\mathrm{g}/\mathrm{cm}^{3}",
+         projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+         projection_conversion = 'cm')
+
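
The three mixed fields are plain sums of other deposit fields evaluated
on the same grid. Stripped of the registration machinery, the layering
looks like this (the arrays stand in for yt's ('deposit', ...) entries):

    def baryon_density(stars_density, gas_density):
        # Stellar deposit plus gas density.
        return stars_density + gas_density

    def total_density(baryon_density, specie0_density):
        # Baryons plus the single-species dark matter deposit.
        return baryon_density + specie0_density

    def multimass_density(baryon_density, darkmatter_density):
        # Baryons plus the multi-mass dark matter deposit.
        return baryon_density + darkmatter_density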

diff -r 90fe041ad4942ce1e6b82055875d1e5d1f7fe915 -r ade63d627a80d85174235e0bc8af16b71cd74f74 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -43,6 +43,14 @@
 class IOHandlerART(BaseIOHandler):
     _data_style = "art"
     tb, ages = None, None
+    cache = None
+    masks = None
+    caching = True
+
+    def __init__(self):
+        self.cache = {}
+        self.masks = {}
+        super(IOHandlerART, self).__init__()
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         # Chunks in this case will have affiliated domain subset objects
@@ -69,85 +77,103 @@
             d[field] = np.concatenate(tr.pop(field))
         return d
 
-    def _read_particle_selection(self, chunks, selector, fields):
+    def _get_mask(self, selector, ftype):
+        key = (selector, ftype)
+        if key in self.masks.keys() and self.caching:
+            return self.masks[key]
+        pf = self.pf
+        ptmax = self.ws[-1]
+        pbool, idxa, idxb = _determine_field_size(pf, ftype, self.ls, ptmax)
+        pstr = 'particle_position_%s'
+        x,y,z = [self._get_field((ftype, pstr % ax)) for ax in 'xyz']
+        mask = selector.select_points(x, y, z)
+        if self.caching:
+            self.masks[key] = mask
+            return self.masks[key]
+        else:
+            return mask
+
+    def _get_field(self,  field):
+        if field in self.cache.keys() and self.caching:
+            mylog.debug("Cached %s", str(field))
+            return self.cache[field]
+        mylog.debug("Reading %s", str(field))
         tr = {}
-        fields_read = []
-        for chunk in chunks:
-            level = chunk.objs[0].domain.domain_level
-            pf = chunk.objs[0].domain.pf
-            masks = {}
-            ws, ls = pf.parameters["wspecies"], pf.parameters["lspecies"]
-            sizes = np.diff(np.concatenate(([0], ls)))
-            ptmax = ws[-1]
-            npt = ls[-1]
-            nstars = ls[-1]-ls[-2]
-            file_particle = pf._file_particle_data
-            file_stars = pf._file_particle_stars
-            ftype_old = None
-            for field in fields:
-                if field in fields_read:
-                    continue
-                ftype, fname = field
-                pbool, idxa, idxb = _determine_field_size(pf, ftype, ls, ptmax)
-                npa = idxb-idxa
-                if not ftype_old == ftype:
-                    Nrow = pf.parameters["Nrow"]
-                    rp = lambda ax: read_particles(
-                        file_particle, Nrow, idxa=idxa,
-                        idxb=idxb, field=ax)
-                    x, y, z = (rp(ax) for ax in 'xyz')
-                    dd = pf.domain_dimensions[0]
-                    off = 1.0/dd
-                    x, y, z = (t.astype('f8')/dd - off for t in (x, y, z))
-                    mask = selector.select_points(x, y, z)
-                    size = mask.sum()
-                for i, ax in enumerate('xyz'):
-                    if fname.startswith("particle_position_%s" % ax):
-                        tr[field] = vars()[ax]
-                    if fname.startswith("particle_velocity_%s" % ax):
-                        tr[field] = rp('v'+ax)
-                if fname == "particle_mass":
-                    a = 0
-                    data = np.zeros(npa, dtype='f8')
-                    for ptb, size, m in zip(pbool, sizes, ws):
-                        if ptb:
-                            data[a:a+size] = m
-                            a += size
-                    tr[field] = data
-                elif fname == "particle_index":
-                    tr[field] = np.arange(idxa, idxb).astype('int64')
-                elif fname == "particle_type":
-                    a = 0
-                    data = np.zeros(npa, dtype='int')
-                    for i, (ptb, size) in enumerate(zip(pbool, sizes)):
-                        if ptb:
-                            data[a:a+size] = i
-                            a += size
-                    tr[field] = data
-                if pbool[-1] and fname in particle_star_fields:
-                    data = read_star_field(file_stars, field=fname)
-                    temp = tr.get(field, np.zeros(npa, 'f8'))
-                    if nstars > 0:
-                        temp[-nstars:] = data
-                    tr[field] = temp
-                if fname == "particle_creation_time":
-                    self.tb, self.ages, data = interpolate_ages(
-                        tr[field][-nstars:],
-                        file_stars,
-                        self.tb,
-                        self.ages,
-                        pf.current_time)
-                    temp = tr.get(field, np.zeros(npa, 'f8'))
-                    temp[-nstars:] = data
-                    tr[field] = temp
-                    del data
-                tr[field] = tr[field][mask].astype('f8')
-                ftype_old = ftype
-                fields_read.append(field)
+        ftype, fname = field
+        ptmax = self.ws[-1]
+        pbool, idxa, idxb = _determine_field_size(self.pf, ftype, 
+                                                  self.ls, ptmax)
+        npa = idxb - idxa
+        sizes = np.diff(np.concatenate(([0], self.ls)))
+        rp = lambda ax: read_particles(
+            self.file_particle, self.Nrow, idxa=idxa,
+            idxb=idxb, fields=ax)
+        for i, ax in enumerate('xyz'):
+            if fname.startswith("particle_position_%s" % ax):
+                dd = self.pf.domain_dimensions[0]
+                off = 1.0/dd
+                tr[field] = rp([ax])[0]/dd - off
+            if fname.startswith("particle_velocity_%s" % ax):
+                tr[field], = rp(['v'+ax])
+        if fname == "particle_mass":
+            a = 0
+            data = np.zeros(npa, dtype='f8')
+            for ptb, size, m in zip(pbool, sizes, self.ws):
+                if ptb:
+                    data[a:a+size] = m
+                    a += size
+            tr[field] = data
+        elif fname == "particle_index":
+            tr[field] = np.arange(idxa, idxb)
+        elif fname == "particle_type":
+            a = 0
+            data = np.zeros(npa, dtype='int')
+            for i, (ptb, size) in enumerate(zip(pbool, sizes)):
+                if ptb:
+                    data[a: a + size] = i
+                    a += size
+            tr[field] = data
+        if pbool[-1] and fname in particle_star_fields:
+            data = read_star_field(self.file_stars, field=fname)
+            temp = tr.get(field, np.zeros(npa, 'f8'))
+            nstars = self.ls[-1]-self.ls[-2]
+            if nstars > 0:
+                temp[-nstars:] = data
+            tr[field] = temp
+        if fname == "particle_creation_time":
+            self.tb, self.ages, data = interpolate_ages(
+                tr[field][-nstars:],
+                self.file_stars,
+                self.tb,
+                self.ages,
+                self.pf.current_time)
+            temp = tr.get(field, np.zeros(npa, 'f8'))
+            temp[-nstars:] = data
+            tr[field] = temp
+            del data
         if tr == {}:
             tr = dict((f, np.array([])) for f in fields)
-        return tr
+        if self.caching:
+            self.cache[field] = tr[field]
+            return self.cache[field]
+        else:
+            return tr[field]
 
+    def _read_particle_selection(self, chunks, selector, fields):
+        chunk = chunks.next()
+        self.pf = chunk.objs[0].domain.pf
+        self.ws = self.pf.parameters["wspecies"]
+        self.ls = self.pf.parameters["lspecies"]
+        self.file_particle = self.pf._file_particle_data
+        self.file_stars = self.pf._file_particle_stars
+        self.Nrow = self.pf.parameters["Nrow"]
+        data = {f:np.array([]) for f in fields}
+        for f in fields:
+            ftype, fname = f
+            mask = self._get_mask(selector, ftype)
+            arr = self._get_field(f)[mask].astype('f8')
+            data[f] = np.concatenate((arr, data[f]))
+        return data
 
 def _determine_field_size(pf, field, lspecies, ptmax):
     pbool = np.zeros(len(lspecies), dtype="bool")
@@ -362,27 +388,29 @@
     return ranges
 
 
-def read_particles(file, Nrow, idxa, idxb, field):
+def read_particles(file, Nrow, idxa, idxb, fields):
     words = 6  # words (reals) per particle: x,y,z,vx,vy,vz
     real_size = 4  # for file_particle_data; not always true?
     np_per_page = Nrow**2  # defined in ART a_setup.h, # of particles/page
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
-    data = np.array([], 'f4')
     fh = open(file, 'r')
     skip, count = idxa, idxb - idxa
     kwargs = dict(words=words, real_size=real_size, 
                   np_per_page=np_per_page, num_pages=num_pages)
-    ranges = get_ranges(skip, count, field, **kwargs)
-    data = None
-    for seek, this_count in ranges:
-        fh.seek(seek)
-        temp = np.fromfile(fh, count=this_count, dtype='>f4')
-        if data is None:
-            data = temp
-        else:
-            data = np.concatenate((data, temp))
+    arrs = []
+    for field in fields:
+        ranges = get_ranges(skip, count, field, **kwargs)
+        data = None
+        for seek, this_count in ranges:
+            fh.seek(seek)
+            temp = np.fromfile(fh, count=this_count, dtype='>f4')
+            if data is None:
+                data = temp
+            else:
+                data = np.concatenate((data, temp))
+        arrs.append(data.astype('f8'))
     fh.close()
-    return data
+    return arrs
 
 
 def read_star_field(file, field=None):
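
The refactored IOHandlerART memoizes selection masks by (selector, ftype)
and field arrays by (ftype, fname), so repeated selections re-read
nothing from disk. The caching skeleton, reduced to its essentials
(class and method names are illustrative):

    class CachingReader(object):
        def __init__(self, caching=True):
            self.caching = caching
            self.cache = {}

        def get(self, key, compute):
            # Return the cached value when caching is on, else recompute.
            if self.caching and key in self.cache:
                return self.cache[key]
            value = compute()
            if self.caching:
                self.cache[key] = value
            return value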

diff -r 90fe041ad4942ce1e6b82055875d1e5d1f7fe915 -r ade63d627a80d85174235e0bc8af16b71cd74f74 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -361,41 +361,23 @@
     f.take_log = False
 
 def _spdensity(field, data):
-    blank = np.zeros(data.ActiveDimensions, dtype='float64')
-    if data["particle_position_x"].size == 0: return blank
     filter = data['creation_time'] > 0.0
-    if not filter.any(): return blank
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
-                           data["particle_position_y"][filter].astype(np.float64),
-                           data["particle_position_z"][filter].astype(np.float64),
-                           data["particle_mass"][filter],
-                           np.int64(np.where(filter)[0].size),
-                           blank, np.array(data.LeftEdge).astype(np.float64),
-                           np.array(data.ActiveDimensions).astype(np.int32), 
-                           just_one(data['dx']))
-    return blank
+    pos = data["all", "Coordinates"][filter, :]
+    d = data.deposit(pos, [data['all', 'Mass'][filter]], method='sum')
+    d /= data['CellVolume']
+    return d
 add_field("star_density", function=_spdensity,
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
 
 def _dmpdensity(field, data):
-    blank = np.zeros(data.ActiveDimensions, dtype='float64')
-    if data["particle_position_x"].size == 0: return blank
     if 'creation_time' in data.pf.field_info:
         filter = data['creation_time'] <= 0.0
-        if not filter.any(): return blank
-        num = filter.sum()
     else:
         filter = Ellipsis
-        num = data["particle_position_x"].size
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
-                           data["particle_position_y"][filter].astype(np.float64),
-                           data["particle_position_z"][filter].astype(np.float64),
-                           data["particle_mass"][filter],
-                           num,
-                           blank, np.array(data.LeftEdge).astype(np.float64),
-                           np.array(data.ActiveDimensions).astype(np.int32), 
-                           just_one(data['dx']))
-    return blank
+    pos = data["all", "Coordinates"][filter, :]
+    d = data.deposit(pos, [data['all', 'Mass'][filter]], method='sum')
+    d /= data['CellVolume']
+    return d
 add_field("dm_density", function=_dmpdensity,
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
 
@@ -405,28 +387,17 @@
     using cloud-in-cell deposit.
     """
     particle_field = field.name[4:]
-    top = np.zeros(data.ActiveDimensions, dtype='float64')
-    if data["particle_position_x"].size == 0: return top
-    particle_field_data = data[particle_field] * data['particle_mass']
-    amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
-                           data["particle_position_y"].astype(np.float64),
-                           data["particle_position_z"].astype(np.float64),
-                           particle_field_data,
-                           data["particle_position_x"].size,
-                           top, np.array(data.LeftEdge).astype(np.float64),
-                           np.array(data.ActiveDimensions).astype(np.int32), 
-                           just_one(data['dx']))
-    del particle_field_data
-
-    bottom = np.zeros(data.ActiveDimensions, dtype='float64')
-    amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
-                           data["particle_position_y"].astype(np.float64),
-                           data["particle_position_z"].astype(np.float64),
-                           data["particle_mass"],
-                           data["particle_position_x"].size,
-                           bottom, np.array(data.LeftEdge).astype(np.float64),
-                           np.array(data.ActiveDimensions).astype(np.int32), 
-                           just_one(data['dx']))
+    pos = data[('all', 'Coordinates')]
+    top = data.deposit(
+        pos,
+        [data[('all', particle_field)]*data[('all', 'particle_mass')]],
+        method = 'cic'
+        )
+    bottom = data.deposit(
+        pos,
+        [data[('all', 'particle_mass')]],
+        method = 'cic'
+        )
     top[bottom == 0] = 0.0
     bnz = bottom.nonzero()
     top[bnz] /= bottom[bnz]
@@ -444,30 +415,18 @@
     Create a grid field for star quantities, weighted by star mass.
     """
     particle_field = field.name[5:]
-    top = np.zeros(data.ActiveDimensions, dtype='float64')
-    if data["particle_position_x"].size == 0: return top
     filter = data['creation_time'] > 0.0
-    if not filter.any(): return top
-    particle_field_data = data[particle_field][filter] * data['particle_mass'][filter]
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
-                          data["particle_position_y"][filter].astype(np.float64),
-                          data["particle_position_z"][filter].astype(np.float64),
-                          particle_field_data,
-                          np.int64(np.where(filter)[0].size),
-                          top, np.array(data.LeftEdge).astype(np.float64),
-                          np.array(data.ActiveDimensions).astype(np.int32), 
-                          just_one(data['dx']))
-    del particle_field_data
-
-    bottom = np.zeros(data.ActiveDimensions, dtype='float64')
-    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
-                          data["particle_position_y"][filter].astype(np.float64),
-                          data["particle_position_z"][filter].astype(np.float64),
-                          data["particle_mass"][filter],
-                          np.int64(np.where(filter)[0].size),
-                          bottom, np.array(data.LeftEdge).astype(np.float64),
-                          np.array(data.ActiveDimensions).astype(np.int32), 
-                          just_one(data['dx']))
+    pos = data['all', 'Coordinates'][filter, :]
+    top = data.deposit(
+        pos,
+        [data['all', particle_field][filter]*data['all', 'Mass'][filter]],
+        method='sum'
+        )
+    bottom = data.deposit(
+        pos,
+        [data['all', 'Mass'][filter]],
+        method='sum'
+        )
     top[bottom == 0] = 0.0
     bnz = bottom.nonzero()
     top[bnz] /= bottom[bnz]
@@ -531,8 +490,8 @@
 
 for pf in ["type", "mass"] + \
           ["position_%s" % ax for ax in 'xyz']:
-    add_enzo_field(("all", "particle_%s" % pf), NullFunc, particle_type=True)
-    
+    add_enzo_field(('all',"particle_%s" % pf), NullFunc, particle_type=True)
+
 def _convRetainInt(data):
     return 1
 add_enzo_field(("all", "particle_index"), function=NullFunc,
@@ -549,10 +508,10 @@
               particle_type=True)
 
 for pf in ["creation_time", "dynamical_time", "metallicity_fraction"]:
-    add_enzo_field(("all", pf), function=NullFunc,
+    add_enzo_field(pf, function=NullFunc,
               validators = [ValidateDataField(pf)],
               particle_type=True)
-add_field("particle_mass", function=NullFunc, particle_type=True)
+add_field(('all', "particle_mass"), function=NullFunc, particle_type=True)
 
 def _ParticleAge(field, data):
     current_time = data.pf.current_time
@@ -564,7 +523,7 @@
           particle_type=True, convert_function=_convertParticleAge)
 
 def _ParticleMass(field, data):
-    particles = data["particle_mass"].astype('float64') * \
+    particles = data['all', "particle_mass"].astype('float64') * \
                 just_one(data["CellVolumeCode"].ravel())
     # Note that we mandate grid-type here, so this is okay
     return particles

diff -r 90fe041ad4942ce1e6b82055875d1e5d1f7fe915 -r ade63d627a80d85174235e0bc8af16b71cd74f74 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -285,6 +285,8 @@
         self.periodicity = self.stream_handler.periodicity
         self.domain_dimensions = self.stream_handler.domain_dimensions
         self.current_time = self.stream_handler.simulation_time
+        self.parameters['Gamma'] = 5.0/3.0
+        self.parameters['EOSType'] = -1
         if self.stream_handler.cosmology_simulation:
             self.cosmological_simulation = 1
             self.current_redshift = self.stream_handler.current_redshift

diff -r 90fe041ad4942ce1e6b82055875d1e5d1f7fe915 -r ade63d627a80d85174235e0bc8af16b71cd74f74 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -54,7 +54,7 @@
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         chunks = list(chunks)
-        if any((ftype != "gas" for ftype, fname in fields)):
+        if any((ftype not in ("gas", "deposit") for ftype, fname in fields)):
             raise NotImplementedError
         rv = {}
         for field in fields:
@@ -65,6 +65,8 @@
                     size, [f2 for f1, f2 in fields], ng)
         for field in fields:
             ftype, fname = field
+            if ftype == 'deposit':
+                fname = field
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:

diff -r 90fe041ad4942ce1e6b82055875d1e5d1f7fe915 -r ade63d627a80d85174235e0bc8af16b71cd74f74 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -42,6 +42,7 @@
 from yt.utilities.logger import ytLogger as mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_splitter
+from yt.utilities.exceptions import YTFieldNotFound
 
 class GeometryHandler(ParallelAnalysisInterface):
     _global_mesh = True
@@ -189,6 +190,9 @@
             try:
                 fd = fi[field].get_dependencies(pf = self.parameter_file)
             except Exception as e:
+                if type(e) != YTFieldNotFound:
+                    mylog.debug("Exception %s raised during field detection" %
+                                str(type(e)))
                 continue
             missing = False
             # This next bit checks that we can't somehow generate everything.

diff -r 90fe041ad4942ce1e6b82055875d1e5d1f7fe915 -r ade63d627a80d85174235e0bc8af16b71cd74f74 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -359,3 +359,39 @@
         return self.ofield
 
 deposit_cic = CICDeposit
+
+cdef class WeightedMeanParticleField(ParticleDepositOperation):
+    # Deposit both mass * field and mass into two scalars
+    # then in finalize divide mass * field / mass
+    cdef np.float64_t *wf
+    cdef public object owf
+    cdef np.float64_t *w
+    cdef public object ow
+    def initialize(self):
+        self.owf = np.zeros(self.nvals, dtype='float64')
+        cdef np.ndarray wfarr = self.owf
+        self.wf = <np.float64_t*> wfarr.data
+        
+        self.ow = np.zeros(self.nvals, dtype='float64')
+        cdef np.ndarray warr = self.ow
+        self.w = <np.float64_t*> warr.data
+    
+    @cython.cdivision(True)
+    cdef void process(self, int dim[3],
+                      np.float64_t left_edge[3], 
+                      np.float64_t dds[3],
+                      np.int64_t offset, 
+                      np.float64_t ppos[3],
+                      np.float64_t *fields 
+                      ):
+        cdef int ii[3], i
+        for i in range(3):
+            ii[i] = <int>((ppos[i] - left_edge[i]) / dds[i])
+        self.w[ gind(ii[0], ii[1], ii[2], dim) + offset] += fields[1]
+        self.wf[gind(ii[0], ii[1], ii[2], dim) + offset] += fields[0] * fields[1]
+        
+    def finalize(self):
+        return self.owf / self.ow
+
+deposit_weighted_mean = WeightedMeanParticleField
+
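
WeightedMeanParticleField accumulates sum(weight * field) and sum(weight)
per cell in process() and divides them in finalize(). The same
two-accumulator scheme over a flattened cell index, sketched in NumPy
(cells that receive no particles come out nan, matching owf / ow):

    import numpy as np

    def weighted_mean_deposit(cell_index, field_vals, weights, n_cells):
        wf = np.zeros(n_cells)   # sum of weight * field per cell
        w = np.zeros(n_cells)    # sum of weight per cell
        np.add.at(wf, cell_index, field_vals * weights)
        np.add.at(w, cell_index, weights)
        return wf / w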

diff -r 90fe041ad4942ce1e6b82055875d1e5d1f7fe915 -r ade63d627a80d85174235e0bc8af16b71cd74f74 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -148,7 +148,7 @@
         fields = getattr(self.data_source, "fields", [])
         fields += getattr(self.data_source, "field_data", {}).keys()
         for f in fields:
-            if f not in exclude:
+            if f not in exclude and f[0] not in self.data_source.pf.particle_types:
                 self[f]
 
     def _get_info(self, item):


https://bitbucket.org/yt_analysis/yt/commits/ebe8fb5fbf5a/
Changeset:   ebe8fb5fbf5a
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-26 03:29:07
Summary:     Added debug info when guessing the ftype
Affected #:  1 file

diff -r 064dc07441c86efd0a32d616b013fdd77a7a88d1 -r ebe8fb5fbf5a6737bdeb219a68c9ac300f31320f yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -264,6 +264,8 @@
             self._last_finfo = self.field_info[(ftype, fname)]
             return self._last_finfo
         if fname == self._last_freq[1]:
+            mylog.debug("Guessing field %s is (%s, %s)", fname,
+                        self._last_freq[0], self._last_freq[1])
             return self._last_finfo
         if fname in self.field_info:
             self._last_freq = field
@@ -274,6 +276,8 @@
         if guessing_type and ("all", fname) in self.field_info:
             self._last_freq = ("all", fname)
             self._last_finfo = self.field_info["all", fname]
+            mylog.debug("Guessing field %s is (%s, %s)", fname,
+                        "all", fname)
             return self._last_finfo
         raise YTFieldNotFound((ftype, fname), self)
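
The debug lines make both guessing paths in _get_field_info visible: an
exact (ftype, fname) hit wins, then the most recent lookup matching on
fname alone, then the ("all", fname) catch-all. The fallback order,
sketched (a dictionary lookup stands in for the FieldInfoContainer):

    def guess_field(field_info, last_freq, field):
        ftype, fname = field
        if (ftype, fname) in field_info:
            return (ftype, fname)
        if last_freq and fname == last_freq[1]:
            return last_freq          # reuse the last guessed type
        if ("all", fname) in field_info:
            return ("all", fname)     # catch-all particle type
        raise KeyError(field)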
 


https://bitbucket.org/yt_analysis/yt/commits/4010d0e4f63a/
Changeset:   4010d0e4f63a
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-06-26 03:32:13
Summary:     Added particle spherical coordinates
Affected #:  1 file

diff -r ebe8fb5fbf5a6737bdeb219a68c9ac300f31320f -r 4010d0e4f63ac8ad15c8655118dc9ac3c8822b72 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -5,6 +5,8 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
+Author: Chris Moody <chrisemoody at gmail.com>
+Affiliation: UCSC
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2008-2011 Matthew Turk.  All Rights Reserved.
@@ -871,6 +873,66 @@
 add_field("RadialVelocityKMSABS", function=_RadialVelocityABS,
           convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
 
+def _ParticleRadialVelocity(field, data):
+    normal = data.get_field_parameter('normal')
+    center = data.get_field_parameter('center')
+    bv = data.get_field_parameter("bulk_velocity")
+    pos = "particle_position_%s"
+    pos = np.array([data[pos % ax] for ax in "xyz"])
+    vel = "particle_velocity_%s"
+    vel = np.array([data[vel % ax] for ax in "xyz"])
+    theta = get_sph_theta(pos.copy(), center)
+    phi = get_sph_phi(pos.copy(), center)
+    pos = pos - np.reshape(center, (3, 1))
+    vel = vel - np.reshape(bv, (3, 1))
+    sphr = get_sph_r_component(vel, theta, phi, normal)
+    return sphr
+
+add_field("ParticleRadialVelocity", function=_ParticleRadialVelocity,
+          particle_type=True, units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal"), 
+                      ValidateParameter("center")])
+
+def _ParticleThetaVelocity(field, data):
+    normal = data.get_field_parameter('normal')
+    center = data.get_field_parameter('center')
+    bv = data.get_field_parameter("bulk_velocity")
+    pos = "particle_position_%s"
+    pos = np.array([data[pos % ax] for ax in "xyz"])
+    vel = "particle_velocity_%s"
+    vel = np.array([data[vel % ax] for ax in "xyz"])
+    theta = get_sph_theta(pos.copy(), center)
+    phi = get_sph_phi(pos.copy(), center)
+    pos = pos - np.reshape(center, (3, 1))
+    vel = vel - np.reshape(bv, (3, 1))
+    spht = get_sph_theta_component(vel, theta, phi, normal)
+    return spht
+
+add_field("ParticleThetaVelocity", function=_ParticleThetaVelocity,
+          particle_type=True, units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal"), 
+                      ValidateParameter("center")])
+
+def _ParticlePhiVelocity(field, data):
+    normal = data.get_field_parameter('normal')
+    center = data.get_field_parameter('center')
+    bv = data.get_field_parameter("bulk_velocity")
+    pos = "particle_position_%s"
+    pos = np.array([data[pos % ax] for ax in "xyz"])
+    vel = "particle_velocity_%s"
+    vel = np.array([data[vel % ax] for ax in "xyz"])
+    theta = get_sph_theta(pos.copy(), center)
+    phi = get_sph_phi(pos.copy(), center)
+    pos = pos - np.reshape(center, (3, 1))
+    vel = vel - np.reshape(bv, (3, 1))
+    sphp = get_sph_phi_component(vel, theta, phi, normal)
+    return sphp
+
+add_field("ParticlePhiVelocity", function=_ParticleThetaVelocity,
+          particle_type=True, units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal"), 
+                      ValidateParameter("center")])
+
 def _TangentialVelocity(field, data):
     return np.sqrt(data["VelocityMagnitude"]**2.0
                  - data["RadialVelocity"]**2.0)
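
Each of the three new fields assembles positions and velocities into
(3, N) arrays, subtracts the center and bulk velocity, and projects onto
a spherical unit vector via the get_sph_*_component helpers. For the
radial component that projection is a dot product with r-hat, as in this
self-contained sketch:

    import numpy as np

    def particle_radial_velocity(pos, vel, center, bulk_velocity):
        # pos and vel are (3, N) arrays of particle data.
        rvec = pos - np.reshape(center, (3, 1))
        vpec = vel - np.reshape(bulk_velocity, (3, 1))
        rhat = rvec / np.sqrt((rvec ** 2).sum(axis=0))
        return (vpec * rhat).sum(axis=0)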


https://bitbucket.org/yt_analysis/yt/commits/596e05332166/
Changeset:   596e05332166
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-28 23:51:18
Summary:     Merging with Chris
Affected #:  2 files

diff -r ade63d627a80d85174235e0bc8af16b71cd74f74 -r 596e053321665b64a69ac7da02a3be845f447fe2 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -264,6 +264,8 @@
             self._last_finfo = self.field_info[(ftype, fname)]
             return self._last_finfo
         if fname == self._last_freq[1]:
+            mylog.debug("Guessing field %s is (%s, %s)", fname,
+                        self._last_freq[0], self._last_freq[1])
             return self._last_finfo
         if fname in self.field_info:
             self._last_freq = field
@@ -274,6 +276,8 @@
         if guessing_type and ("all", fname) in self.field_info:
             self._last_freq = ("all", fname)
             self._last_finfo = self.field_info["all", fname]
+            mylog.debug("Guessing field %s is (%s, %s)", fname,
+                        "all", fname)
             return self._last_finfo
         raise YTFieldNotFound((ftype, fname), self)
 

diff -r ade63d627a80d85174235e0bc8af16b71cd74f74 -r 596e053321665b64a69ac7da02a3be845f447fe2 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -5,6 +5,8 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
+Author: Chris Moody <chrisemoody at gmail.com>
+Affiliation: UCSC
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2008-2011 Matthew Turk.  All Rights Reserved.
@@ -871,6 +873,66 @@
 add_field("RadialVelocityKMSABS", function=_RadialVelocityABS,
           convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
 
+def _ParticleRadialVelocity(field, data):
+    normal = data.get_field_parameter('normal')
+    center = data.get_field_parameter('center')
+    bv = data.get_field_parameter("bulk_velocity")
+    pos = "particle_position_%s"
+    pos = np.array([data[pos % ax] for ax in "xyz"])
+    vel = "particle_velocity_%s"
+    vel = np.array([data[vel % ax] for ax in "xyz"])
+    theta = get_sph_theta(pos.copy(), center)
+    phi = get_sph_phi(pos.copy(), center)
+    pos = pos - np.reshape(center, (3, 1))
+    vel = vel - np.reshape(bv, (3, 1))
+    sphr = get_sph_r_component(vel, theta, phi, normal)
+    return sphr
+
+add_field("ParticleRadialVelocity", function=_ParticleRadialVelocity,
+          particle_type=True, units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal"), 
+                      ValidateParameter("center")])
+
+def _ParticleThetaVelocity(field, data):
+    normal = data.get_field_parameter('normal')
+    center = data.get_field_parameter('center')
+    bv = data.get_field_parameter("bulk_velocity")
+    pos = "particle_position_%s"
+    pos = np.array([data[pos % ax] for ax in "xyz"])
+    vel = "particle_velocity_%s"
+    vel = np.array([data[vel % ax] for ax in "xyz"])
+    theta = get_sph_theta(pos.copy(), center)
+    phi = get_sph_phi(pos.copy(), center)
+    pos = pos - np.reshape(center, (3, 1))
+    vel = vel - np.reshape(bv, (3, 1))
+    spht = get_sph_theta_component(vel, theta, phi, normal)
+    return spht
+
+add_field("ParticleThetaVelocity", function=_ParticleThetaVelocity,
+          particle_type=True, units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal"), 
+                      ValidateParameter("center")])
+
+def _ParticlePhiVelocity(field, data):
+    normal = data.get_field_parameter('normal')
+    center = data.get_field_parameter('center')
+    bv = data.get_field_parameter("bulk_velocity")
+    pos = "particle_position_%s"
+    pos = np.array([data[pos % ax] for ax in "xyz"])
+    vel = "particle_velocity_%s"
+    vel = np.array([data[vel % ax] for ax in "xyz"])
+    theta = get_sph_theta(pos.copy(), center)
+    phi = get_sph_phi(pos.copy(), center)
+    pos = pos - np.reshape(center, (3, 1))
+    vel = vel - np.reshape(bv, (3, 1))
+    sphp = get_sph_phi_component(vel, theta, phi, normal)
+    return sphp
+
+add_field("ParticlePhiVelocity", function=_ParticleThetaVelocity,
+          particle_type=True, units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal"), 
+                      ValidateParameter("center")])
+
 def _TangentialVelocity(field, data):
     return np.sqrt(data["VelocityMagnitude"]**2.0
                  - data["RadialVelocity"]**2.0)


https://bitbucket.org/yt_analysis/yt/commits/9e599fd7cad4/
Changeset:   9e599fd7cad4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-29 06:34:15
Summary:     Fixed NMSU-ART and removed subclass of OctreeContainer.
Affected #:  2 files

diff -r 596e053321665b64a69ac7da02a3be845f447fe2 -r 9e599fd7cad48f5e65df5ef3d441ca2a033acfa6 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -43,7 +43,7 @@
 from yt.data_objects.octree_subset import \
     OctreeSubset
 from yt.geometry.oct_container import \
-    ARTOctreeContainer
+    OctreeContainer
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, NullFunc
 from .fields import \
@@ -106,10 +106,11 @@
         allocate the requisite memory in the oct tree
         """
         nv = len(self.fluid_field_list)
-        self.oct_handler = ARTOctreeContainer(
+        self.oct_handler = OctreeContainer(
             self.parameter_file.domain_dimensions/2,  # dd is # of root cells
             self.parameter_file.domain_left_edge,
-            self.parameter_file.domain_right_edge)
+            self.parameter_file.domain_right_edge,
+            1)
         # The 1 here refers to domain_id == 1 always for ARTIO.
         self.domains = [ARTDomainFile(self.parameter_file, nv, 
                                       self.oct_handler, 1)]

diff -r 596e053321665b64a69ac7da02a3be845f447fe2 -r 9e599fd7cad48f5e65df5ef3d441ca2a033acfa6 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -95,9 +95,10 @@
 
 cdef class OctreeContainer:
 
-    def __init__(self, oct_domain_dimensions, domain_left_edge, domain_right_edge):
+    def __init__(self, oct_domain_dimensions, domain_left_edge,
+                 domain_right_edge, partial_coverage = 0):
         # This will just initialize the root mesh octs
-        self.partial_coverage = 0
+        self.partial_coverage = partial_coverage
         cdef int i, j, k, p
         for i in range(3):
             self.nn[i] = oct_domain_dimensions[i]
@@ -742,22 +743,3 @@
         # called.
         if self.root_nodes != NULL: free(self.root_nodes)
         if self.domains != NULL: free(self.domains)
-
-cdef class ARTOctreeContainer(OctreeContainer):
-
-    def __init__(self, *args, **kwargs):
-        self.partial_coverage = 1
-        OctreeContainer.__init__(self, *args, **kwargs)
-
-    def allocate_domains(self, domain_counts):
-        cdef int count, i
-        cdef OctAllocationContainer *cur = self.cont
-        assert(cur == NULL)
-        self.max_domain = len(domain_counts) # 1-indexed
-        self.domains = <OctAllocationContainer **> malloc(
-            sizeof(OctAllocationContainer *) * len(domain_counts))
-        for i, count in enumerate(domain_counts):
-            cur = allocate_octs(count, cur)
-            if self.cont == NULL: self.cont = cur
-            self.domains[i] = cur
-


https://bitbucket.org/yt_analysis/yt/commits/2b0a401a4705/
Changeset:   2b0a401a4705
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-29 07:12:31
Summary:     I believe NMSU-ART is now correct.  Slower, though.
Affected #:  3 files

diff -r 9e599fd7cad48f5e65df5ef3d441ca2a033acfa6 -r 2b0a401a4705507b2a798666b9eadfbd5014d9d9 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -462,12 +462,17 @@
             tr[field] = np.zeros(cell_count, 'float64')
         data = _read_root_level(content, self.domain.level_child_offsets,
                                 self.domain.level_count)
-        ns = (8, self.domain.pf.domain_dimensions.prod() / 8)
-        for field, i in zip(fields, field_idxs):
-            source[field] = data[i, :]
-            source[field].shape = ns
-            source[field] = np.array(source[field], dtype="float64", order='F')
-            # Need it to be ordered correctly; this is expensive, though ...
+        ns = (self.domain.pf.domain_dimensions.prod() / 8, 8)
+        for field, fi in zip(fields, field_idxs):
+            source[field] = np.empty(ns, dtype="float64", order="C")
+            dt = data[fi,:].reshape(self.domain.pf.domain_dimensions,
+                                    order="F")
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        ii = ((k*2)+j)*2+i
+                        source[field][:,ii] = \
+                            dt[i::2,j::2,k::2].ravel(order="F")
         oct_handler.fill_level(0, levels, cell_inds, file_inds, tr, source)
         del source
         # Now we continue with the additional levels.
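
The reshuffle above converts the Fortran-ordered root grid into the
(n_octs, 8) layout that fill_level indexes by (file_ind, cell_ind): the
i::2, j::2, k::2 strides gather one child of every root oct at a time.
A toy demonstration with an invented 4^3 grid:

    import numpy as np

    dims = (4, 4, 4)  # 8 root octs of 8 cells each
    dt = np.arange(64, dtype="float64").reshape(dims, order="F")
    source = np.empty((64 // 8, 8), dtype="float64", order="C")
    for i in range(2):
        for j in range(2):
            for k in range(2):
                ii = ((k * 2) + j) * 2 + i  # child ordering from the diff
                source[:, ii] = dt[i::2, j::2, k::2].ravel(order="F")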

diff -r 9e599fd7cad48f5e65df5ef3d441ca2a033acfa6 -r 2b0a401a4705507b2a798666b9eadfbd5014d9d9 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -743,3 +743,46 @@
         # called.
         if self.root_nodes != NULL: free(self.root_nodes)
         if self.domains != NULL: free(self.domains)
+
+cdef class ARTOctreeContainer(OctreeContainer):
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def fill_level_from_grid(self, int domain, int level, dest_fields, 
+                             source_fields, 
+                             np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                             int offset):
+        #Fill the level, but instead of assuming that the source
+        #order is that of the oct order, we look up the oct position
+        #and fill its children from the source field.
+        #As a result, source is a 3D grid with 8 times as many
+        #elements as the number of octs on this level in this domain,
+        #and with the shape of an equal-sided cube.
+        cdef np.ndarray[np.float64_t, ndim=3] source
+        cdef np.ndarray[np.float64_t, ndim=1] dest
+        cdef OctAllocationContainer *dom = self.domains[domain - 1]
+        cdef Oct *o
+        cdef int n
+        cdef int i, j, k, ii
+        cdef int local_pos, local_filled
+        cdef np.float64_t val
+        cdef np.int64_t ox,oy,oz
+        for key in dest_fields:
+            local_filled = 0
+            dest = dest_fields[key]
+            source = source_fields[key]
+            for n in range(dom.n):
+                o = &dom.my_octs[n]
+                #if o.level != level: continue
+                for i in range(2):
+                    for j in range(2):
+                        for k in range(2):
+                            ii = ((k*2)+j)*2+i
+                            if mask[o.domain_ind, ii] == 0: continue
+                            ox = (o.pos[0] << 1) + i
+                            oy = (o.pos[1] << 1) + j
+                            oz = (o.pos[2] << 1) + k
+                            dest[local_filled + offset] = source[ox,oy,oz]
+                            local_filled += 1
+        return local_filled

diff -r 9e599fd7cad48f5e65df5ef3d441ca2a033acfa6 -r 2b0a401a4705507b2a798666b9eadfbd5014d9d9 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -162,5 +162,5 @@
     cdef np.uint8_t *cell_arr = <np.uint8_t *> p[2]
     level_arr[data.index] = data.level
     find_arr[data.index] = o.file_ind
-    cell_arr[data.index] = rind(data)
+    cell_arr[data.index] = oind(data)
     data.index +=1
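
For reference, here is a minimal NumPy sketch of the root-level reordering introduced above: the flat Fortran-ordered field is reshaped to 3D, and each of the eight child cells of every root oct is pulled out with strided slicing into column ii = ((k*2)+j)*2+i. The 4^3 grid and the field values below are hypothetical.

    import numpy as np

    # Hypothetical 4^3 root grid; the flat array stands in for one field
    # as read by _read_root_level.
    dims = np.array([4, 4, 4])
    data = np.arange(dims.prod(), dtype="float64")

    # Reshape back to 3D with Fortran ordering, as in the commit.
    dt = data.reshape(dims, order="F")

    # Each root oct owns a 2x2x2 block of cells; strided slicing pulls
    # child (i, j, k) of every oct at once.
    source = np.empty((dims.prod() // 8, 8), dtype="float64")
    for i in range(2):
        for j in range(2):
            for k in range(2):
                ii = ((k * 2) + j) * 2 + i
                source[:, ii] = dt[i::2, j::2, k::2].ravel(order="F")

    # Row n of source now holds the 8 children of oct n in the order
    # the oct handler's fill_level expects.
    print(source.shape)  # (8, 8)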


https://bitbucket.org/yt_analysis/yt/commits/cf4f6706a4d6/
Changeset:   cf4f6706a4d6
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-29 17:11:30
Summary:     Switching which endpoint is generated forward and which is generated backward.

I think this addresses Sam's concern -- thanks, Sam!
Affected #:  1 file

diff -r b8b8621258376d8a8e694fb58358f5d381639382 -r cf4f6706a4d6b85e43b3c746c54c078f36b78098 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -524,8 +524,8 @@
     generate_endpoints = len(objects) != my_size
     if generate_endpoints and mutable:
         raise NotImplementedError
-    gforw = generate_endpoints and my_rank == 0
-    gback = generate_endpoints and my_rank == my_size - 1
+    gforw = generate_endpoints and my_rank == my_size - 1
+    gback = generate_endpoints and my_rank == 0
     # Now we need to do pairwise sends
     source = (my_rank - 1) % my_size
     dest = (my_rank + 1) % my_size


https://bitbucket.org/yt_analysis/yt/commits/dc6171491b34/
Changeset:   dc6171491b34
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-29 17:53:19
Summary:     Intermediate commit.  Things work except for len(objects) > my_size.
Affected #:  1 file

diff -r cf4f6706a4d6b85e43b3c746c54c078f36b78098 -r dc6171491b3433f939bc16cb22ec59b56cddc1c2 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -522,10 +522,23 @@
             yield obj, generator_func(obj)
         return
     generate_endpoints = len(objects) != my_size
+    # gforw means: should we expect one from forwards?
+    # gback means: do we send this object backwards?
+    if len(objects) > my_size:
+        # In this case, the first processor (my_rank == 0) will generate.
+        generate_endpoints = True
+        gback = (my_rank > 0)
+        gforw = (my_rank + 1 < my_size)
+    elif len(objects) < my_size:
+        generate_endpoints = True
+        gback = (my_rank > 0)
+        gforw = (my_rank + 1 < len(objects))
+    else: # Length of objects is equal to my_size
+        generate_endpoints = False
+        gback = True
+        gforw = True
     if generate_endpoints and mutable:
         raise NotImplementedError
-    gforw = generate_endpoints and my_rank == my_size - 1
-    gback = generate_endpoints and my_rank == 0
     # Now we need to do pairwise sends
     source = (my_rank - 1) % my_size
     dest = (my_rank + 1) % my_size


https://bitbucket.org/yt_analysis/yt/commits/d850e79e9160/
Changeset:   d850e79e9160
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-29 19:05:17
Summary:     Had the forward/backward directions wrong.
Affected #:  1 file

diff -r dc6171491b3433f939bc16cb22ec59b56cddc1c2 -r d850e79e916033d8e3504ff9cf7085a5b5ad10a7 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -522,26 +522,22 @@
             yield obj, generator_func(obj)
         return
     generate_endpoints = len(objects) != my_size
-    # gforw means: should we expect one from forwards?
-    # gback means: do we send this object backwards?
-    if len(objects) > my_size:
+    # gback False: send the object backwards
+    # gforw False: receive an object from forwards
+    if len(objects) == my_size:
+        generate_endpoints = False
+        gback = False
+        gforw = False
+    else:
         # In this case, the first processor (my_rank == 0) will generate.
         generate_endpoints = True
-        gback = (my_rank > 0)
-        gforw = (my_rank + 1 < my_size)
-    elif len(objects) > my_size:
-        generate_endpoints = True
-        gback = (my_rank > 0)
-        gforw = (my_rank + 1 < len(objects))
-    else: # Length of objects is equal to my_size
-        generate_endpoints = False
-        gback = True
-        gforw = True
+        gback = (my_rank == 0)
+        gforw = (my_rank == my_size - 1)
     if generate_endpoints and mutable:
         raise NotImplementedError
     # Now we need to do pairwise sends
-    source = (my_rank - 1) % my_size
-    dest = (my_rank + 1) % my_size
+    source = (my_rank + 1) % my_size
+    dest = (my_rank - 1) % my_size
     oiter = itertools.islice(itertools.cycle(objects),
                              my_rank, my_rank+len(objects))
     idata = None
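
The ring logic above is easier to see in isolation. A pure-Python sketch (no MPI; the ranks are simulated in a loop) of how itertools.cycle plus islice hands each rank a rotated view of the object list, with objects flowing from rank+1 to rank-1:

    import itertools

    # Hypothetical sizes; under yt these come from the MPI communicator.
    my_size = 4
    objects = ["a", "b", "c", "d"]  # the len(objects) == my_size case

    for my_rank in range(my_size):
        # Each rank starts at its own offset and walks the cycle once,
        # so collectively every (rank, object) pair is visited exactly once.
        oiter = itertools.islice(itertools.cycle(objects),
                                 my_rank, my_rank + len(objects))
        # Receive from the next rank, send to the previous one, matching
        # the corrected source/dest above.
        source = (my_rank + 1) % my_size
        dest = (my_rank - 1) % my_size
        print(my_rank, list(oiter), "recv <-", source, "send ->", dest)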


https://bitbucket.org/yt_analysis/yt/commits/e0d63962d093/
Changeset:   e0d63962d093
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-29 19:32:12
Summary:     Merging
Affected #:  1 file

diff -r 2b0a401a4705507b2a798666b9eadfbd5014d9d9 -r e0d63962d0930c0a5207a8af4d9df8506f80a66e yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -522,13 +522,22 @@
             yield obj, generator_func(obj)
         return
     generate_endpoints = len(objects) != my_size
+    # gback False: send the object backwards
+    # gforw False: receive an object from forwards
+    if len(objects) == my_size:
+        generate_endpoints = False
+        gback = False
+        gforw = False
+    else:
+        # In this case, the first processor (my_rank == 0) will generate.
+        generate_endpoints = True
+        gback = (my_rank == 0)
+        gforw = (my_rank == my_size - 1)
     if generate_endpoints and mutable:
         raise NotImplementedError
-    gforw = generate_endpoints and my_rank == 0
-    gback = generate_endpoints and my_rank == my_size - 1
     # Now we need to do pairwise sends
-    source = (my_rank - 1) % my_size
-    dest = (my_rank + 1) % my_size
+    source = (my_rank + 1) % my_size
+    dest = (my_rank - 1) % my_size
     oiter = itertools.islice(itertools.cycle(objects),
                              my_rank, my_rank+len(objects))
     idata = None


https://bitbucket.org/yt_analysis/yt/commits/ebcc89db0a1d/
Changeset:   ebcc89db0a1d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-29 20:39:25
Summary:     Adding comment that we didn't invent these constants.  :)
Affected #:  1 file

diff -r e0d63962d0930c0a5207a8af4d9df8506f80a66e -r ebcc89db0a1db3f92c042ec30efe40349a3babef yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -300,6 +300,7 @@
             positions[i, j] = p[j]
     return positions
 
+# yt did not invent these! :)
 cdef np.uint64_t _const20 = 0x000001FFC00003FF
 cdef np.uint64_t _const10 = 0x0007E007C00F801F
 cdef np.uint64_t _const04 = 0x00786070C0E181C3
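
These are the standard magic constants for spreading bits apart when computing Morton (Z-order) indices. A Python sketch of the idea, using one widely published variant of the constant sequence (not necessarily the exact constants above): each coordinate's bits are spread three positions apart, then the three coordinates are interleaved.

    def spread_bits(x):
        # Spread the low 21 bits of x so consecutive bits land 3 apart.
        x &= 0x1FFFFF
        x = (x | (x << 32)) & 0x001F00000000FFFF
        x = (x | (x << 16)) & 0x001F0000FF0000FF
        x = (x | (x << 8))  & 0x100F00F00F00F00F
        x = (x | (x << 4))  & 0x10C30C30C30C30C3
        x = (x | (x << 2))  & 0x1249249249249249
        return x

    def morton_index(ix, iy, iz):
        # Interleave z, y, x into bits 0, 1, 2 (repeating).
        return (spread_bits(iz) << 0) | (spread_bits(iy) << 1) | (spread_bits(ix) << 2)

    print(bin(morton_index(1, 0, 0)))  # 0b100: the x bit lands at position 2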


https://bitbucket.org/yt_analysis/yt/commits/b153b797d39f/
Changeset:   b153b797d39f
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-29 20:44:20
Summary:     Fixing masks, etc. for grid patch data.  ``blocks`` is not functional for octs yet.
Affected #:  1 file

diff -r ebcc89db0a1db3f92c042ec30efe40349a3babef -r b153b797d39f7557edb64c570fedd8902d008c85 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -408,9 +408,10 @@
     def blocks(self):
         for io_chunk in self.chunks([], "io"):
             for i,chunk in enumerate(self.chunks([], "spatial", ngz = 0)):
-                mask = self._current_chunk.objs[0].select(self.selector)
+                g = self._current_chunk.objs[0]
+                mask = g._get_selector_mask(self.selector)
                 if mask is None: continue
-                yield self._current_chunk.objs[0], mask
+                yield g, mask
 
 class GenerationInProgress(Exception):
     def __init__(self, fields):


https://bitbucket.org/yt_analysis/yt/commits/c0fe3330521d/
Changeset:   c0fe3330521d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-29 20:48:11
Summary:     Fixing IO for Enzo, FLASH, GDF and Stream with new select() args.
Affected #:  5 files

diff -r b153b797d39f7557edb64c570fedd8902d008c85 -r c0fe3330521d10ec0d0219f9668440df0eb75e83 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -497,7 +497,7 @@
     def select(self, selector, source, dest, offset):
         mask = self._get_selector_mask(selector)
         count = self.count(selector)
-        if count == 0: return
+        if count == 0: return 0
         dest[offset:offset+count] = source[mask]
         return count
 

diff -r b153b797d39f7557edb64c570fedd8902d008c85 -r c0fe3330521d10ec0d0219f9668440df0eb75e83 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -153,13 +153,10 @@
         for chunk in chunks:
             data = self._read_chunk_data(chunk, fields)
             for g in chunk.objs:
-                mask = g.select(selector)
-                if mask is None: continue
-                nd = mask.sum()
                 for field in fields:
                     ftype, fname = field
-                    gdata = data[g.id].pop(fname).swapaxes(0,2)
-                    nd = mask_fill(rv[field], ind, mask, gdata)
+                    ds = data[g.id].pop(fname).swapaxes(0,2)
+                    nd = g.select(selector, ds, rv[field], ind) # caches
                 ind += nd
                 data.pop(g.id)
         return rv

diff -r b153b797d39f7557edb64c570fedd8902d008c85 -r c0fe3330521d10ec0d0219f9668440df0eb75e83 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -92,9 +92,6 @@
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:
-                    mask = g.select(selector) # caches
-                    if mask is None: continue
                     data = ds[g.id - g._id_offset,:,:,:].transpose()
-                    rv[field][ind:ind+data.size] = data
-                    ind += data.size
+                    ind += g.select(selector, data, rv[field], ind) # caches
         return rv

diff -r b153b797d39f7557edb64c570fedd8902d008c85 -r c0fe3330521d10ec0d0219f9668440df0eb75e83 yt/frontends/gdf/io.py
--- a/yt/frontends/gdf/io.py
+++ b/yt/frontends/gdf/io.py
@@ -84,13 +84,8 @@
             ind = 0
             for chunk in chunks:
                 for grid in chunk.objs:
-                    mask = grid.select(selector)  # caches
-                    if mask is None:
-                        continue
+                    data = fhandle[field_dname(grid.id, fname)][:]
                     if self.pf.field_ordering == 1:
-                        data = fhandle[field_dname(grid.id, fname)][:].swapaxes(0, 2)[mask]
-                    else:
-                        data = fhandle[field_dname(grid.id, fname)][mask]
-                    rv[field][ind:ind + data.size] = data
-                    ind += data.size
+                        data = data.swapaxes(0, 2)
+                    ind += grid.select(selector, data, rv[field], ind) # caches
         return rv

diff -r b153b797d39f7557edb64c570fedd8902d008c85 -r c0fe3330521d10ec0d0219f9668440df0eb75e83 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -70,12 +70,8 @@
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:
-                    mask = g.select(selector) # caches
-                    if mask is None: continue
                     ds = self.fields[g.id][fname]
-                    data = ds[mask]
-                    rv[field][ind:ind+data.size] = data
-                    ind += data.size
+                    ind += g.select(selector, ds, rv[field], ind) # caches
         return rv
 
     def _read_particle_selection(self, chunks, selector, fields):
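
The common pattern after this change: select() both applies the cached selector mask and scatters the masked values into the preallocated output, returning how many values it wrote, so the frontends just accumulate the offset. A NumPy sketch of that contract, with a plain boolean mask standing in for the cached selector mask:

    import numpy as np

    def select(mask, source, dest, offset):
        # Copy masked values into the preallocated output and report
        # how many were written, as the grid-patch select() now does.
        count = int(mask.sum())
        if count == 0:
            return 0
        dest[offset:offset + count] = source[mask]
        return count

    rv = np.empty(4, dtype="float64")
    ind = 0
    ind += select(np.array([True, False, True]), np.array([1.0, 2.0, 3.0]), rv, ind)
    ind += select(np.array([False, True, True]), np.array([4.0, 5.0, 6.0]), rv, ind)
    print(rv[:ind])  # [1. 3. 5. 6.]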


https://bitbucket.org/yt_analysis/yt/commits/3d9455d76f99/
Changeset:   3d9455d76f99
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-29 21:01:11
Summary:     Not 100% happy about this, but updated ParticleOctree tests.

We should add more tests of AlwaysSelector and so on.  Out of scope at the
moment.
Affected #:  1 file

diff -r c0fe3330521d10ec0d0219f9668440df0eb75e83 -r 3d9455d76f99935ea734aeb3eb1953ab1ded32d1 yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -1,38 +1,41 @@
 from yt.testing import *
 import numpy as np
-from yt.geometry.oct_container import ParticleOctreeContainer
+from yt.geometry.particle_oct_container import ParticleOctreeContainer
+from yt.geometry.oct_container import _ORDER_MAX
+from yt.utilities.lib.geometry_utils import get_morton_indices
 import time, os
 
 NPART = 32**3
-NDIM = 64
 DLE = np.array([0.0, 0.0, 0.0])
 DRE = np.array([10.0, 10.0, 10.0])
+dx = (DRE-DLE)/(2**_ORDER_MAX)
 
 def test_add_particles_random():
     np.random.seed(int(0x4d3d3d3))
     pos = np.random.normal(0.5, scale=0.05, size=(NPART,3)) * (DRE-DLE) + DLE
+    # Now convert to integers
     for i in range(3):
         np.clip(pos[:,i], DLE[i], DRE[i], pos[:,i])
+    # Convert to integers
+    pos = np.floor((pos - DLE)/dx).astype("uint64")
+    morton = get_morton_indices(pos)
+    morton.sort()
     for ndom in [1, 2, 4, 8]:
-        octree = ParticleOctreeContainer((NDIM, NDIM, NDIM), DLE, DRE)
+        octree = ParticleOctreeContainer((1, 1, 1), DLE, DRE)
         octree.n_ref = 32
-        for dom in range(ndom):
-            octree.add(pos[dom::ndom,:], dom)
+        for dom, split in enumerate(np.array_split(morton, ndom)):
+            octree.add(split)
         octree.finalize()
         # This visits every oct.
-        lin_count = octree.linearly_count()
         tc = octree.recursively_count()
         total_count = np.zeros(len(tc), dtype="int32")
         for i in sorted(tc):
             total_count[i] = tc[i]
-        yield assert_equal, lin_count, total_count.sum()
-        mask = np.ones((total_count.sum(), 8), dtype="bool")
+        yield assert_equal, octree.nocts, total_count.sum()
         # This visits every cell -- including those covered by octs.
-        level_count  = octree.count_levels(total_count.size-1, -1, mask)
-        for dom in range(ndom):
-            level_count += octree.count_levels(total_count.size-1, dom, mask)
-        yield assert_equal, level_count[0], NDIM**3 * 8
-        yield assert_equal, level_count, total_count * 8
+        #for dom in range(ndom):
+        #    level_count += octree.count_levels(total_count.size-1, dom, mask)
+        yield assert_equal, total_count, [1, 8, 64, 104, 184, 480, 1680, 1480]
 
 if __name__=="__main__":
     for i in test_add_particles_random():


https://bitbucket.org/yt_analysis/yt/commits/c3f59cd9dd26/
Changeset:   c3f59cd9dd26
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-29 21:10:16
Summary:     Fixing size/shape by adding a _reshape_vals operation to grid patches.
Affected #:  3 files

diff -r 3d9455d76f99935ea734aeb3eb1953ab1ded32d1 -r c3f59cd9dd267915b4c4e59b76db493a775804f0 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -418,6 +418,10 @@
                     self.pf.domain_left_edge)/self.dds).astype('int64')
         self._setup_data_source()
 
+    def _reshape_vals(self, arr):
+        if len(arr.shape) == 3: return arr
+        return arr.reshape(self.ActiveDimensions, order="C")
+
     @property
     def shape(self):
         return tuple(self.ActiveDimensions.tolist())

diff -r 3d9455d76f99935ea734aeb3eb1953ab1ded32d1 -r c3f59cd9dd267915b4c4e59b76db493a775804f0 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -113,6 +113,10 @@
     def shape(self):
         return self.ActiveDimensions
 
+    def _reshape_vals(self, arr):
+        if len(arr.shape) == 3: return arr
+        return arr.reshape(self.ActiveDimensions, order="C")
+
     def _generate_container_field(self, field):
         if self._current_chunk is None:
             self.hierarchy._identify_base_chunk(self)

diff -r 3d9455d76f99935ea734aeb3eb1953ab1ded32d1 -r c3f59cd9dd267915b4c4e59b76db493a775804f0 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -98,7 +98,10 @@
           display_field = False)
 
 def _Ones(field, data):
-    return np.ones(data.ires.shape, dtype='float64')
+    tr = np.ones(data.ires.shape, dtype="float64")
+    if data._spatial:
+        return data._reshape_vals(tr)
+    return tr
 add_field("Ones", function=_Ones,
           projection_conversion="unitary",
           display_field = False)
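
_reshape_vals is a no-op for data that is already 3D and otherwise restores a flattened array to the container's shape in C order, so spatial containers hand back correctly shaped fields. A toy version, with ActiveDimensions assumed known:

    import numpy as np

    ActiveDimensions = np.array([2, 3, 4])  # hypothetical container shape

    def _reshape_vals(arr):
        if len(arr.shape) == 3:
            return arr
        return arr.reshape(ActiveDimensions, order="C")

    flat = np.ones(ActiveDimensions.prod())
    print(_reshape_vals(flat).shape)                   # (2, 3, 4)
    print(_reshape_vals(flat.reshape(2, 3, 4)).shape)  # unchanged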


https://bitbucket.org/yt_analysis/yt/commits/ffba4792808a/
Changeset:   ffba4792808a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-29 21:22:48
Summary:     Adding ires, icoords, fwidth, fcoords to CoveringGrid.

This fixes up remaining tests.
Affected #:  3 files

diff -r c3f59cd9dd267915b4c4e59b76db493a775804f0 -r ffba4792808a17b6d50d0dee75f28a32dc2c3c26 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -418,6 +418,34 @@
                     self.pf.domain_left_edge)/self.dds).astype('int64')
         self._setup_data_source()
 
+    @property
+    def icoords(self):
+        ic = np.indices(self.ActiveDimensions).astype("int64")
+        return np.column_stack([i.ravel() + gi for i, gi in
+            zip(ic, self.get_global_startindex())])
+
+    @property
+    def fwidth(self):
+        fw = np.ones((self.ActiveDimensions.prod(), 3), dtype="float64")
+        fw *= self.dds
+        return fw
+
+    @property
+    def fcoords(self):
+        LE = self.LeftEdge + self.dds/2.0
+        RE = self.RightEdge - self.dds/2.0
+        N = self.ActiveDimensions
+        fc = np.mgrid[LE[0]:RE[0]:N[0]*1j,
+                      LE[1]:RE[1]:N[1]*1j,
+                      LE[2]:RE[2]:N[2]*1j]
+        return np.column_stack([f.ravel() for f in fc])
+
+    @property
+    def ires(self):
+        tr = np.ones(self.ActiveDimensions.prod(), dtype="int64")
+        tr *= self.level
+        return tr
+
     def _reshape_vals(self, arr):
         if len(arr.shape) == 3: return arr
         return arr.reshape(self.ActiveDimensions, order="C")

diff -r c3f59cd9dd267915b4c4e59b76db493a775804f0 -r ffba4792808a17b6d50d0dee75f28a32dc2c3c26 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -264,6 +264,11 @@
                 lambda: np.ones((nd * nd * nd), dtype='float64')
                 + 1e-4*np.random.random((nd * nd * nd)))
 
+    def _reshape_vals(self, arr):
+        if not self._spatial: return arr
+        if len(arr.shape) == 3: return arr
+        return arr.reshape(self.ActiveDimensions, order="C")
+
     def __missing__(self, item):
         if hasattr(self.pf, "field_info") and isinstance(item, tuple):
             finfo = self.pf._get_field_info(*item)

diff -r c3f59cd9dd267915b4c4e59b76db493a775804f0 -r ffba4792808a17b6d50d0dee75f28a32dc2c3c26 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -92,7 +92,7 @@
           display_field=False)
 
 def _Zeros(field, data):
-    return np.zeros(data.shape, dtype='float64')
+    return np.zeros(data["Ones"].shape, dtype='float64')
 add_field("Zeros", function=_Zeros,
           projection_conversion="unitary",
           display_field = False)
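
The fcoords property above relies on np.mgrid's complex-step convention: an imaginary step N*1j means "exactly N points, endpoints inclusive," so offsetting the edges by half a cell width yields cell centers. A sketch on a hypothetical 4^3 unit box:

    import numpy as np

    N = np.array([4, 4, 4])
    LeftEdge, RightEdge = np.zeros(3), np.ones(3)
    dds = (RightEdge - LeftEdge) / N

    LE = LeftEdge + dds / 2.0   # first cell center
    RE = RightEdge - dds / 2.0  # last cell center
    fc = np.mgrid[LE[0]:RE[0]:N[0]*1j,
                  LE[1]:RE[1]:N[1]*1j,
                  LE[2]:RE[2]:N[2]*1j]
    fcoords = np.column_stack([f.ravel() for f in fc])
    print(fcoords[0], fcoords[-1])  # [0.125 0.125 0.125] [0.875 0.875 0.875]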


https://bitbucket.org/yt_analysis/yt/commits/9b36137d26eb/
Changeset:   9b36137d26eb
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-29 23:23:09
Summary:     Arbitrary Grids should be C-ordered after deposition.
Affected #:  1 file

diff -r ffba4792808a17b6d50d0dee75f28a32dc2c3c26 -r 9b36137d26eb3bb40bd233fe067586c296131630 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -541,7 +541,7 @@
         op.initialize()
         op.process_grid(self, positions, fields)
         vals = op.finalize()
-        return vals.reshape(self.ActiveDimensions, order="F")
+        return vals.reshape(self.ActiveDimensions, order="C")
 
 class YTArbitraryGridBase(YTCoveringGridBase):
     """A 3D region with arbitrary bounds and dimensions.


https://bitbucket.org/yt_analysis/yt/commits/a6ee8f0baf5a/
Changeset:   a6ee8f0baf5a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-30 02:42:07
Summary:     Fixing issues with Gadget reading and record sizes.
Affected #:  2 files

diff -r 9b36137d26eb3bb40bd233fe067586c296131630 -r a6ee8f0baf5a8ff72eb55e84a718061a5d58cbe6 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -81,13 +81,16 @@
         with open(filename, "rb") as f:
             self.header = read_record(f, pf._header_spec)
             self._position_offset = f.tell()
+            f.seek(0, os.SEEK_END)
+            self._file_size = f.tell()
 
         super(GadgetBinaryFile, self).__init__(pf, io,
                 filename, file_id)
 
     def _calculate_offsets(self, field_list):
         self.field_offsets = self.io._calculate_field_offsets(
-                field_list, self.total_particles)
+                field_list, self.total_particles,
+                self._file_size)
 
 class ParticleStaticOutput(StaticOutput):
     _unit_base = None

diff -r 9b36137d26eb3bb40bd233fe067586c296131630 -r a6ee8f0baf5a8ff72eb55e84a718061a5d58cbe6 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -245,7 +245,7 @@
     def _read_field_from_file(self, f, count, name):
         if count == 0: return
         if name == "ParticleIDs":
-            dt = "int32"
+            dt = "uint32"
         else:
             dt = "float32"
         if name in _vector_fields:
@@ -281,24 +281,36 @@
             for i, v in enumerate(data_file.header["Npart"])) 
         return npart
 
-    _header_offset = 256
+    # header is 256, but we have 4 at beginning and end for ints
+    _header_offset = 256 + 8
     _field_size = 4
-    def _calculate_field_offsets(self, field_list, pcount):
+    def _calculate_field_offsets(self, field_list, pcount,
+                                 file_size = None):
         # field_list is (ftype, fname) but the blocks are ordered
         # (fname, ftype) in the file.
-        pos = self._header_offset # 256 bytes for the header
+        pos = self._header_offset
         fs = self._field_size
         offsets = {}
         for field in self._fields:
             if not isinstance(field, types.StringTypes):
                 field = field[0]
+            if not any( (ptype, field) in field_list
+                        for ptype in self._ptypes):
+                continue
+            pos += 4
             for ptype in self._ptypes:
-                if (ptype, field) not in field_list: continue
+                if (ptype, field) not in field_list:
+                    continue
                 offsets[(ptype, field)] = pos
                 if field in _vector_fields:
                     pos += 3 * pcount[ptype] * fs
                 else:
                     pos += pcount[ptype] * fs
+            pos += 4
+        if file_size is not None:
+            if file_size != pos:
+                mylog.warning("Your Gadget-2 file may have extra " +
+                              "columns or different precision!")
         return offsets
 
     def _identify_fields(self, domain):
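
The extra 4s account for Gadget-2's Fortran-style record framing: every block is bracketed by a 4-byte integer holding the payload size, which is also why a final position that disagrees with the file size suggests extra columns or a different precision. A minimal sketch of reading one framed record (little-endian assumed):

    import io
    import struct

    def read_record_raw(f):
        # Fortran unformatted record: 4-byte length, payload, 4-byte length.
        (nbytes,) = struct.unpack("<i", f.read(4))
        payload = f.read(nbytes)
        (check,) = struct.unpack("<i", f.read(4))
        assert nbytes == check, "record framing mismatch"
        return payload

    # Round-trip demo with an in-memory stand-in for a Gadget file.
    buf = io.BytesIO(struct.pack("<i", 8) + b"\x00" * 8 + struct.pack("<i", 8))
    print(len(read_record_raw(buf)))  # 8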


https://bitbucket.org/yt_analysis/yt/commits/81b87d3f57fe/
Changeset:   81b87d3f57fe
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-30 03:29:30
Summary:     Making a few warnings and fields more explicit.
Affected #:  3 files

diff -r a6ee8f0baf5a8ff72eb55e84a718061a5d58cbe6 -r 81b87d3f57feff96e934028b7006b84f0906e26d yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -69,7 +69,7 @@
     def particle_density(field, data):
         pos = data[ptype, coord_name]
         d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
-        d /= data["CellVolume"]
+        d /= data["gas","CellVolume"]
         return d
 
     registry.add_field(("deposit", "%s_density" % ptype),
@@ -83,7 +83,7 @@
     def particle_cic(field, data):
         pos = data[ptype, coord_name]
         d = data.deposit(pos, [data[ptype, mass_name]], method = "cic")
-        d /= data["CellVolume"]
+        d /= data["gas","CellVolume"]
         return d
 
     registry.add_field(("deposit", "%s_cic" % ptype),

diff -r a6ee8f0baf5a8ff72eb55e84a718061a5d58cbe6 -r 81b87d3f57feff96e934028b7006b84f0906e26d yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -235,8 +235,8 @@
                     data = self._read_field_from_file(f, tp[ptype], field)
                     data = data[mask]
                     my_ind = ind[ptype, field]
-                    mylog.debug("Filling from %s to %s with %s",
-                        my_ind, my_ind+data.shape[0], field)
+                    mylog.debug("Filling (%s, %s) from %s to %s",
+                        ptype, field, my_ind, my_ind+data.shape[0])
                     rv[ptype, field][my_ind:my_ind + data.shape[0],...] = data
                     ind[ptype, field] += data.shape[0]
             f.close()

diff -r a6ee8f0baf5a8ff72eb55e84a718061a5d58cbe6 -r 81b87d3f57feff96e934028b7006b84f0906e26d yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -191,8 +191,8 @@
                 fd = fi[field].get_dependencies(pf = self.parameter_file)
             except Exception as e:
                 if type(e) != YTFieldNotFound:
-                    mylog.debug("Exception %s raised during field detection" %
-                                str(type(e)))
+                    mylog.debug("Raises %s during field %s detection.",
+                                str(type(e)), field)
                 continue
             missing = False
             # This next bit checks that we can't somehow generate everything.


https://bitbucket.org/yt_analysis/yt/commits/68b8b829ab6b/
Changeset:   68b8b829ab6b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-30 04:30:10
Summary:     Gadget coordinate records start with a 4-byte header.
Affected #:  2 files

diff -r 81b87d3f57feff96e934028b7006b84f0906e26d -r 68b8b829ab6bbc23195b3d0660d09588fa7a5924 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -90,7 +90,7 @@
     def _calculate_offsets(self, field_list):
         self.field_offsets = self.io._calculate_field_offsets(
                 field_list, self.total_particles,
-                self._file_size)
+                self._position_offset, self._file_size)
 
 class ParticleStaticOutput(StaticOutput):
     _unit_base = None

diff -r 81b87d3f57feff96e934028b7006b84f0906e26d -r 68b8b829ab6bbc23195b3d0660d09588fa7a5924 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -261,8 +261,10 @@
         DLE = data_file.pf.domain_left_edge
         DRE = data_file.pf.domain_right_edge
         dx = (DRE - DLE) / 2**_ORDER_MAX
+        pos = np.empty((count, 3), dtype='float64')
         with open(data_file.filename, "rb") as f:
-            f.seek(self._header_offset)
+            # We add an additional 4 for the first record.
+            f.seek(data_file._position_offset + 4)
             # The first total_particles * 3 values are positions
             pp = np.fromfile(f, dtype = dt, count = count)
         pos = np.column_stack([pp['px'], pp['py'], pp['pz']]).astype("float64")
@@ -282,13 +284,12 @@
         return npart
 
     # header is 256, but we have 4 at beginning and end for ints
-    _header_offset = 256 + 8
     _field_size = 4
     def _calculate_field_offsets(self, field_list, pcount,
-                                 file_size = None):
+                                 offset, file_size = None):
         # field_list is (ftype, fname) but the blocks are ordered
         # (fname, ftype) in the file.
-        pos = self._header_offset
+        pos = offset
         fs = self._field_size
         offsets = {}
         for field in self._fields:


https://bitbucket.org/yt_analysis/yt/commits/ab5d001a8829/
Changeset:   ab5d001a8829
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-30 04:43:58
Summary:     Explicitly naming particle fields is necessary when we do not have an implicit
particle field defined in a frontend -- for instance, in SPH codes.

Note that this is related to #592, but I believe does not cause a regression.
Affected #:  1 file

diff -r 68b8b829ab6bbc23195b3d0660d09588fa7a5924 -r ab5d001a882986a751b6f6edffeb4fdeb0ee49fc yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -1009,7 +1009,7 @@
 for field in ["particle_position_%s" % ax for ax in "xyz"]:
     # This marker should let everyone know not to use the fields, but NullFunc
     # should do that, too.
-    add_field(field, function=NullFunc, particle_type = True,
+    add_field(("all", field), function=NullFunc, particle_type = True,
         units=r"UNDEFINED")
 
 def _pdensity(field, data):


https://bitbucket.org/yt_analysis/yt/commits/ffdae8519da4/
Changeset:   ffdae8519da4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-30 15:34:19
Summary:     Backporting this fix from Nathan's units branch.
Affected #:  1 file

diff -r ab5d001a882986a751b6f6edffeb4fdeb0ee49fc -r ffdae8519da44d382b0e6ca25c90141b461e997a yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -283,7 +283,7 @@
                 vv = finfo(self)
             except NeedsGridType as exc:
                 ngz = exc.ghost_zones
-                nfd = FieldDetector(self.nd + ngz * 2)
+                nfd = FieldDetector(self.nd + ngz * 2, pf = self.pf)
                 nfd._num_ghost_zones = ngz
                 vv = finfo(nfd)
                 if ngz > 0: vv = vv[ngz:-ngz, ngz:-ngz, ngz:-ngz]


https://bitbucket.org/yt_analysis/yt/commits/1b16df894cc4/
Changeset:   1b16df894cc4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-30 17:36:16
Summary:     Adding new, simpler Morton order calculation.
Affected #:  3 files

diff -r ffdae8519da44d382b0e6ca25c90141b461e997a -r 1b16df894cc41c28162829b3687e34a12991f951 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -33,7 +33,7 @@
 
 from yt.utilities.fortran_utils import read_record
 from yt.utilities.lib.geometry_utils import get_morton_indices, \
-    get_morton_indices_unravel
+    get_morton_indices_unravel, compute_morton
 
 from yt.geometry.oct_container import _ORDER_MAX
 
@@ -104,16 +104,18 @@
         f = h5py.File(data_file.filename, "r")
         pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
         morton = np.empty(pcount, dtype='uint64')
-        DLE = data_file.pf.domain_left_edge
-        DRE = data_file.pf.domain_right_edge
-        dx = (DRE - DLE) / 2**_ORDER_MAX
         ind = 0
         for key in f.keys():
             if not key.startswith("PartType"): continue
-            pos = f[key]["Coordinates"][:].astype("float64")
+            ds = f[key]["Coordinates"]
+            dt = ds.dtype.newbyteorder("N") # Native
+            pos = np.empty(ds.shape, dtype=dt)
+            pos[:] = ds
             regions.add_data_file(pos, data_file.file_id)
-            pos = np.floor((pos - DLE)/dx).astype("uint64")
-            morton[ind:ind+pos.shape[0]] = get_morton_indices(pos)
+            morton[ind:ind+pos.shape[0]] = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.pf.domain_left_edge,
+                data_file.pf.domain_right_edge)
             ind += pos.shape[0]
         f.close()
         return morton
@@ -257,7 +259,6 @@
 
     def _initialize_index(self, data_file, regions):
         count = sum(data_file.total_particles.values())
-        dt = [("px", "float32"), ("py", "float32"), ("pz", "float32")]
         DLE = data_file.pf.domain_left_edge
         DRE = data_file.pf.domain_right_edge
         dx = (DRE - DLE) / 2**_ORDER_MAX
@@ -266,16 +267,10 @@
             # We add an additional 4 for the first record.
             f.seek(data_file._position_offset + 4)
             # The first total_particles * 3 values are positions
-            pp = np.fromfile(f, dtype = dt, count = count)
-        pos = np.column_stack([pp['px'], pp['py'], pp['pz']]).astype("float64")
-        del pp
-        regions.add_data_file(pos, data_file.file_id)
-        lx = np.floor((pos[:,0] - DLE[0])/dx[0]).astype("uint64")
-        ly = np.floor((pos[:,1] - DLE[1])/dx[1]).astype("uint64")
-        lz = np.floor((pos[:,2] - DLE[2])/dx[2]).astype("uint64")
-        del pos
-        morton = get_morton_indices_unravel(lx, ly, lz)
-        del lx, ly, lz
+            pp = np.fromfile(f, dtype = 'float32', count = count*3)
+            pp.shape = (count, 3)
+        regions.add_data_file(pp, data_file.file_id)
+        morton = compute_morton(pp[:,0], pp[:,1], pp[:,2], DLE, DRE)
         return morton
 
     def _count_particles(self, data_file):

diff -r ffdae8519da44d382b0e6ca25c90141b461e997a -r 1b16df894cc41c28162829b3687e34a12991f951 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -261,6 +261,10 @@
                         self.visit(o.children[cind(i,j,k)], counts, level + 1)
         return
 
+ctypedef fused anyfloat:
+    np.float32_t
+    np.float64_t
+
 cdef class ParticleRegions:
     cdef np.float64_t left_edge[3]
     cdef np.float64_t dds[3]
@@ -282,7 +286,14 @@
         for i in range(nfiles/64 + 1):
             self.masks.append(np.zeros(dims, dtype="uint64"))
 
-    def add_data_file(self, np.ndarray[np.float64_t, ndim=2] pos, int file_id):
+    def add_data_file(self, np.ndarray pos, int file_id):
+        if pos.dtype == np.float32:
+            self._mask_positions[np.float32_t](pos, file_id)
+        elif pos.dtype == np.float64:
+            self._mask_positions[np.float64_t](pos, file_id)
+
+    cdef void _mask_positions(self, np.ndarray[anyfloat, ndim=2] pos,
+                              int file_id):
         cdef np.int64_t no = pos.shape[0]
         cdef np.int64_t p
         cdef int ind[3], i

diff -r ffdae8519da44d382b0e6ca25c90141b461e997a -r 1b16df894cc41c28162829b3687e34a12991f951 yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -353,6 +353,51 @@
         morton_indices[i] = mi
     return morton_indices
 
+ctypedef fused anyfloat:
+    np.float32_t
+    np.float64_t
+
+cdef position_to_morton(np.ndarray[anyfloat, ndim=1] pos_x,
+                        np.ndarray[anyfloat, ndim=1] pos_y,
+                        np.ndarray[anyfloat, ndim=1] pos_z,
+                        np.float64_t dds[3], np.float64_t DLE[3],
+                        np.ndarray[np.uint64_t, ndim=1] ind):
+    cdef np.uint64_t mi, ii[3]
+    cdef np.float64_t p[3]
+    cdef np.int64_t i, j
+    for i in range(pos_x.shape[0]):
+        p[0] = <np.float64_t> pos_x[i]
+        p[1] = <np.float64_t> pos_y[i]
+        p[2] = <np.float64_t> pos_z[i]
+        for j in range(3):
+            ii[j] = <np.uint64_t> ((p[j] - DLE[j])/dds[j])
+        mi = 0
+        mi |= spread_bits(ii[2])<<0
+        mi |= spread_bits(ii[1])<<1
+        mi |= spread_bits(ii[0])<<2
+        ind[i] = mi
+
+DEF ORDER_MAX=20
+        
+def compute_morton(np.ndarray pos_x, np.ndarray pos_y, np.ndarray pos_z,
+                   domain_left_edge, domain_right_edge):
+    cdef int i
+    cdef np.float64_t dds[3], DLE[3], DRE[3]
+    for i in range(3):
+        DLE[i] = domain_left_edge[i]
+        DRE[i] = domain_right_edge[i]
+        dds[i] = (DRE[i] - DLE[i]) / (1 << ORDER_MAX)
+    cdef np.ndarray[np.uint64_t, ndim=1] ind
+    ind = np.zeros(pos_x.shape[0], dtype="uint64")
+    if pos_x.dtype == np.float32:
+        position_to_morton[np.float32_t](pos_x, pos_y, pos_z, dds, DLE, ind)
+    elif pos_x.dtype == np.float64:
+        position_to_morton[np.float64_t](pos_x, pos_y, pos_z, dds, DLE, ind)
+    else:
+        print "Could not identify dtype.", pos_x.dtype
+        raise NotImplementedError
+    return ind
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
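
The newbyteorder("N") step above matters because HDF5 datasets can be stored big-endian while the fused float32/float64 paths expect native data; copying into a native-dtype array byte-swaps on assignment. A small NumPy sketch:

    import numpy as np

    # Pretend the on-disk coordinates arrived big-endian (">f4").
    disk = np.arange(6, dtype=">f4").reshape(3, 2)

    dt = disk.dtype.newbyteorder("N")  # same type, native byte order
    pos = np.empty(disk.shape, dtype=dt)
    pos[:] = disk                      # byte-swaps on assignment if needed

    print(pos.dtype.isnative, pos.dtype)  # True float32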


https://bitbucket.org/yt_analysis/yt/commits/15e14fe3e829/
Changeset:   15e14fe3e829
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-30 17:48:51
Summary:     Converting Tipsy to use the new Morton computation.
Affected #:  1 file

diff -r 1b16df894cc41c28162829b3687e34a12991f951 -r 15e14fe3e82918f3b840dc23551618e6afaae9a8 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -37,6 +37,8 @@
 
 from yt.geometry.oct_container import _ORDER_MAX
 
+CHUNKSIZE = 10000000
+
 _vector_fields = ("Coordinates", "Velocity", "Velocities")
 
 class IOHandlerOWLS(BaseIOHandler):
@@ -426,34 +428,33 @@
                 # We'll just add the individual types separately
                 count = data_file.total_particles[ptype]
                 if count == 0: continue
-                pp = np.fromfile(f, dtype = self._pdtypes[ptype],
-                                 count = count)
-                mis = np.empty(3, dtype="float64")
-                mas = np.empty(3, dtype="float64")
-                for axi, ax in enumerate('xyz'):
-                    mi = pp["Coordinates"][ax].min()
-                    ma = pp["Coordinates"][ax].max()
-                    mylog.debug("Spanning: %0.3e .. %0.3e in %s", mi, ma, ax)
-                    mis[axi] = mi
-                    mas[axi] = ma
-                if np.any(mis < pf.domain_left_edge) or \
-                   np.any(mas > pf.domain_right_edge):
-                    raise YTDomainOverflow(mis, mas,
-                                           pf.domain_left_edge,
-                                           pf.domain_right_edge)
-                fpos = np.empty((count, 3), dtype="float64")
-                fpos[:,0] = pp["Coordinates"]["x"]
-                fpos[:,1] = pp["Coordinates"]["y"]
-                fpos[:,2] = pp["Coordinates"]["z"]
-                regions.add_data_file(fpos, data_file.file_id)
-                del fpos
-                pos = np.empty((count, 3), dtype="uint64")
-                for axi, ax in enumerate("xyz"):
-                    coords = pp['Coordinates'][ax].astype("float64")
-                    coords = np.floor((coords - DLE[axi])/dx[axi])
-                    pos[:,axi] = coords
-                morton[ind:ind+count] = get_morton_indices(pos)
-                del pp, pos
+                start, stop = ind, ind + count
+                while ind < stop:
+                    c = min(CHUNKSIZE, stop - ind)
+                    pp = np.fromfile(f, dtype = self._pdtypes[ptype],
+                                     count = c)
+                    mis = np.empty(3, dtype="float64")
+                    mas = np.empty(3, dtype="float64")
+                    for axi, ax in enumerate('xyz'):
+                        mi = pp["Coordinates"][ax].min()
+                        ma = pp["Coordinates"][ax].max()
+                        mylog.debug("Spanning: %0.3e .. %0.3e in %s", mi, ma, ax)
+                        mis[axi] = mi
+                        mas[axi] = ma
+                    if np.any(mis < pf.domain_left_edge) or \
+                       np.any(mas > pf.domain_right_edge):
+                        raise YTDomainOverflow(mis, mas,
+                                               pf.domain_left_edge,
+                                               pf.domain_right_edge)
+                    pos = np.empty((pp.size, 3), dtype="float64")
+                    pos[:,0] = pp["Coordinates"]["x"]
+                    pos[:,1] = pp["Coordinates"]["y"]
+                    pos[:,2] = pp["Coordinates"]["z"]
+                    regions.add_data_file(pos, data_file.file_id)
+                    morton[ind:ind+c] = compute_morton(
+                        pos[:,0], pos[:,1], pos[:,2],
+                        DLE, DRE)
+                    ind += c
         mylog.info("Adding %0.3e particles", morton.size)
         return morton
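
The CHUNKSIZE loop bounds peak memory during index construction: each np.fromfile call reads at most CHUNKSIZE records, and the Morton indices for that slice are computed before the next read. The same pattern in isolation, with a tiny chunk size and a temporary file for the demo:

    import tempfile
    import numpy as np

    CHUNKSIZE = 4  # tiny for the demo; the commit uses 10000000

    def read_in_chunks(f, dtype, total):
        ind = 0
        while ind < total:
            c = min(CHUNKSIZE, total - ind)
            yield ind, np.fromfile(f, dtype=dtype, count=c)
            ind += c

    with tempfile.TemporaryFile() as f:
        np.arange(10, dtype="float32").tofile(f)
        f.seek(0)
        for ind, chunk in read_in_chunks(f, "float32", 10):
            print(ind, chunk.size)  # chunks of 4, 4, then 2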
 


https://bitbucket.org/yt_analysis/yt/commits/31e07a926600/
Changeset:   31e07a926600
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-30 17:49:06
Summary:     Removing unused imports.
Affected #:  1 file

diff -r 15e14fe3e82918f3b840dc23551618e6afaae9a8 -r 31e07a9266001a83c1418d610a0405eeca77f64d yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -32,8 +32,7 @@
     BaseIOHandler
 
 from yt.utilities.fortran_utils import read_record
-from yt.utilities.lib.geometry_utils import get_morton_indices, \
-    get_morton_indices_unravel, compute_morton
+from yt.utilities.lib.geometry_utils import compute_morton
 
 from yt.geometry.oct_container import _ORDER_MAX
 


https://bitbucket.org/yt_analysis/yt/commits/f3e4516b49c8/
Changeset:   f3e4516b49c8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-30 23:19:30
Summary:     Fixing broken tests.
Affected #:  1 file

diff -r 31e07a9266001a83c1418d610a0405eeca77f64d -r f3e4516b49c81e426fd860c43da02a87d92f5b9e yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -85,13 +85,17 @@
 
 def test_all_fields():
     for field in FieldInfo:
-        if field.startswith("CuttingPlane"): continue
-        if field.startswith("particle"): continue
-        if field.startswith("CIC"): continue
-        if field.startswith("WeakLensingConvergence"): continue
-        if field.startswith("DensityPerturbation"): continue
-        if field.startswith("Matter_Density"): continue
-        if field.startswith("Overdensity"): continue
+        if isinstance(field, types.TupleType):
+            fname = field[0]
+        else:
+            fname = field
+        if fname.startswith("CuttingPlane"): continue
+        if fname.startswith("particle"): continue
+        if fname.startswith("CIC"): continue
+        if fname.startswith("WeakLensingConvergence"): continue
+        if fname.startswith("DensityPerturbation"): continue
+        if fname.startswith("Matter_Density"): continue
+        if fname.startswith("Overdensity"): continue
         if FieldInfo[field].particle_type: continue
         for nproc in [1, 4, 8]:
             yield TestFieldAccess(field, nproc)


https://bitbucket.org/yt_analysis/yt/commits/0ed9759f31d2/
Changeset:   0ed9759f31d2
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-01 04:44:58
Summary:     Superset/Subselect selectors need to respect min/max level.

This fixes the issue Chris reported with the PR.
Affected #:  2 files

diff -r f3e4516b49c81e426fd860c43da02a87d92f5b9e -r 0ed9759f31d2ddd0dd4945e9412c3c15050067c6 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -74,6 +74,9 @@
     _hydro_offset = None
     _level_count = None
 
+    def __repr__(self):
+        return "RAMSESDomainFile: %i" % self.domain_id
+
     @property
     def level_count(self):
         if self._level_count is not None: return self._level_count
@@ -398,6 +401,8 @@
         for unit in mpc_conversion.keys():
             self.units[unit] = unit_l * mpc_conversion[unit] / mpc_conversion["cm"]
             self.units['%sh' % unit] = self.units[unit] * self.hubble_constant
+            self.units['%scm' % unit] = (self.units[unit] /
+                                          (1 + self.current_redshift))
             self.units['%shcm' % unit] = (self.units['%sh' % unit] /
                                           (1 + self.current_redshift))
         for unit in sec_conversion.keys():

diff -r f3e4516b49c81e426fd860c43da02a87d92f5b9e -r 0ed9759f31d2ddd0dd4945e9412c3c15050067c6 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1124,6 +1124,8 @@
 
     def __init__(self, dobj):
         self.base_selector = dobj.base_selector
+        self.min_level = self.base_selector.min_level
+        self.max_level = self.base_selector.max_level
         self.domain_id = dobj.domain_id
         self.overlap_cells = 1
 
@@ -1157,11 +1159,10 @@
                          Oct *o = NULL) nogil:
         # Because visitors now use select_grid, we should be explicitly
         # checking this.
-        cdef int res
-        res = self.base_selector.select_grid(left_edge, right_edge, level, o)
-        if res == 1 and o != NULL and o.domain != self.domain_id:
-            return -1
-        return res
+        return self.base_selector.select_grid(left_edge, right_edge, level, o)
+
+    def get_base(self):
+        return self.base_selector
 
 octree_subset_selector = OctreeSubsetSelector
 
@@ -1175,6 +1176,8 @@
         self.min_ind = dobj.min_ind
         self.max_ind = dobj.max_ind
         self.base_selector = dobj.base_selector
+        self.min_level = self.base_selector.min_level
+        self.max_level = self.base_selector.max_level
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
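
On the units side, the new '%scm' entries register plain comoving variants of each length unit next to the existing h-scaled ones. The underlying relation, independent of yt's internals: proper and comoving lengths differ by the scale factor a = 1/(1+z).

    # Proper vs. comoving length at redshift z (values hypothetical).
    z = 2.0
    a = 1.0 / (1.0 + z)  # scale factor

    comoving_mpc = 30.0
    proper_mpc = comoving_mpc * a
    print(proper_mpc)    # 10.0 proper Mpc at z = 2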


https://bitbucket.org/yt_analysis/yt/commits/fd7528b8bafb/
Changeset:   fd7528b8bafb
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-01 06:04:28
Summary:     This somewhat simplifies the handling of particle fields by constructing
*new* field objects when renaming them, thus enabling their names to differ
from the originals during that process.

The process of building a set of fields and particle fields will need to be
improved in the future, but that absolutely has to wait until the field
renaming and units work that Nathan is working on is ready for inclusion.
Affected #:  4 files

diff -r 0ed9759f31d2ddd0dd4945e9412c3c15050067c6 -r fd7528b8bafbc65f724169fddc06a986fb37a5e0 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -146,3 +146,4 @@
     registry.add_field((ptype, "Velocities"),
                        function=_get_vec_func(ptype, vel_names),
                        particle_type=True)
+

diff -r 0ed9759f31d2ddd0dd4945e9412c3c15050067c6 -r fd7528b8bafbc65f724169fddc06a986fb37a5e0 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -1005,13 +1005,6 @@
 add_field("JeansMassMsun",function=_JeansMassMsun,
           units=r"\rm{M_{\odot}}")
 
-# We add these fields so that the field detector can use them
-for field in ["particle_position_%s" % ax for ax in "xyz"]:
-    # This marker should let everyone know not to use the fields, but NullFunc
-    # should do that, too.
-    add_field(("all", field), function=NullFunc, particle_type = True,
-        units=r"UNDEFINED")
-
 def _pdensity(field, data):
     pmass = data[('deposit','all_mass')]
     np.divide(pmass, data["CellVolume"], pmass)

diff -r 0ed9759f31d2ddd0dd4945e9412c3c15050067c6 -r fd7528b8bafbc65f724169fddc06a986fb37a5e0 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -507,7 +507,9 @@
     add_enzo_field(("all", pf), function=NullFunc, convert_function=cfunc,
               particle_type=True)
 
-for pf in ["creation_time", "dynamical_time", "metallicity_fraction"]:
+for pf in ["creation_time", "dynamical_time", "metallicity_fraction"] \
+        + ["particle_position_%s" % ax for ax in 'xyz'] \
+        + ["particle_velocity_%s" % ax for ax in 'xyz']:
     add_enzo_field(pf, function=NullFunc,
               validators = [ValidateDataField(pf)],
               particle_type=True)

diff -r 0ed9759f31d2ddd0dd4945e9412c3c15050067c6 -r fd7528b8bafbc65f724169fddc06a986fb37a5e0 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -31,6 +31,7 @@
 from types import ClassType
 import numpy as np
 import abc
+import copy
 
 from yt.funcs import *
 from yt.config import ytcfg
@@ -166,7 +167,6 @@
         # First we construct our list of fields to check
         fields_to_check = []
         fields_to_allcheck = []
-        fields_to_add = []
         for field in fi:
             finfo = fi[field]
             # Explicitly defined
@@ -179,13 +179,14 @@
                 fields_to_check.append(field)
                 continue
             # We do a special case for 'all' later
-            new_fields = [(pt, field) for pt in
-                          self.parameter_file.particle_types]
+            new_fields = []
+            for pt in self.parameter_file.particle_types:
+                new_fi = copy.copy(finfo)
+                new_fi.name = (pt, new_fi.name)
+                fi[new_fi.name] = new_fi
+                new_fields.append(new_fi.name)
             fields_to_check += new_fields
-            fields_to_add.extend( (new_field, fi[field]) for
-                                   new_field in new_fields )
             fields_to_allcheck.append(field)
-        fi.update(fields_to_add)
         for field in fields_to_check:
             try:
                 fd = fi[field].get_dependencies(pf = self.parameter_file)
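
The copy.copy here is the point of the change: instead of registering the same field-info object under several keys, each particle type gets its own shallow copy with its own (ptype, name) key, so renaming one cannot leak into the others. A stand-in sketch (the FInfo class and the particle types are hypothetical):

    import copy

    class FInfo:  # stand-in for yt's field info objects
        def __init__(self, name):
            self.name = name

    fi = {"particle_mass": FInfo("particle_mass")}
    finfo = fi["particle_mass"]
    for pt in ("io", "all"):            # assumed particle types
        new_fi = copy.copy(finfo)       # new object; attributes shared
        new_fi.name = (pt, finfo.name)  # each copy gets its own key
        fi[new_fi.name] = new_fi

    print(sorted(k for k in fi if isinstance(k, tuple)))
    # [('all', 'particle_mass'), ('io', 'particle_mass')]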


https://bitbucket.org/yt_analysis/yt/commits/5e2716f491e2/
Changeset:   5e2716f491e2
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-01 06:08:35
Summary:     Be more careful about filling fields based on their names, when we are able to
guess them.
Affected #:  1 file

diff -r fd7528b8bafbc65f724169fddc06a986fb37a5e0 -r 5e2716f491e2bc1ff2eeaafbbecde5754451f81d yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -270,12 +270,17 @@
         return arr.reshape(self.ActiveDimensions, order="C")
 
     def __missing__(self, item):
-        if hasattr(self.pf, "field_info") and isinstance(item, tuple):
-            finfo = self.pf._get_field_info(*item)
+        if hasattr(self.pf, "field_info"):
+            if not isinstance(item, tuple):
+                field = ("unknown", item)
+            else:
+                field = item
+            finfo = self.pf._get_field_info(*field)
         else:
             FI = getattr(self.pf, "field_info", FieldInfo)
-            if item in FI:
-                finfo = FI[item]
+            field = item
+            if field in FI:
+                finfo = FI[field]
             else:
                 finfo = None
         if finfo is not None and finfo._function.func_name != 'NullFunc':
@@ -293,23 +298,23 @@
                     if i not in self.requested_parameters:
                         self.requested_parameters.append(i)
             if vv is not None:
-                if not self.flat: self[item] = vv
-                else: self[item] = vv.ravel()
-                return self[item]
+                if not self.flat: self[field] = vv
+                else: self[field] = vv.ravel()
+                return self[field]
         elif finfo is not None and finfo.particle_type:
-            if item == "Coordinates" or item[1] == "Coordinates" or \
-               item == "Velocities" or item[1] == "Velocities":
+            if field == "Coordinates" or field[1] == "Coordinates" or \
+               field == "Velocities" or field[1] == "Velocities":
                 # A vector
-                self[item] = np.ones((self.NumberOfParticles, 3))
+                self[field] = np.ones((self.NumberOfParticles, 3))
             else:
                 # Not a vector
-                self[item] = np.ones(self.NumberOfParticles)
-            self.requested.append(item)
-            return self[item]
-        self.requested.append(item)
-        if item not in self:
-            self[item] = self._read_data(item)
-        return self[item]
+                self[field] = np.ones(self.NumberOfParticles)
+            self.requested.append(field)
+            return self[field]
+        self.requested.append(field)
+        if field not in self:
+            self[field] = self._read_data(field)
+        return self[field]
 
     def deposit(self, *args, **kwargs):
         return np.random.random((self.nd, self.nd, self.nd))


https://bitbucket.org/yt_analysis/yt/commits/b9ebc0a0e0e8/
Changeset:   b9ebc0a0e0e8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-01 06:14:01
Summary:     RAMSES, which mixes domains within a given OctHandler, still needs this check.
Affected #:  1 file

diff -r 5e2716f491e2bc1ff2eeaafbbecde5754451f81d -r b9ebc0a0e0e8f296e55c423806085f4ea47a97b2 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1159,10 +1159,11 @@
                          Oct *o = NULL) nogil:
         # Because visitors now use select_grid, we should be explicitly
         # checking this.
-        return self.base_selector.select_grid(left_edge, right_edge, level, o)
-
-    def get_base(self):
-        return self.base_selector
+        cdef int res
+        res = self.base_selector.select_grid(left_edge, right_edge, level, o)
+        if res == 1 and o != NULL and o.domain != self.domain_id:
+            return -1
+        return res
 
 octree_subset_selector = OctreeSubsetSelector
 


https://bitbucket.org/yt_analysis/yt/commits/80b8e52b5f1a/
Changeset:   80b8e52b5f1a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-01 06:31:56
Summary:     Fix tests and field detection by being explicit about requested items.

Again I feel the need to lament that field detection is difficult when trying
to address multiple fluids/particles, unless we move to closures and
functionally declared fields ...

I've also added a test runner to the test_derived_quantities.py file, which
breaks nicely for some field detection errors, and I've added some known fields
to the Stream frontend.
Affected #:  3 files

diff -r b9ebc0a0e0e8f296e55c423806085f4ea47a97b2 -r 80b8e52b5f1a059033a3cbd231125b979c684207 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -278,9 +278,8 @@
             finfo = self.pf._get_field_info(*field)
         else:
             FI = getattr(self.pf, "field_info", FieldInfo)
-            field = item
-            if field in FI:
-                finfo = FI[field]
+            if item in FI:
+                finfo = FI[item]
             else:
                 finfo = None
         if finfo is not None and finfo._function.func_name != 'NullFunc':
@@ -298,23 +297,23 @@
                     if i not in self.requested_parameters:
                         self.requested_parameters.append(i)
             if vv is not None:
-                if not self.flat: self[field] = vv
-                else: self[field] = vv.ravel()
-                return self[field]
+                if not self.flat: self[item] = vv
+                else: self[item] = vv.ravel()
+                return self[item]
         elif finfo is not None and finfo.particle_type:
-            if field == "Coordinates" or field[1] == "Coordinates" or \
-               field == "Velocities" or field[1] == "Velocities":
+            if item == "Coordinates" or item[1] == "Coordinates" or \
+               item == "Velocities" or item[1] == "Velocities":
                 # A vector
-                self[field] = np.ones((self.NumberOfParticles, 3))
+                self[item] = np.ones((self.NumberOfParticles, 3))
             else:
                 # Not a vector
-                self[field] = np.ones(self.NumberOfParticles)
-            self.requested.append(field)
-            return self[field]
-        self.requested.append(field)
-        if field not in self:
-            self[field] = self._read_data(field)
-        return self[field]
+                self[item] = np.ones(self.NumberOfParticles)
+            self.requested.append(item)
+            return self[item]
+        self.requested.append(item)
+        if item not in self:
+            self[item] = self._read_data(item)
+        return self[item]
 
     def deposit(self, *args, **kwargs):
         return np.random.random((self.nd, self.nd, self.nd))

diff -r b9ebc0a0e0e8f296e55c423806085f4ea47a97b2 -r 80b8e52b5f1a059033a3cbd231125b979c684207 yt/data_objects/tests/test_derived_quantities.py
--- a/yt/data_objects/tests/test_derived_quantities.py
+++ b/yt/data_objects/tests/test_derived_quantities.py
@@ -50,3 +50,7 @@
         a_std = np.sqrt((ad["CellMass"] * (ad["Density"] - a_mean)**2).sum() / 
                         ad["CellMass"].sum())
         yield assert_rel_equal, my_std, a_std, 12
+
+if __name__ == "__main__":
+    for i in test_extrema():
+        i[0](*i[1:])

diff -r b9ebc0a0e0e8f296e55c423806085f4ea47a97b2 -r 80b8e52b5f1a059033a3cbd231125b979c684207 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -42,6 +42,10 @@
 add_field = StreamFieldInfo.add_field
 
 add_stream_field("density", function = NullFunc)
+add_stream_field("x-velocity", function = NullFunc)
+add_stream_field("y-velocity", function = NullFunc)
+add_stream_field("z-velocity", function = NullFunc)
+
 add_field("Density", function = TranslationFunc("density"))
 
 add_stream_field("particle_position_x", function = NullFunc, particle_type=True)


https://bitbucket.org/yt_analysis/yt/commits/92a91071ac6b/
Changeset:   92a91071ac6b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-01 16:30:59
Summary:     We change the dictionary during iteration so we need to use a copy of the key list.
Affected #:  1 file

diff -r 80b8e52b5f1a059033a3cbd231125b979c684207 -r 92a91071ac6b84e1008646de7f2210e6841db9f0 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -167,7 +167,7 @@
         # First we construct our list of fields to check
         fields_to_check = []
         fields_to_allcheck = []
-        for field in fi:
+        for field in fi.keys():
             finfo = fi[field]
             # Explicitly defined
             if isinstance(field, tuple):
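
The underlying pitfall is the usual Python one: adding or removing keys while
iterating over a dict raises a RuntimeError, whereas iterating over a snapshot
of the keys is safe (in Python 2, .keys() returns a list copy).  A short
illustration with made-up field names:

    fi = {"Density": None, "Temperature": None}
    # for field in fi: fi[("enzo", field)] = fi[field]  # RuntimeError
    for field in fi.keys():   # snapshot of the keys; safe to mutate fi
        fi[("enzo", field)] = fi[field]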


https://bitbucket.org/yt_analysis/yt/commits/b02f9ede5df5/
Changeset:   b02f9ede5df5
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-01 21:33:22
Summary:     Simplifying particle_mass for Enzo by pushing it all right into the IO routine.

This does change how Enzo's particles are regarded -- but with the benefit of
much better IO and reduced complexity for calculating particle masses.  I
believe this is worth it; we special-case other frontends, and I do not think
it is a problem to do so here.
Affected #:  2 files

diff -r 92a91071ac6b84e1008646de7f2210e6841db9f0 -r b02f9ede5df50cbc312b42e6c5565f523029ddd1 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -388,14 +388,16 @@
     """
     particle_field = field.name[4:]
     pos = data[('all', 'Coordinates')]
+    # Get back into density
+    pden = data['all', 'particle_mass'] / data["CellVolume"] 
     top = data.deposit(
         pos,
-        [data[('all', particle_field)]*data[('all', 'particle_mass')]],
+        [data[('all', particle_field)]*pden],
         method = 'cic'
         )
     bottom = data.deposit(
         pos,
-        [data[('all', 'particle_mass')]],
+        [pden],
         method = 'cic'
         )
     top[bottom == 0] = 0.0
@@ -513,7 +515,22 @@
     add_enzo_field(pf, function=NullFunc,
               validators = [ValidateDataField(pf)],
               particle_type=True)
-add_field(('all', "particle_mass"), function=NullFunc, particle_type=True)
+
+def _convertParticleMass(data):
+    return data.convert("Density")*(data.convert("cm")**3.0)
+def _convertParticleMassMsun(data):
+    return data.convert("Density")*((data.convert("cm")**3.0)/mass_sun_cgs)
+# We have now multiplied by grid.dds.prod() inside the IO function.
+# So here we multiply just by the conversion to density.
+add_field(('all', "particle_mass"), function=NullFunc, 
+          particle_type=True, convert_function = _convertParticleMass)
+
+add_field("ParticleMass",
+          function=TranslationFunc("particle_mass"),
+          particle_type=True, convert_function=_convertParticleMass)
+add_field("ParticleMassMsun",
+          function=TranslationFunc("particle_mass"),
+          particle_type=True, convert_function=_convertParticleMassMsun)
 
 def _ParticleAge(field, data):
     current_time = data.pf.current_time
@@ -524,32 +541,6 @@
           validators=[ValidateDataField("creation_time")],
           particle_type=True, convert_function=_convertParticleAge)
 
-def _ParticleMass(field, data):
-    particles = data['all', "particle_mass"].astype('float64') * \
-                just_one(data["CellVolumeCode"].ravel())
-    # Note that we mandate grid-type here, so this is okay
-    return particles
-
-def _convertParticleMass(data):
-    return data.convert("Density")*(data.convert("cm")**3.0)
-def _IOLevelParticleMass(grid):
-    dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
-    cf = (_ParticleMass(None, dd) * _convertParticleMass(grid))[0]
-    return cf
-def _convertParticleMassMsun(data):
-    return data.convert("Density")*((data.convert("cm")**3.0)/1.989e33)
-def _IOLevelParticleMassMsun(grid):
-    dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
-    cf = (_ParticleMass(None, dd) * _convertParticleMassMsun(grid))[0]
-    return cf
-add_field("ParticleMass",
-          function=_ParticleMass, validators=[ValidateSpatial(0)],
-          particle_type=True, convert_function=_convertParticleMass,
-          particle_convert_function=_IOLevelParticleMass)
-add_field("ParticleMassMsun",
-          function=_ParticleMass, validators=[ValidateSpatial(0)],
-          particle_type=True, convert_function=_convertParticleMassMsun,
-          particle_convert_function=_IOLevelParticleMassMsun)
 
 #
 # Now we do overrides for 2D fields

diff -r 92a91071ac6b84e1008646de7f2210e6841db9f0 -r b02f9ede5df50cbc312b42e6c5565f523029ddd1 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -36,6 +36,8 @@
 import numpy as np
 from yt.funcs import *
 
+_convert_mass = ("particle_mass",)
+
 class IOHandlerPackedHDF5(BaseIOHandler):
 
     _data_style = "enzo_packed_3d"
@@ -81,6 +83,8 @@
                 for field in set(fields):
                     ftype, fname = field
                     gdata = data[g.id].pop(fname)[mask]
+                    if fname == "particle_mass":
+                        gdata *= g.dds.prod()
                     rv[field][ind:ind+gdata.size] = gdata
                 ind += gdata.size
                 data.pop(g.id)
@@ -130,6 +134,8 @@
                 for field in set(fields):
                     ftype, fname = field
                     gdata = data[g.id].pop(fname)[mask]
+                    if fname == "particle_mass":
+                        gdata *= g.dds.prod()
                     rv[field][ind:ind+gdata.size] = gdata
                 ind += gdata.size
         return rv
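
Schematically, the division of labor after this change: the IO layer turns the
raw on-disk particle "mass" (stored by Enzo as a code-units density) into a
code-units mass by multiplying by the cell volume, and the field layer then
applies only the unit conversion.  A hedged sketch with simplified names (dds
is assumed to be a NumPy array of cell widths; this is not the yt API):

    def read_particle_mass(raw_code_density, dds):
        # IO layer: one multiply by the cell volume, at read time.
        return raw_code_density * dds.prod()

    def particle_mass_msun(code_mass, density_cgs, cm_per_code, msun_g):
        # Field layer: pure unit conversion, mirroring
        # _convertParticleMassMsun above.
        return code_mass * density_cgs * cm_per_code**3 / msun_g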


https://bitbucket.org/yt_analysis/yt/commits/4452846ad4b6/
Changeset:   4452846ad4b6
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-01 23:11:41
Summary:     Remove usage of alloca in QuadTree in favor of explicit malloc/free.
Affected #:  1 file

diff -r b02f9ede5df50cbc312b42e6c5565f523029ddd1 -r 4452846ad4b6c93c9f304b82afe85b970e593c5e yt/utilities/lib/QuadTree.pyx
--- a/yt/utilities/lib/QuadTree.pyx
+++ b/yt/utilities/lib/QuadTree.pyx
@@ -131,7 +131,7 @@
         cdef int i, j
         cdef QuadTreeNode *node
         cdef np.int64_t pos[2]
-        cdef np.float64_t *vals = <np.float64_t *> alloca(
+        cdef np.float64_t *vals = <np.float64_t *> malloc(
                 sizeof(np.float64_t)*nvals)
         cdef np.float64_t weight_val = 0.0
         self.nvals = nvals
@@ -160,6 +160,7 @@
                 self.root_nodes[i][j] = QTN_initialize(
                     pos, nvals, vals, weight_val)
         self.num_cells = self.top_grid_dims[0] * self.top_grid_dims[1]
+        free(vals)
 
     cdef int count_total_cells(self, QuadTreeNode *root):
         cdef int total = 0
@@ -373,7 +374,7 @@
         cdef np.float64_t *vdata = <np.float64_t *> nvals.data
         cdef np.float64_t *wdata = <np.float64_t *> nwvals.data
         cdef np.float64_t wtoadd
-        cdef np.float64_t *vtoadd = <np.float64_t *> alloca(
+        cdef np.float64_t *vtoadd = <np.float64_t *> malloc(
                 sizeof(np.float64_t)*self.nvals)
         for i in range(self.top_grid_dims[0]):
             for j in range(self.top_grid_dims[1]):
@@ -381,6 +382,7 @@
                 wtoadd = 0.0
                 curpos += self.fill(self.root_nodes[i][j],
                     curpos, px, py, pdx, pdy, vdata, wdata, vtoadd, wtoadd, 0)
+        free(vtoadd)
         return opx, opy, opdx, opdy, nvals, nwvals
 
     cdef int count(self, QuadTreeNode *node):
@@ -406,7 +408,7 @@
                         np.int64_t level):
         cdef int i, j, n
         cdef np.float64_t *vorig
-        vorig = <np.float64_t *> alloca(sizeof(np.float64_t) * self.nvals)
+        vorig = <np.float64_t *> malloc(sizeof(np.float64_t) * self.nvals)
         if node.children[0][0] == NULL:
             if self.merged == -1:
                 for i in range(self.nvals):
@@ -444,6 +446,7 @@
             for i in range(self.nvals):
                 vtoadd[i] = vorig[i]
             wtoadd -= node.weight_val
+        free(vorig)
         return added
 
     @cython.boundscheck(False)
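
The motivation here: alloca allocates in the caller's stack frame, which is
risky when the size (nvals) is data-dependent and can grow large; malloc moves
the scratch space to the heap at the cost of an explicit free.  A minimal
Cython sketch of the pattern (illustrative, using the standard libc cimport):

    from libc.stdlib cimport malloc, free

    cdef double scratch_sum(int nvals):
        cdef int i
        cdef double total = 0.0
        cdef double *vals = <double *> malloc(sizeof(double) * nvals)
        for i in range(nvals):
            vals[i] = 1.0
        for i in range(nvals):
            total += vals[i]
        free(vals)  # required with malloc; alloca freed itself with the frame
        return total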


https://bitbucket.org/yt_analysis/yt/commits/0f91890a2fad/
Changeset:   0f91890a2fad
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-01 23:15:40
Summary:     Adding cached_property decorator for chunks.

This allows YTDataChunk objects to track the results of operations like
fcoords, fwidth, etc. only when caching is requested.
Affected #:  1 file

diff -r 4452846ad4b6c93c9f304b82afe85b970e593c5e -r 0f91890a2fad5813b6085198a1316d00459c8be6 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -453,20 +453,30 @@
         else:
             raise NotImplementedError
 
+def cached_property(func):
+    n = '_%s' % func.func_name
+    def cached_func(self):
+        if self._cache and getattr(self, n, None) is not None:
+            return getattr(self, n)
+        if self.data_size is None:
+            tr = self._accumulate_values(n[1:])
+        else:
+            tr = func(self)
+        if self._cache:
+            setattr(self, n, tr)
+        return tr
+    return property(cached_func)
+
 class YTDataChunk(object):
 
-    def __init__(self, dobj, chunk_type, objs, data_size = None, field_type = None):
+    def __init__(self, dobj, chunk_type, objs, data_size = None,
+                 field_type = None, cache = True):
         self.dobj = dobj
         self.chunk_type = chunk_type
         self.objs = objs
-        self._data_size = data_size
+        self.data_size = data_size
         self._field_type = field_type
-
-    @property
-    def data_size(self):
-        if callable(self._data_size):
-            self._data_size = self._data_size(self.dobj, self.objs)
-        return self._data_size
+        self._cache = cache
 
     def _accumulate_values(self, method):
         # We call this generically.  It's somewhat slower, since we're doing
@@ -477,35 +487,25 @@
             f = getattr(obj, mname)
             arrs.append(f(self.dobj))
         arrs = np.concatenate(arrs)
-        self._data_size = arrs.shape[0]
+        self.data_size = arrs.shape[0]
         return arrs
 
-    _fcoords = None
-    @property
+    @cached_property
     def fcoords(self):
-        if self.data_size is None:
-            self._fcoords = self._accumulate_values("fcoords")
-        if self._fcoords is not None: return self._fcoords
         ci = np.empty((self.data_size, 3), dtype='float64')
-        self._fcoords = ci
-        if self.data_size == 0: return self._fcoords
+        if self.data_size == 0: return ci
         ind = 0
         for obj in self.objs:
             c = obj.select_fcoords(self.dobj)
             if c.shape[0] == 0: continue
             ci[ind:ind+c.shape[0], :] = c
             ind += c.shape[0]
-        return self._fcoords
+        return ci
 
-    _icoords = None
-    @property
+    @cached_property
     def icoords(self):
-        if self.data_size is None:
-            self._icoords = self._accumulate_values("icoords")
-        if self._icoords is not None: return self._icoords
         ci = np.empty((self.data_size, 3), dtype='int64')
-        self._icoords = ci
-        if self.data_size == 0: return self._icoords
+        if self.data_size == 0: return ci
         ind = 0
         for obj in self.objs:
             c = obj.select_icoords(self.dobj)
@@ -514,15 +514,10 @@
             ind += c.shape[0]
         return ci
 
-    _fwidth = None
-    @property
+    @cached_property
     def fwidth(self):
-        if self.data_size is None:
-            self._fwidth = self._accumulate_values("fwidth")
-        if self._fwidth is not None: return self._fwidth
         ci = np.empty((self.data_size, 3), dtype='float64')
-        self._fwidth = ci
-        if self.data_size == 0: return self._fwidth
+        if self.data_size == 0: return ci
         ind = 0
         for obj in self.objs:
             c = obj.select_fwidth(self.dobj)
@@ -531,15 +526,10 @@
             ind += c.shape[0]
         return ci
 
-    _ires = None
-    @property
+    @cached_property
     def ires(self):
-        if self.data_size is None:
-            self._ires = self._accumulate_values("ires")
-        if self._ires is not None: return self._ires
         ci = np.empty(self.data_size, dtype='int64')
-        self._ires = ci
-        if self.data_size == 0: return self._ires
+        if self.data_size == 0: return ci
         ind = 0
         for obj in self.objs:
             c = obj.select_ires(self.dobj)
@@ -548,22 +538,17 @@
             ind += c.size
         return ci
 
-    _tcoords = None
-    @property
+    @cached_property
     def tcoords(self):
-        if self._tcoords is None:
-            self.dtcoords
+        self.dtcoords
         return self._tcoords
 
-    _dtcoords = None
-    @property
+    @cached_property
     def dtcoords(self):
-        if self._dtcoords is not None: return self._dtcoords
         ct = np.empty(self.data_size, dtype='float64')
         cdt = np.empty(self.data_size, dtype='float64')
-        self._tcoords = ct
-        self._dtcoords = cdt
-        if self.data_size == 0: return self._dtcoords
+        self._tcoords = ct # Set this for tcoords
+        if self.data_size == 0: return cdt
         ind = 0
         for obj in self.objs:
             gdt, gt = obj.tcoords(self.dobj)
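
To make the decorator's behavior concrete: it caches the computed value on the
instance under a leading-underscore attribute, but only when _cache is True,
and falls back to _accumulate_values when data_size is unknown.  A
hypothetical usage sketch (FakeChunk is not a yt class; it assumes the
cached_property defined above is in scope):

    class FakeChunk(object):
        def __init__(self, cache = True):
            self._cache = cache
            self.data_size = 4    # size known, so the wrapped body runs

        @cached_property
        def fcoords(self):
            print "computing fcoords"
            return range(self.data_size)

    c = FakeChunk()
    c.fcoords   # prints once; the result is stored as c._fcoords
    c.fcoords   # served from the cache, nothing printed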


https://bitbucket.org/yt_analysis/yt/commits/2b865392b01b/
Changeset:   2b865392b01b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-01 23:39:10
Summary:     Thread cache arguments through the various frontends.

Note that by default, caching is off, but we turn it on when explicitly asking
for an 'all'.  I think this will replicate the behavior users expect.  However,
one thing to keep an eye on is the relative performance of various
operations, and I think we will also want to consider adding either a heuristic
or a contextmanager that preserves the various coordinates and traverses them
simultaneously.  This would be helpful, for instance, in simulations like
ARTIO where the traversal could potentially be expensive.

This largely completes the enzodiet, as memory usage is now down considerably
-- although not completely -- in my tests.
Affected #:  9 files

diff -r 0f91890a2fad5813b6085198a1316d00459c8be6 -r 2b865392b01b1f5458947d629dee2707c8140765 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -286,7 +286,8 @@
         # This needs to be parallel_objects-ified
         for chunk in parallel_objects(self.data_source.chunks(
                 chunk_fields, "io")): 
-            mylog.debug("Adding chunk (%s) to tree", chunk.ires.size)
+            mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
+                get_memory_usage()/1024.)
             self._handle_chunk(chunk, fields, tree)
         # Note that this will briefly double RAM usage
         if self.proj_style == "mip":

diff -r 0f91890a2fad5813b6085198a1316d00459c8be6 -r 2b865392b01b1f5458947d629dee2707c8140765 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -246,7 +246,7 @@
         rv = np.empty(self.ires.size, dtype="float64")
         ind = 0
         if ngz == 0:
-            for io_chunk in self.chunks([], "io"):
+            for io_chunk in self.chunks([], "io", cache = False):
                 for i,chunk in enumerate(self.chunks(field, "spatial", ngz = 0)):
                     ind += self._current_chunk.objs[0].select(
                             self.selector, self[field], rv, ind)
@@ -279,8 +279,8 @@
             size = self._count_particles(ftype)
             rv = np.empty(size, dtype="float64")
             ind = 0
-            for io_chunk in self.chunks([], "io"):
-                for i,chunk in enumerate(self.chunks(field, "spatial")):
+            for io_chunk in self.chunks([], "io", cache = False):
+                for i, chunk in enumerate(self.chunks(field, "spatial")):
                     x, y, z = (self[ftype, 'particle_position_%s' % ax]
                                for ax in 'xyz')
                     if x.size == 0: continue
@@ -301,7 +301,7 @@
             if f1 == ftype:
                 return val.size
         size = 0
-        for io_chunk in self.chunks([], "io"):
+        for io_chunk in self.chunks([], "io", cache = False):
             for i,chunk in enumerate(self.chunks([], "spatial")):
                 x, y, z = (self[ftype, 'particle_position_%s' % ax]
                             for ax in 'xyz')

diff -r 0f91890a2fad5813b6085198a1316d00459c8be6 -r 2b865392b01b1f5458947d629dee2707c8140765 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -180,7 +180,7 @@
                 g = og
             yield YTDataChunk(dobj, "spatial", [g], None)
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         """
         Since subsets are calculated per domain,
         i.e. per file, yield each domain at a time to
@@ -189,7 +189,8 @@
         """
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], None)
+            yield YTDataChunk(dobj, "io", [subset], None,
+                              cache = cache)
 
 
 class ARTStaticOutput(StaticOutput):

diff -r 0f91890a2fad5813b6085198a1316d00459c8be6 -r 2b865392b01b1f5458947d629dee2707c8140765 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -232,11 +232,12 @@
     def _chunk_spatial(self, dobj, ngz):
         raise NotImplementedError
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         # _current_chunk is made from identify_base_chunk
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for chunk in oobjs:
-            yield YTDataChunk(dobj, "io", [chunk], self._data_size)
+            yield YTDataChunk(dobj, "io", [chunk], self._data_size,
+                              cache = cache)
 
     def _read_fluid_fields(self, fields, dobj, chunk=None):
         if len(fields) == 0:

diff -r 0f91890a2fad5813b6085198a1316d00459c8be6 -r 2b865392b01b1f5458947d629dee2707c8140765 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -212,12 +212,13 @@
     def _setup_data_io(self):
         self.io = io_registry[self.data_style](self.parameter_file)
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         # We'll take the max of 16 and the number of processors
         nl = max(16, ytcfg.getint("yt", "__topcomm_parallel_size"))
         for gs in list_chunks(gobjs, nl):
-            yield YTDataChunk(dobj, "io", gs, self._count_selection)
+            yield YTDataChunk(dobj, "io", gs, self._count_selection,
+                              cache = cache)
 
 class FLASHStaticOutput(StaticOutput):
     _hierarchy_class = FLASHHierarchy

diff -r 0f91890a2fad5813b6085198a1316d00459c8be6 -r 2b865392b01b1f5458947d629dee2707c8140765 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -348,10 +348,10 @@
                 g = og
             yield YTDataChunk(dobj, "spatial", [g], None)
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], None)
+            yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
 
 class RAMSESStaticOutput(StaticOutput):
     _hierarchy_class = RAMSESGeometryHandler

diff -r 0f91890a2fad5813b6085198a1316d00459c8be6 -r 2b865392b01b1f5458947d629dee2707c8140765 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -403,8 +403,9 @@
         if len(fields_to_read) == 0:
             return {}, fields_to_generate
         fields_to_return = self.io._read_particle_selection(
-                    self._chunk_io(dobj), selector,
-                    fields_to_read)
+            self._chunk_io(dobj, cache = False),
+            selector,
+            fields_to_read)
         for field in fields_to_read:
             ftype, fname = field
             finfo = self.pf._get_field_info(*field)
@@ -425,10 +426,11 @@
         fields_to_read, fields_to_generate = self._split_fields(fields)
         if len(fields_to_read) == 0:
             return {}, fields_to_generate
-        fields_to_return = self.io._read_fluid_selection(self._chunk_io(dobj),
-                                                   selector,
-                                                   fields_to_read,
-                                                   chunk_size)
+        fields_to_return = self.io._read_fluid_selection(
+            self._chunk_io(dobj, cache = False),
+            selector,
+            fields_to_read,
+            chunk_size)
         for field in fields_to_read:
             ftype, fname = field
             conv_factor = self.pf.field_info[fname]._convert_function(self)
@@ -470,7 +472,7 @@
 class YTDataChunk(object):
 
     def __init__(self, dobj, chunk_type, objs, data_size = None,
-                 field_type = None, cache = True):
+                 field_type = None, cache = False):
         self.dobj = dobj
         self.chunk_type = chunk_type
         self.objs = objs

diff -r 0f91890a2fad5813b6085198a1316d00459c8be6 -r 2b865392b01b1f5458947d629dee2707c8140765 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -245,16 +245,16 @@
             dobj.size = self._count_selection(dobj)
         if getattr(dobj, "shape", None) is None:
             dobj.shape = (dobj.size,)
-        dobj._current_chunk = list(self._chunk_all(dobj))[0]
+        dobj._current_chunk = list(self._chunk_all(dobj, cache = False))[0]
 
     def _count_selection(self, dobj, grids = None):
         if grids is None: grids = dobj._chunk_info
         count = sum((g.count(dobj.selector) for g in grids))
         return count
 
-    def _chunk_all(self, dobj):
+    def _chunk_all(self, dobj, cache = True):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        yield YTDataChunk(dobj, "all", gobjs, dobj.size)
+        yield YTDataChunk(dobj, "all", gobjs, dobj.size, cache)
         
     def _chunk_spatial(self, dobj, ngz, sort = None):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
@@ -271,13 +271,16 @@
                 g = og
             size = self._count_selection(dobj, [og])
             if size == 0: continue
-            yield YTDataChunk(dobj, "spatial", [g], size)
+            # We don't want to cache any of the masks or icoords or fcoords for
+            # individual grids.
+            yield YTDataChunk(dobj, "spatial", [g], size, cache = False)
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         gfiles = defaultdict(list)
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for g in gobjs:
             gfiles[g.filename].append(g)
         for fn in sorted(gfiles):
             gs = gfiles[fn]
-            yield YTDataChunk(dobj, "io", gs, self._count_selection(dobj, gs))
+            yield YTDataChunk(dobj, "io", gs, self._count_selection(dobj, gs),
+                              cache = cache)

diff -r 0f91890a2fad5813b6085198a1316d00459c8be6 -r 2b865392b01b1f5458947d629dee2707c8140765 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -170,10 +170,10 @@
                 g = og
             yield YTDataChunk(dobj, "spatial", [g])
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], None)
+            yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
 
 class ParticleDataChunk(YTDataChunk):
     def __init__(self, oct_handler, regions, *args, **kwargs):
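
The contextmanager mused about in the commit message might look something like
the following -- purely hypothetical, sketched against the _cache attribute
introduced above:

    import contextlib

    @contextlib.contextmanager
    def cached_coordinates(chunk):
        # Hypothetical: force caching on inside the block so repeated
        # coordinate traversals are preserved, then restore the old flag.
        old = chunk._cache
        chunk._cache = True
        try:
            yield chunk
        finally:
            chunk._cache = old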


https://bitbucket.org/yt_analysis/yt/commits/c7d14681b421/
Changeset:   c7d14681b421
Branch:      yt-3.0
User:        drudd
Date:        2013-04-16 16:30:47
Summary:     Merged yt_analysis/yt-3.0 into yt-3.0
Affected #:  3 files

diff -r 3a1a2a95457855b229396b14b67aeba15d2a3f27 -r c7d14681b421ad536caf21b5c7e5201a666f7aaa yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -55,7 +55,7 @@
         ptypes = list(set([ftype for ftype, fname in fields]))
         fields = list(set(fields))
         if len(ptypes) > 1: raise NotImplementedError
-        pfields = [(ptypes[0], "position_%s" % ax) for ax in 'xyz']
+        pfields = [(ptypes[0], "particle_position_%s" % ax) for ax in 'xyz']
         size = 0
         for chunk in chunks:
             data = self._read_chunk_data(chunk, pfields, 'active', 

diff -r 3a1a2a95457855b229396b14b67aeba15d2a3f27 -r c7d14681b421ad536caf21b5c7e5201a666f7aaa yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -116,6 +116,9 @@
             self.particle_field_offsets = {}
             return
         f = open(self.part_fn, "rb")
+        f.seek(0, os.SEEK_END)
+        flen = f.tell()
+        f.seek(0)
         hvals = {}
         attrs = ( ('ncpu', 1, 'I'),
                   ('ndim', 1, 'I'),
@@ -143,12 +146,15 @@
         if hvals["nstar_tot"] > 0:
             particle_fields += [("particle_age", "d"),
                                 ("particle_metallicity", "d")]
-        field_offsets = {particle_fields[0][0]: f.tell()}
-        for field, vtype in particle_fields[1:]:
+        field_offsets = {}
+        _pfields = {}
+        for field, vtype in particle_fields:
+            if f.tell() >= flen: break
+            field_offsets[field] = f.tell()
+            _pfields[field] = vtype
             fpu.skip(f, 1)
-            field_offsets[field] = f.tell()
         self.particle_field_offsets = field_offsets
-        self.particle_field_types = dict(particle_fields)
+        self.particle_field_types = _pfields
 
     def _read_amr_header(self):
         hvals = {}

diff -r 3a1a2a95457855b229396b14b67aeba15d2a3f27 -r c7d14681b421ad536caf21b5c7e5201a666f7aaa yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -91,6 +91,8 @@
     "particle_mass",
     "particle_identifier",
     "particle_refinement_level",
+    "particle_age",
+    "particle_metallicity",
 ]
 
 for f in known_ramses_particle_fields:


https://bitbucket.org/yt_analysis/yt/commits/1132bce09de4/
Changeset:   1132bce09de4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-29 18:24:44
Summary:     Adding default storage_filename
Affected #:  1 file

diff -r 0d87f2e70ec6ddb462527ec5eafb25c41a88d5e9 -r 1132bce09de4ea1912b0aadcec4b936b18dbe4a2 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -60,6 +60,7 @@
     geometry = "cartesian"
     coordinates = None
     max_level = 99
+    storage_filename = None
 
     class __metaclass__(type):
         def __init__(cls, name, b, d):


https://bitbucket.org/yt_analysis/yt/commits/24915104ff16/
Changeset:   24915104ff16
Branch:      yt-3.0
User:        samskillman
Date:        2013-06-29 19:08:55
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #49)

Adding a parallel ring iterator
Affected #:  1 file

diff -r 1132bce09de4ea1912b0aadcec4b936b18dbe4a2 -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -60,7 +60,8 @@
             float32 = MPI.FLOAT,
             float64 = MPI.DOUBLE,
             int32   = MPI.INT,
-            int64   = MPI.LONG
+            int64   = MPI.LONG,
+            c       = MPI.CHAR,
     )
     op_names = dict(
         sum = MPI.SUM,
@@ -73,7 +74,8 @@
             float32 = "MPI.FLOAT",
             float64 = "MPI.DOUBLE",
             int32   = "MPI.INT",
-            int64   = "MPI.LONG"
+            int64   = "MPI.LONG",
+            c       = "MPI.CHAR",
     )
     op_names = dict(
             sum = "MPI.SUM",
@@ -458,6 +460,116 @@
     if barrier:
         my_communicator.barrier()
 
+def parallel_ring(objects, generator_func, mutable = False):
+    r"""This function loops in a ring around a set of objects, yielding the
+    results of generator_func and passing from one processor to another to
+    avoid IO or expensive computation.
+
+    This function is designed to operate in sequence on a set of objects, where
+    the creation of those objects might be expensive.  For instance, this could
+    be a set of particles that are costly to read from disk.  Processor N will
+    run generator_func on an object, and the results of that will both be
+    yielded and passed to processor N-1.  If the length of the objects is not
+    equal to the number of processors, then the final processor in the top
+    communicator will re-generate the data as needed.
+
+    In all likelihood, this function will only be useful internally to yt.
+
+    Parameters
+    ----------
+    objects : iterable
+        The list of objects to operate on.
+    generator_func : callable
+        This function will be called on each object, and the results yielded.
+        It must return a single NumPy array; for multiple values, it needs to
+        have a custom dtype.
+    mutable : bool
+        Should the arrays be considered mutable?  Currently, this will only
+        work if the number of processors equals the number of objects.
+
+    Examples
+    --------
+    Here is a simple example of a ring loop around a set of integers, with a
+    custom dtype.
+
+    >>> dt = numpy.dtype([('x', 'float64'), ('y', 'float64'), ('z', 'float64')])
+    >>> def gfunc(o):
+    ...     numpy.random.seed(o)
+    ...     rv = numpy.empty(1000, dtype=dt)
+    ...     rv['x'] = numpy.random.random(1000)
+    ...     rv['y'] = numpy.random.random(1000)
+    ...     rv['z'] = numpy.random.random(1000)
+    ...     return rv
+    ...
+    >>> obj = range(8)
+    >>> for obj, arr in parallel_ring(obj, gfunc):
+    ...     print arr['x'].sum(), arr['y'].sum(), arr['z'].sum()
+    ...
+
+    """
+    if mutable: raise NotImplementedError
+    my_comm = communication_system.communicators[-1]
+    my_size = my_comm.size
+    my_rank = my_comm.rank # This will also be the first object we access
+    if not parallel_capable and not mutable:
+        for obj in objects:
+            yield obj, generator_func(obj)
+        return
+    generate_endpoints = len(objects) != my_size
+    # gback False: send the object backwards
+    # gforw False: receive an object from forwards
+    if len(objects) == my_size:
+        generate_endpoints = False
+        gback = False
+        gforw = False
+    else:
+        # In this case, the first processor (my_rank == 0) will generate.
+        generate_endpoints = True
+        gback = (my_rank == 0)
+        gforw = (my_rank == my_size - 1)
+    if generate_endpoints and mutable:
+        raise NotImplementedError
+    # Now we need to do pairwise sends
+    source = (my_rank + 1) % my_size
+    dest = (my_rank - 1) % my_size
+    oiter = itertools.islice(itertools.cycle(objects),
+                             my_rank, my_rank+len(objects))
+    idata = None
+    isize = np.zeros((1,), dtype="int64")
+    osize = np.zeros((1,), dtype="int64")
+    for obj in oiter:
+        if idata is None or gforw:
+            idata = generator_func(obj)
+            idtype = odtype = idata.dtype
+            if get_mpi_type(idtype) is None:
+                idtype = 'c'
+        yield obj, idata
+        # We first send to the previous processor
+        tags = []
+        if not gforw:
+            tags.append(my_comm.mpi_nonblocking_recv(isize, source))
+        if not gback:
+            osize[0] = idata.size
+            tags.append(my_comm.mpi_nonblocking_send(osize, dest))
+        my_comm.mpi_Request_Waitall(tags)
+        odata = idata
+        tags = []
+        if not gforw:
+            idata = np.empty(isize[0], dtype=odtype)
+            tags.append(my_comm.mpi_nonblocking_recv(
+                          idata.view(idtype), source, dtype=idtype))
+        if not gback:
+            tags.append(my_comm.mpi_nonblocking_send(
+                          odata.view(idtype), dest, dtype=idtype))
+        my_comm.mpi_Request_Waitall(tags)
+        del odata
+
 class CommunicationSystem(object):
     communicators = []
 


https://bitbucket.org/yt_analysis/yt/commits/559bca49e90f/
Changeset:   559bca49e90f
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-07-01 23:40:29
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #54)

Octree diet
Affected #:  45 files

diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -2495,7 +2495,7 @@
             if dm_only:
                 select = self._get_dm_indices()
                 total_mass = \
-                    self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
+                    self.comm.mpi_allreduce((self._data_source['all', "ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
             else:
                 total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("ParticleMassMsun")[0], op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles

diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/analysis_modules/halo_finding/hop/hop_hop.c
--- a/yt/analysis_modules/halo_finding/hop/hop_hop.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_hop.c
@@ -443,7 +443,7 @@
 	    /* Else, this slot was full, go to the next one */
 	    hp++;
 	    if (hp>=smx->hash+smx->nHashLength) hp = smx->hash;
-	    if (++count>1000) {
+	    if (++count>1000000) {
 		fprintf(stderr,"Hash Table is too full.\n");
 		exit(1);
 	    }

diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -109,16 +109,19 @@
         """
         # Pick out the stars.
         if self.mode == 'data_source':
-            ct = self._data_source["creation_time"]
+            ct = self._data_source["stars","particle_age"]
+            if ct == None :
+                print 'data source must have particle_age!'
+                sys.exit(1)
             ct_stars = ct[ct > 0]
-            mass_stars = self._data_source["ParticleMassMsun"][ct > 0]
+            mass_stars = self._data_source["stars", "ParticleMassMsun"][ct > 0]
         elif self.mode == 'provided':
             ct_stars = self.star_creation_time
             mass_stars = self.star_mass
         # Find the oldest stars in units of code time.
         tmin= min(ct_stars)
         # Multiply the end to prevent numerical issues.
-        self.time_bins = np.linspace(tmin*0.99, self._pf.current_time,
+        self.time_bins = np.linspace(tmin*1.01, self._pf.current_time,
             num = self.bin_count + 1)
         # Figure out which bins the stars go into.
         inds = np.digitize(ct_stars, self.time_bins) - 1
@@ -131,7 +134,7 @@
         for index in xrange(self.bin_count):
             self.cum_mass_bins[index+1] += self.cum_mass_bins[index]
         # We will want the time taken between bins.
-        self.time_bins_dt = self.time_bins[1:] - self.time_bins[:-1]
+        self.time_bins_dt = self.time_bins[:-1] - self.time_bins[1:]
     
     def attach_arrays(self):
         """
@@ -147,7 +150,7 @@
                 vol = ds.volume('mpc')
         elif self.mode == 'provided':
             vol = self.volume
-        tc = self._pf["Time"]
+        tc = self._pf["Time"] #time to seconds?
         self.time = []
         self.lookback_time = []
         self.redshift = []
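
As context for the binning logic retained above: np.digitize returns, for each
value, the 1-based index of the interval between bin edges it falls in, hence
the trailing "- 1".  For instance:

    import numpy as np
    edges = np.linspace(0.0, 1.0, 5)     # five edges -> four bins
    vals = np.array([0.1, 0.3, 0.9])
    print np.digitize(vals, edges) - 1   # -> [0 1 3]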

diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -286,7 +286,7 @@
         # This needs to be parallel_objects-ified
         for chunk in parallel_objects(self.data_source.chunks(
                 chunk_fields, "io")): 
-            mylog.debug("Adding chunk (%s) to tree", chunk.size)
+            mylog.debug("Adding chunk (%s) to tree", chunk.ires.size)
             self._handle_chunk(chunk, fields, tree)
         # Note that this will briefly double RAM usage
         if self.proj_style == "mip":
@@ -310,7 +310,6 @@
         np.multiply(py, self.pf.domain_width[y_dict[self.axis]], py)
         np.add(py, oy, py)
         np.multiply(pdy, self.pf.domain_width[y_dict[self.axis]], pdy)
-
         if self.weight_field is not None:
             np.divide(nvals, nwvals[:,None], nvals)
         if self.weight_field is None:
@@ -345,7 +344,7 @@
             dl = 1.0
         else:
             dl = chunk.fwidth[:, self.axis]
-        v = np.empty((chunk.size, len(fields)), dtype="float64")
+        v = np.empty((chunk.ires.size, len(fields)), dtype="float64")
         for i in range(len(fields)):
             v[:,i] = chunk[fields[i]] * dl
         if self.weight_field is not None:
@@ -353,7 +352,7 @@
             np.multiply(v, w[:,None], v)
             np.multiply(w, dl, w)
         else:
-            w = np.ones(chunk.size, dtype="float64")
+            w = np.ones(chunk.ires.size, dtype="float64")
         icoords = chunk.icoords
         i1 = icoords[:,x_dict[self.axis]]
         i2 = icoords[:,y_dict[self.axis]]
@@ -420,6 +419,38 @@
         self._setup_data_source()
 
     @property
+    def icoords(self):
+        ic = np.indices(self.ActiveDimensions).astype("int64")
+        return np.column_stack([i.ravel() + gi for i, gi in
+            zip(ic, self.get_global_startindex())])
+
+    @property
+    def fwidth(self):
+        fw = np.ones((self.ActiveDimensions.prod(), 3), dtype="float64")
+        fw *= self.dds
+        return fw
+
+    @property
+    def fcoords(self):
+        LE = self.LeftEdge + self.dds/2.0
+        RE = self.RightEdge - self.dds/2.0
+        N = self.ActiveDimensions
+        fc = np.mgrid[LE[0]:RE[0]:N[0]*1j,
+                      LE[1]:RE[1]:N[1]*1j,
+                      LE[2]:RE[2]:N[2]*1j]
+        return np.column_stack([f.ravel() for f in fc])
+
+    @property
+    def ires(self):
+        tr = np.ones(self.ActiveDimensions.prod(), dtype="int64")
+        tr *= self.level
+        return tr
+
+    def _reshape_vals(self, arr):
+        if len(arr.shape) == 3: return arr
+        return arr.reshape(self.ActiveDimensions, order="C")
+
+    @property
     def shape(self):
         return tuple(self.ActiveDimensions.tolist())
 
@@ -510,7 +541,7 @@
         op.initialize()
         op.process_grid(self, positions, fields)
         vals = op.finalize()
-        return vals.reshape(self.ActiveDimensions, order="F")
+        return vals.reshape(self.ActiveDimensions, order="C")
 
 class YTArbitraryGridBase(YTCoveringGridBase):
     """A 3D region with arbitrary bounds and dimensions.

diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -243,33 +243,23 @@
         return rv
 
     def _generate_spatial_fluid(self, field, ngz):
-        rv = np.empty(self.size, dtype="float64")
+        rv = np.empty(self.ires.size, dtype="float64")
         ind = 0
         if ngz == 0:
             for io_chunk in self.chunks([], "io"):
                 for i,chunk in enumerate(self.chunks(field, "spatial", ngz = 0)):
-                    mask = self._current_chunk.objs[0].select(self.selector)
-                    if mask is None: continue
-                    data = self[field]
-                    if len(data.shape) == 4:
-                        # This is how we keep it consistent between oct ordering
-                        # and grid ordering.
-                        data = data.T[mask.T]
-                    else:
-                        data = data[mask]
-                    rv[ind:ind+data.size] = data
-                    ind += data.size
+                    ind += self._current_chunk.objs[0].select(
+                            self.selector, self[field], rv, ind)
         else:
             chunks = self.hierarchy._chunk(self, "spatial", ngz = ngz)
             for i, chunk in enumerate(chunks):
                 with self._chunked_read(chunk):
                     gz = self._current_chunk.objs[0]
                     wogz = gz._base_grid
-                    mask = wogz.select(self.selector)
-                    if mask is None: continue
-                    data = gz[field][ngz:-ngz, ngz:-ngz, ngz:-ngz][mask]
-                    rv[ind:ind+data.size] = data
-                    ind += data.size
+                    ind += wogz.select(
+                        self.selector,
+                        gz[field][ngz:-ngz, ngz:-ngz, ngz:-ngz],
+                        rv, ind)
         return rv
 
     def _generate_particle_field(self, field):
@@ -418,9 +408,10 @@
     def blocks(self):
         for io_chunk in self.chunks([], "io"):
             for i,chunk in enumerate(self.chunks([], "spatial", ngz = 0)):
-                mask = self._current_chunk.objs[0].select(self.selector)
+                g = self._current_chunk.objs[0]
+                mask = g._get_selector_mask(self.selector)
                 if mask is None: continue
-                yield self._current_chunk.objs[0], mask
+                yield g, mask
 
 class GenerationInProgress(Exception):
     def __init__(self, fields):
@@ -432,8 +423,6 @@
     _sort_by = None
     _selector = None
     _current_chunk = None
-    size = None
-    shape = None
 
     def __init__(self, *args, **kwargs):
         super(YTSelectionContainer, self).__init__(*args, **kwargs)
@@ -551,16 +540,10 @@
         # There are several items that need to be swapped out
         # field_data, size, shape
         old_field_data, self.field_data = self.field_data, YTFieldData()
-        old_size, self.size = self.size, chunk.data_size
         old_chunk, self._current_chunk = self._current_chunk, chunk
         old_locked, self._locked = self._locked, False
-        if not self._spatial:
-            self.shape = (self.size,)
         yield
         self.field_data = old_field_data
-        self.size = old_size
-        if not self._spatial:
-            self.shape = (old_size,)
         self._current_chunk = old_chunk
         self._locked = old_locked
 

diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -264,9 +264,18 @@
                 lambda: np.ones((nd * nd * nd), dtype='float64')
                 + 1e-4*np.random.random((nd * nd * nd)))
 
+    def _reshape_vals(self, arr):
+        if not self._spatial: return arr
+        if len(arr.shape) == 3: return arr
+        return arr.reshape(self.ActiveDimensions, order="C")
+
     def __missing__(self, item):
-        if hasattr(self.pf, "field_info") and isinstance(item, tuple):
-            finfo = self.pf._get_field_info(*item)
+        if hasattr(self.pf, "field_info"):
+            if not isinstance(item, tuple):
+                field = ("unknown", item)
+            else:
+                field = item
+            finfo = self.pf._get_field_info(*field)
         else:
             FI = getattr(self.pf, "field_info", FieldInfo)
             if item in FI:
@@ -278,7 +287,7 @@
                 vv = finfo(self)
             except NeedsGridType as exc:
                 ngz = exc.ghost_zones
-                nfd = FieldDetector(self.nd + ngz * 2)
+                nfd = FieldDetector(self.nd + ngz * 2, pf = self.pf)
                 nfd._num_ghost_zones = ngz
                 vv = finfo(nfd)
                 if ngz > 0: vv = vv[ngz:-ngz, ngz:-ngz, ngz:-ngz]

diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -113,6 +113,10 @@
     def shape(self):
         return self.ActiveDimensions
 
+    def _reshape_vals(self, arr):
+        if len(arr.shape) == 3: return arr
+        return arr.reshape(self.ActiveDimensions, order="C")
+
     def _generate_container_field(self, field):
         if self._current_chunk is None:
             self.hierarchy._identify_base_chunk(self)
@@ -441,14 +445,14 @@
         return new_field
 
     def select_icoords(self, dobj):
-        mask = self.select(dobj.selector)
+        mask = self._get_selector_mask(dobj.selector)
         if mask is None: return np.empty((0,3), dtype='int64')
         coords = convert_mask_to_indices(mask, mask.sum())
         coords += self.get_global_startindex()[None, :]
         return coords
 
     def select_fcoords(self, dobj):
-        mask = self.select(dobj.selector)
+        mask = self._get_selector_mask(dobj.selector)
         if mask is None: return np.empty((0,3), dtype='float64')
         coords = convert_mask_to_indices(mask, mask.sum()).astype("float64")
         coords += 0.5
@@ -457,15 +461,15 @@
         return coords
 
     def select_fwidth(self, dobj):
-        mask = self.select(dobj.selector)
-        if mask is None: return np.empty((0,3), dtype='float64')
-        coords = np.empty((mask.sum(), 3), dtype='float64')
+        count = self.count(dobj.selector)
+        if count == 0: return np.empty((0,3), dtype='float64')
+        coords = np.empty((count, 3), dtype='float64')
         for axis in range(3):
             coords[:,axis] = self.dds[axis]
         return coords
 
     def select_ires(self, dobj):
-        mask = self.select(dobj.selector)
+        mask = self._get_selector_mask(dobj.selector)
         if mask is None: return np.empty(0, dtype='int64')
         coords = np.empty(mask.sum(), dtype='int64')
         coords[:] = self.Level
@@ -484,21 +488,27 @@
         op.initialize()
         op.process_grid(self, positions, fields)
         vals = op.finalize()
-        return vals.reshape(self.ActiveDimensions, order="F")
+        return vals.reshape(self.ActiveDimensions, order="C")
 
-    def select(self, selector):
+    def _get_selector_mask(self, selector):
         if id(selector) == self._last_selector_id:
-            return self._last_mask
-        self._last_mask = selector.fill_mask(self)
-        self._last_selector_id = id(selector)
-        return self._last_mask
+            mask = self._last_mask
+        else:
+            self._last_mask = mask = selector.fill_mask(self)
+            self._last_selector_id = id(selector)
+        return mask
+
+    def select(self, selector, source, dest, offset):
+        mask = self._get_selector_mask(selector)
+        count = self.count(selector)
+        if count == 0: return 0
+        dest[offset:offset+count] = source[mask]
+        return count
 
     def count(self, selector):
-        if id(selector) == self._last_selector_id:
-            if self._last_mask is None: return 0
-            return self._last_mask.sum()
-        self.select(selector)
-        return self.count(selector)
+        mask = self._get_selector_mask(selector)
+        if mask is None: return 0
+        return mask.sum()
 
     def count_particles(self, selector, x, y, z):
         # We don't cache the selector results
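
The new select(selector, source, dest, offset) signature lets callers fill one
preallocated output array across many grids, which is exactly what the
rewritten _generate_spatial_fluid above does.  A schematic of that
accumulation pattern (fill_from_chunks is hypothetical):

    import numpy as np

    def fill_from_chunks(chunks, size):
        # chunks: iterable of (source_array, boolean_mask) pairs.
        rv = np.empty(size, dtype="float64")
        ind = 0
        for source, mask in chunks:
            count = mask.sum()
            rv[ind:ind + count] = source[mask]
            ind += count
        return rv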

diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -36,6 +36,7 @@
     NeedsProperty, \
     NeedsParameter
 import yt.geometry.particle_deposit as particle_deposit
+from yt.funcs import *
 
 class OctreeSubset(YTSelectionContainer):
     _spatial = True
@@ -43,28 +44,25 @@
     _num_zones = 2
     _type_name = 'octree_subset'
     _skip_add = True
-    _con_args = ('domain', 'mask', 'cell_count')
+    _con_args = ('base_region', 'domain', 'pf')
     _container_fields = ("dx", "dy", "dz")
+    _domain_offset = 0
+    _num_octs = -1
 
-    def __init__(self, domain, mask, cell_count):
+    def __init__(self, base_region, domain, pf):
         self.field_data = YTFieldData()
         self.field_parameters = {}
-        self.mask = mask
         self.domain = domain
+        self.domain_id = domain.domain_id
         self.pf = domain.pf
         self.hierarchy = self.pf.hierarchy
-        self.oct_handler = domain.pf.h.oct_handler
-        self.cell_count = cell_count
-        level_counts = self.oct_handler.count_levels(
-            self.domain.pf.max_level, self.domain.domain_id, mask)
-        assert(level_counts.sum() == cell_count)
-        level_counts[1:] = level_counts[:-1]
-        level_counts[0] = 0
-        self.level_counts = np.add.accumulate(level_counts)
+        self.oct_handler = domain.oct_handler
         self._last_mask = None
         self._last_selector_id = None
         self._current_particle_type = 'all'
         self._current_fluid_type = self.pf.default_fluid_type
+        self.base_region = base_region
+        self.base_selector = base_region.selector
 
     def _generate_container_field(self, field):
         if self._current_chunk is None:
@@ -75,31 +73,8 @@
             return self._current_chunk.fwidth[:,1]
         elif field == "dz":
             return self._current_chunk.fwidth[:,2]
-
-    def select_icoords(self, dobj):
-        return self.oct_handler.icoords(self.domain.domain_id, self.mask,
-                                        self.cell_count,
-                                        self.level_counts.copy())
-
-    def select_fcoords(self, dobj):
-        return self.oct_handler.fcoords(self.domain.domain_id, self.mask,
-                                        self.cell_count,
-                                        self.level_counts.copy())
-
-    def select_fwidth(self, dobj):
-        # Recall domain_dimensions is the number of cells, not octs
-        base_dx = (self.domain.pf.domain_width /
-                   self.domain.pf.domain_dimensions)
-        widths = np.empty((self.cell_count, 3), dtype="float64")
-        dds = (2**self.select_ires(dobj))
-        for i in range(3):
-            widths[:,i] = base_dx[i] / dds
-        return widths
-
-    def select_ires(self, dobj):
-        return self.oct_handler.ires(self.domain.domain_id, self.mask,
-                                     self.cell_count,
-                                     self.level_counts.copy())
+        else:
+            raise RuntimeError
 
     def __getitem__(self, key):
         tr = super(OctreeSubset, self).__getitem__(key)
@@ -117,9 +92,11 @@
         return tr
 
     def _reshape_vals(self, arr):
+        if len(arr.shape) == 4: return arr
         nz = self._num_zones + 2*self._num_ghost_zones
         n_oct = arr.shape[0] / (nz**3.0)
         arr = arr.reshape((nz, nz, nz, n_oct), order="F")
+        arr = np.asfortranarray(arr)
         return arr
 
     _domain_ind = None
@@ -127,7 +104,7 @@
     @property
     def domain_ind(self):
         if self._domain_ind is None:
-            di = self.oct_handler.domain_ind(self.mask, self.domain.domain_id)
+            di = self.oct_handler.domain_ind(self.selector)
             self._domain_ind = di
         return self._domain_ind
 
@@ -136,22 +113,52 @@
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
-        nvals = (self.domain_ind >= 0).sum() * 8
+        nvals = (2, 2, 2, (self.domain_ind >= 0).sum())
         op = cls(nvals) # We allocate number of zones, not number of octs
         op.initialize()
+        mylog.debug("Depositing %s particles into %s Octs",
+            positions.shape[0], nvals[-1])
         op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
-                          self.domain.domain_id)
+            self.domain_id, self._domain_offset)
         vals = op.finalize()
-        return self._reshape_vals(vals)
+        return np.asfortranarray(vals)
 
-    def select(self, selector):
-        if id(selector) == self._last_selector_id:
-            return self._last_mask
-        self._last_mask = self.oct_handler.domain_mask(
-                self.mask, self.domain.domain_id)
-        if self._last_mask.sum() == 0: return None
-        self._last_selector_id = id(selector)
-        return self._last_mask
+    def select_icoords(self, dobj):
+        d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id,
+                                     num_octs = self._num_octs)
+        self._num_octs = d.shape[0] / 8
+        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
+                                            domain_id = self.domain_id)
+        return tr
+
+    def select_fcoords(self, dobj):
+        d = self.oct_handler.fcoords(self.selector, domain_id = self.domain_id,
+                                     num_octs = self._num_octs)
+        self._num_octs = d.shape[0] / 8
+        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
+                                            domain_id = self.domain_id)
+        return tr
+
+    def select_fwidth(self, dobj):
+        d = self.oct_handler.fwidth(self.selector, domain_id = self.domain_id,
+                                  num_octs = self._num_octs)
+        self._num_octs = d.shape[0] / 8
+        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
+                                            domain_id = self.domain_id)
+        return tr
+
+    def select_ires(self, dobj):
+        d = self.oct_handler.ires(self.selector, domain_id = self.domain_id,
+                                  num_octs = self._num_octs)
+        self._num_octs = d.shape[0] / 8
+        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 1,
+                                            domain_id = self.domain_id)
+        return tr
+
+    def select(self, selector, source, dest, offset):
+        n = self.oct_handler.selector_fill(selector, source, dest, offset,
+                                           domain_id = self.domain_id)
+        return n
 
     def count(self, selector):
         if id(selector) == self._last_selector_id:
@@ -168,3 +175,30 @@
     def select_particles(self, selector, x, y, z):
         mask = selector.select_points(x,y,z)
         return mask
+
+class ParticleOctreeSubset(OctreeSubset):
+    # Subclassing OctreeSubset is somewhat dubious.
+    # This is some subset of an octree.  Note that a union of octree subsets
+    # may include the same data files more than once.  While we can attempt
+    # to mitigate this, it's unavoidable for many types of data storage on disk.
+    _type_name = 'particle_octree_subset'
+    _con_args = ('data_files', 'pf', 'min_ind', 'max_ind')
+    domain_id = -1
+    def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0):
+        # The first attempt at this will not work in parallel.
+        self.data_files = data_files
+        self.field_data = YTFieldData()
+        self.field_parameters = {}
+        self.pf = pf
+        self.hierarchy = self.pf.hierarchy
+        self.oct_handler = pf.h.oct_handler
+        self.min_ind = min_ind
+        if max_ind == 0: max_ind = (1 << 63)
+        self.max_ind = max_ind
+        self._last_mask = None
+        self._last_selector_id = None
+        self._current_particle_type = 'all'
+        self._current_fluid_type = self.pf.default_fluid_type
+        self.base_region = base_region
+        self.base_selector = base_region.selector
+

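For reference, the reworked _reshape_vals above turns a flat per-cell array
into an (nz, nz, nz, n_oct) Fortran-ordered block, where nz is the number of
zones along each oct edge, so each oct's cells can be indexed directly. A
minimal standalone sketch of the same reshape (the names here are
illustrative, not yt API):

    import numpy as np

    nz = 2                    # zones per oct edge, no ghost zones
    n_oct = 5                 # octs in this subset
    flat = np.arange(nz**3 * n_oct, dtype="float64")

    # Same transformation as _reshape_vals: the oct index becomes the
    # slowest-varying (last) axis, so each oct's 8 cells stay contiguous.
    vals = np.asfortranarray(flat.reshape((nz, nz, nz, n_oct), order="F"))

    assert vals.shape == (2, 2, 2, 5)
    assert vals[1, 0, 0, 0] == flat[1]   # cell (1,0,0) of oct 0
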
diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -69,7 +69,7 @@
     def particle_density(field, data):
         pos = data[ptype, coord_name]
         d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
-        d /= data["CellVolume"]
+        d /= data["gas","CellVolume"]
         return d
 
     registry.add_field(("deposit", "%s_density" % ptype),
@@ -83,7 +83,7 @@
     def particle_cic(field, data):
         pos = data[ptype, coord_name]
         d = data.deposit(pos, [data[ptype, mass_name]], method = "cic")
-        d /= data["CellVolume"]
+        d /= data["gas","CellVolume"]
         return d
 
     registry.add_field(("deposit", "%s_cic" % ptype),
@@ -146,3 +146,4 @@
     registry.add_field((ptype, "Velocities"),
                        function=_get_vec_func(ptype, vel_names),
                        particle_type=True)
+

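The two hunks above only retype the CellVolume lookup to the
("gas", "CellVolume") tuple; the definition itself is unchanged: deposit
particle mass into cells, then divide by cell volume. A rough standalone
sketch of the "sum" variant on a uniform grid (a hypothetical helper, not
yt's deposit machinery):

    import numpy as np

    def deposit_sum_density(pos, mass, left_edge, right_edge, dims):
        """Sum particle mass into cells, then divide by cell volume."""
        dx = (right_edge - left_edge) / dims
        idx = np.floor((pos - left_edge) / dx).astype("int64")
        idx = np.clip(idx, 0, dims - 1)        # cell index of each particle
        grid = np.zeros(dims, dtype="float64")
        np.add.at(grid, tuple(idx.T), mass)    # scatter-add particle masses
        return grid / dx.prod()                # mass -> density

    pos = np.random.random((100, 3))
    rho = deposit_sum_density(pos, np.ones(100), np.zeros(3), np.ones(3),
                              np.array([4, 4, 4]))
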
diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -265,6 +265,8 @@
             self._last_finfo = self.field_info[(ftype, fname)]
             return self._last_finfo
         if fname == self._last_freq[1]:
+            mylog.debug("Guessing field %s is (%s, %s)", fname,
+                        self._last_freq[0], self._last_freq[1])
             return self._last_finfo
         if fname in self.field_info:
             self._last_freq = field
@@ -275,6 +277,8 @@
         if guessing_type and ("all", fname) in self.field_info:
             self._last_freq = ("all", fname)
             self._last_finfo = self.field_info["all", fname]
+            mylog.debug("Guessing field %s is (%s, %s)", fname,
+                        "all", fname)
             return self._last_finfo
         raise YTFieldNotFound((ftype, fname), self)
 

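The added debug messages above fire on the two "guessing" paths of
_get_field_info: reusing the last resolved field type for a bare name, and
falling back to an ("all", fname) entry. Condensed, the fallback order looks
roughly like this (a sketch of the logic, not the exact method):

    def resolve_field(field_info, ftype, fname):
        """Prefer an exact (ftype, fname) match, then fall back to 'all'."""
        if (ftype, fname) in field_info:
            return field_info[ftype, fname]
        if ("all", fname) in field_info:
            # This is the guessed case the new debug line reports.
            return field_info["all", fname]
        raise KeyError((ftype, fname))
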
diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/data_objects/tests/test_derived_quantities.py
--- a/yt/data_objects/tests/test_derived_quantities.py
+++ b/yt/data_objects/tests/test_derived_quantities.py
@@ -50,3 +50,7 @@
         a_std = np.sqrt((ad["CellMass"] * (ad["Density"] - a_mean)**2).sum() / 
                         ad["CellMass"].sum())
         yield assert_rel_equal, my_std, a_std, 12
+
+if __name__ == "__main__":
+    for i in test_extrema():
+        i[0](*i[1:])

diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -85,13 +85,17 @@
 
 def test_all_fields():
     for field in FieldInfo:
-        if field.startswith("CuttingPlane"): continue
-        if field.startswith("particle"): continue
-        if field.startswith("CIC"): continue
-        if field.startswith("WeakLensingConvergence"): continue
-        if field.startswith("DensityPerturbation"): continue
-        if field.startswith("Matter_Density"): continue
-        if field.startswith("Overdensity"): continue
+        if isinstance(field, types.TupleType):
+            fname = field[1]
+        else:
+            fname = field
+        if fname.startswith("CuttingPlane"): continue
+        if fname.startswith("particle"): continue
+        if fname.startswith("CIC"): continue
+        if fname.startswith("WeakLensingConvergence"): continue
+        if fname.startswith("DensityPerturbation"): continue
+        if fname.startswith("Matter_Density"): continue
+        if fname.startswith("Overdensity"): continue
         if FieldInfo[field].particle_type: continue
         for nproc in [1, 4, 8]:
             yield TestFieldAccess(field, nproc)

diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -5,6 +5,8 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
+Author: Chris Moody <chrisemoody at gmail.com>
+Affiliation: UCSC
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2008-2011 Matthew Turk.  All Rights Reserved.
@@ -90,13 +92,16 @@
           display_field=False)
 
 def _Zeros(field, data):
-    return np.zeros(data.shape, dtype='float64')
+    return np.zeros(data["Ones"].shape, dtype='float64')
 add_field("Zeros", function=_Zeros,
           projection_conversion="unitary",
           display_field = False)
 
 def _Ones(field, data):
-    return np.ones(data.shape, dtype='float64')
+    tr = np.ones(data.ires.shape, dtype="float64")
+    if data._spatial:
+        return data._reshape_vals(tr)
+    return tr
 add_field("Ones", function=_Ones,
           projection_conversion="unitary",
           display_field = False)
@@ -871,6 +876,66 @@
 add_field("RadialVelocityKMSABS", function=_RadialVelocityABS,
           convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
 
+def _ParticleRadialVelocity(field, data):
+    normal = data.get_field_parameter('normal')
+    center = data.get_field_parameter('center')
+    bv = data.get_field_parameter("bulk_velocity")
+    pos = "particle_position_%s"
+    pos = np.array([data[pos % ax] for ax in "xyz"])
+    vel = "particle_velocity_%s"
+    vel = np.array([data[vel % ax] for ax in "xyz"])
+    theta = get_sph_theta(pos.copy(), center)
+    phi = get_sph_phi(pos.copy(), center)
+    pos = pos - np.reshape(center, (3, 1))
+    vel = vel - np.reshape(bv, (3, 1))
+    sphr = get_sph_r_component(vel, theta, phi, normal)
+    return sphr
+
+add_field("ParticleRadialVelocity", function=_ParticleRadialVelocity,
+          particle_type=True, units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal"), 
+                      ValidateParameter("center")])
+
+def _ParticleThetaVelocity(field, data):
+    normal = data.get_field_parameter('normal')
+    center = data.get_field_parameter('center')
+    bv = data.get_field_parameter("bulk_velocity")
+    pos = "particle_position_%s"
+    pos = np.array([data[pos % ax] for ax in "xyz"])
+    vel = "particle_velocity_%s"
+    vel = np.array([data[vel % ax] for ax in "xyz"])
+    theta = get_sph_theta(pos.copy(), center)
+    phi = get_sph_phi(pos.copy(), center)
+    pos = pos - np.reshape(center, (3, 1))
+    vel = vel - np.reshape(bv, (3, 1))
+    spht = get_sph_theta_component(vel, theta, phi, normal)
+    return spht
+
+add_field("ParticleThetaVelocity", function=_ParticleThetaVelocity,
+          particle_type=True, units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal"), 
+                      ValidateParameter("center")])
+
+def _ParticlePhiVelocity(field, data):
+    normal = data.get_field_parameter('normal')
+    center = data.get_field_parameter('center')
+    bv = data.get_field_parameter("bulk_velocity")
+    pos = "particle_position_%s"
+    pos = np.array([data[pos % ax] for ax in "xyz"])
+    vel = "particle_velocity_%s"
+    vel = np.array([data[vel % ax] for ax in "xyz"])
+    theta = get_sph_theta(pos.copy(), center)
+    phi = get_sph_phi(pos.copy(), center)
+    pos = pos - np.reshape(center, (3, 1))
+    vel = vel - np.reshape(bv, (3, 1))
+    sphp = get_sph_phi_component(vel, theta, phi, normal)
+    return sphp
+
+add_field("ParticlePhiVelocity", function=_ParticleThetaVelocity,
+          particle_type=True, units=r"\rm{cm}/\rm{s}",
+          validators=[ValidateParameter("normal"), 
+                      ValidateParameter("center")])
+
 def _TangentialVelocity(field, data):
     return np.sqrt(data["VelocityMagnitude"]**2.0
                  - data["RadialVelocity"]**2.0)
@@ -940,13 +1005,6 @@
 add_field("JeansMassMsun",function=_JeansMassMsun,
           units=r"\rm{M_{\odot}}")
 
-# We add these fields so that the field detector can use them
-for field in ["particle_position_%s" % ax for ax in "xyz"]:
-    # This marker should let everyone know not to use the fields, but NullFunc
-    # should do that, too.
-    add_field(field, function=NullFunc, particle_type = True,
-        units=r"UNDEFINED")
-
 def _pdensity(field, data):
     pmass = data[('deposit','all_mass')]
     np.divide(pmass, data["CellVolume"], pmass)

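The new Particle{Radial,Theta,Phi}Velocity fields above all follow the same
recipe: subtract the bulk velocity, then project onto a spherical unit vector
via the get_sph_* helpers. For the radial component this reduces to
v_r = (v - v_bulk) . rhat; a self-contained NumPy sketch that inlines the
geometry instead of calling yt's helpers:

    import numpy as np

    def particle_radial_velocity(pos, vel, center, bulk_velocity):
        """v_r = (v - v_bulk) . rhat for pos/vel arrays of shape (3, N)."""
        rvec = pos - np.reshape(center, (3, 1))
        r = np.sqrt((rvec**2).sum(axis=0))
        rhat = rvec / r                        # unit radial vectors
        dv = vel - np.reshape(bulk_velocity, (3, 1))
        return (dv * rhat).sum(axis=0)         # per-particle dot product

    pos = np.random.random((3, 10))
    vel = np.random.random((3, 10))
    v_r = particle_radial_velocity(pos, vel, np.array([0.5, 0.5, 0.5]),
                                   np.zeros(3))
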
diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -20,7 +20,7 @@
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
-.
+
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
@@ -43,7 +43,7 @@
 from yt.data_objects.octree_subset import \
     OctreeSubset
 from yt.geometry.oct_container import \
-    ARTOctreeContainer
+    OctreeContainer
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, NullFunc
 from .fields import \
@@ -106,21 +106,22 @@
         allocate the requisite memory in the oct tree
         """
         nv = len(self.fluid_field_list)
-        self.domains = [ARTDomainFile(self.parameter_file, l+1, nv, l)
-                        for l in range(self.pf.max_level)]
+        self.oct_handler = OctreeContainer(
+            self.parameter_file.domain_dimensions/2,  # dd is # of root cells
+            self.parameter_file.domain_left_edge,
+            self.parameter_file.domain_right_edge,
+            1)
+        # The 1 here refers to domain_id == 1: ART always has a single domain.
+        self.domains = [ARTDomainFile(self.parameter_file, nv, 
+                                      self.oct_handler, 1)]
         self.octs_per_domain = [dom.level_count.sum() for dom in self.domains]
         self.total_octs = sum(self.octs_per_domain)
-        self.oct_handler = ARTOctreeContainer(
-            self.parameter_file.domain_dimensions/2,  # dd is # of root cells
-            self.parameter_file.domain_left_edge,
-            self.parameter_file.domain_right_edge)
         mylog.debug("Allocating %s octs", self.total_octs)
         self.oct_handler.allocate_domains(self.octs_per_domain)
-        for domain in self.domains:
-            if domain.domain_level == 0:
-                domain._read_amr_root(self.oct_handler)
-            else:
-                domain._read_amr_level(self.oct_handler)
+        domain = self.domains[0]
+        domain._read_amr_root(self.oct_handler)
+        domain._read_amr_level(self.oct_handler)
+        self.oct_handler.finalize()
 
     def _detect_fields(self):
         self.particle_field_list = particle_fields
@@ -154,28 +155,21 @@
         """
         if getattr(dobj, "_chunk_info", None) is None:
             # Get all octs within this oct handler
-            mask = dobj.selector.select_octs(self.oct_handler)
-            if mask.sum() == 0:
-                mylog.debug("Warning: selected zero octs")
-            counts = self.oct_handler.count_cells(dobj.selector, mask)
-            # For all domains, figure out how many counts we have
-            # and build a subset=mask of domains
-            subsets = []
-            for d, c in zip(self.domains, counts):
-                if c < 1:
-                    continue
-                subset = ARTDomainSubset(d, mask, c, d.domain_level)
-                subsets.append(subset)
+            domains = [dom for dom in self.domains if
+                       dom.included(dobj.selector)]
+            base_region = getattr(dobj, "base_region", dobj)
+            if len(domains) > 1:
+                mylog.debug("Identified %s intersecting domains", len(domains))
+            subsets = [ARTDomainSubset(base_region, domain, self.parameter_file)
+                       for domain in domains]
             dobj._chunk_info = subsets
-            dobj.size = sum(counts)
-            dobj.shape = (dobj.size,)
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
 
     def _chunk_all(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         # We pass the chunk both the current chunk and list of chunks,
         # as well as the referring data source
-        yield YTDataChunk(dobj, "all", oobjs, dobj.size)
+        yield YTDataChunk(dobj, "all", oobjs, None)
 
     def _chunk_spatial(self, dobj, ngz, sort = None):
         sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
@@ -184,9 +178,7 @@
                 g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
             else:
                 g = og
-            size = og.cell_count
-            if size == 0: continue
-            yield YTDataChunk(dobj, "spatial", [g], size)
+            yield YTDataChunk(dobj, "spatial", [g], None)
 
     def _chunk_io(self, dobj):
         """
@@ -197,7 +189,7 @@
         """
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], subset.cell_count)
+            yield YTDataChunk(dobj, "io", [subset], None)
 
 
 class ARTStaticOutput(StaticOutput):
@@ -449,11 +441,8 @@
         return False
 
 class ARTDomainSubset(OctreeSubset):
-    def __init__(self, domain, mask, cell_count, domain_level):
-        super(ARTDomainSubset, self).__init__(domain, mask, cell_count)
-        self.domain_level = domain_level
 
-    def fill_root(self, content, ftfields):
+    def fill(self, content, ftfields, selector):
         """
         This is called from IOHandler. It takes content
         which is a binary stream, reads the requested field
@@ -464,49 +453,42 @@
         oct_handler = self.oct_handler
         all_fields = self.domain.pf.h.fluid_field_list
         fields = [f for ft, f in ftfields]
-        level_offset = 0
         field_idxs = [all_fields.index(f) for f in fields]
-        dest = {}
+        source, tr = {}, {}
+        cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
+        levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
+            selector, self.domain_id, cell_count)
         for field in fields:
-            dest[field] = np.zeros(self.cell_count, 'float64')-1.
-        level = self.domain_level
-        source = {}
+            tr[field] = np.zeros(cell_count, 'float64')
         data = _read_root_level(content, self.domain.level_child_offsets,
                                 self.domain.level_count)
-        for field, i in zip(fields, field_idxs):
-            temp = np.reshape(data[i, :], self.domain.pf.domain_dimensions,
-                              order='F').astype('float64').T
-            source[field] = temp
-        level_offset += oct_handler.fill_level_from_grid(
-            self.domain.domain_id,
-            level, dest, source, self.mask, level_offset)
-        return dest
-
-    def fill_level(self, content, ftfields):
-        oct_handler = self.oct_handler
-        fields = [f for ft, f in ftfields]
-        level_offset = 0
-        dest = {}
-        for field in fields:
-            dest[field] = np.zeros(self.cell_count, 'float64')-1.
-        level = self.domain_level
-        no = self.domain.level_count[level]
-        noct_range = [0, no]
-        source = _read_child_level(
-            content, self.domain.level_child_offsets,
-            self.domain.level_offsets,
-            self.domain.level_count, level, fields,
-            self.domain.pf.domain_dimensions,
-            self.domain.pf.parameters['ncell0'],
-            noct_range=noct_range)
-        nocts_filling = noct_range[1]-noct_range[0]
-        level_offset += oct_handler.fill_level(self.domain.domain_id,
-                                               level, dest, source,
-                                               self.mask, level_offset,
-                                               noct_range[0],
-                                               nocts_filling)
-        return dest
-
+        ns = (self.domain.pf.domain_dimensions.prod() / 8, 8)
+        for field, fi in zip(fields, field_idxs):
+            source[field] = np.empty(ns, dtype="float64", order="C")
+            dt = data[fi,:].reshape(self.domain.pf.domain_dimensions,
+                                    order="F")
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        ii = ((k*2)+j)*2+i
+                        source[field][:,ii] = \
+                            dt[i::2,j::2,k::2].ravel(order="F")
+        oct_handler.fill_level(0, levels, cell_inds, file_inds, tr, source)
+        del source
+        # Now we continue with the additional levels.
+        for level in range(1, self.pf.max_level + 1):
+            no = self.domain.level_count[level]
+            noct_range = [0, no]
+            source = _read_child_level(
+                content, self.domain.level_child_offsets,
+                self.domain.level_offsets,
+                self.domain.level_count, level, fields,
+                self.domain.pf.domain_dimensions,
+                self.domain.pf.parameters['ncell0'],
+                noct_range=noct_range)
+            oct_handler.fill_level(level, levels, cell_inds, file_inds, tr,
+                source)
+        return tr
 
 class ARTDomainFile(object):
     """
@@ -518,14 +500,14 @@
     _last_mask = None
     _last_selector_id = None
 
-    def __init__(self, pf, domain_id, nvar, level):
+    def __init__(self, pf, nvar, oct_handler, domain_id):
         self.nvar = nvar
         self.pf = pf
         self.domain_id = domain_id
-        self.domain_level = level
         self._level_count = None
         self._level_oct_offsets = None
         self._level_child_offsets = None
+        self.oct_handler = oct_handler
 
     @property
     def level_count(self):
@@ -533,7 +515,7 @@
         if self._level_count is not None:
             return self._level_count
         self.level_offsets
-        return self._level_count[self.domain_level]
+        return self._level_count
 
     @property
     def level_child_offsets(self):
@@ -573,26 +555,25 @@
         """
         self.level_offsets
         f = open(self.pf._file_amr, "rb")
-        level = self.domain_level
-        unitary_center, fl, iocts, nocts, root_level = _read_art_level_info(
-            f,
-            self._level_oct_offsets, level,
-            coarse_grid=self.pf.domain_dimensions[0],
-            root_level=self.pf.root_level)
-        nocts_check = oct_handler.add(self.domain_id, level, nocts,
-                                      unitary_center, self.domain_id)
-        assert(nocts_check == nocts)
-        mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
-                    nocts, level, oct_handler.nocts)
+        for level in range(1, self.pf.max_level + 1):
+            unitary_center, fl, iocts, nocts, root_level = \
+                _read_art_level_info( f,
+                    self._level_oct_offsets, level,
+                    coarse_grid=self.pf.domain_dimensions[0],
+                    root_level=self.pf.root_level)
+            nocts_check = oct_handler.add(self.domain_id, level,
+                                          unitary_center)
+            assert(nocts_check == nocts)
+            mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
+                        nocts, level, oct_handler.nocts)
 
     def _read_amr_root(self, oct_handler):
         self.level_offsets
         f = open(self.pf._file_amr, "rb")
         # add the root *cell* not *oct* mesh
-        level = self.domain_level
         root_octs_side = self.pf.domain_dimensions[0]/2
         NX = np.ones(3)*root_octs_side
-        octs_side = NX*2**level
+        octs_side = NX*2 # Level == 0
         LE = np.array([0.0, 0.0, 0.0], dtype='float64')
         RE = np.array([1.0, 1.0, 1.0], dtype='float64')
         root_dx = (RE - LE) / NX
@@ -603,24 +584,14 @@
                            LL[1]:RL[1]:NX[1]*1j,
                            LL[2]:RL[2]:NX[2]*1j]
         root_fc = np.vstack([p.ravel() for p in root_fc]).T
-        nocts_check = oct_handler.add(self.domain_id, level,
-                                      root_octs_side**3,
-                                      root_fc, self.domain_id)
+        nocts_check = oct_handler.add(self.domain_id, 0, root_fc)
         assert(oct_handler.nocts == root_fc.shape[0])
         mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
                     root_octs_side**3, 0, oct_handler.nocts)
 
-    def select(self, selector):
-        if id(selector) == self._last_selector_id:
-            return self._last_mask
-        self._last_mask = selector.fill_mask(self)
-        self._last_selector_id = id(selector)
-        return self._last_mask
-
-    def count(self, selector):
-        if id(selector) == self._last_selector_id:
-            if self._last_mask is None:
-                return 0
-            return self._last_mask.sum()
-        self.select(selector)
-        return self.count(selector)
+    def included(self, selector):
+        return True  # FIXME: short-circuit; the domain checks below are unreachable
+        if getattr(selector, "domain_id", None) is not None:
+            return selector.domain_id == self.domain_id
+        domain_ids = self.pf.h.oct_handler.domain_identify(selector)
+        return self.domain_id in domain_ids

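The triple loop in the new fill() above regroups the root grid into per-oct
cell columns: each root oct owns a 2x2x2 block of cells, and column
ii = ((k*2)+j)*2+i of source holds cell (i, j, k) of every oct, picked out at
once by the strided slice dt[i::2, j::2, k::2]. A toy version of the same
regrouping (illustrative values only):

    import numpy as np

    dims = np.array([4, 4, 4])             # root *cell* dimensions
    dt = np.arange(dims.prod(), dtype="float64").reshape(dims, order="F")

    n_octs = dims.prod() // 8              # 2x2x2 cells per oct
    source = np.empty((n_octs, 8), dtype="float64", order="C")
    for i in range(2):
        for j in range(2):
            for k in range(2):
                ii = ((k * 2) + j) * 2 + i          # intra-oct cell index
                source[:, ii] = dt[i::2, j::2, k::2].ravel(order="F")
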
diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -30,6 +30,7 @@
 import os
 import os.path
 
+from yt.funcs import *
 from yt.utilities.io_handler import \
     BaseIOHandler
 import yt.utilities.lib as au
@@ -55,7 +56,7 @@
         # Chunks in this case will have affiliated domain subset objects
         # Each domain subset will contain a hydro_offset array, which gives
         # pointers to level-by-level hydro information
-        tr = dict((f, np.empty(size, dtype='float64')) for f in fields)
+        tr = defaultdict(list)
         cp = 0
         for chunk in chunks:
             for subset in chunk.objs:
@@ -63,18 +64,18 @@
                 f = open(subset.domain.pf._file_amr, "rb")
                 # This contains the boundary information, so we skim through
                 # and pick off the right vectors
-                if subset.domain_level == 0:
-                    rv = subset.fill_root(f, fields)
-                else:
-                    rv = subset.fill_level(f, fields)
+                rv = subset.fill(f, fields, selector)
                 for ft, f in fields:
-                    mylog.debug("Filling L%i %s with %s (%0.3e %0.3e) (%s:%s)",
-                                subset.domain_level,
-                                f, subset.cell_count, rv[f].min(), rv[f].max(),
-                                cp, cp+subset.cell_count)
-                    tr[(ft, f)][cp:cp+subset.cell_count] = rv.pop(f)
-                cp += subset.cell_count
-        return tr
+                    d = rv.pop(f)
+                    mylog.debug("Filling %s with %s (%0.3e %0.3e) (%s:%s)",
+                                f, d.size, d.min(), d.max(),
+                                cp, cp+d.size)
+                    tr[(ft, f)].append(d)
+                cp += d.size
+        d = {}
+        for field in fields:
+            d[field] = np.concatenate(tr.pop(field))
+        return d
 
     def _get_mask(self, selector, ftype):
         key = (selector, ftype)

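The io.py rewrite above drops the preallocated per-field buffers, which
required knowing the selected size up front, and instead collects one array
per subset and concatenates at the end. The pattern in isolation
(subset_fill is a hypothetical stand-in for subset.fill):

    from collections import defaultdict
    import numpy as np

    def gather_fields(subsets, fields, subset_fill):
        tr = defaultdict(list)
        for subset in subsets:
            rv = subset_fill(subset, fields)    # one array per field
            for field in fields:
                tr[field].append(rv.pop(field))
        # Sizes are only known now; concatenate into contiguous arrays.
        return dict((field, np.concatenate(tr.pop(field)))
                    for field in fields)

    parts = gather_fields([1, 2, 3], ["Density"],
                          lambda s, fs: dict((f, np.ones(4) * s) for f in fs))
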
diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -341,7 +341,7 @@
                             if selected_mass[ispec] :
                                 count = len(data[selected_mass[ispec]])
                                 data[selected_mass[ispec]].resize(count+1)
-                                data[selected_mass[ispec]][count] = self.parameters["particle_species_mass"]
+                                data[selected_mass[ispec]][count] = self.parameters["particle_species_mass"][0]
                         
                     status = artio_particle_read_species_end( self.handle )
                     check_artio_status(status)

diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -32,7 +32,7 @@
     artio_is_valid, artio_fileset
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
-from .fields import ARTIOFieldInfo, KnownARTIOFields
+from .fields import ARTIOFieldInfo, KnownARTIOFields, b2t
 
 from yt.funcs import *
 from yt.geometry.geometry_handler import \
@@ -145,7 +145,7 @@
         """
         Returns (in code units) the smallest cell size in the simulation.
         """
-        return (self.parameter_file.domain_width/(2**self.max_level)).min()
+        return 1.0/(2**self.max_level)
 
     def convert(self, unit):
         return self.parameter_file.conversion_factors[unit]
@@ -391,7 +391,7 @@
             list(set(art_to_yt[s] for s in
                      self.artio_parameters["particle_species_labels"])))
 
-        self.current_time = self.artio_parameters["tl"][0]
+        self.current_time = b2t(self.artio_parameters["tl"][0])
 
         # detect cosmology
         if "abox" in self.artio_parameters:

diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/frontends/artio/fields.py
--- a/yt/frontends/artio/fields.py
+++ b/yt/frontends/artio/fields.py
@@ -295,12 +295,12 @@
 
 #add_artio_field("creation_time", function=NullFunc, particle_type=True)
 def _particle_age(field, data):
-    pa = b2t(data['creation_time'])
+    pa = b2t(data['stars','creation_time'])
 #    tr = np.zeros(pa.shape,dtype='float')-1.0
 #    tr[pa>0] = pa[pa>0]
     tr = pa
     return tr
-add_field("particle_age", function=_particle_age, units=r"\rm{s}",
+add_field(("stars","particle_age"), function=_particle_age, units=r"\rm{s}",
           particle_type=True)
 
 
@@ -416,10 +416,10 @@
 
 def b2t(tb, n=1e2, logger=None, **kwargs):
     tb = np.array(tb)
-    if isinstance(tb, 1.1):
+    if len(np.atleast_1d(tb)) == 1: 
         return a2t(b2a(tb))
     if tb.shape == ():
-        return a2t(b2a(tb))
+        return None 
     if len(tb) < n:
         n = len(tb)
     age_min = a2t(b2a(tb.max(), **kwargs), **kwargs)
@@ -434,7 +434,7 @@
     ages = np.array(ages)
     fb2t = np.interp(tb, tbs, ages)
     #fb2t = interp1d(tbs,ages)
-    return fb2t
+    return fb2t*1e9*31556926  # Gyr -> seconds
 
 
 def spread_ages(ages, logger=None, spread=.0e7*365*24*3600):

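b2t above deliberately avoids running the exact conversion for every star:
it evaluates the expensive a2t(b2a(...)) chain at up to n sample points and
fills in the rest with np.interp, then converts Gyr to seconds. The general
sample-and-interpolate pattern for any expensive monotonic function (a
sketch, not the ART-specific conversion):

    import numpy as np

    def fast_map(f, x, n=100):
        """Approximate an expensive monotonic f over x by sampling."""
        x = np.asarray(x, dtype="float64")
        n = min(n, x.size)
        xs = np.linspace(x.min(), x.max(), n)   # sample points
        ys = np.array([f(xi) for xi in xs])     # only n expensive calls
        return np.interp(x, xs, ys)             # cheap linear interpolation

    ages_gyr = fast_map(lambda b: b**0.5, np.random.random(10000))
    ages_s = ages_gyr * 1e9 * 31556926          # Gyr -> seconds
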
diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/frontends/artio/setup.py
--- a/yt/frontends/artio/setup.py
+++ b/yt/frontends/artio/setup.py
@@ -16,7 +16,10 @@
                          include_dirs=["yt/frontends/artio/artio_headers/",
                                        "yt/geometry/",
                                        "yt/utilities/lib/"],
-                         depends=artio_sources)
+                         depends=artio_sources + 
+                                 ["yt/utilities/lib/fp_utils.pxd",
+                                  "yt/geometry/oct_container.pxd",
+                                  "yt/geometry/selection_routines.pxd"])
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config

diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -507,7 +507,9 @@
     add_enzo_field(("all", pf), function=NullFunc, convert_function=cfunc,
               particle_type=True)
 
-for pf in ["creation_time", "dynamical_time", "metallicity_fraction"]:
+for pf in ["creation_time", "dynamical_time", "metallicity_fraction"] \
+        + ["particle_position_%s" % ax for ax in 'xyz'] \
+        + ["particle_velocity_%s" % ax for ax in 'xyz']:
     add_enzo_field(pf, function=NullFunc,
               validators = [ValidateDataField(pf)],
               particle_type=True)

diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -153,13 +153,10 @@
         for chunk in chunks:
             data = self._read_chunk_data(chunk, fields)
             for g in chunk.objs:
-                mask = g.select(selector)
-                if mask is None: continue
-                nd = mask.sum()
                 for field in fields:
                     ftype, fname = field
-                    gdata = data[g.id].pop(fname).swapaxes(0,2)
-                    nd = mask_fill(rv[field], ind, mask, gdata)
+                    ds = data[g.id].pop(fname).swapaxes(0,2)
+                    nd = g.select(selector, ds, rv[field], ind) # caches
                 ind += nd
                 data.pop(g.id)
         return rv

diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -92,9 +92,6 @@
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:
-                    mask = g.select(selector) # caches
-                    if mask is None: continue
-                    data = ds[g.id - g._id_offset,:,:,:].transpose()[mask]
-                    rv[field][ind:ind+data.size] = data
-                    ind += data.size
+                    data = ds[g.id - g._id_offset,:,:,:].transpose()
+                    ind += g.select(selector, data, rv[field], ind) # caches
         return rv

diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/frontends/gdf/io.py
--- a/yt/frontends/gdf/io.py
+++ b/yt/frontends/gdf/io.py
@@ -84,13 +84,8 @@
             ind = 0
             for chunk in chunks:
                 for grid in chunk.objs:
-                    mask = grid.select(selector)  # caches
-                    if mask is None:
-                        continue
+                    data = fhandle[field_dname(grid.id, fname)][:]
                     if self.pf.field_ordering == 1:
-                        data = fhandle[field_dname(grid.id, fname)][:].swapaxes(0, 2)[mask]
-                    else:
-                        data = fhandle[field_dname(grid.id, fname)][mask]
-                    rv[field][ind:ind + data.size] = data
-                    ind += data.size
+                        data = data.swapaxes(0, 2)
+                    ind += grid.select(selector, data, rv[field], ind) # caches
         return rv

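The enzo, flash, and gdf readers above all converge on one calling
convention: instead of building a boolean mask and copying masked values,
grid.select(selector, source, dest, offset) writes the selected values of
source into a preallocated dest starting at offset and returns the count
written, so the caller only advances a running index. Schematically (a
mask-based stand-in for the real selector machinery):

    import numpy as np

    class Grid(object):
        def __init__(self, data, mask):
            self.data = data    # field values on this grid
            self.mask = mask    # stand-in for a cached selector mask

        def select(self, selector, source, dest, offset):
            """Fill dest[offset:] with selected source values; return count."""
            vals = source[self.mask]
            dest[offset:offset + vals.size] = vals
            return vals.size

    grids = [Grid(np.random.random((4, 4, 4)),
                  np.random.random((4, 4, 4)) > 0.5) for _ in range(3)]
    rv = np.empty(sum(g.mask.sum() for g in grids), dtype="float64")
    ind = 0
    for g in grids:
        ind += g.select(None, g.data, rv, ind)
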
diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -69,10 +69,14 @@
             setattr(self, "%s_fn" % t, basename % t)
         self._read_amr_header()
         self._read_particle_header()
+        self._read_amr()
 
     _hydro_offset = None
     _level_count = None
 
+    def __repr__(self):
+        return "RAMSESDomainFile: %i" % self.domain_id
+
     @property
     def level_count(self):
         if self._level_count is not None: return self._level_count
@@ -183,21 +187,26 @@
         self.amr_header = hvals
         self.amr_offset = f.tell()
         self.local_oct_count = hvals['numbl'][self.pf.min_level:, self.domain_id - 1].sum()
+        self.total_oct_count = hvals['numbl'][self.pf.min_level:,:].sum(axis=0)
 
-    def _read_amr(self, oct_handler):
+    def _read_amr(self):
         """Open the oct file, read in octs level-by-level.
            For each oct, only the position, index, level and domain 
            are needed - its position in the octree is found automatically.
            The most important is finding all the information to feed
            oct_handler.add
         """
+        self.oct_handler = RAMSESOctreeContainer(self.pf.domain_dimensions/2,
+                self.pf.domain_left_edge, self.pf.domain_right_edge)
+        root_nodes = self.amr_header['numbl'][self.pf.min_level,:].sum()
+        self.oct_handler.allocate_domains(self.total_oct_count, root_nodes)
         fb = open(self.amr_fn, "rb")
         fb.seek(self.amr_offset)
         f = cStringIO.StringIO()
         f.write(fb.read())
         f.seek(0)
         mylog.debug("Reading domain AMR % 4i (%0.3e, %0.3e)",
-            self.domain_id, self.local_oct_count, self.ngridbound.sum())
+            self.domain_id, self.total_oct_count.sum(), self.ngridbound.sum())
         def _ng(c, l):
             if c < self.amr_header['ncpu']:
                 ng = self.amr_header['numbl'][l, c]
@@ -206,7 +215,6 @@
                                 self.amr_header['nboundary']*l]
             return ng
         min_level = self.pf.min_level
-        total = 0
         nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
         for level in range(self.amr_header['nlevelmax']):
             # Easier if do this 1-indexed
@@ -236,40 +244,34 @@
                 #    rmap[:,i] = fpu.read_vector(f, "I")
                 # We don't want duplicate grids.
                 # Note that we're adding *grids*, not individual cells.
-                if level >= min_level and cpu + 1 >= self.domain_id: 
+                if level >= min_level:
                     assert(pos.shape[0] == ng)
-                    if cpu + 1 == self.domain_id:
-                        total += ng
-                    oct_handler.add(cpu + 1, level - min_level, ng, pos, 
-                                    self.domain_id)
+                    n = self.oct_handler.add(cpu + 1, level - min_level, pos)
+                    assert(n == ng)
+        self.oct_handler.finalize()
 
-    def select(self, selector):
-        if id(selector) == self._last_selector_id:
-            return self._last_mask
-        self._last_mask = selector.fill_mask(self)
-        self._last_selector_id = id(selector)
-        return self._last_mask
-
-    def count(self, selector):
-        if id(selector) == self._last_selector_id:
-            if self._last_mask is None: return 0
-            return self._last_mask.sum()
-        self.select(selector)
-        return self.count(selector)
+    def included(self, selector):
+        if getattr(selector, "domain_id", None) is not None:
+            return selector.domain_id == self.domain_id
+        domain_ids = self.oct_handler.domain_identify(selector)
+        return self.domain_id in domain_ids
 
 class RAMSESDomainSubset(OctreeSubset):
 
-    def fill(self, content, fields):
+    _domain_offset = 1
+
+    def fill(self, content, fields, selector):
         # Here we get a copy of the file, which we skip through and read the
         # bits we want.
         oct_handler = self.oct_handler
         all_fields = self.domain.pf.h.fluid_field_list
         fields = [f for ft, f in fields]
         tr = {}
-        filled = pos = level_offset = 0
-        min_level = self.domain.pf.min_level
+        cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
+        levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
+            selector, self.domain_id, cell_count)
         for field in fields:
-            tr[field] = np.zeros(self.cell_count, 'float64')
+            tr[field] = np.zeros(cell_count, 'float64')
         for level, offset in enumerate(self.domain.hydro_offset):
             if offset == -1: continue
             content.seek(offset)
@@ -280,17 +282,10 @@
             for i in range(8):
                 for field in all_fields:
                     if field not in fields:
-                        #print "Skipping %s in %s : %s" % (field, level,
-                        #        self.domain.domain_id)
                         fpu.skip(content)
                     else:
-                        #print "Reading %s in %s : %s" % (field, level,
-                        #        self.domain.domain_id)
                         temp[field][:,i] = fpu.read_vector(content, 'd') # cell 1
-            level_offset += oct_handler.fill_level(self.domain.domain_id, level,
-                                   tr, temp, self.mask, level_offset)
-            #print "FILL (%s : %s) %s" % (self.domain.domain_id, level, level_offset)
-        #print "DONE (%s) %s of %s" % (self.domain.domain_id, level_offset, self.cell_count)
+            oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, temp)
         return tr
 
 class RAMSESGeometryHandler(OctreeGeometryHandler):
@@ -314,21 +309,6 @@
         total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
                          for dom in self.domains)
         self.num_grids = total_octs
-        #this merely allocates space for the oct tree
-        #and nothing else
-        self.oct_handler = RAMSESOctreeContainer(
-            self.parameter_file.domain_dimensions/2,
-            self.parameter_file.domain_left_edge,
-            self.parameter_file.domain_right_edge)
-        mylog.debug("Allocating %s octs", total_octs)
-        self.oct_handler.allocate_domains(
-            [dom.local_oct_count #+ dom.ngridbound.sum()
-             for dom in self.domains])
-        #this actually reads every oct and loads it into the octree
-        for dom in self.domains:
-            dom._read_amr(self.oct_handler)
-        #for dom in self.domains:
-        #    self.oct_handler.check(dom.domain_id)
 
     def _detect_fields(self):
         # TODO: Add additional fields
@@ -345,18 +325,19 @@
 
     def _identify_base_chunk(self, dobj):
         if getattr(dobj, "_chunk_info", None) is None:
-            mask = dobj.selector.select_octs(self.oct_handler)
-            counts = self.oct_handler.count_cells(dobj.selector, mask)
-            subsets = [RAMSESDomainSubset(d, mask, c)
-                       for d, c in zip(self.domains, counts) if c > 0]
+            domains = [dom for dom in self.domains if
+                       dom.included(dobj.selector)]
+            base_region = getattr(dobj, "base_region", dobj)
+            if len(domains) > 1:
+                mylog.debug("Identified %s intersecting domains", len(domains))
+            subsets = [RAMSESDomainSubset(base_region, domain, self.parameter_file)
+                       for domain in domains]
             dobj._chunk_info = subsets
-            dobj.size = sum(counts)
-            dobj.shape = (dobj.size,)
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
 
     def _chunk_all(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        yield YTDataChunk(dobj, "all", oobjs, dobj.size)
+        yield YTDataChunk(dobj, "all", oobjs, None)
 
     def _chunk_spatial(self, dobj, ngz, sort = None):
         sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
@@ -365,14 +346,12 @@
                 g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
             else:
                 g = og
-            size = og.cell_count
-            if size == 0: continue
-            yield YTDataChunk(dobj, "spatial", [g], size)
+            yield YTDataChunk(dobj, "spatial", [g], None)
 
     def _chunk_io(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], subset.cell_count)
+            yield YTDataChunk(dobj, "io", [subset], None)
 
 class RAMSESStaticOutput(StaticOutput):
     _hierarchy_class = RAMSESGeometryHandler
@@ -422,6 +401,8 @@
         for unit in mpc_conversion.keys():
             self.units[unit] = unit_l * mpc_conversion[unit] / mpc_conversion["cm"]
             self.units['%sh' % unit] = self.units[unit] * self.hubble_constant
+            self.units['%scm' % unit] = (self.units[unit] /
+                                          (1 + self.current_redshift))
             self.units['%shcm' % unit] = (self.units['%sh' % unit] /
                                           (1 + self.current_redshift))
         for unit in sec_conversion.keys():
@@ -479,7 +460,7 @@
         self.omega_lambda = rheader["omega_l"]
         self.omega_matter = rheader["omega_m"]
         self.hubble_constant = rheader["H0"] / 100.0 # This is H100
-        self.max_level = rheader['levelmax'] - rheader['levelmin']
+        self.max_level = rheader['levelmax'] - self.min_level
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

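Both the ART and RAMSES fill() methods now drive the same indexing scheme:
count_oct_cells sizes the output, file_index_octs reports each selected
cell's level, its 0-7 position within its oct (cell_inds), and its oct's
position in the on-disk ordering (file_inds), and fill_level gathers one
level at a time. Assuming that reading of the Cython routine, a pure-NumPy
equivalent of the gather would be:

    import numpy as np

    def fill_level(level, levels, cell_inds, file_inds, tr, source):
        """Gather one level's cells from (n_octs, 8) source into flat tr."""
        sel = (levels == level)
        for field in tr:
            tr[field][sel] = source[field][file_inds[sel], cell_inds[sel]]

    levels = np.array([0, 0, 1])
    cell_inds = np.array([0, 3, 7])
    file_inds = np.array([0, 0, 2])
    tr = {"Density": np.zeros(3)}
    source = {"Density": np.arange(24, dtype="float64").reshape(3, 8)}
    fill_level(0, levels, cell_inds, file_inds, tr, source)
    # tr["Density"] is now [0., 3., 0.]; the level-1 cell is untouched.
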
diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -39,7 +39,7 @@
         # Chunks in this case will have affiliated domain subset objects
         # Each domain subset will contain a hydro_offset array, which gives
         # pointers to level-by-level hydro information
-        tr = dict((f, np.empty(size, dtype='float64')) for f in fields)
+        tr = defaultdict(list)
         cp = 0
         for chunk in chunks:
             for subset in chunk.objs:
@@ -48,14 +48,16 @@
                 # This contains the boundary information, so we skim through
                 # and pick off the right vectors
                 content = cStringIO.StringIO(f.read())
-                rv = subset.fill(content, fields)
+                rv = subset.fill(content, fields, selector)
                 for ft, f in fields:
-                    mylog.debug("Filling %s with %s (%0.3e %0.3e) (%s:%s)",
-                        f, subset.cell_count, rv[f].min(), rv[f].max(),
-                        cp, cp+subset.cell_count)
-                    tr[(ft, f)][cp:cp+subset.cell_count] = rv.pop(f)
-                cp += subset.cell_count
-        return tr
+                    d = rv.pop(f)
+                    mylog.debug("Filling %s with %s (%0.3e %0.3e) (%s zones)",
+                        f, d.size, d.min(), d.max(), d.size)
+                    tr[(ft, f)].append(d)
+        d = {}
+        for field in fields:
+            d[field] = np.concatenate(tr.pop(field))
+        return d
 
     def _read_particle_selection(self, chunks, selector, fields):
         size = 0

diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -32,10 +32,8 @@
 
 from yt.utilities.fortran_utils import read_record
 from yt.funcs import *
-from yt.geometry.oct_geometry_handler import \
-    OctreeGeometryHandler
-from yt.geometry.oct_container import \
-    ParticleOctreeContainer
+from yt.geometry.particle_geometry_handler import \
+    ParticleGeometryHandler
 from yt.geometry.geometry_handler import \
     GeometryHandler, YTDataChunk
 from yt.data_objects.static_output import \
@@ -61,12 +59,12 @@
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, NullFunc
 
-class ParticleDomainFile(object):
-    def __init__(self, pf, io, domain_filename, domain_id):
+class ParticleFile(object):
+    def __init__(self, pf, io, filename, file_id):
         self.pf = pf
         self.io = weakref.proxy(io)
-        self.domain_filename = domain_filename
-        self.domain_id = domain_id
+        self.filename = filename
+        self.file_id = file_id
         self.total_particles = self.io._count_particles(self)
 
     def select(self, selector):
@@ -78,103 +76,21 @@
     def _calculate_offsets(self, fields):
         pass
 
-class ParticleDomainSubset(OctreeSubset):
-    pass
-
-class ParticleGeometryHandler(OctreeGeometryHandler):
-
-    def __init__(self, pf, data_style):
-        self.data_style = data_style
-        self.parameter_file = weakref.proxy(pf)
-        # for now, the hierarchy file is the parameter file!
-        self.hierarchy_filename = self.parameter_file.parameter_filename
-        self.directory = os.path.dirname(self.hierarchy_filename)
-        self.float_type = np.float64
-        super(ParticleGeometryHandler, self).__init__(pf, data_style)
-        
-    def _initialize_oct_handler(self):
-        self._setup_data_io()
-        template = self.parameter_file.domain_template
-        ndoms = self.parameter_file.domain_count
-        cls = self.parameter_file._domain_class
-        self.domains = [cls(self.parameter_file, self.io, template % {'num':i}, i)
-                        for i in range(ndoms)]
-        total_particles = sum(sum(d.total_particles.values())
-                              for d in self.domains)
-        self.oct_handler = ParticleOctreeContainer(
-            self.parameter_file.domain_dimensions/2,
-            self.parameter_file.domain_left_edge,
-            self.parameter_file.domain_right_edge)
-        self.oct_handler.n_ref = 64
-        mylog.info("Allocating for %0.3e particles", total_particles)
-        for dom in self.domains:
-            self.io._initialize_octree(dom, self.oct_handler)
-        self.oct_handler.finalize()
-        self.max_level = self.oct_handler.max_level
-        tot = self.oct_handler.linearly_count()
-        mylog.info("Identified %0.3e octs", tot)
-
-    def _detect_fields(self):
-        # TODO: Add additional fields
-        pfl = []
-        for dom in self.domains:
-            fl = self.io._identify_fields(dom)
-            dom._calculate_offsets(fl)
-            for f in fl:
-                if f not in pfl: pfl.append(f)
-        self.field_list = pfl
-        pf = self.parameter_file
-        pf.particle_types = tuple(set(pt for pt, pf in pfl))
-        pf.particle_types += ('all',)
-    
-    def _setup_classes(self):
-        dd = self._get_data_reader_dict()
-        super(ParticleGeometryHandler, self)._setup_classes(dd)
-        self.object_types.sort()
-
-    def _identify_base_chunk(self, dobj):
-        if getattr(dobj, "_chunk_info", None) is None:
-            mask = dobj.selector.select_octs(self.oct_handler)
-            counts = self.oct_handler.count_cells(dobj.selector, mask)
-            subsets = [ParticleDomainSubset(d, mask, c)
-                       for d, c in zip(self.domains, counts) if c > 0]
-            dobj._chunk_info = subsets
-            dobj.size = sum(counts)
-            dobj.shape = (dobj.size,)
-        dobj._current_chunk = list(self._chunk_all(dobj))[0]
-
-    def _chunk_all(self, dobj):
-        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        yield YTDataChunk(dobj, "all", oobjs, dobj.size)
-
-    def _chunk_spatial(self, dobj, ngz, sort = None):
-        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        for i,og in enumerate(sobjs):
-            if ngz > 0:
-                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
-            else:
-                g = og
-            size = og.cell_count
-            if size == 0: continue
-            yield YTDataChunk(dobj, "spatial", [g], size)
-
-    def _chunk_io(self, dobj):
-        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], subset.cell_count)
-
-class GadgetBinaryDomainFile(ParticleDomainFile):
-    def __init__(self, pf, io, domain_filename, domain_id):
-        with open(domain_filename, "rb") as f:
+class GadgetBinaryFile(ParticleFile):
+    def __init__(self, pf, io, filename, file_id):
+        with open(filename, "rb") as f:
             self.header = read_record(f, pf._header_spec)
             self._position_offset = f.tell()
+            f.seek(0, os.SEEK_END)
+            self._file_size = f.tell()
 
-        super(GadgetBinaryDomainFile, self).__init__(pf, io,
-                domain_filename, domain_id)
+        super(GadgetBinaryFile, self).__init__(pf, io,
+                filename, file_id)
 
     def _calculate_offsets(self, field_list):
         self.field_offsets = self.io._calculate_field_offsets(
-                field_list, self.total_particles)
+                field_list, self.total_particles,
+                self._position_offset, self._file_size)
 
 class ParticleStaticOutput(StaticOutput):
     _unit_base = None
@@ -210,7 +126,7 @@
 
 class GadgetStaticOutput(ParticleStaticOutput):
     _hierarchy_class = ParticleGeometryHandler
-    _domain_class = GadgetBinaryDomainFile
+    _file_class = GadgetBinaryFile
     _fieldinfo_fallback = GadgetFieldInfo
     _fieldinfo_known = KnownGadgetFields
     _header_spec = (('Npart', 6, 'i'),
@@ -232,10 +148,8 @@
                     ('unused', 16, 'i') )
 
     def __init__(self, filename, data_style="gadget_binary",
-                 additional_fields = (), root_dimensions = 64,
+                 additional_fields = (),
                  unit_base = None):
-        self._root_dimensions = root_dimensions
-        # Set up the template for domain files
         self.storage_filename = None
         if unit_base is not None and "UnitLength_in_cm" in unit_base:
             # We assume this is comoving, because in the absence of comoving
@@ -268,7 +182,7 @@
 
         self.domain_left_edge = np.zeros(3, "float64")
         self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
-        self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
+        self.domain_dimensions = np.ones(3, "int32") * 2
         self.periodicity = (True, True, True)
 
         self.cosmological_simulation = 1
@@ -306,11 +220,11 @@
         suffix = self.parameter_filename.rsplit(".", 1)[-1]
 
         if hvals["NumFiles"] > 1:
-            self.domain_template = "%s.%%(num)s" % (prefix)
+            self.filename_template = "%s.%%(num)s" % (prefix)
         else:
-            self.domain_template = self.parameter_filename
+            self.filename_template = self.parameter_filename
 
-        self.domain_count = hvals["NumFiles"]
+        self.file_count = hvals["NumFiles"]
 
         f.close()
 
@@ -341,17 +255,14 @@
 
 class OWLSStaticOutput(GadgetStaticOutput):
     _hierarchy_class = ParticleGeometryHandler
-    _domain_class = ParticleDomainFile
+    _file_class = ParticleFile
     _fieldinfo_fallback = OWLSFieldInfo # For now we have separate from Gadget
     _fieldinfo_known = KnownOWLSFields
     _header_spec = None # Override so that there's no confusion
 
-    def __init__(self, filename, data_style="OWLS", root_dimensions = 64):
-        self._root_dimensions = root_dimensions
-        # Set up the template for domain files
+    def __init__(self, filename, data_style="OWLS"):
         self.storage_filename = None
         super(OWLSStaticOutput, self).__init__(filename, data_style,
-                                               root_dimensions,
                                                unit_base = None)
 
     def __repr__(self):
@@ -372,7 +283,7 @@
         self.current_time = hvals["Time_GYR"] * sec_conversion["Gyr"]
         self.domain_left_edge = np.zeros(3, "float64")
         self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
-        self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
+        self.domain_dimensions = np.ones(3, "int32") * 2
         self.cosmological_simulation = 1
         self.periodicity = (True, True, True)
         self.current_redshift = hvals["Redshift"]
@@ -383,8 +294,8 @@
 
         prefix = self.parameter_filename.split(".", 1)[0]
         suffix = self.parameter_filename.rsplit(".", 1)[-1]
-        self.domain_template = "%s.%%(num)i.%s" % (prefix, suffix)
-        self.domain_count = hvals["NumFilesPerSnapshot"]
+        self.filename_template = "%s.%%(num)i.%s" % (prefix, suffix)
+        self.file_count = hvals["NumFilesPerSnapshot"]
 
         # To avoid having to open files twice
         self._unit_base = {}
@@ -407,23 +318,23 @@
             pass
         return False
 
-class TipsyDomainFile(ParticleDomainFile):
+class TipsyFile(ParticleFile):
 
     def _calculate_offsets(self, field_list):
         self.field_offsets = self.io._calculate_particle_offsets(self)
 
-    def __init__(self, pf, io, domain_filename, domain_id):
+    def __init__(self, pf, io, filename, file_id):
         # To go above 1 file, we need to include an indexing step in the
         # IOHandler, rather than simply reading from a single file.
-        assert domain_id == 0 
-        super(TipsyDomainFile, self).__init__(pf, io,
-                domain_filename, domain_id)
+        assert file_id == 0
+        super(TipsyFile, self).__init__(pf, io,
+                filename, file_id)
         io._create_dtypes(self)
 
 
 class TipsyStaticOutput(ParticleStaticOutput):
     _hierarchy_class = ParticleGeometryHandler
-    _domain_class = TipsyDomainFile
+    _file_class = TipsyFile
     _fieldinfo_fallback = TipsyFieldInfo
     _fieldinfo_known = KnownTipsyFields
     _header_spec = (('time',    'd'),
@@ -435,15 +346,13 @@
                     ('dummy',   'i'))
 
     def __init__(self, filename, data_style="tipsy",
-                 root_dimensions = 64, endian = ">",
+                 endian = ">",
                  field_dtypes = None,
                  domain_left_edge = None,
                  domain_right_edge = None,
                  unit_base = None,
                  cosmology_parameters = None):
         self.endian = endian
-        self._root_dimensions = root_dimensions
-        # Set up the template for domain files
         self.storage_filename = None
         if domain_left_edge is None:
             domain_left_edge = np.zeros(3, "float64") - 0.5
@@ -489,7 +398,7 @@
         # NOTE: These are now set in the main initializer.
         #self.domain_left_edge = np.zeros(3, "float64") - 0.5
         #self.domain_right_edge = np.ones(3, "float64") + 0.5
-        self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
+        self.domain_dimensions = np.ones(3, "int32") * 2
         self.periodicity = (True, True, True)
 
         self.cosmological_simulation = 1
@@ -506,8 +415,8 @@
 
         self.parameters = hvals
 
-        self.domain_template = self.parameter_filename
-        self.domain_count = 1
+        self.filename_template = self.parameter_filename
+        self.file_count = 1
 
         f.close()
 

diff -r 24915104ff1604a95f1342f13c10f8fc1c1c0b07 -r 559bca49e90fda8b2e57c941070bc1e681bd80ec yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -234,3 +234,13 @@
         func = _field_concat_slice(iname, axi)
         OWLSFieldInfo.add_field(("all", oname + ax), function=func,
                 particle_type = True)
+
+def SmoothedGas(field, data):
+    pos = data["PartType0", "Coordinates"]
+    sml = data["PartType0", "SmoothingLength"]
+    dens = data["PartType0", "Density"]
+    rv = data.deposit(pos, [sml, dens], method="simple_smooth")
+    return rv
+OWLSFieldInfo.add_field(("deposit", "PartType0_simple_smooth"),
+                function = SmoothedGas, validators = [ValidateSpatial()])
+

This diff is so big that we needed to truncate the remainder.
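
For illustration (not part of this changeset), a minimal sketch of what the
renamed attributes encode: filename_template plus file_count is enough to
enumerate the per-file names of a multi-file snapshot.  The template string
and count below are hypothetical stand-ins for the values read from the
header.

    template = "snapshot_000.%(num)s"   # hypothetical filename_template
    file_count = 4                      # hypothetical NumFiles value
    filenames = [template % {"num": i} for i in range(file_count)]
    # -> ['snapshot_000.0', 'snapshot_000.1', 'snapshot_000.2', 'snapshot_000.3']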

https://bitbucket.org/yt_analysis/yt/commits/294e62e00fa8/
Changeset:   294e62e00fa8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-01 23:44:02
Summary:     Merging from Doug
Affected #:  0 files



https://bitbucket.org/yt_analysis/yt/commits/8ee62b09392c/
Changeset:   8ee62b09392c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-06-30 15:27:58
Summary:     Fixing FLASH reader.
Affected #:  2 files

diff -r ab5d001a882986a751b6f6edffeb4fdeb0ee49fc -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -212,13 +212,6 @@
     def _setup_data_io(self):
         self.io = io_registry[self.data_style](self.parameter_file)
 
-    def _chunk_io(self, dobj):
-        gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        # We'll take the max of 128 and the number of processors
-        nl = max(16, ytcfg.getint("yt", "__topcomm_parallel_size"))
-        for gs in list_chunks(gobjs, nl):
-            yield YTDataChunk(dobj, "io", gs, self._count_selection)
-
 class FLASHStaticOutput(StaticOutput):
     _hierarchy_class = FLASHHierarchy
     _fieldinfo_fallback = FLASHFieldInfo

diff -r ab5d001a882986a751b6f6edffeb4fdeb0ee49fc -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -92,6 +92,6 @@
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:
-                    data = ds[g.id - g._id_offset,:,:,:].transpose()[mask]
+                    data = ds[g.id - g._id_offset,:,:,:].transpose()
                     ind += g.select(selector, data, rv[field], ind) # caches
         return rv


https://bitbucket.org/yt_analysis/yt/commits/aba44265d752/
Changeset:   aba44265d752
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-02 16:39:54
Summary:     Adding a new exception, subclassing YTFieldNotFound

This is for instances when a field could not be generated.  Since this
subclasses YTFieldNotFound, existing machinery for catching that particular
exception will continue to work, and we now have a generic solution to the
problem of fields returning None.  Note, however, that this may result in
additional derived fields being silently swallowed rather than created.
Affected #:  2 files

diff -r 2b865392b01b1f5458947d629dee2707c8140765 -r aba44265d752191a46e8a6a7a6bb86385ad4938d yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -219,11 +219,14 @@
         finfo = self.pf._get_field_info(*field)
         with self._field_type_state(ftype, finfo):
             if fname in self._container_fields:
-                return self._generate_container_field(field)
+                tr = self._generate_container_field(field)
-            if finfo.particle_type:
-                return self._generate_particle_field(field)
+            elif finfo.particle_type:
+                tr = self._generate_particle_field(field)
             else:
-                return self._generate_fluid_field(field)
+                tr = self._generate_fluid_field(field)
+            if tr is None:
+                raise YTCouldNotGenerateField(field, self.pf)
+            return tr
 
     def _generate_fluid_field(self, field):
         # First we check the validator

diff -r 2b865392b01b1f5458947d629dee2707c8140765 -r aba44265d752191a46e8a6a7a6bb86385ad4938d yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -77,6 +77,10 @@
     def __str__(self):
         return "Could not find field '%s' in %s." % (self.fname, self.pf)
 
+class YTCouldNotGenerateField(YTFieldNotFound):
+    def __str__(self):
+        return "Could field '%s' in %s could not be generated." % (self.fname, self.pf)
+
 class YTFieldTypeNotFound(YTException):
     def __init__(self, fname):
         self.fname = fname

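As a hedged sketch of the backwards compatibility claimed above (dd is a
hypothetical data object, and the field name is made up): because
YTCouldNotGenerateField subclasses YTFieldNotFound, an existing handler for
the latter also catches the new exception.

    from yt.utilities.exceptions import YTFieldNotFound

    try:
        vals = dd["SomeUngeneratableField"]   # hypothetical field
    except YTFieldNotFound as e:
        # Catches both a genuinely missing field and a field whose
        # generator returned None (YTCouldNotGenerateField).
        print "Field unavailable: %s" % e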

https://bitbucket.org/yt_analysis/yt/commits/9cdb65cd9b41/
Changeset:   9cdb65cd9b41
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-02 20:04:50
Summary:     Initial implementation of ParticleFilters.
Affected #:  2 files

diff -r aba44265d752191a46e8a6a7a6bb86385ad4938d -r 9cdb65cd9b41b2e87cf01b53648eb119c321e675 yt/data_objects/particle_filters.py
--- /dev/null
+++ b/yt/data_objects/particle_filters.py
@@ -0,0 +1,81 @@
+"""
+This is a library for defining and using particle filters.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+from contextlib import contextmanager
+
+from yt.utilities.exceptions import YTIllDefinedFilter
+from yt.funcs import *
+
+# One to many mapping
+filter_registry = defaultdict(list)
+
+class ParticleFilter(object):
+    def __init__(self, name, function, requires):
+        self.name = name
+        self.function = function
+        self.requires = requires[:]
+        pt = []
+        for r in requires:
+            if not isinstance(r, types.TupleType):
+                raise RuntimeError
+            if r[0] not in pt:
+                pt.append(r[0])
+        if len(pt) > 1:
+            raise RuntimeError
+        self.particle_type = pt[0]
+
+    @contextmanager
+    def apply(self, dobj):
+        with dobj._chunked_read(dobj._current_chunk):
+            # We won't be storing the field data from the whole read, so we
+            # start by filtering now.
+            filter = self.function(self, dobj)
+            yield
+            # Retain a reference here, and we'll filter all appropriate fields
+            # later.
+            fd = dobj.field_data
+        for f, tr in fd.items():
+            if f[0] != self.particle_type: continue
+            if tr.shape != filter.shape:
+                raise YTIllDefinedFilter(self, tr.shape, filter.shape)
+            dobj.field_data[self.name, f[1]] = tr[filter]
+
+    def available(self, field_list):
+        # Note that this assumes that all the fields in field_list have the
+        # same form as the 'requires' attributes.  This won't be true if the
+        # fields are implicitly "all" or something.
+        return all(field in field_list for field in self.requires)
+
+def add_particle_filter(name, function, requires = None):
+    if requires is None: requires = []
+    filter = ParticleFilter(name, function, requires)
+    filter_registry[name].append(filter)
+
+def particle_filter(name, requires = None):
+    def _pfilter(func):
+        add_particle_filter(name, func, requires)
+        return func
+    return _pfilter

diff -r aba44265d752191a46e8a6a7a6bb86385ad4938d -r 9cdb65cd9b41b2e87cf01b53648eb119c321e675 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -277,3 +277,13 @@
     def __str__(self):
         return "Particle bounds %s and %s exceed domain bounds %s and %s" % (
             self.mi, self.ma, self.dle, self.dre)
+
+class YTIllDefinedFilter(YTException):
+    def __init__(self, filter, s1, s2):
+        self.filter = filter
+        self.s1 = s1
+        self.s2 = s2
+
+    def __str__(self):
+        return "Filter '%s' ill-defined.  Applied to shape %s but is shape %s." % (
+            self.filter, self.s1, self.s2)

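A sketch of how this first-draft API might be used, with hypothetical field
names.  Note that in this version every entry of requires must be a
(particle_type, field_name) tuple and all entries must share a single
particle type; the filter function receives the ParticleFilter instance and
a data object, and returns a boolean mask over the particles.

    from yt.data_objects.particle_filters import particle_filter

    @particle_filter("formed_stars", requires=[("all", "creation_time")])
    def formed_stars(pfilter, data):
        # Select particles with a positive creation time.
        return data["all", "creation_time"] > 0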

https://bitbucket.org/yt_analysis/yt/commits/996606f015ee/
Changeset:   996606f015ee
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-02 20:26:35
Summary:     Make filtered_type explicit.
Affected #:  1 file

diff -r 9cdb65cd9b41b2e87cf01b53648eb119c321e675 -r 996606f015ee15221e2787900793c74c3ed73a2d yt/data_objects/particle_filters.py
--- a/yt/data_objects/particle_filters.py
+++ b/yt/data_objects/particle_filters.py
@@ -32,31 +32,28 @@
 # One to many mapping
 filter_registry = defaultdict(list)
 
+class DummyFieldInfo(object):
+    particle_type = True
+dfi = DummyFieldInfo()
+
 class ParticleFilter(object):
-    def __init__(self, name, function, requires):
+    def __init__(self, name, function, requires, filtered_type):
         self.name = name
         self.function = function
         self.requires = requires[:]
-        pt = []
-        for r in requires:
-            if not isinstance(r, types.TupleType):
-                raise RuntimeError
-            if r[0] not in pt:
-                pt.append(r[0])
-        if len(pt) > 1:
-            raise RuntimeError
-        self.particle_type = pt[0]
+        self.particle_type = filtered_type
 
     @contextmanager
     def apply(self, dobj):
         with dobj._chunked_read(dobj._current_chunk):
-            # We won't be storing the field data from the whole read, so we
-            # start by filtering now.
-            filter = self.function(self, dobj)
-            yield
-            # Retain a reference here, and we'll filter all appropriate fields
-            # later.
-            fd = dobj.field_data
+            with dobj._field_type_state(self.particle_type, dfi):
+                # We won't be storing the field data from the whole read, so we
+                # start by filtering now.
+                filter = self.function(self, dobj)
+                yield
+                # Retain a reference here, and we'll filter all appropriate fields
+                # later.
+                fd = dobj.field_data
         for f, tr in fd.items():
             if f[0] != self.particle_type: continue
             if tr.shape != filter.shape:
@@ -69,13 +66,13 @@
         # fields are implicitly "all" or something.
         return all(field in field_list for field in self.requires)
 
-def add_particle_filter(name, function, requires = None):
+def add_particle_filter(name, function, requires = None, filtered_type = "all"):
     if requires is None: requires = []
-    filter = ParticleFilter(name, function, requires)
+    filter = ParticleFilter(name, function, requires, filtered_type)
     filter_registry[name].append(filter)
 
-def particle_filter(name, requires = None):
+def particle_filter(name, requires = None, filtered_type = "all"):
     def _pfilter(func):
-        add_particle_filter(name, func, requires)
+        add_particle_filter(name, func, requires, filtered_type)
         return func
     return _pfilter

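With this change the source particle type is stated once via filtered_type
rather than inferred from the requires tuples.  A hedged sketch of the
non-decorator spelling, reusing the hypothetical filter from above:

    from yt.data_objects.particle_filters import add_particle_filter

    def formed_stars(pfilter, data):
        return data["all", "creation_time"] > 0

    add_particle_filter("formed_stars", formed_stars,
                        requires=[("all", "creation_time")],
                        filtered_type="all")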

https://bitbucket.org/yt_analysis/yt/commits/fc78b0a80896/
Changeset:   fc78b0a80896
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-02 21:38:14
Summary:     First draft of working particle filter implementation.
Affected #:  4 files

diff -r 996606f015ee15221e2787900793c74c3ed73a2d -r fc78b0a808965279c508a3c5fecad5d1ae9f6f67 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -467,7 +467,15 @@
         if self._current_chunk is None:
             self.hierarchy._identify_base_chunk(self)
         if fields is None: return
-        fields = self._determine_fields(fields)
+        nfields = []
+        for field in self._determine_fields(fields):
+            if field[0] in self.pf.h.filtered_particle_types:
+                f = self.pf.known_filters[field[0]]
+                with f.apply(self):
+                    self.get_data([(f.filtered_type, field[1])])
+            else:
+                nfields.append(field)
+        fields = nfields
         # Now we collect all our fields
         # Here is where we need to perform a validation step, so that if we
         # have a field requested that we actually *can't* yet get, we put it

diff -r 996606f015ee15221e2787900793c74c3ed73a2d -r fc78b0a808965279c508a3c5fecad5d1ae9f6f67 yt/data_objects/particle_filters.py
--- a/yt/data_objects/particle_filters.py
+++ b/yt/data_objects/particle_filters.py
@@ -24,8 +24,11 @@
 """
 
 import numpy as np
+import copy
 from contextlib import contextmanager
 
+from yt.data_objects.field_info_container import \
+    NullFunc, TranslationFunc
 from yt.utilities.exceptions import YTIllDefinedFilter
 from yt.funcs import *
 
@@ -41,12 +44,12 @@
         self.name = name
         self.function = function
         self.requires = requires[:]
-        self.particle_type = filtered_type
+        self.filtered_type = filtered_type
 
     @contextmanager
     def apply(self, dobj):
         with dobj._chunked_read(dobj._current_chunk):
-            with dobj._field_type_state(self.particle_type, dfi):
+            with dobj._field_type_state(self.filtered_type, dfi):
                 # We won't be storing the field data from the whole read, so we
                 # start by filtering now.
                 filter = self.function(self, dobj)
@@ -55,7 +58,7 @@
                 # later.
                 fd = dobj.field_data
         for f, tr in fd.items():
-            if f[0] != self.particle_type: continue
+            if f[0] != self.filtered_type: continue
             if tr.shape != filter.shape:
                 raise YTIllDefinedFilter(self, tr.shape, filter.shape)
             dobj.field_data[self.name, f[1]] = tr[filter]
@@ -64,7 +67,12 @@
         # Note that this assumes that all the fields in field_list have the
         # same form as the 'requires' attributes.  This won't be true if the
         # fields are implicitly "all" or something.
-        return all(field in field_list for field in self.requires)
+        return all((self.filtered_type, field) in field_list for field in self.requires)
+
+    def wrap_func(self, field_name, old_fi):
+        new_fi = copy.copy(old_fi)
+        new_fi.name = (self.filtered_type, field_name[1])
+        return new_fi
 
 def add_particle_filter(name, function, requires = None, filtered_type = "all"):
     if requires is None: requires = []

diff -r 996606f015ee15221e2787900793c74c3ed73a2d -r fc78b0a808965279c508a3c5fecad5d1ae9f6f67 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -88,6 +88,7 @@
         self.file_style = file_style
         self.conversion_factors = {}
         self.parameters = {}
+        self.known_filters = {}
 
         # path stuff
         self.parameter_filename = str(filename)
@@ -249,6 +250,9 @@
         else:
             raise YTGeometryNotSupported(self.geometry)
 
+    def add_particle_filter(self, filter):
+        self.known_filters[filter.name] = filter
+
     _last_freq = (None, None)
     _last_finfo = None
     def _get_field_info(self, ftype, fname):

diff -r 996606f015ee15221e2787900793c74c3ed73a2d -r fc78b0a808965279c508a3c5fecad5d1ae9f6f67 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -162,8 +162,35 @@
                 self.parameter_file.field_info[field] = known_fields[field]
 
     def _setup_derived_fields(self):
+        self.derived_field_list = []
+        self.filtered_particle_types = []
+        fc, fac = self._derived_fields_to_check()
+        self._derived_fields_add(fc, fac)
+        # Now we do a special case for all filters.
+        kf = self.parameter_file.known_filters
         fi = self.parameter_file.field_info
-        self.derived_field_list = []
+        fd = self.parameter_file.field_dependencies
+        for filter_name in kf:
+            filter = kf[filter_name]
+            if not filter.available(self.derived_field_list):
+                continue
+            # Only fields whose dependencies have been reached get added here.
+            available = False
+            for fn in self.derived_field_list:
+                if fn[0] == filter.filtered_type:
+                    # Now we can add this
+                    available = True
+                    self.derived_field_list.append(
+                        (filter.name, fn[1]))
+                    fi[filter.name, fn[1]] = filter.wrap_func(fn, fi[fn])
+                    # Now we append the dependencies
+                    fd[filter.name, fn[1]] = fd[fn]
+            if available:
+                self.parameter_file.particle_types += (filter_name,)
+                self.filtered_particle_types.append(filter_name)
+
+    def _derived_fields_to_check(self):
+        fi = self.parameter_file.field_info
         # First we construct our list of fields to check
         fields_to_check = []
         fields_to_allcheck = []
@@ -187,6 +214,15 @@
                 new_fields.append(new_fi.name)
             fields_to_check += new_fields
             fields_to_allcheck.append(field)
+        return fields_to_check, fields_to_allcheck
+
+    def _derived_fields_add(self, fields_to_check = None,
+                            fields_to_allcheck = None):
+        if fields_to_check is None:
+            fields_to_check = []
+        if fields_to_allcheck is None:
+            fields_to_allcheck = []
+        fi = self.parameter_file.field_info
         for field in fields_to_check:
             try:
                 fd = fi[field].get_dependencies(pf = self.parameter_file)

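Putting the draft together, a hypothetical end-to-end use: register a filter
object on the parameter file, then request fields using the filter's name as
the field type.  The load() path and dataset name are placeholders, and the
filter object is pulled out of the one-to-many filter_registry defined
earlier.

    from yt.mods import load   # assumed import path
    from yt.data_objects.particle_filters import filter_registry

    pf = load("my_dataset")    # placeholder dataset
    pf.add_particle_filter(filter_registry["formed_stars"][0])
    dd = pf.h.all_data()
    star_mass = dd["formed_stars", "particle_mass"]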

https://bitbucket.org/yt_analysis/yt/commits/67d98f2ef85c/
Changeset:   67d98f2ef85c
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-07-02 17:06:01
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #55)

Reduce overall memory usage and set Enzo particle_mass to always be a mass
Affected #:  12 files

diff -r 294e62e00fa88b8aa3a9e35875178d70aecc964c -r 67d98f2ef85c7831a1f5bb0c640b06ed89ea151a yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -286,7 +286,8 @@
         # This needs to be parallel_objects-ified
         for chunk in parallel_objects(self.data_source.chunks(
                 chunk_fields, "io")): 
-            mylog.debug("Adding chunk (%s) to tree", chunk.ires.size)
+            mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
+                get_memory_usage()/1024.)
             self._handle_chunk(chunk, fields, tree)
         # Note that this will briefly double RAM usage
         if self.proj_style == "mip":

diff -r 294e62e00fa88b8aa3a9e35875178d70aecc964c -r 67d98f2ef85c7831a1f5bb0c640b06ed89ea151a yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -246,7 +246,7 @@
         rv = np.empty(self.ires.size, dtype="float64")
         ind = 0
         if ngz == 0:
-            for io_chunk in self.chunks([], "io"):
+            for io_chunk in self.chunks([], "io", cache = False):
                 for i,chunk in enumerate(self.chunks(field, "spatial", ngz = 0)):
                     ind += self._current_chunk.objs[0].select(
                             self.selector, self[field], rv, ind)
@@ -279,8 +279,8 @@
             size = self._count_particles(ftype)
             rv = np.empty(size, dtype="float64")
             ind = 0
-            for io_chunk in self.chunks([], "io"):
-                for i,chunk in enumerate(self.chunks(field, "spatial")):
+            for io_chunk in self.chunks([], "io", cache = False):
+                for i, chunk in enumerate(self.chunks(field, "spatial")):
                     x, y, z = (self[ftype, 'particle_position_%s' % ax]
                                for ax in 'xyz')
                     if x.size == 0: continue
@@ -301,7 +301,7 @@
             if f1 == ftype:
                 return val.size
         size = 0
-        for io_chunk in self.chunks([], "io"):
+        for io_chunk in self.chunks([], "io", cache = False):
             for i,chunk in enumerate(self.chunks([], "spatial")):
                 x, y, z = (self[ftype, 'particle_position_%s' % ax]
                             for ax in 'xyz')

diff -r 294e62e00fa88b8aa3a9e35875178d70aecc964c -r 67d98f2ef85c7831a1f5bb0c640b06ed89ea151a yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -180,7 +180,7 @@
                 g = og
             yield YTDataChunk(dobj, "spatial", [g], None)
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         """
         Since subsets are calculated per domain,
         i.e. per file, yield each domain at a time to
@@ -189,7 +189,8 @@
         """
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], None)
+            yield YTDataChunk(dobj, "io", [subset], None,
+                              cache = cache)
 
 
 class ARTStaticOutput(StaticOutput):

diff -r 294e62e00fa88b8aa3a9e35875178d70aecc964c -r 67d98f2ef85c7831a1f5bb0c640b06ed89ea151a yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -232,11 +232,12 @@
     def _chunk_spatial(self, dobj, ngz):
         raise NotImplementedError
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         # _current_chunk is made from identify_base_chunk
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for chunk in oobjs:
-            yield YTDataChunk(dobj, "io", [chunk], self._data_size)
+            yield YTDataChunk(dobj, "io", [chunk], self._data_size,
+                              cache = cache)
 
     def _read_fluid_fields(self, fields, dobj, chunk=None):
         if len(fields) == 0:

diff -r 294e62e00fa88b8aa3a9e35875178d70aecc964c -r 67d98f2ef85c7831a1f5bb0c640b06ed89ea151a yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -388,14 +388,16 @@
     """
     particle_field = field.name[4:]
     pos = data[('all', 'Coordinates')]
+    # Get back into density
+    pden = data['all', 'particle_mass'] / data["CellVolume"] 
     top = data.deposit(
         pos,
-        [data[('all', particle_field)]*data[('all', 'particle_mass')]],
+        [data[('all', particle_field)]*pden],
         method = 'cic'
         )
     bottom = data.deposit(
         pos,
-        [data[('all', 'particle_mass')]],
+        [pden],
         method = 'cic'
         )
     top[bottom == 0] = 0.0
@@ -513,7 +515,22 @@
     add_enzo_field(pf, function=NullFunc,
               validators = [ValidateDataField(pf)],
               particle_type=True)
-add_field(('all', "particle_mass"), function=NullFunc, particle_type=True)
+
+def _convertParticleMass(data):
+    return data.convert("Density")*(data.convert("cm")**3.0)
+def _convertParticleMassMsun(data):
+    return data.convert("Density")*((data.convert("cm")**3.0)/mass_sun_cgs)
+# We have now multiplied by grid.dds.prod() inside the IO function.
+# So here we multiply just by the conversion to density.
+add_field(('all', "particle_mass"), function=NullFunc, 
+          particle_type=True, convert_function = _convertParticleMass)
+
+add_field("ParticleMass",
+          function=TranslationFunc("particle_mass"),
+          particle_type=True, convert_function=_convertParticleMass)
+add_field("ParticleMassMsun",
+          function=TranslationFunc("particle_mass"),
+          particle_type=True, convert_function=_convertParticleMassMsun)
 
 def _ParticleAge(field, data):
     current_time = data.pf.current_time
@@ -524,32 +541,6 @@
           validators=[ValidateDataField("creation_time")],
           particle_type=True, convert_function=_convertParticleAge)
 
-def _ParticleMass(field, data):
-    particles = data['all', "particle_mass"].astype('float64') * \
-                just_one(data["CellVolumeCode"].ravel())
-    # Note that we mandate grid-type here, so this is okay
-    return particles
-
-def _convertParticleMass(data):
-    return data.convert("Density")*(data.convert("cm")**3.0)
-def _IOLevelParticleMass(grid):
-    dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
-    cf = (_ParticleMass(None, dd) * _convertParticleMass(grid))[0]
-    return cf
-def _convertParticleMassMsun(data):
-    return data.convert("Density")*((data.convert("cm")**3.0)/1.989e33)
-def _IOLevelParticleMassMsun(grid):
-    dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
-    cf = (_ParticleMass(None, dd) * _convertParticleMassMsun(grid))[0]
-    return cf
-add_field("ParticleMass",
-          function=_ParticleMass, validators=[ValidateSpatial(0)],
-          particle_type=True, convert_function=_convertParticleMass,
-          particle_convert_function=_IOLevelParticleMass)
-add_field("ParticleMassMsun",
-          function=_ParticleMass, validators=[ValidateSpatial(0)],
-          particle_type=True, convert_function=_convertParticleMassMsun,
-          particle_convert_function=_IOLevelParticleMassMsun)
 
 #
 # Now we do overrides for 2D fields

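Restating the comments above as arithmetic (a hedged reading, with made-up
numbers rather than anything from the changeset): the IO layer now returns
particle_mass in code units as density_code * dds.prod(), so the field-level
convert_function only needs the code-density-to-cgs factor times cm**3.

    density_code = 1.0e-3      # hypothetical code-units density
    dx = dy = dz = 1.0 / 64    # hypothetical cell widths in code units
    convert_Density = 1.0e-27  # hypothetical code density -> g/cm^3
    convert_cm = 3.0e24        # hypothetical code length -> cm
    mass_sun_cgs = 1.989e33    # grams

    mass_code = density_code * dx * dy * dz   # now done in enzo/io.py
    mass_cgs = mass_code * convert_Density * convert_cm ** 3
    mass_msun = mass_cgs / mass_sun_cgs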
diff -r 294e62e00fa88b8aa3a9e35875178d70aecc964c -r 67d98f2ef85c7831a1f5bb0c640b06ed89ea151a yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -36,6 +36,8 @@
 import numpy as np
 from yt.funcs import *
 
+_convert_mass = ("particle_mass",)
+
 class IOHandlerPackedHDF5(BaseIOHandler):
 
     _data_style = "enzo_packed_3d"
@@ -81,6 +83,8 @@
                 for field in set(fields):
                     ftype, fname = field
                     gdata = data[g.id].pop(fname)[mask]
+                    if fname == "particle_mass":
+                        gdata *= g.dds.prod()
                     rv[field][ind:ind+gdata.size] = gdata
                 ind += gdata.size
                 data.pop(g.id)
@@ -130,6 +134,8 @@
                 for field in set(fields):
                     ftype, fname = field
                     gdata = data[g.id].pop(fname)[mask]
+                    if fname == "particle_mass":
+                        gdata *= g.dds.prod()
                     rv[field][ind:ind+gdata.size] = gdata
                 ind += gdata.size
         return rv

diff -r 294e62e00fa88b8aa3a9e35875178d70aecc964c -r 67d98f2ef85c7831a1f5bb0c640b06ed89ea151a yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -212,12 +212,13 @@
     def _setup_data_io(self):
         self.io = io_registry[self.data_style](self.parameter_file)
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         # We'll take the max of 16 and the number of processors
         nl = max(16, ytcfg.getint("yt", "__topcomm_parallel_size"))
         for gs in list_chunks(gobjs, nl):
-            yield YTDataChunk(dobj, "io", gs, self._count_selection)
+            yield YTDataChunk(dobj, "io", gs, self._count_selection,
+                              cache = cache)
 
 class FLASHStaticOutput(StaticOutput):
     _hierarchy_class = FLASHHierarchy

diff -r 294e62e00fa88b8aa3a9e35875178d70aecc964c -r 67d98f2ef85c7831a1f5bb0c640b06ed89ea151a yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -348,10 +348,10 @@
                 g = og
             yield YTDataChunk(dobj, "spatial", [g], None)
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], None)
+            yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
 
 class RAMSESStaticOutput(StaticOutput):
     _hierarchy_class = RAMSESGeometryHandler

diff -r 294e62e00fa88b8aa3a9e35875178d70aecc964c -r 67d98f2ef85c7831a1f5bb0c640b06ed89ea151a yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -403,8 +403,9 @@
         if len(fields_to_read) == 0:
             return {}, fields_to_generate
         fields_to_return = self.io._read_particle_selection(
-                    self._chunk_io(dobj), selector,
-                    fields_to_read)
+            self._chunk_io(dobj, cache = False),
+            selector,
+            fields_to_read)
         for field in fields_to_read:
             ftype, fname = field
             finfo = self.pf._get_field_info(*field)
@@ -425,10 +426,11 @@
         fields_to_read, fields_to_generate = self._split_fields(fields)
         if len(fields_to_read) == 0:
             return {}, fields_to_generate
-        fields_to_return = self.io._read_fluid_selection(self._chunk_io(dobj),
-                                                   selector,
-                                                   fields_to_read,
-                                                   chunk_size)
+        fields_to_return = self.io._read_fluid_selection(
+            self._chunk_io(dobj, cache = False),
+            selector,
+            fields_to_read,
+            chunk_size)
         for field in fields_to_read:
             ftype, fname = field
             conv_factor = self.pf.field_info[fname]._convert_function(self)
@@ -453,20 +455,30 @@
         else:
             raise NotImplementedError
 
+def cached_property(func):
+    n = '_%s' % func.func_name
+    def cached_func(self):
+        if self._cache and getattr(self, n, None) is not None:
+            return getattr(self, n)
+        if self.data_size is None:
+            tr = self._accumulate_values(n[1:])
+        else:
+            tr = func(self)
+        if self._cache:
+            setattr(self, n, tr)
+        return tr
+    return property(cached_func)
+
 class YTDataChunk(object):
 
-    def __init__(self, dobj, chunk_type, objs, data_size = None, field_type = None):
+    def __init__(self, dobj, chunk_type, objs, data_size = None,
+                 field_type = None, cache = False):
         self.dobj = dobj
         self.chunk_type = chunk_type
         self.objs = objs
-        self._data_size = data_size
+        self.data_size = data_size
         self._field_type = field_type
-
-    @property
-    def data_size(self):
-        if callable(self._data_size):
-            self._data_size = self._data_size(self.dobj, self.objs)
-        return self._data_size
+        self._cache = cache
 
     def _accumulate_values(self, method):
         # We call this generically.  It's somewhat slower, since we're doing
@@ -477,35 +489,25 @@
             f = getattr(obj, mname)
             arrs.append(f(self.dobj))
         arrs = np.concatenate(arrs)
-        self._data_size = arrs.shape[0]
+        self.data_size = arrs.shape[0]
         return arrs
 
-    _fcoords = None
-    @property
+    @cached_property
     def fcoords(self):
-        if self.data_size is None:
-            self._fcoords = self._accumulate_values("fcoords")
-        if self._fcoords is not None: return self._fcoords
         ci = np.empty((self.data_size, 3), dtype='float64')
-        self._fcoords = ci
-        if self.data_size == 0: return self._fcoords
+        if self.data_size == 0: return ci
         ind = 0
         for obj in self.objs:
             c = obj.select_fcoords(self.dobj)
             if c.shape[0] == 0: continue
             ci[ind:ind+c.shape[0], :] = c
             ind += c.shape[0]
-        return self._fcoords
+        return ci
 
-    _icoords = None
-    @property
+    @cached_property
     def icoords(self):
-        if self.data_size is None:
-            self._icoords = self._accumulate_values("icoords")
-        if self._icoords is not None: return self._icoords
         ci = np.empty((self.data_size, 3), dtype='int64')
-        self._icoords = ci
-        if self.data_size == 0: return self._icoords
+        if self.data_size == 0: return ci
         ind = 0
         for obj in self.objs:
             c = obj.select_icoords(self.dobj)
@@ -514,15 +516,10 @@
             ind += c.shape[0]
         return ci
 
-    _fwidth = None
-    @property
+    @cached_property
     def fwidth(self):
-        if self.data_size is None:
-            self._fwidth = self._accumulate_values("fwidth")
-        if self._fwidth is not None: return self._fwidth
         ci = np.empty((self.data_size, 3), dtype='float64')
-        self._fwidth = ci
-        if self.data_size == 0: return self._fwidth
+        if self.data_size == 0: return ci
         ind = 0
         for obj in self.objs:
             c = obj.select_fwidth(self.dobj)
@@ -531,15 +528,10 @@
             ind += c.shape[0]
         return ci
 
-    _ires = None
-    @property
+    @cached_property
     def ires(self):
-        if self.data_size is None:
-            self._ires = self._accumulate_values("ires")
-        if self._ires is not None: return self._ires
         ci = np.empty(self.data_size, dtype='int64')
-        self._ires = ci
-        if self.data_size == 0: return self._ires
+        if self.data_size == 0: return ci
         ind = 0
         for obj in self.objs:
             c = obj.select_ires(self.dobj)
@@ -548,22 +540,17 @@
             ind += c.size
         return ci
 
-    _tcoords = None
-    @property
+    @cached_property
     def tcoords(self):
-        if self._tcoords is None:
-            self.dtcoords
+        self.dtcoords
         return self._tcoords
 
-    _dtcoords = None
-    @property
+    @cached_property
     def dtcoords(self):
-        if self._dtcoords is not None: return self._dtcoords
         ct = np.empty(self.data_size, dtype='float64')
         cdt = np.empty(self.data_size, dtype='float64')
-        self._tcoords = ct
-        self._dtcoords = cdt
-        if self.data_size == 0: return self._dtcoords
+        self._tcoords = ct # Set this so tcoords can return it
+        if self.data_size == 0: return cdt
         ind = 0
         for obj in self.objs:
             gdt, gt = obj.tcoords(self.dobj)

diff -r 294e62e00fa88b8aa3a9e35875178d70aecc964c -r 67d98f2ef85c7831a1f5bb0c640b06ed89ea151a yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -245,16 +245,16 @@
             dobj.size = self._count_selection(dobj)
         if getattr(dobj, "shape", None) is None:
             dobj.shape = (dobj.size,)
-        dobj._current_chunk = list(self._chunk_all(dobj))[0]
+        dobj._current_chunk = list(self._chunk_all(dobj, cache = False))[0]
 
     def _count_selection(self, dobj, grids = None):
         if grids is None: grids = dobj._chunk_info
         count = sum((g.count(dobj.selector) for g in grids))
         return count
 
-    def _chunk_all(self, dobj):
+    def _chunk_all(self, dobj, cache = True):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        yield YTDataChunk(dobj, "all", gobjs, dobj.size)
+        yield YTDataChunk(dobj, "all", gobjs, dobj.size, cache)
         
     def _chunk_spatial(self, dobj, ngz, sort = None):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
@@ -271,13 +271,16 @@
                 g = og
             size = self._count_selection(dobj, [og])
             if size == 0: continue
-            yield YTDataChunk(dobj, "spatial", [g], size)
+            # We don't want to cache any of the masks or icoords or fcoords for
+            # individual grids.
+            yield YTDataChunk(dobj, "spatial", [g], size, cache = False)
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         gfiles = defaultdict(list)
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for g in gobjs:
             gfiles[g.filename].append(g)
         for fn in sorted(gfiles):
             gs = gfiles[fn]
-            yield YTDataChunk(dobj, "io", gs, self._count_selection(dobj, gs))
+            yield YTDataChunk(dobj, "io", gs, self._count_selection(dobj, gs),
+                              cache = cache)

diff -r 294e62e00fa88b8aa3a9e35875178d70aecc964c -r 67d98f2ef85c7831a1f5bb0c640b06ed89ea151a yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -170,10 +170,10 @@
                 g = og
             yield YTDataChunk(dobj, "spatial", [g])
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], None)
+            yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
 
 class ParticleDataChunk(YTDataChunk):
     def __init__(self, oct_handler, regions, *args, **kwargs):

diff -r 294e62e00fa88b8aa3a9e35875178d70aecc964c -r 67d98f2ef85c7831a1f5bb0c640b06ed89ea151a yt/utilities/lib/QuadTree.pyx
--- a/yt/utilities/lib/QuadTree.pyx
+++ b/yt/utilities/lib/QuadTree.pyx
@@ -131,7 +131,7 @@
         cdef int i, j
         cdef QuadTreeNode *node
         cdef np.int64_t pos[2]
-        cdef np.float64_t *vals = <np.float64_t *> alloca(
+        cdef np.float64_t *vals = <np.float64_t *> malloc(
                 sizeof(np.float64_t)*nvals)
         cdef np.float64_t weight_val = 0.0
         self.nvals = nvals
@@ -160,6 +160,7 @@
                 self.root_nodes[i][j] = QTN_initialize(
                     pos, nvals, vals, weight_val)
         self.num_cells = self.top_grid_dims[0] * self.top_grid_dims[1]
+        free(vals)
 
     cdef int count_total_cells(self, QuadTreeNode *root):
         cdef int total = 0
@@ -373,7 +374,7 @@
         cdef np.float64_t *vdata = <np.float64_t *> nvals.data
         cdef np.float64_t *wdata = <np.float64_t *> nwvals.data
         cdef np.float64_t wtoadd
-        cdef np.float64_t *vtoadd = <np.float64_t *> alloca(
+        cdef np.float64_t *vtoadd = <np.float64_t *> malloc(
                 sizeof(np.float64_t)*self.nvals)
         for i in range(self.top_grid_dims[0]):
             for j in range(self.top_grid_dims[1]):
@@ -381,6 +382,7 @@
                 wtoadd = 0.0
                 curpos += self.fill(self.root_nodes[i][j],
                     curpos, px, py, pdx, pdy, vdata, wdata, vtoadd, wtoadd, 0)
+        free(vtoadd)
         return opx, opy, opdx, opdy, nvals, nwvals
 
     cdef int count(self, QuadTreeNode *node):
@@ -406,7 +408,7 @@
                         np.int64_t level):
         cdef int i, j, n
         cdef np.float64_t *vorig
-        vorig = <np.float64_t *> alloca(sizeof(np.float64_t) * self.nvals)
+        vorig = <np.float64_t *> malloc(sizeof(np.float64_t) * self.nvals)
         if node.children[0][0] == NULL:
             if self.merged == -1:
                 for i in range(self.nvals):
@@ -444,6 +446,7 @@
             for i in range(self.nvals):
                 vtoadd[i] = vorig[i]
             wtoadd -= node.weight_val
+        free(vorig)
         return added
 
     @cython.boundscheck(False)

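A stripped-down, standalone sketch of the cached_property pattern introduced
in geometry_handler.py above, omitting the data_size/_accumulate_values
branch: the property memoizes its value only when the owning chunk opted in
with cache=True, which is why the IO chunks can now request cache=False to
keep memory usage down.

    def cached_property(func):
        n = '_%s' % func.func_name              # Python 2 spelling
        def cached_func(self):
            if self._cache and getattr(self, n, None) is not None:
                return getattr(self, n)
            tr = func(self)
            if self._cache:
                setattr(self, n, tr)            # memoize only on request
            return tr
        return property(cached_func)

    class Chunk(object):
        def __init__(self, cache=False):
            self._cache = cache
        @cached_property
        def fcoords(self):
            print "computing"                   # reruns unless cache=True
            return [0.5, 0.5, 0.5]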

https://bitbucket.org/yt_analysis/yt/commits/8594e3c7ecb9/
Changeset:   8594e3c7ecb9
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-07-02 21:59:01
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #57)

Adding a new exception, subclassing YTFieldNotFound
Affected #:  2 files

diff -r 67d98f2ef85c7831a1f5bb0c640b06ed89ea151a -r 8594e3c7ecb9f39175b2013eb0adb5bfda100aa3 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -219,11 +219,14 @@
         finfo = self.pf._get_field_info(*field)
         with self._field_type_state(ftype, finfo):
             if fname in self._container_fields:
-                return self._generate_container_field(field)
+                tr = self._generate_container_field(field)
-            if finfo.particle_type:
-                return self._generate_particle_field(field)
+            elif finfo.particle_type:
+                tr = self._generate_particle_field(field)
             else:
-                return self._generate_fluid_field(field)
+                tr = self._generate_fluid_field(field)
+            if tr is None:
+                raise YTCouldNotGenerateField(field, self.pf)
+            return tr
 
     def _generate_fluid_field(self, field):
         # First we check the validator

diff -r 67d98f2ef85c7831a1f5bb0c640b06ed89ea151a -r 8594e3c7ecb9f39175b2013eb0adb5bfda100aa3 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -77,6 +77,10 @@
     def __str__(self):
         return "Could not find field '%s' in %s." % (self.fname, self.pf)
 
+class YTCouldNotGenerateField(YTFieldNotFound):
+    def __str__(self):
+        return "Could field '%s' in %s could not be generated." % (self.fname, self.pf)
+
 class YTFieldTypeNotFound(YTException):
     def __init__(self, fname):
         self.fname = fname


https://bitbucket.org/yt_analysis/yt/commits/2d1f7f07cce0/
Changeset:   2d1f7f07cce0
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-02 22:05:27
Summary:     Merged
Affected #:  24 files

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -286,7 +286,8 @@
         # This needs to be parallel_objects-ified
         for chunk in parallel_objects(self.data_source.chunks(
                 chunk_fields, "io")): 
-            mylog.debug("Adding chunk (%s) to tree", chunk.ires.size)
+            mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
+                get_memory_usage()/1024.)
             self._handle_chunk(chunk, fields, tree)
         # Note that this will briefly double RAM usage
         if self.proj_style == "mip":

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -219,11 +219,14 @@
         finfo = self.pf._get_field_info(*field)
         with self._field_type_state(ftype, finfo):
             if fname in self._container_fields:
-                return self._generate_container_field(field)
+                tr = self._generate_container_field(field)
-            if finfo.particle_type:
-                return self._generate_particle_field(field)
+            elif finfo.particle_type:
+                tr = self._generate_particle_field(field)
             else:
-                return self._generate_fluid_field(field)
+                tr = self._generate_fluid_field(field)
+            if tr is None:
+                raise YTCouldNotGenerateField(field, self.pf)
+            return tr
 
     def _generate_fluid_field(self, field):
         # First we check the validator
@@ -246,7 +249,7 @@
         rv = np.empty(self.ires.size, dtype="float64")
         ind = 0
         if ngz == 0:
-            for io_chunk in self.chunks([], "io"):
+            for io_chunk in self.chunks([], "io", cache = False):
                 for i,chunk in enumerate(self.chunks(field, "spatial", ngz = 0)):
                     ind += self._current_chunk.objs[0].select(
                             self.selector, self[field], rv, ind)
@@ -279,8 +282,8 @@
             size = self._count_particles(ftype)
             rv = np.empty(size, dtype="float64")
             ind = 0
-            for io_chunk in self.chunks([], "io"):
-                for i,chunk in enumerate(self.chunks(field, "spatial")):
+            for io_chunk in self.chunks([], "io", cache = False):
+                for i, chunk in enumerate(self.chunks(field, "spatial")):
                     x, y, z = (self[ftype, 'particle_position_%s' % ax]
                                for ax in 'xyz')
                     if x.size == 0: continue
@@ -301,7 +304,7 @@
             if f1 == ftype:
                 return val.size
         size = 0
-        for io_chunk in self.chunks([], "io"):
+        for io_chunk in self.chunks([], "io", cache = False):
             for i,chunk in enumerate(self.chunks([], "spatial")):
                 x, y, z = (self[ftype, 'particle_position_%s' % ax]
                             for ax in 'xyz')

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -270,8 +270,12 @@
         return arr.reshape(self.ActiveDimensions, order="C")
 
     def __missing__(self, item):
-        if hasattr(self.pf, "field_info") and isinstance(item, tuple):
-            finfo = self.pf._get_field_info(*item)
+        if hasattr(self.pf, "field_info"):
+            if not isinstance(item, tuple):
+                field = ("unknown", item)
+            else:
+                field = item
+            finfo = self.pf._get_field_info(*field)
         else:
             FI = getattr(self.pf, "field_info", FieldInfo)
             if item in FI:
@@ -283,7 +287,7 @@
                 vv = finfo(self)
             except NeedsGridType as exc:
                 ngz = exc.ghost_zones
-                nfd = FieldDetector(self.nd + ngz * 2)
+                nfd = FieldDetector(self.nd + ngz * 2, pf = self.pf)
                 nfd._num_ghost_zones = ngz
                 vv = finfo(nfd)
                 if ngz > 0: vv = vv[ngz:-ngz, ngz:-ngz, ngz:-ngz]

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -146,3 +146,4 @@
     registry.add_field((ptype, "Velocities"),
                        function=_get_vec_func(ptype, vel_names),
                        particle_type=True)
+

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -60,6 +60,7 @@
     geometry = "cartesian"
     coordinates = None
     max_level = 99
+    storage_filename = None
 
     class __metaclass__(type):
         def __init__(cls, name, b, d):

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/data_objects/tests/test_derived_quantities.py
--- a/yt/data_objects/tests/test_derived_quantities.py
+++ b/yt/data_objects/tests/test_derived_quantities.py
@@ -50,3 +50,7 @@
         a_std = np.sqrt((ad["CellMass"] * (ad["Density"] - a_mean)**2).sum() / 
                         ad["CellMass"].sum())
         yield assert_rel_equal, my_std, a_std, 12
+
+if __name__ == "__main__":
+    for i in test_extrema():
+        i[0](*i[1:])

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -85,13 +85,17 @@
 
 def test_all_fields():
     for field in FieldInfo:
-        if field.startswith("CuttingPlane"): continue
-        if field.startswith("particle"): continue
-        if field.startswith("CIC"): continue
-        if field.startswith("WeakLensingConvergence"): continue
-        if field.startswith("DensityPerturbation"): continue
-        if field.startswith("Matter_Density"): continue
-        if field.startswith("Overdensity"): continue
+        if isinstance(field, types.TupleType):
+            fname = field[0]
+        else:
+            fname = field
+        if fname.startswith("CuttingPlane"): continue
+        if fname.startswith("particle"): continue
+        if fname.startswith("CIC"): continue
+        if fname.startswith("WeakLensingConvergence"): continue
+        if fname.startswith("DensityPerturbation"): continue
+        if fname.startswith("Matter_Density"): continue
+        if fname.startswith("Overdensity"): continue
         if FieldInfo[field].particle_type: continue
         for nproc in [1, 4, 8]:
             yield TestFieldAccess(field, nproc)

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -1005,13 +1005,6 @@
 add_field("JeansMassMsun",function=_JeansMassMsun,
           units=r"\rm{M_{\odot}}")
 
-# We add these fields so that the field detector can use them
-for field in ["particle_position_%s" % ax for ax in "xyz"]:
-    # This marker should let everyone know not to use the fields, but NullFunc
-    # should do that, too.
-    add_field(("all", field), function=NullFunc, particle_type = True,
-        units=r"UNDEFINED")
-
 def _pdensity(field, data):
     pmass = data[('deposit','all_mass')]
     np.divide(pmass, data["CellVolume"], pmass)

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -180,7 +180,7 @@
                 g = og
             yield YTDataChunk(dobj, "spatial", [g], None)
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         """
         Since subsets are calculated per domain,
         i.e. per file, yield each domain at a time to
@@ -189,7 +189,8 @@
         """
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], None)
+            yield YTDataChunk(dobj, "io", [subset], None,
+                              cache = cache)
 
 
 class ARTStaticOutput(StaticOutput):

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -232,11 +232,12 @@
     def _chunk_spatial(self, dobj, ngz):
         raise NotImplementedError
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         # _current_chunk is made from identify_base_chunk
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for chunk in oobjs:
-            yield YTDataChunk(dobj, "io", [chunk], self._data_size)
+            yield YTDataChunk(dobj, "io", [chunk], self._data_size,
+                              cache = cache)
 
     def _read_fluid_fields(self, fields, dobj, chunk=None):
         if len(fields) == 0:

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -388,14 +388,16 @@
     """
     particle_field = field.name[4:]
     pos = data[('all', 'Coordinates')]
+    # Get back into density
+    pden = data['all', 'particle_mass'] / data["CellVolume"] 
     top = data.deposit(
         pos,
-        [data[('all', particle_field)]*data[('all', 'particle_mass')]],
+        [data[('all', particle_field)]*pden],
         method = 'cic'
         )
     bottom = data.deposit(
         pos,
-        [data[('all', 'particle_mass')]],
+        [pden],
         method = 'cic'
         )
     top[bottom == 0] = 0.0
@@ -507,11 +509,28 @@
     add_enzo_field(("all", pf), function=NullFunc, convert_function=cfunc,
               particle_type=True)
 
-for pf in ["creation_time", "dynamical_time", "metallicity_fraction"]:
+for pf in ["creation_time", "dynamical_time", "metallicity_fraction"] \
+        + ["particle_position_%s" % ax for ax in 'xyz'] \
+        + ["particle_velocity_%s" % ax for ax in 'xyz']:
     add_enzo_field(pf, function=NullFunc,
               validators = [ValidateDataField(pf)],
               particle_type=True)
-add_field(('all', "particle_mass"), function=NullFunc, particle_type=True)
+
+def _convertParticleMass(data):
+    return data.convert("Density")*(data.convert("cm")**3.0)
+def _convertParticleMassMsun(data):
+    return data.convert("Density")*((data.convert("cm")**3.0)/mass_sun_cgs)
+# We have now multiplied by grid.dds.prod() inside the IO function.
+# So here we multiply just by the conversion to density.
+add_field(('all', "particle_mass"), function=NullFunc, 
+          particle_type=True, convert_function = _convertParticleMass)
+
+add_field("ParticleMass",
+          function=TranslationFunc("particle_mass"),
+          particle_type=True, convert_function=_convertParticleMass)
+add_field("ParticleMassMsun",
+          function=TranslationFunc("particle_mass"),
+          particle_type=True, convert_function=_convertParticleMassMsun)
 
 def _ParticleAge(field, data):
     current_time = data.pf.current_time
@@ -522,32 +541,6 @@
           validators=[ValidateDataField("creation_time")],
           particle_type=True, convert_function=_convertParticleAge)
 
-def _ParticleMass(field, data):
-    particles = data['all', "particle_mass"].astype('float64') * \
-                just_one(data["CellVolumeCode"].ravel())
-    # Note that we mandate grid-type here, so this is okay
-    return particles
-
-def _convertParticleMass(data):
-    return data.convert("Density")*(data.convert("cm")**3.0)
-def _IOLevelParticleMass(grid):
-    dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
-    cf = (_ParticleMass(None, dd) * _convertParticleMass(grid))[0]
-    return cf
-def _convertParticleMassMsun(data):
-    return data.convert("Density")*((data.convert("cm")**3.0)/1.989e33)
-def _IOLevelParticleMassMsun(grid):
-    dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
-    cf = (_ParticleMass(None, dd) * _convertParticleMassMsun(grid))[0]
-    return cf
-add_field("ParticleMass",
-          function=_ParticleMass, validators=[ValidateSpatial(0)],
-          particle_type=True, convert_function=_convertParticleMass,
-          particle_convert_function=_IOLevelParticleMass)
-add_field("ParticleMassMsun",
-          function=_ParticleMass, validators=[ValidateSpatial(0)],
-          particle_type=True, convert_function=_convertParticleMassMsun,
-          particle_convert_function=_IOLevelParticleMassMsun)
 
 #
 # Now we do overrides for 2D fields

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -36,6 +36,8 @@
 import numpy as np
 from yt.funcs import *
 
+_convert_mass = ("particle_mass",)
+
 class IOHandlerPackedHDF5(BaseIOHandler):
 
     _data_style = "enzo_packed_3d"
@@ -81,6 +83,8 @@
                 for field in set(fields):
                     ftype, fname = field
                     gdata = data[g.id].pop(fname)[mask]
+                    if fname == "particle_mass":
+                        gdata *= g.dds.prod()
                     rv[field][ind:ind+gdata.size] = gdata
                 ind += gdata.size
                 data.pop(g.id)
@@ -130,6 +134,8 @@
                 for field in set(fields):
                     ftype, fname = field
                     gdata = data[g.id].pop(fname)[mask]
+                    if fname == "particle_mass":
+                        gdata *= g.dds.prod()
                     rv[field][ind:ind+gdata.size] = gdata
                 ind += gdata.size
         return rv

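To make the new mass convention concrete, here is a rough worked sketch
(all numbers hypothetical): the on-disk value is density-like, io.py
multiplies it by the cell volume at read time, and the field's
convert_function supplies the CGS density and length conversions.

    raw = 2.5                     # density-like code value from the HDF5 file
    dds_prod = (1.0 / 64) ** 3    # cell volume in code units, 64^3 root grid
    mass_code = raw * dds_prod    # what io.py now hands back for particle_mass
    # _convertParticleMass then scales by convert("Density")*convert("cm")**3
    # to reach grams; _convertParticleMassMsun divides that by mass_sun_cgs.
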
diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -74,6 +74,9 @@
     _hydro_offset = None
     _level_count = None
 
+    def __repr__(self):
+        return "RAMSESDomainFile: %i" % self.domain_id
+
     @property
     def level_count(self):
         if self._level_count is not None: return self._level_count
@@ -345,10 +348,10 @@
                 g = og
             yield YTDataChunk(dobj, "spatial", [g], None)
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], None)
+            yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
 
 class RAMSESStaticOutput(StaticOutput):
     _hierarchy_class = RAMSESGeometryHandler
@@ -398,6 +401,8 @@
         for unit in mpc_conversion.keys():
             self.units[unit] = unit_l * mpc_conversion[unit] / mpc_conversion["cm"]
             self.units['%sh' % unit] = self.units[unit] * self.hubble_constant
+            self.units['%scm' % unit] = (self.units[unit] /
+                                          (1 + self.current_redshift))
             self.units['%shcm' % unit] = (self.units['%sh' % unit] /
                                           (1 + self.current_redshift))
         for unit in sec_conversion.keys():

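For reference, the new '%scm' entries are the comoving variants of each
length unit: the proper conversion divided by (1 + z).  A toy check
(numbers illustrative only):

    z = 2.0                  # hypothetical current_redshift
    mpc = 3.0857e24          # proper Mpc in cm
    mpccm = mpc / (1 + z)    # the new 'mpccm' entry; 'mpchcm' also folds in h
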
diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -32,11 +32,12 @@
     BaseIOHandler
 
 from yt.utilities.fortran_utils import read_record
-from yt.utilities.lib.geometry_utils import get_morton_indices, \
-    get_morton_indices_unravel
+from yt.utilities.lib.geometry_utils import compute_morton
 
 from yt.geometry.oct_container import _ORDER_MAX
 
+CHUNKSIZE = 10000000
+
 _vector_fields = ("Coordinates", "Velocity", "Velocities")
 
 class IOHandlerOWLS(BaseIOHandler):
@@ -104,16 +105,18 @@
         f = h5py.File(data_file.filename, "r")
         pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
         morton = np.empty(pcount, dtype='uint64')
-        DLE = data_file.pf.domain_left_edge
-        DRE = data_file.pf.domain_right_edge
-        dx = (DRE - DLE) / 2**_ORDER_MAX
         ind = 0
         for key in f.keys():
             if not key.startswith("PartType"): continue
-            pos = f[key]["Coordinates"][:].astype("float64")
+            ds = f[key]["Coordinates"]
+            dt = ds.dtype.newbyteorder("N") # Native
+            pos = np.empty(ds.shape, dtype=dt)
+            pos[:] = ds
             regions.add_data_file(pos, data_file.file_id)
-            pos = np.floor((pos - DLE)/dx).astype("uint64")
-            morton[ind:ind+pos.shape[0]] = get_morton_indices(pos)
+            morton[ind:ind+pos.shape[0]] = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.pf.domain_left_edge,
+                data_file.pf.domain_right_edge)
             ind += pos.shape[0]
         f.close()
         return morton
@@ -257,7 +260,6 @@
 
     def _initialize_index(self, data_file, regions):
         count = sum(data_file.total_particles.values())
-        dt = [("px", "float32"), ("py", "float32"), ("pz", "float32")]
         DLE = data_file.pf.domain_left_edge
         DRE = data_file.pf.domain_right_edge
         dx = (DRE - DLE) / 2**_ORDER_MAX
@@ -266,16 +268,10 @@
             # We add on an additional 4 for the first record.
             f.seek(data_file._position_offset + 4)
             # The first total_particles * 3 values are positions
-            pp = np.fromfile(f, dtype = dt, count = count)
-        pos = np.column_stack([pp['px'], pp['py'], pp['pz']]).astype("float64")
-        del pp
-        regions.add_data_file(pos, data_file.file_id)
-        lx = np.floor((pos[:,0] - DLE[0])/dx[0]).astype("uint64")
-        ly = np.floor((pos[:,1] - DLE[1])/dx[1]).astype("uint64")
-        lz = np.floor((pos[:,2] - DLE[2])/dx[2]).astype("uint64")
-        del pos
-        morton = get_morton_indices_unravel(lx, ly, lz)
-        del lx, ly, lz
+            pp = np.fromfile(f, dtype = 'float32', count = count*3)
+            pp.shape = (count, 3)
+        regions.add_data_file(pp, data_file.file_id)
+        morton = compute_morton(pp[:,0], pp[:,1], pp[:,2], DLE, DRE)
         return morton
 
     def _count_particles(self, data_file):
@@ -431,34 +427,33 @@
                 # We'll just add the individual types separately
                 count = data_file.total_particles[ptype]
                 if count == 0: continue
-                pp = np.fromfile(f, dtype = self._pdtypes[ptype],
-                                 count = count)
-                mis = np.empty(3, dtype="float64")
-                mas = np.empty(3, dtype="float64")
-                for axi, ax in enumerate('xyz'):
-                    mi = pp["Coordinates"][ax].min()
-                    ma = pp["Coordinates"][ax].max()
-                    mylog.debug("Spanning: %0.3e .. %0.3e in %s", mi, ma, ax)
-                    mis[axi] = mi
-                    mas[axi] = ma
-                if np.any(mis < pf.domain_left_edge) or \
-                   np.any(mas > pf.domain_right_edge):
-                    raise YTDomainOverflow(mis, mas,
-                                           pf.domain_left_edge,
-                                           pf.domain_right_edge)
-                fpos = np.empty((count, 3), dtype="float64")
-                fpos[:,0] = pp["Coordinates"]["x"]
-                fpos[:,1] = pp["Coordinates"]["y"]
-                fpos[:,2] = pp["Coordinates"]["z"]
-                regions.add_data_file(fpos, data_file.file_id)
-                del fpos
-                pos = np.empty((count, 3), dtype="uint64")
-                for axi, ax in enumerate("xyz"):
-                    coords = pp['Coordinates'][ax].astype("float64")
-                    coords = np.floor((coords - DLE[axi])/dx[axi])
-                    pos[:,axi] = coords
-                morton[ind:ind+count] = get_morton_indices(pos)
-                del pp, pos
+                start, stop = ind, ind + count
+                while ind < stop:
+                    c = min(CHUNKSIZE, stop - ind)
+                    pp = np.fromfile(f, dtype = self._pdtypes[ptype],
+                                     count = c)
+                    mis = np.empty(3, dtype="float64")
+                    mas = np.empty(3, dtype="float64")
+                    for axi, ax in enumerate('xyz'):
+                        mi = pp["Coordinates"][ax].min()
+                        ma = pp["Coordinates"][ax].max()
+                        mylog.debug("Spanning: %0.3e .. %0.3e in %s", mi, ma, ax)
+                        mis[axi] = mi
+                        mas[axi] = ma
+                    if np.any(mis < pf.domain_left_edge) or \
+                       np.any(mas > pf.domain_right_edge):
+                        raise YTDomainOverflow(mis, mas,
+                                               pf.domain_left_edge,
+                                               pf.domain_right_edge)
+                    pos = np.empty((pp.size, 3), dtype="float64")
+                    pos[:,0] = pp["Coordinates"]["x"]
+                    pos[:,1] = pp["Coordinates"]["y"]
+                    pos[:,2] = pp["Coordinates"]["z"]
+                    regions.add_data_file(pos, data_file.file_id)
+                    morton[ind:ind+c] = compute_morton(
+                        pos[:,0], pos[:,1], pos[:,2],
+                        DLE, DRE)
+                    ind += c
         mylog.info("Adding %0.3e particles", morton.size)
         return morton
 

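The CHUNKSIZE loop above is a streaming read; the same pattern in
isolation (dtype hypothetical -- the real code uses self._pdtypes[ptype]
and checks domain bounds for every slab):

    import numpy as np

    CHUNKSIZE = 10000000

    def iter_chunks(f, dtype, total):
        # Read `total` records in bounded slabs so peak memory stays at
        # CHUNKSIZE records rather than the whole particle block.
        ind = 0
        while ind < total:
            c = min(CHUNKSIZE, total - ind)
            yield np.fromfile(f, dtype=dtype, count=c)
            ind += c
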
diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -42,6 +42,10 @@
 add_field = StreamFieldInfo.add_field
 
 add_stream_field("density", function = NullFunc)
+add_stream_field("x-velocity", function = NullFunc)
+add_stream_field("y-velocity", function = NullFunc)
+add_stream_field("z-velocity", function = NullFunc)
+
 add_field("Density", function = TranslationFunc("density"))
 
 add_stream_field("particle_position_x", function = NullFunc, particle_type=True)

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -31,6 +31,7 @@
 from types import ClassType
 import numpy as np
 import abc
+import copy
 
 from yt.funcs import *
 from yt.config import ytcfg
@@ -166,8 +167,7 @@
         # First we construct our list of fields to check
         fields_to_check = []
         fields_to_allcheck = []
-        fields_to_add = []
-        for field in fi:
+        for field in fi.keys():
             finfo = fi[field]
             # Explicitly defined
             if isinstance(field, tuple):
@@ -179,13 +179,14 @@
                 fields_to_check.append(field)
                 continue
             # We do a special case for 'all' later
-            new_fields = [(pt, field) for pt in
-                          self.parameter_file.particle_types]
+            new_fields = []
+            for pt in self.parameter_file.particle_types:
+                new_fi = copy.copy(finfo)
+                new_fi.name = (pt, new_fi.name)
+                fi[new_fi.name] = new_fi
+                new_fields.append(new_fi.name)
             fields_to_check += new_fields
-            fields_to_add.extend( (new_field, fi[field]) for
-                                   new_field in new_fields )
             fields_to_allcheck.append(field)
-        fi.update(fields_to_add)
         for field in fields_to_check:
             try:
                 fd = fi[field].get_dependencies(pf = self.parameter_file)
@@ -402,8 +403,9 @@
         if len(fields_to_read) == 0:
             return {}, fields_to_generate
         fields_to_return = self.io._read_particle_selection(
-                    self._chunk_io(dobj), selector,
-                    fields_to_read)
+            self._chunk_io(dobj, cache = False),
+            selector,
+            fields_to_read)
         for field in fields_to_read:
             ftype, fname = field
             finfo = self.pf._get_field_info(*field)
@@ -424,10 +426,11 @@
         fields_to_read, fields_to_generate = self._split_fields(fields)
         if len(fields_to_read) == 0:
             return {}, fields_to_generate
-        fields_to_return = self.io._read_fluid_selection(self._chunk_io(dobj),
-                                                   selector,
-                                                   fields_to_read,
-                                                   chunk_size)
+        fields_to_return = self.io._read_fluid_selection(
+            self._chunk_io(dobj, cache = False),
+            selector,
+            fields_to_read,
+            chunk_size)
         for field in fields_to_read:
             ftype, fname = field
             conv_factor = self.pf.field_info[fname]._convert_function(self)
@@ -452,20 +455,30 @@
         else:
             raise NotImplementedError
 
+def cached_property(func):
+    n = '_%s' % func.func_name
+    def cached_func(self):
+        if self._cache and getattr(self, n, None) is not None:
+            return getattr(self, n)
+        if self.data_size is None:
+            tr = self._accumulate_values(n[1:])
+        else:
+            tr = func(self)
+        if self._cache:
+            setattr(self, n, tr)
+        return tr
+    return property(cached_func)
+
 class YTDataChunk(object):
 
-    def __init__(self, dobj, chunk_type, objs, data_size = None, field_type = None):
+    def __init__(self, dobj, chunk_type, objs, data_size = None,
+                 field_type = None, cache = False):
         self.dobj = dobj
         self.chunk_type = chunk_type
         self.objs = objs
-        self._data_size = data_size
+        self.data_size = data_size
         self._field_type = field_type
-
-    @property
-    def data_size(self):
-        if callable(self._data_size):
-            self._data_size = self._data_size(self.dobj, self.objs)
-        return self._data_size
+        self._cache = cache
 
     def _accumulate_values(self, method):
         # We call this generically.  It's somewhat slower, since we're doing
@@ -476,35 +489,25 @@
             f = getattr(obj, mname)
             arrs.append(f(self.dobj))
         arrs = np.concatenate(arrs)
-        self._data_size = arrs.shape[0]
+        self.data_size = arrs.shape[0]
         return arrs
 
-    _fcoords = None
-    @property
+    @cached_property
     def fcoords(self):
-        if self.data_size is None:
-            self._fcoords = self._accumulate_values("fcoords")
-        if self._fcoords is not None: return self._fcoords
         ci = np.empty((self.data_size, 3), dtype='float64')
-        self._fcoords = ci
-        if self.data_size == 0: return self._fcoords
+        if self.data_size == 0: return ci
         ind = 0
         for obj in self.objs:
             c = obj.select_fcoords(self.dobj)
             if c.shape[0] == 0: continue
             ci[ind:ind+c.shape[0], :] = c
             ind += c.shape[0]
-        return self._fcoords
+        return ci
 
-    _icoords = None
-    @property
+    @cached_property
     def icoords(self):
-        if self.data_size is None:
-            self._icoords = self._accumulate_values("icoords")
-        if self._icoords is not None: return self._icoords
         ci = np.empty((self.data_size, 3), dtype='int64')
-        self._icoords = ci
-        if self.data_size == 0: return self._icoords
+        if self.data_size == 0: return ci
         ind = 0
         for obj in self.objs:
             c = obj.select_icoords(self.dobj)
@@ -513,15 +516,10 @@
             ind += c.shape[0]
         return ci
 
-    _fwidth = None
-    @property
+    @cached_property
     def fwidth(self):
-        if self.data_size is None:
-            self._fwidth = self._accumulate_values("fwidth")
-        if self._fwidth is not None: return self._fwidth
         ci = np.empty((self.data_size, 3), dtype='float64')
-        self._fwidth = ci
-        if self.data_size == 0: return self._fwidth
+        if self.data_size == 0: return ci
         ind = 0
         for obj in self.objs:
             c = obj.select_fwidth(self.dobj)
@@ -530,15 +528,10 @@
             ind += c.shape[0]
         return ci
 
-    _ires = None
-    @property
+    @cached_property
     def ires(self):
-        if self.data_size is None:
-            self._ires = self._accumulate_values("ires")
-        if self._ires is not None: return self._ires
         ci = np.empty(self.data_size, dtype='int64')
-        self._ires = ci
-        if self.data_size == 0: return self._ires
+        if self.data_size == 0: return ci
         ind = 0
         for obj in self.objs:
             c = obj.select_ires(self.dobj)
@@ -547,22 +540,17 @@
             ind += c.size
         return ci
 
-    _tcoords = None
-    @property
+    @cached_property
     def tcoords(self):
-        if self._tcoords is None:
-            self.dtcoords
+        self.dtcoords
         return self._tcoords
 
-    _dtcoords = None
-    @property
+    @cached_property
     def dtcoords(self):
-        if self._dtcoords is not None: return self._dtcoords
         ct = np.empty(self.data_size, dtype='float64')
         cdt = np.empty(self.data_size, dtype='float64')
-        self._tcoords = ct
-        self._dtcoords = cdt
-        if self.data_size == 0: return self._dtcoords
+        self._tcoords = ct # Set this for tcoords
+        if self.data_size == 0: return cdt
         ind = 0
         for obj in self.objs:
             gdt, gt = obj.tcoords(self.dobj)

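The cached_property decorator above folds three behaviors into one
property: return the memoized value when caching is on, fall back to
_accumulate_values when data_size is unknown, and otherwise run the
wrapped function.  A standalone behavior sketch (Python 2, since the
decorator reads func.func_name; assumes the decorator is in scope):

    class Demo(object):
        def __init__(self, data_size, cache):
            self.data_size = data_size
            self._cache = cache
        def _accumulate_values(self, name):
            # stand-in for concatenating select_* results from each object
            return "accumulated-%s" % name
        @cached_property
        def fcoords(self):
            return "computed-fcoords"

    print Demo(None, True).fcoords   # "accumulated-fcoords", then memoized
    print Demo(3, False).fcoords     # "computed-fcoords", nothing stored
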
diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -245,16 +245,16 @@
             dobj.size = self._count_selection(dobj)
         if getattr(dobj, "shape", None) is None:
             dobj.shape = (dobj.size,)
-        dobj._current_chunk = list(self._chunk_all(dobj))[0]
+        dobj._current_chunk = list(self._chunk_all(dobj, cache = False))[0]
 
     def _count_selection(self, dobj, grids = None):
         if grids is None: grids = dobj._chunk_info
         count = sum((g.count(dobj.selector) for g in grids))
         return count
 
-    def _chunk_all(self, dobj):
+    def _chunk_all(self, dobj, cache = True):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        yield YTDataChunk(dobj, "all", gobjs, dobj.size)
+        yield YTDataChunk(dobj, "all", gobjs, dobj.size, cache)
         
     def _chunk_spatial(self, dobj, ngz, sort = None):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
@@ -271,13 +271,16 @@
                 g = og
             size = self._count_selection(dobj, [og])
             if size == 0: continue
-            yield YTDataChunk(dobj, "spatial", [g], size)
+            # We don't want to cache any of the masks or icoords or fcoords for
+            # individual grids.
+            yield YTDataChunk(dobj, "spatial", [g], size, cache = False)
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         gfiles = defaultdict(list)
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for g in gobjs:
             gfiles[g.filename].append(g)
         for fn in sorted(gfiles):
             gs = gfiles[fn]
-            yield YTDataChunk(dobj, "io", gs, self._count_selection(dobj, gs))
+            yield YTDataChunk(dobj, "io", gs, self._count_selection(dobj, gs),
+                              cache = cache)

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -170,10 +170,10 @@
                 g = og
             yield YTDataChunk(dobj, "spatial", [g])
 
-    def _chunk_io(self, dobj):
+    def _chunk_io(self, dobj, cache = True):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], None)
+            yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
 
 class ParticleDataChunk(YTDataChunk):
     def __init__(self, oct_handler, regions, *args, **kwargs):

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -261,6 +261,10 @@
                         self.visit(o.children[cind(i,j,k)], counts, level + 1)
         return
 
+ctypedef fused anyfloat:
+    np.float32_t
+    np.float64_t
+
 cdef class ParticleRegions:
     cdef np.float64_t left_edge[3]
     cdef np.float64_t dds[3]
@@ -282,7 +286,14 @@
         for i in range(nfiles/64 + 1):
             self.masks.append(np.zeros(dims, dtype="uint64"))
 
-    def add_data_file(self, np.ndarray[np.float64_t, ndim=2] pos, int file_id):
+    def add_data_file(self, np.ndarray pos, int file_id):
+        if pos.dtype == np.float32:
+            self._mask_positions[np.float32_t](pos, file_id)
+        elif pos.dtype == np.float64:
+            self._mask_positions[np.float64_t](pos, file_id)
+
+    cdef void _mask_positions(self, np.ndarray[anyfloat, ndim=2] pos,
+                              int file_id):
         cdef np.int64_t no = pos.shape[0]
         cdef np.int64_t p
         cdef int ind[3], i

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1124,6 +1124,8 @@
 
     def __init__(self, dobj):
         self.base_selector = dobj.base_selector
+        self.min_level = self.base_selector.min_level
+        self.max_level = self.base_selector.max_level
         self.domain_id = dobj.domain_id
         self.overlap_cells = 1
 
@@ -1175,6 +1177,8 @@
         self.min_ind = dobj.min_ind
         self.max_ind = dobj.max_ind
         self.base_selector = dobj.base_selector
+        self.min_level = self.base_selector.min_level
+        self.max_level = self.base_selector.max_level
 
     @cython.boundscheck(False)
     @cython.wraparound(False)

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -77,6 +77,10 @@
     def __str__(self):
         return "Could not find field '%s' in %s." % (self.fname, self.pf)
 
+class YTCouldNotGenerateField(YTFieldNotFound):
+    def __str__(self):
+        return "Could field '%s' in %s could not be generated." % (self.fname, self.pf)
+
 class YTFieldTypeNotFound(YTException):
     def __init__(self, fname):
         self.fname = fname

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/utilities/lib/QuadTree.pyx
--- a/yt/utilities/lib/QuadTree.pyx
+++ b/yt/utilities/lib/QuadTree.pyx
@@ -131,7 +131,7 @@
         cdef int i, j
         cdef QuadTreeNode *node
         cdef np.int64_t pos[2]
-        cdef np.float64_t *vals = <np.float64_t *> alloca(
+        cdef np.float64_t *vals = <np.float64_t *> malloc(
                 sizeof(np.float64_t)*nvals)
         cdef np.float64_t weight_val = 0.0
         self.nvals = nvals
@@ -160,6 +160,7 @@
                 self.root_nodes[i][j] = QTN_initialize(
                     pos, nvals, vals, weight_val)
         self.num_cells = self.top_grid_dims[0] * self.top_grid_dims[1]
+        free(vals)
 
     cdef int count_total_cells(self, QuadTreeNode *root):
         cdef int total = 0
@@ -373,7 +374,7 @@
         cdef np.float64_t *vdata = <np.float64_t *> nvals.data
         cdef np.float64_t *wdata = <np.float64_t *> nwvals.data
         cdef np.float64_t wtoadd
-        cdef np.float64_t *vtoadd = <np.float64_t *> alloca(
+        cdef np.float64_t *vtoadd = <np.float64_t *> malloc(
                 sizeof(np.float64_t)*self.nvals)
         for i in range(self.top_grid_dims[0]):
             for j in range(self.top_grid_dims[1]):
@@ -381,6 +382,7 @@
                 wtoadd = 0.0
                 curpos += self.fill(self.root_nodes[i][j],
                     curpos, px, py, pdx, pdy, vdata, wdata, vtoadd, wtoadd, 0)
+        free(vtoadd)
         return opx, opy, opdx, opdy, nvals, nwvals
 
     cdef int count(self, QuadTreeNode *node):
@@ -406,7 +408,7 @@
                         np.int64_t level):
         cdef int i, j, n
         cdef np.float64_t *vorig
-        vorig = <np.float64_t *> alloca(sizeof(np.float64_t) * self.nvals)
+        vorig = <np.float64_t *> malloc(sizeof(np.float64_t) * self.nvals)
         if node.children[0][0] == NULL:
             if self.merged == -1:
                 for i in range(self.nvals):
@@ -444,6 +446,7 @@
             for i in range(self.nvals):
                 vtoadd[i] = vorig[i]
             wtoadd -= node.weight_val
+        free(vorig)
         return added
 
     @cython.boundscheck(False)

diff -r 8ee62b09392c1c43bc886d106b1807bcb45d10f3 -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -353,6 +353,51 @@
         morton_indices[i] = mi
     return morton_indices
 
+ctypedef fused anyfloat:
+    np.float32_t
+    np.float64_t
+
+cdef position_to_morton(np.ndarray[anyfloat, ndim=1] pos_x,
+                        np.ndarray[anyfloat, ndim=1] pos_y,
+                        np.ndarray[anyfloat, ndim=1] pos_z,
+                        np.float64_t dds[3], np.float64_t DLE[3],
+                        np.ndarray[np.uint64_t, ndim=1] ind):
+    cdef np.uint64_t mi, ii[3]
+    cdef np.float64_t p[3]
+    cdef np.int64_t i, j
+    for i in range(pos_x.shape[0]):
+        p[0] = <np.float64_t> pos_x[i]
+        p[1] = <np.float64_t> pos_y[i]
+        p[2] = <np.float64_t> pos_z[i]
+        for j in range(3):
+            ii[j] = <np.uint64_t> ((p[j] - DLE[j])/dds[j])
+        mi = 0
+        mi |= spread_bits(ii[2])<<0
+        mi |= spread_bits(ii[1])<<1
+        mi |= spread_bits(ii[0])<<2
+        ind[i] = mi
+
+DEF ORDER_MAX=20
+        
+def compute_morton(np.ndarray pos_x, np.ndarray pos_y, np.ndarray pos_z,
+                   domain_left_edge, domain_right_edge):
+    cdef int i
+    cdef np.float64_t dds[3], DLE[3], DRE[3]
+    for i in range(3):
+        DLE[i] = domain_left_edge[i]
+        DRE[i] = domain_right_edge[i]
+        dds[i] = (DRE[i] - DLE[i]) / (1 << ORDER_MAX)
+    cdef np.ndarray[np.uint64_t, ndim=1] ind
+    ind = np.zeros(pos_x.shape[0], dtype="uint64")
+    if pos_x.dtype == np.float32:
+        position_to_morton[np.float32_t](pos_x, pos_y, pos_z, dds, DLE, ind)
+    elif pos_x.dtype == np.float64:
+        position_to_morton[np.float64_t](pos_x, pos_y, pos_z, dds, DLE, ind)
+    else:
+        print "Could not identify dtype.", pos_x.dtype
+        raise NotImplementedError
+    return ind
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)

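compute_morton collapses the two older entry points into one fused-type
routine.  Its key layout can be sketched in plain, unoptimized Python
(spread_bits is assumed here to insert two zero bits between consecutive
bits of a 21-bit integer, as its use above implies):

    import numpy as np

    ORDER_MAX = 20

    def spread_bits_py(x):
        # place two zero bits after each input bit so three axes interleave
        r = 0
        for i in range(21):
            r |= (x & (1 << i)) << (2 * i)
        return r

    def compute_morton_py(pos, DLE, DRE):
        # pos: (N, 3) positions; DLE/DRE: domain left/right edges (arrays)
        dds = (DRE - DLE) / (1 << ORDER_MAX)
        ii = ((pos - DLE) / dds).astype("uint64")
        out = np.zeros(pos.shape[0], dtype="uint64")
        for n in range(pos.shape[0]):
            out[n] = (spread_bits_py(int(ii[n, 2]))
                      | spread_bits_py(int(ii[n, 1])) << 1
                      | spread_bits_py(int(ii[n, 0])) << 2)
        return out

    # e.g. compute_morton_py(np.random.random((10, 3)), np.zeros(3), np.ones(3))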

https://bitbucket.org/yt_analysis/yt/commits/75aa65ad91ac/
Changeset:   75aa65ad91ac
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-02 22:59:23
Summary:     Consolidate processing for filtered particles.  Fix filtering of 0-size arrays.

Also, add returning of newly added registry fields, which will facilitate
setting up dependencies for dynamically added particle types.
Affected #:  3 files
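
The "returning of newly added registry fields" works by snapshotting the
registry's keys before registration and diffing afterwards.  A minimal
sketch of the idiom (the field body is a placeholder, not yt's real
deposition code):

    def register_and_report(ptype, registry):
        orig = set(registry.keys())
        def _count(field, data):
            return data.deposit(data[ptype, "Coordinates"], method="count")
        registry.add_field((ptype, "particle_count"), function=_count,
                           particle_type=True)
        return list(set(registry.keys()).difference(orig))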

diff -r fc78b0a808965279c508a3c5fecad5d1ae9f6f67 -r 75aa65ad91ac63409fb9cea0bc0c1af8f2c24d94 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -468,14 +468,20 @@
             self.hierarchy._identify_base_chunk(self)
         if fields is None: return
         nfields = []
+        apply_fields = defaultdict(list)
         for field in self._determine_fields(fields):
             if field[0] in self.pf.h.filtered_particle_types:
                 f = self.pf.known_filters[field[0]]
-                with f.apply(self):
-                    self.get_data([(f.filtered_type, field[1])])
+                apply_fields[field[0]].append(
+                    (f.filtered_type, field[1]))
             else:
                 nfields.append(field)
+        for filter_type in apply_fields:
+            f = self.pf.known_filters[filter_type]
+            with f.apply(self):
+                self.get_data(apply_fields[filter_type])
         fields = nfields
+        if len(fields) == 0: return
         # Now we collect all our fields
         # Here is where we need to perform a validation step, so that if we
         # have a field requested that we actually *can't* yet get, we put it

diff -r fc78b0a808965279c508a3c5fecad5d1ae9f6f67 -r 75aa65ad91ac63409fb9cea0bc0c1af8f2c24d94 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -42,6 +42,7 @@
     mh
 
 def particle_deposition_functions(ptype, coord_name, mass_name, registry):
+    orig = set(registry.keys())
     def particle_count(field, data):
         pos = data[ptype, coord_name]
         d = data.deposit(pos, method = "count")
@@ -112,6 +113,9 @@
             particle_type = True,
             units = r"\mathrm{M}_\odot")
 
+    return list(set(registry.keys()).difference(orig))
+
+
 def particle_scalar_functions(ptype, coord_name, vel_name, registry):
 
     # Now we have to set up the various velocity and coordinate things.  In the
@@ -119,6 +123,8 @@
     # elsewhere, and stop using these.
     
     # Note that we pass in _ptype here so that it's defined inside the closure.
+    orig = set(registry.keys())
+
     def _get_coord_funcs(axi, _ptype):
         def _particle_velocity(field, data):
             return data[_ptype, vel_name][:,axi]
@@ -132,9 +138,12 @@
         registry.add_field((ptype, "particle_position_%s" % ax),
             particle_type = True, function = p)
 
+    return list(set(registry.keys()).difference(orig))
+
 def particle_vector_functions(ptype, coord_names, vel_names, registry):
 
     # This will column_stack a set of scalars to create vector fields.
+    orig = set(registry.keys())
 
     def _get_vec_func(_ptype, names):
         def particle_vectors(field, data):
@@ -147,3 +156,4 @@
                        function=_get_vec_func(ptype, vel_names),
                        particle_type=True)
 
+    return list(set(registry.keys()).difference(orig))

diff -r fc78b0a808965279c508a3c5fecad5d1ae9f6f67 -r 75aa65ad91ac63409fb9cea0bc0c1af8f2c24d94 yt/data_objects/particle_filters.py
--- a/yt/data_objects/particle_filters.py
+++ b/yt/data_objects/particle_filters.py
@@ -59,9 +59,18 @@
                 fd = dobj.field_data
         for f, tr in fd.items():
             if f[0] != self.filtered_type: continue
-            if tr.shape != filter.shape:
+            if tr.shape != filter.shape and tr.shape[0] != filter.shape[0]:
                 raise YTIllDefinedFilter(self, tr.shape, filter.shape)
-            dobj.field_data[self.name, f[1]] = tr[filter]
+            elif filter.size == 0:
+                # Filtering empty set.  This keeps our dimensions correct.
+                # Otherwise we end up with out-of-axis and shape problems.
+                d = tr.copy() 
+            elif len(tr.shape) > len(filter.shape):
+                # Filter must always be 1D
+                d = tr[filter,:]
+            else:
+                d = tr[filter]
+            dobj.field_data[self.name, f[1]] = d
 
     def available(self, field_list):
         # Note that this assumes that all the fields in field_list have the


https://bitbucket.org/yt_analysis/yt/commits/fa53eed1f628/
Changeset:   fa53eed1f628
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-02 23:26:34
Summary:     Ensuring that filtered types don't get concatenated.
Affected #:  1 file
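
The motivation: a particle filter is a view onto an existing particle
type, so concatenating it alongside its source type would double-count
those particles.  A toy illustration (arrays hypothetical):

    import numpy as np
    stars = np.array([1.0, 2.0, 3.0])        # source particle type
    young = stars[stars > 1.5]               # a filter over "stars"
    wrong = np.concatenate([stars, young])   # 5 entries, two counted twice
    right = stars                            # filtered types stay excluded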

diff -r 75aa65ad91ac63409fb9cea0bc0c1af8f2c24d94 -r fa53eed1f6280805034803765678eff2a9defd2d yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -72,7 +72,9 @@
     def _AllFields(field, data):
         v = []
         for ptype in data.pf.particle_types:
-            if ptype == "all": continue
+            if ptype == "all" or \
+                ptype in data.pf.known_filters:
+                  continue
             v.append(data[ptype, fname].copy())
         rv = np.concatenate(v, axis=0)
         return rv
@@ -82,7 +84,9 @@
     def _AllFields(field, data):
         v = []
         for ptype in data.pf.particle_types:
-            if ptype == "all": continue
+            if ptype == "all" or \
+                ptype in data.pf.known_filters:
+                  continue
             v.append(data[ptype, fname][:,axi])
         rv = np.concatenate(v, axis=0)
         return rv


https://bitbucket.org/yt_analysis/yt/commits/a035ee4a2397/
Changeset:   a035ee4a2397
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-03 00:12:43
Summary:     Refactoring and starting to set up generic particle field addition.

This will also be the start of Chris's idea for generic particle fields, as we
can start having registries of particle types, etc etc.
Affected #:  5 files
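
The new _particle_mass_name / _particle_coordinates_name attributes are a
small contract between frontends and the geometry handler: declare the
canonical field names once and the generic deposition fields get wired up
from them.  A condensed sketch (class names hypothetical):

    class StaticOutputSketch(object):
        # defaults; frontends that leave these unset opt out
        _particle_mass_name = None
        _particle_coordinates_name = None

    class MyFrontendOutput(StaticOutputSketch):
        _particle_mass_name = "ParticleMass"
        _particle_coordinates_name = "Coordinates"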

diff -r fa53eed1f6280805034803765678eff2a9defd2d -r a035ee4a2397a086950061ed55a9918093bec4ec yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -37,6 +37,8 @@
     output_type_registry
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, NullFunc
+from yt.data_objects.particle_filters import \
+    filter_registry
 from yt.utilities.minimal_representation import \
     MinimalStaticOutput
 
@@ -60,6 +62,8 @@
     geometry = "cartesian"
     coordinates = None
     max_level = 99
+    _particle_mass_name = None
+    _particle_coordinates_name = None
 
     class __metaclass__(type):
         def __init__(cls, name, b, d):
@@ -251,6 +255,16 @@
             raise YTGeometryNotSupported(self.geometry)
 
     def add_particle_filter(self, filter):
+        if isinstance(filter, types.StringTypes):
+            used = False
+            for f in filter_registry[filter]:
+                used = self.h._setup_filtered_type(f)
+                if used: break
+            if not used: return
+            filter = f
+        else:
+            used = self.h._setup_filtered_type(filter)
+        if not used: return
         self.known_filters[filter.name] = filter
 
     _last_freq = (None, None)

diff -r fa53eed1f6280805034803765678eff2a9defd2d -r a035ee4a2397a086950061ed55a9918093bec4ec yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -706,6 +706,9 @@
     _hierarchy_class = EnzoHierarchy
     _fieldinfo_fallback = EnzoFieldInfo
     _fieldinfo_known = KnownEnzoFields
+    _particle_mass_name = "ParticleMass"
+    _particle_coordinates_name = "Coordinates"
+
     def __init__(self, filename, data_style=None,
                  file_style = None,
                  parameter_override = None,

diff -r fa53eed1f6280805034803765678eff2a9defd2d -r a035ee4a2397a086950061ed55a9918093bec4ec yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -357,6 +357,8 @@
     _hierarchy_class = RAMSESGeometryHandler
     _fieldinfo_fallback = RAMSESFieldInfo
     _fieldinfo_known = KnownRAMSESFields
+    _particle_mass_name = "ParticleMass"
+    _particle_coordinates_name = "Coordinates"
     
     def __init__(self, filename, data_style='ramses',
                  fields = None,

diff -r fa53eed1f6280805034803765678eff2a9defd2d -r a035ee4a2397a086950061ed55a9918093bec4ec yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -129,6 +129,8 @@
     _file_class = GadgetBinaryFile
     _fieldinfo_fallback = GadgetFieldInfo
     _fieldinfo_known = KnownGadgetFields
+    _particle_mass_name = "Mass"
+    _particle_coordinates_name = "Coordinates"
     _header_spec = (('Npart', 6, 'i'),
                     ('Massarr', 6, 'd'),
                     ('Time', 1, 'd'),

diff -r fa53eed1f6280805034803765678eff2a9defd2d -r a035ee4a2397a086950061ed55a9918093bec4ec yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -39,6 +39,8 @@
     data_object_registry
 from yt.data_objects.field_info_container import \
     NullFunc
+from yt.data_objects.particle_fields import \
+    particle_deposition_functions
 from yt.utilities.io_handler import io_registry
 from yt.utilities.logger import ytLogger as mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -166,28 +168,36 @@
         self.filtered_particle_types = []
         fc, fac = self._derived_fields_to_check()
         self._derived_fields_add(fc, fac)
-        # Now we do a special case for all filters.
-        kf = self.parameter_file.known_filters
+
+    def _setup_filtered_type(self, filter):
+        if not filter.available(self.derived_field_list):
+            return False
         fi = self.parameter_file.field_info
         fd = self.parameter_file.field_dependencies
-        for filter_name in kf:
-            filter = kf[filter_name]
-            if not filter.available(self.derived_field_list):
-                continue
-            # Only fields whose dependencies have been reached get added here.
-            available = False
-            for fn in self.derived_field_list:
-                if fn[0] == filter.filtered_type:
-                    # Now we can add this
-                    available = True
-                    self.derived_field_list.append(
-                        (filter.name, fn[1]))
-                    fi[filter.name, fn[1]] = filter.wrap_func(fn, fi[fn])
-                    # Now we append the dependencies
-                    fd[filter.name, fn[1]] = fd[fn]
-            if available:
-                self.parameter_file.particle_types += (filter_name,)
-                self.filtered_particle_types.append(filter_name)
+        available = False
+        for fn in self.derived_field_list:
+            if fn[0] == filter.filtered_type:
+                # Now we can add this
+                available = True
+                self.derived_field_list.append(
+                    (filter.name, fn[1]))
+                fi[filter.name, fn[1]] = filter.wrap_func(fn, fi[fn])
+                # Now we append the dependencies
+                fd[filter.name, fn[1]] = fd[fn]
+        if available:
+            self.parameter_file.particle_types += (filter.name,)
+            self.filtered_particle_types.append(filter.name)
+            self._setup_particle_fields(filter.name, True)
+        return available
+
+    def _setup_particle_fields(self, ptype, filtered = False):
+        pf = self.parameter_file
+        pmass = self.parameter_file._particle_mass_name
+        pcoord = self.parameter_file._particle_coordinates_name
+        if pmass is None or pcoord is None: return
+        df = particle_deposition_functions(ptype,
+            pcoord, pmass, self.parameter_file.field_info)
+        self._derived_fields_add(df)
 
     def _derived_fields_to_check(self):
         fi = self.parameter_file.field_info


https://bitbucket.org/yt_analysis/yt/commits/856bb17297ca/
Changeset:   856bb17297ca
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-03 00:22:16
Summary:     Simplify filter identification and add particle field names to Tipsy & OWLS
Affected #:  2 files
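
With this change add_particle_filter reports whether the filter actually
took, so callers can react.  Usage sketch (filter name hypothetical):

    if not pf.add_particle_filter("young_stars"):
        print "filter not set up; its required fields are unavailable"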

diff -r a035ee4a2397a086950061ed55a9918093bec4ec -r 856bb17297ca44f470396878fcfb85b18d3c4c81 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -259,13 +259,15 @@
             used = False
             for f in filter_registry[filter]:
                 used = self.h._setup_filtered_type(f)
-                if used: break
-            if not used: return
-            filter = f
+                if used:
+                    filter = f
+                    break
         else:
             used = self.h._setup_filtered_type(filter)
-        if not used: return
+        if not used:
+            return False
         self.known_filters[filter.name] = filter
+        return True
 
     _last_freq = (None, None)
     _last_finfo = None

diff -r a035ee4a2397a086950061ed55a9918093bec4ec -r 856bb17297ca44f470396878fcfb85b18d3c4c81 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -260,6 +260,8 @@
     _file_class = ParticleFile
     _fieldinfo_fallback = OWLSFieldInfo # For now we have separate from Gadget
     _fieldinfo_known = KnownOWLSFields
+    _particle_mass_name = "Mass"
+    _particle_coordinates_name = "Coordinates"
     _header_spec = None # Override so that there's no confusion
 
     def __init__(self, filename, data_style="OWLS"):
@@ -339,6 +341,8 @@
     _file_class = TipsyFile
     _fieldinfo_fallback = TipsyFieldInfo
     _fieldinfo_known = KnownTipsyFields
+    _particle_mass_name = "Mass"
+    _particle_coordinates_name = "Coordinates"
     _header_spec = (('time',    'd'),
                     ('nbodies', 'i'),
                     ('ndim',    'i'),


https://bitbucket.org/yt_analysis/yt/commits/8482acd3f0f0/
Changeset:   8482acd3f0f0
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-03 16:04:25
Summary:     Cache grid mask count.
Affected #:  1 file
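
Reduced to its essentials, the commit memoizes both the selector mask and
its popcount, keyed on the selector's identity, so count() and the
select_* methods stop recomputing mask.sum().  Standalone sketch
(fill_mask stands in for the real selector API):

    class GridSketch(object):
        def __init__(self):
            self._last_mask = None
            self._last_count = -1
            self._last_selector_id = None

        def _get_selector_mask(self, selector):
            if id(selector) != self._last_selector_id:
                self._last_mask = mask = selector.fill_mask(self)
                self._last_selector_id = id(selector)
                self._last_count = 0 if mask is None else mask.sum()
            return self._last_mask

        def count(self, selector):
            if self._get_selector_mask(selector) is None:
                return 0
            return self._last_count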

diff -r 856bb17297ca44f470396878fcfb85b18d3c4c81 -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -67,6 +67,7 @@
         self._child_mask = self._child_indices = self._child_index_mask = None
         self.start_index = None
         self._last_mask = None
+        self._last_count = -1
         self._last_selector_id = None
         self._current_particle_type = 'all'
         self._current_fluid_type = self.pf.default_fluid_type
@@ -447,14 +448,14 @@
     def select_icoords(self, dobj):
         mask = self._get_selector_mask(dobj.selector)
         if mask is None: return np.empty((0,3), dtype='int64')
-        coords = convert_mask_to_indices(mask, mask.sum())
+        coords = convert_mask_to_indices(mask, self._last_count)
         coords += self.get_global_startindex()[None, :]
         return coords
 
     def select_fcoords(self, dobj):
         mask = self._get_selector_mask(dobj.selector)
         if mask is None: return np.empty((0,3), dtype='float64')
-        coords = convert_mask_to_indices(mask, mask.sum()).astype("float64")
+        coords = convert_mask_to_indices(mask, self._last_count).astype("float64")
         coords += 0.5
         coords *= self.dds[None, :]
         coords += self.LeftEdge[None, :]
@@ -471,7 +472,7 @@
     def select_ires(self, dobj):
         mask = self._get_selector_mask(dobj.selector)
         if mask is None: return np.empty(0, dtype='int64')
-        coords = np.empty(mask.sum(), dtype='int64')
+        coords = np.empty(self._last_count, dtype='int64')
         coords[:] = self.Level
         return coords
 
@@ -496,6 +497,10 @@
         else:
             self._last_mask = mask = selector.fill_mask(self)
             self._last_selector_id = id(selector)
+            if mask is None:
+                self._last_count = 0
+            else:
+                self._last_count = mask.sum()
         return mask
 
     def select(self, selector, source, dest, offset):
@@ -508,7 +513,7 @@
     def count(self, selector):
         mask = self._get_selector_mask(selector)
         if mask is None: return 0
-        return mask.sum()
+        return self._last_count
 
     def count_particles(self, selector, x, y, z):
         # We don't cache the selector results


https://bitbucket.org/yt_analysis/yt/commits/ac4accea24c1/
Changeset:   ac4accea24c1
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-07-03 00:09:53
Summary:     Only plot requested fields.  Closes #529.
Affected #:  1 file
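
Usage-level effect of the fix, sketched against the yt-3.0-era API
(dataset path hypothetical):

    from yt.mods import SlicePlot, load
    pf = load("galaxy0030/galaxy0030")
    p = SlicePlot(pf, "z", "Density")  # plot_fields records just ["Density"]
    p.save()                           # only the requested field is rendered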

diff -r 8594e3c7ecb9f39175b2013eb0adb5bfda100aa3 -r ac4accea24c18de98bfce8e8529594dbc464d249 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -55,7 +55,7 @@
 from yt.config import ytcfg
 from yt.funcs import \
     mylog, defaultdict, iterable, ensure_list, \
-    fix_axis, get_image_suffix
+    ensure_tuple, fix_axis, get_image_suffix
 from yt.utilities.lib import write_png_to_string
 from yt.utilities.definitions import \
     x_dict, x_names, \
@@ -784,6 +784,7 @@
             self._frb_generator = kwargs.pop("frb_generator")
         if self._plot_type is None:
             self._plot_type = kwargs.pop("plot_type")
+        self.plot_fields = ensure_list(kwargs.pop("fields"))
         font_size = kwargs.pop("fontsize", 18)
         font_path = matplotlib.get_data_path() + '/fonts/ttf/STIXGeneral.ttf'
         self._font_properties = FontProperties(size=font_size, fname=font_path)
@@ -816,8 +817,9 @@
             return 0.0, 0.0
         else:
             mylog.warn("origin = {0}".format(origin))
-            msg = ('origin keyword "{0}" not recognized, must declare "domain" '
-                   'or "center" as the last term in origin.').format(self.origin)
+            msg = \
+              ('origin keyword "{0}" not recognized, must declare "domain" '
+               'or "center" as the last term in origin.').format(self.origin)
             raise RuntimeError(msg)
 
         if origin[0] == 'lower':
@@ -854,13 +856,21 @@
         else:
             fields = self._frb.keys()
         self._colorbar_valid = True
-        for f in self.fields:
+        for f in self.plot_fields:
+            if isinstance(f, tuple):
+                pass
+            else:
+                for ftup in self.fields:
+                    if isinstance(ftup, tuple):
+                        if f == ftup[1]:
+                             f = ftup
             axis_index = self.data_source.axis
 
             xc, yc = self._setup_origin()
 
             if self._axes_unit_names is None:
-                unit = get_smallest_appropriate_unit(self.xlim[1] - self.xlim[0], self.pf)
+                unit = get_smallest_appropriate_unit(
+                    self.xlim[1] - self.xlim[0], self.pf)
                 (unit_x, unit_y) = (unit, unit)
             else:
                 (unit_x, unit_y) = self._axes_unit_names
@@ -875,15 +885,18 @@
             else:
                 zlim = (None, None)
 
-            plot_aspect = (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
+            plot_aspect = \
+              (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
 
             # This sets the size of the figure, and defaults to making one of the dimensions smaller.
             # This should protect against giant images in the case of a very large aspect ratio.
             cbar_frac = 0.0
             if plot_aspect > 1.0:
-                size = (self.window_size*(1.+cbar_frac), self.window_size/plot_aspect)
+                size = (self.window_size*(1.+cbar_frac),
+                        self.window_size/plot_aspect)
             else:
-                size = (plot_aspect*self.window_size*(1.+cbar_frac), self.window_size)
+                size = (plot_aspect*self.window_size*(1.+cbar_frac),
+                        self.window_size)
 
             # Correct the aspect ratio in case unit_x and unit_y are different
             aspect = self.pf[unit_x]/self.pf[unit_y]
@@ -1244,7 +1257,7 @@
         slc = pf.h.slice(axis, center[axis],
             field_parameters = field_parameters, center=center)
         slc.get_data(fields)
-        PWViewerMPL.__init__(self, slc, bounds, origin=origin,
+        PWViewerMPL.__init__(self, slc, bounds, fields=fields, origin=origin,
                              fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 
@@ -1363,8 +1376,9 @@
             axes_unit = units
         if field_parameters is None: field_parameters = {}
         proj = pf.h.proj(fields, axis, weight_field=weight_field,
-                         center=center, data_source=data_source, field_parameters = field_parameters)
-        PWViewerMPL.__init__(self,proj,bounds,origin=origin,
+                         center=center, data_source=data_source,
+                         field_parameters = field_parameters)
+        PWViewerMPL.__init__(self, proj, bounds, fields=fields, origin=origin,
                              fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 
@@ -1426,8 +1440,9 @@
         cutting.get_data(fields)
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
-        PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True,
-                             fontsize=fontsize)
+        PWViewerMPL.__init__(self, cutting, bounds, fields=fields,
+                             origin='center-window',periodic=False,
+                             oblique=True, fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 
 class OffAxisProjectionDummyDataSource(object):
@@ -1530,18 +1545,23 @@
                  depth=(1, '1'), axes_unit=None, weight_field=None,
                  max_level=None, north_vector=None, volume=None, no_ghost=False,
                  le=None, re=None, interpolated=False, fontsize=18):
-        (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf,depth=depth)
+        (bounds, center_rot, units) = \
+          GetObliqueWindowParameters(normal,center,width,pf,depth=depth)
         if axes_unit is None and units != ('1', '1', '1'):
             axes_unit = units[:2]
         fields = ensure_list(fields)[:]
-        width = np.array((bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]))
-        OffAxisProj = OffAxisProjectionDummyDataSource(center_rot, pf, normal, width, fields, interpolated,
-                                                       weight=weight_field,  volume=volume, no_ghost=no_ghost,
-                                                       le=le, re=re, north_vector=north_vector)
+        width = np.array((bounds[1] - bounds[0],
+                          bounds[3] - bounds[2],
+                          bounds[5] - bounds[4]))
+        OffAxisProj = OffAxisProjectionDummyDataSource(
+            center_rot, pf, normal, width, fields, interpolated,
+            weight=weight_field,  volume=volume, no_ghost=no_ghost,
+            le=le, re=re, north_vector=north_vector)
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
-        PWViewerMPL.__init__(self, OffAxisProj, bounds, origin='center-window', periodic=False,
-                             oblique=True, fontsize=fontsize)
+        PWViewerMPL.__init__(
+            self, OffAxisProj, bounds, fields=fields, origin='center-window',
+            periodic=False, oblique=True, fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 
 _metadata_template = """


https://bitbucket.org/yt_analysis/yt/commits/1bee0402a19f/
Changeset:   1bee0402a19f
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-07-03 01:39:56
Summary:     Fixes for failing to_pw() tests.
Affected #:  2 files

diff -r ac4accea24c18de98bfce8e8529594dbc464d249 -r 1bee0402a19fd578a677a29b40e8df1ec632bb7a yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -603,14 +603,15 @@
     def _get_pw(self, fields, center, width, origin, axes_unit, plot_type):
         axis = self.axis
         self.fields = [k for k in self.field_data.keys()
-                       if k not in self._container_fields]
+                       if k not in self._key_fields]
         from yt.visualization.plot_window import \
             GetWindowParameters, PWViewerMPL
         from yt.visualization.fixed_resolution import FixedResolutionBuffer
         (bounds, center, units) = GetWindowParameters(axis, center, width, self.pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
-        pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer, 
+        pw = PWViewerMPL(self, bounds, fields=list(self.fields), origin=origin,
+                         frb_generator=FixedResolutionBuffer,
                          plot_type=plot_type)
         pw.set_axes_unit(axes_unit)
         return pw

diff -r ac4accea24c18de98bfce8e8529594dbc464d249 -r 1bee0402a19fd578a677a29b40e8df1ec632bb7a yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -490,14 +490,21 @@
         """
         normal = self.normal
         center = self.center
+        self.fields = [k for k in self.field_data.keys()
+                       if k not in self._key_fields]
         from yt.visualization.plot_window import \
             GetObliqueWindowParameters, PWViewerMPL
-        from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
-        (bounds, center_rot, units) = GetObliqueWindowParameters(normal, center, width, self.pf)
+        from yt.visualization.fixed_resolution import \
+            ObliqueFixedResolutionBuffer
+        (bounds, center_rot, units) = \
+          GetObliqueWindowParameters(normal, center, width, self.pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
-        pw = PWViewerMPL(self, bounds, origin='center-window', periodic=False, oblique=True,
-                         frb_generator=ObliqueFixedResolutionBuffer, plot_type='OffAxisSlice')
+        pw = PWViewerMPL(
+            self, bounds, fields=self.fields, origin='center-window',
+            periodic=False, oblique=True,
+            frb_generator=ObliqueFixedResolutionBuffer,
+            plot_type='OffAxisSlice')
         pw.set_axes_unit(axes_unit)
         return pw
 


https://bitbucket.org/yt_analysis/yt/commits/c5ea3a0ce599/
Changeset:   c5ea3a0ce599
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-07-03 06:10:45
Summary:     Using _determine_fields to avoid unnecessary tuple disentangling.
Affected #:  1 file
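
A toy version of the normalization _determine_fields performs (the real
method also resolves and validates field types against the dataset):

    def determine_fields_sketch(fields, default_ftype="all"):
        out = []
        for f in fields:
            out.append(f if isinstance(f, tuple) else (default_ftype, f))
        return out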

diff -r 1bee0402a19fd578a677a29b40e8df1ec632bb7a -r c5ea3a0ce599d3f1015106e41be734d8bf357a5a yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -856,14 +856,7 @@
         else:
             fields = self._frb.keys()
         self._colorbar_valid = True
-        for f in self.plot_fields:
-            if isinstance(f, tuple):
-                pass
-            else:
-                for ftup in self.fields:
-                    if isinstance(ftup, tuple):
-                        if f == ftup[1]:
-                             f = ftup
+        for f in self.data_source._determine_fields(self.plot_fields):
             axis_index = self.data_source.axis
 
             xc, yc = self._setup_origin()


https://bitbucket.org/yt_analysis/yt/commits/22f1ff74887f/
Changeset:   22f1ff74887f
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-07-03 06:11:21
Summary:     More whitespace fixes.  This whole file now fits inside an 80-character terminal.
Affected #:  1 file

diff -r c5ea3a0ce599d3f1015106e41be734d8bf357a5a -r 22f1ff74887f7070588c443b37b84fbd2c3ec03b yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -75,7 +75,8 @@
 # included in matplotlib (not in gentoo, yes in everything else)
 # Also accounting for the fact that in 1.2.0, pyparsing got renamed.
 try:
-    if version.LooseVersion(matplotlib.__version__) < version.LooseVersion("1.2.0"):
+    if version.LooseVersion(matplotlib.__version__) < \
+        version.LooseVersion("1.2.0"):
         from matplotlib.pyparsing import ParseFatalException
     else:
         from matplotlib.pyparsing_py2 import ParseFatalException
@@ -197,7 +198,8 @@
         width = validate_iterable_width(width)
     else:
         try:
-            assert isinstance(width, Number), "width (%s) is invalid" % str(width)
+            assert isinstance(width, Number), \
+              "width (%s) is invalid" % str(width)
         except AssertionError, e:
             raise YTInvalidWidthError(e)
         width = ((width, '1'), (width, '1'))
@@ -208,7 +210,8 @@
             assert_valid_width_tuple(depth)
         else:
             try:
-                assert isinstance(depth, Number), "width (%s) is invalid" % str(depth)
+                assert isinstance(depth, Number), \
+                  "width (%s) is invalid" % str(depth)
             except AssertionError, e:
                 raise YTInvalidWidthError(e)
             depth = ((depth, '1'),)
@@ -270,7 +273,8 @@
 
     Parameters
     ----------
-    data_source : :class:`yt.data_objects.data_containers.AMRProjBase` or :class:`yt.data_objects.data_containers.AMRSliceBase`
+    data_source : :class:`yt.data_objects.data_containers.AMRProjBase` or
+                  :class:`yt.data_objects.data_containers.AMRSliceBase`
         This is the source to be pixelized, which can be a projection or a
         slice.  (For cutting planes, see
         `yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`.)
@@ -294,7 +298,8 @@
     _vector_info = None
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True,
-                 periodic=True, origin='center-window', oblique=False, window_size=10.0):
+                 periodic=True, origin='center-window', oblique=False,
+                 window_size=10.0):
         if not hasattr(self, "pf"):
             self.pf = data_source.pf
             ts = self._initialize_dataset(self.pf)
@@ -311,7 +316,8 @@
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
         if self.data_source.center is not None and oblique == False:
-            center = [self.data_source.center[i] for i in range(len(self.data_source.center))
+            center = [self.data_source.center[i] for i in
+                      range(len(self.data_source.center))
                       if i != self.data_source.axis]
             self.set_center(center)
         self._initfinished = True
@@ -461,9 +467,10 @@
 
         parameters
         ----------
-        width : float, array of floats, (float, unit) tuple, or tuple of (float, unit) tuples.
-             Width can have four different formats to support windows with variable
-             x and y widths.  They are:
+        width : float, array of floats, (float, unit) tuple, or tuple of
+                (float, unit) tuples.
+             Width can have four different formats to support windows with
+             variable x and y widths.  They are:
 
              ==================================     =======================
              format                                 example
@@ -474,13 +481,14 @@
              (float, float)                         (0.2, 0.3)
              ==================================     =======================
 
-             For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
-             wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
-             that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
-             the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
-             in code units.  If units are provided the resulting plot axis labels will
-             use the supplied units.
+             For example, (10, 'kpc') requests a plot window that is 10
+             kiloparsecs wide in the x and y directions,
+             ((10,'kpc'),(15,'kpc')) requests a window that is 10 kiloparsecs
+             wide along the x axis and 15 kiloparsecs wide along the y axis.
+             In the other two examples, code units are assumed; for example,
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y
+             width of 0.3 in code units.  If units are provided, the resulting
+             plot axis labels will use the supplied units.
         unit : str
              the unit the width has been specified in. If width is a tuple, this
              argument is ignored. Defaults to code units.
@@ -734,9 +742,9 @@
             image extents will be displayed in.  If set to None, any previous
             units will be reset.  If the unit is None, the default is chosen.
             If unit_name is '1', 'u', or 'unitary', it will not display the
-            units, and only show the axes name. If unit_name is a tuple, the first
-            element is assumed to be the unit for the x axis and the second element
-            the unit for the y axis.
+            units, and only show the axes name. If unit_name is a tuple, the
+            first element is assumed to be the unit for the x axis and the
+            second element the unit for the y axis.
 
         Raises
         ------
@@ -881,8 +889,9 @@
             plot_aspect = \
               (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
 
-            # This sets the size of the figure, and defaults to making one of the dimensions smaller.
-            # This should protect against giant images in the case of a very large aspect ratio.
+            # This sets the size of the figure, and defaults to making one of
+            # the dimensions smaller.  This should protect against giant images
+            # in the case of a very large aspect ratio.
             cbar_frac = 0.0
             if plot_aspect > 1.0:
                 size = (self.window_size*(1.+cbar_frac),
@@ -896,7 +905,8 @@
 
             image = self._frb[f]
 
-            if image.max() == image.min() and self._field_transform[f] == log_transform:
+            if image.max() == image.min():
+              if self._field_transform[f] == log_transform:
                 mylog.warning("Plot image for field %s has zero dynamic " \
                               "range. Min = Max = %d." % \
                               (f, image.max()))
@@ -984,8 +994,8 @@
         :py:class:`matplotlib.font_manager.FontProperties`.
 
         Possible keys include
-        * family - The font family. Can be serif, sans-serif, cursive, 'fantasy' or
-          'monospace'.
+        * family - The font family. Can be serif, sans-serif, cursive,
+          fantasy, monospace, or a specific font name.
         * style - The font style. Either normal, italic or oblique.
         * color - A valid color string like 'r', 'g', 'red', 'cobalt', and
           'orange'.
@@ -1134,8 +1144,8 @@
             raise YTNotInsideNotebook
 
     def display(self, name=None, mpl_kwargs=None):
-        """Will attempt to show the plot in in an IPython notebook.  Failing that, the 
-        plot will be saved to disk."""
+        """Will attempt to show the plot in in an IPython notebook.  Failing
+        that, the plot will be saved to disk."""
         try:
             return self.show()
         except YTNotInsideNotebook:
@@ -1161,7 +1171,8 @@
          or the axis name itself
     fields : string
          The name of the field(s) to be plotted.
-    center : two or three-element vector of sequence floats, 'c', or 'center', or 'max'
+    center : two or three-element vector of sequence floats, 'c', or 'center',
+             or 'max'
          The coordinate of the center of the image.  If left blank,
          the image centers on the location of the maximum density
          cell.  If set to 'c' or 'center', the plot is centered on
@@ -1181,12 +1192,12 @@
          ==================================     =======================
 
          For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
-         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
-         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
-         the y axis.  In the other two examples, code units are assumed, for example
-         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
-         in code units.  If units are provided the resulting plot axis labels will
-         use the supplied units.
+         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
+         window that is 10 kiloparsecs wide along the x axis and 15
+         kiloparsecs wide along the y axis.  In the other two examples, code
+         units are assumed; for example, (0.2, 0.3) requests a plot that has
+         an x width of 0.2 and a y width of 0.3 in code units.  If units are
+         provided, the resulting plot axis labels will use the supplied units.
     axes_unit : A string
          The name of the unit for the tick labels on the x and y axes.
          Defaults to None, which automatically picks an appropriate unit.
@@ -1197,14 +1208,15 @@
          represented by '-' separated string or a tuple of strings.  In the
          first index the y-location is given by 'lower', 'upper', or 'center'.
          The second index is the x-location, given as 'left', 'right', or
-         'center'.  Finally, the whether the origin is applied in 'domain' space,
-         plot 'window' space or 'native' simulation coordinate system is given.
-         For example, both 'upper-right-domain' and ['upper', 'right', 'domain']
-         both place the origin in the upper right hand corner of domain space.
-         If x or y are not given, a value is inffered.  For instance, 'left-domain'
-         corresponds to the lower-left hand corner of the simulation domain,
-         'center-domain' corresponds to the center of the simulation domain,
-         or 'center-window' for the center of the plot window. Further examples:
+         'center'.  Finally, whether the origin is applied in 'domain'
+         space, plot 'window' space, or the 'native' simulation coordinate
+         system is given. For example, 'upper-right-domain' and ['upper',
+         'right', 'domain'] both place the origin in the upper right hand
+         corner of domain space. If x or y are not given, a value is
+         inferred. For instance, 'left-domain' corresponds to the lower-left
+         hand corner of the simulation domain, 'center-domain' to the center
+         of the simulation domain, and 'center-window' to the center of the
+         plot window. Further examples:
 
          ==================================     ============================
          format                                 example
@@ -1221,7 +1233,8 @@
     fontsize : integer
          The size of the fonts for the axis, colorbar, and tick labels.
     field_parameters : dictionary
-         A dictionary of field parameters than can be accessed by derived fields.
+         A dictionary of field parameters that can be accessed by derived
+         fields.
 
     Examples
     --------
@@ -1274,7 +1287,8 @@
          or the axis name itself
     fields : string
         The name of the field(s) to be plotted.
-    center : two or three-element vector of sequence floats, 'c', or 'center', or 'max'
+    center : two or three-element vector of sequence floats, 'c', or 'center',
+             or 'max'
          The coordinate of the center of the image.  If left blank,
          the image centers on the location of the maximum density
          cell.  If set to 'c' or 'center', the plot is centered on
@@ -1294,12 +1308,12 @@
          ==================================     =======================
 
          For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
-         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
-         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
-         the y axis.  In the other two examples, code units are assumed, for example
-         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
-         in code units.  If units are provided the resulting plot axis labels will
-         use the supplied units.
+         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
+         window that is 10 kiloparsecs wide along the x axis and 15
+         kiloparsecs wide along the y axis.  In the other two examples, code
+         units are assumed; for example, (0.2, 0.3) requests a plot that has
+         an x width of 0.2 and a y width of 0.3 in code units.  If units are
+         provided, the resulting plot axis labels will use the supplied units.
     axes_unit : A string
          The name of the unit for the tick labels on the x and y axes.
          Defaults to None, which automatically picks an appropriate unit.
@@ -1310,14 +1324,15 @@
          represented by '-' separated string or a tuple of strings.  In the
          first index the y-location is given by 'lower', 'upper', or 'center'.
          The second index is the x-location, given as 'left', 'right', or
-         'center'.  Finally, the whether the origin is applied in 'domain' space,
-         plot 'window' space or 'native' simulation coordinate system is given.
-         For example, both 'upper-right-domain' and ['upper', 'right', 'domain']
-         both place the origin in the upper right hand corner of domain space.
-         If x or y are not given, a value is inffered.  For instance, 'left-domain'
-         corresponds to the lower-left hand corner of the simulation domain,
-         'center-domain' corresponds to the center of the simulation domain,
-         or 'center-window' for the center of the plot window. Further examples:
+         'center'.  Finally, whether the origin is applied in 'domain'
+         space, plot 'window' space, or the 'native' simulation coordinate
+         system is given. For example, 'upper-right-domain' and ['upper',
+         'right', 'domain'] both place the origin in the upper right hand
+         corner of domain space. If x or y are not given, a value is
+         inferred. For instance, 'left-domain' corresponds to the lower-left
+         hand corner of the simulation domain, 'center-domain' to the center
+         of the simulation domain, and 'center-window' to the center of the
+         plot window. Further examples:
 
          ==================================     ============================
          format                                 example
@@ -1333,8 +1348,8 @@
          ==================================     ============================
 
     data_source : AMR3DData Object
-         Object to be used for data selection.  Defaults to a region covering the
-         entire simulation.
+         Object to be used for data selection.  Defaults to a region covering
+         the entire simulation.
     weight_field : string
          The name of the weighting field.  Set to None for no weight.
     max_level: int
@@ -1342,7 +1357,8 @@
     fontsize : integer
          The size of the fonts for the axis, colorbar, and tick labels.
     field_parameters : dictionary
-         A dictionary of field parameters than can be accessed by derived fields.
+         A dictionary of field parameters that can be accessed by derived
+         fields.
 
     Examples
     --------
@@ -1358,8 +1374,8 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window', fontsize=18, 
-                 field_parameters=None, data_source=None):
+                 weight_field=None, max_level=None, origin='center-window',
+                 fontsize=18, field_parameters=None, data_source=None):
         ts = self._initialize_dataset(pf)
         self.ts = ts
         pf = self.pf = ts[0]
@@ -1415,7 +1431,8 @@
     fontsize : integer
          The size of the fonts for the axis, colorbar, and tick labels.
     field_parameters : dictionary
-         A dictionary of field parameters than can be accessed by derived fields.
+         A dictionary of field parameters that can be accessed by derived
+         fields.
     """
 
     _plot_type = 'OffAxisSlice'
@@ -1424,7 +1441,8 @@
     def __init__(self, pf, normal, fields, center='c', width=None,
                  axes_unit=None, north_vector=None, fontsize=18,
                  field_parameters=None):
-        (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf)
+        (bounds, center_rot, units) = \
+          GetObliqueWindowParameters(normal,center,width,pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
         if field_parameters is None: field_parameters = {}
@@ -1504,12 +1522,12 @@
          ==================================     =======================
 
          For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
-         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
-         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
-         the y axis.  In the other two examples, code units are assumed, for example
-         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
-         in code units.  If units are provided the resulting plot axis labels will
-         use the supplied units.
+         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
+         window that is 10 kiloparsecs wide along the x axis and 15
+         kiloparsecs wide along the y axis.  In the other two examples, code
+         units are assumed; for example, (0.2, 0.3) requests a plot that has
+         an x width of 0.2 and a y width of 0.3 in code units.  If units are
+         provided, the resulting plot axis labels will use the supplied units.
     depth : A tuple or a float
        A tuple containing the depth to project through and the string
         key of the unit: (width, 'unit').  If set to a float, code units
@@ -1797,13 +1815,15 @@
             self._field_transform[field] = linear_transform
 
 class WindowPlotMPL(ImagePlotMPL):
-    def __init__(self, data, cbname, cmap, extent, aspect, zlim, size, fontsize):
+    def __init__(self, data, cbname, cmap, extent, aspect, zlim, size,
+                 fontsize):
         fsize, axrect, caxrect = self._get_best_layout(size, fontsize)
         if np.any(np.array(axrect) < 0):
-            mylog.warning('The axis ratio of the requested plot is very narrow.  '
-                          'There is a good chance the plot will not look very good, '
-                          'consider making the plot manually using FixedResolutionBuffer '
-                          'and matplotlib.')
+            msg = 'The axis ratio of the requested plot is very narrow. ' \
+                  'There is a good chance the plot will not look very good, '
+                  'consider making the plot manually using '
+                  'FixedResolutionBuffer and matplotlib.')
+            mylog.warn(msg)
             axrect  = (0.07, 0.10, 0.80, 0.80)
             caxrect = (0.87, 0.10, 0.04, 0.80)
         ImagePlotMPL.__init__(self, fsize, axrect, caxrect, zlim)


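One substantive rule hides among the reflowed comments above: the figure
size is derived from the window's aspect ratio, capping one dimension at
window_size and shrinking the other so that an extreme xlim/ylim ratio
cannot produce a giant image.  The rule in isolation (names and the 10.0
default follow the diff; cbar_frac is 0.0 there):

    def figure_size(xlim, ylim, window_size=10.0, cbar_frac=0.0):
        # aspect ratio of the requested plot window
        plot_aspect = (xlim[1] - xlim[0]) / (ylim[1] - ylim[0])
        if plot_aspect > 1.0:
            # wide window: full width, reduced height
            return (window_size * (1. + cbar_frac),
                    window_size / plot_aspect)
        # tall window: reduced width, full height
        return (plot_aspect * window_size * (1. + cbar_frac), window_size)

    print figure_size((0.0, 4.0), (0.0, 1.0))   # (10.0, 2.5)
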
https://bitbucket.org/yt_analysis/yt/commits/fa97d1a3cfe8/
Changeset:   fa97d1a3cfe8
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-07-03 06:48:41
Summary:     Fixing some syntax errors.
Affected #:  1 file

diff -r 22f1ff74887f7070588c443b37b84fbd2c3ec03b -r fa97d1a3cfe850c9a3367afb95c8a203ed226790 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -75,7 +75,7 @@
 # included in matplotlib (not in gentoo, yes in everything else)
 # Also accounting for the fact that in 1.2.0, pyparsing got renamed.
 try:
-    if version.LooseVersion(matplotlib.__version__) <
+    if version.LooseVersion(matplotlib.__version__) < \
         version.LooseVersion("1.2.0"):
         from matplotlib.pyparsing import ParseFatalException
     else:
@@ -1820,9 +1820,9 @@
         fsize, axrect, caxrect = self._get_best_layout(size, fontsize)
         if np.any(np.array(axrect) < 0):
             msg = 'The axis ratio of the requested plot is very narrow. ' \
-                  'There is a good chance the plot will not look very good, '
-                  'consider making the plot manually using '
-                  'FixedResolutionBuffer and matplotlib.')
+                  'There is a good chance the plot will not look very good, ' \
+                  'consider making the plot manually using ' \
+                  'FixedResolutionBuffer and matplotlib.'
             mylog.warn(msg)
             axrect  = (0.07, 0.10, 0.80, 0.80)
             caxrect = (0.87, 0.10, 0.04, 0.80)


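Taken together, the last two changesets are a small reminder about Python
line continuation: a bare comparison cannot simply wrap onto the next
line, and wrapped string literals only concatenate within a single
statement, so the first attempt left both a dangling comparison and a
stray closing parenthesis.  For the record:

    import matplotlib
    from distutils import version

    # SyntaxError -- an expression cannot continue onto a new line unaided:
    #   if version.LooseVersion(matplotlib.__version__) <
    #       version.LooseVersion("1.2.0"):

    # Legal: explicit backslash continuation, as in the fix above.
    if version.LooseVersion(matplotlib.__version__) < \
            version.LooseVersion("1.2.0"):
        pass

    # Adjacent string literals join into one message, but each wrapped
    # line needs its own trailing backslash to stay in the statement.
    msg = 'The axis ratio of the requested plot is very narrow. ' \
          'There is a good chance the plot will not look very good, ' \
          'consider making the plot manually using ' \
          'FixedResolutionBuffer and matplotlib.'
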
https://bitbucket.org/yt_analysis/yt/commits/2edc94f8cf08/
Changeset:   2edc94f8cf08
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-03 14:15:14
Summary:     Merged in ngoldbaum/yt-3.0 (pull request #58)

Only plot requested fields.  Closes #529.
Affected #:  3 files

diff -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 -r 2edc94f8cf0819f4eaede36d7dc2dc0556982e04 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -603,14 +603,15 @@
     def _get_pw(self, fields, center, width, origin, axes_unit, plot_type):
         axis = self.axis
         self.fields = [k for k in self.field_data.keys()
-                       if k not in self._container_fields]
+                       if k not in self._key_fields]
         from yt.visualization.plot_window import \
             GetWindowParameters, PWViewerMPL
         from yt.visualization.fixed_resolution import FixedResolutionBuffer
         (bounds, center, units) = GetWindowParameters(axis, center, width, self.pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
-        pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer, 
+        pw = PWViewerMPL(self, bounds, fields=list(self.fields), origin=origin,
+                         frb_generator=FixedResolutionBuffer,
                          plot_type=plot_type)
         pw.set_axes_unit(axes_unit)
         return pw

diff -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 -r 2edc94f8cf0819f4eaede36d7dc2dc0556982e04 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -490,14 +490,21 @@
         """
         normal = self.normal
         center = self.center
+        self.fields = [k for k in self.field_data.keys()
+                       if k not in self._key_fields]
         from yt.visualization.plot_window import \
             GetObliqueWindowParameters, PWViewerMPL
-        from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
-        (bounds, center_rot, units) = GetObliqueWindowParameters(normal, center, width, self.pf)
+        from yt.visualization.fixed_resolution import \
+            ObliqueFixedResolutionBuffer
+        (bounds, center_rot, units) = \
+          GetObliqueWindowParameters(normal, center, width, self.pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
-        pw = PWViewerMPL(self, bounds, origin='center-window', periodic=False, oblique=True,
-                         frb_generator=ObliqueFixedResolutionBuffer, plot_type='OffAxisSlice')
+        pw = PWViewerMPL(
+            self, bounds, fields=self.fields, origin='center-window',
+            periodic=False, oblique=True,
+            frb_generator=ObliqueFixedResolutionBuffer,
+            plot_type='OffAxisSlice')
         pw.set_axes_unit(axes_unit)
         return pw
 

diff -r 2d1f7f07cce0c5d44e0092982a48027ad24c0114 -r 2edc94f8cf0819f4eaede36d7dc2dc0556982e04 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -55,7 +55,7 @@
 from yt.config import ytcfg
 from yt.funcs import \
     mylog, defaultdict, iterable, ensure_list, \
-    fix_axis, get_image_suffix
+    ensure_tuple, fix_axis, get_image_suffix
 from yt.utilities.lib import write_png_to_string
 from yt.utilities.definitions import \
     x_dict, x_names, \
@@ -75,7 +75,8 @@
 # included in matplotlib (not in gentoo, yes in everything else)
 # Also accounting for the fact that in 1.2.0, pyparsing got renamed.
 try:
-    if version.LooseVersion(matplotlib.__version__) < version.LooseVersion("1.2.0"):
+    if version.LooseVersion(matplotlib.__version__) < \
+        version.LooseVersion("1.2.0"):
         from matplotlib.pyparsing import ParseFatalException
     else:
         from matplotlib.pyparsing_py2 import ParseFatalException
@@ -197,7 +198,8 @@
         width = validate_iterable_width(width)
     else:
         try:
-            assert isinstance(width, Number), "width (%s) is invalid" % str(width)
+            assert isinstance(width, Number), \
+              "width (%s) is invalid" % str(width)
         except AssertionError, e:
             raise YTInvalidWidthError(e)
         width = ((width, '1'), (width, '1'))
@@ -208,7 +210,8 @@
             assert_valid_width_tuple(depth)
         else:
             try:
-                assert isinstance(depth, Number), "width (%s) is invalid" % str(depth)
+                assert isinstance(depth, Number), \
+                  "width (%s) is invalid" % str(depth)
             except AssertionError, e:
                 raise YTInvalidWidthError(e)
             depth = ((depth, '1'),)
@@ -270,7 +273,8 @@
 
     Parameters
     ----------
-    data_source : :class:`yt.data_objects.data_containers.AMRProjBase` or :class:`yt.data_objects.data_containers.AMRSliceBase`
+    data_source : :class:`yt.data_objects.data_containers.AMRProjBase` or
+                  :class:`yt.data_objects.data_containers.AMRSliceBase`
         This is the source to be pixelized, which can be a projection or a
         slice.  (For cutting planes, see
         `yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`.)
@@ -294,7 +298,8 @@
     _vector_info = None
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True,
-                 periodic=True, origin='center-window', oblique=False, window_size=10.0):
+                 periodic=True, origin='center-window', oblique=False,
+                 window_size=10.0):
         if not hasattr(self, "pf"):
             self.pf = data_source.pf
             ts = self._initialize_dataset(self.pf)
@@ -311,7 +316,8 @@
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
         if self.data_source.center is not None and oblique == False:
-            center = [self.data_source.center[i] for i in range(len(self.data_source.center))
+            center = [self.data_source.center[i] for i in
+                      range(len(self.data_source.center))
                       if i != self.data_source.axis]
             self.set_center(center)
         self._initfinished = True
@@ -461,9 +467,10 @@
 
         parameters
         ----------
-        width : float, array of floats, (float, unit) tuple, or tuple of (float, unit) tuples.
-             Width can have four different formats to support windows with variable
-             x and y widths.  They are:
+        width : float, array of floats, (float, unit) tuple, or tuple of
+                (float, unit) tuples.
+             Width can have four different formats to support windows with
+             variable x and y widths.  They are:
 
              ==================================     =======================
              format                                 example
@@ -474,13 +481,14 @@
              (float, float)                         (0.2, 0.3)
              ==================================     =======================
 
-             For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
-             wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
-             that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
-             the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
-             in code units.  If units are provided the resulting plot axis labels will
-             use the supplied units.
+             For example, (10, 'kpc') requests a plot window that is 10
+             kiloparsecs wide in the x and y directions,
+             ((10,'kpc'),(15,'kpc')) requests a window that is 10 kiloparsecs
+             wide along the x axis and 15 kiloparsecs wide along the y axis.
+             In the other two examples, code units are assumed; for example,
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y
+             width of 0.3 in code units.  If units are provided, the resulting
+             plot axis labels will use the supplied units.
         unit : str
              the unit the width has been specified in. If width is a tuple, this
              argument is ignored. Defaults to code units.
@@ -734,9 +742,9 @@
             image extents will be displayed in.  If set to None, any previous
             units will be reset.  If the unit is None, the default is chosen.
             If unit_name is '1', 'u', or 'unitary', it will not display the
-            units, and only show the axes name. If unit_name is a tuple, the first
-            element is assumed to be the unit for the x axis and the second element
-            the unit for the y axis.
+            units, and only show the axes name. If unit_name is a tuple, the
+            first element is assumed to be the unit for the x axis and the
+            second element the unit for the y axis.
 
         Raises
         ------
@@ -784,6 +792,7 @@
             self._frb_generator = kwargs.pop("frb_generator")
         if self._plot_type is None:
             self._plot_type = kwargs.pop("plot_type")
+        self.plot_fields = ensure_list(kwargs.pop("fields"))
         font_size = kwargs.pop("fontsize", 18)
         font_path = matplotlib.get_data_path() + '/fonts/ttf/STIXGeneral.ttf'
         self._font_properties = FontProperties(size=font_size, fname=font_path)
@@ -816,8 +825,9 @@
             return 0.0, 0.0
         else:
             mylog.warn("origin = {0}".format(origin))
-            msg = ('origin keyword "{0}" not recognized, must declare "domain" '
-                   'or "center" as the last term in origin.').format(self.origin)
+            msg = \
+              ('origin keyword "{0}" not recognized, must declare "domain" '
+               'or "center" as the last term in origin.').format(self.origin)
             raise RuntimeError(msg)
 
         if origin[0] == 'lower':
@@ -854,13 +864,14 @@
         else:
             fields = self._frb.keys()
         self._colorbar_valid = True
-        for f in self.fields:
+        for f in self.data_source._determine_fields(self.plot_fields):
             axis_index = self.data_source.axis
 
             xc, yc = self._setup_origin()
 
             if self._axes_unit_names is None:
-                unit = get_smallest_appropriate_unit(self.xlim[1] - self.xlim[0], self.pf)
+                unit = get_smallest_appropriate_unit(
+                    self.xlim[1] - self.xlim[0], self.pf)
                 (unit_x, unit_y) = (unit, unit)
             else:
                 (unit_x, unit_y) = self._axes_unit_names
@@ -875,22 +886,27 @@
             else:
                 zlim = (None, None)
 
-            plot_aspect = (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
+            plot_aspect = \
+              (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
 
-            # This sets the size of the figure, and defaults to making one of the dimensions smaller.
-            # This should protect against giant images in the case of a very large aspect ratio.
+            # This sets the size of the figure, and defaults to making one of
+            # the dimensions smaller.  This should protect against giant images
+            # in the case of a very large aspect ratio.
             cbar_frac = 0.0
             if plot_aspect > 1.0:
-                size = (self.window_size*(1.+cbar_frac), self.window_size/plot_aspect)
+                size = (self.window_size*(1.+cbar_frac),
+                        self.window_size/plot_aspect)
             else:
-                size = (plot_aspect*self.window_size*(1.+cbar_frac), self.window_size)
+                size = (plot_aspect*self.window_size*(1.+cbar_frac),
+                        self.window_size)
 
             # Correct the aspect ratio in case unit_x and unit_y are different
             aspect = self.pf[unit_x]/self.pf[unit_y]
 
             image = self._frb[f]
 
-            if image.max() == image.min() and self._field_transform[f] == log_transform:
+            if image.max() == image.min():
+              if self._field_transform[f] == log_transform:
                 mylog.warning("Plot image for field %s has zero dynamic " \
                               "range. Min = Max = %d." % \
                               (f, image.max()))
@@ -978,8 +994,8 @@
         :py:class:`matplotlib.font_manager.FontProperties`.
 
         Possible keys include
-        * family - The font family. Can be serif, sans-serif, cursive, 'fantasy' or
-          'monospace'.
+        * family - The font family. Can be serif, sans-serif, cursive,
+          fantasy, monospace, or a specific font name.
         * style - The font style. Either normal, italic or oblique.
         * color - A valid color string like 'r', 'g', 'red', 'cobalt', and
           'orange'.
@@ -1128,8 +1144,8 @@
             raise YTNotInsideNotebook
 
     def display(self, name=None, mpl_kwargs=None):
-        """Will attempt to show the plot in in an IPython notebook.  Failing that, the 
-        plot will be saved to disk."""
+        """Will attempt to show the plot in in an IPython notebook.  Failing
+        that, the plot will be saved to disk."""
         try:
             return self.show()
         except YTNotInsideNotebook:
@@ -1155,7 +1171,8 @@
          or the axis name itself
     fields : string
          The name of the field(s) to be plotted.
-    center : two or three-element vector of sequence floats, 'c', or 'center', or 'max'
+    center : two or three-element vector of sequence floats, 'c', or 'center',
+             or 'max'
          The coordinate of the center of the image.  If left blank,
          the image centers on the location of the maximum density
          cell.  If set to 'c' or 'center', the plot is centered on
@@ -1175,12 +1192,12 @@
          ==================================     =======================
 
          For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
-         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
-         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
-         the y axis.  In the other two examples, code units are assumed, for example
-         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
-         in code units.  If units are provided the resulting plot axis labels will
-         use the supplied units.
+         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
+         window that is 10 kiloparsecs wide along the x axis and 15
+         kiloparsecs wide along the y axis.  In the other two examples, code
+         units are assumed; for example, (0.2, 0.3) requests a plot that has
+         an x width of 0.2 and a y width of 0.3 in code units.  If units are
+         provided, the resulting plot axis labels will use the supplied units.
     axes_unit : A string
          The name of the unit for the tick labels on the x and y axes.
          Defaults to None, which automatically picks an appropriate unit.
@@ -1191,14 +1208,15 @@
          represented by '-' separated string or a tuple of strings.  In the
          first index the y-location is given by 'lower', 'upper', or 'center'.
          The second index is the x-location, given as 'left', 'right', or
-         'center'.  Finally, the whether the origin is applied in 'domain' space,
-         plot 'window' space or 'native' simulation coordinate system is given.
-         For example, both 'upper-right-domain' and ['upper', 'right', 'domain']
-         both place the origin in the upper right hand corner of domain space.
-         If x or y are not given, a value is inffered.  For instance, 'left-domain'
-         corresponds to the lower-left hand corner of the simulation domain,
-         'center-domain' corresponds to the center of the simulation domain,
-         or 'center-window' for the center of the plot window. Further examples:
+         'center'.  Finally, whether the origin is applied in 'domain'
+         space, plot 'window' space, or the 'native' simulation coordinate
+         system is given. For example, 'upper-right-domain' and ['upper',
+         'right', 'domain'] both place the origin in the upper right hand
+         corner of domain space. If x or y are not given, a value is
+         inferred. For instance, 'left-domain' corresponds to the lower-left
+         hand corner of the simulation domain, 'center-domain' to the center
+         of the simulation domain, and 'center-window' to the center of the
+         plot window. Further examples:
 
          ==================================     ============================
          format                                 example
@@ -1215,7 +1233,8 @@
     fontsize : integer
          The size of the fonts for the axis, colorbar, and tick labels.
     field_parameters : dictionary
-         A dictionary of field parameters than can be accessed by derived fields.
+         A dictionary of field parameters that can be accessed by derived
+         fields.
 
     Examples
     --------
@@ -1244,7 +1263,7 @@
         slc = pf.h.slice(axis, center[axis],
             field_parameters = field_parameters, center=center)
         slc.get_data(fields)
-        PWViewerMPL.__init__(self, slc, bounds, origin=origin,
+        PWViewerMPL.__init__(self, slc, bounds, fields=fields, origin=origin,
                              fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 
@@ -1268,7 +1287,8 @@
          or the axis name itself
     fields : string
         The name of the field(s) to be plotted.
-    center : two or three-element vector of sequence floats, 'c', or 'center', or 'max'
+    center : two or three-element vector of sequence floats, 'c', or 'center',
+             or 'max'
          The coordinate of the center of the image.  If left blank,
          the image centers on the location of the maximum density
          cell.  If set to 'c' or 'center', the plot is centered on
@@ -1288,12 +1308,12 @@
          ==================================     =======================
 
          For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
-         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
-         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
-         the y axis.  In the other two examples, code units are assumed, for example
-         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
-         in code units.  If units are provided the resulting plot axis labels will
-         use the supplied units.
+         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
+         window that is 10 kiloparsecs wide along the x axis and 15
+         kiloparsecs wide along the y axis.  In the other two examples, code
+         units are assumed; for example, (0.2, 0.3) requests a plot that has
+         an x width of 0.2 and a y width of 0.3 in code units.  If units are
+         provided, the resulting plot axis labels will use the supplied units.
     axes_unit : A string
          The name of the unit for the tick labels on the x and y axes.
          Defaults to None, which automatically picks an appropriate unit.
@@ -1304,14 +1324,15 @@
          represented by '-' separated string or a tuple of strings.  In the
          first index the y-location is given by 'lower', 'upper', or 'center'.
          The second index is the x-location, given as 'left', 'right', or
-         'center'.  Finally, the whether the origin is applied in 'domain' space,
-         plot 'window' space or 'native' simulation coordinate system is given.
-         For example, both 'upper-right-domain' and ['upper', 'right', 'domain']
-         both place the origin in the upper right hand corner of domain space.
-         If x or y are not given, a value is inffered.  For instance, 'left-domain'
-         corresponds to the lower-left hand corner of the simulation domain,
-         'center-domain' corresponds to the center of the simulation domain,
-         or 'center-window' for the center of the plot window. Further examples:
+         'center'.  Finally, whether the origin is applied in 'domain'
+         space, plot 'window' space, or the 'native' simulation coordinate
+         system is given. For example, 'upper-right-domain' and ['upper',
+         'right', 'domain'] both place the origin in the upper right hand
+         corner of domain space. If x or y are not given, a value is
+         inferred. For instance, 'left-domain' corresponds to the lower-left
+         hand corner of the simulation domain, 'center-domain' to the center
+         of the simulation domain, and 'center-window' to the center of the
+         plot window. Further examples:
 
          ==================================     ============================
          format                                 example
@@ -1327,8 +1348,8 @@
          ==================================     ============================
 
     data_source : AMR3DData Object
-         Object to be used for data selection.  Defaults to a region covering the
-         entire simulation.
+         Object to be used for data selection.  Defaults to a region covering
+         the entire simulation.
     weight_field : string
          The name of the weighting field.  Set to None for no weight.
     max_level: int
@@ -1336,7 +1357,8 @@
     fontsize : integer
          The size of the fonts for the axis, colorbar, and tick labels.
     field_parameters : dictionary
-         A dictionary of field parameters than can be accessed by derived fields.
+         A dictionary of field parameters that can be accessed by derived
+         fields.
 
     Examples
     --------
@@ -1352,8 +1374,8 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window', fontsize=18, 
-                 field_parameters=None, data_source=None):
+                 weight_field=None, max_level=None, origin='center-window',
+                 fontsize=18, field_parameters=None, data_source=None):
         ts = self._initialize_dataset(pf)
         self.ts = ts
         pf = self.pf = ts[0]
@@ -1363,8 +1385,9 @@
             axes_unit = units
         if field_parameters is None: field_parameters = {}
         proj = pf.h.proj(fields, axis, weight_field=weight_field,
-                         center=center, data_source=data_source, field_parameters = field_parameters)
-        PWViewerMPL.__init__(self,proj,bounds,origin=origin,
+                         center=center, data_source=data_source,
+                         field_parameters = field_parameters)
+        PWViewerMPL.__init__(self, proj, bounds, fields=fields, origin=origin,
                              fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 
@@ -1408,7 +1431,8 @@
     fontsize : integer
          The size of the fonts for the axis, colorbar, and tick labels.
     field_parameters : dictionary
-         A dictionary of field parameters than can be accessed by derived fields.
+         A dictionary of field parameters that can be accessed by derived
+         fields.
     """
 
     _plot_type = 'OffAxisSlice'
@@ -1417,7 +1441,8 @@
     def __init__(self, pf, normal, fields, center='c', width=None,
                  axes_unit=None, north_vector=None, fontsize=18,
                  field_parameters=None):
-        (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf)
+        (bounds, center_rot, units) = \
+          GetObliqueWindowParameters(normal,center,width,pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
         if field_parameters is None: field_parameters = {}
@@ -1426,8 +1451,9 @@
         cutting.get_data(fields)
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
-        PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True,
-                             fontsize=fontsize)
+        PWViewerMPL.__init__(self, cutting, bounds, fields=fields,
+                             origin='center-window',periodic=False,
+                             oblique=True, fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 
 class OffAxisProjectionDummyDataSource(object):
@@ -1496,12 +1522,12 @@
          ==================================     =======================
 
          For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
-         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
-         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
-         the y axis.  In the other two examples, code units are assumed, for example
-         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
-         in code units.  If units are provided the resulting plot axis labels will
-         use the supplied units.
+         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
+         window that is 10 kiloparsecs wide along the x axis and 15
+         kiloparsecs wide along the y axis.  In the other two examples, code
+         units are assumed; for example, (0.2, 0.3) requests a plot that has
+         an x width of 0.2 and a y width of 0.3 in code units.  If units are
+         provided, the resulting plot axis labels will use the supplied units.
     depth : A tuple or a float
        A tuple containing the depth to project through and the string
         key of the unit: (width, 'unit').  If set to a float, code units
@@ -1530,18 +1556,23 @@
                  depth=(1, '1'), axes_unit=None, weight_field=None,
                  max_level=None, north_vector=None, volume=None, no_ghost=False,
                  le=None, re=None, interpolated=False, fontsize=18):
-        (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf,depth=depth)
+        (bounds, center_rot, units) = \
+          GetObliqueWindowParameters(normal,center,width,pf,depth=depth)
         if axes_unit is None and units != ('1', '1', '1'):
             axes_unit = units[:2]
         fields = ensure_list(fields)[:]
-        width = np.array((bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]))
-        OffAxisProj = OffAxisProjectionDummyDataSource(center_rot, pf, normal, width, fields, interpolated,
-                                                       weight=weight_field,  volume=volume, no_ghost=no_ghost,
-                                                       le=le, re=re, north_vector=north_vector)
+        width = np.array((bounds[1] - bounds[0],
+                          bounds[3] - bounds[2],
+                          bounds[5] - bounds[4]))
+        OffAxisProj = OffAxisProjectionDummyDataSource(
+            center_rot, pf, normal, width, fields, interpolated,
+            weight=weight_field,  volume=volume, no_ghost=no_ghost,
+            le=le, re=re, north_vector=north_vector)
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
-        PWViewerMPL.__init__(self, OffAxisProj, bounds, origin='center-window', periodic=False,
-                             oblique=True, fontsize=fontsize)
+        PWViewerMPL.__init__(
+            self, OffAxisProj, bounds, fields=fields, origin='center-window',
+            periodic=False, oblique=True, fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 
 _metadata_template = """
@@ -1784,13 +1815,15 @@
             self._field_transform[field] = linear_transform
 
 class WindowPlotMPL(ImagePlotMPL):
-    def __init__(self, data, cbname, cmap, extent, aspect, zlim, size, fontsize):
+    def __init__(self, data, cbname, cmap, extent, aspect, zlim, size,
+                 fontsize):
         fsize, axrect, caxrect = self._get_best_layout(size, fontsize)
         if np.any(np.array(axrect) < 0):
-            mylog.warning('The axis ratio of the requested plot is very narrow.  '
-                          'There is a good chance the plot will not look very good, '
-                          'consider making the plot manually using FixedResolutionBuffer '
-                          'and matplotlib.')
+            msg = 'The axis ratio of the requested plot is very narrow. ' \
+                  'There is a good chance the plot will not look very good, ' \
+                  'consider making the plot manually using ' \
+                  'FixedResolutionBuffer and matplotlib.'
+            mylog.warn(msg)
             axrect  = (0.07, 0.10, 0.80, 0.80)
             caxrect = (0.87, 0.10, 0.04, 0.80)
         ImagePlotMPL.__init__(self, fsize, axrect, caxrect, zlim)


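The net effect of the merged change: every PWViewerMPL constructor now
receives the fields the user actually named (kept as plot_fields), so a
plot draws only those rather than everything cached on the underlying
data object.  A hedged usage sketch (the dataset path is a placeholder):

    from yt.mods import load, SlicePlot  # yt-3.0-era import style

    pf = load("galaxy0030/galaxy0030")   # placeholder dataset path
    p = SlicePlot(pf, "z", "Density")
    # Even if the slice later caches other fields in field_data, only
    # the requested Density panel is generated and saved.
    p.save()
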
https://bitbucket.org/yt_analysis/yt/commits/3deb823bc6b8/
Changeset:   3deb823bc6b8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-03 20:56:52
Summary:     Adding an arbitrary particle loader, load_particles.

Example: http://paste.yt-project.org/show/3664/
Affected #:  3 files

diff -r 2edc94f8cf0819f4eaede36d7dc2dc0556982e04 -r 3deb823bc6b8beb1cfdaec903d7aa93759c2aa7a yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -34,6 +34,8 @@
     AMRGridPatch
 from yt.geometry.grid_geometry_handler import \
     GridGeometryHandler
+from yt.geometry.particle_geometry_handler import \
+    ParticleGeometryHandler
 from yt.data_objects.static_output import \
     StaticOutput
 from yt.utilities.logger import ytLogger as mylog
@@ -47,6 +49,8 @@
     mpc_conversion, sec_conversion
 from yt.utilities.flagging_methods import \
     FlaggingGrid
+from yt.frontends.sph.data_structures import \
+    ParticleFile
 
 from .fields import \
     StreamFieldInfo, \
@@ -704,3 +708,122 @@
         assign_particle_data(pf, pdata)
     
     return pf
+
+class StreamParticleGeometryHandler(ParticleGeometryHandler):
+
+    
+    def __init__(self, pf, data_style = None):
+        self.stream_handler = pf.stream_handler
+        super(StreamParticleGeometryHandler, self).__init__(pf, data_style)
+
+    def _setup_data_io(self):
+        if self.stream_handler.io is not None:
+            self.io = self.stream_handler.io
+        else:
+            self.io = io_registry[self.data_style](self.stream_handler)
+
+class StreamParticleFile(ParticleFile):
+    pass
+
+class StreamParticlesStaticOutput(StreamStaticOutput):
+    _hierarchy_class = StreamParticleGeometryHandler
+    _file_class = StreamParticleFile
+    _fieldinfo_fallback = StreamFieldInfo
+    _fieldinfo_known = KnownStreamFields
+    _data_style = "stream_particles"
+    file_count = 1
+    filename_template = "stream_file"
+
+def load_particles(data, sim_unit_to_cm, bbox=None,
+                      sim_time=0.0, periodicity=(True, True, True)):
+    r"""Load a set of particles into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamParticlesStaticOutput`.
+
+    This should allow a collection of particle data to be loaded directly into
+    yt and analyzed as would any others.  This comes with several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+
+    This will initialize an Octree of data.  Note that fluid fields will not
+    work yet, or possibly ever.
+    
+    Parameters
+    ----------
+    data : dict
+        This is a dict of numpy arrays, where the keys are the field names.
+        Particles positions must be named "particle_position_x",
+        "particle_position_y", "particle_position_z".
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of the computational domain in units of sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    periodicity : tuple of booleans
+        Determines whether the data will be treated as periodic along
+        each axis
+
+    Examples
+    --------
+
+    >>> pos = [np.random.random(128*128*128) for i in range(3)]
+    >>> data = dict(particle_position_x = pos[0],
+    ...             particle_position_y = pos[1],
+    ...             particle_position_z = pos[2])
+    >>> bbox = np.array([[0., 1.0], [0.0, 1.0], [0.0, 1.0]])
+    >>> pf = load_particles(data, 3.08e24, bbox=bbox)
+
+    """
+
+    domain_dimensions = np.ones(3, "int32") * 2
+    nprocs = 1
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
+    sfh = StreamDictFieldHandler()
+    
+    particle_types = set_particle_types(data)
+    
+    sfh.update({'stream_file':data})
+    grid_left_edges = domain_left_edge
+    grid_right_edges = domain_right_edge
+    grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
+
+    # I'm not sure we need any of this.
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        -np.ones(nprocs, dtype='int64'),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
+        np.zeros(nprocs).reshape((nprocs,1)),
+        sfh,
+        particle_types=particle_types,
+        periodicity=periodicity
+    )
+
+    handler.name = "ParticleData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamParticlesStaticOutput(handler)
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+    return spf
+

diff -r 2edc94f8cf0819f4eaede36d7dc2dc0556982e04 -r 3deb823bc6b8beb1cfdaec903d7aa93759c2aa7a yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -34,6 +34,9 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
+from yt.data_objects.particle_fields import \
+    particle_deposition_functions, \
+    particle_vector_functions
 
 KnownStreamFields = FieldInfoContainer()
 add_stream_field = KnownStreamFields.add_field
@@ -69,3 +72,9 @@
 
 add_field(("all", "ParticleMass"), function = TranslationFunc("particle_mass"),
           particle_type=True)
+
+particle_vector_functions("all", ["particle_position_%s" % ax for ax in 'xyz'],
+                                 ["particle_velocity_%s" % ax for ax in 'xyz'],
+                          StreamFieldInfo)
+particle_deposition_functions("all", "Coordinates", "ParticleMass",
+                               StreamFieldInfo)

diff -r 2edc94f8cf0819f4eaede36d7dc2dc0556982e04 -r 3deb823bc6b8beb1cfdaec903d7aa93759c2aa7a yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -32,6 +32,8 @@
 from yt.utilities.io_handler import \
     BaseIOHandler, _axis_ids
 from yt.utilities.logger import ytLogger as mylog
+from yt.utilities.lib.geometry_utils import compute_morton
+from yt.utilities.exceptions import *
 
 class IOHandlerStream(BaseIOHandler):
 
@@ -127,3 +129,83 @@
     def _read_exception(self):
         return KeyError
 
+class StreamParticleIOHandler(BaseIOHandler):
+
+    _data_style = "stream_particles"
+
+    def __init__(self, stream_handler):
+        self.fields = stream_handler.fields
+        BaseIOHandler.__init__(self)
+
+    def _read_particle_selection(self, chunks, selector, fields):
+        rv = {}
+        # We first need a set of masks for each particle type
+        ptf = defaultdict(list)
+        psize = defaultdict(lambda: 0)
+        chunks = list(chunks)
+        for ftype, fname in fields:
+            ptf[ftype].append(fname)
+        # For this type of file, we actually have something slightly different.
+        # We are given a list of ParticleDataChunks, which is composed of
+        # individual ParticleOctreeSubsets.  The data_files attribute on these
+        # may in fact overlap.  So we will iterate over a union of all the
+        # data_files.
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in data_files:
+            f = self.fields[data_file.filename]
+            # This double-reads
+            for ptype, field_list in sorted(ptf.items()):
+                assert(ptype == "all")
+                psize[ptype] += selector.count_points(
+                        f["particle_position_x"],
+                        f["particle_position_y"],
+                        f["particle_position_z"])
+        # Now we have all the sizes, and we can allocate
+        ind = {}
+        for field in fields:
+            mylog.debug("Allocating %s values for %s", psize[field[0]], field)
+            rv[field] = np.empty(psize[field[0]], dtype="float64")
+            ind[field] = 0
+        for data_file in data_files:
+            f = self.fields[data_file.filename]
+            for ptype, field_list in sorted(ptf.items()):
+                assert(ptype == "all")
+                mask = selector.select_points(
+                        f["particle_position_x"],
+                        f["particle_position_y"],
+                        f["particle_position_z"])
+                if mask is None: continue
+                for field in field_list:
+                    data = f[field][mask,...]
+                    my_ind = ind[ptype, field]
+                    mylog.debug("Filling from %s to %s with %s",
+                        my_ind, my_ind+data.shape[0], field)
+                    rv[ptype, field][my_ind:my_ind + data.shape[0],...] = data
+                    ind[ptype, field] += data.shape[0]
+        return rv
+
+    def _initialize_index(self, data_file, regions):
+        # self.fields[g.id][fname] is the pattern here
+        pos = np.column_stack([self.fields[data_file.filename][
+                               "particle_position_%s" % ax] for ax in 'xyz'])
+        if np.any(pos.min(axis=0) <= data_file.pf.domain_left_edge) or \
+           np.any(pos.max(axis=0) >= data_file.pf.domain_right_edge):
+            raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
+                                   data_file.pf.domain_left_edge,
+                                   data_file.pf.domain_right_edge)
+        regions.add_data_file(pos, data_file.file_id)
+        morton = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.pf.domain_left_edge,
+                data_file.pf.domain_right_edge)
+        return morton
+
+    def _count_particles(self, data_file):
+        npart = self.fields[data_file.filename]["particle_position_x"].size
+        return {'all': npart}
+
+    def _identify_fields(self, data_file):
+        return [ ("all", k) for k in self.fields[data_file.filename].keys()]
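
For orientation, _initialize_index above hands compute_morton the raw
positions and the domain edges and gets back one Morton (Z-order) key per
particle: integer coordinates along x, y and z are bit-interleaved, so
particles that are close in space tend to be close in the 1D key ordering
used to build the octree index.  A minimal sketch of the interleave at a
toy bit depth (morton_key_2bit is a hypothetical helper, not yt API;
compute_morton does this at much higher bit depth in compiled code):

    import numpy as np

    def morton_key_2bit(ix, iy, iz):
        # Interleave 2 bits per axis: bit b of ix lands at key bit 3*b,
        # of iy at 3*b + 1, and of iz at 3*b + 2.
        key = 0
        for b in range(2):
            key |= ((ix >> b) & 1) << (3 * b + 0)
            key |= ((iy >> b) & 1) << (3 * b + 1)
            key |= ((iz >> b) & 1) << (3 * b + 2)
        return key

    # Quantize unit-box positions onto the 4**3 grid and key them.
    pos = np.random.random((4, 3))
    ijk = (pos * 4).astype(int)
    keys = [morton_key_2bit(*p) for p in ijk]
    print(keys)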


https://bitbucket.org/yt_analysis/yt/commits/db8b43782c42/
Changeset:   db8b43782c42
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-03 23:33:33
Summary:     Particle deposition needs to have fortran-ordered arrays.
Affected #:  1 file
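
The rationale, stated with the caveat that it paraphrases the one-line
summary above: the Cython deposition kernels walk the buffers through a raw
float64 pointer assuming column-major (Fortran) layout, while numpy
allocates row-major (C) by default, so a default-ordered buffer would have
its values scattered.  The two layouts can be compared with plain numpy:

    import numpy as np

    a = np.arange(6, dtype="float64").reshape(2, 3)
    c = np.ascontiguousarray(a)   # C order: last axis fastest in memory
    f = np.asfortranarray(a)      # Fortran order: first axis fastest

    # Same logical contents, different raw memory order; ravel(order='K')
    # reads elements in the order they sit in memory.
    print(c.ravel(order='K'))  # [ 0.  1.  2.  3.  4.  5.]
    print(f.ravel(order='K'))  # [ 0.  3.  1.  4.  2.  5.]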

diff -r 2edc94f8cf0819f4eaede36d7dc2dc0556982e04 -r db8b43782c42ae18cb27aae2fa22295486898e38 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -271,15 +271,15 @@
         # per cell, M_k, and Q_k and also the number of particles
         # deposited into each one
         # the M_k term
-        self.omk= np.zeros(self.nvals, dtype="float64")
+        self.omk= np.zeros(self.nvals, dtype="float64", order='F')
         cdef np.ndarray omkarr= self.omk
         self.mk= <np.float64_t*> omkarr.data
         # the Q_k term
-        self.oqk= np.zeros(self.nvals, dtype="float64")
+        self.oqk= np.zeros(self.nvals, dtype="float64", order='F')
         cdef np.ndarray oqkarr= self.oqk
         self.qk= <np.float64_t*> oqkarr.data
         # particle count
-        self.oi = np.zeros(self.nvals, dtype="float64")
+        self.oi = np.zeros(self.nvals, dtype="float64", order='F')
         cdef np.ndarray oiarr = self.oi
         self.i = <np.float64_t*> oiarr.data
 
@@ -368,11 +368,11 @@
     cdef np.float64_t *w
     cdef public object ow
     def initialize(self):
-        self.owf = np.zeros(self.nvals, dtype='float64')
+        self.owf = np.zeros(self.nvals, dtype='float64', order='F')
         cdef np.ndarray wfarr = self.owf
         self.wf = <np.float64_t*> wfarr.data
         
-        self.ow = np.zeros(self.nvals, dtype='float64')
+        self.ow = np.zeros(self.nvals, dtype='float64', order='F')
         cdef np.ndarray warr = self.ow
         self.w = <np.float64_t*> warr.data
     


https://bitbucket.org/yt_analysis/yt/commits/ce576a114f13/
Changeset:   ce576a114f13
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-08 14:56:00
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #61)

Particle deposition needs to have fortran-ordered arrays.
Affected #:  1 file

diff -r 3deb823bc6b8beb1cfdaec903d7aa93759c2aa7a -r ce576a114f133a73ea16ae6cc7c705bf444f8833 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -271,15 +271,15 @@
         # per cell, M_k, and Q_k and also the number of particles
         # deposited into each one
         # the M_k term
-        self.omk= np.zeros(self.nvals, dtype="float64")
+        self.omk= np.zeros(self.nvals, dtype="float64", order='F')
         cdef np.ndarray omkarr= self.omk
         self.mk= <np.float64_t*> omkarr.data
         # the Q_k term
-        self.oqk= np.zeros(self.nvals, dtype="float64")
+        self.oqk= np.zeros(self.nvals, dtype="float64", order='F')
         cdef np.ndarray oqkarr= self.oqk
         self.qk= <np.float64_t*> oqkarr.data
         # particle count
-        self.oi = np.zeros(self.nvals, dtype="float64")
+        self.oi = np.zeros(self.nvals, dtype="float64", order='F')
         cdef np.ndarray oiarr = self.oi
         self.i = <np.float64_t*> oiarr.data
 
@@ -368,11 +368,11 @@
     cdef np.float64_t *w
     cdef public object ow
     def initialize(self):
-        self.owf = np.zeros(self.nvals, dtype='float64')
+        self.owf = np.zeros(self.nvals, dtype='float64', order='F')
         cdef np.ndarray wfarr = self.owf
         self.wf = <np.float64_t*> wfarr.data
         
-        self.ow = np.zeros(self.nvals, dtype='float64')
+        self.ow = np.zeros(self.nvals, dtype='float64', order='F')
         cdef np.ndarray warr = self.ow
         self.w = <np.float64_t*> warr.data
     


https://bitbucket.org/yt_analysis/yt/commits/bd8062a43ab5/
Changeset:   bd8062a43ab5
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-08 15:24:03
Summary:     Merging with tip
Affected #:  10 files

diff -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -617,14 +617,15 @@
     def _get_pw(self, fields, center, width, origin, axes_unit, plot_type):
         axis = self.axis
         self.fields = [k for k in self.field_data.keys()
-                       if k not in self._container_fields]
+                       if k not in self._key_fields]
         from yt.visualization.plot_window import \
             GetWindowParameters, PWViewerMPL
         from yt.visualization.fixed_resolution import FixedResolutionBuffer
         (bounds, center, units) = GetWindowParameters(axis, center, width, self.pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
-        pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer, 
+        pw = PWViewerMPL(self, bounds, fields=list(self.fields), origin=origin,
+                         frb_generator=FixedResolutionBuffer,
                          plot_type=plot_type)
         pw.set_axes_unit(axes_unit)
         return pw

diff -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -490,14 +490,21 @@
         """
         normal = self.normal
         center = self.center
+        self.fields = [k for k in self.field_data.keys()
+                       if k not in self._key_fields]
         from yt.visualization.plot_window import \
             GetObliqueWindowParameters, PWViewerMPL
-        from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
-        (bounds, center_rot, units) = GetObliqueWindowParameters(normal, center, width, self.pf)
+        from yt.visualization.fixed_resolution import \
+            ObliqueFixedResolutionBuffer
+        (bounds, center_rot, units) = \
+          GetObliqueWindowParameters(normal, center, width, self.pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
-        pw = PWViewerMPL(self, bounds, origin='center-window', periodic=False, oblique=True,
-                         frb_generator=ObliqueFixedResolutionBuffer, plot_type='OffAxisSlice')
+        pw = PWViewerMPL(
+            self, bounds, fields=self.fields, origin='center-window',
+            periodic=False, oblique=True,
+            frb_generator=ObliqueFixedResolutionBuffer,
+            plot_type='OffAxisSlice')
         pw.set_axes_unit(axes_unit)
         return pw
 

diff -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -62,6 +62,7 @@
     geometry = "cartesian"
     coordinates = None
     max_level = 99
+    storage_filename = None
     _particle_mass_name = None
     _particle_coordinates_name = None
 

diff -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -212,14 +212,6 @@
     def _setup_data_io(self):
         self.io = io_registry[self.data_style](self.parameter_file)
 
-    def _chunk_io(self, dobj, cache = True):
-        gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        # We'll take the max of 128 and the number of processors
-        nl = max(16, ytcfg.getint("yt", "__topcomm_parallel_size"))
-        for gs in list_chunks(gobjs, nl):
-            yield YTDataChunk(dobj, "io", gs, self._count_selection,
-                              cache = cache)
-
 class FLASHStaticOutput(StaticOutput):
     _hierarchy_class = FLASHHierarchy
     _fieldinfo_fallback = FLASHFieldInfo

diff -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -92,6 +92,6 @@
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:
-                    data = ds[g.id - g._id_offset,:,:,:].transpose()[mask]
+                    data = ds[g.id - g._id_offset,:,:,:].transpose()
                     ind += g.select(selector, data, rv[field], ind) # caches
         return rv

diff -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -34,6 +34,8 @@
     AMRGridPatch
 from yt.geometry.grid_geometry_handler import \
     GridGeometryHandler
+from yt.geometry.particle_geometry_handler import \
+    ParticleGeometryHandler
 from yt.data_objects.static_output import \
     StaticOutput
 from yt.utilities.logger import ytLogger as mylog
@@ -47,6 +49,8 @@
     mpc_conversion, sec_conversion
 from yt.utilities.flagging_methods import \
     FlaggingGrid
+from yt.frontends.sph.data_structures import \
+    ParticleFile
 
 from .fields import \
     StreamFieldInfo, \
@@ -704,3 +708,122 @@
         assign_particle_data(pf, pdata)
     
     return pf
+
+class StreamParticleGeometryHandler(ParticleGeometryHandler):
+
+    
+    def __init__(self, pf, data_style = None):
+        self.stream_handler = pf.stream_handler
+        super(StreamParticleGeometryHandler, self).__init__(pf, data_style)
+
+    def _setup_data_io(self):
+        if self.stream_handler.io is not None:
+            self.io = self.stream_handler.io
+        else:
+            self.io = io_registry[self.data_style](self.stream_handler)
+
+class StreamParticleFile(ParticleFile):
+    pass
+
+class StreamParticlesStaticOutput(StreamStaticOutput):
+    _hierarchy_class = StreamParticleGeometryHandler
+    _file_class = StreamParticleFile
+    _fieldinfo_fallback = StreamFieldInfo
+    _fieldinfo_known = KnownStreamFields
+    _data_style = "stream_particles"
+    file_count = 1
+    filename_template = "stream_file"
+
+def load_particles(data, sim_unit_to_cm, bbox=None,
+                      sim_time=0.0, periodicity=(True, True, True)):
+    r"""Load a set of particles into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamParticlesStaticOutput`.
+
+    This should allow a collection of particle data to be loaded directly into
+    yt and analyzed like any other dataset.  This comes with several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+
+    This will initialize an Octree of data.  Note that fluid fields will not
+    work yet, or possibly ever.
+    
+    Parameters
+    ----------
+    data : dict
+        This is a dict of numpy arrays, where the keys are the field names.
+        Particle positions must be named "particle_position_x",
+        "particle_position_y", "particle_position_z".
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units of sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    periodicity : tuple of booleans
+        Determines whether the data will be treated as periodic along
+        each axis
+
+    Examples
+    --------
+
+    >>> pos = [np.random.random(128*128*128) for i in range(3)]
+    >>> data = dict(particle_position_x = pos[0],
+    ...             particle_position_y = pos[1],
+    ...             particle_position_z = pos[2])
+    >>> bbox = np.array([[0., 1.0], [0.0, 1.0], [0.0, 1.0]])
+    >>> pf = load_particles(data, 3.08e24, bbox=bbox)
+
+    """
+
+    domain_dimensions = np.ones(3, "int32") * 2
+    nprocs = 1
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
+    sfh = StreamDictFieldHandler()
+    
+    particle_types = set_particle_types(data)
+    
+    sfh.update({'stream_file':data})
+    grid_left_edges = domain_left_edge
+    grid_right_edges = domain_right_edge
+    grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
+
+    # I'm not sure we need any of this.
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        -np.ones(nprocs, dtype='int64'),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
+        np.zeros(nprocs).reshape((nprocs,1)),
+        sfh,
+        particle_types=particle_types,
+        periodicity=periodicity
+    )
+
+    handler.name = "ParticleData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamParticlesStaticOutput(handler)
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+    return spf
+
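
A hedged end-to-end sketch of the new loader, assuming the era's
pf.h.all_data() selection works for the particle octree the way it does for
grid frontends (the field tuple follows the "all" convention used by the
stream IO handler above):

    import numpy as np
    from yt.frontends.stream.data_structures import load_particles

    # 32**3 particles filling the unit box, with the box set to 1 Mpc.
    n = 32**3
    data = dict(particle_position_x=np.random.random(n),
                particle_position_y=np.random.random(n),
                particle_position_z=np.random.random(n),
                particle_mass=np.ones(n))
    pf = load_particles(data, 3.08e24)

    dd = pf.h.all_data()
    print(dd["all", "particle_position_x"].shape)  # (32768,)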

diff -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -34,6 +34,9 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
+from yt.data_objects.particle_fields import \
+    particle_deposition_functions, \
+    particle_vector_functions
 
 KnownStreamFields = FieldInfoContainer()
 add_stream_field = KnownStreamFields.add_field
@@ -69,3 +72,9 @@
 
 add_field(("all", "ParticleMass"), function = TranslationFunc("particle_mass"),
           particle_type=True)
+
+particle_vector_functions("all", ["particle_position_%s" % ax for ax in 'xyz'],
+                                 ["particle_velocity_%s" % ax for ax in 'xyz'],
+                          StreamFieldInfo)
+particle_deposition_functions("all", "Coordinates", "ParticleMass",
+                               StreamFieldInfo)

diff -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -32,6 +32,8 @@
 from yt.utilities.io_handler import \
     BaseIOHandler, _axis_ids
 from yt.utilities.logger import ytLogger as mylog
+from yt.utilities.lib.geometry_utils import compute_morton
+from yt.utilities.exceptions import *
 
 class IOHandlerStream(BaseIOHandler):
 
@@ -127,3 +129,83 @@
     def _read_exception(self):
         return KeyError
 
+class StreamParticleIOHandler(BaseIOHandler):
+
+    _data_style = "stream_particles"
+
+    def __init__(self, stream_handler):
+        self.fields = stream_handler.fields
+        BaseIOHandler.__init__(self)
+
+    def _read_particle_selection(self, chunks, selector, fields):
+        rv = {}
+        # We first need a set of masks for each particle type
+        ptf = defaultdict(list)
+        psize = defaultdict(lambda: 0)
+        chunks = list(chunks)
+        for ftype, fname in fields:
+            ptf[ftype].append(fname)
+        # For this type of file, we actually have something slightly different.
+        # We are given a list of ParticleDataChunks, which is composed of
+        # individual ParticleOctreeSubsets.  The data_files attribute on these
+        # may in fact overlap.  So we will iterate over a union of all the
+        # data_files.
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in data_files:
+            f = self.fields[data_file.filename]
+            # This double-reads
+            for ptype, field_list in sorted(ptf.items()):
+                assert(ptype == "all")
+                psize[ptype] += selector.count_points(
+                        f["particle_position_x"],
+                        f["particle_position_y"],
+                        f["particle_position_z"])
+        # Now we have all the sizes, and we can allocate
+        ind = {}
+        for field in fields:
+            mylog.debug("Allocating %s values for %s", psize[field[0]], field)
+            rv[field] = np.empty(psize[field[0]], dtype="float64")
+            ind[field] = 0
+        for data_file in data_files:
+            f = self.fields[data_file.filename]
+            for ptype, field_list in sorted(ptf.items()):
+                assert(ptype == "all")
+                mask = selector.select_points(
+                        f["particle_position_x"],
+                        f["particle_position_y"],
+                        f["particle_position_z"])
+                if mask is None: continue
+                for field in field_list:
+                    data = f[field][mask,...]
+                    my_ind = ind[ptype, field]
+                    mylog.debug("Filling from %s to %s with %s",
+                        my_ind, my_ind+data.shape[0], field)
+                    rv[ptype, field][my_ind:my_ind + data.shape[0],...] = data
+                    ind[ptype, field] += data.shape[0]
+        return rv
+
+    def _initialize_index(self, data_file, regions):
+        # self.fields[g.id][fname] is the pattern here
+        pos = np.column_stack([self.fields[data_file.filename][
+                               "particle_position_%s" % ax] for ax in 'xyz'])
+        if np.any(pos.min(axis=0) <= data_file.pf.domain_left_edge) or \
+           np.any(pos.max(axis=0) >= data_file.pf.domain_right_edge):
+            raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
+                                   data_file.pf.domain_left_edge,
+                                   data_file.pf.domain_right_edge)
+        regions.add_data_file(pos, data_file.file_id)
+        morton = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.pf.domain_left_edge,
+                data_file.pf.domain_right_edge)
+        return morton
+
+    def _count_particles(self, data_file):
+        npart = self.fields[data_file.filename]["particle_position_x"].size
+        return {'all': npart}
+
+    def _identify_fields(self, data_file):
+        return [ ("all", k) for k in self.fields[data_file.filename].keys()]

diff -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -271,15 +271,15 @@
         # per cell, M_k, and Q_k and also the number of particles
         # deposited into each one
         # the M_k term
-        self.omk= np.zeros(self.nvals, dtype="float64")
+        self.omk= np.zeros(self.nvals, dtype="float64", order='F')
         cdef np.ndarray omkarr= self.omk
         self.mk= <np.float64_t*> omkarr.data
         # the Q_k term
-        self.oqk= np.zeros(self.nvals, dtype="float64")
+        self.oqk= np.zeros(self.nvals, dtype="float64", order='F')
         cdef np.ndarray oqkarr= self.oqk
         self.qk= <np.float64_t*> oqkarr.data
         # particle count
-        self.oi = np.zeros(self.nvals, dtype="float64")
+        self.oi = np.zeros(self.nvals, dtype="float64", order='F')
         cdef np.ndarray oiarr = self.oi
         self.i = <np.float64_t*> oiarr.data
 
@@ -368,11 +368,11 @@
     cdef np.float64_t *w
     cdef public object ow
     def initialize(self):
-        self.owf = np.zeros(self.nvals, dtype='float64')
+        self.owf = np.zeros(self.nvals, dtype='float64', order='F')
         cdef np.ndarray wfarr = self.owf
         self.wf = <np.float64_t*> wfarr.data
         
-        self.ow = np.zeros(self.nvals, dtype='float64')
+        self.ow = np.zeros(self.nvals, dtype='float64', order='F')
         cdef np.ndarray warr = self.ow
         self.w = <np.float64_t*> warr.data
     

diff -r 8482acd3f0f05dfb409517dc6ed81fca1b63397f -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -55,7 +55,7 @@
 from yt.config import ytcfg
 from yt.funcs import \
     mylog, defaultdict, iterable, ensure_list, \
-    fix_axis, get_image_suffix
+    ensure_tuple, fix_axis, get_image_suffix
 from yt.utilities.lib import write_png_to_string
 from yt.utilities.definitions import \
     x_dict, x_names, \
@@ -75,7 +75,8 @@
 # included in matplotlib (not in gentoo, yes in everything else)
 # Also accounting for the fact that in 1.2.0, pyparsing got renamed.
 try:
-    if version.LooseVersion(matplotlib.__version__) < version.LooseVersion("1.2.0"):
+    if version.LooseVersion(matplotlib.__version__) < \
+        version.LooseVersion("1.2.0"):
         from matplotlib.pyparsing import ParseFatalException
     else:
         from matplotlib.pyparsing_py2 import ParseFatalException
@@ -197,7 +198,8 @@
         width = validate_iterable_width(width)
     else:
         try:
-            assert isinstance(width, Number), "width (%s) is invalid" % str(width)
+            assert isinstance(width, Number), \
+              "width (%s) is invalid" % str(width)
         except AssertionError, e:
             raise YTInvalidWidthError(e)
         width = ((width, '1'), (width, '1'))
@@ -208,7 +210,8 @@
             assert_valid_width_tuple(depth)
         else:
             try:
-                assert isinstance(depth, Number), "width (%s) is invalid" % str(depth)
+                assert isinstance(depth, Number), \
+                  "depth (%s) is invalid" % str(depth)
             except AssertionError, e:
                 raise YTInvalidWidthError(e)
             depth = ((depth, '1'),)
@@ -270,7 +273,8 @@
 
     Parameters
     ----------
-    data_source : :class:`yt.data_objects.data_containers.AMRProjBase` or :class:`yt.data_objects.data_containers.AMRSliceBase`
+    data_source : :class:`yt.data_objects.data_containers.AMRProjBase` or
+                  :class:`yt.data_objects.data_containers.AMRSliceBase`
         This is the source to be pixelized, which can be a projection or a
         slice.  (For cutting planes, see
         `yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`.)
@@ -294,7 +298,8 @@
     _vector_info = None
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True,
-                 periodic=True, origin='center-window', oblique=False, window_size=10.0):
+                 periodic=True, origin='center-window', oblique=False,
+                 window_size=10.0):
         if not hasattr(self, "pf"):
             self.pf = data_source.pf
             ts = self._initialize_dataset(self.pf)
@@ -311,7 +316,8 @@
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
         if self.data_source.center is not None and oblique == False:
-            center = [self.data_source.center[i] for i in range(len(self.data_source.center))
+            center = [self.data_source.center[i] for i in
+                      range(len(self.data_source.center))
                       if i != self.data_source.axis]
             self.set_center(center)
         self._initfinished = True
@@ -461,9 +467,10 @@
 
         parameters
         ----------
-        width : float, array of floats, (float, unit) tuple, or tuple of (float, unit) tuples.
-             Width can have four different formats to support windows with variable
-             x and y widths.  They are:
+        width : float, array of floats, (float, unit) tuple, or tuple of
+                (float, unit) tuples.
+             Width can have four different formats to support windows with
+             variable x and y widths.  They are:
 
              ==================================     =======================
              format                                 example
@@ -474,13 +481,14 @@
              (float, float)                         (0.2, 0.3)
              ==================================     =======================
 
-             For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
-             wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
-             that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
-             the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
-             in code units.  If units are provided the resulting plot axis labels will
-             use the supplied units.
+             For example, (10, 'kpc') requests a plot window that is 10
+             kiloparsecs wide in the x and y directions,
+             ((10,'kpc'),(15,'kpc')) requests a window that is 10 kiloparsecs
+             wide along the x axis and 15 kiloparsecs wide along the y axis.
+             In the other two examples, code units are assumed, for example
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y
+             width of 0.3 in code units.  If units are provided the resulting
+             plot axis labels will use the supplied units.
         unit : str
              the unit the width has been specified in. If width is a tuple, this
              argument is ignored. Defaults to code units.
@@ -734,9 +742,9 @@
             image extents will be displayed in.  If set to None, any previous
             units will be reset.  If the unit is None, the default is chosen.
             If unit_name is '1', 'u', or 'unitary', it will not display the
-            units, and only show the axes name. If unit_name is a tuple, the first
-            element is assumed to be the unit for the x axis and the second element
-            the unit for the y axis.
+            units, and only show the axes name. If unit_name is a tuple, the
+            first element is assumed to be the unit for the x axis and the
+            second element the unit for the y axis.
 
         Raises
         ------
@@ -784,6 +792,7 @@
             self._frb_generator = kwargs.pop("frb_generator")
         if self._plot_type is None:
             self._plot_type = kwargs.pop("plot_type")
+        self.plot_fields = ensure_list(kwargs.pop("fields"))
         font_size = kwargs.pop("fontsize", 18)
         font_path = matplotlib.get_data_path() + '/fonts/ttf/STIXGeneral.ttf'
         self._font_properties = FontProperties(size=font_size, fname=font_path)
@@ -816,8 +825,9 @@
             return 0.0, 0.0
         else:
             mylog.warn("origin = {0}".format(origin))
-            msg = ('origin keyword "{0}" not recognized, must declare "domain" '
-                   'or "center" as the last term in origin.').format(self.origin)
+            msg = \
+              ('origin keyword "{0}" not recognized, must declare "domain" '
+               'or "center" as the last term in origin.').format(self.origin)
             raise RuntimeError(msg)
 
         if origin[0] == 'lower':
@@ -854,13 +864,14 @@
         else:
             fields = self._frb.keys()
         self._colorbar_valid = True
-        for f in self.fields:
+        for f in self.data_source._determine_fields(self.plot_fields):
             axis_index = self.data_source.axis
 
             xc, yc = self._setup_origin()
 
             if self._axes_unit_names is None:
-                unit = get_smallest_appropriate_unit(self.xlim[1] - self.xlim[0], self.pf)
+                unit = get_smallest_appropriate_unit(
+                    self.xlim[1] - self.xlim[0], self.pf)
                 (unit_x, unit_y) = (unit, unit)
             else:
                 (unit_x, unit_y) = self._axes_unit_names
@@ -875,22 +886,27 @@
             else:
                 zlim = (None, None)
 
-            plot_aspect = (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
+            plot_aspect = \
+              (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
 
-            # This sets the size of the figure, and defaults to making one of the dimensions smaller.
-            # This should protect against giant images in the case of a very large aspect ratio.
+            # This sets the size of the figure, and defaults to making one of
+            # the dimensions smaller.  This should protect against giant images
+            # in the case of a very large aspect ratio.
             cbar_frac = 0.0
             if plot_aspect > 1.0:
-                size = (self.window_size*(1.+cbar_frac), self.window_size/plot_aspect)
+                size = (self.window_size*(1.+cbar_frac),
+                        self.window_size/plot_aspect)
             else:
-                size = (plot_aspect*self.window_size*(1.+cbar_frac), self.window_size)
+                size = (plot_aspect*self.window_size*(1.+cbar_frac),
+                        self.window_size)
 
             # Correct the aspect ratio in case unit_x and unit_y are different
             aspect = self.pf[unit_x]/self.pf[unit_y]
 
             image = self._frb[f]
 
-            if image.max() == image.min() and self._field_transform[f] == log_transform:
+            if image.max() == image.min() and \
+               self._field_transform[f] == log_transform:
                 mylog.warning("Plot image for field %s has zero dynamic " \
                               "range. Min = Max = %d." % \
                               (f, image.max()))
@@ -978,8 +994,8 @@
         :py:class:`matplotlib.font_manager.FontProperties`.
 
         Possible keys include
-        * family - The font family. Can be serif, sans-serif, cursive, 'fantasy' or
-          'monospace'.
+        * family - The font family. Can be serif, sans-serif, cursive,
+          fantasy, monospace, or a specific font name.
         * style - The font style. Either normal, italic or oblique.
         * color - A valid color string like 'r', 'g', 'red', 'cobalt', and
           'orange'.
@@ -1128,8 +1144,8 @@
             raise YTNotInsideNotebook
 
     def display(self, name=None, mpl_kwargs=None):
-        """Will attempt to show the plot in in an IPython notebook.  Failing that, the 
-        plot will be saved to disk."""
+        """Will attempt to show the plot in an IPython notebook.  Failing
+        that, the plot will be saved to disk."""
         try:
             return self.show()
         except YTNotInsideNotebook:
@@ -1155,7 +1171,8 @@
          or the axis name itself
     fields : string
          The name of the field(s) to be plotted.
-    center : two or three-element vector of sequence floats, 'c', or 'center', or 'max'
+    center : two or three-element vector of sequence floats, 'c', or 'center',
+             or 'max'
          The coordinate of the center of the image.  If left blank,
          the image centers on the location of the maximum density
          cell.  If set to 'c' or 'center', the plot is centered on
@@ -1175,12 +1192,12 @@
          ==================================     =======================
 
          For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
-         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
-         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
-         the y axis.  In the other two examples, code units are assumed, for example
-         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
-         in code units.  If units are provided the resulting plot axis labels will
-         use the supplied units.
+         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
+         window that is 10 kiloparsecs wide along the x axis and 15
+         kiloparsecs wide along the y axis.  In the other two examples, code
+         units are assumed, for example (0.2, 0.3) requests a plot that has an
+         x width of 0.2 and a y width of 0.3 in code units.  If units are
+         provided the resulting plot axis labels will use the supplied units.
     axes_unit : A string
          The name of the unit for the tick labels on the x and y axes.
          Defaults to None, which automatically picks an appropriate unit.
@@ -1191,14 +1208,15 @@
          represented by '-' separated string or a tuple of strings.  In the
          first index the y-location is given by 'lower', 'upper', or 'center'.
          The second index is the x-location, given as 'left', 'right', or
-         'center'.  Finally, the whether the origin is applied in 'domain' space,
-         plot 'window' space or 'native' simulation coordinate system is given.
-         For example, both 'upper-right-domain' and ['upper', 'right', 'domain']
-         both place the origin in the upper right hand corner of domain space.
-         If x or y are not given, a value is inffered.  For instance, 'left-domain'
-         corresponds to the lower-left hand corner of the simulation domain,
-         'center-domain' corresponds to the center of the simulation domain,
-         or 'center-window' for the center of the plot window. Further examples:
+         'center'.  Finally, whether the origin is applied in 'domain'
+         space, plot 'window' space or 'native' simulation coordinate system
+         is given. For example, 'upper-right-domain' and ['upper', 'right',
+         'domain'] both place the origin in the upper right hand corner of
+         domain space. If x or y are not given, a value is inferred.
+         For instance, 'left-domain' corresponds to the lower-left hand corner
+         of the simulation domain, 'center-domain' corresponds to the center
+         of the simulation domain, or 'center-window' for the center of the
+         plot window. Further examples:
 
          ==================================     ============================
          format                                 example
@@ -1215,7 +1233,8 @@
     fontsize : integer
          The size of the fonts for the axis, colorbar, and tick labels.
     field_parameters : dictionary
-         A dictionary of field parameters than can be accessed by derived fields.
+         A dictionary of field parameters that can be accessed by derived
+         fields.
 
     Examples
     --------
@@ -1244,7 +1263,7 @@
         slc = pf.h.slice(axis, center[axis],
             field_parameters = field_parameters, center=center)
         slc.get_data(fields)
-        PWViewerMPL.__init__(self, slc, bounds, origin=origin,
+        PWViewerMPL.__init__(self, slc, bounds, fields=fields, origin=origin,
                              fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 
@@ -1268,7 +1287,8 @@
          or the axis name itself
     fields : string
         The name of the field(s) to be plotted.
-    center : two or three-element vector of sequence floats, 'c', or 'center', or 'max'
+    center : two or three-element vector of sequence floats, 'c', or 'center',
+             or 'max'
          The coordinate of the center of the image.  If left blank,
          the image centers on the location of the maximum density
          cell.  If set to 'c' or 'center', the plot is centered on
@@ -1288,12 +1308,12 @@
          ==================================     =======================
 
          For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
-         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
-         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
-         the y axis.  In the other two examples, code units are assumed, for example
-         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
-         in code units.  If units are provided the resulting plot axis labels will
-         use the supplied units.
+         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
+         window that is 10 kiloparsecs wide along the x axis and 15
+         kiloparsecs wide along the y axis.  In the other two examples, code
+         units are assumed, for example (0.2, 0.3) requests a plot that has an
+         x width of 0.2 and a y width of 0.3 in code units.  If units are
+         provided the resulting plot axis labels will use the supplied units.
     axes_unit : A string
          The name of the unit for the tick labels on the x and y axes.
          Defaults to None, which automatically picks an appropriate unit.
@@ -1304,14 +1324,15 @@
          represented by '-' separated string or a tuple of strings.  In the
          first index the y-location is given by 'lower', 'upper', or 'center'.
          The second index is the x-location, given as 'left', 'right', or
-         'center'.  Finally, the whether the origin is applied in 'domain' space,
-         plot 'window' space or 'native' simulation coordinate system is given.
-         For example, both 'upper-right-domain' and ['upper', 'right', 'domain']
-         both place the origin in the upper right hand corner of domain space.
-         If x or y are not given, a value is inffered.  For instance, 'left-domain'
-         corresponds to the lower-left hand corner of the simulation domain,
-         'center-domain' corresponds to the center of the simulation domain,
-         or 'center-window' for the center of the plot window. Further examples:
+         'center'.  Finally, whether the origin is applied in 'domain'
+         space, plot 'window' space or 'native' simulation coordinate system
+         is given. For example, 'upper-right-domain' and ['upper', 'right',
+         'domain'] both place the origin in the upper right hand corner of
+         domain space. If x or y are not given, a value is inferred.
+         For instance, 'left-domain' corresponds to the lower-left hand corner
+         of the simulation domain, 'center-domain' corresponds to the center
+         of the simulation domain, or 'center-window' for the center of the
+         plot window. Further examples:
 
          ==================================     ============================
          format                                 example
@@ -1327,8 +1348,8 @@
          ==================================     ============================
 
     data_source : AMR3DData Object
-         Object to be used for data selection.  Defaults to a region covering the
-         entire simulation.
+         Object to be used for data selection.  Defaults to a region covering
+         the entire simulation.
     weight_field : string
          The name of the weighting field.  Set to None for no weight.
     max_level: int
@@ -1336,7 +1357,8 @@
     fontsize : integer
          The size of the fonts for the axis, colorbar, and tick labels.
     field_parameters : dictionary
-         A dictionary of field parameters than can be accessed by derived fields.
+         A dictionary of field parameters that can be accessed by derived
+         fields.
 
     Examples
     --------
@@ -1352,8 +1374,8 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window', fontsize=18, 
-                 field_parameters=None, data_source=None):
+                 weight_field=None, max_level=None, origin='center-window',
+                 fontsize=18, field_parameters=None, data_source=None):
         ts = self._initialize_dataset(pf)
         self.ts = ts
         pf = self.pf = ts[0]
@@ -1363,8 +1385,9 @@
             axes_unit = units
         if field_parameters is None: field_parameters = {}
         proj = pf.h.proj(fields, axis, weight_field=weight_field,
-                         center=center, data_source=data_source, field_parameters = field_parameters)
-        PWViewerMPL.__init__(self,proj,bounds,origin=origin,
+                         center=center, data_source=data_source,
+                         field_parameters = field_parameters)
+        PWViewerMPL.__init__(self, proj, bounds, fields=fields, origin=origin,
                              fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 
@@ -1408,7 +1431,8 @@
     fontsize : integer
          The size of the fonts for the axis, colorbar, and tick labels.
     field_parameters : dictionary
-         A dictionary of field parameters than can be accessed by derived fields.
+         A dictionary of field parameters that can be accessed by derived
+         fields.
     """
 
     _plot_type = 'OffAxisSlice'
@@ -1417,7 +1441,8 @@
     def __init__(self, pf, normal, fields, center='c', width=None,
                  axes_unit=None, north_vector=None, fontsize=18,
                  field_parameters=None):
-        (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf)
+        (bounds, center_rot, units) = \
+          GetObliqueWindowParameters(normal,center,width,pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
         if field_parameters is None: field_parameters = {}
@@ -1426,8 +1451,9 @@
         cutting.get_data(fields)
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
-        PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True,
-                             fontsize=fontsize)
+        PWViewerMPL.__init__(self, cutting, bounds, fields=fields,
+                             origin='center-window',periodic=False,
+                             oblique=True, fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 
 class OffAxisProjectionDummyDataSource(object):
@@ -1496,12 +1522,12 @@
          ==================================     =======================
 
          For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
-         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
-         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
-         the y axis.  In the other two examples, code units are assumed, for example
-         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
-         in code units.  If units are provided the resulting plot axis labels will
-         use the supplied units.
+         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
+         window that is 10 kiloparsecs wide along the x axis and 15
+         kiloparsecs wide along the y axis.  In the other two examples, code
+         units are assumed, for example (0.2, 0.3) requests a plot that has an
+         x width of 0.2 and a y width of 0.3 in code units.  If units are
+         provided the resulting plot axis labels will use the supplied units.
     depth : A tuple or a float
          A tuple containing the depth to project through and the string
          key of the unit: (depth, 'unit').  If set to a float, code units
@@ -1530,18 +1556,23 @@
                  depth=(1, '1'), axes_unit=None, weight_field=None,
                  max_level=None, north_vector=None, volume=None, no_ghost=False,
                  le=None, re=None, interpolated=False, fontsize=18):
-        (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf,depth=depth)
+        (bounds, center_rot, units) = \
+          GetObliqueWindowParameters(normal,center,width,pf,depth=depth)
         if axes_unit is None and units != ('1', '1', '1'):
             axes_unit = units[:2]
         fields = ensure_list(fields)[:]
-        width = np.array((bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]))
-        OffAxisProj = OffAxisProjectionDummyDataSource(center_rot, pf, normal, width, fields, interpolated,
-                                                       weight=weight_field,  volume=volume, no_ghost=no_ghost,
-                                                       le=le, re=re, north_vector=north_vector)
+        width = np.array((bounds[1] - bounds[0],
+                          bounds[3] - bounds[2],
+                          bounds[5] - bounds[4]))
+        OffAxisProj = OffAxisProjectionDummyDataSource(
+            center_rot, pf, normal, width, fields, interpolated,
+            weight=weight_field,  volume=volume, no_ghost=no_ghost,
+            le=le, re=re, north_vector=north_vector)
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
-        PWViewerMPL.__init__(self, OffAxisProj, bounds, origin='center-window', periodic=False,
-                             oblique=True, fontsize=fontsize)
+        PWViewerMPL.__init__(
+            self, OffAxisProj, bounds, fields=fields, origin='center-window',
+            periodic=False, oblique=True, fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 
 _metadata_template = """
@@ -1784,13 +1815,15 @@
             self._field_transform[field] = linear_transform
 
 class WindowPlotMPL(ImagePlotMPL):
-    def __init__(self, data, cbname, cmap, extent, aspect, zlim, size, fontsize):
+    def __init__(self, data, cbname, cmap, extent, aspect, zlim, size,
+                 fontsize):
         fsize, axrect, caxrect = self._get_best_layout(size, fontsize)
         if np.any(np.array(axrect) < 0):
-            mylog.warning('The axis ratio of the requested plot is very narrow.  '
-                          'There is a good chance the plot will not look very good, '
-                          'consider making the plot manually using FixedResolutionBuffer '
-                          'and matplotlib.')
+            msg = 'The axis ratio of the requested plot is very narrow. ' \
+                  'There is a good chance the plot will not look very good, ' \
+                  'consider making the plot manually using ' \
+                  'FixedResolutionBuffer and matplotlib.'
+            mylog.warn(msg)
             axrect  = (0.07, 0.10, 0.80, 0.80)
             caxrect = (0.87, 0.10, 0.04, 0.80)
         ImagePlotMPL.__init__(self, fsize, axrect, caxrect, zlim)
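
A recurring pattern in this changeset is that every PWViewerMPL
construction now passes fields= explicitly, and the viewer normalizes it
with ensure_list so a single field name and a list of names flow through
the same plotting loop.  A minimal sketch of that normalization, with
ensure_list re-implemented locally (simplified; yt.funcs.ensure_list also
handles other iterables):

    def ensure_list(obj):
        # Wrap a bare item in a list; pass lists through unchanged.
        if isinstance(obj, list):
            return obj
        return [obj]

    def render(fields):
        for f in ensure_list(fields):   # uniform whether one or many
            print("would render %s" % f)

    render("Density")
    render(["Density", "Temperature"])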


https://bitbucket.org/yt_analysis/yt/commits/405f1633aafa/
Changeset:   405f1633aafa
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-05 16:54:57
Summary:     Adding boxlen to RAMSES units for mass and length.
Affected #:  1 file

diff -r 3deb823bc6b8beb1cfdaec903d7aa93759c2aa7a -r 405f1633aafae077ee8984a1e71357831ac02943 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -393,11 +393,16 @@
         self.conversion_factors["y-velocity"] = vel_u
         self.conversion_factors["z-velocity"] = vel_u
         # Necessary to get the length units in, which are needed for Mass
-        self.conversion_factors['mass'] = rho_u * self.parameters['unit_l']**3
+        # We also have to multiply by the boxlength here to scale into our
+        # domain.
+        self.conversion_factors['mass'] = rho_u * \
+                self.parameters['unit_l']**3 * self.parameters['boxlen']
 
     def _setup_nounits_units(self):
         # Note that unit_l *already* converts to proper!
-        unit_l = self.parameters['unit_l']
+        # Also note that unit_l must be multiplied by the boxlen parameter to
+        # ensure we are correctly set up for the current domain.
+        unit_l = self.parameters['unit_l'] * self.parameters['boxlen']
         for unit in mpc_conversion.keys():
             self.units[unit] = unit_l * mpc_conversion[unit] / mpc_conversion["cm"]
             self.units['%sh' % unit] = self.units[unit] * self.hubble_constant


https://bitbucket.org/yt_analysis/yt/commits/dfdb820a7883/
Changeset:   dfdb820a7883
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-05 17:06:56
Summary:     Adding parenthesis for the mass factor.  Now (unit_l*boxlen)**3
Affected #:  1 file

diff -r 405f1633aafae077ee8984a1e71357831ac02943 -r dfdb820a788336a926ce25d1d686cbb304dc3066 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -396,7 +396,7 @@
         # We also have to multiply by the boxlength here to scale into our
         # domain.
         self.conversion_factors['mass'] = rho_u * \
-                self.parameters['unit_l']**3 * self.parameters['boxlen']
+                (self.parameters['unit_l'] * self.parameters['boxlen'])**3
 
     def _setup_nounits_units(self):
         # Note that unit_l *already* converts to proper!
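
Taken together with the previous changeset, the RAMSES length scale becomes
unit_l * boxlen and the mass factor rho_u * (unit_l * boxlen)**3.  The
parenthesization matters whenever boxlen != 1, as a quick check with
hypothetical numbers shows:

    # Hypothetical values, purely to show the two expressions differ.
    rho_u, unit_l, boxlen = 2.0e-29, 3.0e24, 4.0

    first_try = rho_u * unit_l**3 * boxlen    # previous changeset
    fixed = rho_u * (unit_l * boxlen)**3      # this changeset
    print(fixed / first_try)                  # boxlen**2 -> 16.0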


https://bitbucket.org/yt_analysis/yt/commits/d397b2e138de/
Changeset:   d397b2e138de
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-11 16:08:49
Summary:     Make Enzo somewhat more forgiving and flexible about particle field names.
Affected #:  1 file

diff -r bd8062a43ab5dca8087c78bace4fc5eb102ac0a8 -r d397b2e138de6eb61d0a5d1d21faae041558eb1c yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -36,7 +36,9 @@
 import numpy as np
 from yt.funcs import *
 
-_convert_mass = ("particle_mass",)
+_convert_mass = ("particle_mass","mass")
+
+_particle_position_names = {}
 
 class IOHandlerPackedHDF5(BaseIOHandler):
 
@@ -56,7 +58,8 @@
         ptypes = list(set([ftype for ftype, fname in fields]))
         fields = list(set(fields))
         if len(ptypes) > 1: raise NotImplementedError
-        pfields = [(ptypes[0], "particle_position_%s" % ax) for ax in 'xyz']
+        pn = _particle_position_names.get(ptypes[0], r"particle_position_%s")
+        pfields = [(ptypes[0], pn % ax) for ax in 'xyz']
         size = 0
         for chunk in chunks:
             data = self._read_chunk_data(chunk, pfields, 'active', 
@@ -83,7 +86,7 @@
                 for field in set(fields):
                     ftype, fname = field
                     gdata = data[g.id].pop(fname)[mask]
-                    if fname == "particle_mass":
+                    if fname in _convert_mass:
                         gdata *= g.dds.prod()
                     rv[field][ind:ind+gdata.size] = gdata
                 ind += gdata.size
@@ -134,7 +137,7 @@
                 for field in set(fields):
                     ftype, fname = field
                     gdata = data[g.id].pop(fname)[mask]
-                    if fname == "particle_mass":
+                    if fname in _convert_mass:
                         gdata *= g.dds.prod()
                     rv[field][ind:ind+gdata.size] = gdata
                 ind += gdata.size
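
The effect of the new lookup table: a frontend can register a non-standard
position-name template per particle type, and the reader falls back to the
conventional "particle_position_%s" otherwise.  A small sketch of that
dispatch (the "tracer" entry is hypothetical; the table ships empty in this
commit):

    _particle_position_names = {"tracer": r"tracer_pos_%s"}  # hypothetical

    def position_fields(ptype):
        # Fall back to the conventional template when there is no override.
        pn = _particle_position_names.get(ptype, r"particle_position_%s")
        return [(ptype, pn % ax) for ax in "xyz"]

    print(position_fields("io"))      # standard names
    print(position_fields("tracer"))  # overridden names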

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


