[yt-svn] commit/yt: 8 new changesets

Bitbucket commits-noreply at bitbucket.org
Sat Sep 8 10:19:41 PDT 2012


8 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/45f9ac33caf2/
changeset:   45f9ac33caf2
branch:      yt
user:        bcrosby
date:        2012-09-05 22:44:10
summary:     Changes to the sorting of halos in mergertree.py to improve performance.
affected #:  1 file

diff -r 2825fd89deeba490c2d5dfc1c0200ed5493f0a1f -r 45f9ac33caf22fe5caca8cd255fdeaa7a67469b9 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -450,9 +450,9 @@
             # the parent dataset.
             parent_names = list(self.names[parent_currt])
             parent_names.sort()
-            parent_IDs = na.array([], dtype='int64')
-            parent_masses = na.array([], dtype='float64')
-            parent_halos = na.array([], dtype='int32')
+            parent_IDs = []
+            parent_masses = []
+            parent_halos = []
             for i,pname in enumerate(parent_names):
                 if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                     h5fp = h5py.File(pname)
@@ -460,19 +460,22 @@
                         gID = int(group[4:])
                         thisIDs = h5fp[group]['particle_index'][:]
                         thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                        parent_IDs = na.concatenate((parent_IDs, thisIDs))
-                        parent_masses = na.concatenate((parent_masses, thisMasses))
-                        parent_halos = na.concatenate((parent_halos, 
-                            na.ones(thisIDs.size, dtype='int32') * gID))
+                        parent_IDs.append(thisIDs)
+                        parent_masses.append(thisMasses)
+                        parent_halos.append(na.ones(thisIDs.size,
+                            dtype='int32') * gID)
                         del thisIDs, thisMasses
                     h5fp.close()
-            
+
             # Sort the arrays by particle index in ascending order.
-            sort = parent_IDs.argsort()
-            parent_IDs = parent_IDs[sort]
-            parent_masses = parent_masses[sort]
-            parent_halos = parent_halos[sort]
-            del sort
+            if len(parent_IDs)==0:
+                parent_IDs = na.array([], dtype='int32')
+                parent_masses = na.array([], dtype='int32')
+                parent_halos = na.array([], dtype='int32')
+            else:
+                parent_IDs = na.concatenate(parent_IDs)
+                parent_masses = na.concatenate(parent_masses)
+                parent_halos = na.concatenate(parent_halos)
         else:
             # We can use old data and save disk reading.
             (parent_IDs, parent_masses, parent_halos) = last
@@ -482,30 +485,33 @@
         # Now get the child halo data.
         child_names = list(self.names[child_currt])
         child_names.sort()
-        child_IDs = na.array([], dtype='int64')
-        child_masses = na.array([], dtype='float64')
-        child_halos = na.array([], dtype='int32')
-        for i,cname in enumerate(child_names):
+        child_IDs = []
+        child_masses = []
+        child_halos = []
+        for i,pname in enumerate(child_names):
             if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
-                h5fp = h5py.File(cname)
+                h5fp = h5py.File(pname)
                 for group in h5fp:
                     gID = int(group[4:])
                     thisIDs = h5fp[group]['particle_index'][:]
                     thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                    child_IDs = na.concatenate((child_IDs, thisIDs))
-                    child_masses = na.concatenate((child_masses, thisMasses))
-                    child_halos = na.concatenate((child_halos, 
-                        na.ones(thisIDs.size, dtype='int32') * gID))
+                    child_IDs.append(thisIDs)
+                    child_masses.append(thisMasses)
+                    child_halos.append(na.ones(thisIDs.size,
+                        dtype='int32') * gID)
                     del thisIDs, thisMasses
                 h5fp.close()
-        
-        # Sort the arrays by particle index.
-        sort = child_IDs.argsort()
-        child_IDs = child_IDs[sort]
-        child_masses = child_masses[sort]
-        child_halos = child_halos[sort]
+
+        # Sort the arrays by particle index in ascending order.
+        if len(child_IDs)==0:
+            child_IDs = na.array([], dtype='int32')
+            child_masses = na.array([], dtype='int32')
+            child_halos = na.array([], dtype='int32')
+        else:
+            child_IDs = na.concatenate(child_IDs)
+            child_masses = na.concatenate(child_masses)
+            child_halos = na.concatenate(child_halos)
         child_send = na.ones(child_IDs.size, dtype='bool')
-        del sort
         
         # Match particles in halos.
         self._match(parent_IDs, child_IDs, parent_halos, child_halos,



https://bitbucket.org/yt_analysis/yt/changeset/16388c698349/
changeset:   16388c698349
branch:      yt
user:        bcrosby
date:        2012-09-05 23:06:50
summary:     Fixing the pname and cname typo. This is only for consistency; the code's function isn't affected.
affected #:  1 file

diff -r 45f9ac33caf22fe5caca8cd255fdeaa7a67469b9 -r 16388c69834985b99e28f1b68cac859e7ad7f31c yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -488,9 +488,9 @@
         child_IDs = []
         child_masses = []
         child_halos = []
-        for i,pname in enumerate(child_names):
+        for i,cname in enumerate(child_names):
             if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
-                h5fp = h5py.File(pname)
+                h5fp = h5py.File(cname)
                 for group in h5fp:
                     gID = int(group[4:])
                     thisIDs = h5fp[group]['particle_index'][:]



https://bitbucket.org/yt_analysis/yt/changeset/0e857c676504/
changeset:   0e857c676504
branch:      yt
user:        bcrosby
date:        2012-09-07 00:25:40
summary:     Datatypes are now consistent, and masses use float rather than int.
affected #:  1 file

diff -r 16388c69834985b99e28f1b68cac859e7ad7f31c -r 0e857c676504a42d04893c0812d0ffc86d1cd3c2 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -470,12 +470,12 @@
             # Sort the arrays by particle index in ascending order.
             if len(parent_IDs)==0:
                 parent_IDs = na.array([], dtype='int32')
-                parent_masses = na.array([], dtype='int32')
+                parent_masses = na.array([], dtype='float64')
                 parent_halos = na.array([], dtype='int32')
             else:
-                parent_IDs = na.concatenate(parent_IDs)
-                parent_masses = na.concatenate(parent_masses)
-                parent_halos = na.concatenate(parent_halos)
+                parent_IDs = na.concatenate(parent_IDs).astype('int32')
+                parent_masses = na.concatenate(parent_masses).astype('float64')
+                parent_halos = na.concatenate(parent_halos).astype('int32')
         else:
             # We can use old data and save disk reading.
             (parent_IDs, parent_masses, parent_halos) = last
@@ -505,12 +505,12 @@
         # Sort the arrays by particle index in ascending order.
         if len(child_IDs)==0:
             child_IDs = na.array([], dtype='int32')
-            child_masses = na.array([], dtype='int32')
+            child_masses = na.array([], dtype='float64')
             child_halos = na.array([], dtype='int32')
         else:
-            child_IDs = na.concatenate(child_IDs)
-            child_masses = na.concatenate(child_masses)
-            child_halos = na.concatenate(child_halos)
+            child_IDs = na.concatenate(child_IDs).astype('int32')
+            child_masses = na.concatenate(child_masses).astype('float64')
+            child_halos = na.concatenate(child_halos).astype('int32')
         child_send = na.ones(child_IDs.size, dtype='bool')
         
         # Match particles in halos.



https://bitbucket.org/yt_analysis/yt/changeset/1a87d0db31fc/
changeset:   1a87d0db31fc
branch:      yt
user:        bcrosby
date:        2012-09-07 20:10:12
summary:     Switched from append() to extend() so that halos and their characteristics are added to the existing list as individual values rather than as nested lists.
affected #:  1 file

diff -r 0e857c676504a42d04893c0812d0ffc86d1cd3c2 -r 1a87d0db31fc5d9d974d14b9f5d20e388959c5be yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -460,28 +460,32 @@
                         gID = int(group[4:])
                         thisIDs = h5fp[group]['particle_index'][:]
                         thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                        parent_IDs.append(thisIDs)
-                        parent_masses.append(thisMasses)
-                        parent_halos.append(na.ones(thisIDs.size,
+                        parent_IDs.extend(thisIDs)
+                        parent_masses.extend(thisMasses)
+                        parent_halos.extend(na.ones(len(thisIDs),
                             dtype='int32') * gID)
                         del thisIDs, thisMasses
                     h5fp.close()
-
             # Sort the arrays by particle index in ascending order.
             if len(parent_IDs)==0:
-                parent_IDs = na.array([], dtype='int32')
+                parent_IDs = na.array([], dtype='int64')
                 parent_masses = na.array([], dtype='float64')
                 parent_halos = na.array([], dtype='int32')
             else:
-                parent_IDs = na.concatenate(parent_IDs).astype('int32')
-                parent_masses = na.concatenate(parent_masses).astype('float64')
-                parent_halos = na.concatenate(parent_halos).astype('int32')
+                parent_IDs = na.asarray(parent_IDs).astype('int64')
+                parent_masses = na.asarray(parent_masses).astype('float64')
+                parent_halos = na.asarray(parent_halos).astype('int32')
+                sort = parent_IDs.argsort()
+                parent_IDs = parent_IDs[sort]
+                parent_masses = parent_masses[sort]
+                parent_halos = parent_halos[sort]
+                del sort
         else:
             # We can use old data and save disk reading.
             (parent_IDs, parent_masses, parent_halos) = last
         # Used to communicate un-matched particles.
         parent_send = na.ones(parent_IDs.size, dtype='bool')
-        
+
         # Now get the child halo data.
         child_names = list(self.names[child_currt])
         child_names.sort()
@@ -495,22 +499,27 @@
                     gID = int(group[4:])
                     thisIDs = h5fp[group]['particle_index'][:]
                     thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                    child_IDs.append(thisIDs)
-                    child_masses.append(thisMasses)
-                    child_halos.append(na.ones(thisIDs.size,
+                    child_IDs.extend(thisIDs)
+                    child_masses.extend(thisMasses)
+                    child_halos.extend(na.ones(len(thisIDs),
                         dtype='int32') * gID)
                     del thisIDs, thisMasses
                 h5fp.close()
-
         # Sort the arrays by particle index in ascending order.
         if len(child_IDs)==0:
-            child_IDs = na.array([], dtype='int32')
+            child_IDs = na.array([], dtype='int64')
             child_masses = na.array([], dtype='float64')
             child_halos = na.array([], dtype='int32')
         else:
-            child_IDs = na.concatenate(child_IDs).astype('int32')
-            child_masses = na.concatenate(child_masses).astype('float64')
-            child_halos = na.concatenate(child_halos).astype('int32')
+            child_IDs = na.asarray(child_IDs).astype('int64')
+            child_masses = na.asarray(child_masses)
+            child_halos = na.asarray(child_halos)
+            sort = child_IDs.argsort()
+            child_IDs = child_IDs[sort]
+            child_masses = child_masses[sort]
+            child_halos = child_halos[sort]
+            del sort
+
         child_send = na.ones(child_IDs.size, dtype='bool')
         
         # Match particles in halos.



https://bitbucket.org/yt_analysis/yt/changeset/211e42b5af28/
changeset:   211e42b5af28
branch:      yt
user:        sskory
date:        2012-09-07 21:23:36
summary:     This combination of append/concatenate works for me for the merger tree.
affected #:  1 file

diff -r 1a87d0db31fc5d9d974d14b9f5d20e388959c5be -r 211e42b5af287d7e811e8b8cc08f5f5a341a7b41 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -460,9 +460,9 @@
                         gID = int(group[4:])
                         thisIDs = h5fp[group]['particle_index'][:]
                         thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                        parent_IDs.extend(thisIDs)
-                        parent_masses.extend(thisMasses)
-                        parent_halos.extend(na.ones(len(thisIDs),
+                        parent_IDs.append(thisIDs)
+                        parent_masses.append(thisMasses)
+                        parent_halos.append(na.ones(len(thisIDs),
                             dtype='int32') * gID)
                         del thisIDs, thisMasses
                     h5fp.close()
@@ -472,9 +472,9 @@
                 parent_masses = na.array([], dtype='float64')
                 parent_halos = na.array([], dtype='int32')
             else:
-                parent_IDs = na.asarray(parent_IDs).astype('int64')
-                parent_masses = na.asarray(parent_masses).astype('float64')
-                parent_halos = na.asarray(parent_halos).astype('int32')
+                parent_IDs = na.concatenate(parent_IDs).astype('int64')
+                parent_masses = na.concatenate(parent_masses).astype('float64')
+                parent_halos = na.concatenate(parent_halos).astype('int32')
                 sort = parent_IDs.argsort()
                 parent_IDs = parent_IDs[sort]
                 parent_masses = parent_masses[sort]
@@ -499,9 +499,9 @@
                     gID = int(group[4:])
                     thisIDs = h5fp[group]['particle_index'][:]
                     thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                    child_IDs.extend(thisIDs)
-                    child_masses.extend(thisMasses)
-                    child_halos.extend(na.ones(len(thisIDs),
+                    child_IDs.append(thisIDs)
+                    child_masses.append(thisMasses)
+                    child_halos.append(na.ones(len(thisIDs),
                         dtype='int32') * gID)
                     del thisIDs, thisMasses
                 h5fp.close()
@@ -511,9 +511,9 @@
             child_masses = na.array([], dtype='float64')
             child_halos = na.array([], dtype='int32')
         else:
-            child_IDs = na.asarray(child_IDs).astype('int64')
-            child_masses = na.asarray(child_masses)
-            child_halos = na.asarray(child_halos)
+            child_IDs = na.concatenate(child_IDs).astype('int64')
+            child_masses = na.concatenate(child_masses)
+            child_halos = na.concatenate(child_halos)
             sort = child_IDs.argsort()
             child_IDs = child_IDs[sort]
             child_masses = child_masses[sort]



https://bitbucket.org/yt_analysis/yt/changeset/62a260cea40d/
changeset:   62a260cea40d
branch:      yt
user:        sskory
date:        2012-09-07 22:29:10
summary:     Swapping out the Fortran kdtree for the Cython one.
affected #:  1 file

diff -r 211e42b5af287d7e811e8b8cc08f5f5a341a7b41 -r 62a260cea40df2a9c2b9784f471852bcab2ee4f7 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -37,10 +37,7 @@
 from yt.convenience import load
 from yt.utilities.logger import ytLogger as mylog
 import yt.utilities.pydot as pydot
-try:
-    from yt.utilities.kdtree import *
-except ImportError:
-    mylog.debug("The Fortran kD-Tree did not import correctly.")
+from yt.utilities.spatial import cKDTree
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelDummy, \
     ParallelAnalysisInterface, \
@@ -349,16 +346,8 @@
                 child_points.append([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
-            # Turn it into fortran.
             child_points = na.array(child_points)
-            fKD.pos = na.asfortranarray(child_points.T)
-            fKD.qv = na.empty(3, dtype='float64')
-            fKD.dist = na.empty(NumNeighbors, dtype='float64')
-            fKD.tags = na.empty(NumNeighbors, dtype='int64')
-            fKD.nn = NumNeighbors
-            fKD.sort = True
-            fKD.rearrange = True
-            create_tree(0)
+            kdtree = cKDTree(child_points, leafsize = 10)
     
         # Find the parent points from the database.
         parent_pf = load(parentfile)
@@ -373,22 +362,20 @@
             candidates = {}
             for row in self.cursor:
                 # Normalize positions for use within the kdtree.
-                fKD.qv = na.array([row[1] / self.period[0],
+                query = na.array([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
-                find_nn_nearest_neighbors()
-                NNtags = fKD.tags[:] - 1
+                NNtags = kdtree.query(query, NumNeighbors, period=self.period)[1]
                 nIDs = []
                 for n in NNtags:
-                    nIDs.append(n)
+                    if n not in nIDs:
+                        nIDs.append(n)
                 # We need to fill in fake halos if there aren't enough halos,
                 # which can happen at high redshifts.
                 while len(nIDs) < NumNeighbors:
                     nIDs.append(-1)
                 candidates[row[0]] = nIDs
-            
-            del fKD.pos, fKD.tags, fKD.dist
-            free_tree(0) # Frees the kdtree object.
+            del kdtree
         else:
             candidates = None
 



https://bitbucket.org/yt_analysis/yt/changeset/80c20f67f390/
changeset:   80c20f67f390
branch:      yt
user:        bcrosby
date:        2012-09-08 18:37:48
summary:     Pulled in Stephen's modification. Tested against the standard halo merger tree database created for the Enzo_64 dataset and found identical results.
affected #:  1 file

diff -r 62a260cea40df2a9c2b9784f471852bcab2ee4f7 -r 80c20f67f390ea2eadf9358d7ccba79f2fabb2a6 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -506,7 +506,7 @@
             child_masses = child_masses[sort]
             child_halos = child_halos[sort]
             del sort
-
+        
         child_send = na.ones(child_IDs.size, dtype='bool')
         
         # Match particles in halos.



https://bitbucket.org/yt_analysis/yt/changeset/5e78fb4e3812/
changeset:   5e78fb4e3812
branch:      yt
user:        sskory
date:        2012-09-08 19:19:38
summary:     Merged in bcrosby/crosby (pull request #266)
affected #:  1 file

diff -r 551e1238ab38bfd9f1951e6a3fe692cc995e5768 -r 5e78fb4e3812e186e208866d9b7300a244eb6ee5 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -37,10 +37,7 @@
 from yt.convenience import load
 from yt.utilities.logger import ytLogger as mylog
 import yt.utilities.pydot as pydot
-try:
-    from yt.utilities.kdtree import *
-except ImportError:
-    mylog.debug("The Fortran kD-Tree did not import correctly.")
+from yt.utilities.spatial import cKDTree
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelDummy, \
     ParallelAnalysisInterface, \
@@ -349,16 +346,8 @@
                 child_points.append([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
-            # Turn it into fortran.
             child_points = na.array(child_points)
-            fKD.pos = na.asfortranarray(child_points.T)
-            fKD.qv = na.empty(3, dtype='float64')
-            fKD.dist = na.empty(NumNeighbors, dtype='float64')
-            fKD.tags = na.empty(NumNeighbors, dtype='int64')
-            fKD.nn = NumNeighbors
-            fKD.sort = True
-            fKD.rearrange = True
-            create_tree(0)
+            kdtree = cKDTree(child_points, leafsize = 10)
     
         # Find the parent points from the database.
         parent_pf = load(parentfile)
@@ -373,22 +362,20 @@
             candidates = {}
             for row in self.cursor:
                 # Normalize positions for use within the kdtree.
-                fKD.qv = na.array([row[1] / self.period[0],
+                query = na.array([row[1] / self.period[0],
                 row[2] / self.period[1],
                 row[3] / self.period[2]])
-                find_nn_nearest_neighbors()
-                NNtags = fKD.tags[:] - 1
+                NNtags = kdtree.query(query, NumNeighbors, period=self.period)[1]
                 nIDs = []
                 for n in NNtags:
-                    nIDs.append(n)
+                    if n not in nIDs:
+                        nIDs.append(n)
                 # We need to fill in fake halos if there aren't enough halos,
                 # which can happen at high redshifts.
                 while len(nIDs) < NumNeighbors:
                     nIDs.append(-1)
                 candidates[row[0]] = nIDs
-            
-            del fKD.pos, fKD.tags, fKD.dist
-            free_tree(0) # Frees the kdtree object.
+            del kdtree
         else:
             candidates = None
 
@@ -450,9 +437,9 @@
             # the parent dataset.
             parent_names = list(self.names[parent_currt])
             parent_names.sort()
-            parent_IDs = na.array([], dtype='int64')
-            parent_masses = na.array([], dtype='float64')
-            parent_halos = na.array([], dtype='int32')
+            parent_IDs = []
+            parent_masses = []
+            parent_halos = []
             for i,pname in enumerate(parent_names):
                 if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                     h5fp = h5py.File(pname)
@@ -460,31 +447,38 @@
                         gID = int(group[4:])
                         thisIDs = h5fp[group]['particle_index'][:]
                         thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                        parent_IDs = na.concatenate((parent_IDs, thisIDs))
-                        parent_masses = na.concatenate((parent_masses, thisMasses))
-                        parent_halos = na.concatenate((parent_halos, 
-                            na.ones(thisIDs.size, dtype='int32') * gID))
+                        parent_IDs.append(thisIDs)
+                        parent_masses.append(thisMasses)
+                        parent_halos.append(na.ones(len(thisIDs),
+                            dtype='int32') * gID)
                         del thisIDs, thisMasses
                     h5fp.close()
-            
             # Sort the arrays by particle index in ascending order.
-            sort = parent_IDs.argsort()
-            parent_IDs = parent_IDs[sort]
-            parent_masses = parent_masses[sort]
-            parent_halos = parent_halos[sort]
-            del sort
+            if len(parent_IDs)==0:
+                parent_IDs = na.array([], dtype='int64')
+                parent_masses = na.array([], dtype='float64')
+                parent_halos = na.array([], dtype='int32')
+            else:
+                parent_IDs = na.concatenate(parent_IDs).astype('int64')
+                parent_masses = na.concatenate(parent_masses).astype('float64')
+                parent_halos = na.concatenate(parent_halos).astype('int32')
+                sort = parent_IDs.argsort()
+                parent_IDs = parent_IDs[sort]
+                parent_masses = parent_masses[sort]
+                parent_halos = parent_halos[sort]
+                del sort
         else:
             # We can use old data and save disk reading.
             (parent_IDs, parent_masses, parent_halos) = last
         # Used to communicate un-matched particles.
         parent_send = na.ones(parent_IDs.size, dtype='bool')
-        
+
         # Now get the child halo data.
         child_names = list(self.names[child_currt])
         child_names.sort()
-        child_IDs = na.array([], dtype='int64')
-        child_masses = na.array([], dtype='float64')
-        child_halos = na.array([], dtype='int32')
+        child_IDs = []
+        child_masses = []
+        child_halos = []
         for i,cname in enumerate(child_names):
             if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                 h5fp = h5py.File(cname)
@@ -492,20 +486,28 @@
                     gID = int(group[4:])
                     thisIDs = h5fp[group]['particle_index'][:]
                     thisMasses = h5fp[group]['ParticleMassMsun'][:]
-                    child_IDs = na.concatenate((child_IDs, thisIDs))
-                    child_masses = na.concatenate((child_masses, thisMasses))
-                    child_halos = na.concatenate((child_halos, 
-                        na.ones(thisIDs.size, dtype='int32') * gID))
+                    child_IDs.append(thisIDs)
+                    child_masses.append(thisMasses)
+                    child_halos.append(na.ones(len(thisIDs),
+                        dtype='int32') * gID)
                     del thisIDs, thisMasses
                 h5fp.close()
+        # Sort the arrays by particle index in ascending order.
+        if len(child_IDs)==0:
+            child_IDs = na.array([], dtype='int64')
+            child_masses = na.array([], dtype='float64')
+            child_halos = na.array([], dtype='int32')
+        else:
+            child_IDs = na.concatenate(child_IDs).astype('int64')
+            child_masses = na.concatenate(child_masses)
+            child_halos = na.concatenate(child_halos)
+            sort = child_IDs.argsort()
+            child_IDs = child_IDs[sort]
+            child_masses = child_masses[sort]
+            child_halos = child_halos[sort]
+            del sort
         
-        # Sort the arrays by particle index.
-        sort = child_IDs.argsort()
-        child_IDs = child_IDs[sort]
-        child_masses = child_masses[sort]
-        child_halos = child_halos[sort]
         child_send = na.ones(child_IDs.size, dtype='bool')
-        del sort
         
         # Match particles in halos.
         self._match(parent_IDs, child_IDs, parent_halos, child_halos,

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list