[yt-svn] commit/yt: 4 new changesets

commits-noreply at bitbucket.org
Thu May 26 13:41:50 PDT 2016


4 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/a77af405c8e6/
Changeset:   a77af405c8e6
Branch:      yt
User:        ngoldbaum
Date:        2016-05-15 21:22:21+00:00
Summary:     add ds.particle_type_counts

this is a convenience property for finding the number of particles by type over
the whole simulation
Affected #:  13 files
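
For context, a minimal usage sketch of the new property. The dataset path is
illustrative (yt's standard IsolatedGalaxy sample dataset); the expected count
matches the galaxy0030 test added below:

    import yt

    # load any particle-bearing dataset; the path here is illustrative
    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")

    # particle_type_counts maps each on-disk particle type to its total count;
    # the enzo test in this changeset expects {'io': 1124453} for this dataset
    print(ds.particle_type_counts)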

diff -r 4898059be9f9314615b7a7df090845dda5bdefe5 -r a77af405c8e62d179c98baf88daef16e54efc354 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -15,6 +15,7 @@
 #-----------------------------------------------------------------------------
 
 import functools
+import itertools
 import numpy as np
 import os
 import time
@@ -62,7 +63,6 @@
 from yt.units.unit_systems import create_code_unit_system
 from yt.data_objects.region_expression import \
     RegionExpression
-
 from yt.geometry.coordinates.api import \
     CoordinateHandler, \
     CartesianCoordinateHandler, \
@@ -171,6 +171,7 @@
     derived_field_list = requires_index("derived_field_list")
     fields = requires_index("fields")
     _instantiated = False
+    _particle_type_counts = None
 
     def __new__(cls, filename=None, *args, **kwargs):
         if not isinstance(filename, string_types):
@@ -763,6 +764,27 @@
         return fields
 
     @property
+    def particles_exist(self):
+        for pt, f in itertools.product(self.particle_types_raw, self.field_list):
+            if pt == f[0]:
+                return True
+        return False
+
+    @property
+    def particle_type_counts(self):
+        self.index
+        if self.particles_exist is False:
+            return {}
+
+        # frontends or index implementation can populate this dict while
+        # creating the index if they know particle counts at that time
+        if self._particle_type_counts is not None:
+            return self._particle_type_counts
+
+        self._particle_type_counts = self.index._get_particle_type_counts()
+        return self._particle_type_counts
+
+    @property
     def ires_factor(self):
         o2 = np.log2(self.refine_by)
         if o2 != int(o2):

diff -r 4898059be9f9314615b7a7df090845dda5bdefe5 -r a77af405c8e62d179c98baf88daef16e54efc354 yt/data_objects/tests/test_dataset_access.py
--- a/yt/data_objects/tests/test_dataset_access.py
+++ b/yt/data_objects/tests/test_dataset_access.py
@@ -1,4 +1,8 @@
-from yt.testing import fake_amr_ds, assert_equal
+from yt.testing import \
+    assert_equal, \
+    fake_amr_ds, \
+    fake_particle_ds, \
+    fake_random_ds
 
 # This will test the "dataset access" method.
 
@@ -37,3 +41,10 @@
     rho *= 2.0
     yield assert_equal, dd["density"]*2.0, ds.r["density"]
     yield assert_equal, dd["gas", "density"]*2.0, ds.r["gas", "density"]
+
+def test_particle_counts():
+    ds = fake_random_ds(16, particles=100)
+    assert ds.particle_type_counts == {'io': 100}
+
+    pds = fake_particle_ds(npart=128)
+    assert pds.particle_type_counts == {'io': 128}

diff -r 4898059be9f9314615b7a7df090845dda5bdefe5 -r a77af405c8e62d179c98baf88daef16e54efc354 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -337,6 +337,8 @@
             mylog.info("Discovered %i species of particles", len(ls_nonzero))
             mylog.info("Particle populations: "+'%9i '*len(ls_nonzero),
                        *ls_nonzero)
+            self._particle_type_counts = dict(
+                zip(self.particle_types_raw, ls_nonzero))
             for k, v in particle_header_vals.items():
                 if k in self.parameters.keys():
                     if not self.parameters[k] == v:

diff -r 4898059be9f9314615b7a7df090845dda5bdefe5 -r a77af405c8e62d179c98baf88daef16e54efc354 yt/frontends/art/tests/test_outputs.py
--- a/yt/frontends/art/tests/test_outputs.py
+++ b/yt/frontends/art/tests/test_outputs.py
@@ -69,6 +69,14 @@
                          ad[('specie2', 'particle_type')].size +
                          ad[('specie3', 'particle_type')].size), AnaNDM
 
+    for spnum in range(5):
+        npart_read = ad['specie%s' % spnum, 'particle_type'].size
+        npart_header = ds.particle_type_counts['specie%s' % spnum]
+        if spnum == 3:
+            # see issue 814
+            npart_read += 1
+        assert_equal(npart_read, npart_header)
+
     AnaBoxSize = YTQuantity(7.1442196564, 'Mpc')
     AnaVolume = YTQuantity(364.640074656, 'Mpc**3')
     Volume = 1

diff -r 4898059be9f9314615b7a7df090845dda5bdefe5 -r a77af405c8e62d179c98baf88daef16e54efc354 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -182,6 +182,17 @@
         return (self.dataset.domain_width /
                 (self.dataset.domain_dimensions * 2**(self.max_level))).min()
 
+    def _get_particle_type_counts(self):
+        # this could be done in the artio C interface without creating temporary
+        # arrays but I don't want to touch that code
+        # if a future brave soul wants to try, take a look at
+        # `read_sfc_particles` in _artio_caller.pyx
+        result = {}
+        ad = self.ds.all_data()
+        for ptype in self.ds.particle_types_raw:
+            result[ptype] = ad[ptype, 'PID'].size
+        return result
+
     def convert(self, unit):
         return self.dataset.conversion_factors[unit]
 

diff -r 4898059be9f9314615b7a7df090845dda5bdefe5 -r a77af405c8e62d179c98baf88daef16e54efc354 yt/frontends/artio/tests/test_outputs.py
--- a/yt/frontends/artio/tests/test_outputs.py
+++ b/yt/frontends/artio/tests/test_outputs.py
@@ -48,6 +48,7 @@
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2
+    assert_equal(ds.particle_type_counts, {'N-BODY': 100000, 'STAR': 110650})
 
 
 @requires_file(sizmbhloz)

diff -r 4898059be9f9314615b7a7df090845dda5bdefe5 -r a77af405c8e62d179c98baf88daef16e54efc354 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -483,6 +483,15 @@
             random_sample = np.mgrid[0:max(len(self.grids),1)].astype("int32")
         return self.grids[(random_sample,)]
 
+    def _get_particle_type_counts(self):
+        try:
+            ret = {}
+            for ptype in self.grid_active_particle_count:
+                ret[ptype] = self.grid_active_particle_count[ptype].sum()
+            return ret
+        except AttributeError:
+            return super(EnzoHierarchy, self)._get_particle_type_counts()
+
     def find_particles_by_type(self, ptype, max_num=None, additional_fields=None):
         """
         Returns a structure of arrays with all of the particles'

diff -r 4898059be9f9314615b7a7df090845dda5bdefe5 -r a77af405c8e62d179c98baf88daef16e54efc354 yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -83,6 +83,7 @@
     for test in big_patch_amr(ds, _fields):
         test_galaxy0030.__name__ = test.description
         yield test
+    assert_equal(ds.particle_type_counts, {'io': 1124453})
 
 @requires_ds(enzotiny)
 def test_simulated_halo_mass_function():
@@ -152,3 +153,6 @@
         [f for f in apcos.field_list if f[0] == 'CenOstriker'])
 
     assert_equal(apcos_fields, real_apcos_fields)
+
+    assert_equal(apcos.particle_type_counts,
+                 {'CenOstriker': 899755, 'DarkMatter': 32768})

diff -r 4898059be9f9314615b7a7df090845dda5bdefe5 -r a77af405c8e62d179c98baf88daef16e54efc354 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -474,6 +474,13 @@
         for level in range(self.max_level+1):
             self.level_stats[level+self.dataset.min_level+1]['numcells'] = levels[level]
 
+    def _get_particle_type_counts(self):
+        npart = 0
+        for dom in self.domains:
+            npart += dom.local_particle_count
+
+        return {'io': npart}
+
     def print_stats(self):
         
         # This function prints information based on the fluid on the grids,

diff -r 4898059be9f9314615b7a7df090845dda5bdefe5 -r a77af405c8e62d179c98baf88daef16e54efc354 yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -47,7 +47,7 @@
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2
-
+    assert_equal(ds.particle_type_counts, {'io': 1090895})
 
 @requires_file(output_00080)
 def test_RAMSESDataset():

diff -r 4898059be9f9314615b7a7df090845dda5bdefe5 -r a77af405c8e62d179c98baf88daef16e54efc354 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -191,6 +191,10 @@
         except TypeError:
             return self._data_file[full_name]
 
+    def _get_particle_type_counts(self):
+        # this is implemented by subclasses
+        raise NotImplementedError
+
     def _close_data_file(self):
         if self._data_file:
             self._data_file.close()

diff -r 4898059be9f9314615b7a7df090845dda5bdefe5 -r a77af405c8e62d179c98baf88daef16e54efc354 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -119,6 +119,9 @@
         """
         return self.select_grids(self.grid_levels.max())[0].dds[:].min()
 
+    def _get_particle_type_counts(self):
+        return {self.ds.particle_types_raw[0]: self.grid_particle_count.sum()}
+
     def _initialize_level_stats(self):
         # Now some statistics:
         #   0 = number of grids

diff -r 4898059be9f9314615b7a7df090845dda5bdefe5 -r a77af405c8e62d179c98baf88daef16e54efc354 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -14,6 +14,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import collections
 import numpy as np
 import os
 import weakref
@@ -51,6 +52,13 @@
                    self.dataset.domain_left_edge)
         return dx.min()
 
+    def _get_particle_type_counts(self):
+        result = collections.defaultdict(lambda: 0)
+        for df in self.data_files:
+            for k in df.total_particles.keys():
+                result[k] += df.total_particles[k]
+        return dict(result)
+
     def convert(self, unit):
         return self.dataset.conversion_factors[unit]
 


https://bitbucket.org/yt_analysis/yt/commits/ba935c3276ac/
Changeset:   ba935c3276ac
Branch:      yt
User:        ngoldbaum
Date:        2016-05-25 16:55:34+00:00
Summary:     Adding some discussion of particle types and particle type counts to quickstart
Affected #:  1 file

diff -r a77af405c8e62d179c98baf88daef16e54efc354 -r ba935c3276acb327df4e6a8c60996804e2da2937 doc/source/quickstart/2)_Data_Inspection.ipynb
--- a/doc/source/quickstart/2)_Data_Inspection.ipynb
+++ b/doc/source/quickstart/2)_Data_Inspection.ipynb
@@ -154,6 +154,35 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
+    "Finally, we can get basic information about the particle types and number of particles in a simulation:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "print (ds.particle_types)\n",
+    "print (ds.particle_types_raw)\n",
+    "print (ds.particle_type_counts)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For this dataset, we see that there are two particle types defined, (`io` and `all`), but that only one of these particle types in in `ds.particle_types_raw`. The `ds.particle_types` list contains *all* particle types in the simulation, including ones that are dynamically defined like particle unions. The `ds.particle_types_raw` list includes only particle types that are in the output file we loaded the dataset from.\n",
+    "\n",
+    "We can also see that there are a bit more than 1.1 million particles in this simulation. Only particle types in `ds.particle_types_raw` will appear in the `ds.particle_type_counts` dictionary."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
     "# Mesh Structure\n",
     "\n",
     "If you're using a simulation type that has grids (for instance, here we're using an Enzo simulation) you can examine the structure of the mesh.  For the most part, you probably won't have to use this unless you're debugging a simulation or examining in detail what is going on."


https://bitbucket.org/yt_analysis/yt/commits/0ad80d267114/
Changeset:   0ad80d267114
Branch:      yt
User:        ngoldbaum
Date:        2016-05-25 17:05:18+00:00
Summary:     Adding narrative docs for particle_type_counts.
Affected #:  1 file

diff -r ba935c3276acb327df4e6a8c60996804e2da2937 -r 0ad80d2671146894c04289ba163a01a98c8a50ee doc/source/examining/low_level_inspection.rst
--- a/doc/source/examining/low_level_inspection.rst
+++ b/doc/source/examining/low_level_inspection.rst
@@ -232,3 +232,28 @@
 whatever interface they wish for displaying and saving their image data.
 You can use the :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer`
 to accomplish this as described in :ref:`fixed-resolution-buffers`.
+
+High-level Information about Particles
+--------------------------------------
+
+There are a number of high-level helpers attached to ``Dataset`` objects to find
+out information about the particles in an output file. First, one can check if
+there are any particles in a dataset at all by examining
+``ds.particles_exist``. This will be ``True`` for datasets that include particles
+and ``False`` otherwise.
+
+One can also see which particle types are available in a dataset. Particle types
+that are available in the dataset's on-disk output are known as "raw" particle
+types, and they will appear in ``ds.particle_types_raw``. Particle types that
+are dynamically defined via a particle filter or a particle union will also
+appear in the ``ds.particle_types`` list. If the simulation only has one
+particle type on-disk, its name will be ``'io'``. If there is more than one
+particle type, the names of the particle types will be inferred from the output
+file. For example, Gadget HDF5 files have particle type names like ``PartType0``
+and ``PartType1``, while Enzo data, which usually only has one particle type,
+will only have a particle type named ``io``.
+
+Finally, one can see the number of each particle type by inspecting
+``ds.particle_type_counts``. This will be a dictionary mapping the names of
+particle types in ``ds.particle_types_raw`` to the number of each particle type
+in a simulation output.
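
As a quick sanity check of the behavior documented above, a minimal sketch
using yt's fake dataset builders (the expected values follow the test added
in the first changeset):

    from yt.testing import fake_particle_ds

    ds = fake_particle_ds(npart=128)
    print(ds.particles_exist)       # True: the fake dataset has particles
    print(ds.particle_types_raw)    # includes 'io'
    print(ds.particle_type_counts)  # {'io': 128}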


https://bitbucket.org/yt_analysis/yt/commits/027e7591dc5a/
Changeset:   027e7591dc5a
Branch:      yt
User:        bwkeller
Date:        2016-05-26 20:41:36+00:00
Summary:     Merged in ngoldbaum/yt (pull request #2176)

[new feature] add ds.particle_type_counts. Closes #718
Affected #:  15 files

diff -r f609b1cbd8dd18374c28cbd7511641b64438cd94 -r 027e7591dc5a18423ab364582ef7c4b895fe9b44 doc/source/examining/low_level_inspection.rst
--- a/doc/source/examining/low_level_inspection.rst
+++ b/doc/source/examining/low_level_inspection.rst
@@ -232,3 +232,28 @@
 whatever interface they wish for displaying and saving their image data.
 You can use the :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer`
 to accomplish this as described in :ref:`fixed-resolution-buffers`.
+
+High-level Information about Particles
+--------------------------------------
+
+There are a number of high-level helpers attached to ``Dataset`` objects to find
+out information about the particles in an output file. First, one can check if
+there are any particles in a dataset at all by examining
+``ds.particles_exist``. This will be ``True`` for datasets that include particles
+and ``False`` otherwise.
+
+One can also see which particle types are available in a dataset. Particle types
+that are available in the dataset's on-disk output are known as "raw" particle
+types, and they will appear in ``ds.particle_types_raw``. Particle types that
+are dynamically defined via a particle filter or a particle union will also
+appear in the ``ds.particle_types`` list. If the simulation only has one
+particle type on-disk, its name will be ``'io'``. If there is more than one
+particle type, the names of the particle types will be inferred from the output
+file. For example, Gadget HDF5 files have particle type names like ``PartType0``
+and ``PartType1``, while Enzo data, which usually only has one particle type,
+will only have a particle type named ``io``.
+
+Finally, one can see the number of each particle type by inspecting
+``ds.particle_type_counts``. This will be a dictionary mapping the names of
+particle types in ``ds.particle_types_raw`` to the number of each particle type
+in a simulation output.

diff -r f609b1cbd8dd18374c28cbd7511641b64438cd94 -r 027e7591dc5a18423ab364582ef7c4b895fe9b44 doc/source/quickstart/2)_Data_Inspection.ipynb
--- a/doc/source/quickstart/2)_Data_Inspection.ipynb
+++ b/doc/source/quickstart/2)_Data_Inspection.ipynb
@@ -154,6 +154,35 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
+    "Finally, we can get basic information about the particle types and number of particles in a simulation:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "print (ds.particle_types)\n",
+    "print (ds.particle_types_raw)\n",
+    "print (ds.particle_type_counts)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For this dataset, we see that there are two particle types defined, (`io` and `all`), but that only one of these particle types in in `ds.particle_types_raw`. The `ds.particle_types` list contains *all* particle types in the simulation, including ones that are dynamically defined like particle unions. The `ds.particle_types_raw` list includes only particle types that are in the output file we loaded the dataset from.\n",
+    "\n",
+    "We can also see that there are a bit more than 1.1 million particles in this simulation. Only particle types in `ds.particle_types_raw` will appear in the `ds.particle_type_counts` dictionary."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
     "# Mesh Structure\n",
     "\n",
     "If you're using a simulation type that has grids (for instance, here we're using an Enzo simulation) you can examine the structure of the mesh.  For the most part, you probably won't have to use this unless you're debugging a simulation or examining in detail what is going on."

diff -r f609b1cbd8dd18374c28cbd7511641b64438cd94 -r 027e7591dc5a18423ab364582ef7c4b895fe9b44 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -15,6 +15,7 @@
 #-----------------------------------------------------------------------------
 
 import functools
+import itertools
 import numpy as np
 import os
 import time
@@ -64,7 +65,6 @@
 from yt.units.unit_systems import create_code_unit_system
 from yt.data_objects.region_expression import \
     RegionExpression
-
 from yt.geometry.coordinates.api import \
     CoordinateHandler, \
     CartesianCoordinateHandler, \
@@ -173,6 +173,7 @@
     derived_field_list = requires_index("derived_field_list")
     fields = requires_index("fields")
     _instantiated = False
+    _particle_type_counts = None
 
     def __new__(cls, filename=None, *args, **kwargs):
         if not isinstance(filename, string_types):
@@ -768,6 +769,27 @@
         return fields
 
     @property
+    def particles_exist(self):
+        for pt, f in itertools.product(self.particle_types_raw, self.field_list):
+            if pt == f[0]:
+                return True
+        return False
+
+    @property
+    def particle_type_counts(self):
+        self.index
+        if self.particles_exist is False:
+            return {}
+
+        # frontends or index implementation can populate this dict while
+        # creating the index if they know particle counts at that time
+        if self._particle_type_counts is not None:
+            return self._particle_type_counts
+
+        self._particle_type_counts = self.index._get_particle_type_counts()
+        return self._particle_type_counts
+
+    @property
     def ires_factor(self):
         o2 = np.log2(self.refine_by)
         if o2 != int(o2):

diff -r f609b1cbd8dd18374c28cbd7511641b64438cd94 -r 027e7591dc5a18423ab364582ef7c4b895fe9b44 yt/data_objects/tests/test_dataset_access.py
--- a/yt/data_objects/tests/test_dataset_access.py
+++ b/yt/data_objects/tests/test_dataset_access.py
@@ -1,4 +1,8 @@
-from yt.testing import fake_amr_ds, assert_equal
+from yt.testing import \
+    assert_equal, \
+    fake_amr_ds, \
+    fake_particle_ds, \
+    fake_random_ds
 
 # This will test the "dataset access" method.
 
@@ -37,3 +41,10 @@
     rho *= 2.0
     yield assert_equal, dd["density"]*2.0, ds.r["density"]
     yield assert_equal, dd["gas", "density"]*2.0, ds.r["gas", "density"]
+
+def test_particle_counts():
+    ds = fake_random_ds(16, particles=100)
+    assert ds.particle_type_counts == {'io': 100}
+
+    pds = fake_particle_ds(npart=128)
+    assert pds.particle_type_counts == {'io': 128}

diff -r f609b1cbd8dd18374c28cbd7511641b64438cd94 -r 027e7591dc5a18423ab364582ef7c4b895fe9b44 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -337,6 +337,8 @@
             mylog.info("Discovered %i species of particles", len(ls_nonzero))
             mylog.info("Particle populations: "+'%9i '*len(ls_nonzero),
                        *ls_nonzero)
+            self._particle_type_counts = dict(
+                zip(self.particle_types_raw, ls_nonzero))
             for k, v in particle_header_vals.items():
                 if k in self.parameters.keys():
                     if not self.parameters[k] == v:

diff -r f609b1cbd8dd18374c28cbd7511641b64438cd94 -r 027e7591dc5a18423ab364582ef7c4b895fe9b44 yt/frontends/art/tests/test_outputs.py
--- a/yt/frontends/art/tests/test_outputs.py
+++ b/yt/frontends/art/tests/test_outputs.py
@@ -69,6 +69,14 @@
                          ad[('specie2', 'particle_type')].size +
                          ad[('specie3', 'particle_type')].size), AnaNDM
 
+    for spnum in range(5):
+        npart_read = ad['specie%s' % spnum, 'particle_type'].size
+        npart_header = ds.particle_type_counts['specie%s' % spnum]
+        if spnum == 3:
+            # see issue 814
+            npart_read += 1
+        assert_equal(npart_read, npart_header)
+
     AnaBoxSize = YTQuantity(7.1442196564, 'Mpc')
     AnaVolume = YTQuantity(364.640074656, 'Mpc**3')
     Volume = 1

diff -r f609b1cbd8dd18374c28cbd7511641b64438cd94 -r 027e7591dc5a18423ab364582ef7c4b895fe9b44 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -182,6 +182,17 @@
         return (self.dataset.domain_width /
                 (self.dataset.domain_dimensions * 2**(self.max_level))).min()
 
+    def _get_particle_type_counts(self):
+        # this could be done in the artio C interface without creating temporary
+        # arrays but I don't want to touch that code
+        # if a future brave soul wants to try, take a look at
+        # `read_sfc_particles` in _artio_caller.pyx
+        result = {}
+        ad = self.ds.all_data()
+        for ptype in self.ds.particle_types_raw:
+            result[ptype] = ad[ptype, 'PID'].size
+        return result
+
     def convert(self, unit):
         return self.dataset.conversion_factors[unit]
 

diff -r f609b1cbd8dd18374c28cbd7511641b64438cd94 -r 027e7591dc5a18423ab364582ef7c4b895fe9b44 yt/frontends/artio/tests/test_outputs.py
--- a/yt/frontends/artio/tests/test_outputs.py
+++ b/yt/frontends/artio/tests/test_outputs.py
@@ -48,6 +48,7 @@
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2
+    assert_equal(ds.particle_type_counts, {'N-BODY': 100000, 'STAR': 110650})
 
 
 @requires_file(sizmbhloz)

diff -r f609b1cbd8dd18374c28cbd7511641b64438cd94 -r 027e7591dc5a18423ab364582ef7c4b895fe9b44 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -483,6 +483,15 @@
             random_sample = np.mgrid[0:max(len(self.grids),1)].astype("int32")
         return self.grids[(random_sample,)]
 
+    def _get_particle_type_counts(self):
+        try:
+            ret = {}
+            for ptype in self.grid_active_particle_count:
+                ret[ptype] = self.grid_active_particle_count[ptype].sum()
+            return ret
+        except AttributeError:
+            return super(EnzoHierarchy, self)._get_particle_type_counts()
+
     def find_particles_by_type(self, ptype, max_num=None, additional_fields=None):
         """
         Returns a structure of arrays with all of the particles'

diff -r f609b1cbd8dd18374c28cbd7511641b64438cd94 -r 027e7591dc5a18423ab364582ef7c4b895fe9b44 yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -83,6 +83,7 @@
     for test in big_patch_amr(ds, _fields):
         test_galaxy0030.__name__ = test.description
         yield test
+    assert_equal(ds.particle_type_counts, {'io': 1124453})
 
 @requires_ds(enzotiny)
 def test_simulated_halo_mass_function():
@@ -152,3 +153,6 @@
         [f for f in apcos.field_list if f[0] == 'CenOstriker'])
 
     assert_equal(apcos_fields, real_apcos_fields)
+
+    assert_equal(apcos.particle_type_counts,
+                 {'CenOstriker': 899755, 'DarkMatter': 32768})

diff -r f609b1cbd8dd18374c28cbd7511641b64438cd94 -r 027e7591dc5a18423ab364582ef7c4b895fe9b44 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -474,6 +474,13 @@
         for level in range(self.max_level+1):
             self.level_stats[level+self.dataset.min_level+1]['numcells'] = levels[level]
 
+    def _get_particle_type_counts(self):
+        npart = 0
+        for dom in self.domains:
+            npart += dom.local_particle_count
+
+        return {'io': npart}
+
     def print_stats(self):
         
         # This function prints information based on the fluid on the grids,

diff -r f609b1cbd8dd18374c28cbd7511641b64438cd94 -r 027e7591dc5a18423ab364582ef7c4b895fe9b44 yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -47,7 +47,7 @@
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2
-
+    assert_equal(ds.particle_type_counts, {'io': 1090895})
 
 @requires_file(output_00080)
 def test_RAMSESDataset():

diff -r f609b1cbd8dd18374c28cbd7511641b64438cd94 -r 027e7591dc5a18423ab364582ef7c4b895fe9b44 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -191,6 +191,10 @@
         except TypeError:
             return self._data_file[full_name]
 
+    def _get_particle_type_counts(self):
+        # this is implemented by subclasses
+        raise NotImplementedError
+
     def _close_data_file(self):
         if self._data_file:
             self._data_file.close()

diff -r f609b1cbd8dd18374c28cbd7511641b64438cd94 -r 027e7591dc5a18423ab364582ef7c4b895fe9b44 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -119,6 +119,9 @@
         """
         return self.select_grids(self.grid_levels.max())[0].dds[:].min()
 
+    def _get_particle_type_counts(self):
+        return {self.ds.particle_types_raw[0]: self.grid_particle_count.sum()}
+
     def _initialize_level_stats(self):
         # Now some statistics:
         #   0 = number of grids

diff -r f609b1cbd8dd18374c28cbd7511641b64438cd94 -r 027e7591dc5a18423ab364582ef7c4b895fe9b44 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -14,6 +14,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import collections
 import numpy as np
 import os
 import weakref
@@ -51,6 +52,13 @@
                    self.dataset.domain_left_edge)
         return dx.min()
 
+    def _get_particle_type_counts(self):
+        result = collections.defaultdict(lambda: 0)
+        for df in self.data_files:
+            for k in df.total_particles.keys():
+                result[k] += df.total_particles[k]
+        return dict(result)
+
     def convert(self, unit):
         return self.dataset.conversion_factors[unit]

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


