[yt-svn] commit/yt: 64 new changesets

commits-noreply at bitbucket.org
Mon Jan 15 06:25:12 PST 2018


64 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/991f6b6c4684/
Changeset:   991f6b6c4684
User:        Corentin Cadiou
Date:        2017-11-08 14:54:33+00:00
Summary:     add support for reading of part_file_descriptor.txt
Affected #:  3 files

diff -r f1b82b81e5722b292353f390e84138f885c4d4d9 -r 991f6b6c468463fa5427f7d5b8c05609d3126705 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -35,6 +35,7 @@
     OctreeSubset
 
 from .definitions import ramses_header, field_aliases
+from .io import _read_part_file_descriptor
 from yt.utilities.physical_constants import mp, kb
 from .fields import \
     RAMSESFieldInfo, _X
@@ -58,12 +59,14 @@
 
         num = os.path.basename(ds.parameter_filename).split("."
                 )[0].split("_")[1]
+        basedir = os.path.abspath(
+            os.path.dirname(ds.parameter_filename))
         basename = "%s/%%s_%s.out%05i" % (
-            os.path.abspath(
-              os.path.dirname(ds.parameter_filename)),
-            num, domain_id)
+            basedir, num, domain_id)
+        part_file_descriptor = "%s/part_file_descriptor.txt" % basedir
         for t in ['grav', 'hydro', 'part', 'amr', 'sink']:
             setattr(self, "%s_fn" % t, basename % t)
+        self._part_file_descriptor = part_file_descriptor
         self._read_amr_header()
         self._read_hydro_header()
         self._read_particle_header()
@@ -214,6 +217,7 @@
             self.local_particle_count = 0
             self.particle_field_offsets = {}
             return
+
         f = open(self.part_fn, "rb")
         f.seek(0, os.SEEK_END)
         flen = f.tell()
@@ -233,7 +237,12 @@
         self.particle_header = hvals
         self.local_particle_count = hvals['npart']
 
-        particle_fields = [
+        # Try reading particle file descriptor
+        if os.path.exists(self._part_file_descriptor) and \
+           self.ds._extra_particle_fields is None:
+            particle_fields = _read_part_file_descriptor(self._part_file_descriptor)
+        else:
+            particle_fields = [
                 ("particle_position_x", "d"),
                 ("particle_position_y", "d"),
                 ("particle_position_z", "d"),
@@ -244,8 +253,8 @@
                 ("particle_identifier", "i"),
                 ("particle_refinement_level", "I")]
 
-        if self.ds._extra_particle_fields is not None:
-            particle_fields += self.ds._extra_particle_fields
+            if self.ds._extra_particle_fields is not None:
+                particle_fields += self.ds._extra_particle_fields
 
         field_offsets = {}
         _pfields = {}

diff -r f1b82b81e5722b292353f390e84138f885c4d4d9 -r 991f6b6c468463fa5427f7d5b8c05609d3126705 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -88,6 +88,7 @@
         ("particle_identifier", ("", ["particle_index"], None)),
         ("particle_refinement_level", ("", [], None)),
         ("particle_age", ("code_time", ['age'], None)),
+        ("particle_birth_time", ("code_time", ['age'], None)),
         ("particle_metallicity", ("", [], None)),
     )
 

diff -r f1b82b81e5722b292353f390e84138f885c4d4d9 -r 991f6b6c468463fa5427f7d5b8c05609d3126705 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -24,6 +24,7 @@
 from yt.utilities.lib.cosmology_time import \
     get_ramses_ages
 from yt.extern.six import PY3
+import re
 
 if PY3:
     from io import BytesIO as IO
@@ -162,3 +163,48 @@
                 fname, foffsets, data_types, subset, subs_fields))
 
         return tr
+
+VERSION_RE = re.compile(' *version: *(\d+)')
+VAR_DESC_RE = re.compile(' *variable # *(\d+): *(\w+)')
+def _read_part_file_descriptor(fname):
+    """
+    Read the particle file descriptor and return the list of fields found.
+    """
+    # The kind of the known types
+    assoc = [('position_%s' % k, 'd') for k in 'xyz'] + \
+            [('velocity_%s' % k, 'd') for k in 'xyz'] + \
+            [('mass', 'd'), ('identity', 'i'), ('levelp', 'i'),
+            ('family', 'i'), ('tag', 'i'), ('birth_time', 'd'),
+            ('metallicity', 'd')]
+
+    assoc = {k: v for k, v in assoc}
+    if True: #with open(fname, 'r') as f:
+        f = open(fname, 'r')
+        line = f.readline()
+        tmp = VERSION_RE.match(line)
+        if not tmp:
+            print(line)
+            raise Exception('File format not understood')
+
+        version = int(tmp.group(1))
+
+        if version == 1:
+            fields = []
+            for i, line in enumerate(f.readlines()):
+                tmp = VAR_DESC_RE.match(line)
+                if not tmp:
+                    raise Exception('Error while reading %s at line %s' % (fname, i+1))
+
+                # ivar = tmp.group(1)
+                varname = tmp.group(2)
+
+                if varname in assoc:
+                    dtype = assoc[varname]
+                else:
+                    dtype = 'd'
+
+                fields.append(("particle_%s" % varname, dtype))
+        else:
+            raise Exception('Unrecognized particle file descriptor version: %s' % version)
+
+    return fields
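
For reference, the two regexes above accept a descriptor of roughly the
following shape (a hypothetical example). The helper then maps each
variable name through `assoc` to a struct kind, falling back to 'd'
(double) for unknown names:

     version: 1
     variable #  1: position_x
     variable #  2: position_y
     variable #  3: mass
     variable #  4: identity

    >>> _read_part_file_descriptor('part_file_descriptor.txt')
    [('particle_position_x', 'd'), ('particle_position_y', 'd'),
     ('particle_mass', 'd'), ('particle_identity', 'i')]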


https://bitbucket.org/yt_analysis/yt/commits/ac23e596ed63/
Changeset:   ac23e596ed63
User:        Corentin Cadiou
Date:        2017-11-11 11:25:38+00:00
Summary:     reading particle file descriptor
Affected #:  4 files

diff -r 991f6b6c468463fa5427f7d5b8c05609d3126705 -r ac23e596ed63c6d1b8a093cef06722e0d38c3653 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -34,7 +34,7 @@
 from yt.data_objects.octree_subset import \
     OctreeSubset
 
-from .definitions import ramses_header, field_aliases
+from .definitions import ramses_header, field_aliases, particle_families
 from .io import _read_part_file_descriptor
 from yt.utilities.physical_constants import mp, kb
 from .fields import \
@@ -240,7 +240,9 @@
         # Try reading particle file descriptor
         if os.path.exists(self._part_file_descriptor) and \
            self.ds._extra_particle_fields is None:
-            particle_fields = _read_part_file_descriptor(self._part_file_descriptor)
+            particle_fields = (
+                _read_part_file_descriptor(self._part_file_descriptor))
+            ptype = 'io'
         else:
             particle_fields = [
                 ("particle_position_x", "d"),
@@ -256,10 +258,12 @@
             if self.ds._extra_particle_fields is not None:
                 particle_fields += self.ds._extra_particle_fields
 
+            ptype = 'io'
+
+
         field_offsets = {}
         _pfields = {}
 
-        ptype = 'io'
 
         # Read offsets
         for field, vtype in particle_fields:

diff -r 991f6b6c468463fa5427f7d5b8c05609d3126705 -r ac23e596ed63c6d1b8a093cef06722e0d38c3653 yt/frontends/ramses/definitions.py
--- a/yt/frontends/ramses/definitions.py
+++ b/yt/frontends/ramses/definitions.py
@@ -61,3 +61,14 @@
                           'Metallicity'),
 
 }
+
+particle_families = {
+    1: 'DM',
+    2: 'star',
+    3: 'cloud',
+    4: 'dust',
+    -2: 'star_tracer',
+    -3: 'cloud_tracer',
+    -4: 'dust_tracer',
+    0: 'gas_tracer',
+}

diff -r 991f6b6c468463fa5427f7d5b8c05609d3126705 -r ac23e596ed63c6d1b8a093cef06722e0d38c3653 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -64,6 +64,20 @@
 _X = 0.76 # H fraction, hardcoded
 _Y = 0.24 # He fraction, hardcoded
 
+
+# Association between particles types and families, hardcoded
+_families = {
+    1: 'DM',
+    2: 'star',
+    3: 'cloud',
+    4: 'dust',
+    -2: 'star_tracer',
+    -3: 'cloud_tracer',
+    -4: 'dust_tracer',
+    0: 'gas_tracer',
+}
+
+
 class RAMSESFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("Density", (rho_units, ["density"], None)),
@@ -90,6 +104,8 @@
         ("particle_age", ("code_time", ['age'], None)),
         ("particle_birth_time", ("code_time", ['age'], None)),
         ("particle_metallicity", ("", [], None)),
+        ("particle_family", ("", [], None)),
+        ("particle_tag", ("", [], None))
     )
 
     known_sink_fields = (

diff -r 991f6b6c468463fa5427f7d5b8c05609d3126705 -r ac23e596ed63c6d1b8a093cef06722e0d38c3653 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -174,7 +174,7 @@
     assoc = [('position_%s' % k, 'd') for k in 'xyz'] + \
             [('velocity_%s' % k, 'd') for k in 'xyz'] + \
             [('mass', 'd'), ('identity', 'i'), ('levelp', 'i'),
-            ('family', 'i'), ('tag', 'i'), ('birth_time', 'd'),
+            ('family', 'b'), ('tag', 'b'), ('birth_time', 'd'),
             ('metallicity', 'd')]
 
     assoc = {k: v for k, v in assoc}
@@ -182,6 +182,7 @@
         f = open(fname, 'r')
         line = f.readline()
         tmp = VERSION_RE.match(line)
+        mylog.info('Reading part file descriptor.')
         if not tmp:
             print(line)
             raise Exception('File format not understood')
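
The switch from 'i' to 'b' for `family` and `tag` above uses the struct
module's format characters: 'b' is a 1-byte signed integer while 'i' is
4 bytes, presumably matching 1-byte records in the particle files. For
reference:

    >>> import struct
    >>> struct.calcsize('b'), struct.calcsize('i'), struct.calcsize('d')
    (1, 4, 8)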


https://bitbucket.org/yt_analysis/yt/commits/a4cd438ebc0a/
Changeset:   a4cd438ebc0a
User:        Corentin Cadiou
Date:        2017-11-11 12:55:22+00:00
Summary:     Support for new particle types
Affected #:  3 files

diff -r ac23e596ed63c6d1b8a093cef06722e0d38c3653 -r a4cd438ebc0af848b1c06b485b9a568a210506ec yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -33,6 +33,7 @@
     Dataset
 from yt.data_objects.octree_subset import \
     OctreeSubset
+from yt import add_particle_filter
 
 from .definitions import ramses_header, field_aliases, particle_families
 from .io import _read_part_file_descriptor
@@ -94,6 +95,13 @@
         return os.path.exists(self.sink_fn)
 
     @property
+    def _has_part_descriptor(self):
+        '''
+        Does the output include a particle file descriptor?
+        '''
+        return os.path.exists(self._part_file_descriptor)
+
+    @property
     def level_count(self):
         if self._level_count is not None: return self._level_count
         self.hydro_offset
@@ -238,7 +246,7 @@
         self.local_particle_count = hvals['npart']
 
         # Try reading particle file descriptor
-        if os.path.exists(self._part_file_descriptor) and \
+        if self._has_part_descriptor and \
            self.ds._extra_particle_fields is None:
             particle_fields = (
                 _read_part_file_descriptor(self._part_file_descriptor))
@@ -683,6 +691,22 @@
 
         self.storage_filename = storage_filename
 
+        # Add particles filters
+        for fname, value in particle_families.items():
+            def loc(val):
+                def closure(pfilter, data):
+                    filter = data[(pfilter.filtered_type, "particle_family")] == val
+                    return filter
+
+                return closure
+            add_particle_filter(fname, loc(value),
+                                filtered_type='io', requires=['particle_family'])
+
+
+    def add_ptypes(self):
+        for k in particle_families.keys():
+            mylog.info('Adding particle_type: %s' % k)
+            self.add_particle_filter('%s' % k)
 
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
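
The `loc(val)` wrapper in the filter registration above matters because
Python closures bind loop variables late: without it, every filter would
compare `particle_family` against the last value of the loop. A minimal
sketch of the pitfall and of the fix used here:

    >>> fns = [lambda: v for v in range(3)]
    >>> [f() for f in fns]  # late binding: all closures see the final v
    [2, 2, 2]
    >>> fns = [(lambda val: (lambda: val))(v) for v in range(3)]
    >>> [f() for f in fns]  # the extra call captures each value
    [0, 1, 2]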

diff -r ac23e596ed63c6d1b8a093cef06722e0d38c3653 -r a4cd438ebc0af848b1c06b485b9a568a210506ec yt/frontends/ramses/definitions.py
--- a/yt/frontends/ramses/definitions.py
+++ b/yt/frontends/ramses/definitions.py
@@ -63,12 +63,12 @@
 }
 
 particle_families = {
-    1: 'DM',
-    2: 'star',
-    3: 'cloud',
-    4: 'dust',
-    -2: 'star_tracer',
-    -3: 'cloud_tracer',
-    -4: 'dust_tracer',
-    0: 'gas_tracer',
+    'DM': 1,
+    'star': 2,
+    'cloud': 3,
+    'dust': 4,
+    'star_tracer': -2,
+    'cloud_tracer': -3,
+    'dust_tracer': -4,
+    'gas_tracer': 0
 }

diff -r ac23e596ed63c6d1b8a093cef06722e0d38c3653 -r a4cd438ebc0af848b1c06b485b9a568a210506ec yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -17,6 +17,7 @@
 import os
 import numpy as np
 
+import yt
 from yt.utilities.physical_constants import \
     boltzmann_constant_cgs, \
     mass_hydrogen_cgs, \
@@ -26,6 +27,7 @@
 import yt.utilities.fortran_utils as fpu
 from yt.fields.field_info_container import \
     FieldInfoContainer
+from .definitions import particle_families
 
 b_units = "code_magnetic"
 ra_units = "code_length / code_time**2"
@@ -65,19 +67,6 @@
 _Y = 0.24 # He fraction, hardcoded
 
 
-# Association between particles types and families, hardcoded
-_families = {
-    1: 'DM',
-    2: 'star',
-    3: 'cloud',
-    4: 'dust',
-    -2: 'star_tracer',
-    -3: 'cloud_tracer',
-    -4: 'dust_tracer',
-    0: 'gas_tracer',
-}
-
-
 class RAMSESFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("Density", (rho_units, ["density"], None)),


https://bitbucket.org/yt_analysis/yt/commits/b81c09686d9a/
Changeset:   b81c09686d9a
User:        Corentin Cadiou
Date:        2017-11-11 13:08:02+00:00
Summary:     remove useless (and erroneous) check
Affected #:  1 file

diff -r a4cd438ebc0af848b1c06b485b9a568a210506ec -r b81c09686d9a870ebec6c16643a27ede278dc86b yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -246,8 +246,7 @@
         self.local_particle_count = hvals['npart']
 
         # Try reading particle file descriptor
-        if self._has_part_descriptor and \
-           self.ds._extra_particle_fields is None:
+        if self._has_part_descriptor:
             particle_fields = (
                 _read_part_file_descriptor(self._part_file_descriptor))
             ptype = 'io'


https://bitbucket.org/yt_analysis/yt/commits/2db8c0e1f736/
Changeset:   2db8c0e1f736
User:        Corentin Cadiou
Date:        2017-11-11 13:10:38+00:00
Summary:     flake8
Affected #:  1 file

diff -r b81c09686d9a870ebec6c16643a27ede278dc86b -r 2db8c0e1f7361c9cbdf10815fc2d22677f5bc72a yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -17,7 +17,6 @@
 import os
 import numpy as np
 
-import yt
 from yt.utilities.physical_constants import \
     boltzmann_constant_cgs, \
     mass_hydrogen_cgs, \
@@ -27,7 +26,6 @@
 import yt.utilities.fortran_utils as fpu
 from yt.fields.field_info_container import \
     FieldInfoContainer
-from .definitions import particle_families
 
 b_units = "code_magnetic"
 ra_units = "code_length / code_time**2"


https://bitbucket.org/yt_analysis/yt/commits/76e86689857f/
Changeset:   76e86689857f
User:        Corentin Cadiou
Date:        2017-11-11 13:22:42+00:00
Summary:     add doc
Affected #:  1 file

diff -r 2db8c0e1f7361c9cbdf10815fc2d22677f5bc72a -r 76e86689857fb7330adb6587f670ff3aad672787 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -2003,6 +2003,13 @@
 
 yt will attempt to guess the fields in the file. For more control over the hydro fields or the particle fields, see :ref:`loading-ramses-data-args`.
 
+yt also supports the new particle output format introduced after
+version `stable_17_09` (the version released after the 2017 Ramses
+User Meeting). In this case, the file `part_file_descriptor.txt`,
+which lists the fields contained in the particle files, will be read.
+If you use a custom version of RAMSES, make sure this file is
+up-to-date and reflects the true layout of the particles.
+
 yt supports outputs made by the mainline ``RAMSES`` code as well as the
 ``RAMSES-RT`` fork. Files produced by ``RAMSES-RT`` are recognized as such
 based on the presence of a ``info_rt_*.txt`` file in the output directory.
@@ -2091,6 +2098,21 @@
 	 This feature is only available when using Hilbert ordering.
 
 
+.. rubric:: Particle automatic filtering
+
+If your RAMSES version is more recent than `stable_17_09`, it is
+possible to tell yt to filter the particles in your dataset. This is
+not done by default, as it requires reading all the particles and may
+be slow. To use this feature, run
+
+.. code-block:: python
+
+   ds = yt.load('ramses_new_format/output_00011/info_00011.txt')
+
+   # This will load the particle types automatically
+   ds.add_ptypes()
+
+
 .. _loading-sph-data:
 
 SPH Particle Data
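
Once registered, the filtered types behave like any other particle type.
A sketch of the intended usage, assuming the dataset actually contains
star and DM particles:

    import yt
    ds = yt.load('ramses_new_format/output_00011/info_00011.txt')
    ds.add_ptypes()
    ad = ds.all_data()
    ad['star', 'particle_mass']  # only particles with particle_family == 2
    ad['DM', 'particle_mass']    # only particles with particle_family == 1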


https://bitbucket.org/yt_analysis/yt/commits/b75494ce14ef/
Changeset:   b75494ce14ef
User:        Corentin Cadiou
Date:        2017-11-11 13:37:29+00:00
Summary:     add test
Affected #:  1 file

diff -r 76e86689857fb7330adb6587f670ff3aad672787 -r b75494ce14ef694123d1ea8af19094b3d4266d11 yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -204,3 +204,34 @@
 
     for field in expected_fields:
         assert(('sink', 'field') not in ds.field_list)
+
+
+ramses_new_format = "ramses_new_format/output_00011/info_00011.txt"
+@requires_file(ramses_new_format)
+def test_new_format():
+    expected_particle_fields = [
+        ('io', 'particle_birth_time'),
+        ('io', 'particle_family'),
+        ('io', 'particle_identity'),
+        ('io', 'particle_levelp'),
+        ('io', 'particle_mass'),
+        ('io', 'particle_metallicity'),
+        ('io', 'particle_position_x'),
+        ('io', 'particle_position_y'),
+        ('io', 'particle_position_z'),
+        ('io', 'particle_tag'),
+        ('io', 'particle_velocity_x'),
+        ('io', 'particle_velocity_y'),
+        ('io', 'particle_velocity_z')]
+
+    ds = yt.load(ramses_new_format)
+    ad = ds.all_data()
+
+    # Check all the expected fields exist and can be accessed
+    for f in expected_particle_fields:
+        assert(f in ds.field_list)
+        ad[f]
+
+    # Check that there are only stars and that they all have tag 0
+    assert(all(ad['particle_family'] == 2))
+    assert(all(ad['particle_tag'] == 0))


https://bitbucket.org/yt_analysis/yt/commits/0acf3e93e3a4/
Changeset:   0acf3e93e3a4
User:        Corentin Cadiou
Date:        2017-11-11 13:45:55+00:00
Summary:     more doc
Affected #:  2 files

diff -r b75494ce14ef694123d1ea8af19094b3d4266d11 -r 0acf3e93e3a45657a7d2747b23595df5111906f0 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -2098,7 +2098,8 @@
 	 This feature is only available when using Hilbert ordering.
 
 
-.. rubric:: Particle automatic filtering
+Particle automatic filtering
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 If your RAMSES version is more recent than `stable_17_09`, it is
 possible to tell yt to filter the particles in your dataset. This is
@@ -2113,6 +2114,19 @@
    ds.add_ptypes()
 
 
+Adding custom particle fields
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+It is possible to add support for extra particle fields. To do so, one
+should tweak
+:func:`~yt.frontends.ramses.io._read_part_file_descriptor` to add the
+field, together with its data type, to the `assoc` list, following the
+format-character convention of the
+`python struct module <https://docs.python.org/3.5/library/struct.html#format-characters>`_.
+For example, to add support for a longint field named
+`my_custom_field`, one would add `('my_custom_field', 'l')` to `assoc`.
+
+
 .. _loading-sph-data:
 
 SPH Particle Data
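
Concretely, the tweak described above amounts to extending the `assoc`
list inside `_read_part_file_descriptor` (a sketch; `my_custom_field` is
a made-up name):

    assoc = [('position_%s' % k, 'd') for k in 'xyz'] + \
            [('velocity_%s' % k, 'd') for k in 'xyz'] + \
            [('mass', 'd'), ('identity', 'i'), ('levelp', 'i'),
             ('family', 'b'), ('tag', 'b'), ('birth_time', 'd'),
             ('metallicity', 'd'),
             ('my_custom_field', 'l')]  # 'l' = long, per the struct module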

diff -r b75494ce14ef694123d1ea8af19094b3d4266d11 -r 0acf3e93e3a45657a7d2747b23595df5111906f0 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -174,8 +174,8 @@
     assoc = [('position_%s' % k, 'd') for k in 'xyz'] + \
             [('velocity_%s' % k, 'd') for k in 'xyz'] + \
             [('mass', 'd'), ('identity', 'i'), ('levelp', 'i'),
-            ('family', 'b'), ('tag', 'b'), ('birth_time', 'd'),
-            ('metallicity', 'd')]
+             ('family', 'b'), ('tag', 'b'), ('birth_time', 'd'),
+             ('metallicity', 'd')]
 
     assoc = {k: v for k, v in assoc}
     if True: #with open(fname, 'r') as f:


https://bitbucket.org/yt_analysis/yt/commits/3e4a473f8ba6/
Changeset:   3e4a473f8ba6
User:        Corentin Cadiou
Date:        2017-11-13 17:07:11+00:00
Summary:     automatic loading of particles
Affected #:  1 file

diff -r 0acf3e93e3a45657a7d2747b23595df5111906f0 -r 3e4a473f8ba6181c2a0744d89905ffea9b13cd25 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -702,7 +702,12 @@
                                 filtered_type='io', requires=['particle_family'])
 
 
-    def add_ptypes(self):
+    def create_field_info(self, *args, **kwa):
+        """Extend create_field_info to add the particles types."""
+        Dataset.create_field_info(self, *args, **kwa)
+        self._add_ptypes()
+
+    def _add_ptypes(self):
         for k in particle_families.keys():
             mylog.info('Adding particle_type: %s' % k)
             self.add_particle_filter('%s' % k)


https://bitbucket.org/yt_analysis/yt/commits/534438eb5712/
Changeset:   534438eb5712
User:        Corentin Cadiou
Date:        2017-11-13 17:14:30+00:00
Summary:     remove 'old' doc
Affected #:  1 file

diff -r 3e4a473f8ba6181c2a0744d89905ffea9b13cd25 -r 534438eb57120c6808f68e937ef07e84ff71006b doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -2098,22 +2098,6 @@
 	 This feature is only available when using Hilbert ordering.
 
 
-Particle automatic filtering
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-If your RAMSES version is more recent than `stable_17_09`, it is
-possible to tell yt to filter the particles in your dataset. This is
-not done by default, as it requires reading all the particles and may
-be slow. To use this feature, run
-
-.. code-block:: python
-
-   ds = yt.load('ramses_new_format/output_00011/info_00011.txt')
-
-   # This will load the particle types automatically
-   ds.add_ptypes()
-
-
 Adding custom particle fields
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 


https://bitbucket.org/yt_analysis/yt/commits/5a28f735a458/
Changeset:   5a28f735a458
User:        Corentin Cadiou
Date:        2017-11-13 17:14:39+00:00
Summary:     use more specific exception
Affected #:  1 file

diff -r 534438eb57120c6808f68e937ef07e84ff71006b -r 5a28f735a4582aad712efcb12bb5dc720428231e yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -23,6 +23,7 @@
 import yt.utilities.fortran_utils as fpu
 from yt.utilities.lib.cosmology_time import \
     get_ramses_ages
+from yt.utilities.exceptions import YTFieldTypeNotFound
 from yt.extern.six import PY3
 import re
 
@@ -157,7 +158,7 @@
 
             else:
                 # Raise here an exception
-                raise Exception('Unknown particle type %s' % ptype)
+                raise YTFieldTypeNotFound(ptype)
 
             tr.update(_ramses_particle_file_handler(
                 fname, foffsets, data_types, subset, subs_fields))


https://bitbucket.org/yt_analysis/yt/commits/cff6ae99ed28/
Changeset:   cff6ae99ed28
User:        Corentin Cadiou
Date:        2017-11-13 17:21:36+00:00
Summary:     add explicit exceptions
Affected #:  2 files

diff -r 5a28f735a4582aad712efcb12bb5dc720428231e -r cff6ae99ed28b590643d4400d4581295ae3afd09 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -23,7 +23,8 @@
 import yt.utilities.fortran_utils as fpu
 from yt.utilities.lib.cosmology_time import \
     get_ramses_ages
-from yt.utilities.exceptions import YTFieldTypeNotFound
+from yt.utilities.exceptions import YTFieldTypeNotFound, YTOutputFormatNotImplemented, \
+    YTNotParsableFile
 from yt.extern.six import PY3
 import re
 
@@ -195,7 +196,7 @@
             for i, line in enumerate(f.readlines()):
                 tmp = VAR_DESC_RE.match(line)
                 if not tmp:
-                    raise Exception('Error while reading %s at line %s' % (fname, i+1))
+                    raise YTNotParsableFile(fname, i+1)
 
                 # ivar = tmp.group(1)
                 varname = tmp.group(2)
@@ -207,6 +208,6 @@
 
                 fields.append(("particle_%s" % varname, dtype))
         else:
-            raise Exception('Unrecognized particle file descriptor version: %s' % version)
+            raise YTOutputFormatNotImplemented()
 
     return fields

diff -r 5a28f735a4582aad712efcb12bb5dc720428231e -r cff6ae99ed28b590643d4400d4581295ae3afd09 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -424,6 +424,19 @@
         v += r"'%s'."
         return v % (self.obj_name, self.ds)
 
+class YTParticleOutputFormatNotImplemented(YTException):
+    def __str__(self):
+        return "The particle output format is not supported."
+
+class YTNotParsableFile(YTException):
+    def __init__(self, fname, line):
+        self.fname = fname
+        self.line = line
+
+    def __str__(self):
+        v = r"Error while parsing file %s at line %s"
+        return v % (self.fname, self.line)
+
 class YTRockstarMultiMassNotSupported(YTException):
     def __init__(self, mi, ma, ptype):
         self.mi = mi


https://bitbucket.org/yt_analysis/yt/commits/3dd7060b4567/
Changeset:   3dd7060b4567
User:        Corentin Cadiou
Date:        2017-11-25 10:46:03+00:00
Summary:     Update to new-new format
Affected #:  1 file

diff -r cff6ae99ed28b590643d4400d4581295ae3afd09 -r 3dd7060b456701d2628dd500d4d5d4baf3fb16f2 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -23,7 +23,7 @@
 import yt.utilities.fortran_utils as fpu
 from yt.utilities.lib.cosmology_time import \
     get_ramses_ages
-from yt.utilities.exceptions import YTFieldTypeNotFound, YTOutputFormatNotImplemented, \
+from yt.utilities.exceptions import YTFieldTypeNotFound, YTParticleOutputFormatNotImplemented, \
     YTNotParsableFile
 from yt.extern.six import PY3
 import re
@@ -166,32 +166,42 @@
 
         return tr
 
-VERSION_RE = re.compile(' *version: *(\d+)')
-VAR_DESC_RE = re.compile(' *variable # *(\d+): *(\w+)')
+VERSION_RE = re.compile('# version: *(\d+)')
+VAR_DESC_RE = re.compile(r'\s*(\d+),\s*(\w+),\s*(\w+)')
 def _read_part_file_descriptor(fname):
     """
     Read the particle file descriptor and return the list of fields found.
     """
-    # The kind of the known types
-    assoc = [('position_%s' % k, 'd') for k in 'xyz'] + \
-            [('velocity_%s' % k, 'd') for k in 'xyz'] + \
-            [('mass', 'd'), ('identity', 'i'), ('levelp', 'i'),
-             ('family', 'b'), ('tag', 'b'), ('birth_time', 'd'),
-             ('metallicity', 'd')]
+    # Mapping
+    mapping = [
+        ('position_x', 'particle_position_x'),
+        ('position_y', 'particle_position_y'),
+        ('position_z', 'particle_position_z'),
+        ('velocity_x', 'particle_velocity_x'),
+        ('velocity_y', 'particle_velocity_y'),
+        ('velocity_z', 'particle_velocity_z'),
+        ('mass', 'particle_mass'),
+        ('identity', 'particle_identity'),
+        ('levelp', 'particle_level'),
+        ('family', 'particle_family'),
+        ('tag', 'particle_tag')
+    ]
+    # Convert in dictionary
+    mapping = {k: v for k, v in mapping}
 
-    assoc = {k: v for k, v in assoc}
     if True: #with open(fname, 'r') as f:
         f = open(fname, 'r')
         line = f.readline()
         tmp = VERSION_RE.match(line)
         mylog.info('Reading part file descriptor.')
         if not tmp:
-            print(line)
-            raise Exception('File format not understood')
+            raise YTParticleOutputFormatNotImplemented()
 
         version = int(tmp.group(1))
 
         if version == 1:
+            # Skip one line (containing the headers)
+            line = f.readline()
             fields = []
             for i, line in enumerate(f.readlines()):
                 tmp = VAR_DESC_RE.match(line)
@@ -200,14 +210,15 @@
 
                 # ivar = tmp.group(1)
                 varname = tmp.group(2)
+                dtype = tmp.group(3)
 
-                if varname in assoc:
-                    dtype = assoc[varname]
+                if varname in mapping:
+                    varname = mapping[varname]
                 else:
-                    dtype = 'd'
+                    varname = 'particle_%s' % varname
 
-                fields.append(("particle_%s" % varname, dtype))
+                fields.append((varname, dtype))
         else:
-            raise YTOutputFormatNotImplemented()
+            raise YTParticleOutputFormatNotImplemented()
 
     return fields
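
With this change the descriptor becomes a CSV-like table whose data
types come from the file itself rather than from a hardcoded mapping. A
hypothetical file in the new shape, and what the helper returns for it:

    # version: 1
    # ivar, variable_name, variable_type
     1, position_x, d
     2, position_y, d
     3, identity, i
     4, family, b

    >>> _read_part_file_descriptor('part_file_descriptor.txt')
    [('particle_position_x', 'd'), ('particle_position_y', 'd'),
     ('particle_identity', 'i'), ('particle_family', 'b')]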


https://bitbucket.org/yt_analysis/yt/commits/354f9a0bb1ac/
Changeset:   354f9a0bb1ac
User:        Corentin Cadiou
Date:        2017-11-25 11:14:50+00:00
Summary:     more idiomatic name
Affected #:  2 files

diff -r 3dd7060b456701d2628dd500d4d5d4baf3fb16f2 -r 354f9a0bb1ac76cf2cc4116aa0565bbefb7e275a yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -24,7 +24,7 @@
 from yt.utilities.lib.cosmology_time import \
     get_ramses_ages
 from yt.utilities.exceptions import YTFieldTypeNotFound, YTParticleOutputFormatNotImplemented, \
-    YTNotParsableFile
+    YTFileNotParseable
 from yt.extern.six import PY3
 import re
 
@@ -206,7 +206,7 @@
             for i, line in enumerate(f.readlines()):
                 tmp = VAR_DESC_RE.match(line)
                 if not tmp:
-                    raise YTNotParsableFile(fname, i+1)
+                    raise YTFileNotParseable(fname, i+1)
 
                 # ivar = tmp.group(1)
                 varname = tmp.group(2)

diff -r 3dd7060b456701d2628dd500d4d5d4baf3fb16f2 -r 354f9a0bb1ac76cf2cc4116aa0565bbefb7e275a yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -428,7 +428,7 @@
     def __str__(self):
         return "The particle output format is not supported."
 
-class YTNotParsableFile(YTException):
+class YTFileNotParseable(YTException):
     def __init__(self, fname, line):
         self.fname = fname
         self.line = line


https://bitbucket.org/yt_analysis/yt/commits/7f9722de47cc/
Changeset:   7f9722de47cc
User:        Corentin Cadiou
Date:        2017-11-25 11:55:49+00:00
Summary:     only add particle types when 'particle_family' is found
Affected #:  1 file

diff -r 354f9a0bb1ac76cf2cc4116aa0565bbefb7e275a -r 7f9722de47cce8d3c35154ffa607e6e7307cb895 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -691,15 +691,16 @@
         self.storage_filename = storage_filename
 
         # Add particles filters
-        for fname, value in particle_families.items():
-            def loc(val):
-                def closure(pfilter, data):
-                    filter = data[(pfilter.filtered_type, "particle_family")] == val
-                    return filter
+        if ('io', 'particle_family') in self.field_list:
+            for fname, value in particle_families.items():
+                def loc(val):
+                    def closure(pfilter, data):
+                        filter = data[(pfilter.filtered_type, "particle_family")] == val
+                        return filter
 
-                return closure
-            add_particle_filter(fname, loc(value),
-                                filtered_type='io', requires=['particle_family'])
+                    return closure
+                add_particle_filter(fname, loc(value),
+                                    filtered_type='io', requires=['particle_family'])
 
 
     def create_field_info(self, *args, **kwa):
@@ -708,9 +709,10 @@
         self._add_ptypes()
 
     def _add_ptypes(self):
-        for k in particle_families.keys():
-            mylog.info('Adding particle_type: %s' % k)
-            self.add_particle_filter('%s' % k)
+        if ('io', 'particle_family') in self.field_list:
+            for k in particle_families.keys():
+                mylog.info('Adding particle_type: %s' % k)
+                self.add_particle_filter('%s' % k)
 
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]


https://bitbucket.org/yt_analysis/yt/commits/0e004c71cd53/
Changeset:   0e004c71cd53
User:        Corentin Cadiou
Date:        2017-11-25 11:56:05+00:00
Summary:     be a bit more precise + updated dataset
Affected #:  1 file

diff -r 7f9722de47cce8d3c35154ffa607e6e7307cb895 -r 0e004c71cd5388904b8fbfcd38d40fc1e681fec5 yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -206,32 +206,31 @@
         assert(('sink', 'field') not in ds.field_list)
 
 
-ramses_new_format = "ramses_new_format/output_00011/info_00011.txt"
+ramses_new_format = "ramses_new_format/output_00002/info_00002.txt"
 @requires_file(ramses_new_format)
 def test_new_format():
     expected_particle_fields = [
-        ('io', 'particle_birth_time'),
-        ('io', 'particle_family'),
-        ('io', 'particle_identity'),
-        ('io', 'particle_levelp'),
-        ('io', 'particle_mass'),
-        ('io', 'particle_metallicity'),
-        ('io', 'particle_position_x'),
-        ('io', 'particle_position_y'),
-        ('io', 'particle_position_z'),
-        ('io', 'particle_tag'),
-        ('io', 'particle_velocity_x'),
-        ('io', 'particle_velocity_y'),
-        ('io', 'particle_velocity_z')]
+        ('star', 'particle_identity'),
+        ('star', 'particle_level'),
+        ('star', 'particle_mass'),
+        ('star', 'particle_metallicity'),
+        ('star', 'particle_position_x'),
+        ('star', 'particle_position_y'),
+        ('star', 'particle_position_z'),
+        ('star', 'particle_tag'),
+        ('star', 'particle_velocity_x'),
+        ('star', 'particle_velocity_y'),
+        ('star', 'particle_velocity_z')]
 
     ds = yt.load(ramses_new_format)
     ad = ds.all_data()
 
     # Check all the expected fields exist and can be accessed
     for f in expected_particle_fields:
-        assert(f in ds.field_list)
+        assert(f in ds.derived_field_list)
         ad[f]
 
     # Check that there are only stars and that they all have tag 0
-    assert(all(ad['particle_family'] == 2))
-    assert(all(ad['particle_tag'] == 0))
+    assert(all(ad['star', 'particle_family'] == 2))
+    assert(all(ad['star', 'particle_tag'] == 0))
+    assert(len(ad['star', 'particle_tag']) == 600)


https://bitbucket.org/yt_analysis/yt/commits/b00647cd9764/
Changeset:   b00647cd9764
User:        Corentin Cadiou
Date:        2017-11-25 12:12:53+00:00
Summary:     remove debug stmt
Affected #:  1 file

diff -r 0e004c71cd5388904b8fbfcd38d40fc1e681fec5 -r b00647cd9764de411f78937a6c26622842ec2e77 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -189,7 +189,7 @@
     # Convert in dictionary
     mapping = {k: v for k, v in mapping}
 
-    if True: #with open(fname, 'r') as f:
+    with open(fname, 'r') as f:
         f = open(fname, 'r')
         line = f.readline()
         tmp = VERSION_RE.match(line)


https://bitbucket.org/yt_analysis/yt/commits/c768d7d7ad68/
Changeset:   c768d7d7ad68
User:        Corentin Cadiou
Date:        2017-11-30 18:58:26+00:00
Summary:     use more explicit import
Affected #:  1 file

diff -r b00647cd9764de411f78937a6c26622842ec2e77 -r c768d7d7ad681be54776786f76676befae482b6f yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -33,7 +33,7 @@
     Dataset
 from yt.data_objects.octree_subset import \
     OctreeSubset
-from yt import add_particle_filter
+from yt.data_objects.particle_filters import add_particle_filter
 
 from .definitions import ramses_header, field_aliases, particle_families
 from .io import _read_part_file_descriptor


https://bitbucket.org/yt_analysis/yt/commits/03f4bac1920d/
Changeset:   03f4bac1920d
User:        Corentin Cadiou
Date:        2017-11-30 19:04:20+00:00
Summary:     Moving magic particle filtering to `create_field_info` method
Affected #:  1 file

diff -r c768d7d7ad681be54776786f76676befae482b6f -r 03f4bac1920da36a79ea3149fb4b1ece13f5adde yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -690,7 +690,11 @@
 
         self.storage_filename = storage_filename
 
-        # Add particles filters
+
+    def create_field_info(self, *args, **kwa):
+        """Extend create_field_info to add the particles types."""
+        super(RAMSESDataset, self).create_field_info(*args, **kwa)
+        # Register particle filters
         if ('io', 'particle_family') in self.field_list:
             for fname, value in particle_families.items():
                 def loc(val):
@@ -702,14 +706,6 @@
                 add_particle_filter(fname, loc(value),
                                     filtered_type='io', requires=['particle_family'])
 
-
-    def create_field_info(self, *args, **kwa):
-        """Extend create_field_info to add the particles types."""
-        Dataset.create_field_info(self, *args, **kwa)
-        self._add_ptypes()
-
-    def _add_ptypes(self):
-        if ('io', 'particle_family') in self.field_list:
             for k in particle_families.keys():
                 mylog.info('Adding particle_type: %s' % k)
                 self.add_particle_filter('%s' % k)


https://bitbucket.org/yt_analysis/yt/commits/9df80be26eb1/
Changeset:   9df80be26eb1
User:        Corentin Cadiou
Date:        2017-11-30 22:34:37+00:00
Summary:     use one-to-one filter registry
Affected #:  2 files

diff -r 03f4bac1920da36a79ea3149fb4b1ece13f5adde -r 9df80be26eb1c960969f8daf23966ebac7814728 yt/data_objects/particle_filters.py
--- a/yt/data_objects/particle_filters.py
+++ b/yt/data_objects/particle_filters.py
@@ -24,7 +24,7 @@
 from yt.utilities.exceptions import YTIllDefinedFilter
 
 # One to many mapping
-filter_registry = defaultdict(list)
+filter_registry = defaultdict(None)
 
 class DummyFieldInfo(object):
     particle_type = True
@@ -131,7 +131,7 @@
     if requires is None:
         requires = []
     filter = ParticleFilter(name, function, requires, filtered_type)
-    filter_registry[name].append(filter)
+    filter_registry[name] = filter
 
 
 def particle_filter(name=None, requires=None, filtered_type='all'):

diff -r 03f4bac1920da36a79ea3149fb4b1ece13f5adde -r 9df80be26eb1c960969f8daf23966ebac7814728 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -663,11 +663,10 @@
         self.known_filters[n] = None
         if isinstance(filter, string_types):
             used = False
-            for f in filter_registry[filter]:
-                used = self._setup_filtered_type(f)
-                if used:
-                    filter = f
-                    break
+            f = filter_registry[filter]
+            used = self._setup_filtered_type(f)
+            if used:
+                filter = f
         else:
             used = self._setup_filtered_type(filter)
         if not used:


https://bitbucket.org/yt_analysis/yt/commits/a93ae2cc8d72/
Changeset:   a93ae2cc8d72
User:        Corentin Cadiou
Date:        2017-11-30 22:34:51+00:00
Summary:     notify user when overriding a particle filter
Affected #:  1 file

diff -r 9df80be26eb1c960969f8daf23966ebac7814728 -r a93ae2cc8d72540f80d97fce0f8c0ce3e453a105 yt/data_objects/particle_filters.py
--- a/yt/data_objects/particle_filters.py
+++ b/yt/data_objects/particle_filters.py
@@ -22,6 +22,7 @@
 from yt.fields.field_info_container import \
     NullFunc, TranslationFunc
 from yt.utilities.exceptions import YTIllDefinedFilter
+from yt.funcs import mylog
 
 # One to many mapping
 filter_registry = defaultdict(None)
@@ -131,6 +132,8 @@
     if requires is None:
         requires = []
     filter = ParticleFilter(name, function, requires, filtered_type)
+    if filter_registry[name] is not None:
+        mylog.warning('The %s particle filter already exists. Overriding.' % name)
     filter_registry[name] = filter
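
Note that `defaultdict(None)` has no default factory, so it behaves like
a plain dict and raises KeyError for a name that was never registered;
the `filter_registry[name] is not None` check above would therefore
crash on a first registration, which the next changeset addresses by
switching to a plain dict with `.get(name, None)`. A quick illustration:

    >>> from collections import defaultdict
    >>> reg = defaultdict(None)
    >>> reg['missing']
    Traceback (most recent call last):
      ...
    KeyError: 'missing'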
 
 


https://bitbucket.org/yt_analysis/yt/commits/9f3a936a01b7/
Changeset:   9f3a936a01b7
User:        Corentin Cadiou
Date:        2017-11-30 23:00:14+00:00
Summary:     Use standard dict for filter registry
Affected #:  2 files

diff -r a93ae2cc8d72540f80d97fce0f8c0ce3e453a105 -r 9f3a936a01b778a6e7fc55af91a704afad983104 yt/data_objects/particle_filters.py
--- a/yt/data_objects/particle_filters.py
+++ b/yt/data_objects/particle_filters.py
@@ -15,7 +15,6 @@
 #-----------------------------------------------------------------------------
 
 import copy
-from collections import defaultdict
 
 from contextlib import contextmanager
 
@@ -24,8 +23,8 @@
 from yt.utilities.exceptions import YTIllDefinedFilter
 from yt.funcs import mylog
 
-# One to many mapping
-filter_registry = defaultdict(None)
+# One to one mapping
+filter_registry = {}
 
 class DummyFieldInfo(object):
     particle_type = True
@@ -132,7 +131,7 @@
     if requires is None:
         requires = []
     filter = ParticleFilter(name, function, requires, filtered_type)
-    if filter_registry[name] is not None:
+    if filter_registry.get(name, None) is not None:
         mylog.warning('The %s particle filter already exists. Overriding.' % name)
     filter_registry[name] = filter
 

diff -r a93ae2cc8d72540f80d97fce0f8c0ce3e453a105 -r 9f3a936a01b778a6e7fc55af91a704afad983104 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -663,7 +663,9 @@
         self.known_filters[n] = None
         if isinstance(filter, string_types):
             used = False
-            f = filter_registry[filter]
+            f = filter_registry.get(filter, None)
+            if f is None:
+                return False
             used = self._setup_filtered_type(f)
             if used:
                 filter = f


https://bitbucket.org/yt_analysis/yt/commits/308756b1eead/
Changeset:   308756b1eead
User:        Corentin Cadiou
Date:        2017-11-30 23:00:31+00:00
Summary:     Test overriding of particle_filter
Affected #:  1 file

diff -r 9f3a936a01b778a6e7fc55af91a704afad983104 -r 308756b1eead2c44210af216a73ca400f9deadc5 yt/data_objects/tests/test_particle_filter.py
--- a/yt/data_objects/tests/test_particle_filter.py
+++ b/yt/data_objects/tests/test_particle_filter.py
@@ -33,6 +33,51 @@
     ad['deposit', 'stars_cic']
     assert True
 
+
+def test_add_particle_filter_overriding():
+    """Test the add_particle_filter overriding"""
+    from yt.data_objects.particle_filters import filter_registry
+    from yt.funcs import mylog
+
+    def star_0(pfilter, data):
+        pass
+
+    def star_1(pfilter, data):
+        pass
+
+    # Use a closure to store whether the warning was called
+    def closure(status):
+        def warning_patch(*args, **kwargs):
+            print('I am called!')
+            status[0] = True
+
+        def was_called():
+            return status[0]
+
+        return warning_patch, was_called
+
+    ## Test 1: we add a dummy particle filter
+    add_particle_filter("dummy", function=star_0, filtered_type='all',
+                        requires=["creation_time"])
+    assert 'dummy' in filter_registry
+    assert_equal(filter_registry['dummy'].function, star_0)
+
+    ## Test 2: we add another dummy particle filter.
+    ##         a warning is expected. We use the above closure to
+    ##         check that.
+    # Store the original warning function
+    warning = mylog.warning
+    monkey_warning, monkey_patch_was_called = closure([False])
+    mylog.warning = monkey_warning
+    add_particle_filter("dummy", function=star_1, filtered_type='all',
+                        requires=["creation_time"])
+    assert_equal(filter_registry['dummy'].function, star_1)
+    assert_equal(monkey_patch_was_called(), True)
+
+    # Restore the original warning function
+    mylog.warning = warning
+
+
 @requires_file(iso_galaxy)
 def test_particle_filter():
     """Test the particle_filter decorator"""


https://bitbucket.org/yt_analysis/yt/commits/6545920cef31/
Changeset:   6545920cef31
User:        Corentin Cadiou
Date:        2017-12-03 13:05:12+00:00
Summary:     remove call stmt
Affected #:  1 file

diff -r 308756b1eead2c44210af216a73ca400f9deadc5 -r 6545920cef31ce77371f41de0a59b694efc54908 yt/data_objects/tests/test_particle_filter.py
--- a/yt/data_objects/tests/test_particle_filter.py
+++ b/yt/data_objects/tests/test_particle_filter.py
@@ -48,7 +48,6 @@
     # Use a closure to store whether the warning was called
     def closure(status):
         def warning_patch(*args, **kwargs):
-            print('I am called!')
             status[0] = True
 
         def was_called():
@@ -106,7 +105,7 @@
 
     for grid in ds.index.grids[20:31]:
         cg = ds.covering_grid(grid.Level, grid.LeftEdge, grid.ActiveDimensions)
-        
+
         assert_equal(cg['stars', 'particle_ones'].shape[0],
                      grid['stars', 'particle_ones'].shape[0])
         assert_equal(cg['stars', 'particle_mass'].shape[0],


https://bitbucket.org/yt_analysis/yt/commits/730a5a2c4de1/
Changeset:   730a5a2c4de1
User:        Corentin Cadiou
Date:        2017-11-29 14:25:53+00:00
Summary:     Refactor particle handling in RAMSES
Affected #:  3 files

diff -r 6545920cef31ce77371f41de0a59b694efc54908 -r 730a5a2c4de144e67cf37884e35732727e164e25 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -35,12 +35,12 @@
     OctreeSubset
 from yt.data_objects.particle_filters import add_particle_filter
 
+from yt.utilities.physical_constants import mp, kb
 from .definitions import ramses_header, field_aliases, particle_families
-from .io import _read_part_file_descriptor
-from yt.utilities.physical_constants import mp, kb
 from .fields import \
     RAMSESFieldInfo, _X
 from .hilbert import get_cpu_list
+from .particle_handlers import get_particle_handlers
 import yt.utilities.fortran_utils as fpu
 from yt.geometry.oct_container import \
     RAMSESOctreeContainer
@@ -65,13 +65,23 @@
         basename = "%s/%%s_%s.out%05i" % (
             basedir, num, domain_id)
         part_file_descriptor = "%s/part_file_descriptor.txt" % basedir
-        for t in ['grav', 'hydro', 'part', 'amr', 'sink']:
+        for t in ['grav', 'hydro', 'amr']:
             setattr(self, "%s_fn" % t, basename % t)
         self._part_file_descriptor = part_file_descriptor
         self._read_amr_header()
         self._read_hydro_header()
-        self._read_particle_header()
-        self._read_sink_header()
+
+        # Autodetect particle files
+        particle_handlers = [PH(ds, domain_id)
+                             for PH in get_particle_handlers()
+                             if PH.any_exist(self.ds)]
+        self.particle_handlers = particle_handlers
+        for ph in particle_handlers:
+            mylog.info('Detected particle type %s in domain_id=%s' % (ph.ptype, domain_id))
+            ph.read_header()
+            self._add_ptype(ph.ptype)
+
+        # Load the AMR structure
         self._read_amr()
 
     _hydro_offset = None
@@ -88,20 +98,6 @@
         return os.path.exists(self.hydro_fn)
 
     @property
-    def _has_sink(self):
-        '''
-        Does the output include sinks (black holes)?
-        '''
-        return os.path.exists(self.sink_fn)
-
-    @property
-    def _has_part_descriptor(self):
-        '''
-        Does the output include a particle file descriptor?
-        '''
-        return os.path.exists(self._part_file_descriptor)
-
-    @property
     def level_count(self):
         if self._level_count is not None: return self._level_count
         self.hydro_offset
@@ -161,146 +157,78 @@
         fpu.skip(f, 1)
         self.nvar = fpu.read_vector(f, "i")[0]
 
-
-    def _read_sink_header(self):
-        if not self._has_sink:
-            self.local_sink_count = 0
-            self.sink_field_offsets = {}
-            return
-        f = open(self.sink_fn, "rb")
-        f.seek(0, os.SEEK_END)
-        flen = f.tell()
-        f.seek(0)
-        hvals = {}
-        attrs = (('nsink', 1, 'I'),
-                 ('nindsink', 1, 'I'))
-        hvals.update(fpu.read_attrs(f, attrs))
-        self.sink_header = hvals
-        self.local_sink_count = hvals['nsink']
+    # def _read_particle_header(self):
+    #     if not os.path.exists(self.part_fn):
+    #         self.local_particle_count = 0
+    #         self.particle_field_offsets = {}
+    #         return
+    #     f = open(self.part_fn, "rb")
+    #     f.seek(0, os.SEEK_END)
+    #     flen = f.tell()
+    #     f.seek(0)
+    #     hvals = {}
+    #     attrs = ( ('ncpu', 1, 'I'),
+    #               ('ndim', 1, 'I'),
+    #               ('npart', 1, 'I') )
+    #     hvals.update(fpu.read_attrs(f, attrs))
+    #     fpu.read_vector(f, 'I')
 
-        sink_fields = [
-            ("particle_identifier", "i"),
-            ("particle_mass", "d"),
-            ("particle_position_x", "d"),
-            ("particle_position_y", "d"),
-            ("particle_position_z", "d"),
-            ("particle_velocity_x", "d"),
-            ("particle_velocity_y", "d"),
-            ("particle_velocity_z", "d"),
-            ("particle_age", "d"),
-            ("BH_real_accretion", "d"),
-            ("BH_bondi_accretion", "d"),
-            ("BH_eddington_accretion", "d"),
-            ("BH_esave", "d"),
-            ("gas_spin_x", "d"),
-            ("gas_spin_y", "d"),
-            ("gas_spin_z", "d"),
-            ("BH_spin_x", "d"),
-            ("BH_spin_y", "d"),
-            ("BH_spin_z", "d"),
-            ("BH_spin", "d"),
-            ("BH_efficiency", "d")]
+    #     attrs = ( ('nstar_tot', 1, 'I'),
+    #               ('mstar_tot', 1, 'd'),
+    #               ('mstar_lost', 1, 'd'),
+    #               ('nsink', 1, 'I') )
+    #     hvals.update(fpu.read_attrs(f, attrs))
+    #     self.particle_header = hvals
+    #     self.local_particle_count = hvals['npart']
 
-        for i in range(self.ds.dimensionality*2+1):
-            for j in range(self.ds.max_level, self.ds.min_level):
-                sink_fields.append((
-                    "particle_prop_%s_%s" % (i, j), "d"
-                ))
-
-        field_offsets = {}
-        _pfields = {}
-        for field, vtype in sink_fields:
-            if f.tell() >= flen: break
-            field_offsets["sink", field] = f.tell()
-            _pfields["sink", field] = vtype
-            fpu.skip(f, 1)
-        self.sink_field_offsets = field_offsets
-        self.sink_field_types = _pfields
-
-        self._add_ptype('sink')
-
-
-    def _read_particle_header(self):
-        if not os.path.exists(self.part_fn):
-            self.local_particle_count = 0
-            self.particle_field_offsets = {}
-            return
+    #     particle_fields = [
+    #             ("particle_position_x", "d"),
+    #             ("particle_position_y", "d"),
+    #             ("particle_position_z", "d"),
+    #             ("particle_velocity_x", "d"),
+    #             ("particle_velocity_y", "d"),
+    #             ("particle_velocity_z", "d"),
+    #             ("particle_mass", "d"),
+    #             ("particle_identifier", "i"),
+    #             ("particle_refinement_level", "I")]
 
-        f = open(self.part_fn, "rb")
-        f.seek(0, os.SEEK_END)
-        flen = f.tell()
-        f.seek(0)
-        hvals = {}
-        attrs = ( ('ncpu', 1, 'I'),
-                  ('ndim', 1, 'I'),
-                  ('npart', 1, 'I') )
-        hvals.update(fpu.read_attrs(f, attrs))
-        fpu.read_vector(f, 'I')
+    #     if self.ds._extra_particle_fields is not None:
+    #         particle_fields += self.ds._extra_particle_fields
+
+    #     field_offsets = {}
+    #     _pfields = {}
 
-        attrs = ( ('nstar_tot', 1, 'I'),
-                  ('mstar_tot', 1, 'd'),
-                  ('mstar_lost', 1, 'd'),
-                  ('nsink', 1, 'I') )
-        hvals.update(fpu.read_attrs(f, attrs))
-        self.particle_header = hvals
-        self.local_particle_count = hvals['npart']
+    #     ptype = 'io'
 
-        # Try reading particle file descriptor
-        if self._has_part_descriptor:
-            particle_fields = (
-                _read_part_file_descriptor(self._part_file_descriptor))
-            ptype = 'io'
-        else:
-            particle_fields = [
-                ("particle_position_x", "d"),
-                ("particle_position_y", "d"),
-                ("particle_position_z", "d"),
-                ("particle_velocity_x", "d"),
-                ("particle_velocity_y", "d"),
-                ("particle_velocity_z", "d"),
-                ("particle_mass", "d"),
-                ("particle_identifier", "i"),
-                ("particle_refinement_level", "I")]
+    #     # Read offsets
+    #     for field, vtype in particle_fields:
+    #         if f.tell() >= flen: break
+    #         field_offsets[ptype, field] = f.tell()
+    #         _pfields[ptype, field] = vtype
+    #         fpu.skip(f, 1)
 
-            if self.ds._extra_particle_fields is not None:
-                particle_fields += self.ds._extra_particle_fields
-
-            ptype = 'io'
-
+    #     iextra = 0
+    #     while f.tell() < flen:
+    #         iextra += 1
+    #         field, vtype = ('particle_extra_field_%i' % iextra, 'd')
+    #         particle_fields.append((field, vtype))
 
-        field_offsets = {}
-        _pfields = {}
-
-
-        # Read offsets
-        for field, vtype in particle_fields:
-            if f.tell() >= flen: break
-            field_offsets[ptype, field] = f.tell()
-            _pfields[ptype, field] = vtype
-            fpu.skip(f, 1)
+    #         field_offsets[ptype, field] = f.tell()
+    #         _pfields[ptype, field] = vtype
+    #         fpu.skip(f, 1)
 
-        iextra = 0
-        while f.tell() < flen:
-            iextra += 1
-            field, vtype = ('particle_extra_field_%i' % iextra, 'd')
-            particle_fields.append((field, vtype))
-
-            field_offsets[ptype, field] = f.tell()
-            _pfields[ptype, field] = vtype
-            fpu.skip(f, 1)
+    #     if iextra > 0 and not self.ds._warn_extra_fields:
+    #         self.ds._warn_extra_fields = True
+    #         w = ("Detected %s extra particle fields assuming kind "
+    #              "`double`. Consider using the `extra_particle_fields` "
+    #              "keyword argument if you have unexpected behavior.")
+    #         mylog.warning(w % iextra)
 
-        if iextra > 0 and not self.ds._warn_extra_fields:
-            self.ds._warn_extra_fields = True
-            w = ("Detected %s extra particle fields assuming kind "
-                 "`double`. Consider using the `extra_particle_fields` "
-                 "keyword argument if you have unexpected behavior.")
-            mylog.warning(w % iextra)
+    #     self.particle_field_offsets = field_offsets
+    #     self.particle_field_types = _pfields
 
-        self.particle_field_offsets = field_offsets
-        self.particle_field_types = _pfields
-
-        # Register the particle type
-        self._add_ptype(ptype)
+    #     # Register the particle type
+    #     self._add_ptype(ptype)
 
     def _read_amr_header(self):
         hvals = {}
@@ -491,8 +419,8 @@
             self._setup_auto_fields()
 
         for domain in self.domains:
-            dsl.update(set(domain.particle_field_offsets.keys()))
-            dsl.update(set(domain.sink_field_offsets.keys()))
+            for ph in domain.particle_handlers:
+                dsl.update(set(ph.field_offsets.keys()))
 
         self.particle_field_list = list(dsl)
         self.field_list = [("ramses", f) for f in self.fluid_field_list] \
@@ -839,17 +767,13 @@
 
             self.current_time = (self.time_tot + self.time_simu)/(self.hubble_constant*1e7/3.08e24)/self.parameters['unit_t']
 
-        # Check for the presence of sink files
-        sink_files = os.path.join(
-            os.path.split(self.parameter_filename)[0],
-            'sink_?????.out?????')
-        has_sink = len(glob.glob(sink_files))
+        # Add the particle types
+        ptypes = []
+        for PH in get_particle_handlers():
+            if PH.any_exist(self):
+                ptypes.append(PH.ptype)
 
-        if has_sink:
-            ptypes = ('io', 'sink')
-        else:
-            ptypes = ('io', )
-
+        ptypes = tuple(ptypes)
         self.particle_types = self.particle_types_raw = ptypes
 
 

diff -r 6545920cef31ce77371f41de0a59b694efc54908 -r 730a5a2c4de144e67cf37884e35732727e164e25 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -147,18 +147,15 @@
             # Select relevant fields
             subs_fields = filter(lambda f: f[0] == ptype, fields)
 
-            if ptype == 'io':
-                fname = subset.domain.part_fn
-                foffsets = subset.domain.particle_field_offsets
-                data_types = subset.domain.particle_field_types
-
-            elif ptype == 'sink':
-                fname = subset.domain.sink_fn
-                foffsets = subset.domain.sink_field_offsets
-                data_types = subset.domain.sink_field_types
-
-            else:
-                # Raise here an exception
+            ok = False
+            for ph in subset.domain.particle_handlers:
+                if ph.ptype == ptype:
+                    fname = ph.fname
+                    foffsets = ph.field_offsets
+                    data_types = ph.field_types
+                    ok = True
+                    break
+            if not ok:
                 raise YTFieldTypeNotFound(ptype)
 
             tr.update(_ramses_particle_file_handler(

diff -r 6545920cef31ce77371f41de0a59b694efc54908 -r 730a5a2c4de144e67cf37884e35732727e164e25 yt/frontends/ramses/particle_handlers.py
--- /dev/null
+++ b/yt/frontends/ramses/particle_handlers.py
@@ -0,0 +1,239 @@
+import os
+import yt.utilities.fortran_utils as fpu
+import glob
+from yt.funcs import mylog
+
+from .io import _read_part_file_descriptor
+
+PARTICLE_HANDLERS = []
+
+def get_particle_handlers():
+    return PARTICLE_HANDLERS
+
+def register_particle_handler(ph):
+    PARTICLE_HANDLERS.append(ph)
+
+
+class ParticleFileHandler(object):
+    '''Abstract class to handle particles in RAMSES.
+
+
+    See `SinkParticleFileHandler` for an example implementation.'''
+
+    ptype = None  # The name to give to the particle type
+    fname = None  # The name of the file(s).
+
+    attrs = None  # The attributes of the header
+    known_fields = None  # A list of tuples containing the field name and its type
+
+    @classmethod
+    def any_exist(cls, ds):
+        '''Return True if any file of this type is found
+
+        Arguments
+        ---------
+        * ds: a Ramses Dataset
+        '''
+        raise NotImplementedError
+
+
+    def __init__(self, ds, domain_id):
+        self.ds = ds
+        basename = os.path.abspath(
+              os.path.dirname(ds.parameter_filename))
+        iout = int(
+            os.path.basename(ds.parameter_filename)
+            .split(".")[0].
+            split("_")[1])
+        icpu = domain_id
+        self.fname = os.path.join(
+            basename,
+            self.fname.format(iout=iout, icpu=icpu))
+
+    @property
+    def exists(self):
+        '''Return True if the fname exists'''
+        return os.path.exists(self.fname)
+
+    def read_header(self):
+        '''
+        This function should read the header, compute the offsets of
+        the file into self.offsets and store the fields found in
+        self.fields.'''
+        raise NotImplementedError
+
+class DefaultParticleFileHandler(ParticleFileHandler):
+    ptype = 'io'
+    fname = 'part_{iout:05d}.out{icpu:05d}'
+    file_descriptor = 'part_file_descriptor.txt'
+
+    attrs = ( ('ncpu', 1, 'I'),
+              ('ndim', 1, 'I'),
+              ('npart', 1, 'I') )
+
+    known_fields = [
+        ("particle_position_x", "d"),
+        ("particle_position_y", "d"),
+        ("particle_position_z", "d"),
+        ("particle_velocity_x", "d"),
+        ("particle_velocity_y", "d"),
+        ("particle_velocity_z", "d"),
+        ("particle_mass", "d"),
+        ("particle_identifier", "i"),
+        ("particle_refinement_level", "I")]
+
+
+    @classmethod
+    def any_exist(cls, ds):
+        files = os.path.join(
+            os.path.split(ds.parameter_filename)[0],
+            'part_?????.out?????')
+        ret = len(glob.glob(files)) > 0
+        return ret
+
+    def read_header(self):
+        f = open(self.fname, "rb")
+        f.seek(0, os.SEEK_END)
+        flen = f.tell()
+        f.seek(0)
+        hvals = {}
+        attrs = ( ('ncpu', 1, 'I'),
+                  ('ndim', 1, 'I'),
+                  ('npart', 1, 'I') )
+        hvals.update(fpu.read_attrs(f, attrs))
+        fpu.read_vector(f, 'I')
+
+        attrs = ( ('nstar_tot', 1, 'I'),
+                  ('mstar_tot', 1, 'd'),
+                  ('mstar_lost', 1, 'd'),
+                  ('nsink', 1, 'I') )
+        hvals.update(fpu.read_attrs(f, attrs))
+        self.header = hvals
+        self.local_particle_count = hvals['npart']
+
+        if self.has_part_descriptor:
+            particle_fields = (
+                _read_part_file_descriptor(self.file_descriptor)
+            )
+        else:
+            particle_fields = self.known_fields.copy()
+
+            if self.ds._extra_particle_fields is not None:
+                particle_fields += self.ds._extra_particle_fields
+
+        field_offsets = {}
+        _pfields = {}
+
+        ptype = self.ptype
+
+        # Read offsets
+        for field, vtype in particle_fields:
+            if f.tell() >= flen: break
+            field_offsets[ptype, field] = f.tell()
+            _pfields[ptype, field] = vtype
+            fpu.skip(f, 1)
+
+        iextra = 0
+        while f.tell() < flen:
+            iextra += 1
+            field, vtype = ('particle_extra_field_%i' % iextra, 'd')
+            particle_fields.append((field, vtype))
+
+            field_offsets[ptype, field] = f.tell()
+            _pfields[ptype, field] = vtype
+            fpu.skip(f, 1)
+
+        if iextra > 0 and not self.ds._warn_extra_fields:
+            self.ds._warn_extra_fields = True
+            w = ("Detected %s extra particle fields assuming kind "
+                 "`double`. Consider using the `extra_particle_fields` "
+                 "keyword argument if you have unexpected behavior.")
+            mylog.warning(w % iextra)
+
+        self.field_offsets = field_offsets
+        self.field_types = _pfields
+
+    @property
+    def has_part_descriptor(self):
+        '''
+        Does the output include particle file descriptor?
+        '''
+        return os.path.exists(self.file_descriptor)
+
+
+
+class SinkParticleFileHandler(ParticleFileHandler):
+    '''Handle sink files'''
+    ptype = 'sink'
+    fname = 'sink_{iout:05d}.out{icpu:05d}'
+
+    attrs = (('nsink', 1, 'I'),
+             ('nindsink', 1, 'I'))
+
+    known_fields = [
+        ("particle_identifier", "i"),
+        ("particle_mass", "d"),
+        ("particle_position_x", "d"),
+        ("particle_position_y", "d"),
+        ("particle_position_z", "d"),
+        ("particle_velocity_x", "d"),
+        ("particle_velocity_y", "d"),
+        ("particle_velocity_z", "d"),
+        ("particle_age", "d"),
+        ("BH_real_accretion", "d"),
+        ("BH_bondi_accretion", "d"),
+        ("BH_eddington_accretion", "d"),
+        ("BH_esave", "d"),
+        ("gas_spin_x", "d"),
+        ("gas_spin_y", "d"),
+        ("gas_spin_z", "d"),
+        ("BH_spin_x", "d"),
+        ("BH_spin_y", "d"),
+        ("BH_spin_z", "d"),
+        ("BH_spin", "d"),
+        ("BH_efficiency", "d")]
+
+    @classmethod
+    def any_exist(cls, ds):
+        files = os.path.join(
+            os.path.split(ds.parameter_filename)[0],
+            'sink_?????.out?????')
+        ret = len(glob.glob(files)) > 0
+        return ret
+
+    def read_header(self):
+        f = open(self.fname, "rb")
+        f.seek(0, os.SEEK_END)
+        flen = f.tell()
+        f.seek(0)
+        hvals = {}
+        # Read the header of the file
+        attrs = self.attrs
+        hvals.update(fpu.read_attrs(f, attrs))
+        self._header = hvals
+        self._sink_count = hvals['nsink']
+
+        # Read the fields + add the sink properties
+        fields = self.known_fields.copy()
+        for i in range(self.ds.dimensionality*2+1):
+            for j in range(self.ds.max_level, self.ds.min_level):
+                fields.append((
+                    "particle_prop_%s_%s" % (i, j), "d"
+                ))
+
+        field_offsets = {}
+        _pfields = {}
+
+        # Fill the fields, offsets and types
+        self.fields = []
+        for field, vtype in fields:
+            self.fields.append(field)
+            if f.tell() >= flen: break
+            field_offsets[self.ptype, field] = f.tell()
+            _pfields[self.ptype, field] = vtype
+            fpu.skip(f, 1)
+        self.field_offsets = field_offsets
+        self.field_types = _pfields
+
+register_particle_handler(DefaultParticleFileHandler)
+register_particle_handler(SinkParticleFileHandler)
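
As a note for readers tracking this new extension point: below is a
minimal, hypothetical sketch of a third-party handler built on the
class introduced above. The 'tracer' type, file pattern and field list
are illustrative only and are not part of this changeset; only
ParticleFileHandler and register_particle_handler come from the code
being added here.

import os
import glob

from yt.frontends.ramses.particle_handlers import (
    ParticleFileHandler, register_particle_handler)

class TracerParticleFileHandler(ParticleFileHandler):
    # Hypothetical handler for files named tracer_XXXXX.outYYYYY
    ptype = 'tracer'
    fname = 'tracer_{iout:05d}.out{icpu:05d}'
    attrs = (('ncpu', 1, 'I'),
             ('ndim', 1, 'I'),
             ('npart', 1, 'I'))
    known_fields = [
        ("particle_position_x", "d"),
        ("particle_position_y", "d"),
        ("particle_position_z", "d"),
        ("particle_identifier", "i")]

    @classmethod
    def any_exist(cls, ds):
        # Same globbing strategy as DefaultParticleFileHandler above
        files = os.path.join(
            os.path.split(ds.parameter_filename)[0],
            'tracer_?????.out?????')
        return len(glob.glob(files)) > 0

    def read_header(self):
        # A real implementation would parse the Fortran header and
        # fill self.field_offsets / self.field_types, exactly as
        # DefaultParticleFileHandler.read_header does above.
        raise NotImplementedError

register_particle_handler(TracerParticleFileHandler)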


https://bitbucket.org/yt_analysis/yt/commits/96b8b1f6f5c8/
Changeset:   96b8b1f6f5c8
User:        Corentin Cadiou
Date:        2017-11-29 15:43:29+00:00
Summary:     remove comments
Affected #:  1 file

diff -r 730a5a2c4de144e67cf37884e35732727e164e25 -r 96b8b1f6f5c802f9ae165a7aeeca8ff623b5ce76 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -157,79 +157,6 @@
         fpu.skip(f, 1)
         self.nvar = fpu.read_vector(f, "i")[0]
 
-    # def _read_particle_header(self):
-    #     if not os.path.exists(self.part_fn):
-    #         self.local_particle_count = 0
-    #         self.particle_field_offsets = {}
-    #         return
-    #     f = open(self.part_fn, "rb")
-    #     f.seek(0, os.SEEK_END)
-    #     flen = f.tell()
-    #     f.seek(0)
-    #     hvals = {}
-    #     attrs = ( ('ncpu', 1, 'I'),
-    #               ('ndim', 1, 'I'),
-    #               ('npart', 1, 'I') )
-    #     hvals.update(fpu.read_attrs(f, attrs))
-    #     fpu.read_vector(f, 'I')
-
-    #     attrs = ( ('nstar_tot', 1, 'I'),
-    #               ('mstar_tot', 1, 'd'),
-    #               ('mstar_lost', 1, 'd'),
-    #               ('nsink', 1, 'I') )
-    #     hvals.update(fpu.read_attrs(f, attrs))
-    #     self.particle_header = hvals
-    #     self.local_particle_count = hvals['npart']
-
-    #     particle_fields = [
-    #             ("particle_position_x", "d"),
-    #             ("particle_position_y", "d"),
-    #             ("particle_position_z", "d"),
-    #             ("particle_velocity_x", "d"),
-    #             ("particle_velocity_y", "d"),
-    #             ("particle_velocity_z", "d"),
-    #             ("particle_mass", "d"),
-    #             ("particle_identifier", "i"),
-    #             ("particle_refinement_level", "I")]
-
-    #     if self.ds._extra_particle_fields is not None:
-    #         particle_fields += self.ds._extra_particle_fields
-
-    #     field_offsets = {}
-    #     _pfields = {}
-
-    #     ptype = 'io'
-
-    #     # Read offsets
-    #     for field, vtype in particle_fields:
-    #         if f.tell() >= flen: break
-    #         field_offsets[ptype, field] = f.tell()
-    #         _pfields[ptype, field] = vtype
-    #         fpu.skip(f, 1)
-
-    #     iextra = 0
-    #     while f.tell() < flen:
-    #         iextra += 1
-    #         field, vtype = ('particle_extra_field_%i' % iextra, 'd')
-    #         particle_fields.append((field, vtype))
-
-    #         field_offsets[ptype, field] = f.tell()
-    #         _pfields[ptype, field] = vtype
-    #         fpu.skip(f, 1)
-
-    #     if iextra > 0 and not self.ds._warn_extra_fields:
-    #         self.ds._warn_extra_fields = True
-    #         w = ("Detected %s extra particle fields assuming kind "
-    #              "`double`. Consider using the `extra_particle_fields` "
-    #              "keyword argument if you have unexpected behavior.")
-    #         mylog.warning(w % iextra)
-
-    #     self.particle_field_offsets = field_offsets
-    #     self.particle_field_types = _pfields
-
-    #     # Register the particle type
-    #     self._add_ptype(ptype)
-
     def _read_amr_header(self):
         hvals = {}
         f = open(self.amr_fn, "rb")


https://bitbucket.org/yt_analysis/yt/commits/719557482f26/
Changeset:   719557482f26
User:        Corentin Cadiou
Date:        2017-11-29 16:22:53+00:00
Summary:     use meta class for registering
Affected #:  1 file

diff -r 96b8b1f6f5c802f9ae165a7aeeca8ff623b5ce76 -r 719557482f26dbf88c09d91bc4859a298fa73f44 yt/frontends/ramses/particle_handlers.py
--- a/yt/frontends/ramses/particle_handlers.py
+++ b/yt/frontends/ramses/particle_handlers.py
@@ -1,17 +1,29 @@
 import os
 import yt.utilities.fortran_utils as fpu
 import glob
+from yt.extern.six import add_metaclass
 from yt.funcs import mylog
 
 from .io import _read_part_file_descriptor
 
-PARTICLE_HANDLERS = []
+PARTICLE_HANDLERS = set()
 
 def get_particle_handlers():
     return PARTICLE_HANDLERS
 
 def register_particle_handler(ph):
-    PARTICLE_HANDLERS.append(ph)
+    PARTICLE_HANDLERS.add(ph)
+
+
+class RAMSESParticleFileHandlerRegister(type):
+    """
+    This is a base class that, on creation of a file handler class,
+    registers it into the handler registry. Used as a metaclass.
+    """
+    def __new__(meta, name, bases, class_dict):
+        cls = type.__new__(meta, name, bases, class_dict)
+        register_particle_handler(cls)
+        return cls
 
 
 class ParticleFileHandler(object):
@@ -62,6 +74,8 @@
         self.fields.'''
         raise NotImplementedError
 
+
+@add_metaclass(RAMSESParticleFileHandlerRegister)
 class DefaultParticleFileHandler(ParticleFileHandler):
     ptype = 'io'
     fname = 'part_{iout:05d}.out{icpu:05d}'
@@ -162,6 +176,7 @@
 
 
 
+@add_metaclass(RAMSESParticleFileHandlerRegister)
 class SinkParticleFileHandler(ParticleFileHandler):
     '''Handle sink files'''
     ptype = 'sink'
@@ -234,6 +249,3 @@
             fpu.skip(f, 1)
         self.field_offsets = field_offsets
         self.field_types = _pfields
-
-register_particle_handler(DefaultParticleFileHandler)
-register_particle_handler(SinkParticleFileHandler)


https://bitbucket.org/yt_analysis/yt/commits/47a432954ca2/
Changeset:   47a432954ca2
User:        Corentin Cadiou
Date:        2017-11-29 16:31:40+00:00
Summary:     define header on class level
Affected #:  1 file

diff -r 719557482f26dbf88c09d91bc4859a298fa73f44 -r 47a432954ca2a612a67fd2d328cd267c1ac6cb07 yt/frontends/ramses/particle_handlers.py
--- a/yt/frontends/ramses/particle_handlers.py
+++ b/yt/frontends/ramses/particle_handlers.py
@@ -83,7 +83,12 @@
 
     attrs = ( ('ncpu', 1, 'I'),
               ('ndim', 1, 'I'),
-              ('npart', 1, 'I') )
+              ('npart', 1, 'I'),
+              ('localseed', 4, 'I'),
+              ('nstar_tot', 1, 'I'),
+              ('mstar_tot', 1, 'd'),
+              ('mstar_lost', 1, 'd'),
+              ('nsink', 1, 'I') )
 
     known_fields = [
         ("particle_position_x", "d"),
@@ -111,16 +116,7 @@
         flen = f.tell()
         f.seek(0)
         hvals = {}
-        attrs = ( ('ncpu', 1, 'I'),
-                  ('ndim', 1, 'I'),
-                  ('npart', 1, 'I') )
-        hvals.update(fpu.read_attrs(f, attrs))
-        fpu.read_vector(f, 'I')
-
-        attrs = ( ('nstar_tot', 1, 'I'),
-                  ('mstar_tot', 1, 'd'),
-                  ('mstar_lost', 1, 'd'),
-                  ('nsink', 1, 'I') )
+        attrs = self.attrs
         hvals.update(fpu.read_attrs(f, attrs))
         self.header = hvals
         self.local_particle_count = hvals['npart']
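
For reference, each entry in these attrs tuples is a (name, length,
dtype) triple handed to fpu.read_attrs, with dtype characters following
the struct convention. A hedged sketch of how the class-level header
above is consumed (values illustrative):

# Each attribute is read from the Fortran file in order:
attrs = (('ncpu',      1, 'I'),   # one unsigned int
         ('localseed', 4, 'I'),   # four unsigned ints (RNG state)
         ('mstar_tot', 1, 'd'))   # one double
hvals = fpu.read_attrs(f, attrs)
# -> {'ncpu': ..., 'localseed': ..., 'mstar_tot': ...}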


https://bitbucket.org/yt_analysis/yt/commits/d702bd33142d/
Changeset:   d702bd33142d
User:        Corentin Cadiou
Date:        2017-11-29 16:35:36+00:00
Summary:     register on base class
Affected #:  1 file

diff -r 47a432954ca2a612a67fd2d328cd267c1ac6cb07 -r d702bd33142d8ba699962649254762404b1ed38e yt/frontends/ramses/particle_handlers.py
--- a/yt/frontends/ramses/particle_handlers.py
+++ b/yt/frontends/ramses/particle_handlers.py
@@ -22,14 +22,15 @@
     """
     def __new__(meta, name, bases, class_dict):
         cls = type.__new__(meta, name, bases, class_dict)
-        register_particle_handler(cls)
+        if cls.ptype is not None:
+            register_particle_handler(cls)
         return cls
 
 
+@add_metaclass(RAMSESParticleFileHandlerRegister)
 class ParticleFileHandler(object):
     '''Abstract class to handle particles in RAMSES.
 
-
     See `SinkParticleFileHandler` for an example implementation.'''
 
     ptype = None  # The name to give to the particle type
@@ -75,7 +76,6 @@
         raise NotImplementedError
 
 
-@add_metaclass(RAMSESParticleFileHandlerRegister)
 class DefaultParticleFileHandler(ParticleFileHandler):
     ptype = 'io'
     fname = 'part_{iout:05d}.out{icpu:05d}'
@@ -172,7 +172,6 @@
 
 
 
-@add_metaclass(RAMSESParticleFileHandlerRegister)
 class SinkParticleFileHandler(ParticleFileHandler):
     '''Handle sink files'''
     ptype = 'sink'
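
With registration now living on the base class, merely defining a
subclass with a non-None ptype registers it. A hedged sketch of the
effect (class body illustrative):

from yt.frontends.ramses.particle_handlers import (
    ParticleFileHandler, get_particle_handlers)

class MyParticleFileHandler(ParticleFileHandler):
    # The metaclass fires at class-creation time; since ptype is not
    # None, the class lands in the PARTICLE_HANDLERS set automatically.
    ptype = 'my_type'
    fname = 'my_type_{iout:05d}.out{icpu:05d}'

assert MyParticleFileHandler in get_particle_handlers()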


https://bitbucket.org/yt_analysis/yt/commits/e7930e75a62e/
Changeset:   e7930e75a62e
User:        Corentin Cadiou
Date:        2017-11-30 13:10:21+00:00
Summary:     Update baseclass doc
Affected #:  1 file

diff -r d702bd33142d8ba699962649254762404b1ed38e -r e7930e75a62eedf607b941cdafdf16b5eccc676e yt/frontends/ramses/particle_handlers.py
--- a/yt/frontends/ramses/particle_handlers.py
+++ b/yt/frontends/ramses/particle_handlers.py
@@ -29,7 +29,12 @@
 
 @add_metaclass(RAMSESParticleFileHandlerRegister)
 class ParticleFileHandler(object):
-    '''Abstract class to handle particles in RAMSES.
+    '''
+    Abstract class to handle particles in RAMSES. Each instance
+    represents a single file (one domain).
+
+    To add support to a new particle file, inherit from this class and
+    implement all functions containing a `NotImplementedError`.
 
     See `SinkParticleFileHandler` for an example implementation.'''
 
@@ -39,18 +44,16 @@
     attrs = None  # The attributes of the header
     known_fields = None  # A list of tuple containing the field name and its type
 
-    @classmethod
-    def any_exist(cls, ds):
-        '''Return True if any file of this type is found
-
-        Arguments
-        ---------
-        * ds: a Ramses Dataset
-        '''
-        raise NotImplementedError
-
 
     def __init__(self, ds, domain_id):
+        '''
+        Initalize an instance of the class. This automatically sets
+        the full path to the file. This is not intended to be
+        overriden in most cases.
+
+        If you need more flexibility, rewrite this function to your
+        need in the inherited class.
+        '''
         self.ds = ds
         basename = os.path.abspath(
               os.path.dirname(ds.parameter_filename))
@@ -65,14 +68,51 @@
 
     @property
     def exists(self):
-        '''Return True if the fname exists'''
+        '''
+        This function should return True if the *file* the instance
+        represents exists. It is called for each file of the type
+        found on the disk.
+
+        By default, it just returns whether the file exists. Override
+        it for more complex behavior.
+        '''
         return os.path.exists(self.fname)
 
+    @classmethod
+    def any_exist(cls, ds):
+        '''
+        This function should return True if the kind of particle
+        represented by the class exists in the dataset. It takes as
+        argument the class itself —not an instance— and a dataset.
+
+        Arguments
+        ---------
+        * ds: a Ramses Dataset
+
+        Note
+        ----
+        This function is usually called once at the initialization of
+        the RAMSES Dataset structure to determine if the particle type
+        (e.g. regular particles) exists.
+        '''
+        raise NotImplementedError
+
+
     def read_header(self):
         '''
-        This function should read the header, compute the offsets of
-        the file into self.offsets and store the fields found in
-        self.fields.'''
+        This function is called once per file. It should:
+        * read the header of the file and store any relevant information
+        * detect the fields in the file
+        * compute the offsets (location in the file) of each field
+
+        It is in charge of setting `self.field_offsets` and `self.field_types`.
+        * `field_offsets`: dictionary: tuple -> integer
+           A dictionary that maps `(type, field_name)` to their
+           location in the file (integer)
+        * `field_types`: dictionary: tuple -> character
+           A dictionary that maps `(type, field_name)` to their type
+           (character), following Python's struct convention.
+z        '''
         raise NotImplementedError
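
Concretely, the contract documented above means a successful
read_header() leaves behind mappings shaped like the following (byte
offsets illustrative):

# Hypothetical state after read_header() on an 'io' particle file:
field_offsets = {
    ('io', 'particle_position_x'): 86,     # location in the file
    ('io', 'particle_position_y'): 8130,
}
field_types = {
    ('io', 'particle_position_x'): 'd',    # struct convention: double
    ('io', 'particle_position_y'): 'd',
}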
 
 


https://bitbucket.org/yt_analysis/yt/commits/631166cdb146/
Changeset:   631166cdb146
User:        Corentin Cadiou
Date:        2017-11-30 15:49:11+00:00
Summary:     more doc
Affected #:  1 file

diff -r e7930e75a62eedf607b941cdafdf16b5eccc676e -r 631166cdb146767e3de79af9b703b8dff3e175c2 yt/frontends/ramses/particle_handlers.py
--- a/yt/frontends/ramses/particle_handlers.py
+++ b/yt/frontends/ramses/particle_handlers.py
@@ -38,12 +38,16 @@
 
     See `SinkParticleFileHandler` for an example implementation.'''
 
+    # These properties are static properties
     ptype = None  # The name to give to the particle type
     fname = None  # The name of the file(s).
-
     attrs = None  # The attributes of the header
     known_fields = None  # A list of tuples containing the field name and its type
 
+    # These properties are computed dynamically
+    field_offsets = None     # Mapping from field to offset in file
+    field_types = None       # Mapping from field to the type of the data (float, integer, …)
+    local_particle_count = None  # The number of particles in the domain
 
     def __init__(self, ds, domain_id):
         '''
@@ -55,6 +59,7 @@
         needs in the inherited class.
         '''
         self.ds = ds
+        self.domain_id = domain_id
         basename = os.path.abspath(
               os.path.dirname(ds.parameter_filename))
         iout = int(
@@ -62,6 +67,7 @@
             .split(".")[0].
             split("_")[1])
         icpu = domain_id
+
         self.fname = os.path.join(
             basename,
             self.fname.format(iout=iout, icpu=icpu))
@@ -74,7 +80,7 @@
         disk.
 
         By default, it just returns whether the file exists. Override
-        it for more complex behavior.
+        it for more complex cases.
         '''
         return os.path.exists(self.fname)
 
@@ -112,7 +118,7 @@
         * `field_types`: dictionary: tuple -> character
            A dictionary that maps `(type, field_name)` to their type
            (character), following Python's struct convention.
-z        '''
+        '''
         raise NotImplementedError
 
 


https://bitbucket.org/yt_analysis/yt/commits/e7975286d272/
Changeset:   e7975286d272
User:        Corentin Cadiou
Date:        2017-11-30 15:52:59+00:00
Summary:     count number of particles of each type
Affected #:  2 files

diff -r 631166cdb146767e3de79af9b703b8dff3e175c2 -r e7975286d272597b6d98818ff8dcb51448a2715e yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -470,10 +470,15 @@
 
     def _get_particle_type_counts(self):
         npart = 0
+        npart = {k: 0 for k in self.ds.particle_types}
+        do_all = 'all' in self.ds.particle_types
         for dom in self.domains:
-            npart += dom.local_particle_count
+            for fh in dom.particle_handlers:
+                count = fh.local_particle_count
+                npart[fh.ptype] += count
+                if do_all: npart['all'] += count
 
-        return {'io': npart}
+        return npart
 
     def print_stats(self):
 

diff -r 631166cdb146767e3de79af9b703b8dff3e175c2 -r e7975286d272597b6d98818ff8dcb51448a2715e yt/frontends/ramses/particle_handlers.py
--- a/yt/frontends/ramses/particle_handlers.py
+++ b/yt/frontends/ramses/particle_handlers.py
@@ -157,6 +157,11 @@
         return ret
 
     def read_header(self):
+        if not self.exists:
+            self.field_offsets = {}
+            self.field_types = {}
+            self.local_particle_count = 0
+            return
         f = open(self.fname, "rb")
         f.seek(0, os.SEEK_END)
         flen = f.tell()
@@ -258,6 +263,11 @@
         return ret
 
     def read_header(self):
+        if not self.exists:
+            self.field_offsets = {}
+            self.field_types = {}
+            self.local_particle_count = 0
+            return
         f = open(self.fname, "rb")
         f.seek(0, os.SEEK_END)
         flen = f.tell()
@@ -265,9 +275,10 @@
         hvals = {}
         # Read the header of the file
         attrs = self.attrs
+
         hvals.update(fpu.read_attrs(f, attrs))
         self._header = hvals
-        self._sink_count = hvals['nsink']
+        self.local_particle_count = hvals['nsink']
 
         # Read the fields + add the sink properties
         fields = self.known_fields.copy()


https://bitbucket.org/yt_analysis/yt/commits/2b07bc8c5213/
Changeset:   2b07bc8c5213
User:        Corentin Cadiou
Date:        2017-11-30 15:56:27+00:00
Summary:     also check particle counts
Affected #:  1 file

diff -r e7975286d272597b6d98818ff8dcb51448a2715e -r 2b07bc8c52131c8e40eb5bda0952bc0e4f47a338 yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -171,7 +171,6 @@
 
 ramses_sink = "ramses_sink_00016/output_00016/info_00016.txt"
 @requires_file(ramses_sink)
-@requires_file(ramsesNonCosmo)
 def test_ramses_sink():
     expected_fields = ["BH_bondi_accretion", "BH_eddington_accretion",
                        "BH_efficiency", "BH_esave",
@@ -205,7 +204,6 @@
     for field in expected_fields:
         assert(('sink', field) not in ds.field_list)
 
-
 ramses_new_format = "ramses_new_format/output_00002/info_00002.txt"
 @requires_file(ramses_new_format)
 def test_new_format():
@@ -234,3 +232,12 @@
     assert(all(ad['star', 'particle_family'] == 2))
     assert(all(ad['star', 'particle_tag'] == 0))
     assert(len(ad['star', 'particle_tag']) == 600)
+
+@requires_file(ramses_sink)
+def test_ramses_part_count():
+    ds = yt.load(ramses_sink)
+    pcount = ds.particle_type_counts
+
+    assert_equal(pcount['all'], 17140, err_msg='Got wrong number of particles')
+    assert_equal(pcount['io'], 17132, err_msg='Got wrong number of io particles')
+    assert_equal(pcount['sink'], 8, err_msg='Got wrong number of sink particles')
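
The counts checked here surface through the standard
ds.particle_type_counts property; a hedged usage sketch, with the
numbers taken from the test expectations above:

import yt

ds = yt.load("ramses_sink_00016/output_00016/info_00016.txt")
print(ds.particle_type_counts)
# -> {'io': 17132, 'sink': 8, 'all': 17140}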


https://bitbucket.org/yt_analysis/yt/commits/ca552d4c2a77/
Changeset:   ca552d4c2a77
User:        Corentin Cadiou
Date:        2017-11-30 16:35:18+00:00
Summary:     skip reading for files containing no information
Affected #:  1 file

diff -r 2b07bc8c52131c8e40eb5bda0952bc0e4f47a338 -r ca552d4c2a777b0258722c549407af14f3553a7d yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -34,7 +34,7 @@
     from cStringIO import StringIO as IO
 
 def _ramses_particle_file_handler(fname, foffsets, data_types,
-                                  subset, fields):
+                                  subset, fields, count):
     '''General file handler, called by _read_particle_subset
 
     Parameters
@@ -44,11 +44,13 @@
     foffsets: dict
         Offsets in file of the fields
     data_types: dict
-         Data type of the fields
+        Data type of the fields
     subset: ``RAMSESDomainSubset``
-         A RAMSES domain subset object
+        A RAMSES domain subset object
     fields: list of tuple
-         The fields to read
+        The fields to read
+    count: integer
+        The number of elements to read
     '''
     tr = {}
     with open(fname, "rb") as f:
@@ -56,6 +58,9 @@
         # This means that no other conversions need to be applied to convert
         # positions into the same domain as the octs themselves.
         for field in sorted(fields, key=lambda a: foffsets[a]):
+            if count == 0:
+                tr[field] = np.empty(0, dtype=data_types[field])
+                continue
             f.seek(foffsets[field])
             dt = data_types[field]
             tr[field] = fpu.read_vector(f, dt)
@@ -154,12 +159,15 @@
                     foffsets = ph.field_offsets
                     data_types = ph.field_types
                     ok = True
+                    count = ph.local_particle_count
                     break
             if not ok:
                 raise YTFieldTypeNotFound(ptype)
 
             tr.update(_ramses_particle_file_handler(
-                fname, foffsets, data_types, subset, subs_fields))
+                fname, foffsets, data_types, subset, subs_fields,
+                count=count
+            ))
 
         return tr
 


https://bitbucket.org/yt_analysis/yt/commits/c442a38e360a/
Changeset:   c442a38e360a
User:        Corentin Cadiou
Date:        2017-11-30 16:35:33+00:00
Summary:     Read only once sinks

In RAMSES, all sinks are dumped by each CPU. This results in a lot of
data duplication. One result was that sinks were loaded ncpu
times by yt. Using this trick, only one sink domain will be read.
Affected #:  1 file

diff -r ca552d4c2a777b0258722c549407af14f3553a7d -r c442a38e360acdc9ef3cd7fa35a3c4f3fd523801 yt/frontends/ramses/particle_handlers.py
--- a/yt/frontends/ramses/particle_handlers.py
+++ b/yt/frontends/ramses/particle_handlers.py
@@ -278,7 +278,16 @@
 
         hvals.update(fpu.read_attrs(f, attrs))
         self._header = hvals
-        self.local_particle_count = hvals['nsink']
+
+        # This is something of a trick: we only want one domain to
+        # be read, as RAMSES writes all the sinks in all the
+        # domains. Here, we set the local_particle_count to 0 except
+        # for the first domain to be read.
+        if getattr(self.ds, '_sink_file_flag', False):
+            self.local_particle_count = 0
+        else:
+            self.ds._sink_file_flag = True
+            self.local_particle_count = hvals['nsink']
 
         # Read the fields + add the sink properties
         fields = self.known_fields.copy()


https://bitbucket.org/yt_analysis/yt/commits/018386999add/
Changeset:   018386999add
User:        Corentin Cadiou
Date:        2017-11-30 16:39:46+00:00
Summary:     use debug instead of info
Affected #:  1 file

diff -r c442a38e360acdc9ef3cd7fa35a3c4f3fd523801 -r 018386999add31f8b6fbdf3e716d403c8fa031a9 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -77,7 +77,7 @@
                              if PH.any_exist(self.ds)]
         self.particle_handlers = particle_handlers
         for ph in particle_handlers:
-            mylog.info('Detected particle type %s in domain_id=%s' % (ph.ptype, domain_id))
+            mylog.debug('Detected particle type %s in domain_id=%s' % (ph.ptype, domain_id))
             ph.read_header()
             self._add_ptype(ph.ptype)
 


https://bitbucket.org/yt_analysis/yt/commits/05cc24839d5e/
Changeset:   05cc24839d5e
User:        Corentin Cadiou
Date:        2017-11-30 23:35:33+00:00
Summary:     Make the file descriptor great again
Affected #:  1 file

diff -r 018386999add31f8b6fbdf3e716d403c8fa031a9 -r 05cc24839d5e8859f3f036c47462859b4eab07fa yt/frontends/ramses/particle_handlers.py
--- a/yt/frontends/ramses/particle_handlers.py
+++ b/yt/frontends/ramses/particle_handlers.py
@@ -41,6 +41,8 @@
     # These properties are static properties
     ptype = None  # The name to give to the particle type
     fname = None  # The name of the file(s).
+    file_descriptor = None # The name of the file descriptor (if any)
+
     attrs = None  # The attributes of the header
     known_fields = None  # A list of tuple containing the field name and its type
 
@@ -72,6 +74,11 @@
             basename,
             self.fname.format(iout=iout, icpu=icpu))
 
+        if self.file_descriptor is not None:
+            self.file_descriptor = os.path.join(
+                basename,
+                self.file_descriptor)
+
     @property
     def exists(self):
         '''


https://bitbucket.org/yt_analysis/yt/commits/2a1b88b81830/
Changeset:   2a1b88b81830
User:        Corentin Cadiou
Date:        2017-11-30 23:40:17+00:00
Summary:     generic file descriptor
Affected #:  1 file

diff -r 05cc24839d5e8859f3f036c47462859b4eab07fa -r 2a1b88b818304b23803316dc926dbb36247f3b3b yt/frontends/ramses/particle_handlers.py
--- a/yt/frontends/ramses/particle_handlers.py
+++ b/yt/frontends/ramses/particle_handlers.py
@@ -91,6 +91,19 @@
         '''
         return os.path.exists(self.fname)
 
+    @property
+    def has_part_descriptor(self):
+        '''
+        This function should return True if a *file descriptor*
+        exists.
+
+        By default, it just returns whether the file exists. Override
+        it for more complex cases.
+        '''
+        return os.path.exists(self.file_descriptor)
+
+
+
     @classmethod
     def any_exist(cls, ds):
         '''
@@ -221,19 +234,12 @@
         self.field_offsets = field_offsets
         self.field_types = _pfields
 
-    @property
-    def has_part_descriptor(self):
-        '''
-        Does the output include particle file descriptor?
-        '''
-        return os.path.exists(self.file_descriptor)
-
-
 
 class SinkParticleFileHandler(ParticleFileHandler):
     '''Handle sink files'''
     ptype = 'sink'
     fname = 'sink_{iout:05d}.out{icpu:05d}'
+    file_descriptor = 'sink_file_descriptor.txt'
 
     attrs = (('nsink', 1, 'I'),
              ('nindsink', 1, 'I'))
@@ -297,6 +303,13 @@
             self.local_particle_count = hvals['nsink']
 
         # Read the fields + add the sink properties
+        if self.has_part_descriptor:
+            fields = (
+                _read_part_file_descriptor(self.file_descriptor)
+            )
+        else:
+            fields = self.known_fields.copy()
+
         fields = self.known_fields.copy()
         for i in range(self.ds.dimensionality*2+1):
             for j in range(self.ds.max_level, self.ds.min_level):


https://bitbucket.org/yt_analysis/yt/commits/04244a443ab1/
Changeset:   04244a443ab1
User:        Corentin Cadiou
Date:        2017-12-05 15:41:52+00:00
Summary:     Merge branch 'master' into refactoring
Affected #:  4 files

diff -r 2a1b88b818304b23803316dc926dbb36247f3b3b -r 04244a443ab131eaf9ce0cf8b2e851324f7a9b0f doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -2004,8 +2004,8 @@
 yt will attempt to guess the fields in the file. For more control over the hydro fields or the particle fields, see :ref:`loading-ramses-data-args`.
 
 yt also supports the new way particles are handled, introduced after
-version `stable_17_09` (the version introduced after the 2017 Ramses
-User Meeting). In this case, the file `part_file_descriptor.txt`
+version ``stable_17_09`` (the version introduced after the 2017 Ramses
+User Meeting). In this case, the file ``part_file_descriptor.txt``
 containing the different fields in the particle files will be read. If
 you use a custom version of RAMSES, make sure this file is up-to-date
 and reflects the true layout of the particles.
@@ -2108,7 +2108,7 @@
 convention from
 `python struct module <https://docs.python.org/3.5/library/struct.html#format-characters>`_.
 For example, to add support for a longint field named
-`my_custom_field`, one would add `('my_custom_field', 'l')` to `assoc`.
+``my_custom_field``, one would add ``('my_custom_field', 'l')`` to ``assoc``.
 
 
 .. _loading-sph-data:
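
As a companion to the documentation above, a hedged sketch of the
extra_particle_fields keyword it refers to (output path hypothetical;
dtype characters follow the struct convention, e.g. 'd' for double and
'l' for longint):

import yt

ds = yt.load(
    "output_00080/info_00080.txt",
    extra_particle_fields=[("particle_birth_time", "d"),
                           ("particle_metallicity", "d")])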

diff -r 2a1b88b818304b23803316dc926dbb36247f3b3b -r 04244a443ab131eaf9ce0cf8b2e851324f7a9b0f yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -171,12 +171,14 @@
 
         return tr
 
-VERSION_RE = re.compile('# version: *(\d+)')
-VAR_DESC_RE = re.compile(r'\s*(\d+),\s*(\w+),\s*(\w+)')
+
 def _read_part_file_descriptor(fname):
     """
     Read the particle file descriptor and returns the array of the fields found.
     """
+    VERSION_RE = re.compile('# version: *(\d+)')
+    VAR_DESC_RE = re.compile(r'\s*(\d+),\s*(\w+),\s*(\w+)')
+
     # Mapping
     mapping = [
         ('position_x', 'particle_position_x'),
@@ -195,7 +197,6 @@
     mapping = {k: v for k, v in mapping}
 
     with open(fname, 'r') as f:
-        f = open(fname, 'r')
         line = f.readline()
         tmp = VERSION_RE.match(line)
         mylog.info('Reading part file descriptor.')

diff -r 2a1b88b818304b23803316dc926dbb36247f3b3b -r 04244a443ab131eaf9ce0cf8b2e851324f7a9b0f yt/frontends/ramses/particle_handlers.py
--- a/yt/frontends/ramses/particle_handlers.py
+++ b/yt/frontends/ramses/particle_handlers.py
@@ -102,8 +102,6 @@
         '''
         return os.path.exists(self.file_descriptor)
 
-
-
     @classmethod
     def any_exist(cls, ds):
         '''
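
For context, the part_file_descriptor.txt parsed by the regexes moved
in this changeset looks roughly like the following (a hedged
reconstruction; rows illustrative). The first line must match
VERSION_RE; each subsequent row must match VAR_DESC_RE, and raw names
such as position_x are translated through the mapping above to
particle_position_x:

# version: 1
  1, position_x, d
  2, position_y, d
  3, position_z, d
  4, identity, i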


https://bitbucket.org/yt_analysis/yt/commits/7c8eb02f8a03/
Changeset:   7c8eb02f8a03
User:        Corentin Cadiou
Date:        2017-12-03 13:08:05+00:00
Summary:     Add field handler file
Affected #:  1 file

diff -r 04244a443ab131eaf9ce0cf8b2e851324f7a9b0f -r 7c8eb02f8a03d070c284a562adb9826697f11054 yt/frontends/ramses/field_handlers.py
--- /dev/null
+++ b/yt/frontends/ramses/field_handlers.py
@@ -0,0 +1,209 @@
+import os
+import yt.utilities.fortran_utils as fpu
+import glob
+from yt.extern.six import add_metaclass
+from yt.funcs import mylog
+
+FIELD_HANDLERS = set()
+
+def get_field_handlers():
+    return FIELD_HANDLERS
+
+def register_field_handler(ph):
+    FIELD_HANDLERS.add(ph)
+
+
+class RAMSESFieldFileHandlerRegister(type):
+    """
+    This is a base class that, on creation of a file handler class,
+    registers it into the handler registry. Used as a metaclass.
+    """
+    def __new__(meta, name, bases, class_dict):
+        cls = type.__new__(meta, name, bases, class_dict)
+        if cls.ftype is not None:
+            register_field_handler(cls)
+        return cls
+
+
+ at add_metaclass(RAMSESFieldFileHandlerRegister)
+class FieldFileHandler(object):
+    '''
+    Abstract class to handle fluid-field files in RAMSES. Each instance
+    represents a single file (one domain).
+
+    To add support for a new kind of field file, inherit from this class
+    and implement all functions containing a `NotImplementedError`.
+
+    See `HydroFieldFileHandler` for an example implementation.'''
+
+    # These properties are static properties
+    ftype = None  # The name to give to the field type
+    fname = None  # The name of the file(s).
+    attrs = None  # The attributes of the header
+    known_fields = None  # A list of tuples containing the field name and its type
+
+    # These properties are computed dynamically
+    field_offsets = None     # Mapping from field to offset in file
+    field_types = None       # Mapping from field to the type of the data (float, integer, …)
+
+    def __init__(self, ds, domain_id):
+        '''
+        Initialize an instance of the class. This automatically sets
+        the full path to the file. It is not intended to be
+        overridden in most cases.
+
+        If you need more flexibility, rewrite this function to your
+        needs in the inherited class.
+        '''
+        self.ds = ds
+        self.domain_id = domain_id
+        basename = os.path.abspath(
+              os.path.dirname(ds.parameter_filename))
+        iout = int(
+            os.path.basename(ds.parameter_filename)
+            .split(".")[0].
+            split("_")[1])
+        icpu = domain_id
+
+        self.fname = os.path.join(
+            basename,
+            self.fname.format(iout=iout, icpu=icpu))
+
+    @property
+    def exists(self):
+        '''
+        This function should return True if the *file* the instance
+        represents exists. It is called for each file of the type
+        found on the disk.
+
+        By default, it just returns whether the file exists. Override
+        it for more complex cases.
+        '''
+        return os.path.exists(self.fname)
+
+    @classmethod
+    def any_exist(cls, ds):
+        '''
+        This function should return True if the kind of field
+        represented by the class exists in the dataset. It takes as
+        argument the class itself —not an instance— and a dataset.
+
+        Arguments
+        ---------
+        * ds: a Ramses Dataset
+
+        Note
+        ----
+        This function is usually called once at the initialization of
+        the RAMSES Dataset structure to determine if the field type
+        (e.g. hydro) exists.
+        '''
+        raise NotImplementedError
+
+    @classmethod
+    def get_field_list(cls, ds):
+        raise NotImplementedError
+
+
+    def read_header(self):
+        '''
+        This function is called once per file. It should:
+        * read the header of the file and store any relevant information
+        * detect the fields in the file
+        * compute the offsets (location in the file) of each field
+
+        It is in charge of setting `self.field_offsets` and `self.field_types`.
+        * `field_offsets`: dictionary: tuple -> integer
+           A dictionary that maps `(type, field_name)` to their
+           location in the file (integer)
+        * `field_types`: dictionary: tuple -> character
+           A dictionary that maps `(type, field_name)` to their type
+           (character), following Python's struct convention.
+        '''
+        raise NotImplementedError
+
+
+class HydroFieldFileHandler(FieldFileHandler):
+    ftype = 'ramses'
+    fname = 'hydro_{iout:05d}.out{icpu:05d}'
+    attrs = ( ('ncpu', 1, 'i'),
+              ('nvar', 1, 'i'),
+              ('ndim', 1, 'i'),
+              ('nlevelmax', 1, 'i'),
+              ('nboundary', 1, 'i'),
+              ('gamma', 1, 'd'))
+
+    @classmethod
+    def any_exist(cls, ds):
+        files = os.path.join(
+            os.path.split(ds.parameter_filename)[0],
+            'hydro_?????.out?????')
+        ret = len(glob.glob(files)) > 0
+        return ret
+
+    @classmethod
+    def get_field_list(cls, ds):
+        num = os.path.basename(ds.parameter_filename).split("."
+                )[0].split("_")[1]
+        testdomain = 1 # Just pick the first domain file to read
+        basename = "%s/%%s_%s.out%05i" % (
+            os.path.abspath(
+              os.path.dirname(ds.parameter_filename)),
+            num, testdomain)
+        fname = basename % "hydro"
+
+        if not os.path.exists(fname):
+            cls.fluid_field_list = []
+            return
+
+        f = open(fname, 'rb')
+        attrs = cls.attrs
+        hvals = fpu.read_attrs(f, attrs)
+
+        # Store some metadata
+        ds.gamma = hvals['gamma']
+        nvar = cls.nvar = hvals['nvar']
+
+        foldername  = os.path.abspath(os.path.dirname(ds.parameter_filename))
+        rt_flag = any(glob.glob(os.sep.join([foldername, 'info_rt_*.txt'])))
+        if rt_flag: # rt run
+            if nvar < 10:
+                mylog.info('Detected RAMSES-RT file WITHOUT IR trapping.')
+                fields = ["Density", "x-velocity", "y-velocity", "z-velocity", "Pressure",
+                          "Metallicity", "HII", "HeII", "HeIII"]
+            else:
+                mylog.info('Detected RAMSES-RT file WITH IR trapping.')
+                fields = ["Density", "x-velocity", "y-velocity", "z-velocity", "Pres_IR",
+                          "Pressure", "Metallicity", "HII", "HeII", "HeIII"]
+        else:
+            if nvar < 5:
+                mylog.debug("nvar=%s is too small! YT doesn't currently support 1D/2D runs in RAMSES %s")
+                raise ValueError
+            # Basic hydro runs
+            if nvar == 5:
+                fields = ["Density",
+                          "x-velocity", "y-velocity", "z-velocity",
+                          "Pressure"]
+            if nvar > 5 and nvar < 11:
+                fields = ["Density",
+                          "x-velocity", "y-velocity", "z-velocity",
+                          "Pressure", "Metallicity"]
+            # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
+            if nvar == 11:
+                fields = ["Density",
+                          "x-velocity", "y-velocity", "z-velocity",
+                          "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
+                          "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
+                          "Pressure"]
+            if nvar > 11:
+                fields = ["Density",
+                          "x-velocity", "y-velocity", "z-velocity",
+                          "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
+                          "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
+                          "Pressure","Metallicity"]
+        # Allow some wiggle room for users to add too many variables
+        while len(fields) < nvar:
+            fields.append("var"+str(len(fields)))
+        mylog.debug("No fields specified by user; automatically setting fields array to %s", str(fields))
+        cls.fluid_field_list = fields
+        return fields
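
To summarize the detection logic above: the returned field list is
keyed off nvar and the presence of info_rt_*.txt files. A hedged usage
sketch (ds stands for an already-loaded RAMSES dataset):

from yt.frontends.ramses.field_handlers import HydroFieldFileHandler

# Reads the header of the first hydro domain file, stores ds.gamma and
# cls.nvar, then returns e.g. for nvar == 6:
# ['Density', 'x-velocity', 'y-velocity', 'z-velocity',
#  'Pressure', 'Metallicity']
fields = HydroFieldFileHandler.get_field_list(ds)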


https://bitbucket.org/yt_analysis/yt/commits/71f2216c813c/
Changeset:   71f2216c813c
User:        Corentin Cadiou
Date:        2017-12-03 14:34:03+00:00
Summary:     WIP
Affected #:  3 files

diff -r 7c8eb02f8a03d070c284a562adb9826697f11054 -r 71f2216c813c83dbd472f67b7816b82740759125 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -41,6 +41,7 @@
     RAMSESFieldInfo, _X
 from .hilbert import get_cpu_list
 from .particle_handlers import get_particle_handlers
+from .field_handlers import get_field_handlers
 import yt.utilities.fortran_utils as fpu
 from yt.geometry.oct_container import \
     RAMSESOctreeContainer
@@ -65,16 +66,26 @@
         basename = "%s/%%s_%s.out%05i" % (
             basedir, num, domain_id)
         part_file_descriptor = "%s/part_file_descriptor.txt" % basedir
-        for t in ['grav', 'hydro', 'amr']:
+        for t in ['grav', 'amr']:
             setattr(self, "%s_fn" % t, basename % t)
         self._part_file_descriptor = part_file_descriptor
         self._read_amr_header()
-        self._read_hydro_header()
+        # self._read_hydro_header()
+
+        # Autodetect field files
+        field_handlers = [FH(self)
+                          for FH in get_field_handlers()
+                          if FH.any_exist(ds)]
+        self.field_handlers = field_handlers
+        for fh in field_handlers:
+            mylog.debug('Detected field type %s in domain_id=%s' % (fh.ftype, domain_id))
+            fh.detect_fields(ds)
+            # self._add_ftype(fh.ftype)
 
         # Autodetect particle files
         particle_handlers = [PH(ds, domain_id)
                              for PH in get_particle_handlers()
-                             if PH.any_exist(self.ds)]
+                             if PH.any_exist(ds)]
         self.particle_handlers = particle_handlers
         for ph in particle_handlers:
             mylog.debug('Detected particle type %s in domain_id=%s' % (ph.ptype, domain_id))
@@ -90,54 +101,59 @@
     def __repr__(self):
         return "RAMSESDomainFile: %i" % self.domain_id
 
-    @property
-    def _has_hydro(self):
-        '''
-        Does the output include hydro?
-        '''
-        return os.path.exists(self.hydro_fn)
+    # @property
+    # def _has_hydro(self):
+    #     '''
+    #     Does the output include hydro?
+    #     '''
+    #     return os.path.exists(self.hydro_fn)
 
     @property
     def level_count(self):
-        if self._level_count is not None: return self._level_count
-        self.hydro_offset
-        return self._level_count
+        lvl_count = None
+        for fh in self.field_handlers:
+            fh.offset
+            if lvl_count is None:
+                lvl_count = fh.level_count.copy()
+            else:
+                lvl_count += fh._level_count
+        return lvl_count
 
-    @property
-    def hydro_offset(self):
-        if self._hydro_offset is not None: return self._hydro_offset
-        # We now have to open the file and calculate it
-        f = open(self.hydro_fn, "rb")
-        fpu.skip(f, 6)
-        # It goes: level, CPU, 8-variable
-        min_level = self.ds.min_level
-        n_levels = self.amr_header['nlevelmax'] - min_level
-        hydro_offset = np.zeros(n_levels, dtype='int64')
-        hydro_offset -= 1
-        level_count = np.zeros(n_levels, dtype='int64')
-        skipped = []
-        for level in range(self.amr_header['nlevelmax']):
-            for cpu in range(self.amr_header['nboundary'] +
-                             self.amr_header['ncpu']):
-                header = ( ('file_ilevel', 1, 'I'),
-                           ('file_ncache', 1, 'I') )
-                try:
-                    hvals = fpu.read_attrs(f, header, "=")
-                except AssertionError:
-                    print("You are running with the wrong number of fields.")
-                    print("If you specified these in the load command, check the array length.")
-                    print("In this file there are %s hydro fields." % skipped)
-                    #print"The last set of field sizes was: %s" % skipped
-                    raise
-                if hvals['file_ncache'] == 0: continue
-                assert(hvals['file_ilevel'] == level+1)
-                if cpu + 1 == self.domain_id and level >= min_level:
-                    hydro_offset[level - min_level] = f.tell()
-                    level_count[level - min_level] = hvals['file_ncache']
-                skipped = fpu.skip(f, 8 * self.nvar)
-        self._hydro_offset = hydro_offset
-        self._level_count = level_count
-        return self._hydro_offset
+    # @property
+    # def hydro_offset(self):
+    #     if self._hydro_offset is not None: return self._hydro_offset
+    #     # We now have to open the file and calculate it
+    #     f = open(self.hydro_fn, "rb")
+    #     fpu.skip(f, 6)
+    #     # It goes: level, CPU, 8-variable
+    #     min_level = self.ds.min_level
+    #     n_levels = self.amr_header['nlevelmax'] - min_level
+    #     hydro_offset = np.zeros(n_levels, dtype='int64')
+    #     hydro_offset -= 1
+    #     level_count = np.zeros(n_levels, dtype='int64')
+    #     skipped = []
+    #     for level in range(self.amr_header['nlevelmax']):
+    #         for cpu in range(self.amr_header['nboundary'] +
+    #                          self.amr_header['ncpu']):
+    #             header = ( ('file_ilevel', 1, 'I'),
+    #                        ('file_ncache', 1, 'I') )
+    #             try:
+    #                 hvals = fpu.read_attrs(f, header, "=")
+    #             except AssertionError:
+    #                 print("You are running with the wrong number of fields.")
+    #                 print("If you specified these in the load command, check the array length.")
+    #                 print("In this file there are %s hydro fields." % skipped)
+    #                 #print"The last set of field sizes was: %s" % skipped
+    #                 raise
+    #             if hvals['file_ncache'] == 0: continue
+    #             assert(hvals['file_ilevel'] == level+1)
+    #             if cpu + 1 == self.domain_id and level >= min_level:
+    #                 hydro_offset[level - min_level] = f.tell()
+    #                 level_count[level - min_level] = hvals['file_ncache']
+    #             skipped = fpu.skip(f, 8 * self.nvar)
+    #     self._hydro_offset = hydro_offset
+    #     self._level_count = level_count
+    #     return self._hydro_offset
 
     def _add_ptype(self, ptype):
         if hasattr(self, 'particle_types'):
@@ -147,15 +163,15 @@
         new.add(ptype)
         self.particle_types = self.particle_types_raw = tuple(new)
 
-    def _read_hydro_header(self):
-        # If no hydro file is found, return
-        if not self._has_hydro:
-            return
-        if self.nvar > 0: return self.nvar
-        # Read the number of hydro variables
-        f = open(self.hydro_fn, "rb")
-        fpu.skip(f, 1)
-        self.nvar = fpu.read_vector(f, "i")[0]
+    # def _read_hydro_header(self):
+    #     # If no hydro file is found, return
+    #     if not self._has_hydro:
+    #         return
+    #     if self.nvar > 0: return self.nvar
+    #     # Read the number of hydro variables
+    #     f = open(self.hydro_fn, "rb")
+    #     fpu.skip(f, 1)
+    #     self.nvar = fpu.read_vector(f, "i")[0]
 
     def _read_amr_header(self):
         hvals = {}
@@ -285,32 +301,39 @@
     _domain_offset = 1
     _block_reorder = "F"
 
-    def fill(self, content, fields, selector):
+    def fill(self, content, fields, selector, file_handler):
         # Here we get a copy of the file, which we skip through and read the
         # bits we want.
         oct_handler = self.oct_handler
-        all_fields = self.domain.ds.index.fluid_field_list
+        all_fields = [f for ft, f in file_handler.field_list]
         fields = [f for ft, f in fields]
         tr = {}
         cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
         levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
             selector, self.domain_id, cell_count)
+        # Initializing data container
         for field in fields:
             tr[field] = np.zeros(cell_count, 'float64')
-        for level, offset in enumerate(self.domain.hydro_offset):
+
+        # Loop over levels
+        for level, offset in enumerate(file_handler.offset):
             if offset == -1: continue
             content.seek(offset)
-            nc = self.domain.level_count[level]
-            temp = {}
+            nc = file_handler.level_count[level]
+            tmp = {}
+            # Initalize temporary data container for io
             for field in all_fields:
-                temp[field] = np.empty((nc, 8), dtype="float64")
+                tmp[field] = np.empty((nc, 8), dtype="float64")
             for i in range(8):
+                # Read the selected fields
                 for field in all_fields:
                     if field not in fields:
                         fpu.skip(content)
                     else:
-                        temp[field][:,i] = fpu.read_vector(content, 'd') # cell 1
-            oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, temp)
+                        tmp[field][:,i] = fpu.read_vector(content, 'd') # i-th cell
+
+            print(tr.keys(), tmp.keys())
+            oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, tmp)
         return tr
 
 class RAMSESIndex(OctreeIndex):
@@ -340,90 +363,22 @@
         self.num_grids = total_octs
 
     def _detect_output_fields(self):
-        # Do we want to attempt to figure out what the fields are in the file?
         dsl = set([])
-        if self.fluid_field_list is None or len(self.fluid_field_list) <= 0:
-            self._setup_auto_fields()
 
+        # Get the detected particle fields
         for domain in self.domains:
             for ph in domain.particle_handlers:
                 dsl.update(set(ph.field_offsets.keys()))
 
         self.particle_field_list = list(dsl)
-        self.field_list = [("ramses", f) for f in self.fluid_field_list] \
-                        + self.particle_field_list
 
-    def _setup_auto_fields(self):
-        '''
-        If no fluid fields are set, the code tries to set up a fluids array by hand
-        '''
-        # TODO: copy/pasted from DomainFile; needs refactoring!
-        num = os.path.basename(self.dataset.parameter_filename).split("."
-                )[0].split("_")[1]
-        testdomain = 1 # Just pick the first domain file to read
-        basename = "%s/%%s_%s.out%05i" % (
-            os.path.abspath(
-              os.path.dirname(self.dataset.parameter_filename)),
-            num, testdomain)
-        hydro_fn = basename % "hydro"
-        # Do we have a hydro file?
-        if not os.path.exists(hydro_fn):
-            self.fluid_field_list = []
-            return
-        # Read the number of hydro variables
-        f = open(hydro_fn, "rb")
-        hydro_header = ( ('ncpu', 1, 'i'),
-                         ('nvar', 1, 'i'),
-                         ('ndim', 1, 'i'),
-                         ('nlevelmax', 1, 'i'),
-                         ('nboundary', 1, 'i'),
-                         ('gamma', 1, 'd')
-                         )
-        hvals = fpu.read_attrs(f, hydro_header)
-        self.ds.gamma = hvals['gamma']
-        nvar = hvals['nvar']
-        # OK, we got NVAR, now set up the arrays depending on what NVAR is
-        # but first check for radiative transfer!
-        foldername  = os.path.abspath(os.path.dirname(self.ds.parameter_filename))
-        rt_flag = any(glob.glob(os.sep.join([foldername, 'info_rt_*.txt'])))
-        if rt_flag: # rt run
-            if nvar < 10:
-                mylog.info('Detected RAMSES-RT file WITHOUT IR trapping.')
-                fields = ["Density", "x-velocity", "y-velocity", "z-velocity", "Pressure", "Metallicity", "HII", "HeII", "HeIII"]
-            else:
-                mylog.info('Detected RAMSES-RT file WITH IR trapping.')
-                fields = ["Density", "x-velocity", "y-velocity", "z-velocity", "Pres_IR", "Pressure", "Metallicity", "HII", "HeII", "HeIII"]
-        else:
-            if nvar < 5:
-                mylog.debug("nvar=%s is too small! YT doesn't currently support 1D/2D runs in RAMSES %s")
-                raise ValueError
-            # Basic hydro runs
-            if nvar == 5:
-                fields = ["Density",
-                          "x-velocity", "y-velocity", "z-velocity",
-                          "Pressure"]
-            if nvar > 5 and nvar < 11:
-                fields = ["Density",
-                          "x-velocity", "y-velocity", "z-velocity",
-                          "Pressure", "Metallicity"]
-            # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
-            if nvar == 11:
-                fields = ["Density",
-                          "x-velocity", "y-velocity", "z-velocity",
-                          "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
-                          "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
-                          "Pressure"]
-            if nvar > 11:
-                fields = ["Density",
-                          "x-velocity", "y-velocity", "z-velocity",
-                          "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
-                          "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
-                          "Pressure","Metallicity"]
-        # Allow some wiggle room for users to add too many variables
-        while len(fields) < nvar:
-            fields.append("var"+str(len(fields)))
-        mylog.debug("No fields specified by user; automatically setting fields array to %s", str(fields))
-        self.fluid_field_list = fields
+        # Get the detected fields
+        dsl = set([])
+        for fh in self.domains[0].field_handlers:
+            dsl.update(set(fh.field_list))
+        self.fluid_field_list = list(dsl)
+
+        self.field_list = self.particle_field_list + self.fluid_field_list
 
     def _identify_base_chunk(self, dobj):
         if getattr(dobj, "_chunk_info", None) is None:
@@ -443,7 +398,7 @@
 
     def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
         sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        for i,og in enumerate(sobjs):
+        for i, og in enumerate(sobjs):
             if ngz > 0:
                 g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
             else:
@@ -481,17 +436,18 @@
         return npart
 
     def print_stats(self):
+        '''
+        Prints out (stdout) relevant information about the simulation
 
-        # This function prints information based on the fluid on the grids,
-        # and therefore does not work for DM only runs.
+        This function prints information based on the fluid fields on
+        the grids, and therefore does not work for DM-only runs.
+        '''
         if not self.fluid_field_list:
             print("This function is not implemented for DM only runs")
             return
 
         self._initialize_level_stats()
-        """
-        Prints out (stdout) relevant information about the simulation
-        """
+
         header = "%3s\t%14s\t%14s" % ("level", "# cells","# cells^3")
         print(header)
         print("%s" % (len(header.expandtabs())*"-"))

diff -r 7c8eb02f8a03d070c284a562adb9826697f11054 -r 71f2216c813c83dbd472f67b7816b82740759125 yt/frontends/ramses/field_handlers.py
--- a/yt/frontends/ramses/field_handlers.py
+++ b/yt/frontends/ramses/field_handlers.py
@@ -3,6 +3,7 @@
 import glob
 from yt.extern.six import add_metaclass
 from yt.funcs import mylog
+import numpy as np
 
 FIELD_HANDLERS = set()
 
@@ -37,7 +38,7 @@
     See `SinkParticleFileHandler` for an example implementation.'''
 
     # These properties are static properties
-    ftype = None  # The name to give to the particle type
+    ftype = None  # The name to give to the field type
     fname = None  # The name of the file(s).
     attrs = None  # The attributes of the header
     known_fields = None  # A list of tuples containing the field name and its type
@@ -46,7 +47,7 @@
     field_offsets = None     # Mapping from field to offset in file
     field_types = None       # Mapping from field to the type of the data (float, integer, …)
 
-    def __init__(self, ds, domain_id):
+    def __init__(self, domain):
         '''
         Initialize an instance of the class. This automatically sets
         the full path to the file. This is not intended to be
@@ -55,19 +56,19 @@
         If you need more flexibility, rewrite this function to your
         need in the inherited class.
         '''
-        self.ds = ds
-        self.domain_id = domain_id
+        self.domain = domain
+        self.domain_id = domain.domain_id
+        ds = domain.ds
         basename = os.path.abspath(
               os.path.dirname(ds.parameter_filename))
         iout = int(
             os.path.basename(ds.parameter_filename)
             .split(".")[0].
             split("_")[1])
-        icpu = domain_id
 
         self.fname = os.path.join(
             basename,
-            self.fname.format(iout=iout, icpu=icpu))
+            self.fname.format(iout=iout, icpu=domain.domain_id))
 
     @property
     def exists(self):
@@ -101,7 +102,7 @@
         raise NotImplementedError
 
     @classmethod
-    def get_field_list(cls, ds):
+    def detect_fields(cls, ds):
         raise NotImplementedError
 
 
@@ -125,7 +126,7 @@
 
 class HydroFieldFileHandler(FieldFileHandler):
     ftype = 'ramses'
-    fname = 'part_{iout:05d}.out{icpu:05d}'
+    fname = 'hydro_{iout:05d}.out{icpu:05d}'
     attrs = ( ('ncpu', 1, 'i'),
               ('nvar', 1, 'i'),
               ('ndim', 1, 'i'),
@@ -142,7 +143,10 @@
         return ret
 
     @classmethod
-    def get_field_list(cls, ds):
+    def detect_fields(cls, ds):
+        if getattr(cls, 'field_list', None) is not None:
+            return cls.field_list
+
         num = os.path.basename(ds.parameter_filename).split("."
                 )[0].split("_")[1]
         testdomain = 1 # Just pick the first domain file to read
@@ -152,10 +156,6 @@
             num, testdomain)
         fname = basename % "hydro"
 
-        if not os.path.exists(fname):
-            cls.fluid_field_list = []
-            return
-
         f = open(fname, 'rb')
         attrs = cls.attrs
         hvals = fpu.read_attrs(f, attrs)
@@ -164,46 +164,104 @@
         ds.gamma = hvals['gamma']
         nvar = cls.nvar = hvals['nvar']
 
-        foldername  = os.path.abspath(os.path.dirname(ds.parameter_filename))
-        rt_flag = any(glob.glob(os.sep.join([foldername, 'info_rt_*.txt'])))
-        if rt_flag: # rt run
-            if nvar < 10:
-                mylog.info('Detected RAMSES-RT file WITHOUT IR trapping.')
-                fields = ["Density", "x-velocity", "y-velocity", "z-velocity", "Pressure",
-                          "Metallicity", "HII", "HeII", "HeIII"]
-            else:
-                mylog.info('Detected RAMSES-RT file WITH IR trapping.')
-                fields = ["Density", "x-velocity", "y-velocity", "z-velocity", "Pres_IR",
-                          "Pressure", "Metallicity", "HII", "HeII", "HeIII"]
+        if ds._fields_in_file is not None:
+            fields = [('ramses', f) for f in ds._fields_in_file]
         else:
-            if nvar < 5:
-                mylog.debug("nvar=%s is too small! YT doesn't currently support 1D/2D runs in RAMSES %s")
-                raise ValueError
-            # Basic hydro runs
-            if nvar == 5:
-                fields = ["Density",
-                          "x-velocity", "y-velocity", "z-velocity",
-                          "Pressure"]
-            if nvar > 5 and nvar < 11:
-                fields = ["Density",
-                          "x-velocity", "y-velocity", "z-velocity",
-                          "Pressure", "Metallicity"]
-            # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
-            if nvar == 11:
-                fields = ["Density",
-                          "x-velocity", "y-velocity", "z-velocity",
-                          "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
-                          "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
-                          "Pressure"]
-            if nvar > 11:
-                fields = ["Density",
-                          "x-velocity", "y-velocity", "z-velocity",
-                          "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
-                          "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
-                          "Pressure","Metallicity"]
+            foldername  = os.path.abspath(os.path.dirname(ds.parameter_filename))
+            rt_flag = any(glob.glob(os.sep.join([foldername, 'info_rt_*.txt'])))
+            if rt_flag: # rt run
+                if nvar < 10:
+                    mylog.info('Detected RAMSES-RT file WITHOUT IR trapping.')
+                    fields = ["Density", "x-velocity", "y-velocity", "z-velocity", "Pressure",
+                              "Metallicity", "HII", "HeII", "HeIII"]
+                else:
+                    mylog.info('Detected RAMSES-RT file WITH IR trapping.')
+                    fields = ["Density", "x-velocity", "y-velocity", "z-velocity", "Pres_IR",
+                              "Pressure", "Metallicity", "HII", "HeII", "HeIII"]
+            else:
+                if nvar < 5:
+                    mylog.debug("nvar=%s is too small! YT doesn't currently support 1D/2D runs in RAMSES %s")
+                    raise ValueError
+                # Basic hydro runs
+                if nvar == 5:
+                    fields = ["Density",
+                              "x-velocity", "y-velocity", "z-velocity",
+                              "Pressure"]
+                if nvar > 5 and nvar < 11:
+                    fields = ["Density",
+                              "x-velocity", "y-velocity", "z-velocity",
+                              "Pressure", "Metallicity"]
+                # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
+                if nvar == 11:
+                    fields = ["Density",
+                              "x-velocity", "y-velocity", "z-velocity",
+                              "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
+                              "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
+                              "Pressure"]
+                if nvar > 11:
+                    fields = ["Density",
+                              "x-velocity", "y-velocity", "z-velocity",
+                              "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
+                              "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
+                              "Pressure","Metallicity"]
+            mylog.debug("No fields specified by user; automatically setting fields array to %s"
+                        % str(fields))
+
         # Allow some wiggle room for users to add too many variables
+        count_extra = 0
         while len(fields) < nvar:
             fields.append("var"+str(len(fields)))
-        mylog.debug("No fields specified by user; automatically setting fields array to %s", str(fields))
-        cls.fluid_field_list = fields
+            count_extra += 1
+        if count_extra > 0:
+            mylog.debug('Detected %s extra fluid fields.' % count_extra)
+        cls.field_list = [(cls.ftype, f) for f in fields]
+
         return fields
+
+    @property
+    def offset(self):
+        if getattr(self, '_offset', None) is not None:
+            return self._offset
+
+        with open(self.fname, 'rb') as f:
+            # Skip header
+            fpu.skip(f, 6)
+
+            # It goes: level, CPU, 8-variable (1 cube)
+            min_level = self.domain.ds.min_level
+            n_levels = self.domain.amr_header['nlevelmax'] - min_level
+            offset = np.zeros(n_levels, dtype='int64')
+            offset -= 1
+            level_count = np.zeros(n_levels, dtype='int64')
+            skipped = []
+            amr_header = self.domain.amr_header
+            for level in range(amr_header['nlevelmax']):
+                for cpu in range(amr_header['nboundary'] +
+                                 amr_header['ncpu']):
+                    header = ( ('file_ilevel', 1, 'I'),
+                               ('file_ncache', 1, 'I') )
+                    try:
+                        hvals = fpu.read_attrs(f, header, "=")
+                    except AssertionError:
+                        mylog.error(
+                            "You are running with the wrong number of fields. "
+                            "If you specified these in the load command, check the array length. "
+                            "In this file there are %s hydro fields." % skipped)
+                        raise
+                    if hvals['file_ncache'] == 0: continue
+                    assert(hvals['file_ilevel'] == level+1)
+                    if cpu + 1 == self.domain_id and level >= min_level:
+                        offset[level - min_level] = f.tell()
+                        level_count[level - min_level] = hvals['file_ncache']
+                    skipped = fpu.skip(f, 8 * self.nvar)
+        self._offset = offset
+        self._level_count = level_count
+        return self._offset
+
+    @property
+    def level_count(self):
+        if getattr(self, '_level_count', None) is not None:
+            return self._level_count
+        self.offset
+
+        return self._level_count
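
The offset property above scans every (level, cpu) record once and remembers where this domain's data starts. A minimal sketch of that bookkeeping, assuming a toy record layout; read_record_header and skip_vectors are hypothetical stand-ins for fpu.read_attrs and fpu.skip:

    import struct
    import numpy as np

    def read_record_header(f):
        # Stand-in for fpu.read_attrs: two uint32 (ilevel, ncache), no markers.
        return struct.unpack("=II", f.read(8))

    def skip_vectors(f, nvec, ncache):
        # Stand-in for fpu.skip: jump over nvec vectors of ncache float64s.
        f.seek(nvec * ncache * 8, 1)

    def scan_offsets(f, nlevelmax, ncpu, nboundary, nvar, domain_id, min_level=0):
        n_levels = nlevelmax - min_level
        offset = np.full(n_levels, -1, dtype="int64")      # -1: level absent
        level_count = np.zeros(n_levels, dtype="int64")
        for level in range(nlevelmax):
            for cpu in range(nboundary + ncpu):
                ilevel, ncache = read_record_header(f)
                if ncache == 0:
                    continue
                assert ilevel == level + 1
                if cpu + 1 == domain_id and level >= min_level:
                    offset[level - min_level] = f.tell()   # data starts here
                    level_count[level - min_level] = ncache
                skip_vectors(f, 8 * nvar, ncache)
        return offset, level_count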

diff -r 7c8eb02f8a03d070c284a562adb9826697f11054 -r 71f2216c813c83dbd472f67b7816b82740759125 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -83,7 +83,7 @@
 class IOHandlerRAMSES(BaseIOHandler):
     _dataset_type = "ramses"
 
-    def _read_fluid_selection(self, chunks, selector, fields, size):
+    def _read_fluid_selection_foo(self, chunks, selector, fields, size):
         # Chunks in this case will have affiliated domain subset objects
         # Each domain subset will contain a hydro_offset array, which gives
         # pointers to level-by-level hydro information
@@ -91,6 +91,7 @@
         for chunk in chunks:
             for subset in chunk.objs:
                 # Now we read the entire thing
+                print(fields)
                 f = open(subset.domain.hydro_fn, "rb")
                 # This contains the boundary information, so we skim through
                 # and pick off the right vectors
@@ -106,6 +107,50 @@
             d[field] = np.concatenate(tr.pop(field))
         return d
 
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        ftypes = set(f[0] for f in fields)
+
+        tr = defaultdict(list)
+        fields_by_type = defaultdict(list)
+        for field_type, field_name in fields:
+            fields_by_type[field_type].append((field_type, field_name))
+
+        for chunk in chunks:
+            for subset in chunk.objs:
+                file_handlers = subset.domain.field_handlers
+                for ftype in ftypes:
+                    # Get the file handler
+                    ok = False
+                    for field_handler in file_handlers:
+                        if field_handler.ftype == ftype:
+                            ok = True
+                            break
+
+                    if not ok:
+                        raise YTFieldTypeNotFound(ftype)
+
+                    # Get the fields of the given type
+                    sub_fields = fields_by_type[ftype]
+
+                    # Open the file
+                    f = open(field_handler.fname, 'rb')
+                    content = IO(f.read())
+                    rv = subset.fill(content, sub_fields, selector, field_handler)
+                    for ft, f in fields:
+                        d = rv.pop(f)
+                        mylog.debug("Filling %s with %s (%0.3e %0.3e) (%s zones)",
+                            f, d.size, d.min(), d.max(), d.size)
+                        tr[(ft, f)].append(d)
+
+        d = {}
+        for field in fields:
+            d[field] = np.concatenate(tr.pop(field))
+
+
+        return tr
+
+
     def _read_particle_coords(self, chunks, ptf):
         pn = "particle_position_%s"
         fields = [(ptype, "particle_position_%s" % ax)
@@ -146,7 +191,7 @@
         '''Read the particle files.'''
         tr = {}
 
-        # Sequential read depending on particle type (io or sink)
+        # Sequential read depending on particle type
         for ptype in set(f[0] for f in fields):
 
             # Select relevant fields
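
The grouping step in the new _read_fluid_selection above is a plain bucket-by-first-element pass over the (ftype, fname) tuples; for example:

    from collections import defaultdict

    fields = [("ramses", "Density"), ("ramses", "Pressure"),
              ("ramses-rt", "Photon_density_1")]

    fields_by_type = defaultdict(list)
    for field_type, field_name in fields:
        fields_by_type[field_type].append((field_type, field_name))

    print(dict(fields_by_type))
    # {'ramses': [('ramses', 'Density'), ('ramses', 'Pressure')],
    #  'ramses-rt': [('ramses-rt', 'Photon_density_1')]}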


https://bitbucket.org/yt_analysis/yt/commits/945ed78c859b/
Changeset:   945ed78c859b
User:        Corentin Cadiou
Date:        2017-12-03 17:46:24+00:00
Summary:     passing all tests
Affected #:  3 files

diff -r 71f2216c813c83dbd472f67b7816b82740759125 -r 945ed78c859bbfee7199d90bed88ec6f723fb666 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -57,7 +57,6 @@
     def __init__(self, ds, domain_id):
         self.ds = ds
         self.domain_id = domain_id
-        self.nvar = 0 # Set this later!
 
         num = os.path.basename(ds.parameter_filename).split("."
                 )[0].split("_")[1]
@@ -90,7 +89,7 @@
         for ph in particle_handlers:
             mylog.debug('Detected particle type %s in domain_id=%s' % (ph.ptype, domain_id))
             ph.read_header()
-            self._add_ptype(ph.ptype)
+            # self._add_ptype(ph.ptype)
 
         # Load the AMR structure
         self._read_amr()
@@ -101,13 +100,6 @@
     def __repr__(self):
         return "RAMSESDomainFile: %i" % self.domain_id
 
-    # @property
-    # def _has_hydro(self):
-    #     '''
-    #     Does the output include hydro?
-    #     '''
-    #     return os.path.exists(self.hydro_fn)
-
     @property
     def level_count(self):
         lvl_count = None
@@ -119,60 +111,6 @@
                 lvl_count += fh._level_count
         return lvl_count
 
-    # @property
-    # def hydro_offset(self):
-    #     if self._hydro_offset is not None: return self._hydro_offset
-    #     # We now have to open the file and calculate it
-    #     f = open(self.hydro_fn, "rb")
-    #     fpu.skip(f, 6)
-    #     # It goes: level, CPU, 8-variable
-    #     min_level = self.ds.min_level
-    #     n_levels = self.amr_header['nlevelmax'] - min_level
-    #     hydro_offset = np.zeros(n_levels, dtype='int64')
-    #     hydro_offset -= 1
-    #     level_count = np.zeros(n_levels, dtype='int64')
-    #     skipped = []
-    #     for level in range(self.amr_header['nlevelmax']):
-    #         for cpu in range(self.amr_header['nboundary'] +
-    #                          self.amr_header['ncpu']):
-    #             header = ( ('file_ilevel', 1, 'I'),
-    #                        ('file_ncache', 1, 'I') )
-    #             try:
-    #                 hvals = fpu.read_attrs(f, header, "=")
-    #             except AssertionError:
-    #                 print("You are running with the wrong number of fields.")
-    #                 print("If you specified these in the load command, check the array length.")
-    #                 print("In this file there are %s hydro fields." % skipped)
-    #                 #print"The last set of field sizes was: %s" % skipped
-    #                 raise
-    #             if hvals['file_ncache'] == 0: continue
-    #             assert(hvals['file_ilevel'] == level+1)
-    #             if cpu + 1 == self.domain_id and level >= min_level:
-    #                 hydro_offset[level - min_level] = f.tell()
-    #                 level_count[level - min_level] = hvals['file_ncache']
-    #             skipped = fpu.skip(f, 8 * self.nvar)
-    #     self._hydro_offset = hydro_offset
-    #     self._level_count = level_count
-    #     return self._hydro_offset
-
-    def _add_ptype(self, ptype):
-        if hasattr(self, 'particle_types'):
-            new = set(self.particle_types)
-        else:
-            new = set()
-        new.add(ptype)
-        self.particle_types = self.particle_types_raw = tuple(new)
-
-    # def _read_hydro_header(self):
-    #     # If no hydro file is found, return
-    #     if not self._has_hydro:
-    #         return
-    #     if self.nvar > 0: return self.nvar
-    #     # Read the number of hydro variables
-    #     f = open(self.hydro_fn, "rb")
-    #     fpu.skip(f, 1)
-    #     self.nvar = fpu.read_vector(f, "i")[0]
-
     def _read_amr_header(self):
         hvals = {}
         f = open(self.amr_fn, "rb")

diff -r 71f2216c813c83dbd472f67b7816b82740759125 -r 945ed78c859bbfee7199d90bed88ec6f723fb666 yt/frontends/ramses/field_handlers.py
--- a/yt/frontends/ramses/field_handlers.py
+++ b/yt/frontends/ramses/field_handlers.py
@@ -106,21 +106,12 @@
         raise NotImplementedError
 
 
-    def read_header(self):
-        '''
-        This function is called once per file. It should:
-        * read the header of the file and store any relevant information
-        * detect the fields in the file
-        * compute the offsets (location in the file) of each field
+    @property
+    def offset(self):
+        raise NotImplementedError
 
-        It is in charge of setting `self.field_offsets` and `self.field_types`.
-        * `field_offsets`: dictionary: tuple -> integer
-           A dictionary that maps `(type, field_name)` to their
-           location in the file (integer)
-        * `field_types`: dictionary: tuple -> character
-           A dictionary that maps `(type, field_name)` to their type
-           (character), following Python's struct convention.
-        '''
+    @property
+    def level_count(self):
         raise NotImplementedError
 
 

diff -r 71f2216c813c83dbd472f67b7816b82740759125 -r 945ed78c859bbfee7199d90bed88ec6f723fb666 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -83,20 +83,27 @@
 class IOHandlerRAMSES(BaseIOHandler):
     _dataset_type = "ramses"
 
-    def _read_fluid_selection_foo(self, chunks, selector, fields, size):
-        # Chunks in this case will have affiliated domain subset objects
-        # Each domain subset will contain a hydro_offset array, which gives
-        # pointers to level-by-level hydro information
+    def _generic_fluid_handler(self, chunks, selector, fields, size, ftype):
         tr = defaultdict(list)
+
         for chunk in chunks:
             for subset in chunk.objs:
+                fname = None
+                for fh in subset.domain.field_handlers:
+                    if fh.ftype == ftype:
+                        file_handler = fh
+                        fname = fh.fname
+                        break
+
+                if fname is None:
+                    raise YTFieldTypeNotFound(ftype)
+
                 # Now we read the entire thing
-                print(fields)
-                f = open(subset.domain.hydro_fn, "rb")
+                with open(fname, "rb") as f:
+                    content = IO(f.read())
                 # This contains the boundary information, so we skim through
                 # and pick off the right vectors
-                content = IO(f.read())
-                rv = subset.fill(content, fields, selector)
+                rv = subset.fill(content, fields, selector, file_handler)
                 for ft, f in fields:
                     d = rv.pop(f)
                     mylog.debug("Filling %s with %s (%0.3e %0.3e) (%s zones)",
@@ -105,51 +112,24 @@
         d = {}
         for field in fields:
             d[field] = np.concatenate(tr.pop(field))
+
         return d
 
-
     def _read_fluid_selection(self, chunks, selector, fields, size):
-        ftypes = set(f[0] for f in fields)
-
-        tr = defaultdict(list)
-        fields_by_type = defaultdict(list)
-        for field_type, field_name in fields:
-            fields_by_type[field_type].append((field_type, field_name))
-
-        for chunk in chunks:
-            for subset in chunk.objs:
-                file_handlers = subset.domain.field_handlers
-                for ftype in ftypes:
-                    # Get the file handler
-                    ok = False
-                    for field_handler in file_handlers:
-                        if field_handler.ftype == ftype:
-                            ok = True
-                            break
-
-                    if not ok:
-                        raise YTFieldTypeNotFound(ftype)
+        d = {}
 
-                    # Get the fields of the given type
-                    sub_fields = fields_by_type[ftype]
+        # Group fields by field type
+        for ft in set(f[0] for f in fields):
+            # Select the fields for the current reader
+            fields_subs = list(
+                filter(lambda f: f[0]==ft,
+                       fields))
 
-                    # Open the file
-                    f = open(field_handler.fname, 'rb')
-                    content = IO(f.read())
-                    rv = subset.fill(content, sub_fields, selector, field_handler)
-                    for ft, f in fields:
-                        d = rv.pop(f)
-                        mylog.debug("Filling %s with %s (%0.3e %0.3e) (%s zones)",
-                            f, d.size, d.min(), d.max(), d.size)
-                        tr[(ft, f)].append(d)
+            newd = self._generic_fluid_handler(chunks, selector, fields_subs, size,
+                                               ft)
+            d.update(newd)
 
-        d = {}
-        for field in fields:
-            d[field] = np.concatenate(tr.pop(field))
-
-
-        return tr
-
+        return d
 
     def _read_particle_coords(self, chunks, ptf):
         pn = "particle_position_%s"


https://bitbucket.org/yt_analysis/yt/commits/7efec2ca45bc/
Changeset:   7efec2ca45bc
User:        Corentin Cadiou
Date:        2017-12-03 20:48:55+00:00
Summary:     WIP
Affected #:  1 file

diff -r 945ed78c859bbfee7199d90bed88ec6f723fb666 -r 7efec2ca45bc8855c016914d9434cb140d4ec756 yt/frontends/ramses/field_handlers.py
--- a/yt/frontends/ramses/field_handlers.py
+++ b/yt/frontends/ramses/field_handlers.py
@@ -105,14 +105,54 @@
     def detect_fields(cls, ds):
         raise NotImplementedError
 
+    @property
+    def level_count(self):
+        if getattr(self, '_level_count', None) is not None:
+            return self._level_count
+        self.offset
+
+        return self._level_count
 
     @property
     def offset(self):
-        raise NotImplementedError
+        if getattr(self, '_offset', None) is not None:
+            return self._offset
+
+        with open(self.fname, 'rb') as f:
+            # Skip headers
+            nskip = len(self.header)
+            fpu.skip(f, nskip)
 
-    @property
-    def level_count(self):
-        raise NotImplementedError
+            # It goes: level, CPU, 8-variable (1 cube)
+            min_level = self.domain.ds.min_level
+            n_levels = self.domain.amr_header['nlevelmax'] - min_level
+            offset = np.zeros(n_levels, dtype='int64')
+            offset -= 1
+            level_count = np.zeros(n_levels, dtype='int64')
+            skipped = []
+            amr_header = self.domain.amr_header
+            for level in range(amr_header['nlevelmax']):
+                for cpu in range(amr_header['nboundary'] +
+                                 amr_header['ncpu']):
+                    header = ( ('file_ilevel', 1, 'I'),
+                               ('file_ncache', 1, 'I') )
+                    try:
+                        hvals = fpu.read_attrs(f, header, "=")
+                    except AssertionError:
+                        mylog.error(
+                            "You are running with the wrong number of fields. "
+                            "If you specified these in the load command, check the array length. "
+                            "In this file there are %s hydro fields." % skipped)
+                        raise
+                    if hvals['file_ncache'] == 0: continue
+                    assert(hvals['file_ilevel'] == level+1)
+                    if cpu + 1 == self.domain_id and level >= min_level:
+                        offset[level - min_level] = f.tell()
+                        level_count[level - min_level] = hvals['file_ncache']
+                    skipped = fpu.skip(f, 8 * self.nvar)
+        self._offset = offset
+        self._level_count = level_count
+        return self._offset
 
 
 class HydroFieldFileHandler(FieldFileHandler):
@@ -145,7 +185,7 @@
             os.path.abspath(
               os.path.dirname(ds.parameter_filename)),
             num, testdomain)
-        fname = basename % "hydro"
+        fname = basename % 'hydro'
 
         f = open(fname, 'rb')
         attrs = cls.attrs
@@ -209,50 +249,61 @@
 
         return fields
 
-    @property
-    def offset(self):
-        if getattr(self, '_offset', None) is not None:
-            return self._offset
+class RTFieldFileHandler(FieldFileHandler):
+    ftype = 'rt'
+    fname = 'rt_{iout:05d}.out{icpu:05d}'
+    header = ( ('ncpu', 1, 'i'),
+               ('nvar', 1, 'i'),
+               ('ndim', 1, 'i'),
+               ('nlevelmax', 1, 'i'),
+               ('nboundary', 1, 'i'),
+               ('gamma', 1, 'd')
+    )
 
-        with open(self.fname, 'rb') as f:
-            # Skip header
-            fpu.skip(f, 6)
+    @classmethod
+    def any_exist(cls, ds):
+        files = os.path.join(
+            os.path.split(ds.parameter_filename)[0],
+            'info_rt_?????.txt')
+        ret = len(glob.glob(files)) == 0
+        return ret
+
+    @classmethod
+    def detect_fields(cls, ds):
+        if getattr(cls, 'field_list', None) is not None:
+            return cls.field_list
+
+        ngroups = ds.parameters['nGroups']
 
-            # It goes: level, CPU, 8-variable (1 cube)
-            min_level = self.domain.ds.min_level
-            n_levels = self.domain.amr_header['nlevelmax'] - min_level
-            offset = np.zeros(n_levels, dtype='int64')
-            offset -= 1
-            level_count = np.zeros(n_levels, dtype='int64')
-            skipped = []
-            amr_header = self.domain.amr_header
-            for level in range(amr_header['nlevelmax']):
-                for cpu in range(amr_header['nboundary'] +
-                                 amr_header['ncpu']):
-                    header = ( ('file_ilevel', 1, 'I'),
-                               ('file_ncache', 1, 'I') )
-                    try:
-                        hvals = fpu.read_attrs(f, header, "=")
-                    except AssertionError:
-                        mylog.error(
-                            "You are running with the wrong number of fields. "
-                            "If you specified these in the load command, check the array length. "
-                            "In this file there are %s hydro fields." % skipped)
-                        raise
-                    if hvals['file_ncache'] == 0: continue
-                    assert(hvals['file_ilevel'] == level+1)
-                    if cpu + 1 == self.domain_id and level >= min_level:
-                        offset[level - min_level] = f.tell()
-                        level_count[level - min_level] = hvals['file_ncache']
-                    skipped = fpu.skip(f, 8 * self.nvar)
-        self._offset = offset
-        self._level_count = level_count
-        return self._offset
+        fields = []
+        for ng in range(ngroups):
+            tmp = ["Photon_density_%s", "Photon_flux_x_%s", "Photon_flux_y_%s", "Photon_flux_z_%s"]
+            fields.extend([t % (ng + 1) for t in tmp])
+
+        cls.field_list = [(cls.ftype, f) for f in fields]
+        return fields
+
+    # def __init__(self, *args, **kwargs):
+    #     super(RTFieldFileHandler, self).__init__(*args, **kwargs)
+    #     # Parse the rt descriptor
+    #     fname = self.domain.parameter_filename.replace('info_', 'info_rt_')
 
-    @property
-    def level_count(self):
-        if getattr(self, '_level_count', None) is not None:
-            return self._level_count
-        self.offset
+    #     rheader = {}
+    #     def read_rhs(cast):
+    #         line = f.readline()
+    #         p, v = line.split("=")
+    #         rheader[p.strip()] = cast(v)
 
-        return self._level_count
+    #     with open(fname, 'r') as f:
+    #         for i in range(4): read_rhs(int)
+    #         f.readline()
+    #         for i in range(2): read_rhs(float)
+    #         f.readline()
+    #         for i in range(3): read_rhs(float)
+    #         f.readline()
+    #         for i in range(3): read_rhs(float)
+
+    #         # Touchy part, we have to read the photon group properties
+    #         mylog.warning('Not reading photon group properties')
+
+    #         self.rt_parameters = rheader


https://bitbucket.org/yt_analysis/yt/commits/143b9fd7f1f2/
Changeset:   143b9fd7f1f2
User:        Corentin Cadiou
Date:        2017-12-04 09:55:28+00:00
Summary:     Cleanup + generic code
Affected #:  2 files

diff -r 7efec2ca45bc8855c016914d9434cb140d4ec756 -r 143b9fd7f1f22fcb987ed76441e82c70328c5dce yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -270,7 +270,6 @@
                     else:
                         tmp[field][:,i] = fpu.read_vector(content, 'd') # i-th cell
 
-            print(tr.keys(), tmp.keys())
             oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, tmp)
         return tr
 
@@ -433,7 +432,6 @@
         cosmological: If set to None, automatically detect cosmological simulation. If a boolean, force
                       its value.
         '''
-        self.fluid_types += ("ramses",)
         self._fields_in_file = fields
         self._extra_particle_fields = extra_particle_fields
         self._warn_extra_fields = False
@@ -441,7 +439,9 @@
         self._bbox = bbox
         Dataset.__init__(self, filename, dataset_type, units_override=units_override,
                          unit_system=unit_system)
-
+        for FH in get_field_handlers():
+            if FH.any_exist(self):
+                self.fluid_types += (FH.ftype, )
         self.storage_filename = storage_filename
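
Registering fluid types now rides on the handler registry: each handler class reports whether its files exist for the dataset. A toy version of that loop, with illustrative stand-in classes:

    # Toy registry mirroring FIELD_HANDLERS / get_field_handlers()
    FIELD_HANDLERS = []

    class ToyHydroHandler:
        ftype = "ramses"
        @classmethod
        def any_exist(cls, ds):
            return True   # yt globs for hydro_?????.out????? files instead

    FIELD_HANDLERS.append(ToyHydroHandler)

    class ToyDataset:
        fluid_types = ()

    ds = ToyDataset()
    for FH in FIELD_HANDLERS:
        if FH.any_exist(ds):
            ds.fluid_types += (FH.ftype,)
    print(ds.fluid_types)   # ('ramses',)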
 
 

diff -r 7efec2ca45bc8855c016914d9434cb140d4ec756 -r 143b9fd7f1f22fcb987ed76441e82c70328c5dce yt/frontends/ramses/field_handlers.py
--- a/yt/frontends/ramses/field_handlers.py
+++ b/yt/frontends/ramses/field_handlers.py
@@ -118,9 +118,10 @@
         if getattr(self, '_offset', None) is not None:
             return self._offset
 
+        nvar = self.parameters['nvar']
         with open(self.fname, 'rb') as f:
             # Skip headers
-            nskip = len(self.header)
+            nskip = len(self.attrs)
             fpu.skip(f, nskip)
 
             # It goes: level, CPU, 8-variable (1 cube)
@@ -149,7 +150,7 @@
                     if cpu + 1 == self.domain_id and level >= min_level:
                         offset[level - min_level] = f.tell()
                         level_count[level - min_level] = hvals['file_ncache']
-                    skipped = fpu.skip(f, 8 * self.nvar)
+                    skipped = fpu.skip(f, 8 * nvar)
         self._offset = offset
         self._level_count = level_count
         return self._offset
@@ -167,10 +168,14 @@
 
     @classmethod
     def any_exist(cls, ds):
+        if getattr(cls, '_any_exist', None) is not None:
+            return cls._any_exist
+
         files = os.path.join(
             os.path.split(ds.parameter_filename)[0],
             'hydro_?????.out?????')
         ret = len(glob.glob(files)) > 0
+        cls._any_exist = ret
         return ret
 
     @classmethod
@@ -190,10 +195,11 @@
         f = open(fname, 'rb')
         attrs = cls.attrs
         hvals = fpu.read_attrs(f, attrs)
+        cls.parameters = hvals
 
         # Store some metadata
         ds.gamma = hvals['gamma']
-        nvar = cls.nvar = hvals['nvar']
+        nvar = hvals['nvar']
 
         if ds._fields_in_file is not None:
             fields = [('ramses', f) for f in ds._fields_in_file]


https://bitbucket.org/yt_analysis/yt/commits/b6fe7f4d4cb9/
Changeset:   b6fe7f4d4cb9
User:        Corentin Cadiou
Date:        2017-12-04 09:55:42+00:00
Summary:     RT support + more doc
Affected #:  2 files

diff -r 143b9fd7f1f22fcb987ed76441e82c70328c5dce -r b6fe7f4d4cb993c9c60a0cfaa93e58963b94bea5 yt/frontends/ramses/field_handlers.py
--- a/yt/frontends/ramses/field_handlers.py
+++ b/yt/frontends/ramses/field_handlers.py
@@ -39,7 +39,7 @@
 
     # These properties are static properties
     ftype = None  # The name to give to the field type
-    fname = None  # The name of the file(s).
+    fname = None  # The name of the file(s)
     attrs = None  # The attributes of the header
     known_fields = None  # A list of tuples containing the field name and its type
 
@@ -73,7 +73,7 @@
     @property
     def exists(self):
         '''
-        This function should return True if the *file* the instance
+        This function should return True if the *file* for the domain
         exists. It is called for each file of the type found on the
         disk.
 
@@ -85,7 +85,7 @@
     @classmethod
     def any_exist(cls, ds):
         '''
-        This function should return True if the kind of particle
+        This function should return True if the kind of field
         represented by the class exists in the dataset. It takes as
         argument the class itself —not an instance— and a dataset.
 
@@ -103,10 +103,23 @@
 
     @classmethod
     def detect_fields(cls, ds):
+        '''
+        Called once to set up the fields of this type
+
+        It should set the following static variables:
+        * parameters: dictionary
+           Dictionary containing the variables. The keys should match
+           those of `cls.attrs`
+        * field_list: list of (ftype, fname)
+           The list of the fields present in the file
+        '''
         raise NotImplementedError
 
     @property
     def level_count(self):
+        '''
+        Return the number of cells per level.
+        '''
         if getattr(self, '_level_count', None) is not None:
             return self._level_count
         self.offset
@@ -115,6 +128,16 @@
 
     @property
     def offset(self):
+        '''
+        Compute the offsets of the fields.
+
+        By default, it skips the header (as defined by `cls.attrs`)
+        and computes the offset at each level.
+
+        It should be generic enough for most cases, but if the
+        *structure* of your fluid file is non-canonical, change this.
+        '''
+
         if getattr(self, '_offset', None) is not None:
             return self._offset
 
@@ -256,22 +279,26 @@
         return fields
 
 class RTFieldFileHandler(FieldFileHandler):
-    ftype = 'rt'
+    ftype = 'ramses-rt'
     fname = 'rt_{iout:05d}.out{icpu:05d}'
-    header = ( ('ncpu', 1, 'i'),
-               ('nvar', 1, 'i'),
-               ('ndim', 1, 'i'),
-               ('nlevelmax', 1, 'i'),
-               ('nboundary', 1, 'i'),
-               ('gamma', 1, 'd')
+    attrs = ( ('ncpu', 1, 'i'),
+              ('nvar', 1, 'i'),
+              ('ndim', 1, 'i'),
+              ('nlevelmax', 1, 'i'),
+              ('nboundary', 1, 'i'),
+              ('gamma', 1, 'd')
     )
 
     @classmethod
     def any_exist(cls, ds):
+        if getattr(cls, '_any_exist', None) is not None:
+            return cls._any_exist
         files = os.path.join(
             os.path.split(ds.parameter_filename)[0],
             'info_rt_?????.txt')
-        ret = len(glob.glob(files)) == 0
+        ret = len(glob.glob(files)) == 1
+
+        cls._any_exist = ret
         return ret
 
     @classmethod
@@ -279,7 +306,35 @@
         if getattr(cls, 'field_list', None) is not None:
             return cls.field_list
 
-        ngroups = ds.parameters['nGroups']
+        fname = ds.parameter_filename.replace('info_', 'info_rt_')
+
+        rheader = {}
+        def read_rhs(cast):
+            line = f.readline()
+            p, v = line.split("=")
+            rheader[p.strip()] = cast(v)
+
+        with open(fname, 'r') as f:
+            for i in range(4): read_rhs(int)
+            f.readline()
+            for i in range(2): read_rhs(float)
+            f.readline()
+            for i in range(3): read_rhs(float)
+            f.readline()
+            for i in range(3): read_rhs(float)
+
+            # Touchy part, we have to read the photon group properties
+            mylog.warning('Not reading photon group properties')
+
+            cls.rt_parameters = rheader
+
+        ngroups = rheader['nGroups']
+
+        iout = int(str(ds).split('_')[1])
+        basedir = os.path.split(ds.parameter_filename)[0]
+        fname = os.path.join(basedir, cls.fname.format(iout=iout, icpu=1))
+        with open(fname, 'rb') as f:
+            cls.parameters = fpu.read_attrs(f, cls.attrs)
 
         fields = []
         for ng in range(ngroups):
@@ -288,28 +343,3 @@
 
         cls.field_list = [(cls.ftype, f) for f in fields]
         return fields
-
-    # def __init__(self, *args, **kwargs):
-    #     super(RTFieldFileHandler, self).__init__(*args, **kwargs)
-    #     # Parse the rt descriptor
-    #     fname = self.domain.parameter_filename.replace('info_', 'info_rt_')
-
-    #     rheader = {}
-    #     def read_rhs(cast):
-    #         line = f.readline()
-    #         p, v = line.split("=")
-    #         rheader[p.strip()] = cast(v)
-
-    #     with open(fname, 'r') as f:
-    #         for i in range(4): read_rhs(int)
-    #         f.readline()
-    #         for i in range(2): read_rhs(float)
-    #         f.readline()
-    #         for i in range(3): read_rhs(float)
-    #         f.readline()
-    #         for i in range(3): read_rhs(float)
-
-    #         # Touchy part, we have to read the photon group properties
-    #         mylog.warning('Not reading photon group properties')
-
-    #         self.rt_parameters = rheader
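
The info_rt header parsing uses a small closure that consumes one 'name = value' line per call and casts the right-hand side. The same idea on synthetic input (the sample lines are illustrative, not the actual info_rt layout):

    import io

    sample = """nRTvar      =            4
    nIons       =            3
    """

    rheader = {}
    f = io.StringIO(sample)

    def read_rhs(cast):
        # Each line looks like 'name = value'; keep the name, cast the value.
        p, v = f.readline().split("=")
        rheader[p.strip()] = cast(v)

    for _ in range(2):
        read_rhs(int)
    print(rheader)   # {'nRTvar': 4, 'nIons': 3}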

diff -r 143b9fd7f1f22fcb987ed76441e82c70328c5dce -r b6fe7f4d4cb993c9c60a0cfaa93e58963b94bea5 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -13,10 +13,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import glob
 import os
 import numpy as np
 
+from yt import units
 from yt.utilities.physical_constants import \
     boltzmann_constant_cgs, \
     mass_hydrogen_cgs, \
@@ -26,6 +26,7 @@
 import yt.utilities.fortran_utils as fpu
 from yt.fields.field_info_container import \
     FieldInfoContainer
+from .field_handlers import RTFieldFileHandler
 
 b_units = "code_magnetic"
 ra_units = "code_length / code_time**2"
@@ -34,6 +35,8 @@
 pressure_units = "code_pressure"
 ener_units = "code_mass * code_velocity**2 / code_time**2"
 ang_mom_units = "code_mass * code_velocity * code_length"
+flux_unit = "1 / code_length**2 / code_time"
+dens_unit = "1 / code_length**3"
 
 known_species_masses = dict(
   (sp, mh * v) for sp, v in [
@@ -119,6 +122,20 @@
         ("BH_efficiency", ("", [], None))
     )
 
+    # def __init__(self, ds, field_list, slice_info = None):
+    #     rt_flag = RTFieldFileHandler.any_exist(ds)
+    #     if rt_flag:
+    #         ds.fluid_types += ('rt', )
+    #         print(ds.fluid_types)
+    #         for igroup in range(RTFieldFileHandler.rt_parameters['nGroups']):
+    #             self.known_other_fields += ('Photon_density_%s' % igroup,
+    #                                         (dens_unit, [], 'photon_density_%s' % igroup))
+    #             for k in 'xyz':
+    #                 self.known_other_fields += ('Photon_flux_%s_%s' % (k, igroup),
+    #                                             (flux_unit, [],
+    #                                              'photon_flux_%s_%s' % (k, igroup)))
+    #     super(RAMSESFieldInfo, self).__init__(ds, field_list, slice_info)
+
     def setup_fluid_fields(self):
         def _temperature(field, data):
             rv = data["gas", "pressure"]/data["gas", "density"]
@@ -127,32 +144,44 @@
         self.add_field(("gas", "temperature"), sampling_type="cell",  function=_temperature,
                         units=self.ds.unit_system["temperature"])
         self.create_cooling_fields()
+
         # See if we need to load the rt fields
-        foldername  = os.path.abspath(os.path.dirname(self.ds.parameter_filename))
-        rt_flag = any(glob.glob(os.sep.join([foldername, 'info_rt_*.txt'])))
+        rt_flag = RTFieldFileHandler.any_exist(self.ds)
         if rt_flag: # rt run
-            self.setup_rt_fields()
+            self.create_rt_fields()
+
+    def create_rt_fields(self):
+        self.ds.fluid_types += ('rt', )
+        p = RTFieldFileHandler.rt_parameters.copy()
+        p.update(self.ds.parameters)
+        ngroups = p['nGroups']
+        rt_c = p['rt_c_frac'] * units.c / (p['unit_l'] / p['unit_t'])
+        dens_conv = (p['unit_np'] / rt_c).value / units.cm**3
 
-    def setup_rt_fields(self):
-        def _temp_IR(field, data):
-            rv = data["gas", "pres_IR"]/data["gas", "density"]
-            rv *= mass_hydrogen_cgs/boltzmann_constant_cgs
-            return rv
-        self.add_field(("gas", "temp_IR"), sampling_type="cell",
-                       function=_temp_IR,
-                       units=self.ds.unit_system["temperature"])
-        for species in ['H_p1', 'He_p1', 'He_p2']:
-            def _species_density(field, data):
-                return data['gas', species+'_fraction']*data['gas', 'density']
-            self.add_field(('gas', species+'_density'), sampling_type='cell',
-                           function=_species_density,
-                           units=self.ds.unit_system['density'])
-            def _species_mass(field, data):
-                return (data['gas', species+'_density']*
-                        data['index', 'cell_volume'])
-            self.add_field(('gas', species+'_mass'), sampling_type='cell',
-                           function=_species_mass,
-                           units=self.ds.unit_system['mass'])
+        def gen_pdens(igroup):
+            def _photon_density(field, data):
+                rv = data['ramses-rt', 'Photon_density_%s' % (igroup + 1)] * dens_conv
+                return rv
+            return _photon_density
+
+        for igroup in range(ngroups):
+            self.add_field(('rt', 'photon_density_%s' % (igroup + 1)), sampling_type='cell',
+                           function=gen_pdens(igroup),
+                           units=dens_unit)
+
+        flux_conv = p['unit_pf'] / units.cm**2 / units.s
+
+        def gen_flux(key, igroup):
+            def _photon_flux(field, data):
+                rv = data['ramses-rt', 'Photon_flux_%s_%s' % (key, igroup + 1)] * flux_conv
+                return rv
+            return _photon_flux
+
+        for key in 'xyz':
+            for igroup in range(ngroups):
+                self.add_field(('rt', 'photon_flux_%s_%s' % (key, igroup + 1)), sampling_type='cell',
+                               function=gen_flux(key, igroup),
+                               units=flux_unit)
 
 
     def create_cooling_fields(self):
@@ -169,9 +198,9 @@
                      'logT' : np.log10(data["temperature"]).ravel()}
                 rv = 10**interp_object(d).reshape(shape)
                 # Return array in unit 'per volume' consistently with line below
-                return data.ds.arr(rv, 'code_length**-3')
+                return data.ds.arr(rv, dens_unit)
             self.add_field(name = name, sampling_type="cell", function=_func,
-                                 units = "code_length**-3")
+                                 units = dens_unit)
         avals = {}
         tvals = {}
         with open(filename, "rb") as f:


https://bitbucket.org/yt_analysis/yt/commits/0af526046f7d/
Changeset:   0af526046f7d
User:        Corentin Cadiou
Date:        2017-12-04 10:50:35+00:00
Summary:     use getter/setter for rt_parameters
Affected #:  2 files

diff -r b6fe7f4d4cb993c9c60a0cfaa93e58963b94bea5 -r 0af526046f7d56dcdf8acceea59f055020b73892 yt/frontends/ramses/field_handlers.py
--- a/yt/frontends/ramses/field_handlers.py
+++ b/yt/frontends/ramses/field_handlers.py
@@ -343,3 +343,15 @@
 
         cls.field_list = [(cls.ftype, f) for f in fields]
         return fields
+
+    @property
+    def rt_parameters(self):
+        if self._rt_parameters: return self._rt_parameters
+
+        # Call detect fields to get the rt_parameters
+        self.detect_fields(RTFieldFileHandler, self.ds)
+        return self._rt_parameters
+
+    @rt_parameters.setter
+    def rt_parameters(self, val):
+        self._rt_parameters = val

diff -r b6fe7f4d4cb993c9c60a0cfaa93e58963b94bea5 -r 0af526046f7d56dcdf8acceea59f055020b73892 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -122,20 +122,6 @@
         ("BH_efficiency", ("", [], None))
     )
 
-    # def __init__(self, ds, field_list, slice_info = None):
-    #     rt_flag = RTFieldFileHandler.any_exist(ds)
-    #     if rt_flag:
-    #         ds.fluid_types += ('rt', )
-    #         print(ds.fluid_types)
-    #         for igroup in range(RTFieldFileHandler.rt_parameters['nGroups']):
-    #             self.known_other_fields += ('Photon_density_%s' % igroup,
-    #                                         (dens_unit, [], 'photon_density_%s' % igroup))
-    #             for k in 'xyz':
-    #                 self.known_other_fields += ('Photon_flux_%s_%s' % (k, igroup),
-    #                                             (flux_unit, [],
-    #                                              'photon_flux_%s_%s' % (k, igroup)))
-    #     super(RAMSESFieldInfo, self).__init__(ds, field_list, slice_info)
-
     def setup_fluid_fields(self):
         def _temperature(field, data):
             rv = data["gas", "pressure"]/data["gas", "density"]
@@ -152,7 +138,8 @@
 
     def create_rt_fields(self):
         self.ds.fluid_types += ('rt', )
-        p = RTFieldFileHandler.rt_parameters.copy()
+        tmp = RTFieldFileHandler.rt_parameters
+        p = tmp.copy()
         p.update(self.ds.parameters)
         ngroups = p['nGroups']
         rt_c = p['rt_c_frac'] * units.c / (p['unit_l'] / p['unit_t'])
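
The getter/setter pair makes rt_parameters a lazily computed, cacheable attribute that detect_fields can also fill in directly. A minimal sketch of the pattern, with a hypothetical _parse_rt_header standing in for reading info_rt_*.txt:

    class ToyRTHandler:
        _rt_parameters = None

        @property
        def rt_parameters(self):
            if self._rt_parameters is None:      # compute once, then cache
                self._rt_parameters = self._parse_rt_header()
            return self._rt_parameters

        @rt_parameters.setter
        def rt_parameters(self, val):
            self._rt_parameters = val

        def _parse_rt_header(self):
            return {"nGroups": 1}   # stand-in for the real parser

    h = ToyRTHandler()
    print(h.rt_parameters)              # parsed lazily on first access
    h.rt_parameters = {"nGroups": 3}    # or injected by detect_fields
    print(h.rt_parameters)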


https://bitbucket.org/yt_analysis/yt/commits/381724757e25/
Changeset:   381724757e25
User:        Corentin Cadiou
Date:        2017-12-04 12:07:57+00:00
Summary:     don't cache any_exist attribute (class based) and field_list

This is to prevent stale class-level caches when caching datasets
Affected #:  1 file

diff -r 0af526046f7d56dcdf8acceea59f055020b73892 -r 381724757e25a2367fa5e792c29eecfafd14506e yt/frontends/ramses/field_handlers.py
--- a/yt/frontends/ramses/field_handlers.py
+++ b/yt/frontends/ramses/field_handlers.py
@@ -191,9 +191,6 @@
 
     @classmethod
     def any_exist(cls, ds):
-        if getattr(cls, '_any_exist', None) is not None:
-            return cls._any_exist
-
         files = os.path.join(
             os.path.split(ds.parameter_filename)[0],
             'hydro_?????.out?????')
@@ -203,9 +200,6 @@
 
     @classmethod
     def detect_fields(cls, ds):
-        if getattr(cls, 'field_list', None) is not None:
-            return cls.field_list
-
         num = os.path.basename(ds.parameter_filename).split("."
                 )[0].split("_")[1]
         testdomain = 1 # Just pick the first domain file to read
@@ -291,8 +285,6 @@
 
     @classmethod
     def any_exist(cls, ds):
-        if getattr(cls, '_any_exist', None) is not None:
-            return cls._any_exist
         files = os.path.join(
             os.path.split(ds.parameter_filename)[0],
             'info_rt_?????.txt')
@@ -303,9 +295,6 @@
 
     @classmethod
     def detect_fields(cls, ds):
-        if getattr(cls, 'field_list', None) is not None:
-            return cls.field_list
-
         fname = ds.parameter_filename.replace('info_', 'info_rt_')
 
         rheader = {}
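
The caches were dropped because a class-level attribute outlives the dataset that populated it, so a second dataset opened in the same session would inherit the first one's answer. A contrived illustration:

    class ToyHandler:
        _cached = None        # class attribute: shared by every dataset

        @classmethod
        def any_exist(cls, ds):
            if cls._cached is None:
                cls._cached = ("rt" in ds)   # remembered for the first ds only
            return cls._cached

    print(ToyHandler.any_exist("run_with_rt"))    # True
    print(ToyHandler.any_exist("run_without"))    # still True: stale cache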


https://bitbucket.org/yt_analysis/yt/commits/adf0346f9984/
Changeset:   adf0346f9984
User:        Corentin Cadiou
Date:        2017-12-04 15:47:41+00:00
Summary:     minor update to doc
Affected #:  1 file

diff -r 381724757e25a2367fa5e792c29eecfafd14506e -r adf0346f99844826e1b6c2bd10a3e5840b7377b2 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -2095,7 +2095,8 @@
          the selected region, you may want to use ``ds.box(…)``.
 
       .. note::
-	 This feature is only available when using Hilbert ordering.
+	 The ``bbox`` feature is only available for datasets using
+	 Hilbert ordering.
 
 
 Adding custom particle fields


https://bitbucket.org/yt_analysis/yt/commits/6fe1d152a558/
Changeset:   6fe1d152a558
User:        Corentin Cadiou
Date:        2017-12-05 15:47:46+00:00
Summary:     remove useless glob
Affected #:  1 file

diff -r adf0346f99844826e1b6c2bd10a3e5840b7377b2 -r 6fe1d152a5586990a6613138936209f2f612a7d2 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -15,7 +15,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import glob
 import os
 import numpy as np
 import stat


https://bitbucket.org/yt_analysis/yt/commits/d9879655ae05/
Changeset:   d9879655ae05
User:        Corentin Cadiou
Date:        2017-12-05 21:48:20+00:00
Summary:     remove utf8 char
Affected #:  1 file

diff -r 6fe1d152a5586990a6613138936209f2f612a7d2 -r d9879655ae05856a973887c5b3c74938674137c5 yt/frontends/ramses/field_handlers.py
--- a/yt/frontends/ramses/field_handlers.py
+++ b/yt/frontends/ramses/field_handlers.py
@@ -45,7 +45,7 @@
 
     # These properties are computed dynamically
     field_offsets = None     # Mapping from field to offset in file
-    field_types = None       # Mapping from field to the type of the data (float, integer, …)
+    field_types = None       # Mapping from field to the type of the data (float, integer, ...)
 
     def __init__(self, domain):
         '''
@@ -87,7 +87,7 @@
         '''
         This function should return True if the kind of field
         represented by the class exists in the dataset. It takes as
-        argument the class itself —not an instance— and a dataset.
+        argument the class itself - not an instance - and a dataset.
 
         Arguments
         ---------


https://bitbucket.org/yt_analysis/yt/commits/9ee097408c6c/
Changeset:   9ee097408c6c
User:        Corentin Cadiou
Date:        2017-12-06 13:32:45+00:00
Summary:     support hydro file desc.
Affected #:  2 files

diff -r d9879655ae05856a973887c5b3c74938674137c5 -r 9ee097408c6cccbffe576be4b1838c48086a7407 yt/frontends/ramses/field_handlers.py
--- a/yt/frontends/ramses/field_handlers.py
+++ b/yt/frontends/ramses/field_handlers.py
@@ -5,6 +5,8 @@
 from yt.funcs import mylog
 import numpy as np
 
+from .io import _read_fluid_file_descriptor
+
 FIELD_HANDLERS = set()
 
 def get_field_handlers():
@@ -42,6 +44,7 @@
     fname = None  # The name of the file(s)
     attrs = None  # The attributes of the header
     known_fields = None  # A list of tuple containing the field name and its type
+    file_descriptor = None # The name of the file descriptor (if any)
 
     # These properties are computed dynamically
     field_offsets = None     # Mapping from field to offset in file
@@ -70,6 +73,11 @@
             basename,
             self.fname.format(iout=iout, icpu=domain.domain_id))
 
+        if self.file_descriptor is not None:
+            self.file_descriptor = os.path.join(
+                basename,
+                self.file_descriptor)
+
     @property
     def exists(self):
         '''
@@ -82,6 +90,18 @@
         '''
         return os.path.exists(self.fname)
 
+    @property
+    def has_part_descriptor(self):
+        '''
+        This function should return True if a *file descriptor*
+        exists.
+
+        By default, it just returns whether the file exists. Override
+        it for more complex cases.
+        '''
+        return os.path.exists(self.file_descriptor)
+
+
     @classmethod
     def any_exist(cls, ds):
         '''
@@ -182,6 +202,7 @@
 class HydroFieldFileHandler(FieldFileHandler):
     ftype = 'ramses'
     fname = 'hydro_{iout:05d}.out{icpu:05d}'
+    file_descriptor = 'hydro_file_descriptor.txt'
     attrs = ( ('ncpu', 1, 'i'),
               ('nvar', 1, 'i'),
               ('ndim', 1, 'i'),
@@ -203,11 +224,12 @@
         num = os.path.basename(ds.parameter_filename).split("."
                 )[0].split("_")[1]
         testdomain = 1 # Just pick the first domain file to read
+        basepath = os.path.abspath(
+              os.path.dirname(ds.parameter_filename))
         basename = "%s/%%s_%s.out%05i" % (
-            os.path.abspath(
-              os.path.dirname(ds.parameter_filename)),
-            num, testdomain)
+            basepath, num, testdomain)
         fname = basename % 'hydro'
+        fname_desc = os.path.join(basepath, cls.file_descriptor)
 
         f = open(fname, 'rb')
         attrs = cls.attrs
@@ -218,9 +240,19 @@
         ds.gamma = hvals['gamma']
         nvar = hvals['nvar']
 
+        ok = False
         if ds._fields_in_file is not None:
-            fields = [('ramses', f) for f in ds._fields_in_file]
-        else:
+            fields = [f for f in ds._fields_in_file]
+            ok = True
+        elif os.path.exists(fname_desc):
+            mylog.info('Reading hydro file descriptor.')
+            # For now, we can only read double precision fields
+            fields = [f[0] for f in _read_fluid_file_descriptor(fname_desc)]
+
+            # We get no fields for old-style hydro file descriptor
+            ok = len(fields) > 0
+
+        if not ok:
             foldername  = os.path.abspath(os.path.dirname(ds.parameter_filename))
             rt_flag = any(glob.glob(os.sep.join([foldername, 'info_rt_*.txt'])))
             if rt_flag: # rt run

diff -r d9879655ae05856a973887c5b3c74938674137c5 -r 9ee097408c6cccbffe576be4b1838c48086a7407 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -199,7 +199,7 @@
 
 def _read_part_file_descriptor(fname):
     """
-    Read the particle file descriptor and returns the array of the fields found.
+    Read a file descriptor and return the list of fields found.
     """
     VERSION_RE = re.compile('# version: *(\d+)')
     VAR_DESC_RE = re.compile(r'\s*(\d+),\s*(\w+),\s*(\w+)')
@@ -253,3 +253,56 @@
             raise YTParticleOutputFormatNotImplemented()
 
     return fields
+
+def _read_fluid_file_descriptor(fname):
+    """
+    Read a file descriptor and return the list of fields found.
+    """
+    VERSION_RE = re.compile('# version: *(\d+)')
+    VAR_DESC_RE = re.compile(r'\s*(\d+),\s*(\w+),\s*(\w+)')
+
+    # Mapping
+    mapping = [
+        ('density', 'Density'),
+        ('velocity_x', 'x-velocity'),
+        ('velocity_y', 'y-velocity'),
+        ('velocity_z', 'z-velocity'),
+        ('pressure', 'Pressure'),
+        ('metallicity', 'Metallicity'),
+    ]
+    # Convert in dictionary
+    mapping = {k: v for k, v in mapping}
+
+    with open(fname, 'r') as f:
+        line = f.readline()
+        tmp = VERSION_RE.match(line)
+        mylog.info('Reading fluid file descriptor.')
+        if not tmp:
+            return []
+
+        version = int(tmp.group(1))
+
+        if version == 1:
+            # Skip one line (containing the headers)
+            line = f.readline()
+            fields = []
+            for i, line in enumerate(f.readlines()):
+                tmp = VAR_DESC_RE.match(line)
+                if not tmp:
+                    raise YTFileNotParseable(fname, i+1)
+
+                # ivar = tmp.group(1)
+                varname = tmp.group(2)
+                dtype = tmp.group(3)
+
+                if varname in mapping:
+                    varname = mapping[varname]
+                else:
+                    varname = 'particle_%s' % varname
+
+                fields.append((varname, dtype))
+        else:
+            mylog.error('Version %s', version)
+            raise YTParticleOutputFormatNotImplemented()
+
+    return fields

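For reference, the version-1 descriptor layout that ``_read_fluid_file_descriptor``
parses can be sketched as below. The sample file content is made up for the
example, but the two regular expressions are the ones used in the changeset.

    import re

    # Illustrative ``hydro_file_descriptor.txt`` content (version-1 layout).
    sample = """# version: 1
    # ivar, variable_name, variable_type
      1, density, d
      2, velocity_x, d
    """

    VERSION_RE = re.compile(r'# version: *(\d+)')
    VAR_DESC_RE = re.compile(r'\s*(\d+),\s*(\w+),\s*(\w+)')

    lines = sample.splitlines()
    version = int(VERSION_RE.match(lines[0]).group(1))  # -> 1
    fields = [(m.group(2), m.group(3))
              for m in (VAR_DESC_RE.match(l) for l in lines[2:]) if m]
    # fields == [('density', 'd'), ('velocity_x', 'd')]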

https://bitbucket.org/yt_analysis/yt/commits/d161c9d35bd8/
Changeset:   d161c9d35bd8
User:        Corentin Cadiou
Date:        2017-12-07 09:26:22+00:00
Summary:     remove more utf8
Affected #:  1 file

diff -r 9ee097408c6cccbffe576be4b1838c48086a7407 -r d161c9d35bd8f56713aafac45be1dab4a68a08a9 yt/frontends/ramses/particle_handlers.py
--- a/yt/frontends/ramses/particle_handlers.py
+++ b/yt/frontends/ramses/particle_handlers.py
@@ -48,7 +48,7 @@
 
     # These properties are computed dynamically
     field_offsets = None     # Mapping from field to offset in file
-    field_types = None       # Mapping from field to the type of the data (float, integer, …)
+    field_types = None       # Mapping from field to the type of the data (float, integer, ...)
     local_particle_count = None  # The number of particle in the domain
 
     def __init__(self, ds, domain_id):
@@ -107,7 +107,7 @@
         '''
         This function should return True if the kind of particle
         represented by the class exists in the dataset. It takes as
-        argument the class itself —not an instance— and a dataset.
+        argument the class itself -not an instance- and a dataset.
 
         Arguments
         ---------


https://bitbucket.org/yt_analysis/yt/commits/22afc2c63f2a/
Changeset:   22afc2c63f2a
User:        Corentin Cadiou
Date:        2017-12-07 12:05:51+00:00
Summary:     use p2-aware copying
Affected #:  2 files

diff -r d161c9d35bd8f56713aafac45be1dab4a68a08a9 -r 22afc2c63f2a9b67450de18a3cf7a81b9e322eb4 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -139,7 +139,8 @@
     def create_rt_fields(self):
         self.ds.fluid_types += ('rt', )
         tmp = RTFieldFileHandler.rt_parameters
-        p = tmp.copy()
+        # Copy the list
+        p = list(tmp)
         p.update(self.ds.parameters)
         ngroups = p['nGroups']
         rt_c = p['rt_c_frac'] * units.c / (p['unit_l'] / p['unit_t'])

diff -r d161c9d35bd8f56713aafac45be1dab4a68a08a9 -r 22afc2c63f2a9b67450de18a3cf7a81b9e322eb4 yt/frontends/ramses/particle_handlers.py
--- a/yt/frontends/ramses/particle_handlers.py
+++ b/yt/frontends/ramses/particle_handlers.py
@@ -195,7 +195,7 @@
                 _read_part_file_descriptor(self.file_descriptor)
             )
         else:
-            particle_fields = self.known_fields.copy()
+            particle_fields = list(self.known_fields)
 
             if self.ds._extra_particle_fields is not None:
                 particle_fields += self.ds._extra_particle_fields
@@ -306,9 +306,8 @@
                 _read_part_file_descriptor(self.file_descriptor)
             )
         else:
-            fields = self.known_fields.copy()
+            fields = list(self.known_fields)
 
-        fields = self.known_fields.copy()
         for i in range(self.ds.dimensionality*2+1):
             for j in range(self.ds.max_level, self.ds.min_level):
                 fields.append((

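The switch to ``list(...)`` matters because ``list.copy()`` only exists on
Python 3; copying through the constructor behaves the same on both
interpreters. A minimal sketch with a made-up field list:

    known_fields = [("particle_position_x", "d")]

    fields = list(known_fields)            # shallow copy, Python 2 and 3
    fields.append(("particle_extra", "d"))

    assert known_fields == [("particle_position_x", "d")]  # source unchanged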

https://bitbucket.org/yt_analysis/yt/commits/8264d6ee9e80/
Changeset:   8264d6ee9e80
User:        Corentin Cadiou
Date:        2017-12-09 08:46:15+00:00
Summary:     update -> extend
Affected #:  1 file

diff -r 22afc2c63f2a9b67450de18a3cf7a81b9e322eb4 -r 8264d6ee9e80af3298da262048c5096deb6cc837 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -141,7 +141,7 @@
         tmp = RTFieldFileHandler.rt_parameters
         # Copy the list
         p = list(tmp)
-        p.update(self.ds.parameters)
+        p.extend(self.ds.parameters)
         ngroups = p['nGroups']
         rt_c = p['rt_c_frac'] * units.c / (p['unit_l'] / p['unit_t'])
         dens_conv = (p['unit_np'] / rt_c).value / units.cm**3


https://bitbucket.org/yt_analysis/yt/commits/cb8e7fee45cf/
Changeset:   cb8e7fee45cf
User:        Corentin Cadiou
Date:        2017-12-09 09:36:00+00:00
Summary:     Re-add removed fields
Affected #:  2 files

diff -r 8264d6ee9e80af3298da262048c5096deb6cc837 -r cb8e7fee45cfc2b48cdcc8edbd3f79f680ddf26d yt/frontends/ramses/field_handlers.py
--- a/yt/frontends/ramses/field_handlers.py
+++ b/yt/frontends/ramses/field_handlers.py
@@ -289,7 +289,7 @@
                               "x-velocity", "y-velocity", "z-velocity",
                               "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
                               "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
-                              "Pressure","Metallicity"]
+                              "Pressure", "Metallicity"]
             mylog.debug("No fields specified by user; automatically setting fields array to %s"
                         % str(fields))
 
@@ -365,14 +365,10 @@
         cls.field_list = [(cls.ftype, f) for f in fields]
         return fields
 
-    @property
-    def rt_parameters(self):
-        if self._rt_parameters: return self._rt_parameters
+    @classmethod
+    def get_rt_parameters(cls, ds):
+        if cls.rt_parameters: return cls.rt_parameters
 
         # Call detect fields to get the rt_parameters
-        self.detect_fields(RTFieldFileHandler, self.ds)
-        return self._rt_parameters
-
-    @rt_parameters.setter
-    def rt_parameters(self, val):
-        self._rt_parameters = val
+        cls.detect_fields(ds)
+        return cls.rt_parameters

diff -r 8264d6ee9e80af3298da262048c5096deb6cc837 -r cb8e7fee45cfc2b48cdcc8edbd3f79f680ddf26d yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -74,7 +74,7 @@
         ("x-velocity", (vel_units, ["velocity_x"], None)),
         ("y-velocity", (vel_units, ["velocity_y"], None)),
         ("z-velocity", (vel_units, ["velocity_z"], None)),
-        ("Pres_IR", (pressure_units, ["pres_IR"], None)),
+        ("Pres_IR", (pressure_units, ["pres_IR", "pressure_IR"], None)),
         ("Pressure", (pressure_units, ["pressure"], None)),
         ("Metallicity", ("", ["metallicity"], None)),
         ("HII",  ("", ["H_p1_fraction"], None)),
@@ -138,14 +138,36 @@
 
     def create_rt_fields(self):
         self.ds.fluid_types += ('rt', )
-        tmp = RTFieldFileHandler.rt_parameters
-        # Copy the list
-        p = list(tmp)
-        p.extend(self.ds.parameters)
+        p = RTFieldFileHandler.get_rt_parameters(self.ds).copy()
+        p.update(self.ds.parameters)
         ngroups = p['nGroups']
         rt_c = p['rt_c_frac'] * units.c / (p['unit_l'] / p['unit_t'])
         dens_conv = (p['unit_np'] / rt_c).value / units.cm**3
 
+        ########################################
+        # Adding the fields in the hydro_* files
+        def _temp_IR(field, data):
+            rv = data["gas", "pres_IR"]/data["gas", "density"]
+            rv *= mass_hydrogen_cgs/boltzmann_constant_cgs
+            return rv
+        self.add_field(("gas", "temp_IR"), sampling_type="cell",
+                       function=_temp_IR,
+                       units=self.ds.unit_system["temperature"])
+        for species in ['H_p1', 'He_p1', 'He_p2']:
+            def _species_density(field, data):
+                return data['gas', species+'_fraction']*data['gas', 'density']
+            self.add_field(('gas', species+'_density'), sampling_type='cell',
+                           function=_species_density,
+                           units=self.ds.unit_system['density'])
+            def _species_mass(field, data):
+                return (data['gas', species+'_density']*
+                        data['index', 'cell_volume'])
+            self.add_field(('gas', species+'_mass'), sampling_type='cell',
+                           function=_species_mass,
+                           units=self.ds.unit_system['mass'])
+
+        ########################################
+        # Adding the fields in the rt_ files
         def gen_pdens(igroup):
             def _photon_density(field, data):
                 rv = data['ramses-rt', 'Photon_density_%s' % (igroup + 1)] * dens_conv

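The species fields re-added above follow yt's usual derived-field pattern.
A standalone sketch, with an illustrative dataset path and relying on the
``H_p1_fraction`` alias this frontend defines:

    import yt

    ds = yt.load("output_00080/info_00080.txt")  # illustrative path

    def _h_p1_density(field, data):
        # species fraction times gas density gives the species mass density
        return data['gas', 'H_p1_fraction'] * data['gas', 'density']

    ds.add_field(('gas', 'H_p1_density'), sampling_type='cell',
                 function=_h_p1_density,
                 units=ds.unit_system['density'])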

https://bitbucket.org/yt_analysis/yt/commits/85fba33b0f67/
Changeset:   85fba33b0f67
User:        Corentin Cadiou
Date:        2017-12-09 10:50:42+00:00
Summary:     fix flake8
Affected #:  1 file

diff -r cb8e7fee45cfc2b48cdcc8edbd3f79f680ddf26d -r 85fba33b0f67fb493c79c11354ddc1d51c5396f3 yt/frontends/ramses/field_handlers.py
--- a/yt/frontends/ramses/field_handlers.py
+++ b/yt/frontends/ramses/field_handlers.py
@@ -242,12 +242,12 @@
 
         ok = False
         if ds._fields_in_file is not None:
-            fields = [f for f in ds._fields_in_file]
+            fields = list(ds._fields_in_file)
             ok = True
         elif os.path.exists(fname_desc):
             mylog.info('Reading hydro file descriptor.')
             # For now, we can only read double precision fields
-            fields = [f[0] for f in _read_fluid_file_descriptor(fname_desc)]
+            fields = [e[0] for e in _read_fluid_file_descriptor(fname_desc)]
 
             # We get no fields for old-style hydro file descriptor
             ok = len(fields) > 0


https://bitbucket.org/yt_analysis/yt/commits/33936c7b7425/
Changeset:   33936c7b7425
User:        Corentin Cadiou
Date:        2017-12-09 15:11:14+00:00
Summary:     update failing test
Affected #:  2 files

diff -r 85fba33b0f67fb493c79c11354ddc1d51c5396f3 -r 33936c7b74255d514ba70698cad613df0eef02e5 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -361,13 +361,12 @@
 
     def _get_particle_type_counts(self):
         npart = 0
-        npart = {k: 0 for k in self.ds.particle_types}
-        do_all = 'all' in self.ds.particle_types
+        npart = {k: 0 for k in self.ds.particle_types
+                 if k is not 'all'}
         for dom in self.domains:
             for fh in dom.particle_handlers:
                 count = fh.local_particle_count
                 npart[fh.ptype] += count
-                if do_all: npart['all'] += count
 
         return npart
 

diff -r 85fba33b0f67fb493c79c11354ddc1d51c5396f3 -r 33936c7b74255d514ba70698cad613df0eef02e5 yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -49,6 +49,7 @@
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
         assert_equal(s1, s2)
+
     assert_equal(ds.particle_type_counts, {'io': 1090895})
 
 @requires_file(output_00080)
@@ -238,6 +239,5 @@
     ds = yt.load(ramses_sink)
     pcount = ds.particle_type_counts
 
-    assert_equal(pcount['all'], 17140, err_msg='Got wrong number of particle')
     assert_equal(pcount['io'], 17132, err_msg='Got wrong number of io particle')
     assert_equal(pcount['sink'], 8, err_msg='Got wrong number of sink particle')

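With this change ``particle_type_counts`` reports one entry per on-disk
particle type and no aggregated ``'all'`` key, matching the updated test.
The dataset path below is illustrative:

    import yt

    ds = yt.load("ramses_sink_00016/info_00016.txt")
    print(ds.particle_type_counts)   # e.g. {'io': 17132, 'sink': 8}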

https://bitbucket.org/yt_analysis/yt/commits/f4c1e77231ad/
Changeset:   f4c1e77231ad
User:        Corentin Cadiou
Date:        2017-12-09 19:56:37+00:00
Summary:     flaking
Affected #:  1 file

diff -r 33936c7b74255d514ba70698cad613df0eef02e5 -r f4c1e77231adc2f0d0abb62f4d1b21192d2a3a17 yt/frontends/ramses/field_handlers.py
--- a/yt/frontends/ramses/field_handlers.py
+++ b/yt/frontends/ramses/field_handlers.py
@@ -300,7 +300,7 @@
             count_extra += 1
         if count_extra > 0:
             mylog.debug('Detected %s extra fluid fields.' % count_extra)
-        cls.field_list = [(cls.ftype, f) for f in fields]
+        cls.field_list = [(cls.ftype, e) for e in fields]
 
         return fields
 
@@ -362,7 +362,7 @@
             tmp = ["Photon_density_%s", "Photon_flux_x_%s", "Photon_flux_y_%s", "Photon_flux_z_%s"]
             fields.extend([t % (ng + 1) for t in tmp])
 
-        cls.field_list = [(cls.ftype, f) for f in fields]
+        cls.field_list = [(cls.ftype, e) for e in fields]
         return fields
 
     @classmethod


https://bitbucket.org/yt_analysis/yt/commits/680517fbe50a/
Changeset:   680517fbe50a
User:        Corentin Cadiou
Date:        2017-12-20 20:54:02+00:00
Summary:     info → debug
Affected #:  2 files

diff -r f4c1e77231adc2f0d0abb62f4d1b21192d2a3a17 -r 680517fbe50a8ea4d3606df89dc7f9df3a5e4569 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -459,7 +459,7 @@
                                     filtered_type='io', requires=['particle_family'])
 
             for k in particle_families.keys():
-                mylog.info('Adding particle_type: %s' % k)
+                mylog.debug('Adding particle_type: %s' % k)
                 self.add_particle_filter('%s' % k)
 
     def __repr__(self):

diff -r f4c1e77231adc2f0d0abb62f4d1b21192d2a3a17 -r 680517fbe50a8ea4d3606df89dc7f9df3a5e4569 yt/frontends/ramses/field_handlers.py
--- a/yt/frontends/ramses/field_handlers.py
+++ b/yt/frontends/ramses/field_handlers.py
@@ -245,7 +245,7 @@
             fields = list(ds._fields_in_file)
             ok = True
         elif os.path.exists(fname_desc):
-            mylog.info('Reading hydro file descriptor.')
+            mylog.debug('Reading hydro file descriptor.')
             # For now, we can only read double precision fields
             fields = [e[0] for e in _read_fluid_file_descriptor(fname_desc)]
 
@@ -345,7 +345,7 @@
             for i in range(3): read_rhs(float)
 
             # Touchy part, we have to read the photon group properties
-            mylog.warning('Not reading photon group properties')
+            mylog.debug('Not reading photon group properties')
 
             cls.rt_parameters = rheader
 


https://bitbucket.org/yt_analysis/yt/commits/9e24f81b4351/
Changeset:   9e24f81b4351
User:        Corentin Cadiou
Date:        2017-12-20 21:20:54+00:00
Summary:     use default units
Affected #:  1 file

diff -r 680517fbe50a8ea4d3606df89dc7f9df3a5e4569 -r 9e24f81b435177bac156622a02cdf103cf093f7b yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -36,7 +36,7 @@
 ener_units = "code_mass * code_velocity**2 / code_time**2"
 ang_mom_units = "code_mass * code_velocity * code_length"
 flux_unit = "1 / code_length**2 / code_time"
-dens_unit = "1 / code_length**3"
+number_density_unit = "1 / code_length**3"
 
 known_species_masses = dict(
   (sp, mh * v) for sp, v in [
@@ -177,7 +177,7 @@
         for igroup in range(ngroups):
             self.add_field(('rt', 'photon_density_%s' % (igroup + 1)), sampling_type='cell',
                            function=gen_pdens(igroup),
-                           units=dens_unit)
+                           units=self.ds.unit_system['number_density'])
 
         flux_conv = p['unit_pf'] / units.cm**2 / units.s
 
@@ -187,6 +187,7 @@
                 return rv
             return _photon_flux
 
+        flux_unit = str(1/self.ds.unit_system['time']/self.ds.unit_system['length']**2)
         for key in 'xyz':
             for igroup in range(ngroups):
                 self.add_field(('rt', 'photon_flux_%s_%s' % (key, igroup + 1)), sampling_type='cell',
@@ -208,9 +209,9 @@
                      'logT' : np.log10(data["temperature"]).ravel()}
                 rv = 10**interp_object(d).reshape(shape)
                 # Return array in unit 'per volume' consistently with line below
-                return data.ds.arr(rv, dens_unit)
-            self.add_field(name = name, sampling_type="cell", function=_func,
-                                 units = dens_unit)
+                return data.ds.arr(rv, number_density_unit)
+            self.add_field(name=name, sampling_type="cell", function=_func,
+                           units=self.ds.unit_system['number_density'])
         avals = {}
         tvals = {}
         with open(filename, "rb") as f:

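The idiom used here looks a dimension up in the dataset's unit system
instead of hard-coding code units. A sketch with an illustrative dataset
path:

    import yt

    ds = yt.load("output_00080/info_00080.txt")  # illustrative path

    # Unit registered for the number-density dimension (e.g. cm**-3).
    nd_unit = ds.unit_system['number_density']

    # Photon flux unit built from the system's time and length units,
    # exactly as in the changeset above.
    flux_unit = str(1 / ds.unit_system['time'] / ds.unit_system['length']**2)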

https://bitbucket.org/yt_analysis/yt/commits/ac3fed1b16cf/
Changeset:   ac3fed1b16cf
User:        Corentin Cadiou
Date:        2017-12-21 17:49:30+00:00
Summary:     Explicit message for subclassing
Affected #:  2 files

diff -r 9e24f81b435177bac156622a02cdf103cf093f7b -r ac3fed1b16cf060323d9093adc2d6622b8629a2b yt/frontends/ramses/field_handlers.py
--- a/yt/frontends/ramses/field_handlers.py
+++ b/yt/frontends/ramses/field_handlers.py
@@ -119,6 +119,7 @@
         the RAMSES Dataset structure to determine if the particle type
         (e.g. regular particles) exists.
         '''
+        # this function must be implemented by subclasses
         raise NotImplementedError
 
     @classmethod
@@ -133,6 +134,7 @@
         * field_list: list of (ftype, fname)
            The list of the field present in the file
         '''
+        # this function must be implemented by subclasses
         raise NotImplementedError
 
     @property

diff -r 9e24f81b435177bac156622a02cdf103cf093f7b -r ac3fed1b16cf060323d9093adc2d6622b8629a2b yt/frontends/ramses/particle_handlers.py
--- a/yt/frontends/ramses/particle_handlers.py
+++ b/yt/frontends/ramses/particle_handlers.py
@@ -119,6 +119,7 @@
         the RAMSES Dataset structure to determine if the particle type
         (e.g. regular particles) exists.
         '''
+        # this function must be implemented by subclasses
         raise NotImplementedError
 
 
@@ -137,6 +138,7 @@
            A dictionary that maps `(type, field_name)` to their type
            (character), following Python's struct convention.
         '''
+        # this function must be implemented by subclasses
         raise NotImplementedError
 
 


https://bitbucket.org/yt_analysis/yt/commits/b86e1f3e5be2/
Changeset:   b86e1f3e5be2
User:        Corentin Cadiou
Date:        2017-12-21 17:50:25+00:00
Summary:     Register → Registry
Affected #:  2 files

diff -r ac3fed1b16cf060323d9093adc2d6622b8629a2b -r b86e1f3e5be2cb3b4c2e19e92278ebdebc542863 yt/frontends/ramses/field_handlers.py
--- a/yt/frontends/ramses/field_handlers.py
+++ b/yt/frontends/ramses/field_handlers.py
@@ -16,7 +16,7 @@
     FIELD_HANDLERS.add(ph)
 
 
-class RAMSESFieldFileHandlerRegister(type):
+class RAMSESFieldFileHandlerRegistry(type):
     """
     This is a base class that on instantiation registers the file
     handler into the list. Used as a metaclass.
@@ -28,7 +28,7 @@
         return cls
 
 
-@add_metaclass(RAMSESFieldFileHandlerRegister)
+@add_metaclass(RAMSESFieldFileHandlerRegistry)
 class FieldFileHandler(object):
     '''
     Abstract class to handle particles in RAMSES. Each instance

diff -r ac3fed1b16cf060323d9093adc2d6622b8629a2b -r b86e1f3e5be2cb3b4c2e19e92278ebdebc542863 yt/frontends/ramses/particle_handlers.py
--- a/yt/frontends/ramses/particle_handlers.py
+++ b/yt/frontends/ramses/particle_handlers.py
@@ -15,7 +15,7 @@
     PARTICLE_HANDLERS.add(ph)
 
 
-class RAMSESParticleFileHandlerRegister(type):
+class RAMSESParticleFileHandlerRegistry(type):
     """
     This is a base class that on instantiation registers the file
     handler into the list. Used as a metaclass.
@@ -27,7 +27,7 @@
         return cls
 
 
-@add_metaclass(RAMSESParticleFileHandlerRegister)
+@add_metaclass(RAMSESParticleFileHandlerRegistry)
 class ParticleFileHandler(object):
     '''
     Abstract class to handle particles in RAMSES. Each instance

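Stripped of the yt specifics, the registry metaclass renamed here works as
in this minimal sketch (plain Python 3 class syntax is used instead of
``six.add_metaclass``, for brevity):

    HANDLERS = set()

    class HandlerRegistry(type):
        # Creating any class whose ftype is not None registers it.
        def __new__(meta, name, bases, class_dict):
            cls = type.__new__(meta, name, bases, class_dict)
            if cls.ftype is not None:
                HANDLERS.add(cls)
            return cls

    class BaseHandler(metaclass=HandlerRegistry):
        ftype = None      # abstract base, stays out of the registry

    class HydroHandler(BaseHandler):
        ftype = 'ramses'  # concrete subclass, registered on creation

    assert HydroHandler in HANDLERS and BaseHandler not in HANDLERS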

https://bitbucket.org/yt_analysis/yt/commits/fb764fb1218e/
Changeset:   fb764fb1218e
User:        Corentin Cadiou
Date:        2017-12-21 17:51:30+00:00
Summary:     revert change to mylog.debug
Affected #:  1 file

diff -r b86e1f3e5be2cb3b4c2e19e92278ebdebc542863 -r fb764fb1218e247d63d2241b5d39be311587f7d3 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -459,7 +459,7 @@
                                     filtered_type='io', requires=['particle_family'])
 
             for k in particle_families.keys():
-                mylog.debug('Adding particle_type: %s' % k)
+                mylog.info('Adding particle_type: %s' % k)
                 self.add_particle_filter('%s' % k)
 
     def __repr__(self):


https://bitbucket.org/yt_analysis/yt/commits/6d651dcc2ec6/
Changeset:   6d651dcc2ec6
User:        ngoldbaum
Date:        2018-01-15 14:24:24+00:00
Summary:     Merge pull request #1641 from cphyc/refactoring-hydro

RAMSES Refactoring hydro + particles
Affected #:  7 files

diff -r 874b6eac680daf627a1af1c38a6dae82e898baa8 -r 6d651dcc2ec6fd148eb2d66fd880b4541c834081 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -2004,7 +2004,7 @@
 yt will attempt to guess the fields in the file. For more control over the hydro fields or the particle fields, see :ref:`loading-ramses-data-args`.
 
 yt also support the new way particles are handled introduced after
-version ``stable_17_091`` (the version introduced after the 2017 Ramses
+version ``stable_17_09`` (the version introduced after the 2017 Ramses
 User Meeting). In this case, the file ``part_file_descriptor.txt``
 containing the different fields in the particle files will be read. If
 you use a custom version of RAMSES, make sure this file is up-to-date
@@ -2095,7 +2095,8 @@
          the selected region, you may want to use ``ds.box(…)``.
 
       .. note::
-	 This feature is only available when using Hilbert ordering.
+	 The ``bbox`` feature is only available for datasets using
+	 Hilbert ordering.
 
 
 Adding custom particle fields

diff -r 874b6eac680daf627a1af1c38a6dae82e898baa8 -r 6d651dcc2ec6fd148eb2d66fd880b4541c834081 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -15,7 +15,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import glob
 import os
 import numpy as np
 import stat
@@ -35,12 +34,13 @@
     OctreeSubset
 from yt.data_objects.particle_filters import add_particle_filter
 
+from yt.utilities.physical_constants import mp, kb
 from .definitions import ramses_header, field_aliases, particle_families
-from .io import _read_part_file_descriptor
-from yt.utilities.physical_constants import mp, kb
 from .fields import \
     RAMSESFieldInfo, _X
 from .hilbert import get_cpu_list
+from .particle_handlers import get_particle_handlers
+from .field_handlers import get_field_handlers
 import yt.utilities.fortran_utils as fpu
 from yt.geometry.oct_container import \
     RAMSESOctreeContainer
@@ -56,7 +56,6 @@
     def __init__(self, ds, domain_id):
         self.ds = ds
         self.domain_id = domain_id
-        self.nvar = 0 # Set this later!
 
         num = os.path.basename(ds.parameter_filename).split("."
                 )[0].split("_")[1]
@@ -65,13 +64,33 @@
         basename = "%s/%%s_%s.out%05i" % (
             basedir, num, domain_id)
         part_file_descriptor = "%s/part_file_descriptor.txt" % basedir
-        for t in ['grav', 'hydro', 'part', 'amr', 'sink']:
+        for t in ['grav', 'amr']:
             setattr(self, "%s_fn" % t, basename % t)
         self._part_file_descriptor = part_file_descriptor
         self._read_amr_header()
-        self._read_hydro_header()
-        self._read_particle_header()
-        self._read_sink_header()
+        # self._read_hydro_header()
+
+        # Autodetect field files
+        field_handlers = [FH(self)
+                          for FH in get_field_handlers()
+                          if FH.any_exist(ds)]
+        self.field_handlers = field_handlers
+        for fh in field_handlers:
+            mylog.debug('Detected particle type %s in domain_id=%s' % (fh.ftype, domain_id))
+            fh.detect_fields(ds)
+            # self._add_ftype(fh.ftype)
+
+        # Autodetect particle files
+        particle_handlers = [PH(ds, domain_id)
+                             for PH in get_particle_handlers()
+                             if PH.any_exist(ds)]
+        self.particle_handlers = particle_handlers
+        for ph in particle_handlers:
+            mylog.debug('Detected particle type %s in domain_id=%s' % (ph.ptype, domain_id))
+            ph.read_header()
+            # self._add_ptype(ph.ptype)
+
+        # Load the AMR structure
         self._read_amr()
 
     _hydro_offset = None
@@ -81,224 +100,15 @@
         return "RAMSESDomainFile: %i" % self.domain_id
 
     @property
-    def _has_hydro(self):
-        '''
-        Does the output include hydro?
-        '''
-        return os.path.exists(self.hydro_fn)
-
-    @property
-    def _has_sink(self):
-        '''
-        Does the output include sinks (black holes)?
-        '''
-        return os.path.exists(self.sink_fn)
-
-    @property
-    def _has_part_descriptor(self):
-        '''
-        Does the output include particle file descriptor?
-        '''
-        return os.path.exists(self._part_file_descriptor)
-
-    @property
     def level_count(self):
-        if self._level_count is not None: return self._level_count
-        self.hydro_offset
-        return self._level_count
-
-    @property
-    def hydro_offset(self):
-        if self._hydro_offset is not None: return self._hydro_offset
-        # We now have to open the file and calculate it
-        f = open(self.hydro_fn, "rb")
-        fpu.skip(f, 6)
-        # It goes: level, CPU, 8-variable
-        min_level = self.ds.min_level
-        n_levels = self.amr_header['nlevelmax'] - min_level
-        hydro_offset = np.zeros(n_levels, dtype='int64')
-        hydro_offset -= 1
-        level_count = np.zeros(n_levels, dtype='int64')
-        skipped = []
-        for level in range(self.amr_header['nlevelmax']):
-            for cpu in range(self.amr_header['nboundary'] +
-                             self.amr_header['ncpu']):
-                header = ( ('file_ilevel', 1, 'I'),
-                           ('file_ncache', 1, 'I') )
-                try:
-                    hvals = fpu.read_attrs(f, header, "=")
-                except AssertionError:
-                    print("You are running with the wrong number of fields.")
-                    print("If you specified these in the load command, check the array length.")
-                    print("In this file there are %s hydro fields." % skipped)
-                    #print"The last set of field sizes was: %s" % skipped
-                    raise
-                if hvals['file_ncache'] == 0: continue
-                assert(hvals['file_ilevel'] == level+1)
-                if cpu + 1 == self.domain_id and level >= min_level:
-                    hydro_offset[level - min_level] = f.tell()
-                    level_count[level - min_level] = hvals['file_ncache']
-                skipped = fpu.skip(f, 8 * self.nvar)
-        self._hydro_offset = hydro_offset
-        self._level_count = level_count
-        return self._hydro_offset
-
-    def _add_ptype(self, ptype):
-        if hasattr(self, 'particle_types'):
-            new = set(self.particle_types)
-        else:
-            new = set()
-        new.add(ptype)
-        self.particle_types = self.particle_types_raw = tuple(new)
-
-    def _read_hydro_header(self):
-        # If no hydro file is found, return
-        if not self._has_hydro:
-            return
-        if self.nvar > 0: return self.nvar
-        # Read the number of hydro variables
-        f = open(self.hydro_fn, "rb")
-        fpu.skip(f, 1)
-        self.nvar = fpu.read_vector(f, "i")[0]
-
-
-    def _read_sink_header(self):
-        if not self._has_sink:
-            self.local_sink_count = 0
-            self.sink_field_offsets = {}
-            return
-        f = open(self.sink_fn, "rb")
-        f.seek(0, os.SEEK_END)
-        flen = f.tell()
-        f.seek(0)
-        hvals = {}
-        attrs = (('nsink', 1, 'I'),
-                 ('nindsink', 1, 'I'))
-        hvals.update(fpu.read_attrs(f, attrs))
-        self.sink_header = hvals
-        self.local_sink_count = hvals['nsink']
-
-        sink_fields = [
-            ("particle_identifier", "i"),
-            ("particle_mass", "d"),
-            ("particle_position_x", "d"),
-            ("particle_position_y", "d"),
-            ("particle_position_z", "d"),
-            ("particle_velocity_x", "d"),
-            ("particle_velocity_y", "d"),
-            ("particle_velocity_z", "d"),
-            ("particle_age", "d"),
-            ("BH_real_accretion", "d"),
-            ("BH_bondi_accretion", "d"),
-            ("BH_eddington_accretion", "d"),
-            ("BH_esave", "d"),
-            ("gas_spin_x", "d"),
-            ("gas_spin_y", "d"),
-            ("gas_spin_z", "d"),
-            ("BH_spin_x", "d"),
-            ("BH_spin_y", "d"),
-            ("BH_spin_z", "d"),
-            ("BH_spin", "d"),
-            ("BH_efficiency", "d")]
-
-        for i in range(self.ds.dimensionality*2+1):
-            for j in range(self.ds.max_level, self.ds.min_level):
-                sink_fields.append((
-                    "particle_prop_%s_%s" % (i, j), "d"
-                ))
-
-        field_offsets = {}
-        _pfields = {}
-        for field, vtype in sink_fields:
-            if f.tell() >= flen: break
-            field_offsets["sink", field] = f.tell()
-            _pfields["sink", field] = vtype
-            fpu.skip(f, 1)
-        self.sink_field_offsets = field_offsets
-        self.sink_field_types = _pfields
-
-        self._add_ptype('sink')
-
-
-    def _read_particle_header(self):
-        if not os.path.exists(self.part_fn):
-            self.local_particle_count = 0
-            self.particle_field_offsets = {}
-            return
-
-        f = open(self.part_fn, "rb")
-        f.seek(0, os.SEEK_END)
-        flen = f.tell()
-        f.seek(0)
-        hvals = {}
-        attrs = ( ('ncpu', 1, 'I'),
-                  ('ndim', 1, 'I'),
-                  ('npart', 1, 'I') )
-        hvals.update(fpu.read_attrs(f, attrs))
-        fpu.read_vector(f, 'I')
-
-        attrs = ( ('nstar_tot', 1, 'I'),
-                  ('mstar_tot', 1, 'd'),
-                  ('mstar_lost', 1, 'd'),
-                  ('nsink', 1, 'I') )
-        hvals.update(fpu.read_attrs(f, attrs))
-        self.particle_header = hvals
-        self.local_particle_count = hvals['npart']
-
-        # Try reading particle file descriptor
-        if self._has_part_descriptor:
-            particle_fields = (
-                _read_part_file_descriptor(self._part_file_descriptor))
-        else:
-            particle_fields = [
-                ("particle_position_x", "d"),
-                ("particle_position_y", "d"),
-                ("particle_position_z", "d"),
-                ("particle_velocity_x", "d"),
-                ("particle_velocity_y", "d"),
-                ("particle_velocity_z", "d"),
-                ("particle_mass", "d"),
-                ("particle_identifier", "i"),
-                ("particle_refinement_level", "I")]
-
-            if self.ds._extra_particle_fields is not None:
-                particle_fields += self.ds._extra_particle_fields
-
-        ptype = 'io'
-
-        field_offsets = {}
-        _pfields = {}
-
-
-        # Read offsets
-        for field, vtype in particle_fields:
-            if f.tell() >= flen: break
-            field_offsets[ptype, field] = f.tell()
-            _pfields[ptype, field] = vtype
-            fpu.skip(f, 1)
-
-        iextra = 0
-        while f.tell() < flen:
-            iextra += 1
-            field, vtype = ('particle_extra_field_%i' % iextra, 'd')
-            particle_fields.append((field, vtype))
-
-            field_offsets[ptype, field] = f.tell()
-            _pfields[ptype, field] = vtype
-            fpu.skip(f, 1)
-
-        if iextra > 0 and not self.ds._warn_extra_fields:
-            self.ds._warn_extra_fields = True
-            w = ("Detected %s extra particle fields assuming kind "
-                 "`double`. Consider using the `extra_particle_fields` "
-                 "keyword argument if you have unexpected behavior.")
-            mylog.warning(w % iextra)
-
-        self.particle_field_offsets = field_offsets
-        self.particle_field_types = _pfields
-
-        # Register the particle type
-        self._add_ptype(ptype)
+        lvl_count = None
+        for fh in self.field_handlers:
+            fh.offset
+            if lvl_count is None:
+                lvl_count = fh.level_count.copy()
+            else:
+                lvl_count += fh._level_count
+        return lvl_count
 
     def _read_amr_header(self):
         hvals = {}
@@ -428,32 +238,38 @@
     _domain_offset = 1
     _block_reorder = "F"
 
-    def fill(self, content, fields, selector):
+    def fill(self, content, fields, selector, file_handler):
         # Here we get a copy of the file, which we skip through and read the
         # bits we want.
         oct_handler = self.oct_handler
-        all_fields = self.domain.ds.index.fluid_field_list
+        all_fields = [f for ft, f in file_handler.field_list]
         fields = [f for ft, f in fields]
         tr = {}
         cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
         levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
             selector, self.domain_id, cell_count)
+        # Initializing data container
         for field in fields:
             tr[field] = np.zeros(cell_count, 'float64')
-        for level, offset in enumerate(self.domain.hydro_offset):
+
+        # Loop over levels
+        for level, offset in enumerate(file_handler.offset):
             if offset == -1: continue
             content.seek(offset)
-            nc = self.domain.level_count[level]
-            temp = {}
+            nc = file_handler.level_count[level]
+            tmp = {}
+            # Initialize temporary data container for io
             for field in all_fields:
-                temp[field] = np.empty((nc, 8), dtype="float64")
+                tmp[field] = np.empty((nc, 8), dtype="float64")
             for i in range(8):
+                # Read the selected fields
                 for field in all_fields:
                     if field not in fields:
                         fpu.skip(content)
                     else:
-                        temp[field][:,i] = fpu.read_vector(content, 'd') # cell 1
-            oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, temp)
+                        tmp[field][:,i] = fpu.read_vector(content, 'd') # i-th cell
+
+            oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, tmp)
         return tr
 
 class RAMSESIndex(OctreeIndex):
@@ -483,90 +299,22 @@
         self.num_grids = total_octs
 
     def _detect_output_fields(self):
-        # Do we want to attempt to figure out what the fields are in the file?
         dsl = set([])
-        if self.fluid_field_list is None or len(self.fluid_field_list) <= 0:
-            self._setup_auto_fields()
 
+        # Get the detected particle fields
         for domain in self.domains:
-            dsl.update(set(domain.particle_field_offsets.keys()))
-            dsl.update(set(domain.sink_field_offsets.keys()))
+            for ph in domain.particle_handlers:
+                dsl.update(set(ph.field_offsets.keys()))
 
         self.particle_field_list = list(dsl)
-        self.field_list = [("ramses", f) for f in self.fluid_field_list] \
-                        + self.particle_field_list
 
-    def _setup_auto_fields(self):
-        '''
-        If no fluid fields are set, the code tries to set up a fluids array by hand
-        '''
-        # TODO: copy/pasted from DomainFile; needs refactoring!
-        num = os.path.basename(self.dataset.parameter_filename).split("."
-                )[0].split("_")[1]
-        testdomain = 1 # Just pick the first domain file to read
-        basename = "%s/%%s_%s.out%05i" % (
-            os.path.abspath(
-              os.path.dirname(self.dataset.parameter_filename)),
-            num, testdomain)
-        hydro_fn = basename % "hydro"
-        # Do we have a hydro file?
-        if not os.path.exists(hydro_fn):
-            self.fluid_field_list = []
-            return
-        # Read the number of hydro variables
-        f = open(hydro_fn, "rb")
-        hydro_header = ( ('ncpu', 1, 'i'),
-                         ('nvar', 1, 'i'),
-                         ('ndim', 1, 'i'),
-                         ('nlevelmax', 1, 'i'),
-                         ('nboundary', 1, 'i'),
-                         ('gamma', 1, 'd')
-                         )
-        hvals = fpu.read_attrs(f, hydro_header)
-        self.ds.gamma = hvals['gamma']
-        nvar = hvals['nvar']
-        # OK, we got NVAR, now set up the arrays depending on what NVAR is
-        # but first check for radiative transfer!
-        foldername  = os.path.abspath(os.path.dirname(self.ds.parameter_filename))
-        rt_flag = any(glob.glob(os.sep.join([foldername, 'info_rt_*.txt'])))
-        if rt_flag: # rt run
-            if nvar < 10:
-                mylog.info('Detected RAMSES-RT file WITHOUT IR trapping.')
-                fields = ["Density", "x-velocity", "y-velocity", "z-velocity", "Pressure", "Metallicity", "HII", "HeII", "HeIII"]
-            else:
-                mylog.info('Detected RAMSES-RT file WITH IR trapping.')
-                fields = ["Density", "x-velocity", "y-velocity", "z-velocity", "Pres_IR", "Pressure", "Metallicity", "HII", "HeII", "HeIII"]
-        else:
-            if nvar < 5:
-                mylog.debug("nvar=%s is too small! YT doesn't currently support 1D/2D runs in RAMSES %s")
-                raise ValueError
-            # Basic hydro runs
-            if nvar == 5:
-                fields = ["Density",
-                          "x-velocity", "y-velocity", "z-velocity",
-                          "Pressure"]
-            if nvar > 5 and nvar < 11:
-                fields = ["Density",
-                          "x-velocity", "y-velocity", "z-velocity",
-                          "Pressure", "Metallicity"]
-            # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
-            if nvar == 11:
-                fields = ["Density",
-                          "x-velocity", "y-velocity", "z-velocity",
-                          "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
-                          "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
-                          "Pressure"]
-            if nvar > 11:
-                fields = ["Density",
-                          "x-velocity", "y-velocity", "z-velocity",
-                          "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
-                          "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
-                          "Pressure","Metallicity"]
-        # Allow some wiggle room for users to add too many variables
-        while len(fields) < nvar:
-            fields.append("var"+str(len(fields)))
-        mylog.debug("No fields specified by user; automatically setting fields array to %s", str(fields))
-        self.fluid_field_list = fields
+        # Get the detected fields
+        dsl = set([])
+        for fh in self.domains[0].field_handlers:
+            dsl.update(set(fh.field_list))
+        self.fluid_field_list = list(dsl)
+
+        self.field_list = self.particle_field_list + self.fluid_field_list
 
     def _identify_base_chunk(self, dobj):
         if getattr(dobj, "_chunk_info", None) is None:
@@ -586,7 +334,7 @@
 
     def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
         sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        for i,og in enumerate(sobjs):
+        for i, og in enumerate(sobjs):
             if ngz > 0:
                 g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
             else:
@@ -613,23 +361,28 @@
 
     def _get_particle_type_counts(self):
         npart = 0
+        npart = {k: 0 for k in self.ds.particle_types
+                 if k is not 'all'}
         for dom in self.domains:
-            npart += dom.local_particle_count
+            for fh in dom.particle_handlers:
+                count = fh.local_particle_count
+                npart[fh.ptype] += count
 
-        return {'io': npart}
+        return npart
 
     def print_stats(self):
+        '''
+        Prints out (stdout) relevant information about the simulation
 
-        # This function prints information based on the fluid on the grids,
-        # and therefore does not work for DM only runs.
+        This function prints information based on the fluid on the grids,
+        and therefore does not work for DM only runs.
+        '''
         if not self.fluid_field_list:
             print("This function is not implemented for DM only runs")
             return
 
         self._initialize_level_stats()
-        """
-        Prints out (stdout) relevant information about the simulation
-        """
+
         header = "%3s\t%14s\t%14s" % ("level", "# cells","# cells^3")
         print(header)
         print("%s" % (len(header.expandtabs())*"-"))
@@ -677,7 +430,6 @@
         cosmological: If set to None, automatically detect cosmological simulation. If a boolean, force
                       its value.
         '''
-        self.fluid_types += ("ramses",)
         self._fields_in_file = fields
         self._extra_particle_fields = extra_particle_fields
         self._warn_extra_fields = False
@@ -685,7 +437,9 @@
         self._bbox = bbox
         Dataset.__init__(self, filename, dataset_type, units_override=units_override,
                          unit_system=unit_system)
-
+        for FH in get_field_handlers():
+            if FH.any_exist(self):
+                self.fluid_types += (FH.ftype, )
         self.storage_filename = storage_filename
 
 
@@ -837,17 +591,13 @@
 
             self.current_time = (self.time_tot + self.time_simu)/(self.hubble_constant*1e7/3.08e24)/self.parameters['unit_t']
 
-        # Check for the presence of sink files
-        sink_files = os.path.join(
-            os.path.split(self.parameter_filename)[0],
-            'sink_?????.out?????')
-        has_sink = len(glob.glob(sink_files))
+        # Add the particle types
+        ptypes = []
+        for PH in get_particle_handlers():
+            if PH.any_exist(self):
+                ptypes.append(PH.ptype)
 
-        if has_sink:
-            ptypes = ('io', 'sink')
-        else:
-            ptypes = ('io', )
-
+        ptypes = tuple(ptypes)
         self.particle_types = self.particle_types_raw = ptypes
 
 

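The per-domain autodetection this merge introduces reduces to filtering the
two handler registries; a sketch of the pattern, wrapped in a function since
``ds`` and ``domain`` only exist inside the index machinery:

    from yt.frontends.ramses.field_handlers import get_field_handlers
    from yt.frontends.ramses.particle_handlers import get_particle_handlers

    def detect_handlers(ds, domain):
        # Instantiate one handler per domain for every type whose files exist.
        field_handlers = [FH(domain) for FH in get_field_handlers()
                          if FH.any_exist(ds)]
        particle_handlers = [PH(ds, domain.domain_id)
                             for PH in get_particle_handlers()
                             if PH.any_exist(ds)]
        return field_handlers, particle_handlers
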
diff -r 874b6eac680daf627a1af1c38a6dae82e898baa8 -r 6d651dcc2ec6fd148eb2d66fd880b4541c834081 yt/frontends/ramses/field_handlers.py
--- /dev/null
+++ b/yt/frontends/ramses/field_handlers.py
@@ -0,0 +1,376 @@
+import os
+import yt.utilities.fortran_utils as fpu
+import glob
+from yt.extern.six import add_metaclass
+from yt.funcs import mylog
+import numpy as np
+
+from .io import _read_fluid_file_descriptor
+
+FIELD_HANDLERS = set()
+
+def get_field_handlers():
+    return FIELD_HANDLERS
+
+def register_field_handler(ph):
+    FIELD_HANDLERS.add(ph)
+
+
+class RAMSESFieldFileHandlerRegistry(type):
+    """
+    This is a base class that on instantiation registers the file
+    handler into the list. Used as a metaclass.
+    """
+    def __new__(meta, name, bases, class_dict):
+        cls = type.__new__(meta, name, bases, class_dict)
+        if cls.ftype is not None:
+            register_field_handler(cls)
+        return cls
+
+
+@add_metaclass(RAMSESFieldFileHandlerRegistry)
+class FieldFileHandler(object):
+    '''
+    Abstract class to handle particles in RAMSES. Each instance
+    represents a single file (one domain).
+
+    To add support to a new particle file, inherit from this class and
+    implement all functions containing a `NotImplementedError`.
+
+    See `SinkParticleFileHandler` for an example implementation.'''
+
+    # These properties are static properties
+    ftype = None  # The name to give to the field type
+    fname = None  # The name of the file(s)
+    attrs = None  # The attributes of the header
+    known_fields = None  # A list of tuple containing the field name and its type
+    file_descriptor = None # The name of the file descriptor (if any)
+
+    # These properties are computed dynamically
+    field_offsets = None     # Mapping from field to offset in file
+    field_types = None       # Mapping from field to the type of the data (float, integer, ...)
+
+    def __init__(self, domain):
+        '''
+        Initialize an instance of the class. This automatically sets
+        the full path to the file. This is not intended to be
+        overridden in most cases.
+
+        If you need more flexibility, rewrite this function to your
+        needs in the inherited class.
+        '''
+        self.domain = domain
+        self.domain_id = domain.domain_id
+        ds = domain.ds
+        basename = os.path.abspath(
+              os.path.dirname(ds.parameter_filename))
+        iout = int(
+            os.path.basename(ds.parameter_filename)
+            .split(".")[0].
+            split("_")[1])
+
+        self.fname = os.path.join(
+            basename,
+            self.fname.format(iout=iout, icpu=domain.domain_id))
+
+        if self.file_descriptor is not None:
+            self.file_descriptor = os.path.join(
+                basename,
+                self.file_descriptor)
+
+    @property
+    def exists(self):
+        '''
+        This function should return True if the *file* for the domain
+        exists. It is called for each file of the type found on the
+        disk.
+
+        By default, it just returns whether the file exists. Override
+        it for more complex cases.
+        '''
+        return os.path.exists(self.fname)
+
+    @property
+    def has_part_descriptor(self):
+        '''
+        This function should return True if a *file descriptor*
+        exists.
+
+        By default, it just returns whether the file exists. Override
+        it for more complex cases.
+        '''
+        return os.path.exists(self.file_descriptor)
+
+
+    @classmethod
+    def any_exist(cls, ds):
+        '''
+        This function should return True if the kind of field
+        represented by the class exists in the dataset. It takes as
+        argument the class itself - not an instance - and a dataset.
+
+        Arguments
+        ---------
+        * ds: a Ramses Dataset
+
+        Note
+        ----
+        This function is usually called once at the initialization of
+        the RAMSES Dataset structure to determine if the particle type
+        (e.g. regular particles) exists.
+        '''
+        # this function must be implemented by subclasses
+        raise NotImplementedError
+
+    @classmethod
+    def detect_fields(cls, ds):
+        '''
+        Called once to setup the fields of this type
+
+        It should set the following static variables:
+        * parameters: dictionary
+           Dictionary containing the variables. The keys should match
+           those of `cls.attrs`
+        * field_list: list of (ftype, fname)
+           The list of the field present in the file
+        '''
+        # this function must be implemented by subclasses
+        raise NotImplementedError
+
+    @property
+    def level_count(self):
+        '''
+        Return the number of cells per level.
+        '''
+        if getattr(self, '_level_count', None) is not None:
+            return self._level_count
+        self.offset
+
+        return self._level_count
+
+    @property
+    def offset(self):
+        '''
+        Compute the offsets of the fields.
+
+        By default, it skips the header (as defined by `cls.attrs`)
+        and computes the offset at each level.
+
+        It should be generic enough for most of the cases, but if the
+        *structure* of your fluid file is non-canonical, change this.
+        '''
+
+        if getattr(self, '_offset', None) is not None:
+            return self._offset
+
+        nvar = self.parameters['nvar']
+        with open(self.fname, 'rb') as f:
+            # Skip headers
+            nskip = len(self.attrs)
+            fpu.skip(f, nskip)
+
+            # It goes: level, CPU, 8-variable (1 cube)
+            min_level = self.domain.ds.min_level
+            n_levels = self.domain.amr_header['nlevelmax'] - min_level
+            offset = np.zeros(n_levels, dtype='int64')
+            offset -= 1
+            level_count = np.zeros(n_levels, dtype='int64')
+            skipped = []
+            amr_header = self.domain.amr_header
+            for level in range(amr_header['nlevelmax']):
+                for cpu in range(amr_header['nboundary'] +
+                                 amr_header['ncpu']):
+                    header = ( ('file_ilevel', 1, 'I'),
+                               ('file_ncache', 1, 'I') )
+                    try:
+                        hvals = fpu.read_attrs(f, header, "=")
+                    except AssertionError:
+                        mylog.error(
+                            "You are running with the wrong number of fields. "
+                            "If you specified these in the load command, check the array length. "
+                            "In this file there are %s hydro fields." % skipped)
+                        raise
+                    if hvals['file_ncache'] == 0: continue
+                    assert(hvals['file_ilevel'] == level+1)
+                    if cpu + 1 == self.domain_id and level >= min_level:
+                        offset[level - min_level] = f.tell()
+                        level_count[level - min_level] = hvals['file_ncache']
+                    skipped = fpu.skip(f, 8 * nvar)
+        self._offset = offset
+        self._level_count = level_count
+        return self._offset
+
+
+class HydroFieldFileHandler(FieldFileHandler):
+    ftype = 'ramses'
+    fname = 'hydro_{iout:05d}.out{icpu:05d}'
+    file_descriptor = 'hydro_file_descriptor.txt'
+    attrs = ( ('ncpu', 1, 'i'),
+              ('nvar', 1, 'i'),
+              ('ndim', 1, 'i'),
+              ('nlevelmax', 1, 'i'),
+              ('nboundary', 1, 'i'),
+              ('gamma', 1, 'd'))
+
+    @classmethod
+    def any_exist(cls, ds):
+        files = os.path.join(
+            os.path.split(ds.parameter_filename)[0],
+            'hydro_?????.out?????')
+        ret = len(glob.glob(files)) > 0
+        cls._any_exist = ret
+        return ret
+
+    @classmethod
+    def detect_fields(cls, ds):
+        num = os.path.basename(ds.parameter_filename).split("."
+                )[0].split("_")[1]
+        testdomain = 1 # Just pick the first domain file to read
+        basepath = os.path.abspath(
+              os.path.dirname(ds.parameter_filename))
+        basename = "%s/%%s_%s.out%05i" % (
+            basepath, num, testdomain)
+        fname = basename % 'hydro'
+        fname_desc = os.path.join(basepath, cls.file_descriptor)
+
+        f = open(fname, 'rb')
+        attrs = cls.attrs
+        hvals = fpu.read_attrs(f, attrs)
+        cls.parameters = hvals
+
+        # Store some metadata
+        ds.gamma = hvals['gamma']
+        nvar = hvals['nvar']
+
+        ok = False
+        if ds._fields_in_file is not None:
+            fields = list(ds._fields_in_file)
+            ok = True
+        elif os.path.exists(fname_desc):
+            mylog.debug('Reading hydro file descriptor.')
+            # For now, we can only read double precision fields
+            fields = [e[0] for e in _read_fluid_file_descriptor(fname_desc)]
+
+            # We get no fields for old-style hydro file descriptor
+            ok = len(fields) > 0
+
+        if not ok:
+            foldername  = os.path.abspath(os.path.dirname(ds.parameter_filename))
+            rt_flag = any(glob.glob(os.sep.join([foldername, 'info_rt_*.txt'])))
+            if rt_flag: # rt run
+                if nvar < 10:
+                    mylog.info('Detected RAMSES-RT file WITHOUT IR trapping.')
+                    fields = ["Density", "x-velocity", "y-velocity", "z-velocity", "Pressure",
+                              "Metallicity", "HII", "HeII", "HeIII"]
+                else:
+                    mylog.info('Detected RAMSES-RT file WITH IR trapping.')
+                    fields = ["Density", "x-velocity", "y-velocity", "z-velocity", "Pres_IR",
+                              "Pressure", "Metallicity", "HII", "HeII", "HeIII"]
+            else:
+                if nvar < 5:
+                    mylog.error(
+                        "nvar=%s is too small! yt does not currently "
+                        "support 1D/2D runs in RAMSES." % nvar)
+                    raise ValueError("Too few hydro variables: nvar=%s" % nvar)
+                # Basic hydro runs
+                if nvar == 5:
+                    fields = ["Density",
+                              "x-velocity", "y-velocity", "z-velocity",
+                              "Pressure"]
+                if nvar > 5 and nvar < 11:
+                    fields = ["Density",
+                              "x-velocity", "y-velocity", "z-velocity",
+                              "Pressure", "Metallicity"]
+                # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
+                if nvar == 11:
+                    fields = ["Density",
+                              "x-velocity", "y-velocity", "z-velocity",
+                              "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
+                              "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
+                              "Pressure"]
+                if nvar > 11:
+                    fields = ["Density",
+                              "x-velocity", "y-velocity", "z-velocity",
+                              "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
+                              "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
+                              "Pressure", "Metallicity"]
+            mylog.debug("No fields specified by user; automatically setting fields array to %s"
+                        % str(fields))
+
+        # Allow some wiggle room for users to add too many variables
+        count_extra = 0
+        while len(fields) < nvar:
+            fields.append("var"+str(len(fields)))
+            count_extra += 1
+        if count_extra > 0:
+            mylog.debug('Detected %s extra fluid fields.' % count_extra)
+        cls.field_list = [(cls.ftype, e) for e in fields]
+
+        return fields
+
+class RTFieldFileHandler(FieldFileHandler):
+    ftype = 'ramses-rt'
+    fname = 'rt_{iout:05d}.out{icpu:05d}'
+    attrs = ( ('ncpu', 1, 'i'),
+              ('nvar', 1, 'i'),
+              ('ndim', 1, 'i'),
+              ('nlevelmax', 1, 'i'),
+              ('nboundary', 1, 'i'),
+              ('gamma', 1, 'd')
+    )
+
+    @classmethod
+    def any_exist(cls, ds):
+        files = os.path.join(
+            os.path.split(ds.parameter_filename)[0],
+            'info_rt_?????.txt')
+        ret = len(glob.glob(files)) == 1
+
+        cls._any_exist = ret
+        return ret
+
+    @classmethod
+    def detect_fields(cls, ds):
+        fname = ds.parameter_filename.replace('info_', 'info_rt_')
+
+        rheader = {}
+        def read_rhs(cast):
+            line = f.readline()
+            p, v = line.split("=")
+            rheader[p.strip()] = cast(v)
+
+        with open(fname, 'r') as f:
+            for i in range(4): read_rhs(int)
+            f.readline()
+            for i in range(2): read_rhs(float)
+            f.readline()
+            for i in range(3): read_rhs(float)
+            f.readline()
+            for i in range(3): read_rhs(float)
+
+            # Tricky part: the photon group properties should be read
+            # here, but this is not implemented yet
+            mylog.debug('Not reading photon group properties')
+
+            cls.rt_parameters = rheader
+
+        ngroups = rheader['nGroups']
+
+        iout = int(str(ds).split('_')[1])
+        basedir = os.path.split(ds.parameter_filename)[0]
+        fname = os.path.join(basedir, cls.fname.format(iout=iout, icpu=1))
+        with open(fname, 'rb') as f:
+            cls.parameters = fpu.read_attrs(f, cls.attrs)
+
+        fields = []
+        for ng in range(ngroups):
+            tmp = ["Photon_density_%s", "Photon_flux_x_%s", "Photon_flux_y_%s", "Photon_flux_z_%s"]
+            fields.extend([t % (ng + 1) for t in tmp])
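+            # e.g. with ngroups=2 this yields Photon_density_1,
+            # Photon_flux_x_1, ..., Photon_density_2, ..., Photon_flux_z_2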
+
+        cls.field_list = [(cls.ftype, e) for e in fields]
+        return fields
+
+    @classmethod
+    def get_rt_parameters(cls, ds):
+        if cls.rt_parameters: return cls.rt_parameters
+
+        # Call detect_fields to populate cls.rt_parameters
+        cls.detect_fields(ds)
+        return cls.rt_parameters

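For context, a minimal sketch of how these handler classes are driven
(not part of the changeset; the output path is hypothetical):

    import yt
    from yt.frontends.ramses.field_handlers import HydroFieldFileHandler

    ds = yt.load("output_00080/info_00080.txt")  # hypothetical path
    if HydroFieldFileHandler.any_exist(ds):
        # Opens the first hydro domain file and returns the detected
        # field names, e.g. ['Density', 'x-velocity', ..., 'Pressure']
        fields = HydroFieldFileHandler.detect_fields(ds)
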
diff -r 874b6eac680daf627a1af1c38a6dae82e898baa8 -r 6d651dcc2ec6fd148eb2d66fd880b4541c834081 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -13,10 +13,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import glob
 import os
 import numpy as np
 
+from yt import units
 from yt.utilities.physical_constants import \
     boltzmann_constant_cgs, \
     mass_hydrogen_cgs, \
@@ -26,6 +26,7 @@
 import yt.utilities.fortran_utils as fpu
 from yt.fields.field_info_container import \
     FieldInfoContainer
+from .field_handlers import RTFieldFileHandler
 
 b_units = "code_magnetic"
 ra_units = "code_length / code_time**2"
@@ -34,6 +35,8 @@
 pressure_units = "code_pressure"
 ener_units = "code_mass * code_velocity**2 / code_time**2"
 ang_mom_units = "code_mass * code_velocity * code_length"
+flux_unit = "1 / code_length**2 / code_time"
+number_density_unit = "1 / code_length**3"
 
 known_species_masses = dict(
   (sp, mh * v) for sp, v in [
@@ -71,7 +74,7 @@
         ("x-velocity", (vel_units, ["velocity_x"], None)),
         ("y-velocity", (vel_units, ["velocity_y"], None)),
         ("z-velocity", (vel_units, ["velocity_z"], None)),
-        ("Pres_IR", (pressure_units, ["pres_IR"], None)),
+        ("Pres_IR", (pressure_units, ["pres_IR", "pressure_IR"], None)),
         ("Pressure", (pressure_units, ["pressure"], None)),
         ("Metallicity", ("", ["metallicity"], None)),
         ("HII",  ("", ["H_p1_fraction"], None)),
@@ -127,13 +130,22 @@
         self.add_field(("gas", "temperature"), sampling_type="cell",  function=_temperature,
                         units=self.ds.unit_system["temperature"])
         self.create_cooling_fields()
+
         # See if we need to load the rt fields
-        foldername  = os.path.abspath(os.path.dirname(self.ds.parameter_filename))
-        rt_flag = any(glob.glob(os.sep.join([foldername, 'info_rt_*.txt'])))
+        rt_flag = RTFieldFileHandler.any_exist(self.ds)
         if rt_flag: # rt run
-            self.setup_rt_fields()
+            self.create_rt_fields()
 
-    def setup_rt_fields(self):
+    def create_rt_fields(self):
+        self.ds.fluid_types += ('rt', )
+        p = RTFieldFileHandler.get_rt_parameters(self.ds).copy()
+        p.update(self.ds.parameters)
+        ngroups = p['nGroups']
+        rt_c = p['rt_c_frac'] * units.c / (p['unit_l'] / p['unit_t'])
+        dens_conv = (p['unit_np'] / rt_c).value / units.cm**3
+
+        ########################################
+        # Adding the fields in the hydro_* files
         def _temp_IR(field, data):
             rv = data["gas", "pres_IR"]/data["gas", "density"]
             rv *= mass_hydrogen_cgs/boltzmann_constant_cgs
@@ -154,6 +166,34 @@
                            function=_species_mass,
                            units=self.ds.unit_system['mass'])
 
+        ########################################
+        # Adding the fields in the rt_ files
+        def gen_pdens(igroup):
+            def _photon_density(field, data):
+                rv = data['ramses-rt', 'Photon_density_%s' % (igroup + 1)] * dens_conv
+                return rv
+            return _photon_density
+
+        for igroup in range(ngroups):
+            self.add_field(('rt', 'photon_density_%s' % (igroup + 1)), sampling_type='cell',
+                           function=gen_pdens(igroup),
+                           units=self.ds.unit_system['number_density'])
+
+        flux_conv = p['unit_pf'] / units.cm**2 / units.s
+
+        def gen_flux(key, igroup):
+            def _photon_flux(field, data):
+                rv = data['ramses-rt', 'Photon_flux_%s_%s' % (key, igroup + 1)] * flux_conv
+                return rv
+            return _photon_flux
+
+        flux_unit = str(1/self.ds.unit_system['time']/self.ds.unit_system['length']**2)
+        for key in 'xyz':
+            for igroup in range(ngroups):
+                self.add_field(('rt', 'photon_flux_%s_%s' % (key, igroup + 1)), sampling_type='cell',
+                               function=gen_flux(key, igroup),
+                               units=flux_unit)
+
 
     def create_cooling_fields(self):
         num = os.path.basename(self.ds.parameter_filename).split("."
@@ -169,9 +209,9 @@
                      'logT' : np.log10(data["temperature"]).ravel()}
                 rv = 10**interp_object(d).reshape(shape)
                 # Return array in 'per volume' units, consistent with the add_field call below
-                return data.ds.arr(rv, 'code_length**-3')
-            self.add_field(name = name, sampling_type="cell", function=_func,
-                                 units = "code_length**-3")
+                return data.ds.arr(rv, number_density_unit)
+            self.add_field(name=name, sampling_type="cell", function=_func,
+                           units=self.ds.unit_system['number_density'])
         avals = {}
         tvals = {}
         with open(filename, "rb") as f:

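Downstream, the new derived RT fields are exposed under the 'rt' fluid
type. A minimal usage sketch, assuming a RAMSES-RT output with at least
one photon group (the path is hypothetical):

    import yt

    ds = yt.load("output_00010/info_00010.txt")  # hypothetical RT run
    ad = ds.all_data()
    # photon_density_* is registered with number-density units and
    # photon_flux_*_* with 1/time/length**2 units (see create_rt_fields)
    print(ad['rt', 'photon_density_1'].in_units('cm**-3'))
    print(ad['rt', 'photon_flux_x_1'])
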
diff -r 874b6eac680daf627a1af1c38a6dae82e898baa8 -r 6d651dcc2ec6fd148eb2d66fd880b4541c834081 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -34,7 +34,7 @@
     from cStringIO import StringIO as IO
 
 def _ramses_particle_file_handler(fname, foffsets, data_types,
-                                  subset, fields):
+                                  subset, fields, count):
     '''General file handler, called by _read_particle_subset
 
     Parameters
@@ -44,11 +44,13 @@
     foffsets: dict
         Offsets in file of the fields
     data_types: dict
-         Data type of the fields
+        Data type of the fields
     subset: ``RAMSESDomainSubset``
-         A RAMSES domain subset object
+        A RAMSES domain subset object
     fields: list of tuple
-         The fields to read
+        The fields to read
+    count: integer
+        The number of particles expected in the file (may be 0)
     '''
     tr = {}
     with open(fname, "rb") as f:
@@ -56,6 +58,9 @@
         # This means that no other conversions need to be applied to convert
         # positions into the same domain as the octs themselves.
         for field in sorted(fields, key=lambda a: foffsets[a]):
+            if count == 0:
+                tr[field] = np.empty(0, dtype=data_types[field])
+                continue
             f.seek(foffsets[field])
             dt = data_types[field]
             tr[field] = fpu.read_vector(f, dt)
@@ -78,19 +83,27 @@
 class IOHandlerRAMSES(BaseIOHandler):
     _dataset_type = "ramses"
 
-    def _read_fluid_selection(self, chunks, selector, fields, size):
-        # Chunks in this case will have affiliated domain subset objects
-        # Each domain subset will contain a hydro_offset array, which gives
-        # pointers to level-by-level hydro information
+    def _generic_fluid_handler(self, chunks, selector, fields, size, ftype):
         tr = defaultdict(list)
+
         for chunk in chunks:
             for subset in chunk.objs:
+                fname = None
+                for fh in subset.domain.field_handlers:
+                    if fh.ftype == ftype:
+                        file_handler = fh
+                        fname = fh.fname
+                        break
+
+                if fname is None:
+                    raise YTFieldTypeNotFound(ftype)
+
                 # Now we read the entire thing
-                f = open(subset.domain.hydro_fn, "rb")
+                with open(fname, "rb") as f:
+                    content = IO(f.read())
                 # This contains the boundary information, so we skim through
                 # and pick off the right vectors
-                content = IO(f.read())
-                rv = subset.fill(content, fields, selector)
+                rv = subset.fill(content, fields, selector, file_handler)
                 for ft, f in fields:
                     d = rv.pop(f)
                     mylog.debug("Filling %s with %s (%0.3e %0.3e) (%s zones)",
@@ -99,6 +112,23 @@
         d = {}
         for field in fields:
             d[field] = np.concatenate(tr.pop(field))
+
+        return d
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        d = {}
+
+        # Group fields by field type
+        for ft in set(f[0] for f in fields):
+            # Select the fields for the current reader
+            fields_subs = [f for f in fields if f[0] == ft]
+
+            newd = self._generic_fluid_handler(chunks, selector, fields_subs, size,
+                                               ft)
+            d.update(newd)
+
         return d
 
     def _read_particle_coords(self, chunks, ptf):
@@ -141,34 +171,35 @@
         '''Read the particle files.'''
         tr = {}
 
-        # Sequential read depending on particle type (io or sink)
+        # Sequential read depending on particle type
         for ptype in set(f[0] for f in fields):
 
             # Select relevant fields
             subs_fields = filter(lambda f: f[0] == ptype, fields)
 
-            if ptype == 'io':
-                fname = subset.domain.part_fn
-                foffsets = subset.domain.particle_field_offsets
-                data_types = subset.domain.particle_field_types
-
-            elif ptype == 'sink':
-                fname = subset.domain.sink_fn
-                foffsets = subset.domain.sink_field_offsets
-                data_types = subset.domain.sink_field_types
-
-            else:
-                # Raise here an exception
+            ok = False
+            for ph in subset.domain.particle_handlers:
+                if ph.ptype == ptype:
+                    fname = ph.fname
+                    foffsets = ph.field_offsets
+                    data_types = ph.field_types
+                    ok = True
+                    count = ph.local_particle_count
+                    break
+            if not ok:
                 raise YTFieldTypeNotFound(ptype)
 
             tr.update(_ramses_particle_file_handler(
-                fname, foffsets, data_types, subset, subs_fields))
+                fname, foffsets, data_types, subset, subs_fields,
+                count=count
+            ))
 
         return tr
 
+
 def _read_part_file_descriptor(fname):
     """
-    Read the particle file descriptor and returns the array of the fields found.
+    Read a file descriptor and return the list of fields found.
     """
     VERSION_RE = re.compile(r'# version: *(\d+)')
     VAR_DESC_RE = re.compile(r'\s*(\d+),\s*(\w+),\s*(\w+)')
@@ -222,3 +253,56 @@
             raise YTParticleOutputFormatNotImplemented()
 
     return fields
+
+def _read_fluid_file_descriptor(fname):
+    """
+    Read a fluid file descriptor and return the list of fields found.
+    """
+    VERSION_RE = re.compile(r'# version: *(\d+)')
+    VAR_DESC_RE = re.compile(r'\s*(\d+),\s*(\w+),\s*(\w+)')
+
+    # Mapping
+    mapping = [
+        ('density', 'Density'),
+        ('velocity_x', 'x-velocity'),
+        ('velocity_y', 'y-velocity'),
+        ('velocity_z', 'z-velocity'),
+        ('pressure', 'Pressure'),
+        ('metallicity', 'Metallicity'),
+    ]
+    # Convert to a dictionary
+    mapping = dict(mapping)
+
+    with open(fname, 'r') as f:
+        line = f.readline()
+        tmp = VERSION_RE.match(line)
+        mylog.info('Reading fluid file descriptor.')
+        if not tmp:
+            return []
+
+        version = int(tmp.group(1))
+
+        if version == 1:
+            # Skip one line (containing the headers)
+            line = f.readline()
+            fields = []
+            for i, line in enumerate(f.readlines()):
+                tmp = VAR_DESC_RE.match(line)
+                if not tmp:
+                    raise YTFileNotParseable(fname, i+1)
+
+                # ivar = tmp.group(1)
+                varname = tmp.group(2)
+                dtype = tmp.group(3)
+
+                if varname in mapping:
+                    varname = mapping[varname]
+                else:
+                    varname = 'particle_%s' % varname
+
+                fields.append((varname, dtype))
+        else:
+            mylog.error('Unsupported fluid file descriptor version %s', version)
+            raise YTParticleOutputFormatNotImplemented()
+
+    return fields

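For reference, _read_fluid_file_descriptor expects a version-1
descriptor file shaped like the illustration below (hand-written to
match VERSION_RE and VAR_DESC_RE, not taken from a real output); for
this input it would return [('Density', 'd'), ('x-velocity', 'd'),
('y-velocity', 'd'), ('z-velocity', 'd'), ('Pressure', 'd')]:

    # version:  1
    # ivar, variable_name, variable_type
      1, density, d
      2, velocity_x, d
      3, velocity_y, d
      4, velocity_z, d
      5, pressure, d
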
diff -r 874b6eac680daf627a1af1c38a6dae82e898baa8 -r 6d651dcc2ec6fd148eb2d66fd880b4541c834081 yt/frontends/ramses/particle_handlers.py
--- /dev/null
+++ b/yt/frontends/ramses/particle_handlers.py
@@ -0,0 +1,331 @@
+import os
+import yt.utilities.fortran_utils as fpu
+import glob
+from yt.extern.six import add_metaclass
+from yt.funcs import mylog
+
+from .io import _read_part_file_descriptor
+
+PARTICLE_HANDLERS = set()
+
+def get_particle_handlers():
+    return PARTICLE_HANDLERS
+
+def register_particle_handler(ph):
+    PARTICLE_HANDLERS.add(ph)
+
+
+class RAMSESParticleFileHandlerRegistry(type):
+    """
+    Metaclass that registers every concrete file handler subclass
+    into the global registry at class-creation time.
+    """
+    def __new__(meta, name, bases, class_dict):
+        cls = type.__new__(meta, name, bases, class_dict)
+        if cls.ptype is not None:
+            register_particle_handler(cls)
+        return cls
+
+
+@add_metaclass(RAMSESParticleFileHandlerRegistry)
+class ParticleFileHandler(object):
+    '''
+    Abstract class to handle particles in RAMSES. Each instance
+    represents a single file (one domain).
+
+    To add support to a new particle file, inherit from this class and
+    implement all functions containing a `NotImplementedError`.
+
+    See `SinkParticleFileHandler` for an example implementation.'''
+
+    # These properties are static properties
+    ptype = None  # The name to give to the particle type
+    fname = None  # The name of the file(s).
+    file_descriptor = None # The name of the file descriptor (if any)
+
+    attrs = None  # The attributes of the header
+    known_fields = None  # A list of (field name, field type) tuples
+
+    # These properties are computed dynamically
+    field_offsets = None     # Mapping from field to offset in file
+    field_types = None       # Mapping from field to the type of the data (float, integer, ...)
+    local_particle_count = None  # The number of particles in the domain
+
+    def __init__(self, ds, domain_id):
+        '''
+        Initialize an instance of the class. This automatically sets
+        the full path to the file. This is not intended to be
+        overridden in most cases.
+
+        If you need more flexibility, override this method as needed
+        in the subclass.
+        '''
+        self.ds = ds
+        self.domain_id = domain_id
+        basedir = os.path.abspath(
+            os.path.dirname(ds.parameter_filename))
+        iout = int(
+            os.path.basename(ds.parameter_filename)
+            .split(".")[0]
+            .split("_")[1])
+        icpu = domain_id
+
+        self.fname = os.path.join(
+            basedir,
+            self.fname.format(iout=iout, icpu=icpu))
+
+        if self.file_descriptor is not None:
+            self.file_descriptor = os.path.join(
+                basedir,
+                self.file_descriptor)
+
+    @property
+    def exists(self):
+        '''
+        This property should return True if the *file* the instance
+        represents exists. It is called for each file of this type
+        found on disk.
+
+        By default, it just returns whether the file exists. Override
+        it for more complex cases.
+        '''
+        return os.path.exists(self.fname)
+
+    @property
+    def has_part_descriptor(self):
+        '''
+        This function should return True if a *file descriptor*
+        exists.
+
+        By default, it just returns whether the file exists. Override
+        it for more complex cases.
+        '''
+        return os.path.exists(self.file_descriptor)
+
+    @classmethod
+    def any_exist(cls, ds):
+        '''
+        This function should return True if the kind of particle
+        represented by the class exists in the dataset. It takes as
+        argument the class itself (not an instance) and a dataset.
+
+        Arguments
+        ---------
+        * ds: a Ramses Dataset
+
+        Note
+        ----
+        This function is usually called once at the initialization of
+        the RAMSES Dataset structure to determine if the particle type
+        (e.g. regular particles) exists.
+        '''
+        # this function must be implemented by subclasses
+        raise NotImplementedError
+
+
+    def read_header(self):
+        '''
+        This function is called once per file. It should:
+        * read the header of the file and store any relevant information
+        * detect the fields in the file
+        * compute the offsets (location in the file) of each field
+
+        It is in charge of setting `self.field_offsets` and `self.field_types`.
+        * `field_offsets`: dictionary: tuple -> integer
+           A dictionary that maps `(type, field_name)` to their
+           location in the file (integer)
+        * `field_types`: dictionary: tuple -> character
+           A dictionary that maps `(type, field_name)` to their type
+           (character), following Python's struct convention.
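+
+        For example (illustrative values only)::
+
+            self.field_offsets = {('io', 'particle_position_x'): 1024}
+            self.field_types = {('io', 'particle_position_x'): 'd'}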
+        '''
+        # this function must be implemented by subclasses
+        raise NotImplementedError
+
+
+class DefaultParticleFileHandler(ParticleFileHandler):
+    ptype = 'io'
+    fname = 'part_{iout:05d}.out{icpu:05d}'
+    file_descriptor = 'part_file_descriptor.txt'
+
+    attrs = ( ('ncpu', 1, 'I'),
+              ('ndim', 1, 'I'),
+              ('npart', 1, 'I'),
+              ('localseed', 4, 'I'),
+              ('nstar_tot', 1, 'I'),
+              ('mstar_tot', 1, 'd'),
+              ('mstar_lost', 1, 'd'),
+              ('nsink', 1, 'I') )
+
+    known_fields = [
+        ("particle_position_x", "d"),
+        ("particle_position_y", "d"),
+        ("particle_position_z", "d"),
+        ("particle_velocity_x", "d"),
+        ("particle_velocity_y", "d"),
+        ("particle_velocity_z", "d"),
+        ("particle_mass", "d"),
+        ("particle_identifier", "i"),
+        ("particle_refinement_level", "I")]
+
+
+    @classmethod
+    def any_exist(cls, ds):
+        files = os.path.join(
+            os.path.split(ds.parameter_filename)[0],
+            'part_?????.out?????')
+        ret = len(glob.glob(files)) > 0
+        return ret
+
+    def read_header(self):
+        if not self.exists:
+            self.field_offsets = {}
+            self.field_types = {}
+            self.local_particle_count = 0
+            return
+        f = open(self.fname, "rb")
+        f.seek(0, os.SEEK_END)
+        flen = f.tell()
+        f.seek(0)
+        hvals = {}
+        attrs = self.attrs
+        hvals.update(fpu.read_attrs(f, attrs))
+        self.header = hvals
+        self.local_particle_count = hvals['npart']
+
+        if self.has_part_descriptor:
+            particle_fields = (
+                _read_part_file_descriptor(self.file_descriptor)
+            )
+        else:
+            particle_fields = list(self.known_fields)
+
+            if self.ds._extra_particle_fields is not None:
+                particle_fields += self.ds._extra_particle_fields
+
+        field_offsets = {}
+        _pfields = {}
+
+        ptype = self.ptype
+
+        # Read offsets
+        for field, vtype in particle_fields:
+            if f.tell() >= flen: break
+            field_offsets[ptype, field] = f.tell()
+            _pfields[ptype, field] = vtype
+            fpu.skip(f, 1)
+
+        iextra = 0
+        while f.tell() < flen:
+            iextra += 1
+            field, vtype = ('particle_extra_field_%i' % iextra, 'd')
+            particle_fields.append((field, vtype))
+
+            field_offsets[ptype, field] = f.tell()
+            _pfields[ptype, field] = vtype
+            fpu.skip(f, 1)
+
+        if iextra > 0 and not self.ds._warn_extra_fields:
+            self.ds._warn_extra_fields = True
+            w = ("Detected %s extra particle fields assuming kind "
+                 "`double`. Consider using the `extra_particle_fields` "
+                 "keyword argument if you have unexpected behavior.")
+            mylog.warning(w % iextra)
+
+        self.field_offsets = field_offsets
+        self.field_types = _pfields
+
+
+class SinkParticleFileHandler(ParticleFileHandler):
+    '''Handle sink files'''
+    ptype = 'sink'
+    fname = 'sink_{iout:05d}.out{icpu:05d}'
+    file_descriptor = 'sink_file_descriptor.txt'
+
+    attrs = (('nsink', 1, 'I'),
+             ('nindsink', 1, 'I'))
+
+    known_fields = [
+        ("particle_identifier", "i"),
+        ("particle_mass", "d"),
+        ("particle_position_x", "d"),
+        ("particle_position_y", "d"),
+        ("particle_position_z", "d"),
+        ("particle_velocity_x", "d"),
+        ("particle_velocity_y", "d"),
+        ("particle_velocity_z", "d"),
+        ("particle_age", "d"),
+        ("BH_real_accretion", "d"),
+        ("BH_bondi_accretion", "d"),
+        ("BH_eddington_accretion", "d"),
+        ("BH_esave", "d"),
+        ("gas_spin_x", "d"),
+        ("gas_spin_y", "d"),
+        ("gas_spin_z", "d"),
+        ("BH_spin_x", "d"),
+        ("BH_spin_y", "d"),
+        ("BH_spin_z", "d"),
+        ("BH_spin", "d"),
+        ("BH_efficiency", "d")]
+
+    @classmethod
+    def any_exist(cls, ds):
+        files = os.path.join(
+            os.path.split(ds.parameter_filename)[0],
+            'sink_?????.out?????')
+        ret = len(glob.glob(files)) > 0
+        return ret
+
+    def read_header(self):
+        if not self.exists:
+            self.field_offsets = {}
+            self.field_types = {}
+            self.local_particle_count = 0
+            return
+        f = open(self.fname, "rb")
+        f.seek(0, os.SEEK_END)
+        flen = f.tell()
+        f.seek(0)
+        hvals = {}
+        # Read the header of the file
+        attrs = self.attrs
+
+        hvals.update(fpu.read_attrs(f, attrs))
+        self._header = hvals
+
+        # This is a bit of a trick: we only want one domain to be
+        # read, as RAMSES writes all the sinks in every domain. Here,
+        # we set local_particle_count to 0 for all domains except the
+        # first one read.
+        if getattr(self.ds, '_sink_file_flag', False):
+            self.local_particle_count = 0
+        else:
+            self.ds._sink_file_flag = True
+            self.local_particle_count = hvals['nsink']
+
+        # Read the fields + add the sink properties
+        if self.has_part_descriptor:
+            fields = (
+                _read_part_file_descriptor(self.file_descriptor)
+            )
+        else:
+            fields = list(self.known_fields)
+
+        for i in range(self.ds.dimensionality*2+1):
+            for j in range(self.ds.max_level, self.ds.min_level):
+                fields.append((
+                    "particle_prop_%s_%s" % (i, j), "d"
+                ))
+
+        field_offsets = {}
+        _pfields = {}
+
+        # Fill the fields, offsets and types
+        self.fields = []
+        for field, vtype in fields:
+            self.fields.append(field)
+            if f.tell() >= flen: break
+            field_offsets[self.ptype, field] = f.tell()
+            _pfields[self.ptype, field] = vtype
+            fpu.skip(f, 1)
+        self.field_offsets = field_offsets
+        self.field_types = _pfields

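Adding support for a new particle file then amounts to subclassing
ParticleFileHandler; registration happens automatically through the
metaclass. A hypothetical sketch (the 'tracer' type, its file layout
and header below are invented for illustration):

    import glob
    import os

    from yt.frontends.ramses.particle_handlers import ParticleFileHandler

    class TracerParticleFileHandler(ParticleFileHandler):
        ptype = 'tracer'                           # invented type
        fname = 'tracer_{iout:05d}.out{icpu:05d}'  # invented layout
        attrs = (('ncpu', 1, 'I'), ('npart', 1, 'I'))  # invented header
        known_fields = [('particle_position_x', 'd'),
                        ('particle_position_y', 'd'),
                        ('particle_position_z', 'd')]

        @classmethod
        def any_exist(cls, ds):
            pattern = os.path.join(
                os.path.split(ds.parameter_filename)[0],
                'tracer_?????.out?????')
            return len(glob.glob(pattern)) > 0

        def read_header(self):
            # Populate self.field_offsets, self.field_types and
            # self.local_particle_count, mirroring what
            # DefaultParticleFileHandler.read_header does; left out
            # here for brevity.
            raise NotImplementedError
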
diff -r 874b6eac680daf627a1af1c38a6dae82e898baa8 -r 6d651dcc2ec6fd148eb2d66fd880b4541c834081 yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -49,6 +49,7 @@
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
         assert_equal(s1, s2)
+
     assert_equal(ds.particle_type_counts, {'io': 1090895})
 
 @requires_file(output_00080)
@@ -171,7 +172,6 @@
 
 ramses_sink = "ramses_sink_00016/output_00016/info_00016.txt"
 @requires_file(ramses_sink)
-@requires_file(ramsesNonCosmo)
 def test_ramses_sink():
     expected_fields = ["BH_bondi_accretion", "BH_eddington_accretion",
                        "BH_efficiency", "BH_esave",
@@ -205,7 +205,6 @@
     for field in expected_fields:
         assert(('sink', field) not in ds.field_list)
 
-
 ramses_new_format = "ramses_new_format/output_00002/info_00002.txt"
 @requires_file(ramses_new_format)
 def test_new_format():
@@ -234,3 +233,11 @@
     assert(all(ad['star', 'particle_family'] == 2))
     assert(all(ad['star', 'particle_tag'] == 0))
     assert(len(ad['star', 'particle_tag']) == 600)
+
+@requires_file(ramses_sink)
+def test_ramses_part_count():
+    ds = yt.load(ramses_sink)
+    pcount = ds.particle_type_counts
+
+    assert_equal(pcount['io'], 17132, err_msg='Got wrong number of io particles')
+    assert_equal(pcount['sink'], 8, err_msg='Got wrong number of sink particles')

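The per-type particle counting exercised by the new test can also be
checked interactively; the values below are the ones asserted above:

    import yt

    ds = yt.load("ramses_sink_00016/output_00016/info_00016.txt")
    print(ds.particle_type_counts)  # {'io': 17132, 'sink': 8}
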
Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

