[yt-svn] commit/yt: 49 new changesets

commits-noreply@bitbucket.org
Wed Dec 6 08:14:16 PST 2017


49 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/b9d36ce75ed6/
Changeset:   b9d36ce75ed6
User:        Corentin Cadiou
Date:        2017-11-08 14:54:33+00:00
Summary:     add support for reading of part_file_descriptor.txt
Affected #:  3 files

diff -r eccaa23d408f0027881e21ba82f86d44b32aa013 -r b9d36ce75ed612c5c1adb6ce5680947af31a2a64 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -36,6 +36,7 @@
     OctreeSubset
 
 from .definitions import ramses_header, field_aliases
+from .io import _read_part_file_descriptor
 from yt.utilities.physical_constants import mp, kb
 from .fields import \
     RAMSESFieldInfo, _X
@@ -58,12 +59,14 @@
 
         num = os.path.basename(ds.parameter_filename).split("."
                 )[0].split("_")[1]
+        basedir = os.path.abspath(
+            os.path.dirname(ds.parameter_filename))
         basename = "%s/%%s_%s.out%05i" % (
-            os.path.abspath(
-              os.path.dirname(ds.parameter_filename)),
-            num, domain_id)
+            basedir, num, domain_id)
+        part_file_descriptor = "%s/part_file_descriptor.txt" % basedir
         for t in ['grav', 'hydro', 'part', 'amr', 'sink']:
             setattr(self, "%s_fn" % t, basename % t)
+        self._part_file_descriptor = part_file_descriptor
         self._read_amr_header()
         self._read_hydro_header()
         self._read_particle_header()
@@ -214,6 +217,7 @@
             self.local_particle_count = 0
             self.particle_field_offsets = {}
             return
+
         f = open(self.part_fn, "rb")
         f.seek(0, os.SEEK_END)
         flen = f.tell()
@@ -233,7 +237,12 @@
         self.particle_header = hvals
         self.local_particle_count = hvals['npart']
 
-        particle_fields = [
+        # Try reading particle file descriptor
+        if os.path.exists(self._part_file_descriptor) and \
+           self.ds._extra_particle_fields is None:
+            particle_fields = _read_part_file_descriptor(self._part_file_descriptor)
+        else:
+            particle_fields = [
                 ("particle_position_x", "d"),
                 ("particle_position_y", "d"),
                 ("particle_position_z", "d"),
@@ -244,8 +253,8 @@
                 ("particle_identifier", "i"),
                 ("particle_refinement_level", "I")]
 
-        if self.ds._extra_particle_fields is not None:
-            particle_fields += self.ds._extra_particle_fields
+            if self.ds._extra_particle_fields is not None:
+                particle_fields += self.ds._extra_particle_fields
 
         field_offsets = {}
         _pfields = {}

diff -r eccaa23d408f0027881e21ba82f86d44b32aa013 -r b9d36ce75ed612c5c1adb6ce5680947af31a2a64 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -88,6 +88,7 @@
         ("particle_identifier", ("", ["particle_index"], None)),
         ("particle_refinement_level", ("", [], None)),
         ("particle_age", ("code_time", ['age'], None)),
+        ("particle_birth_time", ("code_time", ['age'], None)),
         ("particle_metallicity", ("", [], None)),
     )
 

diff -r eccaa23d408f0027881e21ba82f86d44b32aa013 -r b9d36ce75ed612c5c1adb6ce5680947af31a2a64 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -24,6 +24,7 @@
 from yt.utilities.lib.cosmology_time import \
     get_ramses_ages
 from yt.extern.six import PY3
+import re
 
 if PY3:
     from io import BytesIO as IO
@@ -162,3 +163,48 @@
                 fname, foffsets, data_types, subset, subs_fields))
 
         return tr
+
+VERSION_RE = re.compile(' *version: *(\d+)')
+VAR_DESC_RE = re.compile(' *variable # *(\d+): *(\w+)')
+def _read_part_file_descriptor(fname):
+    """
+    Read the particle file descriptor and return the list of fields found.
+    """
+    # The kind of the known types
+    assoc = [('position_%s' % k, 'd') for k in 'xyz'] + \
+            [('velocity_%s' % k, 'd') for k in 'xyz'] + \
+            [('mass', 'd'), ('identity', 'i'), ('levelp', 'i'),
+            ('family', 'i'), ('tag', 'i'), ('birth_time', 'd'),
+            ('metallicity', 'd')]
+
+    assoc = {k: v for k, v in assoc}
+    if True: #with open(fname, 'r') as f:
+        f = open(fname, 'r')
+        line = f.readline()
+        tmp = VERSION_RE.match(line)
+        if not tmp:
+            print(line)
+            raise Exception('File format not understood')
+
+        version = int(tmp.group(1))
+
+        if version == 1:
+            fields = []
+            for i, line in enumerate(f.readlines()):
+                tmp = VAR_DESC_RE.match(line)
+                if not tmp:
+                    raise Exception('Error while reading %s at line %s' % (fname, i+1))
+
+                # ivar = tmp.group(1)
+                varname = tmp.group(2)
+
+                if varname in assoc:
+                    dtype = assoc[varname]
+                else:
+                    dtype = 'd'
+
+                fields.append(("particle_%s" % varname, dtype))
+        else:
+            raise Exception('Unrecognized particle file descriptor version: %s' % version)
+
+    return fields
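
For context, the parser above expects a first line carrying a version
number, then one "variable #" line per particle field; names missing
from the assoc table fall back to double precision. A minimal sketch of
the matching behavior, reusing the same regexes on illustrative file
contents (not taken from a real RAMSES output):

    import re

    VERSION_RE = re.compile(r' *version: *(\d+)')
    VAR_DESC_RE = re.compile(r' *variable # *(\d+): *(\w+)')

    # Illustrative version-1 descriptor contents.
    sample = [" version: 1",
              " variable # 1: position_x",
              " variable # 2: birth_time"]

    version = int(VERSION_RE.match(sample[0]).group(1))
    fields = []
    for line in sample[1:]:
        m = VAR_DESC_RE.match(line)
        # Both names here are known and map to 'd' (double precision).
        fields.append(("particle_%s" % m.group(2), "d"))
    print(version, fields)
    # 1 [('particle_position_x', 'd'), ('particle_birth_time', 'd')]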


https://bitbucket.org/yt_analysis/yt/commits/8a1689730599/
Changeset:   8a1689730599
User:        Corentin Cadiou
Date:        2017-11-11 11:25:38+00:00
Summary:     reading particle file descriptor
Affected #:  4 files

diff -r b9d36ce75ed612c5c1adb6ce5680947af31a2a64 -r 8a16897305994fcbf095eeff700a93db8440eda3 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -35,7 +35,7 @@
 from yt.data_objects.octree_subset import \
     OctreeSubset
 
-from .definitions import ramses_header, field_aliases
+from .definitions import ramses_header, field_aliases, particle_families
 from .io import _read_part_file_descriptor
 from yt.utilities.physical_constants import mp, kb
 from .fields import \
@@ -240,7 +240,9 @@
         # Try reading particle file descriptor
         if os.path.exists(self._part_file_descriptor) and \
            self.ds._extra_particle_fields is None:
-            particle_fields = _read_part_file_descriptor(self._part_file_descriptor)
+            particle_fields = (
+                _read_part_file_descriptor(self._part_file_descriptor))
+            ptype = 'io'
         else:
             particle_fields = [
                 ("particle_position_x", "d"),
@@ -256,10 +258,12 @@
             if self.ds._extra_particle_fields is not None:
                 particle_fields += self.ds._extra_particle_fields
 
+            ptype = 'io'
+
+
         field_offsets = {}
         _pfields = {}
 
-        ptype = 'io'
 
         # Read offsets
         for field, vtype in particle_fields:

diff -r b9d36ce75ed612c5c1adb6ce5680947af31a2a64 -r 8a16897305994fcbf095eeff700a93db8440eda3 yt/frontends/ramses/definitions.py
--- a/yt/frontends/ramses/definitions.py
+++ b/yt/frontends/ramses/definitions.py
@@ -61,3 +61,14 @@
                           'Metallicity'),
 
 }
+
+particle_families = {
+    1: 'DM',
+    2: 'star',
+    3: 'cloud',
+    4: 'dust',
+    -2: 'star_tracer',
+    -3: 'cloud_tracer',
+    -4: 'dust_tracer',
+    0: 'gas_tracer',
+}

diff -r b9d36ce75ed612c5c1adb6ce5680947af31a2a64 -r 8a16897305994fcbf095eeff700a93db8440eda3 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -64,6 +64,20 @@
 _X = 0.76 # H fraction, hardcoded
 _Y = 0.24 # He fraction, hardcoded
 
+
+# Association between particles types and families, hardcoded
+_families = {
+    1: 'DM',
+    2: 'star',
+    3: 'cloud',
+    4: 'dust',
+    -2: 'star_tracer',
+    -3: 'cloud_tracer',
+    -4: 'dust_tracer',
+    0: 'gas_tracer',
+}
+
+
 class RAMSESFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("Density", (rho_units, ["density"], None)),
@@ -90,6 +104,8 @@
         ("particle_age", ("code_time", ['age'], None)),
         ("particle_birth_time", ("code_time", ['age'], None)),
         ("particle_metallicity", ("", [], None)),
+        ("particle_family", ("", [], None)),
+        ("particle_tag", ("", [], None))
     )
 
     known_sink_fields = (

diff -r b9d36ce75ed612c5c1adb6ce5680947af31a2a64 -r 8a16897305994fcbf095eeff700a93db8440eda3 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -174,7 +174,7 @@
     assoc = [('position_%s' % k, 'd') for k in 'xyz'] + \
             [('velocity_%s' % k, 'd') for k in 'xyz'] + \
             [('mass', 'd'), ('identity', 'i'), ('levelp', 'i'),
-            ('family', 'i'), ('tag', 'i'), ('birth_time', 'd'),
+            ('family', 'b'), ('tag', 'b'), ('birth_time', 'd'),
             ('metallicity', 'd')]
 
     assoc = {k: v for k, v in assoc}
@@ -182,6 +182,7 @@
         f = open(fname, 'r')
         line = f.readline()
         tmp = VERSION_RE.match(line)
+        mylog.info('Reading part file descriptor.')
         if not tmp:
             print(line)
             raise Exception('File format not understood')
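
The single-letter dtypes are Python struct format characters, so
switching `family` and `tag` from 'i' to 'b' reads them as one-byte
signed integers rather than four-byte ones, presumably matching how
RAMSES writes these records. A quick size check:

    import struct

    # 'b' = signed char (1 byte), 'i' = int (4 bytes), 'd' = double (8 bytes)
    print(struct.calcsize('b'), struct.calcsize('i'), struct.calcsize('d'))
    # 1 4 8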


https://bitbucket.org/yt_analysis/yt/commits/1b8067b62cc2/
Changeset:   1b8067b62cc2
User:        Corentin Cadiou
Date:        2017-11-11 11:25:44+00:00
Summary:     Merge branch 'master' of https://github.com/yt-project/yt
Affected #:  5 files

diff -r b9d36ce75ed612c5c1adb6ce5680947af31a2a64 -r 1b8067b62cc26e047e28d8951d105d3bd4d3cc43 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -104,7 +104,7 @@
   local_ytdata_003:
     - yt/frontends/ytdata
 
-  local_absorption_spectrum_005:
+  local_absorption_spectrum_006:
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_novpec
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo

diff -r b9d36ce75ed612c5c1adb6ce5680947af31a2a64 -r 1b8067b62cc26e047e28d8951d105d3bd4d3cc43 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -941,6 +941,8 @@
         self.unit_registry.add("code_length", 1.0, dimensions.length)
         self.unit_registry.add("code_mass", 1.0, dimensions.mass)
         self.unit_registry.add("code_density", 1.0, dimensions.density)
+        self.unit_registry.add("code_specific_energy", 1.0,
+                               dimensions.energy / dimensions.mass)
         self.unit_registry.add("code_time", 1.0, dimensions.time)
         self.unit_registry.add("code_magnetic", 1.0, dimensions.magnetic_field)
         self.unit_registry.add("code_temperature", 1.0, dimensions.temperature)
@@ -1018,10 +1020,12 @@
             self.mass_unit / (self.length_unit * (self.time_unit)**2))
         temperature_unit = getattr(self, "temperature_unit", 1.0)
         density_unit = getattr(self, "density_unit", self.mass_unit / self.length_unit**3)
+        specific_energy_unit = getattr(self, "specific_energy_unit", vel_unit**2)
         self.unit_registry.modify("code_velocity", vel_unit)
         self.unit_registry.modify("code_temperature", temperature_unit)
         self.unit_registry.modify("code_pressure", pressure_unit)
         self.unit_registry.modify("code_density", density_unit)
+        self.unit_registry.modify("code_specific_energy", specific_energy_unit)
         # domain_width does not yet exist
         if (self.domain_left_edge is not None and
             self.domain_right_edge is not None):

diff -r b9d36ce75ed612c5c1adb6ce5680947af31a2a64 -r 1b8067b62cc26e047e28d8951d105d3bd4d3cc43 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -337,6 +337,14 @@
             vel_unit = self.velocity_unit
         self.time_unit = self.length_unit / vel_unit
 
+        if "specific_energy" in unit_base:
+            specific_energy_unit = unit_base["specific_energy"]
+        else:
+            # Sane default
+            specific_energy_unit = (1, "(km/s) ** 2")
+        specific_energy_unit = _fix_unit_ordering(specific_energy_unit)
+        self.specific_energy_unit = self.quan(*specific_energy_unit)
+
     @staticmethod
     def _validate_header(filename):
         '''

diff -r b9d36ce75ed612c5c1adb6ce5680947af31a2a64 -r 1b8067b62cc26e047e28d8951d105d3bd4d3cc43 yt/frontends/gizmo/fields.py
--- a/yt/frontends/gizmo/fields.py
+++ b/yt/frontends/gizmo/fields.py
@@ -37,7 +37,7 @@
         ("Velocity", ("code_velocity", ["particle_velocity"], None)),
         ("Velocities", ("code_velocity", ["particle_velocity"], None)),
         ("ParticleIDs", ("", ["particle_index"], None)),
-        ("InternalEnergy", ("code_velocity ** 2", ["thermal_energy"], None)),
+        ("InternalEnergy", ("code_specific_energy", ["thermal_energy"], None)),
         ("SmoothingLength", ("code_length", ["smoothing_length"], None)),
         ("Density", ("code_mass / code_length**3", ["density"], None)),
         ("MaximumTemperature", ("K", [], None)),

diff -r b9d36ce75ed612c5c1adb6ce5680947af31a2a64 -r 1b8067b62cc26e047e28d8951d105d3bd4d3cc43 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -30,7 +30,7 @@
         ("Velocity", ("code_velocity", ["particle_velocity"], None)),
         ("Velocities", ("code_velocity", ["particle_velocity"], None)),
         ("ParticleIDs", ("", ["particle_index"], None)),
-        ("InternalEnergy", ("code_velocity ** 2", ["thermal_energy"], None)),
+        ("InternalEnergy", ("code_specific_energy", ["thermal_energy"], None)),
         ("SmoothingLength", ("code_length", ["smoothing_length"], None)),
         ("Density", ("code_mass / code_length**3", ["density"], None)),
         ("MaximumTemperature", ("K", [], None)),


https://bitbucket.org/yt_analysis/yt/commits/6e2a167f657f/
Changeset:   6e2a167f657f
User:        Corentin Cadiou
Date:        2017-11-11 11:26:55+00:00
Summary:     Merge branch 'master' into ramses-new-particles
Affected #:  5 files

diff -r 8a16897305994fcbf095eeff700a93db8440eda3 -r 6e2a167f657fc9f15f9100db628574c766a3dc0c tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -104,7 +104,7 @@
   local_ytdata_003:
     - yt/frontends/ytdata
 
-  local_absorption_spectrum_005:
+  local_absorption_spectrum_006:
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_novpec
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo

diff -r 8a16897305994fcbf095eeff700a93db8440eda3 -r 6e2a167f657fc9f15f9100db628574c766a3dc0c yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -941,6 +941,8 @@
         self.unit_registry.add("code_length", 1.0, dimensions.length)
         self.unit_registry.add("code_mass", 1.0, dimensions.mass)
         self.unit_registry.add("code_density", 1.0, dimensions.density)
+        self.unit_registry.add("code_specific_energy", 1.0,
+                               dimensions.energy / dimensions.mass)
         self.unit_registry.add("code_time", 1.0, dimensions.time)
         self.unit_registry.add("code_magnetic", 1.0, dimensions.magnetic_field)
         self.unit_registry.add("code_temperature", 1.0, dimensions.temperature)
@@ -1018,10 +1020,12 @@
             self.mass_unit / (self.length_unit * (self.time_unit)**2))
         temperature_unit = getattr(self, "temperature_unit", 1.0)
         density_unit = getattr(self, "density_unit", self.mass_unit / self.length_unit**3)
+        specific_energy_unit = getattr(self, "specific_energy_unit", vel_unit**2)
         self.unit_registry.modify("code_velocity", vel_unit)
         self.unit_registry.modify("code_temperature", temperature_unit)
         self.unit_registry.modify("code_pressure", pressure_unit)
         self.unit_registry.modify("code_density", density_unit)
+        self.unit_registry.modify("code_specific_energy", specific_energy_unit)
         # domain_width does not yet exist
         if (self.domain_left_edge is not None and
             self.domain_right_edge is not None):

diff -r 8a16897305994fcbf095eeff700a93db8440eda3 -r 6e2a167f657fc9f15f9100db628574c766a3dc0c yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -337,6 +337,14 @@
             vel_unit = self.velocity_unit
         self.time_unit = self.length_unit / vel_unit
 
+        if "specific_energy" in unit_base:
+            specific_energy_unit = unit_base["specific_energy"]
+        else:
+            # Sane default
+            specific_energy_unit = (1, "(km/s) ** 2")
+        specific_energy_unit = _fix_unit_ordering(specific_energy_unit)
+        self.specific_energy_unit = self.quan(*specific_energy_unit)
+
     @staticmethod
     def _validate_header(filename):
         '''

diff -r 8a16897305994fcbf095eeff700a93db8440eda3 -r 6e2a167f657fc9f15f9100db628574c766a3dc0c yt/frontends/gizmo/fields.py
--- a/yt/frontends/gizmo/fields.py
+++ b/yt/frontends/gizmo/fields.py
@@ -37,7 +37,7 @@
         ("Velocity", ("code_velocity", ["particle_velocity"], None)),
         ("Velocities", ("code_velocity", ["particle_velocity"], None)),
         ("ParticleIDs", ("", ["particle_index"], None)),
-        ("InternalEnergy", ("code_velocity ** 2", ["thermal_energy"], None)),
+        ("InternalEnergy", ("code_specific_energy", ["thermal_energy"], None)),
         ("SmoothingLength", ("code_length", ["smoothing_length"], None)),
         ("Density", ("code_mass / code_length**3", ["density"], None)),
         ("MaximumTemperature", ("K", [], None)),

diff -r 8a16897305994fcbf095eeff700a93db8440eda3 -r 6e2a167f657fc9f15f9100db628574c766a3dc0c yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -30,7 +30,7 @@
         ("Velocity", ("code_velocity", ["particle_velocity"], None)),
         ("Velocities", ("code_velocity", ["particle_velocity"], None)),
         ("ParticleIDs", ("", ["particle_index"], None)),
-        ("InternalEnergy", ("code_velocity ** 2", ["thermal_energy"], None)),
+        ("InternalEnergy", ("code_specific_energy", ["thermal_energy"], None)),
         ("SmoothingLength", ("code_length", ["smoothing_length"], None)),
         ("Density", ("code_mass / code_length**3", ["density"], None)),
         ("MaximumTemperature", ("K", [], None)),


https://bitbucket.org/yt_analysis/yt/commits/dc63c0d12097/
Changeset:   dc63c0d12097
User:        Corentin Cadiou
Date:        2017-11-11 12:55:22+00:00
Summary:     Support for new particles types
Affected #:  3 files

diff -r 6e2a167f657fc9f15f9100db628574c766a3dc0c -r dc63c0d12097c4112cc3fdb48698046cb9256b2c yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -34,6 +34,7 @@
     Dataset
 from yt.data_objects.octree_subset import \
     OctreeSubset
+from yt import add_particle_filter
 
 from .definitions import ramses_header, field_aliases, particle_families
 from .io import _read_part_file_descriptor
@@ -94,6 +95,13 @@
         return os.path.exists(self.sink_fn)
 
     @property
+    def _has_part_descriptor(self):
+        '''
+        Does the output include particle file descriptor?
+        '''
+        return os.path.exists(self._part_file_descriptor)
+
+    @property
     def level_count(self):
         if self._level_count is not None: return self._level_count
         self.hydro_offset
@@ -238,7 +246,7 @@
         self.local_particle_count = hvals['npart']
 
         # Try reading particle file descriptor
-        if os.path.exists(self._part_file_descriptor) and \
+        if self._has_part_descriptor and \
            self.ds._extra_particle_fields is None:
             particle_fields = (
                 _read_part_file_descriptor(self._part_file_descriptor))
@@ -679,6 +687,22 @@
 
         self.storage_filename = storage_filename
 
+        # Add particles filters
+        for fname, value in particle_families.items():
+            def loc(val):
+                def closure(pfilter, data):
+                    filter = data[(pfilter.filtered_type, "particle_family")] == val
+                    return filter
+
+                return closure
+            add_particle_filter(fname, loc(value),
+                                filtered_type='io', requires=['particle_family'])
+
+
+    def add_ptypes(self):
+        for k in particle_families.keys():
+            mylog.info('Adding particle_type: %s' % k)
+            self.add_particle_filter('%s' % k)
 
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]

diff -r 6e2a167f657fc9f15f9100db628574c766a3dc0c -r dc63c0d12097c4112cc3fdb48698046cb9256b2c yt/frontends/ramses/definitions.py
--- a/yt/frontends/ramses/definitions.py
+++ b/yt/frontends/ramses/definitions.py
@@ -63,12 +63,12 @@
 }
 
 particle_families = {
-    1: 'DM',
-    2: 'star',
-    3: 'cloud',
-    4: 'dust',
-    -2: 'star_tracer',
-    -3: 'cloud_tracer',
-    -4: 'dust_tracer',
-    0: 'gas_tracer',
+    'DM': 1,
+    'star': 2,
+    'cloud': 3,
+    'dust': 4,
+    'star_tracer': -2,
+    'cloud_tracer': -3,
+    'dust_tracer': -4,
+    'gas_tracer': 0
 }

diff -r 6e2a167f657fc9f15f9100db628574c766a3dc0c -r dc63c0d12097c4112cc3fdb48698046cb9256b2c yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -17,6 +17,7 @@
 import os
 import numpy as np
 
+import yt
 from yt.utilities.physical_constants import \
     boltzmann_constant_cgs, \
     mass_hydrogen_cgs, \
@@ -26,6 +27,7 @@
 import yt.utilities.fortran_utils as fpu
 from yt.fields.field_info_container import \
     FieldInfoContainer
+from .definitions import particle_families
 
 b_units = "code_magnetic"
 ra_units = "code_length / code_time**2"
@@ -65,19 +67,6 @@
 _Y = 0.24 # He fraction, hardcoded
 
 
-# Association between particles types and families, hardcoded
-_families = {
-    1: 'DM',
-    2: 'star',
-    3: 'cloud',
-    4: 'dust',
-    -2: 'star_tracer',
-    -3: 'cloud_tracer',
-    -4: 'dust_tracer',
-    0: 'gas_tracer',
-}
-
-
 class RAMSESFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("Density", (rho_units, ["density"], None)),


https://bitbucket.org/yt_analysis/yt/commits/9b795dbe3790/
Changeset:   9b795dbe3790
User:        Corentin Cadiou
Date:        2017-11-11 13:08:02+00:00
Summary:     remove useless (and erroneous) check
Affected #:  1 file

diff -r dc63c0d12097c4112cc3fdb48698046cb9256b2c -r 9b795dbe3790d08bdc431f7e81bfe0fb6297aede yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -246,8 +246,7 @@
         self.local_particle_count = hvals['npart']
 
         # Try reading particle file descriptor
-        if self._has_part_descriptor and \
-           self.ds._extra_particle_fields is None:
+        if self._has_part_descriptor:
             particle_fields = (
                 _read_part_file_descriptor(self._part_file_descriptor))
             ptype = 'io'


https://bitbucket.org/yt_analysis/yt/commits/82deaf98aa8e/
Changeset:   82deaf98aa8e
User:        Corentin Cadiou
Date:        2017-11-11 13:10:38+00:00
Summary:     flake8
Affected #:  1 file

diff -r 9b795dbe3790d08bdc431f7e81bfe0fb6297aede -r 82deaf98aa8e3f817a22d8fa8168e655a2f75a7b yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -17,7 +17,6 @@
 import os
 import numpy as np
 
-import yt
 from yt.utilities.physical_constants import \
     boltzmann_constant_cgs, \
     mass_hydrogen_cgs, \
@@ -27,7 +26,6 @@
 import yt.utilities.fortran_utils as fpu
 from yt.fields.field_info_container import \
     FieldInfoContainer
-from .definitions import particle_families
 
 b_units = "code_magnetic"
 ra_units = "code_length / code_time**2"


https://bitbucket.org/yt_analysis/yt/commits/ab3f4ab82d2b/
Changeset:   ab3f4ab82d2b
User:        Corentin Cadiou
Date:        2017-11-11 13:22:42+00:00
Summary:     add doc
Affected #:  1 file

diff -r 82deaf98aa8e3f817a22d8fa8168e655a2f75a7b -r ab3f4ab82d2bc6f8c98414b4847d82843f07cb2d doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -318,7 +318,7 @@
 * yt does not read the Maestro base state (although you can have Maestro
   map it to a full Cartesian state variable before writing the plotfile
   to get around this).  E-mail the dev list if you need this support.
-* yt supports AMReX/BoxLib particle data stored in the standard format used 
+* yt supports AMReX/BoxLib particle data stored in the standard format used
   by Nyx and WarpX, and optionally Castro. It currently does not support the ASCII particle
   data used by Maestro and Castro.
* For Maestro, yt aliases either "tfromp" or "tfromh" to ``temperature``
@@ -331,7 +331,7 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Most AMReX/BoxLib codes output cell-centered data. If the underlying discretization
-is not cell-centered, then fields are typically averaged to cell centers before 
+is not cell-centered, then fields are typically averaged to cell centers before
 they are written to plot files for visualization. WarpX, however, has the option
 to output the raw (i.e., not averaged to cell centers) data as well.  If you
 run your WarpX simulation with ``warpx.plot_raw_fields = 1`` in your inputs
@@ -348,10 +348,10 @@
 The raw fields in WarpX are nodal in at least one direction. We define a field
 to be "nodal" in a given direction if the field data is defined at the "low"
 and "high" sides of the cell in that direction, rather than at the cell center.
-Instead of returning one field value per cell selected, nodal fields return a 
+Instead of returning one field value per cell selected, nodal fields return a
 number of values, depending on their centering. This centering is marked by
 a `nodal_flag` that describes whether the field is nodal in each dimension.
-``nodal_flag = [0, 0, 0]`` means that the field is cell-centered, while 
+``nodal_flag = [0, 0, 0]`` means that the field is cell-centered, while
 ``nodal_flag = [0, 0, 1]`` means that the field is nodal in the z direction
 and cell centered in the others, i.e. it is defined on the z faces of each cell.
 ``nodal_flag = [1, 1, 0]`` would mean that the field is centered in the z direction,
@@ -371,7 +371,7 @@
 
 Here, the field ``('raw', 'Ex')`` is nodal in two directions, so four values per cell
 are returned, corresponding to the four edges in each cell on which the variable
-is defined. ``('raw', 'Bx')`` is nodal in one direction, so two values are returned 
+is defined. ``('raw', 'Bx')`` is nodal in one direction, so two values are returned
 per cell. The standard, averaged-to-cell-centers fields are still available.
 
 Currently, slices and data selection are implemented for nodal fields. Projections,
@@ -684,7 +684,7 @@
 * fits.gz
 * fts.gz
 
-yt can currently read two kinds of FITS files: FITS image files and FITS 
+yt can currently read two kinds of FITS files: FITS image files and FITS
 binary table files containing positions, times, and energies of X-ray events.
 
 Though a FITS image is composed of a single array in the FITS file,
@@ -760,14 +760,14 @@
 If your data is of the first case, yt will determine the length units based
 on the information in the header. If your data is of the second or third
 cases, no length units will be assigned, but the world coordinate information
-about the axes will be stored in separate fields. If your data is of the 
-fourth type, the coordinates of the first three axes will be determined 
+about the axes will be stored in separate fields. If your data is of the
+fourth type, the coordinates of the first three axes will be determined
 according to cases 1-3.
 
 .. note::
 
-  Linear length-based coordinates (Case 1 above) are only supported if all 
-  dimensions have the same value for ``CUNITx``. WCS coordinates are only 
+  Linear length-based coordinates (Case 1 above) are only supported if all
+  dimensions have the same value for ``CUNITx``. WCS coordinates are only
   supported for Cases 2-4.
 
 FITS Data Decomposition
@@ -791,8 +791,8 @@
              512	  981940800
 
 For 3D spectral-cube data, the decomposition into grids will be done along the
-spectral axis since this will speed up many common operations for this 
-particular type of dataset. 
+spectral axis since this will speed up many common operations for this
+particular type of dataset.
 
 yt will generate its own domain decomposition, but the number of grids can be
 set manually by passing the ``nprocs`` parameter to the ``load`` call:
@@ -830,10 +830,10 @@
 will be generated from the pixel coordinates in the file using the WCS
 transformations provided by AstroPy.
 
-X-ray event data will be loaded as particle fields in yt, but a grid will be 
-constructed from the WCS information in the FITS header. There is a helper 
-function, ``setup_counts_fields``, which may be used to make deposited image 
-fields from the event data for different energy bands (for an example see 
+X-ray event data will be loaded as particle fields in yt, but a grid will be
+constructed from the WCS information in the FITS header. There is a helper
+function, ``setup_counts_fields``, which may be used to make deposited image
+fields from the event data for different energy bands (for an example see
 :ref:`xray_fits`).
 
 .. note::
@@ -848,7 +848,7 @@
 Additional Options
 ^^^^^^^^^^^^^^^^^^
 
-The following are additional options that may be passed to the ``load`` command 
+The following are additional options that may be passed to the ``load`` command
 when analyzing FITS data:
 
 ``nan_mask``
@@ -888,9 +888,9 @@
 Miscellaneous Tools for Use with FITS Data
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-A number of tools have been prepared for use with FITS data that enhance yt's 
-visualization and analysis capabilities for this particular type of data. These 
-are included in the ``yt.frontends.fits.misc`` module, and can be imported like 
+A number of tools have been prepared for use with FITS data that enhance yt's
+visualization and analysis capabilities for this particular type of data. These
+are included in the ``yt.frontends.fits.misc`` module, and can be imported like
 so:
 
 .. code-block:: python
@@ -900,7 +900,7 @@
 ``setup_counts_fields``
 """""""""""""""""""""""
 
-This function can be used to create image fields from X-ray counts data in 
+This function can be used to create image fields from X-ray counts data in
 different energy bands:
 
 .. code-block:: python
@@ -914,9 +914,9 @@
 ``ds9_region``
 """"""""""""""
 
-This function takes a `ds9 <http://ds9.si.edu/site/Home.html>`_ region and 
-creates a "cut region" data container from it, that can be used to select 
-the cells in the FITS dataset that fall within the region. To use this 
+This function takes a `ds9 <http://ds9.si.edu/site/Home.html>`_ region and
+creates a "cut region" data container from it, that can be used to select
+the cells in the FITS dataset that fall within the region. To use this
 functionality, the `pyregion <https://github.com/astropy/pyregion/>`_
 package must be installed.
 
@@ -930,8 +930,8 @@
 ``PlotWindowWCS``
 """""""""""""""""
 
-This class takes a on-axis ``SlicePlot`` or ``ProjectionPlot`` of FITS 
-data and adds celestial coordinates to the plot axes. To use it, a 
+This class takes an on-axis ``SlicePlot`` or ``ProjectionPlot`` of FITS
+data and adds celestial coordinates to the plot axes. To use it, a
 version of AstroPy >= 1.3 must be installed.
 
 .. code-block:: python
@@ -940,7 +940,7 @@
   wcs_slc.show() # for the IPython notebook
   wcs_slc.save()
 
-``WCSAxes`` is still in an experimental state, but as its functionality 
+``WCSAxes`` is still in an experimental state, but as its functionality
 improves it will be utilized more here.
 
 ``create_spectral_slabs``
@@ -948,14 +948,14 @@
 
 .. note::
 
-  The following functionality requires the 
-  `spectral-cube <http://spectral-cube.readthedocs.org>`_ library to be 
+  The following functionality requires the
+  `spectral-cube <http://spectral-cube.readthedocs.org>`_ library to be
   installed.
 
 If you have a spectral intensity dataset of some sort, and would like to
-extract emission in particular slabs along the spectral axis of a certain 
-width, ``create_spectral_slabs`` can be used to generate a dataset with 
-these slabs as different fields. In this example, we use it to extract 
+extract emission in particular slabs along the spectral axis of a certain
+width, ``create_spectral_slabs`` can be used to generate a dataset with
+these slabs as different fields. In this example, we use it to extract
 individual lines from an intensity cube:
 
 .. code-block:: python
@@ -968,12 +968,12 @@
                                     slab_centers, slab_width,
                                     nan_mask=0.0)
 
-All keyword arguments to ``create_spectral_slabs`` are passed on to ``load`` when 
-creating the dataset (see :ref:`additional_fits_options` above). In the 
-returned dataset, the different slabs will be different fields, with the field 
-names taken from the keys in ``slab_centers``. The WCS coordinates on the 
-spectral axis are reset so that the center of the domain along this axis is 
-zero, and the left and right edges of the domain along this axis are 
+All keyword arguments to ``create_spectral_slabs`` are passed on to ``load`` when
+creating the dataset (see :ref:`additional_fits_options` above). In the
+returned dataset, the different slabs will be different fields, with the field
+names taken from the keys in ``slab_centers``. The WCS coordinates on the
+spectral axis are reset so that the center of the domain along this axis is
+zero, and the left and right edges of the domain along this axis are
 :math:`\pm` ``0.5*slab_width``.
 
 Examples of Using FITS Data
@@ -991,10 +991,10 @@
 ----------
 
 FLASH HDF5 data is *mostly* supported and cared for by John ZuHone.  To load a
-FLASH dataset, you can use the ``yt.load`` command and provide it the file name of 
+FLASH dataset, you can use the ``yt.load`` command and provide it the file name of
 a plot file, checkpoint file, or particle file. Particle files require special handling
-depending on the situation, the main issue being that they typically lack grid information. 
-The first case is when you have a plotfile and a particle file that you would like to 
+depending on the situation, the main issue being that they typically lack grid information.
+The first case is when you have a plotfile and a particle file that you would like to
 load together. In the simplest case, this occurs automatically. For instance, if you
 were in a directory with the following files:
 
@@ -1003,8 +1003,8 @@
    radio_halo_1kpc_hdf5_plt_cnt_0100 # plotfile
    radio_halo_1kpc_hdf5_part_0100 # particle file
 
-where the plotfile and the particle file were created at the same time (therefore having 
-particle data consistent with the grid structure of the former). Notice also that the 
+where the plotfile and the particle file were created at the same time (therefore having
+particle data consistent with the grid structure of the former). Notice also that the
 prefix ``"radio_halo_1kpc_"`` and the file number ``100`` are the same. In this special case,
 the particle file will be loaded automatically when ``yt.load`` is called on the plotfile.
 This also works when loading a number of files in a time series.
@@ -1018,10 +1018,10 @@
     import yt
     ds = yt.load("radio_halo_1kpc_hdf5_plt_cnt_0100", particle_filename="radio_halo_1kpc_hdf5_part_0100")
 
-However, if you don't have a corresponding plotfile for a particle file, but would still 
-like to load the particle data, you can still call ``yt.load`` on the file. However, the 
+However, if you don't have a corresponding plotfile for a particle file, but would still
+like to load the particle data, you can still call ``yt.load`` on the file. However, the
 grid information will not be available, and the particle data will be loaded in a fashion
-similar to SPH data. 
+similar to SPH data.
 
 .. rubric:: Caveats
 
@@ -1349,7 +1349,7 @@
    yt only supports a block structure where the grid edges on the ``n``-th
    refinement level are aligned with the cell edges on the ``n-1``-th level.
 
-Particle fields are supported by adding 1-dimensional arrays to each 
+Particle fields are supported by adding 1-dimensional arrays to each
 ``grid``'s dict:
 
 .. code-block:: python
@@ -1394,7 +1394,7 @@
 simultaneously divide the domain into 12 chunks, so that you can take advantage
 of the underlying parallelism.
 
-Particle fields are added as one-dimensional arrays in a similar manner as the 
+Particle fields are added as one-dimensional arrays in a similar manner as the
 three-dimensional grid fields:
 
 .. code-block:: python
@@ -1562,7 +1562,7 @@
 
    # only plot the second
    sl = yt.SlicePlot(ds, 'z', ('connect2', 'test'))
-   
+
    # plot both
    sl = yt.SlicePlot(ds, 'z', ('all', 'test'))
 
@@ -1631,10 +1631,10 @@
 Gizmo Data
 ----------
 
-Gizmo datasets, including FIRE outputs, can be loaded into yt in the usual 
-manner.  Like other SPH data formats, yt loads Gizmo data as particle fields 
-and then uses smoothing kernels to deposit those fields to an underlying 
-grid structure as spatial fields as described in :ref:`loading-gadget-data`.  
+Gizmo datasets, including FIRE outputs, can be loaded into yt in the usual
+manner.  Like other SPH data formats, yt loads Gizmo data as particle fields
+and then uses smoothing kernels to deposit those fields to an underlying
+grid structure as spatial fields as described in :ref:`loading-gadget-data`.
 To load Gizmo datasets using the standard HDF5 output format::
 
    import yt
@@ -1642,11 +1642,11 @@
 
 Because the Gizmo output format is similar to the Gadget format, yt
 may load Gizmo datasets as Gadget depending on the circumstances, but this
-should not pose a problem in most situations.  FIRE outputs will be loaded 
-accordingly due to the number of metallicity fields found (11 or 17).  
+should not pose a problem in most situations.  FIRE outputs will be loaded
+accordingly due to the number of metallicity fields found (11 or 17).
 
 For Gizmo outputs written as raw binary outputs, you may have to specify
-a bounding box, field specification, and units as are done for standard 
+a bounding box, field specification, and units as are done for standard
 Gadget outputs.  See :ref:`loading-gadget-data` for more information.
 
 .. _halo-catalog-data:
@@ -2001,7 +2001,8 @@
    import yt
    ds = yt.load("output_00007/info_00007.txt")
 
-yt will attempt to guess the fields in the file.  You may also specify
+
+yt will attempt to guess the fields in the file. You may also specify
 a list of hydro fields by supplying the ``fields`` keyword in your
 call to ``load``. It is also possible to provide a list of *extra*
 particle fields by supplying the ``extra_particle_fields``:
@@ -2013,6 +2014,13 @@
    ds = yt.load("output_00001/info_00001.txt", extra_particle_fields=extra_fields)
    # ('all', 'family') and ('all', 'info') now in ds.field_list
 
+yt also supports the new way particles are handled, introduced after
+version `stable_17_09` (the version released after the 2017 Ramses
+User Meeting). In this case, the file `part_file_descriptor.txt`,
+which lists the fields present in the particle files, will be read. If
+you use a custom version of RAMSES, make sure this file is up-to-date
+and reflects the true layout of the particles.
+
 yt supports outputs made by the mainline ``RAMSES`` code as well as the
 ``RAMSES-RT`` fork. Files produced by ``RAMSES-RT`` are recognized as such
 based on the presence of an ``info_rt_*.txt`` file in the output directory.
@@ -2030,6 +2038,21 @@
 default (including dark matter, stars, tracer particles, …). Sink
 particles have the particle type ``sink``.
 
+.. rubric:: Particle automatic filtering
+
+If your RAMSES version is more recent than `stable_17_09`, it is
+possible to tell yt to filter the particles in your dataset. This is
+not done by default as it requires reading all the particles, which
+may be slow. To use this feature, run
+
+.. code-block:: python
+
+   ds = yt.load('ramses_new_format/output_00011/info_00011.txt')
+
+   # This will load the particle types automatically
+   ds.add_ptypes()
+
+
 .. _loading-sph-data:
 
 SPH Particle Data
@@ -2113,6 +2136,3 @@
 
     import yt
     ds = yt.load(filename, cosmology_parameters={})
-
-
-


https://bitbucket.org/yt_analysis/yt/commits/1399508ef52f/
Changeset:   1399508ef52f
User:        Corentin Cadiou
Date:        2017-11-11 13:37:29+00:00
Summary:     add test
Affected #:  1 file

diff -r ab3f4ab82d2bc6f8c98414b4847d82843f07cb2d -r 1399508ef52f299bbae252f1826dc79fcbc03eac yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -204,3 +204,34 @@
 
     for field in expected_fields:
         assert(('sink', 'field') not in ds.field_list)
+
+
+ramses_new_format = "ramses_new_format/output_00011/info_00011.txt"
+@requires_file(ramses_new_format)
+def test_new_format():
+    expected_particle_fields = [
+        ('io', 'particle_birth_time'),
+        ('io', 'particle_family'),
+        ('io', 'particle_identity'),
+        ('io', 'particle_levelp'),
+        ('io', 'particle_mass'),
+        ('io', 'particle_metallicity'),
+        ('io', 'particle_position_x'),
+        ('io', 'particle_position_y'),
+        ('io', 'particle_position_z'),
+        ('io', 'particle_tag'),
+        ('io', 'particle_velocity_x'),
+        ('io', 'particle_velocity_y'),
+        ('io', 'particle_velocity_z')]
+
+    ds = yt.load(ramses_new_format)
+    ad = ds.all_data()
+
+    # Check all the expected fields exist and can be accessed
+    for f in expected_particle_fields:
+        assert(f in ds.field_list)
+        ad[f]
+
+    # Check there are only stars with tag 0 (as expected)
+    assert(all(ad['particle_family'] == 2))
+    assert(all(ad['particle_tag'] == 0))
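
This test needs the ramses_new_format sample dataset in yt's test data
directory; assuming nose (which yt's suite used at the time), one way
to run just this test is:

    import nose

    # Select a single test with nose's module:function syntax.
    nose.run(argv=['nosetests',
                   'yt.frontends.ramses.tests.test_outputs:test_new_format'])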


https://bitbucket.org/yt_analysis/yt/commits/9a3ad342192d/
Changeset:   9a3ad342192d
User:        Corentin Cadiou
Date:        2017-11-11 13:45:55+00:00
Summary:     more doc
Affected #:  2 files

diff -r 1399508ef52f299bbae252f1826dc79fcbc03eac -r 9a3ad342192db5fa53f632eb0fc9fdaa51fff94d doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -2038,7 +2038,8 @@
 default (including dark matter, stars, tracer particles, …). Sink
 particles have the particle type ``sink``.
 
-.. rubric:: Particle automatic filtering
+Particle automatic filtering
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 If your RAMSES version is more recent than `stable_17_09`, it is
 possible to tell yt to filter the particles in your dataset. This is
@@ -2053,6 +2054,19 @@
    ds.add_ptypes()
 
 
+Adding custom particle fields
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+It is possible to add support for extra particle fields. For this, one
+should tweak
+:func:`~yt.frontends.ramses.io._read_part_file_descriptor` to add
+the field, along with its data type, to the `assoc` list, following
+the convention from the
+`python struct module <https://docs.python.org/3.5/library/struct.html#format-characters>`_.
+For example, to add support for a longint field named
+`my_custom_field`, one would add `('my_custom_field', 'l')` to `assoc`.
+
+
 .. _loading-sph-data:
 
 SPH Particle Data

diff -r 1399508ef52f299bbae252f1826dc79fcbc03eac -r 9a3ad342192db5fa53f632eb0fc9fdaa51fff94d yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -174,8 +174,8 @@
     assoc = [('position_%s' % k, 'd') for k in 'xyz'] + \
             [('velocity_%s' % k, 'd') for k in 'xyz'] + \
             [('mass', 'd'), ('identity', 'i'), ('levelp', 'i'),
-            ('family', 'b'), ('tag', 'b'), ('birth_time', 'd'),
-            ('metallicity', 'd')]
+             ('family', 'b'), ('tag', 'b'), ('birth_time', 'd'),
+             ('metallicity', 'd')]
 
     assoc = {k: v for k, v in assoc}
     if True: #with open(fname, 'r') as f:
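
Per the doc addition above, supporting a custom field means one more
entry in `assoc`; a hedged sketch using the hypothetical
`my_custom_field` from the docs:

    # Inside _read_part_file_descriptor, before the list is turned into
    # a dict (the last entry is the hypothetical field from the docs):
    assoc = [('position_%s' % k, 'd') for k in 'xyz'] + \
            [('velocity_%s' % k, 'd') for k in 'xyz'] + \
            [('mass', 'd'), ('identity', 'i'), ('levelp', 'i'),
             ('family', 'b'), ('tag', 'b'), ('birth_time', 'd'),
             ('metallicity', 'd'),
             ('my_custom_field', 'l')]  # hypothetical longint field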


https://bitbucket.org/yt_analysis/yt/commits/2ad1cd87badc/
Changeset:   2ad1cd87badc
User:        Corentin Cadiou
Date:        2017-11-13 17:07:11+00:00
Summary:     automatic loading of particles
Affected #:  1 file

diff -r 9a3ad342192db5fa53f632eb0fc9fdaa51fff94d -r 2ad1cd87badc28861a014d32a3576bfb9fa79514 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -698,7 +698,12 @@
                                 filtered_type='io', requires=['particle_family'])
 
 
-    def add_ptypes(self):
+    def create_field_info(self, *args, **kwa):
+        """Extend create_field_info to add the particles types."""
+        Dataset.create_field_info(self, *args, **kwa)
+        self._add_ptypes()
+
+    def _add_ptypes(self):
         for k in particle_families.keys():
             mylog.info('Adding particle_type: %s' % k)
             self.add_particle_filter('%s' % k)
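
Hooking `create_field_info` applies the filters during dataset
initialization, so the explicit `add_ptypes()` call from the earlier
sketch goes away (the method becomes the private `_add_ptypes`). Under
the same hypothetical path:

    import yt

    ds = yt.load("output_00011/info_00011.txt")  # hypothetical RAMSES output
    ad = ds.all_data()
    # The family-based types are now registered automatically at load time
    print(ad["star", "particle_mass"])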


https://bitbucket.org/yt_analysis/yt/commits/75218a8eb986/
Changeset:   75218a8eb986
User:        Corentin Cadiou
Date:        2017-11-13 17:14:30+00:00
Summary:     remove 'old' doc
Affected #:  1 file

diff -r 2ad1cd87badc28861a014d32a3576bfb9fa79514 -r 75218a8eb986e1414fa9a21a299d8896a57686d4 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -2038,22 +2038,6 @@
 default (including dark matter, stars, tracer particles, …). Sink
 particles have the particle type ``sink``.
 
-Particle automatic filtering
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-If your RAMSES version is more recent than `stable_17_09`, it is
-possible to tell yt to filter the particles in your dataset. This is
-not done by default as it requires reading all the particles, which
-may be slow. To use this feature, run
-
-.. code-block:: python
-
-   ds = yt.load('ramses_new_format/output_00011/info_00011.txt')
-
-   # This will load the particle types automatically
-   ds.add_ptypes()
-
-
 Adding custom particle fields
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 


https://bitbucket.org/yt_analysis/yt/commits/194d9bfa922e/
Changeset:   194d9bfa922e
User:        Corentin Cadiou
Date:        2017-11-13 17:14:39+00:00
Summary:     use more specific exception
Affected #:  1 file

diff -r 75218a8eb986e1414fa9a21a299d8896a57686d4 -r 194d9bfa922e0bcdb96015ab989961f41c17409e yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -23,6 +23,7 @@
 import yt.utilities.fortran_utils as fpu
 from yt.utilities.lib.cosmology_time import \
     get_ramses_ages
+from yt.utilities.exceptions import YTFieldTypeNotFound
 from yt.extern.six import PY3
 import re
 
@@ -157,7 +158,7 @@
 
             else:
                 # Raise here an exception
-                raise Exception('Unknown particle type %s' % ptype)
+                raise YTFieldTypeNotFound(ptype)
 
             tr.update(_ramses_particle_file_handler(
                 fname, foffsets, data_types, subset, subs_fields))


https://bitbucket.org/yt_analysis/yt/commits/c2e49ccdcbdf/
Changeset:   c2e49ccdcbdf
User:        Corentin Cadiou
Date:        2017-11-13 17:21:36+00:00
Summary:     add explicit exceptions
Affected #:  2 files

diff -r 194d9bfa922e0bcdb96015ab989961f41c17409e -r c2e49ccdcbdf521e8dab37c3aad19d35240ae258 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -23,7 +23,8 @@
 import yt.utilities.fortran_utils as fpu
 from yt.utilities.lib.cosmology_time import \
     get_ramses_ages
-from yt.utilities.exceptions import YTFieldTypeNotFound
+from yt.utilities.exceptions import YTFieldTypeNotFound, YTOutputFormatNotImplemented, \
+    YTNotParsableFile
 from yt.extern.six import PY3
 import re
 
@@ -195,7 +196,7 @@
             for i, line in enumerate(f.readlines()):
                 tmp = VAR_DESC_RE.match(line)
                 if not tmp:
-                    raise Exception('Error while reading %s at line %s' % (fname, i+1))
+                    raise YTNotParsableFile(fname, i+1)
 
                 # ivar = tmp.group(1)
                 varname = tmp.group(2)
@@ -207,6 +208,6 @@
 
                 fields.append(("particle_%s" % varname, dtype))
         else:
-            raise Exception('Unrecognized particle file descriptor version: %s' % version)
+            raise YTOutputFormatNotImplemented()
 
     return fields

diff -r 194d9bfa922e0bcdb96015ab989961f41c17409e -r c2e49ccdcbdf521e8dab37c3aad19d35240ae258 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -424,6 +424,19 @@
         v += r"'%s'."
         return v % (self.obj_name, self.ds)
 
+class YTParticleOutputFormatNotImplemented(YTException):
+    def __str__(self):
+        return "The particle output format is not supported."
+
+class YTNotParsableFile(YTException):
+    def __init__(self, fname, line):
+        self.fname = fname
+        self.line = line
+
+    def __str__(self):
+        v = r"Error while parsing file %s at line %s"
+        return v % (self.fname, self.line)
+
 class YTRockstarMultiMassNotSupported(YTException):
     def __init__(self, mi, ma, ptype):
         self.mi = mi
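
Note a small mismatch in this changeset: io.py imports
YTOutputFormatNotImplemented, while exceptions.py defines
YTParticleOutputFormatNotImplemented; a later changeset below
reconciles the import. The parse-error exception can be exercised
directly (arguments illustrative):

    from yt.utilities.exceptions import YTNotParsableFile

    try:
        raise YTNotParsableFile("part_file_descriptor.txt", 3)
    except YTNotParsableFile as err:
        # Prints: Error while parsing file part_file_descriptor.txt at line 3
        print(err)
    # (The class is renamed YTFileNotParseable two changesets below.)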


https://bitbucket.org/yt_analysis/yt/commits/0511ea33f038/
Changeset:   0511ea33f038
User:        Corentin Cadiou
Date:        2017-11-25 10:46:03+00:00
Summary:     Update to new-new format
Affected #:  1 file

diff -r c2e49ccdcbdf521e8dab37c3aad19d35240ae258 -r 0511ea33f038bb66e6086ff3d0dc0840664e11c8 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -23,7 +23,7 @@
 import yt.utilities.fortran_utils as fpu
 from yt.utilities.lib.cosmology_time import \
     get_ramses_ages
-from yt.utilities.exceptions import YTFieldTypeNotFound, YTOutputFormatNotImplemented, \
+from yt.utilities.exceptions import YTFieldTypeNotFound, YTParticleOutputFormatNotImplemented, \
     YTNotParsableFile
 from yt.extern.six import PY3
 import re
@@ -166,32 +166,42 @@
 
         return tr
 
-VERSION_RE = re.compile(' *version: *(\d+)')
-VAR_DESC_RE = re.compile(' *variable # *(\d+): *(\w+)')
+VERSION_RE = re.compile('# version: *(\d+)')
+VAR_DESC_RE = re.compile(r'\s*(\d+),\s*(\w+),\s*(\w+)')
 def _read_part_file_descriptor(fname):
     """
     Read the particle file descriptor and return the list of fields found.
     """
-    # The kind of the known types
-    assoc = [('position_%s' % k, 'd') for k in 'xyz'] + \
-            [('velocity_%s' % k, 'd') for k in 'xyz'] + \
-            [('mass', 'd'), ('identity', 'i'), ('levelp', 'i'),
-             ('family', 'b'), ('tag', 'b'), ('birth_time', 'd'),
-             ('metallicity', 'd')]
+    # Mapping
+    mapping = [
+        ('position_x', 'particle_position_x'),
+        ('position_y', 'particle_position_y'),
+        ('position_z', 'particle_position_z'),
+        ('velocity_x', 'particle_velocity_x'),
+        ('velocity_y', 'particle_velocity_y'),
+        ('velocity_z', 'particle_velocity_z'),
+        ('mass', 'particle_mass'),
+        ('identity', 'particle_identity'),
+        ('levelp', 'particle_level'),
+        ('family', 'particle_family'),
+        ('tag', 'particle_tag')
+    ]
+    # Convert in dictionary
+    mapping = {k: v for k, v in mapping}
 
-    assoc = {k: v for k, v in assoc}
     if True: #with open(fname, 'r') as f:
         f = open(fname, 'r')
         line = f.readline()
         tmp = VERSION_RE.match(line)
         mylog.info('Reading part file descriptor.')
         if not tmp:
-            print(line)
-            raise Exception('File format not understood')
+            raise YTParticleOutputFormatNotImplemented()
 
         version = int(tmp.group(1))
 
         if version == 1:
+            # Skip one line (containing the headers)
+            line = f.readline()
             fields = []
             for i, line in enumerate(f.readlines()):
                 tmp = VAR_DESC_RE.match(line)
@@ -200,14 +210,15 @@
 
                 # ivar = tmp.group(1)
                 varname = tmp.group(2)
+                dtype = tmp.group(3)
 
-                if varname in assoc:
-                    dtype = assoc[varname]
+                if varname in mapping:
+                    varname = mapping[varname]
                 else:
-                    dtype = 'd'
+                    varname = 'particle_%s' % varname
 
-                fields.append(("particle_%s" % varname, dtype))
+                fields.append((varname, dtype))
         else:
-            raise YTOutputFormatNotImplemented()
+            raise YTParticleOutputFormatNotImplemented()
 
     return fields
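
The descriptor format itself changed between the two parser versions:
the version line is now a '#' comment, and each field line is CSV-like
with an explicit dtype column, which is why the hardcoded type table
becomes a pure name mapping. A sketch of the new matching behavior on
illustrative contents:

    import re

    VERSION_RE = re.compile(r'# version: *(\d+)')
    VAR_DESC_RE = re.compile(r'\s*(\d+),\s*(\w+),\s*(\w+)')

    # Illustrative contents of the CSV-like descriptor.
    sample = ["# version: 1",
              "# ivar, variable_name, variable_type",
              "  1, position_x, d",
              "  8, family, b"]

    assert VERSION_RE.match(sample[0])
    for line in sample[2:]:      # skip the header line, as the parser does
        ivar, name, dtype = VAR_DESC_RE.match(line).groups()
        print(name, dtype)
    # position_x d
    # family b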


https://bitbucket.org/yt_analysis/yt/commits/ac05d12aef94/
Changeset:   ac05d12aef94
User:        Corentin Cadiou
Date:        2017-11-25 11:14:50+00:00
Summary:     more idiomatic name
Affected #:  2 files

diff -r 0511ea33f038bb66e6086ff3d0dc0840664e11c8 -r ac05d12aef9431cfcdbb3c0c38954e9ec8ae8b52 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -24,7 +24,7 @@
 from yt.utilities.lib.cosmology_time import \
     get_ramses_ages
 from yt.utilities.exceptions import YTFieldTypeNotFound, YTParticleOutputFormatNotImplemented, \
-    YTNotParsableFile
+    YTFileNotParseable
 from yt.extern.six import PY3
 import re
 
@@ -206,7 +206,7 @@
             for i, line in enumerate(f.readlines()):
                 tmp = VAR_DESC_RE.match(line)
                 if not tmp:
-                    raise YTNotParsableFile(fname, i+1)
+                    raise YTFileNotParseable(fname, i+1)
 
                 # ivar = tmp.group(1)
                 varname = tmp.group(2)

diff -r 0511ea33f038bb66e6086ff3d0dc0840664e11c8 -r ac05d12aef9431cfcdbb3c0c38954e9ec8ae8b52 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -428,7 +428,7 @@
     def __str__(self):
         return "The particle output format is not supported."
 
-class YTNotParsableFile(YTException):
+class YTFileNotParseable(YTException):
     def __init__(self, fname, line):
         self.fname = fname
         self.line = line


https://bitbucket.org/yt_analysis/yt/commits/6f582b4ebffb/
Changeset:   6f582b4ebffb
User:        Corentin Cadiou
Date:        2017-11-25 11:55:49+00:00
Summary:     only add particle types when 'particle_family' is found
Affected #:  1 file

diff -r ac05d12aef9431cfcdbb3c0c38954e9ec8ae8b52 -r 6f582b4ebffb8877c08f666e8138f7c564cab83d yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -687,15 +687,16 @@
         self.storage_filename = storage_filename
 
         # Add particles filters
-        for fname, value in particle_families.items():
-            def loc(val):
-                def closure(pfilter, data):
-                    filter = data[(pfilter.filtered_type, "particle_family")] == val
-                    return filter
+        if ('io', 'particle_family') in self.field_list:
+            for fname, value in particle_families.items():
+                def loc(val):
+                    def closure(pfilter, data):
+                        filter = data[(pfilter.filtered_type, "particle_family")] == val
+                        return filter
 
-                return closure
-            add_particle_filter(fname, loc(value),
-                                filtered_type='io', requires=['particle_family'])
+                    return closure
+                add_particle_filter(fname, loc(value),
+                                    filtered_type='io', requires=['particle_family'])
 
 
     def create_field_info(self, *args, **kwa):
@@ -704,9 +705,10 @@
         self._add_ptypes()
 
     def _add_ptypes(self):
-        for k in particle_families.keys():
-            mylog.info('Adding particle_type: %s' % k)
-            self.add_particle_filter('%s' % k)
+        if ('io', 'particle_family') in self.field_list:
+            for k in particle_families.keys():
+                mylog.info('Adding particle_type: %s' % k)
+                self.add_particle_filter('%s' % k)
 
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
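
A note on the loc(val) indirection preserved in this hunk: Python closures capture loop variables by reference, so without the factory every registered filter would compare particle_family against the last value in particle_families. A minimal demonstration of the pitfall and the fix:

    funcs = [lambda: value for value in (1, 2, 3)]
    print([f() for f in funcs])    # [3, 3, 3] -- late binding

    def loc(val):                  # same factory trick as in the diff
        return lambda: val

    funcs = [loc(value) for value in (1, 2, 3)]
    print([f() for f in funcs])    # [1, 2, 3]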


https://bitbucket.org/yt_analysis/yt/commits/070f5c48f8d0/
Changeset:   070f5c48f8d0
User:        Corentin Cadiou
Date:        2017-11-25 11:56:05+00:00
Summary:     be a bit more precise + updated dataset
Affected #:  1 file

diff -r 6f582b4ebffb8877c08f666e8138f7c564cab83d -r 070f5c48f8d001d341cc7b4d4b4fa67d283d3e42 yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -206,32 +206,31 @@
         assert(('sink', 'field') not in ds.field_list)
 
 
-ramses_new_format = "ramses_new_format/output_00011/info_00011.txt"
+ramses_new_format = "ramses_new_format/output_00002/info_00002.txt"
 @requires_file(ramses_new_format)
 def test_new_format():
     expected_particle_fields = [
-        ('io', 'particle_birth_time'),
-        ('io', 'particle_family'),
-        ('io', 'particle_identity'),
-        ('io', 'particle_levelp'),
-        ('io', 'particle_mass'),
-        ('io', 'particle_metallicity'),
-        ('io', 'particle_position_x'),
-        ('io', 'particle_position_y'),
-        ('io', 'particle_position_z'),
-        ('io', 'particle_tag'),
-        ('io', 'particle_velocity_x'),
-        ('io', 'particle_velocity_y'),
-        ('io', 'particle_velocity_z')]
+        ('star', 'particle_identity'),
+        ('star', 'particle_level'),
+        ('star', 'particle_mass'),
+        ('star', 'particle_metallicity'),
+        ('star', 'particle_position_x'),
+        ('star', 'particle_position_y'),
+        ('star', 'particle_position_z'),
+        ('star', 'particle_tag'),
+        ('star', 'particle_velocity_x'),
+        ('star', 'particle_velocity_y'),
+        ('star', 'particle_velocity_z')]
 
     ds = yt.load(ramses_new_format)
     ad = ds.all_data()
 
     # Check all the expected fields exist and can be accessed
     for f in expected_particle_fields:
-        assert(f in ds.field_list)
+        assert(f in ds.derived_field_list)
         ad[f]
 
     # Check there is only stars with tag 0 (it should be right)
-    assert(all(ad['particle_family'] == 2))
-    assert(all(ad['particle_tag'] == 0))
+    assert(all(ad['star', 'particle_family'] == 2))
+    assert(all(ad['star', 'particle_tag'] == 0))
+    assert(len(ad['star', 'particle_tag']) == 600)
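
For context, a hypothetical session against this sample dataset. The filtered "star" fields live in ds.derived_field_list rather than ds.field_list (particle filters are derived from the on-disk 'io' type), which is why the assertion above changed:

    import yt

    ds = yt.load("ramses_new_format/output_00002/info_00002.txt")
    ad = ds.all_data()
    print(('star', 'particle_mass') in ds.derived_field_list)  # True
    print(('star', 'particle_mass') in ds.field_list)          # False
    ad['star', 'particle_mass']  # selects only family == 2 particles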


https://bitbucket.org/yt_analysis/yt/commits/a3f0d3ab8e8d/
Changeset:   a3f0d3ab8e8d
User:        Corentin Cadiou
Date:        2017-11-25 12:12:53+00:00
Summary:     remove debug stmt
Affected #:  1 file

diff -r 070f5c48f8d001d341cc7b4d4b4fa67d283d3e42 -r a3f0d3ab8e8d7ba25434084e25554439f626a6fd yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -189,7 +189,7 @@
     # Convert in dictionary
     mapping = {k: v for k, v in mapping}
 
-    if True: #with open(fname, 'r') as f:
+    with open(fname, 'r') as f:
         f = open(fname, 'r')
         line = f.readline()
         tmp = VERSION_RE.match(line)
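
One leftover in this hunk: the "f = open(fname, 'r')" line inside the with block survives the edit, so the file is opened twice and the second handle is never closed; it is dropped in the "remove erroneous line" changeset further down. The problematic shape, as a self-contained repro:

    with open(__file__, 'r') as f:
        f = open(__file__, 'r')  # rebinds f; this second handle leaks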


https://bitbucket.org/yt_analysis/yt/commits/7e596f904f0a/
Changeset:   7e596f904f0a
User:        Corentin Cadiou
Date:        2017-11-30 18:58:26+00:00
Summary:     use more explicit import
Affected #:  1 file

diff -r a3f0d3ab8e8d7ba25434084e25554439f626a6fd -r 7e596f904f0a54fb12874a486dcce5d142e960e8 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -34,7 +34,7 @@
     Dataset
 from yt.data_objects.octree_subset import \
     OctreeSubset
-from yt import add_particle_filter
+from yt.data_objects.particle_filters import add_particle_filter
 
 from .definitions import ramses_header, field_aliases, particle_families
 from .io import _read_part_file_descriptor


https://bitbucket.org/yt_analysis/yt/commits/e8758457871f/
Changeset:   e8758457871f
User:        Corentin Cadiou
Date:        2017-11-30 19:04:20+00:00
Summary:     Moving magic particle filtering to `create_field_info` method
Affected #:  1 file

diff -r 7e596f904f0a54fb12874a486dcce5d142e960e8 -r e8758457871f0742a7e8932e6644bb5904051364 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -686,7 +686,11 @@
 
         self.storage_filename = storage_filename
 
-        # Add particles filters
+
+    def create_field_info(self, *args, **kwa):
+        """Extend create_field_info to add the particles types."""
+        super(RAMSESDataset, self).create_field_info(*args, **kwa)
+        # Register particle filters
         if ('io', 'particle_family') in self.field_list:
             for fname, value in particle_families.items():
                 def loc(val):
@@ -698,14 +702,6 @@
                 add_particle_filter(fname, loc(value),
                                     filtered_type='io', requires=['particle_family'])
 
-
-    def create_field_info(self, *args, **kwa):
-        """Extend create_field_info to add the particles types."""
-        Dataset.create_field_info(self, *args, **kwa)
-        self._add_ptypes()
-
-    def _add_ptypes(self):
-        if ('io', 'particle_family') in self.field_list:
             for k in particle_families.keys():
                 mylog.info('Adding particle_type: %s' % k)
                 self.add_particle_filter('%s' % k)


https://bitbucket.org/yt_analysis/yt/commits/01541439c727/
Changeset:   01541439c727
User:        Corentin Cadiou
Date:        2017-11-30 22:34:37+00:00
Summary:     use one-to-one filter registry
Affected #:  2 files

diff -r e8758457871f0742a7e8932e6644bb5904051364 -r 01541439c72793fc5543e4a4dd81760ac9cceee3 yt/data_objects/particle_filters.py
--- a/yt/data_objects/particle_filters.py
+++ b/yt/data_objects/particle_filters.py
@@ -24,7 +24,7 @@
 from yt.utilities.exceptions import YTIllDefinedFilter
 
 # One to many mapping
-filter_registry = defaultdict(list)
+filter_registry = defaultdict(None)
 
 class DummyFieldInfo(object):
     particle_type = True
@@ -131,7 +131,7 @@
     if requires is None:
         requires = []
     filter = ParticleFilter(name, function, requires, filtered_type)
-    filter_registry[name].append(filter)
+    filter_registry[name] = filter
 
 
 def particle_filter(name=None, requires=None, filtered_type='all'):

diff -r e8758457871f0742a7e8932e6644bb5904051364 -r 01541439c72793fc5543e4a4dd81760ac9cceee3 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -663,11 +663,10 @@
         self.known_filters[n] = None
         if isinstance(filter, string_types):
             used = False
-            for f in filter_registry[filter]:
-                used = self._setup_filtered_type(f)
-                if used:
-                    filter = f
-                    break
+            f = filter_registry[filter]
+            used = self._setup_filtered_type(f)
+            if used:
+                filter = f
         else:
             used = self._setup_filtered_type(filter)
         if not used:


https://bitbucket.org/yt_analysis/yt/commits/38134ad87c54/
Changeset:   38134ad87c54
User:        Corentin Cadiou
Date:        2017-11-30 22:34:51+00:00
Summary:     notify user when overriding a particle filter
Affected #:  1 file

diff -r 01541439c72793fc5543e4a4dd81760ac9cceee3 -r 38134ad87c54767885c11928498e69ea28dfe895 yt/data_objects/particle_filters.py
--- a/yt/data_objects/particle_filters.py
+++ b/yt/data_objects/particle_filters.py
@@ -22,6 +22,7 @@
 from yt.fields.field_info_container import \
     NullFunc, TranslationFunc
 from yt.utilities.exceptions import YTIllDefinedFilter
+from yt.funcs import mylog
 
 # One to many mapping
 filter_registry = defaultdict(None)
@@ -131,6 +132,8 @@
     if requires is None:
         requires = []
     filter = ParticleFilter(name, function, requires, filtered_type)
+    if filter_registry[name] is not None:
+        mylog.warning('The %s particle filter already exists. Overriding.' % name)
     filter_registry[name] = filter
 
 


https://bitbucket.org/yt_analysis/yt/commits/65110ec32ef5/
Changeset:   65110ec32ef5
User:        Corentin Cadiou
Date:        2017-11-30 23:00:14+00:00
Summary:     Use standard dict for filter registry
Affected #:  2 files

diff -r 38134ad87c54767885c11928498e69ea28dfe895 -r 65110ec32ef522aa479ac373031a073fd88f93d8 yt/data_objects/particle_filters.py
--- a/yt/data_objects/particle_filters.py
+++ b/yt/data_objects/particle_filters.py
@@ -15,7 +15,6 @@
 #-----------------------------------------------------------------------------
 
 import copy
-from collections import defaultdict
 
 from contextlib import contextmanager
 
@@ -24,8 +23,8 @@
 from yt.utilities.exceptions import YTIllDefinedFilter
 from yt.funcs import mylog
 
-# One to many mapping
-filter_registry = defaultdict(None)
+# One to one mapping
+filter_registry = {}
 
 class DummyFieldInfo(object):
     particle_type = True
@@ -132,7 +131,7 @@
     if requires is None:
         requires = []
     filter = ParticleFilter(name, function, requires, filtered_type)
-    if filter_registry[name] is not None:
+    if filter_registry.get(name, None) is not None:
         mylog.warning('The %s particle filter already exists. Overriding.' % name)
     filter_registry[name] = filter
 

diff -r 38134ad87c54767885c11928498e69ea28dfe895 -r 65110ec32ef522aa479ac373031a073fd88f93d8 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -663,7 +663,9 @@
         self.known_filters[n] = None
         if isinstance(filter, string_types):
             used = False
-            f = filter_registry[filter]
+            f = filter_registry.get(filter, None)
+            if f is None:
+                return False
             used = self._setup_filtered_type(f)
             if used:
                 filter = f
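
The switch to a plain dict plus .get() above closes a real hole: a default_factory of None means no defaulting at all, so under the previous changeset's defaultdict(None) the check "filter_registry[name] is not None" raised KeyError the first time any new name was registered. A quick demonstration:

    from collections import defaultdict

    reg = defaultdict(None)        # default_factory=None: no defaulting
    try:
        reg['new_filter'] is not None
    except KeyError:
        print('a brand-new filter name raises here')
    print(reg.get('new_filter'))   # None, safe -- the form used above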


https://bitbucket.org/yt_analysis/yt/commits/7777cafd00e5/
Changeset:   7777cafd00e5
User:        Corentin Cadiou
Date:        2017-11-30 23:00:31+00:00
Summary:     Test overriding of particle_filter
Affected #:  1 file

diff -r 65110ec32ef522aa479ac373031a073fd88f93d8 -r 7777cafd00e5dcaed3ed5e9961c2a618abea2e4b yt/data_objects/tests/test_particle_filter.py
--- a/yt/data_objects/tests/test_particle_filter.py
+++ b/yt/data_objects/tests/test_particle_filter.py
@@ -33,6 +33,51 @@
     ad['deposit', 'stars_cic']
     assert True
 
+
+def test_add_particle_filter_overriding():
+    """Test the add_particle_filter overriding"""
+    from yt.data_objects.particle_filters import filter_registry
+    from yt.funcs import mylog
+
+    def star_0(pfilter, data):
+        pass
+
+    def star_1(pfilter, data):
+        pass
+
+    # Use a closure to store whether the warning was called
+    def closure(status):
+        def warning_patch(*args, **kwargs):
+            print('I am called!')
+            status[0] = True
+
+        def was_called():
+            return status[0]
+
+        return warning_patch, was_called
+
+    ## Test 1: we add a dummy particle filter
+    add_particle_filter("dummy", function=star_0, filtered_type='all',
+                        requires=["creation_time"])
+    assert 'dummy' in filter_registry
+    assert_equal(filter_registry['dummy'].function, star_0)
+
+    ## Test 2: we add another dummy particle filter.
+    ##         a warning is expected. We use the above closure to
+    ##         check that.
+    # Store the original warning function
+    warning = mylog.warning
+    monkey_warning, monkey_patch_was_called = closure([False])
+    mylog.warning = monkey_warning
+    add_particle_filter("dummy", function=star_1, filtered_type='all',
+                        requires=["creation_time"])
+    assert_equal(filter_registry['dummy'].function, star_1)
+    assert_equal(monkey_patch_was_called(), True)
+
+    # Restore the original warning function
+    mylog.warning = warning
+
+
 @requires_file(iso_galaxy)
 def test_particle_filter():
     """Test the particle_filter decorator"""

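The hand-rolled warning_patch/was_called closure above does the job without any test dependencies; with unittest.mock (Python 3 only, which may be why the test avoids it while yt still supports Python 2 via yt.extern.six) the same assertion could read:

    from unittest import mock

    from yt.data_objects.particle_filters import add_particle_filter
    from yt.funcs import mylog

    def star_1(pfilter, data):
        pass

    add_particle_filter("dummy", function=star_1, filtered_type='all',
                        requires=["creation_time"])
    with mock.patch.object(mylog, 'warning') as warning:
        # registering the same name again should trigger the warning
        add_particle_filter("dummy", function=star_1, filtered_type='all',
                            requires=["creation_time"])
    assert warning.called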

https://bitbucket.org/yt_analysis/yt/commits/4d9c249cf07d/
Changeset:   4d9c249cf07d
User:        Corentin Cadiou
Date:        2017-12-03 13:05:12+00:00
Summary:     remove call stmt
Affected #:  1 file

diff -r 7777cafd00e5dcaed3ed5e9961c2a618abea2e4b -r 4d9c249cf07db368041df2c6b886837b1c91fc5c yt/data_objects/tests/test_particle_filter.py
--- a/yt/data_objects/tests/test_particle_filter.py
+++ b/yt/data_objects/tests/test_particle_filter.py
@@ -48,7 +48,6 @@
     # Use a closure to store whether the warning was called
     def closure(status):
         def warning_patch(*args, **kwargs):
-            print('I am called!')
             status[0] = True
 
         def was_called():
@@ -106,7 +105,7 @@
 
     for grid in ds.index.grids[20:31]:
         cg = ds.covering_grid(grid.Level, grid.LeftEdge, grid.ActiveDimensions)
-        
+
         assert_equal(cg['stars', 'particle_ones'].shape[0],
                      grid['stars', 'particle_ones'].shape[0])
         assert_equal(cg['stars', 'particle_mass'].shape[0],


https://bitbucket.org/yt_analysis/yt/commits/fc702fa58ff5/
Changeset:   fc702fa58ff5
User:        Corentin Cadiou
Date:        2017-12-04 15:05:19+00:00
Summary:     remove erroneous line
Affected #:  1 file

diff -r 4d9c249cf07db368041df2c6b886837b1c91fc5c -r fc702fa58ff563039ad3c927b287bb0cec79feea yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -190,7 +190,6 @@
     mapping = {k: v for k, v in mapping}
 
     with open(fname, 'r') as f:
-        f = open(fname, 'r')
         line = f.readline()
         tmp = VERSION_RE.match(line)
         mylog.info('Reading part file descriptor.')


https://bitbucket.org/yt_analysis/yt/commits/fac9cc03df6b/
Changeset:   fac9cc03df6b
User:        Corentin Cadiou
Date:        2017-12-04 15:07:34+00:00
Summary:     cleanup global scope
Affected #:  1 file

diff -r fc702fa58ff563039ad3c927b287bb0cec79feea -r fac9cc03df6b0eebe8cdbf0197372561eb6a0dd6 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -166,12 +166,13 @@
 
         return tr
 
-VERSION_RE = re.compile('# version: *(\d+)')
-VAR_DESC_RE = re.compile(r'\s*(\d+),\s*(\w+),\s*(\w+)')
 def _read_part_file_descriptor(fname):
     """
     Read the particle file descriptor and returns the array of the fields found.
     """
+    VERSION_RE = re.compile('# version: *(\d+)')
+    VAR_DESC_RE = re.compile(r'\s*(\d+),\s*(\w+),\s*(\w+)')
+
     # Mapping
     mapping = [
         ('position_x', 'particle_position_x'),
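
A small aside on the first pattern above: it lacks the raw-string prefix, but '\d' is not a recognized escape in an ordinary string, so the backslash survives and the compiled regex is identical (later Python versions emit a DeprecationWarning for such escapes); the second pattern already uses a raw string. Quick check:

    import re

    assert re.match('# version: *(\d+)', '# version: 12').group(1) == '12'
    assert re.match(r'# version: *(\d+)', '# version: 12').group(1) == '12'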


https://bitbucket.org/yt_analysis/yt/commits/12726667c23f/
Changeset:   12726667c23f
User:        Corentin Cadiou
Date:        2017-12-04 15:08:01+00:00
Summary:     only one line necessary
Affected #:  1 file

diff -r fac9cc03df6b0eebe8cdbf0197372561eb6a0dd6 -r 12726667c23f73a9a524474d35ef2eb934e88b6e yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -249,7 +249,6 @@
         if self._has_part_descriptor:
             particle_fields = (
                 _read_part_file_descriptor(self._part_file_descriptor))
-            ptype = 'io'
         else:
             particle_fields = [
                 ("particle_position_x", "d"),
@@ -265,8 +264,7 @@
             if self.ds._extra_particle_fields is not None:
                 particle_fields += self.ds._extra_particle_fields
 
-            ptype = 'io'
-
+        ptype = 'io'
 
         field_offsets = {}
         _pfields = {}


https://bitbucket.org/yt_analysis/yt/commits/8469833c7b2e/
Changeset:   8469833c7b2e
User:        Corentin Cadiou
Date:        2017-12-04 15:23:59+00:00
Summary:     ` -> ``
Affected #:  1 file

diff -r 12726667c23f73a9a524474d35ef2eb934e88b6e -r 8469833c7b2e5d6e889c65e7a0ce187ec7f523d2 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -2015,8 +2015,8 @@
    # ('all', 'family') and ('all', 'info') now in ds.field_list
 
 yt also support the new way particles are handled introduced after
-version `stable_17_09` (the version introduced after the 2017 Ramses
-User Meeting). In this case, the file `part_file_descriptor.txt`
+version ``stable_17_091` (the version introduced after the 2017 Ramses
+User Meeting). In this case, the file ``part_file_descriptor.txt``
 containing the different fields in the particle files will be read. If
 you use a custom version of RAMSES, make sure this file is up-to-date
 and reflects the true layout of the particles.
@@ -2048,7 +2048,7 @@
 convention from
 `python struct module <https://docs.python.org/3.5/library/struct.html#format-characters>`_.
 For example, to add support for a longint field named
-`my_custom_field`, one would add `('my_custom_field', 'l')` to `assoc`.
+``my_custom_field``, one would add ``('my_custom_field', 'l')`` to ``assoc``.
 
 
 .. _loading-sph-data:


https://bitbucket.org/yt_analysis/yt/commits/8b206b01bed6/
Changeset:   8b206b01bed6
User:        ngoldbaum
Date:        2017-12-04 17:20:18+00:00
Summary:     fix backtick
Affected #:  1 file

diff -r 8469833c7b2e5d6e889c65e7a0ce187ec7f523d2 -r 8b206b01bed6f3878e15131aca9d00694ee9e66e doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -2015,7 +2015,7 @@
    # ('all', 'family') and ('all', 'info') now in ds.field_list
 
 yt also support the new way particles are handled introduced after
-version ``stable_17_091` (the version introduced after the 2017 Ramses
+version ``stable_17_091`` (the version introduced after the 2017 Ramses
 User Meeting). In this case, the file ``part_file_descriptor.txt``
 containing the different fields in the particle files will be read. If
 you use a custom version of RAMSES, make sure this file is up-to-date


https://bitbucket.org/yt_analysis/yt/commits/c2298bef11d3/
Changeset:   c2298bef11d3
User:        ngoldbaum
Date:        2017-12-04 17:22:22+00:00
Summary:     Merge pull request #1616 from cphyc/ramses-new-particles

RAMSES new particles
Affected #:  10 files

diff -r befe05adfea96a3285199c09543b6c22295d946f -r c2298bef11d31e9ea4e28c6a0b4eaf9807ccc2fb doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -318,7 +318,7 @@
 * yt does not read the Maestro base state (although you can have Maestro
   map it to a full Cartesian state variable before writing the plotfile
   to get around this).  E-mail the dev list if you need this support.
-* yt supports AMReX/BoxLib particle data stored in the standard format used 
+* yt supports AMReX/BoxLib particle data stored in the standard format used
   by Nyx and WarpX, and optionally Castro. It currently does not support the ASCII particle
   data used by Maestro and Castro.
 * For Maestro, yt aliases either "tfromp" or "tfromh to" ``temperature``
@@ -331,7 +331,7 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Most AMReX/BoxLib codes output cell-centered data. If the underlying discretization
-is not cell-centered, then fields are typically averaged to cell centers before 
+is not cell-centered, then fields are typically averaged to cell centers before
 they are written to plot files for visualization. WarpX, however, has the option
 to output the raw (i.e., not averaged to cell centers) data as well.  If you
 run your WarpX simulation with ``warpx.plot_raw_fields = 1`` in your inputs
@@ -348,10 +348,10 @@
 The raw fields in WarpX are nodal in at least one direction. We define a field
 to be "nodal" in a given direction if the field data is defined at the "low"
 and "high" sides of the cell in that direction, rather than at the cell center.
-Instead of returning one field value per cell selected, nodal fields return a 
+Instead of returning one field value per cell selected, nodal fields return a
 number of values, depending on their centering. This centering is marked by
 a `nodal_flag` that describes whether the fields is nodal in each dimension.
-``nodal_flag = [0, 0, 0]`` means that the field is cell-centered, while 
+``nodal_flag = [0, 0, 0]`` means that the field is cell-centered, while
 ``nodal_flag = [0, 0, 1]`` means that the field is nodal in the z direction
 and cell centered in the others, i.e. it is defined on the z faces of each cell.
 ``nodal_flag = [1, 1, 0]`` would mean that the field is centered in the z direction,
@@ -371,7 +371,7 @@
 
 Here, the field ``('raw', 'Ex')`` is nodal in two directions, so four values per cell
 are returned, corresponding to the four edges in each cell on which the variable
-is defined. ``('raw', 'Bx')`` is nodal in one direction, so two values are returned 
+is defined. ``('raw', 'Bx')`` is nodal in one direction, so two values are returned
 per cell. The standard, averaged-to-cell-centers fields are still available.
 
 Currently, slices and data selection are implemented for nodal fields. Projections,
@@ -684,7 +684,7 @@
 * fits.gz
 * fts.gz
 
-yt can currently read two kinds of FITS files: FITS image files and FITS 
+yt can currently read two kinds of FITS files: FITS image files and FITS
 binary table files containing positions, times, and energies of X-ray events.
 
 Though a FITS image is composed of a single array in the FITS file,
@@ -760,14 +760,14 @@
 If your data is of the first case, yt will determine the length units based
 on the information in the header. If your data is of the second or third
 cases, no length units will be assigned, but the world coordinate information
-about the axes will be stored in separate fields. If your data is of the 
-fourth type, the coordinates of the first three axes will be determined 
+about the axes will be stored in separate fields. If your data is of the
+fourth type, the coordinates of the first three axes will be determined
 according to cases 1-3.
 
 .. note::
 
-  Linear length-based coordinates (Case 1 above) are only supported if all 
-  dimensions have the same value for ``CUNITx``. WCS coordinates are only 
+  Linear length-based coordinates (Case 1 above) are only supported if all
+  dimensions have the same value for ``CUNITx``. WCS coordinates are only
   supported for Cases 2-4.
 
 FITS Data Decomposition
@@ -791,8 +791,8 @@
              512	  981940800
 
 For 3D spectral-cube data, the decomposition into grids will be done along the
-spectral axis since this will speed up many common operations for this 
-particular type of dataset. 
+spectral axis since this will speed up many common operations for this
+particular type of dataset.
 
 yt will generate its own domain decomposition, but the number of grids can be
 set manually by passing the ``nprocs`` parameter to the ``load`` call:
@@ -830,10 +830,10 @@
 will be generated from the pixel coordinates in the file using the WCS
 transformations provided by AstroPy.
 
-X-ray event data will be loaded as particle fields in yt, but a grid will be 
-constructed from the WCS information in the FITS header. There is a helper 
-function, ``setup_counts_fields``, which may be used to make deposited image 
-fields from the event data for different energy bands (for an example see 
+X-ray event data will be loaded as particle fields in yt, but a grid will be
+constructed from the WCS information in the FITS header. There is a helper
+function, ``setup_counts_fields``, which may be used to make deposited image
+fields from the event data for different energy bands (for an example see
 :ref:`xray_fits`).
 
 .. note::
@@ -848,7 +848,7 @@
 Additional Options
 ^^^^^^^^^^^^^^^^^^
 
-The following are additional options that may be passed to the ``load`` command 
+The following are additional options that may be passed to the ``load`` command
 when analyzing FITS data:
 
 ``nan_mask``
@@ -888,9 +888,9 @@
 Miscellaneous Tools for Use with FITS Data
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-A number of tools have been prepared for use with FITS data that enhance yt's 
-visualization and analysis capabilities for this particular type of data. These 
-are included in the ``yt.frontends.fits.misc`` module, and can be imported like 
+A number of tools have been prepared for use with FITS data that enhance yt's
+visualization and analysis capabilities for this particular type of data. These
+are included in the ``yt.frontends.fits.misc`` module, and can be imported like
 so:
 
 .. code-block:: python
@@ -900,7 +900,7 @@
 ``setup_counts_fields``
 """""""""""""""""""""""
 
-This function can be used to create image fields from X-ray counts data in 
+This function can be used to create image fields from X-ray counts data in
 different energy bands:
 
 .. code-block:: python
@@ -914,9 +914,9 @@
 ``ds9_region``
 """"""""""""""
 
-This function takes a `ds9 <http://ds9.si.edu/site/Home.html>`_ region and 
-creates a "cut region" data container from it, that can be used to select 
-the cells in the FITS dataset that fall within the region. To use this 
+This function takes a `ds9 <http://ds9.si.edu/site/Home.html>`_ region and
+creates a "cut region" data container from it, that can be used to select
+the cells in the FITS dataset that fall within the region. To use this
 functionality, the `pyregion <https://github.com/astropy/pyregion/>`_
 package must be installed.
 
@@ -930,8 +930,8 @@
 ``PlotWindowWCS``
 """""""""""""""""
 
-This class takes a on-axis ``SlicePlot`` or ``ProjectionPlot`` of FITS 
-data and adds celestial coordinates to the plot axes. To use it, a 
+This class takes a on-axis ``SlicePlot`` or ``ProjectionPlot`` of FITS
+data and adds celestial coordinates to the plot axes. To use it, a
 version of AstroPy >= 1.3 must be installed.
 
 .. code-block:: python
@@ -940,7 +940,7 @@
   wcs_slc.show() # for the IPython notebook
   wcs_slc.save()
 
-``WCSAxes`` is still in an experimental state, but as its functionality 
+``WCSAxes`` is still in an experimental state, but as its functionality
 improves it will be utilized more here.
 
 ``create_spectral_slabs``
@@ -948,14 +948,14 @@
 
 .. note::
 
-  The following functionality requires the 
-  `spectral-cube <http://spectral-cube.readthedocs.org>`_ library to be 
+  The following functionality requires the
+  `spectral-cube <http://spectral-cube.readthedocs.org>`_ library to be
   installed.
 
 If you have a spectral intensity dataset of some sort, and would like to
-extract emission in particular slabs along the spectral axis of a certain 
-width, ``create_spectral_slabs`` can be used to generate a dataset with 
-these slabs as different fields. In this example, we use it to extract 
+extract emission in particular slabs along the spectral axis of a certain
+width, ``create_spectral_slabs`` can be used to generate a dataset with
+these slabs as different fields. In this example, we use it to extract
 individual lines from an intensity cube:
 
 .. code-block:: python
@@ -968,12 +968,12 @@
                                     slab_centers, slab_width,
                                     nan_mask=0.0)
 
-All keyword arguments to ``create_spectral_slabs`` are passed on to ``load`` when 
-creating the dataset (see :ref:`additional_fits_options` above). In the 
-returned dataset, the different slabs will be different fields, with the field 
-names taken from the keys in ``slab_centers``. The WCS coordinates on the 
-spectral axis are reset so that the center of the domain along this axis is 
-zero, and the left and right edges of the domain along this axis are 
+All keyword arguments to ``create_spectral_slabs`` are passed on to ``load`` when
+creating the dataset (see :ref:`additional_fits_options` above). In the
+returned dataset, the different slabs will be different fields, with the field
+names taken from the keys in ``slab_centers``. The WCS coordinates on the
+spectral axis are reset so that the center of the domain along this axis is
+zero, and the left and right edges of the domain along this axis are
 :math:`\pm` ``0.5*slab_width``.
 
 Examples of Using FITS Data
@@ -991,10 +991,10 @@
 ----------
 
 FLASH HDF5 data is *mostly* supported and cared for by John ZuHone.  To load a
-FLASH dataset, you can use the ``yt.load`` command and provide it the file name of 
+FLASH dataset, you can use the ``yt.load`` command and provide it the file name of
 a plot file, checkpoint file, or particle file. Particle files require special handling
-depending on the situation, the main issue being that they typically lack grid information. 
-The first case is when you have a plotfile and a particle file that you would like to 
+depending on the situation, the main issue being that they typically lack grid information.
+The first case is when you have a plotfile and a particle file that you would like to
 load together. In the simplest case, this occurs automatically. For instance, if you
 were in a directory with the following files:
 
@@ -1003,8 +1003,8 @@
    radio_halo_1kpc_hdf5_plt_cnt_0100 # plotfile
    radio_halo_1kpc_hdf5_part_0100 # particle file
 
-where the plotfile and the particle file were created at the same time (therefore having 
-particle data consistent with the grid structure of the former). Notice also that the 
+where the plotfile and the particle file were created at the same time (therefore having
+particle data consistent with the grid structure of the former). Notice also that the
 prefix ``"radio_halo_1kpc_"`` and the file number ``100`` are the same. In this special case,
 the particle file will be loaded automatically when ``yt.load`` is called on the plotfile.
 This also works when loading a number of files in a time series.
@@ -1018,10 +1018,10 @@
     import yt
     ds = yt.load("radio_halo_1kpc_hdf5_plt_cnt_0100", particle_filename="radio_halo_1kpc_hdf5_part_0100")
 
-However, if you don't have a corresponding plotfile for a particle file, but would still 
-like to load the particle data, you can still call ``yt.load`` on the file. However, the 
+However, if you don't have a corresponding plotfile for a particle file, but would still
+like to load the particle data, you can still call ``yt.load`` on the file. However, the
 grid information will not be available, and the particle data will be loaded in a fashion
-similar to SPH data. 
+similar to SPH data.
 
 .. rubric:: Caveats
 
@@ -1349,7 +1349,7 @@
    yt only supports a block structure where the grid edges on the ``n``-th
    refinement level are aligned with the cell edges on the ``n-1``-th level.
 
-Particle fields are supported by adding 1-dimensional arrays to each 
+Particle fields are supported by adding 1-dimensional arrays to each
 ``grid``'s dict:
 
 .. code-block:: python
@@ -1394,7 +1394,7 @@
 simultaneously divide the domain into 12 chunks, so that you can take advantage
 of the underlying parallelism.
 
-Particle fields are added as one-dimensional arrays in a similar manner as the 
+Particle fields are added as one-dimensional arrays in a similar manner as the
 three-dimensional grid fields:
 
 .. code-block:: python
@@ -1562,7 +1562,7 @@
 
    # only plot the second
    sl = yt.SlicePlot(ds, 'z', ('connect2', 'test'))
-   
+
    # plot both
    sl = yt.SlicePlot(ds, 'z', ('all', 'test'))
 
@@ -1631,10 +1631,10 @@
 Gizmo Data
 ----------
 
-Gizmo datasets, including FIRE outputs, can be loaded into yt in the usual 
-manner.  Like other SPH data formats, yt loads Gizmo data as particle fields 
-and then uses smoothing kernels to deposit those fields to an underlying 
-grid structure as spatial fields as described in :ref:`loading-gadget-data`.  
+Gizmo datasets, including FIRE outputs, can be loaded into yt in the usual
+manner.  Like other SPH data formats, yt loads Gizmo data as particle fields
+and then uses smoothing kernels to deposit those fields to an underlying
+grid structure as spatial fields as described in :ref:`loading-gadget-data`.
 To load Gizmo datasets using the standard HDF5 output format::
 
    import yt
@@ -1642,11 +1642,11 @@
 
 Because the Gizmo output format is similar to the Gadget format, yt
 may load Gizmo datasets as Gadget depending on the circumstances, but this
-should not pose a problem in most situations.  FIRE outputs will be loaded 
-accordingly due to the number of metallicity fields found (11 or 17).  
+should not pose a problem in most situations.  FIRE outputs will be loaded
+accordingly due to the number of metallicity fields found (11 or 17).
 
 For Gizmo outputs written as raw binary outputs, you may have to specify
-a bounding box, field specification, and units as are done for standard 
+a bounding box, field specification, and units as are done for standard
 Gadget outputs.  See :ref:`loading-gadget-data` for more information.
 
 .. _halo-catalog-data:
@@ -2001,7 +2001,8 @@
    import yt
    ds = yt.load("output_00007/info_00007.txt")
 
-yt will attempt to guess the fields in the file.  You may also specify
+
+yt will attempt to guess the fields in the file. You may also specify
 a list of hydro fields by supplying the ``fields`` keyword in your
 call to ``load``. It is also possible to provide a list of *extra*
 particle fields by supplying the ``extra_particle_fields``:
@@ -2013,6 +2014,13 @@
    ds = yt.load("output_00001/info_00001.txt", extra_particle_fields=extra_fields)
    # ('all', 'family') and ('all', 'info') now in ds.field_list
 
+yt also support the new way particles are handled introduced after
+version ``stable_17_091`` (the version introduced after the 2017 Ramses
+User Meeting). In this case, the file ``part_file_descriptor.txt``
+containing the different fields in the particle files will be read. If
+you use a custom version of RAMSES, make sure this file is up-to-date
+and reflects the true layout of the particles.
+
 yt supports outputs made by the mainline ``RAMSES`` code as well as the
 ``RAMSES-RT`` fork. Files produces by ``RAMSES-RT`` are recognized as such
 based on the presence of a ``info_rt_*.txt`` file in the output directory.
@@ -2030,6 +2038,19 @@
 default (including dark matter, stars, tracer particles, …). Sink
 particles have the particle type ``sink``.
 
+Adding custom particle fields
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+It is possible to add support for particle fields. For this, one
+should tweak
+:func:`~yt.frontends.ramses.io._read_part_file_descriptor` to include
+the field as well as its data type to the assoc list, following the
+convention from
+`python struct module <https://docs.python.org/3.5/library/struct.html#format-characters>`_.
+For example, to add support for a longint field named
+``my_custom_field``, one would add ``('my_custom_field', 'l')`` to ``assoc``.
+
+
 .. _loading-sph-data:
 
 SPH Particle Data
@@ -2113,6 +2134,3 @@
 
     import yt
     ds = yt.load(filename, cosmology_parameters={})
-
-
-

diff -r befe05adfea96a3285199c09543b6c22295d946f -r c2298bef11d31e9ea4e28c6a0b4eaf9807ccc2fb yt/data_objects/particle_filters.py
--- a/yt/data_objects/particle_filters.py
+++ b/yt/data_objects/particle_filters.py
@@ -15,16 +15,16 @@
 #-----------------------------------------------------------------------------
 
 import copy
-from collections import defaultdict
 
 from contextlib import contextmanager
 
 from yt.fields.field_info_container import \
     NullFunc, TranslationFunc
 from yt.utilities.exceptions import YTIllDefinedFilter
+from yt.funcs import mylog
 
-# One to many mapping
-filter_registry = defaultdict(list)
+# One to one mapping
+filter_registry = {}
 
 class DummyFieldInfo(object):
     particle_type = True
@@ -131,7 +131,9 @@
     if requires is None:
         requires = []
     filter = ParticleFilter(name, function, requires, filtered_type)
-    filter_registry[name].append(filter)
+    if filter_registry.get(name, None) is not None:
+        mylog.warning('The %s particle filter already exists. Overriding.' % name)
+    filter_registry[name] = filter
 
 
 def particle_filter(name=None, requires=None, filtered_type='all'):

diff -r befe05adfea96a3285199c09543b6c22295d946f -r c2298bef11d31e9ea4e28c6a0b4eaf9807ccc2fb yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -663,11 +663,12 @@
         self.known_filters[n] = None
         if isinstance(filter, string_types):
             used = False
-            for f in filter_registry[filter]:
-                used = self._setup_filtered_type(f)
-                if used:
-                    filter = f
-                    break
+            f = filter_registry.get(filter, None)
+            if f is None:
+                return False
+            used = self._setup_filtered_type(f)
+            if used:
+                filter = f
         else:
             used = self._setup_filtered_type(filter)
         if not used:

diff -r befe05adfea96a3285199c09543b6c22295d946f -r c2298bef11d31e9ea4e28c6a0b4eaf9807ccc2fb yt/data_objects/tests/test_particle_filter.py
--- a/yt/data_objects/tests/test_particle_filter.py
+++ b/yt/data_objects/tests/test_particle_filter.py
@@ -33,6 +33,50 @@
     ad['deposit', 'stars_cic']
     assert True
 
+
+def test_add_particle_filter_overriding():
+    """Test the add_particle_filter overriding"""
+    from yt.data_objects.particle_filters import filter_registry
+    from yt.funcs import mylog
+
+    def star_0(pfilter, data):
+        pass
+
+    def star_1(pfilter, data):
+        pass
+
+    # Use a closure to store whether the warning was called
+    def closure(status):
+        def warning_patch(*args, **kwargs):
+            status[0] = True
+
+        def was_called():
+            return status[0]
+
+        return warning_patch, was_called
+
+    ## Test 1: we add a dummy particle filter
+    add_particle_filter("dummy", function=star_0, filtered_type='all',
+                        requires=["creation_time"])
+    assert 'dummy' in filter_registry
+    assert_equal(filter_registry['dummy'].function, star_0)
+
+    ## Test 2: we add another dummy particle filter.
+    ##         a warning is expected. We use the above closure to
+    ##         check that.
+    # Store the original warning function
+    warning = mylog.warning
+    monkey_warning, monkey_patch_was_called = closure([False])
+    mylog.warning = monkey_warning
+    add_particle_filter("dummy", function=star_1, filtered_type='all',
+                        requires=["creation_time"])
+    assert_equal(filter_registry['dummy'].function, star_1)
+    assert_equal(monkey_patch_was_called(), True)
+
+    # Restore the original warning function
+    mylog.warning = warning
+
+
 @requires_file(iso_galaxy)
 def test_particle_filter():
     """Test the particle_filter decorator"""
@@ -61,7 +105,7 @@
 
     for grid in ds.index.grids[20:31]:
         cg = ds.covering_grid(grid.Level, grid.LeftEdge, grid.ActiveDimensions)
-        
+
         assert_equal(cg['stars', 'particle_ones'].shape[0],
                      grid['stars', 'particle_ones'].shape[0])
         assert_equal(cg['stars', 'particle_mass'].shape[0],

diff -r befe05adfea96a3285199c09543b6c22295d946f -r c2298bef11d31e9ea4e28c6a0b4eaf9807ccc2fb yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -34,8 +34,10 @@
     Dataset
 from yt.data_objects.octree_subset import \
     OctreeSubset
+from yt.data_objects.particle_filters import add_particle_filter
 
-from .definitions import ramses_header, field_aliases
+from .definitions import ramses_header, field_aliases, particle_families
+from .io import _read_part_file_descriptor
 from yt.utilities.physical_constants import mp, kb
 from .fields import \
     RAMSESFieldInfo, _X
@@ -58,12 +60,14 @@
 
         num = os.path.basename(ds.parameter_filename).split("."
                 )[0].split("_")[1]
+        basedir = os.path.abspath(
+            os.path.dirname(ds.parameter_filename))
         basename = "%s/%%s_%s.out%05i" % (
-            os.path.abspath(
-              os.path.dirname(ds.parameter_filename)),
-            num, domain_id)
+            basedir, num, domain_id)
+        part_file_descriptor = "%s/part_file_descriptor.txt" % basedir
         for t in ['grav', 'hydro', 'part', 'amr', 'sink']:
             setattr(self, "%s_fn" % t, basename % t)
+        self._part_file_descriptor = part_file_descriptor
         self._read_amr_header()
         self._read_hydro_header()
         self._read_particle_header()
@@ -91,6 +95,13 @@
         return os.path.exists(self.sink_fn)
 
     @property
+    def _has_part_descriptor(self):
+        '''
+        Does the output include particle file descriptor?
+        '''
+        return os.path.exists(self._part_file_descriptor)
+
+    @property
     def level_count(self):
         if self._level_count is not None: return self._level_count
         self.hydro_offset
@@ -214,6 +225,7 @@
             self.local_particle_count = 0
             self.particle_field_offsets = {}
             return
+
         f = open(self.part_fn, "rb")
         f.seek(0, os.SEEK_END)
         flen = f.tell()
@@ -233,7 +245,12 @@
         self.particle_header = hvals
         self.local_particle_count = hvals['npart']
 
-        particle_fields = [
+        # Try reading particle file descriptor
+        if self._has_part_descriptor:
+            particle_fields = (
+                _read_part_file_descriptor(self._part_file_descriptor))
+        else:
+            particle_fields = [
                 ("particle_position_x", "d"),
                 ("particle_position_y", "d"),
                 ("particle_position_z", "d"),
@@ -244,13 +261,14 @@
                 ("particle_identifier", "i"),
                 ("particle_refinement_level", "I")]
 
-        if self.ds._extra_particle_fields is not None:
-            particle_fields += self.ds._extra_particle_fields
+            if self.ds._extra_particle_fields is not None:
+                particle_fields += self.ds._extra_particle_fields
+
+        ptype = 'io'
 
         field_offsets = {}
         _pfields = {}
 
-        ptype = 'io'
 
         # Read offsets
         for field, vtype in particle_fields:
@@ -667,6 +685,25 @@
         self.storage_filename = storage_filename
 
 
+    def create_field_info(self, *args, **kwa):
+        """Extend create_field_info to add the particles types."""
+        super(RAMSESDataset, self).create_field_info(*args, **kwa)
+        # Register particle filters
+        if ('io', 'particle_family') in self.field_list:
+            for fname, value in particle_families.items():
+                def loc(val):
+                    def closure(pfilter, data):
+                        filter = data[(pfilter.filtered_type, "particle_family")] == val
+                        return filter
+
+                    return closure
+                add_particle_filter(fname, loc(value),
+                                    filtered_type='io', requires=['particle_family'])
+
+            for k in particle_families.keys():
+                mylog.info('Adding particle_type: %s' % k)
+                self.add_particle_filter('%s' % k)
+
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
 

diff -r befe05adfea96a3285199c09543b6c22295d946f -r c2298bef11d31e9ea4e28c6a0b4eaf9807ccc2fb yt/frontends/ramses/definitions.py
--- a/yt/frontends/ramses/definitions.py
+++ b/yt/frontends/ramses/definitions.py
@@ -61,3 +61,14 @@
                           'Metallicity'),
 
 }
+
+particle_families = {
+    'DM': 1,
+    'star': 2,
+    'cloud': 3,
+    'dust': 4,
+    'star_tracer': -2,
+    'cloud_tracer': -3,
+    'dust_tracer': -4,
+    'gas_tracer': 0
+}

diff -r befe05adfea96a3285199c09543b6c22295d946f -r c2298bef11d31e9ea4e28c6a0b4eaf9807ccc2fb yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -64,6 +64,7 @@
 _X = 0.76 # H fraction, hardcoded
 _Y = 0.24 # He fraction, hardcoded
 
+
 class RAMSESFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("Density", (rho_units, ["density"], None)),
@@ -88,7 +89,10 @@
         ("particle_identifier", ("", ["particle_index"], None)),
         ("particle_refinement_level", ("", [], None)),
         ("particle_age", ("code_time", ['age'], None)),
+        ("particle_birth_time", ("code_time", ['age'], None)),
         ("particle_metallicity", ("", [], None)),
+        ("particle_family", ("", [], None)),
+        ("particle_tag", ("", [], None))
     )
 
     known_sink_fields = (

diff -r befe05adfea96a3285199c09543b6c22295d946f -r c2298bef11d31e9ea4e28c6a0b4eaf9807ccc2fb yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -23,7 +23,10 @@
 import yt.utilities.fortran_utils as fpu
 from yt.utilities.lib.cosmology_time import \
     get_ramses_ages
+from yt.utilities.exceptions import YTFieldTypeNotFound, YTParticleOutputFormatNotImplemented, \
+    YTFileNotParseable
 from yt.extern.six import PY3
+import re
 
 if PY3:
     from io import BytesIO as IO
@@ -156,9 +159,66 @@
 
             else:
                 # Raise here an exception
-                raise Exception('Unknown particle type %s' % ptype)
+                raise YTFieldTypeNotFound(ptype)
 
             tr.update(_ramses_particle_file_handler(
                 fname, foffsets, data_types, subset, subs_fields))
 
         return tr
+
+def _read_part_file_descriptor(fname):
+    """
+    Read the particle file descriptor and returns the array of the fields found.
+    """
+    VERSION_RE = re.compile('# version: *(\d+)')
+    VAR_DESC_RE = re.compile(r'\s*(\d+),\s*(\w+),\s*(\w+)')
+
+    # Mapping
+    mapping = [
+        ('position_x', 'particle_position_x'),
+        ('position_y', 'particle_position_y'),
+        ('position_z', 'particle_position_z'),
+        ('velocity_x', 'particle_velocity_x'),
+        ('velocity_y', 'particle_velocity_y'),
+        ('velocity_z', 'particle_velocity_z'),
+        ('mass', 'particle_mass'),
+        ('identity', 'particle_identity'),
+        ('levelp', 'particle_level'),
+        ('family', 'particle_family'),
+        ('tag', 'particle_tag')
+    ]
+    # Convert in dictionary
+    mapping = {k: v for k, v in mapping}
+
+    with open(fname, 'r') as f:
+        line = f.readline()
+        tmp = VERSION_RE.match(line)
+        mylog.info('Reading part file descriptor.')
+        if not tmp:
+            raise YTParticleOutputFormatNotImplemented()
+
+        version = int(tmp.group(1))
+
+        if version == 1:
+            # Skip one line (containing the headers)
+            line = f.readline()
+            fields = []
+            for i, line in enumerate(f.readlines()):
+                tmp = VAR_DESC_RE.match(line)
+                if not tmp:
+                    raise YTFileNotParseable(fname, i+1)
+
+                # ivar = tmp.group(1)
+                varname = tmp.group(2)
+                dtype = tmp.group(3)
+
+                if varname in mapping:
+                    varname = mapping[varname]
+                else:
+                    varname = 'particle_%s' % varname
+
+                fields.append((varname, dtype))
+        else:
+            raise YTParticleOutputFormatNotImplemented()
+
+    return fields

diff -r befe05adfea96a3285199c09543b6c22295d946f -r c2298bef11d31e9ea4e28c6a0b4eaf9807ccc2fb yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -204,3 +204,33 @@
 
     for field in expected_fields:
         assert(('sink', 'field') not in ds.field_list)
+
+
+ramses_new_format = "ramses_new_format/output_00002/info_00002.txt"
+@requires_file(ramses_new_format)
+def test_new_format():
+    expected_particle_fields = [
+        ('star', 'particle_identity'),
+        ('star', 'particle_level'),
+        ('star', 'particle_mass'),
+        ('star', 'particle_metallicity'),
+        ('star', 'particle_position_x'),
+        ('star', 'particle_position_y'),
+        ('star', 'particle_position_z'),
+        ('star', 'particle_tag'),
+        ('star', 'particle_velocity_x'),
+        ('star', 'particle_velocity_y'),
+        ('star', 'particle_velocity_z')]
+
+    ds = yt.load(ramses_new_format)
+    ad = ds.all_data()
+
+    # Check all the expected fields exist and can be accessed
+    for f in expected_particle_fields:
+        assert(f in ds.derived_field_list)
+        ad[f]
+
+    # Check there is only stars with tag 0 (it should be right)
+    assert(all(ad['star', 'particle_family'] == 2))
+    assert(all(ad['star', 'particle_tag'] == 0))
+    assert(len(ad['star', 'particle_tag']) == 600)

diff -r befe05adfea96a3285199c09543b6c22295d946f -r c2298bef11d31e9ea4e28c6a0b4eaf9807ccc2fb yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -424,6 +424,19 @@
         v += r"'%s'."
         return v % (self.obj_name, self.ds)
 
+class YTParticleOutputFormatNotImplemented(YTException):
+    def __str__(self):
+        return "The particle output format is not supported."
+
+class YTFileNotParseable(YTException):
+    def __init__(self, fname, line):
+        self.fname = fname
+        self.line = line
+
+    def __str__(self):
+        v = r"Error while parsing file %s at line %s"
+        return v % (self.fname, self.line)
+
 class YTRockstarMultiMassNotSupported(YTException):
     def __init__(self, mi, ma, ptype):
         self.mi = mi


https://bitbucket.org/yt_analysis/yt/commits/1677894c7bf1/
Changeset:   1677894c7bf1
User:        Corentin Cadiou
Date:        2017-11-27 21:31:35+00:00
Summary:     Add file to handle hilbert ordering
Affected #:  2 files

diff -r befe05adfea96a3285199c09543b6c22295d946f -r 1677894c7bf1ba0d682890220e6b01c0507ebc10 yt/frontends/ramses/hilbert.py
--- /dev/null
+++ b/yt/frontends/ramses/hilbert.py
@@ -0,0 +1,180 @@
+import numpy as np
+
+def hilbert3d(X, bit_length):
+    '''Compute the order using Hilbert indexing.
+
+    Arguments
+    ---------
+    * X: (N, ndim) float array
+      The positions
+    * bit_length: integer
+      The bit_length for the indexing.
+    '''
+    X = np.atleast_2d(X)
+    state_diagram = np.array([
+        1, 2, 3, 2, 4, 5, 3, 5,
+        0, 1, 3, 2, 7, 6, 4, 5,
+        2, 6, 0, 7, 8, 8, 0, 7,
+        0, 7, 1, 6, 3, 4, 2, 5,
+        0, 9,10, 9, 1, 1,11,11,
+        0, 3, 7, 4, 1, 2, 6, 5,
+        6, 0, 6,11, 9, 0, 9, 8,
+        2, 3, 1, 0, 5, 4, 6, 7,
+        11,11, 0, 7, 5, 9, 0, 7,
+        4, 3, 5, 2, 7, 0, 6, 1,
+        4, 4, 8, 8, 0, 6,10, 6,
+        6, 5, 1, 2, 7, 4, 0, 3,
+        5, 7, 5, 3, 1, 1,11,11,
+        4, 7, 3, 0, 5, 6, 2, 1,
+        6, 1, 6,10, 9, 4, 9,10,
+        6, 7, 5, 4, 1, 0, 2, 3,
+        10, 3, 1, 1,10, 3, 5, 9,
+        2, 5, 3, 4, 1, 6, 0, 7,
+        4, 4, 8, 8, 2, 7, 2, 3,
+        2, 1, 5, 6, 3, 0, 4, 7,
+        7, 2,11, 2, 7, 5, 8, 5,
+        4, 5, 7, 6, 3, 2, 0, 1,
+        10, 3, 2, 6,10, 3, 4, 4,
+        6, 1, 7, 0, 5, 2, 4, 3]).reshape(12, 2, 8).T
+
+    x_bit_mask, y_bit_mask, z_bit_mask = [np.zeros(bit_length, dtype=bool) for _ in range(3)]
+    i_bit_mask = np.zeros(3*bit_length, dtype=bool)
+
+    npoint = X.shape[0]
+    order = np.zeros(npoint)
+
+    # Convert positions to binary
+    for ip in range(npoint):
+        for i in range(bit_length):
+            mask = 0b01 << i
+            x_bit_mask[i] = X[ip, 0] & mask
+            y_bit_mask[i] = X[ip, 1] & mask
+            z_bit_mask[i] = X[ip, 2] & mask
+
+        for i in range(bit_length):
+            # Interleave bits
+            i_bit_mask[3*i+2] = x_bit_mask[i]
+            i_bit_mask[3*i+1] = y_bit_mask[i]
+            i_bit_mask[3*i  ] = z_bit_mask[i]
+
+        # Build Hilbert ordering using state diagram
+        cstate = 0
+        for i in range(bit_length-1, -1, -1):
+            sdigit = (4 * i_bit_mask[3*i+2] +
+                      2 * i_bit_mask[3*i+1] +
+                      1 * i_bit_mask[3*i  ])
+            nstate = state_diagram[sdigit, 0, cstate]
+            hdigit = state_diagram[sdigit, 1, cstate]
+
+            i_bit_mask[3*i+2] = hdigit & 0b100
+            i_bit_mask[3*i+1] = hdigit & 0b010
+            i_bit_mask[3*i  ] = hdigit & 0b001
+
+            cstate = nstate
+
+        # Compute ordering
+        for i in range(3*bit_length):
+            order[ip] = order[ip] + i_bit_mask[i]*2**i
+
+    return order
+
+def get_cpu_list(ds, X):
+    '''
+    Return the list of the CPU intersecting with the positions
+    given. Note that it will be 0-indexed.
+
+    Parameters
+    ----------
+    * ds: Dataset
+      The dataset containing the information
+    * X: (N, ndim) float array
+      An array containing positions. They should be between 0 and 1.
+    '''
+    X = np.atleast_2d(X)
+    if X.shape[1] != 3:
+        raise NotImplementedError('This function is only implemented in 3D.')
+
+    levelmax = ds.parameters['levelmax']
+    ncpu = ds.parameters['ncpu']
+    ndim = ds.parameters['ndim']
+
+    xmin, ymin, zmin = X.min(axis=0)
+    xmax, ymax, zmax = X.max(axis=0)
+
+    dmax = max(xmax-xmin, ymax-ymin, zmax-zmin)
+    ilevel = 0
+    deltax = dmax * 2
+
+    while deltax >= dmax:
+        ilevel += 1
+        deltax = 0.5**ilevel
+
+    lmin = ilevel
+    bit_length = lmin - 1
+    maxdom = 2**bit_length
+
+    imin, imax, jmin, jmax, kmin, kmax = 0, 0, 0, 0, 0, 0
+    if bit_length > 0:
+        imin = int(xmin * maxdom)
+        imax = imin + 1
+        jmin = int(ymin * maxdom)
+        jmax = jmin + 1
+        kmin = int(zmin * maxdom)
+        kmax = kmin + 1
+
+
+    dkey = (2**(levelmax+1) / maxdom)**ndim
+    ndom = 1
+    if (bit_length > 0): ndom = 8
+
+    idom, jdom, kdom = [np.zeros(8, dtype=int) for _ in range(3)]
+
+    idom[0], idom[1] = imin, imax
+    idom[2], idom[3] = imin, imax
+    idom[4], idom[5] = imin, imax
+    idom[6], idom[7] = imin, imax
+
+    jdom[0], jdom[1] = jmin, jmin
+    jdom[2], jdom[3] = jmax, jmax
+    jdom[4], jdom[5] = jmin, jmin
+    jdom[6], jdom[7] = jmax, jmax
+
+    kdom[0], kdom[1] = kmin, kmin
+    kdom[2], kdom[3] = kmin, kmin
+    kdom[4], kdom[5] = kmax, kmax
+    kdom[6], kdom[7] = kmax, kmax
+
+    bounding_min, bounding_max = np.zeros(ndom), np.zeros(ndom)
+    for i in range(ndom):
+        if bit_length > 0:
+            order_min = hilbert3d([idom[i], jdom[i], kdom[i]], bit_length)
+        else:
+            order_min = 0
+        bounding_min[i] = (order_min  )*dkey
+        bounding_max[i] = (order_min+1)*dkey
+
+    bound_key = {}
+    for icpu in range(1, ncpu+1):
+        bound_key[icpu-1], bound_key[icpu] = ds.hilbert_indices[icpu]
+
+    cpu_min, cpu_max = [np.zeros(ncpu + 1, dtype=np.int) for _ in range(2)]
+    for icpu in range(1, ncpu+1):
+        for i in range(ndom):
+            if (bound_key[icpu - 1] <= bounding_min[i] and
+                bound_key[icpu    ] >  bounding_min[i]):
+                cpu_min[i] = icpu-1
+            if (bound_key[icpu - 1] <  bounding_max[i] and
+                bound_key[icpu    ] >= bounding_max[i]):
+                cpu_max[i] = icpu
+
+    ncpu_read = 0
+    cpu_list = []
+    cpu_read = np.zeros(ncpu, dtype=np.bool)
+    for i in range(ndom):
+        for j in range(cpu_min[i], cpu_max[i]):
+            if not cpu_read[j]:
+                ncpu_read += 1
+                cpu_list.append(j)
+                cpu_read[j] = True
+
+    return sorted(cpu_list)

diff -r befe05adfea96a3285199c09543b6c22295d946f -r 1677894c7bf1ba0d682890220e6b01c0507ebc10 yt/frontends/ramses/tests/test_hilbert.py
--- /dev/null
+++ b/yt/frontends/ramses/tests/test_hilbert.py
@@ -0,0 +1,48 @@
+from yt.frontends.ramses.hilbert import get_cpu_list, hilbert3d
+from yt.testing import \
+    assert_equal, \
+    requires_file
+import numpy as np
+import yt
+
+def test_hilbert3d():
+    inputs = [[0, 0, 0],
+              [1, 0, 0],
+              [0, 1, 0],
+              [1, 1, 0],
+              [0, 0, 1],
+              [1, 0, 1],
+              [0, 1, 1],
+              [1, 1, 1]]
+    outputs = [0, 1, 7, 6, 3, 2, 4, 5]
+
+    for i, o in zip(inputs, outputs):
+        assert_equal(int(hilbert3d(i, 3)), o)
+
+
+output_00080 = "output_00080/info_00080.txt"
+@requires_file(output_00080)
+def test_get_cpu_list():
+    ds = yt.load(output_00080)
+
+    np.random.seed(16091992)
+    # These are randomly generated outputs, checked against RAMSES' own implementation
+    inputs = (
+        [[ 0.27747276,  0.30018937,  0.17916189], [ 0.42656026,  0.40509483,  0.29927838]],
+        [[ 0.90660856,  0.44201328,  0.22770587], [ 1.09175462,  0.58017918,  0.2836648 ]],
+        [[ 0.98542323,  0.58543376,  0.45858327], [ 1.04441105,  0.62079207,  0.58919283]],
+        [[ 0.42274841,  0.44887745,  0.87793679], [ 0.52066634,  0.58936331,  1.00666222]],
+        [[ 0.69964803,  0.65893669,  0.03660775], [ 0.80565696,  0.67409752,  0.11434604]])
+    outputs = (
+        [0, 15],
+        [0, 15],
+        [0, 1, 15],
+        [0, 13, 14, 15],
+        [0]
+    )
+
+    for i, o in zip(inputs, outputs):
+        bbox = i
+        ls = get_cpu_list(ds, bbox)
+        assert(len(ls) > 0)
+        assert(all(np.array(o) == np.array(ls)))


https://bitbucket.org/yt_analysis/yt/commits/89d46f8f7670/
Changeset:   89d46f8f7670
User:        Corentin Cadiou
Date:        2017-11-27 21:41:28+00:00
Summary:     correct stripping
Affected #:  1 file

diff -r 1677894c7bf1ba0d682890220e6b01c0507ebc10 -r 89d46f8f7670d08a795300090b25a6d9e1ef39e0 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -723,9 +723,9 @@
         rheader = {}
         f = open(self.parameter_filename)
         def read_rhs(cast):
-            line = f.readline()
+            line = f.readline().replace('\n', '')
             p, v = line.split("=")
-            rheader[p.strip()] = cast(v)
+            rheader[p.strip()] = cast(v.strip())
         for i in range(6): read_rhs(int)
         f.readline()
         for i in range(11): read_rhs(float)
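
The stripping matters mainly for string-valued header entries: ``int`` and
``float`` already tolerate surrounding whitespace, but a plain ``str`` cast
keeps the trailing newline, which would later defeat comparisons such as
``rheader['ordering type'] != 'hilbert'``. A standalone illustration, using a
hypothetical header line:

    line = "ordering type  =  hilbert\n"
    p, v = line.split("=")

    assert str(v) != "hilbert"           # the raw cast keeps spaces and the newline
    assert str(v.strip()) == "hilbert"   # the stripped value compares as expected
    assert int(" 42 \n") == 42           # numeric casts ignore whitespace anyway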


https://bitbucket.org/yt_analysis/yt/commits/3042a6f4c0e1/
Changeset:   3042a6f4c0e1
User:        Corentin Cadiou
Date:        2017-11-27 21:58:58+00:00
Summary:     support reading a subpart of the box
Affected #:  1 file

diff -r 89d46f8f7670d08a795300090b25a6d9e1ef39e0 -r 3042a6f4c0e194d67d862f1e4069020a2cfe9c1b yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -39,6 +39,7 @@
 from yt.utilities.physical_constants import mp, kb
 from .fields import \
     RAMSESFieldInfo, _X
+from .hilbert import get_cpu_list
 import yt.utilities.fortran_utils as fpu
 from yt.geometry.oct_container import \
     RAMSESOctreeContainer
@@ -455,8 +456,13 @@
         super(RAMSESIndex, self).__init__(ds, dataset_type)
 
     def _initialize_oct_handler(self):
+        if self.ds._bbox:
+            cpu_list = get_cpu_list(self.dataset, self.dataset._bbox)
+        else:
+            cpu_list = range(self.dataset['ncpu'])
+
         self.domains = [RAMSESDomainFile(self.dataset, i + 1)
-                        for i in range(self.dataset['ncpu'])]
+                        for i in cpu_list]
         total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
                          for dom in self.domains)
         self.max_level = max(dom.max_level for dom in self.domains)
@@ -645,7 +651,8 @@
     def __init__(self, filename, dataset_type='ramses',
                  fields=None, storage_filename=None,
                  units_override=None, unit_system="cgs",
-                 extra_particle_fields=None, cosmological=None):
+                 extra_particle_fields=None, cosmological=None,
+                 bbox=None):
         # Here we want to initiate a traceback, if the reader is not built.
         if isinstance(fields, string_types):
             fields = field_aliases[fields]
@@ -661,6 +668,7 @@
         self._extra_particle_fields = extra_particle_fields
         self._warn_extra_fields = False
         self.force_cosmological = cosmological
+        self._bbox = bbox
         Dataset.__init__(self, filename, dataset_type, units_override=units_override,
                          unit_system=unit_system)
 


https://bitbucket.org/yt_analysis/yt/commits/f59729ed9498/
Changeset:   f59729ed9498
User:        Corentin Cadiou
Date:        2017-11-27 22:17:30+00:00
Summary:     don't use buffered IO for AMR
Affected #:  1 file

diff -r 3042a6f4c0e194d67d862f1e4069020a2cfe9c1b -r f59729ed94980548cda4b9cd6dab45531c636f45 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -20,7 +20,6 @@
 import numpy as np
 import stat
 import weakref
-from io import BytesIO
 
 from yt.extern.six import string_types
 from yt.funcs import \
@@ -320,9 +319,7 @@
         self.oct_handler.allocate_domains(self.total_oct_count, root_nodes)
         fb = open(self.amr_fn, "rb")
         fb.seek(self.amr_offset)
-        f = BytesIO()
-        f.write(fb.read())
-        f.seek(0)
+        f = fb
         mylog.debug("Reading domain AMR % 4i (%0.3e, %0.3e)",
             self.domain_id, self.total_oct_count.sum(), self.ngridbound.sum())
         def _ng(c, l):
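
The change above removes an in-memory copy: instead of slurping the remainder
of the AMR file into a ``BytesIO`` buffer and seeking within the copy, the
reader now seeks directly in the (OS-buffered) file object. A schematic
contrast, using a temporary file as a stand-in for an ``amr_*.out*`` file:

    import io
    import tempfile

    with tempfile.TemporaryFile() as fb:
        fb.write(b"header" + b"payload")
        amr_offset = len(b"header")

        # Before: duplicate the rest of the file in memory, then read the copy
        fb.seek(amr_offset)
        buffered = io.BytesIO(fb.read())
        assert buffered.read() == b"payload"

        # After: seek and read directly in the file object, no extra copy
        fb.seek(amr_offset)
        assert fb.read() == b"payload"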


https://bitbucket.org/yt_analysis/yt/commits/70e128ce66dc/
Changeset:   70e128ce66dc
User:        Corentin Cadiou
Date:        2017-11-29 16:44:19+00:00
Summary:     add copyright + reference to RAMSES
Affected #:  1 file

diff -r f59729ed94980548cda4b9cd6dab45531c636f45 -r 70e128ce66dc632aa5929035f07deec529e04252 yt/frontends/ramses/hilbert.py
--- a/yt/frontends/ramses/hilbert.py
+++ b/yt/frontends/ramses/hilbert.py
@@ -1,3 +1,21 @@
+"""
+RAMSES-specific hilbert ordering routines.
+
+These functions were translated from their original files from the
+RAMSES project. See https://bitbucket.org/rteyssie/ramses.
+
+
+"""
+from __future__ import print_function, absolute_import
+
+#-----------------------------------------------------------------------------
+# Copyright (c) yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
 import numpy as np
 
 def hilbert3d(X, bit_length):


https://bitbucket.org/yt_analysis/yt/commits/bac565f3c2cf/
Changeset:   bac565f3c2cf
User:        Corentin Cadiou
Date:        2017-11-29 17:48:48+00:00
Summary:     check that we use bbox only when using hilbert ordering
Affected #:  1 file

diff -r 70e128ce66dc632aa5929035f07deec529e04252 -r bac565f3c2cf26a1d4079e6ee91fd437cb0842cd yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -749,6 +749,9 @@
             for n in range(rheader['ncpu']):
                 dom, mi, ma = f.readline().split()
                 self.hilbert_indices[int(dom)] = (float(mi), float(ma))
+
+        if rheader['ordering type'] != 'hilbert' and self._bbox:
+            raise NotImplementedError('The ordering %s is not compatible with the `bbox` argument.' % rheader['ordering type'])
         self.parameters.update(rheader)
         self.domain_left_edge = np.zeros(3, dtype='float64')
         self.domain_dimensions = np.ones(3, dtype='int32') * \


https://bitbucket.org/yt_analysis/yt/commits/7f3a51abd8b8/
Changeset:   7f3a51abd8b8
User:        Corentin Cadiou
Date:        2017-11-29 18:04:06+00:00
Summary:     refactor doc
Affected #:  1 file

diff -r bac565f3c2cf26a1d4079e6ee91fd437cb0842cd -r 7f3a51abd8b89cbb9800bee7bf2640f21458795e doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -2017,19 +2017,50 @@
 ``RAMSES-RT`` fork. Files produced by ``RAMSES-RT`` are recognized as such
 based on the presence of a ``info_rt_*.txt`` file in the output directory.
 
-It is possible to force yt to treat the simulation as a cosmological
-simulation by providing the ``cosmological=True`` parameter (or
-``False`` to force non-cosmology). If left to ``None``, the kind of
-the simulation is inferred from the data.
-
-yt also support outputs that include sinks (RAMSES' name for black
-holes) when the folder contains files like ``sink_XXXXX.outYYYYY``.
 
 Note: for backward compatibility, particles from the
 ``particle_XXXXX.outYYYYY`` files have the particle type ``io`` by
 default (including dark matter, stars, tracer particles, …). Sink
 particles have the particle type ``sink``.
 
+Arguments passed to the load function
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+It is possible to provide extra arguments to the load function when loading RAMSES datasets. Here is a list of the ones specific to RAMSES:
+
+``fields``
+      A list of fields to read from the hydro files. For a hydro simulation with an extra custom field:
+
+      .. code-block:: python
+
+          import yt
+          fields = ["Density",
+                    "x-velocity", "y-velocity", "z-velocity",
+                    "Pressure", "my-awesome-field"]
+	  ds = yt.load('output_00123/info_00123.txt', fields=fields)
+	  'my-awesome-field' in ds.field  # is True
+
+
+``extra_particle_fields``
+      A list of tuples describing extra particle fields to read in. By
+      default, yt will try to detect as many fields as possible,
+      assuming the extra ones to be double precision floats. This
+      argument is useful if you have extra fields that yt cannot
+      detect. For example, for a dataset containing two integer fields
+      in the particles, one would do
+
+      .. code-block:: python
+
+          import yt
+          extra_fields = [('family', 'I'), ('info', 'I')]
+          ds = yt.load("output_00001/info_00001.txt", extra_particle_fields=extra_fields)
+          # ('all', 'family') and ('all', 'info') now in ds.field_list
+
+      The format of the passed argument is as follows: ``[('field_name_1', 'type_1'), …, ('field_name_n', 'type_n')]`` where the ``type_n`` should follow the `python convention <https://docs.python.org/3.5/library/struct.html#format-characters>`_.
+
+``cosmological``
+      Force yt to consider a simulation to be cosmological or
+      not. This may be useful for specific simulations, e.g. ones that
+      run down to negative redshifts.
 .. _loading-sph-data:
 
 SPH Particle Data
@@ -2113,6 +2144,3 @@
 
     import yt
     ds = yt.load(filename, cosmology_parameters={})
-
-
-


https://bitbucket.org/yt_analysis/yt/commits/459861f67e0c/
Changeset:   459861f67e0c
User:        Corentin Cadiou
Date:        2017-11-29 18:04:12+00:00
Summary:     add doc for bbox
Affected #:  1 file

diff -r 7f3a51abd8b89cbb9800bee7bf2640f21458795e -r 459861f67e0c95fe44f097ec0db8e05f501ae1e1 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -2061,6 +2061,44 @@
       Force yt to consider a simulation to be cosmological or
       not. This may be useful for specific simulations, e.g. ones that
       run down to negative redshifts.
+
+``bbox``
+      The subbox to load. yt will only read CPUs intersecting with the
+      subbox. This is especially useful for large simulations or
+      zoom-in simulations, where you don't want to have access to data
+      outside of a small region of interest. This argument will prevent
+      yt from loading AMR files outside the subbox and will hence
+      spare memory and time.
+      For example, one could used
+
+      .. code-block:: python
+
+          import yt
+	  # Only load a small cube of size (0.1)**3
+	  bbox = [[0., 0., 0.], [0.1, 0.1, 0.1]]
+	  ds = yt.load('output_00001/info_00001.txt', bbox=bbox)
+
+	  # See the note below for the following examples
+	  ds.domain_right_edge == [1, 1, 1]      # is True
+
+	  ad = ds.all_data()
+	  ad['particle_position_x'].max() > 0.1  # _may_ be True
+
+	  bb = ds.box(left_edge=bbox[0], right_edge=bbox[1])
+	  bb['particle_position_x'].max() < 0.1  # is True
+
+      .. note::
+	 When using the bbox argument, yt will read all the CPUs
+         intersecting with the subbox. However it may also read some
+         data *outside* the selected region. This is due to the fact
+         that domains have a complicated shape when using Hilbert
+         ordering. Internally, yt will hence assume the loaded dataset
+         covers the entire simulation. If you only want the data from
+         the selected region, you may want to use ``ds.box(…)``.
+
+      .. note::
+	 This feature is only available when using Hilbert ordering.
+
+
 .. _loading-sph-data:
 
 SPH Particle Data


https://bitbucket.org/yt_analysis/yt/commits/5ddd702956d6/
Changeset:   5ddd702956d6
User:        Corentin Cadiou
Date:        2017-11-29 18:08:18+00:00
Summary:     minor edits to the doc
Affected #:  1 file

diff -r 459861f67e0c95fe44f097ec0db8e05f501ae1e1 -r 5ddd702956d6375fce70c2ce2c2306a850392c60 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -318,7 +318,7 @@
 * yt does not read the Maestro base state (although you can have Maestro
   map it to a full Cartesian state variable before writing the plotfile
   to get around this).  E-mail the dev list if you need this support.
-* yt supports AMReX/BoxLib particle data stored in the standard format used 
+* yt supports AMReX/BoxLib particle data stored in the standard format used
   by Nyx and WarpX, and optionally Castro. It currently does not support the ASCII particle
   data used by Maestro and Castro.
 * For Maestro, yt aliases either "tfromp" or "tfromh" to ``temperature``
@@ -331,7 +331,7 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Most AMReX/BoxLib codes output cell-centered data. If the underlying discretization
-is not cell-centered, then fields are typically averaged to cell centers before 
+is not cell-centered, then fields are typically averaged to cell centers before
 they are written to plot files for visualization. WarpX, however, has the option
 to output the raw (i.e., not averaged to cell centers) data as well.  If you
 run your WarpX simulation with ``warpx.plot_raw_fields = 1`` in your inputs
@@ -348,10 +348,10 @@
 The raw fields in WarpX are nodal in at least one direction. We define a field
 to be "nodal" in a given direction if the field data is defined at the "low"
 and "high" sides of the cell in that direction, rather than at the cell center.
-Instead of returning one field value per cell selected, nodal fields return a 
+Instead of returning one field value per cell selected, nodal fields return a
 number of values, depending on their centering. This centering is marked by
 a `nodal_flag` that describes whether the field is nodal in each dimension.
-``nodal_flag = [0, 0, 0]`` means that the field is cell-centered, while 
+``nodal_flag = [0, 0, 0]`` means that the field is cell-centered, while
 ``nodal_flag = [0, 0, 1]`` means that the field is nodal in the z direction
 and cell centered in the others, i.e. it is defined on the z faces of each cell.
 ``nodal_flag = [1, 1, 0]`` would mean that the field is centered in the z direction,
@@ -371,7 +371,7 @@
 
 Here, the field ``('raw', 'Ex')`` is nodal in two directions, so four values per cell
 are returned, corresponding to the four edges in each cell on which the variable
-is defined. ``('raw', 'Bx')`` is nodal in one direction, so two values are returned 
+is defined. ``('raw', 'Bx')`` is nodal in one direction, so two values are returned
 per cell. The standard, averaged-to-cell-centers fields are still available.
 
 Currently, slices and data selection are implemented for nodal fields. Projections,
@@ -684,7 +684,7 @@
 * fits.gz
 * fts.gz
 
-yt can currently read two kinds of FITS files: FITS image files and FITS 
+yt can currently read two kinds of FITS files: FITS image files and FITS
 binary table files containing positions, times, and energies of X-ray events.
 
 Though a FITS image is composed of a single array in the FITS file,
@@ -760,14 +760,14 @@
 If your data is of the first case, yt will determine the length units based
 on the information in the header. If your data is of the second or third
 cases, no length units will be assigned, but the world coordinate information
-about the axes will be stored in separate fields. If your data is of the 
-fourth type, the coordinates of the first three axes will be determined 
+about the axes will be stored in separate fields. If your data is of the
+fourth type, the coordinates of the first three axes will be determined
 according to cases 1-3.
 
 .. note::
 
-  Linear length-based coordinates (Case 1 above) are only supported if all 
-  dimensions have the same value for ``CUNITx``. WCS coordinates are only 
+  Linear length-based coordinates (Case 1 above) are only supported if all
+  dimensions have the same value for ``CUNITx``. WCS coordinates are only
   supported for Cases 2-4.
 
 FITS Data Decomposition
@@ -791,8 +791,8 @@
              512	  981940800
 
 For 3D spectral-cube data, the decomposition into grids will be done along the
-spectral axis since this will speed up many common operations for this 
-particular type of dataset. 
+spectral axis since this will speed up many common operations for this
+particular type of dataset.
 
 yt will generate its own domain decomposition, but the number of grids can be
 set manually by passing the ``nprocs`` parameter to the ``load`` call:
@@ -830,10 +830,10 @@
 will be generated from the pixel coordinates in the file using the WCS
 transformations provided by AstroPy.
 
-X-ray event data will be loaded as particle fields in yt, but a grid will be 
-constructed from the WCS information in the FITS header. There is a helper 
-function, ``setup_counts_fields``, which may be used to make deposited image 
-fields from the event data for different energy bands (for an example see 
+X-ray event data will be loaded as particle fields in yt, but a grid will be
+constructed from the WCS information in the FITS header. There is a helper
+function, ``setup_counts_fields``, which may be used to make deposited image
+fields from the event data for different energy bands (for an example see
 :ref:`xray_fits`).
 
 .. note::
@@ -848,7 +848,7 @@
 Additional Options
 ^^^^^^^^^^^^^^^^^^
 
-The following are additional options that may be passed to the ``load`` command 
+The following are additional options that may be passed to the ``load`` command
 when analyzing FITS data:
 
 ``nan_mask``
@@ -888,9 +888,9 @@
 Miscellaneous Tools for Use with FITS Data
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-A number of tools have been prepared for use with FITS data that enhance yt's 
-visualization and analysis capabilities for this particular type of data. These 
-are included in the ``yt.frontends.fits.misc`` module, and can be imported like 
+A number of tools have been prepared for use with FITS data that enhance yt's
+visualization and analysis capabilities for this particular type of data. These
+are included in the ``yt.frontends.fits.misc`` module, and can be imported like
 so:
 
 .. code-block:: python
@@ -900,7 +900,7 @@
 ``setup_counts_fields``
 """""""""""""""""""""""
 
-This function can be used to create image fields from X-ray counts data in 
+This function can be used to create image fields from X-ray counts data in
 different energy bands:
 
 .. code-block:: python
@@ -914,9 +914,9 @@
 ``ds9_region``
 """"""""""""""
 
-This function takes a `ds9 <http://ds9.si.edu/site/Home.html>`_ region and 
-creates a "cut region" data container from it, that can be used to select 
-the cells in the FITS dataset that fall within the region. To use this 
+This function takes a `ds9 <http://ds9.si.edu/site/Home.html>`_ region and
+creates a "cut region" data container from it, that can be used to select
+the cells in the FITS dataset that fall within the region. To use this
 functionality, the `pyregion <https://github.com/astropy/pyregion/>`_
 package must be installed.
 
@@ -930,8 +930,8 @@
 ``PlotWindowWCS``
 """""""""""""""""
 
-This class takes a on-axis ``SlicePlot`` or ``ProjectionPlot`` of FITS 
-data and adds celestial coordinates to the plot axes. To use it, a 
+This class takes an on-axis ``SlicePlot`` or ``ProjectionPlot`` of FITS
+data and adds celestial coordinates to the plot axes. To use it, a
 version of AstroPy >= 1.3 must be installed.
 
 .. code-block:: python
@@ -940,7 +940,7 @@
   wcs_slc.show() # for the IPython notebook
   wcs_slc.save()
 
-``WCSAxes`` is still in an experimental state, but as its functionality 
+``WCSAxes`` is still in an experimental state, but as its functionality
 improves it will be utilized more here.
 
 ``create_spectral_slabs``
@@ -948,14 +948,14 @@
 
 .. note::
 
-  The following functionality requires the 
-  `spectral-cube <http://spectral-cube.readthedocs.org>`_ library to be 
+  The following functionality requires the
+  `spectral-cube <http://spectral-cube.readthedocs.org>`_ library to be
   installed.
 
 If you have a spectral intensity dataset of some sort, and would like to
-extract emission in particular slabs along the spectral axis of a certain 
-width, ``create_spectral_slabs`` can be used to generate a dataset with 
-these slabs as different fields. In this example, we use it to extract 
+extract emission in particular slabs along the spectral axis of a certain
+width, ``create_spectral_slabs`` can be used to generate a dataset with
+these slabs as different fields. In this example, we use it to extract
 individual lines from an intensity cube:
 
 .. code-block:: python
@@ -968,12 +968,12 @@
                                     slab_centers, slab_width,
                                     nan_mask=0.0)
 
-All keyword arguments to ``create_spectral_slabs`` are passed on to ``load`` when 
-creating the dataset (see :ref:`additional_fits_options` above). In the 
-returned dataset, the different slabs will be different fields, with the field 
-names taken from the keys in ``slab_centers``. The WCS coordinates on the 
-spectral axis are reset so that the center of the domain along this axis is 
-zero, and the left and right edges of the domain along this axis are 
+All keyword arguments to ``create_spectral_slabs`` are passed on to ``load`` when
+creating the dataset (see :ref:`additional_fits_options` above). In the
+returned dataset, the different slabs will be different fields, with the field
+names taken from the keys in ``slab_centers``. The WCS coordinates on the
+spectral axis are reset so that the center of the domain along this axis is
+zero, and the left and right edges of the domain along this axis are
 :math:`\pm` ``0.5*slab_width``.
 
 Examples of Using FITS Data
@@ -991,10 +991,10 @@
 ----------
 
 FLASH HDF5 data is *mostly* supported and cared for by John ZuHone.  To load a
-FLASH dataset, you can use the ``yt.load`` command and provide it the file name of 
+FLASH dataset, you can use the ``yt.load`` command and provide it the file name of
 a plot file, checkpoint file, or particle file. Particle files require special handling
-depending on the situation, the main issue being that they typically lack grid information. 
-The first case is when you have a plotfile and a particle file that you would like to 
+depending on the situation, the main issue being that they typically lack grid information.
+The first case is when you have a plotfile and a particle file that you would like to
 load together. In the simplest case, this occurs automatically. For instance, if you
 were in a directory with the following files:
 
@@ -1003,8 +1003,8 @@
    radio_halo_1kpc_hdf5_plt_cnt_0100 # plotfile
    radio_halo_1kpc_hdf5_part_0100 # particle file
 
-where the plotfile and the particle file were created at the same time (therefore having 
-particle data consistent with the grid structure of the former). Notice also that the 
+where the plotfile and the particle file were created at the same time (therefore having
+particle data consistent with the grid structure of the former). Notice also that the
 prefix ``"radio_halo_1kpc_"`` and the file number ``100`` are the same. In this special case,
 the particle file will be loaded automatically when ``yt.load`` is called on the plotfile.
 This also works when loading a number of files in a time series.
@@ -1018,10 +1018,10 @@
     import yt
     ds = yt.load("radio_halo_1kpc_hdf5_plt_cnt_0100", particle_filename="radio_halo_1kpc_hdf5_part_0100")
 
-However, if you don't have a corresponding plotfile for a particle file, but would still 
-like to load the particle data, you can still call ``yt.load`` on the file. However, the 
+However, if you don't have a corresponding plotfile for a particle file, but would still
+like to load the particle data, you can still call ``yt.load`` on the file. However, the
 grid information will not be available, and the particle data will be loaded in a fashion
-similar to SPH data. 
+similar to SPH data.
 
 .. rubric:: Caveats
 
@@ -1349,7 +1349,7 @@
    yt only supports a block structure where the grid edges on the ``n``-th
    refinement level are aligned with the cell edges on the ``n-1``-th level.
 
-Particle fields are supported by adding 1-dimensional arrays to each 
+Particle fields are supported by adding 1-dimensional arrays to each
 ``grid``'s dict:
 
 .. code-block:: python
@@ -1394,7 +1394,7 @@
 simultaneously divide the domain into 12 chunks, so that you can take advantage
 of the underlying parallelism.
 
-Particle fields are added as one-dimensional arrays in a similar manner as the 
+Particle fields are added as one-dimensional arrays in a similar manner as the
 three-dimensional grid fields:
 
 .. code-block:: python
@@ -1562,7 +1562,7 @@
 
    # only plot the second
    sl = yt.SlicePlot(ds, 'z', ('connect2', 'test'))
-   
+
    # plot both
    sl = yt.SlicePlot(ds, 'z', ('all', 'test'))
 
@@ -1631,10 +1631,10 @@
 Gizmo Data
 ----------
 
-Gizmo datasets, including FIRE outputs, can be loaded into yt in the usual 
-manner.  Like other SPH data formats, yt loads Gizmo data as particle fields 
-and then uses smoothing kernels to deposit those fields to an underlying 
-grid structure as spatial fields as described in :ref:`loading-gadget-data`.  
+Gizmo datasets, including FIRE outputs, can be loaded into yt in the usual
+manner.  Like other SPH data formats, yt loads Gizmo data as particle fields
+and then uses smoothing kernels to deposit those fields to an underlying
+grid structure as spatial fields as described in :ref:`loading-gadget-data`.
 To load Gizmo datasets using the standard HDF5 output format::
 
    import yt
@@ -1642,11 +1642,11 @@
 
 Because the Gizmo output format is similar to the Gadget format, yt
 may load Gizmo datasets as Gadget depending on the circumstances, but this
-should not pose a problem in most situations.  FIRE outputs will be loaded 
-accordingly due to the number of metallicity fields found (11 or 17).  
+should not pose a problem in most situations.  FIRE outputs will be loaded
+accordingly due to the number of metallicity fields found (11 or 17).
 
 For Gizmo outputs written as raw binary outputs, you may have to specify
-a bounding box, field specification, and units as are done for standard 
+a bounding box, field specification, and units as are done for standard
 Gadget outputs.  See :ref:`loading-gadget-data` for more information.
 
 .. _halo-catalog-data:
@@ -2001,27 +2001,19 @@
    import yt
    ds = yt.load("output_00007/info_00007.txt")
 
-yt will attempt to guess the fields in the file.  You may also specify
-a list of hydro fields by supplying the ``fields`` keyword in your
-call to ``load``. It is also possible to provide a list of *extra*
-particle fields by supplying the ``extra_particle_fields``:
-
-.. code-block:: python
-
-   import yt
-   extra_fields = [('family', 'I'), ('info', 'I')]
-   ds = yt.load("output_00001/info_00001.txt", extra_particle_fields=extra_fields)
-   # ('all', 'family') and ('all', 'info') now in ds.field_list
+yt will attempt to guess the fields in the file. For more control over the hydro fields or the particle fields, see :ref:`loading-ramses-data-args`.
 
 yt supports outputs made by the mainline ``RAMSES`` code as well as the
 ``RAMSES-RT`` fork. Files produced by ``RAMSES-RT`` are recognized as such
 based on the presence of a ``info_rt_*.txt`` file in the output directory.
 
-
-Note: for backward compatibility, particles from the
-``particle_XXXXX.outYYYYY`` files have the particle type ``io`` by
-default (including dark matter, stars, tracer particles, …). Sink
-particles have the particle type ``sink``.
+.. note::
+   for backward compatibility, particles from the
+   ``part_XXXXX.outYYYYY`` files have the particle type ``io`` by
+   default (including dark matter, stars, tracer particles, …). Sink
+   particles have the particle type ``sink``.
+
+.. _loading-ramses-data-args:
 
 Arguments passed to the load function
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^


https://bitbucket.org/yt_analysis/yt/commits/eb57d82d0f37/
Changeset:   eb57d82d0f37
User:        Corentin Cadiou
Date:        2017-11-29 18:10:49+00:00
Summary:     comment in test
Affected #:  1 file

diff -r 5ddd702956d6375fce70c2ce2c2306a850392c60 -r eb57d82d0f3776d4bb43ca573ed30b947653010f yt/frontends/ramses/tests/test_hilbert.py
--- a/yt/frontends/ramses/tests/test_hilbert.py
+++ b/yt/frontends/ramses/tests/test_hilbert.py
@@ -6,6 +6,7 @@
 import yt
 
 def test_hilbert3d():
+    # 8 different cases, checked against RAMSES' own implementation
     inputs = [[0, 0, 0],
               [1, 0, 0],
               [0, 1, 0],


https://bitbucket.org/yt_analysis/yt/commits/f1b82b81e572/
Changeset:   f1b82b81e572
User:        Corentin Cadiou
Date:        2017-11-30 08:06:53+00:00
Summary:     Minor fixes
Affected #:  3 files

diff -r eb57d82d0f3776d4bb43ca573ed30b947653010f -r f1b82b81e5722b292353f390e84138f885c4d4d9 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -2029,7 +2029,7 @@
                     "x-velocity", "y-velocity", "z-velocity",
                     "Pressure", "my-awesome-field"]
 	  ds = yt.load('output_00123/info_00123.txt', fields=fields)
-	  'my-awesome-field' in ds.field  # is True
+	  'my-awesome-field' in ds.field_list  # is True
 
 
 ``extra_particle_fields``
@@ -2047,7 +2047,7 @@
           ds = yt.load("output_00001/info_00001.txt", extra_particle_fields=extra_fields)
           # ('all', 'family') and ('all', 'info') now in ds.field_list
 
-      The format of the passed argument is as follows: ``[('field_name_1', 'type_1'), …, ('field_name_n', 'type_n')]`` where the ``type_n`` should follow the `python convention <https://docs.python.org/3.5/library/struct.html#format-characters>`_.
+      The format of the passed argument is as follows: ``[('field_name_1', 'type_1'), …, ('field_name_n', 'type_n')]``, where each ``type_n`` uses the format characters of the `python struct convention <https://docs.python.org/3.5/library/struct.html#format-characters>`_.
 
 ``cosmological``
       Force yt to consider a simulation to be cosmological or
@@ -2061,7 +2061,7 @@
       outside of a small region of interest. This argument will prevent
       yt from loading AMR files outside the subbox and will hence
       spare memory and time.
-      For example, one could used
+      For example, one could use
 
       .. code-block:: python
 

diff -r eb57d82d0f3776d4bb43ca573ed30b947653010f -r f1b82b81e5722b292353f390e84138f885c4d4d9 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -317,9 +317,8 @@
                 self.ds.domain_left_edge, self.ds.domain_right_edge)
         root_nodes = self.amr_header['numbl'][self.ds.min_level,:].sum()
         self.oct_handler.allocate_domains(self.total_oct_count, root_nodes)
-        fb = open(self.amr_fn, "rb")
-        fb.seek(self.amr_offset)
-        f = fb
+        f = open(self.amr_fn, "rb")
+        f.seek(self.amr_offset)
         mylog.debug("Reading domain AMR % 4i (%0.3e, %0.3e)",
             self.domain_id, self.total_oct_count.sum(), self.ngridbound.sum())
         def _ng(c, l):
@@ -751,7 +750,9 @@
                 self.hilbert_indices[int(dom)] = (float(mi), float(ma))
 
         if rheader['ordering type'] != 'hilbert' and self._bbox:
-            raise NotImplementedError('The ordering %s is not compatible with the `bbox` argument.' % rheader['ordering type'])
+            raise NotImplementedError(
+                'The ordering %s is not compatible with the `bbox` argument.'
+                % rheader['ordering type'])
         self.parameters.update(rheader)
         self.domain_left_edge = np.zeros(3, dtype='float64')
         self.domain_dimensions = np.ones(3, dtype='int32') * \

diff -r eb57d82d0f3776d4bb43ca573ed30b947653010f -r f1b82b81e5722b292353f390e84138f885c4d4d9 yt/frontends/ramses/hilbert.py
--- a/yt/frontends/ramses/hilbert.py
+++ b/yt/frontends/ramses/hilbert.py
@@ -2,7 +2,8 @@
 RAMSES-specific hilbert ordering routines.
 
 These functions were translated from their original files from the
-RAMSES project. See https://bitbucket.org/rteyssie/ramses.
+RAMSES project with the agreement of the original developer. See
+https://bitbucket.org/rteyssie/ramses.
 
 
 """


https://bitbucket.org/yt_analysis/yt/commits/68c36875d9b4/
Changeset:   68c36875d9b4
User:        Corentin Cadiou
Date:        2017-12-04 17:56:46+00:00
Summary:     Merge branch 'master' into add-bbox-feature2
Affected #:  10 files

diff -r f1b82b81e5722b292353f390e84138f885c4d4d9 -r 68c36875d9b4abc5c741d0e416976470a32b051c doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -2003,6 +2003,13 @@
 
 yt will attempt to guess the fields in the file. For more control over the hydro fields or the particle fields, see :ref:`loading-ramses-data-args`.
 
+yt also supports the new particle output format introduced after
+version ``stable_17_091`` (the version released after the 2017 Ramses
+User Meeting). In this case, the file ``part_file_descriptor.txt``
+containing the different fields in the particle files will be read. If
+you use a custom version of RAMSES, make sure this file is up-to-date
+and reflects the true layout of the particles.
+
 yt supports outputs made by the mainline ``RAMSES`` code as well as the
 ``RAMSES-RT`` fork. Files produced by ``RAMSES-RT`` are recognized as such
 based on the presence of a ``info_rt_*.txt`` file in the output directory.
@@ -2091,6 +2098,19 @@
 	 This feature is only available when using Hilbert ordering.
 
 
+Adding custom particle fields
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+It is possible to add support for custom particle fields. To do so,
+tweak :func:`~yt.frontends.ramses.io._read_part_file_descriptor` to
+include the field, as well as its data type, in the assoc list,
+following the convention of the
+`python struct module <https://docs.python.org/3.5/library/struct.html#format-characters>`_.
+For example, to add support for a longint field named
+``my_custom_field``, one would add ``('my_custom_field', 'l')`` to ``assoc``.
+
+
 .. _loading-sph-data:
 
 SPH Particle Data

diff -r f1b82b81e5722b292353f390e84138f885c4d4d9 -r 68c36875d9b4abc5c741d0e416976470a32b051c yt/data_objects/particle_filters.py
--- a/yt/data_objects/particle_filters.py
+++ b/yt/data_objects/particle_filters.py
@@ -15,16 +15,16 @@
 #-----------------------------------------------------------------------------
 
 import copy
-from collections import defaultdict
 
 from contextlib import contextmanager
 
 from yt.fields.field_info_container import \
     NullFunc, TranslationFunc
 from yt.utilities.exceptions import YTIllDefinedFilter
+from yt.funcs import mylog
 
-# One to many mapping
-filter_registry = defaultdict(list)
+# One to one mapping
+filter_registry = {}
 
 class DummyFieldInfo(object):
     particle_type = True
@@ -131,7 +131,9 @@
     if requires is None:
         requires = []
     filter = ParticleFilter(name, function, requires, filtered_type)
-    filter_registry[name].append(filter)
+    if filter_registry.get(name, None) is not None:
+        mylog.warning('The %s particle filter already exists. Overriding.' % name)
+    filter_registry[name] = filter
 
 
 def particle_filter(name=None, requires=None, filtered_type='all'):

diff -r f1b82b81e5722b292353f390e84138f885c4d4d9 -r 68c36875d9b4abc5c741d0e416976470a32b051c yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -663,11 +663,12 @@
         self.known_filters[n] = None
         if isinstance(filter, string_types):
             used = False
-            for f in filter_registry[filter]:
-                used = self._setup_filtered_type(f)
-                if used:
-                    filter = f
-                    break
+            f = filter_registry.get(filter, None)
+            if f is None:
+                return False
+            used = self._setup_filtered_type(f)
+            if used:
+                filter = f
         else:
             used = self._setup_filtered_type(filter)
         if not used:

diff -r f1b82b81e5722b292353f390e84138f885c4d4d9 -r 68c36875d9b4abc5c741d0e416976470a32b051c yt/data_objects/tests/test_particle_filter.py
--- a/yt/data_objects/tests/test_particle_filter.py
+++ b/yt/data_objects/tests/test_particle_filter.py
@@ -33,6 +33,50 @@
     ad['deposit', 'stars_cic']
     assert True
 
+
+def test_add_particle_filter_overriding():
+    """Test the add_particle_filter overriding"""
+    from yt.data_objects.particle_filters import filter_registry
+    from yt.funcs import mylog
+
+    def star_0(pfilter, data):
+        pass
+
+    def star_1(pfilter, data):
+        pass
+
+    # Use a closure to store whether the warning was called
+    def closure(status):
+        def warning_patch(*args, **kwargs):
+            status[0] = True
+
+        def was_called():
+            return status[0]
+
+        return warning_patch, was_called
+
+    ## Test 1: we add a dummy particle filter
+    add_particle_filter("dummy", function=star_0, filtered_type='all',
+                        requires=["creation_time"])
+    assert 'dummy' in filter_registry
+    assert_equal(filter_registry['dummy'].function, star_0)
+
+    ## Test 2: we add another dummy particle filter.
+    ##         a warning is expected. We use the above closure to
+    ##         check that.
+    # Store the original warning function
+    warning = mylog.warning
+    monkey_warning, monkey_patch_was_called = closure([False])
+    mylog.warning = monkey_warning
+    add_particle_filter("dummy", function=star_1, filtered_type='all',
+                        requires=["creation_time"])
+    assert_equal(filter_registry['dummy'].function, star_1)
+    assert_equal(monkey_patch_was_called(), True)
+
+    # Restore the original warning function
+    mylog.warning = warning
+
+
 @requires_file(iso_galaxy)
 def test_particle_filter():
     """Test the particle_filter decorator"""
@@ -61,7 +105,7 @@
 
     for grid in ds.index.grids[20:31]:
         cg = ds.covering_grid(grid.Level, grid.LeftEdge, grid.ActiveDimensions)
-        
+
         assert_equal(cg['stars', 'particle_ones'].shape[0],
                      grid['stars', 'particle_ones'].shape[0])
         assert_equal(cg['stars', 'particle_mass'].shape[0],

diff -r f1b82b81e5722b292353f390e84138f885c4d4d9 -r 68c36875d9b4abc5c741d0e416976470a32b051c yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -33,8 +33,10 @@
     Dataset
 from yt.data_objects.octree_subset import \
     OctreeSubset
+from yt.data_objects.particle_filters import add_particle_filter
 
-from .definitions import ramses_header, field_aliases
+from .definitions import ramses_header, field_aliases, particle_families
+from .io import _read_part_file_descriptor
 from yt.utilities.physical_constants import mp, kb
 from .fields import \
     RAMSESFieldInfo, _X
@@ -58,12 +60,14 @@
 
         num = os.path.basename(ds.parameter_filename).split("."
                 )[0].split("_")[1]
+        basedir = os.path.abspath(
+            os.path.dirname(ds.parameter_filename))
         basename = "%s/%%s_%s.out%05i" % (
-            os.path.abspath(
-              os.path.dirname(ds.parameter_filename)),
-            num, domain_id)
+            basedir, num, domain_id)
+        part_file_descriptor = "%s/part_file_descriptor.txt" % basedir
         for t in ['grav', 'hydro', 'part', 'amr', 'sink']:
             setattr(self, "%s_fn" % t, basename % t)
+        self._part_file_descriptor = part_file_descriptor
         self._read_amr_header()
         self._read_hydro_header()
         self._read_particle_header()
@@ -91,6 +95,13 @@
         return os.path.exists(self.sink_fn)
 
     @property
+    def _has_part_descriptor(self):
+        '''
+        Does the output include a particle file descriptor?
+        '''
+        return os.path.exists(self._part_file_descriptor)
+
+    @property
     def level_count(self):
         if self._level_count is not None: return self._level_count
         self.hydro_offset
@@ -214,6 +225,7 @@
             self.local_particle_count = 0
             self.particle_field_offsets = {}
             return
+
         f = open(self.part_fn, "rb")
         f.seek(0, os.SEEK_END)
         flen = f.tell()
@@ -233,7 +245,12 @@
         self.particle_header = hvals
         self.local_particle_count = hvals['npart']
 
-        particle_fields = [
+        # Try reading particle file descriptor
+        if self._has_part_descriptor:
+            particle_fields = (
+                _read_part_file_descriptor(self._part_file_descriptor))
+        else:
+            particle_fields = [
                 ("particle_position_x", "d"),
                 ("particle_position_y", "d"),
                 ("particle_position_z", "d"),
@@ -244,13 +261,14 @@
                 ("particle_identifier", "i"),
                 ("particle_refinement_level", "I")]
 
-        if self.ds._extra_particle_fields is not None:
-            particle_fields += self.ds._extra_particle_fields
+            if self.ds._extra_particle_fields is not None:
+                particle_fields += self.ds._extra_particle_fields
+
+        ptype = 'io'
 
         field_offsets = {}
         _pfields = {}
 
-        ptype = 'io'
 
         # Read offsets
         for field, vtype in particle_fields:
@@ -671,6 +689,25 @@
         self.storage_filename = storage_filename
 
 
+    def create_field_info(self, *args, **kwa):
+        """Extend create_field_info to add the particles types."""
+        super(RAMSESDataset, self).create_field_info(*args, **kwa)
+        # Register particle filters
+        if ('io', 'particle_family') in self.field_list:
+            for fname, value in particle_families.items():
+                def loc(val):
+                    def closure(pfilter, data):
+                        filter = data[(pfilter.filtered_type, "particle_family")] == val
+                        return filter
+
+                    return closure
+                add_particle_filter(fname, loc(value),
+                                    filtered_type='io', requires=['particle_family'])
+
+            for k in particle_families.keys():
+                mylog.info('Adding particle_type: %s' % k)
+                self.add_particle_filter('%s' % k)
+
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
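
The ``loc(val)`` wrapper in ``create_field_info`` above guards against
Python's late binding of closure variables: without it, every filter built in
the loop would compare ``particle_family`` against the *last* value of
``value``. A minimal demonstration of the pitfall, independent of yt:

    # Late binding: every lambda sees the final value of v
    broken = [lambda: v for v in (1, 2, 3)]
    print([f() for f in broken])  # [3, 3, 3]

    # A factory function captures each value at definition time
    def make(val):
        return lambda: val

    fixed = [make(v) for v in (1, 2, 3)]
    print([f() for f in fixed])   # [1, 2, 3]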
 

diff -r f1b82b81e5722b292353f390e84138f885c4d4d9 -r 68c36875d9b4abc5c741d0e416976470a32b051c yt/frontends/ramses/definitions.py
--- a/yt/frontends/ramses/definitions.py
+++ b/yt/frontends/ramses/definitions.py
@@ -61,3 +61,14 @@
                           'Metallicity'),
 
 }
+
+particle_families = {
+    'DM': 1,
+    'star': 2,
+    'cloud': 3,
+    'dust': 4,
+    'star_tracer': -2,
+    'cloud_tracer': -3,
+    'dust_tracer': -4,
+    'gas_tracer': 0
+}
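
Because each of these families is registered as a particle filter on top of
the ``io`` type (see ``create_field_info`` above), every family name becomes a
queryable particle type whenever ``particle_family`` is present. A hedged
usage sketch, assuming a hypothetical new-format output:

    import yt

    ds = yt.load("output_00002/info_00002.txt")  # hypothetical output path
    ad = ds.all_data()

    # 'DM' and 'star' are particle types derived from particle_family
    dm_mass = ad["DM", "particle_mass"]
    star_x = ad["star", "particle_position_x"]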

diff -r f1b82b81e5722b292353f390e84138f885c4d4d9 -r 68c36875d9b4abc5c741d0e416976470a32b051c yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -64,6 +64,7 @@
 _X = 0.76 # H fraction, hardcoded
 _Y = 0.24 # He fraction, hardcoded
 
+
 class RAMSESFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("Density", (rho_units, ["density"], None)),
@@ -88,7 +89,10 @@
         ("particle_identifier", ("", ["particle_index"], None)),
         ("particle_refinement_level", ("", [], None)),
         ("particle_age", ("code_time", ['age'], None)),
+        ("particle_birth_time", ("code_time", ['age'], None)),
         ("particle_metallicity", ("", [], None)),
+        ("particle_family", ("", [], None)),
+        ("particle_tag", ("", [], None))
     )
 
     known_sink_fields = (

diff -r f1b82b81e5722b292353f390e84138f885c4d4d9 -r 68c36875d9b4abc5c741d0e416976470a32b051c yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -23,7 +23,10 @@
 import yt.utilities.fortran_utils as fpu
 from yt.utilities.lib.cosmology_time import \
     get_ramses_ages
+from yt.utilities.exceptions import YTFieldTypeNotFound, YTParticleOutputFormatNotImplemented, \
+    YTFileNotParseable
 from yt.extern.six import PY3
+import re
 
 if PY3:
     from io import BytesIO as IO
@@ -156,9 +159,66 @@
 
             else:
                 # Raise here an exception
-                raise Exception('Unknown particle type %s' % ptype)
+                raise YTFieldTypeNotFound(ptype)
 
             tr.update(_ramses_particle_file_handler(
                 fname, foffsets, data_types, subset, subs_fields))
 
         return tr
+
+def _read_part_file_descriptor(fname):
+    """
+    Read the particle file descriptor and return the list of fields found.
+    """
+    VERSION_RE = re.compile('# version: *(\d+)')
+    VAR_DESC_RE = re.compile(r'\s*(\d+),\s*(\w+),\s*(\w+)')
+
+    # Mapping
+    mapping = [
+        ('position_x', 'particle_position_x'),
+        ('position_y', 'particle_position_y'),
+        ('position_z', 'particle_position_z'),
+        ('velocity_x', 'particle_velocity_x'),
+        ('velocity_y', 'particle_velocity_y'),
+        ('velocity_z', 'particle_velocity_z'),
+        ('mass', 'particle_mass'),
+        ('identity', 'particle_identity'),
+        ('levelp', 'particle_level'),
+        ('family', 'particle_family'),
+        ('tag', 'particle_tag')
+    ]
+    # Convert the mapping into a dictionary
+    mapping = {k: v for k, v in mapping}
+
+    with open(fname, 'r') as f:
+        line = f.readline()
+        tmp = VERSION_RE.match(line)
+        mylog.info('Reading part file descriptor.')
+        if not tmp:
+            raise YTParticleOutputFormatNotImplemented()
+
+        version = int(tmp.group(1))
+
+        if version == 1:
+            # Skip one line (containing the headers)
+            line = f.readline()
+            fields = []
+            for i, line in enumerate(f.readlines()):
+                tmp = VAR_DESC_RE.match(line)
+                if not tmp:
+                    raise YTFileNotParseable(fname, i+1)
+
+                # ivar = tmp.group(1)
+                varname = tmp.group(2)
+                dtype = tmp.group(3)
+
+                if varname in mapping:
+                    varname = mapping[varname]
+                else:
+                    varname = 'particle_%s' % varname
+
+                fields.append((varname, dtype))
+        else:
+            raise YTParticleOutputFormatNotImplemented()
+
+    return fields
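
For reference, a version-1 descriptor accepted by the parser above could look
like the following (hypothetical content; the header line is skipped, and each
data line must match ``VAR_DESC_RE``):

    # version:  1
    # ivar, variable_name, variable_type
      1, position_x, d
      2, position_y, d
      3, position_z, d
      4, mass, d
      5, family, b

Parsing this would yield ``[('particle_position_x', 'd'),
('particle_position_y', 'd'), ('particle_position_z', 'd'),
('particle_mass', 'd'), ('particle_family', 'b')]``: known names are
translated through the mapping above, and unknown ones are prefixed with
``particle_``.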

diff -r f1b82b81e5722b292353f390e84138f885c4d4d9 -r 68c36875d9b4abc5c741d0e416976470a32b051c yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -204,3 +204,33 @@
 
     for field in expected_fields:
         assert(('sink', field) not in ds.field_list)
+
+
+ramses_new_format = "ramses_new_format/output_00002/info_00002.txt"
+@requires_file(ramses_new_format)
+def test_new_format():
+    expected_particle_fields = [
+        ('star', 'particle_identity'),
+        ('star', 'particle_level'),
+        ('star', 'particle_mass'),
+        ('star', 'particle_metallicity'),
+        ('star', 'particle_position_x'),
+        ('star', 'particle_position_y'),
+        ('star', 'particle_position_z'),
+        ('star', 'particle_tag'),
+        ('star', 'particle_velocity_x'),
+        ('star', 'particle_velocity_y'),
+        ('star', 'particle_velocity_z')]
+
+    ds = yt.load(ramses_new_format)
+    ad = ds.all_data()
+
+    # Check all the expected fields exist and can be accessed
+    for f in expected_particle_fields:
+        assert(f in ds.derived_field_list)
+        ad[f]
+
+    # Check that there are only stars, all with tag 0
+    assert(all(ad['star', 'particle_family'] == 2))
+    assert(all(ad['star', 'particle_tag'] == 0))
+    assert(len(ad['star', 'particle_tag']) == 600)

diff -r f1b82b81e5722b292353f390e84138f885c4d4d9 -r 68c36875d9b4abc5c741d0e416976470a32b051c yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -424,6 +424,19 @@
         v += r"'%s'."
         return v % (self.obj_name, self.ds)
 
+class YTParticleOutputFormatNotImplemented(YTException):
+    def __str__(self):
+        return "The particle output format is not supported."
+
+class YTFileNotParseable(YTException):
+    def __init__(self, fname, line):
+        self.fname = fname
+        self.line = line
+
+    def __str__(self):
+        v = r"Error while parsing file %s at line %s"
+        return v % (self.fname, self.line)
+
 class YTRockstarMultiMassNotSupported(YTException):
     def __init__(self, mi, ma, ptype):
         self.mi = mi


https://bitbucket.org/yt_analysis/yt/commits/2135d7e7b122/
Changeset:   2135d7e7b122
User:        Corentin Cadiou
Date:        2017-12-04 17:57:51+00:00
Summary:     Merge branch 'master' into add-bbox-feature2
Affected #:  0 files



https://bitbucket.org/yt_analysis/yt/commits/691d304f219c/
Changeset:   691d304f219c
User:        ngoldbaum
Date:        2017-12-05 15:11:12+00:00
Summary:     Merge pull request #1637 from cphyc/add-bbox-feature2

RAMSES bbox feature
Affected #:  4 files

diff -r c2298bef11d31e9ea4e28c6a0b4eaf9807ccc2fb -r 691d304f219cc3b5db48ba9b93c7eb02ecd6c604 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -2001,18 +2001,7 @@
    import yt
    ds = yt.load("output_00007/info_00007.txt")
 
-
-yt will attempt to guess the fields in the file. You may also specify
-a list of hydro fields by supplying the ``fields`` keyword in your
-call to ``load``. It is also possible to provide a list of *extra*
-particle fields by supplying the ``extra_particle_fields``:
-
-.. code-block:: python
-
-   import yt
-   extra_fields = [('family', 'I'), ('info', 'I')]
-   ds = yt.load("output_00001/info_00001.txt", extra_particle_fields=extra_fields)
-   # ('all', 'family') and ('all', 'info') now in ds.field_list
+yt will attempt to guess the fields in the file. For more control over the hydro fields or the particle fields, see :ref:`loading-ramses-data-args`.
 
 yt also supports the new particle output format introduced after
 version ``stable_17_091`` (the version released after the 2017 Ramses
@@ -2025,18 +2014,89 @@
 ``RAMSES-RT`` fork. Files produced by ``RAMSES-RT`` are recognized as such
 based on the presence of a ``info_rt_*.txt`` file in the output directory.
 
-It is possible to force yt to treat the simulation as a cosmological
-simulation by providing the ``cosmological=True`` parameter (or
-``False`` to force non-cosmology). If left to ``None``, the kind of
-the simulation is inferred from the data.
-
-yt also support outputs that include sinks (RAMSES' name for black
-holes) when the folder contains files like ``sink_XXXXX.outYYYYY``.
-
-Note: for backward compatibility, particles from the
-``particle_XXXXX.outYYYYY`` files have the particle type ``io`` by
-default (including dark matter, stars, tracer particles, …). Sink
-particles have the particle type ``sink``.
+.. note::
+   for backward compatibility, particles from the
+   ``part_XXXXX.outYYYYY`` files have the particle type ``io`` by
+   default (including dark matter, stars, tracer particles, …). Sink
+   particles have the particle type ``sink``.
+
+.. _loading-ramses-data-args:
+
+Arguments passed to the load function
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+It is possible to provide extra arguments to the load function when loading RAMSES datasets. Here is a list of the ones specific to RAMSES:
+
+``fields``
+      A list of fields to read from the hydro files. For a hydro simulation with an extra custom field:
+
+      .. code-block:: python
+
+          import yt
+          fields = ["Density",
+                    "x-velocity", "y-velocity", "z-velocity",
+                    "Pressure", "my-awesome-field"]
+	  ds = yt.load('output_00123/info_00123.txt', fields=fields)
+	  'my-awesome-field' in ds.field_list  # is True
+
+
+``extra_particle_fields``
+      A list of tuples describing extra particle fields to read in. By
+      default, yt will try to detect as many fields as possible,
+      assuming the extra ones to be double precision floats. This
+      argument is useful if you have extra fields that yt cannot
+      detect. For example, for a dataset containing two integer fields
+      in the particles, one would do
+
+      .. code-block:: python
+
+          import yt
+          extra_fields = [('family', 'I'), ('info', 'I')]
+          ds = yt.load("output_00001/info_00001.txt", extra_particle_fields=extra_fields)
+          # ('all', 'family') and ('all', 'info') now in ds.field_list
+
+      The format of the passed argument is as follows: ``[('field_name_1', 'type_1'), …, ('field_name_n', 'type_n')]``, where each ``type_n`` uses the format characters of the `python struct convention <https://docs.python.org/3.5/library/struct.html#format-characters>`_.
+
+``cosmological``
+      Force yt to consider a simulation to be cosmological or
+      not. This may be useful for specific simulations, e.g. ones that
+      run down to negative redshifts.
+
+``bbox``
+      The subbox to load. yt will only read CPUs intersecting with the
+      subbox. This is especially useful for large simulations or
+      zoom-in simulations, where you don't want to have access to data
+      outside of a small region of interest. This argument will prevent
+      yt from loading AMR files outside the subbox and will hence
+      spare memory and time.
+      For example, one could use
+
+      .. code-block:: python
+
+          import yt
+	  # Only load a small cube of size (0.1)**3
+	  bbox = [[0., 0., 0.], [0.1, 0.1, 0.1]]
+	  ds = yt.load('output_00001/info_00001.txt', bbox=bbox)
+
+	  # See the note below for the following examples
+	  ds.domain_right_edge == [1, 1, 1]      # is True
+
+	  ad = ds.all_data()
+	  ad['particle_position_x'].max() > 0.1  # _may_ be True
+
+	  bb = ds.box(left_edge=bbox[0], right_edge=bbox[1])
+	  bb['particle_position_x'].max() < 0.1  # is True
+
+      .. note::
+	 When using the bbox argument, yt will read all the CPUs
+         intersecting with the subbox. However it may also read some
+         data *outside* the selected region. This is due to the fact
+         that domains have a complicated shape when using Hilbert
+         ordering. Internally, yt will hence assume the loaded dataset
+         covers the entire simulation. If you only want the data from
+         the selected region, you may want to use ``ds.box(…)``.
+
+      .. note::
+	 This feature is only available when using Hilbert ordering.
+
 
 Adding custom particle fields
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

diff -r c2298bef11d31e9ea4e28c6a0b4eaf9807ccc2fb -r 691d304f219cc3b5db48ba9b93c7eb02ecd6c604 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -20,7 +20,6 @@
 import numpy as np
 import stat
 import weakref
-from io import BytesIO
 
 from yt.extern.six import string_types
 from yt.funcs import \
@@ -41,6 +40,7 @@
 from yt.utilities.physical_constants import mp, kb
 from .fields import \
     RAMSESFieldInfo, _X
+from .hilbert import get_cpu_list
 import yt.utilities.fortran_utils as fpu
 from yt.geometry.oct_container import \
     RAMSESOctreeContainer
@@ -335,11 +335,8 @@
                 self.ds.domain_left_edge, self.ds.domain_right_edge)
         root_nodes = self.amr_header['numbl'][self.ds.min_level,:].sum()
         self.oct_handler.allocate_domains(self.total_oct_count, root_nodes)
-        fb = open(self.amr_fn, "rb")
-        fb.seek(self.amr_offset)
-        f = BytesIO()
-        f.write(fb.read())
-        f.seek(0)
+        f = open(self.amr_fn, "rb")
+        f.seek(self.amr_offset)
         mylog.debug("Reading domain AMR % 4i (%0.3e, %0.3e)",
             self.domain_id, self.total_oct_count.sum(), self.ngridbound.sum())
         def _ng(c, l):
@@ -473,8 +470,13 @@
         super(RAMSESIndex, self).__init__(ds, dataset_type)
 
     def _initialize_oct_handler(self):
+        if self.ds._bbox:
+            cpu_list = get_cpu_list(self.dataset, self.dataset._bbox)
+        else:
+            cpu_list = range(self.dataset['ncpu'])
+
         self.domains = [RAMSESDomainFile(self.dataset, i + 1)
-                        for i in range(self.dataset['ncpu'])]
+                        for i in cpu_list]
         total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
                          for dom in self.domains)
         self.max_level = max(dom.max_level for dom in self.domains)
@@ -663,7 +665,8 @@
     def __init__(self, filename, dataset_type='ramses',
                  fields=None, storage_filename=None,
                  units_override=None, unit_system="cgs",
-                 extra_particle_fields=None, cosmological=None):
+                 extra_particle_fields=None, cosmological=None,
+                 bbox=None):
         # Here we want to initiate a traceback, if the reader is not built.
         if isinstance(fields, string_types):
             fields = field_aliases[fields]
@@ -679,6 +682,7 @@
         self._extra_particle_fields = extra_particle_fields
         self._warn_extra_fields = False
         self.force_cosmological = cosmological
+        self._bbox = bbox
         Dataset.__init__(self, filename, dataset_type, units_override=units_override,
                          unit_system=unit_system)
 
@@ -760,9 +764,9 @@
         rheader = {}
         f = open(self.parameter_filename)
         def read_rhs(cast):
-            line = f.readline()
+            line = f.readline().replace('\n', '')
             p, v = line.split("=")
-            rheader[p.strip()] = cast(v)
+            rheader[p.strip()] = cast(v.strip())
         for i in range(6): read_rhs(int)
         f.readline()
         for i in range(11): read_rhs(float)
@@ -781,6 +785,11 @@
             for n in range(rheader['ncpu']):
                 dom, mi, ma = f.readline().split()
                 self.hilbert_indices[int(dom)] = (float(mi), float(ma))
+
+        if rheader['ordering type'] != 'hilbert' and self._bbox:
+            raise NotImplementedError(
+                'The ordering %s is not compatible with the `bbox` argument.'
+                % rheader['ordering type'])
         self.parameters.update(rheader)
         self.domain_left_edge = np.zeros(3, dtype='float64')
         self.domain_dimensions = np.ones(3, dtype='int32') * \

diff -r c2298bef11d31e9ea4e28c6a0b4eaf9807ccc2fb -r 691d304f219cc3b5db48ba9b93c7eb02ecd6c604 yt/frontends/ramses/hilbert.py
--- /dev/null
+++ b/yt/frontends/ramses/hilbert.py
@@ -0,0 +1,199 @@
+"""
+RAMSES-specific Hilbert ordering routines.
+
+These functions were translated from their original files from the
+RAMSES project with the agreement of the original developer. See
+https://bitbucket.org/rteyssie/ramses.
+
+
+"""
+from __future__ import print_function, absolute_import
+
+#-----------------------------------------------------------------------------
+# Copyright (c) yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+def hilbert3d(X, bit_length):
+    '''Compute the order of points along a Hilbert curve.
+
+    Arguments
+    ---------
+    * X: (N, ndim) integer array
+      The positions (bitwise operations are applied to them, so
+      integer coordinates are expected)
+    * bit_length: integer
+      The bit length used for the indexing.
+    '''
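+    # Example (values taken from test_hilbert.py in this changeset):
+    # the corner (1, 1, 1) at bit_length=3 maps to Hilbert order 5,
+    # i.e. int(hilbert3d([1, 1, 1], 3)) == 5.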
+    X = np.atleast_2d(X)
+    state_diagram = np.array([
+        1, 2, 3, 2, 4, 5, 3, 5,
+        0, 1, 3, 2, 7, 6, 4, 5,
+        2, 6, 0, 7, 8, 8, 0, 7,
+        0, 7, 1, 6, 3, 4, 2, 5,
+        0, 9,10, 9, 1, 1,11,11,
+        0, 3, 7, 4, 1, 2, 6, 5,
+        6, 0, 6,11, 9, 0, 9, 8,
+        2, 3, 1, 0, 5, 4, 6, 7,
+        11,11, 0, 7, 5, 9, 0, 7,
+        4, 3, 5, 2, 7, 0, 6, 1,
+        4, 4, 8, 8, 0, 6,10, 6,
+        6, 5, 1, 2, 7, 4, 0, 3,
+        5, 7, 5, 3, 1, 1,11,11,
+        4, 7, 3, 0, 5, 6, 2, 1,
+        6, 1, 6,10, 9, 4, 9,10,
+        6, 7, 5, 4, 1, 0, 2, 3,
+        10, 3, 1, 1,10, 3, 5, 9,
+        2, 5, 3, 4, 1, 6, 0, 7,
+        4, 4, 8, 8, 2, 7, 2, 3,
+        2, 1, 5, 6, 3, 0, 4, 7,
+        7, 2,11, 2, 7, 5, 8, 5,
+        4, 5, 7, 6, 3, 2, 0, 1,
+        10, 3, 2, 6,10, 3, 4, 4,
+        6, 1, 7, 0, 5, 2, 4, 3]).reshape(12, 2, 8).T
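+
+    # After the reshape and transpose, state_diagram has shape
+    # (8, 2, 12): state_diagram[digit, 0, state] gives the next state
+    # and state_diagram[digit, 1, state] the Hilbert digit emitted for
+    # the interleaved bit triplet `digit` in the current `state`.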
+
+    x_bit_mask, y_bit_mask, z_bit_mask = [np.zeros(bit_length, dtype=bool) for _ in range(3)]
+    i_bit_mask = np.zeros(3*bit_length, dtype=bool)
+
+    npoint = X.shape[0]
+    order = np.zeros(npoint)
+
+    # Convert positions to binary
+    for ip in range(npoint):
+        for i in range(bit_length):
+            mask = 0b01 << i
+            x_bit_mask[i] = X[ip, 0] & mask
+            y_bit_mask[i] = X[ip, 1] & mask
+            z_bit_mask[i] = X[ip, 2] & mask
+
+        for i in range(bit_length):
+            # Interleave bits
+            i_bit_mask[3*i+2] = x_bit_mask[i]
+            i_bit_mask[3*i+1] = y_bit_mask[i]
+            i_bit_mask[3*i  ] = z_bit_mask[i]
+
+        # Build Hilbert ordering using state diagram
+        cstate = 0
+        for i in range(bit_length-1, -1, -1):
+            sdigit = (4 * i_bit_mask[3*i+2] +
+                      2 * i_bit_mask[3*i+1] +
+                      1 * i_bit_mask[3*i  ])
+            nstate = state_diagram[sdigit, 0, cstate]
+            hdigit = state_diagram[sdigit, 1, cstate]
+
+            i_bit_mask[3*i+2] = hdigit & 0b100
+            i_bit_mask[3*i+1] = hdigit & 0b010
+            i_bit_mask[3*i  ] = hdigit & 0b001
+
+            cstate = nstate
+
+        # Compute ordering
+        for i in range(3*bit_length):
+            order[ip] = order[ip] + i_bit_mask[i]*2**i
+
+    return order
+
+def get_cpu_list(ds, X):
+    '''
+    Return the list of CPUs whose domains intersect the bounding box
+    of the given positions. Note that the returned list is 0-indexed.
+
+    Parameters
+    ----------
+    * ds: Dataset
+      The dataset containing the information
+    * X: (N, ndim) float array
+      An array containing positions. They should be between 0 and 1.
+    '''
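+    # Example (values from test_hilbert.py in this changeset):
+    #     bbox ≈ [[0.277, 0.300, 0.179], [0.427, 0.405, 0.299]]
+    #     get_cpu_list(ds, bbox)  # -> [0, 15] for output_00080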
+    X = np.atleast_2d(X)
+    if X.shape[1] != 3:
+        raise NotImplementedError('This function is only implemented in 3D.')
+
+    levelmax = ds.parameters['levelmax']
+    ncpu = ds.parameters['ncpu']
+    ndim = ds.parameters['ndim']
+
+    xmin, ymin, zmin = X.min(axis=0)
+    xmax, ymax, zmax = X.max(axis=0)
+
+    dmax = max(xmax-xmin, ymax-ymin, zmax-zmin)
+    ilevel = 0
+    deltax = dmax * 2
+
+    while deltax >= dmax:
+        ilevel += 1
+        deltax = 0.5**ilevel
+
+    lmin = ilevel
+    bit_length = lmin - 1
+    maxdom = 2**bit_length
+
+    imin, imax, jmin, jmax, kmin, kmax = 0, 0, 0, 0, 0, 0
+    if bit_length > 0:
+        imin = int(xmin * maxdom)
+        imax = imin + 1
+        jmin = int(ymin * maxdom)
+        jmax = jmin + 1
+        kmin = int(zmin * maxdom)
+        kmax = kmin + 1
+
+    dkey = (2**(levelmax+1) / maxdom)**ndim
+    ndom = 8 if bit_length > 0 else 1
+
+    idom, jdom, kdom = [np.zeros(8, dtype=int) for _ in range(3)]
+
+    idom[0], idom[1] = imin, imax
+    idom[2], idom[3] = imin, imax
+    idom[4], idom[5] = imin, imax
+    idom[6], idom[7] = imin, imax
+
+    jdom[0], jdom[1] = jmin, jmin
+    jdom[2], jdom[3] = jmax, jmax
+    jdom[4], jdom[5] = jmin, jmin
+    jdom[6], jdom[7] = jmax, jmax
+
+    kdom[0], kdom[1] = kmin, kmin
+    kdom[2], kdom[3] = kmin, kmin
+    kdom[4], kdom[5] = kmax, kmax
+    kdom[6], kdom[7] = kmax, kmax
+
+    bounding_min, bounding_max = np.zeros(ndom), np.zeros(ndom)
+    for i in range(ndom):
+        if bit_length > 0:
+            order_min = hilbert3d([idom[i], jdom[i], kdom[i]], bit_length)
+        else:
+            order_min = 0
+        bounding_min[i] = (order_min  )*dkey
+        bounding_max[i] = (order_min+1)*dkey
+
+    bound_key = {}
+    for icpu in range(1, ncpu+1):
+        bound_key[icpu-1], bound_key[icpu] = ds.hilbert_indices[icpu]
+
+    cpu_min, cpu_max = [np.zeros(ncpu + 1, dtype=np.int) for _ in range(2)]
+    for icpu in range(1, ncpu+1):
+        for i in range(ndom):
+            if (bound_key[icpu - 1] <= bounding_min[i] and
+                bound_key[icpu    ] >  bounding_min[i]):
+                cpu_min[i] = icpu-1
+            if (bound_key[icpu - 1] <  bounding_max[i] and
+                bound_key[icpu    ] >= bounding_max[i]):
+                cpu_max[i] = icpu
+
+    ncpu_read = 0
+    cpu_list = []
+    cpu_read = np.zeros(ncpu, dtype=np.bool)
+    for i in range(ndom):
+        for j in range(cpu_min[i], cpu_max[i]):
+            if not cpu_read[j]:
+                ncpu_read += 1
+                cpu_list.append(j)
+                cpu_read[j] = True
+
+    return sorted(cpu_list)

diff -r c2298bef11d31e9ea4e28c6a0b4eaf9807ccc2fb -r 691d304f219cc3b5db48ba9b93c7eb02ecd6c604 yt/frontends/ramses/tests/test_hilbert.py
--- /dev/null
+++ b/yt/frontends/ramses/tests/test_hilbert.py
@@ -0,0 +1,49 @@
+from yt.frontends.ramses.hilbert import get_cpu_list, hilbert3d
+from yt.testing import \
+    assert_equal, \
+    requires_file
+import numpy as np
+import yt
+
+def test_hilbert3d():
+    # 8 different cases, checked against RAMSES' own implementation
+    inputs = [[0, 0, 0],
+              [1, 0, 0],
+              [0, 1, 0],
+              [1, 1, 0],
+              [0, 0, 1],
+              [1, 0, 1],
+              [0, 1, 1],
+              [1, 1, 1]]
+    outputs = [0, 1, 7, 6, 3, 2, 4, 5]
+
+    for i, o in zip(inputs, outputs):
+        assert_equal(int(hilbert3d(i, 3)), o)
+
+
+output_00080 = "output_00080/info_00080.txt"
+@requires_file(output_00080)
+def test_get_cpu_list():
+    ds = yt.load(output_00080)
+
+    np.random.seed(16091992)
+    # These are randomly generated outputs, checked against RAMSES' own implementation
+    inputs = (
+        [[ 0.27747276,  0.30018937,  0.17916189], [ 0.42656026,  0.40509483,  0.29927838]],
+        [[ 0.90660856,  0.44201328,  0.22770587], [ 1.09175462,  0.58017918,  0.2836648 ]],
+        [[ 0.98542323,  0.58543376,  0.45858327], [ 1.04441105,  0.62079207,  0.58919283]],
+        [[ 0.42274841,  0.44887745,  0.87793679], [ 0.52066634,  0.58936331,  1.00666222]],
+        [[ 0.69964803,  0.65893669,  0.03660775], [ 0.80565696,  0.67409752,  0.11434604]])
+    outputs = (
+        [0, 15],
+        [0, 15],
+        [0, 1, 15],
+        [0, 13, 14, 15],
+        [0]
+    )
+
+    for i, o in zip(inputs, outputs):
+        bbox = i
+        ls = get_cpu_list(ds, bbox)
+        assert(len(ls) > 0)
+        assert(all(np.array(o) == np.array(ls)))


https://bitbucket.org/yt_analysis/yt/commits/b2cf9b80769c/
Changeset:   b2cf9b80769c
User:        Hugo Pfister
Date:        2017-11-30 15:08:35+00:00
Summary:     solve bug when deposit velocity/age fields
Affected #:  1 file

diff -r befe05adfea96a3285199c09543b6c22295d946f -r b2cf9b80769ca8145940e9286015aff334442d36 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -173,7 +173,7 @@
     for ax in 'xyz':
         for method, name in zip(("cic", "sum"), ("cic", "nn")):
             function = _get_density_weighted_deposit_field(
-                "particle_velocity_%s" % ax, "cm/s", method)
+                "particle_velocity_%s" % ax, "code_velocity", method)
             registry.add_field(
                 ("deposit", ("%s_"+name+"_velocity_%s") % (ptype, ax)), sampling_type="cell",
                 function=function, units=unit_system["velocity"], take_log=False,
@@ -181,7 +181,7 @@
 
     for method, name in zip(("cic", "sum"), ("cic", "nn")):
         function = _get_density_weighted_deposit_field(
-            "age", "s", method)
+            "age", "code_time", method)
         registry.add_field(
             ("deposit", ("%s_"+name+"_age") % (ptype)), sampling_type="cell",
             function=function, units=unit_system["time"], take_log=False,


https://bitbucket.org/yt_analysis/yt/commits/2bcebfab1118/
Changeset:   2bcebfab1118
User:        Hugo Pfister
Date:        2017-12-05 10:30:49+00:00
Summary:     Merge branch 'master' of https://github.com/yt-project/yt
Affected #:  10 files

diff -r b2cf9b80769ca8145940e9286015aff334442d36 -r 2bcebfab11184dd7b93e374b3f2d9d03cfec00f4 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -318,7 +318,7 @@
 * yt does not read the Maestro base state (although you can have Maestro
   map it to a full Cartesian state variable before writing the plotfile
   to get around this).  E-mail the dev list if you need this support.
-* yt supports AMReX/BoxLib particle data stored in the standard format used 
+* yt supports AMReX/BoxLib particle data stored in the standard format used
   by Nyx and WarpX, and optionally Castro. It currently does not support the ASCII particle
   data used by Maestro and Castro.
 * For Maestro, yt aliases either "tfromp" or "tfromh" to ``temperature``
@@ -331,7 +331,7 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Most AMReX/BoxLib codes output cell-centered data. If the underlying discretization
-is not cell-centered, then fields are typically averaged to cell centers before 
+is not cell-centered, then fields are typically averaged to cell centers before
 they are written to plot files for visualization. WarpX, however, has the option
 to output the raw (i.e., not averaged to cell centers) data as well.  If you
 run your WarpX simulation with ``warpx.plot_raw_fields = 1`` in your inputs
@@ -348,10 +348,10 @@
 The raw fields in WarpX are nodal in at least one direction. We define a field
 to be "nodal" in a given direction if the field data is defined at the "low"
 and "high" sides of the cell in that direction, rather than at the cell center.
-Instead of returning one field value per cell selected, nodal fields return a 
+Instead of returning one field value per cell selected, nodal fields return a
 number of values, depending on their centering. This centering is marked by
 a `nodal_flag` that describes whether the field is nodal in each dimension.
-``nodal_flag = [0, 0, 0]`` means that the field is cell-centered, while 
+``nodal_flag = [0, 0, 0]`` means that the field is cell-centered, while
 ``nodal_flag = [0, 0, 1]`` means that the field is nodal in the z direction
 and cell centered in the others, i.e. it is defined on the z faces of each cell.
 ``nodal_flag = [1, 1, 0]`` would mean that the field is centered in the z direction,
@@ -371,7 +371,7 @@
 
 Here, the field ``('raw', 'Ex')`` is nodal in two directions, so four values per cell
 are returned, corresponding to the four edges in each cell on which the variable
-is defined. ``('raw', 'Bx')`` is nodal in one direction, so two values are returned 
+is defined. ``('raw', 'Bx')`` is nodal in one direction, so two values are returned
 per cell. The standard, averaged-to-cell-centers fields are still available.
 
 Currently, slices and data selection are implemented for nodal fields. Projections,
@@ -684,7 +684,7 @@
 * fits.gz
 * fts.gz
 
-yt can currently read two kinds of FITS files: FITS image files and FITS 
+yt can currently read two kinds of FITS files: FITS image files and FITS
 binary table files containing positions, times, and energies of X-ray events.
 
 Though a FITS image is composed of a single array in the FITS file,
@@ -760,14 +760,14 @@
 If your data is of the first case, yt will determine the length units based
 on the information in the header. If your data is of the second or third
 cases, no length units will be assigned, but the world coordinate information
-about the axes will be stored in separate fields. If your data is of the 
-fourth type, the coordinates of the first three axes will be determined 
+about the axes will be stored in separate fields. If your data is of the
+fourth type, the coordinates of the first three axes will be determined
 according to cases 1-3.
 
 .. note::
 
-  Linear length-based coordinates (Case 1 above) are only supported if all 
-  dimensions have the same value for ``CUNITx``. WCS coordinates are only 
+  Linear length-based coordinates (Case 1 above) are only supported if all
+  dimensions have the same value for ``CUNITx``. WCS coordinates are only
   supported for Cases 2-4.
 
 FITS Data Decomposition
@@ -791,8 +791,8 @@
              512	  981940800
 
 For 3D spectral-cube data, the decomposition into grids will be done along the
-spectral axis since this will speed up many common operations for this 
-particular type of dataset. 
+spectral axis since this will speed up many common operations for this
+particular type of dataset.
 
 yt will generate its own domain decomposition, but the number of grids can be
 set manually by passing the ``nprocs`` parameter to the ``load`` call:
@@ -830,10 +830,10 @@
 will be generated from the pixel coordinates in the file using the WCS
 transformations provided by AstroPy.
 
-X-ray event data will be loaded as particle fields in yt, but a grid will be 
-constructed from the WCS information in the FITS header. There is a helper 
-function, ``setup_counts_fields``, which may be used to make deposited image 
-fields from the event data for different energy bands (for an example see 
+X-ray event data will be loaded as particle fields in yt, but a grid will be
+constructed from the WCS information in the FITS header. There is a helper
+function, ``setup_counts_fields``, which may be used to make deposited image
+fields from the event data for different energy bands (for an example see
 :ref:`xray_fits`).
 
 .. note::
@@ -848,7 +848,7 @@
 Additional Options
 ^^^^^^^^^^^^^^^^^^
 
-The following are additional options that may be passed to the ``load`` command 
+The following are additional options that may be passed to the ``load`` command
 when analyzing FITS data:
 
 ``nan_mask``
@@ -888,9 +888,9 @@
 Miscellaneous Tools for Use with FITS Data
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-A number of tools have been prepared for use with FITS data that enhance yt's 
-visualization and analysis capabilities for this particular type of data. These 
-are included in the ``yt.frontends.fits.misc`` module, and can be imported like 
+A number of tools have been prepared for use with FITS data that enhance yt's
+visualization and analysis capabilities for this particular type of data. These
+are included in the ``yt.frontends.fits.misc`` module, and can be imported like
 so:
 
 .. code-block:: python
@@ -900,7 +900,7 @@
 ``setup_counts_fields``
 """""""""""""""""""""""
 
-This function can be used to create image fields from X-ray counts data in 
+This function can be used to create image fields from X-ray counts data in
 different energy bands:
 
 .. code-block:: python
@@ -914,9 +914,9 @@
 ``ds9_region``
 """"""""""""""
 
-This function takes a `ds9 <http://ds9.si.edu/site/Home.html>`_ region and 
-creates a "cut region" data container from it, that can be used to select 
-the cells in the FITS dataset that fall within the region. To use this 
+This function takes a `ds9 <http://ds9.si.edu/site/Home.html>`_ region and
+creates a "cut region" data container from it, that can be used to select
+the cells in the FITS dataset that fall within the region. To use this
 functionality, the `pyregion <https://github.com/astropy/pyregion/>`_
 package must be installed.
 
@@ -930,8 +930,8 @@
 ``PlotWindowWCS``
 """""""""""""""""
 
-This class takes a on-axis ``SlicePlot`` or ``ProjectionPlot`` of FITS 
-data and adds celestial coordinates to the plot axes. To use it, a 
+This class takes an on-axis ``SlicePlot`` or ``ProjectionPlot`` of FITS
+data and adds celestial coordinates to the plot axes. To use it, a
 version of AstroPy >= 1.3 must be installed.
 
 .. code-block:: python
@@ -940,7 +940,7 @@
   wcs_slc.show() # for the IPython notebook
   wcs_slc.save()
 
-``WCSAxes`` is still in an experimental state, but as its functionality 
+``WCSAxes`` is still in an experimental state, but as its functionality
 improves it will be utilized more here.
 
 ``create_spectral_slabs``
@@ -948,14 +948,14 @@
 
 .. note::
 
-  The following functionality requires the 
-  `spectral-cube <http://spectral-cube.readthedocs.org>`_ library to be 
+  The following functionality requires the
+  `spectral-cube <http://spectral-cube.readthedocs.org>`_ library to be
   installed.
 
 If you have a spectral intensity dataset of some sort, and would like to
-extract emission in particular slabs along the spectral axis of a certain 
-width, ``create_spectral_slabs`` can be used to generate a dataset with 
-these slabs as different fields. In this example, we use it to extract 
+extract emission in particular slabs along the spectral axis of a certain
+width, ``create_spectral_slabs`` can be used to generate a dataset with
+these slabs as different fields. In this example, we use it to extract
 individual lines from an intensity cube:
 
 .. code-block:: python
@@ -968,12 +968,12 @@
                                     slab_centers, slab_width,
                                     nan_mask=0.0)
 
-All keyword arguments to ``create_spectral_slabs`` are passed on to ``load`` when 
-creating the dataset (see :ref:`additional_fits_options` above). In the 
-returned dataset, the different slabs will be different fields, with the field 
-names taken from the keys in ``slab_centers``. The WCS coordinates on the 
-spectral axis are reset so that the center of the domain along this axis is 
-zero, and the left and right edges of the domain along this axis are 
+All keyword arguments to ``create_spectral_slabs`` are passed on to ``load`` when
+creating the dataset (see :ref:`additional_fits_options` above). In the
+returned dataset, the different slabs will be different fields, with the field
+names taken from the keys in ``slab_centers``. The WCS coordinates on the
+spectral axis are reset so that the center of the domain along this axis is
+zero, and the left and right edges of the domain along this axis are
 :math:`\pm` ``0.5*slab_width``.
 
 Examples of Using FITS Data
@@ -991,10 +991,10 @@
 ----------
 
 FLASH HDF5 data is *mostly* supported and cared for by John ZuHone.  To load a
-FLASH dataset, you can use the ``yt.load`` command and provide it the file name of 
+FLASH dataset, you can use the ``yt.load`` command and provide it the file name of
 a plot file, checkpoint file, or particle file. Particle files require special handling
-depending on the situation, the main issue being that they typically lack grid information. 
-The first case is when you have a plotfile and a particle file that you would like to 
+depending on the situation, the main issue being that they typically lack grid information.
+The first case is when you have a plotfile and a particle file that you would like to
 load together. In the simplest case, this occurs automatically. For instance, if you
 were in a directory with the following files:
 
@@ -1003,8 +1003,8 @@
    radio_halo_1kpc_hdf5_plt_cnt_0100 # plotfile
    radio_halo_1kpc_hdf5_part_0100 # particle file
 
-where the plotfile and the particle file were created at the same time (therefore having 
-particle data consistent with the grid structure of the former). Notice also that the 
+where the plotfile and the particle file were created at the same time (therefore having
+particle data consistent with the grid structure of the former). Notice also that the
 prefix ``"radio_halo_1kpc_"`` and the file number ``100`` are the same. In this special case,
 the particle file will be loaded automatically when ``yt.load`` is called on the plotfile.
 This also works when loading a number of files in a time series.
@@ -1018,10 +1018,10 @@
     import yt
     ds = yt.load("radio_halo_1kpc_hdf5_plt_cnt_0100", particle_filename="radio_halo_1kpc_hdf5_part_0100")
 
-However, if you don't have a corresponding plotfile for a particle file, but would still 
-like to load the particle data, you can still call ``yt.load`` on the file. However, the 
+However, if you don't have a corresponding plotfile for a particle file, but would still
+like to load the particle data, you can still call ``yt.load`` on the file. However, the
 grid information will not be available, and the particle data will be loaded in a fashion
-similar to SPH data. 
+similar to SPH data.
 
 .. rubric:: Caveats
 
@@ -1349,7 +1349,7 @@
    yt only supports a block structure where the grid edges on the ``n``-th
    refinement level are aligned with the cell edges on the ``n-1``-th level.
 
-Particle fields are supported by adding 1-dimensional arrays to each 
+Particle fields are supported by adding 1-dimensional arrays to each
 ``grid``'s dict:
 
 .. code-block:: python
@@ -1394,7 +1394,7 @@
 simultaneously divide the domain into 12 chunks, so that you can take advantage
 of the underlying parallelism.
 
-Particle fields are added as one-dimensional arrays in a similar manner as the 
+Particle fields are added as one-dimensional arrays in a similar manner as the
 three-dimensional grid fields:
 
 .. code-block:: python
@@ -1562,7 +1562,7 @@
 
    # only plot the second
    sl = yt.SlicePlot(ds, 'z', ('connect2', 'test'))
-   
+
    # plot both
    sl = yt.SlicePlot(ds, 'z', ('all', 'test'))
 
@@ -1631,10 +1631,10 @@
 Gizmo Data
 ----------
 
-Gizmo datasets, including FIRE outputs, can be loaded into yt in the usual 
-manner.  Like other SPH data formats, yt loads Gizmo data as particle fields 
-and then uses smoothing kernels to deposit those fields to an underlying 
-grid structure as spatial fields as described in :ref:`loading-gadget-data`.  
+Gizmo datasets, including FIRE outputs, can be loaded into yt in the usual
+manner.  Like other SPH data formats, yt loads Gizmo data as particle fields
+and then uses smoothing kernels to deposit those fields to an underlying
+grid structure as spatial fields as described in :ref:`loading-gadget-data`.
 To load Gizmo datasets using the standard HDF5 output format::
 
    import yt
@@ -1642,11 +1642,11 @@
 
 Because the Gizmo output format is similar to the Gadget format, yt
 may load Gizmo datasets as Gadget depending on the circumstances, but this
-should not pose a problem in most situations.  FIRE outputs will be loaded 
-accordingly due to the number of metallicity fields found (11 or 17).  
+should not pose a problem in most situations.  FIRE outputs will be loaded
+accordingly due to the number of metallicity fields found (11 or 17).
 
 For Gizmo outputs written as raw binary outputs, you may have to specify
-a bounding box, field specification, and units as are done for standard 
+a bounding box, field specification, and units as are done for standard
 Gadget outputs.  See :ref:`loading-gadget-data` for more information.
 
 .. _halo-catalog-data:
@@ -2001,7 +2001,8 @@
    import yt
    ds = yt.load("output_00007/info_00007.txt")
 
-yt will attempt to guess the fields in the file.  You may also specify
+
+yt will attempt to guess the fields in the file. You may also specify
 a list of hydro fields by supplying the ``fields`` keyword in your
 call to ``load``. It is also possible to provide a list of *extra*
 particle fields by supplying the ``extra_particle_fields``:
@@ -2013,6 +2014,13 @@
    ds = yt.load("output_00001/info_00001.txt", extra_particle_fields=extra_fields)
    # ('all', 'family') and ('all', 'info') now in ds.field_list
 
+yt also supports the new particle output format introduced after
+version ``stable_17_091`` (the version released after the 2017 RAMSES
+User Meeting). In this case, the file ``part_file_descriptor.txt``,
+which lists the fields contained in the particle files, will be read.
+If you use a custom version of RAMSES, make sure this file is
+up-to-date and reflects the true layout of the particles.
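+
+As a rough sketch, a version-1 descriptor looks like the following
+(the layout is inferred from the reader added in this changeset; the
+exact header wording may differ)::
+
+   # version:  1
+   # ivar, variable_name, variable_type
+     1, position_x, d
+     2, position_y, d
+     3, position_z, d
+     4, velocity_x, d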
+
 yt supports outputs made by the mainline ``RAMSES`` code as well as the
 ``RAMSES-RT`` fork. Files produced by ``RAMSES-RT`` are recognized as such
 based on the presence of an ``info_rt_*.txt`` file in the output directory.
@@ -2030,6 +2038,19 @@
 default (including dark matter, stars, tracer particles, …). Sink
 particles have the particle type ``sink``.
 
+Adding custom particle fields
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+It is possible to add support for particle fields. For this, one
+should tweak
+:func:`~yt.frontends.ramses.io._read_part_file_descriptor` to include
+the field as well as its data type to the assoc list, following the
+convention from
+`python struct module <https://docs.python.org/3.5/library/struct.html#format-characters>`_.
+For example, to add support for a longint field named
+``my_custom_field``, one would add ``('my_custom_field', 'l')`` to ``assoc``.
+
+
 .. _loading-sph-data:
 
 SPH Particle Data
@@ -2113,6 +2134,3 @@
 
     import yt
     ds = yt.load(filename, cosmology_parameters={})
-
-
-

diff -r b2cf9b80769ca8145940e9286015aff334442d36 -r 2bcebfab11184dd7b93e374b3f2d9d03cfec00f4 yt/data_objects/particle_filters.py
--- a/yt/data_objects/particle_filters.py
+++ b/yt/data_objects/particle_filters.py
@@ -15,16 +15,16 @@
 #-----------------------------------------------------------------------------
 
 import copy
-from collections import defaultdict
 
 from contextlib import contextmanager
 
 from yt.fields.field_info_container import \
     NullFunc, TranslationFunc
 from yt.utilities.exceptions import YTIllDefinedFilter
+from yt.funcs import mylog
 
-# One to many mapping
-filter_registry = defaultdict(list)
+# One to one mapping
+filter_registry = {}
 
 class DummyFieldInfo(object):
     particle_type = True
@@ -131,7 +131,9 @@
     if requires is None:
         requires = []
     filter = ParticleFilter(name, function, requires, filtered_type)
-    filter_registry[name].append(filter)
+    if filter_registry.get(name, None) is not None:
+        mylog.warning('The %s particle filter already exists. Overriding.' % name)
+    filter_registry[name] = filter
 
 
 def particle_filter(name=None, requires=None, filtered_type='all'):

diff -r b2cf9b80769ca8145940e9286015aff334442d36 -r 2bcebfab11184dd7b93e374b3f2d9d03cfec00f4 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -663,11 +663,12 @@
         self.known_filters[n] = None
         if isinstance(filter, string_types):
             used = False
-            for f in filter_registry[filter]:
-                used = self._setup_filtered_type(f)
-                if used:
-                    filter = f
-                    break
+            f = filter_registry.get(filter, None)
+            if f is None:
+                return False
+            used = self._setup_filtered_type(f)
+            if used:
+                filter = f
         else:
             used = self._setup_filtered_type(filter)
         if not used:

diff -r b2cf9b80769ca8145940e9286015aff334442d36 -r 2bcebfab11184dd7b93e374b3f2d9d03cfec00f4 yt/data_objects/tests/test_particle_filter.py
--- a/yt/data_objects/tests/test_particle_filter.py
+++ b/yt/data_objects/tests/test_particle_filter.py
@@ -33,6 +33,50 @@
     ad['deposit', 'stars_cic']
     assert True
 
+
+def test_add_particle_filter_overriding():
+    """Test the add_particle_filter overriding"""
+    from yt.data_objects.particle_filters import filter_registry
+    from yt.funcs import mylog
+
+    def star_0(pfilter, data):
+        pass
+
+    def star_1(pfilter, data):
+        pass
+
+    # Use a closure to store whether the warning was called
+    def closure(status):
+        def warning_patch(*args, **kwargs):
+            status[0] = True
+
+        def was_called():
+            return status[0]
+
+        return warning_patch, was_called
+
+    ## Test 1: we add a dummy particle filter
+    add_particle_filter("dummy", function=star_0, filtered_type='all',
+                        requires=["creation_time"])
+    assert 'dummy' in filter_registry
+    assert_equal(filter_registry['dummy'].function, star_0)
+
+    ## Test 2: we add another dummy particle filter.
+    ##         a warning is expected. We use the above closure to
+    ##         check that.
+    # Store the original warning function
+    warning = mylog.warning
+    monkey_warning, monkey_patch_was_called = closure([False])
+    mylog.warning = monkey_warning
+    add_particle_filter("dummy", function=star_1, filtered_type='all',
+                        requires=["creation_time"])
+    assert_equal(filter_registry['dummy'].function, star_1)
+    assert_equal(monkey_patch_was_called(), True)
+
+    # Restore the original warning function
+    mylog.warning = warning
+
+
 @requires_file(iso_galaxy)
 def test_particle_filter():
     """Test the particle_filter decorator"""
@@ -61,7 +105,7 @@
 
     for grid in ds.index.grids[20:31]:
         cg = ds.covering_grid(grid.Level, grid.LeftEdge, grid.ActiveDimensions)
-        
+
         assert_equal(cg['stars', 'particle_ones'].shape[0],
                      grid['stars', 'particle_ones'].shape[0])
         assert_equal(cg['stars', 'particle_mass'].shape[0],

diff -r b2cf9b80769ca8145940e9286015aff334442d36 -r 2bcebfab11184dd7b93e374b3f2d9d03cfec00f4 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -34,8 +34,10 @@
     Dataset
 from yt.data_objects.octree_subset import \
     OctreeSubset
+from yt.data_objects.particle_filters import add_particle_filter
 
-from .definitions import ramses_header, field_aliases
+from .definitions import ramses_header, field_aliases, particle_families
+from .io import _read_part_file_descriptor
 from yt.utilities.physical_constants import mp, kb
 from .fields import \
     RAMSESFieldInfo, _X
@@ -58,12 +60,14 @@
 
         num = os.path.basename(ds.parameter_filename).split("."
                 )[0].split("_")[1]
+        basedir = os.path.abspath(
+            os.path.dirname(ds.parameter_filename))
         basename = "%s/%%s_%s.out%05i" % (
-            os.path.abspath(
-              os.path.dirname(ds.parameter_filename)),
-            num, domain_id)
+            basedir, num, domain_id)
+        part_file_descriptor = "%s/part_file_descriptor.txt" % basedir
         for t in ['grav', 'hydro', 'part', 'amr', 'sink']:
             setattr(self, "%s_fn" % t, basename % t)
+        self._part_file_descriptor = part_file_descriptor
         self._read_amr_header()
         self._read_hydro_header()
         self._read_particle_header()
@@ -91,6 +95,13 @@
         return os.path.exists(self.sink_fn)
 
     @property
+    def _has_part_descriptor(self):
+        '''
+        Does the output include particle file descriptor?
+        '''
+        return os.path.exists(self._part_file_descriptor)
+
+    @property
     def level_count(self):
         if self._level_count is not None: return self._level_count
         self.hydro_offset
@@ -214,6 +225,7 @@
             self.local_particle_count = 0
             self.particle_field_offsets = {}
             return
+
         f = open(self.part_fn, "rb")
         f.seek(0, os.SEEK_END)
         flen = f.tell()
@@ -233,7 +245,12 @@
         self.particle_header = hvals
         self.local_particle_count = hvals['npart']
 
-        particle_fields = [
+        # Try reading particle file descriptor
+        if self._has_part_descriptor:
+            particle_fields = (
+                _read_part_file_descriptor(self._part_file_descriptor))
+        else:
+            particle_fields = [
                 ("particle_position_x", "d"),
                 ("particle_position_y", "d"),
                 ("particle_position_z", "d"),
@@ -244,13 +261,14 @@
                 ("particle_identifier", "i"),
                 ("particle_refinement_level", "I")]
 
-        if self.ds._extra_particle_fields is not None:
-            particle_fields += self.ds._extra_particle_fields
+            if self.ds._extra_particle_fields is not None:
+                particle_fields += self.ds._extra_particle_fields
+
+        ptype = 'io'
 
         field_offsets = {}
         _pfields = {}
 
-        ptype = 'io'
 
         # Read offsets
         for field, vtype in particle_fields:
@@ -667,6 +685,25 @@
         self.storage_filename = storage_filename
 
 
+    def create_field_info(self, *args, **kwa):
+        """Extend create_field_info to add the particles types."""
+        super(RAMSESDataset, self).create_field_info(*args, **kwa)
+        # Register particle filters
+        if ('io', 'particle_family') in self.field_list:
+            for fname, value in particle_families.items():
+                def loc(val):
+                    def closure(pfilter, data):
+                        filter = data[(pfilter.filtered_type, "particle_family")] == val
+                        return filter
+
+                    return closure
+                add_particle_filter(fname, loc(value),
+                                    filtered_type='io', requires=['particle_family'])
+
+            for k in particle_families.keys():
+                mylog.info('Adding particle_type: %s' % k)
+                self.add_particle_filter('%s' % k)
+
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
 

diff -r b2cf9b80769ca8145940e9286015aff334442d36 -r 2bcebfab11184dd7b93e374b3f2d9d03cfec00f4 yt/frontends/ramses/definitions.py
--- a/yt/frontends/ramses/definitions.py
+++ b/yt/frontends/ramses/definitions.py
@@ -61,3 +61,14 @@
                           'Metallicity'),
 
 }
+
+particle_families = {
+    'DM': 1,
+    'star': 2,
+    'cloud': 3,
+    'dust': 4,
+    'star_tracer': -2,
+    'cloud_tracer': -3,
+    'dust_tracer': -4,
+    'gas_tracer': 0
+}
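+
+# Each value above is the integer stored in the `particle_family`
+# field; RAMSESDataset.create_field_info (see the diff above)
+# registers one particle filter per key, selecting the particles
+# whose `particle_family` equals the corresponding value.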

diff -r b2cf9b80769ca8145940e9286015aff334442d36 -r 2bcebfab11184dd7b93e374b3f2d9d03cfec00f4 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -64,6 +64,7 @@
 _X = 0.76 # H fraction, hardcoded
 _Y = 0.24 # He fraction, hardcoded
 
+
 class RAMSESFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("Density", (rho_units, ["density"], None)),
@@ -88,7 +89,10 @@
         ("particle_identifier", ("", ["particle_index"], None)),
         ("particle_refinement_level", ("", [], None)),
         ("particle_age", ("code_time", ['age'], None)),
+        ("particle_birth_time", ("code_time", ['age'], None)),
         ("particle_metallicity", ("", [], None)),
+        ("particle_family", ("", [], None)),
+        ("particle_tag", ("", [], None))
     )
 
     known_sink_fields = (

diff -r b2cf9b80769ca8145940e9286015aff334442d36 -r 2bcebfab11184dd7b93e374b3f2d9d03cfec00f4 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -23,7 +23,10 @@
 import yt.utilities.fortran_utils as fpu
 from yt.utilities.lib.cosmology_time import \
     get_ramses_ages
+from yt.utilities.exceptions import YTFieldTypeNotFound, YTParticleOutputFormatNotImplemented, \
+    YTFileNotParseable
 from yt.extern.six import PY3
+import re
 
 if PY3:
     from io import BytesIO as IO
@@ -156,9 +159,66 @@
 
             else:
                 # Raise here an exception
-                raise Exception('Unknown particle type %s' % ptype)
+                raise YTFieldTypeNotFound(ptype)
 
             tr.update(_ramses_particle_file_handler(
                 fname, foffsets, data_types, subset, subs_fields))
 
         return tr
+
+def _read_part_file_descriptor(fname):
+    """
+    Read the particle file descriptor and return the list of fields found.
+    """
+    VERSION_RE = re.compile(r'# version: *(\d+)')
+    VAR_DESC_RE = re.compile(r'\s*(\d+),\s*(\w+),\s*(\w+)')
+
+    # Mapping
+    mapping = [
+        ('position_x', 'particle_position_x'),
+        ('position_y', 'particle_position_y'),
+        ('position_z', 'particle_position_z'),
+        ('velocity_x', 'particle_velocity_x'),
+        ('velocity_y', 'particle_velocity_y'),
+        ('velocity_z', 'particle_velocity_z'),
+        ('mass', 'particle_mass'),
+        ('identity', 'particle_identity'),
+        ('levelp', 'particle_level'),
+        ('family', 'particle_family'),
+        ('tag', 'particle_tag')
+    ]
+    # Convert the mapping to a dictionary
+    mapping = {k: v for k, v in mapping}
+
+    with open(fname, 'r') as f:
+        line = f.readline()
+        tmp = VERSION_RE.match(line)
+        mylog.info('Reading part file descriptor.')
+        if not tmp:
+            raise YTParticleOutputFormatNotImplemented()
+
+        version = int(tmp.group(1))
+
+        if version == 1:
+            # Skip one line (containing the headers)
+            line = f.readline()
+            fields = []
+            for i, line in enumerate(f.readlines()):
+                tmp = VAR_DESC_RE.match(line)
+                if not tmp:
+                    raise YTFileNotParseable(fname, i+1)
+
+                # ivar = tmp.group(1)
+                varname = tmp.group(2)
+                dtype = tmp.group(3)
+
+                if varname in mapping:
+                    varname = mapping[varname]
+                else:
+                    varname = 'particle_%s' % varname
+
+                fields.append((varname, dtype))
+        else:
+            raise YTParticleOutputFormatNotImplemented()
+
+    return fields
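+
+# A hedged usage sketch: _read_part_file_descriptor('part_file_descriptor.txt')
+# returns a list such as [('particle_position_x', 'd'), ...], which the
+# particle header reader above consumes as (field name, dtype) pairs.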

diff -r b2cf9b80769ca8145940e9286015aff334442d36 -r 2bcebfab11184dd7b93e374b3f2d9d03cfec00f4 yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -204,3 +204,33 @@
 
     for field in expected_fields:
         assert(('sink', 'field') not in ds.field_list)
+
+
+ramses_new_format = "ramses_new_format/output_00002/info_00002.txt"
+ at requires_file(ramses_new_format)
+def test_new_format():
+    expected_particle_fields = [
+        ('star', 'particle_identity'),
+        ('star', 'particle_level'),
+        ('star', 'particle_mass'),
+        ('star', 'particle_metallicity'),
+        ('star', 'particle_position_x'),
+        ('star', 'particle_position_y'),
+        ('star', 'particle_position_z'),
+        ('star', 'particle_tag'),
+        ('star', 'particle_velocity_x'),
+        ('star', 'particle_velocity_y'),
+        ('star', 'particle_velocity_z')]
+
+    ds = yt.load(ramses_new_format)
+    ad = ds.all_data()
+
+    # Check all the expected fields exist and can be accessed
+    for f in expected_particle_fields:
+        assert(f in ds.derived_field_list)
+        ad[f]
+
+    # Check that there are only stars (family 2), all with tag 0
+    assert(all(ad['star', 'particle_family'] == 2))
+    assert(all(ad['star', 'particle_tag'] == 0))
+    assert(len(ad['star', 'particle_tag']) == 600)

diff -r b2cf9b80769ca8145940e9286015aff334442d36 -r 2bcebfab11184dd7b93e374b3f2d9d03cfec00f4 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -424,6 +424,19 @@
         v += r"'%s'."
         return v % (self.obj_name, self.ds)
 
+class YTParticleOutputFormatNotImplemented(YTException):
+    def __str__(self):
+        return "The particle output format is not supported."
+
+class YTFileNotParseable(YTException):
+    def __init__(self, fname, line):
+        self.fname = fname
+        self.line = line
+
+    def __str__(self):
+        v = r"Error while parsing file %s at line %s"
+        return v % (self.fname, self.line)
+
 class YTRockstarMultiMassNotSupported(YTException):
     def __init__(self, mi, ma, ptype):
         self.mi = mi


https://bitbucket.org/yt_analysis/yt/commits/c4bf32338e8e/
Changeset:   c4bf32338e8e
User:        ngoldbaum
Date:        2017-12-06 16:13:06+00:00
Summary:     Merge pull request #1638 from HugoPfister/master

solve bug when deposit velocity/age fields?
Affected #:  1 file

diff -r 691d304f219cc3b5db48ba9b93c7eb02ecd6c604 -r c4bf32338e8eea3fe3ff40f1a6a8253427200d1d yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -173,7 +173,7 @@
     for ax in 'xyz':
         for method, name in zip(("cic", "sum"), ("cic", "nn")):
             function = _get_density_weighted_deposit_field(
-                "particle_velocity_%s" % ax, "cm/s", method)
+                "particle_velocity_%s" % ax, "code_velocity", method)
             registry.add_field(
                 ("deposit", ("%s_"+name+"_velocity_%s") % (ptype, ax)), sampling_type="cell",
                 function=function, units=unit_system["velocity"], take_log=False,
@@ -181,7 +181,7 @@
 
     for method, name in zip(("cic", "sum"), ("cic", "nn")):
         function = _get_density_weighted_deposit_field(
-            "age", "s", method)
+            "age", "code_time", method)
         registry.add_field(
             ("deposit", ("%s_"+name+"_age") % (ptype)), sampling_type="cell",
             function=function, units=unit_system["time"], take_log=False,

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

