[yt-svn] commit/yt: 5 new changesets

commits-noreply@bitbucket.org
Mon Jul 10 12:21:29 PDT 2017


5 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/55444431a9b3/
Changeset:   55444431a9b3
User:        Corentin Cadiou
Date:        2017-06-28 14:29:21+00:00
Summary:     add support for extra array
Affected #:  1 file

diff -r b8ae5d5e61686d61d691f33ace5397ffcf7f5c35 -r 55444431a9b3ccdb36ac896c9e96309fe7186d39 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -54,6 +54,7 @@
         self.ds = ds
         self.domain_id = domain_id
         self.nvar = 0 # Set this later!
+
         num = os.path.basename(ds.parameter_filename).split("."
                 )[0].split("_")[1]
         basename = "%s/%%s_%s.out%05i" % (
@@ -154,19 +155,22 @@
         hvals.update(fpu.read_attrs(f, attrs))
         self.particle_header = hvals
         self.local_particle_count = hvals['npart']
+
         particle_fields = [
-                ("particle_position_x", "d"),
-                ("particle_position_y", "d"),
-                ("particle_position_z", "d"),
-                ("particle_velocity_x", "d"),
-                ("particle_velocity_y", "d"),
-                ("particle_velocity_z", "d"),
-                ("particle_mass", "d"),
-                ("particle_identifier", "i"),
-                ("particle_refinement_level", "I")]
+            ("particle_position_x", "d"),
+            ("particle_position_y", "d"),
+            ("particle_position_z", "d"),
+            ("particle_velocity_x", "d"),
+            ("particle_velocity_y", "d"),
+            ("particle_velocity_z", "d"),
+            ("particle_mass", "d"),
+            ("particle_identifier", "i"),
+            ("particle_refinement_level", "I")]
         if hvals["nstar_tot"] > 0:
             particle_fields += [("particle_age", "d"),
                                 ("particle_metallicity", "d")]
+        if self.ds._extra_particle_fields is not None:
+            particle_fields += self.ds._extra_particle_fields
 
         field_offsets = {}
         _pfields = {}
@@ -205,7 +209,7 @@
 
     def _read_amr(self):
         """Open the oct file, read in octs level-by-level.
-           For each oct, only the position, index, level and domain 
+           For each oct, only the position, index, level and domain
            are needed - its position in the octree is found automatically.
            The most important is finding all the information to feed
            oct_handler.add
@@ -231,7 +235,7 @@
         min_level = self.ds.min_level
         # yt max level is not the same as the RAMSES one.
         # yt max level is the maximum number of additional refinement levels
-        # so for a uni grid run with no refinement, it would be 0. 
+        # so for a uni grid run with no refinement, it would be 0.
         # So we initially assume that.
         max_level = 0
         nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
@@ -368,7 +372,7 @@
             dsl.update(set(domain.particle_field_offsets.keys()))
         self.particle_field_list = list(dsl)
         self.field_list = [("ramses", f) for f in self.fluid_field_list] \
-                        + self.particle_field_list
+                          + self.particle_field_list
 
     def _setup_auto_fields(self):
         '''
@@ -376,7 +380,7 @@
         '''
         # TODO: SUPPORT RT - THIS REQUIRES IMPLEMENTING A NEW FILE READER!
         # Find nvar
-        
+
 
         # TODO: copy/pasted from DomainFile; needs refactoring!
         num = os.path.basename(self.dataset.parameter_filename).split("."
@@ -410,25 +414,25 @@
             raise ValueError
         # Basic hydro runs
         if nvar == 5:
-            fields = ["Density", 
-                      "x-velocity", "y-velocity", "z-velocity", 
+            fields = ["Density",
+                      "x-velocity", "y-velocity", "z-velocity",
                       "Pressure"]
         if nvar > 5 and nvar < 11:
-            fields = ["Density", 
-                      "x-velocity", "y-velocity", "z-velocity", 
+            fields = ["Density",
+                      "x-velocity", "y-velocity", "z-velocity",
                       "Pressure", "Metallicity"]
         # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
         if nvar == 11:
-            fields = ["Density", 
-                      "x-velocity", "y-velocity", "z-velocity", 
-                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left", 
-                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right", 
+            fields = ["Density",
+                      "x-velocity", "y-velocity", "z-velocity",
+                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
+                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
                       "Pressure"]
         if nvar > 11:
-            fields = ["Density", 
-                      "x-velocity", "y-velocity", "z-velocity", 
-                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left", 
-                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right", 
+            fields = ["Density",
+                      "x-velocity", "y-velocity", "z-velocity",
+                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
+                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
                       "Pressure","Metallicity"]
         while len(fields) < nvar:
             fields.append("var"+str(len(fields)))
@@ -486,9 +490,9 @@
         return {'io': npart}
 
     def print_stats(self):
-        
+
         # This function prints information based on the fluid on the grids,
-        # and therefore does not work for DM only runs. 
+        # and therefore does not work for DM only runs.
         if not self.fluid_field_list:
             print("This function is not implemented for DM only runs")
             return
@@ -528,19 +532,23 @@
     _index_class = RAMSESIndex
     _field_info_class = RAMSESFieldInfo
     gamma = 1.4 # This will get replaced on hydro_fn open
-    
+
     def __init__(self, filename, dataset_type='ramses',
                  fields = None, storage_filename = None,
-                 units_override=None, unit_system="cgs"):
+                 units_override=None, unit_system="cgs",
+                 extra_particle_fields=None):
         # Here we want to initiate a traceback, if the reader is not built.
         if isinstance(fields, string_types):
             fields = field_aliases[fields]
         '''
         fields: An array of hydro variable fields in order of position in the hydro_XXXXX.outYYYYY file
                 If set to None, will try a default set of fields
+        extra_particle_fields: An array of extra particle variables in order of position in the particle_XXXXX.outYYYYY file
+                If set to None, will try a default set of field
         '''
         self.fluid_types += ("ramses",)
         self._fields_in_file = fields
+        self._extra_particle_fields = extra_particle_fields
         Dataset.__init__(self, filename, dataset_type, units_override=units_override,
                          unit_system=unit_system)
         self.storage_filename = storage_filename
@@ -559,7 +567,7 @@
         time_unit = self.parameters['unit_t']
 
         # calculating derived units (except velocity and temperature, done below)
-        mass_unit = density_unit * length_unit**3     
+        mass_unit = density_unit * length_unit**3
         magnetic_unit = np.sqrt(4*np.pi * mass_unit /
                                 (time_unit**2 * length_unit))
         pressure_unit = density_unit * (length_unit / time_unit)**2
@@ -658,7 +666,7 @@
 
             self.time_simu = self.t_frw[iage  ]*(age-self.tau_frw[iage-1])/(self.tau_frw[iage]-self.tau_frw[iage-1])+ \
                              self.t_frw[iage-1]*(age-self.tau_frw[iage  ])/(self.tau_frw[iage-1]-self.tau_frw[iage])
- 
+
             self.current_time = (self.time_tot + self.time_simu)/(self.hubble_constant*1e7/3.08e24)/self.parameters['unit_t']
 
 


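The net effect of this changeset is a new extra_particle_fields keyword on the RAMSES dataset: each (name, format) pair passed in is appended to the default particle_fields list when the particle header is read. A minimal usage sketch, assuming a RAMSES output laid out like the one in the test added later in this series (the output path and field names below are placeholders, not part of this changeset):

   import yt

   # hypothetical output; any RAMSES info_XXXXX.txt is loaded the same way
   extra_fields = [('family', 'I'), ('pointer', 'I')]
   ds = yt.load('output_00001/info_00001.txt',
                extra_particle_fields=extra_fields)

   # the extra fields are appended after the default particle fields
   # and show up under the 'all' particle type
   print(('all', 'family') in ds.field_list)
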
https://bitbucket.org/yt_analysis/yt/commits/cd0e3f809cf3/
Changeset:   cd0e3f809cf3
User:        Corentin Cadiou
Date:        2017-06-28 14:32:08+00:00
Summary:     clarify doc
Affected #:  1 file

diff -r 55444431a9b3ccdb36ac896c9e96309fe7186d39 -r cd0e3f809cf3c2c75def57512659192139dd72b3 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -543,8 +543,7 @@
         '''
         fields: An array of hydro variable fields in order of position in the hydro_XXXXX.outYYYYY file
                 If set to None, will try a default set of fields
-        extra_particle_fields: An array of extra particle variables in order of position in the particle_XXXXX.outYYYYY file
-                If set to None, will try a default set of field
+        extra_particle_fields: An array of extra particle variables in order of position in the particle_XXXXX.outYYYYY file.
         '''
         self.fluid_types += ("ramses",)
         self._fields_in_file = fields


https://bitbucket.org/yt_analysis/yt/commits/1d7019588e0a/
Changeset:   1d7019588e0a
User:        Corentin Cadiou
Date:        2017-06-28 15:43:03+00:00
Summary:     add test
Affected #:  1 file

diff -r cd0e3f809cf3c2c75def57512659192139dd72b3 -r 1d7019588e0a94ad11f7b4f1286f4ac1a3cb154c yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -70,3 +70,19 @@
 
     expected_time = 14087886140997.336 # in seconds
     assert_equal(ds.current_time.in_units('s').value, expected_time)
+
+ramsesExtraFieldsSmall = 'ramses_extra_fields_small/output_00001'
+@requires_file(ramsesExtraFieldsSmall)
+def test_extra_fields():
+    extra_fields = [('family', 'I'), ('pointer', 'I')]
+    ds = yt.load(os.path.join(ramsesExtraFieldsSmall, 'info_00001.txt'),
+                 extra_particle_fields=extra_fields)
+
+    # the dataset should contain the fields
+    for field, _ in extra_fields:
+        assert ('all', field) in ds.field_list
+
+    # Check the family (they should equal 100, for tracer particles)
+    dd = ds.all_data()
+    families = dd[('all', 'family')]
+    assert all(families == 100)

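The second element of each tuple in the test is a single-character format code in the style of Python's struct module, matching the defaults in the first changeset ('d' for 64-bit floats, 'i' and 'I' for signed and unsigned 32-bit integers). An illustrative mapping to numpy dtypes, assuming the codes are interpreted struct-style as those defaults suggest:

   import numpy as np

   # a few of the default RAMSES particle fields plus the extras from the test
   particle_fields = [
       ('particle_position_x', 'd'),   # 'd' -> float64
       ('particle_identifier', 'i'),   # 'i' -> int32
       ('family', 'I'),                # 'I' -> uint32
       ('pointer', 'I'),
   ]
   dtypes = {name: np.dtype(code) for name, code in particle_fields}
   print(dtypes['family'])   # uint32
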

https://bitbucket.org/yt_analysis/yt/commits/6b130b267ccc/
Changeset:   6b130b267ccc
User:        Corentin Cadiou
Date:        2017-06-29 09:03:55+00:00
Summary:     doc
Affected #:  1 file

diff -r 1d7019588e0a94ad11f7b4f1286f4ac1a3cb154c -r 6b130b267ccc893225d9bdaa28e98d52dc167158 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1909,8 +1909,18 @@
    import yt
    ds = yt.load("output_00007/info_00007.txt")
 
-yt will attempt to guess the fields in the file.  You may also specify a list
-of fields by supplying the ``fields`` keyword in your call to ``load``.
+yt will attempt to guess the fields in the file.  You may also specify
+a list of hydro fields by supplying the ``fields`` keyword in your
+call to ``load``. It is also possible to provide a list of *extra*
+particle fields by supplying the ``extra_particle_fields``:
+
+.. code-block:: python
+
+   import yt
+   extra_fields = [('family', 'I'), ('info', 'I')]
+   ds = yt.load("output_00001/info_00001.txt", extra_particle_fields=extra_fields)
+   # ('all', 'family') and ('all', 'info') now in ds.field_list
+
 
 .. _loading-sph-data:
 

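The documentation example stops at loading; the test added two changesets earlier shows the natural follow-up, reading the new fields back out of a data object. A short sketch of that access pattern (the field names come from the documentation example above; the dataset path is a placeholder):

   import yt

   extra_fields = [('family', 'I'), ('info', 'I')]
   ds = yt.load('output_00001/info_00001.txt',
                extra_particle_fields=extra_fields)

   dd = ds.all_data()
   family = dd[('all', 'family')]   # one value per particle
   print(family.min(), family.max())
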

https://bitbucket.org/yt_analysis/yt/commits/2447f34f2682/
Changeset:   2447f34f2682
User:        ngoldbaum
Date:        2017-07-10 19:20:46+00:00
Summary:     Merge pull request #1470 from cphyc/master

[RAMSES] Support for custom particle fields
Affected #:  3 files

diff -r 6e1264f48f8060e56cbaab4a367d7dd67e803485 -r 2447f34f2682526765e4b9dd5dd247f062cd3d5a doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1909,8 +1909,18 @@
    import yt
    ds = yt.load("output_00007/info_00007.txt")
 
-yt will attempt to guess the fields in the file.  You may also specify a list
-of fields by supplying the ``fields`` keyword in your call to ``load``.
+yt will attempt to guess the fields in the file.  You may also specify
+a list of hydro fields by supplying the ``fields`` keyword in your
+call to ``load``. It is also possible to provide a list of *extra*
+particle fields by supplying the ``extra_particle_fields``:
+
+.. code-block:: python
+
+   import yt
+   extra_fields = [('family', 'I'), ('info', 'I')]
+   ds = yt.load("output_00001/info_00001.txt", extra_particle_fields=extra_fields)
+   # ('all', 'family') and ('all', 'info') now in ds.field_list
+
 
 .. _loading-sph-data:
 

diff -r 6e1264f48f8060e56cbaab4a367d7dd67e803485 -r 2447f34f2682526765e4b9dd5dd247f062cd3d5a yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -54,6 +54,7 @@
         self.ds = ds
         self.domain_id = domain_id
         self.nvar = 0 # Set this later!
+
         num = os.path.basename(ds.parameter_filename).split("."
                 )[0].split("_")[1]
         basename = "%s/%%s_%s.out%05i" % (
@@ -154,19 +155,22 @@
         hvals.update(fpu.read_attrs(f, attrs))
         self.particle_header = hvals
         self.local_particle_count = hvals['npart']
+
         particle_fields = [
-                ("particle_position_x", "d"),
-                ("particle_position_y", "d"),
-                ("particle_position_z", "d"),
-                ("particle_velocity_x", "d"),
-                ("particle_velocity_y", "d"),
-                ("particle_velocity_z", "d"),
-                ("particle_mass", "d"),
-                ("particle_identifier", "i"),
-                ("particle_refinement_level", "I")]
+            ("particle_position_x", "d"),
+            ("particle_position_y", "d"),
+            ("particle_position_z", "d"),
+            ("particle_velocity_x", "d"),
+            ("particle_velocity_y", "d"),
+            ("particle_velocity_z", "d"),
+            ("particle_mass", "d"),
+            ("particle_identifier", "i"),
+            ("particle_refinement_level", "I")]
         if hvals["nstar_tot"] > 0:
             particle_fields += [("particle_age", "d"),
                                 ("particle_metallicity", "d")]
+        if self.ds._extra_particle_fields is not None:
+            particle_fields += self.ds._extra_particle_fields
 
         field_offsets = {}
         _pfields = {}
@@ -205,7 +209,7 @@
 
     def _read_amr(self):
         """Open the oct file, read in octs level-by-level.
-           For each oct, only the position, index, level and domain 
+           For each oct, only the position, index, level and domain
            are needed - its position in the octree is found automatically.
            The most important is finding all the information to feed
            oct_handler.add
@@ -231,7 +235,7 @@
         min_level = self.ds.min_level
         # yt max level is not the same as the RAMSES one.
         # yt max level is the maximum number of additional refinement levels
-        # so for a uni grid run with no refinement, it would be 0. 
+        # so for a uni grid run with no refinement, it would be 0.
         # So we initially assume that.
         max_level = 0
         nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
@@ -368,7 +372,7 @@
             dsl.update(set(domain.particle_field_offsets.keys()))
         self.particle_field_list = list(dsl)
         self.field_list = [("ramses", f) for f in self.fluid_field_list] \
-                        + self.particle_field_list
+                          + self.particle_field_list
 
     def _setup_auto_fields(self):
         '''
@@ -376,7 +380,7 @@
         '''
         # TODO: SUPPORT RT - THIS REQUIRES IMPLEMENTING A NEW FILE READER!
         # Find nvar
-        
+
 
         # TODO: copy/pasted from DomainFile; needs refactoring!
         num = os.path.basename(self.dataset.parameter_filename).split("."
@@ -410,25 +414,25 @@
             raise ValueError
         # Basic hydro runs
         if nvar == 5:
-            fields = ["Density", 
-                      "x-velocity", "y-velocity", "z-velocity", 
+            fields = ["Density",
+                      "x-velocity", "y-velocity", "z-velocity",
                       "Pressure"]
         if nvar > 5 and nvar < 11:
-            fields = ["Density", 
-                      "x-velocity", "y-velocity", "z-velocity", 
+            fields = ["Density",
+                      "x-velocity", "y-velocity", "z-velocity",
                       "Pressure", "Metallicity"]
         # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
         if nvar == 11:
-            fields = ["Density", 
-                      "x-velocity", "y-velocity", "z-velocity", 
-                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left", 
-                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right", 
+            fields = ["Density",
+                      "x-velocity", "y-velocity", "z-velocity",
+                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
+                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
                       "Pressure"]
         if nvar > 11:
-            fields = ["Density", 
-                      "x-velocity", "y-velocity", "z-velocity", 
-                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left", 
-                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right", 
+            fields = ["Density",
+                      "x-velocity", "y-velocity", "z-velocity",
+                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
+                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
                       "Pressure","Metallicity"]
         while len(fields) < nvar:
             fields.append("var"+str(len(fields)))
@@ -486,9 +490,9 @@
         return {'io': npart}
 
     def print_stats(self):
-        
+
         # This function prints information based on the fluid on the grids,
-        # and therefore does not work for DM only runs. 
+        # and therefore does not work for DM only runs.
         if not self.fluid_field_list:
             print("This function is not implemented for DM only runs")
             return
@@ -528,19 +532,22 @@
     _index_class = RAMSESIndex
     _field_info_class = RAMSESFieldInfo
     gamma = 1.4 # This will get replaced on hydro_fn open
-    
+
     def __init__(self, filename, dataset_type='ramses',
                  fields = None, storage_filename = None,
-                 units_override=None, unit_system="cgs"):
+                 units_override=None, unit_system="cgs",
+                 extra_particle_fields=None):
         # Here we want to initiate a traceback, if the reader is not built.
         if isinstance(fields, string_types):
             fields = field_aliases[fields]
         '''
         fields: An array of hydro variable fields in order of position in the hydro_XXXXX.outYYYYY file
                 If set to None, will try a default set of fields
+        extra_particle_fields: An array of extra particle variables in order of position in the particle_XXXXX.outYYYYY file.
         '''
         self.fluid_types += ("ramses",)
         self._fields_in_file = fields
+        self._extra_particle_fields = extra_particle_fields
         Dataset.__init__(self, filename, dataset_type, units_override=units_override,
                          unit_system=unit_system)
         self.storage_filename = storage_filename
@@ -559,7 +566,7 @@
         time_unit = self.parameters['unit_t']
 
         # calculating derived units (except velocity and temperature, done below)
-        mass_unit = density_unit * length_unit**3     
+        mass_unit = density_unit * length_unit**3
         magnetic_unit = np.sqrt(4*np.pi * mass_unit /
                                 (time_unit**2 * length_unit))
         pressure_unit = density_unit * (length_unit / time_unit)**2
@@ -658,7 +665,7 @@
 
             self.time_simu = self.t_frw[iage  ]*(age-self.tau_frw[iage-1])/(self.tau_frw[iage]-self.tau_frw[iage-1])+ \
                              self.t_frw[iage-1]*(age-self.tau_frw[iage  ])/(self.tau_frw[iage-1]-self.tau_frw[iage])
- 
+
             self.current_time = (self.time_tot + self.time_simu)/(self.hubble_constant*1e7/3.08e24)/self.parameters['unit_t']
 
 

diff -r 6e1264f48f8060e56cbaab4a367d7dd67e803485 -r 2447f34f2682526765e4b9dd5dd247f062cd3d5a yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -70,3 +70,19 @@
 
     expected_time = 14087886140997.336 # in seconds
     assert_equal(ds.current_time.in_units('s').value, expected_time)
+
+ramsesExtraFieldsSmall = 'ramses_extra_fields_small/output_00001'
+@requires_file(ramsesExtraFieldsSmall)
+def test_extra_fields():
+    extra_fields = [('family', 'I'), ('pointer', 'I')]
+    ds = yt.load(os.path.join(ramsesExtraFieldsSmall, 'info_00001.txt'),
+                 extra_particle_fields=extra_fields)
+
+    # the dataset should contain the fields
+    for field, _ in extra_fields:
+        assert ('all', field) in ds.field_list
+
+    # Check the family (they should equal 100, for tracer particles)
+    dd = ds.all_data()
+    families = dd[('all', 'family')]
+    assert all(families == 100)

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled and are the addressed
recipient of this email.


