[yt-svn] commit/yt: 6 new changesets

commits-noreply at bitbucket.org
Sat Jun 28 09:57:34 PDT 2014


6 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/6018e280f315/
Changeset:   6018e280f315
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-26 18:53:57
Summary:     We want to iterate in sorted order for data_files.

This fixes a potentially difficult bug, wherein the data_files, and thus the
particles, were being iterated over in a non-deterministic order.  Recent
changes to how the iteration occurred (along with a few additional changes to
memory allocation, which can alter the iteration order of data_files
non-deterministically) caused this to show up as an answer test failure in
OWLS.

This adds in a sorting order for particle data files.
Affected #:  7 files

diff -r 35cecc8a0a24bbf074956ef320f70e0e686478a7 -r 6018e280f3152f80e42dde1c7cde75de0025ca1f yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -717,3 +717,6 @@
 
     def _calculate_offsets(self, fields):
         pass
+
+    def __cmp__(self, other):
+        return cmp(self.filename, other.filename)
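
A note on the comparison hook above: __cmp__ and the cmp() builtin exist only
in Python 2; sorted(data_files) relies on this hook to impose a deterministic,
filename-based order.  A rough Python 3 equivalent of the same ordering,
sketched here with a hypothetical stand-in class rather than yt's actual
data_file type, would define rich comparisons instead:

    import functools

    @functools.total_ordering
    class ParticleDataFile:
        """Hypothetical stand-in for a data_file object, ordered by filename."""
        def __init__(self, filename):
            self.filename = filename

        def __eq__(self, other):
            return self.filename == other.filename

        def __lt__(self, other):
            return self.filename < other.filename

        def __hash__(self):
            # Needed so instances can still be collected into the data_files sets.
            return hash(self.filename)

    files = {ParticleDataFile("snap_002"), ParticleDataFile("snap_000")}
    print([df.filename for df in sorted(files)])   # ['snap_000', 'snap_002']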

diff -r 35cecc8a0a24bbf074956ef320f70e0e686478a7 -r 6018e280f3152f80e42dde1c7cde75de0025ca1f yt/frontends/halo_catalogs/halo_catalog/io.py
--- a/yt/frontends/halo_catalogs/halo_catalog/io.py
+++ b/yt/frontends/halo_catalogs/halo_catalog/io.py
@@ -43,7 +43,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = data_file.header['num_halos']
             with h5py.File(data_file.filename, "r") as f:
                 x = f['particle_position_x'].value.astype("float64")
@@ -61,7 +61,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = data_file.header['num_halos']
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):

diff -r 35cecc8a0a24bbf074956ef320f70e0e686478a7 -r 6018e280f3152f80e42dde1c7cde75de0025ca1f yt/frontends/halo_catalogs/owls_subfind/io.py
--- a/yt/frontends/halo_catalogs/owls_subfind/io.py
+++ b/yt/frontends/halo_catalogs/owls_subfind/io.py
@@ -44,7 +44,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):
                     pcount = data_file.total_particles[ptype]
@@ -78,7 +78,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):
                     pcount = data_file.total_particles[ptype]

diff -r 35cecc8a0a24bbf074956ef320f70e0e686478a7 -r 6018e280f3152f80e42dde1c7cde75de0025ca1f yt/frontends/halo_catalogs/rockstar/io.py
--- a/yt/frontends/halo_catalogs/rockstar/io.py
+++ b/yt/frontends/halo_catalogs/rockstar/io.py
@@ -45,7 +45,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = data_file.header['num_halos']
             with open(data_file.filename, "rb") as f:
                 f.seek(data_file._position_offset, os.SEEK_SET)
@@ -65,7 +65,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = data_file.header['num_halos']
             with open(data_file.filename, "rb") as f:
                 for ptype, field_list in sorted(ptf.items()):

diff -r 35cecc8a0a24bbf074956ef320f70e0e686478a7 -r 6018e280f3152f80e42dde1c7cde75de0025ca1f yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -48,7 +48,7 @@
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
         assert(len(data_files) == 1)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = self._handle['x'].size
             yield "dark_matter", (
                 self._handle['x'], self._handle['y'], self._handle['z'])
@@ -62,7 +62,7 @@
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
         assert(len(data_files) == 1)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = self._handle['x'].size
             for ptype, field_list in sorted(ptf.items()):
                 x = self._handle['x']

diff -r 35cecc8a0a24bbf074956ef320f70e0e686478a7 -r 6018e280f3152f80e42dde1c7cde75de0025ca1f yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -77,7 +77,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             f = _get_h5_handle(data_file.filename)
             # This double-reads
             for ptype, field_list in sorted(ptf.items()):
@@ -93,7 +93,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             f = _get_h5_handle(data_file.filename)
             for ptype, field_list in sorted(ptf.items()):
                 g = f["/%s" % ptype]
@@ -251,7 +251,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
@@ -268,7 +268,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
@@ -498,7 +498,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
@@ -519,7 +519,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
@@ -725,7 +725,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             for ptype in ptf:
                 s = self._open_stream(data_file, (ptype, "Coordinates"))
                 c = np.frombuffer(s, dtype="float64")
@@ -738,7 +738,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             for ptype, field_list in sorted(ptf.items()):
                 s = self._open_stream(data_file, (ptype, "Coordinates"))
                 c = np.frombuffer(s, dtype="float64")

diff -r 35cecc8a0a24bbf074956ef320f70e0e686478a7 -r 6018e280f3152f80e42dde1c7cde75de0025ca1f yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -108,7 +108,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             f = self.fields[data_file.filename]
             # This double-reads
             for ptype, field_list in sorted(ptf.items()):
@@ -121,7 +121,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             f = self.fields[data_file.filename]
             for ptype, field_list in sorted(ptf.items()):
                 x, y, z = (f[ptype, "particle_position_%s" % ax]


https://bitbucket.org/yt_analysis/yt/commits/19c930cb6534/
Changeset:   19c930cb6534
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-27 15:54:44
Summary:     Reduce roundoff error.
Affected #:  1 file

diff -r 6018e280f3152f80e42dde1c7cde75de0025ca1f -r 19c930cb6534137a2bc17c41c0083a9f2a4b234e yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -18,6 +18,7 @@
 cimport numpy as np
 import numpy as np
 from selection_routines cimport SelectorObject
+from libc.math cimport rint
 cimport selection_routines
 
 ORDER_MAX = 20
@@ -278,7 +279,7 @@
         cdef np.int64_t ind[3], level = -1
         for i in range(3):
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
-            ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
+            ind[i] = <np.int64_t> (rint((ppos[i] - self.DLE[i])/dds[i]))
             cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
             ipos[i] = 0
             ind32[i] = ind[i]
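
To see the roundoff issue this addresses: a particle sitting exactly on a
cell boundary can yield an index fraction just below an integer, and the old
plain truncating cast then selects the cell to its left.  A small Python
illustration with made-up domain values (the real code does this in Cython on
the oct container's domain edges):

    DLE, DRE, nn = 0.0, 1.0, 10   # domain left/right edge and root grid size (illustrative only)
    dds = (DRE - DLE) / nn        # cell width = 0.1
    ppos = 0.3                    # particle exactly on the boundary of cell 3
    x = (ppos - DLE) / dds

    print(x)          # 2.9999999999999996 -- floating-point roundoff
    print(int(x))     # 2: a truncating cast lands one cell too far to the left
    print(round(x))   # 3: rint-style round-to-nearest recovers the intended index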


https://bitbucket.org/yt_analysis/yt/commits/ec0dea9da851/
Changeset:   ec0dea9da851
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-27 17:14:02
Summary:     Reducing unit conversions
Affected #:  1 file

diff -r 19c930cb6534137a2bc17c41c0083a9f2a4b234e -r ec0dea9da8519fc47d2853ca6df2a9e8e14df3ec yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -417,6 +417,8 @@
 
         """
         new_units = self._unit_repr_check_same(units)
+        if new_units == self.units:
+            return self
         conversion_factor = self.units.get_conversion_factor(new_units)
 
         self.units = new_units
@@ -453,6 +455,8 @@
 
         """
         new_units = self._unit_repr_check_same(units)
+        if new_units == self.units:
+            return self.copy()
         conversion_factor = self.units.get_conversion_factor(new_units)
 
         new_array = self * conversion_factor
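
The intent of this change is to skip no-op conversions: when the requested
units already match, convert_to_units can hand back self and in_units a copy,
avoiding a multiply by a conversion factor of 1 over the whole array.  A
minimal sketch of the pattern, using a hypothetical wrapper class and
conversion table rather than yt's YTArray and unit registry:

    import numpy as np

    _FACTORS = {("km", "m"): 1000.0, ("m", "km"): 1e-3}   # hypothetical lookup table

    class Quantity:
        """Toy unit-carrying array, only to illustrate the short-circuit."""
        def __init__(self, data, units):
            self.data = np.asarray(data, dtype="float64")
            self.units = units

        def in_units(self, units):
            if units == self.units:
                # No-op conversion: skip the factor lookup and the multiply entirely.
                return Quantity(self.data.copy(), self.units)
            factor = _FACTORS[(self.units, units)]
            return Quantity(self.data * factor, units)

    q = Quantity([1.0, 2.0], "km")
    q.in_units("km")   # handed back as a copy without touching the values
    q.in_units("m")    # multiplied by 1000.0

As the next changeset notes, this short-circuit was subsequently backed out
because it masked some unit-equivalency handling.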


https://bitbucket.org/yt_analysis/yt/commits/3e88307f964b/
Changeset:   3e88307f964b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-27 23:00:12
Summary:     This masked some unit equivalency things.
Affected #:  1 file

diff -r ec0dea9da8519fc47d2853ca6df2a9e8e14df3ec -r 3e88307f964b193fee5ea010e1a242e9edba4a1e yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -417,8 +417,6 @@
 
         """
         new_units = self._unit_repr_check_same(units)
-        if new_units == self.units:
-            return self
         conversion_factor = self.units.get_conversion_factor(new_units)
 
         self.units = new_units
@@ -455,8 +453,6 @@
 
         """
         new_units = self._unit_repr_check_same(units)
-        if new_units == self.units:
-            return self.copy()
         conversion_factor = self.units.get_conversion_factor(new_units)
 
         new_array = self * conversion_factor


https://bitbucket.org/yt_analysis/yt/commits/3c033ed5dea1/
Changeset:   3c033ed5dea1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-27 23:04:18
Summary:     Switch to floor from rint.
Affected #:  1 file

diff -r 3e88307f964b193fee5ea010e1a242e9edba4a1e -r 3c033ed5dea1745ecbd29a59b084ea069803c87b yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -18,7 +18,7 @@
 cimport numpy as np
 import numpy as np
 from selection_routines cimport SelectorObject
-from libc.math cimport rint
+from libc.math cimport floor
 cimport selection_routines
 
 ORDER_MAX = 20
@@ -279,7 +279,7 @@
         cdef np.int64_t ind[3], level = -1
         for i in range(3):
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
-            ind[i] = <np.int64_t> (rint((ppos[i] - self.DLE[i])/dds[i]))
+            ind[i] = <np.int64_t> (floor((ppos[i] - self.DLE[i])/dds[i]))
             cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
             ipos[i] = 0
             ind32[i] = ind[i]
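
On the distinction between the two rounding modes: rint() rounds to the
nearest integer, so a particle in the upper half of a cell would be pushed
into the next cell's index, while floor() keeps the conventional left-closed
binning; floor() also differs from a plain truncating cast for arguments
below zero.  A quick Python comparison of the three behaviors (round() stands
in for C's rint here):

    import math

    for x in (3.2, 3.6, 2.9999999999999996, -0.25):
        print(x, int(x), round(x), math.floor(x))

    # x                       int(x)  round(x)  floor(x)
    # 3.2                       3        3         3
    # 3.6                       3        4         3    <- rint would misbin an interior particle
    # 2.9999999999999996        2        3         2
    # -0.25                     0        0        -1    <- floor and a plain cast disagree below zero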


https://bitbucket.org/yt_analysis/yt/commits/103131490fef/
Changeset:   103131490fef
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-06-28 18:57:28
Summary:     Merged in MatthewTurk/yt/yt-3.0 (pull request #984)

Ensure stable order for data file iteration
Affected #:  9 files

diff -r a040dfda42d9c21da5d9efe0ec795b1cd636bcd2 -r 103131490fefcf01b2a7288ad246869fe853dd47 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -717,3 +717,6 @@
 
     def _calculate_offsets(self, fields):
         pass
+
+    def __cmp__(self, other):
+        return cmp(self.filename, other.filename)

diff -r a040dfda42d9c21da5d9efe0ec795b1cd636bcd2 -r 103131490fefcf01b2a7288ad246869fe853dd47 yt/frontends/halo_catalogs/halo_catalog/io.py
--- a/yt/frontends/halo_catalogs/halo_catalog/io.py
+++ b/yt/frontends/halo_catalogs/halo_catalog/io.py
@@ -43,7 +43,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = data_file.header['num_halos']
             with h5py.File(data_file.filename, "r") as f:
                 x = f['particle_position_x'].value.astype("float64")
@@ -61,7 +61,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = data_file.header['num_halos']
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):

diff -r a040dfda42d9c21da5d9efe0ec795b1cd636bcd2 -r 103131490fefcf01b2a7288ad246869fe853dd47 yt/frontends/halo_catalogs/owls_subfind/io.py
--- a/yt/frontends/halo_catalogs/owls_subfind/io.py
+++ b/yt/frontends/halo_catalogs/owls_subfind/io.py
@@ -44,7 +44,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):
                     pcount = data_file.total_particles[ptype]
@@ -78,7 +78,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):
                     pcount = data_file.total_particles[ptype]

diff -r a040dfda42d9c21da5d9efe0ec795b1cd636bcd2 -r 103131490fefcf01b2a7288ad246869fe853dd47 yt/frontends/halo_catalogs/rockstar/io.py
--- a/yt/frontends/halo_catalogs/rockstar/io.py
+++ b/yt/frontends/halo_catalogs/rockstar/io.py
@@ -45,7 +45,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = data_file.header['num_halos']
             with open(data_file.filename, "rb") as f:
                 f.seek(data_file._position_offset, os.SEEK_SET)
@@ -65,7 +65,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = data_file.header['num_halos']
             with open(data_file.filename, "rb") as f:
                 for ptype, field_list in sorted(ptf.items()):

diff -r a040dfda42d9c21da5d9efe0ec795b1cd636bcd2 -r 103131490fefcf01b2a7288ad246869fe853dd47 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -48,7 +48,7 @@
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
         assert(len(data_files) == 1)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = self._handle['x'].size
             yield "dark_matter", (
                 self._handle['x'], self._handle['y'], self._handle['z'])
@@ -62,7 +62,7 @@
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
         assert(len(data_files) == 1)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = self._handle['x'].size
             for ptype, field_list in sorted(ptf.items()):
                 x = self._handle['x']

diff -r a040dfda42d9c21da5d9efe0ec795b1cd636bcd2 -r 103131490fefcf01b2a7288ad246869fe853dd47 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -77,7 +77,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             f = _get_h5_handle(data_file.filename)
             # This double-reads
             for ptype, field_list in sorted(ptf.items()):
@@ -93,7 +93,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             f = _get_h5_handle(data_file.filename)
             for ptype, field_list in sorted(ptf.items()):
                 g = f["/%s" % ptype]
@@ -251,7 +251,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
@@ -268,7 +268,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
@@ -498,7 +498,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
@@ -519,7 +519,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
@@ -725,7 +725,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             for ptype in ptf:
                 s = self._open_stream(data_file, (ptype, "Coordinates"))
                 c = np.frombuffer(s, dtype="float64")
@@ -738,7 +738,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             for ptype, field_list in sorted(ptf.items()):
                 s = self._open_stream(data_file, (ptype, "Coordinates"))
                 c = np.frombuffer(s, dtype="float64")

diff -r a040dfda42d9c21da5d9efe0ec795b1cd636bcd2 -r 103131490fefcf01b2a7288ad246869fe853dd47 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -108,7 +108,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             f = self.fields[data_file.filename]
             # This double-reads
             for ptype, field_list in sorted(ptf.items()):
@@ -121,7 +121,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             f = self.fields[data_file.filename]
             for ptype, field_list in sorted(ptf.items()):
                 x, y, z = (f[ptype, "particle_position_%s" % ax]

diff -r a040dfda42d9c21da5d9efe0ec795b1cd636bcd2 -r 103131490fefcf01b2a7288ad246869fe853dd47 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -18,6 +18,7 @@
 cimport numpy as np
 import numpy as np
 from selection_routines cimport SelectorObject
+from libc.math cimport floor
 cimport selection_routines
 
 ORDER_MAX = 20
@@ -278,7 +279,7 @@
         cdef np.int64_t ind[3], level = -1
         for i in range(3):
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
-            ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
+            ind[i] = <np.int64_t> (floor((ppos[i] - self.DLE[i])/dds[i]))
             cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
             ipos[i] = 0
             ind32[i] = ind[i]

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


