[yt-svn] commit/yt: 3 new changesets

commits-noreply at bitbucket.org
Thu Aug 25 11:23:29 PDT 2016


3 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/85395039d6ff/
Changeset:   85395039d6ff
Branch:      yt
User:        ngoldbaum
Date:        2016-08-24 21:20:40+00:00
Summary:     Make it so fake_amr_ds can create a dataset with randomly positioned particles
Affected #:  1 file

diff -r 074304ddcca58c47e5ad244ff6f93578704b7745 -r 85395039d6ff6f6c2f4f934d6b8bfbfb8ba52b87 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -229,7 +229,7 @@
                    ( (-90.0, -180.0, 0.0), (90.0, 180.0, 1000.0) ), # latlondep
 }
 
-def fake_amr_ds(fields = ("Density",), geometry = "cartesian"):
+def fake_amr_ds(fields = ("Density",), geometry = "cartesian", particles=0):
     from yt.frontends.stream.api import load_amr_grids
     LE, RE = _geom_transforms[geometry]
     LE = np.array(LE)
@@ -245,6 +245,16 @@
                      dimensions = dims)
         for f in fields:
             gdata[f] = np.random.random(dims)
+        if particles:
+            for i, f in enumerate('particle_position_%s' % ax for ax in 'xyz'):
+                pdata = np.random.random(particles)
+                pdata *= (right_edge[i] - left_edge[i])
+                pdata += left_edge[i]
+                gdata['io', f] = (pdata, 'code_length')
+            for f in ('particle_velocity_%s' % ax for ax in 'xyz'):
+                gdata['io', f] = (np.random.random(particles) - 0.5, 'cm/s')
+            gdata['io', 'particle_mass'] = (np.random.random(particles), 'g')
+            gdata['number_of_particles'] = particles
         data.append(gdata)
     bbox = np.array([LE, RE]).T
     return load_amr_grids(data, [32, 32, 32], geometry=geometry, bbox=bbox)
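
For context, the new keyword can be exercised like this (a minimal
sketch; the particle field names and units follow the diff above):

>>> from yt.testing import fake_amr_ds
>>> ds = fake_amr_ds(fields=("Density",), particles=20)
>>> ad = ds.all_data()
>>> ad["io", "particle_mass"]          # 20 random masses, in g
>>> ad["io", "particle_position_x"]    # positions, in code_length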


https://bitbucket.org/yt_analysis/yt/commits/134965feaf3b/
Changeset:   134965feaf3b
Branch:      yt
User:        ngoldbaum
Date:        2016-08-24 21:21:24+00:00
Summary:     fix bug in mean() numpy-like op
Affected #:  1 file

diff -r 85395039d6ff6f6c2f4f934d6b8bfbfb8ba52b87 -r 134965feaf3b900799d6899d24921c60f19335b8 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -913,10 +913,7 @@
         if axis in self.ds.coordinates.axis_name:
             r = self.ds.proj(field, axis, data_source=self, weight_field=weight)
         elif axis is None:
-            if weight is None:
-                r = self.quantities.total_quantity(field)
-            else:
-                r = self.quantities.weighted_average_quantity(field, weight)
+            r = self.quantities.weighted_average_quantity(field, weight_field)
         else:
             raise NotImplementedError("Unknown axis %s" % axis)
         return r
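
The removed branch returned total_quantity (a sum, not an average)
whenever weight was None. A minimal check of the corrected behavior,
assuming the follow-up changeset below is also applied (the
weight_field name used here is only defined there):

>>> from yt.testing import fake_amr_ds
>>> ds = fake_amr_ds(fields=("density",))
>>> ad = ds.all_data()
>>> w1 = ad.quantities.weighted_average_quantity("density", "ones")
>>> assert ad.mean("density") == w1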


https://bitbucket.org/yt_analysis/yt/commits/c941aa457bc3/
Changeset:   c941aa457bc3
Branch:      yt
User:        ngoldbaum
Date:        2016-08-25 16:15:04+00:00
Summary:     Make numpy-like operations behave more nicely when passed particle fields
Affected #:  2 files

diff -r 134965feaf3b900799d6899d24921c60f19335b8 -r c941aa457bc3c9e43f3ab0ecafa82d2924a8e204 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -93,6 +93,17 @@
         return tr
     return save_state
 
+def sanitize_weight_field(ds, field, weight):
+    field_object = ds._get_field_info(field)
+    if weight is None:
+        if field_object.particle_type is True:
+            weight_field = (field_object.name[0], 'particle_ones')
+        else:
+            weight_field = ('index', 'ones')
+    else:
+        weight_field = weight
+    return weight_field
+
 class RegisteredDataContainer(type):
     def __init__(cls, name, b, d):
         type.__init__(cls, name, b, d)
@@ -451,7 +462,7 @@
 
         Parameters
         ----------
-        fields : list of strings or tuples, default None
+        fields : list of string or tuple field names, default None
             If this is supplied, it is the list of fields to be exported into
             the data frame.  If not supplied, whatever fields presently exist
             will be used.
@@ -493,7 +504,7 @@
             The name of the file to be written.  If None, the name 
             will be a combination of the original dataset and the type 
             of data container.
-        fields : list of strings or tuples, optional
+        fields : list of string or tuple field names, optional
             If this is supplied, it is the list of fields to be saved to
             disk.  If not supplied, all the fields that have been queried
             will be saved.
@@ -623,7 +634,7 @@
 
         Parameters
         ----------
-        field : string or tuple of strings
+        field : string or tuple field name
             The field to maximize.
         axis : string or list of strings, optional
             If supplied, the fields to sample along; if not supplied, defaults
@@ -663,7 +674,7 @@
 
         Parameters
         ----------
-        field : string or tuple of strings
+        field : string or tuple field name
             The field to minimize.
         axis : string or list of strings, optional
             If supplied, the fields to sample along; if not supplied, defaults
@@ -714,7 +725,7 @@
 
         Parameters
         ----------
-        field : string or tuple of strings
+        field : string or tuple field name
             The field to maximize.
         axis : string, optional
             If supplied, the axis to project the maximum along.
@@ -753,7 +764,7 @@
 
         Parameters
         ----------
-        field : string or tuple of strings
+        field : string or tuple field name
             The field to minimize.
         axis : string, optional
             If supplied, the axis to compute the minimum along.
@@ -784,7 +795,25 @@
             raise NotImplementedError("Unknown axis %s" % axis)
 
     def std(self, field, weight=None):
-        raise NotImplementedError
+        """Compute the variance of a field.
+
+        This will, in a parallel-ware fashion, compute the variance of
+        the given field.
+
+        Parameters
+        ----------
+        field : string or tuple field name
+            The field to calculate the variance of
+        weight : string or tuple field name
+            The field to weight the variance calculation by. Defaults to
+            unweighted if unset.
+
+        Returns
+        -------
+        Scalar
+        """
+        weight_field = sanitize_weight_field(self.ds, field, weight)
+        return self.quantities.weighted_variance(field, weight_field)[0]
 
     def ptp(self, field):
         r"""Compute the range of values (maximum - minimum) of a field.
@@ -794,7 +823,7 @@
 
         Parameters
         ----------
-        field : string or tuple of strings
+        field : string or tuple field name
             The field to average.
 
         Returns
@@ -881,18 +910,19 @@
                    fractional, deposition)
         return p
 
-    def mean(self, field, axis=None, weight='ones'):
+    def mean(self, field, axis=None, weight=None):
         r"""Compute the mean of a field, optionally along an axis, with a
         weight.
 
         This will, in a parallel-aware fashion, compute the mean of the
         given field.  If an axis is supplied, it will return a projection,
-        where the weight is also supplied.  By default the weight is "ones",
-        resulting in a strict average.
+        where the weight is also supplied.  By default the weight field will be
+        "ones" or "particle_ones", depending on the field being averaged,
+        resulting in an unweighted average.
 
         Parameters
         ----------
-        field : string or tuple of strings
+        field : string or tuple field name
             The field to average.
         axis : string, optional
             If supplied, the axis to compute the mean along (i.e., to project
@@ -910,8 +940,10 @@
         >>> avg_rho = reg.mean("density", weight="cell_volume")
         >>> rho_weighted_T = reg.mean("temperature", axis="y", weight="density")
         """
+        weight_field = sanitize_weight_field(self.ds, field, weight)
         if axis in self.ds.coordinates.axis_name:
-            r = self.ds.proj(field, axis, data_source=self, weight_field=weight)
+            r = self.ds.proj(field, axis, data_source=self,
+                             weight_field=weight_field)
         elif axis is None:
             r = self.quantities.weighted_average_quantity(field, weight_field)
         else:
@@ -928,7 +960,7 @@
 
         Parameters
         ----------
-        field : string or tuple of strings
+        field : string or tuple field name
             The field to sum.
         axis : string, optional
             If supplied, the axis to sum along.
@@ -955,15 +987,17 @@
             raise NotImplementedError("Unknown axis %s" % axis)
         return r
 
-    def integrate(self, field, axis=None):
+    def integrate(self, field, weight=None, axis=None):
         r"""Compute the integral (projection) of a field along an axis.
 
         This projects a field along an axis.
 
         Parameters
         ----------
-        field : string or tuple of strings
+        field : string or tuple field name
             The field to project.
+        weight : string or tuple field name
+            The field to weight the projection by.
         axis : string
             The axis to project along.
 
@@ -976,8 +1010,13 @@
 
         >>> column_density = reg.integrate("density", axis="z")
         """
+        if weight is not None:
+            weight_field = sanitize_weight_field(self.ds, field, weight)
+        else:
+            weight_field = None
         if axis in self.ds.coordinates.axis_name:
-            r = self.ds.proj(field, axis, data_source=self)
+            r = self.ds.proj(field, axis, data_source=self,
+                             weight_field=weight_field)
         else:
             raise NotImplementedError("Unknown axis %s" % axis)
         return r
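
Taken together, the numpy-like ops now pick a sensible default weight
per field type via sanitize_weight_field. A short sketch of the new
behavior (field names as in the diffs; fake_random_ds already accepts
the particles keyword the tests below rely on):

>>> from yt.testing import fake_random_ds
>>> ds = fake_random_ds(32, fields=("density", "temperature"),
...                     particles=20)
>>> ad = ds.all_data()
>>> ad.mean("density")        # weighted by ('index', 'ones')
>>> ad.mean("particle_mass")  # weighted by (ptype, 'particle_ones')
>>> ad.std("density")         # implemented now, rather than raising
>>> ad.integrate("density", weight="temperature", axis="z")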

diff -r 134965feaf3b900799d6899d24921c60f19335b8 -r c941aa457bc3c9e43f3ab0ecafa82d2924a8e204 yt/data_objects/tests/test_numpy_ops.py
--- a/yt/data_objects/tests/test_numpy_ops.py
+++ b/yt/data_objects/tests/test_numpy_ops.py
@@ -10,9 +10,10 @@
 def test_mean_sum_integrate():
     for nprocs in [-1, 1, 2, 16]:
         if nprocs == -1:
-            ds = fake_amr_ds(fields=("density",))
+            ds = fake_amr_ds(fields=("density",), particles=20)
         else:
-            ds = fake_random_ds(32, nprocs=nprocs, fields=("density",))
+            ds = fake_random_ds(32, nprocs=nprocs, fields=("density",),
+                                particles=20)
         ad = ds.all_data()
 
         # Sums
@@ -20,80 +21,109 @@
 
         q1 = ad.quantities.total_quantity('density')
 
-        yield assert_equal, q, q1
+        assert_equal(q, q1)
+
+        q = ad.sum('particle_ones')
+
+        q1 = ad.quantities.total_quantity('particle_ones')
+
+        assert_equal(q, q1)
 
         # Weighted Averages
         w = ad.mean("density")
 
         w1 = ad.quantities.weighted_average_quantity('density', 'ones')
 
-        yield assert_equal, w, w1
+        assert_equal(w, w1)
 
         w = ad.mean("density", weight="density")
 
         w1 = ad.quantities.weighted_average_quantity('density', 'density')
 
-        yield assert_equal, w, w1
+        assert_equal(w, w1)
+
+        w = ad.mean('particle_mass')
+
+        w1 = ad.quantities.weighted_average_quantity(
+            'particle_mass', 'particle_ones')
+
+        assert_equal(w, w1)
+
+        w = ad.mean('particle_mass', weight='particle_mass')
+
+        w1 = ad.quantities.weighted_average_quantity(
+            'particle_mass', 'particle_mass')
+
+        assert_equal(w, w1)
 
         # Projections
         p = ad.sum('density', axis=0)
 
         p1 = ds.proj('density', 0, data_source=ad, method="sum")
 
-        yield assert_equal, p['density'], p1['density']
+        assert_equal(p['density'], p1['density'])
 
         # Check by axis-name
         p = ad.sum('density', axis='x')
 
-        yield assert_equal, p['density'], p1['density']
+        assert_equal(p['density'], p1['density'])
 
         # Now we check proper projections
         p = ad.integrate("density", axis=0)
         p1 = ds.proj("density", 0, data_source=ad)
 
-        yield assert_equal, p['density'], p1['density']
+        assert_equal(p['density'], p1['density'])
 
         # Check by axis-name
         p = ad.integrate('density', axis='x')
 
-        yield assert_equal, p['density'], p1['density']
+        assert_equal(p['density'], p1['density'])
 
 def test_min_max():
     for nprocs in [-1, 1, 2, 16]:
         if nprocs == -1:
-            ds = fake_amr_ds(fields=("density","temperature"))
+            ds = fake_amr_ds(fields=("density","temperature"), particles=20)
         else:
             ds = fake_random_ds(32, nprocs=nprocs,
-                fields=("density","temperature"))
+                fields=("density","temperature"), particles=20)
 
         ad = ds.all_data()
 
         q = ad.min("density").v
-        yield assert_equal, q, ad["density"].min()
+        assert_equal(q, ad["density"].min())
 
         q = ad.max("density").v
-        yield assert_equal, q, ad["density"].max()
+        assert_equal(q, ad["density"].max())
+
+        q = ad.min('particle_mass').v
+        assert_equal(q, ad['particle_mass'].min())
+
+        q = ad.max('particle_mass').v
+        assert_equal(q, ad['particle_mass'].max())
 
         ptp = ad.ptp("density").v
-        yield assert_equal, ptp, ad["density"].max() - ad["density"].min()
+        assert_equal(ptp, ad["density"].max() - ad["density"].min())
+
+        ptp = ad.ptp("particle_mass").v
+        assert_equal(ptp, ad["particle_mass"].max() - ad["particle_mass"].min())
 
         p = ad.max("density", axis=1)
         p1 = ds.proj("density", 1, data_source=ad, method="mip")
-        yield assert_equal, p["density"], p1["density"]
+        assert_equal(p["density"], p1["density"])
 
         p = ad.max("density", axis="y")
         p1 = ds.proj("density", 1, data_source=ad, method="mip")
-        yield assert_equal, p["density"], p1["density"]
+        assert_equal(p["density"], p1["density"])
 
         # Test that we can get multiple in a single pass
 
         qrho, qtemp = ad.max(["density", "temperature"])
-        yield assert_equal, qrho, ad["density"].max()
-        yield assert_equal, qtemp, ad["temperature"].max()
+        assert_equal(qrho, ad["density"].max())
+        assert_equal(qtemp, ad["temperature"].max())
 
         qrho, qtemp = ad.min(["density", "temperature"])
-        yield assert_equal, qrho, ad["density"].min()
-        yield assert_equal, qtemp, ad["temperature"].min()
+        assert_equal(qrho, ad["density"].min())
+        assert_equal(qtemp, ad["temperature"].min())
 
 def test_argmin():
     for nprocs in [-1, 1, 2, 16]:
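
The converted tests run as ordinary functions rather than nose yield
generators, so with a nose-era checkout they can still be invoked the
usual way (hypothetical invocation; adjust the path to your checkout):

    nosetests yt/data_objects/tests/test_numpy_ops.py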

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


