[yt-svn] commit/yt: 6 new changesets
commits-noreply at bitbucket.org
Fri Jul 18 05:20:51 PDT 2014
6 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/681d9a94a75a/
Changeset: 681d9a94a75a
Branch: yt-3.0
User: brittonsmith
Date: 2014-07-13 19:47:05
Summary: Adding variance to new profile objects.
Affected #: 1 file
diff -r d091dd64ed463b8feae5398375e8ab327baeb47a -r 681d9a94a75a483a36abfc6bb04244b09839d508 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -759,6 +759,8 @@
self.data_source = data_source
self.pf = data_source.pf
self.field_data = YTFieldData()
+ if weight_field is not None:
+ self.variance = YTFieldData()
self.weight_field = weight_field
self.field_units = {}
ParallelAnalysisInterface.__init__(self, comm=data_source.comm)
@@ -805,20 +807,69 @@
def _finalize_storage(self, fields, temp_storage):
# We use our main comm here
# This also will fill _field_data
- temp_storage.values = self.comm.mpi_allreduce(temp_storage.values, op="sum", dtype="float64")
- temp_storage.weight_values = self.comm.mpi_allreduce(temp_storage.weight_values, op="sum", dtype="float64")
- temp_storage.used = self.comm.mpi_allreduce(temp_storage.used, op="sum", dtype="bool")
- blank = ~temp_storage.used
- self.used = temp_storage.used
- if self.weight_field is not None:
- # This is unnecessary, but it will suppress division errors.
- temp_storage.weight_values[blank] = 1e-30
- temp_storage.values /= temp_storage.weight_values[...,None]
- self.weight = temp_storage.weight_values[...,None]
+
+ if self.weight_field is None:
+ temp_storage.values = self.comm.mpi_allreduce(temp_storage.values,
+ op="sum", dtype="float64")
+ temp_storage.used = self.comm.mpi_allreduce(temp_storage.used,
+ op="sum", dtype="bool")
+ blank = ~temp_storage.used
+ self.used = temp_storage.used
+
+ else:
+ for i, field in enumerate(fields):
+ temp_storage.qvalues[..., i][temp_storage.used] /= \
+ temp_storage.weight_values[temp_storage.used]
+ all_store = {self.comm.rank: temp_storage}
+ all_store = self.comm.par_combine_object(all_store,
+ "join", datatype="dict")
+
+ all_mean = np.zeros_like(temp_storage.mvalues)
+ all_var = np.zeros_like(temp_storage.qvalues)
+ all_weight = np.zeros_like(temp_storage.weight_values)
+ all_used = np.zeros_like(temp_storage.used, dtype="bool")
+ for p in sorted(all_store.keys()):
+ all_used += all_store[p].used
+ old_mean = all_mean.copy()
+ old_weight = all_weight.copy()
+ all_weight[all_store[p].used] += \
+ all_store[p].weight_values[all_store[p].used]
+ for i, field in enumerate(fields):
+ all_mean[..., i][all_store[p].used] = \
+ (all_mean[..., i] * old_weight +
+ all_store[p].mvalues[..., i] *
+ all_store[p].weight_values)[all_store[p].used] / \
+ all_weight[all_store[p].used]
+ all_var[..., i][all_store[p].used] = \
+ (old_weight * (all_var[..., i] +
+ (old_mean[..., i] - all_mean[..., i])**2) +
+ all_store[p].weight_values *
+ (all_store[p].qvalues[..., i] +
+ (all_store[p].mvalues[..., i] -
+ all_mean[..., i])**2))[all_store[p].used] / \
+ all_weight[all_store[p].used]
+ all_var = np.sqrt(all_var)
+ del all_store
+ self.used = all_used
+ blank = ~all_used
+
+ self.weight = all_weight
self.weight[blank] = 0.0
+
self.field_map = {}
for i, field in enumerate(fields):
- self.field_data[field] = array_like_field(self.data_source, temp_storage.values[...,i], field)
+ if self.weight_field is None:
+ self.field_data[field] = \
+ array_like_field(self.data_source,
+ temp_storage.values[...,i], field)
+ else:
+ self.field_data[field] = \
+ array_like_field(self.data_source,
+ all_mean[...,i], field)
+ self.variance[field] = \
+ array_like_field(self.data_source,
+ all_var[...,i], field)
+ self.variance[field][blank] = 0.0
self.field_data[field][blank] = 0.0
self.field_units[field] = self.field_data[field].units
if isinstance(field, tuple):
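
For reference, the reduction above implements the standard rule for combining weighted means and variances computed on independent chunks. Below is a minimal sketch of the same streaming combination, checked against a single global computation; the arrays and chunk layout are hypothetical stand-ins, not yt's actual per-rank storage:

    import numpy as np

    rng = np.random.RandomState(0)
    x = rng.normal(size=30)          # sample values
    w = rng.uniform(0.5, 2.0, 30)    # per-sample weights

    # Per-"processor" partials: (weight sum, weighted mean, weighted variance).
    chunks = []
    for xs, ws in zip(np.split(x, 3), np.split(w, 3)):
        m = np.average(xs, weights=ws)
        chunks.append((ws.sum(), m, np.average((xs - m)**2, weights=ws)))

    # Streaming combination, mirroring the loop over all_store:
    #   m12 = (m1*w1 + m2*w2) / (w1 + w2)
    #   s12**2 = (w1*(s1**2 + (m1 - m12)**2) +
    #             w2*(s2**2 + (m2 - m12)**2)) / (w1 + w2)
    w_tot = m_tot = s2_tot = 0.0
    for cw, cm, cs2 in chunks:
        old_w, old_m = w_tot, m_tot
        w_tot += cw
        m_tot = (old_m * old_w + cm * cw) / w_tot
        s2_tot = (old_w * (s2_tot + (old_m - m_tot)**2) +
                  cw * (cs2 + (cm - m_tot)**2)) / w_tot

    # Agrees with computing the whole sample at once.
    m_all = np.average(x, weights=w)
    assert np.isclose(m_tot, m_all)
    assert np.isclose(s2_tot, np.average((x - m_all)**2, weights=w))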
https://bitbucket.org/yt_analysis/yt/commits/4040bf666ead/
Changeset: 4040bf666ead
Branch: yt-3.0
User: brittonsmith
Date: 2014-07-13 20:42:34
Summary: Fixing accumulation when weight field is present.
Affected #: 1 file
diff -r 681d9a94a75a483a36abfc6bb04244b09839d508 -r 4040bf666ead4aed129dbb25ade9df5f6681cbf4 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -1341,13 +1341,27 @@
if not acc: continue
temp = obj.field_data[field]
temp = np.rollaxis(temp, axis)
+ if weight_field is not None:
+ temp_weight = obj.weight
+ temp_weight = np.rollaxis(temp_weight, axis)
if acc < 0:
temp = temp[::-1]
- temp = temp.cumsum(axis=0)
+ if weight_field is not None:
+ temp_weight = temp_weight[::-1]
+ if weight_field is None:
+ temp = temp.cumsum(axis=0)
+ else:
+ temp = (temp * temp_weight).cumsum(axis=0) / \
+ temp_weight.cumsum(axis=0)
if acc < 0:
temp = temp[::-1]
+ if weight_field is not None:
+ temp_weight = temp_weight[::-1]
temp = np.rollaxis(temp, axis)
obj.field_data[field] = temp
+ if weight_field is not None:
+ temp_weight = np.rollaxis(temp_weight, axis)
+ obj.weight = temp_weight
if units is not None:
for field, unit in units.iteritems():
field = data_source._determine_fields(field)[0]
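
The accumulation fix above replaces a plain cumulative sum with a running weighted mean whenever a weight field is present. A minimal sketch of the same logic; values and weights are hypothetical stand-ins for obj.field_data[field] and obj.weight:

    import numpy as np

    values = np.array([1.0, 3.0, 2.0, 4.0])   # binned profile values
    weights = np.array([2.0, 1.0, 1.0, 2.0])  # per-bin weight sums
    acc = 1  # the sign sets the accumulation direction, as in the code above

    temp, temp_weight = values.copy(), weights.copy()
    if acc < 0:
        temp, temp_weight = temp[::-1], temp_weight[::-1]

    # Unweighted case: temp.cumsum(). Weighted case: a running weighted mean.
    temp = (temp * temp_weight).cumsum() / temp_weight.cumsum()

    if acc < 0:
        temp = temp[::-1]

    print(temp)  # -> 1.0, 1.667, 1.75, 2.5; the last bin holds the
                 # weighted mean over the full profile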
https://bitbucket.org/yt_analysis/yt/commits/a418180dd8c1/
Changeset: a418180dd8c1
Branch: yt-3.0
User: brittonsmith
Date: 2014-07-13 21:03:10
Summary: Updating recipe.
Affected #: 1 file
diff -r 4040bf666ead4aed129dbb25ade9df5f6681cbf4 -r a418180dd8c14e2fa6d7b8b0bef03354d10fc0e5 doc/source/cookbook/profile_with_variance.py
--- a/doc/source/cookbook/profile_with_variance.py
+++ b/doc/source/cookbook/profile_with_variance.py
@@ -1,6 +1,3 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
import matplotlib.pyplot as plt
import yt
@@ -16,15 +13,16 @@
# Create a 1D profile object for profiles over radius
# and add a velocity profile.
-prof = yt.create_profile(sp, 'radius', 'velocity_magnitude',
+prof = yt.create_profile(sp, 'radius', ('gas', 'velocity_magnitude'),
units = {'radius': 'kpc'},
extrema = {'radius': ((0.1, 'kpc'), (1000.0, 'kpc'))},
weight_field='cell_mass')
# Plot the average velocity magnitude.
-plt.loglog(prof.x, prof['velocity_magnitude'], label='Mean')
+plt.loglog(prof.x, prof['gas', 'velocity_magnitude'], label='Mean')
# Plot the standard deviation of the velocity magnitude.
-plt.loglog(prof.x, prof['velocity_magnitude_std'], label='Standard Deviation')
+plt.loglog(prof.x, prof.variance['gas', 'velocity_magnitude'],
+ label='Standard Deviation')
plt.xlabel('r [kpc]')
plt.ylabel('v [cm/s]')
plt.legend()
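
With this change the standard deviation is read from the profile's variance attribute rather than from a separate '_std' field. A condensed usage sketch; the dataset path is illustrative (any dataset with gas fields would do):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    sp = ds.sphere("max", (1000.0, "kpc"))
    prof = yt.create_profile(sp, 'radius', ('gas', 'velocity_magnitude'),
                             weight_field='cell_mass')

    mean = prof['gas', 'velocity_magnitude']            # weighted mean per bin
    sigma = prof.variance['gas', 'velocity_magnitude']  # standard deviation per bin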
https://bitbucket.org/yt_analysis/yt/commits/1e0a11e15958/
Changeset: 1e0a11e15958
Branch: yt-3.0
User: brittonsmith
Date: 2014-07-14 18:24:01
Summary: Adding comments to clarify the math being used.
Affected #: 1 file
diff -r a418180dd8c14e2fa6d7b8b0bef03354d10fc0e5 -r 1e0a11e159582ba0c073711b10ac6a54fe35af04 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -809,10 +809,12 @@
# This also will fill _field_data
if self.weight_field is None:
- temp_storage.values = self.comm.mpi_allreduce(temp_storage.values,
- op="sum", dtype="float64")
- temp_storage.used = self.comm.mpi_allreduce(temp_storage.used,
- op="sum", dtype="bool")
+ temp_storage.values = \
+ self.comm.mpi_allreduce(temp_storage.values,
+ op="sum", dtype="float64")
+ temp_storage.used = \
+ self.comm.mpi_allreduce(temp_storage.used,
+ op="sum", dtype="bool")
blank = ~temp_storage.used
self.used = temp_storage.used
@@ -828,6 +830,14 @@
all_var = np.zeros_like(temp_storage.qvalues)
all_weight = np.zeros_like(temp_storage.weight_values)
all_used = np.zeros_like(temp_storage.used, dtype="bool")
+
+ # Combine the weighted mean and variance from each processor.
+ # For two samples with total weight, mean, and variance
+ # given by w, m, and s**2, their combined mean and variance are:
+ # m12 = (m1 * w1 + m2 * w2) / (w1 + w2)
+ # s12**2 = (w1 * (s1**2 + (m1 - m12)**2) +
+ #          w2 * (s2**2 + (m2 - m12)**2)) / (w1 + w2)
+ # Here, the mvalues are m and the qvalues are s**2.
for p in sorted(all_store.keys()):
all_used += all_store[p].used
old_mean = all_mean.copy()
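
In standard notation, the combination rule documented here (with the weights written explicitly, as in the code) is:

    m_{12} = \frac{w_1 m_1 + w_2 m_2}{w_1 + w_2}, \qquad
    s_{12}^2 = \frac{w_1 \left( s_1^2 + (m_1 - m_{12})^2 \right)
             + w_2 \left( s_2^2 + (m_2 - m_{12})^2 \right)}{w_1 + w_2}

Each chunk contributes its internal variance plus the squared offset of its mean from the combined mean, weighted by its total weight.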
https://bitbucket.org/yt_analysis/yt/commits/f95036a7c39b/
Changeset: f95036a7c39b
Branch: yt-3.0
User: brittonsmith
Date: 2014-07-15 19:27:03
Summary: Simplifying profile finalize to only use one assembly method.
Affected #: 1 file
diff -r 1e0a11e159582ba0c073711b10ac6a54fe35af04 -r f95036a7c39b867969d2d10406c51427aebabb8c yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -808,71 +808,69 @@
# We use our main comm here
# This also will fill _field_data
- if self.weight_field is None:
- temp_storage.values = \
- self.comm.mpi_allreduce(temp_storage.values,
- op="sum", dtype="float64")
- temp_storage.used = \
- self.comm.mpi_allreduce(temp_storage.used,
- op="sum", dtype="bool")
- blank = ~temp_storage.used
- self.used = temp_storage.used
+ for i, field in enumerate(fields):
+ # q values are returned as q * weight but we want just q
+ temp_storage.qvalues[..., i][temp_storage.used] /= \
+ temp_storage.weight_values[temp_storage.used]
- else:
+ # get the profile data from all procs
+ all_store = {self.comm.rank: temp_storage}
+ all_store = self.comm.par_combine_object(all_store,
+ "join", datatype="dict")
+
+ all_val = np.zeros_like(temp_storage.values)
+ all_mean = np.zeros_like(temp_storage.mvalues)
+ all_var = np.zeros_like(temp_storage.qvalues)
+ all_weight = np.zeros_like(temp_storage.weight_values)
+ all_used = np.zeros_like(temp_storage.used, dtype="bool")
+
+ # Combine the weighted mean and variance from each processor.
+ # For two samples with total weight, mean, and variance
+ # given by w, m, and s**2, their combined mean and variance are:
+ # m12 = (m1 * w1 + m2 * w2) / (w1 + w2)
+ # s12**2 = (w1 * (s1**2 + (m1 - m12)**2) +
+ #          w2 * (s2**2 + (m2 - m12)**2)) / (w1 + w2)
+ # Here, the mvalues are m and the qvalues are s**2.
+ for p in sorted(all_store.keys()):
+ all_used += all_store[p].used
+ old_mean = all_mean.copy()
+ old_weight = all_weight.copy()
+ all_weight[all_store[p].used] += \
+ all_store[p].weight_values[all_store[p].used]
for i, field in enumerate(fields):
- temp_storage.qvalues[..., i][temp_storage.used] /= \
- temp_storage.weight_values[temp_storage.used]
- all_store = {self.comm.rank: temp_storage}
- all_store = self.comm.par_combine_object(all_store,
- "join", datatype="dict")
+ all_val[..., i][all_store[p].used] += \
+ all_store[p].values[..., i][all_store[p].used]
- all_mean = np.zeros_like(temp_storage.mvalues)
- all_var = np.zeros_like(temp_storage.qvalues)
- all_weight = np.zeros_like(temp_storage.weight_values)
- all_used = np.zeros_like(temp_storage.used, dtype="bool")
-
- # Combine the weighted mean and variance from each processor.
- # For two samples with total weight, mean, and variance
- # given by w, m, and s**2, their combined mean and variance are:
- # m12 = (m1 * w1 + m2 * w2) / (w1 + w2)
- # s12**2 = (w1 * (s1**2 + (m1 - m12)**2) +
- #          w2 * (s2**2 + (m2 - m12)**2)) / (w1 + w2)
- # Here, the mvalues are m and the qvalues are s**2.
- for p in sorted(all_store.keys()):
- all_used += all_store[p].used
- old_mean = all_mean.copy()
- old_weight = all_weight.copy()
- all_weight[all_store[p].used] += \
- all_store[p].weight_values[all_store[p].used]
- for i, field in enumerate(fields):
- all_mean[..., i][all_store[p].used] = \
- (all_mean[..., i] * old_weight +
- all_store[p].mvalues[..., i] *
- all_store[p].weight_values)[all_store[p].used] / \
- all_weight[all_store[p].used]
- all_var[..., i][all_store[p].used] = \
- (old_weight * (all_var[..., i] +
- (old_mean[..., i] - all_mean[..., i])**2) +
- all_store[p].weight_values *
- (all_store[p].qvalues[..., i] +
- (all_store[p].mvalues[..., i] -
- all_mean[..., i])**2))[all_store[p].used] / \
- all_weight[all_store[p].used]
- all_var = np.sqrt(all_var)
- del all_store
- self.used = all_used
- blank = ~all_used
-
- self.weight = all_weight
- self.weight[blank] = 0.0
+ all_mean[..., i][all_store[p].used] = \
+ (all_mean[..., i] * old_weight +
+ all_store[p].mvalues[..., i] *
+ all_store[p].weight_values)[all_store[p].used] / \
+ all_weight[all_store[p].used]
+
+ all_var[..., i][all_store[p].used] = \
+ (old_weight * (all_var[..., i] +
+ (old_mean[..., i] - all_mean[..., i])**2) +
+ all_store[p].weight_values *
+ (all_store[p].qvalues[..., i] +
+ (all_store[p].mvalues[..., i] -
+ all_mean[..., i])**2))[all_store[p].used] / \
+ all_weight[all_store[p].used]
+
+ all_var = np.sqrt(all_var)
+ del all_store
+ self.used = all_used
+ blank = ~all_used
+
+ self.weight = all_weight
+ self.weight[blank] = 0.0
self.field_map = {}
for i, field in enumerate(fields):
if self.weight_field is None:
self.field_data[field] = \
array_like_field(self.data_source,
- temp_storage.values[...,i], field)
- else:
+ all_val[...,i], field)
+ else:
self.field_data[field] = \
array_like_field(self.data_source,
all_mean[...,i], field)
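
One subtlety the unified path keeps: the per-bin q values arrive premultiplied by the weight, so the first loop divides that factor out before the combination. A sketch of just that normalization step, on hypothetical arrays shaped like the real storage:

    import numpy as np

    used = np.array([True, True, False])       # bins that received data
    weight_values = np.array([2.0, 4.0, 0.0])  # per-bin weight sums
    qvalues = np.array([[1.0], [8.0], [0.0]])  # per-field, stored as q * weight

    # Recover q itself in the used bins, as in _finalize_storage.
    for i in range(qvalues.shape[-1]):
        qvalues[..., i][used] /= weight_values[used]

    print(qvalues)  # [[0.5], [2.0], [0.0]]; untouched bins stay zero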
https://bitbucket.org/yt_analysis/yt/commits/39a936c40774/
Changeset: 39a936c40774
Branch: yt-3.0
User: MatthewTurk
Date: 2014-07-18 14:20:45
Summary: Merged in brittonsmith/yt/yt-3.0 (pull request #1016)
Adding variance to new profiles.
Affected #: 2 files
diff -r a7017f15e008ce7aa83f3cb3db9f16532d63fdd4 -r 39a936c40774b7ed8646255a6cbcf05a361f02aa doc/source/cookbook/profile_with_variance.py
--- a/doc/source/cookbook/profile_with_variance.py
+++ b/doc/source/cookbook/profile_with_variance.py
@@ -1,6 +1,3 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
import matplotlib.pyplot as plt
import yt
@@ -16,15 +13,16 @@
# Create a 1D profile object for profiles over radius
# and add a velocity profile.
-prof = yt.create_profile(sp, 'radius', 'velocity_magnitude',
+prof = yt.create_profile(sp, 'radius', ('gas', 'velocity_magnitude'),
units = {'radius': 'kpc'},
extrema = {'radius': ((0.1, 'kpc'), (1000.0, 'kpc'))},
weight_field='cell_mass')
# Plot the average velocity magnitude.
-plt.loglog(prof.x, prof['velocity_magnitude'], label='Mean')
+plt.loglog(prof.x, prof['gas', 'velocity_magnitude'], label='Mean')
# Plot the standard deviation of the velocity magnitude.
-plt.loglog(prof.x, prof['velocity_magnitude_std'], label='Standard Deviation')
+plt.loglog(prof.x, prof.variance['gas', 'velocity_magnitude'],
+ label='Standard Deviation')
plt.xlabel('r [kpc]')
plt.ylabel('v [cm/s]')
plt.legend()
diff -r a7017f15e008ce7aa83f3cb3db9f16532d63fdd4 -r 39a936c40774b7ed8646255a6cbcf05a361f02aa yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -759,6 +759,8 @@
self.data_source = data_source
self.pf = data_source.pf
self.field_data = YTFieldData()
+ if weight_field is not None:
+ self.variance = YTFieldData()
self.weight_field = weight_field
self.field_units = {}
ParallelAnalysisInterface.__init__(self, comm=data_source.comm)
@@ -805,20 +807,77 @@
def _finalize_storage(self, fields, temp_storage):
# We use our main comm here
# This also will fill _field_data
- temp_storage.values = self.comm.mpi_allreduce(temp_storage.values, op="sum", dtype="float64")
- temp_storage.weight_values = self.comm.mpi_allreduce(temp_storage.weight_values, op="sum", dtype="float64")
- temp_storage.used = self.comm.mpi_allreduce(temp_storage.used, op="sum", dtype="bool")
- blank = ~temp_storage.used
- self.used = temp_storage.used
- if self.weight_field is not None:
- # This is unnecessary, but it will suppress division errors.
- temp_storage.weight_values[blank] = 1e-30
- temp_storage.values /= temp_storage.weight_values[...,None]
- self.weight = temp_storage.weight_values[...,None]
- self.weight[blank] = 0.0
+
+ for i, field in enumerate(fields):
+ # q values are returned as q * weight but we want just q
+ temp_storage.qvalues[..., i][temp_storage.used] /= \
+ temp_storage.weight_values[temp_storage.used]
+
+ # get the profile data from all procs
+ all_store = {self.comm.rank: temp_storage}
+ all_store = self.comm.par_combine_object(all_store,
+ "join", datatype="dict")
+
+ all_val = np.zeros_like(temp_storage.values)
+ all_mean = np.zeros_like(temp_storage.mvalues)
+ all_var = np.zeros_like(temp_storage.qvalues)
+ all_weight = np.zeros_like(temp_storage.weight_values)
+ all_used = np.zeros_like(temp_storage.used, dtype="bool")
+
+ # Combine the weighted mean and variance from each processor.
+ # For two samples with total weight, mean, and variance
+ # given by w, m, and s**2, their combined mean and variance are:
+ # m12 = (m1 * w1 + m2 * w2) / (w1 + w2)
+ # s12**2 = (w1 * (s1**2 + (m1 - m12)**2) +
+ #          w2 * (s2**2 + (m2 - m12)**2)) / (w1 + w2)
+ # Here, the mvalues are m and the qvalues are s**2.
+ for p in sorted(all_store.keys()):
+ all_used += all_store[p].used
+ old_mean = all_mean.copy()
+ old_weight = all_weight.copy()
+ all_weight[all_store[p].used] += \
+ all_store[p].weight_values[all_store[p].used]
+ for i, field in enumerate(fields):
+ all_val[..., i][all_store[p].used] += \
+ all_store[p].values[..., i][all_store[p].used]
+
+ all_mean[..., i][all_store[p].used] = \
+ (all_mean[..., i] * old_weight +
+ all_store[p].mvalues[..., i] *
+ all_store[p].weight_values)[all_store[p].used] / \
+ all_weight[all_store[p].used]
+
+ all_var[..., i][all_store[p].used] = \
+ (old_weight * (all_var[..., i] +
+ (old_mean[..., i] - all_mean[..., i])**2) +
+ all_store[p].weight_values *
+ (all_store[p].qvalues[..., i] +
+ (all_store[p].mvalues[..., i] -
+ all_mean[..., i])**2))[all_store[p].used] / \
+ all_weight[all_store[p].used]
+
+ all_var = np.sqrt(all_var)
+ del all_store
+ self.used = all_used
+ blank = ~all_used
+
+ self.weight = all_weight
+ self.weight[blank] = 0.0
+
self.field_map = {}
for i, field in enumerate(fields):
- self.field_data[field] = array_like_field(self.data_source, temp_storage.values[...,i], field)
+ if self.weight_field is None:
+ self.field_data[field] = \
+ array_like_field(self.data_source,
+ all_val[...,i], field)
+ else:
+ self.field_data[field] = \
+ array_like_field(self.data_source,
+ all_mean[...,i], field)
+ self.variance[field] = \
+ array_like_field(self.data_source,
+ all_var[...,i], field)
+ self.variance[field][blank] = 0.0
self.field_data[field][blank] = 0.0
self.field_units[field] = self.field_data[field].units
if isinstance(field, tuple):
@@ -1305,13 +1364,27 @@
if not acc: continue
temp = obj.field_data[field]
temp = np.rollaxis(temp, axis)
+ if weight_field is not None:
+ temp_weight = obj.weight
+ temp_weight = np.rollaxis(temp_weight, axis)
if acc < 0:
temp = temp[::-1]
- temp = temp.cumsum(axis=0)
+ if weight_field is not None:
+ temp_weight = temp_weight[::-1]
+ if weight_field is None:
+ temp = temp.cumsum(axis=0)
+ else:
+ temp = (temp * temp_weight).cumsum(axis=0) / \
+ temp_weight.cumsum(axis=0)
if acc < 0:
temp = temp[::-1]
+ if weight_field is not None:
+ temp_weight = temp_weight[::-1]
temp = np.rollaxis(temp, axis)
obj.field_data[field] = temp
+ if weight_field is not None:
+ temp_weight = np.rollaxis(temp_weight, axis)
+ obj.weight = temp_weight
if units is not None:
for field, unit in units.iteritems():
field = data_source._determine_fields(field)[0]
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled and are the addressed
recipient of this email.