[yt-svn] commit/yt: 28 new changesets
commits-noreply at bitbucket.org
commits-noreply at bitbucket.org
Sun Dec 11 17:25:03 PST 2016
28 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/caa2770b901f/
Changeset: caa2770b901f
Branch: stable
User: atmyers
Date: 2016-10-05 17:26:53+00:00
Summary: fixing docstring indentation level
Affected #: 1 file
diff -r 93abddc95841084a2d64c533d90961881fa428fb -r caa2770b901f31d99638bb51760bed0247b60836 yt/utilities/lib/primitives.pyx
--- a/yt/utilities/lib/primitives.pyx
+++ b/yt/utilities/lib/primitives.pyx
@@ -17,7 +17,14 @@
@cython.wraparound(False)
@cython.cdivision(True)
cdef np.int64_t ray_bbox_intersect(Ray* ray, const BBox bbox) nogil:
-# https://tavianator.com/fast-branchless-raybounding-box-intersections/
+ '''
+
+ This returns an integer flag that indicates whether a ray and a bounding
+ box intersect. It does not modify either the ray or the box.
+
+ '''
+
+ # https://tavianator.com/fast-branchless-raybounding-box-intersections/
cdef np.float64_t tmin = -INF
cdef np.float64_t tmax = INF
@@ -38,7 +45,17 @@
cdef np.int64_t ray_triangle_intersect(const void* primitives,
const np.int64_t item,
Ray* ray) nogil:
-# https://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm
+ '''
+
+ This returns an integer flag that indicates whether a triangle is the
+ closest hit for the ray so far. If it is, the ray is updated to store the
+ current triangle index and the distance to the first hit. The triangle used
+ is the one indexed by "item" in the array of primitives.
+
+
+ '''
+
+ # https://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm
cdef Triangle tri = (<Triangle*> primitives)[item]
@@ -87,6 +104,13 @@
cdef void triangle_centroid(const void *primitives,
const np.int64_t item,
np.float64_t[3] centroid) nogil:
+ '''
+
+ This computes the centroid of the input triangle. The triangle used
+ is the one indexed by "item" in the array of primitives. The result
+ will be stored in the numpy array passed in as "centroid".
+
+ '''
cdef Triangle tri = (<Triangle*> primitives)[item]
cdef np.int64_t i
@@ -100,7 +124,14 @@
cdef void triangle_bbox(const void *primitives,
const np.int64_t item,
BBox* bbox) nogil:
-
+ '''
+
+ This computes the bounding box of the input triangle. The triangle used
+ is the one indexed by "item" in the array of primitives. The result
+ will be stored in the input BBox.
+
+ '''
+
cdef Triangle tri = (<Triangle*> primitives)[item]
cdef np.int64_t i
for i in range(3):
@@ -115,17 +146,26 @@
const cython.floating u,
const cython.floating v,
cython.floating[3] S) nogil:
-
- cdef int i
- for i in range(3):
- S[i] = 0.25*(1.0 - u)*(1.0 - v)*(-u - v - 1)*verts[0][i] + \
- 0.25*(1.0 + u)*(1.0 - v)*( u - v - 1)*verts[1][i] + \
- 0.25*(1.0 + u)*(1.0 + v)*( u + v - 1)*verts[2][i] + \
- 0.25*(1.0 - u)*(1.0 + v)*(-u + v - 1)*verts[3][i] + \
- 0.5*(1 - u)*(1 - v*v)*verts[4][i] + \
- 0.5*(1 - u*u)*(1 - v)*verts[5][i] + \
- 0.5*(1 + u)*(1 - v*v)*verts[6][i] + \
- 0.5*(1 - u*u)*(1 + v)*verts[7][i]
+ '''
+
+ This function is a parametric representation of the surface of a bi-quadratic
+ patch. The inputs are the eight nodes that define a face of a 20-node hex element,
+ and two parameters u and v that vary from -1 to 1 and tell you where you are on
+ the surface of the patch. The output is the array 'S' that stores the physical
+ (x, y, z) position of the corresponding point on the patch. This function is needed
+ to compute the intersection of rays and bi-quadratic patches.
+
+ '''
+ cdef int i
+ for i in range(3):
+ S[i] = 0.25*(1.0 - u)*(1.0 - v)*(-u - v - 1)*verts[0][i] + \
+ 0.25*(1.0 + u)*(1.0 - v)*( u - v - 1)*verts[1][i] + \
+ 0.25*(1.0 + u)*(1.0 + v)*( u + v - 1)*verts[2][i] + \
+ 0.25*(1.0 - u)*(1.0 + v)*(-u + v - 1)*verts[3][i] + \
+ 0.5*(1 - u)*(1 - v*v)*verts[4][i] + \
+ 0.5*(1 - u*u)*(1 - v)*verts[5][i] + \
+ 0.5*(1 + u)*(1 - v*v)*verts[6][i] + \
+ 0.5*(1 - u*u)*(1 + v)*verts[7][i]
@cython.boundscheck(False)
@@ -134,15 +174,20 @@
cdef void patchSurfaceDerivU(const cython.floating[8][3] verts,
const cython.floating u,
const cython.floating v,
- cython.floating[3] Su) nogil:
- cdef int i
- for i in range(3):
- Su[i] = (-0.25*(v - 1.0)*(u + v + 1) - 0.25*(u - 1.0)*(v - 1.0))*verts[0][i] + \
- (-0.25*(v - 1.0)*(u - v - 1) - 0.25*(u + 1.0)*(v - 1.0))*verts[1][i] + \
- ( 0.25*(v + 1.0)*(u + v - 1) + 0.25*(u + 1.0)*(v + 1.0))*verts[2][i] + \
- ( 0.25*(v + 1.0)*(u - v + 1) + 0.25*(u - 1.0)*(v + 1.0))*verts[3][i] + \
- 0.5*(v*v - 1.0)*verts[4][i] + u*(v - 1.0)*verts[5][i] - \
- 0.5*(v*v - 1.0)*verts[6][i] - u*(v + 1.0)*verts[7][i]
+ cython.floating[3] Su) nogil:
+ '''
+
+ This function computes the derivative of the S(u, v) function w.r.t u.
+
+ '''
+ cdef int i
+ for i in range(3):
+ Su[i] = (-0.25*(v - 1.0)*(u + v + 1) - 0.25*(u - 1.0)*(v - 1.0))*verts[0][i] + \
+ (-0.25*(v - 1.0)*(u - v - 1) - 0.25*(u + 1.0)*(v - 1.0))*verts[1][i] + \
+ ( 0.25*(v + 1.0)*(u + v - 1) + 0.25*(u + 1.0)*(v + 1.0))*verts[2][i] + \
+ ( 0.25*(v + 1.0)*(u - v + 1) + 0.25*(u - 1.0)*(v + 1.0))*verts[3][i] + \
+ 0.5*(v*v - 1.0)*verts[4][i] + u*(v - 1.0)*verts[5][i] - \
+ 0.5*(v*v - 1.0)*verts[6][i] - u*(v + 1.0)*verts[7][i]
@cython.boundscheck(False)
@@ -152,6 +197,11 @@
const cython.floating u,
const cython.floating v,
cython.floating[3] Sv) nogil:
+ '''
+
+ This function computes the derivative of the S(u, v) function w.r.t v.
+
+ '''
cdef int i
for i in range(3):
Sv[i] = (-0.25*(u - 1.0)*(u + v + 1) - 0.25*(u - 1.0)*(v - 1.0))*verts[0][i] + \
@@ -168,7 +218,13 @@
cdef RayHitData compute_patch_hit(cython.floating[8][3] verts,
cython.floating[3] ray_origin,
cython.floating[3] ray_direction) nogil:
-
+ """
+
+ This function iteratively computes whether the bi-quadratic patch defined by the
+ eight input nodes intersects with the given ray. Either way, information about
+ the potential hit is stored in the returned RayHitData.
+
+ """
# first we compute the two planes that define the ray.
cdef cython.floating[3] n, N1, N2
cdef cython.floating A = dot(ray_direction, ray_direction)
@@ -243,7 +299,15 @@
cdef np.int64_t ray_patch_intersect(const void* primitives,
const np.int64_t item,
Ray* ray) nogil:
-
+ '''
+
+ This returns an integer flag that indicates whether the given patch is the
+ closest hit for the ray so far. If it is, the ray is updated to store the
+ current primitive index and the distance to the first hit. The patch used
+ is the one indexed by "item" in the array of primitives.
+
+
+ '''
cdef Patch patch = (<Patch*> primitives)[item]
cdef RayHitData hd = compute_patch_hit(patch.v, ray.origin, ray.direction)
@@ -267,7 +331,14 @@
cdef void patch_centroid(const void *primitives,
const np.int64_t item,
np.float64_t[3] centroid) nogil:
-
+ '''
+
+ This computes the centroid of the input patch. The patch used
+ is the one indexed by "item" in the array of primitives. The result
+ will be stored in the numpy array passed in as "centroid".
+
+ '''
+
cdef np.int64_t i, j
cdef Patch patch = (<Patch*> primitives)[item]
@@ -289,6 +360,14 @@
const np.int64_t item,
BBox* bbox) nogil:
+ '''
+
+ This computes the bounding box of the input patch. The patch used
+ is the one indexed by "item" in the array of primitives. The result
+ will be stored in the input BBox.
+
+ '''
+
cdef np.int64_t i, j
cdef Patch patch = (<Patch*> primitives)[item]
https://bitbucket.org/yt_analysis/yt/commits/a1326ffb923b/
Changeset: a1326ffb923b
Branch: stable
User: atmyers
Date: 2016-09-30 22:14:21+00:00
Summary: Some more docstrings for the unstructured mesh Cython.
Affected #: 4 files
diff -r caa2770b901f31d99638bb51760bed0247b60836 -r a1326ffb923b95c75e80672f544600217d519a21 yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -4,6 +4,7 @@
the interface between the internal representation of the mesh and the pyembree
representation.
+Note - this file is only used for the Embree-accelerated ray-tracer.
"""
diff -r caa2770b901f31d99638bb51760bed0247b60836 -r a1326ffb923b95c75e80672f544600217d519a21 yt/utilities/lib/mesh_intersection.pyx
--- a/yt/utilities/lib/mesh_intersection.pyx
+++ b/yt/utilities/lib/mesh_intersection.pyx
@@ -1,7 +1,8 @@
"""
-This file contains functions used for performing ray-tracing with 2nd-order Lagrange
-Elements.
+This file contains functions used for performing ray-tracing with Embree
+for 2nd-order Lagrange Elements.
+Note - this file is only used for the Embree-accelerated ray-tracer.
"""
diff -r caa2770b901f31d99638bb51760bed0247b60836 -r a1326ffb923b95c75e80672f544600217d519a21 yt/utilities/lib/mesh_samplers.pyx
--- a/yt/utilities/lib/mesh_samplers.pyx
+++ b/yt/utilities/lib/mesh_samplers.pyx
@@ -2,6 +2,7 @@
This file contains functions that sample a surface mesh at the point hit by
a ray. These can be used with pyembree in the form of "filter feedback functions."
+Note - this file is only used for the Embree-accelerated ray-tracer.
"""
diff -r caa2770b901f31d99638bb51760bed0247b60836 -r a1326ffb923b95c75e80672f544600217d519a21 yt/utilities/lib/mesh_triangulation.pyx
--- a/yt/utilities/lib/mesh_triangulation.pyx
+++ b/yt/utilities/lib/mesh_triangulation.pyx
@@ -1,3 +1,18 @@
+"""
+
+This file contains code for triangulating unstructured meshes. That is, for
+every element in the mesh, it breaks up the element into some number of
+triangles, returning a triangle mesh instead.
+
+It also contains code for removing duplicate triangles from the resulting
+mesh using a hash-table approach, so that we don't waste time rendering
+impossible-to-see triangles.
+
+This code is currently used by the OpenGL-accelerated unstructured mesh
+renderer, as well as when annotating mesh lines on regular slices.
+
+"""
+
import numpy as np
cimport numpy as np
cimport cython
https://bitbucket.org/yt_analysis/yt/commits/f44189f14ed4/
Changeset: f44189f14ed4
Branch: stable
User: atmyers
Date: 2016-09-30 22:06:31+00:00
Summary: expanding on the BVH docstrings
Affected #: 1 file
diff -r a1326ffb923b95c75e80672f544600217d519a21 -r f44189f14ed44dbc58d0020ff71fd75238de72ce yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -47,11 +47,25 @@
This class implements a bounding volume hierarchy (BVH), a spatial acceleration
structure for fast ray-tracing. A BVH is like a kd-tree, except that instead of
partitioning the *volume* of the parent to create the children, we partition the
- triangles themselves into 'left' or 'right' sub-trees. The bounding volume for a
- node is then determined by computing the bounding volume of the triangles that
- belong to it. This allows us to quickly discard triangles that are not close
+ primitives themselves into 'left' or 'right' sub-trees. The bounding volume for a
+ node is then determined by computing the bounding volume of the primitives that
+ belong to it. This allows us to quickly discard primitives that are not close
to intersecting a given ray.
+ This class is currently used to provide software 3D rendering support for
+ finite element datasets. For 1st-order meshes, every element of the mesh is
+ triangulated, and this set of triangles forms the primitives that will be used
+ for the ray-trace. The BVH can then quickly determine which element is hit by
+ each ray associated with the image plane, and the appropriate interpolation can
+ be performed to sample the finite element solution at that hit position.
+
+ Currently, 2nd-order meshes are only supported for 20-node hexahedral elements.
+ There, the primitive type is a bi-quadratic patch instead of a triangle, and
+ each intersection involves computing a Newton-Raphson solve.
+
+ See yt/utilities/lib/primitives.pyx for the definitions of both of these primitive
+ types.
+
'''
@cython.boundscheck(False)
https://bitbucket.org/yt_analysis/yt/commits/cd1d25318b16/
Changeset: cd1d25318b16
Branch: stable
User: atmyers
Date: 2016-09-30 21:55:00+00:00
Summary: some docstrings for the primitives.pyx file
Affected #: 1 file
diff -r f44189f14ed44dbc58d0020ff71fd75238de72ce -r cd1d25318b163a3f6c50639b35139ca8da128c9c yt/utilities/lib/primitives.pyx
--- a/yt/utilities/lib/primitives.pyx
+++ b/yt/utilities/lib/primitives.pyx
@@ -299,15 +299,15 @@
cdef np.int64_t ray_patch_intersect(const void* primitives,
const np.int64_t item,
Ray* ray) nogil:
- '''
-
- This returns an integer flag that indicates whether the given patch is the
- closest hit for the ray so far. If it is, the ray is updated to store the
- current primitive index and the distance to the first hit. The patch used
- is the one indexed by "item" in the array of primitives.
-
-
- '''
+'''
+
+This returns an integer flag that indicates whether the given patch is the
+closest hit for the ray so far. If it is, the ray is updated to store the
+current primitive index and the distance to the first hit. The patch used
+is the one indexed by "item" in the array of primitives.
+
+
+'''
cdef Patch patch = (<Patch*> primitives)[item]
cdef RayHitData hd = compute_patch_hit(patch.v, ray.origin, ray.direction)
@@ -331,14 +331,14 @@
cdef void patch_centroid(const void *primitives,
const np.int64_t item,
np.float64_t[3] centroid) nogil:
- '''
-
- This computes the centroid of the input patch. The patch used
- is the one indexed by "item" in the array of primitives. The result
- will be stored in the numpy array passed in as "centroid".
-
- '''
-
+'''
+
+This computes the centroid of the input patch. The patch used
+is the one indexed by "item" in the array of primitives. The result
+will be stored in the numpy array passed in as "centroid".
+
+'''
+
cdef np.int64_t i, j
cdef Patch patch = (<Patch*> primitives)[item]
@@ -360,13 +360,13 @@
const np.int64_t item,
BBox* bbox) nogil:
- '''
-
- This computes the bounding box of the input patch. The patch used
- is the one indexed by "item" in the array of primitives. The result
- will be stored in the input BBox.
-
- '''
+'''
+
+This computes the bounding box of the input patch. The patch used
+is the one indexed by "item" in the array of primitives. The result
+will be stored in the input BBox.
+
+'''
cdef np.int64_t i, j
cdef Patch patch = (<Patch*> primitives)[item]
https://bitbucket.org/yt_analysis/yt/commits/8f13df1aa8da/
Changeset: 8f13df1aa8da
Branch: stable
User: xarthisius
Date: 2016-10-27 23:48:02+00:00
Summary: [opt] Typedef array returned in yt.geometry.selection_routines.points_in_cells
Affected #: 1 file
diff -r 93abddc95841084a2d64c533d90961881fa428fb -r 8f13df1aa8da54c1727201e3c94108639565263e yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -2071,6 +2071,7 @@
# We use brute force since the cells are a relatively unordered collection.
cdef int p, c, n_p, n_c
+ cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
n_p = px.size
n_c = cx.size
https://bitbucket.org/yt_analysis/yt/commits/ff3611176796/
Changeset: ff3611176796
Branch: stable
User: chummels
Date: 2016-12-11 01:23:43+00:00
Summary: Merging.
Affected #: 6 files
diff -r 8f13df1aa8da54c1727201e3c94108639565263e -r ff361117679640a75272d55d702772fc469c0b62 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -47,11 +47,25 @@
This class implements a bounding volume hierarchy (BVH), a spatial acceleration
structure for fast ray-tracing. A BVH is like a kd-tree, except that instead of
partitioning the *volume* of the parent to create the children, we partition the
- triangles themselves into 'left' or 'right' sub-trees. The bounding volume for a
- node is then determined by computing the bounding volume of the triangles that
- belong to it. This allows us to quickly discard triangles that are not close
+ primitives themselves into 'left' or 'right' sub-trees. The bounding volume for a
+ node is then determined by computing the bounding volume of the primitives that
+ belong to it. This allows us to quickly discard primitives that are not close
to intersecting a given ray.
+ This class is currently used to provide software 3D rendering support for
+ finite element datasets. For 1st-order meshes, every element of the mesh is
+ triangulated, and this set of triangles forms the primitives that will be used
+ for the ray-trace. The BVH can then quickly determine which element is hit by
+ each ray associated with the image plane, and the appropriate interpolation can
+ be performed to sample the finite element solution at that hit position.
+
+ Currently, 2nd-order meshes are only supported for 20-node hexahedral elements.
+ There, the primitive type is a bi-quadratic patch instead of a triangle, and
+ each intersection involves computing a Newton-Raphson solve.
+
+ See yt/utilities/lib/primitives.pyx for the definitions of both of these primitive
+ types.
+
'''
@cython.boundscheck(False)
diff -r 8f13df1aa8da54c1727201e3c94108639565263e -r ff361117679640a75272d55d702772fc469c0b62 yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -4,6 +4,7 @@
the interface between the internal representation of the mesh and the pyembree
representation.
+Note - this file is only used for the Embree-accelerated ray-tracer.
"""
diff -r 8f13df1aa8da54c1727201e3c94108639565263e -r ff361117679640a75272d55d702772fc469c0b62 yt/utilities/lib/mesh_intersection.pyx
--- a/yt/utilities/lib/mesh_intersection.pyx
+++ b/yt/utilities/lib/mesh_intersection.pyx
@@ -1,7 +1,8 @@
"""
-This file contains functions used for performing ray-tracing with 2nd-order Lagrange
-Elements.
+This file contains functions used for performing ray-tracing with Embree
+for 2nd-order Lagrange Elements.
+Note - this file is only used for the Embree-accelerated ray-tracer.
"""
diff -r 8f13df1aa8da54c1727201e3c94108639565263e -r ff361117679640a75272d55d702772fc469c0b62 yt/utilities/lib/mesh_samplers.pyx
--- a/yt/utilities/lib/mesh_samplers.pyx
+++ b/yt/utilities/lib/mesh_samplers.pyx
@@ -2,6 +2,7 @@
This file contains functions that sample a surface mesh at the point hit by
a ray. These can be used with pyembree in the form of "filter feedback functions."
+Note - this file is only used for the Embree-accelerated ray-tracer.
"""
diff -r 8f13df1aa8da54c1727201e3c94108639565263e -r ff361117679640a75272d55d702772fc469c0b62 yt/utilities/lib/mesh_triangulation.pyx
--- a/yt/utilities/lib/mesh_triangulation.pyx
+++ b/yt/utilities/lib/mesh_triangulation.pyx
@@ -1,3 +1,18 @@
+"""
+
+This file contains code for triangulating unstructured meshes. That is, for
+every element in the mesh, it breaks up the element into some number of
+triangles, returning a triangle mesh instead.
+
+It also contains code for removing duplicate triangles from the resulting
+mesh using a hash-table approach, so that we don't waste time rendering
+impossible-to-see triangles.
+
+This code is currently used by the OpenGL-accelerated unstructured mesh
+renderer, as well as when annotating mesh lines on regular slices.
+
+"""
+
import numpy as np
cimport numpy as np
cimport cython
diff -r 8f13df1aa8da54c1727201e3c94108639565263e -r ff361117679640a75272d55d702772fc469c0b62 yt/utilities/lib/primitives.pyx
--- a/yt/utilities/lib/primitives.pyx
+++ b/yt/utilities/lib/primitives.pyx
@@ -17,7 +17,14 @@
@cython.wraparound(False)
@cython.cdivision(True)
cdef np.int64_t ray_bbox_intersect(Ray* ray, const BBox bbox) nogil:
-# https://tavianator.com/fast-branchless-raybounding-box-intersections/
+ '''
+
+ This returns an integer flag that indicates whether a ray and a bounding
+ box intersect. It does not modify either the ray or the box.
+
+ '''
+
+ # https://tavianator.com/fast-branchless-raybounding-box-intersections/
cdef np.float64_t tmin = -INF
cdef np.float64_t tmax = INF
@@ -38,7 +45,17 @@
cdef np.int64_t ray_triangle_intersect(const void* primitives,
const np.int64_t item,
Ray* ray) nogil:
-# https://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm
+ '''
+
+ This returns an integer flag that indicates whether a triangle is the
+ closest hit for the ray so far. If it is, the ray is updated to store the
+ current triangle index and the distance to the first hit. The triangle used
+ is the one indexed by "item" in the array of primitives.
+
+
+ '''
+
+ # https://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm
cdef Triangle tri = (<Triangle*> primitives)[item]
@@ -87,6 +104,13 @@
cdef void triangle_centroid(const void *primitives,
const np.int64_t item,
np.float64_t[3] centroid) nogil:
+ '''
+
+ This computes the centroid of the input triangle. The triangle used
+ is the one indexed by "item" in the array of primitives. The result
+ will be stored in the numpy array passed in as "centroid".
+
+ '''
cdef Triangle tri = (<Triangle*> primitives)[item]
cdef np.int64_t i
@@ -100,7 +124,14 @@
cdef void triangle_bbox(const void *primitives,
const np.int64_t item,
BBox* bbox) nogil:
-
+ '''
+
+ This computes the bounding box of the input triangle. The triangle used
+ is the one indexed by "item" in the array of primitives. The result
+ will be stored in the input BBox.
+
+ '''
+
cdef Triangle tri = (<Triangle*> primitives)[item]
cdef np.int64_t i
for i in range(3):
@@ -115,17 +146,26 @@
const cython.floating u,
const cython.floating v,
cython.floating[3] S) nogil:
-
- cdef int i
- for i in range(3):
- S[i] = 0.25*(1.0 - u)*(1.0 - v)*(-u - v - 1)*verts[0][i] + \
- 0.25*(1.0 + u)*(1.0 - v)*( u - v - 1)*verts[1][i] + \
- 0.25*(1.0 + u)*(1.0 + v)*( u + v - 1)*verts[2][i] + \
- 0.25*(1.0 - u)*(1.0 + v)*(-u + v - 1)*verts[3][i] + \
- 0.5*(1 - u)*(1 - v*v)*verts[4][i] + \
- 0.5*(1 - u*u)*(1 - v)*verts[5][i] + \
- 0.5*(1 + u)*(1 - v*v)*verts[6][i] + \
- 0.5*(1 - u*u)*(1 + v)*verts[7][i]
+ '''
+
+ This function is a parametric representation of the surface of a bi-quadratic
+ patch. The inputs are the eight nodes that define a face of a 20-node hex element,
+ and two parameters u and v that vary from -1 to 1 and tell you where you are on
+ the surface of the patch. The output is the array 'S' that stores the physical
+ (x, y, z) position of the corresponding point on the patch. This function is needed
+ to compute the intersection of rays and bi-quadratic patches.
+
+ '''
+ cdef int i
+ for i in range(3):
+ S[i] = 0.25*(1.0 - u)*(1.0 - v)*(-u - v - 1)*verts[0][i] + \
+ 0.25*(1.0 + u)*(1.0 - v)*( u - v - 1)*verts[1][i] + \
+ 0.25*(1.0 + u)*(1.0 + v)*( u + v - 1)*verts[2][i] + \
+ 0.25*(1.0 - u)*(1.0 + v)*(-u + v - 1)*verts[3][i] + \
+ 0.5*(1 - u)*(1 - v*v)*verts[4][i] + \
+ 0.5*(1 - u*u)*(1 - v)*verts[5][i] + \
+ 0.5*(1 + u)*(1 - v*v)*verts[6][i] + \
+ 0.5*(1 - u*u)*(1 + v)*verts[7][i]
@cython.boundscheck(False)
@@ -134,15 +174,20 @@
cdef void patchSurfaceDerivU(const cython.floating[8][3] verts,
const cython.floating u,
const cython.floating v,
- cython.floating[3] Su) nogil:
- cdef int i
- for i in range(3):
- Su[i] = (-0.25*(v - 1.0)*(u + v + 1) - 0.25*(u - 1.0)*(v - 1.0))*verts[0][i] + \
- (-0.25*(v - 1.0)*(u - v - 1) - 0.25*(u + 1.0)*(v - 1.0))*verts[1][i] + \
- ( 0.25*(v + 1.0)*(u + v - 1) + 0.25*(u + 1.0)*(v + 1.0))*verts[2][i] + \
- ( 0.25*(v + 1.0)*(u - v + 1) + 0.25*(u - 1.0)*(v + 1.0))*verts[3][i] + \
- 0.5*(v*v - 1.0)*verts[4][i] + u*(v - 1.0)*verts[5][i] - \
- 0.5*(v*v - 1.0)*verts[6][i] - u*(v + 1.0)*verts[7][i]
+ cython.floating[3] Su) nogil:
+ '''
+
+ This function computes the derivative of the S(u, v) function w.r.t u.
+
+ '''
+ cdef int i
+ for i in range(3):
+ Su[i] = (-0.25*(v - 1.0)*(u + v + 1) - 0.25*(u - 1.0)*(v - 1.0))*verts[0][i] + \
+ (-0.25*(v - 1.0)*(u - v - 1) - 0.25*(u + 1.0)*(v - 1.0))*verts[1][i] + \
+ ( 0.25*(v + 1.0)*(u + v - 1) + 0.25*(u + 1.0)*(v + 1.0))*verts[2][i] + \
+ ( 0.25*(v + 1.0)*(u - v + 1) + 0.25*(u - 1.0)*(v + 1.0))*verts[3][i] + \
+ 0.5*(v*v - 1.0)*verts[4][i] + u*(v - 1.0)*verts[5][i] - \
+ 0.5*(v*v - 1.0)*verts[6][i] - u*(v + 1.0)*verts[7][i]
@cython.boundscheck(False)
@@ -152,6 +197,11 @@
const cython.floating u,
const cython.floating v,
cython.floating[3] Sv) nogil:
+ '''
+
+ This function computes the derivative of the S(u, v) function w.r.t v.
+
+ '''
cdef int i
for i in range(3):
Sv[i] = (-0.25*(u - 1.0)*(u + v + 1) - 0.25*(u - 1.0)*(v - 1.0))*verts[0][i] + \
@@ -168,7 +218,13 @@
cdef RayHitData compute_patch_hit(cython.floating[8][3] verts,
cython.floating[3] ray_origin,
cython.floating[3] ray_direction) nogil:
-
+ """
+
+ This function iteratively computes whether the bi-quadratic patch defined by the
+ eight input nodes intersects with the given ray. Either way, information about
+ the potential hit is stored in the returned RayHitData.
+
+ """
# first we compute the two planes that define the ray.
cdef cython.floating[3] n, N1, N2
cdef cython.floating A = dot(ray_direction, ray_direction)
@@ -243,7 +299,15 @@
cdef np.int64_t ray_patch_intersect(const void* primitives,
const np.int64_t item,
Ray* ray) nogil:
+'''
+This returns an integer flag that indicates whether the given patch is the
+closest hit for the ray so far. If it is, the ray is updated to store the
+current primitive index and the distance to the first hit. The patch used
+is the one indexed by "item" in the array of primitives.
+
+
+'''
cdef Patch patch = (<Patch*> primitives)[item]
cdef RayHitData hd = compute_patch_hit(patch.v, ray.origin, ray.direction)
@@ -267,6 +331,13 @@
cdef void patch_centroid(const void *primitives,
const np.int64_t item,
np.float64_t[3] centroid) nogil:
+'''
+
+This computes the centroid of the input patch. The patch used
+is the one indexed by "item" in the array of primitives. The result
+will be stored in the numpy array passed in as "centroid".
+
+'''
cdef np.int64_t i, j
cdef Patch patch = (<Patch*> primitives)[item]
@@ -289,6 +360,14 @@
const np.int64_t item,
BBox* bbox) nogil:
+'''
+
+This computes the bounding box of the input patch. The patch used
+is the one indexed by "item" in the array of primitives. The result
+will be stored in the input BBox.
+
+'''
+
cdef np.int64_t i, j
cdef Patch patch = (<Patch*> primitives)[item]
https://bitbucket.org/yt_analysis/yt/commits/a79a40e4934f/
Changeset: a79a40e4934f
Branch: stable
User: xarthisius
Date: 2016-10-28 00:50:21+00:00
Summary: Use slightly smaller objects for certain tests
Affected #: 2 files
diff -r ff361117679640a75272d55d702772fc469c0b62 -r a79a40e4934f4666a79695cc7ac6502dbd0c3119 yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -226,9 +226,9 @@
ds = load('GadgetDiskGalaxy/snapshot_200.hdf5')
fn = ds.add_smoothed_particle_field(('PartType0', 'particle_ones'))
assert_equal(fn, ('deposit', 'PartType0_smoothed_particle_ones'))
- ad = ds.all_data()
- ret = ad[fn]
- assert_almost_equal(ret.sum(), 3824750.912653606)
+ dd = ds.sphere('center', (500, 'code_length'))
+ ret = dd[fn]
+ assert_almost_equal(ret.sum(), 638.5652315154682)
def test_add_gradient_fields():
global base_ds
diff -r ff361117679640a75272d55d702772fc469c0b62 -r a79a40e4934f4666a79695cc7ac6502dbd0c3119 yt/units/tests/test_unit_systems.py
--- a/yt/units/tests/test_unit_systems.py
+++ b/yt/units/tests/test_unit_systems.py
@@ -63,11 +63,11 @@
ytcfg["yt","skip_dataset_cache"] = "True"
ds_cgs = load(gslr)
- dd_cgs = ds_cgs.sphere("c", (100., "kpc"))
+ dd_cgs = ds_cgs.sphere("c", (15., "kpc"))
for us in test_units:
ds = load(gslr, unit_system=us)
- dd = ds.sphere("c", (100.,"kpc"))
+ dd = ds.sphere("c", (15.,"kpc"))
for field in test_fields:
if us == "code":
# For this dataset code units are cgs
https://bitbucket.org/yt_analysis/yt/commits/e727e7e77f1a/
Changeset: e727e7e77f1a
Branch: stable
User: ngoldbaum
Date: 2016-10-31 19:06:41+00:00
Summary: [tipsy] Correct auxiliary file footer skip offset to account for read in data
Affected #: 1 file
diff -r a79a40e4934f4666a79695cc7ac6502dbd0c3119 -r e727e7e77f1a98e49a43bf2d47d998cbab51af40 yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -165,7 +165,7 @@
aux_fh[afield].seek(0, os.SEEK_SET)
sh = aux_fields_offsets[afield][ptype][0] + total
sf = aux_fields_offsets[afield][ptype][1] + \
- tp[ptype] - count
+ tp[ptype] - count - total
if tp[ptype] > 0:
aux = np.genfromtxt(
aux_fh[afield], skip_header=sh,
https://bitbucket.org/yt_analysis/yt/commits/79d4086d25be/
Changeset: 79d4086d25be
Branch: stable
User: ngoldbaum
Date: 2016-11-04 15:24:11+00:00
Summary: clarify the use of cosmology_parameters in the loading tipsy data docs
Affected #: 1 file
diff -r e727e7e77f1a98e49a43bf2d47d998cbab51af40 -r 79d4086d25bed3529a193abec8d7cdaa3d04399c doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1631,7 +1631,9 @@
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Cosmological parameters can be specified to Tipsy to enable computation of
-default units. The parameters recognized are of this form:
+default units. For example do the following, to load a Tipsy dataset whose
+path is stored in the variable ``my_filename`` with specified cosmology
+parameters:
.. code-block:: python
@@ -1640,14 +1642,21 @@
'omega_matter': 0.272,
'hubble_constant': 0.702}
-If you wish to set the default units directly, you can do so by using the
+ ds = yt.load(my_filename,
+ cosmology_parameters=cosmology_parameters)
+
+If you wish to set the unit system directly, you can do so by using the
``unit_base`` keyword in the load statement.
.. code-block:: python
import yt
+
ds = yt.load(filename, unit_base={'length', (1.0, 'Mpc')})
+See the documentation for the
+:class:`~yt.frontends.tipsy.data_structures.TipsyDataset` class for more
+information.
Loading Cosmological Simulations
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
https://bitbucket.org/yt_analysis/yt/commits/db526d981cfc/
Changeset: db526d981cfc
Branch: stable
User: ngoldbaum
Date: 2016-11-07 20:04:29+00:00
Summary: [bugfix] explicitly use integer division in ramses time calculation
Affected #: 1 file
diff -r 79d4086d25bed3529a193abec8d7cdaa3d04399c -r db526d981cfcbd03afde648556c197aabf1cc9af yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -654,7 +654,7 @@
age = self.parameters['time']
iage = 1 + int(10.*age/self.dtau)
- iage = np.min([iage,self.n_frw/2 + (iage - self.n_frw/2)/10])
+ iage = np.min([iage,self.n_frw//2 + (iage - self.n_frw//2)//10])
self.time_simu = self.t_frw[iage ]*(age-self.tau_frw[iage-1])/(self.tau_frw[iage]-self.tau_frw[iage-1])+ \
self.t_frw[iage-1]*(age-self.tau_frw[iage ])/(self.tau_frw[iage-1]-self.tau_frw[iage])
https://bitbucket.org/yt_analysis/yt/commits/fbc9e7a7d2ef/
Changeset: fbc9e7a7d2ef
Branch: stable
User: ngoldbaum
Date: 2016-11-08 15:02:01+00:00
Summary: [bugfix] Explicitly use integer division in octree indexing
Affected #: 1 file
diff -r db526d981cfcbd03afde648556c197aabf1cc9af -r fbc9e7a7d2efedb4c3453970c8a39e809f7bbdc7 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -92,7 +92,7 @@
def _reshape_vals(self, arr):
nz = self.nz
if len(arr.shape) <= 2:
- n_oct = arr.shape[0] / (nz**3)
+ n_oct = arr.shape[0] // (nz**3)
else:
n_oct = max(arr.shape)
if arr.size == nz*nz*nz*n_oct:
https://bitbucket.org/yt_analysis/yt/commits/25dfb46a51b7/
Changeset: 25dfb46a51b7
Branch: stable
User: ngoldbaum
Date: 2016-11-09 16:18:04+00:00
Summary: [bugfix] use integer division in gadget IO routines
Affected #: 1 file
diff -r fbc9e7a7d2efedb4c3453970c8a39e809f7bbdc7 -r 25dfb46a51b766cee3ce00e06e8a0db7361b1c64 yt/frontends/gadget/io.py
--- a/yt/frontends/gadget/io.py
+++ b/yt/frontends/gadget/io.py
@@ -135,7 +135,7 @@
arr = np.fromfile(f, dtype=dt, count = count)
if name in self._vector_fields:
factor = self._vector_fields[name]
- arr = arr.reshape((count/factor, factor), order="C")
+ arr = arr.reshape((count//factor, factor), order="C")
return arr.astype("float64")
def _initialize_index(self, data_file, regions):
https://bitbucket.org/yt_analysis/yt/commits/232c57f55951/
Changeset: 232c57f55951
Branch: stable
User: ngoldbaum
Date: 2016-11-12 16:37:22+00:00
Summary: [bugfix] fix rich display of ProfilePlot in the jupyter notebook
Affected #: 2 files
diff -r 25dfb46a51b766cee3ce00e06e8a0db7361b1c64 -r 232c57f5595122fcad91f1a0f46c91d597ec8190 yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -118,6 +118,14 @@
if font_color is not None:
label.set_color(self.font_color)
+ def _repr_png_(self):
+ from ._mpl_imports import FigureCanvasAgg
+ canvas = FigureCanvasAgg(self.figure)
+ f = BytesIO()
+ with matplotlib_style_context():
+ canvas.print_figure(f)
+ f.seek(0)
+ return f.read()
class ImagePlotMPL(PlotMPL):
"""A base class for yt plots made using imshow
@@ -165,15 +173,6 @@
for which in ['major', 'minor']:
self.cax.tick_params(which=which, axis='y', direction='in')
- def _repr_png_(self):
- from ._mpl_imports import FigureCanvasAgg
- canvas = FigureCanvasAgg(self.figure)
- f = BytesIO()
- with matplotlib_style_context():
- canvas.print_figure(f)
- f.seek(0)
- return f.read()
-
def _get_best_layout(self):
# Ensure the figure size along the long axis is always equal to _figure_size
diff -r 25dfb46a51b766cee3ce00e06e8a0db7361b1c64 -r 232c57f5595122fcad91f1a0f46c91d597ec8190 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -326,20 +326,16 @@
def _repr_html_(self):
"""Return an html representation of the plot object. Will display as a
png for each WindowPlotMPL instance in self.plots"""
- from . import _mpl_imports as mpl
ret = ''
- unique = set(self.figures.values())
- if len(unique) < len(self.figures):
+ unique = set(self.plots.values())
+ if len(unique) < len(self.plots):
iters = izip(range(len(unique)), sorted(unique))
else:
- iters = iteritems(self.figures)
- for uid, fig in iters:
- canvas = mpl.FigureCanvasAgg(fig)
- f = BytesIO()
+ iters = iteritems(self.plots)
+ for uid, plot in iters:
with matplotlib_style_context():
- canvas.print_figure(f)
- f.seek(0)
- img = base64.b64encode(f.read()).decode()
+ img = plot._repr_png_()
+ img = base64.b64encode(img).decode()
ret += r'<img style="max-width:100%%;max-height:100%%;" ' \
r'src="data:image/png;base64,{0}"><br>'.format(img)
return ret
https://bitbucket.org/yt_analysis/yt/commits/6632c3319fa8/
Changeset: 6632c3319fa8
Branch: stable
User: jwise77
Date: 2016-11-20 13:45:19+00:00
Summary: Updating eps_writer to handle PlotWindow that have the plots as a dict
instead of a list. Also fixing y-axis labeling after the reordering
of plots in a previous changeset.
Affected #: 1 file
diff -r 232c57f5595122fcad91f1a0f46c91d597ec8190 -r 6632c3319fa8b743fa83ba1abbdf6087d71afbbd yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -1110,9 +1110,9 @@
else:
this_plot = yt_plots
if j == nrow-1:
+ xaxis = 0
+ elif j == 0:
xaxis = 1
- elif j == 0:
- xaxis = 0
else:
xaxis = -1
if i == 0:
@@ -1177,6 +1177,10 @@
for i in range(ncol):
xpos0 = i*(figsize[0] + margins[0])
index = j*ncol + i
+ if isinstance(yt_plots, list):
+ this_plot = yt_plots[index]
+ else:
+ this_plot = yt_plots
if (not _yt and colorbars is not None) or (_yt and not yt_nocbar):
if cb_flags is not None:
if not cb_flags[index]:
@@ -1228,7 +1232,7 @@
if fields[index] is None:
fields[index] = d.return_field(yt_plots[index])
- d.colorbar_yt(yt_plots[index],
+ d.colorbar_yt(this_plot,
field=fields[index],
pos=[xpos,ypos],
shrink=shrink_cb,
https://bitbucket.org/yt_analysis/yt/commits/e118799b9fdc/
Changeset: e118799b9fdc
Branch: stable
User: brittonsmith
Date: 2016-11-22 21:35:57+00:00
Summary: Backporting PR #2442 https://bitbucket.org/yt_analysis/yt/pull-requests/2442
Affected #: 2 files
diff -r 6632c3319fa8b743fa83ba1abbdf6087d71afbbd -r e118799b9fdcb0c024d015227326e129732a088d yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -149,28 +149,47 @@
function = _nuclei_density,
particle_type = particle_type,
units = unit_system["number_density"])
- if len(elements) == 0:
- for element in ["H", "He"]:
- registry.add_field((ftype, "%s_nuclei_density" % element),
- function = _default_nuclei_density,
- particle_type = particle_type,
- units = unit_system["number_density"])
+
+ for element in ["H", "He"]:
+ if element in elements:
+ continue
+ registry.add_field((ftype, "%s_nuclei_density" % element),
+ function = _default_nuclei_density,
+ particle_type = particle_type,
+ units = unit_system["number_density"])
def _default_nuclei_density(field, data):
+ ftype = field.name[0]
element = field.name[1][:field.name[1].find("_")]
- return data["gas", "density"] * _primordial_mass_fraction[element] / \
+ return data[ftype, "density"] * _primordial_mass_fraction[element] / \
ChemicalFormula(element).weight / amu_cgs
def _nuclei_density(field, data):
+ ftype = field.name[0]
element = field.name[1][:field.name[1].find("_")]
- field_data = np.zeros_like(data["gas", "%s_number_density" %
+
+ nuclei_mass_field = "%s_nuclei_mass_density" % element
+ if (ftype, nuclei_mass_field) in data.ds.field_info:
+ return data[(ftype, nuclei_mass_field)] / \
+ ChemicalFormula(element).weight / amu_cgs
+ metal_field = "%s_metallicity" % element
+ if (ftype, metal_field) in data.ds.field_info:
+ return data[ftype, "density"] * data[(ftype, metal_field)] / \
+ ChemicalFormula(element).weight / amu_cgs
+
+ field_data = np.zeros_like(data[ftype, "%s_number_density" %
data.ds.field_info.species_names[0]])
for species in data.ds.field_info.species_names:
nucleus = species
if "_" in species:
nucleus = species[:species.find("_")]
+ # num is the number of nuclei contributed by this species.
num = _get_element_multiple(nucleus, element)
- field_data += num * data["gas", "%s_number_density" % species]
+ # Since this is a loop over all species existing in this dataset,
+ # we will encounter species that contribute nothing, so we skip them.
+ if num == 0:
+ continue
+ field_data += num * data[ftype, "%s_number_density" % species]
return field_data
def _get_all_elements(species_list):
diff -r 6632c3319fa8b743fa83ba1abbdf6087d71afbbd -r e118799b9fdcb0c024d015227326e129732a088d yt/frontends/gizmo/fields.py
--- a/yt/frontends/gizmo/fields.py
+++ b/yt/frontends/gizmo/fields.py
@@ -14,15 +14,21 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
+from yt.fields.field_info_container import \
+ FieldInfoContainer
from yt.fields.particle_fields import \
add_volume_weighted_smoothed_field
from yt.fields.species_fields import \
- add_species_field_by_density
+ add_species_field_by_density, \
+ setup_species_fields
from yt.frontends.gadget.fields import \
GadgetFieldInfo
from yt.frontends.sph.fields import \
SPHFieldInfo
+metal_elements = ["He", "C", "N", "O", "Ne",
+ "Mg", "Si", "S", "Ca", "Fe"]
+
class GizmoFieldInfo(GadgetFieldInfo):
known_particle_fields = (
("Mass", ("code_mass", ["particle_mass"], None)),
@@ -58,8 +64,14 @@
def __init__(self, *args, **kwargs):
super(SPHFieldInfo, self).__init__(*args, **kwargs)
if ("PartType0", "Metallicity_00") in self.field_list:
- self.nuclei_names = ["He", "C", "N", "O", "Ne", "Mg", "Si", "S",
- "Ca", "Fe"]
+ self.nuclei_names = metal_elements
+ self.species_names = ["H", "H_p1"] + metal_elements
+
+ def setup_particle_fields(self, ptype):
+ FieldInfoContainer.setup_particle_fields(self, ptype)
+ if ptype in ("PartType0",):
+ self.setup_gas_particle_fields(ptype)
+ setup_species_fields(self, ptype)
def setup_gas_particle_fields(self, ptype):
super(GizmoFieldInfo, self).setup_gas_particle_fields(ptype)
https://bitbucket.org/yt_analysis/yt/commits/c8ba3da50c15/
Changeset: c8ba3da50c15
Branch: stable
User: ngoldbaum
Date: 2016-11-29 21:19:46+00:00
Summary: Backporting PR #2448 https://bitbucket.org/yt_analysis/yt/pull-requests/2448
Affected #: 4 files
diff -r e118799b9fdcb0c024d015227326e129732a088d -r c8ba3da50c151122288694442cd29f43ef73ad6f tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -39,7 +39,7 @@
local_owls_000:
- yt/frontends/owls/tests/test_outputs.py
- local_pw_010:
+ local_pw_011:
- yt/visualization/tests/test_plotwindow.py:test_attributes
- yt/visualization/tests/test_plotwindow.py:test_attributes_wt
- yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
@@ -64,7 +64,7 @@
local_ramses_000:
- yt/frontends/ramses/tests/test_outputs.py
- local_ytdata_000:
+ local_ytdata_001:
- yt/frontends/ytdata
local_absorption_spectrum_005:
diff -r e118799b9fdcb0c024d015227326e129732a088d -r c8ba3da50c151122288694442cd29f43ef73ad6f yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -276,7 +276,13 @@
def _get_bins(self, mi, ma, n, take_log):
if take_log:
- return np.logspace(np.log10(mi), np.log10(ma), n+1)
+ ret = np.logspace(np.log10(mi), np.log10(ma), n+1)
+ # at this point ret[0] and ret[-1] are not exactly equal to
+ # mi and ma due to round-off error. Let's force them to be
+ # mi and ma exactly to avoid incorrectly discarding cells near
+ # the edges. See Issue #1300.
+ ret[0], ret[-1] = mi, ma
+ return ret
else:
return np.linspace(mi, ma, n+1)
@@ -994,6 +1000,11 @@
if extrema is None:
ex = [data_source.quantities["Extrema"](f, non_zero=l)
for f, l in zip(bin_fields, logs)]
+ # pad extrema by epsilon so cells at bin edges are not excluded
+ for i, (mi, ma) in enumerate(ex):
+ mi = mi - np.spacing(mi)
+ ma = ma + np.spacing(ma)
+ ex[i][0], ex[i][1] = mi, ma
else:
ex = []
for bin_field in bin_fields:
diff -r e118799b9fdcb0c024d015227326e129732a088d -r c8ba3da50c151122288694442cd29f43ef73ad6f yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -719,7 +719,7 @@
assert_true(hasattr(out, 'units'))
assert_true(not hasattr(ret, 'units'))
elif ufunc in (np.absolute, np.fabs, np.conjugate, np.floor, np.ceil,
- np.trunc, np.negative):
+ np.trunc, np.negative, np.spacing):
ret = ufunc(a, out=out)
assert_array_equal(ret, out)
diff -r e118799b9fdcb0c024d015227326e129732a088d -r c8ba3da50c151122288694442cd29f43ef73ad6f yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -27,7 +27,7 @@
greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \
logical_or, logical_xor, logical_not, maximum, minimum, fmax, fmin, \
isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \
- modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs
+ modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs, spacing
from yt.units.unit_object import Unit, UnitParseError
from yt.units.unit_registry import UnitRegistry
@@ -205,7 +205,7 @@
log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin,
arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad,
rad2deg, invert, logical_not, isreal, iscomplex, isfinite, isinf, isnan,
- signbit, floor, ceil, trunc, modf, frexp, fabs
+ signbit, floor, ceil, trunc, modf, frexp, fabs, spacing
)
binary_operators = (
@@ -365,6 +365,7 @@
floor: passthrough_unit,
ceil: passthrough_unit,
trunc: passthrough_unit,
+ spacing: passthrough_unit,
}
__array_priority__ = 2.0
https://bitbucket.org/yt_analysis/yt/commits/38a29ca615ae/
Changeset: 38a29ca615ae
Branch: stable
User: ngoldbaum
Date: 2016-11-29 22:26:46+00:00
Summary: Interact with mercurial repositories using hglib context manager.
This ensures we do not leave orphan mercurial processes running after
interacting with a repository.
Affected #: 2 files
diff -r c8ba3da50c151122288694442cd29f43ef73ad6f -r 38a29ca615aec7db782cc5ccca4af8939659a0a7 doc/helper_scripts/generate_doap.py
--- a/doc/helper_scripts/generate_doap.py
+++ b/doc/helper_scripts/generate_doap.py
@@ -75,47 +75,47 @@
lastname_sort = lambda a: a.rsplit(None, 1)[-1]
def get_release_tags():
- c = hglib.open(yt_path)
- releases = {}
- for name, rev, node, islocal in c.tags():
- if name.startswith("yt-"):
- releases[name] = node
- rr = []
- for name, node in sorted(releases.items()):
- date = c.log(node)[-1][-1]
- rr.append((date, name[3:]))
+ with hglib.open(yt_path) as c:
+ releases = {}
+ for name, rev, node, islocal in c.tags():
+ if name.startswith("yt-"):
+ releases[name] = node
+ rr = []
+ for name, node in sorted(releases.items()):
+ date = c.log(node)[-1][-1]
+ rr.append((date, name[3:]))
rr.sort()
return [(_[1], _[0].strftime("%Y-%M-%d")) for _ in rr]
def developer_names():
cmd = hglib.util.cmdbuilder("churn", "-c")
- c = hglib.open(yt_path)
- emails = set([])
- for dev in c.rawcommand(cmd).split("\n"):
- if len(dev.strip()) == 0: continue
- emails.add(dev.rsplit(None, 2)[0])
- print("Generating real names for {0} emails".format(len(emails)))
- names = set([])
- for email in sorted(emails):
- if email in name_ignores:
- continue
- if email in name_mappings:
- names.add(name_mappings[email])
- continue
- cset = c.log(revrange="last(author('%s'))" % email)
- if len(cset) == 0:
- print("Error finding {0}".format(email))
- realname = email
- else:
- realname, addr = parseaddr(cset[0][4])
- if realname == '':
- realname = email
- if realname in name_mappings:
- names.add(name_mappings[realname])
- continue
- realname = realname.decode('utf-8')
- realname = realname.encode('ascii', 'xmlcharrefreplace')
- names.add(realname)
+ with hglib.open(yt_path) as c:
+ emails = set([])
+ for dev in c.rawcommand(cmd).split("\n"):
+ if len(dev.strip()) == 0: continue
+ emails.add(dev.rsplit(None, 2)[0])
+ print("Generating real names for {0} emails".format(len(emails)))
+ names = set([])
+ for email in sorted(emails):
+ if email in name_ignores:
+ continue
+ if email in name_mappings:
+ names.add(name_mappings[email])
+ continue
+ cset = c.log(revrange="last(author('%s'))" % email)
+ if len(cset) == 0:
+ print("Error finding {0}".format(email))
+ realname = email
+ else:
+ realname, addr = parseaddr(cset[0][4])
+ if realname == '':
+ realname = email
+ if realname in name_mappings:
+ names.add(name_mappings[realname])
+ continue
+ realname = realname.decode('utf-8')
+ realname = realname.encode('ascii', 'xmlcharrefreplace')
+ names.add(realname)
#with open("devs.txt", "w") as f:
# for name in sorted(names, key=lastname_sort):
# f.write("%s\n" % name)
diff -r c8ba3da50c151122288694442cd29f43ef73ad6f -r 38a29ca615aec7db782cc5ccca4af8939659a0a7 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -519,33 +519,34 @@
print("Try: pip install python-hglib")
return -1
f = open(os.path.join(path, "yt_updater.log"), "a")
- repo = hglib.open(path)
- repo.pull()
- ident = repo.identify().decode("utf-8")
- if "+" in ident:
- print("Can't rebuild modules by myself.")
- print("You will have to do this yourself. Here's a sample commands:")
- print("")
- print(" $ cd %s" % (path))
- print(" $ hg up")
- print(" $ %s setup.py develop" % (sys.executable))
- return 1
- print("Updating the repository")
- f.write("Updating the repository\n\n")
- repo.update(check=True)
- f.write("Updated from %s to %s\n\n" % (ident, repo.identify()))
- if skip_rebuild: return
- f.write("Rebuilding modules\n\n")
- p = subprocess.Popen([sys.executable, "setup.py", "build_ext", "-i"], cwd=path,
- stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
- stdout, stderr = p.communicate()
- f.write(stdout.decode('utf-8'))
- f.write("\n\n")
- if p.returncode:
- print("BROKEN: See %s" % (os.path.join(path, "yt_updater.log")))
- sys.exit(1)
- f.write("Successful!\n")
- print("Updated successfully.")
+ with hglib.open(path) as repo:
+ repo.pull()
+ ident = repo.identify().decode("utf-8")
+ if "+" in ident:
+ print("Can't rebuild modules by myself.")
+ print("You will have to do this yourself. Here's a sample commands:")
+ print("")
+ print(" $ cd %s" % (path))
+ print(" $ hg up")
+ print(" $ %s setup.py develop" % (sys.executable))
+ return 1
+ print("Updating the repository")
+ f.write("Updating the repository\n\n")
+ repo.update(check=True)
+ f.write("Updated from %s to %s\n\n" % (ident, repo.identify()))
+ if skip_rebuild: return
+ f.write("Rebuilding modules\n\n")
+ p = subprocess.Popen([sys.executable, "setup.py", "build_ext", "-i"],
+ cwd=path, stdout = subprocess.PIPE,
+ stderr = subprocess.STDOUT)
+ stdout, stderr = p.communicate()
+ f.write(stdout.decode('utf-8'))
+ f.write("\n\n")
+ if p.returncode:
+ print("BROKEN: See %s" % (os.path.join(path, "yt_updater.log")))
+ sys.exit(1)
+ f.write("Successful!\n")
+ print("Updated successfully.")
def get_hg_version(path):
try:
@@ -556,8 +557,8 @@
print("Try: pip install python-hglib")
return None
try:
- repo = hglib.open(path)
- return repo.identify()
+ with hglib.open(path) as repo:
+ return repo.identify()
except hglib.error.ServerError:
# path is not an hg repository
return None
https://bitbucket.org/yt_analysis/yt/commits/b6e7715131d0/
Changeset: b6e7715131d0
Branch: stable
User: chummels
Date: 2016-11-30 01:04:33+00:00
Summary: Adding check for negative column densities in absorption spectrum with warning.
Affected #: 1 file
diff -r 38a29ca615aec7db782cc5ccca4af8939659a0a7 -r b6e7715131d0e17ecc8ba213395ded9a469e54b8 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -390,6 +390,9 @@
# and deposit the lines into the spectrum
for line in parallel_objects(self.line_list, njobs=njobs):
column_density = field_data[line['field_name']] * field_data['dl']
+ if (column_density < 0).any():
+ mylog.warn("Setting negative densities for field %s to 0! Bad!" % line['field_name'])
+ np.clip(column_density, 0, np.inf, out=column_density)
if (column_density == 0).all():
mylog.info("Not adding line %s: insufficient column density" % line['label'])
continue
https://bitbucket.org/yt_analysis/yt/commits/feaf342157ed/
Changeset: feaf342157ed
Branch: stable
User: MatthewTurk
Date: 2016-11-30 22:39:34+00:00
Summary: Fixing x/y axis confusion in the geographic handler
Affected #: 1 file
diff -r b6e7715131d0e17ecc8ba213395ded9a469e54b8 -r feaf342157edd11b20c1a9ddd01f51afb7831f76 yt/geometry/coordinates/geographic_coordinates.py
--- a/yt/geometry/coordinates/geographic_coordinates.py
+++ b/yt/geometry/coordinates/geographic_coordinates.py
@@ -309,8 +309,8 @@
axis, width, depth)
elif name == self.radial_axis:
rax = self.radial_axis
- width = [self.ds.domain_width[self.y_axis[rax]],
- self.ds.domain_width[self.x_axis[rax]]]
+ width = [self.ds.domain_width[self.x_axis[rax]],
+ self.ds.domain_width[self.y_axis[rax]]]
elif name == 'latitude':
ri = self.axis_id[self.radial_axis]
# Remember, in spherical coordinates when we cut in theta,
https://bitbucket.org/yt_analysis/yt/commits/ba95f0fc8cfc/
Changeset: ba95f0fc8cfc
Branch: stable
User: chummels
Date: 2016-12-04 06:54:39+00:00
Summary: Enabling gadget frontend to alias temperature field to Temperature field as is done in gizmo
Affected #: 2 files
diff -r feaf342157edd11b20c1a9ddd01f51afb7831f76 -r ba95f0fc8cfc0a2bfba7bfff047d1ef09408960c yt/frontends/gadget/fields.py
--- a/yt/frontends/gadget/fields.py
+++ b/yt/frontends/gadget/fields.py
@@ -93,6 +93,7 @@
function=_temperature,
particle_type=True,
units=self.ds.unit_system["temperature"])
+ self.alias((ptype, 'temperature'), (ptype, 'Temperature'))
# For now, we hardcode num_neighbors. We should make this configurable
# in the future.
diff -r feaf342157edd11b20c1a9ddd01f51afb7831f76 -r ba95f0fc8cfc0a2bfba7bfff047d1ef09408960c yt/frontends/gizmo/fields.py
--- a/yt/frontends/gizmo/fields.py
+++ b/yt/frontends/gizmo/fields.py
@@ -75,7 +75,6 @@
def setup_gas_particle_fields(self, ptype):
super(GizmoFieldInfo, self).setup_gas_particle_fields(ptype)
- self.alias((ptype, "temperature"), (ptype, "Temperature"))
def _h_density(field, data):
x_H = 1.0 - data[(ptype, "He_metallicity")] - \
https://bitbucket.org/yt_analysis/yt/commits/1ef1e9baf980/
Changeset: 1ef1e9baf980
Branch: stable
User: ngoldbaum
Date: 2016-12-05 20:13:24+00:00
Summary: [bugfix] ensure message printed out when openmp isn't available is a string
Affected #: 1 file
diff -r ba95f0fc8cfc0a2bfba7bfff047d1ef09408960c -r 1ef1e9baf9809836e45b8138caf544c8cd00bd3c setupext.py
--- a/setupext.py
+++ b/setupext.py
@@ -45,7 +45,7 @@
if exit_code != 0:
print("Compilation of OpenMP test code failed with the error: ")
- print(err)
+ print(err.decode('utf8'))
print("Disabling OpenMP support. ")
# Clean up
https://bitbucket.org/yt_analysis/yt/commits/15e59ce7e073/
Changeset: 15e59ce7e073
Branch: stable
User: brittonsmith
Date: 2016-12-08 02:22:45+00:00
Summary: Backporting PR #2460 https://bitbucket.org/yt_analysis/yt/pull-requests/2460
Affected #: 5 files
diff -r 1ef1e9baf9809836e45b8138caf544c8cd00bd3c -r 15e59ce7e073b816dee3c516437ffb25ccba8d84 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -673,6 +673,25 @@
ds["hubble_constant"] = \
ds["hubble_constant"].to("100*km/(Mpc*s)").d
extra_attrs = {"data_type": "yt_light_ray"}
+
+ # save the light ray solution
+ if len(self.light_ray_solution) > 0:
+ # Convert everything to base unit system now to avoid
+ # problems with different units for each ds.
+ for s in self.light_ray_solution:
+ for f in s:
+ if isinstance(s[f], YTArray):
+ s[f].convert_to_base()
+ for key in self.light_ray_solution[0]:
+ if key in ["next", "previous"]:
+ continue
+ lrsa = [sol[key] for sol in self.light_ray_solution]
+ if isinstance(lrsa[-1], YTArray):
+ to_arr = YTArray
+ else:
+ to_arr = np.array
+ extra_attrs["light_ray_solution_%s" % key] = to_arr(lrsa)
+
field_types = dict([(field, "grid") for field in data.keys()])
# Only return LightRay elements with non-zero density
diff -r 1ef1e9baf9809836e45b8138caf544c8cd00bd3c -r 15e59ce7e073b816dee3c516437ffb25ccba8d84 yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
@@ -12,7 +12,10 @@
import numpy as np
+from yt.convenience import \
+ load
from yt.testing import \
+ assert_array_equal, \
requires_file
from yt.analysis_modules.cosmological_observation.api import LightRay
import os
@@ -23,6 +26,19 @@
COSMO_PLUS = "enzo_cosmology_plus/AMRCosmology.enzo"
COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"
+def compare_light_ray_solutions(lr1, lr2):
+ assert len(lr1.light_ray_solution) == len(lr2.light_ray_solution)
+ if len(lr1.light_ray_solution) == 0:
+ return
+ for s1, s2 in zip(lr1.light_ray_solution, lr2.light_ray_solution):
+ for field in s1:
+ if field in ["next", "previous"]:
+ continue
+ if isinstance(s1[field], np.ndarray):
+ assert_array_equal(s1[field], s2[field])
+ else:
+ assert s1[field] == s2[field]
+
@requires_file(COSMO_PLUS)
def test_light_ray_cosmo():
"""
@@ -39,6 +55,9 @@
fields=['temperature', 'density', 'H_number_density'],
data_filename='lightray.h5')
+ ds = load('lightray.h5')
+ compare_light_ray_solutions(lr, ds)
+
# clean up
os.chdir(curdir)
shutil.rmtree(tmpdir)
@@ -62,6 +81,9 @@
fields=['temperature', 'density', 'H_number_density'],
data_filename='lightray.h5')
+ ds = load('lightray.h5')
+ compare_light_ray_solutions(lr, ds)
+
# clean up
os.chdir(curdir)
shutil.rmtree(tmpdir)
@@ -82,6 +104,9 @@
fields=['temperature', 'density', 'H_number_density'],
data_filename='lightray.h5')
+ ds = load('lightray.h5')
+ compare_light_ray_solutions(lr, ds)
+
# clean up
os.chdir(curdir)
shutil.rmtree(tmpdir)
@@ -105,6 +130,9 @@
fields=['temperature', 'density', 'H_number_density'],
data_filename='lightray.h5')
+ ds = load('lightray.h5')
+ compare_light_ray_solutions(lr, ds)
+
# clean up
os.chdir(curdir)
shutil.rmtree(tmpdir)
@@ -130,6 +158,9 @@
fields=['temperature', 'density', 'H_number_density'],
data_filename='lightray.h5')
+ ds = load('lightray.h5')
+ compare_light_ray_solutions(lr, ds)
+
# clean up
os.chdir(curdir)
shutil.rmtree(tmpdir)
diff -r 1ef1e9baf9809836e45b8138caf544c8cd00bd3c -r 15e59ce7e073b816dee3c516437ffb25ccba8d84 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -222,13 +222,58 @@
cont_type = parse_h5_attr(f, "container_type")
if data_type is None:
return False
- if data_type in ["yt_light_ray"]:
- return True
if data_type == "yt_data_container" and \
cont_type not in _grid_data_containers:
return True
return False
+class YTDataLightRayDataset(YTDataContainerDataset):
+ """Dataset for saved LightRay objects."""
+
+ def _parse_parameter_file(self):
+ super(YTDataLightRayDataset, self)._parse_parameter_file()
+ self._restore_light_ray_solution()
+
+ def _restore_light_ray_solution(self):
+ """
+ Restore all information asssociate with the light ray solution
+ to its original form.
+ """
+ key = "light_ray_solution"
+ self.light_ray_solution = []
+ lrs_fields = [par for par in self.parameters \
+ if key in par and not par.endswith("_units")]
+ if len(lrs_fields) == 0:
+ return
+ self.light_ray_solution = \
+ [{} for val in self.parameters[lrs_fields[0]]]
+ for sp3 in ["unique_identifier", "filename"]:
+ ksp3 = "%s_%s" % (key, sp3)
+ if ksp3 not in lrs_fields:
+ continue
+ self.parameters[ksp3] = self.parameters[ksp3].astype(str)
+ for field in lrs_fields:
+ field_name = field[len(key)+1:]
+ for i in range(self.parameters[field].shape[0]):
+ self.light_ray_solution[i][field_name] = self.parameters[field][i]
+ if "%s_units" % field in self.parameters:
+ if len(self.parameters[field].shape) > 1:
+ to_val = self.arr
+ else:
+ to_val = self.quan
+ self.light_ray_solution[i][field_name] = \
+ to_val(self.light_ray_solution[i][field_name],
+ self.parameters["%s_units" % field])
+
+ @classmethod
+ def _is_valid(self, *args, **kwargs):
+ if not args[0].endswith(".h5"): return False
+ with h5py.File(args[0], "r") as f:
+ data_type = parse_h5_attr(f, "data_type")
+ if data_type in ["yt_light_ray"]:
+ return True
+ return False
+
class YTSpatialPlotDataset(YTDataContainerDataset):
"""Dataset for saved slices and projections."""
_field_info_class = YTGridFieldInfo
diff -r 1ef1e9baf9809836e45b8138caf544c8cd00bd3c -r 15e59ce7e073b816dee3c516437ffb25ccba8d84 yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -229,5 +229,5 @@
if iterable(val):
val = np.array(val)
if val.dtype.kind == 'U':
- val = val.astype('|S40')
+ val = val.astype('|S')
fh.attrs[str(attr)] = val
diff -r 1ef1e9baf9809836e45b8138caf544c8cd00bd3c -r 15e59ce7e073b816dee3c516437ffb25ccba8d84 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -2278,8 +2278,8 @@
for ray_ds in self.ray.light_ray_solution:
if ray_ds['unique_identifier'] == plot.ds.unique_identifier:
- start_coord = ray_ds['start']
- end_coord = ray_ds['end']
+ start_coord = plot.ds.arr(ray_ds['start'])
+ end_coord = plot.ds.arr(ray_ds['end'])
return (start_coord, end_coord)
# if no intersection between the plotted dataset and the LightRay
# return a false tuple to pass to start_coord
@@ -2309,9 +2309,11 @@
# if possible, break periodic ray into non-periodic
# segments and add each of them individually
if any(plot.ds.periodicity):
- segments = periodic_ray(start_coord, end_coord,
- left=plot.ds.domain_left_edge,
- right=plot.ds.domain_right_edge)
+ segments = periodic_ray(
+ start_coord.to("code_length"),
+ end_coord.to("code_length"),
+ left=plot.ds.domain_left_edge.to("code_length"),
+ right=plot.ds.domain_right_edge.to("code_length"))
else:
segments = [[start_coord, end_coord]]
https://bitbucket.org/yt_analysis/yt/commits/dfa57e828a2f/
Changeset: dfa57e828a2f
Branch: stable
User: Yurlungur
Date: 2016-12-06 17:28:11+00:00
Summary: Backporting PR #2462 https://bitbucket.org/yt_analysis/yt/pull-requests/2462
Affected #: 1 file
diff -r 15e59ce7e073b816dee3c516437ffb25ccba8d84 -r dfa57e828a2febf731f810eec179897a9a4eeeaf yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -410,9 +410,11 @@
path_length_unit = self.ds.field_info[path_element_name].units
path_length_unit = Unit(path_length_unit,
registry=self.ds.unit_registry)
- # Only convert to CGS for path elements that aren't angles
+ # Only convert to appropriate unit system for path
+ # elements that aren't angles
if not path_length_unit.is_dimensionless:
- path_length_unit = path_length_unit.get_cgs_equivalent()
+ path_length_unit = path_length_unit.get_base_equivalent(
+ unit_system=self.ds.unit_system)
if self.weight_field is None:
self._projected_units[field] = field_unit*path_length_unit
else:
https://bitbucket.org/yt_analysis/yt/commits/4078c04b520a/
Changeset: 4078c04b520a
Branch: stable
User: chummels
Date: 2016-12-11 02:45:25+00:00
Summary: Fixing a bad backport merge.
Affected #: 1 file
diff -r dfa57e828a2febf731f810eec179897a9a4eeeaf -r 4078c04b520adf23a830d5bc6d9ad8edf023d488 yt/utilities/lib/primitives.pyx
--- a/yt/utilities/lib/primitives.pyx
+++ b/yt/utilities/lib/primitives.pyx
@@ -299,15 +299,15 @@
cdef np.int64_t ray_patch_intersect(const void* primitives,
const np.int64_t item,
Ray* ray) nogil:
-'''
+ '''
-This returns an integer flag that indicates whether the given patch is the
-closest hit for the ray so far. If it is, the ray is updated to store the
-current primitive index and the distance to the first hit. The patch used
-is the one indexed by "item" in the array of primitives.
+ This returns an integer flag that indicates whether the given patch is the
+ closest hit for the ray so far. If it is, the ray is updated to store the
+ current primitive index and the distance to the first hit. The patch used
+ is the one indexed by "item" in the array of primitives.
-'''
+ '''
cdef Patch patch = (<Patch*> primitives)[item]
cdef RayHitData hd = compute_patch_hit(patch.v, ray.origin, ray.direction)
@@ -331,13 +331,13 @@
cdef void patch_centroid(const void *primitives,
const np.int64_t item,
np.float64_t[3] centroid) nogil:
-'''
+ '''
-This computes the centroid of the input patch. The patch used
-is the one indexed by "item" in the array of primitives. The result
-will be stored in the numpy array passed in as "centroid".
+ This computes the centroid of the input patch. The patch used
+ is the one indexed by "item" in the array of primitives. The result
+ will be stored in the numpy array passed in as "centroid".
-'''
+ '''
cdef np.int64_t i, j
cdef Patch patch = (<Patch*> primitives)[item]
@@ -360,13 +360,13 @@
const np.int64_t item,
BBox* bbox) nogil:
-'''
+ '''
-This computes the bounding box of the input patch. The patch used
-is the one indexed by "item" in the array of primitives. The result
-will be stored in the input BBox.
+ This computes the bounding box of the input patch. The patch used
+ is the one indexed by "item" in the array of primitives. The result
+ will be stored in the input BBox.
-'''
+ '''
cdef np.int64_t i, j
cdef Patch patch = (<Patch*> primitives)[item]
https://bitbucket.org/yt_analysis/yt/commits/9a4c748bef78/
Changeset: 9a4c748bef78
Branch: stable
User: chummels
Date: 2016-12-11 07:49:16+00:00
Summary: Assuring a number-only unique-id hash is still interpreted as a string in annotate_ray.
Affected #: 2 files
diff -r 4078c04b520adf23a830d5bc6d9ad8edf023d488 -r 9a4c748bef788d196b5a17058ecbc4143eb17d88 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -683,7 +683,7 @@
if isinstance(s[f], YTArray):
s[f].convert_to_base()
for key in self.light_ray_solution[0]:
- if key in ["next", "previous"]:
+ if key in ["next", "previous", "index"]:
continue
lrsa = [sol[key] for sol in self.light_ray_solution]
if isinstance(lrsa[-1], YTArray):
diff -r 4078c04b520adf23a830d5bc6d9ad8edf023d488 -r 9a4c748bef788d196b5a17058ecbc4143eb17d88 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -2277,7 +2277,7 @@
"""
for ray_ds in self.ray.light_ray_solution:
- if ray_ds['unique_identifier'] == plot.ds.unique_identifier:
+ if ray_ds['unique_identifier'] == str(plot.ds.unique_identifier):
start_coord = plot.ds.arr(ray_ds['start'])
end_coord = plot.ds.arr(ray_ds['end'])
return (start_coord, end_coord)
https://bitbucket.org/yt_analysis/yt/commits/f759840542e8/
Changeset: f759840542e8
Branch: stable
User: chummels
Date: 2016-12-10 22:21:31+00:00
Summary: Make install script automatically update to the tip of the appropriate branch when installing from source.
Affected #: 1 file
diff -r 9a4c748bef788d196b5a17058ecbc4143eb17d88 -r f759840542e82bb13a08c7c0d2bd0cdc917ce128 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -1505,7 +1505,8 @@
else
echo "Building yt from source"
YT_DIR="${DEST_DIR}/src/yt-hg"
- log_cmd ${DEST_DIR}/bin/hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
+ log_cmd ${DEST_DIR}/bin/hg clone https://bitbucket.org/yt_analysis/yt ${YT_DIR}
+ log_cmd ${DEST_DIR}/bin/hg -R ${YT_DIR} up -C ${BRANCH}
if [ $INST_EMBREE -eq 1 ]
then
echo $DEST_DIR > ${YT_DIR}/embree.cfg
https://bitbucket.org/yt_analysis/yt/commits/199d7c39f768/
Changeset: 199d7c39f768
Branch: stable
User: chummels
Date: 2016-12-10 22:31:55+00:00
Summary: Updating installation docs to indicate only one install_script.
Affected #: 1 file
diff -r f759840542e82bb13a08c7c0d2bd0cdc917ce128 -r 199d7c39f7682f018779fe3309cf24cb6c1ce39e doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -95,22 +95,17 @@
Running the Install Script
^^^^^^^^^^^^^^^^^^^^^^^^^^
-To get the installation script for the ``stable`` branch of the code,
-download it using the following command:
+You can download the installation script with the following command:
.. code-block:: bash
- $ wget http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
+ $ wget http://bitbucket.org/yt_analysis/yt/raw/yt/doc/install_script.sh
If you do not have ``wget``, the following should also work:
.. code-block:: bash
- $ curl -OL http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
-
-If you wish to install a different version of yt (see :ref:`branches-of-yt`),
-replace ``stable`` with the appropriate branch name (e.g. ``yt``, ``yt-2.x``) in
-the path above to get the correct install script.
+ $ curl -OL http://bitbucket.org/yt_analysis/yt/raw/yt/doc/install_script.sh
By default, the bash install script will create a python environment based on
the `miniconda python distribution <http://conda.pydata.org/miniconda.html>`_,
https://bitbucket.org/yt_analysis/yt/commits/3ffb80c35dc2/
Changeset: 3ffb80c35dc2
Branch: stable
User: chummels
Date: 2016-12-11 17:08:21+00:00
Summary: Assuring code obeys flake8 requirements.
Affected #: 1 file
diff -r 199d7c39f7682f018779fe3309cf24cb6c1ce39e -r 3ffb80c35dc29a71999a605d5c0f4cadc0a22892 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -23,7 +23,6 @@
import matplotlib
import numpy as np
-from io import BytesIO
from .base_plot_types import \
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the yt-svn
mailing list