[Yt-svn] commit/yt: 9 new changesets
Bitbucket
commits-noreply at bitbucket.org
Thu Oct 27 09:33:22 PDT 2011
9 new commits in yt:
https://bitbucket.org/yt_analysis/yt/changeset/b5d6455129b7/
changeset: b5d6455129b7
branch: yt
user: MatthewTurk
date: 2011-10-25 20:29:19
summary: Fix segfault with adaptive healpix.
affected #: 2 files
diff -r 7da720c9a9ea0d5fc76ef1e3aec1c41e343bf76d -r b5d6455129b707fff3ca936fb43048e144d80a65 yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -1837,7 +1837,7 @@
r2[0] = (edge[i][0] - self.center[0])**2.0
for j in range(2):
r2[1] = r2[0] + (edge[j][1] - self.center[1])**2.0
- for k in range(3):
+ for k in range(2):
r2[2] = r2[1] + (edge[k][2] - self.center[2])**2.0
max_r2 = fmax(max_r2, r2[2])
domega = 4.0 * 3.1415926 * max_r2 # Used to be / Nrays
@@ -1863,6 +1863,7 @@
# Now we make four new ones
cdef double v_dir[3]
cdef AdaptiveRayPacket *prev = ray.prev
+ cdef AdaptiveRayPacket *new_ray
# It is important to note here that brick_prev is a local variable for
# the newly created rays, not the previous ray in this brick, as that
# has already been passed on to its next brick
diff -r 7da720c9a9ea0d5fc76ef1e3aec1c41e343bf76d -r b5d6455129b707fff3ca936fb43048e144d80a65 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -701,6 +701,7 @@
pbar.update(total_cells)
pbar.finish()
info, values = ray_source.get_rays()
+ self.ray_source = ray_source
return info, values
class StereoPairCamera(Camera):
https://bitbucket.org/yt_analysis/yt/changeset/8ecda08d313f/
changeset: 8ecda08d313f
branch: yt
user: MatthewTurk
date: 2011-10-26 20:52:25
summary: Splitting the AdaptiveRaySource functions into sub-functions, refactoring the
flow a bit, trying to stop losing rays.
affected #: 2 files
diff -r b5d6455129b707fff3ca936fb43048e144d80a65 -r 8ecda08d313fa6d6d333c6630d6e9686280e6e7d yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -1613,7 +1613,7 @@
AdaptiveRayPacket *next
AdaptiveRayPacket *prev
AdaptiveRayPacket *brick_next
- #int cgi
+ int pgi
cdef class AdaptiveRaySource:
cdef np.float64_t center[3]
@@ -1626,7 +1626,10 @@
cdef AdaptiveRayPacket **lpacket_pointers
def __cinit__(self, center, rays_per_cell, initial_nside,
- np.float64_t normalization, brick_list, int max_nside = 8192):
+ np.float64_t normalization, brick_list,
+ np.ndarray[np.float64_t, ndim=2] ledges,
+ np.ndarray[np.float64_t, ndim=2] redges,
+ int max_nside = 8192):
cdef int i
self.max_nside = max_nside
self.center[0] = center[0]
@@ -1649,6 +1652,10 @@
self.lpacket_pointers[i] = self.packet_pointers[i] = NULL
self.normalization = normalization
self.nrays = 12*initial_nside*initial_nside
+ cdef int *grid_neighbors = <int*> malloc(sizeof(int) * (nbricks+1))
+ grid_neighbors[0] = nbricks
+ for i in range(nbricks):
+ grid_neighbors[i+1] = i
for i in range(self.nrays):
# Initialize rays here
ray = <AdaptiveRayPacket *> malloc(sizeof(AdaptiveRayPacket))
@@ -1667,14 +1674,13 @@
ray.pos[1] = self.center[1]
ray.pos[2] = self.center[2]
ray.brick_next = NULL
+ ray.pgi = -1
if last != NULL:
last.next = ray
- last.brick_next = ray
else:
self.first = ray
+ self.send_ray_home(ray, ledges, redges, grid_neighbors, 0)
last = ray
- self.packet_pointers[0] = self.first
- self.lpacket_pointers[0] = last
def __dealloc__(self):
cdef AdaptiveRayPacket *next
@@ -1684,6 +1690,7 @@
free(ray)
ray = next
free(self.packet_pointers)
+ free(self.lpacket_pointers)
def get_rays(self):
cdef AdaptiveRayPacket *ray = self.first
@@ -1706,76 +1713,146 @@
ray = ray.next
return info, values
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ cdef np.float64_t integrate_ray(self, AdaptiveRayPacket *ray,
+ PartitionedGrid pg, TransferFunctionProxy tf):
+ cdef np.float64_t self_center[3], ray_v_dir[3], ray_value[4]
+ self_center[0] = self.center[0]
+ self_center[1] = self.center[1]
+ self_center[2] = self.center[2]
+ enter_t = ray.t
+ ray_v_dir[0] = ray.v_dir[0]
+ ray_v_dir[1] = ray.v_dir[1]
+ ray_v_dir[2] = ray.v_dir[2]
+ ray_value[0] = ray.value[0]
+ ray_value[1] = ray.value[1]
+ ray_value[2] = ray.value[2]
+ ray_value[3] = ray.value[3]
+ hit = pg.integrate_ray(self_center, ray_v_dir, ray_value, tf, &ray.t)
+ ray.value[0] = ray_value[0]
+ ray.value[1] = ray_value[1]
+ ray.value[2] = ray_value[2]
+ ray.value[3] = ray_value[3]
+ if hit == 0: dt = 0.0
+ else: dt = (ray.t - enter_t)/hit
+ for i in range(3):
+ ray.pos[i] = ray.v_dir[i] * ray.t + self.center[i]
+ return dt
+
+ cdef send_ray_home(self, AdaptiveRayPacket *ray,
+ np.ndarray[np.float64_t, ndim=2] ledges,
+ np.ndarray[np.float64_t, ndim=2] redges,
+ int *grid_neighbors, np.float64_t dt,
+ int skip_append = 0):
+ cdef int found_a_home = 0
+ cdef int i, j, npgi
+ cdef np.float64_t offpos[3]
+ for i in range(3):
+ offpos[i] = ray.pos[i] + ray.v_dir[i] * 1e-5*dt
+ for j in range(grid_neighbors[0]):
+ i = grid_neighbors[j+1]
+ if ((ledges[i, 0] <= offpos[0] <= redges[i, 0]) and
+ (ledges[i, 1] <= offpos[1] <= redges[i, 1]) and
+ (ledges[i, 2] <= offpos[2] <= redges[i, 2]) and
+ ray.pgi != i):
+ if not skip_append: self.append_to_packets(i, ray)
+ ray.pgi = i
+ npgi = i
+ found_a_home = 1
+ break
+ if found_a_home == 0:
+ #print "Non-neighboring area", ray.pgi, ray.ipix, ray.nside
+ for i in range(ledges.shape[0]):
+ if ((ledges[i, 0] <= offpos[0] <= redges[i, 0]) and
+ (ledges[i, 1] <= offpos[1] <= redges[i, 1]) and
+ (ledges[i, 2] <= offpos[2] <= redges[i, 2])):
+ #print "Found a home!", i, ray.ipix, ray.nside, ray.pgi
+ if not skip_append: self.append_to_packets(i, ray)
+ ray.pgi = i
+ npgi = i
+ found_a_home = 1
+ break
+ if found_a_home == 0:
+ raise RuntimeError
+
+ cdef append_to_packets(self, int pgi, AdaptiveRayPacket *ray):
+ # packet_pointers are pointers to the *first* packet in a given brick
+ # lpacket_pointers point to the *final* packet in a given brick, for
+ # easy appending.
+ if self.lpacket_pointers[pgi] == NULL or \
+ self.packet_pointers[pgi] == NULL:
+ self.packet_pointers[pgi] = \
+ self.lpacket_pointers[pgi] = ray
+ ray.brick_next = NULL
+ else:
+ self.lpacket_pointers[pgi].brick_next = ray
+ self.lpacket_pointers[pgi] = ray
+ ray.brick_next = NULL
+
@cython.boundscheck(False)
@cython.wraparound(False)
def integrate_brick(self, PartitionedGrid pg, TransferFunctionProxy tf,
int pgi, np.ndarray[np.float64_t, ndim=2] ledges,
- np.ndarray[np.float64_t, ndim=2] redges):
- cdef np.float64_t domega
- domega = self.get_domega(pg.left_edge, pg.right_edge)
+ np.ndarray[np.float64_t, ndim=2] redges,
+ pgs, int inside = -1):
+ cdef np.float64_t domega, dt
+ cdef PartitionedGrid pgn
#print "dOmega", domega, self.nrays
- cdef int count = 0
- cdef int i, j
- cdef AdaptiveRayPacket *ray = self.packet_pointers[pgi]
+ cdef intersects
+ cdef int i, j, npgi, refined
+ cdef AdaptiveRayPacket *ray2, *ray = self.packet_pointers[pgi]
cdef AdaptiveRayPacket *next
+ cdef AdaptiveRayPacket **pray
cdef int *grid_neighbors = self.find_neighbors(pgi, pg.dds[0], ledges, redges)
- cdef np.float64_t enter_t, dt, offpos[3]
+ cdef int *grid2_neighbors
+ cdef np.float64_t enter_t, offpos[3]
cdef int found_a_home, hit
- #print "Grid: ", pgi, "has", grid_neighbors[0], "neighbors"
- # Some compilers throw errors on the passing of the center, v_dir and
- # value
- cdef np.float64_t self_center[3], ray_v_dir[3], ray_value[4]
- self_center[0] = self.center[0]
- self_center[1] = self.center[1]
- self_center[2] = self.center[2]
while ray != NULL:
- # Note that we may end up splitting a ray such that it ends up
- # outside the brick! This will likely cause them to get lost.
- #print count
- count +=1
- # We don't need to check for intersection anymore, as we are the
- # Ruler of the planet Omicron Persei 8
- #if self.intersects(ray, pg):
- ray = self.refine_ray(ray, domega, pg.dds[0],
- pg.left_edge, pg.right_edge)
- enter_t = ray.t
- ray_v_dir[0] = ray.v_dir[0]
- ray_v_dir[1] = ray.v_dir[1]
- ray_v_dir[2] = ray.v_dir[2]
- ray_value[0] = ray.value[0]
- ray_value[1] = ray.value[1]
- ray_value[2] = ray.value[2]
- ray_value[3] = ray.value[3]
- hit = pg.integrate_ray(self_center, ray_v_dir, ray_value, tf, &ray.t)
- ray.value[0] = ray_value[0]
- ray.value[1] = ray_value[1]
- ray.value[2] = ray_value[2]
- ray.value[3] = ray_value[3]
- if hit == 0: dt = 0.0
- else: dt = (ray.t - enter_t)/hit
- for i in range(3):
- ray.pos[i] = ray.v_dir[i] * ray.t + self.center[i]
- offpos[i] = ray.pos[i] + ray.v_dir[i] * 1e-5*dt
- # We set 'next' after the refinement has occurred
- next = ray.brick_next
- found_a_home = 0
- for j in range(grid_neighbors[0]):
- i = grid_neighbors[j+1]
- if ((ledges[i, 0] <= offpos[0] <= redges[i, 0]) and
- (ledges[i, 1] <= offpos[1] <= redges[i, 1]) and
- (ledges[i, 2] <= offpos[2] <= redges[i, 2])):
- if self.lpacket_pointers[i] == NULL:
- self.packet_pointers[i] = \
- self.lpacket_pointers[i] = ray
- ray.brick_next = NULL
- else:
- self.lpacket_pointers[i].brick_next = ray
- self.lpacket_pointers[i] = ray
- ray.brick_next = NULL
- #ray.cgi = i
- found_a_home = 1
- break
+ #print "Integrating", pgi, ray.pgi, ray.ipix, ray.nside
+ if pgi != ray.pgi:
+ self.send_ray_home(ray, ledges, redges, grid_neighbors, dt)
+ if ray.pgi != pgi:
+ ray = ray.brick_next
+ continue
+ # We start in this brick, and then we integrate to the edge
+ dt = self.integrate_ray(ray, pg, tf)
+ # Now the ray has moved, so we grab .brick_next first, then we
+ # move it to its new home
+ self.packet_pointers[pgi] = next = ray.brick_next
+ if ray.t >= 1.0:
+ ray = next
+ continue
+ self.send_ray_home(ray, ledges, redges, grid_neighbors, dt)
+ # We now are moving into a new PG, which we check for refinement
+ pgn = pgs[ray.pgi]
+ domega = self.get_domega(pgn.left_edge, pgn.right_edge)
+ pray = &ray
+ refined = self.refine_ray(pray, domega, pgn.dds[0],
+ pgn.left_edge, pgn.right_edge)
+ # At this point we can no longer access ray, as it is no longer
+ # safe.
+ ray2 = pray[0]
+ for i in range(refined*4):
+ self.send_ray_home(ray2, ledges, redges, grid_neighbors, dt)
+ while ray2.pgi < pgi and ray2.t <= 1.0:
+ #print "Recursing", ray2.pgi, pgi, ray2.t, ray2.nside, ray2.ipix, dt
+ pgn = pgs[ray2.pgi]
+ grid2_neighbors = self.find_neighbors(ray2.pgi, pgn.dds[0],
+ ledges, redges)
+ dt = self.integrate_ray(ray2, pgn, tf)
+ self.send_ray_home(ray2, ledges, redges, grid2_neighbors,
+ dt, 1)
+ ray2 = ray2.next
ray = next
+ # We check to see if anything has been *added* to the queue, via a
+ # send_ray_home call, here. Otherwise we might end up in the
+ # situation that the final ray is refined, thus next is NULL, but
+ # there are more rays to work on.
+ if ray == NULL and self.packet_pointers[pgi] != NULL:
+ ray = self.packet_pointers[pgi]
+ #print "Packet pointers!", ray.ipix
free(grid_neighbors)
@cython.boundscheck(False)
@@ -1796,8 +1873,9 @@
gre[0] = redges[this_grid, 0] + dds
gre[1] = redges[this_grid, 1] + dds
gre[2] = redges[this_grid, 2] + dds
- for i in range(this_grid+1, ledges.shape[0]):
+ for i in range(ledges.shape[0]):
# Check for overlap
+ if i == this_grid: continue
if ((gle[0] <= redges[i, 0] and gre[0] >= ledges[i, 0]) and
(gle[1] <= redges[i, 1] and gre[1] >= ledges[i, 1]) and
(gle[2] <= redges[i, 2] and gre[2] >= ledges[i, 2])):
@@ -1805,8 +1883,9 @@
cdef int *tr = <int *> malloc(sizeof(int) * (count + 1))
tr[0] = count
count = 0
- for i in range(this_grid+1, ledges.shape[0]):
+ for i in range(ledges.shape[0]):
# Check for overlap
+ if i == this_grid: continue
if ((gle[0] <= redges[i, 0] and gre[0] >= ledges[i, 0]) and
(gle[1] <= redges[i, 1] and gre[1] >= ledges[i, 1]) and
(gle[2] <= redges[i, 2] and gre[2] >= ledges[i, 2])):
@@ -1823,6 +1902,19 @@
if ray.pos[i] > pg.right_edge[i]: return 0
return 1
+ cdef int find_owner(self, AdaptiveRayPacket *ray,
+ int *neighbors,
+ np.ndarray[np.float64_t, ndim=2] ledges,
+ np.ndarray[np.float64_t, ndim=2] redges):
+ cdef int i, pgi = -1
+ for i in range(ledges.shape[0]):
+ pgi = i
+ if ((ray.pos[0] <= redges[i, 0] and ray.pos[0] >= ledges[i, 0]) and
+ (ray.pos[1] <= redges[i, 1] and ray.pos[1] >= ledges[i, 1]) and
+ (ray.pos[2] <= redges[i, 2] and ray.pos[2] >= ledges[i, 2])):
+ return pgi
+ return -1
+
cdef np.float64_t get_domega(self, np.float64_t left_edge[3],
np.float64_t right_edge[3]):
# We should calculate the subtending angle at the maximum radius of the
@@ -1846,58 +1938,51 @@
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
- cdef AdaptiveRayPacket *refine_ray(self, AdaptiveRayPacket *ray,
- np.float64_t domega,
- np.float64_t dx,
- np.float64_t left_edge[3],
- np.float64_t right_edge[3]):
- # This should be recursive; we are not correctly applying split
- # criteria multiple times.
+ cdef int refine_ray(self, AdaptiveRayPacket **pray,
+ np.float64_t domega, np.float64_t dx,
+ np.float64_t left_edge[3],
+ np.float64_t right_edge[3]):
+ cdef AdaptiveRayPacket *ray = pray[0]
+ cdef AdaptiveRayPacket *new_rays[4]
cdef long Nrays = 12 * ray.nside * ray.nside
cdef int i, j
if domega/Nrays < dx*dx/self.rays_per_cell:
- #print ray.nside, domega/Nrays, dx, (domega/Nrays * self.rays_per_cell)/(dx*dx)
- return ray
- if ray.nside >= self.max_nside: return ray
- #print "Refining %s from %s to %s" % (ray.ipix, ray.nside, ray.nside*2)
- # Now we make four new ones
+ return 0
+ if ray.nside >= self.max_nside: return 0
cdef double v_dir[3]
+ # We need a record of the previous one because we're inserting into a
+ # linked list.
cdef AdaptiveRayPacket *prev = ray.prev
- cdef AdaptiveRayPacket *new_ray
- # It is important to note here that brick_prev is a local variable for
- # the newly created rays, not the previous ray in this brick, as that
- # has already been passed on to its next brick
- cdef AdaptiveRayPacket *brick_prev = NULL
for i in range(4):
- new_ray = <AdaptiveRayPacket *> malloc(
+ new_rays[i] = <AdaptiveRayPacket *> malloc(
sizeof(AdaptiveRayPacket))
- new_ray.nside = ray.nside * 2
- new_ray.ipix = ray.ipix * 4 + i
- new_ray.t = ray.t
- #new_ray.cgi = ray.cgi
- new_ray.prev = prev
- if new_ray.prev != NULL:
- new_ray.prev.next = new_ray
- if brick_prev != NULL:
- brick_prev.brick_next = new_ray
- prev = brick_prev = new_ray
+ new_rays[i].nside = ray.nside * 2
+ new_rays[i].ipix = ray.ipix * 4 + i
+ new_rays[i].t = ray.t
healpix_interface.pix2vec_nest(
- new_ray.nside, new_ray.ipix, v_dir)
+ new_rays[i].nside, new_rays[i].ipix, v_dir)
for j in range(3):
- new_ray.v_dir[j] = v_dir[j] * self.normalization
- new_ray.value[j] = ray.value[j]
- new_ray.pos[j] = self.center[j] + ray.t * new_ray.v_dir[j]
- new_ray.value[3] = ray.value[3]
-
- new_ray.next = ray.next
- new_ray.brick_next = ray.brick_next
- if new_ray.next != NULL:
- new_ray.next.prev = new_ray
+ new_rays[i].v_dir[j] = v_dir[j] * self.normalization
+ new_rays[i].value[j] = ray.value[j]
+ new_rays[i].pos[j] = self.center[j] + ray.t * new_rays[i].v_dir[j]
+ new_rays[i].value[3] = ray.value[3]
+ # Insert into the external list
+ if ray.prev != NULL:
+ ray.prev.next = new_rays[0]
+ new_rays[0].prev = ray.prev
+ new_rays[3].next = ray.next
+ if ray.next != NULL:
+ ray.next.prev = new_rays[3]
+ for i in range(3):
+ # Connect backward and forward
+ new_rays[i].next = new_rays[i+1]
+ new_rays[3-i].prev = new_rays[2-i]
if self.first == ray:
- self.first = new_ray.prev.prev.prev
+ self.first = new_rays[0]
+ self.nrays += 3
free(ray)
- self.nrays += 3
- return new_ray.prev.prev.prev
+ pray[0] = new_rays[0]
+ return 1
# From Enzo:
# dOmega = 4 pi r^2/Nrays
diff -r b5d6455129b707fff3ca936fb43048e144d80a65 -r 8ecda08d313fa6d6d333c6630d6e9686280e6e7d yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -694,9 +694,10 @@
self.center -= 1e-2 * min_dx
ray_source = AdaptiveRaySource(self.center, self.rays_per_cell,
self.initial_nside, self.radius,
- bricks, self.max_nside)
+ bricks, left_edges, right_edges, self.max_nside)
for i,brick in enumerate(bricks):
- ray_source.integrate_brick(brick, tfp, i, left_edges, right_edges)
+ ray_source.integrate_brick(brick, tfp, i, left_edges, right_edges,
+ bricks)
total_cells += na.prod(brick.my_data[0].shape)
pbar.update(total_cells)
pbar.finish()
https://bitbucket.org/yt_analysis/yt/changeset/9ab2f4b12eff/
changeset: 9ab2f4b12eff
branch: yt
user: MatthewTurk
date: 2011-10-26 21:37:19
summary: Adding a few comments. Still trying to figure out why the total integrated
value comes back > 1.0 and where rays get lost.
affected #: 1 file
diff -r 8ecda08d313fa6d6d333c6630d6e9686280e6e7d -r 9ab2f4b12eff554567319ca5ecb255ac19f95cbe yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -1835,21 +1835,33 @@
# safe.
ray2 = pray[0]
for i in range(refined*4):
+ # If we have been refined, send the ray to its appropriate
+ # location.
self.send_ray_home(ray2, ledges, redges, grid_neighbors, dt)
+ # If it wants to go back in time that is fine but it needs to
+ # make sure it gets forward in time eventually
while ray2.pgi < pgi and ray2.t <= 1.0:
#print "Recursing", ray2.pgi, pgi, ray2.t, ray2.nside, ray2.ipix, dt
+ # Now we grab a new set of neighbors and whatnot
pgn = pgs[ray2.pgi]
grid2_neighbors = self.find_neighbors(ray2.pgi, pgn.dds[0],
ledges, redges)
+ # We just integrate, we don't bother with the full brick
+ # integration. This means no recursive refinement, and
+ # potential undersampling
dt = self.integrate_ray(ray2, pgn, tf)
- self.send_ray_home(ray2, ledges, redges, grid2_neighbors,
- dt, 1)
+ # Now we send this ray home. Hopefully it'll once again be
+ # forward in time.
+ self.send_ray_home(ray2, ledges, redges, grid2_neighbors, dt)
+ # This tosses us to the next one in line, of the four..
ray2 = ray2.next
+ # We use this because it's been set previously.
ray = next
# We check to see if anything has been *added* to the queue, via a
# send_ray_home call, here. Otherwise we might end up in the
# situation that the final ray is refined, thus next is NULL, but
- # there are more rays to work on.
+ # there are more rays to work on because they have been added via
+ # refinement.
if ray == NULL and self.packet_pointers[pgi] != NULL:
ray = self.packet_pointers[pgi]
#print "Packet pointers!", ray.ipix
@@ -1952,7 +1964,6 @@
cdef double v_dir[3]
# We need a record of the previous one because we're inserting into a
# linked list.
- cdef AdaptiveRayPacket *prev = ray.prev
for i in range(4):
new_rays[i] = <AdaptiveRayPacket *> malloc(
sizeof(AdaptiveRayPacket))
https://bitbucket.org/yt_analysis/yt/changeset/76f5e4e29dcd/
changeset: 76f5e4e29dcd
branch: yt
user: MatthewTurk
date: 2011-10-26 21:45:37
summary: No longer missing any rays. What was happening was that un-refined rays were
being added to the packets for the next grid, but then their refined rays were
replacing them, so the packet_pointers value was zero.
Now I'm getting >1.0 values for the integration. Not sure where that is coming
from.
affected #: 1 file
diff -r 9ab2f4b12eff554567319ca5ecb255ac19f95cbe -r 76f5e4e29dcdfa12d1c55f06d7d39e214ce95796 yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -1824,20 +1824,21 @@
if ray.t >= 1.0:
ray = next
continue
- self.send_ray_home(ray, ledges, redges, grid_neighbors, dt)
+ self.send_ray_home(ray, ledges, redges, grid_neighbors, dt, 1)
# We now are moving into a new PG, which we check for refinement
pgn = pgs[ray.pgi]
domega = self.get_domega(pgn.left_edge, pgn.right_edge)
pray = &ray
refined = self.refine_ray(pray, domega, pgn.dds[0],
pgn.left_edge, pgn.right_edge)
+ if refined == 0: self.append_to_packets(ray.pgi, ray)
# At this point we can no longer access ray, as it is no longer
# safe.
ray2 = pray[0]
for i in range(refined*4):
# If we have been refined, send the ray to its appropriate
# location.
- self.send_ray_home(ray2, ledges, redges, grid_neighbors, dt)
+ self.send_ray_home(ray2, ledges, redges, grid_neighbors, dt, 1)
# If it wants to go back in time that is fine but it needs to
# make sure it gets forward in time eventually
while ray2.pgi < pgi and ray2.t <= 1.0:
@@ -1852,8 +1853,10 @@
dt = self.integrate_ray(ray2, pgn, tf)
# Now we send this ray home. Hopefully it'll once again be
# forward in time.
- self.send_ray_home(ray2, ledges, redges, grid2_neighbors, dt)
+ self.send_ray_home(ray2, ledges, redges, grid2_neighbors,
+ dt, 1)
# This tosses us to the next one in line, of the four..
+ self.append_to_packets(ray2.pgi, ray2)
ray2 = ray2.next
# We use this because it's been set previously.
ray = next
https://bitbucket.org/yt_analysis/yt/changeset/f4fecce87b3a/
changeset: f4fecce87b3a
branch: yt
user: MatthewTurk
date: 2011-10-26 21:54:44
summary: If a ray is refined at the *end* of its time in a box, it can be pushed *back*
into that box. However, this will cause integrate_ray to naively continue to
use the *first* intersection point of the ray with the box as its entry time.
This modification fixes that. I now get 1.0 everywhere for the integration.
Testing further now.
affected #: 1 file
diff -r 76f5e4e29dcdfa12d1c55f06d7d39e214ce95796 -r f4fecce87b3a372e607f1b6b211baeb86638c0a8 yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -610,12 +610,13 @@
np.float64_t v_dir[3],
np.float64_t rgba[4],
TransferFunctionProxy tf,
- np.float64_t *return_t = NULL):
+ np.float64_t *return_t = NULL,
+ np.float64_t enter_t = -1.0):
cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
cdef np.float64_t intersect_t = 1.0
cdef np.float64_t iv_dir[3]
cdef np.float64_t intersect[3], tmax[3], tdelta[3]
- cdef np.float64_t enter_t, dist, alpha, dt, exit_t
+ cdef np.float64_t dist, alpha, dt, exit_t
cdef np.float64_t tr, tl, temp_x, temp_y, dv
for i in range(3):
if (v_dir[i] < 0):
@@ -651,6 +652,7 @@
self.left_edge[1] <= v_pos[1] and v_pos[1] <= self.right_edge[1] and \
self.left_edge[2] <= v_pos[2] and v_pos[2] <= self.right_edge[2]:
intersect_t = 0.0
+ if enter_t >= 0.0: intersect_t = enter_t
if not ((0.0 <= intersect_t) and (intersect_t < 1.0)): return 0
for i in range(3):
intersect[i] = v_pos[i] + intersect_t * v_dir[i]
@@ -1730,7 +1732,8 @@
ray_value[1] = ray.value[1]
ray_value[2] = ray.value[2]
ray_value[3] = ray.value[3]
- hit = pg.integrate_ray(self_center, ray_v_dir, ray_value, tf, &ray.t)
+ hit = pg.integrate_ray(self_center, ray_v_dir, ray_value, tf, &ray.t,
+ ray.t)
ray.value[0] = ray_value[0]
ray.value[1] = ray_value[1]
ray.value[2] = ray_value[2]
@@ -1817,13 +1820,13 @@
ray = ray.brick_next
continue
# We start in this brick, and then we integrate to the edge
- dt = self.integrate_ray(ray, pg, tf)
- # Now the ray has moved, so we grab .brick_next first, then we
- # move it to its new home
self.packet_pointers[pgi] = next = ray.brick_next
if ray.t >= 1.0:
ray = next
continue
+ dt = self.integrate_ray(ray, pg, tf)
+ # Now the ray has moved, so we grab .brick_next first, then we
+ # move it to its new home
self.send_ray_home(ray, ledges, redges, grid_neighbors, dt, 1)
# We now are moving into a new PG, which we check for refinement
pgn = pgs[ray.pgi]
https://bitbucket.org/yt_analysis/yt/changeset/bbc5b43d7895/
changeset: bbc5b43d7895
branch: yt
user: MatthewTurk
date: 2011-10-26 23:18:24
summary: Setting the nudge factor to 1e-6 has fixed it for up to 10 rays per cell in my
test cases. When I went to 20 rays per cell the calculation didn't complete,
so there may be either a lingering bug, or that might just take too long.
affected #: 2 files
diff -r f4fecce87b3a372e607f1b6b211baeb86638c0a8 -r bbc5b43d7895808c050ac761e58107808e7f36c4 yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -1711,6 +1711,12 @@
values[count, 1] = ray.value[1]
values[count, 2] = ray.value[2]
values[count, 3] = ray.value[3]
+ if ray.t < 0.5:
+ print "PROBLEM",
+ print count, ray.ipix, ray.nside, ray.t,
+ print "vd", ray.v_dir[0], ray.v_dir[1], ray.v_dir[2],
+ print "pos", ray.pos[0], ray.pos[1], ray.pos[2],
+ print "pgi", ray.pgi
count += 1
ray = ray.next
return info, values
@@ -1753,7 +1759,7 @@
cdef int i, j, npgi
cdef np.float64_t offpos[3]
for i in range(3):
- offpos[i] = ray.pos[i] + ray.v_dir[i] * 1e-5*dt
+ offpos[i] = ray.pos[i] + ray.v_dir[i] * 1e-6*dt
for j in range(grid_neighbors[0]):
i = grid_neighbors[j+1]
if ((ledges[i, 0] <= offpos[0] <= redges[i, 0]) and
@@ -1841,10 +1847,10 @@
for i in range(refined*4):
# If we have been refined, send the ray to its appropriate
# location.
- self.send_ray_home(ray2, ledges, redges, grid_neighbors, dt, 1)
+ self.send_ray_home(ray2, ledges, redges, grid_neighbors, 0.0, 1)
# If it wants to go back in time that is fine but it needs to
# make sure it gets forward in time eventually
- while ray2.pgi < pgi and ray2.t <= 1.0:
+ while ray2.pgi <= pgi and ray2.t <= 1.0:
#print "Recursing", ray2.pgi, pgi, ray2.t, ray2.nside, ray2.ipix, dt
# Now we grab a new set of neighbors and whatnot
pgn = pgs[ray2.pgi]
diff -r f4fecce87b3a372e607f1b6b211baeb86638c0a8 -r bbc5b43d7895808c050ac761e58107808e7f36c4 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -703,6 +703,7 @@
pbar.finish()
info, values = ray_source.get_rays()
self.ray_source = ray_source
+ self.bricks = bricks
return info, values
class StereoPairCamera(Camera):
https://bitbucket.org/yt_analysis/yt/changeset/4aefb6de7989/
changeset: 4aefb6de7989
branch: yt
user: MatthewTurk
date: 2011-10-26 23:19:06
summary: This removes the debugging attribute setting I missed.
affected #: 1 file
diff -r bbc5b43d7895808c050ac761e58107808e7f36c4 -r 4aefb6de7989d70184e9e3213cb9eeb6353c0950 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -702,8 +702,6 @@
pbar.update(total_cells)
pbar.finish()
info, values = ray_source.get_rays()
- self.ray_source = ray_source
- self.bricks = bricks
return info, values
class StereoPairCamera(Camera):
https://bitbucket.org/yt_analysis/yt/changeset/02886301c1d3/
changeset: 02886301c1d3
branch: yt
user: MatthewTurk
date: 2011-10-26 23:19:21
summary: Merge
affected #: 7 files
diff -r 4aefb6de7989d70184e9e3213cb9eeb6353c0950 -r 02886301c1d3501144162a7249aa684cff2443cb yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -106,6 +106,9 @@
find_unique_solutions, \
project_unique_light_cones
+from .radial_column_density.api import \
+ RadialColumnDensity
+
from .simulation_handler.api import \
EnzoSimulation
diff -r 4aefb6de7989d70184e9e3213cb9eeb6353c0950 -r 02886301c1d3501144162a7249aa684cff2443cb yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -840,7 +840,7 @@
"""
yt_counters("connect_chains_across_tasks")
# Remote (lower dens) chain -> local (higher) chain.
- chainID_translate_map_local = na.arange(self.nchains)
+ chainID_translate_map_local = na.arange(self.nchains, dtype='int64')
# Build the stuff to send.
self.uphill_real_indices = na.concatenate((
self.index, self.index_pad))[self.padded_particles]
@@ -891,7 +891,8 @@
# it. Therefore each key (a chain) in this dict is unique, but the items
# the keys point to are not necessarily unique.
chainID_translate_map_global = \
- self.comm.mpi_allreduce(chainID_translate_map_local, op='min')
+ self.comm.mpi_allreduce(chainID_translate_map_local, op='min',
+ dtype='int64')
# Loop over chains, smallest to largest density, recursively until
# we reach a self-assigned chain. Then we assign that final chainID to
# the *current* one only.
@@ -1102,7 +1103,6 @@
top_keys = self.comm.par_combine_object(top_keys, datatype='array', op='cat')
bot_keys = self.comm.par_combine_object(bot_keys, datatype='array', op='cat')
vals = self.comm.par_combine_object(vals, datatype='array', op='cat')
-
return (top_keys, bot_keys, vals)
def _build_groups(self):
diff -r 4aefb6de7989d70184e9e3213cb9eeb6353c0950 -r 02886301c1d3501144162a7249aa684cff2443cb yt/analysis_modules/radial_column_density/api.py
--- /dev/null
+++ b/yt/analysis_modules/radial_column_density/api.py
@@ -0,0 +1,28 @@
+"""
+API for radial_column_density
+
+Author: Stephen Skory <s at skory.us>
+Affiliation: CU Boulder
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2010-2011 Stephen Skory. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .radial_column_density import RadialColumnDensity
+
diff -r 4aefb6de7989d70184e9e3213cb9eeb6353c0950 -r 02886301c1d3501144162a7249aa684cff2443cb yt/analysis_modules/radial_column_density/radial_column_density.py
--- /dev/null
+++ b/yt/analysis_modules/radial_column_density/radial_column_density.py
@@ -0,0 +1,277 @@
+"""
+Calculate the radial column density around a point.
+
+Author: Stephen Skory <s at skory.us>
+Affiliation: CU Boulder
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2008-2011 Stephen Skory. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.mods import *
+import yt.visualization.volume_rendering.camera as camera
+import yt.utilities.amr_utils as au
+from yt.utilities.math_utils import periodic_dist
+from yt.data_objects.field_info_container import FieldDetector
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+ ParallelAnalysisInterface
+
+def _col_dens(field, data):
+ return data[field]
+
+class RadialColumnDensity(ParallelAnalysisInterface):
+ def __init__(self, pf, field, center, max_radius = 0.5, steps = 10,
+ base='lin', Nside = 32, ang_divs = 800j):
+ r"""
+ Calculate radial column densities in preparation to
+ adding them as a derived field.
+
+ This class is the first step in calculating a derived radial
+ column density field.
+ Given a central point, this calculates the column density to all cell
+ centers within the given radius in units of centimeters times
+ the units of the basis field.
+ For example, if the basis field is `NumberDensity`, which has units
+ of 1 / cm^3, the units of the derived field will be 1 / cm^2.
+ Please see the documentation or the example below on how to
+ use this to make the derived field which can be used like any other
+ derived field.
+
+ This builds a number of spherical
+ surfaces where the column density is calculated
+ using HEALPix Volume Rendering. The values of the column density at
+        grid points are then linearly interpolated between the two nearest
+ surfaces (one inward, one outward).
+ Please see the HEALPix Volume Rendering documentation for more on
+ that part of this calculation.
+
+ Parameters
+ ----------
+ pf : `StaticOutput`
+ The dataset to operate on.
+ field : string
+ The name of the basis field over which to
+ calculate a column density.
+ center : array_like
+ A list or array giving the location of where to
+ calculate the start of
+ the column density.
+ This will probably be "an object of interest" like
+ a star, black hole, or the center of a galaxy.
+ max_radius : float
+ How far out to calculate the column density, in code units. This
+ value will be automatically reduced if the supplied value would
+ result in calculating column densities outside the volume.
+ Default = 0.5.
+ steps : integer
+ How many surfaces to use. A higher number is more accurate, but
+ takes more resources.
+ Default = 10
+ base : string
+ How to evenly space the surfaces: linearly with "lin" or
+ logarithmically with "log".
+ Default = "lin".
+ Nside : int
+ The resolution of column density calculation as performed by
+ HEALPix. Higher numbers mean higher quality. Max = 8192.
+ Default = 32.
+ ang_divs : imaginary integer
+ This number controls the gridding of the HEALPix projection onto
+ the spherical surfaces. Higher numbers mean higher quality.
+ Default = 800j.
+
+ Examples
+ --------
+
+ >>> rcdnumdens = RadialColumnDensity(pf, 'NumberDensity', [0.5, 0.5, 0.5])
+ >>> def _RCDNumberDensity(field, data, rcd = rcdnumdens):
+ return rcd._build_derived_field(data)
+ >>> add_field('RCDNumberDensity', _RCDNumberDensity, units=r'1/\rm{cm}^2')
+ """
+ ParallelAnalysisInterface.__init__(self)
+ self.pf = pf
+ self.center = na.asarray(center)
+ self.max_radius = max_radius
+ self.steps = steps
+ self.base = base
+ self.Nside = Nside
+ self.ang_divs = ang_divs
+ self.real_ang_divs = int(na.abs(ang_divs))
+ self.phi, self.theta = na.mgrid[0.0:2*na.pi:ang_divs, 0:na.pi:ang_divs]
+ self.phi1d = self.phi[:,0]
+ self.theta1d = self.theta[0,:]
+ self.dphi = self.phi1d[1] - self.phi1d[0]
+ self.dtheta = self.theta1d[1] - self.theta1d[0]
+ self.pixi = au.arr_ang2pix_nest(Nside, self.theta.ravel(), self.
+ phi.ravel())
+ self.dw = pf.domain_right_edge - pf.domain_left_edge
+ # Here's where we actually do stuff.
+ self._fix_max_radius()
+ self._make_bins()
+ self._build_surfaces(field)
+
+ def _fix_max_radius(self):
+ # The max_radius can only be the distance from the center point to
+ # the closest face of the volume. This is because the column density
+ # for a surface outside the volume is ill-defined due to the way
+ # normalization is handled in the volume render.
+ # It may be possible to fix this in
+ # the future, and allow these calculations in the whole volume,
+ # but this will work for now.
+ right = self.pf.domain_right_edge - self.center
+ left = self.center - self.pf.domain_left_edge
+ min_r = na.min(right)
+ min_l = na.min(left)
+ self.max_radius = na.min([self.max_radius, min_r, min_l])
+
+ def _make_bins(self):
+ # We'll make the bins start from the smallest cell size to the
+ # specified radius. Column density inside the same cell as our
+ # center is kind of ill-defined, anyway.
+ if self.base == 'lin':
+ self.bins = na.linspace(self.pf.h.get_smallest_dx(), self.max_radius,
+ self.steps)
+ elif self.base == 'log':
+ self.bins = na.logspace(na.log10(self.pf.h.get_smallest_dx()),
+ na.log10(self.max_radius), self.steps)
+
+ def _build_surfaces(self, field):
+        # This will be indexed by bin index.
+ self.surfaces = {}
+ for i, radius in enumerate(self.bins):
+ cam = camera.HEALpixCamera(self.center, radius, self.Nside,
+ pf = self.pf, log_fields = [False], fields = field)
+ bitmap = cam.snapshot()
+ self.surfaces[i] = radius * self.pf['cm'] * \
+ bitmap[:,0,0][self.pixi].reshape((self.real_ang_divs,self.real_ang_divs))
+ self.surfaces[i] = self.comm.mpi_allreduce(self.surfaces[i], op='max')
+
+ def _build_derived_field(self, data, minval=None):
+ r"""
+ Parameters
+ ----------
+
+ minval : float
+ This parameter will set any values of the
+ field that are zero to this minimum value.
+ Values of zero are found outside the maximum radius and
+ in the cell of the user-specified center point.
+ This setting is useful if the field is going to be logged
+ (e.g. na.log10) where zeros are inconvenient.
+ Default = None
+ """
+ x = data['x']
+ sh = x.shape
+ ad = na.prod(sh)
+ if type(data) == type(FieldDetector()):
+ return na.ones(sh)
+ y = data['y']
+ z = data['z']
+ pos = na.array([x.reshape(ad), y.reshape(ad), z.reshape(ad)]).T
+ del x, y, z
+ vals = self._interpolate_value(pos)
+ del pos
+ if minval:
+ zeros = (vals == 0.)
+ vals[zeros] = minval
+ del zeros
+ vals.shape = sh
+ return vals
+
+ def _interpolate_value(self, pos):
+        # Given a position, find the two surfaces it's in between,
+        # and then interpolate values from the surfaces to the point
+        # according to the point's angle.
+ # 1. Find the angle from the center point to the position.
+ vec = pos - self.center
+ phi = na.arctan2(vec[:, 1], vec[:, 0])
+ # Convert the convention from [-pi, pi) to [0, 2pi).
+ sel = (phi < 0)
+ phi[sel] += 2 * na.pi
+ # Find the radius.
+ r = na.sqrt(na.sum(vec * vec, axis = 1))
+ # Keep track of the points outside of self.max_radius, which we'll
+ # handle separately before we return.
+ outside = (r > self.max_radius)
+ theta = na.arccos(vec[:, 2] / r)
+ # 2. Find the bin for this position.
+ digi = na.digitize(r, self.bins)
+ # Find the values on the inner and outer surfaces.
+ in_val = na.zeros_like(r)
+ out_val = na.zeros_like(r)
+ # These two will be used for interpolation.
+ in_r = na.zeros_like(r)
+ out_r = na.zeros_like(r)
+ for bin in na.unique(digi):
+ sel = (digi == bin)
+ # Special case if we're outside the largest sphere.
+ if bin == len(self.bins):
+ in_val[sel] = 0.
+ out_val[sel] = 0.
+ # Just something non-zero so we don't get divide errors later.
+ in_r[sel] = .1
+ out_r[sel] = .2
+ continue
+ # Special case if we're inside the smallest sphere.
+ elif bin == 0:
+ in_val[sel] = na.zeros_like(phi[sel])
+ in_r[sel] = 0.
+ out_val[sel] = self._interpolate_surface_value(1,
+ phi[sel], theta[sel])
+ out_r[sel] = self.bins[1]
+ continue
+ # General case.
+ else:
+ in_val[sel] = self._interpolate_surface_value(bin - 1,
+ phi[sel], theta[sel])
+ in_r[sel] = self.bins[bin - 1]
+ out_val[sel] = self._interpolate_surface_value(bin,
+ phi[sel], theta[sel])
+ out_r[sel] = self.bins[bin]
+ # Interpolate using a linear fit in column density / r space.
+ val = na.empty_like(r)
+ # Special case for inside smallest sphere.
+ sel = (digi == 0)
+ val[sel] = (1. - (out_r[sel] - r[sel]) / out_r[sel]) * out_val[sel]
+ na.invert(sel, sel) # In-place operation!
+ val[sel] = (out_val[sel] - in_val[sel]) / (out_r[sel] - in_r[sel]) * \
+ (r[sel] - in_r[sel]) + in_val[sel]
+ # Fix the things to zero that should be zero.
+ val[outside] = 0.
+ return val
+
+ def _interpolate_surface_value(self, bin, phi, theta):
+ # Given a surface bin and an angle, interpolate the value on
+ # that surface to the angle.
+ # 1. Find the four values closest to the angle.
+ phi_bin = na.digitize(phi, self.phi1d)
+ theta_bin = na.digitize(theta, self.theta1d)
+ val00 = self.surfaces[bin][phi_bin - 1, theta_bin - 1]
+ val01 = self.surfaces[bin][phi_bin - 1, theta_bin]
+ val10 = self.surfaces[bin][phi_bin, theta_bin - 1]
+ val11 = self.surfaces[bin][phi_bin, theta_bin]
+ # 2. Linearly interpolate the four values to the points.
+ int_val0 = (val10 - val00) / self.dphi * \
+ (phi - self.phi1d[phi_bin - 1]) + val00
+ int_val1 = (val11 - val01) / self.dphi * \
+ (phi - self.phi1d[phi_bin - 1]) + val01
+ vals = (int_val1 - int_val0) / self.dtheta * \
+ (theta - self.theta1d[theta_bin - 1]) + int_val0
+ return vals
+
+
diff -r 4aefb6de7989d70184e9e3213cb9eeb6353c0950 -r 02886301c1d3501144162a7249aa684cff2443cb yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -16,6 +16,7 @@
config.add_subpackage("level_sets")
config.add_subpackage("light_ray")
config.add_subpackage("light_cone")
+ config.add_subpackage("radial_column_density")
config.add_subpackage("simulation_handler")
config.add_subpackage("spectral_integrator")
config.add_subpackage("star_analysis")
diff -r 4aefb6de7989d70184e9e3213cb9eeb6353c0950 -r 02886301c1d3501144162a7249aa684cff2443cb yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -467,7 +467,10 @@
if data is None:
ncols = -1
size = 0
+ dtype = 'float64'
+ mylog.info('Warning: Array passed to par_combine_object was None. Setting dtype to float64. This may break things!')
else:
+ dtype = data.dtype
if len(data) == 0:
ncols = -1
size = 0
@@ -477,8 +480,8 @@
else:
ncols, size = data.shape
ncols = self.comm.allreduce(ncols, op=MPI.MAX)
- if size == 0:
- data = na.zeros((ncols,0), dtype='float64') # This only works for
+ if ncols == 0:
+ data = na.zeros(0, dtype=dtype) # This only works for
size = data.shape[-1]
sizes = na.zeros(self.comm.size, dtype='int64')
outsize = na.array(size, dtype='int64')
https://bitbucket.org/yt_analysis/yt/changeset/0eb936dbc343/
changeset: 0eb936dbc343
branch: yt
user: MatthewTurk
date: 2011-10-27 18:15:57
summary: Missed a free call, so grid2_neighbors was allocated and not deallocated, and
also missed that if we only check for <= 1.0, not < 1.0 in the total ray
evolution, we can't ever terminate the recursive evolution loop if we have rays
that terminate in recursion. This could happen for instance in unigrids.
affected #: 1 file
diff -r 02886301c1d3501144162a7249aa684cff2443cb -r 0eb936dbc3431bb5fce2e42aba1796d3ad38d1ef yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -1850,7 +1850,7 @@
self.send_ray_home(ray2, ledges, redges, grid_neighbors, 0.0, 1)
# If it wants to go back in time that is fine but it needs to
# make sure it gets forward in time eventually
- while ray2.pgi <= pgi and ray2.t <= 1.0:
+ while ray2.pgi <= pgi and ray2.t < 1.0:
#print "Recursing", ray2.pgi, pgi, ray2.t, ray2.nside, ray2.ipix, dt
# Now we grab a new set of neighbors and whatnot
pgn = pgs[ray2.pgi]
@@ -1864,6 +1864,7 @@
# forward in time.
self.send_ray_home(ray2, ledges, redges, grid2_neighbors,
dt, 1)
+ free(grid2_neighbors)
# This tosses us to the next one in line, of the four..
self.append_to_packets(ray2.pgi, ray2)
ray2 = ray2.next
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the yt-svn
mailing list