[Yt-svn] yt-commit r1720 - in trunk/yt: _amr_utils extensions extensions/volume_rendering lagos lagos/hop raven
mturk at wrangler.dreamhost.com
Tue May 18 12:36:51 PDT 2010
Author: mturk
Date: Tue May 18 12:36:46 2010
New Revision: 1720
URL: http://yt.enzotools.org/changeset/1720
Log:
Backport from hg:
* Add a Camera class, along with a generic HomogenizedVolume class, to make
multiple traversals of the same partitioned/homogenized volume much faster in
the volume rendering.
* Add initial version of stereoscopic image camera
* Remove some pointer dereferencing in the original enzohop code to speed it
up by ~10-30%.
* Bug fix for projections to limit the IO and to restrict the number of
calls that load data from grids for weight fields.
* Fix generation of smoothed grids that contain derived fields.
* John Wise's fix to the coord_axes callback
* Britton Smith's path length in cosmology
Added:
trunk/yt/extensions/volume_rendering/camera.py
Modified:
trunk/yt/_amr_utils/png_writer.pyx
trunk/yt/extensions/MergerTree.py
trunk/yt/extensions/volume_rendering/__init__.py
trunk/yt/extensions/volume_rendering/grid_partitioner.py
trunk/yt/lagos/BaseDataTypes.py
trunk/yt/lagos/Cosmology.py
trunk/yt/lagos/HaloFinding.py
trunk/yt/lagos/hop/EnzoHop.c
trunk/yt/lagos/hop/hop_numpy.h
trunk/yt/lagos/hop/kd.h
trunk/yt/raven/Callbacks.py
Modified: trunk/yt/_amr_utils/png_writer.pyx
==============================================================================
--- trunk/yt/_amr_utils/png_writer.pyx (original)
+++ trunk/yt/_amr_utils/png_writer.pyx Tue May 18 12:36:46 2010
@@ -157,7 +157,6 @@
cdef int ys = buffer.shape[1]
cdef int v
v = iclip(<int>(pv * 255), 0, 255)
- print "VALUE CONTRIBUTION", v
for pi in range(np):
j = <int> (xs * px[pi])
i = <int> (ys * py[pi])
Modified: trunk/yt/extensions/MergerTree.py
==============================================================================
--- trunk/yt/extensions/MergerTree.py (original)
+++ trunk/yt/extensions/MergerTree.py Tue May 18 12:36:46 2010
@@ -589,6 +589,10 @@
if self.mine == 0:
to_write = []
# Open the temporary database.
+ try:
+ os.remove(temp_name)
+ except OSError:
+ pass
temp_conn = sql.connect(temp_name)
temp_cursor = temp_conn.cursor()
line = "CREATE TABLE Halos (GlobalHaloID INTEGER PRIMARY KEY,\
Modified: trunk/yt/extensions/volume_rendering/__init__.py
==============================================================================
--- trunk/yt/extensions/volume_rendering/__init__.py (original)
+++ trunk/yt/extensions/volume_rendering/__init__.py Tue May 18 12:36:46 2010
@@ -31,9 +31,11 @@
ProjectionTransferFunction
from yt.amr_utils import PartitionedGrid, VectorPlane, \
TransferFunctionProxy
-from grid_partitioner import HomogenizedBrickCollection, \
+from grid_partitioner import HomogenizedVolume, \
+ HomogenizedBrickCollection, \
export_partitioned_grids, \
import_partitioned_grids
from image_handling import export_rgba, import_rgba, \
plot_channel, plot_rgb
from software_sampler import VolumeRendering
+from camera import Camera, StereoPairCamera
Added: trunk/yt/extensions/volume_rendering/camera.py
==============================================================================
--- (empty file)
+++ trunk/yt/extensions/volume_rendering/camera.py Tue May 18 12:36:46 2010
@@ -0,0 +1,159 @@
+"""
+A camera interface (and stereo pair camera) for the volume rendering extension
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt.enzotools.org/
+License:
+ Copyright (C) 2009 Matthew Turk. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as na
+from grid_partitioner import HomogenizedVolume
+from TransferFunction import ProjectionTransferFunction
+from yt.funcs import *
+import yt.amr_utils as au
+
+class Camera(object):
+ def __init__(self, center, normal_vector, width,
+ resolution, transfer_function,
+ north_vector = None,
+ volume = None, fields = None,
+ log_fields = None,
+ sub_samples = 5, pf = None):
+ if pf is not None: self.pf = pf
+ if not iterable(resolution):
+ resolution = (resolution, resolution)
+ self.resolution = resolution
+ self.sub_samples = sub_samples
+ if not iterable(width):
+ width = (width, width, width) # front/back, left/right, top/bottom
+ self.width = width
+ self.center = center
+ if fields is None: fields = ["Density"]
+ self.fields = fields
+ if transfer_function is None:
+ transfer_function = ProjectionTransferFunction()
+ self.transfer_function = transfer_function
+ self._setup_normalized_vectors(normal_vector, north_vector)
+ self.log_fields = log_fields
+ if volume is None:
+ volume = HomogenizedVolume(fields, pf = self.pf,
+ log_fields = log_fields)
+ self.volume = volume
+
+ def _setup_normalized_vectors(self, normal_vector, north_vector):
+ # Now we set up our various vectors
+ normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
+ if north_vector is None:
+ vecs = na.identity(3)
+ t = na.cross(normal_vector, vecs).sum(axis=1)
+ ax = t.argmax()
+ north_vector = na.cross(vecs[ax,:], normal_vector).ravel()
+ north_vector /= na.sqrt(na.dot(north_vector, north_vector))
+ east_vector = -na.cross(north_vector, normal_vector).ravel()
+ east_vector /= na.sqrt(na.dot(east_vector, east_vector))
+ self.unit_vectors = [north_vector, east_vector, normal_vector]
+ self.box_vectors = na.array([self.unit_vectors[0]*self.width[0],
+ self.unit_vectors[1]*self.width[1],
+ self.unit_vectors[2]*self.width[2]])
+
+ self.origin = self.center - 0.5*self.width[0]*self.unit_vectors[0] \
+ - 0.5*self.width[1]*self.unit_vectors[1] \
+ - 0.5*self.width[2]*self.unit_vectors[2]
+ self.back_center = self.center - 0.5*self.width[0]*self.unit_vectors[2]
+ self.front_center = self.center + 0.5*self.width[0]*self.unit_vectors[2]
+ self.inv_mat = na.linalg.pinv(self.unit_vectors)
+
+ def look_at(self, new_center, north_vector = None):
+ normal_vector = self.front_center - new_center
+ self._setup_normalized_vectors(normal_vector, north_vector)
+
+ def get_vector_plane(self, image):
+ # We should move away from pre-generation of vectors like this and into
+ # the usage of on-the-fly generation in the VolumeIntegrator module
+ # We might have a different width and back_center
+ px = na.linspace(-self.width[0]/2.0, self.width[0]/2.0,
+ self.resolution[0])[:,None]
+ py = na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
+ self.resolution[1])[None,:]
+ inv_mat = self.inv_mat
+ bc = self.back_center
+ vectors = na.zeros((self.resolution[0], self.resolution[1], 3),
+ dtype='float64', order='C')
+ vectors[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
+ vectors[:,:,1] = inv_mat[1,0]*px+inv_mat[1,1]*py+self.back_center[1]
+ vectors[:,:,2] = inv_mat[2,0]*px+inv_mat[2,1]*py+self.back_center[2]
+ bounds = (px.min(), px.max(), py.min(), py.max())
+ vector_plane = au.VectorPlane(vectors, self.box_vectors[2],
+ self.back_center, bounds, image,
+ self.unit_vectors[0],
+ self.unit_vectors[1])
+ return vector_plane
+
+ def snapshot(self):
+ image = na.zeros((self.resolution[0], self.resolution[1], 3),
+ dtype='float64', order='C')
+ vector_plane = self.get_vector_plane(image)
+ tfp = au.TransferFunctionProxy(self.transfer_function) # Reset it every time
+ tfp.ns = self.sub_samples
+ self.volume.initialize_source()
+ pbar = get_pbar("Ray casting",
+ (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+ total_cells = 0
+ for brick in self.volume.traverse(self.back_center, self.front_center):
+ brick.cast_plane(tfp, vector_plane)
+ total_cells += na.prod(brick.my_data[0].shape)
+ pbar.update(total_cells)
+ pbar.finish()
+ return image
+
+ def zoom(self, factor):
+ self.width = [w / factor for w in self.width]
+ self._setup_normalized_vectors(
+ self.unit_vectors[2], self.unit_vectors[0])
+
+ def zoomin(self, final, n_steps):
+ f = final**(1.0/n_steps)
+ for i in xrange(n_steps):
+ self.zoom(f)
+ yield self.snapshot()
+
+class StereoPairCamera(Camera):
+ def __init__(self, original_camera, relative_separation):
+ self.original_camera = original_camera
+ self.relative_separation = relative_separation
+
+ def split(self):
+ oc = self.original_camera
+ uv = oc.unit_vectors
+ c = oc.back_center
+ wx, wy, wz = oc.width
+ left_center = c + uv[1] * 0.5*self.relative_separation * wx \
+ + uv[2] * 0.5*wz
+ right_center = c - uv[1] * 0.5*self.relative_separation * wx \
+ + uv[2] * 0.5*wz
+ left_camera = Camera(left_center, uv[2], oc.width,
+ oc.resolution, oc.transfer_function, uv[0],
+ oc.volume, oc.fields, oc.log_fields,
+ oc.sub_samples, oc.pf)
+ right_camera = Camera(right_center, uv[2], oc.width,
+ oc.resolution, oc.transfer_function, uv[0],
+ oc.volume, oc.fields, oc.log_fields,
+ oc.sub_samples, oc.pf)
+ return (left_camera, right_camera)
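For orientation, here is a minimal usage sketch of the new Camera and
StereoPairCamera classes. It is not part of the commit: the yt.mods import,
the EnzoStaticOutput call and the dataset name are placeholders, and
ProjectionTransferFunction is chosen only because it needs no arguments; any
transfer function accepted by TransferFunctionProxy should work the same way.

import numpy as na
from yt.mods import *                     # assumed convenience namespace
from yt.extensions.volume_rendering import Camera, StereoPairCamera, \
    ProjectionTransferFunction

pf = EnzoStaticOutput("my_output")        # placeholder dataset

c = na.array([0.5, 0.5, 0.5])             # image-plane center, domain units
L = na.array([1.0, 1.0, 1.0])             # viewing (normal) vector
W = 0.5                                   # width of the view, domain units
N = 512                                   # pixels per side

tf = ProjectionTransferFunction()
cam = Camera(c, L, W, N, tf, fields=["Density"], pf=pf)
image = cam.snapshot()                    # (N, N, 3) float64 array

# zoomin() is a generator: each step zooms by a constant factor and yields a
# new snapshot of the same partitioned volume.
for frame in cam.zoomin(100.0, 10):
    pass                                  # hand each (N, N, 3) frame to plotting

# Stereo pair derived from an existing camera.
left, right = StereoPairCamera(cam, 0.05).split()
left_image, right_image = left.snapshot(), right.snapshot()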
Modified: trunk/yt/extensions/volume_rendering/grid_partitioner.py
==============================================================================
--- trunk/yt/extensions/volume_rendering/grid_partitioner.py (original)
+++ trunk/yt/extensions/volume_rendering/grid_partitioner.py Tue May 18 12:36:46 2010
@@ -32,6 +32,92 @@
from yt.lagos import ParallelAnalysisInterface, only_on_root, parallel_root_only
from yt.parallel_tools import DistributedObjectCollection
+# HISTORICAL NOTE OF SOME IMPORT:
+# The homogenized brick class should and will be removed. The more general,
+# and less fancy-parallel, HomogenizedVolume is the way to go. The HBC is a
+# complicated mechanism that was designed for back when we were going to be
+# passing bricks all around between processors.
+
+class HomogenizedVolume(ParallelAnalysisInterface):
+ bricks = None
+ def __init__(self, fields = "Density", source = None, pf = None,
+ log_fields = None):
+ # Typically, initialized as hanging off a hierarchy. But, not always.
+ if pf is not None: self.pf = pf
+ if source is None: source = self.pf.h.all_data()
+ self.source = source
+ self.fields = ensure_list(fields)
+ if log_fields is not None:
+ log_fields = ensure_list(log_fields)
+ else:
+ log_fields = [self.pf.field_info[field].take_log
+ for field in self.fields]
+ self.log_fields = log_fields
+
+ def traverse(self, back_point, front_point):
+ if self.bricks is None: self.initialize_source()
+ vec = front_point - back_point
+ dist = na.minimum(
+ na.sum((self.brick_left_edges - back_point) * vec, axis=1),
+ na.sum((self.brick_right_edges - back_point) * vec, axis=1))
+ ind = na.argsort(dist)
+ for b in self.bricks[ind]: yield b
+
+ def _partition_grid(self, grid):
+
+ # This is not super efficient, as it re-fills the regions once for each
+ # field.
+ vcds = []
+ for field, log_field in zip(self.fields, self.log_fields):
+ vcd = grid.get_vertex_centered_data(field).astype('float64')
+ if log_field: vcd = na.log10(vcd)
+ vcds.append(vcd)
+
+ GF = GridFaces(grid.Children + [grid])
+ PP = ProtoPrism(grid.id, grid.LeftEdge, grid.RightEdge, GF)
+
+ pgs = []
+ for P in PP.sweep(0):
+ sl = P.get_brick(grid.LeftEdge, grid.dds, grid.child_mask)
+ if len(sl) == 0: continue
+ dd = [d[sl[0][0]:sl[0][1]+1,
+ sl[1][0]:sl[1][1]+1,
+ sl[2][0]:sl[2][1]+1].copy() for d in vcds]
+ pgs.append(PartitionedGrid(grid.id, len(self.fields), dd,
+ P.LeftEdge, P.RightEdge, sl[-1]))
+ return pgs
+
+ def initialize_source(self, source = None):
+ if self.bricks is not None and source is not None:
+ raise NotImplementedError("Sorry, dynamic shuffling of bricks is" +
+ " not yet supported")
+ if self.bricks is not None and source is None: return
+ bricks = []
+ self._preload(self.source._grids, self.fields, self.pf.h.io)
+ pbar = get_pbar("Partitioning ", len(self.source._grids))
+ for i, g in enumerate(self.source._grids):
+ pbar.update(i)
+ bricks += self._partition_grid(g)
+ pbar.finish()
+ bricks = na.array(bricks, dtype='object')
+ NB = len(bricks)
+ # Now we set up our (local for now) hierarchy. Note that to calculate
+ # intersection, we only need to do the left edge & right edge.
+ #
+ # We're going to double up a little bit here in memory.
+ self.brick_left_edges = na.zeros( (NB, 3), dtype='float64')
+ self.brick_right_edges = na.zeros( (NB, 3), dtype='float64')
+ self.brick_parents = na.zeros( NB, dtype='int64')
+ self.brick_dimensions = na.zeros( (NB, 3), dtype='int64')
+ for i,b in enumerate(bricks):
+ self.brick_left_edges[i,:] = b.LeftEdge
+ self.brick_right_edges[i,:] = b.RightEdge
+ self.brick_parents[i] = b.parent_grid_id
+ self.brick_dimensions[i,:] = b.my_data[0].shape
+ # Vertex-centered means we subtract one from the shape
+ self.brick_dimensions -= 1
+ self.bricks = na.array(bricks, dtype='object')
+
class HomogenizedBrickCollection(DistributedObjectCollection):
def __init__(self, source):
# The idea here is that we have two sources -- the global_domain
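The pattern this class enables is partition-once, render-many: build a
HomogenizedVolume, then hand it to any number of Camera instances through the
volume keyword. A sketch (illustrative only, not from the commit; pf is any
previously loaded parameter file and "Density" is just an example field):

import numpy as na
from yt.extensions.volume_rendering import HomogenizedVolume, Camera, \
    ProjectionTransferFunction

vol = HomogenizedVolume(fields=["Density"], pf=pf)
vol.initialize_source()           # grids are partitioned into bricks only once

tf = ProjectionTransferFunction()
for theta in na.linspace(0.0, 2.0 * na.pi, 10):
    normal = na.array([na.cos(theta), na.sin(theta), 0.5])
    cam = Camera(na.array([0.5, 0.5, 0.5]), normal, 0.3, 256, tf,
                 fields=["Density"], volume=vol, pf=pf)
    frame = cam.snapshot()        # traverse() only re-sorts the existing bricks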
Modified: trunk/yt/lagos/BaseDataTypes.py
==============================================================================
--- trunk/yt/lagos/BaseDataTypes.py (original)
+++ trunk/yt/lagos/BaseDataTypes.py Tue May 18 12:36:46 2010
@@ -1422,16 +1422,27 @@
pass
def _project_grid(self, grid, fields, zero_out):
+ # We split this next bit into two sections to try to limit the IO load
+ # on the system. This way, we preserve grid state (@restore_grid_state
+ # in _get_data_from_grid) *and* we attempt not to load weight data
+ # independently of the standard field data.
if self._weight is None:
weight_data = na.ones(grid.ActiveDimensions, dtype='float64')
- else:
- weight_data = self._get_data_from_grid(grid, self._weight).astype('float64')
- if zero_out: weight_data[grid.child_indices] = 0
+ if zero_out: weight_data[grid.child_indices] = 0
+ masked_data = [fd.astype('float64') * weight_data
+ for fd in self._get_data_from_grid(grid, fields)]
+ else:
+ fields_to_get = list(set(fields + [self._weight]))
+ field_data = dict(zip(
+ fields_to_get, self._get_data_from_grid(grid, fields_to_get)))
+ weight_data = field_data[self._weight].copy().astype('float64')
+ if zero_out: weight_data[grid.child_indices] = 0
+ masked_data = [field_data[field].copy().astype('float64') * weight_data
+ for field in fields]
+ del field_data
# if we zero it out here, then we only have to zero out the weight!
- masked_data = [self._get_data_from_grid(grid, field) * weight_data
- for field in fields]
- full_proj = [self.func(field,axis=self.axis) for field in masked_data]
- weight_proj = self.func(weight_data,axis=self.axis)
+ full_proj = [self.func(field, axis=self.axis) for field in masked_data]
+ weight_proj = self.func(weight_data, axis=self.axis)
if (self._check_region and not self.source._is_fully_enclosed(grid)) or self._field_cuts is not None:
used_data = self._get_points_in_region(grid).astype('bool')
used_points = na.where(na.logical_or.reduce(used_data, self.axis))
@@ -1464,12 +1475,13 @@
return point_mask
@restore_grid_state
- def _get_data_from_grid(self, grid, field):
+ def _get_data_from_grid(self, grid, fields):
+ fields = ensure_list(fields)
if self._check_region:
bad_points = self._get_points_in_region(grid)
else:
bad_points = 1.0
- return grid[field] * bad_points
+ return [grid[field] * bad_points for field in fields]
def _gen_node_name(self):
return "%s/%s" % \
@@ -1632,10 +1644,6 @@
for field in fields_to_get:
if self.data.has_key(field):
continue
- mylog.info("Getting field %s from %s", field, len(self._grids))
- if field not in self.hierarchy.field_list and not in_grids:
- if self._generate_field(field):
- continue # True means we already assigned it
# There are a lot of 'ands' here, but I think they are all
# necessary.
if force_particle_read == False and \
@@ -1644,6 +1652,10 @@
self.pf.h.io._particle_reader:
self[field] = self.particles[field]
continue
+ mylog.info("Getting field %s from %s", field, len(self._grids))
+ if field not in self.hierarchy.field_list and not in_grids:
+ if self._generate_field(field):
+ continue # True means we already assigned it
self[field] = na.concatenate(
[self._get_data_from_grid(grid, field)
for grid in self._grids])
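To make the IO claim concrete: with a weight field, the old projection path
issued one decorated _get_data_from_grid call for the weight plus one per
projected field, while the new path issues a single batched call per grid.
A toy counter, purely illustrative (FakeGrid below is hypothetical and not
part of yt):

class FakeGrid(object):
    def __init__(self):
        self.reads = 0
    def read(self, fields):
        # one IO-touching call, however many fields it returns
        self.reads += 1
        return [field + "_data" for field in fields]

fields, weight = ["Density", "Temperature"], "CellMassMsun"

old = FakeGrid()
old.read([weight])                        # separate read for the weight
for field in fields:                      # plus one read per field
    old.read([field])

new = FakeGrid()
new.read(list(set(fields + [weight])))    # one batched read

print old.reads, new.reads                # 3 vs. 1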
Modified: trunk/yt/lagos/Cosmology.py
==============================================================================
--- trunk/yt/lagos/Cosmology.py (original)
+++ trunk/yt/lagos/Cosmology.py Tue May 18 12:36:46 2010
@@ -116,6 +116,12 @@
def InverseExpansionFactor(self,z):
return 1 / self.ExpansionFactor(z)
+ def PathLengthFunction(self,z):
+ return ((1 + z)**2) * self.InverseExpansionFactor(z)
+
+ def PathLength(self, z_i, z_f):
+ return romberg(self.PathLengthFunction, z_i, z_f)
+
def sqr(x):
return (x**2.0)
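For reference, PathLengthFunction and PathLength implement the standard
absorption-distance integral. Writing E(z) for the ExpansionFactor defined
earlier in this file, the new code corresponds to (a sketch, assuming the
usual definition of the absorption distance X):

\frac{dX}{dz} = \frac{(1+z)^{2}}{E(z)}, \qquad
X(z_i, z_f) = \int_{z_i}^{z_f} \frac{(1+z)^{2}}{E(z)} \, dz

with the integral evaluated numerically by the romberg call shown above.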
Modified: trunk/yt/lagos/HaloFinding.py
==============================================================================
--- trunk/yt/lagos/HaloFinding.py (original)
+++ trunk/yt/lagos/HaloFinding.py Tue May 18 12:36:46 2010
@@ -41,7 +41,7 @@
from kd import *
from yt.funcs import *
-import math, sys, itertools, gc
+import math, sys, itertools, gc, random
from collections import defaultdict
TINY = 1.e-40
@@ -299,7 +299,7 @@
Return the HOP-identified maximum density.
"""
if self.max_dens_point is not None:
- return self._max_dens[self.id][0]
+ return self.max_dens_point[0]
max = self._mpi_allmax(self._max_dens[self.id][0])
return max
Modified: trunk/yt/lagos/hop/EnzoHop.c
==============================================================================
--- trunk/yt/lagos/hop/EnzoHop.c (original)
+++ trunk/yt/lagos/hop/EnzoHop.c Tue May 18 12:36:46 2010
@@ -137,11 +137,11 @@
PyArray_DescrFromType(NPY_FLOAT64));
fprintf(stdout, "Copying arrays for %d particles\n", num_particles);
- kd->np_masses = mass;
- kd->np_pos[0] = xpos;
- kd->np_pos[1] = ypos;
- kd->np_pos[2] = zpos;
- kd->np_densities = particle_density;
+ kd->np_masses = (npy_float64*) mass->data;
+ kd->np_pos[0] = (npy_float64*) xpos->data;
+ kd->np_pos[1] = (npy_float64*) ypos->data;
+ kd->np_pos[2] = (npy_float64*) zpos->data;
+ kd->np_densities = (npy_float64*) particle_density->data;
kd->totalmass = totalmass;
for (i = 0; i < num_particles; i++) kd->p[i].np_index = i;
@@ -274,11 +274,11 @@
totalmass /= normalize_to;
- self->kd->np_masses = self->mass;
- self->kd->np_pos[0] = self->xpos;
- self->kd->np_pos[1] = self->ypos;
- self->kd->np_pos[2] = self->zpos;
- self->kd->np_densities = self->densities;
+ self->kd->np_masses = (npy_float64 *)self->mass->data;
+ self->kd->np_pos[0] = (npy_float64 *)self->xpos->data;
+ self->kd->np_pos[1] = (npy_float64 *)self->ypos->data;
+ self->kd->np_pos[2] = (npy_float64 *)self->zpos->data;
+ self->kd->np_densities = (npy_float64 *)self->densities->data;
self->kd->totalmass = totalmass;
PrepareKD(self->kd);
Modified: trunk/yt/lagos/hop/hop_numpy.h
==============================================================================
--- trunk/yt/lagos/hop/hop_numpy.h (original)
+++ trunk/yt/lagos/hop/hop_numpy.h Tue May 18 12:36:46 2010
@@ -3,10 +3,10 @@
#include "numpy/ndarrayobject.h"
#define NP_DENS(kd, in) \
- (*(npy_float64*)PyArray_GETPTR1(kd->np_densities, kd->p[in].np_index))
+ kd->np_densities[kd->p[in].np_index]
#define NP_POS(kd, in, dim) \
- (*(npy_float64*)PyArray_GETPTR1(kd->np_pos[dim], kd->p[in].np_index))
+ kd->np_pos[dim][kd->p[in].np_index]
#define NP_MASS(kd, in) \
- (*(npy_float64*)PyArray_GETPTR1(kd->np_masses, kd->p[in].np_index))/kd->totalmass
+ (kd->np_masses[kd->p[in].np_index]/kd->totalmass)
#endif
Modified: trunk/yt/lagos/hop/kd.h
==============================================================================
--- trunk/yt/lagos/hop/kd.h (original)
+++ trunk/yt/lagos/hop/kd.h Tue May 18 12:36:46 2010
@@ -91,9 +91,9 @@
KDN *kdNodes;
int uSecond;
int uMicro;
- PyArrayObject *np_densities;
- PyArrayObject *np_pos[3];
- PyArrayObject *np_masses;
+ npy_float64 *np_densities;
+ npy_float64 *np_pos[3];
+ npy_float64 *np_masses;
float totalmass;
} * KD;
Modified: trunk/yt/raven/Callbacks.py
==============================================================================
--- trunk/yt/raven/Callbacks.py (original)
+++ trunk/yt/raven/Callbacks.py Tue May 18 12:36:46 2010
@@ -734,7 +734,7 @@
dx = (plot.xlim[1] - plot.xlim[0])/nx
dy = (plot.ylim[1] - plot.ylim[0])/ny
- unit_conversion = plot.data.hierarchy[plot.im["Unit"]]
+ unit_conversion = plot.pf[plot.im["Unit"]]
aspect = (plot.xlim[1]-plot.xlim[0])/(plot.ylim[1]-plot.ylim[0])
print "aspect ratio = ", aspect