[Yt-svn] commit/yt: 11 new changesets
Bitbucket
commits-noreply@bitbucket.org
Wed Nov 9 12:23:07 PST 2011
11 new commits in yt:
https://bitbucket.org/yt_analysis/yt/changeset/948ed53bbde3/
changeset: 948ed53bbde3
branch: yt
user: Sam Skillman
date: 2011-09-12 22:32:40
summary: Modify move_to to accept a yield_snapshots keyword (default True). If False, move_to can be used to move the camera without rendering a snapshot at each step, which is useful when only the final view matters.
affected #: 1 file
diff -r 03c0d3cc2e1bd5e4e80b122fa95b0990b868f2a8 -r 948ed53bbde3fe0fa9f55eeeca4570ff21bad9bc yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -413,7 +413,7 @@
self.zoom(f)
yield self.snapshot()
- def move_to(self, final, n_steps, final_width=None):
+ def move_to(self, final, n_steps, final_width=None, yield_snapshots=True):
r"""Loop over a look_at
This will yield `n_steps` snapshots until the current view has been
@@ -439,13 +439,15 @@
dW = None
if final_width is not None:
if not iterable(final_width):
- width = na.array([final_width, final_width, final_width]) # front/back, left/right, top/bottom
+ final_width = na.array([final_width, final_width, final_width]) # front/back, left/right, top/bottom
dW = (1.0*final_width-na.array(self.width))/n_steps
dx = (na.array(final)-self.center)*1.0/n_steps
for i in xrange(n_steps):
self.switch_view(center=self.center+dx, width=self.width+dW)
- yield self.snapshot()
-
+ if yield_snapshots:
+ yield self.snapshot()
+ else:
+ yield self
def rotate(self, theta, rot_vector=None):
r"""Rotate by a given angle
https://bitbucket.org/yt_analysis/yt/changeset/6155d020b12e/
changeset: 6155d020b12e
branch: yt
user: Sam Skillman
date: 2011-09-12 22:33:24
summary: merge.
affected #: 6 files
diff -r 948ed53bbde3fe0fa9f55eeeca4570ff21bad9bc -r 6155d020b12e93c80314d9bbe7969f668ead15e5 yt/frontends/gdf/api.py
--- a/yt/frontends/gdf/api.py
+++ b/yt/frontends/gdf/api.py
@@ -29,14 +29,15 @@
"""
from .data_structures import \
- ChomboGrid, \
- ChomboHierarchy, \
- ChomboStaticOutput
+ GDFGrid, \
+ GDFHierarchy, \
+ GDFStaticOutput
from .fields import \
- ChomboFieldContainer, \
- ChomboFieldInfo, \
- add_chombo_field
+ GDFFieldContainer, \
+ GDFFieldInfo, \
+ add_gdf_field
from .io import \
- IOHandlerChomboHDF5
+ IOHandlerGDFHDF5
+
diff -r 948ed53bbde3fe0fa9f55eeeca4570ff21bad9bc -r 6155d020b12e93c80314d9bbe7969f668ead15e5 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -24,6 +24,9 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
+import h5py
+import numpy as na
+import weakref
from yt.funcs import *
from yt.data_objects.grid_patch import \
AMRGridPatch
@@ -33,6 +36,7 @@
StaticOutput
from .fields import GDFFieldContainer
+import pdb
class GDFGrid(AMRGridPatch):
_id_offset = 0
@@ -58,6 +62,7 @@
self.dds = na.array((RE-LE)/self.ActiveDimensions)
if self.pf.dimensionality < 2: self.dds[1] = 1.0
if self.pf.dimensionality < 3: self.dds[2] = 1.0
+ # pdb.set_trace()
self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
class GDFHierarchy(AMRHierarchy):
@@ -66,6 +71,7 @@
def __init__(self, pf, data_style='grid_data_format'):
self.parameter_file = weakref.proxy(pf)
+ self.data_style = data_style
# for now, the hierarchy file is the parameter file!
self.hierarchy_filename = self.parameter_file.parameter_filename
self.directory = os.path.dirname(self.hierarchy_filename)
@@ -78,8 +84,7 @@
pass
def _detect_fields(self):
- ncomp = int(self._fhandle['/'].attrs['num_components'])
- self.field_list = [c[1] for c in self._fhandle['/'].attrs.listitems()[-ncomp:]]
+ self.field_list = self._fhandle['field_types'].keys()
def _setup_classes(self):
dd = self._get_data_reader_dict()
@@ -87,9 +92,7 @@
self.object_types.sort()
def _count_grids(self):
- self.num_grids = 0
- for lev in self._levels:
- self.num_grids += self._fhandle[lev]['Processors'].len()
+ self.num_grids = self._fhandle['/grid_parent_id'].shape[0]
def _parse_hierarchy(self):
f = self._fhandle # shortcut
@@ -98,24 +101,22 @@
# 'Chombo_global'
levels = f.listnames()[1:]
self.grids = []
- i = 0
- for lev in levels:
- level_number = int(re.match('level_(\d+)',lev).groups()[0])
- boxes = f[lev]['boxes'].value
- dx = f[lev].attrs['dx']
- for level_id, box in enumerate(boxes):
- si = na.array([box['lo_%s' % ax] for ax in 'ijk'])
- ei = na.array([box['hi_%s' % ax] for ax in 'ijk'])
- pg = self.grid(len(self.grids),self,level=level_number,
- start = si, stop = ei)
- self.grids.append(pg)
- self.grids[-1]._level_id = level_id
- self.grid_left_edge[i] = dx*si.astype(self.float_type)
- self.grid_right_edge[i] = dx*(ei.astype(self.float_type) + 1)
- self.grid_particle_count[i] = 0
- self.grid_dimensions[i] = ei - si + 1
- i += 1
+ for i, grid in enumerate(f['data'].keys()):
+ self.grids.append(self.grid(i, self, f['grid_level'][i],
+ f['grid_left_index'][i],
+ f['grid_dimensions'][i]))
+ self.grids[-1]._level_id = f['grid_level'][i]
+
+ dx = (self.parameter_file.domain_right_edge-
+ self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
+ dx = dx/self.parameter_file.refine_by**(f['grid_level'][:])
+
+ self.grid_left_edge = self.parameter_file.domain_left_edge + dx*f['grid_left_index'][:]
+ self.grid_dimensions = f['grid_dimensions'][:]
+ self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
+ self.grid_particle_count = f['grid_particle_count'][:]
self.grids = na.array(self.grids, dtype='object')
+ # pdb.set_trace()
def _populate_grid_objects(self):
for g in self.grids:
@@ -147,11 +148,9 @@
def __init__(self, filename, data_style='grid_data_format',
storage_filename = None):
StaticOutput.__init__(self, filename, data_style)
- self._handle = h5py.File(self.filename, "r")
self.storage_filename = storage_filename
+ self.filename = filename
self.field_info = self._fieldinfo_class()
- self._handle.close()
- del self._handle
def _set_units(self):
"""
@@ -168,16 +167,20 @@
self.time_units['years'] = seconds / (365*3600*24.0)
self.time_units['days'] = seconds / (3600*24.0)
# This should be improved.
+ self._handle = h5py.File(self.parameter_filename, "r")
for field_name in self._handle["/field_types"]:
- self.units[field_name] = self._handle["/%s/field_to_cgs" % field_name]
-
+ self.units[field_name] = self._handle["/field_types/%s" % field_name].attrs['field_to_cgs']
+ del self._handle
+
def _parse_parameter_file(self):
+ self._handle = h5py.File(self.parameter_filename, "r")
sp = self._handle["/simulation_parameters"].attrs
self.domain_left_edge = sp["domain_left_edge"][:]
self.domain_right_edge = sp["domain_right_edge"][:]
- self.refine_by = sp["refine_by"][:]
- self.dimensionality = sp["dimensionality"][:]
- self.current_time = sp["current_time"][:]
+ self.domain_dimensions = sp["domain_dimensions"][:]
+ self.refine_by = sp["refine_by"]
+ self.dimensionality = sp["dimensionality"]
+ self.current_time = sp["current_time"]
self.unique_identifier = sp["unique_identifier"]
self.cosmological_simulation = sp["cosmological_simulation"]
if sp["num_ghost_zones"] != 0: raise RuntimeError
@@ -191,7 +194,8 @@
else:
self.current_redshift = self.omega_lambda = self.omega_matter = \
self.hubble_constant = self.cosmological_simulation = 0.0
-
+ del self._handle
+
@classmethod
def _is_valid(self, *args, **kwargs):
try:
diff -r 948ed53bbde3fe0fa9f55eeeca4570ff21bad9bc -r 6155d020b12e93c80314d9bbe7969f668ead15e5 yt/frontends/gdf/fields.py
--- a/yt/frontends/gdf/fields.py
+++ b/yt/frontends/gdf/fields.py
@@ -1,5 +1,5 @@
"""
-Chombo-specific fields
+GDF-specific fields
Author: J. S. Oishi <jsoishi@gmail.com>
Affiliation: KIPAC/SLAC/Stanford
@@ -32,82 +32,45 @@
ValidateGridType
import yt.data_objects.universal_fields
-class ChomboFieldContainer(CodeFieldInfoContainer):
+class GDFFieldContainer(CodeFieldInfoContainer):
_shared_state = {}
_field_list = {}
-ChomboFieldInfo = ChomboFieldContainer()
-add_chombo_field = ChomboFieldInfo.add_field
+GDFFieldInfo = GDFFieldContainer()
+add_gdf_field = GDFFieldInfo.add_field
-add_field = add_chombo_field
+add_field = add_gdf_field
add_field("density", function=lambda a,b: None, take_log=True,
validators = [ValidateDataField("density")],
units=r"\rm{g}/\rm{cm}^3")
-ChomboFieldInfo["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
+GDFFieldInfo["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
-add_field("X-momentum", function=lambda a,b: None, take_log=False,
- validators = [ValidateDataField("X-Momentum")],
- units=r"",display_name=r"B_x")
-ChomboFieldInfo["X-momentum"]._projected_units=r""
+add_field("specific_energy", function=lambda a,b: None, take_log=True,
+ validators = [ValidateDataField("specific_energy")],
+ units=r"\rm{erg}/\rm{g}")
-add_field("Y-momentum", function=lambda a,b: None, take_log=False,
- validators = [ValidateDataField("Y-Momentum")],
- units=r"",display_name=r"B_y")
-ChomboFieldInfo["Y-momentum"]._projected_units=r""
+add_field("velocity_x", function=lambda a,b: None, take_log=True,
+ validators = [ValidateDataField("velocity_x")],
+ units=r"\rm{cm}/\rm{s}")
-add_field("Z-momentum", function=lambda a,b: None, take_log=False,
- validators = [ValidateDataField("Z-Momentum")],
- units=r"",display_name=r"B_z")
-ChomboFieldInfo["Z-momentum"]._projected_units=r""
+add_field("velocity_y", function=lambda a,b: None, take_log=True,
+ validators = [ValidateDataField("velocity_y")],
+ units=r"\rm{cm}/\rm{s}")
-add_field("X-magnfield", function=lambda a,b: None, take_log=False,
- validators = [ValidateDataField("X-Magnfield")],
- units=r"",display_name=r"B_x")
-ChomboFieldInfo["X-magnfield"]._projected_units=r""
+add_field("velocity_z", function=lambda a,b: None, take_log=True,
+ validators = [ValidateDataField("velocity_z")],
+ units=r"\rm{cm}/\rm{s}")
-add_field("Y-magnfield", function=lambda a,b: None, take_log=False,
- validators = [ValidateDataField("Y-Magnfield")],
- units=r"",display_name=r"B_y")
-ChomboFieldInfo["Y-magnfield"]._projected_units=r""
+add_field("mag_field_x", function=lambda a,b: None, take_log=True,
+ validators = [ValidateDataField("mag_field_x")],
+ units=r"\rm{cm}/\rm{s}")
-add_field("Z-magnfield", function=lambda a,b: None, take_log=False,
- validators = [ValidateDataField("Z-Magnfield")],
- units=r"",display_name=r"B_z")
-ChomboFieldInfo["Z-magnfield"]._projected_units=r""
+add_field("mag_field_y", function=lambda a,b: None, take_log=True,
+ validators = [ValidateDataField("mag_field_y")],
+ units=r"\rm{cm}/\rm{s}")
-def _MagneticEnergy(field,data):
- return (data["X-magnfield"]**2 +
- data["Y-magnfield"]**2 +
- data["Z-magnfield"]**2)/2.
-add_field("MagneticEnergy", function=_MagneticEnergy, take_log=True,
- units=r"",display_name=r"B^2/8\pi")
-ChomboFieldInfo["MagneticEnergy"]._projected_units=r""
-
-def _xVelocity(field, data):
- """generate x-velocity from x-momentum and density
-
- """
- return data["X-momentum"]/data["density"]
-add_field("x-velocity",function=_xVelocity, take_log=False,
- units=r'\rm{cm}/\rm{s}')
-
-def _yVelocity(field,data):
- """generate y-velocity from y-momentum and density
-
- """
- #try:
- # return data["xvel"]
- #except KeyError:
- return data["Y-momentum"]/data["density"]
-add_field("y-velocity",function=_yVelocity, take_log=False,
- units=r'\rm{cm}/\rm{s}')
-
-def _zVelocity(field,data):
- """generate z-velocity from z-momentum and density
-
- """
- return data["Z-momentum"]/data["density"]
-add_field("z-velocity",function=_zVelocity, take_log=False,
- units=r'\rm{cm}/\rm{s}')
+add_field("mag_field_z", function=lambda a,b: None, take_log=True,
+ validators = [ValidateDataField("mag_field_z")],
+ units=r"\rm{cm}/\rm{s}")
diff -r 948ed53bbde3fe0fa9f55eeeca4570ff21bad9bc -r 6155d020b12e93c80314d9bbe7969f668ead15e5 yt/frontends/gdf/io.py
--- a/yt/frontends/gdf/io.py
+++ b/yt/frontends/gdf/io.py
@@ -25,44 +25,48 @@
"""
from yt.utilities.io_handler import \
BaseIOHandler
+import h5py
-class IOHandlerChomboHDF5(BaseIOHandler):
- _data_style = "chombo_hdf5"
+class IOHandlerGDFHDF5(BaseIOHandler):
+ _data_style = "grid_data_format"
_offset_string = 'data:offsets=0'
_data_string = 'data:datatype=0'
def _field_dict(self,fhandle):
- ncomp = int(fhandle['/'].attrs['num_components'])
- temp = fhandle['/'].attrs.listitems()[-ncomp:]
- val, keys = zip(*temp)
- val = [int(re.match('component_(\d+)',v).groups()[0]) for v in val]
+ keys = fhandle['field_types'].keys()
+ val = fhandle['field_types'].keys()
+ # ncomp = int(fhandle['/'].attrs['num_components'])
+ # temp = fhandle['/'].attrs.listitems()[-ncomp:]
+ # val, keys = zip(*temp)
+ # val = [int(re.match('component_(\d+)',v).groups()[0]) for v in val]
return dict(zip(keys,val))
def _read_field_names(self,grid):
fhandle = h5py.File(grid.filename,'r')
- ncomp = int(fhandle['/'].attrs['num_components'])
-
- return [c[1] for c in f['/'].attrs.listitems()[-ncomp:]]
+ return fhandle['field_types'].keys()
def _read_data_set(self,grid,field):
fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
+ return fhandle['/data/grid_%010i/'%grid.id+field][:]
+ # field_dict = self._field_dict(fhandle)
+ # lstring = 'level_%i' % grid.Level
+ # lev = fhandle[lstring]
+ # dims = grid.ActiveDimensions
+ # boxsize = dims.prod()
+
+ # grid_offset = lev[self._offset_string][grid._level_id]
+ # start = grid_offset+field_dict[field]*boxsize
+ # stop = start + boxsize
+ # data = lev[self._data_string][start:stop]
- field_dict = self._field_dict(fhandle)
- lstring = 'level_%i' % grid.Level
- lev = fhandle[lstring]
- dims = grid.ActiveDimensions
- boxsize = dims.prod()
-
- grid_offset = lev[self._offset_string][grid._level_id]
- start = grid_offset+field_dict[field]*boxsize
- stop = start + boxsize
- data = lev[self._data_string][start:stop]
-
- return data.reshape(dims, order='F')
+ # return data.reshape(dims, order='F')
def _read_data_slice(self, grid, field, axis, coord):
sl = [slice(None), slice(None), slice(None)]
sl[axis] = slice(coord, coord + 1)
- return self._read_data_set(grid,field)[sl]
+ fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
+ return fhandle['/data/grid_%010i/'%grid.id+field][:][sl]
+ # return self._read_data_set(grid,field)[sl]
+
diff -r 948ed53bbde3fe0fa9f55eeeca4570ff21bad9bc -r 6155d020b12e93c80314d9bbe7969f668ead15e5 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -6,6 +6,7 @@
config = Configuration('frontends',parent_package,top_path)
config.make_config_py() # installs __config__.py
#config.make_svn_version_py()
+ config.add_subpackage("gdf")
config.add_subpackage("chombo")
config.add_subpackage("enzo")
config.add_subpackage("flash")
diff -r 948ed53bbde3fe0fa9f55eeeca4570ff21bad9bc -r 6155d020b12e93c80314d9bbe7969f668ead15e5 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -77,6 +77,9 @@
from yt.frontends.chombo.api import \
ChomboStaticOutput, ChomboFieldInfo, add_chombo_field
+from yt.frontends.gdf.api import \
+ GDFStaticOutput, GDFFieldInfo, add_gdf_field
+
from yt.frontends.art.api import \
ARTStaticOutput, ARTFieldInfo, add_art_field
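
A minimal sketch of the renamed GDF entry points in use (the filename is
a placeholder; data_style defaults to 'grid_data_format', which routes
I/O to IOHandlerGDFHDF5):

    from yt.frontends.gdf.api import GDFStaticOutput

    pf = GDFStaticOutput("sedov.0001.gdf")  # placeholder filename
    print pf.h.field_list                   # read from the 'field_types' group
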
https://bitbucket.org/yt_analysis/yt/changeset/e743b0ef27f1/
changeset: e743b0ef27f1
branch: yt
user: samskillman
date: 2011-09-24 20:29:24
summary: Merging
affected #: 7 files
diff -r a16954f66aefb4382c5a4f1f821d5e5635ea4417 -r e743b0ef27f1a0558ee54782aadfa762a7e1c8b7 yt/frontends/gdf/api.py
--- a/yt/frontends/gdf/api.py
+++ b/yt/frontends/gdf/api.py
@@ -29,14 +29,15 @@
"""
from .data_structures import \
- ChomboGrid, \
- ChomboHierarchy, \
- ChomboStaticOutput
+ GDFGrid, \
+ GDFHierarchy, \
+ GDFStaticOutput
from .fields import \
- ChomboFieldContainer, \
- ChomboFieldInfo, \
- add_chombo_field
+ GDFFieldContainer, \
+ GDFFieldInfo, \
+ add_gdf_field
from .io import \
- IOHandlerChomboHDF5
+ IOHandlerGDFHDF5
+
diff -r a16954f66aefb4382c5a4f1f821d5e5635ea4417 -r e743b0ef27f1a0558ee54782aadfa762a7e1c8b7 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -24,6 +24,9 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
+import h5py
+import numpy as na
+import weakref
from yt.funcs import *
from yt.data_objects.grid_patch import \
AMRGridPatch
@@ -33,6 +36,7 @@
StaticOutput
from .fields import GDFFieldContainer
+import pdb
class GDFGrid(AMRGridPatch):
_id_offset = 0
@@ -58,6 +62,7 @@
self.dds = na.array((RE-LE)/self.ActiveDimensions)
if self.pf.dimensionality < 2: self.dds[1] = 1.0
if self.pf.dimensionality < 3: self.dds[2] = 1.0
+ # pdb.set_trace()
self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
class GDFHierarchy(AMRHierarchy):
@@ -66,6 +71,7 @@
def __init__(self, pf, data_style='grid_data_format'):
self.parameter_file = weakref.proxy(pf)
+ self.data_style = data_style
# for now, the hierarchy file is the parameter file!
self.hierarchy_filename = self.parameter_file.parameter_filename
self.directory = os.path.dirname(self.hierarchy_filename)
@@ -78,8 +84,7 @@
pass
def _detect_fields(self):
- ncomp = int(self._fhandle['/'].attrs['num_components'])
- self.field_list = [c[1] for c in self._fhandle['/'].attrs.listitems()[-ncomp:]]
+ self.field_list = self._fhandle['field_types'].keys()
def _setup_classes(self):
dd = self._get_data_reader_dict()
@@ -87,9 +92,7 @@
self.object_types.sort()
def _count_grids(self):
- self.num_grids = 0
- for lev in self._levels:
- self.num_grids += self._fhandle[lev]['Processors'].len()
+ self.num_grids = self._fhandle['/grid_parent_id'].shape[0]
def _parse_hierarchy(self):
f = self._fhandle # shortcut
@@ -98,24 +101,22 @@
# 'Chombo_global'
levels = f.listnames()[1:]
self.grids = []
- i = 0
- for lev in levels:
- level_number = int(re.match('level_(\d+)',lev).groups()[0])
- boxes = f[lev]['boxes'].value
- dx = f[lev].attrs['dx']
- for level_id, box in enumerate(boxes):
- si = na.array([box['lo_%s' % ax] for ax in 'ijk'])
- ei = na.array([box['hi_%s' % ax] for ax in 'ijk'])
- pg = self.grid(len(self.grids),self,level=level_number,
- start = si, stop = ei)
- self.grids.append(pg)
- self.grids[-1]._level_id = level_id
- self.grid_left_edge[i] = dx*si.astype(self.float_type)
- self.grid_right_edge[i] = dx*(ei.astype(self.float_type) + 1)
- self.grid_particle_count[i] = 0
- self.grid_dimensions[i] = ei - si + 1
- i += 1
+ for i, grid in enumerate(f['data'].keys()):
+ self.grids.append(self.grid(i, self, f['grid_level'][i],
+ f['grid_left_index'][i],
+ f['grid_dimensions'][i]))
+ self.grids[-1]._level_id = f['grid_level'][i]
+
+ dx = (self.parameter_file.domain_right_edge-
+ self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
+ dx = dx/self.parameter_file.refine_by**(f['grid_level'][:])
+
+ self.grid_left_edge = self.parameter_file.domain_left_edge + dx*f['grid_left_index'][:]
+ self.grid_dimensions = f['grid_dimensions'][:]
+ self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
+ self.grid_particle_count = f['grid_particle_count'][:]
self.grids = na.array(self.grids, dtype='object')
+ # pdb.set_trace()
def _populate_grid_objects(self):
for g in self.grids:
@@ -147,11 +148,9 @@
def __init__(self, filename, data_style='grid_data_format',
storage_filename = None):
StaticOutput.__init__(self, filename, data_style)
- self._handle = h5py.File(self.filename, "r")
self.storage_filename = storage_filename
+ self.filename = filename
self.field_info = self._fieldinfo_class()
- self._handle.close()
- del self._handle
def _set_units(self):
"""
@@ -168,16 +167,20 @@
self.time_units['years'] = seconds / (365*3600*24.0)
self.time_units['days'] = seconds / (3600*24.0)
# This should be improved.
+ self._handle = h5py.File(self.parameter_filename, "r")
for field_name in self._handle["/field_types"]:
- self.units[field_name] = self._handle["/%s/field_to_cgs" % field_name]
-
+ self.units[field_name] = self._handle["/field_types/%s" % field_name].attrs['field_to_cgs']
+ del self._handle
+
def _parse_parameter_file(self):
+ self._handle = h5py.File(self.parameter_filename, "r")
sp = self._handle["/simulation_parameters"].attrs
self.domain_left_edge = sp["domain_left_edge"][:]
self.domain_right_edge = sp["domain_right_edge"][:]
- self.refine_by = sp["refine_by"][:]
- self.dimensionality = sp["dimensionality"][:]
- self.current_time = sp["current_time"][:]
+ self.domain_dimensions = sp["domain_dimensions"][:]
+ self.refine_by = sp["refine_by"]
+ self.dimensionality = sp["dimensionality"]
+ self.current_time = sp["current_time"]
self.unique_identifier = sp["unique_identifier"]
self.cosmological_simulation = sp["cosmological_simulation"]
if sp["num_ghost_zones"] != 0: raise RuntimeError
@@ -191,7 +194,8 @@
else:
self.current_redshift = self.omega_lambda = self.omega_matter = \
self.hubble_constant = self.cosmological_simulation = 0.0
-
+ del self._handle
+
@classmethod
def _is_valid(self, *args, **kwargs):
try:
diff -r a16954f66aefb4382c5a4f1f821d5e5635ea4417 -r e743b0ef27f1a0558ee54782aadfa762a7e1c8b7 yt/frontends/gdf/fields.py
--- a/yt/frontends/gdf/fields.py
+++ b/yt/frontends/gdf/fields.py
@@ -1,5 +1,5 @@
"""
-Chombo-specific fields
+GDF-specific fields
Author: J. S. Oishi <jsoishi@gmail.com>
Affiliation: KIPAC/SLAC/Stanford
@@ -32,82 +32,45 @@
ValidateGridType
import yt.data_objects.universal_fields
-class ChomboFieldContainer(CodeFieldInfoContainer):
+class GDFFieldContainer(CodeFieldInfoContainer):
_shared_state = {}
_field_list = {}
-ChomboFieldInfo = ChomboFieldContainer()
-add_chombo_field = ChomboFieldInfo.add_field
+GDFFieldInfo = GDFFieldContainer()
+add_gdf_field = GDFFieldInfo.add_field
-add_field = add_chombo_field
+add_field = add_gdf_field
add_field("density", function=lambda a,b: None, take_log=True,
validators = [ValidateDataField("density")],
units=r"\rm{g}/\rm{cm}^3")
-ChomboFieldInfo["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
+GDFFieldInfo["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
-add_field("X-momentum", function=lambda a,b: None, take_log=False,
- validators = [ValidateDataField("X-Momentum")],
- units=r"",display_name=r"B_x")
-ChomboFieldInfo["X-momentum"]._projected_units=r""
+add_field("specific_energy", function=lambda a,b: None, take_log=True,
+ validators = [ValidateDataField("specific_energy")],
+ units=r"\rm{erg}/\rm{g}")
-add_field("Y-momentum", function=lambda a,b: None, take_log=False,
- validators = [ValidateDataField("Y-Momentum")],
- units=r"",display_name=r"B_y")
-ChomboFieldInfo["Y-momentum"]._projected_units=r""
+add_field("velocity_x", function=lambda a,b: None, take_log=True,
+ validators = [ValidateDataField("velocity_x")],
+ units=r"\rm{cm}/\rm{s}")
-add_field("Z-momentum", function=lambda a,b: None, take_log=False,
- validators = [ValidateDataField("Z-Momentum")],
- units=r"",display_name=r"B_z")
-ChomboFieldInfo["Z-momentum"]._projected_units=r""
+add_field("velocity_y", function=lambda a,b: None, take_log=True,
+ validators = [ValidateDataField("velocity_y")],
+ units=r"\rm{cm}/\rm{s}")
-add_field("X-magnfield", function=lambda a,b: None, take_log=False,
- validators = [ValidateDataField("X-Magnfield")],
- units=r"",display_name=r"B_x")
-ChomboFieldInfo["X-magnfield"]._projected_units=r""
+add_field("velocity_z", function=lambda a,b: None, take_log=True,
+ validators = [ValidateDataField("velocity_z")],
+ units=r"\rm{cm}/\rm{s}")
-add_field("Y-magnfield", function=lambda a,b: None, take_log=False,
- validators = [ValidateDataField("Y-Magnfield")],
- units=r"",display_name=r"B_y")
-ChomboFieldInfo["Y-magnfield"]._projected_units=r""
+add_field("mag_field_x", function=lambda a,b: None, take_log=True,
+ validators = [ValidateDataField("mag_field_x")],
+ units=r"\rm{cm}/\rm{s}")
-add_field("Z-magnfield", function=lambda a,b: None, take_log=False,
- validators = [ValidateDataField("Z-Magnfield")],
- units=r"",display_name=r"B_z")
-ChomboFieldInfo["Z-magnfield"]._projected_units=r""
+add_field("mag_field_y", function=lambda a,b: None, take_log=True,
+ validators = [ValidateDataField("mag_field_y")],
+ units=r"\rm{cm}/\rm{s}")
-def _MagneticEnergy(field,data):
- return (data["X-magnfield"]**2 +
- data["Y-magnfield"]**2 +
- data["Z-magnfield"]**2)/2.
-add_field("MagneticEnergy", function=_MagneticEnergy, take_log=True,
- units=r"",display_name=r"B^2/8\pi")
-ChomboFieldInfo["MagneticEnergy"]._projected_units=r""
-
-def _xVelocity(field, data):
- """generate x-velocity from x-momentum and density
-
- """
- return data["X-momentum"]/data["density"]
-add_field("x-velocity",function=_xVelocity, take_log=False,
- units=r'\rm{cm}/\rm{s}')
-
-def _yVelocity(field,data):
- """generate y-velocity from y-momentum and density
-
- """
- #try:
- # return data["xvel"]
- #except KeyError:
- return data["Y-momentum"]/data["density"]
-add_field("y-velocity",function=_yVelocity, take_log=False,
- units=r'\rm{cm}/\rm{s}')
-
-def _zVelocity(field,data):
- """generate z-velocity from z-momentum and density
-
- """
- return data["Z-momentum"]/data["density"]
-add_field("z-velocity",function=_zVelocity, take_log=False,
- units=r'\rm{cm}/\rm{s}')
+add_field("mag_field_z", function=lambda a,b: None, take_log=True,
+ validators = [ValidateDataField("mag_field_z")],
+ units=r"\rm{cm}/\rm{s}")
diff -r a16954f66aefb4382c5a4f1f821d5e5635ea4417 -r e743b0ef27f1a0558ee54782aadfa762a7e1c8b7 yt/frontends/gdf/io.py
--- a/yt/frontends/gdf/io.py
+++ b/yt/frontends/gdf/io.py
@@ -25,44 +25,48 @@
"""
from yt.utilities.io_handler import \
BaseIOHandler
+import h5py
-class IOHandlerChomboHDF5(BaseIOHandler):
- _data_style = "chombo_hdf5"
+class IOHandlerGDFHDF5(BaseIOHandler):
+ _data_style = "grid_data_format"
_offset_string = 'data:offsets=0'
_data_string = 'data:datatype=0'
def _field_dict(self,fhandle):
- ncomp = int(fhandle['/'].attrs['num_components'])
- temp = fhandle['/'].attrs.listitems()[-ncomp:]
- val, keys = zip(*temp)
- val = [int(re.match('component_(\d+)',v).groups()[0]) for v in val]
+ keys = fhandle['field_types'].keys()
+ val = fhandle['field_types'].keys()
+ # ncomp = int(fhandle['/'].attrs['num_components'])
+ # temp = fhandle['/'].attrs.listitems()[-ncomp:]
+ # val, keys = zip(*temp)
+ # val = [int(re.match('component_(\d+)',v).groups()[0]) for v in val]
return dict(zip(keys,val))
def _read_field_names(self,grid):
fhandle = h5py.File(grid.filename,'r')
- ncomp = int(fhandle['/'].attrs['num_components'])
-
- return [c[1] for c in f['/'].attrs.listitems()[-ncomp:]]
+ return fhandle['field_types'].keys()
def _read_data_set(self,grid,field):
fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
+ return fhandle['/data/grid_%010i/'%grid.id+field][:]
+ # field_dict = self._field_dict(fhandle)
+ # lstring = 'level_%i' % grid.Level
+ # lev = fhandle[lstring]
+ # dims = grid.ActiveDimensions
+ # boxsize = dims.prod()
+
+ # grid_offset = lev[self._offset_string][grid._level_id]
+ # start = grid_offset+field_dict[field]*boxsize
+ # stop = start + boxsize
+ # data = lev[self._data_string][start:stop]
- field_dict = self._field_dict(fhandle)
- lstring = 'level_%i' % grid.Level
- lev = fhandle[lstring]
- dims = grid.ActiveDimensions
- boxsize = dims.prod()
-
- grid_offset = lev[self._offset_string][grid._level_id]
- start = grid_offset+field_dict[field]*boxsize
- stop = start + boxsize
- data = lev[self._data_string][start:stop]
-
- return data.reshape(dims, order='F')
+ # return data.reshape(dims, order='F')
def _read_data_slice(self, grid, field, axis, coord):
sl = [slice(None), slice(None), slice(None)]
sl[axis] = slice(coord, coord + 1)
- return self._read_data_set(grid,field)[sl]
+ fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
+ return fhandle['/data/grid_%010i/'%grid.id+field][:][sl]
+ # return self._read_data_set(grid,field)[sl]
+
diff -r a16954f66aefb4382c5a4f1f821d5e5635ea4417 -r e743b0ef27f1a0558ee54782aadfa762a7e1c8b7 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -6,6 +6,7 @@
config = Configuration('frontends',parent_package,top_path)
config.make_config_py() # installs __config__.py
#config.make_svn_version_py()
+ config.add_subpackage("gdf")
config.add_subpackage("chombo")
config.add_subpackage("enzo")
config.add_subpackage("flash")
diff -r a16954f66aefb4382c5a4f1f821d5e5635ea4417 -r e743b0ef27f1a0558ee54782aadfa762a7e1c8b7 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -77,6 +77,9 @@
from yt.frontends.chombo.api import \
ChomboStaticOutput, ChomboFieldInfo, add_chombo_field
+from yt.frontends.gdf.api import \
+ GDFStaticOutput, GDFFieldInfo, add_gdf_field
+
from yt.frontends.art.api import \
ARTStaticOutput, ARTFieldInfo, add_art_field
diff -r a16954f66aefb4382c5a4f1f821d5e5635ea4417 -r e743b0ef27f1a0558ee54782aadfa762a7e1c8b7 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -413,7 +413,7 @@
self.zoom(f)
yield self.snapshot()
- def move_to(self, final, n_steps, final_width=None):
+ def move_to(self, final, n_steps, final_width=None, yield_snapshots=True):
r"""Loop over a look_at
This will yield `n_steps` snapshots until the current view has been
@@ -439,13 +439,15 @@
dW = None
if final_width is not None:
if not iterable(final_width):
- width = na.array([final_width, final_width, final_width]) # front/back, left/right, top/bottom
+ final_width = na.array([final_width, final_width, final_width]) # front/back, left/right, top/bottom
dW = (1.0*final_width-na.array(self.width))/n_steps
dx = (na.array(final)-self.center)*1.0/n_steps
for i in xrange(n_steps):
self.switch_view(center=self.center+dx, width=self.width+dW)
- yield self.snapshot()
-
+ if yield_snapshots:
+ yield self.snapshot()
+ else:
+ yield self
def rotate(self, theta, rot_vector=None):
r"""Rotate by a given angle
https://bitbucket.org/yt_analysis/yt/changeset/4cdba5efd298/
changeset: 4cdba5efd298
branch: yt
user: samskillman
date: 2011-09-24 20:33:26
summary: Adding a translation dictionary for gdf fields so that things like
Density are picked up, and VelocityMagnitude works. Current Status of
GDF, as tested by converting a unigrid Athena dataset:
Slices - Works, though some pixels are not being filled in some situations, leading to zeros.
Projections - Works
Rendering - Works with no_ghost=True
Phase Diagrams - Works
Still work to do:
Ghost zones are not working
Multiple grid datasets have not been tested.
CGS Conversion
affected #: 2 files
diff -r e743b0ef27f1a0558ee54782aadfa762a7e1c8b7 -r 4cdba5efd298e38896efacee3b0054ae13a6d036 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -162,7 +162,8 @@
self._parse_parameter_file()
self.time_units['1'] = 1
self.units['1'] = 1.0
- self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_right_edge).max()
+ self.units['cm'] = 1.0
+ self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
seconds = 1
self.time_units['years'] = seconds / (365*3600*24.0)
self.time_units['days'] = seconds / (3600*24.0)
@@ -206,4 +207,6 @@
pass
return False
-
+ def __repr__(self):
+ return self.basename.rsplit(".", 1)[0]
+
diff -r e743b0ef27f1a0558ee54782aadfa762a7e1c8b7 -r 4cdba5efd298e38896efacee3b0054ae13a6d036 yt/frontends/gdf/fields.py
--- a/yt/frontends/gdf/fields.py
+++ b/yt/frontends/gdf/fields.py
@@ -32,6 +32,13 @@
ValidateGridType
import yt.data_objects.universal_fields
+log_translation_dict = {"Density": "density",
+ "Pressure": "pressure"}
+
+translation_dict = {"x-velocity": "velocity_x",
+ "y-velocity": "velocity_y",
+ "z-velocity": "velocity_z"}
+
class GDFFieldContainer(CodeFieldInfoContainer):
_shared_state = {}
_field_list = {}
@@ -50,27 +57,45 @@
validators = [ValidateDataField("specific_energy")],
units=r"\rm{erg}/\rm{g}")
-add_field("velocity_x", function=lambda a,b: None, take_log=True,
+add_field("pressure", function=lambda a,b: None, take_log=True,
+ validators = [ValidateDataField("pressure")],
+ units=r"\rm{erg}/\rm{g}")
+
+add_field("velocity_x", function=lambda a,b: None, take_log=False,
validators = [ValidateDataField("velocity_x")],
units=r"\rm{cm}/\rm{s}")
-add_field("velocity_y", function=lambda a,b: None, take_log=True,
+add_field("velocity_y", function=lambda a,b: None, take_log=False,
validators = [ValidateDataField("velocity_y")],
units=r"\rm{cm}/\rm{s}")
-add_field("velocity_z", function=lambda a,b: None, take_log=True,
+add_field("velocity_z", function=lambda a,b: None, take_log=False,
validators = [ValidateDataField("velocity_z")],
units=r"\rm{cm}/\rm{s}")
-add_field("mag_field_x", function=lambda a,b: None, take_log=True,
+add_field("mag_field_x", function=lambda a,b: None, take_log=False,
validators = [ValidateDataField("mag_field_x")],
units=r"\rm{cm}/\rm{s}")
-add_field("mag_field_y", function=lambda a,b: None, take_log=True,
+add_field("mag_field_y", function=lambda a,b: None, take_log=False,
validators = [ValidateDataField("mag_field_y")],
units=r"\rm{cm}/\rm{s}")
-add_field("mag_field_z", function=lambda a,b: None, take_log=True,
+add_field("mag_field_z", function=lambda a,b: None, take_log=False,
validators = [ValidateDataField("mag_field_z")],
units=r"\rm{cm}/\rm{s}")
-
+
+def _get_alias(alias):
+ def _alias(field, data):
+ return data[alias]
+ return _alias
+
+def _generate_translation(mine, theirs ,log_field=True):
+ add_field(theirs, function=_get_alias(mine), take_log=log_field)
+
+for f,v in log_translation_dict.items():
+ _generate_translation(v, f, log_field=True)
+
+for f,v in translation_dict.items():
+ _generate_translation(v, f, log_field=False)
+
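
With the aliases registered, canonical yt field names resolve onto the
on-disk GDF names; a sketch (the dataset name is hypothetical):

    from yt.frontends.gdf.api import GDFStaticOutput

    pf = GDFStaticOutput("athena_unigrid.gdf")  # hypothetical converted dump
    dd = pf.h.all_data()
    rho = dd["Density"]      # alias onto on-disk "density", log-scaled
    vx = dd["x-velocity"]    # alias onto "velocity_x", linear scale
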
https://bitbucket.org/yt_analysis/yt/changeset/45551605b94d/
changeset: 45551605b94d
branch: yt
user: samskillman
date: 2011-09-26 19:14:01
summary: Adding an interactive camera object that displays the current rendering and transfer function. It needs further work and documentation if it proves useful.
affected #: 2 files
diff -r 4cdba5efd298e38896efacee3b0054ae13a6d036 -r 45551605b94d266fa189971dfa75eee702d9b133 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -517,6 +517,62 @@
data_object_registry["camera"] = Camera
+class InteractiveCamera(Camera):
+ def __init__(self, center, normal_vector, width,
+ resolution, transfer_function,
+ north_vector = None, steady_north=False,
+ volume = None, fields = None,
+ log_fields = None,
+ sub_samples = 5, pf = None,
+ use_kd=True, l_max=None, no_ghost=True,
+ tree_type='domain',expand_factor=1.0,
+ le=None, re=None):
+ self.frames = []
+ Camera.__init__(self, center, normal_vector, width,
+ resolution, transfer_function,
+ north_vector = north_vector, steady_north=steady_north,
+ volume = volume, fields = fields,
+ log_fields = log_fields,
+ sub_samples = sub_samples, pf = pf,
+ use_kd=use_kd, l_max=l_max, no_ghost=no_ghost,
+ tree_type=tree_type,expand_factor=expand_factor,
+ le=le, re=re)
+
+ def snapshot(self, fn = None, clip_ratio = None):
+ import matplotlib
+ matplotlib.pylab.figure(2)
+ self.transfer_function.show()
+ matplotlib.pylab.draw()
+ im = Camera.snapshot(self, fn, clip_ratio)
+ matplotlib.pylab.figure(1)
+ matplotlib.pylab.imshow(im/im.max())
+ matplotlib.pylab.draw()
+ self.frames.append(im)
+
+ def rotation(self, theta, n_steps, rot_vector=None):
+ for frame in Camera.rotation(self, theta, n_steps, rot_vector):
+ if frame is not None:
+ self.frames.append(frame)
+
+ def zoomin(self, final, n_steps):
+ for frame in Camera.zoomin(self, final, n_steps):
+ if frame is not None:
+ self.frames.append(frame)
+
+ def clear_frames(self):
+ del self.frames
+ self.frames = []
+
+ def save_frames(self, basename, clip_ratio=None):
+ for i, frame in enumerate(self.frames):
+ fn = basename + '_%04i.png'%i
+ if clip_ratio is not None:
+ write_bitmap(frame, fn, clip_ratio*image.std())
+ else:
+ write_bitmap(frame, fn)
+
+data_object_registry["interactive_camera"] = InteractiveCamera
+
class PerspectiveCamera(Camera):
def get_vector_plane(self, image):
# We should move away from pre-generation of vectors like this and into
diff -r 4cdba5efd298e38896efacee3b0054ae13a6d036 -r 45551605b94d266fa189971dfa75eee702d9b133 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -199,6 +199,28 @@
pylab.ylim(0.0, 1.0)
pylab.savefig(filename)
+ def show(self):
+ r"""Display an image of the transfer function
+
+ This function loads up matplotlib and displays the current transfer function.
+
+ Parameters
+ ----------
+
+ Examples
+ --------
+
+ >>> tf = TransferFunction( (-10.0, -5.0) )
+ >>> tf.add_gaussian(-9.0, 0.01, 1.0)
+ >>> tf.show()
+ """
+ import matplotlib;import pylab
+ pylab.clf()
+ pylab.plot(self.x, self.y, 'xk-')
+ pylab.xlim(*self.x_bounds)
+ pylab.ylim(0.0, 1.0)
+ pylab.draw()
+
class MultiVariateTransferFunction(object):
def __init__(self):
r"""This object constructs a set of field tables that allow for
@@ -447,6 +469,46 @@
ax.set_xlabel("Value")
pyplot.savefig(filename)
+ def show(self):
+ r"""Display an image of the transfer function
+
+ This function loads up matplotlib and displays the current transfer function.
+
+ Parameters
+ ----------
+
+ Examples
+ --------
+
+ >>> tf = TransferFunction( (-10.0, -5.0) )
+ >>> tf.add_gaussian(-9.0, 0.01, 1.0)
+ >>> tf.show()
+ """
+ from matplotlib import pyplot
+ from matplotlib.ticker import FuncFormatter
+ pyplot.clf()
+ ax = pyplot.axes()
+ i_data = na.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
+ i_data[:,:,0] = na.outer(na.ones(self.alpha.x.size), self.funcs[0].y)
+ i_data[:,:,1] = na.outer(na.ones(self.alpha.x.size), self.funcs[1].y)
+ i_data[:,:,2] = na.outer(na.ones(self.alpha.x.size), self.funcs[2].y)
+ ax.imshow(i_data, origin='lower')
+ ax.fill_between(na.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
+ ax.set_xlim(0, self.alpha.x.size)
+ xticks = na.arange(na.ceil(self.alpha.x[0]), na.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
+ xticks *= self.alpha.x.size / (self.alpha.x[-1] - self.alpha.x[0])
+ ax.xaxis.set_ticks(xticks)
+ def x_format(x, pos):
+ return "%.1f" % (x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size) + self.alpha.x[0])
+ ax.xaxis.set_major_formatter(FuncFormatter(x_format))
+ yticks = na.linspace(0,1,5) * self.alpha.y.size
+ ax.yaxis.set_ticks(yticks)
+ def y_format(y, pos):
+ return (y / self.alpha.y.size)
+ ax.yaxis.set_major_formatter(FuncFormatter(y_format))
+ ax.set_ylabel("Transmission")
+ ax.set_xlabel("Value")
+
def sample_colormap(self, v, w, alpha=None, colormap="gist_stern", col_bounds=None):
r"""Add a Gaussian based on an existing colormap.
https://bitbucket.org/yt_analysis/yt/changeset/c894d8fce6b3/
changeset: c894d8fce6b3
branch: yt
user: samskillman
date: 2011-09-28 18:23:08
summary: Modifying the gdf frontend: compute cell widths per grid level, record num_ghost_zones, and hardcode HydroMethod=0 until field staggering is supported.
affected #: 2 files
diff -r 45551605b94d266fa189971dfa75eee702d9b133 -r c894d8fce6b3f3c3773da898db2e56449daa9764 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -95,11 +95,12 @@
self.num_grids = self._fhandle['/grid_parent_id'].shape[0]
def _parse_hierarchy(self):
- f = self._fhandle # shortcut
+ f = self._fhandle
# this relies on the first Group in the H5 file being
# 'Chombo_global'
levels = f.listnames()[1:]
+ dxs=[]
self.grids = []
for i, grid in enumerate(f['data'].keys()):
self.grids.append(self.grid(i, self, f['grid_level'][i],
@@ -107,10 +108,11 @@
f['grid_dimensions'][i]))
self.grids[-1]._level_id = f['grid_level'][i]
- dx = (self.parameter_file.domain_right_edge-
- self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
- dx = dx/self.parameter_file.refine_by**(f['grid_level'][:])
-
+ dx = (self.parameter_file.domain_right_edge-
+ self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
+ dx = dx/self.parameter_file.refine_by**(f['grid_level'][i])
+ dxs.append(dx)
+ dx = na.array(dxs)
self.grid_left_edge = self.parameter_file.domain_left_edge + dx*f['grid_left_index'][:]
self.grid_dimensions = f['grid_dimensions'][:]
self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
@@ -150,7 +152,7 @@
StaticOutput.__init__(self, filename, data_style)
self.storage_filename = storage_filename
self.filename = filename
- self.field_info = self._fieldinfo_class()
+ self.field_info = self._fieldinfo_class()
def _set_units(self):
"""
@@ -185,6 +187,7 @@
self.unique_identifier = sp["unique_identifier"]
self.cosmological_simulation = sp["cosmological_simulation"]
if sp["num_ghost_zones"] != 0: raise RuntimeError
+ self.num_ghost_zones = sp["num_ghost_zones"]
self.field_ordering = sp["field_ordering"]
self.boundary_conditions = sp["boundary_conditions"][:]
if self.cosmological_simulation:
@@ -195,6 +198,8 @@
else:
self.current_redshift = self.omega_lambda = self.omega_matter = \
self.hubble_constant = self.cosmological_simulation = 0.0
+ self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
+
del self._handle
@classmethod
diff -r 45551605b94d266fa189971dfa75eee702d9b133 -r c894d8fce6b3f3c3773da898db2e56449daa9764 yt/frontends/gdf/fields.py
--- a/yt/frontends/gdf/fields.py
+++ b/yt/frontends/gdf/fields.py
@@ -39,6 +39,10 @@
"y-velocity": "velocity_y",
"z-velocity": "velocity_z"}
+# translation_dict = {"mag_field_x": "cell_centered_B_x ",
+# "mag_field_y": "cell_centered_B_y ",
+# "mag_field_z": "cell_centered_B_z "}
+
class GDFFieldContainer(CodeFieldInfoContainer):
_shared_state = {}
_field_list = {}
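
The per-grid cell-width logic above, reduced to a standalone sketch with
made-up domain values:

    import numpy as na  # the codebase's numpy alias

    domain_left_edge = na.array([0.0, 0.0, 0.0])
    domain_right_edge = na.array([1.0, 1.0, 1.0])
    domain_dimensions = na.array([32, 32, 32])
    refine_by = 2
    grid_levels = na.array([0, 1, 1, 2])  # one entry per grid

    # Base-level width divided by refine_by**level, one row per grid,
    # mirroring the dxs loop in _parse_hierarchy above.
    base_dx = (domain_right_edge - domain_left_edge)/domain_dimensions
    dx = na.array([base_dx/refine_by**lev for lev in grid_levels])
    print dx[-1]  # the level-2 grid: base_dx/4
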
https://bitbucket.org/yt_analysis/yt/changeset/67fc87eea9cb/
changeset: 67fc87eea9cb
branch: yt
user: samskillman
date: 2011-11-01 20:54:46
summary: Merging.
affected #: 100 files
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -257,11 +257,12 @@
cd $1
if [ ! -z `echo $1 | grep h5py` ]
then
- echo "${DEST_DIR}/bin/python2.7 setup.py configure --hdf5=${HDF5_DIR}"
- ( ${DEST_DIR}/bin/python2.7 setup.py configure --hdf5=${HDF5_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
+ shift
+ ( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
+ else
+ shift
+ ( ${DEST_DIR}/bin/python2.7 setup.py build $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
fi
- shift
- ( ${DEST_DIR}/bin/python2.7 setup.py build $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
( ${DEST_DIR}/bin/python2.7 setup.py install 2>&1 ) 1>> ${LOG_FILE} || do_exit
touch done
cd ..
@@ -292,7 +293,7 @@
if [ -z "$HDF5_DIR" ]
then
echo "Downloading HDF5"
- get_enzotools hdf5-1.8.6.tar.gz
+ get_enzotools hdf5-1.8.7.tar.gz
fi
[ $INST_ZLIB -eq 1 ] && get_enzotools zlib-1.2.3.tar.bz2
@@ -300,17 +301,17 @@
[ $INST_PNG -eq 1 ] && get_enzotools libpng-1.2.43.tar.gz
[ $INST_FTYPE -eq 1 ] && get_enzotools freetype-2.4.4.tar.gz
[ $INST_SQLITE3 -eq 1 ] && get_enzotools sqlite-autoconf-3070500.tar.gz
-get_enzotools Python-2.7.1.tgz
-get_enzotools numpy-1.5.1.tar.gz
-get_enzotools matplotlib-1.0.0.tar.gz
+get_enzotools Python-2.7.2.tgz
+get_enzotools numpy-1.6.1.tar.gz
+get_enzotools matplotlib-1.1.0.tar.gz
get_enzotools mercurial-1.8.1.tar.gz
get_enzotools ipython-0.10.tar.gz
-get_enzotools h5py-1.3.1.tar.gz
-get_enzotools Cython-0.15.tar.gz
-get_enzotools Forthon-0.8.4.tar.gz
+get_enzotools h5py-2.0.1.tar.gz
+get_enzotools Cython-0.15.1.tar.gz
+get_enzotools Forthon-0.8.5.tar.gz
get_enzotools ext-3.3.2.zip
get_enzotools ext-slate-110328.zip
-get_enzotools PhiloGL-1.1.0.zip
+get_enzotools PhiloGL-1.4.2.zip
if [ $INST_BZLIB -eq 1 ]
then
@@ -392,11 +393,11 @@
if [ -z "$HDF5_DIR" ]
then
- if [ ! -e hdf5-1.8.6/done ]
+ if [ ! -e hdf5-1.8.7/done ]
then
- [ ! -e hdf5-1.8.6 ] && tar xfz hdf5-1.8.6.tar.gz
+ [ ! -e hdf5-1.8.7 ] && tar xfz hdf5-1.8.7.tar.gz
echo "Installing HDF5"
- cd hdf5-1.8.6
+ cd hdf5-1.8.7
( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
touch done
@@ -422,11 +423,11 @@
fi
fi
-if [ ! -e Python-2.7.1/done ]
+if [ ! -e Python-2.7.2/done ]
then
echo "Installing Python. This may take a while, but don't worry. YT loves you."
- [ ! -e Python-2.7.1 ] && tar xfz Python-2.7.1.tgz
- cd Python-2.7.1
+ [ ! -e Python-2.7.2 ] && tar xfz Python-2.7.2.tgz
+ cd Python-2.7.2
( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -488,7 +489,7 @@
echo "Installing pip"
( ${DEST_DIR}/bin/easy_install-2.7 pip 2>&1 ) 1>> ${LOG_FILE} || do_exit
-do_setup_py numpy-1.5.1 ${NUMPY_ARGS}
+do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
if [ -n "${MPL_SUPP_LDFLAGS}" ]
then
@@ -509,10 +510,10 @@
echo "Setting CFLAGS ${CFLAGS}"
fi
# Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.0.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.0.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.0.0/setup.cfg
-do_setup_py matplotlib-1.0.0
+mkdir -p ${DEST_DIR}/src/matplotlib-1.1.0
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.1.0/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.1.0/setup.cfg
+do_setup_py matplotlib-1.1.0
if [ -n "${OLD_LDFLAGS}" ]
then
export LDFLAG=${OLD_LDFLAGS}
@@ -521,9 +522,9 @@
[ -n "${OLD_CXXFLAGS}" ] && export CXXFLAGS=${OLD_CXXFLAGS}
[ -n "${OLD_CFLAGS}" ] && export CFLAGS=${OLD_CFLAGS}
do_setup_py ipython-0.10
-do_setup_py h5py-1.3.1
-do_setup_py Cython-0.15
-[ $INST_FORTHON -eq 1 ] && do_setup_py Forthon-0.8.4
+do_setup_py h5py-2.0.1
+do_setup_py Cython-0.15.1
+[ $INST_FORTHON -eq 1 ] && do_setup_py Forthon-0.8.5
echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"
MY_PWD=`pwd`
@@ -566,12 +567,12 @@
fi
# Now we open up PhiloGL
-if [ ! -e PhiloGL-1.1.0/done ]
+if [ ! -e PhiloGL-1.4.2/done ]
then
- ( unzip -o PhiloGL-1.1.0.zip 2>&1 ) 1>> ${LOG_FILE} || do_exit
- ( echo "Symlinking PhiloGL-1.1.0 as PhiloGL" 2>&1 ) 1>> ${LOG_FILE}
- ln -sf PhiloGL-1.1.0 PhiloGL
- touch PhiloGL-1.1.0/done
+ ( unzip -o PhiloGL-1.4.2.zip 2>&1 ) 1>> ${LOG_FILE} || do_exit
+ ( echo "Symlinking PhiloGL-1.4.2 as PhiloGL" 2>&1 ) 1>> ${LOG_FILE}
+ ln -sf PhiloGL-1.4.2 PhiloGL
+ touch PhiloGL-1.4.2/done
fi
if [ -e $HOME/.matplotlib/fontList.cache ] && \
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -150,7 +150,7 @@
return self[self._key_numbers[key]]
return UserDict.__getitem__(self, key)
def __iter__(self):
- return itertools.chain(self.data.iterkeys(),
+ return itertools.chain(self.field_data.iterkeys(),
self._key_numbers.iterkeys())
def __repr__(self):
s = "{" + ", \n ".join(
@@ -158,9 +158,9 @@
for i in sorted(self._key_numbers)]) + "}"
return s
def has_key(self, key):
- return self.data.has_key(key) or self._key_numbers.has_key(key)
+ return self.field_data.has_key(key) or self._key_numbers.has_key(key)
def keys(self):
- return self.data.key(key) + self._key_numbers.key(key)
+ return self.field_data.key(key) + self._key_numbers.key(key)
pfs = ParameterFileDict()
pcs = []
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da tests/halos.py
--- /dev/null
+++ b/tests/halos.py
@@ -0,0 +1,10 @@
+from yt.utilities.answer_testing.output_tests import \
+ SingleOutputTest, create_test
+from yt.utilities.answer_testing.halo_tests import \
+ TestHaloCountHOP, TestHaloCountFOF, TestHaloCountPHOP
+
+create_test(TestHaloCountHOP, "halo_count_HOP", threshold=80.0)
+
+create_test(TestHaloCountFOF, "halo_count_FOF", link=0.2, padding=0.02)
+
+create_test(TestHaloCountPHOP, "halo_count_PHOP", threshold=80.0)
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da tests/object_field_values.py
--- a/tests/object_field_values.py
+++ b/tests/object_field_values.py
@@ -3,7 +3,7 @@
from yt.utilities.answer_testing.output_tests import \
YTStaticOutputTest, RegressionTestException, create_test
-from yt.funcs import ensure_list
+from yt.funcs import ensure_list, iterable
from fields_to_test import field_list, particle_field_list
class FieldHashesDontMatch(RegressionTestException):
@@ -16,26 +16,50 @@
return func
@register_object
-def centered_sphere(self):
- center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
- width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
- self.data_object = self.pf.h.sphere(center, width/0.25)
+def centered_sphere(tobj):
+ center = 0.5*(tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
+ width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+ tobj.data_object = tobj.pf.h.sphere(center, width/0.25)
@register_object
-def off_centered_sphere(self):
- center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
- width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
- self.data_object = self.pf.h.sphere(center - 0.25 * width, width/0.25)
+def off_centered_sphere(tobj):
+ center = 0.5*(tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
+ width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+ tobj.data_object = tobj.pf.h.sphere(center - 0.25 * width, width/0.25)
@register_object
-def corner_sphere(self):
- width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
- self.data_object = self.pf.h.sphere(self.pf.domain_left_edge, width/0.25)
+def corner_sphere(tobj):
+ width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+ tobj.data_object = tobj.pf.h.sphere(tobj.pf.domain_left_edge, width/0.25)
@register_object
+def disk(self):
+ center = (self.pf.domain_right_edge + self.pf.domain_left_edge)/2.
+ radius = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()/10.
+ height = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()/10.
+ normal = na.array([1.]*3)
+ self.data_object = self.pf.h.disk(center, normal, radius, height)
+
+@register_object
def all_data(self):
self.data_object = self.pf.h.all_data()
+_new_known_objects = {}
+for field in ["Density"]:#field_list:
+ for object_name in known_objects:
+ def _rfunc(oname, fname):
+ def func(tobj):
+ known_objects[oname](tobj)
+ tobj.orig_data_object = tobj.data_object
+ avg_value = tobj.orig_data_object.quantities[
+ "WeightedAverageQuantity"](fname, "Density")
+ tobj.data_object = tobj.orig_data_object.cut_region(
+ ["grid['%s'] > %s" % (fname, avg_value)])
+ return func
+ _new_known_objects["%s_cut_region_%s" % (object_name, field)] = \
+ _rfunc(object_name, field)
+known_objects.update(_new_known_objects)
+
class YTFieldValuesTest(YTStaticOutputTest):
def run(self):
vals = self.data_object[self.field].copy()
@@ -51,6 +75,69 @@
for object_name in known_objects:
for field in field_list + particle_field_list:
+ if "cut_region" in object_name and field in particle_field_list:
+ continue
create_test(YTFieldValuesTest, "%s_%s" % (object_name, field),
field = field, object_name = object_name)
+
+class YTDerivedQuantityTest(YTStaticOutputTest):
+ def setup(self):
+ YTStaticOutputTest.setup(self)
+ known_objects[self.object_name](self)
+ def compare(self, old_result):
+ if hasattr(self.result, 'tostring'):
+ self.compare_array_delta(self.result, old_result, 1e-7)
+ return
+ elif iterable(self.result):
+ a1 = na.array(self.result)
+ a2 = na.array(old_result)
+ self.compare_array_delta(a1, a2, 1e-7)
+ else:
+ if self.result != old_result: raise FieldHashesDontMatch
+
+ def run(self):
+ # This only works if it takes no arguments
+ self.result = self.data_object.quantities[self.dq_name]()
+
+dq_names = ["TotalMass", "AngularMomentumVector", "CenterOfMass",
+ "BulkVelocity", "BaryonSpinParameter", "ParticleSpinParameter"]
+
+# Extrema, WeightedAverageQuantity, TotalQuantity, MaxLocation,
+# MinLocation
+
+for object_name in known_objects:
+ for dq in dq_names:
+ # Some special exceptions
+ if "cut_region" in object_name and (
+ "SpinParameter" in dq or
+ "TotalMass" in dq):
+ continue
+ create_test(YTDerivedQuantityTest, "%s_%s" % (object_name, dq),
+ dq_name = dq, object_name = object_name)
+
+class YTDerivedQuantityTestField(YTDerivedQuantityTest):
+ def run(self):
+ self.result = self.data_object.quantities[self.dq_name](
+ self.field_name)
+
+for object_name in known_objects:
+ for field in field_list:
+ for dq in ["Extrema", "TotalQuantity", "MaxLocation", "MinLocation"]:
+ create_test(YTDerivedQuantityTestField,
+ "%s_%s" % (object_name, field),
+ field_name = field, dq_name = dq,
+ object_name = object_name)
+
+class YTDerivedQuantityTest_WeightedAverageQuantity(YTDerivedQuantityTest):
+ def run(self):
+ self.result = self.data_object.quantities["WeightedAverageQuantity"](
+ self.field_name, weight="CellMassMsun")
+
+for object_name in known_objects:
+ for field in field_list:
+ create_test(YTDerivedQuantityTest_WeightedAverageQuantity,
+ "%s_%s" % (object_name, field),
+ field_name = field,
+ object_name = object_name)
+
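
The _rfunc wrapper above is the standard fix for late-binding closures
inside a loop; in isolation:

    # Each call to _capture freezes the current loop value; without the
    # wrapper, every generated function would see the *final* value.
    funcs = []
    for fname in ["Density", "Temperature"]:
        def _capture(fname):
            def func():
                return fname
            return func
        funcs.append(_capture(fname))
    print funcs[0]()  # -> Density (unwrapped, it would print Temperature)
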
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da tests/projections.py
--- a/tests/projections.py
+++ b/tests/projections.py
@@ -1,16 +1,35 @@
from yt.utilities.answer_testing.output_tests import \
SingleOutputTest, create_test
from yt.utilities.answer_testing.hydro_tests import \
- TestProjection, TestGasDistribution
+ TestProjection, TestOffAxisProjection, TestSlice, \
+ TestRay, TestGasDistribution, Test2DGasDistribution
+
from fields_to_test import field_list
+for field in field_list:
+ create_test(TestRay, "%s" % field, field = field)
+
for axis in range(3):
for field in field_list:
- create_test(TestProjection, "projection_test_%s_%s" % (axis, field),
+ create_test(TestSlice, "%s_%s" % (axis, field),
field = field, axis = axis)
- create_test(TestProjection, "projection_test_%s_%s_Density" % (axis, field),
+
+for axis in range(3):
+ for field in field_list:
+ create_test(TestProjection, "%s_%s" % (axis, field),
+ field = field, axis = axis)
+ create_test(TestProjection, "%s_%s_Density" % (axis, field),
field = field, axis = axis, weight_field = "Density")
for field in field_list:
- create_test(TestGasDistribution, "profile_density_test_%s" % field,
+ create_test(TestOffAxisProjection, "%s_%s" % (axis, field),
+ field = field, axis = axis)
+ create_test(TestOffAxisProjection, "%s_%s_Density" % (axis, field),
+ field = field, axis = axis, weight_field = "Density")
+
+for field in field_list:
+ create_test(TestGasDistribution, "density_%s" % field,
field_x = "Density", field_y = field)
+ create_test(Test2DGasDistribution, "density_x-vel_%s" % field,
+ field_x = "Density", field_y = "x-velocity", field_z = field,
+ weight = "CellMassMsun")
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da tests/runall.py
--- a/tests/runall.py
+++ b/tests/runall.py
@@ -1,3 +1,4 @@
+import matplotlib; matplotlib.use('Agg')
from yt.config import ytcfg
ytcfg["yt","loglevel"] = "50"
ytcfg["yt","serialize"] = "False"
@@ -6,7 +7,6 @@
RegressionTestRunner, clear_registry, create_test, \
TestFieldStatistics, TestAllProjections, registry_entries, \
Xunit
-
from yt.utilities.command_line import get_yt_version
from yt.mods import *
@@ -14,6 +14,7 @@
import imp
import optparse
import itertools
+import time
#
# We assume all tests are to be run, unless explicitly given the name of a
@@ -48,9 +49,13 @@
return mapping
if __name__ == "__main__":
+ clear_registry()
mapping = find_and_initialize_tests()
test_storage_directory = ytcfg.get("yt","test_storage_dir")
- my_hash = get_yt_version()
+ try:
+ my_hash = get_yt_version()
+ except:
+ my_hash = "UNKNOWN%s" % (time.time())
parser = optparse.OptionParser()
parser.add_option("-f", "--parameter-file", dest="parameter_file",
default = os.path.join(cwd, "DD0010/moving7_0010"),
@@ -71,7 +76,15 @@
help = "The name we'll call this set of tests")
opts, args = parser.parse_args()
if opts.list_tests:
- print "\n ".join(sorted(itertools.chain(*mapping.values())))
+ tests_to_run = []
+ for m, vals in mapping.items():
+ new_tests = fnmatch.filter(vals, opts.test_pattern)
+ if len(new_tests) == 0: continue
+ load_tests(m, cwd)
+ keys = set(registry_entries())
+ tests_to_run += [t for t in new_tests if t in keys]
+ tests = list(set(tests_to_run))
+ print "\n ".join(tests)
sys.exit(0)
pf = load(opts.parameter_file)
if pf is None:
@@ -93,11 +106,19 @@
for m, vals in mapping.items():
new_tests = fnmatch.filter(vals, opts.test_pattern)
if len(new_tests) == 0: continue
- tests_to_run += new_tests
load_tests(m, cwd)
+ keys = set(registry_entries())
+ tests_to_run += [t for t in new_tests if t in keys]
for test_name in sorted(tests_to_run):
+ print "RUNNING TEST", test_name
rtr.run_test(test_name)
if watcher is not None:
rtr.watcher.report()
+ failures = 0
+ passes = 1
for test_name, result in sorted(rtr.passed_tests.items()):
print "TEST %s: %s" % (test_name, result)
+ if result: passes += 1
+ else: failures += 1
+ print "Number of passes : %s" % passes
+ print "Number of failures: %s" % failures
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da tests/volume_rendering.py
--- /dev/null
+++ b/tests/volume_rendering.py
@@ -0,0 +1,38 @@
+from yt.mods import *
+import numpy as na
+
+from yt.utilities.answer_testing.output_tests import \
+ YTStaticOutputTest, RegressionTestException
+from yt.funcs import ensure_list
+
+class VolumeRenderingInconsistent(RegressionTestException):
+ pass
+
+class VolumeRenderingConsistency(YTStaticOutputTest):
+ name = "volume_rendering_consistency"
+ def run(self):
+ c = (self.pf.domain_right_edge+self.pf.domain_left_edge)/2.
+ W = na.sqrt(3.)*(self.pf.domain_right_edge-self.pf.domain_left_edge)
+ N = 512
+ n_contours=5
+ cmap = 'algae'
+ field = 'Density'
+ mi, ma = self.pf.h.all_data().quantities['Extrema'](field)[0]
+ mi, ma = na.log10(mi), na.log10(ma)
+ contour_width=(ma-mi)/100.
+ L = na.array([1.]*3)
+ tf = ColorTransferFunction((mi-2, ma+2))
+ tf.add_layers(n_contours,w=contour_width,
+ col_bounds = (mi*1.001,ma*0.999),
+ colormap=cmap,alpha=na.logspace(-1,0,n_contours))
+ cam = self.pf.h.camera(c, L, W, (N,N), transfer_function = tf, no_ghost=True)
+ image = cam.snapshot()
+ # image = cam.snapshot('test_rendering_%s.png'%field)
+ self.result = image
+
+ def compare(self, old_result):
+        # Compare the relative deltas; allow a tolerance of 1e-9
+ delta = na.nanmax( na.abs(self.result - old_result) /
+ (self.result + old_result) )
+ if delta > 1e-9: raise VolumeRenderingInconsistent()
+
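The comparison metric here is a symmetric relative difference, max(|new - old| / (new + old)), which is scale-free for the strictly positive images a density rendering produces. A self-contained illustration of the same check:

    import numpy as na  # the array alias yt used at this time

    def images_consistent(new, old, tol=1e-9):
        # Symmetric relative difference; assumes positive-valued images.
        delta = na.nanmax(na.abs(new - old) / (new + old))
        return delta <= tol

    a = na.ones((4, 4))
    b = a * (1.0 + 5e-10)
    print images_consistent(a, b)  # True: relative delta is ~2.5e-10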
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -106,6 +106,9 @@
find_unique_solutions, \
project_unique_light_cones
+from .radial_column_density.api import \
+ RadialColumnDensity
+
from .simulation_handler.api import \
EnzoSimulation
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -426,7 +426,7 @@
"""
if self.max_dens_point is not None:
return self.max_dens_point[0]
- max = self._mpi_allmax(self._max_dens[self.id][0])
+ max = self.comm.mpi_allreduce(self._max_dens[self.id][0], op='max')
return max
def maximum_density_location(self):
@@ -450,7 +450,7 @@
else:
value = na.array([0,0,0])
# This works, and isn't appropriate but for now will be fine...
- value = self._mpi_allsum(value)
+ value = self.comm.mpi_allreduce(value, op='sum')
return value
def center_of_mass(self):
@@ -479,8 +479,8 @@
else:
my_mass = 0.
my_com = na.array([0.,0.,0.])
- global_mass = self._mpi_allsum(my_mass)
- global_com = self._mpi_allsum(my_com)
+ global_mass = self.comm.mpi_allreduce(my_mass, op='sum')
+ global_com = self.comm.mpi_allreduce(my_com, op='sum')
return global_com / global_mass
def total_mass(self):
@@ -499,7 +499,7 @@
my_mass = self["ParticleMassMsun"].sum()
else:
my_mass = 0.
- global_mass = self._mpi_allsum(float(my_mass))
+ global_mass = self.comm.mpi_allreduce(float(my_mass), op='sum')
return global_mass
def bulk_velocity(self):
@@ -528,7 +528,7 @@
vy = 0.
vz = 0.
bv = na.array([vx,vy,vz,pm])
- global_bv = self._mpi_allsum(bv)
+ global_bv = self.comm.mpi_allreduce(bv, op='sum')
return global_bv[:3]/global_bv[3]
def rms_velocity(self):
@@ -558,7 +558,7 @@
ss = na.array([s, float(size)])
else:
ss = na.array([0.,0.])
- global_ss = self._mpi_allsum(ss)
+ global_ss = self.comm.mpi_allreduce(ss, op='sum')
ms = global_ss[0] / global_ss[1]
return na.sqrt(ms) * global_ss[1]
@@ -598,7 +598,7 @@
else:
my_max = 0.
- return self._mpi_allmax(my_max)
+ return self.comm.mpi_allreduce(my_max, op='max')
def get_size(self):
if self.size is not None:
@@ -607,7 +607,7 @@
my_size = self.indices.size
else:
my_size = 0
- global_size = self._mpi_allsum(my_size)
+ global_size = self.comm.mpi_allreduce(my_size, op='sum')
return global_size
def __getitem__(self, key):
@@ -736,8 +736,8 @@
dist_max = 0.0
# In this parallel case, we're going to find the global dist extrema
# and build identical bins on all tasks.
- dist_min = self._mpi_allmin(dist_min)
- dist_max = self._mpi_allmax(dist_max)
+ dist_min = self.comm.mpi_allreduce(dist_min, op='min')
+ dist_max = self.comm.mpi_allreduce(dist_max, op='max')
# Set up the radial bins.
# Multiply min and max to prevent issues with digitize below.
self.radial_bins = na.logspace(math.log10(dist_min*.99 + TINY),
@@ -752,7 +752,7 @@
for i in xrange(self.bin_count):
self.mass_bins[i+1] += self.mass_bins[i]
# Sum up the mass_bins globally
- self.mass_bins = self._mpi_Allsum_double(self.mass_bins)
+ self.mass_bins = self.comm.mpi_allreduce(self.mass_bins, op='sum')
# Calculate the over densities in the bins.
self.overdensity = self.mass_bins * Msun2g / \
(4./3. * math.pi * rho_crit * \
@@ -1028,19 +1028,14 @@
else: ii = slice(None)
self.particle_fields = {}
for field in self._fields:
- if ytcfg.getboolean("yt","inline") == False:
- tot_part = self._data_source[field].size
- if field == "particle_index":
- self.particle_fields[field] = self._data_source[field][ii].astype('int64')
- else:
- self.particle_fields[field] = self._data_source[field][ii].astype('float64')
+ tot_part = self._data_source[field].size
+ if field == "particle_index":
+ self.particle_fields[field] = self._data_source[field][ii].astype('int64')
else:
- tot_part = self._data_source[field].size
- if field == "particle_index":
- self.particle_fields[field] = self._data_source[field][ii].astype('int64')
- else:
- self.particle_fields[field] = self._data_source[field][ii].astype('float64')
+ self.particle_fields[field] = self._data_source[field][ii].astype('float64')
+ del self._data_source[field]
self._base_indices = na.arange(tot_part)[ii]
+ gc.collect()
def _get_dm_indices(self):
if 'creation_time' in self._data_source.hierarchy.field_list:
@@ -1240,11 +1235,11 @@
if group.tasks is not None:
fn = ""
for task in group.tasks:
- fn += "%s.h5 " % self._get_filename(prefix, rank=task)
+ fn += "%s.h5 " % self.comm.get_filename(prefix, rank=task)
elif self._distributed:
- fn = "%s.h5" % self._get_filename(prefix, rank=group._owner)
+ fn = "%s.h5" % self.comm.get_filename(prefix, rank=group._owner)
else:
- fn = "%s.h5" % self._get_filename(prefix)
+ fn = "%s.h5" % self.comm.get_filename(prefix)
gn = "Halo%08i" % (group.id)
f.write("%s %s\n" % (gn, fn))
f.flush()
@@ -1387,6 +1382,7 @@
*dm_only* is set, only run it on the dark matter particles, otherwise
on all particles. Returns an iterable collection of *HopGroup* items.
"""
+ ParallelAnalysisInterface.__init__(self)
self.threshold = threshold
self.num_neighbors = num_neighbors
self.bounds = bounds
@@ -1411,14 +1407,20 @@
self.particle_fields["particle_index"].size:
mylog.error("Non-unique values in particle_index field. Parallel HOP will fail.")
exit = True
- self._mpi_exit_test(exit)
+
+ self.comm.mpi_exit_test(exit)
+ # Try to do this in a memory conservative way.
+ na.divide(self.particle_fields['ParticleMassMsun'], self.total_mass,
+ self.particle_fields['ParticleMassMsun'])
+ na.divide(self.particle_fields["particle_position_x"],
+ self.old_period[0], self.particle_fields["particle_position_x"])
+ na.divide(self.particle_fields["particle_position_y"],
+ self.old_period[1], self.particle_fields["particle_position_y"])
+ na.divide(self.particle_fields["particle_position_z"],
+ self.old_period[2], self.particle_fields["particle_position_z"])
obj = ParallelHOPHaloFinder(self.period, self.padding,
self.num_neighbors, self.bounds,
- self.particle_fields["particle_position_x"] / self.old_period[0],
- self.particle_fields["particle_position_y"] / self.old_period[1],
- self.particle_fields["particle_position_z"] / self.old_period[2],
- self.particle_fields["particle_index"],
- self.particle_fields["ParticleMassMsun"]/self.total_mass,
+ self.particle_fields,
self.threshold, rearrange=self.rearrange, premerge=self.premerge)
self.densities, self.tags = obj.density, obj.chainID
# I'm going to go ahead and delete self.densities because it's not
@@ -1445,15 +1447,12 @@
yt_counters("Precomp bulk vel.")
self.bulk_vel = na.zeros((self.group_count, 3), dtype='float64')
yt_counters("bulk vel. reading data")
- pm = self.particle_fields["ParticleMassMsun"]
- if ytcfg.getboolean("yt","inline") == False:
- xv = self._data_source["particle_velocity_x"][self._base_indices]
- yv = self._data_source["particle_velocity_y"][self._base_indices]
- zv = self._data_source["particle_velocity_z"][self._base_indices]
- else:
- xv = self._data_source["particle_velocity_x"][self._base_indices]
- yv = self._data_source["particle_velocity_y"][self._base_indices]
- zv = self._data_source["particle_velocity_z"][self._base_indices]
+ pm = obj.mass
+ # Fix this back to un-normalized units.
+ na.multiply(pm, self.total_mass, pm)
+ xv = self._data_source["particle_velocity_x"][self._base_indices]
+ yv = self._data_source["particle_velocity_y"][self._base_indices]
+ zv = self._data_source["particle_velocity_z"][self._base_indices]
yt_counters("bulk vel. reading data")
yt_counters("bulk vel. computing")
select = (self.tags >= 0)
@@ -1479,7 +1478,7 @@
del diff_subchain
# Bring it together, and divide by the previously computed total mass
# of each halo.
- self.bulk_vel = self._mpi_Allsum_double(self.bulk_vel)
+ self.bulk_vel = self.comm.mpi_allreduce(self.bulk_vel, op='sum')
for groupID in xrange(self.group_count):
self.bulk_vel[groupID] = self.bulk_vel[groupID] / self.Tot_M[groupID]
yt_counters("bulk vel. computing")
@@ -1501,7 +1500,7 @@
rms_vel_temp[u][1] = marks[i+1] - marks[i]
del vel, marks, uniq_subchain
# Bring it together.
- rms_vel_temp = self._mpi_Allsum_double(rms_vel_temp)
+ rms_vel_temp = self.comm.mpi_allreduce(rms_vel_temp, op='sum')
self.rms_vel = na.empty(self.group_count, dtype='float64')
for groupID in xrange(self.group_count):
# Here we do the Mean and the Root.
@@ -1513,6 +1512,7 @@
self.taskID = obj.mine
self.halo_taskmap = obj.halo_taskmap # A defaultdict.
del obj
+ gc.collect()
yt_counters("Precomp bulk vel.")
def _parse_output(self):
@@ -1547,7 +1547,7 @@
bulk_vel=self.bulk_vel[index], tasks=self.halo_taskmap[index],
rms_vel=self.rms_vel[index])
# I don't own this halo
- self._do_not_claim_object(self._groups[index])
+ self.comm.do_not_claim_object(self._groups[index])
self._max_dens[index] = [self.max_dens_point[index][0], self.max_dens_point[index][1], \
self.max_dens_point[index][2], self.max_dens_point[index][3]]
index += 1
@@ -1560,7 +1560,7 @@
bulk_vel=self.bulk_vel[i], tasks=self.halo_taskmap[index],
rms_vel=self.rms_vel[i])
# This halo may be owned by many, including this task
- self._claim_object(self._groups[index])
+ self.comm.claim_object(self._groups[index])
self._max_dens[index] = [self.max_dens_point[i][0], self.max_dens_point[i][1], \
self.max_dens_point[i][2], self.max_dens_point[i][3]]
cp += counts[i+1]
@@ -1573,7 +1573,7 @@
group_total_mass=self.Tot_M[index], max_radius=self.max_radius[index],
bulk_vel=self.bulk_vel[index], tasks=self.halo_taskmap[index],
rms_vel=self.rms_vel[index])
- self._do_not_claim_object(self._groups[index])
+ self.comm.do_not_claim_object(self._groups[index])
self._max_dens[index] = [self.max_dens_point[index][0], self.max_dens_point[index][1], \
self.max_dens_point[index][2], self.max_dens_point[index][3]]
index += 1
@@ -1606,6 +1606,7 @@
class GenericHaloFinder(HaloList, ParallelAnalysisInterface):
def __init__(self, pf, ds, dm_only=True, padding=0.0):
+ ParallelAnalysisInterface.__init__(self)
self.pf = pf
self.hierarchy = pf.h
self.center = (na.array(ds.right_edge) + na.array(ds.left_edge))/2.0
@@ -1625,7 +1626,7 @@
max_dens[hi] = [max_dens_temp] + list(self._max_dens[halo.id])[1:4]
groups.append(self._halo_class(self, hi))
groups[-1].indices = halo.indices
- self._claim_object(groups[-1])
+ self.comm.claim_object(groups[-1])
hi += 1
del self._groups, self._max_dens # explicit >> implicit
self._groups = groups
@@ -1638,7 +1639,7 @@
# about processors and ownership and so forth.
# _mpi_info_dict returns a dict of {proc: whatever} where whatever is
# what is fed in on each proc.
- mine, halo_info = self._mpi_info_dict(len(self))
+ mine, halo_info = self.comm.mpi_info_dict(len(self))
nhalos = sum(halo_info.values())
# Figure out our offset
my_first_id = sum([v for k,v in halo_info.items() if k < mine])
@@ -1701,7 +1702,7 @@
--------
>>> halos.write_out("HopAnalysis.out")
"""
- f = self._write_on_root(filename)
+ f = self.comm.write_on_root(filename)
HaloList.write_out(self, f)
def write_particle_lists_txt(self, prefix):
@@ -1720,7 +1721,7 @@
--------
>>> halos.write_particle_lists_txt("halo-parts")
"""
- f = self._write_on_root("%s.txt" % prefix)
+ f = self.comm.write_on_root("%s.txt" % prefix)
HaloList.write_particle_lists_txt(self, prefix, fp=f)
@parallel_blocking_call
@@ -1743,10 +1744,10 @@
--------
>>> halos.write_particle_lists("halo-parts")
"""
- fn = "%s.h5" % self._get_filename(prefix)
+ fn = "%s.h5" % self.comm.get_filename(prefix)
f = h5py.File(fn, "w")
for halo in self._groups:
- if not self._is_mine(halo): continue
+ if not self.comm.is_mine(halo): continue
halo.write_particle_list(f)
def dump(self, basename="HopAnalysis"):
@@ -1778,7 +1779,8 @@
class parallelHF(GenericHaloFinder, parallelHOPHaloList):
def __init__(self, pf, subvolume=None,threshold=160, dm_only=True, \
resize=True, rearrange=True,\
- fancy_padding=True, safety=1.5, premerge=True, sample=0.03):
+ fancy_padding=True, safety=1.5, premerge=True, sample=0.03, \
+ total_mass=None, num_particles=None):
r"""Parallel HOP halo finder.
Halos are built by:
@@ -1827,6 +1829,23 @@
sample : float
The fraction of the full dataset on which load-balancing is
performed. Default = 0.03.
+ total_mass : float
+        If HOP is run on the same dataset multiple times, the total mass
+        of particles in Msun units in the full volume can be supplied here
+        to save time.
+        This must correspond to the particles being operated on, meaning
+        if stars are included in the halo finding, they must be included
+        in this mass as well, and vice versa.
+        When halo finding on a subvolume, this still refers to the
+        mass in the entire volume.
+ Default = None, which means the total mass is automatically
+ calculated.
+ num_particles : integer
+        The total number of particles in the volume, counted over the same
+        particle set as `total_mass`. Specifying this turns off
+        fancy_padding.
+ Default = None, which means the number of particles is
+ automatically calculated.
Examples
--------
@@ -1847,7 +1866,7 @@
topbounds = na.array([[0., 0., 0.], period])
# Cut up the volume evenly initially, with no padding.
padded, LE, RE, self._data_source = \
- self._partition_hierarchy_3d(ds=self._data_source,
+ self.partition_hierarchy_3d(ds=self._data_source,
padding=self.padding)
# also get the total mass of particles
yt_counters("Reading Data")
@@ -1855,26 +1874,24 @@
# analyzing a subvolume.
ds_names = ["particle_position_x","particle_position_y","particle_position_z"]
if ytcfg.getboolean("yt","inline") == False and \
- resize and self._mpi_get_size() != 1 and subvolume is None:
- random.seed(self._mpi_get_rank())
- cut_list = self._partition_hierarchy_3d_bisection_list()
+ resize and self.comm.size != 1 and subvolume is None:
+ random.seed(self.comm.rank)
+ cut_list = self.partition_hierarchy_3d_bisection_list()
root_points = self._subsample_points()
self.bucket_bounds = []
- if self._mpi_get_rank() == 0:
+ if self.comm.rank == 0:
self._recursive_divide(root_points, topbounds, 0, cut_list)
- self.bucket_bounds = self._mpi_bcast_pickled(self.bucket_bounds)
- my_bounds = self.bucket_bounds[self._mpi_get_rank()]
+ self.bucket_bounds = self.comm.mpi_bcast_pickled(self.bucket_bounds)
+ my_bounds = self.bucket_bounds[self.comm.rank]
LE, RE = my_bounds[0], my_bounds[1]
self._data_source = self.hierarchy.region_strict([0.]*3, LE, RE)
# If this isn't parallel, define the region as an AMRRegionStrict so
# particle IO works.
- if self._mpi_get_size() == 1:
+ if self.comm.size == 1:
self._data_source = self.hierarchy.periodic_region_strict([0.5]*3, LE, RE)
# get the average spacing between particles for this region
# The except is for the serial case, where the full box is what we want.
- if ytcfg.getboolean("yt","inline") == False:
- data = self._data_source["particle_position_x"]
- else:
+ if num_particles is None:
data = self._data_source["particle_position_x"]
try:
l = self._data_source.right_edge - self._data_source.left_edge
@@ -1883,14 +1900,16 @@
vol = l[0] * l[1] * l[2]
full_vol = vol
# We will use symmetric padding when a subvolume is being used.
- if not fancy_padding or subvolume is not None:
- avg_spacing = (float(vol) / data.size)**(1./3.)
+ if not fancy_padding or subvolume is not None or num_particles is not None:
+ if num_particles is None:
+ num_particles = data.size
+ avg_spacing = (float(vol) / num_particles)**(1./3.)
# padding is a function of inter-particle spacing, this is an
# approximation, but it's OK with the safety factor
padding = (self.num_neighbors)**(1./3.) * self.safety * avg_spacing
self.padding = (na.ones(3,dtype='float64')*padding, na.ones(3,dtype='float64')*padding)
mylog.info('padding %s avg_spacing %f vol %f local_parts %d' % \
- (str(self.padding), avg_spacing, vol, data.size))
+ (str(self.padding), avg_spacing, vol, num_particles))
# Another approach to padding, perhaps more accurate.
elif fancy_padding and self._distributed:
LE_padding, RE_padding = na.empty(3,dtype='float64'), na.empty(3,dtype='float64')
@@ -1934,10 +1953,9 @@
(str(self.padding), avg_spacing, full_vol, data.size, str(self._data_source)))
# Now we get the full box mass after we have the final composition of
# subvolumes.
- if ytcfg.getboolean("yt","inline") == False:
- total_mass = self._mpi_allsum((self._data_source["ParticleMassMsun"].astype('float64')).sum())
- else:
- total_mass = self._mpi_allsum((self._data_source["ParticleMassMsun"].astype('float64')).sum())
+ if total_mass is None:
+ total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(),
+ op='sum')
if not self._distributed:
self.padding = (na.zeros(3,dtype='float64'), na.zeros(3,dtype='float64'))
# If we're using a subvolume, we now re-divide.
@@ -1945,7 +1963,7 @@
self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
# Cut up the volume.
padded, LE, RE, self._data_source = \
- self._partition_hierarchy_3d(ds=self._data_source,
+ self.partition_hierarchy_3d(ds=self._data_source,
padding=0.)
self.bounds = (LE, RE)
(LE_padding, RE_padding) = self.padding
@@ -1959,13 +1977,13 @@
# Read in a random subset of the points in each domain, and then
# collect them on the root task.
xp = self._data_source["particle_position_x"]
- n_parts = self._mpi_allsum(xp.size)
+ n_parts = self.comm.mpi_allreduce(xp.size, op='sum')
local_parts = xp.size
random_points = int(self.sample * n_parts)
# We want to get a representative selection of random particles in
# each subvolume.
- adjust = float(local_parts) / ( float(n_parts) / self._mpi_get_size())
- n_random = int(adjust * float(random_points) / self._mpi_get_size())
+ adjust = float(local_parts) / ( float(n_parts) / self.comm.size)
+ n_random = int(adjust * float(random_points) / self.comm.size)
mylog.info("Reading in %d random particles." % n_random)
# Get unique random particles.
my_points = na.empty((n_random, 3), dtype='float64')
@@ -1980,7 +1998,7 @@
self._data_source.clear_data()
del uni
# Collect them on the root task.
- mine, sizes = self._mpi_info_dict(n_random)
+ mine, sizes = self.comm.mpi_info_dict(n_random)
if mine == 0:
tot_random = sum(sizes.values())
root_points = na.empty((tot_random, 3), dtype='float64')
@@ -1988,7 +2006,8 @@
else:
root_points = na.empty([])
my_points.shape = (1, n_random*3)
- root_points = self._mpi_concatenate_array_on_root_double(my_points[0])
+ root_points = self.comm.par_combine_object(my_points[0],
+ datatype="array", op="cat")
del my_points
if mine == 0:
root_points.shape = (tot_random, 3)
@@ -2056,7 +2075,7 @@
class HOPHaloFinder(GenericHaloFinder, HOPHaloList):
def __init__(self, pf, subvolume=None, threshold=160, dm_only=True,
- padding=0.02):
+ padding=0.02, total_mass=None):
r"""HOP halo finder.
Halos are built by:
@@ -2090,6 +2109,17 @@
with duplicated particles for halo finding to work. This number
must be no smaller than the radius of the largest halo in the box
in code units. Default = 0.02.
+ total_mass : float
+        If HOP is run on the same dataset multiple times, the total mass
+        of particles in Msun units in the full volume can be supplied here
+        to save time.
+        This must correspond to the particles being operated on, meaning
+        if stars are included in the halo finding, they must be included
+        in this mass as well, and vice versa.
+        When halo finding on a subvolume, this still refers to the
+        mass in the entire volume.
+ Default = None, which means the total mass is automatically
+ calculated.
Examples
--------
@@ -2107,14 +2137,15 @@
# a small part is actually going to be used.
self.padding = 0.0
padded, LE, RE, self._data_source = \
- self._partition_hierarchy_3d(ds = self._data_source, padding=self.padding)
+ self.partition_hierarchy_3d(ds = self._data_source, padding=self.padding)
# For scaling the threshold, note that it's a passthrough
- if dm_only:
- select = self._get_dm_indices()
- total_mass = \
- self._mpi_allsum((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'))
- else:
- total_mass = self._mpi_allsum(self._data_source["ParticleMassMsun"].sum(dtype='float64'))
+ if total_mass is None:
+ if dm_only:
+ select = self._get_dm_indices()
+ total_mass = \
+ self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
+ else:
+ total_mass = self.comm.mpi_allreduce(self._data_source["ParticleMassMsun"].sum(dtype='float64'), op='sum')
# MJT: Note that instead of this, if we are assuming that the particles
# are all on different processors, we should instead construct an
# object representing the entire domain and sum it "lazily" with
@@ -2123,7 +2154,7 @@
self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
self.padding = padding #* pf["unitary"] # This should be clevererer
padded, LE, RE, self._data_source = \
- self._partition_hierarchy_3d(ds = self._data_source,
+ self.partition_hierarchy_3d(ds = self._data_source,
padding=self.padding)
self.bounds = (LE, RE)
# reflect particles around the periodic boundary
@@ -2191,10 +2222,10 @@
self.padding = 0.0 #* pf["unitary"] # This should be clevererer
# get the total number of particles across all procs, with no padding
padded, LE, RE, self._data_source = \
- self._partition_hierarchy_3d(ds=self._data_source,
+ self.partition_hierarchy_3d(ds=self._data_source,
padding=self.padding)
if link > 0.0:
- n_parts = self._mpi_allsum(self._data_source["particle_position_x"].size)
+ n_parts = self.comm.mpi_allreduce(self._data_source["particle_position_x"].size, op='sum')
# get the average spacing between particles
#l = pf.domain_right_edge - pf.domain_left_edge
#vol = l[0] * l[1] * l[2]
@@ -2209,7 +2240,7 @@
if subvolume is not None:
self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
padded, LE, RE, self._data_source = \
- self._partition_hierarchy_3d(ds=self._data_source,
+ self.partition_hierarchy_3d(ds=self._data_source,
padding=self.padding)
self.bounds = (LE, RE)
# reflect particles around the periodic boundary
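The bulk of the halo_objects.py changes are one mechanical migration: the per-type MPI helpers inherited from the mix-in (_mpi_allsum, _mpi_allmax, _mpi_Allsum_double, and so on) are replaced by a single communicator attribute exposing a generic mpi_allreduce(data, op=...), which is also why each touched class now calls ParallelAnalysisInterface.__init__ explicitly. A sketch of the before/after shape, with a serial mock standing in for yt's real communicator:

    # Mock communicator; illustrates the calling convention only.
    class MockComm(object):
        def mpi_allreduce(self, data, op='sum'):
            return data  # serial no-op stand-in

    class Halo(object):
        comm = MockComm()
        def total_mass(self, my_mass):
            # old style (removed): return self._mpi_allsum(my_mass)
            return self.comm.mpi_allreduce(my_mass, op='sum')

    print Halo().total_mass(10.0)  # 10.0 when run on one task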
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -26,6 +26,7 @@
from collections import defaultdict
import itertools, sys
import numpy as na
+import gc
from yt.funcs import *
from yt.utilities.performance_counters import yt_counters, time_function
@@ -43,8 +44,9 @@
class ParallelHOPHaloFinder(ParallelAnalysisInterface):
def __init__(self,period, padding, num_neighbors, bounds,
- xpos, ypos, zpos, index, mass, threshold=160.0, rearrange=True,
+ particle_fields, threshold=160.0, rearrange=True,
premerge=True):
+ ParallelAnalysisInterface.__init__(self)
self.threshold = threshold
self.rearrange = rearrange
self.premerge = premerge
@@ -54,12 +56,12 @@
self.padding = padding
self.num_neighbors = num_neighbors
self.bounds = bounds
- self.xpos = xpos
- self.ypos = ypos
- self.zpos = zpos
+ self.xpos = particle_fields.pop("particle_position_x")
+ self.ypos = particle_fields.pop("particle_position_y")
+ self.zpos = particle_fields.pop("particle_position_z")
self.real_size = len(self.xpos)
- self.index = na.array(index, dtype='int64')
- self.mass = mass
+ self.index = particle_fields.pop("particle_index")
+ self.mass = particle_fields.pop("ParticleMassMsun")
self.padded_particles = []
self.nMerge = 4
yt_counters("chainHOP")
@@ -74,7 +76,7 @@
tasks are our geometric neighbors.
"""
self.neighbors = set([])
- self.mine, global_bounds = self._mpi_info_dict(self.bounds)
+ self.mine, global_bounds = self.comm.mpi_info_dict(self.bounds)
my_LE, my_RE = self.bounds
# Put the vertices into a big list, each row is
# array[x,y,z, taskID]
@@ -198,7 +200,7 @@
# lists us as their neighbor, we add them as our neighbor. This is
# probably not needed because the stuff above should be symmetric,
# but it isn't a big issue.
- self.mine, global_neighbors = self._mpi_info_dict(self.neighbors)
+ self.mine, global_neighbors = self.comm.mpi_info_dict(self.neighbors)
for taskID in global_neighbors:
if taskID == self.mine: continue
if self.mine in global_neighbors[taskID]:
@@ -215,7 +217,7 @@
"""
if round == 'first':
max_pad = na.max(self.padding)
- self.mine, self.global_padding = self._mpi_info_dict(max_pad)
+ self.mine, self.global_padding = self.comm.mpi_info_dict(max_pad)
self.max_padding = max(self.global_padding.itervalues())
elif round == 'second':
self.max_padding = 0.
@@ -234,14 +236,14 @@
temp_LE = LE - LE_padding
temp_RE = RE + RE_padding
expanded_bounds = (temp_LE, temp_RE)
- self.mine, global_exp_bounds = self._mpi_info_dict(expanded_bounds)
+ self.mine, global_exp_bounds = self.comm.mpi_info_dict(expanded_bounds)
send_real_indices = {}
send_points = {}
send_mass = {}
send_size = {}
# This will reduce the size of the loop over particles.
yt_counters("Picking padding data to send.")
- send_count = len(na.where(self.is_inside_annulus == True)[0])
+ send_count = self.is_inside_annulus.sum()
points = na.empty((send_count, 3), dtype='float64')
points[:,0] = self.xpos[self.is_inside_annulus]
points[:,1] = self.ypos[self.is_inside_annulus]
@@ -261,11 +263,11 @@
send_real_indices[neighbor] = real_indices[is_inside].copy()
send_points[neighbor] = shift_points[is_inside].copy()
send_mass[neighbor] = mass[is_inside].copy()
- send_size[neighbor] = len(na.where(is_inside == True)[0])
+ send_size[neighbor] = is_inside.sum()
del points, shift_points, mass, real_indices
yt_counters("Picking padding data to send.")
# Communicate the sizes to send.
- self.mine, global_send_count = self._mpi_info_dict(send_size)
+ self.mine, global_send_count = self.comm.mpi_info_dict(send_size)
del send_size
# Initialize the arrays to receive data.
yt_counters("Initalizing recv arrays.")
@@ -284,19 +286,19 @@
yt_counters("MPI stuff.")
hooks = []
for opp_neighbor in self.neighbors:
- hooks.append(self._mpi_Irecv_long(recv_real_indices[opp_neighbor], opp_neighbor))
- hooks.append(self._mpi_Irecv_double(recv_points[opp_neighbor], opp_neighbor))
- hooks.append(self._mpi_Irecv_double(recv_mass[opp_neighbor], opp_neighbor))
+ hooks.append(self.comm.mpi_nonblocking_recv(recv_real_indices[opp_neighbor], opp_neighbor))
+ hooks.append(self.comm.mpi_nonblocking_recv(recv_points[opp_neighbor], opp_neighbor))
+ hooks.append(self.comm.mpi_nonblocking_recv(recv_mass[opp_neighbor], opp_neighbor))
# Let's wait here to be absolutely sure that all the receive buffers
# have been created before any sending happens!
- self._barrier()
+ self.comm.barrier()
# Now we send the data.
for neighbor in self.neighbors:
- hooks.append(self._mpi_Isend_long(send_real_indices[neighbor], neighbor))
- hooks.append(self._mpi_Isend_double(send_points[neighbor], neighbor))
- hooks.append(self._mpi_Isend_double(send_mass[neighbor], neighbor))
+ hooks.append(self.comm.mpi_nonblocking_send(send_real_indices[neighbor], neighbor))
+ hooks.append(self.comm.mpi_nonblocking_send(send_points[neighbor], neighbor))
+ hooks.append(self.comm.mpi_nonblocking_send(send_mass[neighbor], neighbor))
# Now we use the data, after all the comms are done.
- self._mpi_Request_Waitall(hooks)
+ self.comm.mpi_Request_Waitall(hooks)
yt_counters("MPI stuff.")
yt_counters("Processing padded data.")
del send_real_indices, send_points, send_mass
@@ -342,13 +344,22 @@
yt_counters("init kd tree")
# Yes, we really do need to initialize this many arrays.
# They're deleted in _parallelHOP.
- fKD.dens = na.asfortranarray(na.zeros(self.size, dtype='float64'))
+ fKD.dens = na.zeros(self.size, dtype='float64', order='F')
fKD.mass = na.concatenate((self.mass, self.mass_pad))
- fKD.pos = na.asfortranarray(na.empty((3, self.size), dtype='float64'))
+ del self.mass
+ fKD.pos = na.empty((3, self.size), dtype='float64', order='F')
# This actually copies the data into the fortran space.
- fKD.pos[0, :] = na.concatenate((self.xpos, self.xpos_pad))
- fKD.pos[1, :] = na.concatenate((self.ypos, self.ypos_pad))
- fKD.pos[2, :] = na.concatenate((self.zpos, self.zpos_pad))
+ self.psize = self.xpos.size
+ fKD.pos[0, :self.psize] = self.xpos
+ fKD.pos[1, :self.psize] = self.ypos
+ fKD.pos[2, :self.psize] = self.zpos
+ del self.xpos, self.ypos, self.zpos
+ gc.collect()
+ fKD.pos[0, self.psize:] = self.xpos_pad
+ fKD.pos[1, self.psize:] = self.ypos_pad
+ fKD.pos[2, self.psize:] = self.zpos_pad
+ del self.xpos_pad, self.ypos_pad, self.zpos_pad
+ gc.collect()
fKD.qv = na.asfortranarray(na.empty(3, dtype='float64'))
fKD.nn = self.num_neighbors
# Plus 2 because we're looking for that neighbor, but only keeping
@@ -647,10 +658,8 @@
if self.chainID[i] != -1:
self.chainID[i] = map[self.chainID[i]]
del map
- self.densest_in_chain = dic_new.copy()
- del dic_new
- self.densest_in_chain_real_index = dicri_new.copy()
- del dicri_new
+ self.densest_in_chain = dic_new
+ self.densest_in_chain_real_index = dicri_new
self.__max_memory()
yt_counters("preconnect pregrouping.")
mylog.info("Preconnected %d chains." % removed)
@@ -664,7 +673,7 @@
"""
yt_counters("globally_assign_chainIDs")
# First find out the number of chains on each processor.
- self.mine, chain_info = self._mpi_info_dict(chain_count)
+ self.mine, chain_info = self.comm.mpi_info_dict(chain_count)
self.nchains = sum(chain_info.values())
# Figure out our offset.
self.my_first_id = sum([v for k,v in chain_info.iteritems() if k < self.mine])
@@ -683,8 +692,11 @@
# Shift the values over effectively by concatenating them in the same
# order as the values have been shifted in _globally_assign_chainIDs()
yt_counters("global chain MPI stuff.")
- self.densest_in_chain = self._mpi_concatenate_array_double(self.densest_in_chain)
- self.densest_in_chain_real_index = self._mpi_concatenate_array_long(self.densest_in_chain_real_index)
+ self.densest_in_chain = self.comm.par_combine_object(self.densest_in_chain,
+ datatype="array", op="cat")
+ self.densest_in_chain_real_index = self.comm.par_combine_object(
+ self.densest_in_chain_real_index,
+ datatype="array", op="cat")
yt_counters("global chain MPI stuff.")
# Sort the chains by density here. This is an attempt to make it such
# that the merging stuff in a few steps happens in the same order
@@ -774,16 +786,16 @@
# Set up the receives, but don't actually use them.
hooks = []
for opp_neighbor in self.neighbors:
- hooks.append(self._mpi_Irecv_long(temp_indices[opp_neighbor], opp_neighbor))
- hooks.append(self._mpi_Irecv_long(temp_chainIDs[opp_neighbor], opp_neighbor))
+ hooks.append(self.comm.mpi_nonblocking_recv(temp_indices[opp_neighbor], opp_neighbor))
+ hooks.append(self.comm.mpi_nonblocking_recv(temp_chainIDs[opp_neighbor], opp_neighbor))
# Make sure all the receive buffers are set before continuing.
- self._barrier()
+ self.comm.barrier()
# Send padded particles to our neighbors.
for neighbor in self.neighbors:
- hooks.append(self._mpi_Isend_long(self.uphill_real_indices, neighbor))
- hooks.append(self._mpi_Isend_long(self.uphill_chainIDs, neighbor))
+ hooks.append(self.comm.mpi_nonblocking_send(self.uphill_real_indices, neighbor))
+ hooks.append(self.comm.mpi_nonblocking_send(self.uphill_chainIDs, neighbor))
# Now actually use the data once it's good to go.
- self._mpi_Request_Waitall(hooks)
+ self.comm.mpi_Request_Waitall(hooks)
self.__max_memory()
so_far = 0
for opp_neighbor in self.neighbors:
@@ -828,7 +840,7 @@
"""
yt_counters("connect_chains_across_tasks")
# Remote (lower dens) chain -> local (higher) chain.
- chainID_translate_map_local = na.arange(self.nchains)
+ chainID_translate_map_local = na.arange(self.nchains, dtype='int64')
# Build the stuff to send.
self.uphill_real_indices = na.concatenate((
self.index, self.index_pad))[self.padded_particles]
@@ -837,7 +849,8 @@
# Now we make a global dict of how many particles each task is
# sending.
self.global_padded_count = {self.mine:self.uphill_chainIDs.size}
- self.global_padded_count = self._mpi_joindict(self.global_padded_count)
+ self.global_padded_count = self.comm.par_combine_object(
+ self.global_padded_count, datatype = "dict", op = "join")
# Send/receive 'em.
self._communicate_uphill_info()
del self.global_padded_count
@@ -878,7 +891,8 @@
# it. Therefore each key (a chain) in this dict is unique, but the items
# the keys point to are not necessarily unique.
chainID_translate_map_global = \
- self._mpi_minimum_array_long(chainID_translate_map_local)
+ self.comm.mpi_allreduce(chainID_translate_map_local, op='min',
+ dtype='int64')
# Loop over chains, smallest to largest density, recursively until
# we reach a self-assigned chain. Then we assign that final chainID to
# the *current* one only.
@@ -932,7 +946,8 @@
# but there's so many places in this that need to be globally synched
# that it's not worth the effort right now to make this one spot better.
global_annulus_count = {self.mine:send_count}
- global_annulus_count = self._mpi_joindict(global_annulus_count)
+ global_annulus_count = self.comm.par_combine_object(
+ global_annulus_count, datatype = "dict", op = "join")
# Set up the receiving arrays.
recv_real_indices = dict.fromkeys(self.neighbors)
recv_chainIDs = dict.fromkeys(self.neighbors)
@@ -943,16 +958,16 @@
# Set up the receiving hooks.
hooks = []
for opp_neighbor in self.neighbors:
- hooks.append(self._mpi_Irecv_long(recv_real_indices[opp_neighbor], opp_neighbor))
- hooks.append(self._mpi_Irecv_long(recv_chainIDs[opp_neighbor], opp_neighbor))
+ hooks.append(self.comm.mpi_nonblocking_recv(recv_real_indices[opp_neighbor], opp_neighbor))
+ hooks.append(self.comm.mpi_nonblocking_recv(recv_chainIDs[opp_neighbor], opp_neighbor))
# Make sure the recv buffers are set before continuing.
- self._barrier()
+ self.comm.barrier()
# Now we send them.
for neighbor in self.neighbors:
- hooks.append(self._mpi_Isend_long(real_indices, neighbor))
- hooks.append(self._mpi_Isend_long(chainIDs, neighbor))
+ hooks.append(self.comm.mpi_nonblocking_send(real_indices, neighbor))
+ hooks.append(self.comm.mpi_nonblocking_send(chainIDs, neighbor))
# Now we use them when they're nice and ripe.
- self._mpi_Request_Waitall(hooks)
+ self.comm.mpi_Request_Waitall(hooks)
self.__max_memory()
for opp_neighbor in self.neighbors:
opp_size = global_annulus_count[opp_neighbor]
@@ -1061,11 +1076,35 @@
"""
yt_counters("make_global_chain_densest_n")
(self.top_keys, self.bot_keys, self.vals) = \
- self._mpi_maxdict_dict(self.chain_densest_n)
+ self.linearize_chain_dict(self.chain_densest_n)
self.__max_memory()
del self.chain_densest_n
yt_counters("make_global_chain_densest_n")
-
+
+ def linearize_chain_dict(self, data):
+ """
+        Flatten a dict of dicts {top: {bot: val}} into three parallel
+        arrays and concatenate them across tasks. This is specifically
+        for a part of chainHOP.
+ """
+ top_keys = []
+ bot_keys = []
+ vals = []
+ for top_key in data:
+ for bot_key in data[top_key]:
+ top_keys.append(top_key)
+ bot_keys.append(bot_key)
+ vals.append(data[top_key][bot_key])
+ top_keys = na.array(top_keys, dtype='int64')
+ bot_keys = na.array(bot_keys, dtype='int64')
+ vals = na.array(vals, dtype='float64')
+
+ data.clear()
+
+ top_keys = self.comm.par_combine_object(top_keys, datatype='array', op='cat')
+ bot_keys = self.comm.par_combine_object(bot_keys, datatype='array', op='cat')
+ vals = self.comm.par_combine_object(vals, datatype='array', op='cat')
+ return (top_keys, bot_keys, vals)
+
def _build_groups(self):
"""
With the collection of possible chain links, build groups.
@@ -1155,7 +1194,7 @@
Set_list = []
# We only want the halos that are modulo mine.
keys = na.arange(groupID, dtype='int64')
- size = self._mpi_get_size()
+ size = self.comm.size
select = (keys % size == self.mine)
groupIDs = keys[select]
mine_groupIDs = set([]) # Records only ones modulo mine.
@@ -1202,7 +1241,7 @@
del Set_list
# To bring it all together, find the minimum values at each entry
# globally.
- lookup = self._mpi_minimum_array_long(lookup)
+ lookup = self.comm.mpi_allreduce(lookup, op='min')
# Now apply this to reverse_map
for chainID,groupID in enumerate(self.reverse_map):
if groupID == -1:
@@ -1330,7 +1369,7 @@
# Now we broadcast this, effectively, with an allsum. Even though
# some groups are on multiple tasks, there is only one densest_in_chain
# and only that task contributed above.
- self.max_dens_point = self._mpi_Allsum_double(max_dens_point)
+ self.max_dens_point = self.comm.mpi_allreduce(max_dens_point, op='sum')
del max_dens_point
yt_counters("max dens point")
# Now CoM.
@@ -1385,9 +1424,9 @@
CoM_M[groupID] += self.max_dens_point[groupID,1:4] - na.array([0.5,0.5,0.5])
CoM_M[groupID] *= Tot_M[groupID]
# Now we find their global values
- self.group_sizes = self._mpi_Allsum_long(size)
- CoM_M = self._mpi_Allsum_double(CoM_M)
- self.Tot_M = self._mpi_Allsum_double(Tot_M)
+ self.group_sizes = self.comm.mpi_allreduce(size, op='sum')
+ CoM_M = self.comm.mpi_allreduce(CoM_M, op='sum')
+ self.Tot_M = self.comm.mpi_allreduce(Tot_M, op='sum')
self.CoM = na.empty((self.group_count,3), dtype='float64')
for groupID in xrange(int(self.group_count)):
self.CoM[groupID] = CoM_M[groupID] / self.Tot_M[groupID]
@@ -1405,7 +1444,7 @@
max_radius[u] = na.max(dist[marks[i]:marks[i+1]])
# Find the maximum across all tasks.
mylog.info('Fraction of particles in this region in groups: %f' % (float(calc)/self.size))
- self.max_radius = self._mpi_double_array_max(max_radius)
+ self.max_radius = self.comm.mpi_allreduce(max_radius, op='max')
self.max_radius = na.sqrt(self.max_radius)
yt_counters("max radius")
yt_counters("Precomp.")
@@ -1457,7 +1496,15 @@
self._communicate_annulus_chainIDs()
mylog.info('Connecting %d chains into groups...' % self.nchains)
self._connect_chains()
+ self.mass = fKD.mass[:self.psize]
+ self.mass_pad = fKD.mass[self.psize:]
del fKD.dens, fKD.mass
+ self.xpos = fKD.pos[0, :self.psize]
+ self.ypos = fKD.pos[1, :self.psize]
+ self.zpos = fKD.pos[2, :self.psize]
+ self.xpos_pad = fKD.pos[0, self.psize:]
+ self.ypos_pad = fKD.pos[1, self.psize:]
+ self.zpos_pad = fKD.pos[2, self.psize:]
del fKD.pos, fKD.chunk_tags
free_tree(0) # Frees the kdtree object.
del self.densestNN
@@ -1477,13 +1524,13 @@
self.density = self.density[:self.real_size]
# We'll make this a global object, which can be used to write a text
# file giving the names of hdf5 files the particles for each halo.
- self.mine, self.I_own = self._mpi_info_dict(self.I_own)
+ self.mine, self.I_own = self.comm.mpi_info_dict(self.I_own)
self.halo_taskmap = defaultdict(set)
for taskID in self.I_own:
for groupID in self.I_own[taskID]:
self.halo_taskmap[groupID].add(taskID)
del self.I_own
- del self.mass, self.xpos, self.ypos, self.zpos
+ del self.xpos, self.ypos, self.zpos
def __add_to_array(self, arr, key, value, type):
"""
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -79,6 +79,7 @@
:param mass_column (int): The column of halo_file that contains the
masses of the haloes. Default=4.
"""
+ ParallelAnalysisInterface.__init__(self)
self.pf = pf
self.halo_file = halo_file
self.omega_matter0 = omega_matter0
@@ -147,7 +148,7 @@
# First the fit file.
if fit:
fitname = prefix + '-fit.dat'
- fp = self._write_on_root(fitname)
+ fp = self.comm.write_on_root(fitname)
line = \
"""#Columns:
#1. log10 of mass (Msolar, NOT Msolar/h)
@@ -163,7 +164,7 @@
fp.close()
if self.mode == 'haloes' and haloes:
haloname = prefix + '-haloes.dat'
- fp = self._write_on_root(haloname)
+ fp = self.comm.write_on_root(haloname)
line = \
"""#Columns:
#1. log10 of mass (Msolar, NOT Msolar/h)
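comm.write_on_root hands back a real file object on the root task and a no-op elsewhere, so serial-looking output code like the fit-file writer above stays correct in parallel. A minimal sketch of the idea, with the rank argument standing in for what yt derives from the communicator:

    # Sketch only; yt's comm.write_on_root differs in detail.
    class _NullFile(object):
        def write(self, s): pass
        def close(self): pass

    def write_on_root(filename, rank):
        # Only rank 0 touches the filesystem; all others get a sink.
        if rank == 0:
            return open(filename, "w")
        return _NullFile()

    fp = write_on_root("hmf-fit.dat", rank=0)
    fp.write("#Columns:\n")
    fp.close()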
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -156,6 +156,7 @@
>>> MergerTree(rf, database = '/home/user/sim1-halos.db',
... halo_finder_function=parallelHF)
"""
+ ParallelAnalysisInterface.__init__(self)
self.restart_files = restart_files # list of enzo restart files
self.with_halos = na.ones(len(restart_files), dtype='bool')
self.database = database # the sqlite database of haloes.
@@ -168,10 +169,10 @@
if self.sleep <= 0.:
self.sleep = 5
# MPI stuff
- self.mine = self._mpi_get_rank()
+ self.mine = self.comm.rank
if self.mine is None:
self.mine = 0
- self.size = self._mpi_get_size()
+ self.size = self.comm.size
if self.size is None:
self.size = 1
# Get to work.
@@ -180,7 +181,7 @@
os.unlink(self.database)
except:
pass
- self._barrier()
+ self.comm.barrier()
self._open_create_database()
self._create_halo_table()
self._run_halo_finder_add_to_db()
@@ -203,7 +204,7 @@
# Now update the database with all the writes.
mylog.info("Updating database with parent-child relationships.")
self._copy_and_update_db()
- self._barrier()
+ self.comm.barrier()
mylog.info("Done!")
def _read_halo_lists(self):
@@ -275,7 +276,7 @@
line = 'INSERT into Halos VALUES (' + line[:-1] + ')'
self.cursor.execute(line, values)
self.conn.commit()
- self._barrier()
+ self.comm.barrier()
del hp
def _open_create_database(self):
@@ -283,7 +284,7 @@
# doesn't already exist. Open it first on root, and then on the others.
if self.mine == 0:
self.conn = sql.connect(self.database)
- self._barrier()
+ self.comm.barrier()
self._ensure_db_sync()
if self.mine != 0:
self.conn = sql.connect(self.database)
@@ -294,7 +295,7 @@
# parallel file system funniness, things will go bad very quickly.
# Therefore, just to be very, very careful, we will ensure that the
# md5 hash of the file is identical across all tasks before proceeding.
- self._barrier()
+ self.comm.barrier()
for i in range(5):
try:
file = open(self.database)
@@ -305,7 +306,7 @@
file = open(self.database)
hash = md5.md5(file.read()).hexdigest()
file.close()
- ignore, hashes = self._mpi_info_dict(hash)
+ ignore, hashes = self.comm.mpi_info_dict(hash)
hashes = set(hashes.values())
if len(hashes) == 1:
break
@@ -338,7 +339,7 @@
self.conn.commit()
except sql.OperationalError:
pass
- self._barrier()
+ self.comm.barrier()
def _find_likely_children(self, parentfile, childfile):
# For each halo in the parent list, identify likely children in the
@@ -548,11 +549,16 @@
child_IDs_tosend = child_IDs[child_send]
child_halos_tosend = child_halos[child_send]
- parent_IDs_tosend = self._mpi_concatenate_array_on_root_long(parent_IDs_tosend)
- parent_masses_tosend = self._mpi_concatenate_array_on_root_double(parent_masses_tosend)
- parent_halos_tosend = self._mpi_concatenate_array_on_root_int(parent_halos_tosend)
- child_IDs_tosend = self._mpi_concatenate_array_on_root_long(child_IDs_tosend)
- child_halos_tosend = self._mpi_concatenate_array_on_root_int(child_halos_tosend)
+ parent_IDs_tosend = self.comm.par_combine_object(parent_IDs_tosend,
+ datatype="array", op="cat")
+ parent_masses_tosend = self.comm.par_combine_object(parent_masses_tosend,
+ datatype="array", op="cat")
+ parent_halos_tosend = self.comm.par_combine_object(parent_halos_tosend,
+ datatype="array", op="cat")
+ child_IDs_tosend = self.comm.par_combine_object(child_IDs_tosend,
+ datatype="array", op="cat")
+ child_halos_tosend = self.comm.par_combine_object(child_halos_tosend,
+ datatype="array", op="cat")
# Resort the received particles.
Psort = parent_IDs_tosend.argsort()
@@ -599,7 +605,7 @@
(matched, parent_IDs_tosend.size, child_IDs_tosend.size))
# Now we sum up the contributions globally.
- self.child_mass_arr = self._mpi_Allsum_double(self.child_mass_arr)
+ self.child_mass_arr = self.comm.mpi_allreduce(self.child_mass_arr)
# Turn these Msol masses into percentages of the parent.
line = "SELECT HaloMass FROM Halos WHERE SnapCurrentTimeIdentifier=%d \
@@ -712,7 +718,7 @@
temp_cursor.close()
temp_conn.close()
self._close_database()
- self._barrier()
+ self.comm.barrier()
if self.mine == 0:
os.rename(temp_name, self.database)
@@ -962,6 +968,7 @@
>>> MergerTreeDotOutput(halos=182842, database='/home/user/sim1-halos.db',
... dotfile = 'halo-182842.gv')
"""
+ ParallelAnalysisInterface.__init__(self)
self.database = database
self.link_min = link_min
if halos is None:
@@ -1108,6 +1115,7 @@
>>> MergerTreeTextOutput(database='/home/user/sim1-halos.db',
... outfile='halos-db.txt')
"""
+ ParallelAnalysisInterface.__init__(self)
self.database = database
self.outfile = outfile
result = self._open_database()
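The _ensure_db_sync pattern above deserves a note: every task hashes the sqlite file and work proceeds only once all hashes agree, which guards against a lagging parallel filesystem serving stale copies. A hedged standalone equivalent, using mpi4py's allgather where yt uses its own mpi_info_dict:

    import time
    import md5                # this era's stdlib module; hashlib.md5 today
    from mpi4py import MPI    # stand-in for yt's internal communicator

    def ensure_file_sync(path, comm=MPI.COMM_WORLD, retries=5):
        # All tasks hash the file; proceed only when every hash agrees.
        for attempt in range(retries):
            digest = md5.md5(open(path).read()).hexdigest()
            if len(set(comm.allgather(digest))) == 1:
                return True
            time.sleep(5)     # let a slow filesystem catch up, then retry
        return False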
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/analysis_modules/halo_profiler/api.py
--- a/yt/analysis_modules/halo_profiler/api.py
+++ b/yt/analysis_modules/halo_profiler/api.py
@@ -34,4 +34,5 @@
from .multi_halo_profiler import \
HaloProfiler, \
FakeProfile, \
- shift_projections
+ shift_projections, \
+ standard_fields
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -164,6 +164,7 @@
>>> hp = HP.halo_profiler("DD0242/DD0242")
"""
+ ParallelAnalysisInterface.__init__(self)
self.dataset = dataset
self.output_dir = output_dir
@@ -494,11 +495,13 @@
updated_halos.append(halo)
# And here is where we bring it all together.
- updated_halos = self._mpi_catlist(updated_halos)
+ updated_halos = self.comm.par_combine_object(updated_halos,
+ datatype="list", op="cat")
updated_halos.sort(key = lambda a:a['id'])
self.all_halos = updated_halos
- self.filtered_halos = self._mpi_catlist(self.filtered_halos)
+ self.filtered_halos = self.comm.par_combine_object(self.filtered_halos,
+ datatype="list", op="cat")
self.filtered_halos.sort(key = lambda a:a['id'])
if filename is not None:
@@ -582,8 +585,14 @@
except EmptyProfileData:
mylog.error("Caught EmptyProfileData exception, returning None for this halo.")
return None
+ # Figure out which fields to add simultaneously
+ field_groupings = defaultdict(lambda: defaultdict(list))
for hp in self.profile_fields:
- profile.add_fields(hp['field'], weight=hp['weight_field'], accumulation=hp['accumulation'])
+ field_groupings[hp['weight_field']][hp['accumulation']].append(hp['field'])
+ for weight_field in field_groupings:
+ for accum, fields in field_groupings[weight_field].items():
+ profile.add_fields(fields, weight=weight_field,
+ accumulation=accum)
if virial_filter:
self._add_actual_overdensity(profile)
@@ -995,7 +1004,7 @@
for plot in projections:
# Get name of data field.
other_fields = {'px':True, 'py':True, 'pdx':True, 'pdy':True, 'weight_field':True}
- for pfield in plot.data.keys():
+ for pfield in plot.field_data.keys():
if not(other_fields.has_key(pfield)):
field = pfield
break
@@ -1050,12 +1059,12 @@
add2_y_weight_field = plot['weight_field'][plot['py'] - 0.5 * plot['pdy'] < 0]
# Add the hanging cells back to the projection data.
- plot.data['px'] = na.concatenate([plot['px'], add_x_px, add_y_px, add2_x_px, add2_y_px])
- plot.data['py'] = na.concatenate([plot['py'], add_x_py, add_y_py, add2_x_py, add2_y_py])
- plot.data['pdx'] = na.concatenate([plot['pdx'], add_x_pdx, add_y_pdx, add2_x_pdx, add2_y_pdx])
- plot.data['pdy'] = na.concatenate([plot['pdy'], add_x_pdy, add_y_pdy, add2_x_pdy, add2_y_pdy])
- plot.data[field] = na.concatenate([plot[field], add_x_field, add_y_field, add2_x_field, add2_y_field])
- plot.data['weight_field'] = na.concatenate([plot['weight_field'],
+ plot.field_data['px'] = na.concatenate([plot['px'], add_x_px, add_y_px, add2_x_px, add2_y_px])
+ plot.field_data['py'] = na.concatenate([plot['py'], add_x_py, add_y_py, add2_x_py, add2_y_py])
+ plot.field_data['pdx'] = na.concatenate([plot['pdx'], add_x_pdx, add_y_pdx, add2_x_pdx, add2_y_pdx])
+ plot.field_data['pdy'] = na.concatenate([plot['pdy'], add_x_pdy, add_y_pdy, add2_x_pdy, add2_y_pdy])
+ plot.field_data[field] = na.concatenate([plot[field], add_x_field, add_y_field, add2_x_field, add2_y_field])
+ plot.field_data['weight_field'] = na.concatenate([plot['weight_field'],
add_x_weight_field, add_y_weight_field,
add2_x_weight_field, add2_y_weight_field])
@@ -1072,6 +1081,7 @@
This is used to mimic a profile object when reading profile data from disk.
"""
def __init__(self, pf):
+ ParallelAnalysisInterface.__init__(self)
self.pf = pf
self._data = {}
@@ -1080,3 +1090,34 @@
def keys(self):
return self._data.keys()
+
+standard_fields = [
+ ("Density", "CellMassMsun", False),
+ ("Temperature", "CellMassMsun", False),
+ ("VelocityMagnitude", "CellMassMsun", False),
+ ("Ones", None, False),
+ ("Entropy", "CellMassMsun", False),
+ ("RadialVelocity", "CellMassMsun", False),
+ ("SpecificAngularMomentumX", "CellMassMsun", False),
+ ("SpecificAngularMomentumY", "CellMassMsun", False),
+ ("SpecificAngularMomentumZ", "CellMassMsun", False),
+ ("CoolingTime", "CellMassMsun", False),
+ ("DynamicalTime", "CellMassMsun", False),
+ ("CellMassMsun", None, True),
+ ("TotalMassMsun", None, True),
+ ("Dark_Matter_Density", "CellMassMsun", False),
+ #("ParticleSpecificAngularMomentumX", "ParticleMassMsun"),
+ #("ParticleSpecificAngularMomentumY", "ParticleMassMsun"),
+ #("ParticleSpecificAngularMomentumZ", "ParticleMassMsun"),
+ ("OverDensity", "CellMassMsun", False),
+ #("ParticleMassMsun", None),
+ ("StarParticleDensity", "StarParticleMassMsun", False), # How do we weight this?
+ #("StarParticleMassMsun", None),
+]
+
+standard_fields += [("%s_Fraction" % (s), "CellMassMsun", False)
+ for s in ["HI","HII","HeI","HeII","HeIII","H2I","H2II",
+ "HM","Electron", "DI","DII","HDI","Metal"]
+]
+
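The profiling change batches the add_fields calls: fields sharing a weight field and an accumulation flag are grouped with a two-level defaultdict and added in one call per group rather than one call per field, saving repeated passes over the halo's data. The grouping step in isolation, with illustrative entries in the same dict form the profiler uses:

    from collections import defaultdict

    profile_fields = [
        {'field': 'Temperature',  'weight_field': 'CellMassMsun', 'accumulation': False},
        {'field': 'Density',      'weight_field': 'CellMassMsun', 'accumulation': False},
        {'field': 'CellMassMsun', 'weight_field': None,           'accumulation': True},
    ]

    field_groupings = defaultdict(lambda: defaultdict(list))
    for hp in profile_fields:
        field_groupings[hp['weight_field']][hp['accumulation']].append(hp['field'])

    for weight_field in field_groupings:
        for accum, fields in field_groupings[weight_field].items():
            # the profiler calls:
            # profile.add_fields(fields, weight=weight_field, accumulation=accum)
            print weight_field, accum, fields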
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/analysis_modules/halo_profiler/standard_analysis.py
--- a/yt/analysis_modules/halo_profiler/standard_analysis.py
+++ b/yt/analysis_modules/halo_profiler/standard_analysis.py
@@ -28,37 +28,6 @@
from yt.data_objects.profiles import BinnedProfile1D
from yt.funcs import *
-analysis_field_list = [
- "Density",
- "Temperature",
- "VelocityMagnitude",
- ("Ones", None),
- "Entropy",
- "RadialVelocity",
- "SpecificAngularMomnetumX",
- "SpecificAngularMomnetumY",
- "SpecificAngularMomnetumZ",
- "CoolingTime",
- "DynamicalTime",
- ("CellMassMsun", None),
- "Dark_Matter_Density",
- #("ParticleSpecificAngularMomentumX", "ParticleMassMsun"),
- #("ParticleSpecificAngularMomentumY", "ParticleMassMsun"),
- #("ParticleSpecificAngularMomentumZ", "ParticleMassMsun"),
- ("TotalMass", None),
- "OverDensity",
- #("ParticleMassMsun", None),
- ("StarParticleDensity", "StarParticleMassMsun"), # How do we weight this?
- #("StarParticleMassMsun", None),
- ("StarParticleDensity", "StarParticleMassMsun"), # How do we weight this?
-]
-
-analysis_field_list += ["%s_Fraction" % (s) for s in
- ["HI","HII","HeI","HeII","HeIII","H2I","H2II","HM","Electron",
- "DI","DII","HDI","Metal"]
-]
-
-
class StandardRadialAnalysis(object):
def __init__(self, pf, center, radius, n_bins = 128, inner_radius = None):
self.pf = pf
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
--- a/yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
+++ b/yt/analysis_modules/hierarchy_subset/hierarchy_subset.py
@@ -27,6 +27,7 @@
import numpy as na
from yt.funcs import *
+from yt.data_objects.data_containers import YTFieldData
from yt.data_objects.grid_patch import \
AMRGridPatch
from yt.data_objects.static_output import \
@@ -65,7 +66,7 @@
self.base_grid = base_pf.h.smoothed_covering_grid(level, self.LeftEdge,
self.RightEdge, dims=dims)
self.base_grid.Level = self.base_grid.level
- self.data = {}
+ self.field_data = YTFieldData()
#self._calculate_child_masks()
self.Parent = None
self.Children = []
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -77,8 +77,11 @@
pbar.update(gi+1)
cm = data_source._get_cut_mask(grid)
if cm is True: cm = na.ones(grid.ActiveDimensions, dtype='bool')
+ old_field_parameters = grid.field_parameters
+ grid.field_parameters = data_source.field_parameters
local_ind = na.where( (grid[field] > min_val)
& (grid[field] < max_val) & cm )
+ grid.field_parameters = old_field_parameters
if local_ind[0].size == 0: continue
kk = na.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
grid["tempContours"] = na.ones(grid.ActiveDimensions, dtype='int64') * -1
@@ -129,7 +132,7 @@
print "Finished joining in %0.2e seconds" % (t2-t1)
pbar.finish()
data_source._flush_data_to_grids("tempContours", -1, dtype='int64')
- del data_source.data["tempContours"] # Force a reload from the grids
+ del data_source.field_data["tempContours"] # Force a reload from the grids
data_source.get_data("tempContours", in_grids=True)
contour_ind = {}
i = 0
@@ -141,6 +144,6 @@
mylog.info("Identified %s contours between %0.5e and %0.5e",
len(contour_ind.keys()),min_val,max_val)
for grid in chain(grid_set):
- grid.data.pop("tempContours", None)
- del data_source.data["tempContours"]
+ grid.field_data.pop("tempContours", None)
+ del data_source.field_data["tempContours"]
return contour_ind
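The contour finder fix temporarily borrows the data source's field parameters while evaluating grid[field], then restores the originals. Wrapping that save/restore idiom in a context manager makes it exception-safe; this is a suggested sketch of the same idea, not what the commit itself does:

    from contextlib import contextmanager

    @contextmanager
    def borrowed_field_parameters(grid, source):
        # Evaluate fields on `grid` using `source`'s field parameters.
        old = grid.field_parameters
        grid.field_parameters = source.field_parameters
        try:
            yield grid
        finally:
            grid.field_parameters = old

    # usage:
    # with borrowed_field_parameters(grid, data_source) as g:
    #     local_ind = na.where((g[field] > min_val) & (g[field] < max_val) & cm)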
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/analysis_modules/light_cone/halo_mask.py
--- a/yt/analysis_modules/light_cone/halo_mask.py
+++ b/yt/analysis_modules/light_cone/halo_mask.py
@@ -45,14 +45,14 @@
light_cone_mask.append(_make_slice_mask(slice, halo_list, pixels))
# Write out cube of masks from each slice.
- if cube_file is not None and ytcfg.getint("yt", "__parallel_rank") == 0:
+ if cube_file is not None and ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
mylog.info("Saving halo mask cube to %s." % cube_file)
output = h5py.File(cube_file, 'a')
output.create_dataset('haloMaskCube', data=na.array(light_cone_mask))
output.close()
# Write out final mask.
- if mask_file is not None and ytcfg.getint("yt", "__parallel_rank") == 0:
+ if mask_file is not None and ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
# Final mask is simply the product of the mask from each slice.
mylog.info("Saving halo mask to %s." % mask_file)
finalMask = na.ones(shape=(pixels, pixels))
@@ -76,7 +76,7 @@
haloMap.extend(_make_slice_halo_map(slice, halo_list))
# Write out file.
- if ytcfg.getint("yt", "__parallel_rank") == 0:
+ if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
mylog.info("Saving halo map to %s." % map_file)
f = open(map_file, 'w')
f.write("#z x y M [Msun] R [Mpc] R [image]\n")
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/analysis_modules/light_cone/light_cone.py
--- a/yt/analysis_modules/light_cone/light_cone.py
+++ b/yt/analysis_modules/light_cone/light_cone.py
@@ -108,7 +108,7 @@
self.pixels = int(self.field_of_view_in_arcminutes * 60.0 / \
self.image_resolution_in_arcseconds)
- if ytcfg.getint("yt", "__parallel_rank") == 0:
+ if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
# Create output directory.
if (os.path.exists(self.output_dir)):
if not(os.path.isdir(self.output_dir)):
@@ -243,7 +243,7 @@
else:
halo_mask_cube = light_cone_halo_mask(self, mask_file=mask_file, **kwargs)
# Collapse cube into final mask.
- if ytcfg.getint("yt", "__parallel_rank") == 0:
+ if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
self.halo_mask = na.ones(shape=(self.pixels, self.pixels), dtype=bool)
for mask in halo_mask_cube:
self.halo_mask *= mask
@@ -302,7 +302,7 @@
output['object'].parameters.update(self.set_parameters)
frb = _light_cone_projection(output, field, self.pixels,
weight_field=weight_field, node=node)
- if ytcfg.getint("yt", "__parallel_rank") == 0:
+ if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
if save_slice_images:
write_image(na.log10(frb[field]), "%s_%s.png" % (name, field), cmap_name=cmap_name)
@@ -342,7 +342,7 @@
if (q < len(self.light_cone_solution) - 1):
del output['object']
- if ytcfg.getint("yt", "__parallel_rank") == 0:
+ if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
# Add up slices to make light cone projection.
if (weight_field is None):
lightConeProjection = sum(self.projection_stack)
@@ -356,7 +356,7 @@
# Save the last fixed resolution buffer for the plot collection,
# but replace the data with the full light cone projection data.
- frb.data[field] = lightConeProjection
+ frb.field_data[field] = lightConeProjection
# Write image.
if save_slice_images:
@@ -370,7 +370,7 @@
if apply_halo_mask:
if len(self.halo_mask) > 0:
mylog.info("Applying halo mask.")
- frb.data[field] *= self.halo_mask
+ frb.field_data[field] *= self.halo_mask
else:
mylog.error("No halo mask loaded, call get_halo_mask.")
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/analysis_modules/light_cone/light_cone_projection.py
--- a/yt/analysis_modules/light_cone/light_cone_projection.py
+++ b/yt/analysis_modules/light_cone/light_cone_projection.py
@@ -88,7 +88,7 @@
field_cuts=these_field_cuts, node_name=node_name)
# If parallel: all the processes have the whole projection object, but we only need to do the tiling, shifting, and cutting once.
- if ytcfg.getint("yt", "__parallel_rank") == 0:
+ if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
# 2. The Tile Problem
# Tile projection to specified width.
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/analysis_modules/radial_column_density/api.py
--- /dev/null
+++ b/yt/analysis_modules/radial_column_density/api.py
@@ -0,0 +1,28 @@
+"""
+API for radial_column_density
+
+Author: Stephen Skory <s at skory.us>
+Affiliation: CU Boulder
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2010-2011 Stephen Skory. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .radial_column_density import RadialColumnDensity
+
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/analysis_modules/radial_column_density/radial_column_density.py
--- /dev/null
+++ b/yt/analysis_modules/radial_column_density/radial_column_density.py
@@ -0,0 +1,277 @@
+"""
+Calculate the radial column density around a point.
+
+Author: Stephen Skory <s at skory.us>
+Affiliation: CU Boulder
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2008-2011 Stephen Skory. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.mods import *
+import yt.visualization.volume_rendering.camera as camera
+import yt.utilities.amr_utils as au
+from yt.utilities.math_utils import periodic_dist
+from yt.data_objects.field_info_container import FieldDetector
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+ ParallelAnalysisInterface
+
+def _col_dens(field, data):
+ return data[field]
+
+class RadialColumnDensity(ParallelAnalysisInterface):
+ def __init__(self, pf, field, center, max_radius = 0.5, steps = 10,
+ base='lin', Nside = 32, ang_divs = 800j):
+ r"""
+ Calculate radial column densities in preparation for
+ adding them as a derived field.
+
+ This class is the first step in calculating a derived radial
+ column density field.
+ Given a central point, this calculates the column density to all cell
+ centers within the given radius in units of centimeters times
+ the units of the basis field.
+ For example, if the basis field is `NumberDensity`, which has units
+ of 1 / cm^3, the units of the derived field will be 1 / cm^2.
+ Please see the documentation or the example below on how to
+ use this to make the derived field which can be used like any other
+ derived field.
+
+ This builds a number of spherical
+ surfaces where the column density is calculated
+ using HEALPix Volume Rendering. The values of the column density at
+ grid points are then linearly interpolated between the two nearest
+ surfaces (one inward, one outward).
+ Please see the HEALPix Volume Rendering documentation for more on
+ that part of this calculation.
+
+ Parameters
+ ----------
+ pf : `StaticOutput`
+ The dataset to operate on.
+ field : string
+ The name of the basis field over which to
+ calculate a column density.
+ center : array_like
+ A list or array giving the location from which to
+ calculate the column density.
+ This will probably be "an object of interest" like
+ a star, black hole, or the center of a galaxy.
+ max_radius : float
+ How far out to calculate the column density, in code units. This
+ value will be automatically reduced if the supplied value would
+ result in calculating column densities outside the volume.
+ Default = 0.5.
+ steps : integer
+ How many surfaces to use. A higher number is more accurate, but
+ takes more resources.
+ Default = 10
+ base : string
+ How to evenly space the surfaces: linearly with "lin" or
+ logarithmically with "log".
+ Default = "lin".
+ Nside : int
+ The resolution of the column density calculation as performed by
+ HEALPix. Higher numbers mean higher quality. Max = 8192.
+ Default = 32.
+ ang_divs : imaginary integer
+ This number controls the gridding of the HEALPix projection onto
+ the spherical surfaces. Higher numbers mean higher quality.
+ Default = 800j.
+
+ Examples
+ --------
+
+ >>> rcdnumdens = RadialColumnDensity(pf, 'NumberDensity', [0.5, 0.5, 0.5])
+ >>> def _RCDNumberDensity(field, data, rcd = rcdnumdens):
+ ... return rcd._build_derived_field(data)
+ >>> add_field('RCDNumberDensity', _RCDNumberDensity, units=r'1/\rm{cm}^2')
+ """
+ ParallelAnalysisInterface.__init__(self)
+ self.pf = pf
+ self.center = na.asarray(center)
+ self.max_radius = max_radius
+ self.steps = steps
+ self.base = base
+ self.Nside = Nside
+ self.ang_divs = ang_divs
+ self.real_ang_divs = int(na.abs(ang_divs))
+ self.phi, self.theta = na.mgrid[0.0:2*na.pi:ang_divs, 0:na.pi:ang_divs]
+ self.phi1d = self.phi[:,0]
+ self.theta1d = self.theta[0,:]
+ self.dphi = self.phi1d[1] - self.phi1d[0]
+ self.dtheta = self.theta1d[1] - self.theta1d[0]
+ self.pixi = au.arr_ang2pix_nest(Nside, self.theta.ravel(), self.phi.ravel())
+ self.dw = pf.domain_right_edge - pf.domain_left_edge
+ # Here's where we actually do stuff.
+ self._fix_max_radius()
+ self._make_bins()
+ self._build_surfaces(field)
+
+ def _fix_max_radius(self):
+ # The max_radius can only be the distance from the center point to
+ # the closest face of the volume. This is because the column density
+ # for a surface outside the volume is ill-defined due to the way
+ # normalization is handled in the volume render.
+ # It may be possible to fix this in
+ # the future, and allow these calculations in the whole volume,
+ # but this will work for now.
+ right = self.pf.domain_right_edge - self.center
+ left = self.center - self.pf.domain_left_edge
+ min_r = na.min(right)
+ min_l = na.min(left)
+ self.max_radius = na.min([self.max_radius, min_r, min_l])
+
+ def _make_bins(self):
+ # We'll make the bins run from the smallest cell size to the
+ # specified radius. Column density inside the same cell as our
+ # center is kind of ill-defined, anyway.
+ if self.base == 'lin':
+ self.bins = na.linspace(self.pf.h.get_smallest_dx(), self.max_radius,
+ self.steps)
+ elif self.base == 'log':
+ self.bins = na.logspace(na.log10(self.pf.h.get_smallest_dx()),
+ na.log10(self.max_radius), self.steps)
+
+ def _build_surfaces(self, field):
+ # This will be indexed by bin index.
+ self.surfaces = {}
+ for i, radius in enumerate(self.bins):
+ cam = camera.HEALpixCamera(self.center, radius, self.Nside,
+ pf = self.pf, log_fields = [False], fields = field)
+ bitmap = cam.snapshot()
+ self.surfaces[i] = radius * self.pf['cm'] * \
+ bitmap[:,0,0][self.pixi].reshape((self.real_ang_divs,self.real_ang_divs))
+ self.surfaces[i] = self.comm.mpi_allreduce(self.surfaces[i], op='max')
+
+ def _build_derived_field(self, data, minval=None):
+ r"""
+ Parameters
+ ----------
+
+ minval : float
+ This parameter will set any values of the
+ field that are zero to this minimum value.
+ Values of zero are found outside the maximum radius and
+ in the cell of the user-specified center point.
+ This setting is useful if the field is going to be logged
+ (e.g. na.log10) where zeros are inconvenient.
+ Default = None
+ """
+ x = data['x']
+ sh = x.shape
+ ad = na.prod(sh)
+ if isinstance(data, FieldDetector):
+ return na.ones(sh)
+ y = data['y']
+ z = data['z']
+ pos = na.array([x.reshape(ad), y.reshape(ad), z.reshape(ad)]).T
+ del x, y, z
+ vals = self._interpolate_value(pos)
+ del pos
+ if minval:
+ zeros = (vals == 0.)
+ vals[zeros] = minval
+ del zeros
+ vals.shape = sh
+ return vals
+
+ def _interpolate_value(self, pos):
+ # Given a position, find the two surfaces it's in between,
+ # and then interpolate values from the surfaces to the point
+ # according to the point's angle.
+ # 1. Find the angle from the center point to the position.
+ vec = pos - self.center
+ phi = na.arctan2(vec[:, 1], vec[:, 0])
+ # Convert the convention from [-pi, pi) to [0, 2pi).
+ sel = (phi < 0)
+ phi[sel] += 2 * na.pi
+ # Find the radius.
+ r = na.sqrt(na.sum(vec * vec, axis = 1))
+ # Keep track of the points outside of self.max_radius, which we'll
+ # handle separately before we return.
+ outside = (r > self.max_radius)
+ theta = na.arccos(vec[:, 2] / r)
+ # 2. Find the bin for this position.
+ digi = na.digitize(r, self.bins)
+ # Find the values on the inner and outer surfaces.
+ in_val = na.zeros_like(r)
+ out_val = na.zeros_like(r)
+ # These two will be used for interpolation.
+ in_r = na.zeros_like(r)
+ out_r = na.zeros_like(r)
+ for bin in na.unique(digi):
+ sel = (digi == bin)
+ # Special case if we're outside the largest sphere.
+ if bin == len(self.bins):
+ in_val[sel] = 0.
+ out_val[sel] = 0.
+ # Just something non-zero so we don't get divide errors later.
+ in_r[sel] = .1
+ out_r[sel] = .2
+ continue
+ # Special case if we're inside the smallest sphere.
+ elif bin == 0:
+ in_val[sel] = na.zeros_like(phi[sel])
+ in_r[sel] = 0.
+ out_val[sel] = self._interpolate_surface_value(1,
+ phi[sel], theta[sel])
+ out_r[sel] = self.bins[1]
+ continue
+ # General case.
+ else:
+ in_val[sel] = self._interpolate_surface_value(bin - 1,
+ phi[sel], theta[sel])
+ in_r[sel] = self.bins[bin - 1]
+ out_val[sel] = self._interpolate_surface_value(bin,
+ phi[sel], theta[sel])
+ out_r[sel] = self.bins[bin]
+ # Interpolate using a linear fit in column density / r space.
+ val = na.empty_like(r)
+ # Special case for inside smallest sphere.
+ sel = (digi == 0)
+ val[sel] = (1. - (out_r[sel] - r[sel]) / out_r[sel]) * out_val[sel]
+ na.invert(sel, sel) # In-place operation!
+ val[sel] = (out_val[sel] - in_val[sel]) / (out_r[sel] - in_r[sel]) * \
+ (r[sel] - in_r[sel]) + in_val[sel]
+ # Fix the things to zero that should be zero.
+ val[outside] = 0.
+ return val
+
+ def _interpolate_surface_value(self, bin, phi, theta):
+ # Given a surface bin and an angle, interpolate the value on
+ # that surface to the angle.
+ # 1. Find the four values closest to the angle.
+ phi_bin = na.digitize(phi, self.phi1d)
+ theta_bin = na.digitize(theta, self.theta1d)
+ val00 = self.surfaces[bin][phi_bin - 1, theta_bin - 1]
+ val01 = self.surfaces[bin][phi_bin - 1, theta_bin]
+ val10 = self.surfaces[bin][phi_bin, theta_bin - 1]
+ val11 = self.surfaces[bin][phi_bin, theta_bin]
+ # 2. Linearly interpolate the four values to the points.
+ int_val0 = (val10 - val00) / self.dphi * \
+ (phi - self.phi1d[phi_bin - 1]) + val00
+ int_val1 = (val11 - val01) / self.dphi * \
+ (phi - self.phi1d[phi_bin - 1]) + val01
+ vals = (int_val1 - int_val0) / self.dtheta * \
+ (theta - self.theta1d[theta_bin - 1]) + int_val0
+ return vals
+
+
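Tying the new module together, a usage sketch that follows the class docstring; the dataset name is hypothetical and the yt.mods namespace (load, add_field, na) is assumed:

    from yt.mods import *
    from yt.analysis_modules.radial_column_density.api import \
        RadialColumnDensity

    pf = load("DD0040/DD0040")  # hypothetical dataset
    rcd = RadialColumnDensity(pf, 'NumberDensity', [0.5, 0.5, 0.5],
                              max_radius=0.3, steps=10, base='lin')

    def _RCDNumberDensity(field, data, rcd=rcd):
        return rcd._build_derived_field(data)
    add_field('RCDNumberDensity', _RCDNumberDensity, units=r'1/\rm{cm}^2')

    # The derived field now behaves like any other field.
    dd = pf.h.all_data()
    print dd['RCDNumberDensity'].max()
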
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -16,6 +16,7 @@
config.add_subpackage("level_sets")
config.add_subpackage("light_ray")
config.add_subpackage("light_cone")
+ config.add_subpackage("radial_column_density")
config.add_subpackage("simulation_handler")
config.add_subpackage("spectral_integrator")
config.add_subpackage("star_analysis")
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/analysis_modules/simulation_handler/enzo_simulation.py
--- a/yt/analysis_modules/simulation_handler/enzo_simulation.py
+++ b/yt/analysis_modules/simulation_handler/enzo_simulation.py
@@ -31,6 +31,9 @@
dt_Tolerance = 1e-3
+from yt.data_objects.time_series import \
+ TimeSeriesData
+
from yt.utilities.cosmology import \
Cosmology, \
EnzoCosmology
@@ -38,13 +41,15 @@
from yt.convenience import \
load
-class EnzoSimulation(object):
+class EnzoSimulation(TimeSeriesData):
r"""Super class for performing the same operation over all data dumps in
a simulation from one redshift to another.
"""
- def __init__(self, enzo_parameter_file, initial_time=None, final_time=None, initial_redshift=None, final_redshift=None,
- links=False, enzo_parameters=None, get_time_outputs=True, get_redshift_outputs=True, get_available_data=False,
- get_data_by_force=False):
+ def __init__(self, enzo_parameter_file, initial_time=None, final_time=None,
+ initial_redshift=None, final_redshift=None,
+ links=False, enzo_parameters=None,
+ get_time_outputs=True, get_redshift_outputs=True,
+ get_available_data=False, get_data_by_force=False):
r"""Initialize an Enzo Simulation object.
initial_time : float
@@ -122,6 +127,11 @@
# Get all the appropriate datasets.
self._get_all_outputs(brute_force=get_data_by_force)
+ # Instantiate a TimeSeriesData object.
+ time_series_outputs = [load(output['filename']) \
+ for output in self.allOutputs]
+ TimeSeriesData.__init__(self, outputs=time_series_outputs)
+
def _calculate_redshift_dump_times(self):
"Calculates time from redshift of redshift dumps."
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -98,6 +98,7 @@
... length_number=10, length_range=[1./128, .5],
... length_type="log")
"""
+ ParallelAnalysisInterface.__init__(self)
try:
fKD
except NameError:
@@ -107,8 +108,8 @@
self.constant_theta = theta
self.constant_phi = phi
# MPI stuff.
- self.size = self._mpi_get_size()
- self.mine = self._mpi_get_rank()
+ self.size = self.comm.size
+ self.mine = self.comm.rank
self.vol_ratio = vol_ratio
if self.vol_ratio == -1:
self.vol_ratio = self.size
@@ -160,7 +161,7 @@
ds = pf.h.periodic_region_strict([0.]*3, self.left_edge,
self.right_edge)
padded, self.LE, self.RE, self.ds = \
- self._partition_hierarchy_3d(ds = ds, padding=0.,
+ self.partition_hierarchy_3d(ds = ds, padding=0.,
rank_ratio = self.vol_ratio)
else:
self.left_edge = left_edge
@@ -168,10 +169,10 @@
# We do this twice, first with no 'buffer' to get the unbuffered
# self.LE/RE, and then second to get a buffered self.ds.
padded, self.LE, self.RE, temp = \
- self._partition_region_3d(left_edge, right_edge,
+ self.partition_region_3d(left_edge, right_edge,
rank_ratio=self.vol_ratio)
padded, temp, temp, self.ds = \
- self._partition_region_3d(left_edge - self.lengths[-1], \
+ self.partition_region_3d(left_edge - self.lengths[-1], \
right_edge + self.lengths[-1], rank_ratio=self.vol_ratio)
mylog.info("LE %s RE %s %s" % (str(self.LE), str(self.RE), str(self.ds)))
self.width = self.ds.right_edge - self.ds.left_edge
@@ -273,8 +274,8 @@
self._setup_recv_arrays()
self._send_arrays()
t0 = time.time()
- self._mpi_Request_Waitall(self.send_hooks)
- self._mpi_Request_Waitall(self.recv_hooks)
+ self.comm.mpi_Request_Waitall(self.send_hooks)
+ self.comm.mpi_Request_Waitall(self.recv_hooks)
t1 = time.time()
t_waiting += (t1-t0)
if (self.recv_points < -1.).any() or (self.recv_points > 1.).any(): # or \
@@ -363,7 +364,7 @@
for task in xrange(self.size):
if task == self.mine: continue
self.recv_done[task] = na.zeros(1, dtype='int64')
- self.done_hooks.append(self._mpi_Irecv_long(self.recv_done[task], \
+ self.done_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_done[task], \
task, tag=15))
def _send_done_to_root(self):
@@ -376,7 +377,7 @@
# I send when I *think* things should finish.
self.send_done = na.ones(1, dtype='int64') * \
(self.size / self.vol_ratio -1) + self.comm_cycle_count
- self.done_hooks.append(self._mpi_Isend_long(self.send_done, \
+ self.done_hooks.append(self.comm.mpi_nonblocking_send(self.send_done, \
0, tag=15))
else:
# As root, I need to mark myself!
@@ -390,7 +391,7 @@
"""
if self.mine == 0:
# If other tasks aren't finished, this will return False.
- status = self._mpi_Request_Testall(self.done_hooks)
+ status = self.comm.mpi_Request_Testall(self.done_hooks)
# Convolve this with root's status.
status = status * (self.generated_points == self.total_values)
if status == 1:
@@ -402,7 +403,7 @@
status = 0
# Broadcast the status from root - we stop only if root thinks we should
# stop.
- status = self._mpi_bcast_pickled(status)
+ status = self.comm.mpi_bcast_pickled(status)
if status == 0: return True
if self.comm_cycle_count < status:
return True
@@ -418,22 +419,22 @@
self.recv_fields_vals = na.zeros((self.comm_size, len(self.fields)*2), \
dtype='float64')
self.recv_gen_array = na.zeros(self.size, dtype='int64')
- self.recv_hooks.append(self._mpi_Irecv_double(self.recv_points, \
+ self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_points, \
(self.mine-1)%self.size, tag=10))
- self.recv_hooks.append(self._mpi_Irecv_double(self.recv_fields_vals, \
+ self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_fields_vals, \
(self.mine-1)%self.size, tag=20))
- self.recv_hooks.append(self._mpi_Irecv_long(self.recv_gen_array, \
+ self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_gen_array, \
(self.mine-1)%self.size, tag=40))
def _send_arrays(self):
"""
Send the data arrays to the right-hand neighbor.
"""
- self.send_hooks.append(self._mpi_Isend_double(self.points,\
+ self.send_hooks.append(self.comm.mpi_nonblocking_send(self.points,\
(self.mine+1)%self.size, tag=10))
- self.send_hooks.append(self._mpi_Isend_double(self.fields_vals,\
+ self.send_hooks.append(self.comm.mpi_nonblocking_send(self.fields_vals,\
(self.mine+1)%self.size, tag=20))
- self.send_hooks.append(self._mpi_Isend_long(self.gen_array, \
+ self.send_hooks.append(self.comm.mpi_nonblocking_send(self.gen_array, \
(self.mine+1)%self.size, tag=40))
def _allsum_bin_hits(self):
@@ -441,8 +442,8 @@
Add up the hits to all the bins globally for all functions.
"""
for fset in self._fsets:
- fset.too_low = self._mpi_allsum(fset.too_low)
- fset.too_high = self._mpi_allsum(fset.too_high)
+ fset.too_low = self.comm.mpi_allreduce(fset.too_low, op='sum')
+ fset.too_high = self.comm.mpi_allreduce(fset.too_high, op='sum')
fset.binned = {}
if self.mine == 0:
mylog.info("Function %s had values out of range for these fields:" % \
@@ -452,7 +453,7 @@
(field, fset.too_high[i], fset.too_low[i]))
for length in self.lengths:
fset.length_bin_hits[length] = \
- self._mpi_Allsum_long(fset.length_bin_hits[length])
+ self.comm.mpi_allreduce(fset.length_bin_hits[length], op='sum')
# Find out how many were successfully binned.
fset.binned[length] = fset.length_bin_hits[length].sum()
# Normalize the counts.
@@ -621,7 +622,7 @@
>>> tpf.write_out_means()
"""
for fset in self._fsets:
- fp = self._write_on_root(fn % fset.function.__name__)
+ fp = self.comm.write_on_root(fn % fset.function.__name__)
fset._avg_bin_hits()
line = "# length".ljust(sep)
line += "count".ljust(sep)
@@ -689,7 +690,7 @@
for fset in self._fsets:
# Only operate on correlation functions.
if fset.corr_norm == None: continue
- fp = self._write_on_root("%s_correlation.txt" % fset.function.__name__)
+ fp = self.comm.write_on_root("%s_correlation.txt" % fset.function.__name__)
line = "# length".ljust(sep)
line += "\\xi".ljust(sep)
fp.write(line + "\n")
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -38,8 +38,10 @@
inline = 'False',
__withinreason = 'False',
__parallel = 'False',
- __parallel_rank = '0',
- __parallel_size = '1',
+ __global_parallel_rank = '0',
+ __global_parallel_size = '1',
+ __topcomm_parallel_rank = '0',
+ __topcomm_parallel_size = '1',
storeparameterfiles = 'True',
parameterfilestore = 'parameter_files.csv',
maximumstoredpfs = '500',
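The config change splits the old single rank/size pair into global and top-communicator variants, so code can distinguish a task's global rank (presumably across all of MPI) from its rank in the top-level work communicator. A small sketch of reading both; in an un-split run the two ranks agree:

    from yt.config import ytcfg

    global_rank = ytcfg.getint("yt", "__global_parallel_rank")
    topcomm_rank = ytcfg.getint("yt", "__topcomm_parallel_rank")
    if topcomm_rank == 0:
        pass  # root-only work, as in the light cone changes above
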
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -40,7 +40,8 @@
from yt.data_objects.particle_io import particle_handler_registry
from yt.utilities.amr_utils import find_grids_in_inclined_box, \
grid_points_in_volume, planar_points_in_volume, VoxelTraversal, \
- QuadTree, get_box_grids_below_level, ghost_zone_interpolate
+ QuadTree, get_box_grids_below_level, ghost_zone_interpolate, \
+ march_cubes_grid, march_cubes_grid_flux
from yt.utilities.data_point_utilities import CombineGrids, \
DataCubeRefine, DataCubeReplace, FillRegion, FillBuffer
from yt.utilities.definitions import axis_names, x_dict, y_dict
@@ -69,11 +70,11 @@
"""
def save_state(self, grid, field=None):
old_params = grid.field_parameters
- old_keys = grid.data.keys()
+ old_keys = grid.field_data.keys()
grid.field_parameters = self.field_parameters
tr = func(self, grid, field)
grid.field_parameters = old_params
- grid.data = dict( [(k, grid.data[k]) for k in old_keys] )
+ grid.field_data = YTFieldData( [(k, grid.field_data[k]) for k in old_keys] )
return tr
return save_state
@@ -119,6 +120,12 @@
return self._vc_data[field][grid.id]
return check_cache
+class YTFieldData(dict):
+ """
+ A Container object for field data, instead of just having it be a dict.
+ """
+ pass
+
class FakeGridForParticles(object):
"""
Mock up a grid to insert particle positions and radii
@@ -127,20 +134,20 @@
def __init__(self, grid):
self._corners = grid._corners
self.field_parameters = {}
- self.data = {'x':grid['particle_position_x'],
- 'y':grid['particle_position_y'],
- 'z':grid['particle_position_z'],
- 'dx':grid['dx'],
- 'dy':grid['dy'],
- 'dz':grid['dz']}
+ self.field_data = YTFieldData({'x':grid['particle_position_x'],
+ 'y':grid['particle_position_y'],
+ 'z':grid['particle_position_z'],
+ 'dx':grid['dx'],
+ 'dy':grid['dy'],
+ 'dz':grid['dz']})
self.dds = grid.dds.copy()
self.real_grid = grid
self.child_mask = 1
- self.ActiveDimensions = self.data['x'].shape
+ self.ActiveDimensions = self.field_data['x'].shape
self.DW = grid.pf.domain_right_edge - grid.pf.domain_left_edge
def __getitem__(self, field):
- if field not in self.data.keys():
+ if field not in self.field_data.keys():
if field == "RadiusCode":
center = self.field_parameters['center']
tempx = na.abs(self['x'] - center[0])
@@ -152,7 +159,7 @@
tr = na.sqrt( tempx**2.0 + tempy**2.0 + tempz**2.0 )
else:
raise KeyError(field)
- else: tr = self.data[field]
+ else: tr = self.field_data[field]
return tr
class AMRData(object):
@@ -186,7 +193,7 @@
mylog.debug("Appending object to %s (type: %s)", self.pf, type(self))
if fields == None: fields = []
self.fields = ensure_list(fields)[:]
- self.data = {}
+ self.field_data = YTFieldData()
self.field_parameters = {}
self.__set_default_field_parameters()
self._cut_masks = {}
@@ -248,7 +255,7 @@
"""
Clears out all data from the AMRData instance, freeing memory.
"""
- self.data.clear()
+ self.field_data.clear()
if self._grids is not None:
for grid in self._grids: grid.clear_data()
@@ -265,7 +272,7 @@
"""
Checks if a data field already exists.
"""
- return self.data.has_key(key)
+ return self.field_data.has_key(key)
def _refresh_data(self):
"""
@@ -275,24 +282,24 @@
self.get_data()
def keys(self):
- return self.data.keys()
+ return self.field_data.keys()
def __getitem__(self, key):
"""
Returns a single field. Will add if necessary.
"""
- if not self.data.has_key(key):
+ if not self.field_data.has_key(key):
if key not in self.fields:
self.fields.append(key)
self.get_data(key)
- return self.data[key]
+ return self.field_data[key]
def __setitem__(self, key, val):
"""
Sets a field to be some other value.
"""
if key not in self.fields: self.fields.append(key)
- self.data[key] = val
+ self.field_data[key] = val
def __delitem__(self, key):
"""
@@ -302,21 +309,21 @@
del self.fields[self.fields.index(key)]
except ValueError:
pass
- del self.data[key]
+ del self.field_data[key]
def _generate_field_in_grids(self, fieldName):
pass
_key_fields = None
def write_out(self, filename, fields=None, format="%0.16e"):
- if fields is None: fields=sorted(self.data.keys())
+ if fields is None: fields=sorted(self.field_data.keys())
if self._key_fields is None: raise ValueError
field_order = self._key_fields[:]
for field in field_order: self[field]
field_order += [field for field in fields if field not in field_order]
fid = open(filename,"w")
fid.write("\t".join(["#"] + field_order + ["\n"]))
- field_data = na.array([self.data[field] for field in field_order])
+ field_data = na.array([self.field_data[field] for field in field_order])
for line in range(field_data.shape[1]):
field_data[:,line].tofile(fid, sep="\t", format=format)
fid.write("\n")
@@ -464,11 +471,11 @@
else:
fields_to_get = ensure_list(fields)
if not self.sort_by in fields_to_get and \
- self.sort_by not in self.data:
+ self.sort_by not in self.field_data:
fields_to_get.insert(0, self.sort_by)
mylog.debug("Going to obtain %s", fields_to_get)
for field in fields_to_get:
- if self.data.has_key(field):
+ if self.field_data.has_key(field):
continue
mylog.info("Getting field %s from %s", field, len(self._grids))
if field not in self.hierarchy.field_list and not in_grids:
@@ -477,7 +484,7 @@
self[field] = na.concatenate(
[self._get_data_from_grid(grid, field)
for grid in self._grids])
- if not self.data.has_key(field):
+ if not self.field_data.has_key(field):
continue
if self._sortkey is None:
self._sortkey = na.argsort(self[self.sort_by])
@@ -763,6 +770,7 @@
Prepares the AMR2DData, normal to *axis*. If *axis* is 4, we are not
aligned with any axis.
"""
+ ParallelAnalysisInterface.__init__(self)
self.axis = axis
AMRData.__init__(self, pf, fields, **kwargs)
self.field = ensure_list(fields)[0]
@@ -788,7 +796,7 @@
fields_to_get = ensure_list(fields)
temp_data = {}
for field in fields_to_get:
- if self.data.has_key(field): continue
+ if self.field_data.has_key(field): continue
if field not in self.hierarchy.field_list:
if self._generate_field(field):
continue # A "True" return means we did it
@@ -803,12 +811,12 @@
self[field] = temp_data[field]
# We finalize
if temp_data != {}:
- temp_data = self._mpi_catdict(temp_data)
+ temp_data = self.comm.par_combine_object(temp_data,
+ datatype='dict', op='cat')
# And set, for the next group
for field in temp_data.keys():
self[field] = temp_data[field]
-
def _generate_field(self, field):
if self.pf.field_info.has_key(field):
# First we check the validator
@@ -828,6 +836,25 @@
for grid in self._grids:
temp = grid[field]
+ def to_frb(self, width, resolution, center = None):
+ if center is None:
+ center = self.get_field_parameter("center")
+ if center is None:
+ center = (self.pf.domain_right_edge
+ + self.pf.domain_left_edge)/2.0
+ if iterable(width):
+ w, u = width
+ width = w/self.pf[u]
+ if not iterable(resolution):
+ resolution = (resolution, resolution)
+ from yt.visualization.fixed_resolution import FixedResolutionBuffer
+ xax = x_dict[self.axis]
+ yax = y_dict[self.axis]
+ bounds = (center[xax] - width/2.0, center[xax] + width/2.0,
+ center[yax] - width/2.0, center[yax] + width/2.0)
+ frb = FixedResolutionBuffer(self, bounds, resolution)
+ return frb
+
def interpolate_discretize(self, LE, RE, field, side, log_spacing=True):
"""
This returns a uniform grid of points between *LE* and *RE*,
@@ -972,12 +999,14 @@
points.append(self._generate_grid_coords(grid))
if len(points) == 0:
points = None
- t = self._mpi_catarray(None)
+ t = self.comm.par_combine_object(None, datatype="array", op="cat")
else:
points = na.concatenate(points)
- # We have to transpose here so that _mpi_catarray works properly, as
- # it and the alltoall assume the long axis is the last one.
- t = self._mpi_catarray(points.transpose())
+ # We have to transpose here so that par_combine_object works
+ # properly, as it and the alltoall assume the long axis is the last
+ # one.
+ t = self.comm.par_combine_object(points.transpose(),
+ datatype="array", op="cat")
self['px'] = t[0,:]
self['py'] = t[1,:]
self['pz'] = t[2,:]
@@ -1192,7 +1221,7 @@
points.append(self._generate_grid_coords(grid))
if len(points) == 0: points = None
else: points = na.concatenate(points)
- t = self._mpi_catarray(points)
+ t = self.comm.par_combine_object(points, datatype="array", op="cat")
pos = (t[:,0:3] - self.center)
self['px'] = na.dot(pos, self._x_vec)
self['py'] = na.dot(pos, self._y_vec)
@@ -1403,7 +1432,7 @@
temp_data = {}
_size = self.dims * self.dims
for field in fields_to_get:
- if self.data.has_key(field): continue
+ if self.field_data.has_key(field): continue
if field not in self.hierarchy.field_list:
if self._generate_field(field):
continue # A "True" return means we did it
@@ -1412,8 +1441,8 @@
self[field] = na.zeros(_size, dtype='float64')
for grid in self._get_grids():
self._get_data_from_grid(grid, field)
- self[field] = self._mpi_allsum(\
- self[field]).reshape([self.dims]*2).transpose()
+ self[field] = self.comm.mpi_allreduce(\
+ self[field], op='sum').reshape([self.dims]*2).transpose()
def interpolate_discretize(self, *args, **kwargs):
pass
@@ -1572,7 +1601,7 @@
else: fields = ensure_list(fields)
# We need a new tree for every single set of fields we add
self._obtain_fields(fields, self._node_name)
- fields = [f for f in fields if f not in self.data]
+ fields = [f for f in fields if f not in self.field_data]
if len(fields) == 0: return
tree = self._get_tree(len(fields))
coord_data = []
@@ -1584,21 +1613,21 @@
if self.preload_style == 'all':
print "Preloading %s grids and getting %s" % (
len(self.source._get_grid_objs()),
- self._get_dependencies(fields))
- self._preload([g for g in self._get_grid_objs()],
- self._get_dependencies(fields), self.hierarchy.io)
+ self.get_dependencies(fields))
+ self.comm.preload([g for g in self._get_grid_objs()],
+ self.get_dependencies(fields), self.hierarchy.io)
# By changing the remove-from-tree method to accumulate, we can avoid
# having to do this by level, and instead do it by CPU file
for level in range(0, self._max_level+1):
if self.preload_style == 'level':
- self._preload([g for g in self._get_grid_objs()
+ self.comm.preload([g for g in self._get_grid_objs()
if g.Level == level],
- self._get_dependencies(fields), self.hierarchy.io)
+ self.get_dependencies(fields), self.hierarchy.io)
self._add_level_to_tree(tree, level, fields)
mylog.debug("End of projecting level level %s, memory usage %0.3e",
level, get_memory_usage()/1024.)
# Note that this will briefly double RAM usage
- tree = self.merge_quadtree_buffers(tree)
+ tree = self.comm.merge_quadtree_buffers(tree)
coord_data, field_data, weight_data, dxs = [], [], [], []
for level in range(0, self._max_level + 1):
npos, nvals, nwvals = tree.get_all_from_level(level, False)
@@ -1812,7 +1841,7 @@
def _initialize_source(self, source = None):
if source is None:
- check, source = self._partition_hierarchy_2d(self.axis)
+ check, source = self.partition_hierarchy_2d(self.axis)
self._check_region = check
#self._okay_to_serialize = (not check)
else:
@@ -1962,7 +1991,7 @@
if fields is None: fields = ensure_list(self.fields)[:]
else: fields = ensure_list(fields)
self._obtain_fields(fields, self._node_name)
- fields = [f for f in fields if f not in self.data]
+ fields = [f for f in fields if f not in self.field_data]
if len(fields) == 0: return
coord_data = []
field_data = []
@@ -1973,13 +2002,13 @@
# _project_level, then it would be more memory conservative
if self.preload_style == 'all':
print "Preloading %s grids and getting %s" % (
- len(self.source._grids), self._get_dependencies(fields))
- self._preload(self.source._grids,
- self._get_dependencies(fields), self.hierarchy.io)
+ len(self.source._grids), self.get_dependencies(fields))
+ self.comm.preload(self.source._grids,
+ self.get_dependencies(fields), self.hierarchy.io)
for level in range(0, self._max_level+1):
if self.preload_style == 'level':
- self._preload(self.source.select_grids(level),
- self._get_dependencies(fields), self.hierarchy.io)
+ self.comm.preload(self.source.select_grids(level),
+ self.get_dependencies(fields), self.hierarchy.io)
self.__calculate_overlap(level)
my_coords, my_pdx, my_pdy, my_fields = \
self.__project_level(level, fields)
@@ -2015,7 +2044,7 @@
data['pdy'] *= 0.5
data['fields'] = field_data
# Now we run the finalizer, which is ignored if we don't need it
- data = self._mpi_catdict(data)
+ data = self.comm.par_combine_object(data, datatype='dict', op='cat')
field_data = na.vsplit(data.pop('fields'), len(fields))
for fi, field in enumerate(fields):
self[field] = field_data[fi].ravel()
@@ -2201,7 +2230,7 @@
self._get_data_from_grid(grid, fields_to_get, dls)
mylog.info("IO completed; summing")
for field in fields_to_get:
- self[field] = self._mpi_Allsum_double(self[field])
+ self[field] = self.comm.mpi_allreduce(self[field], op='sum')
conv = self.pf.units[self.pf.field_info[field].projection_conversion]
self[field] *= conv
@@ -2284,7 +2313,7 @@
fields_to_get = ensure_list(fields)
mylog.debug("Going to obtain %s", fields_to_get)
for field in fields_to_get:
- if self.data.has_key(field):
+ if self.field_data.has_key(field):
continue
if field not in self.hierarchy.field_list and not in_grids:
if self._generate_field(field):
@@ -2296,14 +2325,14 @@
self.pf.field_info[field].particle_type and \
self.pf.h.io._particle_reader:
self.particles.get_data(field)
- if field not in self.data:
+ if field not in self.field_data:
if self._generate_field(field): continue
mylog.info("Getting field %s from %s", field, len(self._grids))
self[field] = na.concatenate(
[self._get_data_from_grid(grid, field)
for grid in self._grids])
for field in fields_to_get:
- if not self.data.has_key(field):
+ if not self.field_data.has_key(field):
continue
self[field] = self[field]
@@ -2402,7 +2431,8 @@
Return an ExtractedRegion where the points contained in it are defined
as the points in `this` data object with the given *indices*.
"""
- return ExtractedRegionBase(self, indices)
+ fp = self.field_parameters.copy()
+ return ExtractedRegionBase(self, indices, **fp)
def __get_quantities(self):
if self.__quantities is None:
@@ -2411,6 +2441,168 @@
__quantities = None
quantities = property(__get_quantities)
+ def extract_isocontours(self, field, value, filename = None,
+ rescale = False, sample_values = None):
+ r"""This identifies isocontours on a cell-by-cell basis, with no
+ consideration of global connectedness, and returns the vertices of the
+ triangles in that isocontour.
+
+ This function simply returns the vertices of all the triangles
+ calculated by the marching cubes algorithm; for more complex
+ operations, such as identifying connected sets of cells above a given
+ threshold, see the extract_connected_sets function. This is more
+ useful for calculating, for instance, total isocontour area, or
+ visualizing in an external program (such as `MeshLab
+ <http://meshlab.sf.net>`_.)
+
+ Parameters
+ ----------
+ field : string
+ Any field that can be obtained in a data object. This is the field
+ which will be isocontoured.
+ value : float
+ The value at which the isocontour should be calculated.
+ filename : string, optional
+ If supplied, this file will be filled with the vertices in .obj
+ format. Suitable for loading into meshlab.
+ rescale : bool, optional
+ If true, the vertices will be rescaled to fit in the unit cube,
+ preserving their aspect ratio.
+
+ Returns
+ -------
+ verts : array of floats
+ The array of vertices, x,y,z. Taken in threes, these are the
+ triangle vertices.
+
+ References
+ ----------
+
+ .. [1] Marching Cubes: http://en.wikipedia.org/wiki/Marching_cubes
+
+ Examples
+ --------
+ This will create a data object, find a nice value in the center, and
+ output the vertices to "triangles.obj" after rescaling them.
+
+ >>> dd = pf.h.all_data()
+ >>> rho = dd.quantities["WeightedAverageQuantity"](
+ ... "Density", weight="CellMassMsun")
+ >>> verts = dd.extract_isocontours("Density", rho,
+ ... "triangles.obj", True)
+ """
+ verts = []
+ samples = []
+ pb = get_pbar("Extracting Isocontours", len(self._grids))
+ for i, g in enumerate(self._grids):
+ pb.update(i)
+ mask = self._get_cut_mask(g) * g.child_mask
+ vals = g.get_vertex_centered_data(field)
+ if sample_values is not None:
+ svals = g.get_vertex_centered_data(sample_values)
+ else:
+ svals = None
+ my_verts = march_cubes_grid(value, vals, mask, g.LeftEdge, g.dds,
+ svals)
+ if sample_values is not None:
+ my_verts, svals = my_verts
+ samples.append(svals)
+ verts.append(my_verts)
+ pb.finish()
+ verts = na.concatenate(verts)
+ if sample_values is not None:
+ samples = na.concatenate(samples)
+ if rescale:
+ mi = na.min(verts, axis=0)
+ ma = na.max(verts, axis=0)
+ verts = (verts - mi) / (ma - mi).max()
+ if filename is not None:
+ f = open(filename, "w")
+ for v1 in verts:
+ f.write("v %0.16e %0.16e %0.16e\n" % (v1[0], v1[1], v1[2]))
+ for i in range(len(verts)/3):
+ f.write("f %s %s %s\n" % (i*3+1, i*3+2, i*3+3))
+ if sample_values is not None:
+ return verts, samples
+ return verts
+
+ def calculate_isocontour_flux(self, field, value,
+ field_x, field_y, field_z, fluxing_field = None):
+ r"""This identifies isocontours on a cell-by-cell basis, with no
+ consideration of global connectedness, and calculates the flux over
+ those contours.
+
+ This function will conduct marching cubes on all the cells in a given
+ data container (grid-by-grid), and then for each identified triangular
+ segment of an isocontour in a given cell, calculate the gradient (i.e.,
+ normal) in the isocontoured field, interpolate the local value of the
+ "fluxing" field, the area of the triangle, and then return:
+
+ area * local_flux_value * (n dot v)
+
+ where area, local_flux_value, and the vector v are interpolated at the barycenter
+ (weighted by the vertex values) of the triangle. Note that this
+ specifically allows for the field fluxing across the surface to be
+ *different* from the field being contoured. If the fluxing_field is
+ not specified, it is assumed to be 1.0 everywhere, and the raw flux
+ with no local-weighting is returned.
+
+ Additionally, the returned flux is defined as flux *into* the surface,
+ not flux *out of* the surface.
+
+ Parameters
+ ----------
+ field : string
+ Any field that can be obtained in a data object. This is the field
+ which will be isocontoured and used as the "local_value" in the
+ flux equation.
+ value : float
+ The value at which the isocontour should be calculated.
+ field_x : string
+ The x-component field
+ field_y : string
+ The y-component field
+ field_z : string
+ The z-component field
+ fluxing_field : string, optional
+ The field whose passage over the surface is of interest. If not
+ specified, assumed to be 1.0 everywhere.
+
+ Returns
+ -------
+ flux : float
+ The summed flux. Note that it is not currently scaled; this is
+ simply the code-unit area times the fields.
+
+ References
+ ----------
+
+ .. [1] Marching Cubes: http://en.wikipedia.org/wiki/Marching_cubes
+
+ Examples
+ --------
+ This will create a data object, find a nice value in the center, and
+ calculate the metal flux over it.
+
+ >>> dd = pf.h.all_data()
+ >>> rho = dd.quantities["WeightedAverageQuantity"](
+ ... "Density", weight="CellMassMsun")
+ >>> flux = dd.calculate_isocontour_flux("Density", rho,
+ ... "x-velocity", "y-velocity", "z-velocity", "Metal_Density")
+ """
+ flux = 0.0
+ for g in self._grids:
+ mask = self._get_cut_mask(g) * g.child_mask
+ vals = g.get_vertex_centered_data(field)
+ if fluxing_field is None:
+ ff = na.ones(vals.shape, dtype="float64")
+ else:
+ ff = g.get_vertex_centered_data(fluxing_field)
+ xv, yv, zv = [g.get_vertex_centered_data(f) for f in
+ [field_x, field_y, field_z]]
+ flux += march_cubes_grid_flux(value, vals, xv, yv, zv,
+ ff, mask, g.LeftEdge, g.dds)
+ return flux
+
def extract_connected_sets(self, field, num_levels, min_val, max_val,
log_space=True, cumulative=True, cache=False):
"""
@@ -2472,7 +2664,8 @@
_type_name = "extracted_region"
_con_args = ('_base_region', '_indices')
def __init__(self, base_region, indices, force_refresh=True, **kwargs):
- cen = base_region.get_field_parameter("center")
+ cen = kwargs.pop("center", None)
+ if cen is None: cen = base_region.get_field_parameter("center")
AMR3DData.__init__(self, center=cen,
fields=None, pf=base_region.pf, **kwargs)
self._base_region = base_region # We don't weakly reference because
@@ -2725,7 +2918,8 @@
grid.RightEdge, grid.dds,
grid.child_mask, 1)
if v: grids.append(grid)
- self._grids = na.array(grids, dtype='object')
+ self._grids = na.empty(len(grids), dtype='object')
+ for gi, g in enumerate(grids): self._grids[gi] = g
def _is_fully_enclosed(self, grid):
@@ -2938,7 +3132,8 @@
# Now we sort by level
grids = grids.tolist()
grids.sort(key=lambda x: (x.Level, x.LeftEdge[0], x.LeftEdge[1], x.LeftEdge[2]))
- self._grids = na.array(grids, dtype='object')
+ self._grids = na.empty(len(grids), dtype='object')
+ for gi, g in enumerate(grids): self._grids[gi] = g
def _is_fully_enclosed(self, grid):
r = na.abs(grid._corners - self.center)
@@ -3015,7 +3210,7 @@
fields = ensure_list(fields)
obtain_fields = []
for field in fields:
- if self.data.has_key(field): continue
+ if self.field_data.has_key(field): continue
if field not in self.hierarchy.field_list:
try:
#print "Generating", field
@@ -3128,7 +3323,7 @@
fields_to_get = ensure_list(field)
for field in fields_to_get:
grid_count = 0
- if self.data.has_key(field):
+ if self.field_data.has_key(field):
continue
mylog.debug("Getting field %s from %s possible grids",
field, len(self._grids))
@@ -3160,9 +3355,9 @@
def _update_level_state(self, level, field = None):
dx = self._base_dx / self.pf.refine_by**level
- self.data['cdx'] = dx[0]
- self.data['cdy'] = dx[1]
- self.data['cdz'] = dx[2]
+ self.field_data['cdx'] = dx[0]
+ self.field_data['cdy'] = dx[1]
+ self.field_data['cdz'] = dx[2]
LL = self.left_edge - self.pf.domain_left_edge
self._old_global_startindex = self.global_startindex
self.global_startindex = na.rint(LL / dx).astype('int64') - 1
@@ -3171,13 +3366,13 @@
if level == 0 and self.level > 0:
# We use one grid cell at LEAST, plus one buffer on all sides
idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64') + 2
- self.data[field] = na.zeros(idims,dtype='float64')-999
+ self.field_data[field] = na.zeros(idims,dtype='float64')-999
self._cur_dims = idims.astype("int32")
elif level == 0 and self.level == 0:
DLE = self.pf.domain_left_edge
self.global_startindex = na.array(na.floor(LL/ dx), dtype='int64')
idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64')
- self.data[field] = na.zeros(idims,dtype='float64')-999
+ self.field_data[field] = na.zeros(idims,dtype='float64')-999
self._cur_dims = idims.astype("int32")
def _refine(self, dlevel, field):
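Among the data_containers additions, to_frb gives any axis-aligned 2D data object a direct route to a FixedResolutionBuffer. A usage sketch under the assumptions visible in the method (width may be a float in code units or a (value, unit) tuple; a scalar resolution is promoted to a square shape); the dataset and field are hypothetical:

    from yt.mods import *  # provides load, write_image, na

    pf = load("DD0040/DD0040")  # hypothetical dataset
    sl = pf.h.slice(2, 0.5, fields=["Density"])
    frb = sl.to_frb((100.0, 'kpc'), 512)
    write_image(na.log10(frb["Density"]), "Density_slice.png")
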
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -66,6 +66,7 @@
combine_function, units = "",
n_ret = 0, force_unlazy=False):
# We wrap the function with our object
+ ParallelAnalysisInterface.__init__(self)
self.__doc__ = function.__doc__
self.__name__ = name
self.collection = collection
@@ -85,7 +86,7 @@
e.NumberOfParticles = 1
self.func(e, *args, **kwargs)
mylog.debug("Preloading %s", e.requested)
- self._preload([g for g in self._get_grid_objs()], e.requested,
+ self.comm.preload([g for g in self._get_grid_objs()], e.requested,
self._data_source.pf.h.io)
if lazy_reader and not self.force_unlazy:
return self._call_func_lazy(args, kwargs)
@@ -103,13 +104,14 @@
def _finalize_parallel(self):
# Note that we do some fancy footwork here.
- # _mpi_catarray and its affiliated alltoall function
+ # par_combine_object and its affiliated alltoall function
# assume that the *long* axis is the last one. However,
# our long axis is the first one!
rv = []
for my_list in self.retvals:
data = na.array(my_list).transpose()
- rv.append(self._mpi_catarray(data).transpose())
+ rv.append(self.comm.par_combine_object(data,
+ datatype="array", op="cat").transpose())
self.retvals = rv
def _call_func_unlazy(self, args, kwargs):
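The comment in _finalize_parallel is the whole story: the "cat" combine joins along the last axis, but the per-task return values stack items along the first, hence the transpose on the way in and again on the way out. A numpy-only sketch of that round trip:

    import numpy as na

    task_a = na.arange(6.0).reshape(2, 3)   # 2 items x 3 quantities
    task_b = na.arange(9.0).reshape(3, 3)   # 3 items x 3 quantities
    # Emulate the "cat" op, which concatenates along the last axis.
    joined = na.concatenate([task_a.transpose(),
                             task_b.transpose()], axis=-1)
    result = joined.transpose()             # back to items x quantities
    assert result.shape == (5, 3)
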
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -466,5 +466,5 @@
def __call__(self, data):
# We need to make sure that it's an actual AMR grid
if isinstance(data, FieldDetector): return True
- if data._type_name == 'grid': return True
+ if getattr(data, "_type_name", None) == 'grid': return True
raise NeedsOriginalGrid()
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -30,6 +30,7 @@
from yt.funcs import *
+from yt.data_objects.data_containers import YTFieldData
from yt.utilities.definitions import x_dict, y_dict
from .field_info_container import \
NeedsGridType, \
@@ -49,7 +50,7 @@
_con_args = ('id', 'filename')
OverlappingSiblings = None
- __slots__ = ['data', 'field_parameters', 'id', 'hierarchy', 'pf',
+ __slots__ = ['field_data', 'field_parameters', 'id', 'hierarchy', 'pf',
'ActiveDimensions', 'LeftEdge', 'RightEdge', 'Level',
'NumberOfParticles', 'Children', 'Parent',
'start_index', 'filename', '__weakref__', 'dds',
@@ -57,7 +58,7 @@
'_parent_id', '_children_ids']
def __init__(self, id, filename=None, hierarchy=None):
- self.data = {}
+ self.field_data = YTFieldData()
self.field_parameters = {}
self.id = id
if hierarchy: self.hierarchy = weakref.proxy(hierarchy)
@@ -140,36 +141,36 @@
raise exceptions.KeyError, field
def has_key(self, key):
- return (key in self.data)
+ return (key in self.field_data)
def __getitem__(self, key):
"""
Returns a single field. Will add if necessary.
"""
- if not self.data.has_key(key):
+ if not self.field_data.has_key(key):
self.get_data(key)
- return self.data[key]
+ return self.field_data[key]
def __setitem__(self, key, val):
"""
Sets a field to be some other value.
"""
- self.data[key] = val
+ self.field_data[key] = val
def __delitem__(self, key):
"""
Deletes a field
"""
- del self.data[key]
+ del self.field_data[key]
def keys(self):
- return self.data.keys()
+ return self.field_data.keys()
def get_data(self, field):
"""
Returns a field or set of fields for a key or set of keys
"""
- if not self.data.has_key(field):
+ if not self.field_data.has_key(field):
if field in self.hierarchy.field_list:
conv_factor = 1.0
if self.pf.field_info.has_key(field):
@@ -178,7 +179,7 @@
self.NumberOfParticles == 0:
# because this gets upcast to float
self[field] = na.array([],dtype='int64')
- return self.data[field]
+ return self.field_data[field]
try:
temp = self.hierarchy.io.pop(self, field)
self[field] = na.multiply(temp, conv_factor, temp)
@@ -191,7 +192,7 @@
else: raise
else:
self._generate_field(field)
- return self.data[field]
+ return self.field_data[field]
def _setup_dx(self):
# So first we figure out what the index is. We don't assume
@@ -205,7 +206,7 @@
self.dds = na.array((RE-LE)/self.ActiveDimensions)
if self.pf.dimensionality < 2: self.dds[1] = 1.0
if self.pf.dimensionality < 3: self.dds[2] = 1.0
- self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+ self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@property
def _corners(self):
@@ -247,7 +248,7 @@
"""
self._del_child_mask()
self._del_child_indices()
- self.data.clear()
+ self.field_data.clear()
self._setup_dx()
def check_child_masks(self):
@@ -304,11 +305,11 @@
:meth:`clear_derived_quantities`.
"""
for key in self.keys():
- del self.data[key]
- del self.data
+ del self.field_data[key]
+ del self.field_data
if hasattr(self,"retVal"):
del self.retVal
- self.data = {}
+ self.field_data = YTFieldData()
self.clear_derived_quantities()
def clear_derived_quantities(self):
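The grid-side rename mirrors the data-container one: dictionary-style access is untouched because __getitem__ and friends were updated in step, so only code reaching for the attribute by name needs changing. In short, for any grid object:

    # Mapping-style access is unchanged by the rename:
    rho = grid["Density"]            # still calls get_data() on a miss
    grid["tempContours"] = rho * 0

    # Direct attribute access moves to the new name:
    # old: grid.data.pop("tempContours", None)
    grid.field_data.pop("tempContours", None)
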
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -48,6 +48,7 @@
float_type = 'float64'
def __init__(self, pf, data_style):
+ ParallelAnalysisInterface.__init__(self)
self.parameter_file = weakref.proxy(pf)
self.pf = self.parameter_file
@@ -120,10 +121,16 @@
# Called by subclass
self.object_types = []
self.objects = []
+ self.plots = []
for name, cls in sorted(data_object_registry.items()):
cname = cls.__name__
if cname.endswith("Base"): cname = cname[:-4]
self._add_object_class(name, cname, cls, dd)
+ if self.pf.refine_by != 2 and hasattr(self, 'proj') and \
+ hasattr(self, 'overlap_proj'):
+ mylog.warning("Refine by something other than two: reverting to"
+ + " overlap_proj")
+ self.proj = self.overlap_proj
self.object_types.sort()
# Now all the object related stuff
@@ -171,7 +178,7 @@
writeable = os.access(fn, os.W_OK)
writeable = writeable and not ytcfg.getboolean('yt','onlydeserialize')
# We now have our conditional stuff
- self._barrier()
+ self.comm.barrier()
if not writeable and not exists: return
if writeable:
try:
@@ -203,10 +210,6 @@
"""
if self._data_mode != 'a': return
- if "ArgsError" in dir(h5py.h5):
- exception = (h5py.h5.ArgsError, KeyError)
- else:
- exception = (h5py.h5.H5Error, KeyError)
try:
node_loc = self._data_file[node]
if name in node_loc and force:
@@ -214,7 +217,7 @@
del self._data_file[node][name]
elif name in node_loc and passthrough:
return
- except exception:
+ except:
pass
myGroup = self._data_file['/']
for q in node.split('/'):
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -91,8 +91,7 @@
fields_to_read, rtype, args, grid_list, count_list,
conv_factors)
for [n, v] in zip(fields_to_read, rvs):
- self.source.data[n] = v
- print self.source.data.keys()
+ self.source.field_data[n] = v
class ParticleIOHandlerRegion(ParticleIOHandlerImplemented):
periodic = False
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -30,6 +30,7 @@
from yt.funcs import *
+from yt.data_objects.data_containers import YTFieldData
from yt.utilities.data_point_utilities import \
Bin1DProfile, Bin2DProfile, Bin3DProfile
from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -63,9 +64,10 @@
# We could, but I think we instead want to deal with the root datasource.
class BinnedProfile(ParallelAnalysisInterface):
def __init__(self, data_source, lazy_reader):
+ ParallelAnalysisInterface.__init__(self)
self._data_source = data_source
self.pf = data_source.pf
- self._data = {}
+ self.field_data = YTFieldData()
self._pdata = {}
self._lazy_reader = lazy_reader
@@ -79,7 +81,7 @@
def _initialize_parallel(self, fields):
g_objs = [g for g in self._get_grid_objs()]
- self._preload(g_objs, self._get_dependencies(fields),
+ self.comm.preload(g_objs, self.get_dependencies(fields),
self._data_source.hierarchy.io)
def _lazy_add_fields(self, fields, weight, accumulation):
@@ -119,10 +121,10 @@
def _finalize_parallel(self):
for key in self.__data:
- self.__data[key] = self._mpi_allsum(self.__data[key])
+ self.__data[key] = self.comm.mpi_allreduce(self.__data[key], op='sum')
for key in self.__weight_data:
- self.__weight_data[key] = self._mpi_allsum(self.__weight_data[key])
- self.__used = self._mpi_allsum(self.__used)
+ self.__weight_data[key] = self.comm.mpi_allreduce(self.__weight_data[key], op='sum')
+ self.__used = self.comm.mpi_allreduce(self.__used, op='sum')
def _unlazy_add_fields(self, fields, weight, accumulation):
for field in fields:
@@ -148,18 +150,18 @@
self._unlazy_add_fields(fields, weight, accumulation)
if fractional:
for field in fields:
- self._data[field] /= self._data[field].sum()
+ self.field_data[field] /= self.field_data[field].sum()
def keys(self):
- return self._data.keys()
+ return self.field_data.keys()
def __getitem__(self, key):
# This raises a KeyError if it doesn't exist
# This is because we explicitly want to add all fields
- return self._data[key]
+ return self.field_data[key]
def __setitem__(self, key, value):
- self._data[key] = value
+ self.field_data[key] = value
def _get_field(self, source, this_field, check_cut):
# This is where we will iterate to get all contributions to a field
@@ -267,11 +269,11 @@
raise EmptyProfileData()
# Truncate at boundaries.
if self.end_collect:
- sd = source_data[:]
+ mi = na.ones_like(source_data).astype('bool')
else:
mi = ((source_data > self._bins.min())
& (source_data < self._bins.max()))
- sd = source_data[mi]
+ sd = source_data[mi]
if sd.size == 0:
raise EmptyProfileData()
# Stick the bins into our fixed bins, set at initialization
@@ -288,7 +290,7 @@
# both: 0...N, left: 0...N-1, right: 1...N
# center: N bins that are the average (both in linear or log
# space) of each pair of left/right edges
- x = self._data[self.bin_field]
+ x = self.field_data[self.bin_field]
if bin_style is 'both': pass
elif bin_style is 'left': x = x[:-1]
elif bin_style is 'right': x = x[1:]
@@ -307,15 +309,15 @@
*bin_style* (left, right, center, both).
'''
fid = open(filename,"w")
- fields = [field for field in sorted(self._data.keys()) if field != "UsedBins"]
+ fields = [field for field in sorted(self.field_data.keys()) if field != "UsedBins"]
fields.remove(self.bin_field)
fid.write("\t".join(["#"] + [self.bin_field] + fields + ["\n"]))
field_data = na.array(self.choose_bins(bin_style))
if bin_style is 'both':
- field_data = na.append([field_data], na.array([self._data[field] for field in fields]), axis=0)
+ field_data = na.append([field_data], na.array([self.field_data[field] for field in fields]), axis=0)
else:
- field_data = na.append([field_data], na.array([self._data[field][:-1] for field in fields]), axis=0)
+ field_data = na.append([field_data], na.array([self.field_data[field][:-1] for field in fields]), axis=0)
for line in range(field_data.shape[1]):
field_data[:,line].tofile(fid, sep="\t", format=format)
@@ -334,7 +336,7 @@
*bin_style* (left, right, center, both).
"""
fid = h5py.File(filename)
- fields = [field for field in sorted(self._data.keys()) if (field != "UsedBins" and field != self.bin_field)]
+ fields = [field for field in sorted(self.field_data.keys()) if (field != "UsedBins" and field != self.bin_field)]
if group_prefix is None:
name = "%s-1d" % (self.bin_field)
else:
@@ -346,7 +348,7 @@
group = fid.create_group(name)
group.attrs["x-axis-%s" % self.bin_field] = self.choose_bins(bin_style)
for field in fields:
- dset = group.create_dataset("%s" % field, data=self._data[field][:-1])
+ dset = group.create_dataset("%s" % field, data=self.field_data[field][:-1])
fid.close()
def _get_bin_fields(self):
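The export hunks above all follow the same HDF5 layout: open the file, create one group per profile, attach the bin values as group attributes, and write one dataset per field. A minimal sketch of that pattern, assuming h5py and hypothetical field names in place of self.field_data:

    import h5py
    import numpy as np

    # Hypothetical stand-ins for self.field_data and self.bin_field.
    field_data = {"x": np.linspace(0.0, 1.0, 8), "Density": np.arange(8.0)}
    bin_field = "x"

    fid = h5py.File("profiles.h5", "a")
    group = fid.create_group("%s-1d" % bin_field)
    # The bin values ride along as an attribute on the group.
    group.attrs["x-axis-%s" % bin_field] = field_data[bin_field]
    for field in sorted(field_data.keys()):
        if field == bin_field:
            continue
        group.create_dataset(field, data=field_data[field])
    fid.close()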
@@ -467,8 +469,8 @@
# center: N bins that are the average (both in linear or log
# space) of each pair of left/right edges
- x = self._data[self.x_bin_field]
- y = self._data[self.y_bin_field]
+ x = self.field_data[self.x_bin_field]
+ y = self.field_data[self.y_bin_field]
if bin_style is 'both':
pass
elif bin_style is 'left':
@@ -498,17 +500,17 @@
both).
"""
fid = open(filename,"w")
- fields = [field for field in sorted(self._data.keys()) if field != "UsedBins"]
+ fields = [field for field in sorted(self.field_data.keys()) if field != "UsedBins"]
fid.write("\t".join(["#"] + [self.x_bin_field, self.y_bin_field]
+ fields + ["\n"]))
x,y = self.choose_bins(bin_style)
x,y = na.meshgrid(x,y)
field_data = [x.ravel(), y.ravel()]
if bin_style is not 'both':
- field_data += [self._data[field][:-1,:-1].ravel() for field in fields
+ field_data += [self.field_data[field][:-1,:-1].ravel() for field in fields
if field not in [self.x_bin_field, self.y_bin_field]]
else:
- field_data += [self._data[field].ravel() for field in fields
+ field_data += [self.field_data[field].ravel() for field in fields
if field not in [self.x_bin_field, self.y_bin_field]]
field_data = na.array(field_data)
@@ -529,7 +531,7 @@
right, center, both).
"""
fid = h5py.File(filename)
- fields = [field for field in sorted(self._data.keys()) if (field != "UsedBins" and field != self.x_bin_field and field != self.y_bin_field)]
+ fields = [field for field in sorted(self.field_data.keys()) if (field != "UsedBins" and field != self.x_bin_field and field != self.y_bin_field)]
if group_prefix is None:
name = "%s-%s-2d" % (self.y_bin_field, self.x_bin_field)
else:
@@ -543,7 +545,7 @@
group.attrs["x-axis-%s" % self.x_bin_field] = xbins
group.attrs["y-axis-%s" % self.y_bin_field] = ybins
for field in fields:
- dset = group.create_dataset("%s" % field, data=self._data[field][:-1,:-1])
+ dset = group.create_dataset("%s" % field, data=self.field_data[field][:-1,:-1])
fid.close()
def _get_bin_fields(self):
@@ -727,9 +729,9 @@
# center: N bins that are the average (both in linear or log
# space) of each pair of left/right edges
- x = self._data[self.x_bin_field]
- y = self._data[self.y_bin_field]
- z = self._data[self.z_bin_field]
+ x = self.field_data[self.x_bin_field]
+ y = self.field_data[self.y_bin_field]
+ z = self.field_data[self.z_bin_field]
if bin_style is 'both':
pass
elif bin_style is 'left':
@@ -770,7 +772,7 @@
attributes.
"""
fid = h5py.File(filename)
- fields = [field for field in sorted(self._data.keys())
+ fields = [field for field in sorted(self.field_data.keys())
if (field != "UsedBins" and field != self.x_bin_field and field != self.y_bin_field and field != self.z_bin_field)]
if group_prefix is None:
name = "%s-%s-%s-3d" % (self.z_bin_field, self.y_bin_field, self.x_bin_field)
@@ -788,7 +790,7 @@
group.attrs["z-axis-%s" % self.z_bin_field] = zbins
for field in fields:
- dset = group.create_dataset("%s" % field, data=self._data[field][:-1,:-1,:-1])
+ dset = group.create_dataset("%s" % field, data=self.field_data[field][:-1,:-1,:-1])
fid.close()
@@ -818,7 +820,7 @@
self[self.z_bin_field].size),
'field_order':order }
values = []
- for field in self._data:
+ for field in self.field_data:
if field in set_attr.values(): continue
order.append(field)
values.append(self[field].ravel())
@@ -832,7 +834,7 @@
Given a *pf* parameterfile and the *name* of a stored profile, retrieve
it into a read-only data structure.
"""
- self._data = {}
+ self.field_data = YTFieldData()
prof_arr = pf.h.get_data("/Profiles", name)
if prof_arr is None: raise KeyError("No such array")
for ax in 'xyz':
@@ -840,11 +842,11 @@
setattr(self, base % ax, prof_arr.getAttr(base % ax))
for ax in 'xyz':
fn = getattr(self, '%s_bin_field' % ax)
- self._data[fn] = prof_arr.getAttr('%s_bin_values' % ax)
+ self.field_data[fn] = prof_arr.getAttr('%s_bin_values' % ax)
shape = prof_arr.getAttr('shape')
for fn, fd in zip(prof_arr.getAttr('field_order'),
prof_arr.read().transpose()):
- self._data[fn] = fd.reshape(shape)
+ self.field_data[fn] = fd.reshape(shape)
def add_fields(self, *args, **kwargs):
raise RuntimeError("Sorry, you can't add to a stored profile.")
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -162,7 +162,7 @@
_instantiated_hierarchy = None
@property
def hierarchy(self):
- if self._instantiated_hierarchy == None:
+ if self._instantiated_hierarchy is None:
if self._hierarchy_class == None:
raise RuntimeError("You should not instantiate StaticOutput.")
self._instantiated_hierarchy = self._hierarchy_class(
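The == None to is None change is not purely cosmetic: == dispatches to __eq__, which arbitrary objects (numpy arrays in particular) may overload, while is always performs a cheap identity test. A short illustration, using recent numpy where arr == None compares elementwise:

    import numpy as np

    x = np.arange(3)
    try:
        if x == None:      # elementwise comparison -> ambiguous truth value
            pass
    except ValueError as err:
        print(err)

    if x is None:          # identity test: always a plain bool, never raises
        pass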
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -85,7 +85,7 @@
self.dds = na.array((RE-LE)/self.ActiveDimensions)
if self.pf.dimensionality < 2: self.dds[1] = 1.0
if self.pf.dimensionality < 3: self.dds[2] = 1.0
- self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+ self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
def get_global_startindex(self):
"""
@@ -324,7 +324,8 @@
self.grid_levels[gi,:] = level
grids.append(self.grid(gi, self, level, fl, props[0,:]))
gi += 1
- self.grids = na.array(grids, dtype='object')
+ self.grids = na.empty(len(grids), dtype='object')
+ for gi, g in enumerate(grids): self.grids[gi] = g
def _get_grid_parents(self, grid, LE, RE):
mask = na.zeros(self.num_grids, dtype='bool')
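This two-line replacement for na.array(grids, dtype='object') recurs in the frontends below (Chombo, FLASH, Gadget, RAMSES, Tiger, the stream handler). The motivation is that na.array may descend into sequence-like grid objects and build a multidimensional array instead of a flat array of references; preallocating an empty object array sidesteps that. A small demonstration with a hypothetical stand-in class:

    import numpy as np

    class FakeGrid(object):
        """Hypothetical stand-in for a grid patch that behaves like a sequence."""
        def __len__(self):
            return 3
        def __getitem__(self, i):
            return i

    grids = [FakeGrid() for _ in range(4)]

    # np.array descends into the sequences: shape (4, 3), grid objects lost.
    bad = np.array(grids, dtype="object")

    # Preallocation keeps one slot per grid: shape (4,), objects intact.
    good = np.empty(len(grids), dtype="object")
    for gi, g in enumerate(grids):
        good[gi] = g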
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -117,7 +117,7 @@
self.dds = na.array((RE-LE)/self.ActiveDimensions)
if self.pf.dimensionality < 2: self.dds[1] = 1.0
if self.pf.dimensionality < 3: self.dds[2] = 1.0
- self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+ self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
def __repr__(self):
return "CastroGrid_%04i" % (self.id)
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -82,7 +82,7 @@
self.dds = na.array((RE-LE)/self.ActiveDimensions)
if self.pf.dimensionality < 2: self.dds[1] = 1.0
if self.pf.dimensionality < 3: self.dds[2] = 1.0
- self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+ self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
class ChomboHierarchy(AMRHierarchy):
@@ -148,7 +148,8 @@
self.grid_particle_count[i] = 0
self.grid_dimensions[i] = ei - si + 1
i += 1
- self.grids = na.array(self.grids, dtype='object')
+ temp_grids = na.empty(len(self.grids), dtype='object')
+ for gi, g in enumerate(self.grids): temp_grids[gi] = g
+ self.grids = temp_grids
def _populate_grid_objects(self):
for g in self.grids:
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -245,7 +245,7 @@
fn.append(["-1"])
if nb > 0: fn[-1] = _next_token_line("BaryonFileName", f)
np.append(int(_next_token_line("NumberOfParticles", f)[0]))
- if nb == 0 and np[-1] > 0: fn[-1] = _next_token_line("FileName", f)
+ if nb == 0 and np[-1] > 0: fn[-1] = _next_token_line("ParticleFileName", f)
for line in f:
if len(line) < 2: break
if line.startswith("Pointer:"):
@@ -291,7 +291,7 @@
if not ytcfg.getboolean("yt","serialize"): return False
try:
f = h5py.File(self.hierarchy_filename[:-9] + "harrays")
- except h5py.h5.H5Error:
+ except:
return False
self.grid_dimensions[:] = f["/ActiveDimensions"][:]
self.grid_left_edge[:] = f["/LeftEdges"][:]
@@ -384,7 +384,7 @@
def _detect_fields(self):
self.field_list = []
# Do this only on the root processor to save disk work.
- if self._mpi_get_rank() == 0 or self._mpi_get_rank() == None:
+ if self.comm.rank == 0 or self.comm.rank == None:
field_list = self.get_data("/", "DataFields")
if field_list is None:
mylog.info("Gathering a field list (this may take a moment.)")
@@ -401,7 +401,7 @@
field_list = field_list.union(gf)
else:
field_list = None
- field_list = self._mpi_bcast_pickled(field_list)
+ field_list = self.comm.mpi_bcast_pickled(field_list)
self.save_data(list(field_list),"/","DataFields",passthrough=True)
self.field_list = list(field_list)
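In the field-detection hunk above, only the root processor touches disk, and the resulting list is shipped to the other ranks with comm.mpi_bcast_pickled. The equivalent with raw mpi4py, whose lowercase bcast pickles arbitrary Python objects:

    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    if comm.rank == 0:
        field_list = ["Density", "Temperature"]   # the expensive disk scan
    else:
        field_list = None

    # Root's object is pickled, broadcast, and unpickled on every rank.
    field_list = comm.bcast(field_list, root=0)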
@@ -544,12 +544,13 @@
self.grids[pid-1]._children_ids.append(self.grids[-1].id)
self.max_level = self.grid_levels.max()
mylog.debug("Preparing grids")
+ temp_grids = na.empty(len(self.grids), dtype='object')
for i, grid in enumerate(self.grids):
if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
grid.filename = None
grid._prepare_grid()
grid.proc_num = self.grid_procs[i,0]
- self.grids = na.array(self.grids, dtype='object')
+ temp_grids[i] = grid
+ self.grids = temp_grids
mylog.debug("Prepared")
def _initialize_grid_arrays(self):
@@ -588,7 +589,7 @@
self.derived_field_list = self.__class__._cached_derived_field_list
def _generate_random_grids(self):
- my_rank = self._mpi_get_rank()
+ my_rank = self.comm.rank
my_grids = self.grids[self.grid_procs.ravel() == my_rank]
if len(my_grids) > 40:
starter = na.random.randint(0, 20)
@@ -793,6 +794,10 @@
self.dimensionality = self.parameters["TopGridRank"]
if self.dimensionality > 1:
self.domain_dimensions = self.parameters["TopGridDimensions"]
+ if len(self.domain_dimensions) < 3:
+ tmp = self.domain_dimensions.tolist()
+ tmp.append(1)
+ self.domain_dimensions = na.array(tmp)
self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
"float64").copy()
self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
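The final hunk pads a two-element TopGridDimensions with a trailing 1 so downstream code can always assume three axes. The same idea, generalized to a loop (the committed version appends a single 1, which covers the 2D case):

    import numpy as np

    domain_dimensions = np.array([64, 64])    # a 2D top grid

    dims = domain_dimensions.tolist()
    while len(dims) < 3:
        dims.append(1)                        # unit extent on missing axes
    domain_dimensions = np.array(dims)        # -> array([64, 64, 1])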
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -131,9 +131,9 @@
# current value. Note that FLASH uses 1-based indexing for refinement
# levels, but we do not, so we reduce the level by 1.
self.grid_levels.flat[:] = f["/refine level"][:][:] - 1
- g = [self.grid(i+1, self, self.grid_levels[i,0])
- for i in xrange(self.num_grids)]
- self.grids = na.array(g, dtype='object')
+ self.grids = na.empty(self.num_grids, dtype='object')
+ for i in xrange(self.num_grids):
+ self.grids[i] = self.grid(i+1, self, self.grid_levels[i,0])
def _populate_grid_objects(self):
# We only handle 3D data, so offset is 7 (nfaces+1)
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -121,9 +121,9 @@
args = izip(xrange(self.num_grids), self.grid_levels.flat,
grid_parent_id, LI,
self.grid_dimensions, self.grid_particle_count.flat)
- self.grids = na.array([self.grid(self,j,d,le,lvl,p,n)
- for j,lvl,p, le, d, n in args],
- dtype='object')
+ self.grids = na.empty(len(args), dtype='object')
+ for gi, (j,lvl,p, le, d, n) in enumerate(args):
+ self.grids[gi] = self.grid(self,j,d,le,lvl,p,n)
def _populate_grid_objects(self):
for g in self.grids:
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -62,8 +62,7 @@
self.dds = na.array((RE-LE)/self.ActiveDimensions)
if self.pf.dimensionality < 2: self.dds[1] = 1.0
if self.pf.dimensionality < 3: self.dds[2] = 1.0
- # pdb.set_trace()
- self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+ self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
class GDFHierarchy(AMRHierarchy):
@@ -118,7 +117,6 @@
self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
self.grid_particle_count = f['grid_particle_count'][:]
self.grids = na.array(self.grids, dtype='object')
- # pdb.set_trace()
def _populate_grid_objects(self):
for g in self.grids:
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/frontends/maestro/data_structures.py
--- a/yt/frontends/maestro/data_structures.py
+++ b/yt/frontends/maestro/data_structures.py
@@ -110,7 +110,7 @@
self.dds = na.array((RE-LE)/self.ActiveDimensions)
if self.pf.dimensionality < 2: self.dds[1] = 1.0
if self.pf.dimensionality < 3: self.dds[2] = 1.0
- self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+ self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
def __repr__(self):
return "MaestroGrid_%04i" % (self.id)
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/frontends/nyx/data_structures.py
--- a/yt/frontends/nyx/data_structures.py
+++ b/yt/frontends/nyx/data_structures.py
@@ -76,7 +76,6 @@
def _prepare_grid(self):
""" Copies all the appropriate attributes from the hierarchy. """
- # This is definitely the slowest part of generating the hierarchy
h = self.hierarchy # alias
h.grid_levels[self.id, 0] = self.Level
h.grid_left_edge[self.id,:] = self.LeftEdge[:]
@@ -94,11 +93,12 @@
if len(pIDs) > 0:
self.Parent = [weakref.proxy(h.grids[pID]) for pID in pIDs]
else:
+ # must be root grid
self.Parent = None
def _setup_dx(self):
# So first we figure out what the index is. We don't assume that
- # dx=dy=dz, at least here. We probably do elsewhere.
+ # dx=dy=dz here.
id = self.id - self._id_offset
if self.Parent is not None:
self.dds = self.Parent[0].dds / self.pf.refine_by
@@ -109,7 +109,7 @@
if self.pf.dimensionality < 2: self.dds[1] = 1.0
if self.pf.dimensionality < 3: self.dds[2] = 1.0
- self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+ self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
def __repr__(self):
return "NyxGrid_%04i" % (self.id)
@@ -132,7 +132,6 @@
self.read_particle_header()
self.__cache_endianness(self.levels[-1].grids[-1])
- # @todo: should be first line
AMRHierarchy.__init__(self, pf, self.data_style)
self._setup_data_io()
self._setup_field_list()
@@ -142,27 +141,27 @@
""" Read the global header file for an Nyx plotfile output. """
counter = 0
header_file = open(header_path, 'r')
- self.__global_header_lines = header_file.readlines()
+ self._global_header_lines = header_file.readlines()
# parse the file
- self.nyx_version = self.__global_header_lines[0].rstrip()
- self.n_fields = int(self.__global_header_lines[1])
+ self.nyx_pf_version = self._global_header_lines[0].rstrip()
+ self.n_fields = int(self._global_header_lines[1])
# why the 2?
counter = self.n_fields + 2
self.field_list = []
- for i, line in enumerate(self.__global_header_lines[2:counter]):
+ for i, line in enumerate(self._global_header_lines[2:counter]):
self.field_list.append(line.rstrip())
# figure out dimensions and make sure it's 3D
- self.dimension = int(self.__global_header_lines[counter])
+ self.dimension = int(self._global_header_lines[counter])
if self.dimension != 3:
raise RunTimeError("Current data is %iD. yt only supports Nyx data in 3D" % self.dimension)
counter += 1
- self.Time = float(self.__global_header_lines[counter])
+ self.Time = float(self._global_header_lines[counter])
counter += 1
- self.finest_grid_level = int(self.__global_header_lines[counter])
+ self.finest_grid_level = int(self._global_header_lines[counter])
self.n_levels = self.finest_grid_level + 1
counter += 1
@@ -171,28 +170,28 @@
# case in the future we want to enable a "backwards" way of
# taking the data out of the Header file and using it to fill
# in in the case of a missing inputs file
- self.domainLeftEdge_unnecessary = na.array(map(float, self.__global_header_lines[counter].split()))
+ self.domainLeftEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
counter += 1
- self.domainRightEdge_unnecessary = na.array(map(float, self.__global_header_lines[counter].split()))
+ self.domainRightEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
counter += 1
- self.refinementFactor_unnecessary = self.__global_header_lines[counter].split() #na.array(map(int, self.__global_header_lines[counter].split()))
+ self.refinementFactor_unnecessary = self._global_header_lines[counter].split() #na.array(map(int, self._global_header_lines[counter].split()))
counter += 1
- self.globalIndexSpace_unnecessary = self.__global_header_lines[counter]
+ self.globalIndexSpace_unnecessary = self._global_header_lines[counter]
counter += 1
- self.timestepsPerLevel_unnecessary = self.__global_header_lines[counter]
+ self.timestepsPerLevel_unnecessary = self._global_header_lines[counter]
counter += 1
self.dx = na.zeros((self.n_levels, 3))
- for i, line in enumerate(self.__global_header_lines[counter:counter + self.n_levels]):
+ for i, line in enumerate(self._global_header_lines[counter:counter + self.n_levels]):
self.dx[i] = na.array(map(float, line.split()))
counter += self.n_levels
- self.geometry = int(self.__global_header_lines[counter])
+ self.geometry = int(self._global_header_lines[counter])
if self.geometry != 0:
raise RunTimeError("yt only supports cartesian coordinates.")
counter += 1
# @todo: this is just to debug. eventually it should go away.
- linebreak = int(self.__global_header_lines[counter])
+ linebreak = int(self._global_header_lines[counter])
if linebreak != 0:
raise RunTimeError("INTERNAL ERROR! This should be a zero.")
counter += 1
@@ -209,11 +208,11 @@
data_files_finder = re.compile(data_files_pattern)
for level in range(0, self.n_levels):
- tmp = self.__global_header_lines[counter].split()
+ tmp = self._global_header_lines[counter].split()
# should this be grid_time or level_time??
lev, ngrids, grid_time = int(tmp[0]), int(tmp[1]), float(tmp[2])
counter += 1
- nsteps = int(self.__global_header_lines[counter])
+ nsteps = int(self._global_header_lines[counter])
counter += 1
self.levels.append(NyxLevel(lev, ngrids))
@@ -227,10 +226,10 @@
key_off = 0
files = {}
offsets = {}
- while nfiles + tmp_offset < len(self.__global_header_lines) \
- and data_files_finder.match(self.__global_header_lines[nfiles + tmp_offset]):
- filen = os.path.join(self.parameter_file.path, \
- self.__global_header_lines[nfiles + tmp_offset].strip())
+ while nfiles + tmp_offset < len(self._global_header_lines) \
+ and data_files_finder.match(self._global_header_lines[nfiles + tmp_offset]):
+ filen = os.path.join(self.parameter_file.path,
+ self._global_header_lines[nfiles + tmp_offset].strip())
# open each "_H" header file, and get the number of
# components within it
level_header_file = open(filen + '_H', 'r').read()
@@ -262,11 +261,11 @@
for grid in range(0, ngrids):
gfn = fn[grid] # filename of file containing this grid
gfo = off[grid] # offset within that file
- xlo, xhi = map(float, self.__global_header_lines[counter].split())
+ xlo, xhi = map(float, self._global_header_lines[counter].split())
counter += 1
- ylo, yhi = map(float, self.__global_header_lines[counter].split())
+ ylo, yhi = map(float, self._global_header_lines[counter].split())
counter += 1
- zlo, zhi = map(float, self.__global_header_lines[counter].split())
+ zlo, zhi = map(float, self._global_header_lines[counter].split())
counter += 1
lo = na.array([xlo, ylo, zlo])
hi = na.array([xhi, yhi, zhi])
@@ -307,6 +306,7 @@
for i in line.split()),
dtype='int64',
count=3*self.num_grids).reshape((self.num_grids, 3))
+ # we need grid_info in `populate_grid_objects`, so save it to self
self.pgrid_info = grid_info
def __cache_endianness(self, test_grid):
@@ -356,17 +356,17 @@
g.NumberOfParticles = pg[1]
g._particle_offset = pg[2]
- self.grid_particle_count[:,0] = self.pgrid_info[:,1]
- del self.pgrid_info # if this is all pgrid_info is used for...
+ self.grid_particle_count[:, 0] = self.pgrid_info[:, 1]
+ del self.pgrid_info
gls = na.concatenate([level.ngrids * [level.level] for level in self.levels])
self.grid_levels[:] = gls.reshape((self.num_grids, 1))
grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]]
for level in self.levels], axis=0)
- self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids, 1))
- self.grid_dys = grid_dcs[:,1].reshape((self.num_grids, 1))
- self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids, 1))
+ self.grid_dxs = grid_dcs[:, 0].reshape((self.num_grids, 1))
+ self.grid_dys = grid_dcs[:, 1].reshape((self.num_grids, 1))
+ self.grid_dzs = grid_dcs[:, 2].reshape((self.num_grids, 1))
left_edges = []
right_edges = []
@@ -381,7 +381,7 @@
self.grid_dimensions = na.array(dims)
self.gridReverseTree = [] * self.num_grids
self.gridReverseTree = [ [] for i in range(self.num_grids)] # why the same thing twice?
- self.gridTree = [ [] for i in range(self.num_grids)] # meh
+ self.gridTree = [ [] for i in range(self.num_grids)]
mylog.debug("Done creating grid objects")
@@ -389,7 +389,7 @@
self.__setup_grid_tree()
for i, grid in enumerate(self.grids):
- if (i%1e4) == 0:
+ if (i % 1e4) == 0:
mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
grid._prepare_grid()
@@ -469,7 +469,7 @@
pass
def _setup_unknown_fields(self):
- # Doesn't seem useful
+ # not sure what the case for this is.
for field in self.field_list:
if field in self.parameter_file.field_info: continue
mylog.info("Adding %s to list of fields", field)
@@ -588,7 +588,6 @@
Parses the parameter file and establishes the various dictionaries.
"""
- # More boxlib madness...
self._parse_header_file()
if os.path.isfile(self.fparameter_file_path):
@@ -638,27 +637,24 @@
self.domain_dimensions = self.parameters["TopGridDimensions"]
self.refine_by = self.parameters.get("RefineBy", 2) # 2 is silent default? Makes sense I suppose.
- if self.parameters.has_key("ComovingCoordinates") \
- and self.parameters["ComovingCoordinates"]:
- self.cosmological_simulation = 1
- self.omega_lambda = self.parameters["CosmologyOmegaLambdaNow"]
- self.omega_matter = self.parameters["CosmologyOmegaMatterNow"]
- self.hubble_constant = self.parameters["CosmologyHubbleConstantNow"]
+ # Nyx is always cosmological.
+ self.cosmological_simulation = 1
+ self.omega_lambda = self.parameters["CosmologyOmegaLambdaNow"]
+ self.omega_matter = self.parameters["CosmologyOmegaMatterNow"]
+ self.hubble_constant = self.parameters["CosmologyHubbleConstantNow"]
- # So broken. We will fix this in the new Nyx output format
- a_file = open(os.path.join(self.path, "comoving_a"))
- line = a_file.readline().strip()
- a_file.close()
- self.parameters["CosmologyCurrentRedshift"] = 1 / float(line) - 1
- self.cosmological_scale_factor = float(line)
+ # Read in the `comoving_a` file and parse the value. We should fix this
+ # in the new Nyx output format...
+ a_file = open(os.path.join(self.path, "comoving_a"))
+ a_string = a_file.readline().strip()
+ a_file.close()
- # alias
- self.current_redshift = self.parameters["CosmologyCurrentRedshift"]
+ # Set the scale factor and redshift
+ self.cosmological_scale_factor = float(a_string)
+ self.parameters["CosmologyCurrentRedshift"] = 1 / float(a_string) - 1
- else:
- # @todo: automatic defaults
- self.current_redshift = self.omega_lambda = self.omega_matter = \
- self.hubble_constant = self.cosmological_simulation = 0.0
+ # alias
+ self.current_redshift = self.parameters["CosmologyCurrentRedshift"]
def _parse_header_file(self):
"""
@@ -668,13 +664,12 @@
Currently, only Time is read here.
"""
- # @todo: header filename option? probably not.
header_file = open(os.path.join(self.path, "Header"))
lines = header_file.readlines() # hopefully this is small
header_file.close()
n_fields = int(lines[1]) # this could change
- self.current_time = float(lines[3 + n_fields]) # very fragile
+ self.current_time = float(lines[3 + n_fields]) # fragile
def _parse_fparameter_file(self):
"""
@@ -751,7 +746,6 @@
self.time_units["days"] = seconds / (3600 * 24.0)
self.time_units["years"] = seconds / (3600 * 24.0 * 365)
-
# not the most useful right now, but someday
for key in nyx_particle_field_names:
self.conversion_factors[key] = 1.0
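Most of the Nyx churn above is the rename of self.__global_header_lines to a single leading underscore. Double leading underscores trigger Python name mangling: inside class C, self.__attr is stored as self._C__attr, which surprises subclasses and interactive debugging; one underscore is plain convention with no magic. A short illustration:

    class Base(object):
        def __init__(self):
            self.__lines = ["mangled"]   # actually stored as _Base__lines
            self._lines = ["plain"]      # stored under its own name

    class Child(Base):
        def peek(self):
            # self.__lines here would look up _Child__lines -> AttributeError
            return self._lines           # single underscore: works as expected

    obj = Child()
    print(obj.peek())                    # ['plain']
    print(obj._Base__lines)              # ['mangled'], via the mangled name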
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -114,7 +114,7 @@
self.dds = na.array((RE-LE)/self.ActiveDimensions)
if self.pf.dimensionality < 2: self.dds[1] = 1.0
if self.pf.dimensionality < 3: self.dds[2] = 1.0
- self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+ self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
def __repr__(self):
return "OrionGrid_%04i" % (self.id)
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -80,7 +80,7 @@
self.dds = na.array((RE-LE)/self.ActiveDimensions)
if self.pf.dimensionality < 2: self.dds[1] = 1.0
if self.pf.dimensionality < 3: self.dds[2] = 1.0
- self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+ self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
def get_global_startindex(self):
"""
@@ -233,7 +233,8 @@
grids.append(self.grid(gi, self, level, fl, props[0,:]))
gi += 1
self.proto_grids = []
- self.grids = na.array(grids, dtype='object')
+ self.grids = na.empty(len(grids), dtype='object')
+ for gi, g in enumerate(grids): self.grids[gi] = g
def _populate_grid_objects(self):
mask = na.empty(self.grids.size, dtype='int32')
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -166,21 +166,23 @@
if parent_ids is not None:
reverse_tree = self.stream_handler.parent_ids.tolist()
# Initial setup:
- for id,pid in enumerate(reverse_tree):
+ for gid,pid in enumerate(reverse_tree):
if pid >= 0:
- self.grids[-1]._parent_id = pid
- self.grids[pid]._children_ids.append(self.grids[-1].id)
+ self.grids[gid]._parent_id = pid
+ self.grids[pid]._children_ids.append(self.grids[gid].id)
else:
mylog.debug("Reconstructing parent-child relationships")
self._reconstruct_parent_child()
self.max_level = self.grid_levels.max()
mylog.debug("Preparing grids")
+ temp_grids = na.empty(self.num_grids, dtype='object')
for i, grid in enumerate(self.grids):
if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
grid.filename = None
grid._prepare_grid()
grid.proc_num = self.grid_procs[i]
- self.grids = na.array(self.grids, dtype='object')
+ temp_grids[i] = grid
+ self.grids = temp_grids
mylog.debug("Prepared")
def _reconstruct_parent_child(self):
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/frontends/tiger/data_structures.py
--- a/yt/frontends/tiger/data_structures.py
+++ b/yt/frontends/tiger/data_structures.py
@@ -106,7 +106,8 @@
levels.append(g.Level)
counts.append(g.NumberOfParticles)
i += 1
- self.grids = na.array(grids, dtype='object')
+ self.grids = na.empty(len(grids), dtype='object')
+ for gi, g in enumerate(grids): self.grids[gi] = g
self.grid_dimensions[:] = na.array(dims, dtype='int64')
self.grid_left_edge[:] = na.array(LE, dtype='float64')
self.grid_right_edge[:] = na.array(RE, dtype='float64')
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -187,12 +187,13 @@
def some_root_only_function(...):
"""
+ from yt.config import ytcfg
@wraps(func)
- def donothing(*args, **kwargs):
- return
- from yt.config import ytcfg
- if ytcfg.getint("yt","__parallel_rank") > 0: return donothing
- return func
+ def check_parallel_rank(*args, **kwargs):
+ if ytcfg.getint("yt","__topcomm_parallel_rank") > 0:
+ return
+ return func(*args, **kwargs)
+ return check_parallel_rank
def deprecate(func):
"""
@@ -341,9 +342,13 @@
handed back.
"""
from yt.config import ytcfg
+ if kwargs.pop("global_rootonly", False):
+ cfg_option = "__global_parallel_rank"
+ else:
+ cfg_option = "__topcomm_parallel_rank"
if not ytcfg.getboolean("yt","__parallel"):
return func(*args,**kwargs)
- if ytcfg.getint("yt","__parallel_rank") > 0: return
+ if ytcfg.getint("yt", cfg_option) > 0: return
return func(*args, **kwargs)
#
@@ -453,3 +458,6 @@
class NoCUDAException(Exception):
pass
+
+class YTEmptyClass(object):
+ pass
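The rewritten decorator above fixes a timing problem: the old version consulted the parallel rank once, at import time, before the topcomm rank had been configured; the new wrapper defers the check to every call. A minimal sketch of the pattern, with a hypothetical get_rank() standing in for the ytcfg lookup:

    from functools import wraps

    def get_rank():
        """Hypothetical stand-in for ytcfg.getint("yt", "__topcomm_parallel_rank")."""
        return 0

    def rootonly(func):
        @wraps(func)                       # keep func's name and docstring
        def check_parallel_rank(*args, **kwargs):
            if get_rank() > 0:             # non-root ranks silently no-op
                return
            return func(*args, **kwargs)
        return check_parallel_rank

    @rootonly
    def report(message):
        print(message)

    report("only rank 0 prints this")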
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/gui/reason/bottle_mods.py
--- a/yt/gui/reason/bottle_mods.py
+++ b/yt/gui/reason/bottle_mods.py
@@ -77,9 +77,9 @@
if self.record:
self.recorded_payloads += self.payloads
if self.debug:
- orig_stderr.write("**** Delivering %s payloads\n" % (len(payloads)))
+ sys.__stderr__.write("**** Delivering %s payloads\n" % (len(payloads)))
for p in payloads:
- orig_stderr.write("**** %s\n" % p['type'])
+ sys.__stderr__.write("**** %s\n" % p['type'])
self.payloads = []
self.event.clear()
return payloads
@@ -90,7 +90,7 @@
self.count += 1
self.event.set()
if self.debug:
- orig_stderr.write("**** Adding payload of type %s\n" % (to_add['type']))
+ sys.__stderr__.write("**** Adding payload of type %s\n" % (to_add['type']))
def replay_payloads(self):
return self.recorded_payloads
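The bottle_mods hunk drops the hand-rolled orig_stderr alias in favor of sys.__stderr__, which CPython fills in at interpreter startup with the original stream, so debug output survives any later redirection of sys.stderr:

    import sys

    sys.stderr = open("captured.log", "w")            # something redirects stderr
    sys.stderr.write("lands in captured.log\n")
    sys.__stderr__.write("still reaches the real terminal\n")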
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/gui/reason/extdirect_repl.py
--- a/yt/gui/reason/extdirect_repl.py
+++ b/yt/gui/reason/extdirect_repl.py
@@ -555,6 +555,38 @@
'widget_data_name': '_twidget_data'})
@lockit
+ def create_isocontours(self, pfname, field, value, sampling_field):
+ funccall = """
+ _tpf = %(pfname)s
+ _tfield = "%(field)s"
+ _tvalue = %(value)s
+ _tsample_values = "%(sampling_field)s"
+ _tdd = _tpf.h.all_data()
+ _tiso = _tdd.extract_isocontours(_tfield, _tvalue, rescale = True,
+ sample_values = _tsample_values)
+ from yt.funcs import YTEmptyClass
+ _tpw = YTEmptyClass()
+ print "GOT TPW"
+ _tpw._widget_name = 'isocontour_viewer'
+ _tpw._ext_widget_id = None
+ _tverts = _tiso[0].ravel().tolist()
+ _tc = (apply_colormap(na.log10(_tiso[1]))).squeeze()
+ _tcolors = na.empty((_tc.shape[0] * 3, 4), dtype='float32')
+ _tcolors[0::3,:] = _tc
+ _tcolors[1::3,:] = _tc
+ _tcolors[2::3,:] = _tc
+ _tcolors = (_tcolors.ravel()/255.0).tolist()
+ _twidget_data = {'vertex_positions': _tverts, 'vertex_colors': _tcolors}
+ """ % dict(pfname=pfname, value=value, sampling_field=sampling_field, field=field)
+ # There is a call to do this, but I have forgotten it ...
+ funccall = "\n".join((line.strip() for line in funccall.splitlines()))
+ self.execute(funccall, hide = True)
+ self.execution_thread.queue.put({'type': 'add_widget',
+ 'name' : '_tpw',
+ 'widget_data_name': '_twidget_data'})
+
+
+ @lockit
def create_grid_dataview(self, pfname):
funccall = """
_tpf = %(pfname)s
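The create_isocontours template above turns one RGBA row per colormap sample into three identical per-vertex rows (one triangle, three vertices) using strided assignment. The same trick in isolation, with hypothetical per-triangle colors:

    import numpy as np

    # Hypothetical per-triangle RGBA colors, one row per triangle.
    tri_colors = np.array([[255, 0, 0, 255],
                           [0, 255, 0, 255]], dtype="float32")

    # One color per vertex: copy each row into slots 0::3, 1::3, and 2::3.
    vert_colors = np.empty((tri_colors.shape[0] * 3, 4), dtype="float32")
    vert_colors[0::3, :] = tri_colors
    vert_colors[1::3, :] = tri_colors
    vert_colors[2::3, :] = tri_colors

    # Normalize to [0, 1] and flatten for the WebGL widget.
    flat = (vert_colors.ravel() / 255.0).tolist()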
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/gui/reason/html/index.html
--- a/yt/gui/reason/html/index.html
+++ b/yt/gui/reason/html/index.html
@@ -116,6 +116,9 @@
<!-- THE PHASE PLOT VIEWER FUNCTIONS --><script type="text/javascript" src="js/widget_phaseplot.js"></script>
+ <!-- THE GRID VIEWER FUNCTIONS -->
+ <script type="text/javascript" src="js/widget_isocontour.js"></script>
+
<script id="gv-shader-fs" type="x-shader/x-fragment">
#ifdef GL_ES
precision highp float;
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/gui/reason/html/js/functions.js
--- a/yt/gui/reason/html/js/functions.js
+++ b/yt/gui/reason/html/js/functions.js
@@ -252,6 +252,76 @@
return streamlineViewerHandler;
}
+function getIsocontourViewerHandler(node){
+function isocontourViewerHandler(item,pressed){
+ var win = new Ext.Window({
+ layout:'fit',
+ width:320,
+ height:250,
+ modal:true,
+ resizable:false,
+ draggable:false,
+ border:false,
+ title:'Isocontour Extraction in ' + node,
+ items: [{
+ xtype: 'form', // FormPanel
+ labelWidth:80,
+ frame:true,
+ items: [{
+ xtype:'combo',
+ fieldLabel: 'Field',
+ id: 'field',
+ store:node.attributes.objdata.field_list,
+ width: 200,
+ allowBlank:false,
+ value: 'Density',
+ triggerAction: 'all',
+ },{
+ xtype:'combo',
+ fieldLabel: 'Sampling Field',
+ id: 'extract_field',
+ store:node.attributes.objdata.field_list,
+ width: 200,
+ allowBlank:false,
+ value: 'Temperature',
+ triggerAction: 'all',
+ },{
+ xtype:'textfield',
+ fieldLabel: 'Value',
+ id: 'value',
+ value: '1e-25',
+ width: 90,
+ allowBlank:false,
+ }],
+ buttons: [
+ {
+ text: 'Extract',
+ handler: function(b, e){
+ var field = Ext.get("field").getValue();
+ var value = Ext.get("value").getValue();
+ var sampling_field = Ext.get("extract_field").getValue();
+ yt_rpc.ExtDirectREPL.create_isocontours({
+ pfname:node.attributes.objdata.varname,
+ field:field, value:value,
+ sampling_field:sampling_field},
+ handle_result);
+ disable_input();
+ win.close();
+ }
+ },{
+ text: 'Cancel',
+ handler: function(b, e){
+ win.close();
+ }
+ }
+ ]
+ }]
+ });
+ win.show(this);
+}
+return isocontourViewerHandler;
+}
+
function getSliceHandler(node){
function sliceHandler(item,pressed){
var win = new Ext.Window({
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/gui/reason/html/js/reason.js
--- a/yt/gui/reason/html/js/reason.js
+++ b/yt/gui/reason/html/js/reason.js
@@ -186,10 +186,13 @@
} else if (node.attributes.objdata.type == 'pf') {
rightClickMenu = new Ext.menu.Menu({
items: [
- /*{
+ {
text: 'View Grids',
handler: getGridViewerHandler(node),
- },*/ {
+ }, {
+ text: 'View Isocontour',
+ handler: getIsocontourViewerHandler(node),
+ }, {
text: 'View Grid Data',
handler: getGridDataViewerHandler(node),
}, {
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/gui/reason/html/js/widget_gridviewer.js
--- a/yt/gui/reason/html/js/widget_gridviewer.js
+++ b/yt/gui/reason/html/js/widget_gridviewer.js
@@ -64,11 +64,6 @@
x: 0.5, y: 0.5, z: 0.5
},
},
- program: {
- from: 'ids',
- vs: 'gv-shader-vs',
- fs: 'gv-shader-fs'
- },
events: {
onDragStart: function(e) {
pos = {
@@ -156,46 +151,23 @@
program = app.program,
scene = app.scene,
camera = app.camera;
-
+ var grids = new PhiloGL.O3D.Model({
+ vertices : widget_data['vertex_positions'],
+ drawType : "LINES",
+ colors : widget_data['vertex_colors'],
+ });
+ scene.add(grids);
gl.viewport(0, 0, canvas.width, canvas.height);
gl.clearColor(0, 0, 0, 1);
- //gl.clearDepth(1);
- gl.blendFunc(gl.SRC_ALPHA, gl.ONE);
- gl.enable(gl.BLEND);
- //gl.disable(gl.DEPTH_TEST);
- program.setUniform('alpha',0.8);
- gl.depthFunc(gl.LEQUAL);
- examine = camera;
- program.setBuffers({
- 'shapeset': {
- attribute: 'aVertexPosition',
- value: new Float32Array(widget_data['vertex_positions']),
- size: 3
- },
- 'shapesetColors': {
- attribute: 'aVertexColor',
- value: new Float32Array(widget_data['vertex_colors']),
- size: 4
- },
-
- });
- camera.modelView.id();
+ //examine = camera;
+ camera.view.id();
camera.update();
- /*(function animloop(){
- draw();
- requestAnimFrame(animloop, canvas);
- })();*/
-
//Draw the scene
draw = function() {
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
- program.setUniform('uMVMatrix', camera.modelView);
- program.setUniform('uPMatrix', camera.projection);
- program.setBuffer('shapeset');
- program.setBuffer('shapesetColors');
- gl.drawArrays(gl.LINES, 0, widget_data['n_vertices']);
+ scene.render();
}
draw();
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/gui/reason/html/js/widget_isocontour.js
--- /dev/null
+++ b/yt/gui/reason/html/js/widget_isocontour.js
@@ -0,0 +1,213 @@
+/**********************************************************************
+The isocontour viewer widget
+
+Author: Samuel Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2011 Matthew Turk. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+***********************************************************************/
+
+// shim layer with setTimeout fallback
+window.requestAnimFrame = (function(){
+ return window.requestAnimationFrame ||
+ window.webkitRequestAnimationFrame ||
+ window.mozRequestAnimationFrame ||
+ window.oRequestAnimationFrame ||
+ window.msRequestAnimationFrame ||
+ function(/* function */ callback, /* DOMElement */ element){
+ window.setTimeout(callback, 1000 / 60);
+ };
+})();
+
+var exagain;
+var WidgetIsocontourViewer = function(python_varname, widget_data) {
+ this.id = python_varname;
+ this.widget_data = widget_data;
+ examine = "canvas_" + python_varname;
+ var draw;
+ var GridViewerStart = function() {
+ this.curX = 0;
+ this.curY = 0;
+ this.dist = 0;
+ function updateBasedOnOffset(camera, offset){
+ camera.position.x = camera.target.x + offset.x;
+ camera.position.y = camera.target.y + offset.y;
+ camera.position.z = camera.target.z + offset.z;
+ draw();
+ }
+ function camGetOffset(camera){
+ return PhiloGL.Vec3.sub(camera.position, camera.target)
+ }
+ PhiloGL('canvas_' + python_varname, {
+ camera: {
+ position: {
+ x: 0.5, y: 0.5, z: 5
+ },
+ target: {
+ x: 0.5, y: 0.5, z: 0.5
+ },
+ },
+ events: {
+ onDragStart: function(e) {
+ pos = {
+ x: e.x,
+ y: e.y
+ };
+ this.curX = e.x;
+ this.curY = e.y;
+ this.dist = camGetOffset(this.camera).norm();
+ },
+ onDragEnd: function(e) {
+ pos = {
+ x: e.x,
+ y: e.y
+ };
+ },
+ onDragMove: function(e) {
+ var c = this.camera;
+ var off = camGetOffset(c);
+
+ // Get Horizontal vector
+ var horiz = PhiloGL.Vec3.cross(c.up,
+ camGetOffset(c))
+ horiz.$scale(1./horiz.norm());
+
+ if (e.event.button == 0){ // Rotation
+ // Do vertical rotation about horizontal vector
+ var vert_rot = new PhiloGL.Mat4();
+ vert_rot.id();
+ vert_rot.$rotateAxis((e.y-this.curY)/100., horiz);
+ PhiloGL.Mat4.$mulVec3(vert_rot, off);
+ PhiloGL.Mat4.$mulVec3(vert_rot, c.up);
+ c.up.$scale(1./c.up.norm());
+
+ // Do horizontal rotation about up vector
+ var side_rot = new PhiloGL.Mat4();
+ side_rot.id();
+ side_rot.$rotateAxis(-(e.x-this.curX)/100., c.up);
+ side_rot.$mulVec3(off);
+
+ // Update current positions
+ this.curX = e.x;
+ this.curY = e.y;
+ this.dist = off.norm();
+ updateBasedOnOffset(c, off);
+ this.camera.near = this.dist/100000.0;
+ this.camera.far = this.dist*2.0;
+ c.update();
+ } else if (e.event.button == 2){ // Right click - transpose
+ var tscale = 1.0*off.norm()/512.;
+ var move_up = c.up.scale(-(e.y-this.curY)*tscale);
+ var move_over = horiz.scale(-(e.x-this.curX)*tscale);
+ c.position.$add(move_up);
+ c.position.$add(move_over);
+ c.target.$add(move_up);
+ c.target.$add(move_over);
+ // Update current positions
+ this.curX = e.x;
+ this.curY = e.y;
+ this.dist = off.norm();
+ this.camera.near = this.dist/100000.0;
+ this.camera.far = this.dist*2.0;
+ c.update();
+ }
+ draw();
+ },
+ onMouseWheel: function(e){
+ e.stop();
+ var offset = PhiloGL.Vec3.scale(camGetOffset(this.camera),
+ 1.0 - e.wheel/10.);
+ updateBasedOnOffset(this.camera, offset);
+ var dist = offset.norm()
+ this.camera.near = offset.norm()/100000.0;
+ this.camera.far = offset.norm()*2.0;
+ this.camera.update();
+ draw();
+ }
+ },
+ onError: function() {
+ alert("An error ocurred while loading the application");
+ },
+ onLoad: function(app) {
+ var gl = app.gl,
+ canvas = app.canvas,
+ program = app.program,
+ scene = app.scene,
+ camera = app.camera;
+ var grids = new PhiloGL.O3D.Model({
+ vertices : widget_data['vertex_positions'],
+ drawType : "TRIANGLES",
+ colors : widget_data['vertex_colors'],
+ });
+ exagain = grids;
+ scene.add(grids);
+ gl.viewport(0, 0, canvas.width, canvas.height);
+ gl.clearColor(0, 0, 0, 1);
+
+ //examine = camera;
+ camera.view.id();
+ camera.update();
+
+ //Draw the scene
+ draw = function() {
+ gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
+ scene.render();
+ }
+
+ draw();
+
+ }
+ });
+ }
+
+ viewport.get("center-panel").add(
+ {
+ xtype: 'panel',
+ id: "iv_" + python_varname,
+ title: "WebGL Isocontour Viewer",
+ iconCls: 'graph',
+ autoScroll: true,
+ layout:'absolute',
+ closable: true,
+ items: [
+ { xtype:'panel',
+ autoEl: {
+ tag: 'canvas',
+ id: 'canvas_' + python_varname,
+ style: 'border: none;',
+ width: 512, height:512
+ },
+ width: 512,
+ height: 512
+ }],
+ listeners: { afterlayout: GridViewerStart },
+ }
+ );
+
+ viewport.get("center-panel").activate("iv_" + this.id);
+ viewport.doLayout();
+ this.panel = viewport.get("center-panel").get("iv_" + python_varname);
+ this.panel.doLayout();
+
+ this.accept_results = function(payload) { }
+}
+
+widget_types['isocontour_viewer'] = WidgetIsocontourViewer;
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/gui/reason/html/js/widget_streamlineviewer.js
--- a/yt/gui/reason/html/js/widget_streamlineviewer.js
+++ b/yt/gui/reason/html/js/widget_streamlineviewer.js
@@ -159,7 +159,7 @@
});
- camera.modelView.id();
+ camera.view.id();
setInterval(draw, 30/60);
var stream_counter =0;
//Draw the scene
@@ -167,7 +167,7 @@
stream_counter = 0;
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
//Draw Triangle
- program.setUniform('uMVMatrix', camera.modelView);
+ program.setUniform('uMVMatrix', camera.view);
program.setUniform('uPMatrix', camera.projection);
program.setBuffer('shapeset');
program.setBuffer('shapesetColors');
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/gui/setup.py
--- a/yt/gui/setup.py
+++ b/yt/gui/setup.py
@@ -6,7 +6,6 @@
from numpy.distutils.misc_util import Configuration
config = Configuration('gui',parent_package,top_path)
config.add_subpackage('opengl_widgets')
- config.add_subpackage('traited_explorer')
config.add_subpackage('reason')
config.make_config_py() # installs __config__.py
#config.make_svn_version_py()
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/gui/traited_explorer/plot_editors.py
--- a/yt/gui/traited_explorer/plot_editors.py
+++ /dev/null
@@ -1,128 +0,0 @@
-"""
-Figure editors for the Traits GUI
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
- Copyright (C) 2009 Matthew Turk. All Rights Reserved.
-
- This file is part of yt.
-
- yt is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-
-import sys, matplotlib
-# We want matplotlib to use a wxPython backend
-matplotlib.use('QT4Agg')
-from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
-from matplotlib.figure import Figure
-from matplotlib.axes import Axes
-
-from enthought.traits.api import Any, Instance
-from enthought.traits.ui.qt4.editor import Editor
-from enthought.traits.ui.qt4.basic_editor_factory import BasicEditorFactory
-
-from enthought.pyface.action.api import ActionController
-
-from enthought.traits.ui.menu import \
- Menu, Action, Separator, OKCancelButtons, OKButton
-
-from matplotlib.backend_bases import Event as MPLEvent
-
-class _MPLFigureEditor(Editor):
- """ Snagged from Gael's tutorial """
-
- scrollable = True
- mpl_control = Instance(FigureCanvas)
-
- def init(self, parent):
- self.control = self._create_canvas(parent)
- self.set_tooltip()
-
- def update_editor(self):
- pass
-
- def _create_canvas(self, parent):
- """ Create the MPL canvas. """
- # The panel lets us add additional controls.
- panel = wx.Panel(parent, -1)
- sizer = wx.BoxSizer(wx.VERTICAL)
- panel.SetSizer(sizer)
- # matplotlib commands to create a canvas
- self.mpl_control = FigureCanvas(panel, -1, self.value)
- sizer.Add(self.mpl_control, 1, wx.LEFT | wx.TOP | wx.GROW | wx.SHAPED)
- self.value.canvas.SetMinSize((10,8))
- return panel
-
-class MPLFigureEditor(BasicEditorFactory):
- klass = _MPLFigureEditor
-
-class MPLAction(Action):
- event = Instance(MPLEvent)
-
-class _MPLVMPlotEditor(_MPLFigureEditor, ActionController):
-
- def _create_canvas(self, parent):
- panel = _MPLFigureEditor._create_canvas(self, parent)
- self.mpl_control.mpl_connect("button_press_event", self.on_click)
- return panel
-
- def on_click(self, event):
- if not event.inaxes: return
- if event.button == 3:
- my_menu = Menu(MPLAction(name="Recenter", action="object.recenter",
- event=event),
- MPLAction(name="Yo!", action="object.do_something",
- event=event))
- wxmenu = my_menu.create_menu(self.mpl_control, self)
- self.mpl_control.PopupMenuXY(wxmenu)
-
- def perform ( self, action ):
- """
- This is largely taken/modified from the TreeEditor _perform method.
- """
- object = self.object
- method_name = action.action
- info = self.ui.info
- handler = self.ui.handler
- event = action.event
-
- if method_name.find( '.' ) >= 0:
- if method_name.find( '(' ) < 0:
- method_name += '(event)'
- try:
- eval( method_name, globals(),
- { 'object': object,
- 'editor': self,
- 'info': info,
- 'event': event,
- 'handler': handler } )
- except:
- # fixme: Should the exception be logged somewhere?
- print sys.exc_info()
-
- return
-
- method = getattr( handler, method_name, None )
- if method is not None:
- method( info, object )
- return
-
- if action.on_perform is not None:
- action.on_perform( object )
-
-class MPLVMPlotEditor(BasicEditorFactory):
- klass = _MPLVMPlotEditor
-
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/gui/traited_explorer/setup.py
--- a/yt/gui/traited_explorer/setup.py
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os, sys, os.path
-
-def configuration(parent_package='',top_path=None):
- from numpy.distutils.misc_util import Configuration
- config = Configuration('traited_explorer',parent_package,top_path)
- config.make_config_py() # installs __config__.py
- #config.make_svn_version_py()
- return config
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/gui/traited_explorer/traited_explorer.py
--- a/yt/gui/traited_explorer/traited_explorer.py
+++ /dev/null
@@ -1,479 +0,0 @@
-"""
-New version of Reason, using a TraitsUI-based approach
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
- Copyright (C) 2009 Matthew Turk. All Rights Reserved.
-
- This file is part of yt.
-
- yt is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-
-from yt.mods import *
-from yt.utilities.definitions import \
- x_dict, \
- y_dict
-#pf = EnzoStaticOutput("/Users/matthewturk/Research/data/galaxy1200.dir/galaxy1200")
-
-from enthought.traits.api import \
- HasTraits, List, Instance, Str, Float, Any, Code, PythonValue, Int, CArray, \
- Property, Enum, cached_property, DelegatesTo, Callable, Array, \
- Button
-from enthought.traits.ui.api import \
- Group, VGroup, HGroup, Tabbed, View, Item, ShellEditor, InstanceEditor, ListStrEditor, \
- ListEditor, VSplit, VFlow, HSplit, VFold, ValueEditor, TreeEditor, TreeNode, RangeEditor, \
- EnumEditor, Handler, Controller, DNDEditor
-from enthought.traits.ui.menu import \
- Menu, Action, Separator, OKCancelButtons, OKButton
-from enthought.pyface.action.api import \
- ActionController
-from enthought.tvtk.pyface.scene_editor import SceneEditor
-from enthought.tvtk.pyface.api import \
- DecoratedScene
-from enthought.tvtk.pyface.scene_model import SceneModel
-
-from plot_editors import Figure, MPLFigureEditor, MPLVMPlotEditor, Axes
-
-from yt.visualization.plot_types import VMPlot, ProjectionPlot, SlicePlot
-
-import traceback
-from tvtk_interface import \
- HierarchyImporter, YTScene
-
-class PlotCreationHandler(Controller):
- main_window = Instance(HasTraits)
- pnode = Instance(HasTraits)
-
- format = Str
- plot_type = Any
-
- def close(self, info, is_ok):
- if not is_ok:
- super(Controller, self).close(info, True)
- return
- spt = self.plot_type(plot_spec=self.model, pf=self.pnode.pf,
- name=self.format % (self.model.axis))
- self.pnode.data_objects.append(spt)
- self.main_window.plot_frame_tabs.append(spt)
- spt.plot
-
-class VTKSceneCreationHandler(PlotCreationHandler):
- importer = Instance(HierarchyImporter)
-
- def close(self, info, is_ok):
- if is_ok:
- yt_scene = YTScene(importer=self.importer,
- scene=SceneModel())
- spt = VTKDataObject(name = "VTK: %s" % self.pnode.pf,
- scene=yt_scene.scene,
- yt_scene=yt_scene)
- self.pnode.data_objects.append(spt)
- self.main_window.plot_frame_tabs.append(spt)
- super(Controller, self).close(info, True)
- return True
-
-
-class DataObject(HasTraits):
- name = Str
-
-class VTKDataObject(DataObject):
- yt_scene = Instance(YTScene)
- scene = DelegatesTo("yt_scene")
- add_contours = Button
- add_isocontour = Button
- add_x_plane = Button
- add_y_plane = Button
- add_z_plane = Button
- edit_camera = Button
- edit_operators = Button
- edit_pipeline = Button
- center_on_max = Button
- operators = DelegatesTo("yt_scene")
- traits_view = View(
- Item("scene", editor =
- SceneEditor(scene_class=DecoratedScene),
- resizable=True, show_label=False),
- HGroup(Item("add_contours", show_label=False),
- Item("add_isocontour", show_label=False),
- Item("add_x_plane", show_label=False),
- Item("add_y_plane", show_label=False),
- Item("add_z_plane", show_label=False),
- Item("edit_camera", show_label=False),
- Item("edit_operators", show_label=False),
- Item("edit_pipeline", show_label=False),
- Item("center_on_max", show_label=False),
- ),
- )
-
- operators_edit = View(
- Item("operators", style='custom', show_label=False,
- editor=ListEditor(editor=InstanceEditor(),
- use_notebook=True),
- name="Edit Operators"),
- height=500.0, width=500.0, resizable=True)
-
- def _edit_camera_fired(self):
- self.yt_scene.camera_path.edit_traits()
-
- def _edit_operators_fired(self):
- self.edit_traits(view='operators_edit')
-
- def _edit_pipeline_fired(self):
- from enthought.tvtk.pipeline.browser import PipelineBrowser
- pb = PipelineBrowser(self.scene)
- pb.show()
-
- def _add_contours_fired(self):
- self.yt_scene.add_contour()
-
- def _add_isocontour_fired(self):
- self.yt_scene.add_isocontour()
-
- def _add_x_plane_fired(self):
- self.yt_scene.add_x_plane()
-
- def _add_y_plane_fired(self):
- self.yt_scene.add_y_plane()
-
- def _add_z_plane_fired(self):
- self.yt_scene.add_z_plane()
-
- def _center_on_max_fired(self):
- self.yt_scene.do_center_on_max()
-
-class ParameterFile(HasTraits):
- pf = Instance(EnzoStaticOutput)
- data_objects = List(Instance(DataObject))
- name = Str
-
- def _name_default(self):
- return str(self.pf)
-
- def do_slice(self):
- cons_view = View(
- Item('axis'),
- Item('center'),
- Item('field', editor=EnumEditor(name='field_list')),
- buttons=OKCancelButtons, title="Slicer: %s" % self.pf)
- ps = SlicePlotSpec(pf=self.pf)
- hand = PlotCreationHandler(main_window=mw, pnode=self, model=ps,
- plot_type=SlicePlotTab, format="Slice: %s")
- ps.edit_traits(cons_view, handler=hand)
-
- def do_proj(self):
- cons_view = View(
- Item('axis'),
- Item('field', editor=EnumEditor(name='field_list')),
- Item('weight_field', editor=EnumEditor(name='none_field_list')),
- buttons=OKCancelButtons, title="Projector: %s" % self.pf)
- ps = ProjPlotSpec(pf=self.pf)
- hand = PlotCreationHandler(main_window=mw, pnode=self, model=ps,
- plot_type=ProjPlotTab, format="Proj: %s")
- ps.edit_traits(cons_view, handler=hand)
-
- def do_vtk(self):
- from tvtk_interface import HierarchyImporter, \
- HierarchyImportHandler
- importer = HierarchyImporter(pf=self.pf, max_level=self.pf.h.max_level)
- importer.edit_traits(handler = VTKSceneCreationHandler(
- main_window=mw, pnode=self, importer = importer))
-
-class ParameterFileCollection(HasTraits):
- parameter_files = List(Instance(ParameterFile))
- name = Str
- collection = Any
-
- def _parameter_files_default(self):
- my_list = []
- for f in self.collection:
- try:
- pf = EnzoStaticOutput(f)
- my_list.append(
- ParameterFile(pf=pf,
- data_objects = []))
- except IOError: pass
- return my_list
-
- def _name_default(self):
- return str(self.collection)
-
-class ParameterFileCollectionList(HasTraits):
- parameter_file_collections = List(Instance(ParameterFileCollection))
-
- def _parameter_file_collections_default(self):
- return [ParameterFileCollection(collection=c)
- for c in fido.GrabCollections()]
-
-class DataObjectList(HasTraits):
- data_objects = List(Str)
-
- traits_view = View(
- Item('data_objects', show_label=False,
- editor=ListStrEditor())
- )
-
- def _data_objects_default(self):
- return ['a','b','c']
-
-class PlotFrameTab(DataObject):
- figure = Instance(Figure)
-
-class VMPlotSpec(HasTraits):
- pf = Instance(EnzoStaticOutput)
- field = Str('Density')
- field_list = Property(depends_on = 'pf')
-
- center = Array(shape=(3,), dtype='float64')
- axis = Enum(0,1,2)
-
- @cached_property
- def _get_field_list(self):
- fl = self.pf.h.field_list
- df = self.pf.h.derived_field_list
- fl.sort(); df.sort()
- return fl + df
-
- def _center_default(self):
- return self.pf.h.find_max("Density")[1]
-
-class SlicePlotSpec(VMPlotSpec):
- pass
-
-class ProjPlotSpec(VMPlotSpec):
- weight_field = Str("None")
- none_field_list = Property(depends_on = 'field_list')
-
- @cached_property
- def _get_none_field_list(self):
- return ["None"] + self.field_list
-
-class VMPlotTab(PlotFrameTab):
- pf = Instance(EnzoStaticOutput)
- figure = Instance(Figure, args=())
- field = DelegatesTo('plot_spec')
- field_list = DelegatesTo('plot_spec')
- plot = Instance(VMPlot)
- axes = Instance(Axes)
- disp_width = Float(1.0)
- unit = Str('unitary')
- min_width = Property(Float, depends_on=['pf','unit'])
- max_width = Property(Float, depends_on=['pf','unit'])
- unit_list = Property(depends_on = 'pf')
- smallest_dx = Property(depends_on = 'pf')
-
- traits_view = View(VGroup(
- HGroup(Item('figure', editor=MPLVMPlotEditor(),
- show_label=False)),
- HGroup(Item('disp_width',
- editor=RangeEditor(format="%0.2e",
- low_name='min_width', high_name='max_width',
- mode='logslider', enter_set=True),
- show_label=False, width=400.0),
- Item('unit',
- editor=EnumEditor(name='unit_list')),),
- HGroup(Item('field',
- editor=EnumEditor(name='field_list')),
- )),
- resizable=True)
-
- def __init__(self, **traits):
- super(VMPlotTab, self).__init__(**traits)
- self.axes = self.figure.add_subplot(111, aspect='equal')
-
- def _field_changed(self, old, new):
- self.plot.switch_z(new)
- self._redraw()
-
- @cached_property
- def _get_min_width(self):
- return 50.0*self.smallest_dx*self.pf[self.unit]
-
- @cached_property
- def _get_max_width(self):
- return self.pf['unitary']*self.pf[self.unit]
-
- @cached_property
- def _get_smallest_dx(self):
- return self.pf.h.get_smallest_dx()
-
- @cached_property
- def _get_unit_list(self):
- return self.pf.units.keys()
-
- def _unit_changed(self, old, new):
- self.disp_width = self.disp_width * self.pf[new]/self.pf[old]
-
- def _disp_width_changed(self, old, new):
- self.plot.set_width(new, self.unit)
- self._redraw()
-
- def _redraw(self):
- self.figure.canvas.draw()
-
- def recenter(self, event):
- xp, yp = event.xdata, event.ydata
- dx = abs(self.plot.xlim[0] - self.plot.xlim[1])/self.plot.pix[0]
- dy = abs(self.plot.ylim[0] - self.plot.ylim[1])/self.plot.pix[1]
- x = (dx * xp) + self.plot.xlim[0]
- y = (dy * yp) + self.plot.ylim[0]
- xi = x_dict[self.axis]
- yi = y_dict[self.axis]
- cc = self.center[:]
- cc[xi] = x; cc[yi] = y
- self.plot.data.center = cc[:]
- self.plot.data.set_field_parameter('center', cc.copy())
- self.center = cc
-
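The recenter method above converts a clicked pixel position into data coordinates. A standalone sketch of that mapping, with hypothetical argument names:

    def pixel_to_data(xp, yp, xlim, ylim, pix):
        # data extent per pixel along each axis
        dx = abs(xlim[1] - xlim[0]) / pix[0]
        dy = abs(ylim[1] - ylim[0]) / pix[1]
        # offset from the lower-left corner of the current view
        return (xlim[0] + dx * xp, ylim[0] + dy * yp)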
-class SlicePlotTab(VMPlotTab):
- plot_spec = Instance(SlicePlotSpec)
-
- axis = DelegatesTo('plot_spec')
- center = DelegatesTo('plot_spec')
-
- plot = Instance(SlicePlot)
-
- def _plot_default(self):
- coord = self.center[self.axis]
- sl = self.pf.h.slice(self.axis, coord, center=self.center[:])
- sp = SlicePlot(sl, self.field, self.figure, self.axes)
- self.figure.canvas.draw()
- return sp
-
- def _center_changed(self, old, new):
- #traceback.print_stack()
- if na.all(na.abs(old - new) == 0.0): return
- print na.abs(old-new)
- print "Re-slicing", old, new
- pp = self.center
- self.plot.data.reslice(pp[self.axis])
- self.plot._refresh_display_width()
- self.figure.canvas.draw()
-
-class ProjPlotTab(VMPlotTab):
- plot_spec = Instance(ProjPlotSpec)
-
- axis = DelegatesTo('plot_spec')
- center = DelegatesTo('plot_spec')
- weight_field = DelegatesTo('plot_spec')
-
- plot = Instance(ProjectionPlot)
-
- def _plot_default(self):
- self.field = self.field[:]
- self.weight_field = self.weight_field[:]
- wf = self.weight_field
- if str(wf) == "None": wf = None
- proj = self.pf.h.proj(self.axis, self.field, wf,
- center=self.center[:])
- pp = ProjectionPlot(proj, self.field, self.figure, self.axes)
- self.figure.canvas.draw()
- return pp
-
- def _center_changed(self, old, new):
- self.plot._refresh_display_width()
-
-class SphereWrapper(DataObject):
- radius = Float
- unit = Str
-
-class MainWindow(HasTraits):
- parameter_file_collections = Instance(ParameterFileCollectionList)
- parameter_files = Instance(ParameterFileCollection)
- plot_frame_tabs = List(Instance(DataObject))
- open_parameterfile = Button
- shell = PythonValue
-
- def _shell_default(self):
- return globals()
- notebook_editor = ListEditor(editor=InstanceEditor(editable=True),
- use_notebook=True)
-
- traits_view = View(VSplit(
- HSplit(VGroup(
- Item('parameter_file_collections',
- width=120.0, height=500.0,
- show_label=False,
- editor = TreeEditor(editable=False,
- nodes=[
- TreeNode(node_for=[ParameterFileCollectionList],
- children='parameter_file_collections',
- label="=Data Collections"),
- TreeNode(node_for=[ParameterFileCollection],
- children='parameter_files',
- label="name",
- view=View()),
- TreeNode(node_for=[ParameterFile],
- children='data_objects',
- label="name",
- menu = Menu(Action(name='Slice',
- action='object.do_slice'),
- Action(name='Project',
- action='object.do_proj'),
- Action(name='VTK',
- action='object.do_vtk')),
- view=View()),
- TreeNode(node_for=[DataObject],
- children='',
- label="name"),
- ], show_icons=False),),
- Item('open_parameterfile', show_label=False)),
- Item('plot_frame_tabs', style='custom',
- editor = notebook_editor,
- show_label=False, height=500.0, width=500.0),
- ),
- HGroup(
- #Item('shell', editor=ShellEditor(share=True),
- #show_label=False, height=120.0),
- ),
- ),
- resizable=True, width=800.0, height=660.0,
- title="reason v2 [prototype]")
-
- def _open_parameterfile_fired(self):
- print "OPENING"
-
- def _parameter_file_collections_default(self):
- return ParameterFileCollectionList()
-
-class YTScript(HasTraits):
- code = Code
- traits_view = View(Item('code', show_label=False),
- height=0.8, width=0.8, resizable=True,
- buttons=OKCancelButtons)
-
-class ObjectViewer(HasTraits):
- to_view=Any
- traits_view = View(
- Item('to_view', editor=ValueEditor(), show_label=False),
- resizable=True, height=0.8, width=0.8)
-
-def view_object(obj):
- ObjectViewer(to_view=obj).edit_traits()
-
-def run_script():
- my_script = YTScript()
- my_script.edit_traits()
- return my_script
-
-class event_mock(object):
- inaxes = True
- button = 3
-
-dol = DataObjectList()
-mw = MainWindow(plot_frame_tabs = [])
-mw.edit_traits()
-#mw.edit_traits()
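The removed prototype above leans heavily on the TraitsUI convention that a method named _<name>_default supplies a trait's value the first time it is read, so expensive lists are built lazily. A minimal sketch of that pattern, with hypothetical class and trait names:

    from enthought.traits.api import HasTraits, List, Str

    class Collection(HasTraits):
        # Traits calls _items_default() on first access to items
        items = List(Str)
        name = Str

        def _items_default(self):
            return ["a", "b", "c"]

        def _name_default(self):
            return "collection of %d items" % len(self.items)

    c = Collection()
    print c.name   # triggers both lazy defaults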
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/gui/traited_explorer/tvtk_interface.py
--- a/yt/gui/traited_explorer/tvtk_interface.py
+++ /dev/null
@@ -1,692 +0,0 @@
-"""
-This is the preliminary interface to VTK. Note that as of VTK 5.2, it still
-requires a patchset prepared here:
-http://yt-project.org/files/vtk_composite_data.zip
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
- Copyright (C) 2007-2011 Matthew Turk. All Rights Reserved.
-
- This file is part of yt.
-
- yt is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-
-from enthought.tvtk.tools import ivtk
-from enthought.tvtk.api import tvtk
-from enthought.traits.api import \
- Float, HasTraits, Instance, Range, Any, Delegate, Tuple, File, Int, Str, \
- CArray, List, Button, Bool, Property, cached_property
-from enthought.traits.ui.api import View, Item, HGroup, VGroup, TableEditor, \
- Handler, Controller, RangeEditor, EnumEditor, InstanceEditor
-from enthought.traits.ui.menu import \
- Menu, Action, Separator, OKCancelButtons, OKButton
-from enthought.traits.ui.table_column import ObjectColumn
-from enthought.tvtk.pyface.api import DecoratedScene
-
-import enthought.pyface.api as pyface
-
-#from yt.reason import *
-import sys
-import numpy as na
-import time, pickle, os, os.path
-from yt.funcs import *
-from yt.analysis_modules.hierarchy_subset.api import \
- ExtractedHierarchy, ExtractedParameterFile
-
-#from enthought.tvtk.pyface.ui.wx.wxVTKRenderWindowInteractor \
- #import wxVTKRenderWindowInteractor
-
-from enthought.mayavi.core.lut_manager import LUTManager
-
-#wxVTKRenderWindowInteractor.USE_STEREO = 1
-
-class TVTKMapperWidget(HasTraits):
- alpha = Float(1.0)
- post_call = Any
- lut_manager = Instance(LUTManager)
-
- def _alpha_changed(self, old, new):
- self.lut_manager.lut.alpha_range = (new, new)
- self.post_call()
-
-class MappingPlane(TVTKMapperWidget):
- plane = Instance(tvtk.Plane)
- _coord_redit = RangeEditor(format="%0.2e",
- low_name='vmin', high_name='vmax',
- auto_set=False, enter_set=True)
- auto_set = Bool(False)
- traits_view = View(Item('coord', editor=_coord_redit),
- Item('auto_set'),
- Item('alpha', editor=RangeEditor(
- low=0.0, high=1.0,
- enter_set=True, auto_set=False)),
- Item('lut_manager', show_label=False,
- editor=InstanceEditor(), style='custom'))
- vmin = Float
- vmax = Float
-
- def _auto_set_changed(self, old, new):
- if new is True:
- self._coord_redit.auto_set = True
- self._coord_redit.enter_set = False
- else:
- self._coord_redit.auto_set = False
- self._coord_redit.enter_set = True
-
- def __init__(self, vmin, vmax, vdefault, **traits):
- HasTraits.__init__(self, **traits)
- self.vmin = vmin
- self.vmax = vmax
- trait = Range(float(vmin), float(vmax), value=vdefault)
- self.add_trait("coord", trait)
- self.coord = vdefault
-
- def _coord_changed(self, old, new):
- orig = self.plane.origin[:]
- orig[self.axis] = new
- self.plane.origin = orig
- self.post_call()
-
-class MappingMarchingCubes(TVTKMapperWidget):
- operator = Instance(tvtk.MarchingCubes)
- mapper = Instance(tvtk.HierarchicalPolyDataMapper)
- vmin = Float
- vmax = Float
- auto_set = Bool(False)
- _val_redit = RangeEditor(format="%0.2f",
- low_name='vmin', high_name='vmax',
- auto_set=False, enter_set=True)
- traits_view = View(Item('value', editor=_val_redit),
- Item('auto_set'),
- Item('alpha', editor=RangeEditor(
- low=0.0, high=1.0,
- enter_set=True, auto_set=False,)),
- Item('lut_manager', show_label=False,
- editor=InstanceEditor(), style='custom'))
-
- def __init__(self, vmin, vmax, vdefault, **traits):
- HasTraits.__init__(self, **traits)
- self.vmin = vmin
- self.vmax = vmax
- trait = Range(float(vmin), float(vmax), value=vdefault)
- self.add_trait("value", trait)
- self.value = vdefault
-
- def _auto_set_changed(self, old, new):
- if new is True:
- self._val_redit.auto_set = True
- self._val_redit.enter_set = False
- else:
- self._val_redit.auto_set = False
- self._val_redit.enter_set = True
-
- def _value_changed(self, old, new):
- self.operator.set_value(0, new)
- self.post_call()
-
-class MappingIsoContour(MappingMarchingCubes):
- operator = Instance(tvtk.ContourFilter)
-
-class CameraPosition(HasTraits):
- position = CArray(shape=(3,), dtype='float64')
- focal_point = CArray(shape=(3,), dtype='float64')
- view_up = CArray(shape=(3,), dtype='float64')
- clipping_range = CArray(shape=(2,), dtype='float64')
- distance = Float
- num_steps = Int(10)
- orientation_wxyz = CArray(shape=(4,), dtype='float64')
-
-class CameraControl(HasTraits):
- # Traits
- positions = List(CameraPosition)
- yt_scene = Instance('YTScene')
- center = Delegate('yt_scene')
- scene = Delegate('yt_scene')
- camera = Instance(tvtk.OpenGLCamera)
- reset_position = Instance(CameraPosition)
- fps = Float(25.0)
- export_filename = 'frames'
- periodic = Bool
-
- # UI elements
- snapshot = Button()
- play = Button()
- export_frames = Button()
- reset_path = Button()
- recenter = Button()
- save_path = Button()
- load_path = Button()
- export_path = Button()
-
- table_def = TableEditor(
- columns = [ ObjectColumn(name='position'),
- ObjectColumn(name='focal_point'),
- ObjectColumn(name='view_up'),
- ObjectColumn(name='clipping_range'),
- ObjectColumn(name='num_steps') ],
- reorderable=True, deletable=True,
- sortable=True, sort_model=True,
- show_toolbar=True,
- selection_mode='row',
- selected = 'reset_position'
- )
-
- default_view = View(
- VGroup(
- HGroup(
- Item('camera', show_label=False),
- Item('recenter', show_label=False),
- label='Camera'),
- HGroup(
- Item('snapshot', show_label=False),
- Item('play', show_label=False),
- Item('export_frames',show_label=False),
- Item('reset_path', show_label=False),
- Item('save_path', show_label=False),
- Item('load_path', show_label=False),
- Item('export_path', show_label=False),
- Item('export_filename'),
- Item('periodic'),
- Item('fps'),
- label='Playback'),
- VGroup(
- Item('positions', show_label=False,
- editor=table_def),
- label='Camera Path'),
- ),
- resizable=True, title="Camera Path Editor",
- )
-
- def _reset_position_changed(self, old, new):
- if new is None: return
- cam = self.scene.camera
- cam.position = new.position
- cam.focal_point = new.focal_point
- cam.view_up = new.view_up
- cam.clipping_range = new.clipping_range
- self.scene.render()
-
- def __init__(self, **traits):
- HasTraits.__init__(self, **traits)
-
- def take_snapshot(self):
- cam = self.scene.camera
- self.positions.append(CameraPosition(
- position=cam.position,
- focal_point=cam.focal_point,
- view_up=cam.view_up,
- clipping_range=cam.clipping_range,
- distance=cam.distance,
- orientation_wxyz=cam.orientation_wxyz))
-
- def _export_path_fired(self):
- dlg = pyface.FileDialog(
- action='save as',
- wildcard="*.cpath",
- )
- if dlg.open() == pyface.OK:
- print "Saving:", dlg.path
- self.export_camera_path(dlg.path)
-
- def export_camera_path(self, fn):
- to_dump = dict(positions=[], focal_points=[],
- view_ups=[], clipping_ranges=[],
- distances=[], orientation_wxyzs=[])
- def _write(cam):
- to_dump['positions'].append(cam.position)
- to_dump['focal_points'].append(cam.focal_point)
- to_dump['view_ups'].append(cam.view_up)
- to_dump['clipping_ranges'].append(cam.clipping_range)
- to_dump['distances'].append(cam.distance)
- to_dump['orientation_wxyzs'].append(cam.orientation_wxyz)
- self.step_through(0.0, callback=_write)
- pickle.dump(to_dump, open(fn, "wb"))
-
- def _save_path_fired(self):
- dlg = pyface.FileDialog(
- action='save as',
- wildcard="*.cpath",
- )
- if dlg.open() == pyface.OK:
- print "Saving:", dlg.path
- self.dump_camera_path(dlg.path)
-
- def dump_camera_path(self, fn):
- to_dump = dict(positions=[], focal_points=[],
- view_ups=[], clipping_ranges=[],
- distances=[], orientation_wxyzs=[],
- num_stepss=[])
- for p in self.positions:
- to_dump['positions'].append(p.position)
- to_dump['focal_points'].append(p.focal_point)
- to_dump['view_ups'].append(p.view_up)
- to_dump['clipping_ranges'].append(p.clipping_range)
- to_dump['distances'].append(p.distance)
- to_dump['num_stepss'].append(p.num_steps) # stupid s
- to_dump['orientation_wxyzs'].append(p.orientation_wxyz)
- pickle.dump(to_dump, open(fn, "wb"))
-
- def _load_path_fired(self):
- dlg = pyface.FileDialog(
- action='open',
- wildcard="*.cpath",
- )
- if dlg.open() == pyface.OK:
- print "Loading:", dlg.path
- self.load_camera_path(dlg.path)
-
- def load_camera_path(self, fn):
- to_use = pickle.load(open(fn, "rb"))
- self.positions = []
- for i in range(len(to_use['positions'])):
- dd = {}
- for kw in to_use:
- # Strip the s
- dd[kw[:-1]] = to_use[kw][i]
- self.positions.append(
- CameraPosition(**dd))
-
- def _recenter_fired(self):
- self.camera.focal_point = self.center
- self.scene.render()
-
- def _snapshot_fired(self):
- self.take_snapshot()
-
- def _play_fired(self):
- self.step_through()
-
- def _export_frames_fired(self):
- self.step_through(save_frames=True)
-
- def _reset_path_fired(self):
- self.positions = []
-
- def step_through(self, pause = 1.0, callback=None, save_frames=False):
- cam = self.scene.camera
- frame_counter=0
- if self.periodic:
- cyclic_pos = self.positions + [self.positions[0]]
- else:
- cyclic_pos = self.positions
- for i in range(len(cyclic_pos)-1):
- pos1 = cyclic_pos[i]
- pos2 = cyclic_pos[i+1]
- r = pos1.num_steps
- for p in range(pos1.num_steps):
- po = _interpolate(pos1.position, pos2.position, p, r)
- fp = _interpolate(pos1.focal_point, pos2.focal_point, p, r)
- vu = _interpolate(pos1.view_up, pos2.view_up, p, r)
- cr = _interpolate(pos1.clipping_range, pos2.clipping_range, p, r)
- _set_cpos(cam, po, fp, vu, cr)
- self.scene.render()
- if callback is not None: callback(cam)
- if save_frames:
- self.scene.save("%s_%0.5d.png" % (self.export_filename,frame_counter))
- else:
- time.sleep(pause * 1.0/self.fps)
- frame_counter += 1
-
-def _interpolate(q1, q2, p, r):
- return q1 + p*(q2 - q1)/float(r)
-
-def _set_cpos(cam, po, fp, vu, cr):
- cam.position = po
- cam.focal_point = fp
- cam.view_up = vu
- cam.clipping_range = cr
-
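step_through() blends each camera attribute linearly between keyframes via _interpolate(). A numpy sketch of one attribute's path:

    import numpy as np

    def interpolate_attribute(q1, q2, n_steps):
        # step p of r yields q1 + p*(q2 - q1)/r, as in _interpolate above
        q1 = np.asarray(q1, dtype='float64')
        q2 = np.asarray(q2, dtype='float64')
        return [q1 + p * (q2 - q1) / float(n_steps) for p in range(n_steps)]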
-class HierarchyImporter(HasTraits):
- pf = Any
- min_grid_level = Int(0)
- max_level = Int(1)
- number_of_levels = Range(0, 13)
- max_import_levels = Property(depends_on='min_grid_level')
- field = Str("Density")
- field_list = List
- center_on_max = Bool(True)
- center = CArray(shape = (3,), dtype = 'float64')
- cache = Bool(True)
- smoothed = Bool(True)
- show_grids = Bool(True)
-
- def _field_list_default(self):
- fl = self.pf.h.field_list
- df = self.pf.h.derived_field_list
- fl.sort(); df.sort()
- return fl + df
-
- default_view = View(Item('min_grid_level',
- editor=RangeEditor(low=0,
- high_name='max_level')),
- Item('number_of_levels',
- editor=RangeEditor(low=1,
- high_name='max_import_levels')),
- Item('field', editor=EnumEditor(name='field_list')),
- Item('center_on_max'),
- Item('center', enabled_when='not object.center_on_max'),
- Item('smoothed'),
- Item('cache', label='Pre-load data'),
- Item('show_grids'),
- buttons=OKCancelButtons)
-
- def _center_default(self):
- return [0.5,0.5,0.5]
-
- @cached_property
- def _get_max_import_levels(self):
- return min(13, self.pf.h.max_level - self.min_grid_level + 1)
-
-class HierarchyImportHandler(Controller):
- importer = Instance(HierarchyImporter)
-
-
- def close(self, info, is_ok):
- if is_ok:
- yt_scene = YTScene(
- importer=self.importer)
- super(HierarchyImportHandler, self).close(info, True)
- return
-
-
-class YTScene(HasTraits):
-
- # Traits
- importer = Instance(HierarchyImporter)
- pf = Delegate("importer")
- min_grid_level = Delegate("importer")
- number_of_levels = Delegate("importer")
- field = Delegate("importer")
- center = CArray(shape = (3,), dtype = 'float64')
- center_on_max = Delegate("importer")
- smoothed = Delegate("importer")
- cache = Delegate("importer")
- show_grids = Delegate("importer")
-
- camera_path = Instance(CameraControl)
- #window = Instance(ivtk.IVTKWithCrustAndBrowser)
- #python_shell = Delegate('window')
- #scene = Delegate('window')
- scene = Instance(HasTraits)
- operators = List(HasTraits)
-
- # State variables
- _grid_boundaries_actor = None
-
- # Views
- def _window_default(self):
- # Should experiment with passing in a pipeline browser
- # that has two root objects -- one for TVTKBases, i.e. the render
- # window, and one that accepts our objects
- return ivtk.IVTKWithCrustAndBrowser(size=(800,600), stereo=1)
-
- def _camera_path_default(self):
- return CameraControl(yt_scene=self, camera=self.scene.camera)
-
- def __init__(self, **traits):
- HasTraits.__init__(self, **traits)
- max_level = min(self.pf.h.max_level,
- self.min_grid_level + self.number_of_levels - 1)
- self.extracted_pf = ExtractedParameterFile(self.pf,
- self.min_grid_level, max_level, offset=None)
- self.extracted_hierarchy = self.extracted_pf.h
- self._hdata_set = tvtk.HierarchicalBoxDataSet()
- self._ugs = []
- self._grids = []
- self._min_val = 1e60
- self._max_val = -1e60
- gid = 0
- if self.cache:
- for grid_set in self.extracted_hierarchy.get_levels():
- for grid in grid_set:
- grid[self.field]
- for l, grid_set in enumerate(self.extracted_hierarchy.get_levels()):
- gid = self._add_level(grid_set, l, gid)
- if self.show_grids:
- self.toggle_grid_boundaries()
-
- def _center_default(self):
- return self.extracted_hierarchy._convert_coords(
- [0.5, 0.5, 0.5])
-
- def do_center_on_max(self):
- self.center = self.extracted_hierarchy._convert_coords(
- self.pf.h.find_max("Density")[1])
- self.scene.camera.focal_point = self.center
-
- def _add_level(self, grid_set, level, gid):
- for grid in grid_set:
- self._hdata_set.set_refinement_ratio(level, 2)
- gid = self._add_grid(grid, gid, level)
- return gid
-
- def _add_grid(self, grid, gid, level=0):
- mylog.debug("Adding grid %s on level %s (%s)",
- grid.id, level, grid.Level)
- if grid in self._grids: return
- self._grids.append(grid)
-
- scalars = grid.get_vertex_centered_data(self.field, smoothed=self.smoothed)
-
- left_index = grid.get_global_startindex()
- origin = grid.LeftEdge
- dds = grid.dds
- right_index = left_index + scalars.shape - 1
- ug = tvtk.UniformGrid(origin=origin, spacing=dds,
- dimensions=grid.ActiveDimensions+1)
- if self.field not in self.pf.field_info or \
- self.pf.field_info[self.field].take_log:
- scalars = na.log10(scalars)
- ug.point_data.scalars = scalars.transpose().ravel()
- ug.point_data.scalars.name = self.field
- if grid.Level != self.min_grid_level + self.number_of_levels - 1:
- ug.cell_visibility_array = grid.child_mask.transpose().ravel()
- else:
- ug.cell_visibility_array = na.ones(
- grid.ActiveDimensions, dtype='int').ravel()
- self._ugs.append((grid,ug))
- self._hdata_set.set_data_set(level, gid, left_index, right_index, ug)
-
- self._min_val = min(self._min_val, scalars.min())
- self._max_val = max(self._max_val, scalars.max())
-
- gid += 1
- return gid
-
- def _add_data_to_ug(self, field):
- for g, ug in self._ugs:
- scalars_temp = g.get_vertex_centered_data(field, smoothed=self.smoothed)
- ii = ug.point_data.add_array(scalars_temp.transpose().ravel())
- ug.point_data.get_array(ii).name = field
-
- def zoom(self, dist, unit='1'):
- vec = self.scene.camera.focal_point - \
- self.scene.camera.position
- self.scene.camera.position += \
- vec * dist/self._grids[0].pf[unit]
- self.scene.render()
-
- def toggle_grid_boundaries(self):
- if self._grid_boundaries_actor is None:
- # We don't need to track this stuff right now.
- ocf = tvtk.OutlineCornerFilter(
- executive=tvtk.CompositeDataPipeline(),
- corner_factor = 0.5)
- ocf.input = self._hdata_set
- ocm = tvtk.HierarchicalPolyDataMapper(
- input_connection = ocf.output_port)
- self._grid_boundaries_actor = tvtk.Actor(mapper = ocm)
- self.scene.add_actor(self._grid_boundaries_actor)
- else:
- self._grid_boundaries_actor.visibility = \
- (not self._grid_boundaries_actor.visibility)
-
- def _add_sphere(self, origin=(0.0,0.0,0.0), normal=(0,1,0)):
- sphere = tvtk.Sphere(center=origin, radius=0.25)
- cutter = tvtk.Cutter(executive = tvtk.CompositeDataPipeline(),
- cut_function = sphere)
- cutter.input = self._hdata_set
- lut_manager = LUTManager(data_name=self.field, scene=self.scene)
- smap = tvtk.HierarchicalPolyDataMapper(
- scalar_range=(self._min_val, self._max_val),
- lookup_table=lut_manager.lut,
- input_connection = cutter.output_port)
- sactor = tvtk.Actor(mapper=smap)
- self.scene.add_actors(sactor)
- return sphere, lut_manager
-
- def _add_plane(self, origin=(0.0,0.0,0.0), normal=(0,1,0)):
- plane = tvtk.Plane(origin=origin, normal=normal)
- cutter = tvtk.Cutter(executive = tvtk.CompositeDataPipeline(),
- cut_function = plane)
- cutter.input = self._hdata_set
- lut_manager = LUTManager(data_name=self.field, scene=self.scene)
- smap = tvtk.HierarchicalPolyDataMapper(
- scalar_range=(self._min_val, self._max_val),
- lookup_table=lut_manager.lut,
- input_connection = cutter.output_port)
- sactor = tvtk.Actor(mapper=smap)
- self.scene.add_actors(sactor)
- return plane, lut_manager
-
- def add_plane(self, origin=(0.0,0.0,0.0), normal=(0,1,0)):
- self.operators.append(self._add_plane(origin, normal))
- return self.operators[-1]
-
- def _add_axis_plane(self, axis):
- normal = [0,0,0]
- normal[axis] = 1
- np, lut_manager = self._add_plane(self.center, normal=normal)
- LE = self.extracted_hierarchy.min_left_edge
- RE = self.extracted_hierarchy.max_right_edge
- self.operators.append(MappingPlane(
- vmin=LE[axis], vmax=RE[axis],
- vdefault = self.center[axis],
- post_call = self.scene.render,
- plane = np, axis=axis, coord=0.0,
- lut_manager = lut_manager,
- scene=self.scene))
-
- def add_x_plane(self):
- self._add_axis_plane(0)
- return self.operators[-1]
-
- def add_y_plane(self):
- self._add_axis_plane(1)
- return self.operators[-1]
-
- def add_z_plane(self):
- self._add_axis_plane(2)
- return self.operators[-1]
-
- def add_contour(self, val=None):
- if val is None:
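- # NaN guard: a float compares unequal to itself only when it is NaN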
- if self._min_val != self._min_val:
- self._min_val = 1.0
- val = (self._max_val+self._min_val) * 0.5
- cubes = tvtk.MarchingCubes(
- executive = tvtk.CompositeDataPipeline())
- cubes.input = self._hdata_set
- cubes.set_value(0, val)
- lut_manager = LUTManager(data_name=self.field, scene=self.scene)
- cube_mapper = tvtk.HierarchicalPolyDataMapper(
- input_connection = cubes.output_port,
- lookup_table=lut_manager.lut)
- cube_mapper.color_mode = 'map_scalars'
- cube_mapper.scalar_range = (self._min_val, self._max_val)
- cube_actor = tvtk.Actor(mapper=cube_mapper)
- self.scene.add_actors(cube_actor)
- self.operators.append(MappingMarchingCubes(operator=cubes,
- vmin=self._min_val, vmax=self._max_val,
- vdefault=val,
- mapper = cube_mapper,
- post_call = self.scene.render,
- lut_manager = lut_manager,
- scene=self.scene))
- return self.operators[-1]
-
- def add_isocontour(self, val=None):
- if val is None: val = (self._max_val+self._min_val) * 0.5
- isocontour = tvtk.ContourFilter(
- executive = tvtk.CompositeDataPipeline())
- isocontour.input = self._hdata_set
- isocontour.generate_values(1, (val, val))
- lut_manager = LUTManager(data_name=self.field, scene=self.scene)
- isocontour_normals = tvtk.PolyDataNormals(
- executive=tvtk.CompositeDataPipeline())
- isocontour_normals.input_connection = isocontour.output_port
- iso_mapper = tvtk.HierarchicalPolyDataMapper(
- input_connection = isocontour_normals.output_port,
- lookup_table=lut_manager.lut)
- iso_mapper.scalar_range = (self._min_val, self._max_val)
- iso_actor = tvtk.Actor(mapper=iso_mapper)
- self.scene.add_actors(iso_actor)
- self.operators.append(MappingIsoContour(operator=isocontour,
- vmin=self._min_val, vmax=self._max_val,
- vdefault=val,
- mapper = iso_mapper,
- post_call = self.scene.render,
- lut_manager = lut_manager,
- scene=self.scene))
- return self.operators[-1]
-
- def display_points(self):
- dd = self.pf.h.all_data()
- points = tvtk.Points()
- good = (dd["creation_time"] > 0.0)
- points.data = na.array([ dd["particle_position_%s" % ax][good] for ax in 'xyz' ]).transpose()
- mass = na.log10(dd["ParticleAge"][good])
- self.conn = tvtk.CellArray()
- for i in xrange(mass.shape[0]):
- self.conn.insert_next_cell(1)
- self.conn.insert_cell_point(i)
- self.points = points
- self.pd = tvtk.PolyData(points = self.points, verts = self.conn)
- self.pd.point_data.scalars = mass
- lut = tvtk.LookupTable()
- self.pdm = tvtk.PolyDataMapper(input = self.pd,
- lookup_table = lut)
- self.pdm.scalar_range = (mass.min(), mass.max())
- self.pdm.scalar_mode = 'use_point_data'
- self.point_actor = tvtk.Actor(mapper = self.pdm)
- self.scene.add_actor(self.point_actor)
-
-def get_all_parents(grid):
- parents = []
- if len(grid.Parents) == 0: return [grid]
- for parent in grid.Parents: parents.extend(get_all_parents(parent))
- return list(set(parents))
-
-def run_vtk():
- gui = pyface.GUI()
- importer = HierarchyImporter()
- importer.edit_traits(handler = HierarchyImportHandler(
- importer = importer))
- #ehds.edit_traits()
- gui.start_event_loop()
-
-
-if __name__=="__main__":
- print "This code probably won't work. But if you want to give it a try,"
- print "you need:"
- print
- print "VTK (CVS)"
- print "Mayavi2 (from Enthought)"
- print
- print "If you have 'em, give it a try!"
- print
- run_vtk()
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -106,7 +106,7 @@
from yt.visualization.volume_rendering.api import \
ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \
- HomogenizedVolume, Camera
+ HomogenizedVolume, Camera, off_axis_projection
for name, cls in callback_registry.items():
exec("%s = cls" % name)
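This hunk only re-exports off_axis_projection from yt.mods. A hedged usage sketch; the dataset path and the argument order here are assumptions, not taken from this diff:

    from yt.mods import load, off_axis_projection
    pf = load("DD0010/moving7_0010")  # hypothetical dataset
    # assumed signature: (pf, center, normal_vector, width, resolution, field)
    image = off_axis_projection(pf, [0.5, 0.5, 0.5], [0.3, 0.4, 0.5],
                                1.0, 256, "Density")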
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/utilities/_amr_utils/FixedInterpolator.c
--- a/yt/utilities/_amr_utils/FixedInterpolator.c
+++ b/yt/utilities/_amr_utils/FixedInterpolator.c
@@ -127,8 +127,8 @@
return vz[0];
}
-npy_float64 eval_gradient(int *ds, int *ci, npy_float64 *dp,
- npy_float64 *data, npy_float64 *grad)
+void eval_gradient(int ds[3], npy_float64 dp[3],
+ npy_float64 *data, npy_float64 grad[3])
{
// We just take some small value
@@ -145,9 +145,9 @@
//fprintf(stderr, "DIM: %d %0.3lf %0.3lf\n", i, plus, minus);
denom = plus - minus;
dp[i] = plus;
- grad[i] += trilinear_interpolate(ds, ci, dp, data) / denom;
+ grad[i] += offset_interpolate(ds, dp, data) / denom;
dp[i] = minus;
- grad[i] -= trilinear_interpolate(ds, ci, dp, data) / denom;
+ grad[i] -= offset_interpolate(ds, dp, data) / denom;
dp[i] = backup;
normval += grad[i]*grad[i];
}
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/utilities/_amr_utils/FixedInterpolator.h
--- a/yt/utilities/_amr_utils/FixedInterpolator.h
+++ b/yt/utilities/_amr_utils/FixedInterpolator.h
@@ -41,8 +41,7 @@
npy_float64 trilinear_interpolate(int ds[3], int ci[3], npy_float64 dp[3],
npy_float64 *data);
-npy_float64 eval_gradient(int ds[3], int ci[3], npy_float64 dp[3],
- npy_float64 *data, npy_float64 *grad);
+void eval_gradient(int ds[3], npy_float64 dp[3], npy_float64 *data, npy_float64 grad[3]);
void vertex_interp(npy_float64 v1, npy_float64 v2, npy_float64 isovalue,
npy_float64 vl[3], npy_float64 dds[3],
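eval_gradient now differences offset_interpolate along each axis instead of trilinear_interpolate, dropping the cell-index argument. A Python sketch of the same central-difference scheme, with f standing in for the interpolator:

    import numpy as np

    def eval_gradient_sketch(f, dp, eps=1e-3):
        # sample f at dp[i] +/- eps along each axis, then normalize,
        # mirroring the plus/minus sampling in eval_gradient
        grad = np.zeros(3)
        for i in range(3):
            plus, minus = list(dp), list(dp)
            plus[i] += eps
            minus[i] -= eps
            grad[i] = (f(plus) - f(minus)) / (2.0 * eps)
        norm = np.sqrt(np.dot(grad, grad))
        return grad / norm if norm > 0.0 else grad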
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -69,6 +69,12 @@
cdef struct Triangle:
Triangle *next
np.float64_t p[3][3]
+ np.float64_t val
+
+cdef struct TriangleCollection:
+ int count
+ Triangle *first
+ Triangle *current
cdef Triangle *AddTriangle(Triangle *self,
np.float64_t p0[3], np.float64_t p1[3], np.float64_t p2[3]):
@@ -93,6 +99,25 @@
this = this.next
return count
+cdef void FillTriangleValues(np.ndarray[np.float64_t, ndim=1] values,
+ Triangle *first):
+ cdef Triangle *this = first
+ cdef Triangle *last
+ cdef int i = 0
+ while this != NULL:
+ values[i] = this.val
+ i += 1
+ last = this
+ this = this.next
+
+cdef void WipeTriangles(Triangle *first):
+ cdef Triangle *this = first
+ cdef Triangle *last
+ while this != NULL:
+ last = this
+ this = this.next
+ free(last)
+
cdef void FillAndWipeTriangles(np.ndarray[np.float64_t, ndim=2] vertices,
Triangle *first):
cdef int count = 0
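Triangles now carry a per-triangle val, and FillTriangleValues/WipeTriangles walk the same singly linked list that AddTriangle builds. A Python sketch of the walk; Tri is a hypothetical stand-in for the C struct:

    class Tri(object):
        def __init__(self, val, next=None):
            self.val, self.next = val, next

    def fill_triangle_values(first):
        # copy each node's val out in list order, as FillTriangleValues does
        out, this = [], first
        while this is not None:
            out.append(this.val)
            this = this.next
        return out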
@@ -114,8 +139,8 @@
np.float64_t offset_interpolate(int ds[3], np.float64_t dp[3], np.float64_t *data)
np.float64_t trilinear_interpolate(int ds[3], int ci[3], np.float64_t dp[3],
np.float64_t *data)
- np.float64_t eval_gradient(int *ds, int *ci, np.float64_t *dp,
- np.float64_t *data, np.float64_t *grad)
+ void eval_gradient(int ds[3], np.float64_t dp[3], np.float64_t *data,
+ np.float64_t grad[3])
void offset_fill(int *ds, np.float64_t *data, np.float64_t *gridval)
void vertex_interp(np.float64_t v1, np.float64_t v2, np.float64_t isovalue,
np.float64_t vl[3], np.float64_t dds[3],
@@ -267,9 +292,12 @@
cdef np.float64_t FIT_get_value(FieldInterpolationTable *fit,
np.float64_t *dvs):
- cdef np.float64_t bv, dy, dd, tf
+ cdef np.float64_t bv, dy, dd, tf, rv
cdef int bin_id
- if fit.pass_through == 1: return dvs[fit.field_id]
+ if fit.pass_through == 1:
+ rv = dvs[fit.field_id]
+ if fit.weight_field_id != -1: rv *= dvs[fit.weight_field_id]
+ return rv
if dvs[fit.field_id] > fit.bounds[1] or dvs[fit.field_id] < fit.bounds[0]: return 0.0
bin_id = <int> ((dvs[fit.field_id] - fit.bounds[0]) * fit.idbin)
dd = dvs[fit.field_id] - (fit.bounds[0] + bin_id * fit.dbin) # x - x0
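FIT_get_value rejects out-of-bounds inputs, locates the bin, and interpolates linearly within it; pass-through values are now optionally scaled by a weight field. A sketch of the bin lookup, assuming uniformly spaced bins:

    def table_lookup(values, bounds, x):
        # values holds samples at nbins+1 uniformly spaced points
        if x < bounds[0] or x > bounds[1]:
            return 0.0
        nbins = len(values) - 1
        dbin = (bounds[1] - bounds[0]) / nbins
        b = min(int((x - bounds[0]) / dbin), nbins - 1)
        dd = x - (bounds[0] + b * dbin)          # x - x0
        return values[b] + (values[b + 1] - values[b]) * dd / dbin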
@@ -289,7 +317,6 @@
# correspond to multiple tables, and each field table will only have
# interpolate called once.
cdef FieldInterpolationTable field_tables[6]
- cdef np.float64_t istorage[6]
# Here are the field tables that correspond to each of the six channels.
# We have three emission channels, three absorption channels.
@@ -314,7 +341,6 @@
self.tf_obj = tf_obj
self.n_field_tables = tf_obj.n_field_tables
- for i in range(6): self.istorage[i] = 0.0
self.my_field_tables = []
for i in range(self.n_field_tables):
@@ -345,7 +371,7 @@
cdef void eval_transfer(self, np.float64_t dt, np.float64_t *dvs,
np.float64_t *rgba, np.float64_t *grad):
cdef int i, fid, use
- cdef np.float64_t ta, tf, trgba[6], dot_prod
+ cdef np.float64_t ta, tf, istorage[6], trgba[6], dot_prod
# NOTE: We now disable this. I have left it to ease the process of
# potentially, one day, re-including it.
#use = 0
@@ -355,14 +381,15 @@
# (dvs[fid] <= self.field_tables[i].bounds[1]):
# use = 1
# break
+ for i in range(6): istorage[i] = 0.0
for i in range(self.n_field_tables):
- self.istorage[i] = FIT_get_value(&self.field_tables[i], dvs)
+ istorage[i] = FIT_get_value(&self.field_tables[i], dvs)
# We have to do this after the interpolation
for i in range(self.n_field_tables):
fid = self.field_tables[i].weight_table_id
- if fid != -1: self.istorage[i] *= self.istorage[fid]
+ if fid != -1: istorage[i] *= istorage[fid]
for i in range(6):
- trgba[i] = self.istorage[self.field_table_ids[i]]
+ trgba[i] = istorage[self.field_table_ids[i]]
#print i, trgba[i],
#print
# A few words on opacity. We're going to be integrating equation 1.23
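Moving istorage from the proxy object into eval_transfer makes the scratch buffer per-call rather than shared state. After interpolation, each table may still be scaled by another table's value; a sketch of that weight-table chaining:

    def apply_weight_tables(istorage, weight_table_ids):
        # weight_table_ids[i] == -1 means table i is unweighted
        for i, fid in enumerate(weight_table_ids):
            if fid != -1:
                istorage[i] *= istorage[fid]
        return istorage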
@@ -583,12 +610,13 @@
np.float64_t v_dir[3],
np.float64_t rgba[4],
TransferFunctionProxy tf,
- np.float64_t *return_t = NULL):
+ np.float64_t *return_t = NULL,
+ np.float64_t enter_t = -1.0):
cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
cdef np.float64_t intersect_t = 1.0
cdef np.float64_t iv_dir[3]
cdef np.float64_t intersect[3], tmax[3], tdelta[3]
- cdef np.float64_t enter_t, dist, alpha, dt, exit_t
+ cdef np.float64_t dist, alpha, dt, exit_t
cdef np.float64_t tr, tl, temp_x, temp_y, dv
for i in range(3):
if (v_dir[i] < 0):
@@ -624,6 +652,7 @@
self.left_edge[1] <= v_pos[1] and v_pos[1] <= self.right_edge[1] and \
self.left_edge[2] <= v_pos[2] and v_pos[2] <= self.right_edge[2]:
intersect_t = 0.0
+ if enter_t >= 0.0: intersect_t = enter_t
if not ((0.0 <= intersect_t) and (intersect_t < 1.0)): return 0
for i in range(3):
intersect[i] = v_pos[i] + intersect_t * v_dir[i]
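With the new enter_t keyword a caller can resume integration at a known parametric distance instead of recomputing the box intersection. For reference, a slab-method sketch of the entry parameter this replaces; it assumes no zero direction components:

    import numpy as np

    def ray_entry_t(v_pos, v_dir, left_edge, right_edge):
        t0 = (np.asarray(left_edge) - v_pos) / v_dir
        t1 = (np.asarray(right_edge) - v_pos) / v_dir
        tnear = np.minimum(t0, t1).max()   # last slab entry
        return max(tnear, 0.0)             # 0.0 if the ray starts inside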
@@ -710,15 +739,33 @@
dt = (exit_t - enter_t) / tf.ns # 4 samples should be dt=0.25
cdef int offset = ci[0] * (self.dims[1] + 1) * (self.dims[2] + 1) \
+ ci[1] * (self.dims[2] + 1) + ci[2]
+ # The initial and final values can be linearly interpolated between; so
+ # we just have to calculate our initial and final values.
+ cdef np.float64_t slopes[6]
for i in range(3):
- cell_left[i] = ci[i] * self.dds[i] + self.left_edge[i]
- # this gets us dp as the current first sample position
- pos[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
- dp[i] = pos[i] - cell_left[i]
+ dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+ dp[i] -= ci[i] * self.dds[i] + self.left_edge[i]
dp[i] *= self.idds[i]
ds[i] = v_dir[i] * self.idds[i] * dt
- local_dds[i] = v_dir[i] * dt
+ for i in range(self.n_fields):
+ slopes[i] = offset_interpolate(self.dims, dp,
+ self.data[i] + offset)
+ for i in range(3):
+ dp[i] += ds[i] * tf.ns
+ cdef np.float64_t temp
+ for i in range(self.n_fields):
+ temp = slopes[i]
+ slopes[i] -= offset_interpolate(self.dims, dp,
+ self.data[i] + offset)
+ slopes[i] *= -1.0/tf.ns
+ self.dvs[i] = temp
if self.star_list != NULL:
+ for i in range(3):
+ cell_left[i] = ci[i] * self.dds[i] + self.left_edge[i]
+ # this gets us dp as the current first sample position
+ pos[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+ dp[i] -= tf.ns * ds[i]
+ local_dds[i] = v_dir[i] * dt
ballq = kdtree_utils.kd_nearest_range3(
self.star_list, cell_left[0] + self.dds[0]*0.5,
cell_left[1] + self.dds[1]*0.5,
@@ -726,15 +773,16 @@
self.star_er + 0.9*self.dds[0])
# ~0.866 + a bit
for dti in range(tf.ns):
- for i in range(self.n_fields):
- self.dvs[i] = offset_interpolate(self.dims, dp, self.data[i] + offset)
#if (dv < tf.x_bounds[0]) or (dv > tf.x_bounds[1]):
# continue
- if self.star_list != NULL: self.add_stars(ballq, dt, pos, rgba)
+ if self.star_list != NULL:
+ self.add_stars(ballq, dt, pos, rgba)
+ for i in range(3):
+ dp[i] += ds[i]
+ pos[i] += local_dds[i]
tf.eval_transfer(dt, self.dvs, rgba, grad)
- for i in range(3):
- dp[i] += ds[i]
- pos[i] += local_dds[i]
+ for i in range(self.n_fields):
+ self.dvs[i] += slopes[i]
if ballq != NULL: kdtree_utils.kd_res_free(ballq)
@cython.boundscheck(False)
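The inner sampling loop no longer interpolates every field at every sub-sample: each field is interpolated once at segment entry and once at exit, and the loop advances by a constant per-sample slope. A sketch of that linear blend:

    def sample_segment(f_enter, f_exit, ns):
        # the ns sample values implied by the slopes[] bookkeeping above
        slope = (f_exit - f_enter) / ns
        value, out = f_enter, []
        for _ in range(ns):
            out.append(value)
            value += slope
        return out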
@@ -841,315 +889,25 @@
for i in range(3):
vel[i] /= vel_mag[0]
- #@cython.boundscheck(False)
- #@cython.wraparound(False)
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
@cython.cdivision(True)
def get_isocontour_triangles(self, np.float64_t isovalue, int field_id = 0):
# Much of this was inspired by code from Paul Bourke's website:
# http://paulbourke.net/geometry/polygonise/
- cdef int *edge_table=[
- 0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
- 0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
- 0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
- 0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
- 0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
- 0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
- 0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
- 0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
- 0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
- 0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
- 0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
- 0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
- 0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
- 0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
- 0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
- 0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
- 0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
- 0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
- 0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
- 0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
- 0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
- 0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
- 0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
- 0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
- 0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
- 0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
- 0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
- 0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
- 0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
- 0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
- 0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
- 0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0 ]
-
- cdef int **tri_table = \
- [[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1],
- [3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1],
- [3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1],
- [3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1],
- [9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1],
- [1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1],
- [9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1],
- [2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1],
- [8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1],
- [9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1],
- [4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1],
- [3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1],
- [1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1],
- [4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1],
- [4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1],
- [9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1],
- [1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1],
- [5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1],
- [2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1],
- [9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1],
- [0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1],
- [2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1],
- [10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1],
- [4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1],
- [5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1],
- [5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1],
- [9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1],
- [0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1],
- [1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1],
- [10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1],
- [8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1],
- [2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1],
- [7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1],
- [9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1],
- [2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1],
- [11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1],
- [9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1],
- [5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1],
- [11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1],
- [11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1],
- [1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1],
- [9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1],
- [5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1],
- [2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1],
- [0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1],
- [5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1],
- [6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1],
- [0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1],
- [3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1],
- [6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1],
- [5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1],
- [1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1],
- [10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1],
- [6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1],
- [1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1],
- [8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1],
- [7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1],
- [3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1],
- [5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1],
- [0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1],
- [9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1],
- [8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1],
- [5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1],
- [0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1],
- [6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1],
- [10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1],
- [10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1],
- [8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1],
- [1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1],
- [3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1],
- [0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1],
- [10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1],
- [0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1],
- [3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1],
- [6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1],
- [9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1],
- [8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1],
- [3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1],
- [6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1],
- [0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1],
- [10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1],
- [10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1],
- [1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1],
- [2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1],
- [7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1],
- [7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1],
- [2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1],
- [1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1],
- [11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1],
- [8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1],
- [0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1],
- [7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1],
- [10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1],
- [2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1],
- [6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1],
- [7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1],
- [2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1],
- [1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1],
- [10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1],
- [10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1],
- [0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1],
- [7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1],
- [6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1],
- [8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1],
- [9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1],
- [6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1],
- [1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1],
- [4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1],
- [10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1],
- [8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1],
- [0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1],
- [1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1],
- [8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1],
- [10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1],
- [4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1],
- [10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1],
- [5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1],
- [11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1],
- [9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1],
- [6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1],
- [7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1],
- [3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1],
- [7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1],
- [9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1],
- [3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1],
- [6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1],
- [9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1],
- [1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1],
- [4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1],
- [7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1],
- [6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1],
- [3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1],
- [0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1],
- [6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1],
- [1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1],
- [0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1],
- [11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1],
- [6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1],
- [5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1],
- [9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1],
- [1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1],
- [1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1],
- [10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1],
- [0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1],
- [5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1],
- [10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1],
- [11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1],
- [0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1],
- [9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1],
- [7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1],
- [2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1],
- [8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1],
- [9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1],
- [9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1],
- [1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1],
- [9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1],
- [9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1],
- [5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1],
- [0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1],
- [10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1],
- [2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1],
- [0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1],
- [0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1],
- [9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1],
- [5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1],
- [3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1],
- [5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1],
- [8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1],
- [0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1],
- [9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1],
- [0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1],
- [1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1],
- [3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1],
- [4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1],
- [9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1],
- [11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1],
- [11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1],
- [2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1],
- [9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1],
- [3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1],
- [1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1],
- [4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1],
- [4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1],
- [0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1],
- [3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1],
- [3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1],
- [0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1],
- [9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1],
- [1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
- [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]]
+ # Cython makes us toss this in here, which I think will change in a
+ # future release.
cdef int i, j, k, n
cdef int offset
cdef np.float64_t gv[8]
cdef int cubeindex
- cdef np.float64_t vertlist[12][3]
- cdef int ntriang = 0
cdef np.float64_t *intdata = NULL
cdef np.float64_t x, y, z
cdef np.float64_t mu
- cdef Triangle *first = NULL
- cdef Triangle *current = NULL
+ cdef TriangleCollection triangles
+ triangles.first = triangles.current = NULL
+ triangles.count = 0
x = self.left_edge[0]
for i in range(self.dims[0]):
y = self.left_edge[1]
@@ -1160,68 +918,578 @@
+ j * (self.dims[2] + 1) + k
intdata = self.data[field_id] + offset
offset_fill(self.dims, intdata, gv)
- cubeindex = 0
- for n in range(8):
- if gv[n] < isovalue:
- cubeindex |= (1 << n)
- if edge_table[cubeindex] == 0:
- z += self.dds[2]
- continue
- if (edge_table[cubeindex] & 1): # 0,0,0 with 1,0,0
- vertex_interp(gv[0], gv[1], isovalue, vertlist[0],
- self.dds, x, y, z, 0, 1)
- if (edge_table[cubeindex] & 2): # 1,0,0 with 1,1,0
- vertex_interp(gv[1], gv[2], isovalue, vertlist[1],
- self.dds, x, y, z, 1, 2)
- if (edge_table[cubeindex] & 4): # 1,1,0 with 0,1,0
- vertex_interp(gv[2], gv[3], isovalue, vertlist[2],
- self.dds, x, y, z, 2, 3)
- if (edge_table[cubeindex] & 8): # 0,1,0 with 0,0,0
- vertex_interp(gv[3], gv[0], isovalue, vertlist[3],
- self.dds, x, y, z, 3, 0)
- if (edge_table[cubeindex] & 16): # 0,0,1 with 1,0,1
- vertex_interp(gv[4], gv[5], isovalue, vertlist[4],
- self.dds, x, y, z, 4, 5)
- if (edge_table[cubeindex] & 32): # 1,0,1 with 1,1,1
- vertex_interp(gv[5], gv[6], isovalue, vertlist[5],
- self.dds, x, y, z, 5, 6)
- if (edge_table[cubeindex] & 64): # 1,1,1 with 0,1,1
- vertex_interp(gv[6], gv[7], isovalue, vertlist[6],
- self.dds, x, y, z, 6, 7)
- if (edge_table[cubeindex] & 128): # 0,1,1 with 0,0,1
- vertex_interp(gv[7], gv[4], isovalue, vertlist[7],
- self.dds, x, y, z, 7, 4)
- if (edge_table[cubeindex] & 256): # 0,0,0 with 0,0,1
- vertex_interp(gv[0], gv[4], isovalue, vertlist[8],
- self.dds, x, y, z, 0, 4)
- if (edge_table[cubeindex] & 512): # 1,0,0 with 1,0,1
- vertex_interp(gv[1], gv[5], isovalue, vertlist[9],
- self.dds, x, y, z, 1, 5)
- if (edge_table[cubeindex] & 1024): # 1,1,0 with 1,1,1
- vertex_interp(gv[2], gv[6], isovalue, vertlist[10],
- self.dds, x, y, z, 2, 6)
- if (edge_table[cubeindex] & 2048): # 0,1,0 with 0,1,1
- vertex_interp(gv[3], gv[7], isovalue, vertlist[11],
- self.dds, x, y, z, 3, 7)
- n = 0
- while 1:
- current = AddTriangle(current,
- vertlist[tri_table[cubeindex][n ]],
- vertlist[tri_table[cubeindex][n+1]],
- vertlist[tri_table[cubeindex][n+2]])
- ntriang += 1
- if first == NULL: first = current
- n += 3
- if tri_table[cubeindex][n] == -1: break
+ march_cubes(gv, isovalue, self.dds, x, y, z,
+ &triangles)
z += self.dds[2]
y += self.dds[1]
x += self.dds[0]
# Hallo, we are all done.
cdef np.ndarray[np.float64_t, ndim=2] vertices
- vertices = np.zeros((ntriang*3,3), dtype='float64')
- FillAndWipeTriangles(vertices, first)
+ vertices = np.zeros((triangles.count*3,3), dtype='float64')
+ FillAndWipeTriangles(vertices, triangles.first)
return vertices
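The edge_table/tri_table machinery above is factored into a march_cubes() helper shared by the methods that follow. The classification step it keeps is unchanged; a sketch:

    def cube_index(gv, isovalue):
        # set bit n when corner n falls below the isovalue; the resulting
        # 8-bit mask indexes edge_table and tri_table
        idx = 0
        for n in range(8):
            if gv[n] < isovalue:
                idx |= (1 << n)
        return idx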
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def march_cubes_grid(np.float64_t isovalue,
+ np.ndarray[np.float64_t, ndim=3] values,
+ np.ndarray[np.int32_t, ndim=3] mask,
+ np.ndarray[np.float64_t, ndim=1] left_edge,
+ np.ndarray[np.float64_t, ndim=1] dxs,
+ obj_sample = None):
+ cdef int dims[3]
+ cdef int i, j, k, n, m, nt
+ cdef int offset
+ cdef np.float64_t gv[8], pos[3], point[3], idds[3]
+ cdef np.float64_t *intdata = NULL
+ cdef np.float64_t *sdata = NULL
+ cdef np.float64_t x, y, z, do_sample
+ cdef np.ndarray[np.float64_t, ndim=3] sample
+ cdef np.ndarray[np.float64_t, ndim=1] sampled
+ cdef TriangleCollection triangles
+ cdef Triangle *last, *current
+ if obj_sample is not None:
+ sample = obj_sample
+ sdata = <np.float64_t *> sample.data
+ do_sample = 1
+ else:
+ do_sample = 0
+ for i in range(3):
+ dims[i] = values.shape[i] - 1
+ idds[i] = 1.0 / dxs[i]
+ triangles.first = triangles.current = NULL
+ last = current = NULL
+ triangles.count = 0
+ cdef np.float64_t *data = <np.float64_t *> values.data
+ cdef np.float64_t *dds = <np.float64_t *> dxs.data
+ pos[0] = left_edge[0]
+ for i in range(dims[0]):
+ pos[1] = left_edge[1]
+ for j in range(dims[1]):
+ pos[2] = left_edge[2]
+ for k in range(dims[2]):
+ if mask[i,j,k] == 1:
+ offset = i * (dims[1] + 1) * (dims[2] + 1) \
+ + j * (dims[2] + 1) + k
+ intdata = data + offset
+ offset_fill(dims, intdata, gv)
+ nt = march_cubes(gv, isovalue, dds, pos[0], pos[1], pos[2],
+ &triangles)
+ if do_sample == 1 and nt > 0:
+ # At each triangle's center, sample our secondary field
+ if last == NULL and triangles.first != NULL:
+ current = triangles.first
+ last = NULL
+ elif last != NULL:
+ current = last.next
+ while current != NULL:
+ for n in range(3):
+ point[n] = 0.0
+ for n in range(3):
+ for m in range(3):
+ point[m] += (current.p[n][m]-pos[m])*idds[m]
+ for n in range(3):
+ point[n] /= 3.0
+ current.val = offset_interpolate(dims, point,
+ sdata + offset)
+ last = current
+ if current.next == NULL: break
+ current = current.next
+ pos[2] += dds[2]
+ pos[1] += dds[1]
+ pos[0] += dds[0]
+        # Hello, we are all done.
+ cdef np.ndarray[np.float64_t, ndim=2] vertices
+ vertices = np.zeros((triangles.count*3,3), dtype='float64')
+ if do_sample == 1:
+ sampled = np.zeros(triangles.count, dtype='float64')
+ FillTriangleValues(sampled, triangles.first)
+ FillAndWipeTriangles(vertices, triangles.first)
+ return vertices, sampled
+ FillAndWipeTriangles(vertices, triangles.first)
+ return vertices
+
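For orientation, a minimal sketch of driving this new entry point from Python; the shapes follow from the loop bounds above (the cell count along each axis is values.shape - 1), and none of this is a documented API:

    import numpy as np
    values = np.random.random((9, 9, 9))          # vertex-centered field
    mask = np.ones((8, 8, 8), dtype='int32')      # consider every cell
    left_edge = np.zeros(3, dtype='float64')
    dxs = np.ones(3, dtype='float64') / 8.0
    verts = march_cubes_grid(0.5, values, mask, left_edge, dxs)
    # verts is (3 * n_triangles, 3); consecutive row triples form triangles.
    # With obj_sample, a second field is sampled once per triangle center:
    verts, samples = march_cubes_grid(0.5, values, mask, left_edge, dxs,
                                      obj_sample=values)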
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def march_cubes_grid_flux(
+ np.float64_t isovalue,
+ np.ndarray[np.float64_t, ndim=3] values,
+ np.ndarray[np.float64_t, ndim=3] v1,
+ np.ndarray[np.float64_t, ndim=3] v2,
+ np.ndarray[np.float64_t, ndim=3] v3,
+ np.ndarray[np.float64_t, ndim=3] flux_field,
+ np.ndarray[np.int32_t, ndim=3] mask,
+ np.ndarray[np.float64_t, ndim=1] left_edge,
+ np.ndarray[np.float64_t, ndim=1] dxs):
+ cdef int dims[3]
+ cdef int i, j, k, n, m
+ cdef int offset
+ cdef np.float64_t gv[8]
+ cdef np.float64_t *intdata = NULL
+ cdef TriangleCollection triangles
+ cdef Triangle *current = NULL
+ cdef Triangle *last = NULL
+ cdef np.float64_t *data = <np.float64_t *> values.data
+ cdef np.float64_t *v1data = <np.float64_t *> v1.data
+ cdef np.float64_t *v2data = <np.float64_t *> v2.data
+ cdef np.float64_t *v3data = <np.float64_t *> v3.data
+ cdef np.float64_t *fdata = <np.float64_t *> flux_field.data
+ cdef np.float64_t *dds = <np.float64_t *> dxs.data
+ cdef np.float64_t flux = 0.0
+ cdef np.float64_t center[3], point[3], wval, temp, area, s
+ cdef np.float64_t cell_pos[3], fv[3], idds[3], normal[3]
+ for i in range(3):
+ dims[i] = values.shape[i] - 1
+ idds[i] = 1.0 / dds[i]
+ triangles.first = triangles.current = NULL
+ triangles.count = 0
+ cell_pos[0] = left_edge[0]
+ for i in range(dims[0]):
+ cell_pos[1] = left_edge[1]
+ for j in range(dims[1]):
+ cell_pos[2] = left_edge[2]
+ for k in range(dims[2]):
+ if mask[i,j,k] == 1:
+ offset = i * (dims[1] + 1) * (dims[2] + 1) \
+ + j * (dims[2] + 1) + k
+ intdata = data + offset
+ offset_fill(dims, intdata, gv)
+ march_cubes(gv, isovalue, dds,
+ cell_pos[0], cell_pos[1], cell_pos[2],
+ &triangles)
+ # Now our triangles collection has a bunch. We now
+ # calculate fluxes for each.
+ if last == NULL and triangles.first != NULL:
+ current = triangles.first
+ last = NULL
+ elif last != NULL:
+ current = last.next
+ while current != NULL:
+ # Calculate the center of the triangle
+ wval = 0.0
+ for n in range(3):
+ center[n] = 0.0
+ for n in range(3):
+ for m in range(3):
+ point[m] = (current.p[n][m]-cell_pos[m])*idds[m]
+ # Now we calculate the value at this point
+ temp = offset_interpolate(dims, point, intdata)
+ #print "something", temp, point[0], point[1], point[2]
+ wval += temp
+ for m in range(3):
+ center[m] += temp * point[m]
+ # Now we divide by our normalizing factor
+ for n in range(3):
+ center[n] /= wval
+ # We have our center point of the triangle, in 0..1
+ # coordinates. So now we interpolate our three
+ # fields.
+ fv[0] = offset_interpolate(dims, center, v1data + offset)
+ fv[1] = offset_interpolate(dims, center, v2data + offset)
+ fv[2] = offset_interpolate(dims, center, v3data + offset)
+ # We interpolate again the actual value data
+ wval = offset_interpolate(dims, center, fdata + offset)
+ # Now we have our flux vector and our field value!
+ # We just need a normal vector with which we can
+ # dot it. The normal should be equal to the gradient
+ # in the center of the triangle, or thereabouts.
+ eval_gradient(dims, center, intdata, normal)
+ temp = 0.0
+ for n in range(3):
+ temp += normal[n]*normal[n]
+ # Take the negative, to ensure it points inwardly
+ temp = -(temp**0.5)
+ # Dump this somewhere for now
+ temp = wval * (fv[0] * normal[0] +
+ fv[1] * normal[1] +
+ fv[2] * normal[2])/temp
+ # Now we need the area of the triangle. This will take
+ # a lot of time to calculate compared to the rest.
+ # We use Heron's formula.
+ for n in range(3):
+ fv[n] = 0.0
+ for n in range(3):
+ fv[0] += (current.p[0][n] - current.p[2][n])**2.0
+ fv[1] += (current.p[1][n] - current.p[0][n])**2.0
+ fv[2] += (current.p[2][n] - current.p[1][n])**2.0
+ s = 0.0
+ for n in range(3):
+ fv[n] = fv[n]**0.5
+ s += 0.5 * fv[n]
+ area = (s*(s-fv[0])*(s-fv[1])*(s-fv[2]))
+ area = area**0.5
+ flux += temp*area
+ last = current
+ if current.next == NULL: break
+ current = current.next
+ cell_pos[2] += dds[2]
+ cell_pos[1] += dds[1]
+ cell_pos[0] += dds[0]
+    # Hello, we are all done.
+ WipeTriangles(triangles.first)
+ return flux
+
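Each triangle's contribution above is (field value) x (flux vector dotted with the inward unit normal) x (area via Heron's formula). The same arithmetic restated in NumPy, with p, fv, wval and normal standing in for the interpolated quantities (a sketch, not code from this commit):

    import numpy as np
    def triangle_contribution(p, fv, wval, normal):
        # p: (3, 3) vertex coordinates; normal: the (unnormalized) gradient.
        nhat = -normal / np.sqrt(np.dot(normal, normal))  # inward-pointing
        directed = wval * np.dot(fv, nhat)
        a = np.linalg.norm(p[0] - p[2])                   # side lengths
        b = np.linalg.norm(p[1] - p[0])
        c = np.linalg.norm(p[2] - p[1])
        s = 0.5 * (a + b + c)                             # semi-perimeter
        area = np.sqrt(s * (s - a) * (s - b) * (s - c))   # Heron's formula
        return directed * area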
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef int march_cubes(
+ np.float64_t gv[8], np.float64_t isovalue,
+ np.float64_t dds[3],
+ np.float64_t x, np.float64_t y, np.float64_t z,
+ TriangleCollection *triangles):
+ cdef int *edge_table=[
+ 0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
+ 0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
+ 0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
+ 0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
+ 0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
+ 0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
+ 0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
+ 0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
+ 0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
+ 0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
+ 0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
+ 0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
+ 0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
+ 0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
+ 0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
+ 0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
+ 0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
+ 0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
+ 0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
+ 0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
+ 0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
+ 0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
+ 0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
+ 0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
+ 0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
+ 0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
+ 0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
+ 0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
+ 0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
+ 0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
+ 0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
+ 0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0 ]
+
+ cdef int **tri_table = \
+ [[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1],
+ [3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1],
+ [3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1],
+ [3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1],
+ [9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1],
+ [1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1],
+ [9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1],
+ [2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1],
+ [8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1],
+ [9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1],
+ [4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1],
+ [3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1],
+ [1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1],
+ [4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1],
+ [4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1],
+ [9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1],
+ [1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1],
+ [5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1],
+ [2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1],
+ [9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1],
+ [0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1],
+ [2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1],
+ [10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1],
+ [4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1],
+ [5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1],
+ [5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1],
+ [9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1],
+ [0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1],
+ [1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1],
+ [10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1],
+ [8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1],
+ [2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1],
+ [7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1],
+ [9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1],
+ [2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1],
+ [11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1],
+ [9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1],
+ [5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1],
+ [11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1],
+ [11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1],
+ [1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1],
+ [9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1],
+ [5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1],
+ [2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1],
+ [0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1],
+ [5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1],
+ [6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1],
+ [0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1],
+ [3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1],
+ [6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1],
+ [5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1],
+ [1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1],
+ [10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1],
+ [6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1],
+ [1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1],
+ [8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1],
+ [7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1],
+ [3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1],
+ [5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1],
+ [0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1],
+ [9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1],
+ [8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1],
+ [5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1],
+ [0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1],
+ [6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1],
+ [10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1],
+ [10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1],
+ [8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1],
+ [1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1],
+ [3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1],
+ [0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1],
+ [10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1],
+ [0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1],
+ [3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1],
+ [6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1],
+ [9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1],
+ [8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1],
+ [3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1],
+ [6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1],
+ [0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1],
+ [10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1],
+ [10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1],
+ [1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1],
+ [2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1],
+ [7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1],
+ [7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1],
+ [2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1],
+ [1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1],
+ [11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1],
+ [8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1],
+ [0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1],
+ [7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1],
+ [10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1],
+ [2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1],
+ [6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1],
+ [7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1],
+ [2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1],
+ [1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1],
+ [10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1],
+ [10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1],
+ [0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1],
+ [7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1],
+ [6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1],
+ [8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1],
+ [9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1],
+ [6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1],
+ [1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1],
+ [4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1],
+ [10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1],
+ [8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1],
+ [0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1],
+ [1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1],
+ [8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1],
+ [10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1],
+ [4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1],
+ [10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1],
+ [5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1],
+ [11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1],
+ [9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1],
+ [6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1],
+ [7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1],
+ [3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1],
+ [7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1],
+ [9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1],
+ [3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1],
+ [6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1],
+ [9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1],
+ [1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1],
+ [4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1],
+ [7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1],
+ [6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1],
+ [3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1],
+ [0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1],
+ [6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1],
+ [1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1],
+ [0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1],
+ [11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1],
+ [6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1],
+ [5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1],
+ [9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1],
+ [1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1],
+ [1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1],
+ [10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1],
+ [0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1],
+ [5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1],
+ [10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1],
+ [11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1],
+ [0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1],
+ [9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1],
+ [7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1],
+ [2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1],
+ [8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1],
+ [9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1],
+ [9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1],
+ [1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1],
+ [9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1],
+ [9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1],
+ [5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1],
+ [0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1],
+ [10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1],
+ [2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1],
+ [0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1],
+ [0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1],
+ [9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1],
+ [5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1],
+ [3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1],
+ [5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1],
+ [8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1],
+ [0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1],
+ [9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1],
+ [0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1],
+ [1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1],
+ [3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1],
+ [4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1],
+ [9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1],
+ [11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1],
+ [11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1],
+ [2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1],
+ [9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1],
+ [3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1],
+ [1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1],
+ [4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1],
+ [4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1],
+ [0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1],
+ [3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1],
+ [3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1],
+ [0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1],
+ [9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1],
+ [1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
+ [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]]
+ cdef np.float64_t vertlist[12][3]
+ cdef int cubeindex = 0
+ cdef int n
+ cdef int nt = 0
+ for n in range(8):
+ if gv[n] < isovalue:
+ cubeindex |= (1 << n)
+ if edge_table[cubeindex] == 0:
+ return 0
+ if (edge_table[cubeindex] & 1): # 0,0,0 with 1,0,0
+ vertex_interp(gv[0], gv[1], isovalue, vertlist[0],
+ dds, x, y, z, 0, 1)
+ if (edge_table[cubeindex] & 2): # 1,0,0 with 1,1,0
+ vertex_interp(gv[1], gv[2], isovalue, vertlist[1],
+ dds, x, y, z, 1, 2)
+ if (edge_table[cubeindex] & 4): # 1,1,0 with 0,1,0
+ vertex_interp(gv[2], gv[3], isovalue, vertlist[2],
+ dds, x, y, z, 2, 3)
+ if (edge_table[cubeindex] & 8): # 0,1,0 with 0,0,0
+ vertex_interp(gv[3], gv[0], isovalue, vertlist[3],
+ dds, x, y, z, 3, 0)
+ if (edge_table[cubeindex] & 16): # 0,0,1 with 1,0,1
+ vertex_interp(gv[4], gv[5], isovalue, vertlist[4],
+ dds, x, y, z, 4, 5)
+ if (edge_table[cubeindex] & 32): # 1,0,1 with 1,1,1
+ vertex_interp(gv[5], gv[6], isovalue, vertlist[5],
+ dds, x, y, z, 5, 6)
+ if (edge_table[cubeindex] & 64): # 1,1,1 with 0,1,1
+ vertex_interp(gv[6], gv[7], isovalue, vertlist[6],
+ dds, x, y, z, 6, 7)
+ if (edge_table[cubeindex] & 128): # 0,1,1 with 0,0,1
+ vertex_interp(gv[7], gv[4], isovalue, vertlist[7],
+ dds, x, y, z, 7, 4)
+ if (edge_table[cubeindex] & 256): # 0,0,0 with 0,0,1
+ vertex_interp(gv[0], gv[4], isovalue, vertlist[8],
+ dds, x, y, z, 0, 4)
+ if (edge_table[cubeindex] & 512): # 1,0,0 with 1,0,1
+ vertex_interp(gv[1], gv[5], isovalue, vertlist[9],
+ dds, x, y, z, 1, 5)
+ if (edge_table[cubeindex] & 1024): # 1,1,0 with 1,1,1
+ vertex_interp(gv[2], gv[6], isovalue, vertlist[10],
+ dds, x, y, z, 2, 6)
+ if (edge_table[cubeindex] & 2048): # 0,1,0 with 0,1,1
+ vertex_interp(gv[3], gv[7], isovalue, vertlist[11],
+ dds, x, y, z, 3, 7)
+ n = 0
+ while 1:
+ triangles.current = AddTriangle(triangles.current,
+ vertlist[tri_table[cubeindex][n ]],
+ vertlist[tri_table[cubeindex][n+1]],
+ vertlist[tri_table[cubeindex][n+2]])
+ triangles.count += 1
+ nt += 1
+ if triangles.first == NULL:
+ triangles.first = triangles.current
+ n += 3
+ if tri_table[cubeindex][n] == -1: break
+ return nt
+
+
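As a worked example of the two tables: with only corner 0 below the isovalue, cubeindex = 1, and edge_table[1] = 0x109 sets bits 0, 3 and 8, i.e. the three cube edges meeting corner 0 (corners 0-1, 3-0 and 0-4); tri_table[1] then reads [0, 8, 3, -1, ...], emitting exactly one triangle from those three edge vertices. A two-line check:

    edges = 0x109                                     # edge_table[1]
    print [n for n in range(12) if edges & (1 << n)]  # [0, 3, 8]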
cdef class GridFace:
cdef int direction
cdef public np.float64_t coord
@@ -1347,7 +1615,7 @@
AdaptiveRayPacket *next
AdaptiveRayPacket *prev
AdaptiveRayPacket *brick_next
- #int cgi
+ int pgi
cdef class AdaptiveRaySource:
cdef np.float64_t center[3]
@@ -1360,7 +1628,10 @@
cdef AdaptiveRayPacket **lpacket_pointers
def __cinit__(self, center, rays_per_cell, initial_nside,
- np.float64_t normalization, brick_list, int max_nside = 8192):
+ np.float64_t normalization, brick_list,
+ np.ndarray[np.float64_t, ndim=2] ledges,
+ np.ndarray[np.float64_t, ndim=2] redges,
+ int max_nside = 8192):
cdef int i
self.max_nside = max_nside
self.center[0] = center[0]
@@ -1383,6 +1654,10 @@
self.lpacket_pointers[i] = self.packet_pointers[i] = NULL
self.normalization = normalization
self.nrays = 12*initial_nside*initial_nside
+ cdef int *grid_neighbors = <int*> malloc(sizeof(int) * (nbricks+1))
+ grid_neighbors[0] = nbricks
+ for i in range(nbricks):
+ grid_neighbors[i+1] = i
for i in range(self.nrays):
# Initialize rays here
ray = <AdaptiveRayPacket *> malloc(sizeof(AdaptiveRayPacket))
@@ -1401,14 +1676,13 @@
ray.pos[1] = self.center[1]
ray.pos[2] = self.center[2]
ray.brick_next = NULL
+ ray.pgi = -1
if last != NULL:
last.next = ray
- last.brick_next = ray
else:
self.first = ray
+ self.send_ray_home(ray, ledges, redges, grid_neighbors, 0)
last = ray
- self.packet_pointers[0] = self.first
- self.lpacket_pointers[0] = last
def __dealloc__(self):
cdef AdaptiveRayPacket *next
@@ -1418,6 +1692,7 @@
free(ray)
ray = next
free(self.packet_pointers)
+ free(self.lpacket_pointers)
def get_rays(self):
cdef AdaptiveRayPacket *ray = self.first
@@ -1436,80 +1711,173 @@
values[count, 1] = ray.value[1]
values[count, 2] = ray.value[2]
values[count, 3] = ray.value[3]
+ if ray.t < 0.5:
+ print "PROBLEM",
+ print count, ray.ipix, ray.nside, ray.t,
+ print "vd", ray.v_dir[0], ray.v_dir[1], ray.v_dir[2],
+ print "pos", ray.pos[0], ray.pos[1], ray.pos[2],
+ print "pgi", ray.pgi
count += 1
ray = ray.next
return info, values
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ cdef np.float64_t integrate_ray(self, AdaptiveRayPacket *ray,
+ PartitionedGrid pg, TransferFunctionProxy tf):
+ cdef np.float64_t self_center[3], ray_v_dir[3], ray_value[4]
+ self_center[0] = self.center[0]
+ self_center[1] = self.center[1]
+ self_center[2] = self.center[2]
+ enter_t = ray.t
+ ray_v_dir[0] = ray.v_dir[0]
+ ray_v_dir[1] = ray.v_dir[1]
+ ray_v_dir[2] = ray.v_dir[2]
+ ray_value[0] = ray.value[0]
+ ray_value[1] = ray.value[1]
+ ray_value[2] = ray.value[2]
+ ray_value[3] = ray.value[3]
+ hit = pg.integrate_ray(self_center, ray_v_dir, ray_value, tf, &ray.t,
+ ray.t)
+ ray.value[0] = ray_value[0]
+ ray.value[1] = ray_value[1]
+ ray.value[2] = ray_value[2]
+ ray.value[3] = ray_value[3]
+ if hit == 0: dt = 0.0
+ else: dt = (ray.t - enter_t)/hit
+ for i in range(3):
+ ray.pos[i] = ray.v_dir[i] * ray.t + self.center[i]
+ return dt
+
+ cdef send_ray_home(self, AdaptiveRayPacket *ray,
+ np.ndarray[np.float64_t, ndim=2] ledges,
+ np.ndarray[np.float64_t, ndim=2] redges,
+ int *grid_neighbors, np.float64_t dt,
+ int skip_append = 0):
+ cdef int found_a_home = 0
+ cdef int i, j, npgi
+ cdef np.float64_t offpos[3]
+ for i in range(3):
+ offpos[i] = ray.pos[i] + ray.v_dir[i] * 1e-6*dt
+ for j in range(grid_neighbors[0]):
+ i = grid_neighbors[j+1]
+ if ((ledges[i, 0] <= offpos[0] <= redges[i, 0]) and
+ (ledges[i, 1] <= offpos[1] <= redges[i, 1]) and
+ (ledges[i, 2] <= offpos[2] <= redges[i, 2]) and
+ ray.pgi != i):
+ if not skip_append: self.append_to_packets(i, ray)
+ ray.pgi = i
+ npgi = i
+ found_a_home = 1
+ break
+ if found_a_home == 0:
+ #print "Non-neighboring area", ray.pgi, ray.ipix, ray.nside
+ for i in range(ledges.shape[0]):
+ if ((ledges[i, 0] <= offpos[0] <= redges[i, 0]) and
+ (ledges[i, 1] <= offpos[1] <= redges[i, 1]) and
+ (ledges[i, 2] <= offpos[2] <= redges[i, 2])):
+ #print "Found a home!", i, ray.ipix, ray.nside, ray.pgi
+ if not skip_append: self.append_to_packets(i, ray)
+ ray.pgi = i
+ npgi = i
+ found_a_home = 1
+ break
+ if found_a_home == 0:
+ raise RuntimeError
+
+ cdef append_to_packets(self, int pgi, AdaptiveRayPacket *ray):
+ # packet_pointers are pointers to the *first* packet in a given brick
+ # lpacket_pointers point to the *final* packet in a given brick, for
+ # easy appending.
+ if self.lpacket_pointers[pgi] == NULL or \
+ self.packet_pointers[pgi] == NULL:
+ self.packet_pointers[pgi] = \
+ self.lpacket_pointers[pgi] = ray
+ ray.brick_next = NULL
+ else:
+ self.lpacket_pointers[pgi].brick_next = ray
+ self.lpacket_pointers[pgi] = ray
+ ray.brick_next = NULL
+
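append_to_packets is a constant-time tail append on a per-brick singly linked list; restated in plain Python for clarity (heads and tails stand in for packet_pointers and lpacket_pointers; a sketch, not part of the diff):

    def append(heads, tails, pgi, ray):
        ray.brick_next = None
        if tails[pgi] is None or heads[pgi] is None:
            heads[pgi] = tails[pgi] = ray   # first packet in this brick
        else:
            tails[pgi].brick_next = ray     # hang off the old tail
            tails[pgi] = ray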
@cython.boundscheck(False)
@cython.wraparound(False)
def integrate_brick(self, PartitionedGrid pg, TransferFunctionProxy tf,
int pgi, np.ndarray[np.float64_t, ndim=2] ledges,
- np.ndarray[np.float64_t, ndim=2] redges):
- cdef np.float64_t domega
- domega = self.get_domega(pg.left_edge, pg.right_edge)
+ np.ndarray[np.float64_t, ndim=2] redges,
+ pgs, int inside = -1):
+ cdef np.float64_t domega, dt
+ cdef PartitionedGrid pgn
#print "dOmega", domega, self.nrays
- cdef int count = 0
- cdef int i, j
- cdef AdaptiveRayPacket *ray = self.packet_pointers[pgi]
+ cdef intersects
+ cdef int i, j, npgi, refined
+ cdef AdaptiveRayPacket *ray2, *ray = self.packet_pointers[pgi]
cdef AdaptiveRayPacket *next
+ cdef AdaptiveRayPacket **pray
cdef int *grid_neighbors = self.find_neighbors(pgi, pg.dds[0], ledges, redges)
- cdef np.float64_t enter_t, dt, offpos[3]
+ cdef int *grid2_neighbors
+ cdef np.float64_t enter_t, offpos[3]
cdef int found_a_home, hit
- #print "Grid: ", pgi, "has", grid_neighbors[0], "neighbors"
- # Some compilers throw errors on the passing of the center, v_dir and
- # value
- cdef np.float64_t self_center[3], ray_v_dir[3], ray_value[4]
- self_center[0] = self.center[0]
- self_center[1] = self.center[1]
- self_center[2] = self.center[2]
while ray != NULL:
- # Note that we may end up splitting a ray such that it ends up
- # outside the brick! This will likely cause them to get lost.
- #print count
- count +=1
- # We don't need to check for intersection anymore, as we are the
- # Ruler of the planet Omicron Persei 8
- #if self.intersects(ray, pg):
- ray = self.refine_ray(ray, domega, pg.dds[0],
- pg.left_edge, pg.right_edge)
- enter_t = ray.t
- ray_v_dir[0] = ray.v_dir[0]
- ray_v_dir[1] = ray.v_dir[1]
- ray_v_dir[2] = ray.v_dir[2]
- ray_value[0] = ray.value[0]
- ray_value[1] = ray.value[1]
- ray_value[2] = ray.value[2]
- ray_value[3] = ray.value[3]
- hit = pg.integrate_ray(self_center, ray_v_dir, ray_value, tf, &ray.t)
- ray.value[0] = ray_value[0]
- ray.value[1] = ray_value[1]
- ray.value[2] = ray_value[2]
- ray.value[3] = ray_value[3]
- if hit == 0: dt = 0.0
- else: dt = (ray.t - enter_t)/hit
- for i in range(3):
- ray.pos[i] = ray.v_dir[i] * ray.t + self.center[i]
- offpos[i] = ray.pos[i] + ray.v_dir[i] * 1e-5*dt
- # We set 'next' after the refinement has occurred
- next = ray.brick_next
- found_a_home = 0
- for j in range(grid_neighbors[0]):
- i = grid_neighbors[j+1]
- if ((ledges[i, 0] <= offpos[0] <= redges[i, 0]) and
- (ledges[i, 1] <= offpos[1] <= redges[i, 1]) and
- (ledges[i, 2] <= offpos[2] <= redges[i, 2])):
- if self.lpacket_pointers[i] == NULL:
- self.packet_pointers[i] = \
- self.lpacket_pointers[i] = ray
- ray.brick_next = NULL
- else:
- self.lpacket_pointers[i].brick_next = ray
- self.lpacket_pointers[i] = ray
- ray.brick_next = NULL
- #ray.cgi = i
- found_a_home = 1
- break
+ #print "Integrating", pgi, ray.pgi, ray.ipix, ray.nside
+ if pgi != ray.pgi:
+ self.send_ray_home(ray, ledges, redges, grid_neighbors, dt)
+ if ray.pgi != pgi:
+ ray = ray.brick_next
+ continue
+ # We start in this brick, and then we integrate to the edge
+ self.packet_pointers[pgi] = next = ray.brick_next
+ if ray.t >= 1.0:
+ ray = next
+ continue
+ dt = self.integrate_ray(ray, pg, tf)
+ # Now the ray has moved, so we grab .brick_next first, then we
+ # move it to its new home
+ self.send_ray_home(ray, ledges, redges, grid_neighbors, dt, 1)
+ # We now are moving into a new PG, which we check for refinement
+ pgn = pgs[ray.pgi]
+ domega = self.get_domega(pgn.left_edge, pgn.right_edge)
+ pray = &ray
+ refined = self.refine_ray(pray, domega, pgn.dds[0],
+ pgn.left_edge, pgn.right_edge)
+ if refined == 0: self.append_to_packets(ray.pgi, ray)
+ # At this point we can no longer access ray, as it is no longer
+ # safe.
+ ray2 = pray[0]
+ for i in range(refined*4):
+ # If we have been refined, send the ray to its appropriate
+ # location.
+ self.send_ray_home(ray2, ledges, redges, grid_neighbors, 0.0, 1)
+ # If it wants to go back in time that is fine but it needs to
+ # make sure it gets forward in time eventually
+ while ray2.pgi <= pgi and ray2.t < 1.0:
+ #print "Recursing", ray2.pgi, pgi, ray2.t, ray2.nside, ray2.ipix, dt
+ # Now we grab a new set of neighbors and whatnot
+ pgn = pgs[ray2.pgi]
+ grid2_neighbors = self.find_neighbors(ray2.pgi, pgn.dds[0],
+ ledges, redges)
+ # We just integrate, we don't bother with the full brick
+ # integration. This means no recursive refinement, and
+ # potential undersampling
+ dt = self.integrate_ray(ray2, pgn, tf)
+ # Now we send this ray home. Hopefully it'll once again be
+ # forward in time.
+ self.send_ray_home(ray2, ledges, redges, grid2_neighbors,
+ dt, 1)
+ free(grid2_neighbors)
+                # This tosses us to the next one in line, of the four.
+ self.append_to_packets(ray2.pgi, ray2)
+ ray2 = ray2.next
+ # We use this because it's been set previously.
ray = next
+ # We check to see if anything has been *added* to the queue, via a
+ # send_ray_home call, here. Otherwise we might end up in the
+ # situation that the final ray is refined, thus next is NULL, but
+ # there are more rays to work on because they have been added via
+ # refinement.
+ if ray == NULL and self.packet_pointers[pgi] != NULL:
+ ray = self.packet_pointers[pgi]
+ #print "Packet pointers!", ray.ipix
free(grid_neighbors)
@cython.boundscheck(False)
@@ -1530,8 +1898,9 @@
gre[0] = redges[this_grid, 0] + dds
gre[1] = redges[this_grid, 1] + dds
gre[2] = redges[this_grid, 2] + dds
- for i in range(this_grid+1, ledges.shape[0]):
+ for i in range(ledges.shape[0]):
# Check for overlap
+ if i == this_grid: continue
if ((gle[0] <= redges[i, 0] and gre[0] >= ledges[i, 0]) and
(gle[1] <= redges[i, 1] and gre[1] >= ledges[i, 1]) and
(gle[2] <= redges[i, 2] and gre[2] >= ledges[i, 2])):
@@ -1539,8 +1908,9 @@
cdef int *tr = <int *> malloc(sizeof(int) * (count + 1))
tr[0] = count
count = 0
- for i in range(this_grid+1, ledges.shape[0]):
+ for i in range(ledges.shape[0]):
# Check for overlap
+ if i == this_grid: continue
if ((gle[0] <= redges[i, 0] and gre[0] >= ledges[i, 0]) and
(gle[1] <= redges[i, 1] and gre[1] >= ledges[i, 1]) and
(gle[2] <= redges[i, 2] and gre[2] >= ledges[i, 2])):
@@ -1557,6 +1927,19 @@
if ray.pos[i] > pg.right_edge[i]: return 0
return 1
+ cdef int find_owner(self, AdaptiveRayPacket *ray,
+ int *neighbors,
+ np.ndarray[np.float64_t, ndim=2] ledges,
+ np.ndarray[np.float64_t, ndim=2] redges):
+ cdef int i, pgi = -1
+ for i in range(ledges.shape[0]):
+ pgi = i
+ if ((ray.pos[0] <= redges[i, 0] and ray.pos[0] >= ledges[i, 0]) and
+ (ray.pos[1] <= redges[i, 1] and ray.pos[1] >= ledges[i, 1]) and
+ (ray.pos[2] <= redges[i, 2] and ray.pos[2] >= ledges[i, 2])):
+ return pgi
+ return -1
+
cdef np.float64_t get_domega(self, np.float64_t left_edge[3],
np.float64_t right_edge[3]):
# We should calculate the subtending angle at the maximum radius of the
@@ -1571,7 +1954,7 @@
r2[0] = (edge[i][0] - self.center[0])**2.0
for j in range(2):
r2[1] = r2[0] + (edge[j][1] - self.center[1])**2.0
- for k in range(3):
+ for k in range(2):
r2[2] = r2[1] + (edge[k][2] - self.center[2])**2.0
max_r2 = fmax(max_r2, r2[2])
domega = 4.0 * 3.1415926 * max_r2 # Used to be / Nrays
@@ -1580,57 +1963,50 @@
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
- cdef AdaptiveRayPacket *refine_ray(self, AdaptiveRayPacket *ray,
- np.float64_t domega,
- np.float64_t dx,
- np.float64_t left_edge[3],
- np.float64_t right_edge[3]):
- # This should be recursive; we are not correctly applying split
- # criteria multiple times.
+ cdef int refine_ray(self, AdaptiveRayPacket **pray,
+ np.float64_t domega, np.float64_t dx,
+ np.float64_t left_edge[3],
+ np.float64_t right_edge[3]):
+ cdef AdaptiveRayPacket *ray = pray[0]
+ cdef AdaptiveRayPacket *new_rays[4]
cdef long Nrays = 12 * ray.nside * ray.nside
cdef int i, j
if domega/Nrays < dx*dx/self.rays_per_cell:
- #print ray.nside, domega/Nrays, dx, (domega/Nrays * self.rays_per_cell)/(dx*dx)
- return ray
- if ray.nside >= self.max_nside: return ray
- #print "Refining %s from %s to %s" % (ray.ipix, ray.nside, ray.nside*2)
- # Now we make four new ones
+ return 0
+ if ray.nside >= self.max_nside: return 0
cdef double v_dir[3]
- cdef AdaptiveRayPacket *prev = ray.prev
- # It is important to note here that brick_prev is a local variable for
- # the newly created rays, not the previous ray in this brick, as that
- # has already been passed on to its next brick
- cdef AdaptiveRayPacket *brick_prev = NULL
+ # We need a record of the previous one because we're inserting into a
+ # linked list.
for i in range(4):
- new_ray = <AdaptiveRayPacket *> malloc(
+ new_rays[i] = <AdaptiveRayPacket *> malloc(
sizeof(AdaptiveRayPacket))
- new_ray.nside = ray.nside * 2
- new_ray.ipix = ray.ipix * 4 + i
- new_ray.t = ray.t
- #new_ray.cgi = ray.cgi
- new_ray.prev = prev
- if new_ray.prev != NULL:
- new_ray.prev.next = new_ray
- if brick_prev != NULL:
- brick_prev.brick_next = new_ray
- prev = brick_prev = new_ray
+ new_rays[i].nside = ray.nside * 2
+ new_rays[i].ipix = ray.ipix * 4 + i
+ new_rays[i].t = ray.t
healpix_interface.pix2vec_nest(
- new_ray.nside, new_ray.ipix, v_dir)
+ new_rays[i].nside, new_rays[i].ipix, v_dir)
for j in range(3):
- new_ray.v_dir[j] = v_dir[j] * self.normalization
- new_ray.value[j] = ray.value[j]
- new_ray.pos[j] = self.center[j] + ray.t * new_ray.v_dir[j]
- new_ray.value[3] = ray.value[3]
-
- new_ray.next = ray.next
- new_ray.brick_next = ray.brick_next
- if new_ray.next != NULL:
- new_ray.next.prev = new_ray
+ new_rays[i].v_dir[j] = v_dir[j] * self.normalization
+ new_rays[i].value[j] = ray.value[j]
+ new_rays[i].pos[j] = self.center[j] + ray.t * new_rays[i].v_dir[j]
+ new_rays[i].value[3] = ray.value[3]
+ # Insert into the external list
+ if ray.prev != NULL:
+ ray.prev.next = new_rays[0]
+ new_rays[0].prev = ray.prev
+ new_rays[3].next = ray.next
+ if ray.next != NULL:
+ ray.next.prev = new_rays[3]
+ for i in range(3):
+ # Connect backward and forward
+ new_rays[i].next = new_rays[i+1]
+ new_rays[3-i].prev = new_rays[2-i]
if self.first == ray:
- self.first = new_ray.prev.prev.prev
+ self.first = new_rays[0]
+ self.nrays += 3
free(ray)
- self.nrays += 3
- return new_ray.prev.prev.prev
+ pray[0] = new_rays[0]
+ return 1
# From Enzo:
# dOmega = 4 pi r^2/Nrays
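The split test at the top of refine_ray compares each ray's share of the solid angle against the sampling budget; restated as a predicate (names invented for this sketch):

    def needs_split(domega, nside, dx, rays_per_cell, max_nside):
        nrays = 12 * nside * nside        # rays in a HEALPix shell at nside
        if domega / nrays < dx * dx / rays_per_cell:
            return False                  # already sampling finely enough
        return nside < max_nside

Each split replaces one ray at nside with four children at 2*nside (ipix*4 + i, NESTED ordering), which is why nrays grows by three per refinement.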
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -27,6 +27,10 @@
cimport numpy as np
cimport cython
+cdef extern from "stdlib.h":
+ # NOTE that size_t might not be int
+ void *alloca(int)
+
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
@@ -140,3 +144,63 @@
rv[fi] = field[ind[0], ind[1], ind[2]]
return rv
raise KeyError
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def kdtree_get_choices(np.ndarray[np.float64_t, ndim=3] data,
+ np.ndarray[np.float64_t, ndim=1] l_corner,
+ np.ndarray[np.float64_t, ndim=1] r_corner):
+ cdef int i, j, k, dim, n_unique, best_dim, n_best, n_grids, addit, my_split
+ n_grids = data.shape[0]
+ cdef np.float64_t **uniquedims, *uniques, split
+ uniquedims = <np.float64_t **> alloca(3 * sizeof(np.float64_t*))
+ for i in range(3):
+ uniquedims[i] = <np.float64_t *> \
+ alloca(2*n_grids * sizeof(np.float64_t))
+ my_max = 0
+ for dim in range(3):
+ n_unique = 0
+ uniques = uniquedims[dim]
+ for i in range(n_grids):
+ # Check for disqualification
+ for j in range(2):
+ #print "Checking against", i,j,dim,data[i,j,dim]
+ if not (l_corner[dim] < data[i, j, dim] and
+ data[i, j, dim] < r_corner[dim]):
+ #print "Skipping ", data[i,j,dim]
+ continue
+ skipit = 0
+ # Add our left ...
+ for k in range(n_unique):
+ if uniques[k] == data[i, j, dim]:
+ skipit = 1
+ #print "Identified", uniques[k], data[i,j,dim], n_unique
+ break
+ if skipit == 0:
+ uniques[n_unique] = data[i, j, dim]
+ n_unique += 1
+ if n_unique > my_max:
+ best_dim = dim
+ my_max = n_unique
+ my_split = (n_unique-1)/2
+ # I recognize how lame this is.
+ cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
+ for i in range(my_max):
+ #print "Setting tarr: ", i, uniquedims[best_dim][i]
+ tarr[i] = uniquedims[best_dim][i]
+ tarr.sort()
+ split = tarr[my_split]
+ cdef np.ndarray[np.uint8_t, ndim=1] less_ids = np.empty(n_grids, dtype='uint8')
+ cdef np.ndarray[np.uint8_t, ndim=1] greater_ids = np.empty(n_grids, dtype='uint8')
+ for i in range(n_grids):
+ if data[i, 0, best_dim] < split:
+ less_ids[i] = 1
+ else:
+ less_ids[i] = 0
+ if data[i, 1, best_dim] > split:
+ greater_ids[i] = 1
+ else:
+ greater_ids[i] = 0
+    # Return our chosen dimension, split position, and membership masks
+ return best_dim, split, less_ids.view("bool"), greater_ids.view("bool")
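For comparison, the unique-edge search above mirrors the pure-NumPy logic removed from amr_kdtree.py in the hunk below; the equivalent sketch, extended to also return the membership masks the Cython version adds:

    import numpy as na
    def kdtree_get_choices_py(data, l_corner, r_corner):
        # data is (n_grids, 2, 3): the left/right edges of each grid.
        best_dim, best = 0, na.empty(0)
        for d in range(3):
            inside = (l_corner[d] < data[:, :, d]) & \
                     (data[:, :, d] < r_corner[d])
            u = na.unique(data[:, :, d][inside])
            if u.size > best.size:
                best, best_dim = u, d
        split = best[(best.size - 1) / 2]   # median unique edge
        less_ids = data[:, 0, best_dim] < split
        greater_ids = split < data[:, 1, best_dim]
        return best_dim, split, less_ids, greater_ids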
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -28,15 +28,14 @@
import numpy as na
from yt.funcs import *
from yt.visualization.volume_rendering.grid_partitioner import HomogenizedVolume
-from yt.utilities.amr_utils import PartitionedGrid
+from yt.utilities.amr_utils import PartitionedGrid, kdtree_get_choices
from yt.utilities.performance_counters import yt_counters, time_function
-import yt.utilities.parallel_tools.parallel_analysis_interface as PT
+from yt.utilities.parallel_tools.parallel_analysis_interface \
+ import ParallelAnalysisInterface
from copy import deepcopy
from yt.config import ytcfg
from time import time
import h5py
-my_rank = ytcfg.getint("yt", "__parallel_rank")
-nprocs = ytcfg.getint("yt", "__parallel_size")
def corner_bounds(split_dim, split, current_left = None, current_right = None):
r"""
@@ -48,11 +47,11 @@
chosen by specifying the `current_left` or `current_right`.
"""
if(current_left is not None):
- new_left = na.array([current_left[0],current_left[1],current_left[2]])
+ new_left = current_left.copy()
new_left[split_dim] = split
return new_left
elif(current_right is not None):
- new_right = na.array([current_right[0],current_right[1],current_right[2]])
+ new_right = current_right.copy()
new_right[split_dim] = split
return new_right
@@ -288,12 +287,13 @@
'split_pos': 0.5}
"""
+ ParallelAnalysisInterface.__init__(self)
self.current_split_dim = 0
self.pf = pf
self.sdx = self.pf.h.get_smallest_dx()
self._id_offset = pf.h.grids[0]._id_offset
- if nprocs > len(pf.h.grids):
+ if self.comm.size > len(pf.h.grids):
mylog.info('Parallel rendering requires that the number of \n \
grids in the dataset is greater or equal to the number of \n \
processors. Reduce number of processors.')
@@ -379,7 +379,7 @@
# If the full amr kD-tree is requested, merge the results from
# the parallel build.
- if merge_trees and nprocs > 1:
+ if merge_trees and self.comm.size > 1:
self.join_parallel_trees()
self.my_l_corner = self.domain_left_edge
self.my_r_corner = self.domain_right_edge
@@ -736,7 +736,8 @@
thisnode.ri = na.rint((thisnode.r_corner-gle)/dds).astype('int32')
thisnode.dims = (thisnode.ri - thisnode.li).astype('int32')
# Here the cost is actually inversely proportional to 4**Level (empirical)
- thisnode.cost = (na.prod(thisnode.dims)/4.**thisnode.grid.Level).astype('int64')
+ #thisnode.cost = (na.prod(thisnode.dims)/4.**thisnode.grid.Level).astype('int64')
+ thisnode.cost = 1.0
# Here is the old way
# thisnode.cost = na.prod(thisnode.dims).astype('int64')
@@ -751,11 +752,11 @@
self.rebuild_references()
def trim_references(self):
- par_tree_depth = long(na.log2(nprocs))
- for i in range(2**nprocs):
+ par_tree_depth = long(na.log2(self.comm.size))
+ for i in range(2**self.comm.size):
if ((i + 1)>>par_tree_depth) == 1:
- # There are nprocs nodes that meet this criteria
- if (i+1-nprocs) is not my_rank:
+ # There are self.comm.size nodes that meet this criteria
+ if (i+1-self.comm.size) != self.comm.rank:
self.tree_dict.pop(i)
continue
for node in self.tree_dict.itervalues():
@@ -769,7 +770,8 @@
if self.tree_dict[0].split_pos is None:
self.tree_dict.pop(0)
def merge_trees(self):
- self.tree_dict = self._mpi_joindict(self.tree_dict)
+ self.tree_dict = self.comm.par_combine_object(self.tree_dict,
+ datatype = "dict", op = "join")
def rebuild_references(self):
self.tree = self.tree_dict[0]
@@ -987,9 +989,9 @@
current_node.grids = grids
current_node.l_corner = l_corner
current_node.r_corner = r_corner
- # current_node.owner = my_rank
+ # current_node.owner = self.comm.rank
current_node.id = 0
- par_tree_depth = int(na.log2(nprocs))
+ par_tree_depth = int(na.log2(self.comm.size))
anprocs = 2**par_tree_depth
while current_node is not None:
# If we don't have any grids, that means we are revisiting
@@ -1002,7 +1004,7 @@
# This is where all the domain decomposition occurs.
if ((current_node.id + 1)>>par_tree_depth) == 1:
# There are anprocs nodes that meet this criteria
- if (current_node.id+1-anprocs) is my_rank:
+ if (current_node.id+1-anprocs) == self.comm.rank:
# I own this shared node
self.my_l_corner = current_node.l_corner
self.my_r_corner = current_node.r_corner
@@ -1012,7 +1014,7 @@
continue
# If we are down to one grid, we are either in it or the parent grid
- if len(current_node.grids) is 1:
+ if len(current_node.grids) == 1:
thisgrid = current_node.grids[0]
# If we are completely contained by that grid
if (thisgrid.LeftEdge[0] <= current_node.l_corner[0]) and (thisgrid.RightEdge[0] >= current_node.r_corner[0]) and \
@@ -1040,7 +1042,7 @@
continue
# If we don't have any grids, this volume belongs to the parent
- if len(current_node.grids) is 0:
+ if len(current_node.grids) == 0:
set_leaf(current_node, current_node.parent_grid, current_node.l_corner, current_node.r_corner)
# print 'This volume does not have a child grid, so it belongs to my parent!'
current_node, previous_node = self.step_depth(current_node, previous_node)
@@ -1060,19 +1062,9 @@
# For some reason doing dim 0 separately is slightly faster.
# This could be rewritten to all be in the loop below.
- best_dim = 0
- best_choices = na.unique(data[:,:,0][(current_node.l_corner[0] < data[:,:,0]) &
- (data[:,:,0] < current_node.r_corner[0])])
-
- for d in range(1,3):
- choices = na.unique(data[:,:,d][(current_node.l_corner[d] < data[:,:,d]) &
- (data[:,:,d] < current_node.r_corner[d])])
-
- if choices.size > best_choices.size:
- best_choices, best_dim = choices, d
-
- split = best_choices[(len(best_choices)-1)/2]
- return data[:,:,best_dim], best_dim, split
+ best_dim, split, less_ids, greater_ids = \
+ kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
+ return data[:,:,best_dim], best_dim, split, less_ids, greater_ids
def _build_dividing_node(self, current_node):
'''
@@ -1080,12 +1072,14 @@
left and right children.
'''
- data,best_dim,split = self._get_choices(current_node)
+ data,best_dim,split,less_ids,greater_ids = self._get_choices(current_node)
current_node.split_ax = best_dim
current_node.split_pos = split
- less_ids = na.nonzero(data[:,0] < split)[0]
- greater_ids = na.nonzero(split < data[:,1])[0]
+ #less_ids0 = (data[:,0] < split)
+ #greater_ids0 = (split < data[:,1])
+ #assert(na.all(less_ids0 == less_ids))
+ #assert(na.all(greater_ids0 == greater_ids))
current_node.left_child = MasterNode(my_id=_lchild_id(current_node.id),
parent=current_node,
@@ -1144,17 +1138,17 @@
yield node.brick
self.reduce_tree_images(self.tree, front_center)
- self._barrier()
+ self.comm.barrier()
def reduce_tree_images(self, tree, viewpoint, image=None):
if image is not None:
self.image = image
- rounds = int(na.log2(nprocs))
+ rounds = int(na.log2(self.comm.size))
anprocs = 2**rounds
my_node = tree
my_node_id = 0
my_node.owner = 0
- path = na.binary_repr(anprocs+my_rank)
+ path = na.binary_repr(anprocs+self.comm.rank)
for i in range(rounds):
try:
my_node.left_child.owner = my_node.owner
@@ -1168,7 +1162,7 @@
except:
rounds = i-1
for thisround in range(rounds,0,-1):
- #print my_rank, 'my node', my_node_id
+ #print self.comm.rank, 'my node', my_node_id
parent = my_node.parent
#print parent['split_ax'], parent['split_pos']
if viewpoint[parent.split_ax] <= parent.split_pos:
@@ -1181,10 +1175,10 @@
# mylog.debug('front owner %i back owner %i parent owner %i'%( front.owner, back.owner, parent.owner))
# Send the images around
- if front.owner == my_rank:
+ if front.owner == self.comm.rank:
if front.owner == parent.owner:
- mylog.debug( '%04i receiving image from %04i'%(my_rank,back.owner))
- arr2 = PT._recv_array(back.owner, tag=back.owner).reshape(
+ mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
+ arr2 = self.comm.recv_array(back.owner, tag=back.owner).reshape(
(self.image.shape[0],self.image.shape[1],self.image.shape[2]))
for i in range(3):
# This is the new way: alpha corresponds to opacity of a given
@@ -1197,18 +1191,17 @@
self.image[:,:,i ] = self.image[:,:,i ] + ta * arr2[:,:,i ]
else:
mylog.debug('Reducing image. You have %i rounds to go in this binary tree' % thisround)
- mylog.debug('%04i sending my image to %04i'%(my_rank,back.owner))
- PT._send_array(self.image.ravel(), back.owner, tag=my_rank)
-
+ mylog.debug('%04i sending my image to %04i'%(self.comm.rank,back.owner))
+ self.comm.send_array(self.image.ravel(), back.owner, tag=self.comm.rank)
- if back.owner == my_rank:
+ if back.owner == self.comm.rank:
if front.owner == parent.owner:
- mylog.debug('%04i sending my image to %04i'%(my_rank, front.owner))
- PT._send_array(self.image.ravel(), front.owner, tag=my_rank)
+ mylog.debug('%04i sending my image to %04i'%(self.comm.rank, front.owner))
+ self.comm.send_array(self.image.ravel(), front.owner, tag=self.comm.rank)
else:
mylog.debug('Reducing image. You have %i rounds to go in this binary tree' % thisround)
- mylog.debug('%04i receiving image from %04i'%(my_rank,front.owner))
- arr2 = PT._recv_array(front.owner, tag=front.owner).reshape(
+ mylog.debug('%04i receiving image from %04i'%(self.comm.rank,front.owner))
+ arr2 = self.comm.recv_array(front.owner, tag=front.owner).reshape(
(self.image.shape[0],self.image.shape[1],self.image.shape[2]))
for i in range(3):
# This is the new way: alpha corresponds to opacity of a given
@@ -1222,7 +1215,7 @@
# image[:,:,i+3] = arr2[:,:,i+3] + ta * image[:,:,i+3]
# Set parent owner to back owner
# my_node = (my_node-1)>>1
- if my_rank == my_node.parent.owner:
+ if self.comm.rank == my_node.parent.owner:
my_node = my_node.parent
else:
break
@@ -1230,8 +1223,8 @@
def store_kd_bricks(self, fn=None):
if fn is None:
fn = '%s_kd_bricks.h5'%self.pf
- if my_rank != 0:
- PT._recv_array(my_rank-1, tag=my_rank-1)
+ if self.comm.rank != 0:
+ self.comm.recv_array(self.comm.rank-1, tag=self.comm.rank-1)
f = h5py.File(fn,"a")
for node in self.depth_traverse():
i = node.id
@@ -1243,14 +1236,14 @@
except:
pass
f.close()
- if my_rank != (nprocs-1):
- PT._send_array([0],my_rank+1, tag=my_rank)
+ if self.comm.rank != (self.comm.size-1):
+ self.comm.send_array([0],self.comm.rank+1, tag=self.comm.rank)
def load_kd_bricks(self,fn=None):
if fn is None:
fn = '%s_kd_bricks.h5' % self.pf
- if my_rank != 0:
- PT._recv_array(my_rank-1, tag=my_rank-1)
+ if self.comm.rank != 0:
+ self.comm.recv_array(self.comm.rank-1, tag=self.comm.rank-1)
try:
f = h5py.File(fn,"r")
for node in self.depth_traverse():
@@ -1273,8 +1266,8 @@
f.close()
except:
pass
- if my_rank != (nprocs-1):
- PT._send_array([0],my_rank+1, tag=my_rank)
+ if self.comm.rank != (self.comm.size-1):
+ self.comm.send_array([0],self.comm.rank+1, tag=self.comm.rank)
def load_tree(self,fn):
raise NotImplementedError()
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/utilities/answer_testing/api.py
--- a/yt/utilities/answer_testing/api.py
+++ b/yt/utilities/answer_testing/api.py
@@ -45,3 +45,8 @@
from .xunit import \
Xunit
+
+from .halo_tests import \
+ TestHaloCompositionHashHOP, \
+ TestHaloCompositionHashFOF, \
+ TestHaloCompositionHashPHOP
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/utilities/answer_testing/default_tests.py
--- a/yt/utilities/answer_testing/default_tests.py
+++ b/yt/utilities/answer_testing/default_tests.py
@@ -60,7 +60,7 @@
results[field] = []
for ax in range(3):
t = self.pf.h.proj(ax, field)
- results[field].append(t.data)
+ results[field].append(t.field_data)
self.result = results
def compare(self, old_result):
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/utilities/answer_testing/halo_tests.py
--- a/yt/utilities/answer_testing/halo_tests.py
+++ b/yt/utilities/answer_testing/halo_tests.py
@@ -1,16 +1,20 @@
from yt.mods import *
-import matplotlib; matplotlib.use("Agg")
+import matplotlib
import pylab
from output_tests import SingleOutputTest, YTStaticOutputTest, create_test
+from yt.analysis_modules.halo_finding.api import *
+import hashlib
+import numpy as np
-class TestHaloCount(YTStaticOutputTest):
+# Tests the number of halos returned by the HOP halo finder on a dataset
+class TestHaloCountHOP(YTStaticOutputTest):
threshold = 80.0
def run(self):
- # Find the haloes using vanilla HOP.
- haloes = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
- # We only care about the number of haloes.
- self.result = len(haloes)
+ # Find the halos using vanilla HOP.
+ halos = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
+ # We only care about the number of halos.
+ self.result = len(halos)
def compare(self, old_result):
# The new value should be identical to the old one.
@@ -19,18 +23,53 @@
def plot(self):
return []
-create_test(TestHaloCount, "halo_count_test", threshold=80.0)
+# Tests the number of halos returned by the FOF halo finder on a dataset
+class TestHaloCountFOF(YTStaticOutputTest):
+ link = 0.2
+ padding = 0.02
+
+ def run(self):
+ # Find the halos using FOF.
+ halos = FOFHaloFinder(self.pf, link=self.link, dm_only=False,
+ padding=self.padding)
+ # We only care about the number of halos.
+ self.result = len(halos)
+
+ def compare(self, old_result):
+ # The new value should be identical to the old one.
+ self.compare_value_delta(self.result, old_result, 0)
+
+ def plot(self):
+ return []
+
+# Tests the number of halos returned by the Parallel HOP halo finder on a
+# dataset
+class TestHaloCountPHOP(YTStaticOutputTest):
+ threshold = 80.0
+
+ def run(self):
+ # Find the halos using parallel HOP.
+ halos = parallelHF(self.pf, threshold=self.threshold, dm_only=False)
+ # We only care about the number of halos.
+ self.result = len(halos)
+
+ def compare(self, old_result):
+ # The new value should be identical to the old one.
+ self.compare_value_delta(self.result, old_result, 0)
+
+ def plot(self):
+ return []
class TestHaloComposition(YTStaticOutputTest):
threshold=80.0
def run(self):
- # Find the haloes using vanilla HOP.
- haloes = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
+ # Find the halos using vanilla HOP.
+ halos = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
# The result is a list of the particle IDs, stored
# as sets for easy comparison.
IDs = []
- for halo in haloes:
+ for halo in halos:
IDs.append(set(halo["particle_index"]))
self.result = IDs
@@ -42,7 +81,85 @@
return False
return True
- def plot(self):
- return []
+# Tests the content of the halos returned by the HOP halo finder on a dataset
+# by comparing the hash of the arrays of all the particles contained in each
+# halo. Known to break when run in parallel. DO NOT USE.
+class TestHaloCompositionHashHOP(YTStaticOutputTest):
+ threshold=80.0
+
+ def run(self):
+ # Find the halos using vanilla HOP.
+ halos = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
+ # The result is a flattened array of the arrays of the particle IDs for
+ # each halo
+ IDs = []
+ for halo in halos:
+ IDs.append(halo["particle_index"])
+ IDs = np.concatenate(IDs)
+ self.result = IDs
+
+ def compare(self, old_result):
+ # All the lists of arrays should be identical. To check this
+ # faster, we take the 256-bit hash of these lists and compare them
+ result_hash = hashlib.sha256(self.result.tostring()).hexdigest()
+ old_result_hash = hashlib.sha256(old_result.tostring()).hexdigest()
+ if result_hash == old_result_hash:
+ return True
+ else:
+ return False
-create_test(TestHaloComposition, "halo_composition_test", threshold=80.0)
+# Tests the content of the halos returned by the FOF halo finder on a dataset
+# by comparing the hash of the arrays of all the particles contained in each
+# halo. Known to break when run in parallel. DO NOT USE.
+class TestHaloCompositionHashFOF(YTStaticOutputTest):
+ link = 0.2
+ padding = 0.02
+
+ def run(self):
+ # Find the halos using vanilla FOF.
+ halos = FOFHaloFinder(self.pf, link=self.link, dm_only=False,
+ padding=self.padding)
+ # The result is a flattened array of the arrays of the particle IDs for
+ # each halo
+ IDs = []
+ for halo in halos:
+ IDs.append(halo["particle_index"])
+ IDs = np.concatenate(IDs)
+ self.result = IDs
+
+ def compare(self, old_result):
+ # All the lists of arrays should be identical. To check this
+ # faster, we take the 256-bit hash of these lists and compare them
+ result_hash = hashlib.sha256(self.result.tostring()).hexdigest()
+ old_result_hash = hashlib.sha256(old_result.tostring()).hexdigest()
+ if result_hash == old_result_hash:
+ return True
+ else:
+ return False
+
+# Tests the content of the halos returned by the Parallel HOP halo finder on a
+# dataset by comparing the hash of the arrays of all the particles contained
+# in each halo. Known to break when run in parallel. DO NOT USE.
+class TestHaloCompositionHashPHOP(YTStaticOutputTest):
+ threshold=80.0
+
+ def run(self):
+ # Find the halos using parallel HOP.
+ halos = parallelHF(self.pf, threshold=self.threshold, dm_only=False)
+ # The result is a flattened array of the arrays of the particle IDs for
+ # each halo
+ IDs = []
+ for halo in halos:
+ IDs.append(halo["particle_index"])
+ IDs = np.concatenate(IDs)
+ self.result = IDs
+
+ def compare(self, old_result):
+ # All the lists of arrays should be identical. To check this
+ # faster, we take the 256-bit hash of these lists and compare them
+ result_hash = hashlib.sha256(self.result.tostring()).hexdigest()
+ old_result_hash = hashlib.sha256(old_result.tostring()).hexdigest()
+ if result_hash == old_result_hash:
+ return True
+ else:
+ return False
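
The hash comparison in these tests reduces to hashing the raw bytes of the concatenated particle-index array. A minimal standalone sketch of the check, with toy arrays standing in for halo particle IDs:

import hashlib
import numpy as np

ids = np.concatenate([np.array([1, 5, 9]), np.array([2, 3])])
old_ids = np.concatenate([np.array([1, 5, 9]), np.array([2, 3])])

# Identical arrays serialize to identical bytes, so the digests match.
new_hash = hashlib.sha256(ids.tostring()).hexdigest()
old_hash = hashlib.sha256(old_ids.tostring()).hexdigest()
print new_hash == old_hash  # -> True
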
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/utilities/answer_testing/hydro_tests.py
--- a/yt/utilities/answer_testing/hydro_tests.py
+++ b/yt/utilities/answer_testing/hydro_tests.py
@@ -23,7 +23,7 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
-import matplotlib; matplotlib.use("Agg")
+import matplotlib
import pylab
from yt.mods import *
from output_tests import SingleOutputTest, YTStaticOutputTest, create_test
@@ -37,14 +37,15 @@
def run(self):
# First we get our flattened projection -- this is the
# Density, px, py, pdx, and pdy
- proj = self.pf.h.proj(self.axis, self.field)
+ proj = self.pf.h.proj(self.axis, self.field,
+ weight_field=self.weight_field)
# Now let's stick it in a buffer
pixelized_proj = self.pixelize(proj, self.field)
# We just want the values, so this can be stored
# independently of the parameter file.
- # The .data attributes strip out everything other than the actual array
+ # The .field_data attribute strips out everything other than the actual array
# values.
- self.result = (proj.data, pixelized_proj.data)
+ self.result = (proj.field_data, pixelized_proj.data)
def compare(self, old_result):
proj, pixelized_proj = self.result
@@ -60,10 +61,92 @@
pylab.clf()
pylab.imshow(self.result[1][self.field],
interpolation='nearest', origin='lower')
- fn = "%s_%s_projection.png" % (self.pf, self.field)
+ fn = "%s_%s_%s_projection.png" % (self.pf, self.field,
+ self.weight_field)
pylab.savefig(fn)
return [fn]
+class TestOffAxisProjection(YTStaticOutputTest):
+
+ field = None
+ weight_field = None
+
+ def run(self):
+ # Here proj will just be the data array.
+ proj = off_axis_projection(self.pf,
+ (0.5 * (self.pf.domain_left_edge +
+ self.pf.domain_right_edge)),
+ [1., 1., 1.], 1., 400,
+ self.field, weight=self.weight_field)
+
+ # Store just the array values.
+ self.result = proj
+
+ def compare(self, old_result):
+ proj = self.result
+ oproj = old_result
+
+ self.compare_array_delta(proj, oproj, 1e-7)
+
+ def plot(self):
+ fn = "%s_%s_%s_off-axis_projection.png" % \
+ (self.pf, self.field, self.weight_field)
+ write_image(self.result, fn)
+ return [fn]
+
+class TestRay(YTStaticOutputTest):
+
+ field = None
+
+ def run(self):
+ na.random.seed(4333)
+ start_point = na.random.random(self.pf.dimensionality) * \
+ (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
+ self.pf.domain_left_edge
+ end_point = na.random.random(self.pf.dimensionality) * \
+ (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
+ self.pf.domain_left_edge
+
+ # Here the ray object holds the sampled field data.
+ ray = self.pf.h.ray(start_point, end_point, field=self.field)
+
+ # Store just the sampled field values.
+ self.result = ray[self.field]
+
+ def compare(self, old_result):
+ ray = self.result
+ oray = old_result
+
+ self.compare_array_delta(ray, oray, 1e-7)
+
+ def plot(self):
+ return
+
+class TestSlice(YTStaticOutputTest):
+
+ field = None
+ axis = None
+
+ def run(self):
+ # Here the slice object holds the field data.
+ slice = self.pf.h.slice(self.axis,
+ (0.5 * (self.pf.domain_left_edge +
+ self.pf.domain_right_edge))[self.axis],
+ fields=self.field)
+ # Store just the field data.
+ self.result = slice.field_data
+
+ def compare(self, old_result):
+ slice = self.result
+ oslice = old_result
+
+ self.compare_data_arrays(slice, oslice)
+
+ def plot(self):
+ fn = "%s_%s_slice.png" % (self.pf, self.field)
+ write_image(self.result[self.field], fn)
+ return [fn]
+
# Now we create all our tests. We are using the create_test
# function, which is a relatively simple function that takes the base class,
# a name, and any parameters that the test requires.
@@ -88,7 +171,7 @@
weight=self.weight)
# The arrays are all stored in a dictionary hanging off the profile
# object
- self.result = p.data._data
+ self.result = p.data.field_data
def compare(self, old_result):
self.compare_data_arrays(
@@ -102,3 +185,21 @@
for field in ["Temperature", "x-velocity"]:
create_test(TestGasDistribution, "profile_density_test_%s" % field,
field_x = "Density", field_y = field)
+
+class Test2DGasDistribution(TestGasDistribution):
+ x_bins = 128
+ y_bins = 128
+ field_z = "CellMassMsun"
+ weight = None
+ def run(self):
+ # We're NOT going to use the low-level profiling API here,
+ # because we are avoiding the calculations of min/max,
+ # as those should be tested in another test.
+ pc = PlotCollection(self.pf, center=self.sim_center)
+ p = pc.add_phase_object(self.entire_simulation,
+ [self.field_x, self.field_y, self.field_z], x_bins = self.x_bins, y_bins = self.y_bins,
+ weight=self.weight)
+ # The arrays are all stored in a dictionary hanging off the profile
+ # object
+ self.result = p.data.field_data
+
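
A hedged sketch of how the new test classes can be registered through create_test, following the TestGasDistribution pattern above; the field and axis values are illustrative:

for axis in range(3):
    create_test(TestSlice, "slice_test_%s_Density" % axis,
                field="Density", axis=axis)
create_test(TestRay, "ray_test_Density", field="Density")
create_test(TestOffAxisProjection, "off_axis_projection_Density",
            field="Density", weight_field=None)
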
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py
+++ b/yt/utilities/answer_testing/output_tests.py
@@ -23,6 +23,7 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
+import matplotlib
from yt.mods import *
# We first create our dictionary of tests to run. This starts out empty, and
@@ -49,13 +50,16 @@
self.acceptable = acceptable
def __repr__(self):
- return "ValueDelta: Delta %0.5e, max of %0.5e" % (
+ return "ValueDelta: Delta %s, max of %s" % (
self.delta, self.acceptable)
class ArrayDelta(ValueDelta):
def __repr__(self):
- return "ArrayDelta: Delta %0.5e, max of %0.5e" % (
- self.delta, self.acceptable)
+ nabove = len(na.where(self.delta > self.acceptable)[0])
+ return "ArrayDelta: Delta %s, max of %s, acceptable of %s.\n" \
+ "%d of %d points above the acceptable limit" % \
+ (self.delta, self.delta.max(), self.acceptable, nabove,
+ self.delta.size)
class ShapeMismatch(RegressionTestException):
def __init__(self, old_shape, current_shape):
@@ -63,7 +67,7 @@
self.current_shape = current_shape
def __repr__(self):
- return "Shape Mismatch: old_buffer %s, current_buffer %0.5e" % (
+ return "Shape Mismatch: old_buffer %s, current_buffer %s" % (
self.old_shape, self.current_shape)
class RegressionTest(object):
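
The reworked ArrayDelta repr now reports how many elements exceed the tolerance instead of assuming a scalar delta. A minimal sketch of that counting logic, with toy values:

import numpy as na

delta = na.array([1e-9, 5e-3, 2e-8])
acceptable = 1e-7
# Count the entries that fail the tolerance check.
nabove = len(na.where(delta > acceptable)[0])
print "%d of %d points above the acceptable limit" % (nabove, delta.size)
# -> 1 of 3 points above the acceptable limit
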
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/utilities/answer_testing/particle_tests.py
--- a/yt/utilities/answer_testing/particle_tests.py
+++ b/yt/utilities/answer_testing/particle_tests.py
@@ -1,5 +1,5 @@
+import matplotlib
from yt.mods import *
-import matplotlib; matplotlib.use("Agg")
import pylab
from output_tests import SingleOutputTest, YTStaticOutputTest, create_test
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/utilities/answer_testing/runner.py
--- a/yt/utilities/answer_testing/runner.py
+++ b/yt/utilities/answer_testing/runner.py
@@ -23,11 +23,12 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
-import matplotlib; matplotlib.use("Agg")
+import matplotlib
import os, shelve, cPickle, sys, imp, tempfile
from yt.config import ytcfg; ytcfg["yt","serialize"] = "False"
import yt.utilities.cmdln as cmdln
+from yt.funcs import *
from .xunit import Xunit
from output_tests import test_registry, MultipleOutputTest, \
@@ -54,12 +55,14 @@
self._path = os.path.join(path, "results")
else:
self._path = os.path.join(path, "results_%s" % self.id)
- if not os.path.isdir(self._path): os.mkdir(self._path)
+ if not os.path.isdir(self._path):
+ only_on_root(os.mkdir, self._path)
if os.path.isfile(self._path): raise RuntimeError
def _fn(self, tn):
return os.path.join(self._path, tn)
+ @rootonly
def __setitem__(self, test_name, result):
# We have to close our shelf manually,
# as the destructor does not necessarily do this.
@@ -79,7 +82,7 @@
class RegressionTestRunner(object):
def __init__(self, results_id, compare_id = None,
results_path = ".", compare_results_path = ".",
- io_log = "OutputLog"):
+ io_log = "OutputLog", plot_tests = False):
# This test runner assumes it has been launched with the current
# working directory that of the test case itself.
self.io_log = io_log
@@ -92,6 +95,7 @@
self.results = RegressionTestStorage(results_id, path=results_path)
self.plot_list = {}
self.passed_tests = {}
+ self.plot_tests = plot_tests
def run_all_tests(self):
plot_list = []
@@ -126,7 +130,8 @@
print self.id, "Running", test.name,
test.setup()
test.run()
- self.plot_list[test.name] = test.plot()
+ if self.plot_tests:
+ self.plot_list[test.name] = test.plot()
self.results[test.name] = test.result
success, msg = self._compare(test)
if self.old_results is None:
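
With the new plot_tests keyword, plot generation is opt-in. A hedged usage sketch; the result ids and paths are hypothetical:

from yt.utilities.answer_testing.runner import RegressionTestRunner

# Plots are only collected when plot_tests=True is passed.
runner = RegressionTestRunner("current", compare_id="gold",
                              results_path=".", compare_results_path=".",
                              io_log="OutputLog", plot_tests=True)
runner.run_all_tests()
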
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -393,11 +393,14 @@
print "---"
print
print "This installation CAN be automatically updated."
+ _update_hg(path)
print "Updated successfully."
else:
print
print "YT site-packages not in path, so you must"
- print "update this installation manually."
+ print "update this installation manually by committing and"
+ print "merging your modifications to the code before"
+ print "updating to the newest changeset."
print
@cmdln.option("-u", "--update-source", action="store_true",
@@ -443,7 +446,10 @@
print "Updated successfully."
elif opts.update_source:
print
- print "You have to update this installation yourself."
+ print "YT site-packages not in path, so you must"
+ print "update this installation manually by committing and"
+ print "merging your modifications to the code before"
+ print "updating to the newest changeset."
print
if vstring is not None and opts.outputfile is not None:
open(opts.outputfile, "w").write(vstring)
@@ -567,7 +573,7 @@
else:
p = pc.add_slice(opts.field, opts.axis)
from yt.gui.reason.pannable_map import PannableMapServer
- mapper = PannableMapServer(p.data, opts.field)
+ mapper = PannableMapServer(p.field_data, opts.field)
import yt.utilities.bottle as bottle
bottle.debug(True)
if opts.host is not None:
@@ -637,16 +643,18 @@
virial_quantities=['TotalMassMsun','RadiusMpc'])
# Add profile fields.
- hp.add_profile('CellVolume',weight_field=None,accumulation=True)
- hp.add_profile('TotalMassMsun',weight_field=None,accumulation=True)
- hp.add_profile('Density',weight_field=None,accumulation=False)
- hp.add_profile('Temperature',weight_field='CellMassMsun',accumulation=False)
+ pf = hp.pf
+ all_fields = pf.h.field_list + pf.h.derived_field_list
+ for field, wv, acc in HP.standard_fields:
+ if field not in all_fields: continue
+ hp.add_profile(field, wv, acc)
hp.make_profiles(filename="FilteredQuantities.out")
# Add projection fields.
hp.add_projection('Density',weight_field=None)
hp.add_projection('Temperature',weight_field='Density')
- hp.add_projection('Metallicity',weight_field='Density')
+ if "Metallicity" in all_fields:
+ hp.add_projection('Metallicity',weight_field='Density')
# Make projections for all three axes using the filtered halo list and
# save data to hdf5 files.
@@ -669,7 +677,7 @@
pc_dummy = PlotCollection(pf, center=c)
pr = pc_dummy.add_profile_object(dd, ["Density", "Temperature"],
weight="CellMassMsun")
- ph.modify["line"](pr.data["Density"], pr.data["Temperature"])
+ ph.modify["line"](pr.field_data["Density"], pr.field_data["Temperature"])
pc.save()
@cmdln.option("-d", "--desc", action="store",
@@ -1562,7 +1570,7 @@
save_name = "%s"%pf+"_"+field+"_rendering.png"
if not '.png' in save_name:
save_name += '.png'
- if cam._mpi_get_rank() != -1:
+ if cam._par_rank != -1:
write_bitmap(image,save_name)
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/utilities/parallel_tools/distributed_object_collection.py
--- a/yt/utilities/parallel_tools/distributed_object_collection.py
+++ /dev/null
@@ -1,138 +0,0 @@
-"""
-A simple distributed object mechanism, for storing array-heavy objects.
-Meant to be subclassed.
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
- Copyright (C) 2010-2011 Matthew Turk. All Rights Reserved.
-
- This file is part of yt.
-
- yt is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-
-from itertools import izip
-
-import numpy as na
-
-from yt.funcs import *
-
-from .parallel_analysis_interface import ParallelAnalysisInterface
-
-class DistributedObjectCollection(ParallelAnalysisInterface):
- valid = True
-
- def _get_object_info(self):
- pass
-
- def _set_object_info(self):
- pass
-
- def join_lists(self):
- info_dict = self._get_object_info()
- info_dict = self._mpi_catdict(info_dict)
- self._set_object_info(info_dict)
-
- def _collect_objects(self, desired_indices):
- # We figure out which indices belong to which processor,
- # then we pack them up, and we send a list to each processor.
- request_count = []
- owners = self._object_owners[desired_indices]
- mylog.debug("Owner list: %s", na.unique1d(owners))
- # Even if we have a million bricks, this should not take long.
- s = self._mpi_get_size()
- m = self._mpi_get_rank()
- requests = dict( ( (i, []) for i in xrange(s) ) )
- for i, p in izip(desired_indices, owners):
- requests[p].append(i)
- for p in sorted(requests):
- requests[p] = na.array(requests[p], dtype='int64')
- request_count.append(len(requests[p]))
- size = len(request_count)
- mylog.debug("Requesting: %s", request_count)
- request_count = na.array(request_count, dtype='int64')
- # Now we distribute our requests to all the processors.
- # This is two-pass. One to get the length of the arrays. The second
- # pass is to get the actual indices themselves.
- request_count = self._mpi_joindict({m : request_count})
- # Now we have our final array of requests, with arrangement
- # (Nproc,Nproc). First index corresponds to requesting proc, second to
- # sending. So [them,us] = 5 means we owe 5, whereas [us, them] means
- # we are owed.
- send_hooks = []
- dsend_buffers, dsend_hooks = [], []
- recv_hooks, recv_buffers = [], []
- drecv_buffers, drecv_hooks = [], []
- # We post our index-list and data receives from each processor.
- mylog.debug("Posting data buffer receives")
- proc_hooks = {}
- for p, request_from in request_count.items():
- if p == m: continue
- size = request_from[m]
- #if size == 0: continue
- # We post receives of the grids we *asked* for.
- # Note that indices into this are not necessarily processor ids.
- # So we store. This has to go before the appends or it's an
- # off-by-one.
- mylog.debug("Setting up index buffer of size %s for receive from %s",
- size, p)
- proc_hooks[len(drecv_buffers)] = p
- drecv_buffers.append(self._create_buffer(requests[p]))
- drecv_hooks.append(self._mpi_Irecv_double(drecv_buffers[-1], p, 1))
- recv_buffers.append(na.zeros(size, dtype='int64'))
- # Our index list goes on 0, our buffer goes on 1. We know how big
- # the index list will be, now.
- recv_hooks.append(self._mpi_Irecv_long(recv_buffers[-1], p, 0))
- # Send our index lists into the waiting buffers
- mylog.debug("Sending index lists")
- for p, ind_list in requests.items():
- if p == m: continue
- if len(ind_list) == 0: continue
- # Now, we actually send our index lists.
- send_hooks.append(self._mpi_Isend_long(ind_list, p, 0))
- # Now we post receives for all of the data buffers.
- mylog.debug("Sending data")
- for i in self._mpi_Request_Waititer(recv_hooks):
- # We get back the index, which here is identical to the processor
- # number doing the send. At this point, we can post our receives.
- p = proc_hooks[i]
- mylog.debug("Processing from %s", p)
- ind_list = recv_buffers[i]
- dsend_buffers.append(self._create_buffer(ind_list))
- self._pack_buffer(ind_list, dsend_buffers[-1])
- dsend_hooks.append(self._mpi_Isend_double(
- dsend_buffers[-1], p, 1))
- mylog.debug("Waiting on data receives: %s", len(drecv_hooks))
- for i in self._mpi_Request_Waititer(drecv_hooks):
- mylog.debug("Unpacking from %s", proc_hooks[i])
- # Now we have to unpack our buffers
- # Our key into this is actually the request for the processor
- # number.
- p = proc_hooks[i]
- self._unpack_buffer(requests[p], drecv_buffers[i])
- mylog.debug("Finalizing sends: %s", len(dsend_hooks))
- for i in self._mpi_Request_Waititer(dsend_hooks):
- continue
-
- def _create_buffer(self, ind_list):
- pass
-
- def _pack_buffer(self, ind_list):
- pass
-
- def _unpack_buffer(self, ind_list, my_buffer):
- pass
-
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -51,8 +51,8 @@
if parallel_capable:
mylog.info("Parallel computation enabled: %s / %s",
MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
- ytcfg["yt","__parallel_rank"] = str(MPI.COMM_WORLD.rank)
- ytcfg["yt","__parallel_size"] = str(MPI.COMM_WORLD.size)
+ ytcfg["yt","__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
+ ytcfg["yt","__global_parallel_size"] = str(MPI.COMM_WORLD.size)
ytcfg["yt","__parallel"] = "True"
if exe_name == "embed_enzo" or \
("_parallel" in dir(sys) and sys._parallel == True):
@@ -82,6 +82,39 @@
else:
parallel_capable = False
+# Set up translation table
+if parallel_capable:
+ dtype_names = dict(
+ float32 = MPI.FLOAT,
+ float64 = MPI.DOUBLE,
+ int32 = MPI.INT,
+ int64 = MPI.LONG
+ )
+ op_names = dict(
+ sum = MPI.SUM,
+ min = MPI.MIN,
+ max = MPI.MAX
+ )
+
+else:
+ dtype_names = dict(
+ float32 = "MPI.FLOAT",
+ float64 = "MPI.DOUBLE",
+ int32 = "MPI.INT",
+ int64 = "MPI.LONG"
+ )
+ op_names = dict(
+ sum = "MPI.SUM",
+ min = "MPI.MIN",
+ max = "MPI.MAX"
+ )
+
+# Because the dtypes will == correctly but do not hash the same, we need this
+# function for dictionary access.
+def get_mpi_type(dtype):
+ for dt, val in dtype_names.items():
+ if dt == dtype: return val
+
class ObjectIterator(object):
"""
This is a generalized class that accepts a list of objects and then
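
Because a numpy dtype compares equal to its string name but may not hash the same way, get_mpi_type does a linear scan instead of a plain dict lookup. A self-contained sketch using the serial fallback table of placeholder strings:

import numpy as na

dtype_names = dict(float64 = "MPI.DOUBLE", int64 = "MPI.LONG")

def get_mpi_type(dtype):
    # Scan with ==, which a hash-based dtype_names[dtype] lookup could miss.
    for dt, val in dtype_names.items():
        if dt == dtype: return val

data = na.zeros(8, dtype='float64')
print get_mpi_type(data.dtype)  # -> MPI.DOUBLE
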
@@ -96,7 +129,7 @@
if hasattr(gs[0], 'proc_num'):
# This one sort of knows about MPI, but not quite
self._objs = [g for g in gs if g.proc_num ==
- ytcfg.getint('yt','__parallel_rank')]
+ ytcfg.getint('yt','__topcomm_parallel_rank')]
self._use_all = True
else:
self._objs = gs
@@ -182,9 +215,9 @@
output; otherwise, the function gets called. Used as a decorator.
"""
@wraps(func)
- def passage(self, data):
+ def passage(self, data, **kwargs):
if not self._distributed: return data
- return func(self, data)
+ return func(self, data, **kwargs)
return passage
def parallel_blocking_call(func):
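
The decorator now forwards keyword arguments, so passthrough-wrapped methods can accept options such as op or datatype. A self-contained sketch of the serial behavior, with a hypothetical FakeComm class:

from functools import wraps

def parallel_passthrough(func):
    # Serial: hand the data straight back; parallel: call func with kwargs.
    @wraps(func)
    def passage(self, data, **kwargs):
        if not self._distributed: return data
        return func(self, data, **kwargs)
    return passage

class FakeComm(object):
    _distributed = False  # pretend we are running in serial
    @parallel_passthrough
    def combine(self, data, op='sum'):
        raise RuntimeError("only reached when distributed")

print FakeComm().combine([1, 2, 3], op='sum')  # -> [1, 2, 3]
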
@@ -211,13 +244,11 @@
"""
@wraps(f1)
def in_order(*args, **kwargs):
- MPI.COMM_WORLD.Barrier()
if MPI.COMM_WORLD.rank == 0:
f1(*args, **kwargs)
MPI.COMM_WORLD.Barrier()
if MPI.COMM_WORLD.rank != 0:
f2(*args, **kwargs)
- MPI.COMM_WORLD.Barrier()
if not parallel_capable: return f1
return in_order
@@ -243,13 +274,483 @@
if parallel_capable: return root_only
return func
-class ParallelAnalysisInterface(object):
+class Workgroup(object):
+ def __init__(self, size, ranks, comm, name):
+ self.size = size
+ self.ranks = ranks
+ self.comm = comm
+ self.name = name
+
+class ProcessorPool(object):
+ comm = None
+ size = None
+ ranks = None
+ available_ranks = None
+ tasks = None
+ workgroups = []
+ def __init__(self):
+ self.comm = communication_system.communicators[-1]
+ self.size = self.comm.size
+ self.ranks = range(self.size)
+ self.available_ranks = range(self.size)
+
+ def add_workgroup(self, size=None, ranks=None, name=None):
+ if size is None:
+ size = len(self.available_ranks)
+ if len(self.available_ranks) < size:
+ print 'Not enough resources available'
+ raise RuntimeError
+ if ranks is None:
+ ranks = [self.available_ranks.pop(0) for i in range(size)]
+
+ # Default name to the workgroup number.
+ if name is None:
+ name = str(len(self.workgroups))
+
+ group = self.comm.comm.Get_group().Incl(ranks)
+ new_comm = self.comm.comm.Create(group)
+ if self.comm.rank in ranks:
+ communication_system.communicators.append(Communicator(new_comm))
+ self.workgroups.append(Workgroup(len(ranks), ranks, new_comm, name))
+
+ def free_workgroup(self, workgroup):
+ for i in workgroup.ranks:
+ if self.comm.rank == i:
+ communication_system.communicators.pop()
+ self.available_ranks.append(i)
+ del workgroup
+ self.available_ranks.sort()
+
+ def free_all(self):
+ for wg in self.workgroups:
+ self.free_workgroup(wg)
+
+class ResultsStorage(object):
+ slots = ['result', 'result_id']
+ result = None
+ result_id = None
+
+def parallel_objects(objects, njobs, storage = None):
+ if not parallel_capable: raise RuntimeError
+ my_communicator = communication_system.communicators[-1]
+ my_size = my_communicator.size
+ my_rank = my_communicator.rank
+ all_new_comms = na.array_split(na.arange(my_size), njobs)
+ for i,comm_set in enumerate(all_new_comms):
+ if my_rank in comm_set:
+ my_new_id = i
+ break
+ communication_system.push_with_ids(all_new_comms[my_new_id].tolist())
+ obj_ids = na.arange(len(objects))
+
+ to_share = {}
+ for result_id, obj in zip(obj_ids, objects)[my_new_id::njobs]:
+ if storage is not None:
+ rstore = ResultsStorage()
+ rstore.result_id = result_id
+ yield rstore, obj
+ to_share[rstore.result_id] = rstore.result
+ else:
+ yield obj
+ communication_system.communicators.pop()
+ if storage is not None:
+ # Now we have to broadcast it
+ new_storage = my_communicator.par_combine_object(
+ to_share, datatype = 'dict', op = 'join')
+ storage.update(new_storage)
+
+class CommunicationSystem(object):
+ communicators = []
+
+ def __init__(self):
+ if parallel_capable:
+ self.communicators.append(Communicator(MPI.COMM_WORLD))
+ else:
+ self.communicators.append(Communicator(None))
+ def push(self, size=None, ranks=None):
+ raise NotImplementedError
+ if size is None:
+ size = len(available_ranks)
+ if len(available_ranks) < size:
+ raise RuntimeError
+ if ranks is None:
+ ranks = [available_ranks.pop() for i in range(size)]
+
+ group = MPI.COMM_WORLD.Group.Incl(ranks)
+ new_comm = MPI.COMM_WORLD.Create(group)
+ self.communicators.append(Communicator(new_comm))
+ return new_comm
+
+ def push_with_ids(self, ids):
+ group = self.communicators[-1].comm.Get_group().Incl(ids)
+ new_comm = self.communicators[-1].comm.Create(group)
+ from yt.config import ytcfg
+ ytcfg["yt","__topcomm_parallel_size"] = str(new_comm.size)
+ ytcfg["yt","__topcomm_parallel_rank"] = str(new_comm.rank)
+ self.communicators.append(Communicator(new_comm))
+ return new_comm
+
+ def pop(self):
+ self.communicators.pop()
+
+class Communicator(object):
+ comm = None
+ _grids = None
+ _distributed = None
+ __tocast = 'c'
+
+ def __init__(self, comm=None):
+ self.comm = comm
+ self._distributed = comm is not None and self.comm.size > 1
"""
This is an interface specification providing several useful utility
functions for analyzing something in parallel.
"""
+
+ def barrier(self):
+ if not self._distributed: return
+ mylog.debug("Opening MPI Barrier on %s", self.comm.rank)
+ self.comm.Barrier()
+
+ def mpi_exit_test(self, data=False):
+ # data==True -> exit. data==False -> no exit
+ mine, statuses = self.mpi_info_dict(data)
+ if True in statuses.values():
+ raise RuntimeError("Fatal error. Exiting.")
+ return None
+
+ @parallel_passthrough
+ def par_combine_object(self, data, op, datatype = None):
+ # op can be chosen from:
+ # cat
+ # join
+ # data is selected to be of types:
+ # na.ndarray
+ # dict
+ # data field dict
+ if datatype is not None:
+ pass
+ elif isinstance(data, types.DictType):
+ datatype = "dict"
+ elif isinstance(data, na.ndarray):
+ datatype = "array"
+ elif isinstance(data, types.ListType):
+ datatype = "list"
+ # Now we have our datatype, and we conduct our operation
+ if datatype == "dict" and op == "join":
+ if self.comm.rank == 0:
+ for i in range(1,self.comm.size):
+ data.update(self.comm.recv(source=i, tag=0))
+ else:
+ self.comm.send(data, dest=0, tag=0)
+ data = self.comm.bcast(data, root=0)
+ return data
+ elif datatype == "dict" and op == "cat":
+ field_keys = data.keys()
+ field_keys.sort()
+ size = data[field_keys[0]].shape[-1]
+ sizes = na.zeros(self.comm.size, dtype='int64')
+ outsize = na.array(size, dtype='int64')
+ self.comm.Allgather([outsize, 1, MPI.LONG],
+ [sizes, 1, MPI.LONG] )
+ # This nested concatenate is to get the shapes to work out correctly;
+ # if we just add [0] to sizes, it will broadcast a summation, not a
+ # concatenation.
+ offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+ arr_size = self.comm.allreduce(size, op=MPI.SUM)
+ for key in field_keys:
+ dd = data[key]
+ rv = self.alltoallv_array(dd, arr_size, offsets, sizes)
+ data[key] = rv
+ return data
+ elif datatype == "array" and op == "cat":
+ if data is None:
+ ncols = -1
+ size = 0
+ dtype = 'float64'
+ mylog.info('Warning: Array passed to par_combine_object was None. Setting dtype to float64. This may break things!')
+ else:
+ dtype = data.dtype
+ if len(data) == 0:
+ ncols = -1
+ size = 0
+ elif len(data.shape) == 1:
+ ncols = 1
+ size = data.shape[0]
+ else:
+ ncols, size = data.shape
+ ncols = self.comm.allreduce(ncols, op=MPI.MAX)
+ if ncols == 0:
+ data = na.zeros(0, dtype=dtype) # This only works for
+ size = data.shape[-1]
+ sizes = na.zeros(self.comm.size, dtype='int64')
+ outsize = na.array(size, dtype='int64')
+ self.comm.Allgather([outsize, 1, MPI.LONG],
+ [sizes, 1, MPI.LONG] )
+ # This nested concatenate is to get the shapes to work out correctly;
+ # if we just add [0] to sizes, it will broadcast a summation, not a
+ # concatenation.
+ offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+ arr_size = self.comm.allreduce(size, op=MPI.SUM)
+ data = self.alltoallv_array(data, arr_size, offsets, sizes)
+ return data
+ elif datatype == "list" and op == "cat":
+ if self.comm.rank == 0:
+ data = self.__mpi_recvlist(data)
+ else:
+ self.comm.send(data, dest=0, tag=0)
+ mylog.debug("Opening MPI Broadcast on %s", self.comm.rank)
+ data = self.comm.bcast(data, root=0)
+ return data
+ raise NotImplementedError
+
+ @parallel_passthrough
+ def mpi_bcast_pickled(self, data):
+ data = self.comm.bcast(data, root=0)
+ return data
+
+ def preload(self, grids, fields, io_handler):
+ # This will preload if it detects we are parallel capable and
+ # if so, we load *everything* that we need. Use with some care.
+ mylog.debug("Preloading %s from %s grids", fields, len(grids))
+ if not self._distributed: return
+ io_handler.preload(grids, fields)
+
+ @parallel_passthrough
+ def mpi_allreduce(self, data, dtype=None, op='sum'):
+ op = op_names[op]
+ if isinstance(data, na.ndarray) and data.dtype != na.bool:
+ if dtype is None:
+ dtype = data.dtype
+ if dtype != data.dtype:
+ data = data.astype(dtype)
+ temp = data.copy()
+ self.comm.Allreduce([temp,get_mpi_type(dtype)],
+ [data,get_mpi_type(dtype)], op)
+ return data
+ else:
+ # We use old-school pickling here on the assumption the arrays are
+ # relatively small ( < 1e7 elements )
+ return self.comm.allreduce(data, op)
+
+ ###
+ # Non-blocking stuff.
+ ###
+
+ def mpi_nonblocking_recv(self, data, source, tag=0, dtype=None):
+ if not self._distributed: return -1
+ if dtype is None: dtype = data.dtype
+ mpi_type = get_mpi_type(dtype)
+ return self.comm.Irecv([data, mpi_type], source, tag)
+
+ def mpi_nonblocking_send(self, data, dest, tag=0, dtype=None):
+ if not self._distributed: return -1
+ if dtype is None: dtype = data.dtype
+ mpi_type = get_mpi_type(dtype)
+ return self.comm.Isend([data, mpi_type], dest, tag)
+
+ def mpi_Request_Waitall(self, hooks):
+ if not self._distributed: return
+ MPI.Request.Waitall(hooks)
+
+ def mpi_Request_Waititer(self, hooks):
+ for i in xrange(len(hooks)):
+ req = MPI.Request.Waitany(hooks)
+ yield req
+
+ def mpi_Request_Testall(self, hooks):
+ """
+ This returns False if any of the request hooks are un-finished,
+ and True if they are all finished.
+ """
+ if not self._distributed: return True
+ return MPI.Request.Testall(hooks)
+
+ ###
+ # End non-blocking stuff.
+ ###
+
+ ###
+ # Parallel rank and size properties.
+ ###
+
+ @property
+ def size(self):
+ if not self._distributed: return 1
+ return self.comm.size
+
+ @property
+ def rank(self):
+ if not self._distributed: return 0
+ return self.comm.rank
+
+ def mpi_info_dict(self, info):
+ if not self._distributed: return 0, {0:info}
+ data = None
+ if self.comm.rank == 0:
+ data = {0:info}
+ for i in range(1, self.comm.size):
+ data[i] = self.comm.recv(source=i, tag=0)
+ else:
+ self.comm.send(info, dest=0, tag=0)
+ mylog.debug("Opening MPI Broadcast on %s", self.comm.rank)
+ data = self.comm.bcast(data, root=0)
+ return self.comm.rank, data
+
+ def claim_object(self, obj):
+ if not self._distributed: return
+ obj._owner = self.comm.rank
+ obj._distributed = True
+
+ def do_not_claim_object(self, obj):
+ if not self._distributed: return
+ obj._owner = -1
+ obj._distributed = True
+
+ def write_on_root(self, fn):
+ if not self._distributed: return open(fn, "w")
+ if self.comm.rank == 0:
+ return open(fn, "w")
+ else:
+ return cStringIO.StringIO()
+
+ def get_filename(self, prefix, rank=None):
+ if not self._distributed: return prefix
+ if rank is None:
+ return "%s_%04i" % (prefix, self.comm.rank)
+ else:
+ return "%s_%04i" % (prefix, rank)
+
+ def is_mine(self, obj):
+ if not obj._distributed: return True
+ return (obj._owner == self.comm.rank)
+
+ def send_quadtree(self, target, buf, tgd, args):
+ sizebuf = na.zeros(1, 'int64')
+ sizebuf[0] = buf[0].size
+ self.comm.Send([sizebuf, MPI.LONG], dest=target)
+ self.comm.Send([buf[0], MPI.INT], dest=target)
+ self.comm.Send([buf[1], MPI.DOUBLE], dest=target)
+ self.comm.Send([buf[2], MPI.DOUBLE], dest=target)
+
+ def recv_quadtree(self, target, tgd, args):
+ sizebuf = na.zeros(1, 'int64')
+ self.comm.Recv(sizebuf, source=target)
+ buf = [na.empty((sizebuf[0],), 'int32'),
+ na.empty((sizebuf[0], args[2]),'float64'),
+ na.empty((sizebuf[0],),'float64')]
+ self.comm.Recv([buf[0], MPI.INT], source=target)
+ self.comm.Recv([buf[1], MPI.DOUBLE], source=target)
+ self.comm.Recv([buf[2], MPI.DOUBLE], source=target)
+ return buf
+
+ @parallel_passthrough
+ def merge_quadtree_buffers(self, qt):
+ # This is a modified version of pairwise reduction from Lisandro Dalcin,
+ # in the reductions demo of mpi4py
+ size = self.comm.size
+ rank = self.comm.rank
+
+ mask = 1
+
+ args = qt.get_args() # Will always be the same
+ tgd = na.array([args[0], args[1]], dtype='int64')
+ sizebuf = na.zeros(1, 'int64')
+
+ while mask < size:
+ if (mask & rank) != 0:
+ target = (rank & ~mask) % size
+ #print "SENDING FROM %02i to %02i" % (rank, target)
+ buf = qt.tobuffer()
+ self.send_quadtree(target, buf, tgd, args)
+ #qt = self.recv_quadtree(target, tgd, args)
+ else:
+ target = (rank | mask)
+ if target < size:
+ #print "RECEIVING FROM %02i on %02i" % (target, rank)
+ buf = self.recv_quadtree(target, tgd, args)
+ qto = QuadTree(tgd, args[2])
+ qto.frombuffer(*buf)
+ merge_quadtrees(qt, qto)
+ del qto
+ #self.send_quadtree(target, qt, tgd, args)
+ mask <<= 1
+
+ if rank == 0:
+ buf = qt.tobuffer()
+ sizebuf[0] = buf[0].size
+ self.comm.Bcast([sizebuf, MPI.LONG], root=0)
+ if rank != 0:
+ buf = [na.empty((sizebuf[0],), 'int32'),
+ na.empty((sizebuf[0], args[2]),'float64'),
+ na.empty((sizebuf[0],),'float64')]
+ self.comm.Bcast([buf[0], MPI.INT], root=0)
+ self.comm.Bcast([buf[1], MPI.DOUBLE], root=0)
+ self.comm.Bcast([buf[2], MPI.DOUBLE], root=0)
+ self.refined = buf[0]
+ if rank != 0:
+ qt = QuadTree(tgd, args[2])
+ qt.frombuffer(*buf)
+ return qt
+
+
+ def send_array(self, arr, dest, tag = 0):
+ if not isinstance(arr, na.ndarray):
+ self.comm.send((None,None), dest=dest, tag=tag)
+ self.comm.send(arr, dest=dest, tag=tag)
+ return
+ tmp = arr.view(self.__tocast) # Cast to CHAR
+ # communicate type and shape
+ self.comm.send((arr.dtype.str, arr.shape), dest=dest, tag=tag)
+ self.comm.Send([arr, MPI.CHAR], dest=dest, tag=tag)
+ del tmp
+
+ def recv_array(self, source, tag = 0):
+ dt, ne = self.comm.recv(source=source, tag=tag)
+ if dt is None and ne is None:
+ return self.comm.recv(source=source, tag=tag)
+ arr = na.empty(ne, dtype=dt)
+ tmp = arr.view(self.__tocast)
+ self.comm.Recv([tmp, MPI.CHAR], source=source, tag=tag)
+ return arr
+
+ def alltoallv_array(self, send, total_size, offsets, sizes):
+ if len(send.shape) > 1:
+ recv = []
+ for i in range(send.shape[0]):
+ recv.append(self.alltoallv_array(send[i,:].copy(),
+ total_size, offsets, sizes))
+ recv = na.array(recv)
+ return recv
+ offset = offsets[self.comm.rank]
+ tmp_send = send.view(self.__tocast)
+ recv = na.empty(total_size, dtype=send.dtype)
+ recv[offset:offset+send.size] = send[:]
+ dtr = send.dtype.itemsize / tmp_send.dtype.itemsize # > 1
+ roff = [off * dtr for off in offsets]
+ rsize = [siz * dtr for siz in sizes]
+ tmp_recv = recv.view(self.__tocast)
+ self.comm.Allgatherv((tmp_send, tmp_send.size, MPI.CHAR),
+ (tmp_recv, (rsize, roff), MPI.CHAR))
+ return recv
+
+communication_system = CommunicationSystem()
+if parallel_capable:
+ ranks = na.arange(MPI.COMM_WORLD.size)
+ communication_system.push_with_ids(ranks)
+
+class ParallelAnalysisInterface(object):
+ comm = None
_grids = None
- _distributed = parallel_capable
+ _distributed = None
+
+ def __init__(self, comm = None):
+ if comm is None:
+ self.comm = communication_system.communicators[-1]
+ else:
+ self.comm = comm
+ self._grids = self.comm._grids
+ self._distributed = self.comm._distributed
def _get_objs(self, attr, *args, **kwargs):
if self._distributed:
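
The new parallel_objects generator splits a list of work items across njobs subcommunicators and joins per-item results into a shared dict. A hedged usage sketch; the item names are hypothetical, and a parallel-capable launch is required since the function raises RuntimeError otherwise:

from yt.utilities.parallel_tools.parallel_analysis_interface import \
    parallel_objects

storage = {}
# Each job group handles every njobs-th item; results are joined at the end.
for sto, item in parallel_objects(["d0001", "d0002", "d0003", "d0004"],
                                  njobs=2, storage=storage):
    sto.result = len(item)  # stand-in for the real per-item analysis
# Afterwards, storage maps item index -> result on every participating rank.
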
@@ -270,19 +771,28 @@
return ParallelObjectIterator(self, True, attr='_grids')
return ObjectIterator(self, True, attr='_grids')
+ def get_dependencies(self, fields):
+ deps = []
+ fi = self.pf.field_info
+ for field in fields:
+ deps += ensure_list(fi[field].get_dependencies(pf=self.pf).requested)
+ return list(set(deps))
+
def _initialize_parallel(self):
pass
def _finalize_parallel(self):
pass
- def _partition_hierarchy_2d(self, axis):
+
+ def partition_hierarchy_2d(self, axis):
if not self._distributed:
- return False, self.hierarchy.grid_collection(self.center, self.hierarchy.grids)
+ return False, self.hierarchy.grid_collection(self.center,
+ self.hierarchy.grids)
xax, yax = x_dict[axis], y_dict[axis]
- cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 2)
- mi = MPI.COMM_WORLD.rank
+ cc = MPI.Compute_dims(self.comm.size, 2)
+ mi = self.comm.rank
cx, cy = na.unravel_index(mi, cc)
x = na.mgrid[0:1:(cc[0]+1)*1j][cx:cx+2]
y = na.mgrid[0:1:(cc[1]+1)*1j][cy:cy+2]
@@ -299,36 +809,7 @@
reg = self.hierarchy.region_strict(self.center, LE, RE)
return True, reg
- def _partition_hierarchy_2d_inclined(self, unit_vectors, origin, widths,
- box_vectors, resolution = (1.0, 1.0)):
- if not self._distributed:
- ib = self.hierarchy.inclined_box(origin, box_vectors)
- return False, ib, resolution
- # We presuppose that unit_vectors is already unitary. If it's not,
- # caveat emptor.
- uv = na.array(unit_vectors)
- inv_mat = na.linalg.pinv(uv)
- cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 2)
- mi = MPI.COMM_WORLD.rank
- cx, cy = na.unravel_index(mi, cc)
- resolution = (1.0/cc[0], 1.0/cc[1])
- # We are rotating with respect to the *origin*, not the back center,
- # so we go from 0 .. width.
- px = na.mgrid[0.0:1.0:(cc[0]+1)*1j][cx] * widths[0]
- py = na.mgrid[0.0:1.0:(cc[1]+1)*1j][cy] * widths[1]
- nxo = inv_mat[0,0]*px + inv_mat[0,1]*py + origin[0]
- nyo = inv_mat[1,0]*px + inv_mat[1,1]*py + origin[1]
- nzo = inv_mat[2,0]*px + inv_mat[2,1]*py + origin[2]
- nbox_vectors = na.array(
- [unit_vectors[0] * widths[0]/cc[0],
- unit_vectors[1] * widths[1]/cc[1],
- unit_vectors[2] * widths[2]],
- dtype='float64')
- norigin = na.array([nxo, nyo, nzo])
- box = self.hierarchy.inclined_box(norigin, nbox_vectors)
- return True, box, resolution
-
- def _partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
+ def partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
LE, RE = na.array(ds.left_edge), na.array(ds.right_edge)
# We need to establish if we're looking at a subvolume, in which case
# we *do* want to pad things.
@@ -350,15 +831,15 @@
# grid that belongs to this processor.
grids = self.pf.h.select_grids(0)
root_grids = [g for g in grids
- if g.proc_num == MPI.COMM_WORLD.rank]
+ if g.proc_num == self.comm.rank]
if len(root_grids) != 1: raise RuntimeError
#raise KeyError
LE = root_grids[0].LeftEdge
RE = root_grids[0].RightEdge
return True, LE, RE, self.hierarchy.region(self.center, LE, RE)
- cc = MPI.Compute_dims(MPI.COMM_WORLD.size / rank_ratio, 3)
- mi = MPI.COMM_WORLD.rank % (MPI.COMM_WORLD.size / rank_ratio)
+ cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
+ mi = self.comm.rank % (self.comm.size / rank_ratio)
cx, cy, cz = na.unravel_index(mi, cc)
x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
@@ -374,7 +855,7 @@
return False, LE, RE, self.hierarchy.region_strict(self.center, LE, RE)
- def _partition_region_3d(self, left_edge, right_edge, padding=0.0,
+ def partition_region_3d(self, left_edge, right_edge, padding=0.0,
rank_ratio = 1):
"""
Given a region, it subdivides it into smaller regions for parallel
@@ -384,8 +865,8 @@
if not self._distributed:
return LE, RE, re
- cc = MPI.Compute_dims(MPI.COMM_WORLD.size / rank_ratio, 3)
- mi = MPI.COMM_WORLD.rank % (MPI.COMM_WORLD.size / rank_ratio)
+ cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
+ mi = self.comm.rank % (self.comm.size / rank_ratio)
cx, cy, cz = na.unravel_index(mi, cc)
x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
@@ -401,7 +882,7 @@
return False, LE, RE, self.hierarchy.region(self.center, LE, RE)
- def _partition_hierarchy_3d_bisection_list(self):
+ def partition_hierarchy_3d_bisection_list(self):
"""
Returns an array that is used to drive _partition_hierarchy_3d_bisection,
below.
@@ -419,8 +900,8 @@
i += 1
return [n]
- cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 3)
- si = MPI.COMM_WORLD.size
+ cc = MPI.Compute_dims(self.comm.size, 3)
+ si = self.comm.size
factors = factor(si)
xyzfactors = [factor(cc[0]), factor(cc[1]), factor(cc[2])]
@@ -443,935 +924,4 @@
break
nextdim = (nextdim + 1) % 3
return cuts
-
-
- def _partition_hierarchy_3d_bisection(self, axis, bins, counts, top_bounds = None,\
- old_group = None, old_comm = None, cut=None, old_cc=None):
- """
- Partition the volume into evenly weighted subvolumes using the distribution
- in counts. The bisection happens in the MPI communicator group old_group.
- You may need to set "MPI_COMM_MAX" and "MPI_GROUP_MAX" environment
- variables.
- """
- counts = counts.astype('int64')
- if not self._distributed:
- LE, RE = self.pf.domain_left_edge.copy(), self.pf.domain_right_edge.copy()
- return False, LE, RE, self.hierarchy.grid_collection(self.center, self.hierarchy.grids)
-
- # First time through the world is the current group.
- if old_group == None or old_comm == None:
- old_group = MPI.COMM_WORLD.Get_group()
- old_comm = MPI.COMM_WORLD
-
- # Figure out the gridding based on the deepness of cuts.
- if old_cc is None:
- cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 3)
- else:
- cc = old_cc
- cc[cut[0]] /= cut[1]
- # Set the boundaries of the full bounding box for this group.
- if top_bounds == None:
- LE, RE = self.pf.domain_left_edge.copy(), self.pf.domain_right_edge.copy()
- else:
- LE, RE = top_bounds
-
- ra = old_group.Get_rank() # In this group, not WORLD, unless it's the first time.
-
- # First find the total number of particles in my group.
- parts = old_comm.allreduce(int(counts.sum()), op=MPI.SUM)
- # Now the full sum in the bins along this axis in this group.
- full_counts = na.empty(counts.size, dtype='int64')
- old_comm.Allreduce([counts, MPI.LONG], [full_counts, MPI.LONG], op=MPI.SUM)
- # Find the bin that passes the cut points.
- midpoints = [LE[axis]]
- sum = 0
- bin = 0
- for step in xrange(1,cut[1]):
- while sum < ((parts*step)/cut[1]):
- lastsum = sum
- sum += full_counts[bin]
- bin += 1
- # Bin edges
- left_edge = bins[bin-1]
- right_edge = bins[bin]
- # Find a better approx of the midpoint cut line using a linear approx.
- a = float(sum - lastsum) / (right_edge - left_edge)
- midpoints.append(left_edge + (0.5 - (float(lastsum) / parts / 2)) / a)
- #midpoint = (left_edge + right_edge) / 2.
- midpoints.append(RE[axis])
- # Now we need to split the members of this group into chunks.
- # The values that go into the _ranks are the ranks of the tasks
- # in *this* communicator group, which go zero to size - 1. They are not
- # the same as the global ranks!
- groups = {}
- ranks = {}
- old_group_size = old_group.Get_size()
- for step in xrange(cut[1]):
- groups[step] = na.arange(step*old_group_size/cut[1], (step+1)*old_group_size/cut[1])
- # [ (start, stop, step), ]
- ranks[step] = [ (groups[step][0], groups[step][-1], 1), ]
-
- # Based on where we are, adjust our LE or RE, depending on axis. At the
- # same time assign the new MPI group membership.
- for step in xrange(cut[1]):
- if ra in groups[step]:
- LE[axis] = midpoints[step]
- RE[axis] = midpoints[step+1]
- new_group = old_group.Range_incl(ranks[step])
- new_comm = old_comm.Create(new_group)
-
- if old_cc is not None:
- old_group.Free()
- old_comm.Free()
-
- new_top_bounds = (LE,RE)
-
- # Using the new boundaries, regrid.
- mi = new_comm.rank
- cx, cy, cz = na.unravel_index(mi, cc)
- x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
- y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
- z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
-
- my_LE = na.array([x[0], y[0], z[0]], dtype='float64')
- my_RE = na.array([x[1], y[1], z[1]], dtype='float64')
-
- # Return a new subvolume and associated stuff.
- return new_group, new_comm, my_LE, my_RE, new_top_bounds, cc,\
- self.hierarchy.region_strict(self.center, my_LE, my_RE)
-
- def _mpi_find_neighbor_3d(self, shift):
- """ Given a shift array, 1x3 long, find the task ID
- of that neighbor. For example, shift=[1,0,0] finds the neighbor
- immediately to the right in the positive x direction. Each task
- has 26 neighbors, of which some may be itself depending on the number
- and arrangement of tasks.
- """
- if not self._distributed: return 0
- shift = na.array(shift)
- cc = na.array(MPI.Compute_dims(MPI.COMM_WORLD.size, 3))
- mi = MPI.COMM_WORLD.rank
- si = MPI.COMM_WORLD.size
- # store some facts about myself
- mi_cx,mi_cy,mi_cz = na.unravel_index(mi,cc)
- mi_ar = na.array([mi_cx,mi_cy,mi_cz])
- # these are identical on all tasks
- # should these be calculated once and stored?
- #dLE = na.empty((si,3), dtype='float64') # positions not needed yet...
- #dRE = na.empty((si,3), dtype='float64')
- tasks = na.empty((cc[0],cc[1],cc[2]), dtype='int64')
-
- for i in range(si):
- cx,cy,cz = na.unravel_index(i,cc)
- tasks[cx,cy,cz] = i
- #x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
- #y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
- #z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
- #dLE[i, :] = na.array([x[0], y[0], z[0]], dtype='float64')
- #dRE[i, :] = na.array([x[1], y[1], z[1]], dtype='float64')
-
- # find the neighbor
- ne = (mi_ar + shift) % cc
- ne = tasks[ne[0],ne[1],ne[2]]
- return ne
-
-
- def _barrier(self):
- if not self._distributed: return
- mylog.debug("Opening MPI Barrier on %s", MPI.COMM_WORLD.rank)
- MPI.COMM_WORLD.Barrier()
-
- def _mpi_exit_test(self, data=False):
- # data==True -> exit. data==False -> no exit
- mine, statuses = self._mpi_info_dict(data)
- if True in statuses.values():
- raise RuntimeError("Fatal error. Exiting.")
- return None
-
- @parallel_passthrough
- def _mpi_catrgb(self, data):
- self._barrier()
- data, final = data
- if MPI.COMM_WORLD.rank == 0:
- cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 2)
- nsize = final[0]/cc[0], final[1]/cc[1]
- new_image = na.zeros((final[0], final[1], 6), dtype='float64')
- new_image[0:nsize[0],0:nsize[1],:] = data[:]
- for i in range(1,MPI.COMM_WORLD.size):
- cy, cx = na.unravel_index(i, cc)
- mylog.debug("Receiving image from % into bits %s:%s, %s:%s",
- i, nsize[0]*cx,nsize[0]*(cx+1),
- nsize[1]*cy,nsize[1]*(cy+1))
- buf = _recv_array(source=i, tag=0).reshape(
- (nsize[0],nsize[1],6))
- new_image[nsize[0]*cy:nsize[0]*(cy+1),
- nsize[1]*cx:nsize[1]*(cx+1),:] = buf[:]
- data = new_image
- else:
- _send_array(data.ravel(), dest=0, tag=0)
- data = MPI.COMM_WORLD.bcast(data)
- return (data, final)
-
- @parallel_passthrough
- def _mpi_catdict(self, data):
- field_keys = data.keys()
- field_keys.sort()
- size = data[field_keys[0]].shape[-1]
- sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
- outsize = na.array(size, dtype='int64')
- MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
- [sizes, 1, MPI.LONG] )
- # This nested concatenate is to get the shapes to work out correctly;
- # if we just add [0] to sizes, it will broadcast a summation, not a
- # concatenation.
- offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
- arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
- for key in field_keys:
- dd = data[key]
- rv = _alltoallv_array(dd, arr_size, offsets, sizes)
- data[key] = rv
- return data
-
- @parallel_passthrough
- def _mpi_joindict(self, data):
- #self._barrier()
- if MPI.COMM_WORLD.rank == 0:
- for i in range(1,MPI.COMM_WORLD.size):
- data.update(MPI.COMM_WORLD.recv(source=i, tag=0))
- else:
- MPI.COMM_WORLD.send(data, dest=0, tag=0)
- data = MPI.COMM_WORLD.bcast(data, root=0)
- #self._barrier()
- return data
-
- @parallel_passthrough
- def _mpi_joindict_unpickled_double(self, data):
- self._barrier()
- size = 0
- if MPI.COMM_WORLD.rank == 0:
- for i in range(1,MPI.COMM_WORLD.size):
- size = MPI.COMM_WORLD.recv(source=i, tag=0)
- keys = na.empty(size, dtype='int64')
- values = na.empty(size, dtype='float64')
- MPI.COMM_WORLD.Recv([keys, MPI.LONG], i, 0)
- MPI.COMM_WORLD.Recv([values, MPI.DOUBLE], i, 0)
- for i,key in enumerate(keys):
- data[key] = values[i]
- # Now convert root's data to arrays.
- size = len(data)
- root_keys = na.empty(size, dtype='int64')
- root_values = na.empty(size, dtype='float64')
- count = 0
- for key in data:
- root_keys[count] = key
- root_values[count] = data[key]
- count += 1
- else:
- MPI.COMM_WORLD.send(len(data), 0, 0)
- keys = na.empty(len(data), dtype='int64')
- values = na.empty(len(data), dtype='float64')
- count = 0
- for key in data:
- keys[count] = key
- values[count] = data[key]
- count += 1
- MPI.COMM_WORLD.Send([keys, MPI.LONG], 0, 0)
- MPI.COMM_WORLD.Send([values, MPI.DOUBLE], 0, 0)
- # Now send it back as arrays.
- size = MPI.COMM_WORLD.bcast(size, root=0)
- if MPI.COMM_WORLD.rank != 0:
- del keys, values
- root_keys = na.empty(size, dtype='int64')
- root_values = na.empty(size, dtype='float64')
- MPI.COMM_WORLD.Bcast([root_keys, MPI.LONG], root=0)
- MPI.COMM_WORLD.Bcast([root_values, MPI.DOUBLE], root=0)
- # Convert back to a dict.
- del data
- data = dict(itertools.izip(root_keys, root_values))
- return data
-
- @parallel_passthrough
- def _mpi_joindict_unpickled_long(self, data):
- self._barrier()
- size = 0
- if MPI.COMM_WORLD.rank == 0:
- for i in range(1,MPI.COMM_WORLD.size):
- size = MPI.COMM_WORLD.recv(source=i, tag=0)
- keys = na.empty(size, dtype='int64')
- values = na.empty(size, dtype='int64')
- MPI.COMM_WORLD.Recv([keys, MPI.LONG], i, 0)
- MPI.COMM_WORLD.Recv([values, MPI.LONG], i, 0)
- for i,key in enumerate(keys):
- data[key] = values[i]
- # Now convert root's data to arrays.
- size = len(data)
- root_keys = na.empty(size, dtype='int64')
- root_values = na.empty(size, dtype='int64')
- count = 0
- for key in data:
- root_keys[count] = key
- root_values[count] = data[key]
- count += 1
- else:
- MPI.COMM_WORLD.send(len(data), 0, 0)
- keys = na.empty(len(data), dtype='int64')
- values = na.empty(len(data), dtype='int64')
- count = 0
- for key in data:
- keys[count] = key
- values[count] = data[key]
- count += 1
- MPI.COMM_WORLD.Send([keys, MPI.LONG], 0, 0)
- MPI.COMM_WORLD.Send([values, MPI.LONG], 0, 0)
- # Now send it back as arrays.
- size = MPI.COMM_WORLD.bcast(size, root=0)
- if MPI.COMM_WORLD.rank != 0:
- del keys, values
- root_keys = na.empty(size, dtype='int64')
- root_values = na.empty(size, dtype='int64')
- MPI.COMM_WORLD.Bcast([root_keys, MPI.LONG], root=0)
- MPI.COMM_WORLD.Bcast([root_values, MPI.LONG], root=0)
- # Convert back to a dict.
- del data
- data = dict(itertools.izip(root_keys,root_values))
- return data
-
- @parallel_passthrough
- def _mpi_concatenate_array_long(self, data):
- self._barrier()
- size = 0
- if MPI.COMM_WORLD.rank == 0:
- for i in range(1, MPI.COMM_WORLD.size):
- size = MPI.COMM_WORLD.recv(source=i, tag=0)
- new_data = na.empty(size, dtype='int64')
- MPI.COMM_WORLD.Recv([new_data, MPI.LONG], i, 0)
- data = na.concatenate((data, new_data))
- size = data.size
- del new_data
- else:
- MPI.COMM_WORLD.send(data.size, 0, 0)
- MPI.COMM_WORLD.Send([data, MPI.LONG], 0, 0)
- # Now we distribute the full array.
- size = MPI.COMM_WORLD.bcast(size, root=0)
- if MPI.COMM_WORLD.rank != 0:
- del data
- data = na.empty(size, dtype='int64')
- MPI.COMM_WORLD.Bcast([data, MPI.LONG], root=0)
- return data
-
- @parallel_passthrough
- def _mpi_concatenate_array_double(self, data):
- self._barrier()
- size = 0
- if MPI.COMM_WORLD.rank == 0:
- for i in range(1, MPI.COMM_WORLD.size):
- size = MPI.COMM_WORLD.recv(source=i, tag=0)
- new_data = na.empty(size, dtype='float64')
- MPI.COMM_WORLD.Recv([new_data, MPI.DOUBLE], i, 0)
- data = na.concatenate((data, new_data))
- size = data.size
- del new_data
- else:
- MPI.COMM_WORLD.send(data.size, 0, 0)
- MPI.COMM_WORLD.Send([data, MPI.DOUBLE], 0, 0)
- # Now we distribute the full array.
- size = MPI.COMM_WORLD.bcast(size, root=0)
- if MPI.COMM_WORLD.rank != 0:
- del data
- data = na.empty(size, dtype='float64')
- MPI.COMM_WORLD.Bcast([data, MPI.DOUBLE], root=0)
- return data
-
- @parallel_passthrough
- def _mpi_concatenate_array_on_root_double(self, data):
- self._barrier()
- size = 0
- if MPI.COMM_WORLD.rank == 0:
- for i in range(1, MPI.COMM_WORLD.size):
- size = MPI.COMM_WORLD.recv(source=i, tag=0)
- new_data = na.empty(size, dtype='float64')
- MPI.COMM_WORLD.Recv([new_data, MPI.DOUBLE], i, 0)
- data = na.concatenate((data, new_data))
- else:
- MPI.COMM_WORLD.send(data.size, 0, 0)
- MPI.COMM_WORLD.Send([data, MPI.DOUBLE], 0, 0)
- return data
-
- @parallel_passthrough
- def _mpi_concatenate_array_on_root_int(self, data):
- self._barrier()
- size = 0
- if MPI.COMM_WORLD.rank == 0:
- for i in range(1, MPI.COMM_WORLD.size):
- size = MPI.COMM_WORLD.recv(source=i, tag=0)
- new_data = na.empty(size, dtype='int32')
- MPI.COMM_WORLD.Recv([new_data, MPI.INT], i, 0)
- data = na.concatenate((data, new_data))
- else:
- MPI.COMM_WORLD.send(data.size, 0, 0)
- MPI.COMM_WORLD.Send([data, MPI.INT], 0, 0)
- return data
-
- @parallel_passthrough
- def _mpi_concatenate_array_on_root_long(self, data):
- self._barrier()
- size = 0
- if MPI.COMM_WORLD.rank == 0:
- for i in range(1, MPI.COMM_WORLD.size):
- size = MPI.COMM_WORLD.recv(source=i, tag=0)
- new_data = na.empty(size, dtype='int64')
- MPI.COMM_WORLD.Recv([new_data, MPI.LONG], i, 0)
- data = na.concatenate((data, new_data))
- else:
- MPI.COMM_WORLD.send(data.size, 0, 0)
- MPI.COMM_WORLD.Send([data, MPI.LONG], 0, 0)
- return data
-
- @parallel_passthrough
- def _mpi_minimum_array_long(self, data):
- """
- Specifically for parallelHOP. For the identical array on each task,
- it merges the arrays together, taking the lower value at each index.
- """
- self._barrier()
- size = data.size # They're all the same size, of course
- if MPI.COMM_WORLD.rank == 0:
- new_data = na.empty(size, dtype='int64')
- for i in range(1, MPI.COMM_WORLD.size):
- MPI.COMM_WORLD.Recv([new_data, MPI.LONG], i, 0)
- data = na.minimum(data, new_data)
- del new_data
- else:
- MPI.COMM_WORLD.Send([data, MPI.LONG], 0, 0)
- # Redistribute from root
- MPI.COMM_WORLD.Bcast([data, MPI.LONG], root=0)
- return data
-
- @parallel_passthrough
- def _mpi_bcast_long_dict_unpickled(self, data):
- self._barrier()
- size = 0
- if MPI.COMM_WORLD.rank == 0:
- size = len(data)
- size = MPI.COMM_WORLD.bcast(size, root=0)
- root_keys = na.empty(size, dtype='int64')
- root_values = na.empty(size, dtype='int64')
- if MPI.COMM_WORLD.rank == 0:
- count = 0
- for key in data:
- root_keys[count] = key
- root_values[count] = data[key]
- count += 1
- MPI.COMM_WORLD.Bcast([root_keys, MPI.LONG], root=0)
- MPI.COMM_WORLD.Bcast([root_values, MPI.LONG], root=0)
- if MPI.COMM_WORLD.rank != 0:
- data = {}
- for i,key in enumerate(root_keys):
- data[key] = root_values[i]
- return data
-
- @parallel_passthrough
- def _mpi_maxdict(self, data):
- """
- For each key in data, find the maximum value across all tasks, and
- then broadcast it back.
- """
- self._barrier()
- if MPI.COMM_WORLD.rank == 0:
- for i in range(1,MPI.COMM_WORLD.size):
- temp_data = MPI.COMM_WORLD.recv(source=i, tag=0)
- for key in temp_data:
- try:
- old_value = data[key]
- except KeyError:
- # This guarantees the new value gets added.
- old_value = None
- if old_value < temp_data[key]:
- data[key] = temp_data[key]
- else:
- MPI.COMM_WORLD.send(data, dest=0, tag=0)
- data = MPI.COMM_WORLD.bcast(data, root=0)
- self._barrier()
- return data
-
- def _mpi_maxdict_dict(self, data):
- """
- Similar to above, but finds maximums for dicts of dicts. This is
- specifically for a part of chainHOP.
- """
- if not self._distributed:
- top_keys = []
- bot_keys = []
- vals = []
- for top_key in data:
- for bot_key in data[top_key]:
- top_keys.append(top_key)
- bot_keys.append(bot_key)
- vals.append(data[top_key][bot_key])
- top_keys = na.array(top_keys, dtype='int64')
- bot_keys = na.array(bot_keys, dtype='int64')
- vals = na.array(vals, dtype='float64')
- return (top_keys, bot_keys, vals)
- self._barrier()
- size = 0
- top_keys = []
- bot_keys = []
- vals = []
- for top_key in data:
- for bot_key in data[top_key]:
- top_keys.append(top_key)
- bot_keys.append(bot_key)
- vals.append(data[top_key][bot_key])
- top_keys = na.array(top_keys, dtype='int64')
- bot_keys = na.array(bot_keys, dtype='int64')
- vals = na.array(vals, dtype='float64')
- del data
- if MPI.COMM_WORLD.rank == 0:
- for i in range(1,MPI.COMM_WORLD.size):
- size = MPI.COMM_WORLD.recv(source=i, tag=0)
- mylog.info('Global Hash Table Merge %d of %d size %d' % \
- (i,MPI.COMM_WORLD.size, size))
- recv_top_keys = na.empty(size, dtype='int64')
- recv_bot_keys = na.empty(size, dtype='int64')
- recv_vals = na.empty(size, dtype='float64')
- MPI.COMM_WORLD.Recv([recv_top_keys, MPI.LONG], source=i, tag=0)
- MPI.COMM_WORLD.Recv([recv_bot_keys, MPI.LONG], source=i, tag=0)
- MPI.COMM_WORLD.Recv([recv_vals, MPI.DOUBLE], source=i, tag=0)
- top_keys = na.concatenate([top_keys, recv_top_keys])
- bot_keys = na.concatenate([bot_keys, recv_bot_keys])
- vals = na.concatenate([vals, recv_vals])
-# for j, top_key in enumerate(top_keys):
-# if j%1000 == 0: mylog.info(j)
-# # Make sure there's an entry for top_key in data
-# try:
-# test = data[top_key]
-# except KeyError:
-# data[top_key] = {}
-# try:
-# old_value = data[top_key][bot_keys[j]]
-# except KeyError:
-# # This guarantees the new value gets added.
-# old_value = None
-# if old_value < vals[j]:
-# data[top_key][bot_keys[j]] = vals[j]
- else:
-# top_keys = []
-# bot_keys = []
-# vals = []
-# for top_key in data:
-# for bot_key in data[top_key]:
-# top_keys.append(top_key)
-# bot_keys.append(bot_key)
-# vals.append(data[top_key][bot_key])
-# top_keys = na.array(top_keys, dtype='int64')
-# bot_keys = na.array(bot_keys, dtype='int64')
-# vals = na.array(vals, dtype='float64')
- size = top_keys.size
- MPI.COMM_WORLD.send(size, dest=0, tag=0)
- MPI.COMM_WORLD.Send([top_keys, MPI.LONG], dest=0, tag=0)
- MPI.COMM_WORLD.Send([bot_keys, MPI.LONG], dest=0, tag=0)
- MPI.COMM_WORLD.Send([vals, MPI.DOUBLE], dest=0, tag=0)
- # Decompose the dict into arrays, send those, and reconstruct it on the
- # receiving side; pickling the dict directly fails when the data gets
- # too big.
- if MPI.COMM_WORLD.rank == 0:
-# data = defaultdict(dict)
-# for i,top_key in enumerate(top_keys):
-# try:
-# old = data[top_key][bot_keys[i]]
-# except KeyError:
-# old = None
-# if old < vals[i]:
-# data[top_key][bot_keys[i]] = vals[i]
-# top_keys = []
-# bot_keys = []
-# vals = []
-# for top_key in data:
-# for bot_key in data[top_key]:
-# top_keys.append(top_key)
-# bot_keys.append(bot_key)
-# vals.append(data[top_key][bot_key])
-# del data
-# top_keys = na.array(top_keys, dtype='int64')
-# bot_keys = na.array(bot_keys, dtype='int64')
-# vals = na.array(vals, dtype='float64')
- size = top_keys.size
- # Broadcast them using array methods
- size = MPI.COMM_WORLD.bcast(size, root=0)
- if MPI.COMM_WORLD.rank != 0:
- top_keys = na.empty(size, dtype='int64')
- bot_keys = na.empty(size, dtype='int64')
- vals = na.empty(size, dtype='float64')
- MPI.COMM_WORLD.Bcast([top_keys,MPI.LONG], root=0)
- MPI.COMM_WORLD.Bcast([bot_keys,MPI.LONG], root=0)
- MPI.COMM_WORLD.Bcast([vals, MPI.DOUBLE], root=0)
- return (top_keys, bot_keys, vals)
-
- @parallel_passthrough
- def __mpi_recvlist(self, data):
- # First we receive, then we make a new list.
- data = ensure_list(data)
- for i in range(1,MPI.COMM_WORLD.size):
- buf = ensure_list(MPI.COMM_WORLD.recv(source=i, tag=0))
- data += buf
- return data
-
- @parallel_passthrough
- def _mpi_catlist(self, data):
- self._barrier()
- if MPI.COMM_WORLD.rank == 0:
- data = self.__mpi_recvlist(data)
- else:
- MPI.COMM_WORLD.send(data, dest=0, tag=0)
- mylog.debug("Opening MPI Broadcast on %s", MPI.COMM_WORLD.rank)
- data = MPI.COMM_WORLD.bcast(data, root=0)
- self._barrier()
- return data
-
- @parallel_passthrough
- def __mpi_recvarrays(self, data):
- # First we receive, then we make a new list.
- for i in range(1,MPI.COMM_WORLD.size):
- buf = _recv_array(source=i, tag=0)
- if buf is not None:
- if data is None: data = buf
- else: data = na.concatenate([data, buf])
- return data
-
- @parallel_passthrough
- def _mpi_cat_na_array(self,data):
- self._barrier()
- comm = MPI.COMM_WORLD
- if comm.rank == 0:
- for i in range(1,comm.size):
- buf = comm.recv(source=i, tag=0)
- data = na.concatenate([data,buf])
- else:
- comm.send(data, 0, tag = 0)
- data = comm.bcast(data, root=0)
- return data
-
- @parallel_passthrough
- def _mpi_catarray(self, data):
- if data is None:
- ncols = -1
- size = 0
- else:
- if len(data) == 0:
- ncols = -1
- size = 0
- elif len(data.shape) == 1:
- ncols = 1
- size = data.shape[0]
- else:
- ncols, size = data.shape
- ncols = MPI.COMM_WORLD.allreduce(ncols, op=MPI.MAX)
- if size == 0:
- data = na.zeros((ncols,0), dtype='float64') # This only works for
- size = data.shape[-1]
- sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
- outsize = na.array(size, dtype='int64')
- MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
- [sizes, 1, MPI.LONG] )
- # This nested concatenate is to get the shapes to work out correctly;
- # if we just add [0] to sizes, it will broadcast a summation, not a
- # concatenation.
- offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
- arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
- data = _alltoallv_array(data, arr_size, offsets, sizes)
- return data
-
- @parallel_passthrough
- def _mpi_bcast_pickled(self, data):
- #self._barrier()
- data = MPI.COMM_WORLD.bcast(data, root=0)
- return data
-
- def _should_i_write(self):
- if not self._distributed: return True
- return (MPI.COMM_WORLD.rank == 0)
-
- def _preload(self, grids, fields, io_handler):
- # This will preload if it detects we are parallel capable and
- # if so, we load *everything* that we need. Use with some care.
- mylog.debug("Preloading %s from %s grids", fields, len(grids))
- if not self._distributed: return
- io_handler.preload(grids, fields)
-
- @parallel_passthrough
- def _mpi_double_array_max(self,data):
- """
- Finds the na.maximum of a distributed array and returns the result
- back to all. The array should be the same length on all tasks!
- """
- self._barrier()
- if MPI.COMM_WORLD.rank == 0:
- recv_data = na.empty(data.size, dtype='float64')
- for i in xrange(1, MPI.COMM_WORLD.size):
- MPI.COMM_WORLD.Recv([recv_data, MPI.DOUBLE], source=i, tag=0)
- data = na.maximum(data, recv_data)
- else:
- MPI.COMM_WORLD.Send([data, MPI.DOUBLE], dest=0, tag=0)
- MPI.COMM_WORLD.Bcast([data, MPI.DOUBLE], root=0)
- return data
-
- @parallel_passthrough
- def _mpi_allsum(self, data):
- #self._barrier()
- # We use old-school pickling here on the assumption the arrays are
- # relatively small ( < 1e7 elements )
- if isinstance(data, na.ndarray) and data.dtype != na.bool:
- tr = na.zeros_like(data)
- if not data.flags.c_contiguous: data = data.copy()
- MPI.COMM_WORLD.Allreduce(data, tr, op=MPI.SUM)
- return tr
- else:
- return MPI.COMM_WORLD.allreduce(data, op=MPI.SUM)
-
- @parallel_passthrough
- def _mpi_Allsum_double(self, data):
- self._barrier()
- # Non-pickling float allsum of a float array, data.
- temp = data.copy()
- MPI.COMM_WORLD.Allreduce([temp, MPI.DOUBLE], [data, MPI.DOUBLE], op=MPI.SUM)
- del temp
- return data
-
- @parallel_passthrough
- def _mpi_Allsum_long(self, data):
- self._barrier()
- # Non-pickling allsum of an int array, data.
- temp = data.copy()
- MPI.COMM_WORLD.Allreduce([temp, MPI.LONG], [data, MPI.LONG], op=MPI.SUM)
- del temp
- return data
-
- @parallel_passthrough
- def _mpi_allmax(self, data):
- self._barrier()
- return MPI.COMM_WORLD.allreduce(data, op=MPI.MAX)
-
- @parallel_passthrough
- def _mpi_allmin(self, data):
- self._barrier()
- return MPI.COMM_WORLD.allreduce(data, op=MPI.MIN)
-
- ###
- # Non-blocking stuff.
- ###
-
- def _mpi_Irecv_long(self, data, source, tag=0):
- if not self._distributed: return -1
- return MPI.COMM_WORLD.Irecv([data, MPI.LONG], source, tag)
-
- def _mpi_Irecv_double(self, data, source, tag=0):
- if not self._distributed: return -1
- return MPI.COMM_WORLD.Irecv([data, MPI.DOUBLE], source, tag)
-
- def _mpi_Isend_long(self, data, dest, tag=0):
- if not self._distributed: return -1
- return MPI.COMM_WORLD.Isend([data, MPI.LONG], dest, tag)
-
- def _mpi_Isend_double(self, data, dest, tag=0):
- if not self._distributed: return -1
- return MPI.COMM_WORLD.Isend([data, MPI.DOUBLE], dest, tag)
-
- def _mpi_Request_Waitall(self, hooks):
- if not self._distributed: return
- MPI.Request.Waitall(hooks)
-
- def _mpi_Request_Waititer(self, hooks):
- for i in xrange(len(hooks)):
- req = MPI.Request.Waitany(hooks)
- yield req
-
- def _mpi_Request_Testall(self, hooks):
- """
- This returns False if any of the request hooks are unfinished,
- and True if they are all finished.
- """
- if not self._distributed: return True
- return MPI.Request.Testall(hooks)
-
- ###
- # End non-blocking stuff.
- ###
-
- def _mpi_get_size(self):
- if not self._distributed: return 1
- return MPI.COMM_WORLD.size
-
- def _mpi_get_rank(self):
- if not self._distributed: return 0
- return MPI.COMM_WORLD.rank
-
- def _mpi_info_dict(self, info):
- if not self._distributed: return 0, {0:info}
- self._barrier()
- data = None
- if MPI.COMM_WORLD.rank == 0:
- data = {0:info}
- for i in range(1, MPI.COMM_WORLD.size):
- data[i] = MPI.COMM_WORLD.recv(source=i, tag=0)
- else:
- MPI.COMM_WORLD.send(info, dest=0, tag=0)
- mylog.debug("Opening MPI Broadcast on %s", MPI.COMM_WORLD.rank)
- data = MPI.COMM_WORLD.bcast(data, root=0)
- self._barrier()
- return MPI.COMM_WORLD.rank, data
-
- def _get_dependencies(self, fields):
- deps = []
- fi = self.pf.field_info
- for field in fields:
- deps += ensure_list(fi[field].get_dependencies(pf=self.pf).requested)
- return list(set(deps))
-
- def _claim_object(self, obj):
- if not self._distributed: return
- obj._owner = MPI.COMM_WORLD.rank
- obj._distributed = True
-
- def _do_not_claim_object(self, obj):
- if not self._distributed: return
- obj._owner = -1
- obj._distributed = True
-
- def _write_on_root(self, fn):
- if not self._distributed: return open(fn, "w")
- if MPI.COMM_WORLD.rank == 0:
- return open(fn, "w")
- else:
- return cStringIO.StringIO()
-
- def _get_filename(self, prefix, rank=None):
- if not self._distributed: return prefix
- if rank is None:
- return "%s_%04i" % (prefix, MPI.COMM_WORLD.rank)
- else:
- return "%s_%04i" % (prefix, rank)
-
- def _is_mine(self, obj):
- if not obj._distributed: return True
- return (obj._owner == MPI.COMM_WORLD.rank)
-
- def _send_quadtree(self, target, qt, tgd, args):
- sizebuf = na.zeros(1, 'int64')
- buf = qt.tobuffer()
- sizebuf[0] = buf[0].size
- MPI.COMM_WORLD.Send([sizebuf, MPI.LONG], dest=target)
- MPI.COMM_WORLD.Send([buf[0], MPI.INT], dest=target)
- MPI.COMM_WORLD.Send([buf[1], MPI.DOUBLE], dest=target)
- MPI.COMM_WORLD.Send([buf[2], MPI.DOUBLE], dest=target)
-
- def _recv_quadtree(self, target, tgd, args):
- sizebuf = na.zeros(1, 'int64')
- MPI.COMM_WORLD.Recv(sizebuf, source=target)
- buf = [na.empty((sizebuf[0],), 'int32'),
- na.empty((sizebuf[0], args[2]),'float64'),
- na.empty((sizebuf[0],),'float64')]
- MPI.COMM_WORLD.Recv([buf[0], MPI.INT], source=target)
- MPI.COMM_WORLD.Recv([buf[1], MPI.DOUBLE], source=target)
- MPI.COMM_WORLD.Recv([buf[2], MPI.DOUBLE], source=target)
- qt = QuadTree(tgd, args[2])
- qt.frombuffer(*buf)
- return qt
-
- @parallel_passthrough
- def merge_quadtree_buffers(self, qt):
- # This is a modified version of pairwise reduction from Lisandro Dalcin,
- # in the reductions demo of mpi4py
- size = MPI.COMM_WORLD.size
- rank = MPI.COMM_WORLD.rank
-
- mask = 1
-
- args = qt.get_args() # Will always be the same
- tgd = na.array([args[0], args[1]], dtype='int64')
- sizebuf = na.zeros(1, 'int64')
-
- while mask < size:
- if (mask & rank) != 0:
- target = (rank & ~mask) % size
- #print "SENDING FROM %02i to %02i" % (rank, target)
- self._send_quadtree(target, qt, tgd, args)
- #qt = self._recv_quadtree(target, tgd, args)
- else:
- target = (rank | mask)
- if target < size:
- #print "RECEIVING FROM %02i on %02i" % (target, rank)
- qto = self._recv_quadtree(target, tgd, args)
- merge_quadtrees(qt, qto)
- del qto
- #self._send_quadtree(target, qt, tgd, args)
- mask <<= 1
-
- if rank == 0:
- buf = qt.tobuffer()
- sizebuf[0] = buf[0].size
- MPI.COMM_WORLD.Bcast([sizebuf, MPI.LONG], root=0)
- if rank != 0:
- buf = [na.empty((sizebuf[0],), 'int32'),
- na.empty((sizebuf[0], args[2]),'float64'),
- na.empty((sizebuf[0],),'float64')]
- MPI.COMM_WORLD.Bcast([buf[0], MPI.INT], root=0)
- MPI.COMM_WORLD.Bcast([buf[1], MPI.DOUBLE], root=0)
- MPI.COMM_WORLD.Bcast([buf[2], MPI.DOUBLE], root=0)
- self.refined = buf[0]
- if rank != 0:
- qt = QuadTree(tgd, args[2])
- qt.frombuffer(*buf)
- return qt
-
-__tocast = 'c'
-
-def _send_array(arr, dest, tag = 0):
- if not isinstance(arr, na.ndarray):
- MPI.COMM_WORLD.send((None,None), dest=dest, tag=tag)
- MPI.COMM_WORLD.send(arr, dest=dest, tag=tag)
- return
- tmp = arr.view(__tocast) # Cast to CHAR
- # communicate type and shape
- MPI.COMM_WORLD.send((arr.dtype.str, arr.shape), dest=dest, tag=tag)
- MPI.COMM_WORLD.Send([arr, MPI.CHAR], dest=dest, tag=tag)
- del tmp
-
-def _recv_array(source, tag = 0):
- dt, ne = MPI.COMM_WORLD.recv(source=source, tag=tag)
- if dt is None and ne is None:
- return MPI.COMM_WORLD.recv(source=source, tag=tag)
- arr = na.empty(ne, dtype=dt)
- tmp = arr.view(__tocast)
- MPI.COMM_WORLD.Recv([tmp, MPI.CHAR], source=source, tag=tag)
- return arr
-
-def _bcast_array(arr, root = 0):
- if MPI.COMM_WORLD.rank == root:
- tmp = arr.view(__tocast) # Cast to CHAR
- MPI.COMM_WORLD.bcast((arr.dtype.str, arr.shape), root=root)
- else:
- dt, ne = MPI.COMM_WORLD.bcast(None, root=root)
- arr = na.empty(ne, dtype=dt)
- tmp = arr.view(__tocast)
- MPI.COMM_WORLD.Bcast([tmp, MPI.CHAR], root=root)
- return arr
-
-def _alltoallv_array(send, total_size, offsets, sizes):
- if len(send.shape) > 1:
- recv = []
- for i in range(send.shape[0]):
- recv.append(_alltoallv_array(send[i,:].copy(), total_size, offsets, sizes))
- recv = na.array(recv)
- return recv
- offset = offsets[MPI.COMM_WORLD.rank]
- tmp_send = send.view(__tocast)
- recv = na.empty(total_size, dtype=send.dtype)
- recv[offset:offset+send.size] = send[:]
- dtr = send.dtype.itemsize / tmp_send.dtype.itemsize # > 1
- roff = [off * dtr for off in offsets]
- rsize = [siz * dtr for siz in sizes]
- tmp_recv = recv.view(__tocast)
- MPI.COMM_WORLD.Allgatherv((tmp_send, tmp_send.size, MPI.CHAR),
- (tmp_recv, (rsize, roff), MPI.CHAR))
- return recv
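
The deleted helpers above share a single idiom: a pickled size handshake
followed by a typed buffer transfer. A minimal sketch of that root-side
gather, assuming mpi4py and NumPy are available (the function name
gather_on_root is illustrative, not yt API):

    import numpy as np
    from mpi4py import MPI

    def gather_on_root(data):
        # Concatenate variable-length float64 arrays onto rank 0.
        comm = MPI.COMM_WORLD
        if comm.rank == 0:
            for i in range(1, comm.size):
                size = comm.recv(source=i, tag=0)              # pickled size handshake
                buf = np.empty(size, dtype='float64')
                comm.Recv([buf, MPI.DOUBLE], source=i, tag=0)  # typed payload
                data = np.concatenate((data, buf))
        else:
            comm.send(data.size, dest=0, tag=0)
            comm.Send([data, MPI.DOUBLE], dest=0, tag=0)
        return data
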
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/utilities/peewee.py
--- /dev/null
+++ b/yt/utilities/peewee.py
@@ -0,0 +1,1425 @@
+# (\
+# ( \ /(o)\ caw!
+# ( \/ ()/ /)
+# ( `;.))'".)
+# `(/////.-'
+# =====))=))===()
+# ///'
+# //
+# '
+from datetime import datetime
+import logging
+import os
+import re
+import time
+
+try:
+ import sqlite3
+except ImportError:
+ sqlite3 = None
+
+try:
+ import psycopg2
+except ImportError:
+ psycopg2 = None
+
+try:
+ import MySQLdb as mysql
+except ImportError:
+ mysql = None
+
+class ImproperlyConfigured(Exception):
+ pass
+
+if sqlite3 is None and psycopg2 is None and mysql is None:
+ raise ImproperlyConfigured('Either sqlite3, psycopg2 or MySQLdb must be installed')
+
+
+DATABASE_NAME = os.environ.get('PEEWEE_DATABASE', 'peewee.db')
+logger = logging.getLogger('peewee.logger')
+
+
+class BaseAdapter(object):
+ """
+ The various subclasses of `BaseAdapter` provide a bridge between the high-
+ level `Database` abstraction and the underlying python libraries like
+ psycopg2. Each also provides a way to unify the pythonic field types with
+ the underlying column types used by the database engine.
+
+ The `BaseAdapter` provides two types of mappings:
+ - mapping between filter operations and their database equivalents
+ - mapping between basic field types and their database column types
+
+ The `BaseAdapter` is also the mechanism used by the `Database` class to:
+ - handle connections with the database
+ - extract information from the database cursor
+ """
+ operations = {'eq': '= %s'}
+ interpolation = '%s'
+
+ def get_field_types(self):
+ field_types = {
+ 'integer': 'INTEGER',
+ 'float': 'REAL',
+ 'decimal': 'NUMERIC',
+ 'string': 'VARCHAR',
+ 'text': 'TEXT',
+ 'datetime': 'DATETIME',
+ 'primary_key': 'INTEGER',
+ 'foreign_key': 'INTEGER',
+ 'boolean': 'SMALLINT',
+ }
+ field_types.update(self.get_field_overrides())
+ return field_types
+
+ def get_field_overrides(self):
+ return {}
+
+ def connect(self, database, **kwargs):
+ raise NotImplementedError
+
+ def close(self, conn):
+ conn.close()
+
+ def lookup_cast(self, lookup, value):
+ if lookup in ('contains', 'icontains'):
+ return '%%%s%%' % value
+ elif lookup in ('startswith', 'istartswith'):
+ return '%s%%' % value
+ return value
+
+ def last_insert_id(self, cursor, model):
+ return cursor.lastrowid
+
+ def rows_affected(self, cursor):
+ return cursor.rowcount
+
+
+class SqliteAdapter(BaseAdapter):
+ # note the sqlite library uses a non-standard interpolation string
+ operations = {
+ 'lt': '< ?',
+ 'lte': '<= ?',
+ 'gt': '> ?',
+ 'gte': '>= ?',
+ 'eq': '= ?',
+ 'ne': '!= ?', # watch yourself with this one
+ 'in': 'IN (%s)', # special-case to list q-marks
+ 'is': 'IS ?',
+ 'icontains': "LIKE ? ESCAPE '\\'", # surround param with %'s
+ 'contains': "GLOB ?", # surround param with *'s
+ 'istartswith': "LIKE ? ESCAPE '\\'",
+ 'startswith': "GLOB ?",
+ }
+ interpolation = '?'
+
+ def connect(self, database, **kwargs):
+ return sqlite3.connect(database, **kwargs)
+
+ def lookup_cast(self, lookup, value):
+ if lookup == 'contains':
+ return '*%s*' % value
+ elif lookup == 'icontains':
+ return '%%%s%%' % value
+ elif lookup == 'startswith':
+ return '%s*' % value
+ elif lookup == 'istartswith':
+ return '%s%%' % value
+ return value
+
+
+class PostgresqlAdapter(BaseAdapter):
+ operations = {
+ 'lt': '< %s',
+ 'lte': '<= %s',
+ 'gt': '> %s',
+ 'gte': '>= %s',
+ 'eq': '= %s',
+ 'ne': '!= %s', # watch yourself with this one
+ 'in': 'IN (%s)', # special-case to list q-marks
+ 'is': 'IS %s',
+ 'icontains': 'ILIKE %s', # surround param with %'s
+ 'contains': 'LIKE %s', # surround param with *'s
+ 'istartswith': 'ILIKE %s',
+ 'startswith': 'LIKE %s',
+ }
+
+ def connect(self, database, **kwargs):
+ return psycopg2.connect(database=database, **kwargs)
+
+ def get_field_overrides(self):
+ return {
+ 'primary_key': 'SERIAL',
+ 'datetime': 'TIMESTAMP'
+ }
+
+ def last_insert_id(self, cursor, model):
+ cursor.execute("SELECT CURRVAL('\"%s_%s_seq\"')" % (
+ model._meta.db_table, model._meta.pk_name))
+ return cursor.fetchone()[0]
+
+
+class MySQLAdapter(BaseAdapter):
+ operations = {
+ 'lt': '< %s',
+ 'lte': '<= %s',
+ 'gt': '> %s',
+ 'gte': '>= %s',
+ 'eq': '= %s',
+ 'ne': '!= %s', # watch yourself with this one
+ 'in': 'IN (%s)', # special-case to list q-marks
+ 'is': 'IS %s',
+ 'icontains': 'LIKE %s', # surround param with %'s
+ 'contains': 'LIKE BINARY %s', # surround param with *'s
+ 'istartswith': 'LIKE %s',
+ 'startswith': 'LIKE BINARY %s',
+ }
+
+ def connect(self, database, **kwargs):
+ return mysql.connect(db=database, **kwargs)
+
+ def get_field_overrides(self):
+ return {
+ 'primary_key': 'integer AUTO_INCREMENT',
+ 'boolean': 'bool',
+ 'float': 'double precision',
+ 'text': 'longtext',
+ }
+
+
+class Database(object):
+ """
+ A high-level api for working with the supported database engines. `Database`
+ provides a wrapper around some of the functions performed by the `Adapter`,
+ in addition to providing support for:
+ - execution of SQL queries
+ - creating and dropping tables and indexes
+ """
+ def __init__(self, adapter, database, **connect_kwargs):
+ self.adapter = adapter
+ self.database = database
+ self.connect_kwargs = connect_kwargs
+
+ def connect(self):
+ self.conn = self.adapter.connect(self.database, **self.connect_kwargs)
+
+ def close(self):
+ self.adapter.close(self.conn)
+
+ def execute(self, sql, params=None, commit=False):
+ cursor = self.conn.cursor()
+ res = cursor.execute(sql, params or ())
+ if commit:
+ self.conn.commit()
+ logger.debug((sql, params))
+ return cursor
+
+ def last_insert_id(self, cursor, model):
+ return self.adapter.last_insert_id(cursor, model)
+
+ def rows_affected(self, cursor):
+ return self.adapter.rows_affected(cursor)
+
+ def column_for_field(self, db_field):
+ try:
+ return self.adapter.get_field_types()[db_field]
+ except KeyError:
+ raise AttributeError('Unknown field type: "%s", valid types are: %s' % (
+ db_field, ', '.join(self.adapter.get_field_types().keys())))
+
+ def create_table(self, model_class):
+ framing = "CREATE TABLE %s (%s);"
+ columns = []
+
+ for field in model_class._meta.fields.values():
+ columns.append(field.to_sql())
+
+ query = framing % (model_class._meta.db_table, ', '.join(columns))
+
+ self.execute(query, commit=True)
+
+ def create_index(self, model_class, field, unique=False):
+ framing = 'CREATE %(unique)s INDEX %(model)s_%(field)s ON %(model)s(%(field)s);'
+
+ if field not in model_class._meta.fields:
+ raise AttributeError(
+ 'Field %s not on model %s' % (field, model_class)
+ )
+
+ unique_expr = ternary(unique, 'UNIQUE', '')
+
+ query = framing % {
+ 'unique': unique_expr,
+ 'model': model_class._meta.db_table,
+ 'field': field
+ }
+
+ self.execute(query, commit=True)
+
+ def drop_table(self, model_class, fail_silently=False):
+ framing = fail_silently and 'DROP TABLE IF EXISTS %s;' or 'DROP TABLE %s;'
+ self.execute(framing % model_class._meta.db_table, commit=True)
+
+ def get_indexes_for_table(self, table):
+ raise NotImplementedError
+
+
+class SqliteDatabase(Database):
+ def __init__(self, database, **connect_kwargs):
+ super(SqliteDatabase, self).__init__(SqliteAdapter(), database, **connect_kwargs)
+
+ def get_indexes_for_table(self, table):
+ res = self.execute('PRAGMA index_list(%s);' % table)
+ rows = sorted([(r[1], r[2] == 1) for r in res.fetchall()])
+ return rows
+
+
+class PostgresqlDatabase(Database):
+ def __init__(self, database, **connect_kwargs):
+ super(PostgresqlDatabase, self).__init__(PostgresqlAdapter(), database, **connect_kwargs)
+
+ def get_indexes_for_table(self, table):
+ res = self.execute("""
+ SELECT c2.relname, i.indisprimary, i.indisunique
+ FROM pg_catalog.pg_class c, pg_catalog.pg_class c2, pg_catalog.pg_index i
+ WHERE c.relname = %s AND c.oid = i.indrelid AND i.indexrelid = c2.oid
+ ORDER BY i.indisprimary DESC, i.indisunique DESC, c2.relname""", (table,))
+ return sorted([(r[0], r[1]) for r in res.fetchall()])
+
+class MySQLDatabase(Database):
+ def __init__(self, database, **connect_kwargs):
+ super(MySQLDatabase, self).__init__(MySQLAdapter(), database, **connect_kwargs)
+
+ def get_indexes_for_table(self, table):
+ res = self.execute('SHOW INDEXES IN %s;' % table)
+ rows = sorted([(r[2], r[1] == 0) for r in res.fetchall()])
+ return rows
+
+
+class QueryResultWrapper(object):
+ """
+ Provides an iterator over the results of a raw Query, additionally doing
+ two things:
+ - converts rows from the database into model instances
+ - ensures that multiple iterations do not result in multiple queries
+ """
+ def __init__(self, model, cursor):
+ self.model = model
+ self.cursor = cursor
+ self._result_cache = []
+ self._populated = False
+
+ def model_from_rowset(self, model_class, row_dict):
+ instance = model_class()
+ for attr, value in row_dict.iteritems():
+ if attr in instance._meta.fields:
+ field = instance._meta.fields[attr]
+ setattr(instance, attr, field.python_value(value))
+ else:
+ setattr(instance, attr, value)
+ return instance
+
+ def _row_to_dict(self, row, result_cursor):
+ return dict((result_cursor.description[i][0], value)
+ for i, value in enumerate(row))
+
+ def __iter__(self):
+ if not self._populated:
+ return self
+ else:
+ return iter(self._result_cache)
+
+ def next(self):
+ row = self.cursor.fetchone()
+ if row:
+ row_dict = self._row_to_dict(row, self.cursor)
+ instance = self.model_from_rowset(self.model, row_dict)
+ self._result_cache.append(instance)
+ return instance
+ else:
+ self._populated = True
+ raise StopIteration
+
+
+# create
+class DoesNotExist(Exception):
+ pass
+
+
+# semantic wrappers for ordering the results of a `SelectQuery`
+def asc(f):
+ return (f, 'ASC')
+
+def desc(f):
+ return (f, 'DESC')
+
+# wrappers for performing aggregation in a `SelectQuery`
+def Count(f, alias='count'):
+ return ('COUNT', f, alias)
+
+def Max(f, alias='max'):
+ return ('MAX', f, alias)
+
+def Min(f, alias='min'):
+ return ('MIN', f, alias)
+
+# decorator for query methods: the wrapped method mutates and returns a clone
+# of the query, leaving the original query object untouched
+def returns_clone(func):
+ def inner(self, *args, **kwargs):
+ clone = self.clone()
+ res = func(clone, *args, **kwargs)
+ return clone
+ return inner
+
+# helpers
+ternary = lambda cond, t, f: (cond and [t] or [f])[0]
+
+
+class Node(object):
+ def __init__(self, connector='AND'):
+ self.connector = connector
+ self.children = []
+ self.negated = False
+
+ def connect(self, rhs, connector):
+ if isinstance(rhs, Q):
+ if connector == self.connector:
+ self.children.append(rhs)
+ return self
+ else:
+ p = Node(connector)
+ p.children = [self, rhs]
+ return p
+ elif isinstance(rhs, Node):
+ p = Node(connector)
+ p.children = [self, rhs]
+ return p
+
+ def __or__(self, rhs):
+ return self.connect(rhs, 'OR')
+
+ def __and__(self, rhs):
+ return self.connect(rhs, 'AND')
+
+ def __invert__(self):
+ self.negated = not self.negated
+ return self
+
+ def __unicode__(self):
+ query = []
+ nodes = []
+ for child in self.children:
+ if isinstance(child, Q):
+ query.append(unicode(child))
+ elif isinstance(child, Node):
+ nodes.append('(%s)' % unicode(child))
+ query.extend(nodes)
+ connector = ' %s ' % self.connector
+ query = connector.join(query)
+ if self.negated:
+ query = 'NOT %s' % query
+ return query
+
+
+class Q(object):
+ def __init__(self, **kwargs):
+ self.query = kwargs
+ self.parent = None
+ self.negated = False
+
+ def connect(self, connector):
+ if self.parent is None:
+ self.parent = Node(connector)
+ self.parent.children.append(self)
+
+ def __or__(self, rhs):
+ self.connect('OR')
+ return self.parent | rhs
+
+ def __and__(self, rhs):
+ self.connect('AND')
+ return self.parent & rhs
+
+ def __invert__(self):
+ self.negated = not self.negated
+ return self
+
+ def __unicode__(self):
+ bits = ['%s = %s' % (k, v) for k, v in self.query.items()]
+ if len(self.query.items()) > 1:
+ connector = ' AND '
+ expr = '(%s)' % connector.join(bits)
+ else:
+ expr = bits[0]
+ if self.negated:
+ expr = 'NOT %s' % expr
+ return expr
+
+
+def parseq(*args, **kwargs):
+ node = Node()
+
+ for piece in args:
+ if isinstance(piece, (Q, Node)):
+ node.children.append(piece)
+ else:
+ raise TypeError('Unknown object: %s', piece)
+
+ if kwargs:
+ node.children.append(Q(**kwargs))
+
+ return node
+
+
+class EmptyResultException(Exception):
+ pass
+
+
+class BaseQuery(object):
+ query_separator = '__'
+ requires_commit = True
+ force_alias = False
+
+ def __init__(self, model):
+ self.model = model
+ self.query_context = model
+ self.database = self.model._meta.database
+ self.operations = self.database.adapter.operations
+ self.interpolation = self.database.adapter.interpolation
+
+ self._dirty = True
+ self._where = {}
+ self._joins = []
+
+ def clone(self):
+ raise NotImplementedError
+
+ def lookup_cast(self, lookup, value):
+ return self.database.adapter.lookup_cast(lookup, value)
+
+ def parse_query_args(self, model, **query):
+ parsed = {}
+ for lhs, rhs in query.iteritems():
+ if self.query_separator in lhs:
+ lhs, op = lhs.rsplit(self.query_separator, 1)
+ else:
+ op = 'eq'
+
+ try:
+ field = model._meta.get_field_by_name(lhs)
+ except AttributeError:
+ field = model._meta.get_related_field_by_name(lhs)
+ if field is None:
+ raise
+ if isinstance(rhs, Model):
+ rhs = rhs.get_pk()
+
+ if op == 'in':
+ if isinstance(rhs, SelectQuery):
+ lookup_value = rhs
+ operation = 'IN (%s)'
+ else:
+ if not rhs:
+ raise EmptyResultException
+ lookup_value = [field.db_value(o) for o in rhs]
+ operation = self.operations[op] % \
+ (','.join([self.interpolation for v in lookup_value]))
+ elif op == 'is':
+ if rhs is not None:
+ raise ValueError('__is lookups only accept None')
+ operation = 'IS NULL'
+ lookup_value = []
+ else:
+ lookup_value = field.db_value(rhs)
+ operation = self.operations[op]
+
+ parsed[field.name] = (operation, self.lookup_cast(op, lookup_value))
+
+ return parsed
+
+ @returns_clone
+ def where(self, *args, **kwargs):
+ self._where.setdefault(self.query_context, [])
+ self._where[self.query_context].append(parseq(*args, **kwargs))
+
+ @returns_clone
+ def join(self, model, join_type=None, on=None):
+ if self.query_context._meta.rel_exists(model):
+ self._joins.append((model, join_type, on))
+ self.query_context = model
+ else:
+ raise AttributeError('No foreign key found between %s and %s' % \
+ (self.query_context.__name__, model.__name__))
+
+ @returns_clone
+ def switch(self, model):
+ if model == self.model:
+ self.query_context = model
+ return
+
+ for klass, join_type, on in self._joins:
+ if model == klass:
+ self.query_context = model
+ return
+ raise AttributeError('You must JOIN on %s' % model.__name__)
+
+ def use_aliases(self):
+ return len(self._joins) > 0 or self.force_alias
+
+ def combine_field(self, alias, field_name):
+ if alias:
+ return '%s.%s' % (alias, field_name)
+ return field_name
+
+ def compile_where(self):
+ alias_count = 0
+ alias_map = {}
+
+ alias_required = self.use_aliases()
+
+ joins = list(self._joins)
+ if self._where or len(joins):
+ joins.insert(0, (self.model, None, None))
+
+ where_with_alias = []
+ where_data = []
+ computed_joins = []
+
+ for i, (model, join_type, on) in enumerate(joins):
+ if alias_required:
+ alias_count += 1
+ alias_map[model] = 't%d' % alias_count
+ else:
+ alias_map[model] = ''
+
+ if i > 0:
+ from_model = joins[i-1][0]
+ field = from_model._meta.get_related_field_for_model(model, on)
+ if field:
+ left_field = field.name
+ right_field = model._meta.pk_name
+ else:
+ field = from_model._meta.get_reverse_related_field_for_model(model, on)
+ left_field = from_model._meta.pk_name
+ right_field = field.name
+
+ if join_type is None:
+ if field.null and model not in self._where:
+ join_type = 'LEFT OUTER'
+ else:
+ join_type = 'INNER'
+
+ computed_joins.append(
+ '%s JOIN %s AS %s ON %s = %s' % (
+ join_type,
+ model._meta.db_table,
+ alias_map[model],
+ self.combine_field(alias_map[from_model], left_field),
+ self.combine_field(alias_map[model], right_field),
+ )
+ )
+
+ for (model, join_type, on) in joins:
+ if model in self._where:
+ for node in self._where[model]:
+ query, data = self.parse_node(node, model, alias_map)
+ where_with_alias.append(query)
+ where_data.extend(data)
+
+ return computed_joins, where_with_alias, where_data, alias_map
+
+ def convert_where_to_params(self, where_data):
+ flattened = []
+ for clause in where_data:
+ if isinstance(clause, (tuple, list)):
+ flattened.extend(clause)
+ else:
+ flattened.append(clause)
+ return flattened
+
+ def parse_node(self, node, model, alias_map):
+ query = []
+ query_data = []
+ nodes = []
+ for child in node.children:
+ if isinstance(child, Q):
+ parsed, data = self.parse_q(child, model, alias_map)
+ query.append(parsed)
+ query_data.extend(data)
+ elif isinstance(child, Node):
+ parsed, data = self.parse_node(child, model, alias_map)
+ query.append('(%s)' % parsed)
+ query_data.extend(data)
+ query.extend(nodes)
+ connector = ' %s ' % node.connector
+ query = connector.join(query)
+ if node.negated:
+ query = 'NOT (%s)' % query
+ return query, query_data
+
+ def parse_q(self, q, model, alias_map):
+ query = []
+ query_data = []
+ parsed = self.parse_query_args(model, **q.query)
+ for (name, lookup) in parsed.iteritems():
+ operation, value = lookup
+ if isinstance(value, SelectQuery):
+ sql, value = self.convert_subquery(value)
+ operation = operation % sql
+
+ query_data.append(value)
+
+ combined = self.combine_field(alias_map[model], name)
+ query.append('%s %s' % (combined, operation))
+
+ if len(query) > 1:
+ query = '(%s)' % (' AND '.join(query))
+ else:
+ query = query[0]
+
+ if q.negated:
+ query = 'NOT %s' % query
+
+ return query, query_data
+
+ def convert_subquery(self, subquery):
+ subquery.query, orig_query = subquery.model._meta.pk_name, subquery.query
+ subquery.force_alias, orig_alias = True, subquery.force_alias
+ sql, data = subquery.sql()
+ subquery.query = orig_query
+ subquery.force_alias = orig_alias
+ return sql, data
+
+ def raw_execute(self):
+ query, params = self.sql()
+ return self.database.execute(query, params, self.requires_commit)
+
+
+class RawQuery(BaseQuery):
+ def __init__(self, model, query, *params):
+ self._sql = query
+ self._params = list(params)
+ super(RawQuery, self).__init__(model)
+
+ def sql(self):
+ return self._sql, self._params
+
+ def execute(self):
+ return QueryResultWrapper(self.model, self.raw_execute())
+
+ def join(self):
+ raise AttributeError('Raw queries do not support joining programmatically')
+
+ def where(self):
+ raise AttributeError('Raw queries do not support querying programmatically')
+
+ def switch(self):
+ raise AttributeError('Raw queries do not support switching contexts')
+
+ def __iter__(self):
+ return self.execute()
+
+
+class SelectQuery(BaseQuery):
+ requires_commit = False
+
+ def __init__(self, model, query=None):
+ self.query = query or '*'
+ self._group_by = []
+ self._having = []
+ self._order_by = []
+ self._pagination = None # return all by default
+ self._distinct = False
+ self._qr = None
+ super(SelectQuery, self).__init__(model)
+
+ def clone(self):
+ query = SelectQuery(self.model, self.query)
+ query.query_context = self.query_context
+ query._group_by = list(self._group_by)
+ query._having = list(self._having)
+ query._order_by = list(self._order_by)
+ query._pagination = self._pagination and tuple(self._pagination) or None
+ query._distinct = self._distinct
+ query._qr = self._qr
+ query._where = dict(self._where)
+ query._joins = list(self._joins)
+ return query
+
+ @returns_clone
+ def paginate(self, page_num, paginate_by=20):
+ self._pagination = (page_num, paginate_by)
+
+ def count(self):
+ tmp_pagination = self._pagination
+ self._pagination = None
+
+ tmp_query = self.query
+
+ if self.use_aliases():
+ self.query = 'COUNT(t1.%s)' % (self.model._meta.pk_name)
+ else:
+ self.query = 'COUNT(%s)' % (self.model._meta.pk_name)
+
+ res = self.database.execute(*self.sql())
+
+ self.query = tmp_query
+ self._pagination = tmp_pagination
+
+ return res.fetchone()[0]
+
+ @returns_clone
+ def group_by(self, clause):
+ model = self.query_context
+
+ if isinstance(clause, basestring):
+ fields = (clause,)
+ elif isinstance(clause, (list, tuple)):
+ fields = clause
+ elif issubclass(clause, Model):
+ model = clause
+ fields = clause._meta.get_field_names()
+
+ self._group_by.append((model, fields))
+
+ @returns_clone
+ def having(self, clause):
+ self._having.append(clause)
+
+ @returns_clone
+ def distinct(self):
+ self._distinct = True
+
+ @returns_clone
+ def order_by(self, field_or_string):
+ if isinstance(field_or_string, tuple):
+ field_or_string, ordering = field_or_string
+ else:
+ ordering = 'ASC'
+
+ self._order_by.append(
+ (self.query_context, field_or_string, ordering)
+ )
+
+ def parse_select_query(self, alias_map):
+ if isinstance(self.query, basestring):
+ if self.query in ('*', self.model._meta.pk_name) and self.use_aliases():
+ return '%s.%s' % (alias_map[self.model], self.query)
+ return self.query
+ elif isinstance(self.query, dict):
+ qparts = []
+ aggregates = []
+ for model, cols in self.query.iteritems():
+ alias = alias_map.get(model, '')
+ for col in cols:
+ if isinstance(col, tuple):
+ func, col, col_alias = col
+ aggregates.append('%s(%s) AS %s' % \
+ (func, self.combine_field(alias, col), col_alias)
+ )
+ else:
+ qparts.append(self.combine_field(alias, col))
+ return ', '.join(qparts + aggregates)
+ else:
+ raise TypeError('Unknown type encountered parsing select query')
+
+ def sql(self):
+ joins, where, where_data, alias_map = self.compile_where()
+
+ table = self.model._meta.db_table
+
+ params = []
+ group_by = []
+
+ if self.use_aliases():
+ table = '%s AS %s' % (table, alias_map[self.model])
+ for model, clause in self._group_by:
+ alias = alias_map[model]
+ for field in clause:
+ group_by.append(self.combine_field(alias, field))
+ else:
+ group_by = [f for c in self._group_by for f in c[1]]
+
+ parsed_query = self.parse_select_query(alias_map)
+
+ if self._distinct:
+ sel = 'SELECT DISTINCT'
+ else:
+ sel = 'SELECT'
+
+ select = '%s %s FROM %s' % (sel, parsed_query, table)
+ joins = '\n'.join(joins)
+ where = ' AND '.join(where)
+ group_by = ', '.join(group_by)
+ having = ' AND '.join(self._having)
+
+ order_by = []
+ for piece in self._order_by:
+ model, field, ordering = piece
+ if self.use_aliases() and field in model._meta.fields:
+ field = '%s.%s' % (alias_map[model], field)
+ order_by.append('%s %s' % (field, ordering))
+
+ pieces = [select]
+
+ if joins:
+ pieces.append(joins)
+ if where:
+ pieces.append('WHERE %s' % where)
+ params.extend(self.convert_where_to_params(where_data))
+
+ if group_by:
+ pieces.append('GROUP BY %s' % group_by)
+ if having:
+ pieces.append('HAVING %s' % having)
+ if order_by:
+ pieces.append('ORDER BY %s' % ', '.join(order_by))
+ if self._pagination:
+ page, paginate_by = self._pagination
+ if page > 0:
+ page -= 1
+ pieces.append('LIMIT %d OFFSET %d' % (paginate_by, page * paginate_by))
+
+ return ' '.join(pieces), params
+
+ def execute(self):
+ if self._dirty or not self._qr:
+ try:
+ self._qr = QueryResultWrapper(self.model, self.raw_execute())
+ self._dirty = False
+ return self._qr
+ except EmptyResultException:
+ return iter([])
+ else:
+ # call the __iter__ method directly
+ return iter(self._qr)
+
+ def __iter__(self):
+ return self.execute()
+
+
+class UpdateQuery(BaseQuery):
+ def __init__(self, model, **kwargs):
+ self.update_query = kwargs
+ super(UpdateQuery, self).__init__(model)
+
+ def clone(self):
+ query = UpdateQuery(self.model, **self.update_query)
+ query._where = dict(self._where)
+ query._joins = list(self._joins)
+ return query
+
+ def parse_update(self):
+ sets = {}
+ for k, v in self.update_query.iteritems():
+ try:
+ field = self.model._meta.get_field_by_name(k)
+ except AttributeError:
+ field = self.model._meta.get_related_field_by_name(k)
+ if field is None:
+ raise
+
+ sets[field.name] = field.db_value(v)
+
+ return sets
+
+ def sql(self):
+ joins, where, where_data, alias_map = self.compile_where()
+ set_statement = self.parse_update()
+
+ params = []
+ update_params = []
+
+ for k, v in set_statement.iteritems():
+ params.append(v)
+ update_params.append('%s=%s' % (k, self.interpolation))
+
+ update = 'UPDATE %s SET %s' % (
+ self.model._meta.db_table, ', '.join(update_params))
+ where = ' AND '.join(where)
+
+ pieces = [update]
+
+ if where:
+ pieces.append('WHERE %s' % where)
+ params.extend(self.convert_where_to_params(where_data))
+
+ return ' '.join(pieces), params
+
+ def join(self, *args, **kwargs):
+ raise AttributeError('Update queries do not support JOINs in sqlite')
+
+ def execute(self):
+ result = self.raw_execute()
+ return self.database.rows_affected(result)
+
+
+class DeleteQuery(BaseQuery):
+ def clone(self):
+ query = DeleteQuery(self.model)
+ query._where = dict(self._where)
+ query._joins = list(self._joins)
+ return query
+
+ def sql(self):
+ joins, where, where_data, alias_map = self.compile_where()
+
+ params = []
+
+ delete = 'DELETE FROM %s' % (self.model._meta.db_table)
+ where = ' AND '.join(where)
+
+ pieces = [delete]
+
+ if where:
+ pieces.append('WHERE %s' % where)
+ params.extend(self.convert_where_to_params(where_data))
+
+ return ' '.join(pieces), params
+
+ def join(self, *args, **kwargs):
+ raise AttributeError('Delete queries do not support JOINs in sqlite')
+
+ def execute(self):
+ result = self.raw_execute()
+ return self.database.rows_affected(result)
+
+
+class InsertQuery(BaseQuery):
+ def __init__(self, model, **kwargs):
+ self.insert_query = kwargs
+ super(InsertQuery, self).__init__(model)
+
+ def parse_insert(self):
+ cols = []
+ vals = []
+ for k, v in self.insert_query.iteritems():
+ field = self.model._meta.get_field_by_name(k)
+ cols.append(k)
+ vals.append(field.db_value(v))
+
+ return cols, vals
+
+ def sql(self):
+ cols, vals = self.parse_insert()
+
+ insert = 'INSERT INTO %s (%s) VALUES (%s)' % (
+ self.model._meta.db_table,
+ ','.join(cols),
+ ','.join(self.interpolation for v in vals)
+ )
+
+ return insert, vals
+
+ def where(self, *args, **kwargs):
+ raise AttributeError('Insert queries do not support WHERE clauses')
+
+ def join(self, *args, **kwargs):
+ raise AttributeError('Insert queries do not support JOINs')
+
+ def execute(self):
+ result = self.raw_execute()
+ return self.database.last_insert_id(result, self.model)
+
+
+class Field(object):
+ db_field = ''
+ default = None
+ field_template = "%(column_type)s%(nullable)s"
+
+ def get_attributes(self):
+ return {}
+
+ def __init__(self, null=False, db_index=False, *args, **kwargs):
+ self.null = null
+ self.db_index = db_index
+ self.attributes = self.get_attributes()
+ self.default = kwargs.get('default', None)
+
+ kwargs['nullable'] = ternary(self.null, '', ' NOT NULL')
+ self.attributes.update(kwargs)
+
+ def add_to_class(self, klass, name):
+ self.name = name
+ self.model = klass
+ setattr(klass, name, None)
+
+ def render_field_template(self):
+ col_type = self.model._meta.database.column_for_field(self.db_field)
+ self.attributes['column_type'] = col_type
+ return self.field_template % self.attributes
+
+ def to_sql(self):
+ rendered = self.render_field_template()
+ return '%s %s' % (self.name, rendered)
+
+ def null_wrapper(self, value, default=None):
+ if (self.null and value is None) or default is None:
+ return value
+ return value or default
+
+ def db_value(self, value):
+ return value
+
+ def python_value(self, value):
+ return value
+
+ def lookup_value(self, lookup_type, value):
+ return self.db_value(value)
+
+
+class CharField(Field):
+ db_field = 'string'
+ field_template = '%(column_type)s(%(max_length)d)%(nullable)s'
+
+ def get_attributes(self):
+ return {'max_length': 255}
+
+ def db_value(self, value):
+ if self.null and value is None:
+ return value
+ value = value or ''
+ return value[:self.attributes['max_length']]
+
+ def lookup_value(self, lookup_type, value):
+ if lookup_type == 'contains':
+ return '*%s*' % self.db_value(value)
+ elif lookup_type == 'icontains':
+ return '%%%s%%' % self.db_value(value)
+ else:
+ return self.db_value(value)
+
+
+class TextField(Field):
+ db_field = 'text'
+
+ def db_value(self, value):
+ return self.null_wrapper(value, '')
+
+ def lookup_value(self, lookup_type, value):
+ if lookup_type == 'contains':
+ return '*%s*' % self.db_value(value)
+ elif lookup_type == 'icontains':
+ return '%%%s%%' % self.db_value(value)
+ else:
+ return self.db_value(value)
+
+
+class DateTimeField(Field):
+ db_field = 'datetime'
+
+ def python_value(self, value):
+ if isinstance(value, basestring):
+ value = value.rsplit('.', 1)[0]
+ return datetime(*time.strptime(value, '%Y-%m-%d %H:%M:%S')[:6])
+ return value
+
+
+class IntegerField(Field):
+ db_field = 'integer'
+
+ def db_value(self, value):
+ return self.null_wrapper(value, 0)
+
+ def python_value(self, value):
+ if value is not None:
+ return int(value)
+
+
+class BooleanField(IntegerField):
+ db_field = 'boolean'
+
+ def db_value(self, value):
+ if value:
+ return 1
+ return 0
+
+ def python_value(self, value):
+ return bool(value)
+
+
+class FloatField(Field):
+ db_field = 'float'
+
+ def db_value(self, value):
+ return self.null_wrapper(value, 0.0)
+
+ def python_value(self, value):
+ if value is not None:
+ return float(value)
+
+
+class PrimaryKeyField(IntegerField):
+ db_field = 'primary_key'
+ field_template = "%(column_type)s NOT NULL PRIMARY KEY"
+
+
+class ForeignRelatedObject(object):
+ def __init__(self, to, name):
+ self.field_name = name
+ self.to = to
+ self.cache_name = '_cache_%s' % name
+
+ def __get__(self, instance, instance_type=None):
+ if not getattr(instance, self.cache_name, None):
+ id = getattr(instance, self.field_name, 0)
+ qr = self.to.select().where(**{self.to._meta.pk_name: id}).execute()
+ setattr(instance, self.cache_name, qr.next())
+ return getattr(instance, self.cache_name)
+
+ def __set__(self, instance, obj):
+ assert isinstance(obj, self.to), "Cannot assign %s, invalid type" % obj
+ setattr(instance, self.field_name, obj.get_pk())
+ setattr(instance, self.cache_name, obj)
+
+
+class ReverseForeignRelatedObject(object):
+ def __init__(self, related_model, name):
+ self.field_name = name
+ self.related_model = related_model
+
+ def __get__(self, instance, instance_type=None):
+ query = {self.field_name: instance.get_pk()}
+ qr = self.related_model.select().where(**query)
+ return qr
+
+
+class ForeignKeyField(IntegerField):
+ db_field = 'foreign_key'
+ field_template = '%(column_type)s%(nullable)s REFERENCES %(to_table)s (%(to_pk)s)'
+
+ def __init__(self, to, null=False, related_name=None, *args, **kwargs):
+ self.to = to
+ self.related_name = related_name
+ kwargs.update({
+ 'to_table': to._meta.db_table,
+ 'to_pk': to._meta.pk_name
+ })
+ super(ForeignKeyField, self).__init__(null=null, *args, **kwargs)
+
+ def add_to_class(self, klass, name):
+ self.descriptor = name
+ self.name = name + '_id'
+ self.model = klass
+
+ if self.related_name is None:
+ self.related_name = klass._meta.db_table + '_set'
+
+ klass._meta.rel_fields[name] = self.name
+ setattr(klass, self.descriptor, ForeignRelatedObject(self.to, self.name))
+ setattr(klass, self.name, None)
+
+ reverse_rel = ReverseForeignRelatedObject(klass, self.name)
+ setattr(self.to, self.related_name, reverse_rel)
+
+ def lookup_value(self, lookup_type, value):
+ if isinstance(value, Model):
+ return value.get_pk()
+ return value or None
+
+ def db_value(self, value):
+ if isinstance(value, Model):
+ return value.get_pk()
+ return value
+
+
+# define a default database object in the module scope
+database = SqliteDatabase(DATABASE_NAME)
+
+
+class BaseModelOptions(object):
+ def __init__(self, model_class, options=None):
+ # configurable options
+ options = options or {'database': database}
+ for k, v in options.items():
+ setattr(self, k, v)
+
+ self.rel_fields = {}
+ self.fields = {}
+ self.model_class = model_class
+
+ def get_field_names(self):
+ fields = [self.pk_name]
+ fields.extend([f for f in sorted(self.fields.keys()) if f != self.pk_name])
+ return fields
+
+ def get_field_by_name(self, name):
+ if name in self.fields:
+ return self.fields[name]
+ raise AttributeError('Field named %s not found' % name)
+
+ def get_related_field_by_name(self, name):
+ if name in self.rel_fields:
+ return self.fields[self.rel_fields[name]]
+
+ def get_related_field_for_model(self, model, name=None):
+ for field in self.fields.values():
+ if isinstance(field, ForeignKeyField) and field.to == model:
+ if name is None or name == field.name or name == field.descriptor:
+ return field
+
+ def get_reverse_related_field_for_model(self, model, name=None):
+ for field in model._meta.fields.values():
+ if isinstance(field, ForeignKeyField) and field.to == self.model_class:
+ if name is None or name == field.name or name == field.descriptor:
+ return field
+
+ def rel_exists(self, model):
+ return self.get_related_field_for_model(model) or \
+ self.get_reverse_related_field_for_model(model)
+
+
+class BaseModel(type):
+ inheritable_options = ['database']
+
+ def __new__(cls, name, bases, attrs):
+ cls = super(BaseModel, cls).__new__(cls, name, bases, attrs)
+
+ attr_dict = {}
+ meta = attrs.pop('Meta', None)
+ if meta:
+ attr_dict = meta.__dict__
+
+ for b in bases:
+ base_meta = getattr(b, '_meta', None)
+ if not base_meta:
+ continue
+
+ for (k, v) in base_meta.__dict__.items():
+ if k in cls.inheritable_options and k not in attr_dict:
+ attr_dict[k] = v
+
+ _meta = BaseModelOptions(cls, attr_dict)
+
+ if not hasattr(_meta, 'db_table'):
+ _meta.db_table = re.sub('[^a-z]+', '_', cls.__name__.lower())
+
+ setattr(cls, '_meta', _meta)
+
+ _meta.pk_name = None
+
+ for name, attr in cls.__dict__.items():
+ if isinstance(attr, Field):
+ attr.add_to_class(cls, name)
+ _meta.fields[attr.name] = attr
+ if isinstance(attr, PrimaryKeyField):
+ _meta.pk_name = attr.name
+
+ if _meta.pk_name is None:
+ _meta.pk_name = 'id'
+ pk = PrimaryKeyField()
+ pk.add_to_class(cls, _meta.pk_name)
+ _meta.fields[_meta.pk_name] = pk
+
+ _meta.model_name = cls.__name__
+
+ if hasattr(cls, '__unicode__'):
+ setattr(cls, '__repr__', lambda self: '<%s: %s>' % (
+ _meta.model_name, self.__unicode__()))
+
+ exception_class = type('%sDoesNotExist' % _meta.model_name, (DoesNotExist,), {})
+ cls.DoesNotExist = exception_class
+
+ return cls
+
+
+class Model(object):
+ __metaclass__ = BaseModel
+
+ def __init__(self, *args, **kwargs):
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+ def __eq__(self, other):
+ return other.__class__ == self.__class__ and \
+ self.get_pk() and \
+ other.get_pk() == self.get_pk()
+
+ def get_field_dict(self):
+ def get_field_val(field):
+ field_value = getattr(self, field.name)
+ if not self.get_pk() and field_value is None and field.default is not None:
+ if callable(field.default):
+ field_value = field.default()
+ else:
+ field_value = field.default
+ setattr(self, field.name, field_value)
+ return (field.name, field_value)
+
+ pairs = map(get_field_val, self._meta.fields.values())
+ return dict(pairs)
+
+ @classmethod
+ def create_table(cls):
+ cls._meta.database.create_table(cls)
+
+ for field_name, field_obj in cls._meta.fields.items():
+ if isinstance(field_obj, PrimaryKeyField):
+ cls._meta.database.create_index(cls, field_obj.name, True)
+ elif isinstance(field_obj, ForeignKeyField):
+ cls._meta.database.create_index(cls, field_obj.name)
+ elif field_obj.db_index:
+ cls._meta.database.create_index(cls, field_obj.name)
+
+ @classmethod
+ def drop_table(cls, fail_silently=False):
+ cls._meta.database.drop_table(cls, fail_silently)
+
+ @classmethod
+ def select(cls, query=None):
+ return SelectQuery(cls, query)
+
+ @classmethod
+ def update(cls, **query):
+ return UpdateQuery(cls, **query)
+
+ @classmethod
+ def insert(cls, **query):
+ return InsertQuery(cls, **query)
+
+ @classmethod
+ def delete(cls, **query):
+ return DeleteQuery(cls, **query)
+
+ @classmethod
+ def raw(cls, sql, *params):
+ return RawQuery(cls, sql, *params)
+
+ @classmethod
+ def create(cls, **query):
+ inst = cls(**query)
+ inst.save()
+ return inst
+
+ @classmethod
+ def get_or_create(cls, **query):
+ try:
+ inst = cls.get(**query)
+ except cls.DoesNotExist:
+ inst = cls.create(**query)
+ return inst
+
+ @classmethod
+ def get(cls, *args, **kwargs):
+ query = cls.select().where(*args, **kwargs).paginate(1, 1)
+ try:
+ return query.execute().next()
+ except StopIteration:
+ raise cls.DoesNotExist('instance matching query does not exist:\nSQL: %s\nPARAMS: %s' % (
+ query.sql()
+ ))
+
+ def get_pk(self):
+ return getattr(self, self._meta.pk_name, None)
+
+ def save(self):
+ field_dict = self.get_field_dict()
+ field_dict.pop(self._meta.pk_name)
+ if self.get_pk():
+ update = self.update(
+ **field_dict
+ ).where(**{self._meta.pk_name: self.get_pk()})
+ update.execute()
+ else:
+ insert = self.insert(**field_dict)
+ new_pk = insert.execute()
+ setattr(self, self._meta.pk_name, new_pk)
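
The vendored module keeps peewee's declarative style: subclass Model, declare
Field attributes, and build queries through chained, cloned query objects. A
short sketch, assuming the file is importable as yt.utilities.peewee and using
a hypothetical Simulation model (the default backing store is an sqlite file,
peewee.db):

    from yt.utilities import peewee

    class Simulation(peewee.Model):
        name = peewee.CharField()
        steps = peewee.IntegerField()

    peewee.database.connect()       # module-scope SqliteDatabase
    Simulation.create_table()
    Simulation.create(name='run_001', steps=100)
    for sim in Simulation.select().where(steps__gte=50):
        print sim.name, sim.steps
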
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/utilities/performance_counters.py
--- a/yt/utilities/performance_counters.py
+++ b/yt/utilities/performance_counters.py
@@ -125,8 +125,8 @@
def write_out(self, filename_prefix):
if ytcfg.getboolean("yt","__parallel"):
pfn = "%s_%03i_%03i" % (filename_prefix,
- ytcfg.getint("yt", "__parallel_rank"),
- ytcfg.getint("yt", "__parallel_size"))
+ ytcfg.getint("yt", "__global_parallel_rank"),
+ ytcfg.getint("yt", "__global_parallel_size"))
else:
pfn = "%s" % (filename_prefix)
for n, p in sorted(self.profilers.items()):
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -13,7 +13,7 @@
cross_section_thompson_cgs = 6.65e-25 # cm^2
# Charge
-charge_proton_cgs = 4.803e-28 # emu = 1.602e-19 Coulombs
+charge_proton_cgs = 4.803e-10 # esu = 1.602e-19 Coulombs
# Physical Constants
boltzmann_constant_cgs = 1.3806504e-16 # erg K^-1
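
The corrected charge is a unit fix rather than a new measurement: with
e = 1.602e-19 C and 1 C ~= 2.998e9 esu, e ~= 1.602e-19 * 2.998e9 ~= 4.803e-10
esu, which is the value now in the line above.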
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/utilities/rpdb.py
--- a/yt/utilities/rpdb.py
+++ b/yt/utilities/rpdb.py
@@ -53,8 +53,8 @@
def rpdb_excepthook(exc_type, exc, tb):
traceback.print_exception(exc_type, exc, tb)
- task = ytcfg.getint("yt", "__parallel_rank")
- size = ytcfg.getint("yt", "__parallel_size")
+ task = ytcfg.getint("yt", "__global_parallel_rank")
+ size = ytcfg.getint("yt", "__global_parallel_size")
print "Starting RPDB server on task %s ; connect with 'yt rpdb %s'" \
% (task,task)
handler = pdb_handler(tb)
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -30,6 +30,7 @@
axis_names
import _MPL
import numpy as na
+import weakref
class FixedResolutionBuffer(object):
def __init__(self, data_source, bounds, buff_size, antialias = True,
@@ -96,6 +97,10 @@
self.axis = data_source.axis
self.periodic = periodic
+ h = getattr(data_source, "hierarchy", None)
+ if h is not None:
+ h.plots.append(weakref.proxy(self))
+
# Handle periodicity, just in case
if self.data_source.axis < 3:
DLE = self.pf.domain_left_edge
@@ -273,6 +278,17 @@
else: data=self[field]
numdisplay.display(data)
+ @property
+ def limits(self):
+ rv = dict(x = None, y = None, z = None)
+ xax = x_dict[self.axis]
+ yax = y_dict[self.axis]
+ xn = axis_names[xax]
+ yn = axis_names[yax]
+ rv[xn] = (self.bounds[0], self.bounds[1])
+ rv[yn] = (self.bounds[2], self.bounds[3])
+ return rv
+
class ObliqueFixedResolutionBuffer(FixedResolutionBuffer):
"""
This object is a subclass of :class:`yt.visualization.fixed_resolution.FixedResolutionBuffer`
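
The new limits property exposes the buffer bounds keyed by axis name, so
downstream plot code can read them without re-deriving the axis mapping. A
sketch, assuming frb is a FixedResolutionBuffer built from an axis=2 (z) slice
with bounds (x0, x1, y0, y1):

    lims = frb.limits
    # lims == {'x': (x0, x1), 'y': (y0, y1), 'z': None}
    x0, x1 = lims['x']
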
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -50,8 +50,14 @@
pass
def convert_to_pixels(self, plot, coord, offset = True):
- x0, x1 = plot.xlim
- y0, y1 = plot.ylim
+ if plot.xlim is not None:
+ x0, x1 = plot.xlim
+ else:
+ x0, x1 = plot._axes.get_xlim()
+ if plot.ylim is not None:
+ y0, y1 = plot.ylim
+ else:
+ y0, y1 = plot._axes.get_ylim()
l, b, width, height = mpl_get_bounds(plot._axes.bbox)
dx = width / (x1-x0)
dy = height / (y1-y0)
@@ -473,7 +479,7 @@
class ImageLineCallback(LinePlotCallback):
_type_name = "image_line"
- def __init__(self, p1, p2, plot_args = None):
+ def __init__(self, p1, p2, data_coords=False, plot_args = None):
"""
Plot from *p1* to *p2* (image plane coordinates)
with *plot_args* fed into the plot.
@@ -484,19 +490,27 @@
if plot_args is None: plot_args = {}
self.plot_args = plot_args
self._ids = []
+ self.data_coords = data_coords
def __call__(self, plot):
# We manually clear out any previous calls to this callback:
plot._axes.lines = [l for l in plot._axes.lines if id(l) not in self._ids]
- p1 = self.convert_to_pixels(plot, self.p1)
- p2 = self.convert_to_pixels(plot, self.p2)
+ kwargs = self.plot_args.copy()
+ if self.data_coords and len(plot.image._A.shape) == 2:
+ p1 = self.convert_to_pixels(plot, self.p1)
+ p2 = self.convert_to_pixels(plot, self.p2)
+ else:
+ p1, p2 = self.p1, self.p2
+ if not self.data_coords:
+ kwargs["transform"] = plot._axes.transAxes
+
px, py = (p1[0], p2[0]), (p1[1], p2[1])
# Save state
xx0, xx1 = plot._axes.get_xlim()
yy0, yy1 = plot._axes.get_ylim()
plot._axes.hold(True)
- ii = plot._axes.plot(px, py, **self.plot_args)
+ ii = plot._axes.plot(px, py, **kwargs)
self._ids.append(id(ii[0]))
# Reset state
plot._axes.set_xlim(xx0,xx1)
@@ -905,7 +919,7 @@
class TextLabelCallback(PlotCallback):
_type_name = "text"
- def __init__(self, pos, text, data_coords=False,text_args = None):
+ def __init__(self, pos, text, data_coords=False, text_args = None):
"""
Accepts a position in (0..1, 0..1) of the image, some text and
optionally some text arguments. If data_coords is True,
@@ -918,16 +932,18 @@
self.text_args = text_args
def __call__(self, plot):
- if self.data_coords:
+ kwargs = self.text_args.copy()
+ if self.data_coords and len(plot.image._A.shape) == 2:
if len(self.pos) == 3:
pos = (self.pos[x_dict[plot.data.axis]],
self.pos[y_dict[plot.data.axis]])
else: pos = self.pos
x,y = self.convert_to_pixels(plot, pos)
else:
- x = plot.image._A.shape[0] * self.pos[0]
- y = plot.image._A.shape[1] * self.pos[1]
- plot._axes.text(x, y, self.text, **self.text_args)
+ x, y = self.pos
+ if not self.data_coords:
+ kwargs["transform"] = plot._axes.transAxes
+ plot._axes.text(x, y, self.text, **kwargs)
class ParticleCallback(PlotCallback):
_type_name = "particles"
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -381,8 +381,12 @@
self[self.axis_names["Z"]].max())
if self.log_field:
bI = na.where(buff > 0)
- newmin = na.nanmin(buff[bI])
- newmax = na.nanmax(buff[bI])
+ if len(bI[0]) == 0:
+ newmin = 1e-99
+ newmax = 1e-99
+ else:
+ newmin = na.nanmin(buff[bI])
+ newmax = na.nanmax(buff[bI])
else:
newmin = na.nanmin(buff)
newmax = na.nanmax(buff)
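
The guard above exists because a buffer with no positive values hands na.nanmin/na.nanmax an empty selection, which raises. Standalone, the logic reads:

    import numpy as na

    def safe_log_limits(buff):
        # mirror of the new branch: fall back to tiny sentinels when
        # nothing in the buffer is positive
        bI = na.where(buff > 0)
        if len(bI[0]) == 0:
            return 1e-99, 1e-99
        return na.nanmin(buff[bI]), na.nanmax(buff[bI])

    print safe_log_limits(na.zeros((4, 4)))    # (1e-99, 1e-99)
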
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -355,7 +355,7 @@
def get_ticks(self, mi, ma, height = 400, take_log = False):
# This will eventually change to work with non-logged fields
ticks = []
- if take_log:
+ if take_log and mi > 0.0 and ma > 0.0:
ll = LogLocator()
tick_locs = ll(mi, ma)
mi = na.log10(mi)
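
Same family of fix: log-spaced ticks only make sense for strictly positive limits, since na.log10(0.0) yields -inf and derails the LogLocator. As a sketch:

    import numpy as na

    def log_tick_bounds(mi, ma, take_log):
        if take_log and mi > 0.0 and ma > 0.0:
            return na.log10(mi), na.log10(ma)
        return mi, ma                          # silently fall back to linear

    print log_tick_bounds(0.0, 1.0, True)      # (0.0, 1.0)
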
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -99,6 +99,7 @@
zfield='x-velocity', volume=None,
dx=None, length=None, direction=1,
get_magnitude=False):
+ ParallelAnalysisInterface.__init__(self)
self.pf = pf
self.start_positions = na.array(positions)
self.N = self.start_positions.shape[0]
@@ -124,8 +125,8 @@
self.magnitudes = na.zeros((self.N,self.steps), dtype='float64')
def integrate_through_volume(self):
- nprocs = self._mpi_get_size()
- my_rank = self._mpi_get_rank()
+ nprocs = self.comm.size
+ my_rank = self.comm.rank
self.streamlines[my_rank::nprocs,0,:] = self.start_positions[my_rank::nprocs]
pbar = get_pbar("Streamlining", self.N)
@@ -144,8 +145,8 @@
@parallel_passthrough
def _finalize_parallel(self,data):
- self.streamlines = self._mpi_allsum(self.streamlines)
- self.magnitudes = self._mpi_allsum(self.magnitudes)
+ self.streamlines = self.comm.mpi_allreduce(self.streamlines, op='sum')
+ self.magnitudes = self.comm.mpi_allreduce(self.magnitudes, op='sum')
def _integrate_through_brick(self, node, stream, step,
periodic=False, mag=None):
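
The pattern running through these changes -- an explicit ParallelAnalysisInterface.__init__ call plus a self.comm communicator in place of the mixed-in _mpi_* methods -- looks roughly like this (a sketch against the interface named in the diff, not a tested recipe):

    import numpy as na
    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        ParallelAnalysisInterface

    class StridedSum(ParallelAnalysisInterface):
        def __init__(self):
            ParallelAnalysisInterface.__init__(self)   # now mandatory

        def run(self, n):
            data = na.zeros(n, dtype='float64')
            # each task fills its stride, then the sum is reduced everywhere
            data[self.comm.rank::self.comm.size] = 1.0
            return self.comm.mpi_allreduce(data, op='sum')
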
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/visualization/volume_rendering/api.py
--- a/yt/visualization/volume_rendering/api.py
+++ b/yt/visualization/volume_rendering/api.py
@@ -35,10 +35,9 @@
from yt.utilities.amr_utils import PartitionedGrid, VectorPlane, \
TransferFunctionProxy
from grid_partitioner import HomogenizedVolume, \
- HomogenizedBrickCollection, \
export_partitioned_grids, \
import_partitioned_grids
from image_handling import export_rgba, import_rgba, \
plot_channel, plot_rgb
-from software_sampler import VolumeRendering
-from camera import Camera, PerspectiveCamera, StereoPairCamera
+from camera import Camera, PerspectiveCamera, StereoPairCamera, \
+ off_axis_projection
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -87,7 +87,7 @@
the volume rendering mechanism.
sub_samples : int, optional
The number of samples to take inside every cell per ray.
- pf : `~yt.lagos.StaticOutput`
+ pf : `~yt.data_objects.api.StaticOutput`
For now, this is a required parameter! But in the future it will become
optional. This is the parameter file to volume render.
use_kd: bool, optional
@@ -180,6 +180,7 @@
>>> image = cam.snapshot(fn='my_rendering.png')
"""
+ ParallelAnalysisInterface.__init__(self)
if pf is not None: self.pf = pf
if not iterable(resolution):
resolution = (resolution, resolution)
@@ -356,7 +357,7 @@
pbar.update(total_cells)
pbar.finish()
- if self._mpi_get_rank() is 0 and fn is not None:
+ if self.comm.rank == 0 and fn is not None:
if clip_ratio is not None:
write_bitmap(image, fn, clip_ratio*image.std())
else:
@@ -413,7 +414,7 @@
self.zoom(f)
yield self.snapshot()
- def move_to(self, final, n_steps, final_width=None, yield_snapshots=True):
+ def move_to(self, final, n_steps, final_width=None, exponential=True):
r"""Loop over a look_at
This will yield `n_steps` snapshots until the current view has been
@@ -428,6 +429,9 @@
final_width: float or array_like, optional
Specifies the final width after `n_steps`. Useful for
moving and zooming at the same time.
+ exponential : boolean
+ Specifies whether the move/zoom transition follows an
+ exponential path toward the destination or a linear one
Examples
--------
@@ -437,17 +441,29 @@
"""
self.center = na.array(self.center)
dW = None
- if final_width is not None:
- if not iterable(final_width):
- final_width = na.array([final_width, final_width, final_width]) # front/back, left/right, top/bottom
- dW = (1.0*final_width-na.array(self.width))/n_steps
- dx = (na.array(final)-self.center)*1.0/n_steps
+ if exponential:
+ if final_width is not None:
+ if not iterable(final_width):
+ final_width = na.array([final_width, final_width, final_width])
+ # front/back, left/right, top/bottom
+ final_zoom = final_width/na.array(self.width)
+ dW = final_zoom**(1.0/n_steps)
+ position_diff = (na.array(final)/self.center)*1.0
+ dx = position_diff**(1.0/n_steps)
+ else:
+ if final_width is not None:
+ if not iterable(final_width):
+ final_width = na.array([final_width, final_width, final_width])
+ # front/back, left/right, top/bottom
+ dW = (1.0*final_width-na.array(self.width))/n_steps
+ dx = (na.array(final)-self.center)*1.0/n_steps
for i in xrange(n_steps):
- self.switch_view(center=self.center+dx, width=self.width+dW)
- if yield_snapshots:
- yield self.snapshot()
+ if exponential:
+ self.switch_view(center=self.center*dx, width=self.width*dW)
else:
- yield self
+ self.switch_view(center=self.center+dx, width=self.width+dW)
+ yield self.snapshot()
+
def rotate(self, theta, rot_vector=None):
r"""Rotate by a given angle
@@ -622,7 +638,8 @@
transfer_function = None, fields = None,
sub_samples = 5, log_fields = None, volume = None,
pf = None, use_kd=True, no_ghost=False):
- if pf is not None: self.pf = pf
+ ParallelAnalysisInterface.__init__(self)
+ if pf is not None: self.pf = pf
self.center = na.array(center, dtype='float64')
self.radius = radius
self.nside = nside
@@ -664,7 +681,7 @@
pbar.update(total_cells)
pbar.finish()
- if self._mpi_get_rank() is 0 and fn is not None:
+ if self.comm.rank == 0 and fn is not None:
# This assumes Density; this is a relatively safe assumption.
import matplotlib.figure
import matplotlib.backends.backend_agg
@@ -691,7 +708,8 @@
sub_samples = 5, log_fields = None, volume = None,
pf = None, use_kd=True, no_ghost=False,
rays_per_cell = 0.1, max_nside = 8192):
- if pf is not None: self.pf = pf
+ ParallelAnalysisInterface.__init__(self)
+ if pf is not None: self.pf = pf
self.center = na.array(center, dtype='float64')
self.radius = radius
self.use_kd = use_kd
@@ -732,9 +750,10 @@
self.center -= 1e-2 * min_dx
ray_source = AdaptiveRaySource(self.center, self.rays_per_cell,
self.initial_nside, self.radius,
- bricks, self.max_nside)
+ bricks, left_edges, right_edges, self.max_nside)
for i,brick in enumerate(bricks):
- ray_source.integrate_brick(brick, tfp, i, left_edges, right_edges)
+ ray_source.integrate_brick(brick, tfp, i, left_edges, right_edges,
+ bricks)
total_cells += na.prod(brick.my_data[0].shape)
pbar.update(total_cells)
pbar.finish()
@@ -743,7 +762,8 @@
class StereoPairCamera(Camera):
def __init__(self, original_camera, relative_separation = 0.005):
- self.original_camera = original_camera
+ ParallelAnalysisInterface.__init__(self)
+ self.original_camera = original_camera
self.relative_separation = relative_separation
def split(self):
@@ -763,3 +783,74 @@
oc.volume, oc.fields, oc.log_fields,
oc.sub_samples, oc.pf)
return (left_camera, right_camera)
+
+def off_axis_projection(pf, center, normal_vector, width, resolution,
+ field, weight = None, volume = None):
+ r"""Project through a parameter file, off-axis, and return the image plane.
+
+ This function will accept the necessary items to integrate through a volume
+ at an arbitrary angle and return the integrated field of view to the user.
+ Note that if a weight is supplied, it will multiply the pre-interpolated
+ values together, then create cell-centered values, then interpolate within
+ the cell to conduct the integration.
+
+ Parameters
+ ----------
+ pf : `~yt.data_objects.api.StaticOutput`
+ This is the parameter file to volume render.
+ center : array_like
+ The current "center" of the view port -- the focal point for the
+ camera.
+ normal_vector : array_like
+ The vector between the camera position and the center.
+ width : float or list of floats
+ The current width of the image. If a single float, the volume is
+ cubical, but if not, it is front/back, left/right, top/bottom.
+ resolution : int or list of ints
+ The number of pixels in each direction.
+ field : string
+ The field to project through the volume
+ weight : optional, default None
+ If supplied, the field will be pre-multiplied by this, then divided by
+ the integrated value of this field. This returns an average rather
+ than a sum.
+ volume : `yt.extensions.volume_rendering.HomogenizedVolume`, optional
+ The volume to ray cast through. Can be specified for finer-grained
+ control, but otherwise will be automatically generated.
+
+ Returns
+ -------
+ image : array
+ An (N,N) array of the final integrated values, in float64 form.
+
+ Examples
+ --------
+
+ >>> image = off_axis_projection(pf, [0.5, 0.5, 0.5], [0.2,0.3,0.4],
+ 0.2, N, "Temperature", "Density")
+ >>> write_image(na.log10(image), "offaxis.png")
+
+ """
+ # We manually modify the ProjectionTransferFunction to get it to work the
+ # way we want, with a second field that's also passed through.
+ fields = [field]
+ if weight is not None:
+ # This is a temporary field, which we will remove at the end.
+ pf.field_info.add_field("temp_weightfield",
+ function=lambda a,b:b[field]*b[weight])
+ fields = ["temp_weightfield", weight]
+ tf = ProjectionTransferFunction(n_fields = len(fields))
+ cam = pf.h.camera(center, normal_vector, width, resolution, tf,
+ fields = fields,
+ log_fields = [False] * len(fields),
+ volume = volume)
+ vals = cam.snapshot()
+ image = vals[:,:,0]
+ if weight is None:
+ dl = width * pf.units[pf.field_info[field].projection_conversion]
+ image *= dl
+ else:
+ image /= vals[:,:,1]
+ pf.field_info._field_list.pop("temp_weightfield")
+ return image
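
The camera changes are easiest to see from the calling side. A hedged sketch against the yt 2.x API (the dataset name and transfer-function bounds are invented):

    from yt.mods import *

    pf = load("RedshiftOutput0005")            # hypothetical
    tf = ColorTransferFunction((-31, -27))
    tf.add_layers(4)
    cam = pf.h.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], 0.3, 256, tf)
    # exponential=True moves/zooms along a multiplicative path
    for i, snap in enumerate(cam.move_to([0.5, 0.5, 0.6], 10,
                                         final_width=0.03,
                                         exponential=True)):
        write_bitmap(snap, "move_%04i.png" % i)

    # the new convenience function wraps the same machinery:
    image = off_axis_projection(pf, [0.5, 0.5, 0.5], [0.2, 0.3, 0.4],
                                0.2, 256, "Temperature", weight="Density")
    write_image(na.log10(image), "offaxis.png")
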
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/visualization/volume_rendering/grid_partitioner.py
--- a/yt/visualization/volume_rendering/grid_partitioner.py
+++ b/yt/visualization/volume_rendering/grid_partitioner.py
@@ -29,8 +29,6 @@
from yt.utilities.amr_utils import PartitionedGrid, ProtoPrism, GridFace, \
grid_points_in_volume, find_grids_in_inclined_box
-from yt.utilities.parallel_tools.distributed_object_collection import \
- DistributedObjectCollection
from yt.utilities.parallel_tools.parallel_analysis_interface import \
ParallelAnalysisInterface, parallel_root_only
@@ -45,6 +43,7 @@
def __init__(self, fields = "Density", source = None, pf = None,
log_fields = None, no_ghost = False):
# Typically, initialized as hanging off a hierarchy. But, not always.
+ ParallelAnalysisInterface.__init__(self)
self.no_ghost = no_ghost
if pf is not None: self.pf = pf
if source is None: source = self.pf.h.all_data()
@@ -101,13 +100,12 @@
" not yet supported")
if self.bricks is not None and source is None: return
bricks = []
- self._preload(self.source._grids, self.fields, self.pf.h.io)
+ self.comm.preload(self.source._grids, self.fields, self.pf.h.io)
pbar = get_pbar("Partitioning ", len(self.source._grids))
for i, g in enumerate(self.source._grids):
pbar.update(i)
bricks += self._partition_grid(g)
pbar.finish()
- bricks = na.array(bricks, dtype='object')
self.initialize_bricks(bricks)
def initialize_bricks(self, bricks):
@@ -120,14 +118,15 @@
self.brick_right_edges = na.zeros( (NB, 3), dtype='float64')
self.brick_parents = na.zeros( NB, dtype='int64')
self.brick_dimensions = na.zeros( (NB, 3), dtype='int64')
+ self.bricks = na.empty(len(bricks), dtype='object')
for i,b in enumerate(bricks):
self.brick_left_edges[i,:] = b.LeftEdge
self.brick_right_edges[i,:] = b.RightEdge
self.brick_parents[i] = b.parent_grid_id
self.brick_dimensions[i,:] = b.my_data[0].shape
+ self.bricks[i] = b
# Vertex-centered means we subtract one from the shape
self.brick_dimensions -= 1
- self.bricks = na.array(bricks, dtype='object')
def reflect_across_boundaries(self):
mylog.warning("Note that this doesn't fix ghost zones, so there may be artifacts at domain boundaries!")
@@ -201,176 +200,6 @@
def reset_cast(self):
pass
-class HomogenizedBrickCollection(DistributedObjectCollection):
- def __init__(self, source):
- # The idea here is that we have two sources -- the global_domain
- # source, which would be a decomposition of the 3D domain, and a
- # local_domain source, which is the set of bricks we want at the end.
- self.source = source
- self.pf = source.pf
-
- @classmethod
- def load_bricks(self, base_filename):
- pass
-
- def write_my_bricks(self, base_filename):
- pass
-
- def store_bricks(self, base_filename):
- pass
-
- @parallel_root_only
- def write_hierarchy(self, base_filename):
- pass
-
- def _partition_grid(self, grid, fields, log_field = None):
- fields = ensure_list(fields)
- if log_field is None: log_field = [True] * len(fields)
-
- # This is not super efficient, as it re-fills the regions once for each
- # field.
- vcds = []
- for i,field in enumerate(fields):
- vcd = grid.get_vertex_centered_data(field).astype('float64')
- if log_field[i]: vcd = na.log10(vcd)
- vcds.append(vcd)
-
- GF = GridFaces(grid.Children + [grid])
- PP = ProtoPrism(grid.id, grid.LeftEdge, grid.RightEdge, GF)
-
- pgs = []
- for P in PP.sweep(0):
- sl = P.get_brick(grid.LeftEdge, grid.dds, grid.child_mask)
- if len(sl) == 0: continue
- dd = [d[sl[0][0]:sl[0][1]+1,
- sl[1][0]:sl[1][1]+1,
- sl[2][0]:sl[2][1]+1].copy() for d in vcds]
- pgs.append(PartitionedGrid(grid.id, len(fields), dd,
- P.LeftEdge, P.RightEdge, sl[-1]))
- return pgs
-
- def _partition_local_grids(self, fields = "Density", log_field = None):
- fields = ensure_list(fields)
- bricks = []
- # We preload.
- # UNCOMMENT FOR PARALLELISM
- #grid_list = list(self._get_grid_objs())
- grid_list = list(self.source._grids)
- self._preload(grid_list, fields, self.pf.h.io)
- pbar = get_pbar("Partitioning ", len(grid_list))
- # UNCOMMENT FOR PARALLELISM
- #for i, g in enumerate(self._get_grids()):
- print "THIS MANY GRIDS!", len(grid_list)
- for i, g in enumerate(self.source._grids):
- pbar.update(i)
- bricks += self._partition_grid(g, fields, log_field)
- pbar.finish()
- bricks = na.array(bricks, dtype='object')
- NB = len(bricks)
- # Now we set up our (local for now) hierarchy. Note that to calculate
- # intersection, we only need to do the left edge & right edge.
- #
- # We're going to double up a little bit here in memory.
- self.brick_left_edges = na.zeros( (NB, 3), dtype='float64')
- self.brick_right_edges = na.zeros( (NB, 3), dtype='float64')
- self.brick_parents = na.zeros( NB, dtype='int64')
- self.brick_dimensions = na.zeros( (NB, 3), dtype='int64')
- self.brick_owners = na.ones(NB, dtype='int32') * self._mpi_get_rank()
- self._object_owners = self.brick_owners
- for i,b in enumerate(bricks):
- self.brick_left_edges[i,:] = b.LeftEdge
- self.brick_right_edges[i,:] = b.RightEdge
- self.brick_parents[i] = b.parent_grid_id
- self.brick_dimensions[i,:] = b.my_data[0].shape
- # Vertex-centered means we subtract one from the shape
- self.brick_dimensions -= 1
- self.bricks = na.array(bricks, dtype='object')
- # UNCOMMENT FOR PARALLELISM
- #self.join_lists()
-
- def _get_object_info(self):
- # We transpose here for the catdict operation
- info_dict = dict(left_edges = self.brick_left_edges.transpose(),
- right_edges = self.brick_right_edges.transpose(),
- parents = self.brick_parents,
- owners = self.brick_owners,
- dimensions = self.brick_dimensions.transpose(),)
- return info_dict
-
- def _set_object_info(self, info_dict):
- self.brick_left_edges = info_dict.pop("left_edges").transpose()
- self.brick_right_edges = info_dict.pop("right_edges").transpose()
- self.brick_parents = info_dict.pop("parents")
- self.brick_dimensions = info_dict.pop("dimensions").transpose()
- self.brick_owners = info_dict.pop("owners")
- self._object_owners = self.brick_owners
- bricks = self.bricks
- self.bricks = na.array([None] * self.brick_owners.size, dtype='object')
- # Copy our bricks back in
- self.bricks[self.brick_owners == self._mpi_get_rank()] = bricks[:]
-
- def _create_buffer(self, ind_list):
- # Note that we have vertex-centered data, so we add one before taking
- # the prod and the sum
- total_size = (self.brick_dimensions[ind_list,:] + 1).prod(axis=1).sum()
- mylog.debug("Creating buffer for %s bricks (%s)",
- len(ind_list), total_size)
- my_buffer = na.zeros(total_size, dtype='float64')
- return my_buffer
-
- def _pack_buffer(self, ind_list, my_buffer):
- si = 0
- for index in ind_list:
- d = self.bricks[index].my_data.ravel()
- my_buffer[si:si+d.size] = d[:]
- si += d.size
-
- def _unpack_buffer(self, ind_list, my_buffer):
- si = 0
- for index in ind_list:
- pgi = self.brick_parents[index]
- LE = self.brick_left_edges[index,:].copy()
- RE = self.brick_right_edges[index,:].copy()
- dims = self.brick_dimensions[index,:].copy()
- size = (dims + 1).prod()
- data = my_buffer[si:si+size].reshape(dims + 1)
- self.bricks[index] = PartitionedGrid(
- pgi, data, LE, RE, dims)
- si += size
-
- def _wipe_objects(self, indices):
- self.bricks[indices] = None
-
- def _collect_bricks(self, intersection_source):
- if not self._distributed: return
- # This entire routine should instead be set up to do:
- # alltoall broadcast of the *number* of requested bricks
- # non-blocking receives posted for int arrays
- # sizes of data calculated
- # concatenated data receives posted
- # send all data
- # get bricks back
- # This presupposes that we are using the AMRInclinedBox as a data
- # source. If we're not, we ought to be.
- needed_brick_i = find_grids_in_inclined_box(
- intersection_source.box_vectors, intersection_source.center,
- self.brick_left_edges, self.brick_right_edges)
- needed_brick_i = na.where(needed_brick_i)[0]
- self._collect_objects(needed_brick_i)
-
- def _initialize_parallel(self):
- pass
-
- def _finalize_parallel(self):
- pass
-
- def get_brick(self, brick_id):
- pass
-
- @property
- def _grids(self):
- return self.source._grids
-
class GridFaces(object):
def __init__(self, grids):
self.faces = [ [], [], [] ]
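
One subtlety behind the initialize_bricks rework: na.array(list_of_bricks, dtype='object') can broadcast array-like elements into a multidimensional object array instead of storing one brick per slot; pre-allocating avoids that. A self-contained illustration:

    import numpy as na

    bricks = [na.arange(3), na.arange(3)]          # array-like elements
    print na.array(bricks, dtype='object').shape   # (2, 3) -- broadcast!

    arr = na.empty(len(bricks), dtype='object')
    for i, b in enumerate(bricks):
        arr[i] = b
    print arr.shape                                # (2,) -- one slot per brick
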
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/visualization/volume_rendering/software_sampler.py
--- a/yt/visualization/volume_rendering/software_sampler.py
+++ /dev/null
@@ -1,188 +0,0 @@
-"""
-Import the components of the volume rendering extension
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
- Copyright (C) 2009 Matthew Turk. All Rights Reserved.
-
- This file is part of yt.
-
- yt is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-
-import h5py
-import numpy as na
-
-from yt.funcs import *
-
-from yt.data_objects.data_containers import data_object_registry
-from yt.utilities.amr_utils import TransferFunctionProxy, VectorPlane
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
- ParallelAnalysisInterface
-from yt.visualization.volume_rendering.grid_partitioner import \
- HomogenizedBrickCollection
-
-# We're going to register this class, but it does not directly inherit from
-# AMRData.
-class VolumeRendering(ParallelAnalysisInterface):
- bricks = None
- def __init__(self, normal_vector, width, center,
- resolution, transfer_function,
- fields = None, whole_box = False,
- sub_samples = 5, north_vector = None,
- pf = None):
- # Now we replicate some of the 'cutting plane' logic
- if not iterable(resolution):
- resolution = (resolution, resolution)
- self.resolution = resolution
- self.sub_samples = sub_samples
- if not iterable(width):
- width = (width, width, width) # front/back, left/right, top/bottom
- self.width = width
- self.center = center
- if fields is None: fields = ["Density"]
- self.fields = fields
- self.transfer_function = transfer_function
-
- # Now we set up our various vectors
- normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
- if north_vector is None:
- vecs = na.identity(3)
- t = na.cross(normal_vector, vecs).sum(axis=1)
- ax = t.argmax()
- north_vector = na.cross(vecs[ax,:], normal_vector).ravel()
- north_vector /= na.sqrt(na.dot(north_vector, north_vector))
- east_vector = -na.cross(north_vector, normal_vector).ravel()
- east_vector /= na.sqrt(na.dot(east_vector, east_vector))
- self.unit_vectors = [north_vector, east_vector, normal_vector]
- self.box_vectors = na.array([self.unit_vectors[0]*self.width[0],
- self.unit_vectors[1]*self.width[1],
- self.unit_vectors[2]*self.width[2]])
-
- self.origin = center - 0.5*width[0]*self.unit_vectors[0] \
- - 0.5*width[1]*self.unit_vectors[1] \
- - 0.5*width[2]*self.unit_vectors[2]
- self.back_center = center - 0.5*width[0]*self.unit_vectors[2]
- self.front_center = center + 0.5*width[0]*self.unit_vectors[2]
-
- self._initialize_source()
- self._construct_vector_array()
-
- def _initialize_source(self):
- check, source, rf = self._partition_hierarchy_2d_inclined(
- self.unit_vectors, self.origin, self.width, self.box_vectors)
- if check:
- self._base_source = self.pf.h.inclined_box(
- self.origin, self.box_vectors)
- else:
- # To avoid doubling-up
- self._base_source = source
- self.source = source
- self.res_fac = rf
- # Note that if we want to do this in parallel, with 3D domain decomp
- # for the grid/bricks, we can supply self._base_source here. But,
- # _distributed can't be overridden in that case.
- self._brick_collection = HomogenizedBrickCollection(self.source)
-
- def ray_cast(self, finalize=True):
- if self.bricks is None: self.partition_grids()
- # Now we order our bricks
- total_cells, LE, RE = 0, [], []
- for b in self.bricks:
- LE.append(b.LeftEdge)
- RE.append(b.RightEdge)
- total_cells += na.prod(b.my_data[0].shape)
- LE = na.array(LE) - self.back_center
- RE = na.array(RE) - self.back_center
- LE = na.sum(LE * self.unit_vectors[2], axis=1)
- RE = na.sum(RE * self.unit_vectors[2], axis=1)
- dist = na.minimum(LE, RE)
- ind = na.argsort(dist)
- pbar = get_pbar("Ray casting ", total_cells)
- total_cells = 0
- tfp = TransferFunctionProxy(self.transfer_function)
- tfp.ns = self.sub_samples
- for i, b in enumerate(self.bricks[ind]):
- pos = b.cast_plane(tfp, self.vector_plane)
- total_cells += na.prod(b.my_data[0].shape)
- pbar.update(total_cells)
- pbar.finish()
- if finalize: self._finalize()
-
- def _finalize(self):
- #im = self._mpi_catdict(dict(image=self.image)).pop('image')
- im, f = self._mpi_catrgb((self.image, self.resolution))
- self.image = im
-
- def dump_image(self, prefix):
- fn = "%s.h5" % (self._get_filename(prefix))
- mylog.info("Saving to %s", fn)
- f = h5py.File(fn, "w")
- f.create_dataset("/image", data=self.image)
-
- def load_bricks(self, fn):
- self.bricks = import_partitioned_grids(fn)
-
- def save_bricks(self, fn):
- # This will need to be modified for parallel
- export_partitioned_grids(self.bricks, fn)
-
- def save_image(self, prefix = None, norm = 1.0):
- if norm is not None:
- mi, ma = self.image.min(), norm*self.image.max()
- print "Normalizing with ", mi, ma
- image = (na.clip(self.image, mi, ma) - mi)/(ma - mi)
- else:
- image = self.image
- if prefix is None: prefix = "%s_volume_rendering" % (self.pf)
- plot_rgb(image, prefix)
-
- def partition_grids(self):
- log_field = []
- for field in self.fields:
- log_field.append(field in self.pf.field_info and
- self.pf.field_info[field].take_log)
- self._brick_collection._partition_local_grids(self.fields, log_field)
- # UNCOMMENT FOR PARALLELISM
- #self._brick_collection._collect_bricks(self.source)
- self.bricks = self._brick_collection.bricks
-
- def _construct_vector_array(self):
- rx = self.resolution[0] * self.res_fac[0]
- ry = self.resolution[1] * self.res_fac[1]
- # We should move away from pre-generation of vectors like this and into
- # the usage of on-the-fly generation in the VolumeIntegrator module
- self.image = na.zeros((rx,ry,3), dtype='float64', order='C')
- # We might have a different width and back_center
- bl = self.source.box_lengths
- px = na.linspace(-bl[0]/2.0, bl[0]/2.0, rx)[:,None]
- py = na.linspace(-bl[1]/2.0, bl[1]/2.0, ry)[None,:]
- inv_mat = self.source._inv_mat
- bc = self.source.origin + 0.5*self.source.box_vectors[0] \
- + 0.5*self.source.box_vectors[1]
- vectors = na.zeros((rx, ry, 3),
- dtype='float64', order='C')
- vectors[:,:,0] = inv_mat[0,0]*px + inv_mat[0,1]*py + bc[0]
- vectors[:,:,1] = inv_mat[1,0]*px + inv_mat[1,1]*py + bc[1]
- vectors[:,:,2] = inv_mat[2,0]*px + inv_mat[2,1]*py + bc[2]
- bounds = (px.min(), px.max(), py.min(), py.max())
- self.vector_plane = VectorPlane(vectors, self.box_vectors[2],
- bc, bounds, self.image,
- self.source._x_vec, self.source._y_vec)
- self.vp_bounds = bounds
- self.vectors = vectors
-
-data_object_registry["volume_rendering"] = VolumeRendering
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/visualization/volume_rendering/transfer_function_widget.py
--- a/yt/visualization/volume_rendering/transfer_function_widget.py
+++ /dev/null
@@ -1,227 +0,0 @@
-"""
-Simple transfer function editor
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
- Copyright (C) 2009 Matthew Turk. All Rights Reserved.
-
- This file is part of yt.
-
- yt is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-
-import numpy as na
-import cPickle
-from transfer_functions import ColorTransferFunction
-
-from enthought.traits.api import \
- HasTraits, Float, List, Instance, Button, Array, CArray, Range, \
- DelegatesTo, Property, Any, Code, Callable
-from enthought.traits.ui.api import \
- View, Item, HSplit, VSplit, ListEditor, InstanceEditor, ValueEditor, \
- HGroup, VGroup, CodeEditor, TextEditor, RangeEditor
-from enthought.chaco.api import Plot, ArrayPlotData
-from enthought.enable.component_editor import ComponentEditor
-import enthought.pyface.api as pyface
-
-class TFGaussian(HasTraits):
- center = Range(low = 'left_edge',
- high = 'right_edge')
- left_edge = DelegatesTo('tf')
- right_edge = DelegatesTo('tf')
-
- tf = Any
-
- width = Property
- rwidth = Range(0.0, 0.5, 0.05)
-
- red = Range(0.0, 1.0, 0.5)
- green = Range(0.0, 1.0, 0.5)
- blue = Range(0.0, 1.0, 0.5)
- alpha = Range(0.0, 1.0, 1.0)
-
- traits_view = View(VGroup(
- HGroup(
- Item('center', editor=RangeEditor(format='%0.4f')),
- Item('rwidth', label='Width',
- editor=RangeEditor(format='%0.4f')),
- ),
- HGroup(
- Item('red', editor=RangeEditor(format='%0.4f')),
- Item('green', editor=RangeEditor(format='%0.4f')),
- Item('blue', editor=RangeEditor(format='%0.4f')),
- Item('alpha', editor=RangeEditor(format='%0.4f'))
- ),
- show_border=True,),
- )
-
- def _get_width(self):
- width = self.rwidth * (self.tf.right_edge - self.tf.left_edge)
- return width
-
- def _center_default(self):
- return (self.left_edge + self.right_edge)/2.0
-
- def _width_default(self):
- return (self.right_edge - self.left_edge)/20.0
-
- def _red_changed(self):
- self.tf._redraw()
-
- def _green_changed(self):
- self.tf._redraw()
-
- def _blue_changed(self):
- self.tf._redraw()
-
- def _alpha_changed(self):
- self.tf._redraw()
-
- def _center_changed(self):
- self.tf._redraw()
-
- def _height_changed(self):
- self.tf._redraw()
-
- def _rwidth_changed(self):
- self.tf._redraw()
-
-class TFColors(HasTraits):
- gaussians = List(Instance(TFGaussian))
- transfer_function = Instance(ColorTransferFunction)
-
- left_edge = Float(0.0)
- right_edge = Float(10.0)
-
- add_gaussian = Button
- run_routine = Button
- save_function = Button
-
- routine = Callable
-
- plot_data = Instance(ArrayPlotData)
- image_data = Instance(ArrayPlotData)
- vr_image_data = Instance(ArrayPlotData)
-
- plot = Instance(Plot)
- image_plot = Instance(Plot)
- vr_image_plot = Instance(Plot)
-
- traits_view = View(VGroup(
- HGroup(
- VGroup(
- Item('image_plot', editor=ComponentEditor(),
- show_label=False, resizable=True),
- Item('plot', editor=ComponentEditor(),
- show_label=False, resizable=True),
- ),
- Item('vr_image_plot', editor=ComponentEditor(size=(512,512)),
- show_label=False, resizable=False,
- width=512, height=512)),
- Item("gaussians", style='custom',
- editor=ListEditor(style='custom'),
- show_label=False,
- ),
- HGroup(Item("left_edge"), Item("right_edge")),
- HGroup(Item("add_gaussian", show_label = False),
- Item("run_routine", show_label = False),
- Item("save_function", show_label = False),
- ),
- ),
- width=960, height=800,
- resizable=True)
-
- def _plot_data_default(self):
- return ArrayPlotData(rx = (0.0, 1.0), ry = (0.0, 0.0),
- gx = (0.0, 1.0), gy = (0.0, 0.0),
- bx = (0.0, 1.0), by = (0.0, 0.0),
- ax = (0.0, 1.0), ay = (0.0, 0.0),
- lx = (0.0, 1.0), ly = (0.0, 0.0),
- ux = (0.0, 1.0), uy = (1.0, 1.0))
-
- def _image_data_default(self):
- return ArrayPlotData(image_data = na.zeros((40,256,4), dtype='uint8'))
-
- def _vr_image_data_default(self):
- return ArrayPlotData(vr_image_data = na.zeros((512,512,3), dtype='uint8'))
-
- def _plot_default(self):
- p = Plot(self.plot_data)
- p.plot( ("rx", "ry"), type='line', color='red')
- p.plot( ("gx", "gy"), type='line', color='green')
- p.plot( ("bx", "by"), type='line', color='blue')
- p.plot( ("ax", "ay"), type='line', color='black')
- p.plot( ("lx", "ly"), type='line', color='black')
- p.plot( ("ux", "uy"), type='line', color='black')
- return p
-
- def _image_plot_default(self):
- plot = Plot(self.image_data, default_origin="top left")
- #plot.x_axis.orientation = "top"
- img_plot = plot.img_plot("image_data")[0]
-
- plot.bgcolor = "black"
- return plot
-
- def _vr_image_plot_default(self):
- plot = Plot(self.vr_image_data, default_origin="top left",
- size=(512,512))
- plot.aspect_ratio = 1.0
- #plot.x_axis.orientation = "top"
- img_plot = plot.img_plot("vr_image_data")[0]
-
- plot.bgcolor = "black"
- return plot
-
- def _add_gaussian_fired(self):
- self.gaussians.append(TFGaussian(tf = self))
-
- def _redraw(self):
- self.transfer_function = ColorTransferFunction(
- (self.left_edge, self.right_edge))
- for g in self.gaussians:
- self.transfer_function.add_gaussian(g.center, g.width,
- (g.red, g.green, g.blue, g.alpha))
- for f, c in zip(self.transfer_function.funcs, "rgba"):
- self.plot_data["%sx" % c] = f.x
- self.plot_data["%sy" % c] = f.y
-
- # Now we update the image describing the colors
- # This makes the assumption that all the x values are the same
- image = na.zeros((40, self.transfer_function.nbins, 4), dtype='uint8')
- for i,f in enumerate(self.transfer_function.funcs):
- image[:,:,i] = (f.y[None,:] * 255).astype('uint8')
- self.image_data["image_data"] = image
-
- def _run_routine_fired(self):
- img_data = self.routine(self.transfer_function)
- self.vr_image_data['vr_image_data'] = img_data
-
- def _save_function_fired(self):
- self._redraw()
- dlg = pyface.FileDialog(
- action='save as',
- wildcard="*.ctf",
- )
- if dlg.open() == pyface.OK:
- print "Saving:", dlg.path
- tf = self.transfer_function
- f = open(dlg.path, "wb")
- cPickle.dump(tf, f)
-
-if __name__ == "__main__":
- tfc = TFColors()
- tfc.configure_traits()
diff -r c894d8fce6b3f3c3773da898db2e56449daa9764 -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -623,7 +623,7 @@
return image
class ProjectionTransferFunction(MultiVariateTransferFunction):
- def __init__(self, x_bounds = (-1e60, 1e60)):
+ def __init__(self, x_bounds = (-1e60, 1e60), n_fields = 1):
r"""A transfer function that defines a simple projection.
To generate an interpolated, off-axis projection through a dataset,
@@ -634,9 +634,11 @@
Parameters
----------
- x_boudns : tuple of floats, optional
+ x_bounds : tuple of floats, optional
If any of your values lie outside this range, they will be
truncated.
+ n_fields : int, optional
+ How many fields we're going to project and pass through
Notes
-----
@@ -644,18 +646,18 @@
logging of fields.
"""
+ if n_fields > 3:
+ raise NotImplementedError
MultiVariateTransferFunction.__init__(self)
self.x_bounds = x_bounds
self.nbins = 2
self.linear_mapping = TransferFunction(x_bounds, 2)
self.linear_mapping.pass_through = 1
- self.add_field_table(self.linear_mapping, 0)
- self.alpha = TransferFunction(x_bounds, 2)
- self.alpha.y *= 0.0
- self.alpha.y += 1.0
- self.add_field_table(self.alpha, 0)
- self.link_channels(0, [0,1,2]) # same emission for all rgb
- self.link_channels(2, [3,4,5]) # this will remove absorption
+ self.link_channels(0, [0,1,2]) # same emission for all rgb, default
+ for i in range(n_fields):
+ self.add_field_table(self.linear_mapping, i)
+ self.link_channels(i, i)
+ self.link_channels(n_fields, [3,4,5]) # this will remove absorption
class PlanckTransferFunction(MultiVariateTransferFunction):
def __init__(self, T_bounds, rho_bounds, nbins=256,
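
A quick check of the new keyword, assuming only the constructor shown above:

    from yt.visualization.volume_rendering.transfer_functions import \
        ProjectionTransferFunction

    tf = ProjectionTransferFunction(n_fields=2)   # e.g. field*weight plus weight
    try:
        ProjectionTransferFunction(n_fields=4)
    except NotImplementedError:
        print "at most three fields can be passed through"
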
https://bitbucket.org/yt_analysis/yt/changeset/8521b7830239/
changeset: 8521b7830239
branch: yt
user: MatthewTurk
date: 2011-11-09 19:37:16
summary: Merging from yt tip
affected #: 43 files
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -268,12 +268,21 @@
cd ..
}
+if type -P wget &>/dev/null
+then
+ echo "Using wget"
+ export GETFILE="wget -nv"
+else
+ echo "Using curl"
+ export GETFILE="curl -sSO"
+fi
+
function get_enzotools
{
echo "Downloading $1 from yt-project.org"
[ -e $1 ] && return
- wget -nv "http://yt-project.org/dependencies/$1" || do_exit
- wget -nv "http://yt-project.org/dependencies/$1.md5" || do_exit
+ ${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
+ ${GETFILE} "http://yt-project.org/dependencies/$1.md5" || do_exit
( which md5sum &> /dev/null ) || return # return if we don't have md5sum
( md5sum -c $1.md5 2>&1 ) 1>> ${LOG_FILE} || do_exit
}
@@ -304,7 +313,7 @@
get_enzotools Python-2.7.2.tgz
get_enzotools numpy-1.6.1.tar.gz
get_enzotools matplotlib-1.1.0.tar.gz
-get_enzotools mercurial-1.8.1.tar.gz
+get_enzotools mercurial-2.0.tar.gz
get_enzotools ipython-0.10.tar.gz
get_enzotools h5py-2.0.1.tar.gz
get_enzotools Cython-0.15.1.tar.gz
@@ -442,7 +451,7 @@
if [ $INST_HG -eq 1 ]
then
echo "Installing Mercurial."
- do_setup_py mercurial-1.8.1
+ do_setup_py mercurial-2.0
export HG_EXEC=${DEST_DIR}/bin/hg
else
# We assume that hg can be found in the path.
@@ -553,6 +562,7 @@
then
( unzip -o ext-3.3.2.zip 2>&1 ) 1>> ${LOG_FILE} || do_exit
( echo "Symlinking ext-3.3.2 as ext-resources" 2>&1 ) 1>> ${LOG_FILE}
+ rm -rf ext-resources
ln -sf ext-3.3.2 ext-resources
touch ext-3.3.2/done
fi
@@ -562,6 +572,7 @@
then
( unzip -o ext-slate-110328.zip 2>&1 ) 1>> ${LOG_FILE} || do_exit
( echo "Symlinking ext-slate-110328 as ext-theme" 2>&1 ) 1>> ${LOG_FILE}
+ rm -rf ext-theme
ln -sf ext-slate-110328 ext-theme
touch ext-slate-110328/done
fi
@@ -571,6 +582,7 @@
then
( unzip -o PhiloGL-1.4.2.zip 2>&1 ) 1>> ${LOG_FILE} || do_exit
( echo "Symlinking PhiloGL-1.4.2 as PhiloGL" 2>&1 ) 1>> ${LOG_FILE}
+ rm -rf PhiloGL
ln -sf PhiloGL-1.4.2 PhiloGL
touch PhiloGL-1.4.2/done
fi
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 setup.py
--- a/setup.py
+++ b/setup.py
@@ -20,15 +20,10 @@
try:
import Cython
except ImportError as e:
- print "Received error on importing Cython:"
- print e
- print "Now attempting to install Cython"
- import pip
- rv = pip.main(["install",
- "http://yt-project.org/dependencies/Cython-latest.tar.gz"])
- if rv == 1:
- print "Unable to install Cython. Please report this bug to yt-users."
- sys.exit(1)
+ print "Cython is a build-time requirement for the source tree of yt."
+ print "Please either install yt from a provided, release tarball,"
+ print "or install Cython (version 0.15 or higher)."
+ sys.exit(1)
######
# This next bit comes from Matthew Brett, to get Cython working with NumPy
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 tests/boolean_regions.py
--- /dev/null
+++ b/tests/boolean_regions.py
@@ -0,0 +1,18 @@
+from yt.utilities.answer_testing.output_tests import \
+ SingleOutputTest, create_test
+from yt.utilities.answer_testing.boolean_region_tests import \
+ TestBooleanANDGridQuantity, TestBooleanORGridQuantity, \
+ TestBooleanNOTGridQuantity, TestBooleanANDParticleQuantity, \
+ TestBooleanORParticleQuantity, TestBooleanNOTParticleQuantity
+
+create_test(TestBooleanANDGridQuantity, "BooleanANDGrid")
+
+create_test(TestBooleanORGridQuantity, "BooleanORGrid")
+
+create_test(TestBooleanNOTGridQuantity, "BooleanNOTGrid")
+
+create_test(TestBooleanANDParticleQuantity, "BooleanANDParticle")
+
+create_test(TestBooleanORParticleQuantity, "BooleanORParticle")
+
+create_test(TestBooleanNOTParticleQuantity, "BooleanNOTParticle")
\ No newline at end of file
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1376,7 +1376,8 @@
["ParticleMassMsun", "particle_index"]
def __init__(self, data_source, padding, num_neighbors, bounds, total_mass,
- period, threshold=160.0, dm_only=True, rearrange=True, premerge=True):
+ period, threshold=160.0, dm_only=True, rearrange=True, premerge=True,
+ tree = 'F'):
"""
Run hop on *data_source* with a given density *threshold*. If
*dm_only* is set, only run it on the dark matter particles, otherwise
@@ -1393,6 +1394,7 @@
self.period = na.array([1.]*3)
self._data_source = data_source
self.premerge = premerge
+ self.tree = tree
mylog.info("Initializing HOP")
HaloList.__init__(self, data_source, dm_only)
@@ -1421,7 +1423,8 @@
obj = ParallelHOPHaloFinder(self.period, self.padding,
self.num_neighbors, self.bounds,
self.particle_fields,
- self.threshold, rearrange=self.rearrange, premerge=self.premerge)
+ self.threshold, rearrange=self.rearrange, premerge=self.premerge,
+ tree = self.tree)
self.densities, self.tags = obj.density, obj.chainID
# I'm going to go ahead and delete self.densities because it's not
# actually being used. I'm not going to remove it altogether because
@@ -1749,6 +1752,7 @@
for halo in self._groups:
if not self.comm.is_mine(halo): continue
halo.write_particle_list(f)
+ f.close()
def dump(self, basename="HopAnalysis"):
r"""Save the full halo data to disk.
@@ -1780,7 +1784,7 @@
def __init__(self, pf, subvolume=None,threshold=160, dm_only=True, \
resize=True, rearrange=True,\
fancy_padding=True, safety=1.5, premerge=True, sample=0.03, \
- total_mass=None, num_particles=None):
+ total_mass=None, num_particles=None, tree = 'F'):
r"""Parallel HOP halo finder.
Halos are built by:
@@ -1810,9 +1814,16 @@
Default = False.
resize : bool
Turns load-balancing on or off. Default = True.
+ tree : string
+ Chooses which kD Tree to use. The Fortran one (tree = 'F') is
+ faster, but uses more memory. The Cython one (tree = 'C') is
+ slower but more memory efficient.
+ Default = 'F'
rearrange : bool
Turns on faster nearest neighbor searches at the cost of increased
- memory usage. Default = True.
+ memory usage.
+ This option only applies when using the Fortran tree.
+ Default = True.
fancy_padding : bool
True calculates padding independently for each face of each
subvolume. Default = True.
@@ -1862,6 +1873,9 @@
self.num_neighbors = 65
self.safety = safety
self.sample = sample
+ self.tree = tree
+ if self.tree != 'F' and self.tree != 'C':
+ mylog.error("No kD Tree specified!")
period = pf.domain_right_edge - pf.domain_left_edge
topbounds = na.array([[0., 0., 0.], period])
# Cut up the volume evenly initially, with no padding.
@@ -1969,7 +1983,8 @@
(LE_padding, RE_padding) = self.padding
parallelHOPHaloList.__init__(self, self._data_source, self.padding, \
self.num_neighbors, self.bounds, total_mass, period, \
- threshold=threshold, dm_only=dm_only, rearrange=rearrange, premerge=premerge)
+ threshold=threshold, dm_only=dm_only, rearrange=rearrange, premerge=premerge,
+ tree = self.tree)
self._join_halolists()
yt_counters("Final Grouping")
@@ -2120,6 +2135,7 @@
mass in the entire volume.
Default = None, which means the total mass is automatically
calculated.
+
Examples
--------
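
From the calling side, the new keyword threads straight through to the kD-tree setup; a hedged sketch (assuming yt.mods exports parallelHF, as the merger_tree import below suggests, and an invented dataset name):

    from yt.mods import *

    pf = load("RedshiftOutput0005")            # hypothetical
    # 'F' (Fortran, default): faster, more memory; 'C' (Cython): slower, leaner
    halos = parallelHF(pf, threshold=160.0, tree='C')
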
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -38,6 +38,8 @@
except ImportError:
mylog.debug("The Fortran kD-Tree did not import correctly.")
+from yt.utilities.spatial import cKDTree
+
from yt.utilities.parallel_tools.parallel_analysis_interface import \
parallel_blocking_call, \
ParallelAnalysisInterface
@@ -45,7 +47,7 @@
class ParallelHOPHaloFinder(ParallelAnalysisInterface):
def __init__(self,period, padding, num_neighbors, bounds,
particle_fields, threshold=160.0, rearrange=True,
- premerge=True):
+ premerge=True, tree='F'):
ParallelAnalysisInterface.__init__(self)
self.threshold = threshold
self.rearrange = rearrange
@@ -64,6 +66,7 @@
self.mass = particle_fields.pop("ParticleMassMsun")
self.padded_particles = []
self.nMerge = 4
+ self.tree = tree
yt_counters("chainHOP")
self.max_mem = 0
self.__max_memory()
@@ -342,34 +345,50 @@
Set up the data objects that get passed to the kD-tree code.
"""
yt_counters("init kd tree")
- # Yes, we really do need to initialize this many arrays.
- # They're deleted in _parallelHOP.
- fKD.dens = na.zeros(self.size, dtype='float64', order='F')
- fKD.mass = na.concatenate((self.mass, self.mass_pad))
- del self.mass
- fKD.pos = na.empty((3, self.size), dtype='float64', order='F')
- # This actually copies the data into the fortran space.
- self.psize = self.xpos.size
- fKD.pos[0, :self.psize] = self.xpos
- fKD.pos[1, :self.psize] = self.ypos
- fKD.pos[2, :self.psize] = self.zpos
- del self.xpos, self.ypos, self.zpos
- gc.collect()
- fKD.pos[0, self.psize:] = self.xpos_pad
- fKD.pos[1, self.psize:] = self.ypos_pad
- fKD.pos[2, self.psize:] = self.zpos_pad
- del self.xpos_pad, self.ypos_pad, self.zpos_pad
- gc.collect()
- fKD.qv = na.asfortranarray(na.empty(3, dtype='float64'))
- fKD.nn = self.num_neighbors
- # Plus 2 because we're looking for that neighbor, but only keeping
- # nMerge + 1 neighbor tags, skipping ourselves.
- fKD.nMerge = self.nMerge + 2
- fKD.nparts = self.size
- fKD.sort = True # Slower, but needed in _connect_chains
- fKD.rearrange = self.rearrange # True is faster, but uses more memory
- # Now call the fortran.
- create_tree(0)
+ if self.tree == 'F':
+ # Yes, we really do need to initialize this many arrays.
+ # They're deleted in _parallelHOP.
+ fKD.dens = na.zeros(self.size, dtype='float64', order='F')
+ fKD.mass = na.concatenate((self.mass, self.mass_pad))
+ del self.mass
+ fKD.pos = na.empty((3, self.size), dtype='float64', order='F')
+ # This actually copies the data into the fortran space.
+ self.psize = self.xpos.size
+ fKD.pos[0, :self.psize] = self.xpos
+ fKD.pos[1, :self.psize] = self.ypos
+ fKD.pos[2, :self.psize] = self.zpos
+ del self.xpos, self.ypos, self.zpos
+ gc.collect()
+ fKD.pos[0, self.psize:] = self.xpos_pad
+ fKD.pos[1, self.psize:] = self.ypos_pad
+ fKD.pos[2, self.psize:] = self.zpos_pad
+ del self.xpos_pad, self.ypos_pad, self.zpos_pad
+ gc.collect()
+ fKD.qv = na.asfortranarray(na.empty(3, dtype='float64'))
+ fKD.nn = self.num_neighbors
+ # Plus 2 because we're looking for that neighbor, but only keeping
+ # nMerge + 1 neighbor tags, skipping ourselves.
+ fKD.nMerge = self.nMerge + 2
+ fKD.nparts = self.size
+ fKD.sort = True # Slower, but needed in _connect_chains
+ fKD.rearrange = self.rearrange # True is faster, but uses more memory
+ # Now call the fortran.
+ create_tree(0)
+ elif self.tree == 'C':
+ self.mass = na.concatenate((self.mass, self.mass_pad))
+ self.pos = na.empty((self.size, 3), dtype='float64')
+ self.psize = self.xpos.size
+ self.pos[:self.psize, 0] = self.xpos
+ self.pos[:self.psize, 1] = self.ypos
+ self.pos[:self.psize, 2] = self.zpos
+ del self.xpos, self.ypos, self.zpos
+ gc.collect()
+ self.pos[self.psize:, 0] = self.xpos_pad
+ self.pos[self.psize:, 1] = self.ypos_pad
+ self.pos[self.psize:, 2] = self.zpos_pad
+ del self.xpos_pad, self.ypos_pad, self.zpos_pad
+ gc.collect()
+ self.kdtree = cKDTree(self.pos, leafsize = 32)
self.__max_memory()
yt_counters("init kd tree")
@@ -395,8 +414,12 @@
self.is_inside = ( (points >= LE).all(axis=1) * \
(points < RE).all(axis=1) )
elif round == 'second':
- self.is_inside = ( (fKD.pos.T >= LE).all(axis=1) * \
- (fKD.pos.T < RE).all(axis=1) )
+ if self.tree == 'F':
+ self.is_inside = ( (fKD.pos.T >= LE).all(axis=1) * \
+ (fKD.pos.T < RE).all(axis=1) )
+ elif self.tree == 'C':
+ self.is_inside = ( (self.pos > LE).all(axis=1) * \
+ (self.pos < RE).all(axis=1) )
# Below we find out which particles are in the `annulus', one padding
# distance inside the boundaries. First we find the particles outside
# this inner boundary.
@@ -406,8 +429,12 @@
inner = na.invert( (points >= temp_LE).all(axis=1) * \
(points < temp_RE).all(axis=1) )
elif round == 'second' or round == 'third':
- inner = na.invert( (fKD.pos.T >= temp_LE).all(axis=1) * \
- (fKD.pos.T < temp_RE).all(axis=1) )
+ if self.tree == 'F':
+ inner = na.invert( (fKD.pos.T >= temp_LE).all(axis=1) * \
+ (fKD.pos.T < temp_RE).all(axis=1) )
+ elif self.tree == 'C':
+ inner = na.invert( (self.pos >= temp_LE).all(axis=1) * \
+ (self.pos < temp_RE).all(axis=1) )
if round == 'first':
del points
# After inverting the logic above, we want points that are both
@@ -444,26 +471,44 @@
self.densestNN = na.empty(self.size,dtype='int64')
# We find nearest neighbors in chunks.
chunksize = 10000
- fKD.chunk_tags = na.asfortranarray(na.empty((self.num_neighbors, chunksize), dtype='int64'))
- start = 1 # Fortran counting!
- finish = 0
- while finish < self.size:
- finish = min(finish+chunksize,self.size)
- # Call the fortran. start and finish refer to the data locations
- # in fKD.pos, and specify the range of particles to find nearest
- # neighbors
- fKD.start = start
- fKD.finish = finish
- find_chunk_nearest_neighbors()
- chunk_NNtags = (fKD.chunk_tags[:,:finish-start+1] - 1).transpose()
- # Find the densest nearest neighbors by referencing the already
- # calculated density.
- n_dens = na.take(self.density,chunk_NNtags)
- max_loc = na.argmax(n_dens,axis=1)
- for i in xrange(finish - start + 1): # +1 for fortran counting.
- j = start + i - 1 # -1 for fortran counting.
- self.densestNN[j] = chunk_NNtags[i,max_loc[i]]
- start = finish + 1
+ if self.tree == 'F':
+ fKD.chunk_tags = na.asfortranarray(na.empty((self.num_neighbors, chunksize), dtype='int64'))
+ start = 1 # Fortran counting!
+ finish = 0
+ while finish < self.size:
+ finish = min(finish+chunksize,self.size)
+ # Call the fortran. start and finish refer to the data locations
+ # in fKD.pos, and specify the range of particles to find nearest
+ # neighbors
+ fKD.start = start
+ fKD.finish = finish
+ find_chunk_nearest_neighbors()
+ chunk_NNtags = (fKD.chunk_tags[:,:finish-start+1] - 1).transpose()
+ # Find the densest nearest neighbors by referencing the already
+ # calculated density.
+ n_dens = na.take(self.density,chunk_NNtags)
+ max_loc = na.argmax(n_dens,axis=1)
+ for i in xrange(finish - start + 1): # +1 for fortran counting.
+ j = start + i - 1 # -1 for fortran counting.
+ self.densestNN[j] = chunk_NNtags[i,max_loc[i]]
+ start = finish + 1
+ elif self.tree == 'C':
+ start = 0
+ finish = 0
+ while finish < self.size - 1:
+ finish = min(finish+chunksize, self.size)
+ # Unlike above, this function returns a new chunk_NNtags
+ # that is the right size every time. But this may not actually
+ # be as memory efficient - fragmenting?
+ chunk_NNtags = self.kdtree.find_chunk_nearest_neighbors(start, \
+ finish, num_neighbors=self.num_neighbors)
+ n_dens = na.take(self.density, chunk_NNtags)
+ max_loc = na.argmax(n_dens, axis=1)
+ for i in xrange(finish - start):
+ j = start + i
+ self.densestNN[j] = chunk_NNtags[i,max_loc[i]]
+ start = finish
yt_counters("densestNN")
self.__max_memory()
del chunk_NNtags, max_loc, n_dens
@@ -568,12 +613,15 @@
chain_map = defaultdict(set)
for i in xrange(max(self.chainID)+1):
chain_map[i].add(i)
- # Plus 2 because we're looking for that neighbor, but only keeping
- # nMerge + 1 neighbor tags, skipping ourselves.
- fKD.dist = na.empty(self.nMerge+2, dtype='float64')
- fKD.tags = na.empty(self.nMerge+2, dtype='int64')
- # We can change this here to make the searches faster.
- fKD.nn = self.nMerge+2
+ if self.tree == 'F':
+ # Plus 2 because we're looking for that neighbor, but only keeping
+ # nMerge + 1 neighbor tags, skipping ourselves.
+ fKD.dist = na.empty(self.nMerge+2, dtype='float64')
+ fKD.tags = na.empty(self.nMerge+2, dtype='int64')
+ # We can change this here to make the searches faster.
+ fKD.nn = self.nMerge + 2
+ elif self.tree == 'C':
+ nn = self.nMerge + 2
yt_counters("preconnect kd tree search.")
for i in xrange(self.size):
# Don't consider this particle if it's not part of a chain.
@@ -586,9 +634,13 @@
# We're only connecting >= peakthresh chains now.
if part_max_dens < self.peakthresh: continue
# Loop over nMerge closest nearest neighbors.
- fKD.qv = fKD.pos[:, i]
- find_nn_nearest_neighbors()
- NNtags = fKD.tags[:] - 1
+ if self.tree == 'F':
+ fKD.qv = fKD.pos[:, i]
+ find_nn_nearest_neighbors()
+ NNtags = fKD.tags[:] - 1
+ elif self.tree == 'C':
+ qv = self.pos[i, :]
+ NNtags = self.kdtree.query(qv, nn)[1]
same_count = 0
for j in xrange(int(self.nMerge+1)):
thisNN = NNtags[j+1] # Don't consider ourselves at NNtags[0]
@@ -1002,10 +1054,13 @@
self.chain_densest_n = {} # chainID -> {chainIDs->boundary dens}
# Plus 2 because we're looking for that neighbor, but only keeping
# nMerge + 1 neighbor tags, skipping ourselves.
- fKD.dist = na.empty(self.nMerge+2, dtype='float64')
- fKD.tags = na.empty(self.nMerge+2, dtype='int64')
- # We can change this here to make the searches faster.
- fKD.nn = self.nMerge+2
+ if self.tree == 'F':
+ fKD.dist = na.empty(self.nMerge+2, dtype='float64')
+ fKD.tags = na.empty(self.nMerge+2, dtype='int64')
+ # We can change this here to make the searches faster.
+ fKD.nn = self.nMerge+2
+ elif self.tree == 'C':
+ nn = self.nMerge + 2
for i in xrange(int(self.size)):
# Don't consider this particle if it's not part of a chain.
if self.chainID[i] < 0: continue
@@ -1018,9 +1073,13 @@
# Make sure we're skipping deleted chains.
if part_max_dens == -1.0: continue
# Loop over nMerge closest nearest neighbors.
- fKD.qv = fKD.pos[:, i]
- find_nn_nearest_neighbors()
- NNtags = fKD.tags[:] - 1
+ if self.tree == 'F':
+ fKD.qv = fKD.pos[:, i]
+ find_nn_nearest_neighbors()
+ NNtags = fKD.tags[:] - 1
+ elif self.tree == 'C':
+ qv = self.pos[i, :]
+ NNtags = self.kdtree.query(qv, nn)[1]
for j in xrange(int(self.nMerge+1)):
thisNN = NNtags[j+1] # Don't consider ourselves at NNtags[0]
thisNN_chainID = self.chainID[thisNN]
@@ -1345,11 +1404,14 @@
select = (self.chainID != -1)
calc = len(na.where(select == True)[0])
loc = na.empty((calc, 3), dtype='float64')
- loc[:, 0] = na.concatenate((self.xpos, self.xpos_pad))[select]
- loc[:, 1] = na.concatenate((self.ypos, self.ypos_pad))[select]
- loc[:, 2] = na.concatenate((self.zpos, self.zpos_pad))[select]
- self.__max_memory()
- del self.xpos_pad, self.ypos_pad, self.zpos_pad
+ if self.tree == 'F':
+ loc[:, 0] = na.concatenate((self.xpos, self.xpos_pad))[select]
+ loc[:, 1] = na.concatenate((self.ypos, self.ypos_pad))[select]
+ loc[:, 2] = na.concatenate((self.zpos, self.zpos_pad))[select]
+ self.__max_memory()
+ del self.xpos_pad, self.ypos_pad, self.zpos_pad
+ elif self.tree == 'C':
+ loc = self.pos[select]
subchain = self.chainID[select]
# First we need to find the maximum density point for all groups.
# I think this will be faster than several vector operations that need
@@ -1470,10 +1532,17 @@
# Loop over the particles to find NN for each.
mylog.info('Finding nearest neighbors/density...')
yt_counters("chainHOP_tags_dens")
- chainHOP_tags_dens()
+ if self.tree == 'F':
+ chainHOP_tags_dens()
+ elif self.tree == 'C':
+ self.density = self.kdtree.chainHOP_get_dens(self.mass, \
+ num_neighbors = self.num_neighbors, nMerge = self.nMerge + 2)
yt_counters("chainHOP_tags_dens")
- self.density = fKD.dens.copy()
- # Now each particle has NNtags, and a local self density.
+ if self.tree == 'F':
+ self.density = fKD.dens.copy()
+ elif self.tree == 'C':
+ pass
+ # Now each particle has a local self density.
# Let's find densest NN
mylog.info('Finding densest nearest neighbors...')
self._densestNN()
@@ -1496,17 +1565,22 @@
self._communicate_annulus_chainIDs()
mylog.info('Connecting %d chains into groups...' % self.nchains)
self._connect_chains()
- self.mass = fKD.mass[:self.psize]
- self.mass_pad = fKD.mass[self.psize:]
- del fKD.dens, fKD.mass, fKD.dens
- self.xpos = fKD.pos[0, :self.psize]
- self.ypos = fKD.pos[1, :self.psize]
- self.zpos = fKD.pos[2, :self.psize]
- self.xpos_pad = fKD.pos[0, self.psize:]
- self.ypos_pad = fKD.pos[1, self.psize:]
- self.zpos_pad = fKD.pos[2, self.psize:]
- del fKD.pos, fKD.chunk_tags
- free_tree(0) # Frees the kdtree object.
+ if self.tree == 'F':
+ self.mass = fKD.mass[:self.psize]
+ self.mass_pad = fKD.mass[self.psize:]
+ del fKD.dens, fKD.mass
+ self.xpos = fKD.pos[0, :self.psize]
+ self.ypos = fKD.pos[1, :self.psize]
+ self.zpos = fKD.pos[2, :self.psize]
+ self.xpos_pad = fKD.pos[0, self.psize:]
+ self.ypos_pad = fKD.pos[1, self.psize:]
+ self.zpos_pad = fKD.pos[2, self.psize:]
+ del fKD.pos, fKD.chunk_tags
+ free_tree(0) # Frees the kdtree object.
+ gc.collect()
+ elif self.tree == 'C':
+ del self.kdtree
+ gc.collect()
del self.densestNN
mylog.info('Communicating group links globally...')
self._make_global_chain_densest_n()
@@ -1530,7 +1604,10 @@
for groupID in self.I_own[taskID]:
self.halo_taskmap[groupID].add(taskID)
del self.I_own
- del self.xpos, self.ypos, self.zpos
+ if self.tree == 'F':
+ del self.xpos, self.ypos, self.zpos
+ elif self.tree == 'C':
+ pass
def __add_to_array(self, arr, key, value, type):
"""
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -105,6 +105,9 @@
self.parse_halo_catalog()
if cache: self.cache = dict()#MaxLengthDict()
+ def __del__(self):
+ self.particle_file.close()
+
def parse_halo_catalog(self):
hp = []
for line in open("FOF/groups_%05i.dat" % self.output_id):
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -31,7 +31,7 @@
from yt.funcs import *
from yt.analysis_modules.halo_finding.halo_objects import \
- FOFHaloFinder, HaloFinder
+ FOFHaloFinder, HaloFinder, parallelHF
from yt.analysis_modules.halo_profiler.multi_halo_profiler import \
HaloProfiler
from yt.convenience import load
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/analysis_modules/light_cone/light_cone.py
--- a/yt/analysis_modules/light_cone/light_cone.py
+++ b/yt/analysis_modules/light_cone/light_cone.py
@@ -42,7 +42,7 @@
from .light_cone_projection import _light_cone_projection
class LightCone(EnzoSimulation):
- def __init__(self, EnzoParameterFile, initial_redshift=1.0,
+ def __init__(self, enzo_parameter_file, initial_redshift=1.0,
final_redshift=0.0, observer_redshift=0.0,
field_of_view_in_arcminutes=600.0, image_resolution_in_arcseconds=60.0,
use_minimum_datasets=True, deltaz_min=0.0, minimum_coherent_box_fraction=0.0,
@@ -100,7 +100,7 @@
self.recycleRandomSeed = 0
# Initialize EnzoSimulation machinery for getting dataset list.
- EnzoSimulation.__init__(self, EnzoParameterFile, initial_redshift=self.initial_redshift,
+ EnzoSimulation.__init__(self, enzo_parameter_file, initial_redshift=self.initial_redshift,
final_redshift=self.final_redshift, links=True,
enzo_parameters={'CosmologyComovingBoxSize':float}, **kwargs)
@@ -513,7 +513,7 @@
else:
f.write("Original Solution\n")
f.write("OriginalRandomSeed = %s\n" % self.originalRandomSeed)
- f.write("EnzoParameterFile = %s\n" % self.EnzoParameterFile)
+ f.write("enzo_parameter_file = %s\n" % self.enzo_parameter_file)
f.write("\n")
for q, output in enumerate(self.light_cone_solution):
f.write("Proj %04d, %s, z = %f, depth/box = %f, width/box = %f, axis = %d, center = %f, %f, %f\n" %
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -131,6 +131,7 @@
proj[new_name] = b[f][:]
proj.axis = axis
proj.pf = pf
+ f.close()
return proj
def _chunk(arrlike, chunksize = 800000):
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -62,6 +62,16 @@
NeedsProperty, \
NeedsParameter
+def force_array(item, shape):
+ try:
+ sh = item.shape
+ return item
+ except AttributeError:
+ if item:
+ return na.ones(shape, dtype='bool')
+ else:
+ return na.zeros(shape, dtype='bool')
+
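force_array normalizes cut masks that may arrive either as boolean arrays or as plain booleans (a fully enclosed grid simply returns True); probing .shape raises AttributeError for scalars, which are then expanded to full masks. For illustration, with hypothetical shapes:

    mask = na.zeros((4, 4, 4), dtype='bool')
    force_array(mask, (4, 4, 4)) is mask        # arrays pass through untouched
    force_array(True, (4, 4, 4)).all()          # scalar True -> all-ones mask
    not force_array(False, (4, 4, 4)).any()     # scalar False -> all-zeros mask
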
def restore_grid_state(func):
"""
A decorator that takes a function with the API of (self, grid, field)
@@ -86,7 +96,8 @@
def check_cache(self, grid):
if isinstance(grid, FakeGridForParticles):
return func(self, grid)
- elif grid.id not in self._cut_masks:
+ elif grid.id not in self._cut_masks or \
+ hasattr(self, "_boolean_touched"):
cm = func(self, grid)
self._cut_masks[grid.id] = cm
return self._cut_masks[grid.id]
@@ -2323,7 +2334,8 @@
if force_particle_read == False and \
self.pf.field_info.has_key(field) and \
self.pf.field_info[field].particle_type and \
- self.pf.h.io._particle_reader:
+ self.pf.h.io._particle_reader and \
+ not isinstance(self, AMRBooleanRegionBase):
self.particles.get_data(field)
if field not in self.field_data:
if self._generate_field(field): continue
@@ -2656,6 +2668,18 @@
particle_handler_registry[self._type_name](self.pf, self)
return self._particle_handler
+
+ def volume(self, unit = "unitary"):
+ """
+ Return the volume of the data container in units *unit*.
+ This is found by adding up the volume of the cells with centers
+ in the container, rather than using the geometric shape of
+ the container, so this may vary very slightly
+ from what might be expected from the geometric volume.
+ """
+ return self.quantities["TotalQuantity"]("CellVolume")[0] * \
+ (self.pf[unit] / self.pf['cm']) ** 3.0
+
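Because volume() now sums CellVolume over the container rather than using an analytic shape, every 3D container supports it uniformly. A hypothetical call, assuming pf is a loaded parameter file:

    sp = pf.h.sphere([0.5, 0.5, 0.5], 0.1)
    # Cell-based volume in unitary units; this approaches 4/3*pi*r**3
    # as the cells straddling the sphere's surface get smaller.
    vol = sp.volume()
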
class ExtractedRegionBase(AMR3DData):
"""
ExtractedRegions are arbitrarily defined containers of data, useful
@@ -2979,15 +3003,6 @@
& (grid['z'] + dzp > self.left_edge[2]) )
return cm
- def volume(self, unit = "unitary"):
- """
- Return the volume of the region in units *unit*.
- """
- diff = na.array(self.right_edge) - na.array(self.left_edge)
- # Find the full volume
- vol = na.prod(diff * self.pf[unit])
- return vol
-
class AMRRegionStrictBase(AMRRegionBase):
"""
AMRRegion without any dx padding for cell selection
@@ -3049,21 +3064,6 @@
& (grid['z'] + dzp + off_z > self.left_edge[2]) )
return cm
- def volume(self, unit = "unitary"):
- """
- Return the volume of the region in units *unit*.
- """
- period = self.pf.domain_right_edge - self.pf.domain_left_edge
- diff = na.array(self.right_edge) - na.array(self.left_edge)
- # Correct for wrap-arounds.
- tofix = (diff < 0)
- toadd = period[tofix]
- diff += toadd
- # Find the full volume
- vol = na.prod(diff * self.pf[unit])
- return vol
-
-
class AMRPeriodicRegionStrictBase(AMRPeriodicRegionBase):
"""
AMRPeriodicRegion without any dx padding for cell selection
@@ -3155,12 +3155,6 @@
self._cut_masks[grid.id] = cm
return cm
- def volume(self, unit = "unitary"):
- """
- Return the volume of the sphere in units *unit*.
- """
- return 4./3. * math.pi * (self.radius * self.pf[unit])**3.0
-
class AMRCoveringGridBase(AMR3DData):
_spatial = True
_type_name = "covering_grid"
@@ -3404,6 +3398,149 @@
def flush_data(self, *args, **kwargs):
raise KeyError("Can't do this")
+class AMRBooleanRegionBase(AMR3DData):
+ """
+ A hybrid region built by boolean comparison between
+ existing regions.
+ """
+ _type_name = "boolean"
+ _con_args = {"regions"}
+ def __init__(self, regions, fields = None, pf = None, **kwargs):
+ """
+ This will build a hybrid region based on the boolean logic
+ of the regions.
+
+ Parameters
+ ----------
+ regions : list
+ A list of region objects and strings describing the boolean logic
+ to use when building the hybrid region. The boolean logic can be
+ nested using parentheses.
+
+ Examples
+ --------
+ >>> re1 = pf.h.region([0.5, 0.5, 0.5], [0.4, 0.4, 0.4],
+ [0.6, 0.6, 0.6])
+ >>> re2 = pf.h.region([0.5, 0.5, 0.5], [0.45, 0.45, 0.45],
+ [0.55, 0.55, 0.55])
+ >>> sp1 = pf.h.sphere([0.575, 0.575, 0.575], .03)
+ >>> toroid_shape = pf.h.boolean([re1, "NOT", re2])
+ >>> toroid_shape_with_hole = pf.h.boolean([re1, "NOT", "(", re2, "OR",
+ sp1, ")"])
+ """
+ # Center is meaningless, but we'll define it all the same.
+ AMR3DData.__init__(self, [0.5]*3, fields, pf, **kwargs)
+ self.regions = regions
+ self._all_regions = []
+ self._some_overlap = []
+ self._all_overlap = []
+ self._cut_masks = {}
+ self._get_all_regions()
+ self._make_overlaps()
+ self._get_list_of_grids()
+
+ def _get_all_regions(self):
+ # Before anything else, we find out, uniquely, which regions are
+ # involved in this process.
+ for item in self.regions:
+ if isinstance(item, types.StringType): continue
+ self._all_regions.append(item)
+ # So cut_masks don't get messed up.
+ item._boolean_touched = True
+ self._all_regions = na.unique(self._all_regions)
+
+ def _make_overlaps(self):
+ # Using the processed cut_masks, we'll figure out what grids
+ # are left in the hybrid region.
+ for region in self._all_regions:
+ region._get_list_of_grids()
+ for grid in region._grids:
+ if grid in self._some_overlap or grid in self._all_overlap:
+ continue
+ # Get the cut_mask for this grid in this region, and see
+ # if there's any overlap with the overall cut_mask.
+ overall = self._get_cut_mask(grid)
+ local = force_array(region._get_cut_mask(grid),
+ grid.ActiveDimensions)
+ # Below we don't want to match empty masks.
+ if overall.sum() == 0 and local.sum() == 0: continue
+ # The whole grid is in the hybrid region if a) its cut_mask
+ # in the original region is identical to the new one and b)
+ # the original region cut_mask is all ones.
+ if (local == na.bitwise_and(overall, local)).all() and \
+ (local == True).all():
+ self._all_overlap.append(grid)
+ continue
+ if (overall == local).any():
+ # Some of local is in overall
+ self._some_overlap.append(grid)
+ continue
+
+ def _is_fully_enclosed(self, grid):
+ return (grid in self._all_overlap)
+
+ def _get_list_of_grids(self):
+ self._grids = na.array(self._some_overlap + self._all_overlap,
+ dtype='object')
+
+ def _get_cut_mask(self, grid, field=None):
+ if self._is_fully_enclosed(grid):
+ return True # We do not want child masking here
+ if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)) \
+ and grid.id in self._cut_masks:
+ return self._cut_masks[grid.id]
+ # If we get this far, we have to generate the cut_mask.
+ return self._get_level_mask(self.regions, grid)
+
+ def _get_level_mask(self, ops, grid):
+ level_masks = []
+ end = 0
+ for i, item in enumerate(ops):
+ if end > 0 and i < end:
+ # We skip over things inside parentheses on this level.
+ continue
+ if isinstance(item, AMRData):
+ # Add this region's cut_mask to level_masks
+ level_masks.append(force_array(item._get_cut_mask(grid),
+ grid.ActiveDimensions))
+ elif item == "AND" or item == "NOT" or item == "OR":
+ level_masks.append(item)
+ elif item == "(":
+ # recurse down, and we'll append the results, which
+ # should be a single cut_mask
+ open_count = 0
+ for ii, item in enumerate(ops[i + 1:]):
+ # We look for the matching closing parentheses to find
+ # where we slice ops.
+ if item == "(":
+ open_count += 1
+ if item == ")" and open_count > 0:
+ open_count -= 1
+ elif item == ")" and open_count == 0:
+ end = i + ii + 1
+ break
+ level_masks.append(force_array(self._get_level_mask(ops[i + 1:end],
+ grid), grid.ActiveDimensions))
+ # Now we do the logic on our level_mask.
+ # There should be no nested logic anymore.
+ # The first item should be a cut_mask,
+ # so that will be our starting point.
+ this_cut_mask = level_masks[0]
+ for i, item in enumerate(level_masks):
+ # I could use a slice above, but I'll keep i consistent instead.
+ if i == 0: continue
+ if item == "AND":
+ # So, the next item in level_masks we want to AND.
+ na.bitwise_and(this_cut_mask, level_masks[i+1], this_cut_mask)
+ if item == "NOT":
+ # It's convenient to remember that NOT == AND NOT
+ na.bitwise_and(this_cut_mask, na.invert(level_masks[i+1]),
+ this_cut_mask)
+ if item == "OR":
+ na.bitwise_or(this_cut_mask, level_masks[i+1], this_cut_mask)
+ if not isinstance(grid, FakeGridForParticles):
+ self._cut_masks[grid.id] = this_cut_mask
+ return this_cut_mask
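
Per grid, the operator handling above reduces to in-place bitwise operations on boolean arrays, with NOT treated as AND NOT. A standalone numpy sketch of the same semantics:

    import numpy as na
    a = na.array([True, True, False, False])
    b = na.array([True, False, True, False])
    cut = a.copy()
    na.bitwise_and(cut, b, cut)             # a AND b
    na.bitwise_and(cut, na.invert(b), cut)  # ... AND NOT b
    na.bitwise_or(cut, b, cut)              # ... OR b
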
def _reconstruct_object(*args, **kwargs):
pfid = args[0]
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -88,6 +88,10 @@
mylog.debug("Re-examining hierarchy")
self._initialize_level_stats()
+ def __del__(self):
+ if self._data_file is not None:
+ self._data_file.close()
+
def _get_parameters(self):
return self.parameter_file.parameters
parameters=property(_get_parameters)
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -302,7 +302,9 @@
def _is_valid(self, *args, **kwargs):
try:
fileh = h5py.File(args[0],'r')
- return "Chombo_global" in fileh["/"]
+ valid = "Chombo_global" in fileh["/"]
+ fileh.close()
+ return valid
except:
pass
return False
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -45,7 +45,8 @@
fhandle = h5py.File(grid.filename,'r')
ncomp = int(fhandle['/'].attrs['num_components'])
- return [c[1] for c in f['/'].attrs.listitems()[-ncomp:]]
+ fns = [c[1] for c in fhandle['/'].attrs.listitems()[-ncomp:]]
+ fhandle.close()
+ return fns
def _read_data_set(self,grid,field):
fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
@@ -61,6 +62,7 @@
stop = start + boxsize
data = lev[self._data_string][start:stop]
+ fhandle.close()
return data.reshape(dims, order='F')
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -146,6 +146,7 @@
try:
harray_fp = h5py.File(harray_fn)
self.num_grids = harray_fp["/Level"].len()
+ harray_fp.close()
except IOError:
pass
elif os.path.getsize(self.hierarchy_filename) == 0:
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -126,7 +126,8 @@
except KeyError:
self.grid_particle_count[:] = 0.0
self._particle_indices = na.zeros(self.num_grids + 1, dtype='int64')
- na.add.accumulate(self.grid_particle_count, out=self._particle_indices[1:])
+ na.add.accumulate(self.grid_particle_count.squeeze(),
+ out=self._particle_indices[1:])
# This will become redundant, as _prepare_grid will reset it to its
# current value. Note that FLASH uses 1-based indexing for refinement
# levels, but we do not, so we reduce the level by 1.
@@ -274,14 +275,18 @@
def _find_parameter(self, ptype, pname, scalar = False, handle = None):
# We're going to implement handle caching eventually
- if handle is None: handle = self._handle
if handle is None:
+ close = False
+ handle = self._handle
+ if handle is None:
+ close = True
handle = h5py.File(self.parameter_filename, "r")
nn = "/%s %s" % (ptype,
{False: "runtime parameters", True: "scalars"}[scalar])
for tpname, pval in handle[nn][:]:
if tpname.strip() == pname:
return pval
+ if close: handle.close()
raise KeyError(pname)
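
Note that the early return pval above skips the conditional close when the parameter is found; the open-only-if-needed idiom is easier to keep leak-free with try/finally, as in this hypothetical standalone version:

    import h5py

    def find_parameter(filename, nn, pname, handle=None):
        close = handle is None
        if close:  # we opened it, so we must close it
            handle = h5py.File(filename, "r")
        try:
            for tpname, pval in handle[nn][:]:
                if tpname.strip() == pname:
                    return pval
            raise KeyError(pname)
        finally:
            if close:
                handle.close()
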
def _parse_parameter_file(self):
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -46,6 +46,9 @@
enumerate(particle_fields)])
except KeyError:
self._particle_fields = {}
+
+ def __del__(self):
+ self._handle.close()
def _read_particles(self, fields_to_read, type, args, grid_list,
count_list, conv_factors):
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -194,7 +194,9 @@
if add1 in fileh['/'].items():
if add2 in fileh['/'+add1].attrs.keys():
if fileh['/'+add1].attrs[add2] == format:
+ fileh.close()
return True
- except h5py.h5e.LowLevelIOError:
+ fileh.close()
+ except:
pass
return False
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/frontends/gadget/io.py
--- a/yt/frontends/gadget/io.py
+++ b/yt/frontends/gadget/io.py
@@ -45,6 +45,7 @@
adr = grid.Address
fh = h5py.File(grid.filename,mode='r')
rets = cPickle.loads(fh['/root'].attrs['fieldnames'])
+ fh.close()
return rets
def _read_data_slice(self,grid, field, axis, coord):
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -108,11 +108,15 @@
ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \
HomogenizedVolume, Camera, off_axis_projection
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+ parallel_objects
+
for name, cls in callback_registry.items():
exec("%s = cls" % name)
from yt.convenience import all_pfs, max_spheres, load, projload
+
# We load plugins. Keep in mind, this can be fairly dangerous -
# the primary purpose is to allow people to have a set of functions
# that get used every time that they don't have to *define* every time.
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -357,14 +357,14 @@
tf_obj.tables[i].y))
self.field_tables[i].field_id = tf_obj.field_ids[i]
self.field_tables[i].weight_field_id = tf_obj.weight_field_ids[i]
- print "Field table", i, "corresponds to",
- print self.field_tables[i].field_id,
- print "(Weighted with ", self.field_tables[i].weight_field_id,
- print ")"
+ #print "Field table", i, "corresponds to",
+ #print self.field_tables[i].field_id,
+ #print "(Weighted with ", self.field_tables[i].weight_field_id,
+ #print ")"
for i in range(6):
self.field_table_ids[i] = tf_obj.field_table_ids[i]
- print "Channel", i, "corresponds to", self.field_table_ids[i]
+ #print "Channel", i, "corresponds to", self.field_table_ids[i]
@cython.boundscheck(False)
@cython.wraparound(False)
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/utilities/answer_testing/api.py
--- a/yt/utilities/answer_testing/api.py
+++ b/yt/utilities/answer_testing/api.py
@@ -50,3 +50,12 @@
TestHaloCompositionHashHOP, \
TestHaloCompositionHashFOF, \
TestHaloCompositionHashPHOP
+
+from .boolean_region_tests import \
+ TestBooleanANDGridQuantity, \
+ TestBooleanORGridQuantity, \
+ TestBooleanNOTGridQuantity, \
+ TestBooleanANDParticleQuantity, \
+ TestBooleanORParticleQuantity, \
+ TestBooleanNOTParticleQuantity
+
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/utilities/answer_testing/boolean_region_tests.py
--- /dev/null
+++ b/yt/utilities/answer_testing/boolean_region_tests.py
@@ -0,0 +1,165 @@
+from yt.mods import *
+import matplotlib
+import pylab
+from output_tests import SingleOutputTest, YTStaticOutputTest, create_test
+import hashlib
+import numpy as np
+
+# Tests to make sure that grid quantities that should be identical
+# under the AND operator really are identical.
+class TestBooleanANDGridQuantity(YTStaticOutputTest):
+ def run(self):
+ domain = self.pf.domain_right_edge - self.pf.domain_left_edge
+ four = 0.4 * domain + self.pf.domain_left_edge
+ five = 0.5 * domain + self.pf.domain_left_edge
+ six = 0.6 * domain + self.pf.domain_left_edge
+ re1 = self.pf.h.region_strict(five, four, six)
+ re2 = self.pf.h.region_strict(five, five, six)
+ re = self.pf.h.boolean([re1, "AND", re2])
+ # re should look like re2.
+ x2 = re2['x']
+ x = re['x']
+ x2 = x2[x2.argsort()]
+ x = x[x.argsort()]
+ self.result = (x2, x)
+
+ def compare(self, old_result):
+ self.compare_array_delta(self.result[0], self.result[1], 1e-10)
+
+ def plot(self):
+ return []
+
+# OR
+class TestBooleanORGridQuantity(YTStaticOutputTest):
+ def run(self):
+ domain = self.pf.domain_right_edge - self.pf.domain_left_edge
+ four = 0.4 * domain + self.pf.domain_left_edge
+ five = 0.5 * domain + self.pf.domain_left_edge
+ six = 0.6 * domain + self.pf.domain_left_edge
+ re1 = self.pf.h.region_strict(five, four, six)
+ re2 = self.pf.h.region_strict(five, five, six)
+ re = self.pf.h.boolean([re1, "OR", re2])
+ # re should look like re1
+ x1 = re1['x']
+ x = re['x']
+ x1 = x1[x1.argsort()]
+ x = x[x.argsort()]
+ self.result = (x1, x)
+
+ def compare(self, old_result):
+ self.compare_array_delta(self.result[0], self.result[1], 1e-10)
+
+ def plot(self):
+ return []
+
+# NOT
+class TestBooleanNOTGridQuantity(YTStaticOutputTest):
+ def run(self):
+ domain = self.pf.domain_right_edge - self.pf.domain_left_edge
+ four = 0.4 * domain + self.pf.domain_left_edge
+ five = 0.5 * domain + self.pf.domain_left_edge
+ six = 0.6 * domain + self.pf.domain_left_edge
+ re1 = self.pf.h.region_strict(five, four, six)
+ re2 = self.pf.h.region_strict(five, five, six)
+ # Bottom base
+ re3 = self.pf.h.region_strict(five, four, [six[0], six[1], five[2]])
+ # Side
+ re4 = self.pf.h.region_strict(five, [four[0], four[1], five[2]],
+ [five[0], six[1], six[2]])
+ # Last small cube
+ re5 = self.pf.h.region_strict(five, [five[0], four[0], four[2]],
+ [six[0], five[1], six[2]])
+ # re1 NOT re2 should look like re3 OR re4 OR re5
+ re = self.pf.h.boolean([re1, "NOT", re2])
+ reo = self.pf.h.boolean([re3, "OR", re4, "OR", re5])
+ x = re['x']
+ xo = reo['x']
+ x = x[x.argsort()]
+ xo = xo[xo.argsort()]
+ self.result = (x, xo)
+
+ def compare(self, old_result):
+ self.compare_array_delta(self.result[0], self.result[1], 1e-10)
+
+ def plot(self):
+ return []
+
+# Tests to make sure that particle quantities that should be identical
+# under the AND operator really are identical.
+class TestBooleanANDParticleQuantity(YTStaticOutputTest):
+ def run(self):
+ domain = self.pf.domain_right_edge - self.pf.domain_left_edge
+ four = 0.4 * domain + self.pf.domain_left_edge
+ five = 0.5 * domain + self.pf.domain_left_edge
+ six = 0.6 * domain + self.pf.domain_left_edge
+ re1 = self.pf.h.region_strict(five, four, six)
+ re2 = self.pf.h.region_strict(five, five, six)
+ re = self.pf.h.boolean([re1, "AND", re2])
+ # re should look like re2.
+ x2 = re2['particle_position_x']
+ x = re['particle_position_x']
+ x2 = x2[x2.argsort()]
+ x = x[x.argsort()]
+ self.result = (x2, x)
+
+ def compare(self, old_result):
+ self.compare_array_delta(self.result[0], self.result[1], 1e-10)
+
+ def plot(self):
+ return []
+
+# OR
+class TestBooleanORParticleQuantity(YTStaticOutputTest):
+ def run(self):
+ domain = self.pf.domain_right_edge - self.pf.domain_left_edge
+ four = 0.4 * domain + self.pf.domain_left_edge
+ five = 0.5 * domain + self.pf.domain_left_edge
+ six = 0.6 * domain + self.pf.domain_left_edge
+ re1 = self.pf.h.region_strict(five, four, six)
+ re2 = self.pf.h.region_strict(five, five, six)
+ re = self.pf.h.boolean([re1, "OR", re2])
+ # re should look like re1
+ x1 = re1['particle_position_x']
+ x = re['particle_position_x']
+ x1 = x1[x1.argsort()]
+ x = x[x.argsort()]
+ self.result = (x1, x)
+
+ def compare(self, old_result):
+ self.compare_array_delta(self.result[0], self.result[1], 1e-10)
+
+ def plot(self):
+ return []
+
+# NOT
+class TestBooleanNOTParticleQuantity(YTStaticOutputTest):
+ def run(self):
+ domain = self.pf.domain_right_edge - self.pf.domain_left_edge
+ four = 0.4 * domain + self.pf.domain_left_edge
+ five = 0.5 * domain + self.pf.domain_left_edge
+ six = 0.6 * domain + self.pf.domain_left_edge
+ re1 = self.pf.h.region_strict(five, four, six)
+ re2 = self.pf.h.region_strict(five, five, six)
+ # Bottom base
+ re3 = self.pf.h.region_strict(five, four, [six[0], six[1], five[2]])
+ # Side
+ re4 = self.pf.h.region_strict(five, [four[0], four[1], five[2]],
+ [five[0], six[1], six[2]])
+ # Last small cube
+ re5 = self.pf.h.region_strict(five, [five[0], four[0], four[2]],
+ [six[0], five[1], six[2]])
+ # re1 NOT re2 should look like re3 OR re4 OR re5
+ re = self.pf.h.boolean([re1, "NOT", re2])
+ reo = self.pf.h.boolean([re3, "OR", re4, "OR", re5])
+ x = re['particle_position_x']
+ xo = reo['particle_position_x']
+ x = x[x.argsort()]
+ xo = xo[xo.argsort()]
+ self.result = (x, xo)
+
+ def compare(self, old_result):
+ self.compare_array_delta(self.result[0], self.result[1], 1e-10)
+
+ def plot(self):
+ return []
+
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1570,7 +1570,7 @@
save_name = "%s"%pf+"_"+field+"_rendering.png"
if not '.png' in save_name:
save_name += '.png'
- if cam._par_rank != -1:
+ if cam.comm.rank != -1:
write_bitmap(image,save_name)
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -49,7 +49,7 @@
from mpi4py import MPI
parallel_capable = (MPI.COMM_WORLD.size > 1)
if parallel_capable:
- mylog.info("Parallel computation enabled: %s / %s",
+ mylog.info("Global parallel computation enabled: %s / %s",
MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
ytcfg["yt","__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
ytcfg["yt","__global_parallel_size"] = str(MPI.COMM_WORLD.size)
@@ -61,8 +61,6 @@
#ytcfg["yt","StoreParameterFiles"] = "False"
# Now let's make sure we have the right options set.
if MPI.COMM_WORLD.rank > 0:
- if ytcfg.getboolean("yt","serialize"):
- ytcfg["yt","onlydeserialize"] = "True"
if ytcfg.getboolean("yt","LogFile"):
ytcfg["yt","LogFile"] = "False"
yt.utilities.logger.disable_file_logging()
@@ -150,8 +148,10 @@
def __init__(self, pobj, just_list = False, attr='_grids',
round_robin=False):
ObjectIterator.__init__(self, pobj, just_list, attr=attr)
- self._offset = MPI.COMM_WORLD.rank
- self._skip = MPI.COMM_WORLD.size
+ # pobj has to be a ParallelAnalysisInterface, so it must have a .comm
+ # object.
+ self._offset = pobj.comm.rank
+ self._skip = pobj.comm.size
# Note that we're doing this in advance, and with a simple means
# of choosing them; more advanced methods will be explored later.
if self._use_all:
@@ -182,11 +182,15 @@
retval = None
if self._processing or not self._distributed:
return func(self, *args, **kwargs)
- if self._owner == MPI.COMM_WORLD.rank:
+ comm = _get_comm((self,))
+ if self._owner == comm.rank:
self._processing = True
retval = func(self, *args, **kwargs)
self._processing = False
- retval = MPI.COMM_WORLD.bcast(retval, root=self._owner)
+ # To be sure we utilize the root= kwarg, we manually access the .comm
+ # attribute, which must be an instance of MPI.Intracomm, and call bcast
+ # on that.
+ retval = comm.comm.bcast(retval, root=self._owner)
#MPI.COMM_WORLD.Barrier()
return retval
return single_proc_results
@@ -220,6 +224,13 @@
return func(self, data, **kwargs)
return passage
+def _get_comm(args):
+ if len(args) > 0 and hasattr(args[0], "comm"):
+ comm = args[0].comm
+ else:
+ comm = communication_system.communicators[-1]
+ return comm
+
def parallel_blocking_call(func):
"""
This decorator blocks on entry and exit of a function.
@@ -227,10 +238,11 @@
@wraps(func)
def barrierize(*args, **kwargs):
mylog.debug("Entering barrier before %s", func.func_name)
- MPI.COMM_WORLD.Barrier()
+ comm = _get_comm(args)
+ comm.barrier()
retval = func(*args, **kwargs)
mylog.debug("Entering barrier after %s", func.func_name)
- MPI.COMM_WORLD.Barrier()
+ comm.barrier()
return retval
if parallel_capable:
return barrierize
@@ -244,10 +256,11 @@
"""
@wraps(f1)
def in_order(*args, **kwargs):
- if MPI.COMM_WORLD.rank == 0:
+ comm = _get_comm(args)
+ if comm.rank == 0:
f1(*args, **kwargs)
- MPI.COMM_WORLD.Barrier()
- if MPI.COMM_WORLD.rank != 0:
+ comm.barrier()
+ if comm.rank != 0:
f2(*args, **kwargs)
if not parallel_capable: return f1
return in_order
@@ -259,7 +272,8 @@
"""
@wraps(func)
def root_only(*args, **kwargs):
- if MPI.COMM_WORLD.rank == 0:
+ comm = _get_comm(args)
+ if comm.rank == 0:
try:
func(*args, **kwargs)
all_clear = 1
@@ -268,8 +282,7 @@
all_clear = 0
else:
all_clear = None
- #MPI.COMM_WORLD.Barrier()
- all_clear = MPI.COMM_WORLD.bcast(all_clear, root=0)
+ all_clear = comm.mpi_bcast_pickled(all_clear)
if not all_clear: raise RuntimeError
if parallel_capable: return root_only
return func
@@ -334,6 +347,10 @@
if not parallel_capable: raise RuntimeError
my_communicator = communication_system.communicators[-1]
my_size = my_communicator.size
+ if njobs > my_size:
+ mylog.error("You have asked for %s jobs, but you only have %s processors.",
+ njobs, my_size)
+ raise RuntimeError
my_rank = my_communicator.rank
all_new_comms = na.array_split(na.arange(my_size), njobs)
for i,comm_set in enumerate(all_new_comms):
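
The rank partitioning is just an even split of the communicator's ranks into njobs contiguous groups; for example, eight processors over three jobs:

    >>> na.array_split(na.arange(8), 3)
    [array([0, 1, 2]), array([3, 4, 5]), array([6, 7])]
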
@@ -367,31 +384,29 @@
self.communicators.append(Communicator(MPI.COMM_WORLD))
else:
self.communicators.append(Communicator(None))
- def push(self, size=None, ranks=None):
- raise NotImplementedError
- if size is None:
- size = len(available_ranks)
- if len(available_ranks) < size:
- raise RuntimeError
- if ranks is None:
- ranks = [available_ranks.pop() for i in range(size)]
-
- group = MPI.COMM_WORLD.Group.Incl(ranks)
- new_comm = MPI.COMM_WORLD.Create(group)
- self.communicators.append(Communicator(new_comm))
- return new_comm
+
+ def push(self, new_comm):
+ if not isinstance(new_comm, Communicator):
+ new_comm = Communicator(new_comm)
+ self.communicators.append(new_comm)
+ self._update_parallel_state(new_comm)
def push_with_ids(self, ids):
group = self.communicators[-1].comm.Get_group().Incl(ids)
new_comm = self.communicators[-1].comm.Create(group)
+ self.push(new_comm)
+ return new_comm
+
+ def _update_parallel_state(self, new_comm):
from yt.config import ytcfg
ytcfg["yt","__topcomm_parallel_size"] = str(new_comm.size)
ytcfg["yt","__topcomm_parallel_rank"] = str(new_comm.rank)
- self.communicators.append(Communicator(new_comm))
- return new_comm
+ if MPI.COMM_WORLD.rank > 0 and ytcfg.getboolean("yt","serialize"):
+ ytcfg["yt","onlydeserialize"] = "True"
def pop(self):
self.communicators.pop()
+ self._update_parallel_state(self.communicators[-1])
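
Communicators thus form a stack whose top defines the current parallel state (the topcomm size and rank, plus the serialization behavior). A sketch of the intended life cycle, using the module-level communication_system instance:

    cs = communication_system
    subcomm = cs.push_with_ids([0, 1])  # subgroup of the current top comm
    # ... collective work within the subgroup ...
    cs.pop()                            # restore the enclosing state
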
class Communicator(object):
comm = None
@@ -495,12 +510,12 @@
data = self.alltoallv_array(data, arr_size, offsets, sizes)
return data
elif datatype == "list" and op == "cat":
- if self.comm.rank == 0:
- data = self.__mpi_recvlist(data)
- else:
- self.comm.send(data, dest=0, tag=0)
- mylog.debug("Opening MPI Broadcast on %s", self.comm.rank)
- data = self.comm.bcast(data, root=0)
+ recv_data = self.comm.allgather(data)
+ # Now flatten into a single list, since this
+ # returns us a list of lists.
+ data = []
+ while recv_data:
+ data.extend(recv_data.pop(0))
return data
raise NotImplementedError
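
The list/cat path now uses a single allgather instead of a root-side gather followed by a broadcast; flattening the per-rank lists is then purely local. For example, on three ranks:

    recv_data = [[1, 2], [3], [4, 5]]   # one list per rank, from allgather
    data = []
    while recv_data:
        data.extend(recv_data.pop(0))
    # data == [1, 2, 3, 4, 5]
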
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/utilities/rpdb.py
--- a/yt/utilities/rpdb.py
+++ b/yt/utilities/rpdb.py
@@ -66,6 +66,10 @@
server.server_close()
if size > 1:
from mpi4py import MPI
+ # This COMM_WORLD is okay. We want to barrierize here, while waiting
+ # for shutdown from the rest of the parallel group. If you are running
+ # with --rpdb it is assumed you know what you are doing and you won't
+ # let this get out of hand.
MPI.COMM_WORLD.Barrier()
class pdb_handler(object):
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -148,6 +148,7 @@
config.add_subpackage("delaunay") # From SciPy, written by Robert Kern
config.add_subpackage("kdtree")
config.add_data_files(('kdtree', ['kdtree/fKDpy.so',]))
+ config.add_subpackage("spatial")
config.add_subpackage("parallel_tools")
config.add_extension("data_point_utilities",
"yt/utilities/data_point_utilities.c", libraries=["m"])
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/utilities/spatial/README
--- /dev/null
+++ b/yt/utilities/spatial/README
@@ -0,0 +1,35 @@
+Stephen Skory
+s at skory.us
+October 2011
+
+This directory is a modified version of the same directory that is part of
+the scipy.spatial package. It has been modified by me in the following
+ways:
+
+- In ckdtree.pyx, distances and searches over the tree both take
+ periodic boundary conditions into account.
+
+- In ckdtree.pyx, all input and output arrays now
+ use 64-bit types: long and double.
+
+- In ckdtree.pyx, I've added two functions specifically for parallel HOP,
+ chainHOP_get_dens and find_chunk_nearest_neighbors.
+
+- In kdtree.py, I've commented out 'import scipy.sparse',
+ which means that any kdtree functionality that uses sparse
+ will not work. This is to avoid needing to build the rest
+ of scipy, which is a challenge and not necessary for just
+ the kdtree.
+
+- I've removed all of the qhull source and functionality.
+
+- I've removed the 'tests' directory.
+
+- I've removed anything having to do with Bento, the
+ python package manager.
+
+Anything that has been removed can be found in the original scipy
+source distribution.
+
+
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/utilities/spatial/__init__.py
--- /dev/null
+++ b/yt/utilities/spatial/__init__.py
@@ -0,0 +1,34 @@
+"""
+=============================================================
+Spatial algorithms and data structures (:mod:`scipy.spatial`)
+=============================================================
+
+Nearest-neighbor queries:
+
+.. autosummary::
+ :toctree: generated/
+
+ KDTree -- class for efficient nearest-neighbor queries
+ cKDTree -- class for efficient nearest-neighbor queries (faster impl.)
+ distance -- module containing many different distance measures
+
+Delaunay triangulation:
+
+.. autosummary::
+ :toctree: generated/
+
+ Delaunay
+ tsearch
+
+"""
+
+from kdtree import *
+from ckdtree import *
+#from qhull import *
+
+__all__ = filter(lambda s:not s.startswith('_'),dir())
+__all__ += ['distance']
+
+import distance
+from numpy.testing import Tester
+test = Tester().test
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/utilities/spatial/ckdtree.pyx
--- /dev/null
+++ b/yt/utilities/spatial/ckdtree.pyx
@@ -0,0 +1,758 @@
+# Copyright Anne M. Archibald 2008
+# Released under the scipy license
+import numpy as np
+cimport numpy as np
+cimport stdlib
+cimport cython
+
+import kdtree
+
+cdef double infinity = np.inf
+
+__all__ = ['cKDTree']
+
+
+# priority queue
+cdef union heapcontents:
+ int intdata
+ char* ptrdata
+
+cdef struct heapitem:
+ double priority
+ heapcontents contents
+
+cdef struct heap:
+ int n
+ heapitem* heap
+ int space
+
+cdef inline heapcreate(heap* self,int initial_size):
+ self.space = initial_size
+ self.heap = <heapitem*>stdlib.malloc(sizeof(heapitem)*self.space)
+ self.n=0
+
+cdef inline heapdestroy(heap* self):
+ stdlib.free(self.heap)
+
+cdef inline heapresize(heap* self, int new_space):
+ if new_space<self.n:
+ raise ValueError("Heap containing %d items cannot be resized to %d" % (self.n, new_space))
+ self.space = new_space
+ self.heap = <heapitem*>stdlib.realloc(<void*>self.heap,new_space*sizeof(heapitem))
+
+cdef inline heappush(heap* self, heapitem item):
+ cdef int i
+ cdef heapitem t
+
+ self.n += 1
+ if self.n>self.space:
+ heapresize(self,2*self.space+1)
+
+ i = self.n-1
+ self.heap[i] = item
+ while i>0 and self.heap[i].priority<self.heap[(i-1)//2].priority:
+ t = self.heap[(i-1)//2]
+ self.heap[(i-1)//2] = self.heap[i]
+ self.heap[i] = t
+ i = (i-1)//2
+
+cdef heapitem heappeek(heap* self):
+ return self.heap[0]
+
+cdef heapremove(heap* self):
+ cdef heapitem t
+ cdef int i, j, k, l
+
+ self.heap[0] = self.heap[self.n-1]
+ self.n -= 1
+ if self.n < self.space//4 and self.space>40: #FIXME: magic number
+ heapresize(self,self.space//2+1)
+
+ i=0
+ j=1
+ k=2
+ while ((j<self.n and
+ self.heap[i].priority > self.heap[j].priority or
+ k<self.n and
+ self.heap[i].priority > self.heap[k].priority)):
+ if k<self.n and self.heap[j].priority>self.heap[k].priority:
+ l = k
+ else:
+ l = j
+ t = self.heap[l]
+ self.heap[l] = self.heap[i]
+ self.heap[i] = t
+ i = l
+ j = 2*i+1
+ k = 2*i+2
+
+cdef heapitem heappop(heap* self):
+ cdef heapitem it
+ it = heappeek(self)
+ heapremove(self)
+ return it
+
+
+
+
+
+# utility functions
+cdef inline double dmax(double x, double y):
+ if x>y:
+ return x
+ else:
+ return y
+cdef inline double dabs(double x):
+ if x>0:
+ return x
+ else:
+ return -x
+cdef inline double dmin(double x, double y):
+ if x<y:
+ return x
+ else:
+ return y
+cdef inline double _distance_p(double*x,double*y,double p,int k,double upperbound,
+ double*period):
+ """Compute the distance between x and y
+
+ Computes the Minkowski p-distance to the power p between two points.
+ If the distance**p is larger than upperbound, then any number larger
+ than upperbound may be returned (the calculation is truncated).
+
+ Periodicity added by S. Skory.
+ """
+ cdef int i
+ cdef double r, m
+ r = 0
+ if p==infinity:
+ for i in range(k):
+ m = dmin(dabs(x[i] - y[i]), period[i] - dabs(x[i] - y[i]))
+ r = dmax(r,m)
+ if r>upperbound:
+ return r
+ elif p==1:
+ for i in range(k):
+ m = dmin(dabs(x[i] - y[i]), period[i] - dabs(x[i] - y[i]))
+ r += m
+ if r>upperbound:
+ return r
+ else:
+ for i in range(k):
+ m = dmin(dabs(x[i] - y[i]), period[i] - dabs(x[i] - y[i]))
+ r += m**p
+ if r>upperbound:
+ return r
+ return r
+
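
The periodic handling is the minimum-image convention applied per coordinate before the Minkowski accumulation. An equivalent numpy sketch (omitting the upperbound truncation used to prune searches):

    import numpy as np

    def periodic_minkowski_p(x, y, p, period):
        # Returns distance**p, like _distance_p above.
        d = np.abs(np.asarray(x) - np.asarray(y))
        m = np.minimum(d, np.asarray(period) - d)
        if np.isinf(p):
            return m.max()
        return (m ** p).sum()
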
+
+
+# Tree structure
+cdef struct innernode:
+ int split_dim
+ int n_points
+ double split
+ double* maxes
+ double* mins
+ innernode* less
+ innernode* greater
+cdef struct leafnode:
+ int split_dim
+ int n_points
+ int start_idx
+ int end_idx
+ double* maxes
+ double* mins
+
+# this is the standard trick for variable-size arrays:
+# malloc sizeof(nodeinfo)+self.m*sizeof(double) bytes.
+cdef struct nodeinfo:
+ innernode* node
+ double side_distances[0]
+
+cdef class cKDTree:
+ """kd-tree for quick nearest-neighbor lookup
+
+ This class provides an index into a set of k-dimensional points
+ which can be used to rapidly look up the nearest neighbors of any
+ point.
+
+ The algorithm used is described in Maneewongvatana and Mount 1999.
+ The general idea is that the kd-tree is a binary trie, each of whose
+ nodes represents an axis-aligned hyperrectangle. Each node specifies
+ an axis and splits the set of points based on whether their coordinate
+ along that axis is greater than or less than a particular value.
+
+ During construction, the axis and splitting point are chosen by the
+ "sliding midpoint" rule, which ensures that the cells do not all
+ become long and thin.
+
+ The tree can be queried for the r closest neighbors of any given point
+ (optionally returning only those within some maximum distance of the
+ point). It can also be queried, with a substantial gain in efficiency,
+ for the r approximate closest neighbors.
+
+ For large dimensions (20 is already large) do not expect this to run
+ significantly faster than brute force. High-dimensional nearest-neighbor
+ queries are a substantial open problem in computer science.
+
+ Parameters
+ ----------
+ data : array-like, shape (n,m)
+ The n data points of dimension m to be indexed. This array is
+ not copied unless this is necessary to produce a contiguous
+ array of doubles, and so modifying this data will result in
+ bogus results.
+ leafsize : positive integer
+ The number of points at which the algorithm switches over to
+ brute-force.
+
+ """
+
+ cdef innernode* tree
+ cdef readonly object data
+ cdef double* raw_data
+ cdef readonly int n, m
+ cdef readonly int leafsize
+ cdef readonly object maxes
+ cdef double* raw_maxes
+ cdef readonly object mins
+ cdef double* raw_mins
+ cdef object indices
+ cdef np.int64_t* raw_indices
+ def __init__(cKDTree self, data, int leafsize=10):
+ cdef np.ndarray[double, ndim=2] inner_data
+ cdef np.ndarray[double, ndim=1] inner_maxes
+ cdef np.ndarray[double, ndim=1] inner_mins
+ cdef np.ndarray[np.int64_t, ndim=1] inner_indices
+ self.data = np.ascontiguousarray(data,dtype=np.double)
+ self.n, self.m = np.shape(self.data)
+ self.leafsize = leafsize
+ if self.leafsize<1:
+ raise ValueError("leafsize must be at least 1")
+ self.maxes = np.ascontiguousarray(np.amax(self.data,axis=0))
+ self.mins = np.ascontiguousarray(np.amin(self.data,axis=0))
+ self.indices = np.ascontiguousarray(np.arange(self.n,dtype=np.int64))
+
+ inner_data = self.data
+ self.raw_data = <double*>inner_data.data
+ inner_maxes = self.maxes
+ self.raw_maxes = <double*>inner_maxes.data
+ inner_mins = self.mins
+ self.raw_mins = <double*>inner_mins.data
+ inner_indices = self.indices
+ self.raw_indices = <np.int64_t*>inner_indices.data
+
+ self.tree = self.__build(0, self.n, self.raw_maxes, self.raw_mins)
+
+ cdef innernode* __build(cKDTree self, int start_idx, int end_idx, double* maxes, double* mins):
+ cdef leafnode* n
+ cdef innernode* ni
+ cdef int i, j, t, p, q, d
+ cdef double size, split, minval, maxval
+ cdef double*mids
+ if end_idx-start_idx<=self.leafsize:
+ n = <leafnode*>stdlib.malloc(sizeof(leafnode))
+ # Skory
+ n.maxes = <double*>stdlib.malloc(sizeof(double)*self.m)
+ n.mins = <double*>stdlib.malloc(sizeof(double)*self.m)
+ for i in range(self.m):
+ n.maxes[i] = maxes[i]
+ n.mins[i] = mins[i]
+ n.split_dim = -1
+ n.start_idx = start_idx
+ n.end_idx = end_idx
+ return <innernode*>n
+ else:
+ d = 0
+ size = 0
+ for i in range(self.m):
+ if maxes[i]-mins[i] > size:
+ d = i
+ size = maxes[i]-mins[i]
+ maxval = maxes[d]
+ minval = mins[d]
+ if maxval==minval:
+ # all points are identical; warn user?
+ n = <leafnode*>stdlib.malloc(sizeof(leafnode))
+ n.split_dim = -1
+ n.start_idx = start_idx
+ n.end_idx = end_idx
+ return <innernode*>n
+
+ split = (maxval+minval)/2
+
+ p = start_idx
+ q = end_idx-1
+ while p<=q:
+ if self.raw_data[self.raw_indices[p]*self.m+d]<split:
+ p+=1
+ elif self.raw_data[self.raw_indices[q]*self.m+d]>=split:
+ q-=1
+ else:
+ t = self.raw_indices[p]
+ self.raw_indices[p] = self.raw_indices[q]
+ self.raw_indices[q] = t
+ p+=1
+ q-=1
+
+ # slide midpoint if necessary
+ if p==start_idx:
+ # no points less than split
+ j = start_idx
+ split = self.raw_data[self.raw_indices[j]*self.m+d]
+ for i in range(start_idx+1, end_idx):
+ if self.raw_data[self.raw_indices[i]*self.m+d]<split:
+ j = i
+ split = self.raw_data[self.raw_indices[j]*self.m+d]
+ t = self.raw_indices[start_idx]
+ self.raw_indices[start_idx] = self.raw_indices[j]
+ self.raw_indices[j] = t
+ p = start_idx+1
+ q = start_idx
+ elif p==end_idx:
+ # no points greater than split
+ j = end_idx-1
+ split = self.raw_data[self.raw_indices[j]*self.m+d]
+ for i in range(start_idx, end_idx-1):
+ if self.raw_data[self.raw_indices[i]*self.m+d]>split:
+ j = i
+ split = self.raw_data[self.raw_indices[j]*self.m+d]
+ t = self.raw_indices[end_idx-1]
+ self.raw_indices[end_idx-1] = self.raw_indices[j]
+ self.raw_indices[j] = t
+ p = end_idx-1
+ q = end_idx-2
+
+ # construct new node representation
+ ni = <innernode*>stdlib.malloc(sizeof(innernode))
+
+ mids = <double*>stdlib.malloc(sizeof(double)*self.m)
+ for i in range(self.m):
+ mids[i] = maxes[i]
+ mids[d] = split
+ ni.less = self.__build(start_idx,p,mids,mins)
+
+ for i in range(self.m):
+ mids[i] = mins[i]
+ mids[d] = split
+ ni.greater = self.__build(p,end_idx,maxes,mids)
+
+ stdlib.free(mids)
+
+ ni.split_dim = d
+ ni.split = split
+ # Skory
+ ni.maxes = <double*>stdlib.malloc(sizeof(double)*self.m)
+ ni.mins = <double*>stdlib.malloc(sizeof(double)*self.m)
+ for i in range(self.m):
+ ni.maxes[i] = maxes[i]
+ ni.mins[i] = mins[i]
+
+ return ni
+
+ cdef __free_tree(cKDTree self, innernode* node):
+ if node.split_dim!=-1:
+ self.__free_tree(node.less)
+ self.__free_tree(node.greater)
+ stdlib.free(node.maxes) # Skory
+ stdlib.free(node.mins)
+ stdlib.free(node)
+
+ def __dealloc__(cKDTree self):
+ if <int>(self.tree) == 0:
+ # should happen only if __init__ was never called
+ return
+ self.__free_tree(self.tree)
+
+ cdef void __query(cKDTree self,
+ double*result_distances,
+ long*result_indices,
+ double*x,
+ int k,
+ double eps,
+ double p,
+ double distance_upper_bound,
+ double*period):
+ cdef heap q
+ cdef heap neighbors
+
+ cdef int i, j
+ cdef double t
+ cdef nodeinfo* inf
+ cdef nodeinfo* inf2
+ cdef double d
+ cdef double m_left, m_right, m
+ cdef double epsfac
+ cdef double min_distance
+ cdef double far_min_distance
+ cdef heapitem it, it2, neighbor
+ cdef leafnode* node
+ cdef innernode* inode
+ cdef innernode* near
+ cdef innernode* far
+ cdef double* side_distances
+
+ # priority queue for chasing nodes
+ # entries are:
+ # minimum distance between the cell and the target
+ # distances between the nearest side of the cell and the target
+ # the head node of the cell
+ heapcreate(&q,12)
+
+ # priority queue for the nearest neighbors
+ # furthest known neighbor first
+ # entries are (-distance**p, i)
+ heapcreate(&neighbors,k)
+
+ # set up first nodeinfo
+ inf = <nodeinfo*>stdlib.malloc(sizeof(nodeinfo)+self.m*sizeof(double))
+ inf.node = self.tree
+ for i in range(self.m):
+ inf.side_distances[i] = 0
+ t = x[i]-self.raw_maxes[i]
+ if t>inf.side_distances[i]:
+ inf.side_distances[i] = t
+ else:
+ t = self.raw_mins[i]-x[i]
+ if t>inf.side_distances[i]:
+ inf.side_distances[i] = t
+ if p!=1 and p!=infinity:
+ inf.side_distances[i]=inf.side_distances[i]**p
+
+ # compute first distance
+ min_distance = 0.
+ for i in range(self.m):
+ if p==infinity:
+ min_distance = dmax(min_distance,inf.side_distances[i])
+ else:
+ min_distance += inf.side_distances[i]
+
+ # fiddle approximation factor
+ if eps==0:
+ epsfac=1
+ elif p==infinity:
+ epsfac = 1/(1+eps)
+ else:
+ epsfac = 1/(1+eps)**p
+
+ # internally we represent all distances as distance**p
+ if p!=infinity and distance_upper_bound!=infinity:
+ distance_upper_bound = distance_upper_bound**p
+
+ while True:
+ if inf.node.split_dim==-1:
+ node = <leafnode*>inf.node
+
+ # brute-force
+ for i in range(node.start_idx,node.end_idx):
+ d = _distance_p(
+ self.raw_data+self.raw_indices[i]*self.m,
+ x,p,self.m,distance_upper_bound,period)
+
+ if d<distance_upper_bound:
+ # replace furthest neighbor
+ if neighbors.n==k:
+ heapremove(&neighbors)
+ neighbor.priority = -d
+ neighbor.contents.intdata = self.raw_indices[i]
+ heappush(&neighbors,neighbor)
+
+ # adjust upper bound for efficiency
+ if neighbors.n==k:
+ distance_upper_bound = -heappeek(&neighbors).priority
+ # done with this node, get another
+ stdlib.free(inf)
+ if q.n==0:
+ # no more nodes to visit
+ break
+ else:
+ it = heappop(&q)
+ inf = <nodeinfo*>it.contents.ptrdata
+ min_distance = it.priority
+ else:
+ inode = <innernode*>inf.node
+
+ # we don't push cells that are too far onto the queue at all,
+ # but since the distance_upper_bound decreases, we might get
+ # here even if the cell's too far
+ if min_distance>distance_upper_bound*epsfac:
+ # since this is the nearest cell, we're done, bail out
+ stdlib.free(inf)
+ # free all the nodes still on the heap
+ for i in range(q.n):
+ stdlib.free(q.heap[i].contents.ptrdata)
+ break
+
+ # set up children for searching
+ if x[inode.split_dim]<inode.split:
+ near = inode.less
+ far = inode.greater
+ else:
+ near = inode.greater
+ far = inode.less
+
+ # near child is at the same distance as the current node
+ # we're going here next, so no point pushing it on the queue
+ # no need to recompute the distance or the side_distances
+ inf.node = near
+
+ # far child is further by an amount depending only
+ # on the split value; compute its distance and side_distances
+ # and push it on the queue if it's near enough
+ inf2 = <nodeinfo*>stdlib.malloc(sizeof(nodeinfo)+self.m*sizeof(double))
+ it2.contents.ptrdata = <char*> inf2
+ inf2.node = far
+
+ # Periodicity added by S Skory
+ m_left = dmin( dabs(far.mins[inode.split_dim] - x[inode.split_dim]), \
+ period[inode.split_dim] - dabs(far.mins[inode.split_dim] - x[inode.split_dim]))
+ m_right = dmin( dabs(far.maxes[inode.split_dim] - x[inode.split_dim]), \
+ period[inode.split_dim] - dabs(far.maxes[inode.split_dim] - x[inode.split_dim]))
+ m = dmin(m_left,m_right)
+
+ # most side distances unchanged
+ for i in range(self.m):
+ inf2.side_distances[i] = inf.side_distances[i]
+
+ # one side distance changes
+ # we can adjust the minimum distance without recomputing
+ if p == infinity:
+ # we never use side_distances in the l_infinity case
+ # inf2.side_distances[inode.split_dim] = dabs(inode.split-x[inode.split_dim])
+ far_min_distance = dmax(min_distance, m)
+ elif p == 1:
+ inf2.side_distances[inode.split_dim] = m
+ far_min_distance = dmax(min_distance, m)
+ else:
+ inf2.side_distances[inode.split_dim] = m**p
+ #far_min_distance = min_distance - inf.side_distances[inode.split_dim] + inf2.side_distances[inode.split_dim]
+ far_min_distance = m**p
+
+ it2.priority = far_min_distance
+
+
+ # far child might be too far, if so, don't bother pushing it
+ if far_min_distance<=distance_upper_bound*epsfac:
+ heappush(&q,it2)
+ else:
+ stdlib.free(inf2)
+ # just in case
+ it2.contents.ptrdata = <char*> 0
+
+ # fill output arrays with sorted neighbors
+ for i in range(neighbors.n-1,-1,-1):
+ neighbor = heappop(&neighbors) # FIXME: neighbors may be realloced
+ result_indices[i] = neighbor.contents.intdata
+ if p==1 or p==infinity:
+ result_distances[i] = -neighbor.priority
+ else:
+ result_distances[i] = (-neighbor.priority) #**(1./p) S. Skory
+
+ heapdestroy(&q)
+ heapdestroy(&neighbors)
+
+ def query(cKDTree self, object x, int k=1, double eps=0, double p=2,
+ double distance_upper_bound=infinity, object period=None):
+ """query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf,
+ period=None)
+
+ Query the kd-tree for nearest neighbors.
+
+ Parameters
+ ----------
+ x : array_like, last dimension self.m
+ An array of points to query.
+ k : int
+ The number of nearest neighbors to return.
+ eps : non-negative float
+ Return approximate nearest neighbors; the k-th returned value
+ is guaranteed to be no further than (1 + `eps`) times the
+ distance to the real k-th nearest neighbor.
+ p : float, 1 <= p <= infinity
+ Which Minkowski p-norm to use.
+ 1 is the sum-of-absolute-values "Manhattan" distance.
+ 2 is the usual Euclidean distance.
+ infinity is the maximum-coordinate-difference distance.
+ distance_upper_bound : non-negative float
+ Return only neighbors within this distance. This is used to prune
+ tree searches, so if you are doing a series of nearest-neighbor
+ queries, it may help to supply the distance to the nearest neighbor
+ of the most recent point.
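+ period : array_like, optional
+ The period of the box along each dimension, used for periodic
+ boundary conditions; None (the default) means non-periodic.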
+
+ Returns
+ -------
+ d : ndarray of floats
+ The distances to the nearest neighbors.
+ If `x` has shape tuple+(self.m,), then `d` has shape tuple+(k,).
+ Missing neighbors are indicated with infinite distances.
+ i : ndarray of ints
+ The locations of the neighbors in self.data.
+ If `x` has shape tuple+(self.m,), then `i` has shape tuple+(k,).
+ Missing neighbors are indicated with self.n.
+
+ """
+ cdef np.ndarray[long, ndim=2] ii
+ cdef np.ndarray[double, ndim=2] dd
+ cdef np.ndarray[double, ndim=2] xx
+ cdef np.ndarray[double, ndim=1] cperiod
+ cdef int c
+ x = np.asarray(x).astype(np.double)
+ if period is None:
+ period = np.array([np.inf]*self.m)
+ else:
+ period = np.asarray(period).astype(np.double)
+ cperiod = np.ascontiguousarray(period)
+ if np.shape(x)[-1] != self.m:
+ raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x)))
+ if p<1:
+ raise ValueError("Only p-norms with 1<=p<=infinity permitted")
+ if len(x.shape)==1:
+ single = True
+ x = x[np.newaxis,:]
+ else:
+ single = False
+ retshape = np.shape(x)[:-1]
+ n = np.prod(retshape)
+ xx = np.reshape(x,(n,self.m))
+ xx = np.ascontiguousarray(xx)
+ dd = np.empty((n,k),dtype=np.double)
+ dd.fill(infinity)
+ ii = np.empty((n,k),dtype=np.long)
+ ii.fill(self.n)
+ for c in range(n):
+ self.__query(
+ (<double*>dd.data)+c*k,
+ (<long*>ii.data)+c*k,
+ (<double*>xx.data)+c*self.m,
+ k,
+ eps,
+ p,
+ distance_upper_bound,
+ <double*>cperiod.data)
+ if single:
+ if k==1:
+ return dd[0,0], ii[0,0]
+ else:
+ return dd[0], ii[0]
+ else:
+ if k==1:
+ return np.reshape(dd[...,0],retshape), np.reshape(ii[...,0],retshape)
+ else:
+ return np.reshape(dd,retshape+(k,)), np.reshape(ii,retshape+(k,))
+
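A hypothetical periodic query against a unit box, with pos an (n, 3) array of positions in [0, 1):

    from yt.utilities.spatial import cKDTree
    tree = cKDTree(pos)
    # Distances (squared, for the default p=2 -- note the root removed
    # above) and indices of the 65 nearest neighbors, wrapping across
    # the box boundaries.
    d, i = tree.query(point, k=65, period=[1., 1., 1.])
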
+ def chainHOP_get_dens(cKDTree self, object mass, int num_neighbors=65, \
+ int nMerge=6):
+ """ query the tree for the nearest neighbors, to get the density
+ of particles for chainHOP.
+
+ Parameters:
+ ===========
+
+ mass: An array-like list of the masses of the particles, in the same
+ order as the data that went into building the kd tree.
+
+ num_neighbors: Optional, the number of neighbors to search for and to
+ use in the density calculation. Default is 65, and is probably what
+ one should stick with.
+
+ nMerge: The number of nearest neighbor tags to return for each particle.
+ (The tags themselves are no longer returned here; they are computed in
+ chunks by find_chunk_nearest_neighbors.)
+
+ Returns:
+ ========
+
+ dens: An array of the densities for each particle, in the same order
+ as the input data.
+
+ """
+
+ # We're no longer returning all the tags in this step.
+ # We do it chunked, in find_chunk_nearest_neighbors.
+ #cdef np.ndarray[long, ndim=2] tags
+ cdef np.ndarray[double, ndim=1] dens
+ cdef np.ndarray[double, ndim=1] query
+ cdef np.ndarray[long, ndim=1] tags_temp
+ cdef np.ndarray[double, ndim=1] dist_temp
+ cdef int i, pj, j
+ cdef double ih2, fNorm, r2, rs
+
+ #tags = np.empty((self.n, nMerge), dtype=np.long)
+ dens = np.empty(self.n, dtype=np.double)
+ query = np.empty(self.m, dtype=np.double)
+ tags_temp = np.empty(num_neighbors, dtype=np.long)
+ dist_temp = np.empty(num_neighbors, dtype=np.double)
+ # Need to start out with zeros before we start adding to it.
+ dens.fill(0.0)
+
+ mass = np.array(mass).astype(np.double)
+ mass = np.ascontiguousarray(mass)
+
+ for i in range(self.n):
+ query = self.data[i]
+ (dist_temp, tags_temp) = self.query(query, k=num_neighbors, period=[1.]*3)
+
+ #calculate the density for this particle
+ ih2 = 4.0/np.max(dist_temp)
+ fNorm = 0.5*np.sqrt(ih2)*ih2/np.pi
+ for j in range(num_neighbors):
+ pj = tags_temp[j]
+ r2 = dist_temp[j] * ih2
+ rs = 2.0 - np.sqrt(r2)
+ if (r2 < 1.0):
+ rs = (1.0 - 0.75*rs*r2)
+ else:
+ rs = 0.25*rs*rs*rs
+ rs = rs * fNorm
+ dens[i] = dens[i] + rs * mass[pj]
+ dens[pj] = dens[pj] + rs * mass[i]
+
+ # store nMerge nearest neighbors
+ #tags[i,:] = tags_temp[:nMerge]
+
+ #return (dens, tags)
+ return dens
+
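The per-neighbor weighting above is a standard cubic spline (SPH-style) kernel whose support is set by the distance to the farthest of the num_neighbors neighbors. A vectorized numpy sketch of one particle's contributions, with dist2 the squared neighbor distances returned by query:

    import numpy as np

    def spline_contributions(dist2, mass_nn):
        ih2 = 4.0 / dist2.max()
        fNorm = 0.5 * np.sqrt(ih2) * ih2 / np.pi
        r2 = dist2 * ih2
        rs = 2.0 - np.sqrt(r2)
        rs = np.where(r2 < 1.0, 1.0 - 0.75 * rs * r2, 0.25 * rs ** 3)
        # The Cython loop also adds the symmetric term dens[pj] += rs*mass[i].
        return fNorm * rs * mass_nn
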
+ def find_chunk_nearest_neighbors(cKDTree self, int start, int finish, \
+ int num_neighbors=65):
+ """ query the tree in chunks, between start and finish, recording the
+ nearest neighbors.
+
+ Parameters:
+ ===========
+
+ start: The starting point in the dataset for this search.
+
+ finish: The ending point in the dataset for this search.
+
+ num_neighbors: Optional, the number of neighbors to search for.
+ The default is 65.
+
+ Returns:
+ ========
+
+ chunk_tags: A two-dimensional array of the nearest neighbor tags for the
+ points in this search.
+
+ """
+
+ cdef np.ndarray[long, ndim=2] chunk_tags
+ cdef np.ndarray[double, ndim=1] query
+ cdef np.ndarray[long, ndim=1] tags_temp
+ cdef np.ndarray[double, ndim=1] dist_temp
+ cdef int i
+
+ chunk_tags = np.empty((finish-start, num_neighbors), dtype=np.long)
+ query = np.empty(self.m, dtype=np.double)
+ tags_temp = np.empty(num_neighbors, dtype=np.long)
+ dist_temp = np.empty(num_neighbors, dtype=np.double)
+
+ for i in range(finish-start):
+ query = self.data[i+start]
+ (dist_temp, tags_temp) = self.query(query, k=num_neighbors, period=[1.]*3)
+ chunk_tags[i,:] = tags_temp[:]
+
+ return chunk_tags
+
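Chunking keeps the (n, num_neighbors) tag array bounded in memory; a hypothetical driver, assuming chunks of 10**5 points:

    csize = 100000
    for start in range(0, tree.n, csize):
        finish = min(start + csize, tree.n)
        tags = tree.find_chunk_nearest_neighbors(start, finish,
                                                 num_neighbors=65)
        # ... process tags for particles [start, finish) ...
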
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/utilities/spatial/common.h
--- /dev/null
+++ b/yt/utilities/spatial/common.h
@@ -0,0 +1,70 @@
+/**
+ * common.h
+ *
+ * Author: Damian Eads
+ * Date: September 22, 2007 (moved into new file on June 8, 2008)
+ *
+ * Copyright (c) 2007, 2008, Damian Eads. All rights reserved.
+ * Adapted for incorporation into Scipy, April 9, 2008.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of the author nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CLUSTER_COMMON_H
+#define _CLUSTER_COMMON_H
+
+#define CPY_MAX(_x, _y) ((_x > _y) ? (_x) : (_y))
+#define CPY_MIN(_x, _y) ((_x < _y) ? (_x) : (_y))
+
+#define NCHOOSE2(_n) ((_n)*(_n-1)/2)
+
+#define CPY_BITS_PER_CHAR (sizeof(unsigned char) * 8)
+#define CPY_FLAG_ARRAY_SIZE_BYTES(num_bits) (CPY_CEIL_DIV((num_bits), \
+ CPY_BITS_PER_CHAR))
+#define CPY_GET_BIT(_xx, i) (((_xx)[(i) / CPY_BITS_PER_CHAR] >> \
+ ((CPY_BITS_PER_CHAR-1) - \
+ ((i) % CPY_BITS_PER_CHAR))) & 0x1)
+#define CPY_SET_BIT(_xx, i) ((_xx)[(i) / CPY_BITS_PER_CHAR] |= \
+ ((0x1) << ((CPY_BITS_PER_CHAR-1) \
+ -((i) % CPY_BITS_PER_CHAR))))
+#define CPY_CLEAR_BIT(_xx, i) ((_xx)[(i) / CPY_BITS_PER_CHAR] &= \
+ ~((0x1) << ((CPY_BITS_PER_CHAR-1) \
+ -((i) % CPY_BITS_PER_CHAR))))
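+
+/* A small worked example (illustrative only): with CPY_BITS_PER_CHAR == 8,
+ * bits are stored most-significant-bit first within each byte, so
+ * CPY_SET_BIT(flags, 10) sets bit (7 - 10 % 8) == 5 of flags[10 / 8],
+ * i.e. flags[1] |= 0x20, and CPY_GET_BIT(flags, 10) then yields 0x1. */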
+
+#ifndef CPY_CEIL_DIV
+#define CPY_CEIL_DIV(x, y) ((((double)x)/(double)y) == \
+ ((double)((x)/(y))) ? ((x)/(y)) : ((x)/(y) + 1))
+#endif
+
+
+#ifdef CPY_DEBUG
+#define CPY_DEBUG_MSG(...) fprintf(stderr, __VA_ARGS__)
+#else
+#define CPY_DEBUG_MSG(...)
+#endif
+
+#endif
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/utilities/spatial/distance.py
--- /dev/null
+++ b/yt/utilities/spatial/distance.py
@@ -0,0 +1,2178 @@
+"""
+=====================================================
+Distance computations (:mod:`scipy.spatial.distance`)
+=====================================================
+
+.. sectionauthor:: Damian Eads
+
+Function Reference
+------------------
+
+Distance matrix computation from a collection of raw observation vectors
+stored in a rectangular array.
+
+.. autosummary::
+ :toctree: generated/
+
+ pdist -- pairwise distances between observation vectors.
+   cdist    -- distances between two collections of observation vectors
+ squareform -- convert distance matrix to a condensed one and vice versa
+
+Predicates for checking the validity of distance matrices, both
+condensed and redundant. Also contained in this module are functions
+for computing the number of observations in a distance matrix.
+
+.. autosummary::
+ :toctree: generated/
+
+ is_valid_dm -- checks for a valid distance matrix
+ is_valid_y -- checks for a valid condensed distance matrix
+ num_obs_dm -- # of observations in a distance matrix
+ num_obs_y -- # of observations in a condensed distance matrix
+
+Distance functions between two vectors ``u`` and ``v``. Computing
+distances over a large collection of vectors is inefficient for these
+functions. Use ``pdist`` for this purpose.
+
+.. autosummary::
+ :toctree: generated/
+
+ braycurtis -- the Bray-Curtis distance.
+ canberra -- the Canberra distance.
+ chebyshev -- the Chebyshev distance.
+ cityblock -- the Manhattan distance.
+ correlation -- the Correlation distance.
+ cosine -- the Cosine distance.
+ dice -- the Dice dissimilarity (boolean).
+ euclidean -- the Euclidean distance.
+ hamming -- the Hamming distance (boolean).
+ jaccard -- the Jaccard distance (boolean).
+ kulsinski -- the Kulsinski distance (boolean).
+ mahalanobis -- the Mahalanobis distance.
+ matching -- the matching dissimilarity (boolean).
+ minkowski -- the Minkowski distance.
+ rogerstanimoto -- the Rogers-Tanimoto dissimilarity (boolean).
+ russellrao -- the Russell-Rao dissimilarity (boolean).
+ seuclidean -- the normalized Euclidean distance.
+ sokalmichener -- the Sokal-Michener dissimilarity (boolean).
+ sokalsneath -- the Sokal-Sneath dissimilarity (boolean).
+ sqeuclidean -- the squared Euclidean distance.
+ yule -- the Yule dissimilarity (boolean).
+
+
+References
+----------
+
+.. [Sta07] "Statistics toolbox." API Reference Documentation. The MathWorks.
+ http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.
+ Accessed October 1, 2007.
+
+.. [Mti07] "Hierarchical clustering." API Reference Documentation.
+ The Wolfram Research, Inc.
+ http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/HierarchicalClustering.html.
+ Accessed October 1, 2007.
+
+.. [Gow69] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
+ Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
+
+.. [War63] Ward Jr, JH. "Hierarchical grouping to optimize an objective
+ function." Journal of the American Statistical Association. 58(301):
+ pp. 236--44. 1963.
+
+.. [Joh66] Johnson, SC. "Hierarchical clustering schemes." Psychometrika.
+ 32(2): pp. 241--54. 1966.
+
+.. [Sne62] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp.
+ 855--60. 1962.
+
+.. [Bat95] Batagelj, V. "Comparing resemblance measures." Journal of
+ Classification. 12: pp. 73--90. 1995.
+
+.. [Sok58] Sokal, RR and Michener, CD. "A statistical method for evaluating
+ systematic relationships." Scientific Bulletins. 38(22):
+ pp. 1409--38. 1958.
+
+.. [Ede79] Edelbrock, C. "Mixture model tests of hierarchical clustering
+ algorithms: the problem of classifying everybody." Multivariate
+ Behavioral Research. 14: pp. 367--84. 1979.
+
+.. [Jai88] Jain, A., and Dubes, R., "Algorithms for Clustering Data."
+ Prentice-Hall. Englewood Cliffs, NJ. 1988.
+
+.. [Fis36] Fisher, RA "The use of multiple measurements in taxonomic
+ problems." Annals of Eugenics, 7(2): 179-188. 1936
+
+
+Copyright Notice
+----------------
+
+Copyright (C) Damian Eads, 2007-2008. New BSD License.
+
+"""
+
+import warnings
+import numpy as np
+from numpy.linalg import norm
+
+import _distance_wrap
+
+
+def _copy_array_if_base_present(a):
+ """
+ Copies the array if its base points to a parent array.
+ """
+ if a.base is not None:
+ return a.copy()
+ elif np.issubsctype(a, np.float32):
+ return np.array(a, dtype=np.double)
+ else:
+ return a
+
+
+def _copy_arrays_if_base_present(T):
+ """
+    Accepts a sequence of arrays T. Copies the array T[i] if its base
+    points to a parent array; otherwise only the reference is copied.
+    This is useful if the arrays are being passed to a C function that
+    does not handle striding.
+ """
+ l = [_copy_array_if_base_present(a) for a in T]
+ return l
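+
+# For example, a strided view such as np.arange(6.)[::2] has a non-None
+# base, so it is copied into a contiguous array before being handed to
+# the C wrappers, which assume contiguous memory.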
+
+
+def _convert_to_bool(X):
+ if X.dtype != np.bool:
+ X = np.bool_(X)
+ if not X.flags.contiguous:
+ X = X.copy()
+ return X
+
+
+def _convert_to_double(X):
+ if X.dtype != np.double:
+ X = np.double(X)
+ if not X.flags.contiguous:
+ X = X.copy()
+ return X
+
+
+def _validate_vector(u, dtype=None):
+ # XXX Is order='c' really necessary?
+ u = np.asarray(u, dtype=dtype, order='c').squeeze()
+ # Ensure values such as u=1 and u=[1] still return 1-D arrays.
+ u = np.atleast_1d(u)
+ if u.ndim > 1:
+ raise ValueError("Input vector should be 1-D.")
+ return u
+
+
+def minkowski(u, v, p):
+ r"""
+ Computes the Minkowski distance between two vectors ``u`` and ``v``,
+ defined as
+
+ .. math::
+
+ {||u-v||}_p = (\sum{|u_i - v_i|^p})^{1/p}.
+
+ Parameters
+ ----------
+ u : ndarray
+ An n-dimensional vector.
+ v : ndarray
+ An n-dimensional vector.
+ p : int
+ The order of the norm of the difference :math:`{||u-v||}_p`.
+
+ Returns
+ -------
+ d : double
+ The Minkowski distance between vectors ``u`` and ``v``.
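+
+    Examples
+    --------
+    A quick numeric check of the definition:
+
+    >>> minkowski([1, 0, 0], [0, 1, 0], 2)
+    1.4142135623730951
+    >>> minkowski([1, 0, 0], [0, 1, 0], 1)
+    2.0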
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v)
+ if p < 1:
+ raise ValueError("p must be at least 1")
+ dist = norm(u - v, ord=p)
+ return dist
+
+
+def wminkowski(u, v, p, w):
+ r"""
+ Computes the weighted Minkowski distance between two vectors ``u``
+ and ``v``, defined as
+
+ .. math::
+
+ \left(\sum{(w_i |u_i - v_i|^p)}\right)^{1/p}.
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+ An :math:`n`-dimensional vector.
+ p : int
+ The order of the norm of the difference :math:`{||u-v||}_p`.
+ w : ndarray
+ The weight vector.
+
+ Returns
+ -------
+ d : double
+ The Minkowski distance between vectors ``u`` and ``v``.
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v)
+ w = _validate_vector(w)
+ if p < 1:
+ raise ValueError("p must be at least 1")
+ dist = norm(w * (u - v), ord=p)
+ return dist
+
+
+def euclidean(u, v):
+ """
+ Computes the Euclidean distance between two n-vectors ``u`` and ``v``,
+ which is defined as
+
+ .. math::
+
+ {||u-v||}_2
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+ An :math:`n`-dimensional vector.
+
+ Returns
+ -------
+ d : double
+ The Euclidean distance between vectors ``u`` and ``v``.
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v)
+ dist = norm(u - v)
+ return dist
+
+
+def sqeuclidean(u, v):
+ """
+ Computes the squared Euclidean distance between two n-vectors u and v,
+ which is defined as
+
+ .. math::
+
+ {||u-v||}_2^2.
+
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+ An :math:`n`-dimensional vector.
+
+ Returns
+ -------
+ d : double
+ The squared Euclidean distance between vectors ``u`` and ``v``.
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v)
+ dist = ((u - v) ** 2).sum()
+ return dist
+
+
+def cosine(u, v):
+ r"""
+ Computes the Cosine distance between two n-vectors u and v, which
+ is defined as
+
+ .. math::
+
+ 1 - \frac{uv^T}
+ {||u||_2 ||v||_2}.
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+ An :math:`n`-dimensional vector.
+
+ Returns
+ -------
+ d : double
+ The Cosine distance between vectors ``u`` and ``v``.
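+
+    Examples
+    --------
+    Orthogonal vectors are at distance 1, parallel vectors at distance 0:
+
+    >>> cosine([1, 0], [0, 1])
+    1.0
+    >>> cosine([2, 0], [1, 0])
+    0.0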
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v)
+ dist = 1.0 - np.dot(u, v) / (norm(u) * norm(v))
+ return dist
+
+
+def correlation(u, v):
+ r"""
+ Computes the correlation distance between two n-vectors ``u`` and
+ ``v``, which is defined as
+
+ .. math::
+
+       1 - \frac{(u - \bar{u}) {(v - \bar{v})}^T}
+                {{||u - \bar{u}||}_2 {||v - \bar{v}||}_2}
+
+    where :math:`\bar{u}` is the mean of a vector's elements and ``n``
+    is the common dimensionality of ``u`` and ``v``.
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+ An :math:`n`-dimensional vector.
+
+ Returns
+ -------
+ d : double
+ The correlation distance between vectors ``u`` and ``v``.
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v)
+ umu = u.mean()
+ vmu = v.mean()
+ um = u - umu
+ vm = v - vmu
+ dist = 1.0 - np.dot(um, vm) / (norm(um) * norm(vm))
+ return dist
+
+
+def hamming(u, v):
+ r"""
+ Computes the Hamming distance between two n-vectors ``u`` and
+ ``v``, which is simply the proportion of disagreeing components in
+ ``u`` and ``v``. If ``u`` and ``v`` are boolean vectors, the Hamming
+ distance is
+
+ .. math::
+
+ \frac{c_{01} + c_{10}}{n}
+
+ where :math:`c_{ij}` is the number of occurrences of
+ :math:`\mathtt{u[k]} = i` and :math:`\mathtt{v[k]} = j` for
+ :math:`k < n`.
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+ An :math:`n`-dimensional vector.
+
+ Returns
+ -------
+ d : double
+ The Hamming distance between vectors ``u`` and ``v``.
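+
+    Examples
+    --------
+    The proportion of disagreeing components:
+
+    >>> hamming([1, 0], [0, 1])
+    1.0
+    >>> hamming([1, 0, 0, 0], [0, 1, 0, 0])
+    0.5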
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v)
+ return (u != v).mean()
+
+
+def jaccard(u, v):
+ """
+ Computes the Jaccard-Needham dissimilarity between two boolean
+ n-vectors u and v, which is
+
+ .. math::
+
+ \frac{c_{TF} + c_{FT}}
+ {c_{TT} + c_{FT} + c_{TF}}
+
+ where :math:`c_{ij}` is the number of occurrences of
+ :math:`\mathtt{u[k]} = i` and :math:`\mathtt{v[k]} = j` for
+ :math:`k < n`.
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+ An :math:`n`-dimensional vector.
+
+ Returns
+ -------
+ d : double
+ The Jaccard distance between vectors ``u`` and ``v``.
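+
+    Examples
+    --------
+    Disagreements are only counted where at least one component is
+    non-zero:
+
+    >>> jaccard([1, 0, 0], [0, 1, 0])
+    1.0
+    >>> jaccard([1, 1, 0], [1, 0, 0])
+    0.5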
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v)
+ dist = (np.double(np.bitwise_and((u != v),
+ np.bitwise_or(u != 0, v != 0)).sum())
+ / np.double(np.bitwise_or(u != 0, v != 0).sum()))
+ return dist
+
+
+def kulsinski(u, v):
+ """
+ Computes the Kulsinski dissimilarity between two boolean n-vectors
+ u and v, which is defined as
+
+ .. math::
+
+ \frac{c_{TF} + c_{FT} - c_{TT} + n}
+ {c_{FT} + c_{TF} + n}
+
+ where :math:`c_{ij}` is the number of occurrences of
+ :math:`\mathtt{u[k]} = i` and :math:`\mathtt{v[k]} = j` for
+ :math:`k < n`.
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+ An :math:`n`-dimensional vector.
+
+ Returns
+ -------
+ d : double
+ The Kulsinski distance between vectors ``u`` and ``v``.
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v)
+ n = float(len(u))
+ (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v)
+
+ return (ntf + nft - ntt + n) / (ntf + nft + n)
+
+
+def seuclidean(u, v, V):
+ """
+ Returns the standardized Euclidean distance between two n-vectors
+ ``u`` and ``v``. ``V`` is an n-dimensional vector of component
+    variances. It is usually computed over a larger collection of
+    vectors.
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+ An :math:`n`-dimensional vector.
+ V : ndarray
+ An :math:`n`-dimensional vector.
+
+ Returns
+ -------
+ d : double
+ The standardized Euclidean distance between vectors ``u`` and ``v``.
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v)
+ V = _validate_vector(V, dtype=np.float64)
+ if V.shape[0] != u.shape[0] or u.shape[0] != v.shape[0]:
+ raise TypeError('V must be a 1-D array of the same dimension '
+ 'as u and v.')
+ return np.sqrt(((u - v) ** 2 / V).sum())
+
+
+def cityblock(u, v):
+ """
+ Computes the Manhattan distance between two n-vectors u and v,
+ which is defined as
+
+ .. math::
+
+ \\sum_i {\\left| u_i - v_i \\right|}.
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+ An :math:`n`-dimensional vector.
+
+ Returns
+ -------
+ d : double
+ The City Block distance between vectors ``u`` and ``v``.
+
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v)
+ return abs(u - v).sum()
+
+
+def mahalanobis(u, v, VI):
+ r"""
+    Computes the Mahalanobis distance between two n-vectors ``u`` and ``v``,
+    which is defined as
+
+    .. math::
+
+       \sqrt{(u-v) V^{-1} (u-v)^T}
+
+ where ``VI`` is the inverse covariance matrix :math:`V^{-1}`.
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+        An :math:`n`-dimensional vector.
+    VI : ndarray
+        The inverse of the covariance matrix :math:`V^{-1}`.
+
+ Returns
+ -------
+ d : double
+ The Mahalanobis distance between vectors ``u`` and ``v``.
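+
+    Examples
+    --------
+    With the identity matrix as ``VI``, the Mahalanobis distance reduces
+    to the Euclidean distance:
+
+    >>> mahalanobis([3, 4], [0, 0], [[1, 0], [0, 1]])
+    5.0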
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v)
+ VI = np.atleast_2d(VI)
+ delta = u - v
+ m = np.dot(np.dot(delta, VI), delta)
+ return np.sqrt(m)
+
+
+def chebyshev(u, v):
+ r"""
+ Computes the Chebyshev distance between two n-vectors u and v,
+ which is defined as
+
+ .. math::
+
+ \max_i {|u_i-v_i|}.
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+ An :math:`n`-dimensional vector.
+
+ Returns
+ -------
+ d : double
+ The Chebyshev distance between vectors ``u`` and ``v``.
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v)
+ return max(abs(u - v))
+
+
+def braycurtis(u, v):
+ r"""
+ Computes the Bray-Curtis distance between two n-vectors ``u`` and
+ ``v``, which is defined as
+
+ .. math::
+
+ \sum{|u_i-v_i|} / \sum{|u_i+v_i|}.
+
+ The Bray-Curtis distance is in the range [0, 1] if all coordinates are
+ positive, and is undefined if the inputs are of length zero.
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+ An :math:`n`-dimensional vector.
+
+ Returns
+ -------
+ d : double
+ The Bray-Curtis distance between vectors ``u`` and ``v``.
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v, dtype=np.float64)
+ return abs(u - v).sum() / abs(u + v).sum()
+
+
+def canberra(u, v):
+ r"""
+ Computes the Canberra distance between two n-vectors u and v,
+ which is defined as
+
+ .. math::
+
+       \sum_i \frac{|u_i-v_i|}
+ {(|u_i|+|v_i|)}.
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+ An :math:`n`-dimensional vector.
+
+ Returns
+ -------
+ d : double
+ The Canberra distance between vectors ``u`` and ``v``.
+
+ Notes
+ -----
+    When u[i] and v[i] are 0 for a given i, the fraction 0/0 = 0 is used
+    in the calculation.
+
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v, dtype=np.float64)
+ olderr = np.seterr(invalid='ignore')
+ try:
+ d = np.nansum(abs(u - v) / (abs(u) + abs(v)))
+ finally:
+ np.seterr(**olderr)
+ return d
+
+
+def _nbool_correspond_all(u, v):
+ if u.dtype != v.dtype:
+ raise TypeError("Arrays being compared must be of the same data type.")
+
+ if u.dtype == np.int or u.dtype == np.float_ or u.dtype == np.double:
+ not_u = 1.0 - u
+ not_v = 1.0 - v
+ nff = (not_u * not_v).sum()
+ nft = (not_u * v).sum()
+ ntf = (u * not_v).sum()
+ ntt = (u * v).sum()
+ elif u.dtype == np.bool:
+ not_u = ~u
+ not_v = ~v
+ nff = (not_u & not_v).sum()
+ nft = (not_u & v).sum()
+ ntf = (u & not_v).sum()
+ ntt = (u & v).sum()
+ else:
+ raise TypeError("Arrays being compared have unknown type.")
+
+ return (nff, nft, ntf, ntt)
+
+
+def _nbool_correspond_ft_tf(u, v):
+ if u.dtype == np.int or u.dtype == np.float_ or u.dtype == np.double:
+ not_u = 1.0 - u
+ not_v = 1.0 - v
+ nft = (not_u * v).sum()
+ ntf = (u * not_v).sum()
+ else:
+ not_u = ~u
+ not_v = ~v
+ nft = (not_u & v).sum()
+ ntf = (u & not_v).sum()
+ return (nft, ntf)
+
+
+def yule(u, v):
+ r"""
+ Computes the Yule dissimilarity between two boolean n-vectors u and v,
+ which is defined as
+
+
+ .. math::
+
+       \frac{R}{c_{TT} c_{FF} + \frac{R}{2}}
+
+    where :math:`c_{ij}` is the number of occurrences of
+    :math:`\mathtt{u[k]} = i` and :math:`\mathtt{v[k]} = j` for
+    :math:`k < n` and :math:`R = 2.0 * c_{TF} * c_{FT}`.
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+ An :math:`n`-dimensional vector.
+
+ Returns
+ -------
+ d : double
+ The Yule dissimilarity between vectors ``u`` and ``v``.
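+
+    Examples
+    --------
+    The dissimilarity ranges over [0, 2]; it reaches 2 when the vectors
+    disagree somewhere but share no common zeros:
+
+    >>> yule([1, 1, 0], [0, 1, 1])
+    2.0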
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v)
+ (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v)
+ return float(2.0 * ntf * nft) / float(ntt * nff + ntf * nft)
+
+
+def matching(u, v):
+ r"""
+ Computes the Matching dissimilarity between two boolean n-vectors
+ u and v, which is defined as
+
+ .. math::
+
+ \frac{c_{TF} + c_{FT}}{n}
+
+ where :math:`c_{ij}` is the number of occurrences of
+ :math:`\mathtt{u[k]} = i` and :math:`\mathtt{v[k]} = j` for
+ :math:`k < n`.
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+ An :math:`n`-dimensional vector.
+
+ Returns
+ -------
+ d : double
+ The Matching dissimilarity between vectors ``u`` and ``v``.
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v)
+ (nft, ntf) = _nbool_correspond_ft_tf(u, v)
+ return float(nft + ntf) / float(len(u))
+
+
+def dice(u, v):
+ r"""
+ Computes the Dice dissimilarity between two boolean n-vectors
+ ``u`` and ``v``, which is
+
+ .. math::
+
+ \frac{c_{TF} + c_{FT}}
+ {2c_{TT} + c_{FT} + c_{TF}}
+
+ where :math:`c_{ij}` is the number of occurrences of
+ :math:`\mathtt{u[k]} = i` and :math:`\mathtt{v[k]} = j` for
+ :math:`k < n`.
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+ An :math:`n`-dimensional vector.
+
+ Returns
+ -------
+ d : double
+ The Dice dissimilarity between vectors ``u`` and ``v``.
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v)
+ if u.dtype == np.bool:
+ ntt = (u & v).sum()
+ else:
+ ntt = (u * v).sum()
+ (nft, ntf) = _nbool_correspond_ft_tf(u, v)
+ return float(ntf + nft) / float(2.0 * ntt + ntf + nft)
+
+
+def rogerstanimoto(u, v):
+ r"""
+ Computes the Rogers-Tanimoto dissimilarity between two boolean
+ n-vectors ``u`` and ``v``, which is defined as
+
+ .. math::
+ \frac{R}
+ {c_{TT} + c_{FF} + R}
+
+ where :math:`c_{ij}` is the number of occurrences of
+ :math:`\mathtt{u[k]} = i` and :math:`\mathtt{v[k]} = j` for
+ :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+ An :math:`n`-dimensional vector.
+
+ Returns
+ -------
+ d : double
+ The Rogers-Tanimoto dissimilarity between vectors
+ `u` and `v`.
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v)
+ (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v)
+ return float(2.0 * (ntf + nft)) / float(ntt + nff + (2.0 * (ntf + nft)))
+
+
+def russellrao(u, v):
+ r"""
+ Computes the Russell-Rao dissimilarity between two boolean n-vectors
+ ``u`` and ``v``, which is defined as
+
+ .. math::
+
+ \frac{n - c_{TT}}
+ {n}
+
+ where :math:`c_{ij}` is the number of occurrences of
+ :math:`\mathtt{u[k]} = i` and :math:`\mathtt{v[k]} = j` for
+ :math:`k < n`.
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+ An :math:`n`-dimensional vector.
+
+ Returns
+ -------
+ d : double
+ The Russell-Rao dissimilarity between vectors ``u`` and ``v``.
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v)
+ if u.dtype == np.bool:
+ ntt = (u & v).sum()
+ else:
+ ntt = (u * v).sum()
+ return float(len(u) - ntt) / float(len(u))
+
+
+def sokalmichener(u, v):
+ r"""
+ Computes the Sokal-Michener dissimilarity between two boolean vectors
+ ``u`` and ``v``, which is defined as
+
+ .. math::
+
+ \frac{R}
+ {S + R}
+
+ where :math:`c_{ij}` is the number of occurrences of
+ :math:`\mathtt{u[k]} = i` and :math:`\mathtt{v[k]} = j` for
+ :math:`k < n`, :math:`R = 2 * (c_{TF} + c_{FT})` and
+ :math:`S = c_{FF} + c_{TT}`.
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+ An :math:`n`-dimensional vector.
+
+ Returns
+ -------
+ d : double
+ The Sokal-Michener dissimilarity between vectors ``u`` and ``v``.
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v)
+ if u.dtype == np.bool:
+ ntt = (u & v).sum()
+ nff = (~u & ~v).sum()
+ else:
+ ntt = (u * v).sum()
+ nff = ((1.0 - u) * (1.0 - v)).sum()
+ (nft, ntf) = _nbool_correspond_ft_tf(u, v)
+ return float(2.0 * (ntf + nft)) / float(ntt + nff + 2.0 * (ntf + nft))
+
+
+def sokalsneath(u, v):
+ r"""
+ Computes the Sokal-Sneath dissimilarity between two boolean vectors
+ ``u`` and ``v``,
+
+ .. math::
+
+ \frac{R}
+ {c_{TT} + R}
+
+ where :math:`c_{ij}` is the number of occurrences of
+ :math:`\mathtt{u[k]} = i` and :math:`\mathtt{v[k]} = j` for
+ :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.
+
+ Parameters
+ ----------
+ u : ndarray
+ An :math:`n`-dimensional vector.
+ v : ndarray
+ An :math:`n`-dimensional vector.
+
+ Returns
+ -------
+ d : double
+ The Sokal-Sneath dissimilarity between vectors ``u`` and ``v``.
+ """
+ u = _validate_vector(u)
+ v = _validate_vector(v)
+ if u.dtype == np.bool:
+ ntt = (u & v).sum()
+ else:
+ ntt = (u * v).sum()
+ (nft, ntf) = _nbool_correspond_ft_tf(u, v)
+ denom = ntt + 2.0 * (ntf + nft)
+ if denom == 0:
+ raise ValueError('Sokal-Sneath dissimilarity is not defined for '
+ 'vectors that are entirely false.')
+ return float(2.0 * (ntf + nft)) / denom
+
+
+def pdist(X, metric='euclidean', p=2, w=None, V=None, VI=None):
+ r"""
+ Computes the pairwise distances between m original observations in
+ n-dimensional space. Returns a condensed distance matrix Y. For
+    each :math:`i` and :math:`j` (where :math:`i<j<m`), the
+ metric ``dist(u=X[i], v=X[j])`` is computed and stored in the
+ :math:`ij`th entry.
+
+ See ``squareform`` for information on how to calculate the index of
+ this entry or to convert the condensed distance matrix to a
+ redundant square matrix.
+
+ The following are common calling conventions.
+
+ 1. ``Y = pdist(X, 'euclidean')``
+
+ Computes the distance between m points using Euclidean distance
+ (2-norm) as the distance metric between the points. The points
+ are arranged as m n-dimensional row vectors in the matrix X.
+
+ 2. ``Y = pdist(X, 'minkowski', p)``
+
+ Computes the distances using the Minkowski distance
+ :math:`||u-v||_p` (p-norm) where :math:`p \geq 1`.
+
+ 3. ``Y = pdist(X, 'cityblock')``
+
+ Computes the city block or Manhattan distance between the
+ points.
+
+ 4. ``Y = pdist(X, 'seuclidean', V=None)``
+
+ Computes the standardized Euclidean distance. The standardized
+ Euclidean distance between two n-vectors ``u`` and ``v`` is
+
+ .. math::
+
+         \sqrt{\sum_i {(u_i-v_i)^2 / V_i}}.
+
+
+ V is the variance vector; V[i] is the variance computed over all
+ the i'th components of the points. If not passed, it is
+ automatically computed.
+
+ 5. ``Y = pdist(X, 'sqeuclidean')``
+
+ Computes the squared Euclidean distance :math:`||u-v||_2^2` between
+ the vectors.
+
+ 6. ``Y = pdist(X, 'cosine')``
+
+ Computes the cosine distance between vectors u and v,
+
+ .. math::
+
+ 1 - \frac{uv^T}
+ {{|u|}_2 {|v|}_2}
+
+       where :math:`|*|_2` is the 2-norm of its argument ``*``.
+
+ 7. ``Y = pdist(X, 'correlation')``
+
+ Computes the correlation distance between vectors u and v. This is
+
+ .. math::
+
+          1 - \frac{(u - \bar{u})(v - \bar{v})^T}
+                   {{||u - \bar{u}||}_2 {||v - \bar{v}||}_2}
+
+ where :math:`\bar{v}` is the mean of the elements of vector v.
+
+ 8. ``Y = pdist(X, 'hamming')``
+
+ Computes the normalized Hamming distance, or the proportion of
+ those vector elements between two n-vectors ``u`` and ``v``
+ which disagree. To save memory, the matrix ``X`` can be of type
+ boolean.
+
+ 9. ``Y = pdist(X, 'jaccard')``
+
+ Computes the Jaccard distance between the points. Given two
+ vectors, ``u`` and ``v``, the Jaccard distance is the
+ proportion of those elements ``u[i]`` and ``v[i]`` that
+ disagree where at least one of them is non-zero.
+
+ 10. ``Y = pdist(X, 'chebyshev')``
+
+ Computes the Chebyshev distance between the points. The
+ Chebyshev distance between two n-vectors ``u`` and ``v`` is the
+ maximum norm-1 distance between their respective elements. More
+ precisely, the distance is given by
+
+ .. math::
+
+ d(u,v) = \max_i {|u_i-v_i|}.
+
+ 11. ``Y = pdist(X, 'canberra')``
+
+ Computes the Canberra distance between the points. The
+ Canberra distance between two points ``u`` and ``v`` is
+
+ .. math::
+
+          d(u,v) = \sum_i \frac{|u_i-v_i|}
+ {(|u_i|+|v_i|)}
+
+
+ 12. ``Y = pdist(X, 'braycurtis')``
+
+ Computes the Bray-Curtis distance between the points. The
+ Bray-Curtis distance between two points ``u`` and ``v`` is
+
+
+ .. math::
+
+          d(u,v) = \frac{\sum_i |u_i-v_i|}
+                        {\sum_i |u_i+v_i|}
+
+ 13. ``Y = pdist(X, 'mahalanobis', VI=None)``
+
+ Computes the Mahalanobis distance between the points. The
+ Mahalanobis distance between two points ``u`` and ``v`` is
+ :math:`(u-v)(1/V)(u-v)^T` where :math:`(1/V)` (the ``VI``
+ variable) is the inverse covariance. If ``VI`` is not None,
+ ``VI`` will be used as the inverse covariance matrix.
+
+ 14. ``Y = pdist(X, 'yule')``
+
+ Computes the Yule distance between each pair of boolean
+ vectors. (see yule function documentation)
+
+ 15. ``Y = pdist(X, 'matching')``
+
+ Computes the matching distance between each pair of boolean
+ vectors. (see matching function documentation)
+
+ 16. ``Y = pdist(X, 'dice')``
+
+ Computes the Dice distance between each pair of boolean
+ vectors. (see dice function documentation)
+
+ 17. ``Y = pdist(X, 'kulsinski')``
+
+ Computes the Kulsinski distance between each pair of
+ boolean vectors. (see kulsinski function documentation)
+
+ 18. ``Y = pdist(X, 'rogerstanimoto')``
+
+ Computes the Rogers-Tanimoto distance between each pair of
+ boolean vectors. (see rogerstanimoto function documentation)
+
+ 19. ``Y = pdist(X, 'russellrao')``
+
+ Computes the Russell-Rao distance between each pair of
+ boolean vectors. (see russellrao function documentation)
+
+ 20. ``Y = pdist(X, 'sokalmichener')``
+
+ Computes the Sokal-Michener distance between each pair of
+ boolean vectors. (see sokalmichener function documentation)
+
+ 21. ``Y = pdist(X, 'sokalsneath')``
+
+ Computes the Sokal-Sneath distance between each pair of
+ boolean vectors. (see sokalsneath function documentation)
+
+ 22. ``Y = pdist(X, 'wminkowski')``
+
+ Computes the weighted Minkowski distance between each pair of
+ vectors. (see wminkowski function documentation)
+
+ 23. ``Y = pdist(X, f)``
+
+ Computes the distance between all pairs of vectors in X
+ using the user supplied 2-arity function f. For example,
+ Euclidean distance between the vectors could be computed
+ as follows::
+
+ dm = pdist(X, (lambda u, v: np.sqrt(((u-v)*(u-v).T).sum())))
+
+ Note that you should avoid passing a reference to one of
+ the distance functions defined in this library. For example,::
+
+ dm = pdist(X, sokalsneath)
+
+ would calculate the pair-wise distances between the vectors in
+ X using the Python function sokalsneath. This would result in
+ sokalsneath being called :math:`{n \choose 2}` times, which
+ is inefficient. Instead, the optimized C version is more
+ efficient, and we call it using the following syntax.::
+
+ dm = pdist(X, 'sokalsneath')
+
+ Parameters
+ ----------
+ X : ndarray
+ An m by n array of m original observations in an
+ n-dimensional space.
+ metric : string or function
+ The distance metric to use. The distance function can
+ be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
+ 'correlation', 'cosine', 'dice', 'euclidean', 'hamming',
+ 'jaccard', 'kulsinski', 'mahalanobis', 'matching',
+ 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
+ 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'.
+ w : ndarray
+ The weight vector (for weighted Minkowski).
+ p : double
+ The p-norm to apply (for Minkowski, weighted and unweighted)
+ V : ndarray
+ The variance vector (for standardized Euclidean).
+ VI : ndarray
+ The inverse of the covariance matrix (for Mahalanobis).
+
+ Returns
+ -------
+ Y : ndarray
+ A condensed distance matrix.
+
+ See Also
+ --------
+ squareform : converts between condensed distance matrices and
+ square distance matrices.
+ """
+
+# 21. Y = pdist(X, 'test_Y')
+#
+# Computes the distance between all pairs of vectors in X
+# using the distance metric Y but with a more succinct,
+# verifiable, but less efficient implementation.
+
+ X = np.asarray(X, order='c')
+
+ # The C code doesn't do striding.
+ [X] = _copy_arrays_if_base_present([_convert_to_double(X)])
+
+ s = X.shape
+ if len(s) != 2:
+ raise ValueError('A 2-dimensional array must be passed.')
+
+ m, n = s
+ dm = np.zeros((m * (m - 1) / 2,), dtype=np.double)
+
+ wmink_names = ['wminkowski', 'wmi', 'wm', 'wpnorm']
+ if w is None and (metric == wminkowski or metric in wmink_names):
+ raise ValueError('weighted minkowski requires a weight '
+ 'vector `w` to be given.')
+
+ if callable(metric):
+ if metric == minkowski:
+ def dfun(u, v):
+ return minkowski(u, v, p)
+ elif metric == wminkowski:
+ def dfun(u, v):
+ return wminkowski(u, v, p, w)
+ elif metric == seuclidean:
+ def dfun(u, v):
+ return seuclidean(u, v, V)
+ elif metric == mahalanobis:
+ def dfun(u, v):
+                return mahalanobis(u, v, VI)
+ else:
+ dfun = metric
+
+ k = 0
+ for i in xrange(0, m - 1):
+ for j in xrange(i + 1, m):
+ dm[k] = dfun(X[i], X[j])
+ k = k + 1
+
+ elif isinstance(metric, basestring):
+ mstr = metric.lower()
+
+ #if X.dtype != np.double and \
+ # (mstr != 'hamming' and mstr != 'jaccard'):
+ # TypeError('A double array must be passed.')
+ if mstr in set(['euclidean', 'euclid', 'eu', 'e']):
+ _distance_wrap.pdist_euclidean_wrap(_convert_to_double(X), dm)
+ elif mstr in set(['sqeuclidean', 'sqe', 'sqeuclid']):
+ _distance_wrap.pdist_euclidean_wrap(_convert_to_double(X), dm)
+ dm = dm ** 2.0
+ elif mstr in set(['cityblock', 'cblock', 'cb', 'c']):
+ _distance_wrap.pdist_city_block_wrap(X, dm)
+ elif mstr in set(['hamming', 'hamm', 'ha', 'h']):
+ if X.dtype == np.bool:
+ _distance_wrap.pdist_hamming_bool_wrap(_convert_to_bool(X), dm)
+ else:
+ _distance_wrap.pdist_hamming_wrap(_convert_to_double(X), dm)
+ elif mstr in set(['jaccard', 'jacc', 'ja', 'j']):
+ if X.dtype == np.bool:
+ _distance_wrap.pdist_jaccard_bool_wrap(_convert_to_bool(X), dm)
+ else:
+ _distance_wrap.pdist_jaccard_wrap(_convert_to_double(X), dm)
+ elif mstr in set(['chebychev', 'chebyshev', 'cheby', 'cheb', 'ch']):
+ _distance_wrap.pdist_chebyshev_wrap(_convert_to_double(X), dm)
+ elif mstr in set(['minkowski', 'mi', 'm']):
+ _distance_wrap.pdist_minkowski_wrap(_convert_to_double(X), dm, p)
+ elif mstr in wmink_names:
+ _distance_wrap.pdist_weighted_minkowski_wrap(_convert_to_double(X),
+ dm, p, np.asarray(w))
+ elif mstr in set(['seuclidean', 'se', 's']):
+ if V is not None:
+ V = np.asarray(V, order='c')
+ if type(V) != np.ndarray:
+ raise TypeError('Variance vector V must be a numpy array')
+ if V.dtype != np.double:
+ raise TypeError('Variance vector V must contain doubles.')
+ if len(V.shape) != 1:
+ raise ValueError('Variance vector V must '
+ 'be one-dimensional.')
+ if V.shape[0] != n:
+ raise ValueError('Variance vector V must be of the same '
+ 'dimension as the vectors on which the distances '
+ 'are computed.')
+ # The C code doesn't do striding.
+ [VV] = _copy_arrays_if_base_present([_convert_to_double(V)])
+ else:
+ VV = np.var(X, axis=0, ddof=1)
+ _distance_wrap.pdist_seuclidean_wrap(_convert_to_double(X), VV, dm)
+ # Need to test whether vectorized cosine works better.
+ # Find out: Is there a dot subtraction operator so I can
+ # subtract matrices in a similar way to multiplying them?
+ # Need to get rid of as much unnecessary C code as possible.
+ elif mstr in set(['cosine', 'cos']):
+ norms = np.sqrt(np.sum(X * X, axis=1))
+ _distance_wrap.pdist_cosine_wrap(_convert_to_double(X), dm, norms)
+ elif mstr in set(['old_cosine', 'old_cos']):
+ norms = np.sqrt(np.sum(X * X, axis=1))
+ nV = norms.reshape(m, 1)
+ # The numerator u * v
+ nm = np.dot(X, X.T)
+ # The denom. ||u||*||v||
+ de = np.dot(nV, nV.T)
+ dm = 1.0 - (nm / de)
+ dm[xrange(0, m), xrange(0, m)] = 0.0
+ dm = squareform(dm)
+ elif mstr in set(['correlation', 'co']):
+ X2 = X - X.mean(1)[:, np.newaxis]
+ #X2 = X - np.matlib.repmat(np.mean(X, axis=1).reshape(m, 1), 1, n)
+ norms = np.sqrt(np.sum(X2 * X2, axis=1))
+ _distance_wrap.pdist_cosine_wrap(_convert_to_double(X2),
+ _convert_to_double(dm),
+ _convert_to_double(norms))
+ elif mstr in set(['mahalanobis', 'mahal', 'mah']):
+ if VI is not None:
+ VI = _convert_to_double(np.asarray(VI, order='c'))
+ if type(VI) != np.ndarray:
+ raise TypeError('VI must be a numpy array.')
+ if VI.dtype != np.double:
+ raise TypeError('The array must contain 64-bit floats.')
+ [VI] = _copy_arrays_if_base_present([VI])
+ else:
+ V = np.cov(X.T)
+ VI = _convert_to_double(np.linalg.inv(V).T.copy())
+ # (u-v)V^(-1)(u-v)^T
+ _distance_wrap.pdist_mahalanobis_wrap(_convert_to_double(X),
+ VI, dm)
+ elif mstr == 'canberra':
+ _distance_wrap.pdist_canberra_wrap(_convert_to_double(X), dm)
+ elif mstr == 'braycurtis':
+ _distance_wrap.pdist_bray_curtis_wrap(_convert_to_double(X), dm)
+ elif mstr == 'yule':
+ _distance_wrap.pdist_yule_bool_wrap(_convert_to_bool(X), dm)
+ elif mstr == 'matching':
+ _distance_wrap.pdist_matching_bool_wrap(_convert_to_bool(X), dm)
+ elif mstr == 'kulsinski':
+ _distance_wrap.pdist_kulsinski_bool_wrap(_convert_to_bool(X), dm)
+ elif mstr == 'dice':
+ _distance_wrap.pdist_dice_bool_wrap(_convert_to_bool(X), dm)
+ elif mstr == 'rogerstanimoto':
+ _distance_wrap.pdist_rogerstanimoto_bool_wrap(_convert_to_bool(X),
+ dm)
+ elif mstr == 'russellrao':
+ _distance_wrap.pdist_russellrao_bool_wrap(_convert_to_bool(X), dm)
+ elif mstr == 'sokalmichener':
+ _distance_wrap.pdist_sokalmichener_bool_wrap(_convert_to_bool(X),
+ dm)
+ elif mstr == 'sokalsneath':
+ _distance_wrap.pdist_sokalsneath_bool_wrap(_convert_to_bool(X), dm)
+ elif metric == 'test_euclidean':
+ dm = pdist(X, euclidean)
+        elif metric == 'test_sqeuclidean':
+            dm = pdist(X, sqeuclidean)
+        elif metric == 'test_seuclidean':
+            if V is None:
+                V = np.var(X, axis=0, ddof=1)
+            else:
+                V = np.asarray(V, order='c')
+            dm = pdist(X, lambda u, v: seuclidean(u, v, V))
+ elif metric == 'test_braycurtis':
+ dm = pdist(X, braycurtis)
+ elif metric == 'test_mahalanobis':
+ if VI is None:
+ V = np.cov(X.T)
+ VI = np.linalg.inv(V)
+ else:
+ VI = np.asarray(VI, order='c')
+ [VI] = _copy_arrays_if_base_present([VI])
+ # (u-v)V^(-1)(u-v)^T
+ dm = pdist(X, (lambda u, v: mahalanobis(u, v, VI)))
+ elif metric == 'test_canberra':
+ dm = pdist(X, canberra)
+ elif metric == 'test_cityblock':
+ dm = pdist(X, cityblock)
+ elif metric == 'test_minkowski':
+ dm = pdist(X, minkowski, p=p)
+ elif metric == 'test_wminkowski':
+ dm = pdist(X, wminkowski, p=p, w=w)
+ elif metric == 'test_cosine':
+ dm = pdist(X, cosine)
+ elif metric == 'test_correlation':
+ dm = pdist(X, correlation)
+ elif metric == 'test_hamming':
+ dm = pdist(X, hamming)
+ elif metric == 'test_jaccard':
+ dm = pdist(X, jaccard)
+ elif metric == 'test_chebyshev' or metric == 'test_chebychev':
+ dm = pdist(X, chebyshev)
+ elif metric == 'test_yule':
+ dm = pdist(X, yule)
+ elif metric == 'test_matching':
+ dm = pdist(X, matching)
+ elif metric == 'test_dice':
+ dm = pdist(X, dice)
+ elif metric == 'test_kulsinski':
+ dm = pdist(X, kulsinski)
+ elif metric == 'test_rogerstanimoto':
+ dm = pdist(X, rogerstanimoto)
+ elif metric == 'test_russellrao':
+ dm = pdist(X, russellrao)
+ elif metric == 'test_sokalsneath':
+ dm = pdist(X, sokalsneath)
+ elif metric == 'test_sokalmichener':
+ dm = pdist(X, sokalmichener)
+ else:
+ raise ValueError('Unknown Distance Metric: %s' % mstr)
+ else:
+ raise TypeError('2nd argument metric must be a string identifier '
+ 'or a function.')
+ return dm
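+
+# A minimal example of the condensed output (three points in the plane;
+# the entries are d(0, 1), d(0, 2), d(1, 2)):
+#
+#     >>> pdist([[0, 0], [3, 4], [0, 1]])
+#     array([ 5.        ,  1.        ,  4.24264069])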
+
+
+def squareform(X, force="no", checks=True):
+ r"""
+ Converts a vector-form distance vector to a square-form distance
+ matrix, and vice-versa.
+
+    Parameters
+    ----------
+    X : ndarray
+        Either a condensed or redundant distance matrix.
+    force : string
+        As with MATLAB(TM), if force is equal to 'tovector' or
+        'tomatrix', the input will be treated as a distance matrix
+        or distance vector respectively.
+    checks : bool
+        If ``checks`` is set to ``False``, no checks will be made
+        for matrix symmetry or zero diagonals. This is useful if
+        it is known that ``X - X.T`` is small and ``diag(X)`` is
+        close to zero. These values are ignored anyway, so they do
+        not disrupt the squareform transformation.
+
+    Returns
+    -------
+    Y : ndarray
+        If a condensed distance matrix is passed, a redundant
+        one is returned; if a redundant one is passed, a
+        condensed distance matrix is returned.
+
+
+ Calling Conventions
+ -------------------
+
+    1. v = squareform(X)
+
+       Given a square d by d symmetric distance matrix ``X``,
+       ``v = squareform(X)`` returns a :math:`d(d-1)/2` (or
+       :math:`{n \choose 2}`) sized vector v.
+
+       :math:`v[{n \choose 2} - {n-i \choose 2} + (j-i-1)]` is the
+       distance between points i and j. If X is non-square or asymmetric,
+       an error is raised.
+
+    2. X = squareform(v)
+
+       Given a :math:`d(d-1)/2` sized v for some integer d >= 2 encoding
+       distances as described, ``X = squareform(v)`` returns a d by d
+       distance matrix X. The X[i, j] and X[j, i] values are set to
+       :math:`v[{n \choose 2} - {n-i \choose 2} + (j-i-1)]` and all
+       diagonal elements are zero.
+
+ """
+
+ X = _convert_to_double(np.asarray(X, order='c'))
+
+ if not np.issubsctype(X, np.double):
+ raise TypeError('A double array must be passed.')
+
+ s = X.shape
+
+ if force.lower() == 'tomatrix':
+ if len(s) != 1:
+ raise ValueError("Forcing 'tomatrix' but input X is not a "
+ "distance vector.")
+ elif force.lower() == 'tovector':
+ if len(s) != 2:
+ raise ValueError("Forcing 'tovector' but input X is not a "
+ "distance matrix.")
+
+ # X = squareform(v)
+ if len(s) == 1:
+ if X.shape[0] == 0:
+ return np.zeros((1, 1), dtype=np.double)
+
+ # Grab the closest value to the square root of the number
+ # of elements times 2 to see if the number of elements
+ # is indeed a binomial coefficient.
+ d = int(np.ceil(np.sqrt(X.shape[0] * 2)))
+
+ # Check that v is of valid dimensions.
+ if d * (d - 1) / 2 != int(s[0]):
+ raise ValueError('Incompatible vector size. It must be a binomial '
+ 'coefficient n choose 2 for some integer n >= 2.')
+
+ # Allocate memory for the distance matrix.
+ M = np.zeros((d, d), dtype=np.double)
+
+ # Since the C code does not support striding using strides.
+ # The dimensions are used instead.
+ [X] = _copy_arrays_if_base_present([X])
+
+ # Fill in the values of the distance matrix.
+ _distance_wrap.to_squareform_from_vector_wrap(M, X)
+
+ # Return the distance matrix.
+ M = M + M.transpose()
+ return M
+ elif len(s) == 2:
+ if s[0] != s[1]:
+ raise ValueError('The matrix argument must be square.')
+ if checks:
+ is_valid_dm(X, throw=True, name='X')
+
+ # One-side of the dimensions is set here.
+ d = s[0]
+
+ if d <= 1:
+ return np.array([], dtype=np.double)
+
+ # Create a vector.
+ v = np.zeros(((d * (d - 1) / 2),), dtype=np.double)
+
+ # Since the C code does not support striding using strides.
+ # The dimensions are used instead.
+ [X] = _copy_arrays_if_base_present([X])
+
+ # Convert the vector to squareform.
+ _distance_wrap.to_vector_from_squareform_wrap(X, v)
+ return v
+ else:
+        raise ValueError(('The first argument must be a one- or two-'
+                          'dimensional array. A %d-dimensional array is not '
+                          'permitted.') % len(s))
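+
+# Round-tripping a condensed matrix through squareform (illustrative):
+#
+#     >>> squareform([5.0, 1.0, 4.0])
+#     array([[ 0.,  5.,  1.],
+#            [ 5.,  0.,  4.],
+#            [ 1.,  4.,  0.]])
+#     >>> squareform(squareform([5.0, 1.0, 4.0]))
+#     array([ 5.,  1.,  4.])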
+
+
+def is_valid_dm(D, tol=0.0, throw=False, name="D", warning=False):
+ """
+ Returns True if the variable D passed is a valid distance matrix.
+ Distance matrices must be 2-dimensional numpy arrays containing
+ doubles. They must have a zero-diagonal, and they must be symmetric.
+
+ Parameters
+ ----------
+ D : ndarray
+ The candidate object to test for validity.
+ tol : double
+ The distance matrix should be symmetric. tol is the maximum
+ difference between the :math:`ij`th entry and the
+ :math:`ji`th entry for the distance metric to be
+ considered symmetric.
+ throw : bool
+ An exception is thrown if the distance matrix passed is not
+ valid.
+ name : string
+        The name of the variable to be checked. This is useful if
+ throw is set to ``True`` so the offending variable can be
+ identified in the exception message when an exception is
+ thrown.
+ warning : bool
+ Instead of throwing an exception, a warning message is
+ raised.
+
+ Returns
+ -------
+ Returns ``True`` if the variable ``D`` passed is a valid
+ distance matrix. Small numerical differences in ``D`` and
+ ``D.T`` and non-zeroness of the diagonal are ignored if they are
+ within the tolerance specified by ``tol``.
+ """
+ D = np.asarray(D, order='c')
+ valid = True
+ try:
+ s = D.shape
+ if D.dtype != np.double:
+ if name:
+ raise TypeError(('Distance matrix \'%s\' must contain doubles '
+ '(double).') % name)
+ else:
+ raise TypeError('Distance matrix must contain doubles '
+ '(double).')
+ if len(D.shape) != 2:
+ if name:
+ raise ValueError(('Distance matrix \'%s\' must have shape=2 '
+ '(i.e. be two-dimensional).') % name)
+ else:
+ raise ValueError('Distance matrix must have shape=2 (i.e. '
+ 'be two-dimensional).')
+ if tol == 0.0:
+ if not (D == D.T).all():
+ if name:
+ raise ValueError(('Distance matrix \'%s\' must be '
+ 'symmetric.') % name)
+ else:
+ raise ValueError('Distance matrix must be symmetric.')
+ if not (D[xrange(0, s[0]), xrange(0, s[0])] == 0).all():
+ if name:
+ raise ValueError(('Distance matrix \'%s\' diagonal must '
+ 'be zero.') % name)
+ else:
+ raise ValueError('Distance matrix diagonal must be zero.')
+ else:
+ if not (D - D.T <= tol).all():
+ if name:
+ raise ValueError(('Distance matrix \'%s\' must be '
+                                          'symmetric within tolerance %5.5f.')
+ % (name, tol))
+ else:
+ raise ValueError('Distance matrix must be symmetric within'
+ ' tolerance %5.5f.' % tol)
+ if not (D[xrange(0, s[0]), xrange(0, s[0])] <= tol).all():
+ if name:
+ raise ValueError(('Distance matrix \'%s\' diagonal must be'
+ ' close to zero within tolerance %5.5f.')
+ % (name, tol))
+                else:
+                    raise ValueError('Distance matrix diagonal must be '
+                                     'close to zero within tolerance '
+                                     '%5.5f.' % tol)
+ except Exception, e:
+ if throw:
+ raise
+ if warning:
+ warnings.warn(str(e))
+ valid = False
+ return valid
+
+
+def is_valid_y(y, warning=False, throw=False, name=None):
+ r"""
+ Returns ``True`` if the variable ``y`` passed is a valid condensed
+ distance matrix. Condensed distance matrices must be 1-dimensional
+ numpy arrays containing doubles. Their length must be a binomial
+ coefficient :math:`{n \choose 2}` for some positive integer n.
+
+
+ Parameters
+ ----------
+ y : ndarray
+ The condensed distance matrix.
+ warning : bool, optional
+ Invokes a warning if the variable passed is not a valid
+ condensed distance matrix. The warning message explains why
+ the distance matrix is not valid. 'name' is used when
+ referencing the offending variable.
+    throw : bool, optional
+        Throws an exception if the variable passed is not a valid
+        condensed distance matrix.
+    name : string, optional
+ Used when referencing the offending variable in the
+ warning or exception message.
+
+ """
+ y = np.asarray(y, order='c')
+ valid = True
+ try:
+ if type(y) != np.ndarray:
+ if name:
+ raise TypeError(('\'%s\' passed as a condensed distance '
+ 'matrix is not a numpy array.') % name)
+ else:
+ raise TypeError('Variable is not a numpy array.')
+ if y.dtype != np.double:
+ if name:
+ raise TypeError(('Condensed distance matrix \'%s\' must '
+ 'contain doubles (double).') % name)
+ else:
+ raise TypeError('Condensed distance matrix must contain '
+ 'doubles (double).')
+ if len(y.shape) != 1:
+ if name:
+ raise ValueError(('Condensed distance matrix \'%s\' must '
+ 'have shape=1 (i.e. be one-dimensional).')
+ % name)
+ else:
+ raise ValueError('Condensed distance matrix must have shape=1 '
+ '(i.e. be one-dimensional).')
+ n = y.shape[0]
+ d = int(np.ceil(np.sqrt(n * 2)))
+ if (d * (d - 1) / 2) != n:
+ if name:
+                raise ValueError(('Length n of condensed distance matrix '
+                                  '\'%s\' must be a binomial coefficient, '
+                                  'i.e. there must be a k such that '
+                                  '(k choose 2) = n.') % name)
+            else:
+                raise ValueError('Length n of condensed distance matrix must '
+                                 'be a binomial coefficient, i.e. there must '
+                                 'be a k such that (k choose 2) = n.')
+ except Exception, e:
+ if throw:
+ raise
+ if warning:
+ warnings.warn(str(e))
+ valid = False
+ return valid
+
+
+def num_obs_dm(d):
+ """
+ Returns the number of original observations that correspond to a
+ square, redundant distance matrix ``D``.
+
+ Parameters
+ ----------
+ d : ndarray
+ The target distance matrix.
+
+ Returns
+ -------
+ numobs : int
+ The number of observations in the redundant distance matrix.
+ """
+ d = np.asarray(d, order='c')
+ is_valid_dm(d, tol=np.inf, throw=True, name='d')
+ return d.shape[0]
+
+
+def num_obs_y(Y):
+ """
+ Returns the number of original observations that correspond to a
+ condensed distance matrix ``Y``.
+
+ Parameters
+ ----------
+ Y : ndarray
+        A condensed distance matrix.
+
+ Returns
+ -------
+ n : int
+ The number of observations in the condensed distance matrix
+ passed.
+ """
+ Y = np.asarray(Y, order='c')
+ is_valid_y(Y, throw=True, name='Y')
+ k = Y.shape[0]
+ if k == 0:
+ raise ValueError("The number of observations cannot be determined on "
+ "an empty distance matrix.")
+ d = int(np.ceil(np.sqrt(k * 2)))
+ if (d * (d - 1) / 2) != k:
+ raise ValueError("Invalid condensed distance matrix passed. Must be "
+ "some k where k=(n choose 2) for some n >= 2.")
+ return d
+
+
+def cdist(XA, XB, metric='euclidean', p=2, V=None, VI=None, w=None):
+ r"""
+ Computes distance between each pair of observation vectors in the
+ Cartesian product of two collections of vectors. ``XA`` is a
+ :math:`m_A` by :math:`n` array while ``XB`` is a :math:`m_B` by
+ :math:`n` array. A :math:`m_A` by :math:`m_B` array is
+ returned. An exception is thrown if ``XA`` and ``XB`` do not have
+ the same number of columns.
+
+ A rectangular distance matrix ``Y`` is returned. For each :math:`i`
+ and :math:`j`, the metric ``dist(u=XA[i], v=XB[j])`` is computed
+ and stored in the :math:`ij` th entry.
+
+ The following are common calling conventions:
+
+ 1. ``Y = cdist(XA, XB, 'euclidean')``
+
+ Computes the distance between :math:`m` points using
+ Euclidean distance (2-norm) as the distance metric between the
+ points. The points are arranged as :math:`m`
+ :math:`n`-dimensional row vectors in the matrix X.
+
+ 2. ``Y = cdist(XA, XB, 'minkowski', p)``
+
+ Computes the distances using the Minkowski distance
+ :math:`||u-v||_p` (:math:`p`-norm) where :math:`p \geq 1`.
+
+ 3. ``Y = cdist(XA, XB, 'cityblock')``
+
+ Computes the city block or Manhattan distance between the
+ points.
+
+ 4. ``Y = cdist(XA, XB, 'seuclidean', V=None)``
+
+ Computes the standardized Euclidean distance. The standardized
+ Euclidean distance between two n-vectors ``u`` and ``v`` is
+
+ .. math::
+
+          \sqrt{\sum_i {(u_i-v_i)^2 / V_i}}.
+
+ V is the variance vector; V[i] is the variance computed over all
+ the i'th components of the points. If not passed, it is
+ automatically computed.
+
+ 5. ``Y = cdist(XA, XB, 'sqeuclidean')``
+
+ Computes the squared Euclidean distance :math:`||u-v||_2^2` between
+ the vectors.
+
+ 6. ``Y = cdist(XA, XB, 'cosine')``
+
+ Computes the cosine distance between vectors u and v,
+
+ .. math::
+
+          1 - \frac{u v^T}
+                   {{||u||}_2 {||v||}_2}
+
+       where :math:`||*||_2` is the 2-norm of its argument ``*``.
+
+ 7. ``Y = cdist(XA, XB, 'correlation')``
+
+ Computes the correlation distance between vectors u and v. This is
+
+ .. math::
+
+          1 - \frac{(u - \bar{u})(v - \bar{v})^T}
+                   {{||u - \bar{u}||}_2 {||v - \bar{v}||}_2}
+
+       where :math:`\bar{u}` is the mean of the elements of vector u,
+       and likewise for :math:`\bar{v}`.
+
+ 8. ``Y = cdist(XA, XB, 'hamming')``
+
+ Computes the normalized Hamming distance, or the proportion of
+ those vector elements between two n-vectors ``u`` and ``v``
+ which disagree. To save memory, the matrix ``X`` can be of type
+ boolean.
+
+ 9. ``Y = cdist(XA, XB, 'jaccard')``
+
+ Computes the Jaccard distance between the points. Given two
+ vectors, ``u`` and ``v``, the Jaccard distance is the
+ proportion of those elements ``u[i]`` and ``v[i]`` that
+ disagree where at least one of them is non-zero.
+
+ 10. ``Y = cdist(XA, XB, 'chebyshev')``
+
+ Computes the Chebyshev distance between the points. The
+ Chebyshev distance between two n-vectors ``u`` and ``v`` is the
+        maximum absolute difference between their respective elements. More
+ precisely, the distance is given by
+
+ .. math::
+
+ d(u,v) = \max_i {|u_i-v_i|}.
+
+ 11. ``Y = cdist(XA, XB, 'canberra')``
+
+ Computes the Canberra distance between the points. The
+ Canberra distance between two points ``u`` and ``v`` is
+
+ .. math::
+
+           d(u,v) = \sum_i \frac{|u_i-v_i|}
+ {(|u_i|+|v_i|)}
+
+
+ 12. ``Y = cdist(XA, XB, 'braycurtis')``
+
+ Computes the Bray-Curtis distance between the points. The
+ Bray-Curtis distance between two points ``u`` and ``v`` is
+
+
+ .. math::
+
+           d(u,v) = \frac{\sum_i |u_i-v_i|}
+                         {\sum_i |u_i+v_i|}
+
+ 13. ``Y = cdist(XA, XB, 'mahalanobis', VI=None)``
+
+ Computes the Mahalanobis distance between the points. The
+ Mahalanobis distance between two points ``u`` and ``v`` is
+ :math:`(u-v)(1/V)(u-v)^T` where :math:`(1/V)` (the ``VI``
+ variable) is the inverse covariance. If ``VI`` is not None,
+ ``VI`` will be used as the inverse covariance matrix.
+
+ 14. ``Y = cdist(XA, XB, 'yule')``
+
+ Computes the Yule distance between the boolean
+ vectors. (see yule function documentation)
+
+ 15. ``Y = cdist(XA, XB, 'matching')``
+
+ Computes the matching distance between the boolean
+ vectors. (see matching function documentation)
+
+ 16. ``Y = cdist(XA, XB, 'dice')``
+
+ Computes the Dice distance between the boolean vectors. (see
+ dice function documentation)
+
+ 17. ``Y = cdist(XA, XB, 'kulsinski')``
+
+ Computes the Kulsinski distance between the boolean
+ vectors. (see kulsinski function documentation)
+
+ 18. ``Y = cdist(XA, XB, 'rogerstanimoto')``
+
+ Computes the Rogers-Tanimoto distance between the boolean
+ vectors. (see rogerstanimoto function documentation)
+
+ 19. ``Y = cdist(XA, XB, 'russellrao')``
+
+ Computes the Russell-Rao distance between the boolean
+ vectors. (see russellrao function documentation)
+
+ 20. ``Y = cdist(XA, XB, 'sokalmichener')``
+
+ Computes the Sokal-Michener distance between the boolean
+ vectors. (see sokalmichener function documentation)
+
+ 21. ``Y = cdist(XA, XB, 'sokalsneath')``
+
+ Computes the Sokal-Sneath distance between the vectors. (see
+ sokalsneath function documentation)
+
+
+ 22. ``Y = cdist(XA, XB, 'wminkowski')``
+
+ Computes the weighted Minkowski distance between the
+       vectors. (see wminkowski function documentation)
+
+ 23. ``Y = cdist(XA, XB, f)``
+
+ Computes the distance between all pairs of vectors in X
+ using the user supplied 2-arity function f. For example,
+ Euclidean distance between the vectors could be computed
+ as follows::
+
+ dm = cdist(XA, XB, (lambda u, v: np.sqrt(((u-v)*(u-v).T).sum())))
+
+ Note that you should avoid passing a reference to one of
+ the distance functions defined in this library. For example,::
+
+ dm = cdist(XA, XB, sokalsneath)
+
+ would calculate the pair-wise distances between the vectors in
+ X using the Python function sokalsneath. This would result in
+ sokalsneath being called :math:`{n \choose 2}` times, which
+ is inefficient. Instead, the optimized C version is more
+ efficient, and we call it using the following syntax.::
+
+ dm = cdist(XA, XB, 'sokalsneath')
+
+ Parameters
+ ----------
+ XA : ndarray
+ An :math:`m_A` by :math:`n` array of :math:`m_A`
+ original observations in an :math:`n`-dimensional space.
+ XB : ndarray
+ An :math:`m_B` by :math:`n` array of :math:`m_B`
+ original observations in an :math:`n`-dimensional space.
+ metric : string or function
+ The distance metric to use. The distance function can
+ be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
+ 'correlation', 'cosine', 'dice', 'euclidean', 'hamming',
+ 'jaccard', 'kulsinski', 'mahalanobis', 'matching',
+ 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
+ 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski',
+ 'yule'.
+ w : ndarray
+ The weight vector (for weighted Minkowski).
+ p : double
+ The p-norm to apply (for Minkowski, weighted and unweighted)
+ V : ndarray
+ The variance vector (for standardized Euclidean).
+ VI : ndarray
+ The inverse of the covariance matrix (for Mahalanobis).
+
+
+ Returns
+ -------
+ Y : ndarray
+ A :math:`m_A` by :math:`m_B` distance matrix.
+ """
+
+# 21. Y = cdist(XA, XB, 'test_Y')
+#
+# Computes the distance between all pairs of vectors in X
+#    using the distance metric Y but with a more succinct,
+# verifiable, but less efficient implementation.
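+#
+# A small shape illustration: with XA of shape (5, 3) and XB of shape
+# (2, 3), cdist(XA, XB) returns a (5, 2) array whose [i, j] entry is the
+# distance between XA[i] and XB[j] under the chosen metric.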
+
+ XA = np.asarray(XA, order='c')
+ XB = np.asarray(XB, order='c')
+
+ #if np.issubsctype(X, np.floating) and not np.issubsctype(X, np.double):
+ # raise TypeError('Floating point arrays must be 64-bit (got %r).' %
+ # (X.dtype.type,))
+
+ # The C code doesn't do striding.
+ [XA] = _copy_arrays_if_base_present([_convert_to_double(XA)])
+ [XB] = _copy_arrays_if_base_present([_convert_to_double(XB)])
+
+ s = XA.shape
+ sB = XB.shape
+
+ if len(s) != 2:
+ raise ValueError('XA must be a 2-dimensional array.')
+ if len(sB) != 2:
+ raise ValueError('XB must be a 2-dimensional array.')
+ if s[1] != sB[1]:
+ raise ValueError('XA and XB must have the same number of columns '
+                         '(i.e. feature dimension).')
+
+ mA = s[0]
+ mB = sB[0]
+ n = s[1]
+ dm = np.zeros((mA, mB), dtype=np.double)
+
+ if callable(metric):
+ if metric == minkowski:
+ for i in xrange(0, mA):
+ for j in xrange(0, mB):
+ dm[i, j] = minkowski(XA[i, :], XB[j, :], p)
+ elif metric == wminkowski:
+ for i in xrange(0, mA):
+ for j in xrange(0, mB):
+ dm[i, j] = wminkowski(XA[i, :], XB[j, :], p, w)
+ elif metric == seuclidean:
+ for i in xrange(0, mA):
+ for j in xrange(0, mB):
+ dm[i, j] = seuclidean(XA[i, :], XB[j, :], V)
+ elif metric == mahalanobis:
+ for i in xrange(0, mA):
+ for j in xrange(0, mB):
+                    dm[i, j] = mahalanobis(XA[i, :], XB[j, :], VI)
+ else:
+ for i in xrange(0, mA):
+ for j in xrange(0, mB):
+ dm[i, j] = metric(XA[i, :], XB[j, :])
+ elif isinstance(metric, basestring):
+ mstr = metric.lower()
+
+ #if XA.dtype != np.double and \
+ # (mstr != 'hamming' and mstr != 'jaccard'):
+ # TypeError('A double array must be passed.')
+ if mstr in set(['euclidean', 'euclid', 'eu', 'e']):
+ _distance_wrap.cdist_euclidean_wrap(_convert_to_double(XA),
+ _convert_to_double(XB), dm)
+ elif mstr in set(['sqeuclidean', 'sqe', 'sqeuclid']):
+ _distance_wrap.cdist_euclidean_wrap(_convert_to_double(XA),
+ _convert_to_double(XB), dm)
+ dm **= 2.0
+ elif mstr in set(['cityblock', 'cblock', 'cb', 'c']):
+ _distance_wrap.cdist_city_block_wrap(_convert_to_double(XA),
+ _convert_to_double(XB), dm)
+ elif mstr in set(['hamming', 'hamm', 'ha', 'h']):
+ if XA.dtype == np.bool:
+ _distance_wrap.cdist_hamming_bool_wrap(_convert_to_bool(XA),
+ _convert_to_bool(XB),
+ dm)
+ else:
+ _distance_wrap.cdist_hamming_wrap(_convert_to_double(XA),
+ _convert_to_double(XB), dm)
+ elif mstr in set(['jaccard', 'jacc', 'ja', 'j']):
+ if XA.dtype == np.bool:
+ _distance_wrap.cdist_jaccard_bool_wrap(_convert_to_bool(XA),
+ _convert_to_bool(XB),
+ dm)
+ else:
+ _distance_wrap.cdist_jaccard_wrap(_convert_to_double(XA),
+ _convert_to_double(XB), dm)
+ elif mstr in set(['chebychev', 'chebyshev', 'cheby', 'cheb', 'ch']):
+ _distance_wrap.cdist_chebyshev_wrap(_convert_to_double(XA),
+ _convert_to_double(XB), dm)
+ elif mstr in set(['minkowski', 'mi', 'm', 'pnorm']):
+ _distance_wrap.cdist_minkowski_wrap(_convert_to_double(XA),
+ _convert_to_double(XB), dm, p)
+ elif mstr in set(['wminkowski', 'wmi', 'wm', 'wpnorm']):
+ _distance_wrap.cdist_weighted_minkowski_wrap(_convert_to_double(XA),
+ _convert_to_double(XB),
+ dm, p,
+ _convert_to_double(w))
+ elif mstr in set(['seuclidean', 'se', 's']):
+ if V is not None:
+ V = np.asarray(V, order='c')
+ if type(V) != np.ndarray:
+ raise TypeError('Variance vector V must be a numpy array')
+ if V.dtype != np.double:
+ raise TypeError('Variance vector V must contain doubles.')
+ if len(V.shape) != 1:
+ raise ValueError('Variance vector V must be '
+ 'one-dimensional.')
+ if V.shape[0] != n:
+ raise ValueError('Variance vector V must be of the same '
+ 'dimension as the vectors on which the '
+ 'distances are computed.')
+ # The C code doesn't do striding.
+ [VV] = _copy_arrays_if_base_present([_convert_to_double(V)])
+ else:
+ X = np.vstack([XA, XB])
+ VV = np.var(X, axis=0, ddof=1)
+ X = None
+ del X
+ _distance_wrap.cdist_seuclidean_wrap(_convert_to_double(XA),
+ _convert_to_double(XB), VV, dm)
+ # Need to test whether vectorized cosine works better.
+ # Find out: Is there a dot subtraction operator so I can
+ # subtract matrices in a similar way to multiplying them?
+ # Need to get rid of as much unnecessary C code as possible.
+ elif mstr in set(['cosine', 'cos']):
+ normsA = np.sqrt(np.sum(XA * XA, axis=1))
+ normsB = np.sqrt(np.sum(XB * XB, axis=1))
+ _distance_wrap.cdist_cosine_wrap(_convert_to_double(XA),
+ _convert_to_double(XB), dm,
+ normsA,
+ normsB)
+ elif mstr in set(['correlation', 'co']):
+ XA2 = XA - XA.mean(1)[:, np.newaxis]
+ XB2 = XB - XB.mean(1)[:, np.newaxis]
+ #X2 = X - np.matlib.repmat(np.mean(X, axis=1).reshape(m, 1), 1, n)
+ normsA = np.sqrt(np.sum(XA2 * XA2, axis=1))
+ normsB = np.sqrt(np.sum(XB2 * XB2, axis=1))
+ _distance_wrap.cdist_cosine_wrap(_convert_to_double(XA2),
+ _convert_to_double(XB2),
+ _convert_to_double(dm),
+ _convert_to_double(normsA),
+ _convert_to_double(normsB))
+ elif mstr in set(['mahalanobis', 'mahal', 'mah']):
+ if VI is not None:
+ VI = _convert_to_double(np.asarray(VI, order='c'))
+ if type(VI) != np.ndarray:
+ raise TypeError('VI must be a numpy array.')
+ if VI.dtype != np.double:
+ raise TypeError('The array must contain 64-bit floats.')
+ [VI] = _copy_arrays_if_base_present([VI])
+ else:
+ X = np.vstack([XA, XB])
+ V = np.cov(X.T)
+ X = None
+ del X
+ VI = _convert_to_double(np.linalg.inv(V).T.copy())
+ # (u-v)V^(-1)(u-v)^T
+ _distance_wrap.cdist_mahalanobis_wrap(_convert_to_double(XA),
+ _convert_to_double(XB),
+ VI, dm)
+ elif mstr == 'canberra':
+ _distance_wrap.cdist_canberra_wrap(_convert_to_double(XA),
+ _convert_to_double(XB), dm)
+ elif mstr == 'braycurtis':
+ _distance_wrap.cdist_bray_curtis_wrap(_convert_to_double(XA),
+ _convert_to_double(XB), dm)
+ elif mstr == 'yule':
+ _distance_wrap.cdist_yule_bool_wrap(_convert_to_bool(XA),
+ _convert_to_bool(XB), dm)
+ elif mstr == 'matching':
+ _distance_wrap.cdist_matching_bool_wrap(_convert_to_bool(XA),
+ _convert_to_bool(XB), dm)
+ elif mstr == 'kulsinski':
+ _distance_wrap.cdist_kulsinski_bool_wrap(_convert_to_bool(XA),
+ _convert_to_bool(XB), dm)
+ elif mstr == 'dice':
+ _distance_wrap.cdist_dice_bool_wrap(_convert_to_bool(XA),
+ _convert_to_bool(XB), dm)
+ elif mstr == 'rogerstanimoto':
+ _distance_wrap.cdist_rogerstanimoto_bool_wrap(_convert_to_bool(XA),
+ _convert_to_bool(XB),
+ dm)
+ elif mstr == 'russellrao':
+ _distance_wrap.cdist_russellrao_bool_wrap(_convert_to_bool(XA),
+ _convert_to_bool(XB), dm)
+ elif mstr == 'sokalmichener':
+ _distance_wrap.cdist_sokalmichener_bool_wrap(_convert_to_bool(XA),
+ _convert_to_bool(XB),
+ dm)
+ elif mstr == 'sokalsneath':
+ _distance_wrap.cdist_sokalsneath_bool_wrap(_convert_to_bool(XA),
+ _convert_to_bool(XB),
+ dm)
+ elif metric == 'test_euclidean':
+ dm = cdist(XA, XB, euclidean)
+ elif metric == 'test_seuclidean':
+ if V is None:
+ V = np.var(np.vstack([XA, XB]), axis=0, ddof=1)
+ else:
+ V = np.asarray(V, order='c')
+ dm = cdist(XA, XB, lambda u, v: seuclidean(u, v, V))
+ elif metric == 'test_sqeuclidean':
+ dm = cdist(XA, XB, lambda u, v: sqeuclidean(u, v))
+ elif metric == 'test_braycurtis':
+ dm = cdist(XA, XB, braycurtis)
+ elif metric == 'test_mahalanobis':
+ if VI is None:
+ X = np.vstack([XA, XB])
+ V = np.cov(X.T)
+ VI = np.linalg.inv(V)
+ X = None
+ del X
+ else:
+ VI = np.asarray(VI, order='c')
+ [VI] = _copy_arrays_if_base_present([VI])
+ # (u-v)V^(-1)(u-v)^T
+ dm = cdist(XA, XB, (lambda u, v: mahalanobis(u, v, VI)))
+ elif metric == 'test_canberra':
+ dm = cdist(XA, XB, canberra)
+ elif metric == 'test_cityblock':
+ dm = cdist(XA, XB, cityblock)
+ elif metric == 'test_minkowski':
+ dm = cdist(XA, XB, minkowski, p=p)
+ elif metric == 'test_wminkowski':
+ dm = cdist(XA, XB, wminkowski, p=p, w=w)
+ elif metric == 'test_cosine':
+ dm = cdist(XA, XB, cosine)
+ elif metric == 'test_correlation':
+ dm = cdist(XA, XB, correlation)
+ elif metric == 'test_hamming':
+ dm = cdist(XA, XB, hamming)
+ elif metric == 'test_jaccard':
+ dm = cdist(XA, XB, jaccard)
+ elif metric == 'test_chebyshev' or metric == 'test_chebychev':
+ dm = cdist(XA, XB, chebyshev)
+ elif metric == 'test_yule':
+ dm = cdist(XA, XB, yule)
+ elif metric == 'test_matching':
+ dm = cdist(XA, XB, matching)
+ elif metric == 'test_dice':
+ dm = cdist(XA, XB, dice)
+ elif metric == 'test_kulsinski':
+ dm = cdist(XA, XB, kulsinski)
+ elif metric == 'test_rogerstanimoto':
+ dm = cdist(XA, XB, rogerstanimoto)
+ elif metric == 'test_russellrao':
+ dm = cdist(XA, XB, russellrao)
+ elif metric == 'test_sokalsneath':
+ dm = cdist(XA, XB, sokalsneath)
+ elif metric == 'test_sokalmichener':
+ dm = cdist(XA, XB, sokalmichener)
+ else:
+ raise ValueError('Unknown Distance Metric: %s' % mstr)
+ else:
+ raise TypeError('2nd argument metric must be a string identifier '
+ 'or a function.')
+ return dm
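A minimal usage sketch of this entry point (the import path is an assumption based on the surrounding diff headers; string metrics dispatch to the C wrappers, the 'test_' variants to the pure-Python fallbacks):

>>> import numpy as np
>>> from yt.utilities.spatial.distance import cdist  # assumed path
>>> XA = np.array([[0.0, 0.0], [1.0, 0.0]])
>>> XB = np.array([[0.0, 1.0], [1.0, 1.0]])
>>> cdist(XA, XB, 'euclidean')
array([[ 1.        ,  1.41421356],
       [ 1.41421356,  1.        ]])
>>> np.allclose(cdist(XA, XB, 'euclidean'), cdist(XA, XB, 'test_euclidean'))
True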
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/utilities/spatial/kdtree.py
--- /dev/null
+++ b/yt/utilities/spatial/kdtree.py
@@ -0,0 +1,881 @@
+# Copyright Anne M. Archibald 2008
+# Released under the scipy license
+import sys
+import numpy as np
+from heapq import heappush, heappop
+#import scipy.sparse
+
+__all__ = ['minkowski_distance_p', 'minkowski_distance',
+ 'distance_matrix',
+ 'Rectangle', 'KDTree']
+
+
+def minkowski_distance_p(x, y, p=2):
+ """
+ Compute the p-th power of the L**p distance between x and y.
+
+ For efficiency, this function computes the L**p distance but does
+ not extract the pth root. If p is 1 or infinity, this is equal to
+ the actual L**p distance.
+
+ Parameters
+ ----------
+ x : array_like, M by K
+
+ y : array_like, N by K
+
+ p : float, 1 <= p <= infinity
+ Which Minkowski p-norm to use.
+
+ Examples
+ --------
+ >>> minkowski_distance_p([[0,0],[0,0]], [[1,1],[0,1]])
+ array([2, 1])
+
+ """
+ x = np.asarray(x)
+ y = np.asarray(y)
+ if p == np.inf:
+ return np.amax(np.abs(y-x), axis=-1)
+ elif p == 1:
+ return np.sum(np.abs(y-x), axis=-1)
+ else:
+ return np.sum(np.abs(y-x)**p, axis=-1)
+
+def minkowski_distance(x, y, p=2):
+ """
+ Compute the L**p distance between x and y.
+
+ Parameters
+ ----------
+ x : array_like, M by K
+
+ y : array_like, N by K
+
+ p : float, 1 <= p <= infinity
+ Which Minkowski p-norm to use.
+
+ Examples
+ --------
+ >>> minkowski_distance([[0,0],[0,0]], [[1,1],[0,1]])
+ array([ 1.41421356, 1. ])
+
+ """
+ x = np.asarray(x)
+ y = np.asarray(y)
+ if p == np.inf or p == 1:
+ return minkowski_distance_p(x, y, p)
+ else:
+ return minkowski_distance_p(x, y, p)**(1./p)
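The two helpers agree once the p-th root is applied; a quick check using only the functions defined above:

>>> import numpy as np
>>> x, y = np.array([0.0, 0.0]), np.array([3.0, 4.0])
>>> minkowski_distance_p(x, y, 2)
25.0
>>> minkowski_distance(x, y, 2)
5.0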
+
+class Rectangle(object):
+ """Hyperrectangle class.
+
+ Represents a Cartesian product of intervals.
+ """
+ def __init__(self, maxes, mins):
+ """Construct a hyperrectangle."""
+ self.maxes = np.maximum(maxes,mins).astype(np.float)
+ self.mins = np.minimum(maxes,mins).astype(np.float)
+ self.m, = self.maxes.shape
+
+ def __repr__(self):
+ return "<Rectangle %s>" % zip(self.mins, self.maxes)
+
+ def volume(self):
+ """Total volume."""
+ return np.prod(self.maxes-self.mins)
+
+ def split(self, d, split):
+ """Produce two hyperrectangles by splitting along axis d.
+
+ In general, if you need to compute maximum and minimum
+ distances to the children, it can be done more efficiently
+ by updating the maximum and minimum distances to the parent.
+ """ # FIXME: do this
+ mid = np.copy(self.maxes)
+ mid[d] = split
+ less = Rectangle(self.mins, mid)
+ mid = np.copy(self.mins)
+ mid[d] = split
+ greater = Rectangle(mid, self.maxes)
+ return less, greater
+
+ def min_distance_point(self, x, p=2.):
+ """Compute the minimum distance between x and a point in the hyperrectangle."""
+ return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-x,x-self.maxes)),p)
+
+ def max_distance_point(self, x, p=2.):
+ """Compute the maximum distance between x and a point in the hyperrectangle."""
+ return minkowski_distance(0, np.maximum(self.maxes-x,x-self.mins),p)
+
+ def min_distance_rectangle(self, other, p=2.):
+ """Compute the minimum distance between points in the two hyperrectangles."""
+ return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-other.maxes,other.mins-self.maxes)),p)
+
+ def max_distance_rectangle(self, other, p=2.):
+ """Compute the maximum distance between points in the two hyperrectangles."""
+ return minkowski_distance(0, np.maximum(self.maxes-other.mins,other.maxes-self.mins),p)
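A small illustration of the Rectangle operations the tree traversals below rely on: split partitions the volume, and point distances are computed per axis:

>>> import numpy as np
>>> r = Rectangle(np.array([1.0, 1.0]), np.array([0.0, 0.0]))
>>> less, greater = r.split(0, 0.25)
>>> less.volume(), greater.volume()
(0.25, 0.75)
>>> r.min_distance_point(np.array([2.0, 0.5]))
1.0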
+
+
+class KDTree(object):
+ """
+ kd-tree for quick nearest-neighbor lookup
+
+ This class provides an index into a set of k-dimensional points
+ which can be used to rapidly look up the nearest neighbors of any
+ point.
+
+ The algorithm used is described in Maneewongvatana and Mount 1999.
+ The general idea is that the kd-tree is a binary tree, each of whose
+ nodes represents an axis-aligned hyperrectangle. Each node specifies
+ an axis and splits the set of points based on whether their coordinate
+ along that axis is greater than or less than a particular value.
+
+ During construction, the axis and splitting point are chosen by the
+ "sliding midpoint" rule, which ensures that the cells do not all
+ become long and thin.
+
+ The tree can be queried for the r closest neighbors of any given point
+ (optionally returning only those within some maximum distance of the
+ point). It can also be queried, with a substantial gain in efficiency,
+ for the r approximate closest neighbors.
+
+ For large dimensions (20 is already large) do not expect this to run
+ significantly faster than brute force. High-dimensional nearest-neighbor
+ queries are a substantial open problem in computer science.
+
+ The tree also supports all-neighbors queries, both with arrays of points
+ and with other kd-trees. These do use a reasonably efficient algorithm,
+ but the kd-tree is not necessarily the best data structure for this
+ sort of calculation.
+
+ """
+ def __init__(self, data, leafsize=10):
+ """Construct a kd-tree.
+
+ Parameters
+ ----------
+ data : array_like, shape (n,k)
+ The data points to be indexed. This array is not copied, and
+ so modifying this data will result in bogus results.
+ leafsize : positive int
+ The number of points at which the algorithm switches over to
+ brute-force.
+ """
+ self.data = np.asarray(data)
+ self.n, self.m = np.shape(self.data)
+ self.leafsize = int(leafsize)
+ if self.leafsize<1:
+ raise ValueError("leafsize must be at least 1")
+ self.maxes = np.amax(self.data,axis=0)
+ self.mins = np.amin(self.data,axis=0)
+
+ self.tree = self.__build(np.arange(self.n), self.maxes, self.mins)
+
+ class node(object):
+ if sys.version_info[0] >= 3:
+ def __lt__(self, other): return id(self) < id(other)
+ def __gt__(self, other): return id(self) > id(other)
+ def __le__(self, other): return id(self) <= id(other)
+ def __ge__(self, other): return id(self) >= id(other)
+ def __eq__(self, other): return id(self) == id(other)
+
+ class leafnode(node):
+ def __init__(self, idx):
+ self.idx = idx
+ self.children = len(idx)
+
+ class innernode(node):
+ def __init__(self, split_dim, split, less, greater):
+ self.split_dim = split_dim
+ self.split = split
+ self.less = less
+ self.greater = greater
+ self.children = less.children+greater.children
+
+ def __build(self, idx, maxes, mins):
+ if len(idx)<=self.leafsize:
+ return KDTree.leafnode(idx)
+ else:
+ data = self.data[idx]
+ #maxes = np.amax(data,axis=0)
+ #mins = np.amin(data,axis=0)
+ d = np.argmax(maxes-mins)
+ maxval = maxes[d]
+ minval = mins[d]
+ if maxval==minval:
+ # all points are identical; warn user?
+ return KDTree.leafnode(idx)
+ data = data[:,d]
+
+ # sliding midpoint rule; see Maneewongvatana and Mount 1999
+ # for arguments that this is a good idea.
+ split = (maxval+minval)/2
+ less_idx = np.nonzero(data<=split)[0]
+ greater_idx = np.nonzero(data>split)[0]
+ if len(less_idx)==0:
+ split = np.amin(data)
+ less_idx = np.nonzero(data<=split)[0]
+ greater_idx = np.nonzero(data>split)[0]
+ if len(greater_idx)==0:
+ split = np.amax(data)
+ less_idx = np.nonzero(data<split)[0]
+ greater_idx = np.nonzero(data>=split)[0]
+ if len(less_idx)==0:
+ # _still_ zero? all must have the same value
+ if not np.all(data==data[0]):
+ raise ValueError("Troublesome data array: %s" % data)
+ split = data[0]
+ less_idx = np.arange(len(data)-1)
+ greater_idx = np.array([len(data)-1])
+
+ lessmaxes = np.copy(maxes)
+ lessmaxes[d] = split
+ greatermins = np.copy(mins)
+ greatermins[d] = split
+ return KDTree.innernode(d, split,
+ self.__build(idx[less_idx],lessmaxes,mins),
+ self.__build(idx[greater_idx],maxes,greatermins))
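The sliding-midpoint rule is easy to see on a tiny one-dimensional tree; here the midpoint 1.5 separates two leaves of two points each:

>>> import numpy as np
>>> t = KDTree(np.array([[0.0], [1.0], [2.0], [3.0]]), leafsize=2)
>>> t.tree.split_dim, t.tree.split
(0, 1.5)
>>> t.tree.less.children, t.tree.greater.children
(2, 2)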
+
+ def __query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
+
+ side_distances = np.maximum(0,np.maximum(x-self.maxes,self.mins-x))
+ if p!=np.inf:
+ side_distances**=p
+ min_distance = np.sum(side_distances)
+ else:
+ min_distance = np.amax(side_distances)
+
+ # priority queue for chasing nodes
+ # entries are:
+ # minimum distance between the cell and the target
+ # distances between the nearest side of the cell and the target
+ # the head node of the cell
+ q = [(min_distance,
+ tuple(side_distances),
+ self.tree)]
+ # priority queue for the nearest neighbors
+ # furthest known neighbor first
+ # entries are (-distance**p, i)
+ neighbors = []
+
+ if eps==0:
+ epsfac=1
+ elif p==np.inf:
+ epsfac = 1/(1+eps)
+ else:
+ epsfac = 1/(1+eps)**p
+
+ if p!=np.inf and distance_upper_bound!=np.inf:
+ distance_upper_bound = distance_upper_bound**p
+
+ while q:
+ min_distance, side_distances, node = heappop(q)
+ if isinstance(node, KDTree.leafnode):
+ # brute-force
+ data = self.data[node.idx]
+ ds = minkowski_distance_p(data,x[np.newaxis,:],p)
+ for i in range(len(ds)):
+ if ds[i]<distance_upper_bound:
+ if len(neighbors)==k:
+ heappop(neighbors)
+ heappush(neighbors, (-ds[i], node.idx[i]))
+ if len(neighbors)==k:
+ distance_upper_bound = -neighbors[0][0]
+ else:
+ # we don't push cells that are too far onto the queue at all,
+ # but since the distance_upper_bound decreases, we might get
+ # here even if the cell's too far
+ if min_distance>distance_upper_bound*epsfac:
+ # since this is the nearest cell, we're done, bail out
+ break
+ # compute minimum distances to the children and push them on
+ if x[node.split_dim]<node.split:
+ near, far = node.less, node.greater
+ else:
+ near, far = node.greater, node.less
+
+ # near child is at the same distance as the current node
+ heappush(q,(min_distance, side_distances, near))
+
+ # far child is further by an amount depending only
+ # on the split value
+ sd = list(side_distances)
+ if p == np.inf:
+ min_distance = max(min_distance, abs(node.split-x[node.split_dim]))
+ elif p == 1:
+ sd[node.split_dim] = np.abs(node.split-x[node.split_dim])
+ min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim]
+ else:
+ sd[node.split_dim] = np.abs(node.split-x[node.split_dim])**p
+ min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim]
+
+ # far child might be too far, if so, don't bother pushing it
+ if min_distance<=distance_upper_bound*epsfac:
+ heappush(q,(min_distance, tuple(sd), far))
+
+ if p==np.inf:
+ return sorted([(-d,i) for (d,i) in neighbors])
+ else:
+ return sorted([((-d)**(1./p),i) for (d,i) in neighbors])
+
+ def query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
+ """
+ Query the kd-tree for nearest neighbors
+
+ Parameters
+ ----------
+ x : array_like, last dimension self.m
+ An array of points to query.
+ k : integer
+ The number of nearest neighbors to return.
+ eps : nonnegative float
+ Return approximate nearest neighbors; the kth returned value
+ is guaranteed to be no further than (1+eps) times the
+ distance to the real kth nearest neighbor.
+ p : float, 1<=p<=infinity
+ Which Minkowski p-norm to use.
+ 1 is the sum-of-absolute-values "Manhattan" distance
+ 2 is the usual Euclidean distance
+ infinity is the maximum-coordinate-difference distance
+ distance_upper_bound : nonnegative float
+ Return only neighbors within this distance. This is used to prune
+ tree searches, so if you are doing a series of nearest-neighbor
+ queries, it may help to supply the distance to the nearest neighbor
+ of the most recent point.
+
+ Returns
+ -------
+ d : array of floats
+ The distances to the nearest neighbors.
+ If x has shape tuple+(self.m,), then d has shape tuple if
+ k is one, or tuple+(k,) if k is larger than one. Missing
+ neighbors are indicated with infinite distances. If k is None,
+ then d is an object array of shape tuple, containing lists
+ of distances. In either case the hits are sorted by distance
+ (nearest first).
+ i : array of integers
+ The locations of the neighbors in self.data. i is the same
+ shape as d.
+
+ Examples
+ --------
+ >>> from scipy.spatial import KDTree
+ >>> x, y = np.mgrid[0:5, 2:8]
+ >>> tree = KDTree(zip(x.ravel(), y.ravel()))
+ >>> tree.data
+ array([[0, 2],
+ [0, 3],
+ [0, 4],
+ [0, 5],
+ [0, 6],
+ [0, 7],
+ [1, 2],
+ [1, 3],
+ [1, 4],
+ [1, 5],
+ [1, 6],
+ [1, 7],
+ [2, 2],
+ [2, 3],
+ [2, 4],
+ [2, 5],
+ [2, 6],
+ [2, 7],
+ [3, 2],
+ [3, 3],
+ [3, 4],
+ [3, 5],
+ [3, 6],
+ [3, 7],
+ [4, 2],
+ [4, 3],
+ [4, 4],
+ [4, 5],
+ [4, 6],
+ [4, 7]])
+ >>> pts = np.array([[0, 0], [2.1, 2.9]])
+ >>> tree.query(pts)
+ (array([ 2. , 0.14142136]), array([ 0, 13]))
+
+ """
+ x = np.asarray(x)
+ if np.shape(x)[-1] != self.m:
+ raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x)))
+ if p<1:
+ raise ValueError("Only p-norms with 1<=p<=infinity permitted")
+ retshape = np.shape(x)[:-1]
+ if retshape!=():
+ if k is None:
+ dd = np.empty(retshape,dtype=np.object)
+ ii = np.empty(retshape,dtype=np.object)
+ elif k>1:
+ dd = np.empty(retshape+(k,),dtype=np.float)
+ dd.fill(np.inf)
+ ii = np.empty(retshape+(k,),dtype=np.int)
+ ii.fill(self.n)
+ elif k==1:
+ dd = np.empty(retshape,dtype=np.float)
+ dd.fill(np.inf)
+ ii = np.empty(retshape,dtype=np.int)
+ ii.fill(self.n)
+ else:
+ raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None")
+ for c in np.ndindex(retshape):
+ hits = self.__query(x[c], k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
+ if k is None:
+ dd[c] = [d for (d,i) in hits]
+ ii[c] = [i for (d,i) in hits]
+ elif k>1:
+ for j in range(len(hits)):
+ dd[c+(j,)], ii[c+(j,)] = hits[j]
+ elif k==1:
+ if len(hits)>0:
+ dd[c], ii[c] = hits[0]
+ else:
+ dd[c] = np.inf
+ ii[c] = self.n
+ return dd, ii
+ else:
+ hits = self.__query(x, k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
+ if k is None:
+ return [d for (d,i) in hits], [i for (d,i) in hits]
+ elif k==1:
+ if len(hits)>0:
+ return hits[0]
+ else:
+ return np.inf, self.n
+ elif k>1:
+ dd = np.empty(k,dtype=np.float)
+ dd.fill(np.inf)
+ ii = np.empty(k,dtype=np.int)
+ ii.fill(self.n)
+ for j in range(len(hits)):
+ dd[j], ii[j] = hits[j]
+ return dd, ii
+ else:
+ raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None")
+
+
+ def __query_ball_point(self, x, r, p=2., eps=0):
+ R = Rectangle(self.maxes, self.mins)
+
+ def traverse_checking(node, rect):
+ if rect.min_distance_point(x, p) > r / (1. + eps):
+ return []
+ elif rect.max_distance_point(x, p) < r * (1. + eps):
+ return traverse_no_checking(node)
+ elif isinstance(node, KDTree.leafnode):
+ d = self.data[node.idx]
+ return node.idx[minkowski_distance(d, x, p) <= r].tolist()
+ else:
+ less, greater = rect.split(node.split_dim, node.split)
+ return traverse_checking(node.less, less) + \
+ traverse_checking(node.greater, greater)
+
+ def traverse_no_checking(node):
+ if isinstance(node, KDTree.leafnode):
+ return node.idx.tolist()
+ else:
+ return traverse_no_checking(node.less) + \
+ traverse_no_checking(node.greater)
+
+ return traverse_checking(self.tree, R)
+
+ def query_ball_point(self, x, r, p=2., eps=0):
+ """Find all points within distance r of point(s) x.
+
+ Parameters
+ ----------
+ x : array_like, shape tuple + (self.m,)
+ The point or points to search for neighbors of.
+ r : positive float
+ The radius of points to return.
+ p : float, optional
+ Which Minkowski p-norm to use. Should be in the range [1, inf].
+ eps : nonnegative float, optional
+ Approximate search. Branches of the tree are not explored if their
+ nearest points are further than ``r / (1 + eps)``, and branches are
+ added in bulk if their furthest points are nearer than
+ ``r * (1 + eps)``.
+
+ Returns
+ -------
+ results : list or array of lists
+ If `x` is a single point, returns a list of the indices of the
+ neighbors of `x`. If `x` is an array of points, returns an object
+ array of shape tuple containing lists of neighbors.
+
+ Notes
+ -----
+ If you have many points whose neighbors you want to find, you may save
+ substantial amounts of time by putting them in a KDTree and using
+ query_ball_tree.
+
+ Examples
+ --------
+ >>> from scipy import spatial
+ >>> x, y = np.mgrid[0:4, 0:4]
+ >>> points = zip(x.ravel(), y.ravel())
+ >>> tree = spatial.KDTree(points)
+ >>> tree.query_ball_point([2, 0], 1)
+ [4, 8, 9, 12]
+
+ """
+ x = np.asarray(x)
+ if x.shape[-1] != self.m:
+ raise ValueError("Searching for a %d-dimensional point in a " \
+ "%d-dimensional KDTree" % (x.shape[-1], self.m))
+ if len(x.shape) == 1:
+ return self.__query_ball_point(x, r, p, eps)
+ else:
+ retshape = x.shape[:-1]
+ result = np.empty(retshape, dtype=np.object)
+ for c in np.ndindex(retshape):
+ result[c] = self.__query_ball_point(x[c], r, p=p, eps=eps)
+ return result
+
+ def query_ball_tree(self, other, r, p=2., eps=0):
+ """Find all pairs of points whose distance is at most r
+
+ Parameters
+ ----------
+
+ other : KDTree
+ The tree containing points to search against
+ r : positive float
+ The maximum distance
+ p : float 1<=p<=infinity
+ Which Minkowski norm to use
+ eps : nonnegative float
+ Approximate search. Branches of the tree are not explored
+ if their nearest points are further than r/(1+eps), and branches
+ are added in bulk if their furthest points are nearer than r*(1+eps).
+
+ Returns
+ -------
+
+ results : list of lists
+ For each element self.data[i] of this tree, results[i] is a list of the
+ indices of its neighbors in other.data.
+ """
+ results = [[] for i in range(self.n)]
+ def traverse_checking(node1, rect1, node2, rect2):
+ if rect1.min_distance_rectangle(rect2, p)>r/(1.+eps):
+ return
+ elif rect1.max_distance_rectangle(rect2, p)<r*(1.+eps):
+ traverse_no_checking(node1, node2)
+ elif isinstance(node1, KDTree.leafnode):
+ if isinstance(node2, KDTree.leafnode):
+ d = other.data[node2.idx]
+ for i in node1.idx:
+ results[i] += node2.idx[minkowski_distance(d,self.data[i],p)<=r].tolist()
+ else:
+ less, greater = rect2.split(node2.split_dim, node2.split)
+ traverse_checking(node1,rect1,node2.less,less)
+ traverse_checking(node1,rect1,node2.greater,greater)
+ elif isinstance(node2, KDTree.leafnode):
+ less, greater = rect1.split(node1.split_dim, node1.split)
+ traverse_checking(node1.less,less,node2,rect2)
+ traverse_checking(node1.greater,greater,node2,rect2)
+ else:
+ less1, greater1 = rect1.split(node1.split_dim, node1.split)
+ less2, greater2 = rect2.split(node2.split_dim, node2.split)
+ traverse_checking(node1.less,less1,node2.less,less2)
+ traverse_checking(node1.less,less1,node2.greater,greater2)
+ traverse_checking(node1.greater,greater1,node2.less,less2)
+ traverse_checking(node1.greater,greater1,node2.greater,greater2)
+
+ def traverse_no_checking(node1, node2):
+ if isinstance(node1, KDTree.leafnode):
+ if isinstance(node2, KDTree.leafnode):
+ for i in node1.idx:
+ results[i] += node2.idx.tolist()
+ else:
+ traverse_no_checking(node1, node2.less)
+ traverse_no_checking(node1, node2.greater)
+ else:
+ traverse_no_checking(node1.less, node2)
+ traverse_no_checking(node1.greater, node2)
+
+ traverse_checking(self.tree, Rectangle(self.maxes, self.mins),
+ other.tree, Rectangle(other.maxes, other.mins))
+ return results
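A small sanity sketch of query_ball_tree on two one-dimensional trees; each entry of the result lists the neighbors in other.data:

>>> import numpy as np
>>> t1 = KDTree(np.array([[0.0], [1.0]]))
>>> t2 = KDTree(np.array([[0.1], [0.9], [5.0]]))
>>> t1.query_ball_tree(t2, 0.2)
[[0], [1]]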
+
+ def query_pairs(self, r, p=2., eps=0):
+ """Find all pairs of points whose distance is at most r
+
+ Parameters
+ ----------
+
+ r : positive float
+ The maximum distance
+ p : float 1<=p<=infinity
+ Which Minkowski norm to use
+ eps : nonnegative float
+ Approximate search. Branches of the tree are not explored
+ if their nearest points are further than r/(1+eps), and branches
+ are added in bulk if their furthest points are nearer than r*(1+eps).
+
+ Returns
+ -------
+
+ results : set
+ set of pairs (i,j), i<j, for which the corresponding positions are
+ close.
+
+ """
+ results = set()
+ visited = set()
+ def test_set_visited(node1, node2):
+ i, j = sorted((id(node1),id(node2)))
+ if (i,j) in visited:
+ return True
+ else:
+ visited.add((i,j))
+ return False
+ def traverse_checking(node1, rect1, node2, rect2):
+ if test_set_visited(node1, node2):
+ return
+
+ if id(node2)<id(node1):
+ # This node pair will be visited in the other order
+ #return
+ pass
+
+ if isinstance(node1, KDTree.leafnode):
+ if isinstance(node2, KDTree.leafnode):
+ d = self.data[node2.idx]
+ for i in node1.idx:
+ for j in node2.idx[minkowski_distance(d,self.data[i],p)<=r]:
+ if i<j:
+ results.add((i,j))
+ elif j<i:
+ results.add((j,i))
+ else:
+ less, greater = rect2.split(node2.split_dim, node2.split)
+ traverse_checking(node1,rect1,node2.less,less)
+ traverse_checking(node1,rect1,node2.greater,greater)
+ elif isinstance(node2, KDTree.leafnode):
+ less, greater = rect1.split(node1.split_dim, node1.split)
+ traverse_checking(node1.less,less,node2,rect2)
+ traverse_checking(node1.greater,greater,node2,rect2)
+ elif rect1.min_distance_rectangle(rect2, p)>r/(1.+eps):
+ return
+ elif rect1.max_distance_rectangle(rect2, p)<r*(1.+eps):
+ traverse_no_checking(node1.less, node2)
+ traverse_no_checking(node1.greater, node2)
+ else:
+ less1, greater1 = rect1.split(node1.split_dim, node1.split)
+ less2, greater2 = rect2.split(node2.split_dim, node2.split)
+ traverse_checking(node1.less,less1,node2.less,less2)
+ traverse_checking(node1.less,less1,node2.greater,greater2)
+ traverse_checking(node1.greater,greater1,node2.less,less2)
+ traverse_checking(node1.greater,greater1,node2.greater,greater2)
+
+ def traverse_no_checking(node1, node2):
+ if test_set_visited(node1, node2):
+ return
+
+ if id(node2)<id(node1):
+ # This node pair will be visited in the other order
+ #return
+ pass
+ if isinstance(node1, KDTree.leafnode):
+ if isinstance(node2, KDTree.leafnode):
+ for i in node1.idx:
+ for j in node2.idx:
+ if i<j:
+ results.add((i,j))
+ elif j<i:
+ results.add((j,i))
+ else:
+ traverse_no_checking(node1, node2.less)
+ traverse_no_checking(node1, node2.greater)
+ else:
+ traverse_no_checking(node1.less, node2)
+ traverse_no_checking(node1.greater, node2)
+
+ traverse_checking(self.tree, Rectangle(self.maxes, self.mins),
+ self.tree, Rectangle(self.maxes, self.mins))
+ return results
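query_pairs is the single-tree analogue; each unordered close pair is reported once:

>>> import numpy as np
>>> t = KDTree(np.array([[0.0], [0.1], [3.0]]))
>>> sorted(t.query_pairs(0.5))
[(0, 1)]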
+
+
+ def count_neighbors(self, other, r, p=2.):
+ """Count how many nearby pairs can be formed.
+
+ Count the number of pairs (x1,x2) that can be formed, with x1 drawn
+ from self and x2 drawn from other, and where distance(x1,x2,p)<=r.
+ This is the "two-point correlation" described in Gray and Moore 2000,
+ "N-body problems in statistical learning", and the code here is based
+ on their algorithm.
+
+ Parameters
+ ----------
+
+ other : KDTree
+
+ r : float or one-dimensional array of floats
+ The radius to produce a count for. Multiple radii are searched with a single
+ tree traversal.
+ p : float, 1<=p<=infinity
+ Which Minkowski p-norm to use
+
+ Returns
+ -------
+
+ result : integer or one-dimensional array of integers
+ The number of pairs. Note that this is internally stored in a numpy int,
+ and so may overflow if the count is very large (on the order of two billion).
+ """
+
+ def traverse(node1, rect1, node2, rect2, idx):
+ min_r = rect1.min_distance_rectangle(rect2,p)
+ max_r = rect1.max_distance_rectangle(rect2,p)
+ c_greater = r[idx]>max_r
+ result[idx[c_greater]] += node1.children*node2.children
+ idx = idx[(min_r<=r[idx]) & (r[idx]<=max_r)]
+ if len(idx)==0:
+ return
+
+ if isinstance(node1,KDTree.leafnode):
+ if isinstance(node2,KDTree.leafnode):
+ ds = minkowski_distance(self.data[node1.idx][:,np.newaxis,:],
+ other.data[node2.idx][np.newaxis,:,:],
+ p).ravel()
+ ds.sort()
+ result[idx] += np.searchsorted(ds,r[idx],side='right')
+ else:
+ less, greater = rect2.split(node2.split_dim, node2.split)
+ traverse(node1, rect1, node2.less, less, idx)
+ traverse(node1, rect1, node2.greater, greater, idx)
+ else:
+ if isinstance(node2,KDTree.leafnode):
+ less, greater = rect1.split(node1.split_dim, node1.split)
+ traverse(node1.less, less, node2, rect2, idx)
+ traverse(node1.greater, greater, node2, rect2, idx)
+ else:
+ less1, greater1 = rect1.split(node1.split_dim, node1.split)
+ less2, greater2 = rect2.split(node2.split_dim, node2.split)
+ traverse(node1.less,less1,node2.less,less2,idx)
+ traverse(node1.less,less1,node2.greater,greater2,idx)
+ traverse(node1.greater,greater1,node2.less,less2,idx)
+ traverse(node1.greater,greater1,node2.greater,greater2,idx)
+ R1 = Rectangle(self.maxes, self.mins)
+ R2 = Rectangle(other.maxes, other.mins)
+ if np.shape(r) == ():
+ r = np.array([r])
+ result = np.zeros(1,dtype=int)
+ traverse(self.tree, R1, other.tree, R2, np.arange(1))
+ return result[0]
+ elif len(np.shape(r))==1:
+ r = np.asarray(r)
+ n, = r.shape
+ result = np.zeros(n,dtype=int)
+ traverse(self.tree, R1, other.tree, R2, np.arange(n))
+ return result
+ else:
+ raise ValueError("r must be either a single value or a one-dimensional array of values")
+
+ def sparse_distance_matrix(self, other, max_distance, p=2.):
+ """Compute a sparse distance matrix
+
+ Computes a distance matrix between two KDTrees, leaving as zero
+ any distance greater than max_distance.
+
+ Parameters
+ ----------
+
+ other : KDTree
+
+ max_distance : positive float
+
+ Returns
+ -------
+
+ result : dok_matrix
+ Sparse matrix representing the results in "dictionary of keys" format.
+ """
+ import scipy.sparse # deferred: the module-level import above is commented out
+ result = scipy.sparse.dok_matrix((self.n,other.n))
+
+ def traverse(node1, rect1, node2, rect2):
+ if rect1.min_distance_rectangle(rect2, p)>max_distance:
+ return
+ elif isinstance(node1, KDTree.leafnode):
+ if isinstance(node2, KDTree.leafnode):
+ for i in node1.idx:
+ for j in node2.idx:
+ d = minkowski_distance(self.data[i],other.data[j],p)
+ if d<=max_distance:
+ result[i,j] = d
+ else:
+ less, greater = rect2.split(node2.split_dim, node2.split)
+ traverse(node1,rect1,node2.less,less)
+ traverse(node1,rect1,node2.greater,greater)
+ elif isinstance(node2, KDTree.leafnode):
+ less, greater = rect1.split(node1.split_dim, node1.split)
+ traverse(node1.less,less,node2,rect2)
+ traverse(node1.greater,greater,node2,rect2)
+ else:
+ less1, greater1 = rect1.split(node1.split_dim, node1.split)
+ less2, greater2 = rect2.split(node2.split_dim, node2.split)
+ traverse(node1.less,less1,node2.less,less2)
+ traverse(node1.less,less1,node2.greater,greater2)
+ traverse(node1.greater,greater1,node2.less,less2)
+ traverse(node1.greater,greater1,node2.greater,greater2)
+ traverse(self.tree, Rectangle(self.maxes, self.mins),
+ other.tree, Rectangle(other.maxes, other.mins))
+
+ return result
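With the deferred scipy.sparse import above in place, a short sketch; only distances within max_distance are stored, everything else remains an implicit zero:

>>> import numpy as np
>>> t1 = KDTree(np.array([[0.0], [1.0]]))
>>> t2 = KDTree(np.array([[0.25]]))
>>> t1.sparse_distance_matrix(t2, 0.5).todense()
matrix([[ 0.25],
        [ 0.  ]])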
+
+
+def distance_matrix(x,y,p=2,threshold=1000000):
+ """
+ Compute the distance matrix.
+
+ Returns the matrix of all pair-wise distances.
+
+ Parameters
+ ----------
+ x : array_like, `M` by `K`
+ Matrix of M vectors, each of dimension K.
+ y : array_like, `N` by `K`
+ Matrix of N vectors, each of dimension K.
+ p : float, 1 <= p <= infinity
+ Which Minkowski p-norm to use.
+ threshold : positive integer
+ If `M * N * K` > threshold, use a Python loop instead of creating
+ a very large temporary array.
+
+ Returns
+ -------
+ result : array_like, `M` by `N`
+
+ Examples
+ --------
+ >>> distance_matrix([[0,0],[0,1]], [[1,0],[1,1]])
+ array([[ 1. , 1.41421356],
+ [ 1.41421356, 1. ]])
+
+ """
+
+ x = np.asarray(x)
+ m, k = x.shape
+ y = np.asarray(y)
+ n, kk = y.shape
+
+ if k != kk:
+ raise ValueError("x contains %d-dimensional vectors but y contains %d-dimensional vectors" % (k, kk))
+
+ if m*n*k <= threshold:
+ return minkowski_distance(x[:,np.newaxis,:],y[np.newaxis,:,:],p)
+ else:
+ result = np.empty((m,n),dtype=np.float) #FIXME: figure out the best dtype
+ if m<n:
+ for i in range(m):
+ result[i,:] = minkowski_distance(x[i],y,p)
+ else:
+ for j in range(n):
+ result[:,j] = minkowski_distance(x,y[j],p)
+ return result
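Both paths of distance_matrix produce identical values; the threshold only trades the M-by-N-by-K temporary for a Python loop:

>>> import numpy as np
>>> x = np.array([[0.0, 0.0], [0.0, 1.0]])
>>> y = np.array([[1.0, 0.0], [1.0, 1.0]])
>>> np.allclose(distance_matrix(x, y),
...             distance_matrix(x, y, threshold=1))
True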
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/utilities/spatial/setup.py
--- /dev/null
+++ b/yt/utilities/spatial/setup.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+
+from os.path import join
+
+def configuration(parent_package = '', top_path = None):
+ from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
+ from numpy.distutils.system_info import get_info
+ from distutils.sysconfig import get_python_inc
+
+ config = Configuration('spatial', parent_package, top_path)
+
+ config.add_data_dir('tests')
+
+# qhull_src = ['geom2.c', 'geom.c', 'global.c', 'io.c', 'libqhull.c',
+# 'mem.c', 'merge.c', 'poly2.c', 'poly.c', 'qset.c',
+# 'random.c', 'rboxlib.c', 'stat.c', 'user.c', 'usermem.c',
+# 'userprintf.c']
+
+# config.add_library('qhull',
+# sources=[join('qhull', 'src', x) for x in qhull_src],
+# include_dirs=[get_python_inc(),
+# get_numpy_include_dirs()],
+# # XXX: GCC dependency!
+# #extra_compiler_args=['-fno-strict-aliasing'],
+# )
+
+# lapack = dict(get_info('lapack_opt'))
+# try:
+# libs = ['qhull'] + lapack.pop('libraries')
+# except KeyError:
+# libs = ['qhull']
+# config.add_extension('qhull',
+# sources=['qhull.c'],
+# libraries=libs,
+# **lapack)
+
+ config.add_extension('ckdtree', sources=['ckdtree.pyx'],
+ libraries=["m"],
+ include_dirs = [get_numpy_include_dirs()])
+
+ config.add_extension('_distance_wrap',
+ sources=[join('src', 'distance_wrap.c'), join('src', 'distance.c')],
+ include_dirs = [get_numpy_include_dirs()])
+
+ return config
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(maintainer = "SciPy Developers",
+ author = "Anne Archibald",
+ maintainer_email = "scipy-dev at scipy.org",
+ description = "Spatial algorithms and data structures",
+ url = "http://www.scipy.org",
+ license = "SciPy License (BSD Style)",
+ **configuration(top_path='').todict()
+ )
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/utilities/spatial/setupscons.py
--- /dev/null
+++ b/yt/utilities/spatial/setupscons.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+
+from os.path import join
+
+def configuration(parent_package = '', top_path = None):
+ from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
+ config = Configuration('spatial', parent_package, top_path)
+
+ config.add_data_dir('tests')
+ config.add_sconscript('SConstruct')
+
+ return config
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(maintainer = "SciPy Developers",
+ author = "Anne Archibald",
+ maintainer_email = "scipy-dev at scipy.org",
+ description = "Spatial algorithms and data structures",
+ url = "http://www.scipy.org",
+ license = "SciPy License (BSD Style)",
+ **configuration(top_path='').todict()
+ )
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/utilities/spatial/src/common.h
--- /dev/null
+++ b/yt/utilities/spatial/src/common.h
@@ -0,0 +1,70 @@
+/**
+ * common.h
+ *
+ * Author: Damian Eads
+ * Date: September 22, 2007 (moved into new file on June 8, 2008)
+ *
+ * Copyright (c) 2007, 2008, Damian Eads. All rights reserved.
+ * Adapted for incorporation into Scipy, April 9, 2008.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of the author nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CLUSTER_COMMON_H
+#define _CLUSTER_COMMON_H
+
+#define CPY_MAX(_x, _y) ((_x > _y) ? (_x) : (_y))
+#define CPY_MIN(_x, _y) ((_x < _y) ? (_x) : (_y))
+
+#define NCHOOSE2(_n) ((_n)*(_n-1)/2)
+
+#define CPY_BITS_PER_CHAR (sizeof(unsigned char) * 8)
+#define CPY_FLAG_ARRAY_SIZE_BYTES(num_bits) (CPY_CEIL_DIV((num_bits), \
+ CPY_BITS_PER_CHAR))
+#define CPY_GET_BIT(_xx, i) (((_xx)[(i) / CPY_BITS_PER_CHAR] >> \
+ ((CPY_BITS_PER_CHAR-1) - \
+ ((i) % CPY_BITS_PER_CHAR))) & 0x1)
+#define CPY_SET_BIT(_xx, i) ((_xx)[(i) / CPY_BITS_PER_CHAR] |= \
+ ((0x1) << ((CPY_BITS_PER_CHAR-1) \
+ -((i) % CPY_BITS_PER_CHAR))))
+#define CPY_CLEAR_BIT(_xx, i) ((_xx)[(i) / CPY_BITS_PER_CHAR] &= \
+ ~((0x1) << ((CPY_BITS_PER_CHAR-1) \
+ -((i) % CPY_BITS_PER_CHAR))))
+
+#ifndef CPY_CEIL_DIV
+#define CPY_CEIL_DIV(x, y) ((((double)x)/(double)y) == \
+ ((double)((x)/(y))) ? ((x)/(y)) : ((x)/(y) + 1))
+#endif
+
+
+#ifdef CPY_DEBUG
+#define CPY_DEBUG_MSG(...) fprintf(stderr, __VA_ARGS__)
+#else
+#define CPY_DEBUG_MSG(...)
+#endif
+
+#endif
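For illustration, a rough Python rendering of the flag-array macros above (one boolean per bit, most significant bit first within each byte); the helper names are made up for the sketch:

BITS_PER_CHAR = 8  # CPY_BITS_PER_CHAR for 8-bit chars

def get_bit(buf, i):
    # CPY_GET_BIT: shift bit i down to position 0 and mask it off
    return (buf[i // BITS_PER_CHAR] >>
            ((BITS_PER_CHAR - 1) - (i % BITS_PER_CHAR))) & 0x1

def set_bit(buf, i):
    # CPY_SET_BIT: OR in a mask with a single 1 at bit i's position
    buf[i // BITS_PER_CHAR] |= 0x1 << ((BITS_PER_CHAR - 1) -
                                       (i % BITS_PER_CHAR))

buf = bytearray(2)               # room for 16 flags
set_bit(buf, 3); set_bit(buf, 10)
assert (get_bit(buf, 3), get_bit(buf, 4), get_bit(buf, 10)) == (1, 0, 1)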
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/utilities/spatial/src/distance.c
--- /dev/null
+++ b/yt/utilities/spatial/src/distance.c
@@ -0,0 +1,958 @@
+/**
+ * distance.c
+ *
+ * Author: Damian Eads
+ * Date: September 22, 2007 (moved to new file on June 8, 2008)
+ *
+ * Copyright (c) 2007, 2008, Damian Eads. All rights reserved.
+ * Adapted for incorporation into Scipy, April 9, 2008.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of the author nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <Python.h>
+#include <numpy/ndarrayobject.h>
+
+#include <math.h>
+#include <stdlib.h>
+#include "common.h"
+#include "distance.h"
+
+static NPY_INLINE double euclidean_distance(const double *u, const double *v, int n) {
+ int i = 0;
+ double s = 0.0, d;
+ for (i = 0; i < n; i++) {
+ d = u[i] - v[i];
+ s = s + d * d;
+ }
+ return sqrt(s);
+}
+
+static NPY_INLINE double ess_distance(const double *u, const double *v, int n) {
+ int i = 0;
+ double s = 0.0, d;
+ for (i = 0; i < n; i++) {
+ d = fabs(u[i] - v[i]);
+ s = s + d * d;
+ }
+ return s;
+}
+
+static NPY_INLINE double chebyshev_distance(const double *u, const double *v, int n) {
+ int i = 0;
+ double d, maxv = 0.0;
+ for (i = 0; i < n; i++) {
+ d = fabs(u[i] - v[i]);
+ if (d > maxv) {
+ maxv = d;
+ }
+ }
+ return maxv;
+}
+
+static NPY_INLINE double canberra_distance(const double *u, const double *v, int n) {
+ int i;
+ double snum = 0.0, sdenom = 0.0, tot = 0.0;
+ for (i = 0; i < n; i++) {
+ snum = fabs(u[i] - v[i]);
+ sdenom = fabs(u[i]) + fabs(v[i]);
+ if (sdenom > 0.0) {
+ tot += snum / sdenom;
+ }
+ }
+ return tot;
+}
+
+static NPY_INLINE double bray_curtis_distance(const double *u, const double *v, int n) {
+ int i;
+ double s1 = 0.0, s2 = 0.0;
+ for (i = 0; i < n; i++) {
+ s1 += fabs(u[i] - v[i]);
+ s2 += fabs(u[i] + v[i]);
+ }
+ return s1 / s2;
+}
+
+static NPY_INLINE double mahalanobis_distance(const double *u, const double *v,
+ const double *covinv, double *dimbuf1,
+ double *dimbuf2, int n) {
+ int i, j;
+ double s;
+ const double *covrow = covinv;
+ for (i = 0; i < n; i++) {
+ dimbuf1[i] = u[i] - v[i];
+ }
+ for (i = 0; i < n; i++) {
+ covrow = covinv + (i * n);
+ s = 0.0;
+ for (j = 0; j < n; j++) {
+ s += dimbuf1[j] * covrow[j];
+ }
+ dimbuf2[i] = s;
+ }
+ s = 0.0;
+ for (i = 0; i < n; i++) {
+ s += dimbuf1[i] * dimbuf2[i];
+ }
+ return sqrt(s);
+}
+
+double hamming_distance(const double *u, const double *v, int n) {
+ int i = 0;
+ double s = 0.0;
+ for (i = 0; i < n; i++) {
+ s = s + (u[i] != v[i]);
+ }
+ return s / (double)n;
+}
+
+static NPY_INLINE double hamming_distance_bool(const char *u, const char *v, int n) {
+ int i = 0;
+ double s = 0.0;
+ for (i = 0; i < n; i++) {
+ s = s + (u[i] != v[i]);
+ }
+ return s / (double)n;
+}
+
+static NPY_INLINE double yule_distance_bool(const char *u, const char *v, int n) {
+ int i = 0;
+ int ntt = 0, nff = 0, nft = 0, ntf = 0;
+ for (i = 0; i < n; i++) {
+ ntt += (u[i] && v[i]);
+ ntf += (u[i] && !v[i]);
+ nft += (!u[i] && v[i]);
+ nff += (!u[i] && !v[i]);
+ }
+ return (2.0 * ntf * nft) / (double)(ntt * nff + ntf * nft);
+}
+
+static NPY_INLINE double matching_distance_bool(const char *u, const char *v, int n) {
+ int i = 0;
+ int nft = 0, ntf = 0;
+ for (i = 0; i < n; i++) {
+ ntf += (u[i] && !v[i]);
+ nft += (!u[i] && v[i]);
+ }
+ return (double)(ntf + nft) / (double)(n);
+}
+
+static NPY_INLINE double dice_distance_bool(const char *u, const char *v, int n) {
+ int i = 0;
+ int ntt = 0, nft = 0, ntf = 0;
+ for (i = 0; i < n; i++) {
+ ntt += (u[i] && v[i]);
+ ntf += (u[i] && !v[i]);
+ nft += (!u[i] && v[i]);
+ }
+ return (double)(nft + ntf) / (double)(2.0 * ntt + ntf + nft);
+}
+
+
+static NPY_INLINE double rogerstanimoto_distance_bool(const char *u, const char *v, int n) {
+ int i = 0;
+ int ntt = 0, nff = 0, nft = 0, ntf = 0;
+ for (i = 0; i < n; i++) {
+ ntt += (u[i] && v[i]);
+ ntf += (u[i] && !v[i]);
+ nft += (!u[i] && v[i]);
+ nff += (!u[i] && !v[i]);
+ }
+ return (2.0 * (ntf + nft)) / ((double)ntt + nff + (2.0 * (ntf + nft)));
+}
+
+static NPY_INLINE double russellrao_distance_bool(const char *u, const char *v, int n) {
+ int i = 0;
+ /** int nff = 0, nft = 0, ntf = 0;**/
+ int ntt = 0;
+ for (i = 0; i < n; i++) {
+ /** nff += (!u[i] && !v[i]);
+ ntf += (u[i] && !v[i]);
+ nft += (!u[i] && v[i]);**/
+ ntt += (u[i] && v[i]);
+ }
+ /** return (double)(ntf + nft + nff) / (double)n;**/
+ return (double) (n - ntt) / (double) n;
+}
+
+static NPY_INLINE double kulsinski_distance_bool(const char *u, const char *v, int n) {
+ int _i = 0;
+ int ntt = 0, nft = 0, ntf = 0, nff = 0;
+ for (_i = 0; _i < n; _i++) {
+ ntt += (u[_i] && v[_i]);
+ ntf += (u[_i] && !v[_i]);
+ nft += (!u[_i] && v[_i]);
+ nff += (!u[_i] && !v[_i]);
+ }
+ return ((double)(ntf + nft - ntt + n)) / ((double)(ntf + nft + n));
+}
+
+static NPY_INLINE double sokalsneath_distance_bool(const char *u, const char *v, int n) {
+ int _i = 0;
+ int ntt = 0, nft = 0, ntf = 0;
+ for (_i = 0; _i < n; _i++) {
+ ntt += (u[_i] && v[_i]);
+ ntf += (u[_i] && !v[_i]);
+ nft += (!u[_i] && v[_i]);
+ }
+ return (2.0 * (ntf + nft))/(2.0 * (ntf + nft) + ntt);
+}
+
+static NPY_INLINE double sokalmichener_distance_bool(const char *u, const char *v, int n) {
+ int _i = 0;
+ int ntt = 0, nft = 0, ntf = 0, nff = 0;
+ for (_i = 0; _i < n; _i++) {
+ ntt += (u[_i] && v[_i]);
+ nff += (!u[_i] && !v[_i]);
+ ntf += (u[_i] && !v[_i]);
+ nft += (!u[_i] && v[_i]);
+ }
+ return (2.0 * (ntf + nft))/(2.0 * (ntf + nft) + ntt + nff);
+}
+
+static NPY_INLINE double jaccard_distance(const double *u, const double *v, int n) {
+ int i = 0;
+ double denom = 0.0, num = 0.0;
+ for (i = 0; i < n; i++) {
+ num += (u[i] != v[i]) && ((u[i] != 0.0) || (v[i] != 0.0));
+ denom += (u[i] != 0.0) || (v[i] != 0.0);
+ }
+ return num / denom;
+}
+
+static NPY_INLINE double jaccard_distance_bool(const char *u, const char *v, int n) {
+ int i = 0;
+ double num = 0.0, denom = 0.0;
+ for (i = 0; i < n; i++) {
+ num += (u[i] != v[i]) && ((u[i] != 0) || (v[i] != 0));
+ denom += (u[i] != 0) || (v[i] != 0);
+ }
+ return num / denom;
+}
+
+static NPY_INLINE double dot_product(const double *u, const double *v, int n) {
+ int i;
+ double s = 0.0;
+ for (i = 0; i < n; i++) {
+ s += u[i] * v[i];
+ }
+ return s;
+}
+
+static NPY_INLINE double cosine_distance(const double *u, const double *v, int n,
+ const double nu, const double nv) {
+ return 1.0 - (dot_product(u, v, n) / (nu * nv));
+}
+
+static NPY_INLINE double seuclidean_distance(const double *var,
+ const double *u, const double *v, int n) {
+ int i = 0;
+ double s = 0.0, d;
+ for (i = 0; i < n; i++) {
+ d = u[i] - v[i];
+ s = s + (d * d) / var[i];
+ }
+ return sqrt(s);
+}
+
+static NPY_INLINE double city_block_distance(const double *u, const double *v, int n) {
+ int i = 0;
+ double s = 0.0, d;
+ for (i = 0; i < n; i++) {
+ d = fabs(u[i] - v[i]);
+ s = s + d;
+ }
+ return s;
+}
+
+double minkowski_distance(const double *u, const double *v, int n, double p) {
+ int i = 0;
+ double s = 0.0, d;
+ for (i = 0; i < n; i++) {
+ d = fabs(u[i] - v[i]);
+ s = s + pow(d, p);
+ }
+ return pow(s, 1.0 / p);
+}
+
+double weighted_minkowski_distance(const double *u, const double *v, int n, double p, const double *w) {
+ int i = 0;
+ double s = 0.0, d;
+ for (i = 0; i < n; i++) {
+ d = fabs(u[i] - v[i]) * w[i];
+ s = s + pow(d, p);
+ }
+ return pow(s, 1.0 / p);
+}
+
+void compute_mean_vector(double *res, const double *X, int m, int n) {
+ int i, j;
+ const double *v;
+ for (i = 0; i < n; i++) {
+ res[i] = 0.0;
+ }
+ for (j = 0; j < m; j++) {
+
+ v = X + (j * n);
+ for (i = 0; i < n; i++) {
+ res[i] += v[i];
+ }
+ }
+ for (i = 0; i < n; i++) {
+ res[i] /= (double)m;
+ }
+}
+
+void pdist_euclidean(const double *X, double *dm, int m, int n) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = euclidean_distance(u, v, n);
+ }
+ }
+}
+
+void pdist_mahalanobis(const double *X, const double *covinv,
+ double *dm, int m, int n) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ double *dimbuf1, *dimbuf2;
+ dimbuf1 = (double*)malloc(sizeof(double) * 2 * n);
+ dimbuf2 = dimbuf1 + n;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = mahalanobis_distance(u, v, covinv, dimbuf1, dimbuf2, n);
+ }
+ }
+ dimbuf2 = 0;
+ free(dimbuf1);
+}
+
+void pdist_bray_curtis(const double *X, double *dm, int m, int n) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = bray_curtis_distance(u, v, n);
+ }
+ }
+}
+
+void pdist_canberra(const double *X, double *dm, int m, int n) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = canberra_distance(u, v, n);
+ }
+ }
+}
+
+void pdist_hamming(const double *X, double *dm, int m, int n) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = hamming_distance(u, v, n);
+ }
+ }
+}
+
+void pdist_hamming_bool(const char *X, double *dm, int m, int n) {
+ int i, j;
+ const char *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = hamming_distance_bool(u, v, n);
+ }
+ }
+}
+
+void pdist_jaccard(const double *X, double *dm, int m, int n) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = jaccard_distance(u, v, n);
+ }
+ }
+}
+
+void pdist_jaccard_bool(const char *X, double *dm, int m, int n) {
+ int i, j;
+ const char *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = jaccard_distance_bool(u, v, n);
+ }
+ }
+}
+
+
+void pdist_chebyshev(const double *X, double *dm, int m, int n) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = chebyshev_distance(u, v, n);
+ }
+ }
+}
+
+void pdist_cosine(const double *X, double *dm, int m, int n, const double *norms) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = cosine_distance(u, v, n, norms[i], norms[j]);
+ }
+ }
+}
+
+void pdist_seuclidean(const double *X, const double *var,
+ double *dm, int m, int n) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = seuclidean_distance(var, u, v, n);
+ }
+ }
+}
+
+void pdist_city_block(const double *X, double *dm, int m, int n) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = city_block_distance(u, v, n);
+ }
+ }
+}
+
+void pdist_minkowski(const double *X, double *dm, int m, int n, double p) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = minkowski_distance(u, v, n, p);
+ }
+ }
+}
+
+void pdist_weighted_minkowski(const double *X, double *dm, int m, int n, double p, const double *w) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = weighted_minkowski_distance(u, v, n, p, w);
+ }
+ }
+}
+
+void pdist_yule_bool(const char *X, double *dm, int m, int n) {
+ int i, j;
+ const char *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = yule_distance_bool(u, v, n);
+ }
+ }
+}
+
+void pdist_matching_bool(const char *X, double *dm, int m, int n) {
+ int i, j;
+ const char *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = matching_distance_bool(u, v, n);
+ }
+ }
+}
+
+void pdist_dice_bool(const char *X, double *dm, int m, int n) {
+ int i, j;
+ const char *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = dice_distance_bool(u, v, n);
+ }
+ }
+}
+
+void pdist_rogerstanimoto_bool(const char *X, double *dm, int m, int n) {
+ int i, j;
+ const char *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = rogerstanimoto_distance_bool(u, v, n);
+ }
+ }
+}
+
+void pdist_russellrao_bool(const char *X, double *dm, int m, int n) {
+ int i, j;
+ const char *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = russellrao_distance_bool(u, v, n);
+ }
+ }
+}
+
+void pdist_kulsinski_bool(const char *X, double *dm, int m, int n) {
+ int i, j;
+ const char *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = kulsinski_distance_bool(u, v, n);
+ }
+ }
+}
+
+void pdist_sokalsneath_bool(const char *X, double *dm, int m, int n) {
+ int i, j;
+ const char *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = sokalsneath_distance_bool(u, v, n);
+ }
+ }
+}
+
+void pdist_sokalmichener_bool(const char *X, double *dm, int m, int n) {
+ int i, j;
+ const char *u, *v;
+ double *it = dm;
+ for (i = 0; i < m; i++) {
+ for (j = i + 1; j < m; j++, it++) {
+ u = X + (n * i);
+ v = X + (n * j);
+ *it = sokalmichener_distance_bool(u, v, n);
+ }
+ }
+}
+
+void dist_to_squareform_from_vector(double *M, const double *v, int n) {
+ double *it;
+ const double *cit;
+ int i, j;
+ cit = v;
+ for (i = 0; i < n - 1; i++) {
+ it = M + (i * n) + i + 1;
+ for (j = i + 1; j < n; j++, it++, cit++) {
+ *it = *cit;
+ }
+ }
+}
+
+void dist_to_vector_from_squareform(const double *M, double *v, int n) {
+ double *it;
+ const double *cit;
+ int i, j;
+ it = v;
+ for (i = 0; i < n - 1; i++) {
+ cit = M + (i * n) + i + 1;
+ for (j = i + 1; j < n; j++, it++, cit++) {
+ *it = *cit;
+ }
+ }
+}
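The two converters above move between a full square matrix and the condensed vector holding its strict upper triangle, row by row; a rough Python rendering of the same layout:

import numpy as np

def squareform_from_vector(v, n):
    # mirrors dist_to_squareform_from_vector: fill the strict upper triangle
    M = np.zeros((n, n))
    it = iter(v)
    for i in range(n - 1):
        for j in range(i + 1, n):
            M[i, j] = next(it)
    return M

print(squareform_from_vector([1.0, 2.0, 3.0], 3))
# [[ 0.  1.  2.]
#  [ 0.  0.  3.]
#  [ 0.  0.  0.]]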
+
+
+/** cdist */
+
+void cdist_euclidean(const double *XA,
+ const double *XB, double *dm, int mA, int mB, int n) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = euclidean_distance(u, v, n);
+ }
+ }
+}
+
+void cdist_mahalanobis(const double *XA,
+ const double *XB,
+ const double *covinv,
+ double *dm, int mA, int mB, int n) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ double *dimbuf1, *dimbuf2;
+ dimbuf1 = (double*)malloc(sizeof(double) * 2 * n);
+ dimbuf2 = dimbuf1 + n;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = mahalanobis_distance(u, v, covinv, dimbuf1, dimbuf2, n);
+ }
+ }
+ dimbuf2 = 0;
+ free(dimbuf1);
+}
+
+void cdist_bray_curtis(const double *XA, const double *XB,
+ double *dm, int mA, int mB, int n) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = bray_curtis_distance(u, v, n);
+ }
+ }
+}
+
+void cdist_canberra(const double *XA,
+ const double *XB, double *dm, int mA, int mB, int n) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = canberra_distance(u, v, n);
+ }
+ }
+}
+
+void cdist_hamming(const double *XA,
+ const double *XB, double *dm, int mA, int mB, int n) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = hamming_distance(u, v, n);
+ }
+ }
+}
+
+void cdist_hamming_bool(const char *XA,
+ const char *XB, double *dm, int mA, int mB, int n) {
+ int i, j;
+ const char *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = hamming_distance_bool(u, v, n);
+ }
+ }
+}
+
+void cdist_jaccard(const double *XA,
+ const double *XB, double *dm, int mA, int mB, int n) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = jaccard_distance(u, v, n);
+ }
+ }
+}
+
+void cdist_jaccard_bool(const char *XA,
+ const char *XB, double *dm, int mA, int mB, int n) {
+ int i, j;
+ const char *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = jaccard_distance_bool(u, v, n);
+ }
+ }
+}
+
+
+void cdist_chebyshev(const double *XA,
+ const double *XB, double *dm, int mA, int mB, int n) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = chebyshev_distance(u, v, n);
+ }
+ }
+}
+
+void cdist_cosine(const double *XA,
+ const double *XB, double *dm, int mA, int mB, int n,
+ const double *normsA, const double *normsB) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = cosine_distance(u, v, n, normsA[i], normsB[j]);
+ }
+ }
+}
+
+void cdist_seuclidean(const double *XA,
+ const double *XB,
+ const double *var,
+ double *dm, int mA, int mB, int n) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = seuclidean_distance(var, u, v, n);
+ }
+ }
+}
+
+void cdist_city_block(const double *XA, const double *XB, double *dm, int mA, int mB, int n) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = city_block_distance(u, v, n);
+ }
+ }
+}
+
+void cdist_minkowski(const double *XA, const double *XB, double *dm, int mA, int mB, int n, double p) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = minkowski_distance(u, v, n, p);
+ }
+ }
+}
+
+void cdist_weighted_minkowski(const double *XA, const double *XB, double *dm, int mA, int mB, int n, double p, const double *w) {
+ int i, j;
+ const double *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = weighted_minkowski_distance(u, v, n, p, w);
+ }
+ }
+}
+
+void cdist_yule_bool(const char *XA, const char *XB, double *dm, int mA, int mB, int n) {
+ int i, j;
+ const char *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = yule_distance_bool(u, v, n);
+ }
+ }
+}
+
+void cdist_matching_bool(const char *XA, const char *XB, double *dm, int mA, int mB, int n) {
+ int i, j;
+ const char *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = matching_distance_bool(u, v, n);
+ }
+ }
+}
+
+void cdist_dice_bool(const char *XA, const char *XB, double *dm, int mA, int mB, int n) {
+ int i, j;
+ const char *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = dice_distance_bool(u, v, n);
+ }
+ }
+}
+
+void cdist_rogerstanimoto_bool(const char *XA, const char *XB, double *dm, int mA, int mB, int n) {
+ int i, j;
+ const char *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = rogerstanimoto_distance_bool(u, v, n);
+ }
+ }
+}
+
+void cdist_russellrao_bool(const char *XA, const char *XB, double *dm, int mA, int mB, int n) {
+ int i, j;
+ const char *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = russellrao_distance_bool(u, v, n);
+ }
+ }
+}
+
+void cdist_kulsinski_bool(const char *XA, const char *XB, double *dm, int mA, int mB, int n) {
+ int i, j;
+ const char *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = kulsinski_distance_bool(u, v, n);
+ }
+ }
+}
+
+void cdist_sokalsneath_bool(const char *XA, const char *XB, double *dm, int mA, int mB, int n) {
+ int i, j;
+ const char *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = sokalsneath_distance_bool(u, v, n);
+ }
+ }
+}
+
+void cdist_sokalmichener_bool(const char *XA, const char *XB, double *dm, int mA, int mB, int n) {
+ int i, j;
+ const char *u, *v;
+ double *it = dm;
+ for (i = 0; i < mA; i++) {
+ for (j = 0; j < mB; j++, it++) {
+ u = XA + (n * i);
+ v = XB + (n * j);
+ *it = sokalmichener_distance_bool(u, v, n);
+ }
+ }
+}
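
For reference, the pdist_* routines above fill dm in condensed order (the strict upper triangle of the m x m distance matrix, walked row by row), while the cdist_* routines fill a full mA x mB matrix in row-major order. A minimal NumPy sketch of the condensed layout and the squareform round trip (illustrative only; the function names here are made up for the example):

    import numpy as np

    def pdist_condensed(X):
        # Condensed layout: d(i, j) for all i < j, row by row, matching
        # the pointer walk in the pdist_* loops above.
        m = X.shape[0]
        dm = np.empty(m * (m - 1) // 2)
        it = 0
        for i in range(m):
            for j in range(i + 1, m):
                dm[it] = np.sqrt(np.sum((X[i] - X[j]) ** 2))
                it += 1
        return dm

    def squareform_from_condensed(dm, m):
        # Mirrors dist_to_squareform_from_vector: write the upper triangle;
        # the C routine leaves symmetrizing to its callers.
        M = np.zeros((m, m))
        M[np.triu_indices(m, k=1)] = dm
        return M + M.T
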
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/utilities/spatial/src/distance.h
--- /dev/null
+++ b/yt/utilities/spatial/src/distance.h
@@ -0,0 +1,116 @@
+/**
+ * distance.h
+ *
+ * Author: Damian Eads
+ * Date: September 22, 2007 (moved to new file on June 8, 2008)
+ * Adapted for incorporation into Scipy, April 9, 2008.
+ *
+ * Copyright (c) 2007, 2008, Damian Eads. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of the author nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CPY_DISTANCE_H
+#define _CPY_DISTANCE_H
+
+void dist_to_squareform_from_vector(double *M, const double *v, int n);
+void dist_to_vector_from_squareform(const double *M, double *v, int n);
+void pdist_euclidean(const double *X, double *dm, int m, int n);
+void pdist_seuclidean(const double *X,
+ const double *var, double *dm, int m, int n);
+void pdist_mahalanobis(const double *X, const double *covinv,
+ double *dm, int m, int n);
+void pdist_bray_curtis(const double *X, double *dm, int m, int n);
+void pdist_canberra(const double *X, double *dm, int m, int n);
+void pdist_hamming(const double *X, double *dm, int m, int n);
+void pdist_hamming_bool(const char *X, double *dm, int m, int n);
+void pdist_city_block(const double *X, double *dm, int m, int n);
+void pdist_cosine(const double *X, double *dm, int m, int n, const double *norms);
+void pdist_chebyshev(const double *X, double *dm, int m, int n);
+void pdist_jaccard(const double *X, double *dm, int m, int n);
+void pdist_jaccard_bool(const char *X, double *dm, int m, int n);
+void pdist_kulsinski_bool(const char *X, double *dm, int m, int n);
+void pdist_minkowski(const double *X, double *dm, int m, int n, double p);
+void pdist_weighted_minkowski(const double *X, double *dm, int m, int n, double p, const double *w);
+void pdist_yule_bool(const char *X, double *dm, int m, int n);
+void pdist_matching_bool(const char *X, double *dm, int m, int n);
+void pdist_dice_bool(const char *X, double *dm, int m, int n);
+void pdist_rogerstanimoto_bool(const char *X, double *dm, int m, int n);
+void pdist_russellrao_bool(const char *X, double *dm, int m, int n);
+void pdist_sokalmichener_bool(const char *X, double *dm, int m, int n);
+void pdist_sokalsneath_bool(const char *X, double *dm, int m, int n);
+
+void cdist_euclidean(const double *XA, const double *XB, double *dm, int mA, int mB, int n);
+void cdist_mahalanobis(const double *XA, const double *XB,
+ const double *covinv,
+ double *dm, int mA, int mB, int n);
+void cdist_bray_curtis(const double *XA, const double *XB,
+ double *dm, int mA, int mB, int n);
+void cdist_canberra(const double *XA,
+ const double *XB, double *dm, int mA, int mB, int n);
+void cdist_hamming(const double *XA,
+ const double *XB, double *dm, int mA, int mB, int n);
+void cdist_hamming_bool(const char *XA,
+ const char *XB, double *dm,
+ int mA, int mB, int n);
+void cdist_jaccard(const double *XA,
+ const double *XB, double *dm, int mA, int mB, int n);
+void cdist_jaccard_bool(const char *XA,
+ const char *XB, double *dm, int mA, int mB, int n);
+void cdist_chebyshev(const double *XA,
+ const double *XB, double *dm, int mA, int mB, int n);
+void cdist_cosine(const double *XA,
+ const double *XB, double *dm, int mA, int mB, int n,
+ const double *normsA, const double *normsB);
+void cdist_seuclidean(const double *XA,
+ const double *XB,
+ const double *var,
+ double *dm, int mA, int mB, int n);
+void cdist_city_block(const double *XA, const double *XB, double *dm,
+ int mA, int mB, int n);
+void cdist_minkowski(const double *XA, const double *XB, double *dm,
+ int mA, int mB, int n, double p);
+void cdist_weighted_minkowski(const double *XA, const double *XB, double *dm,
+ int mA, int mB, int n, double p, const double *w);
+void cdist_yule_bool(const char *XA, const char *XB, double *dm,
+ int mA, int mB, int n);
+void cdist_matching_bool(const char *XA, const char *XB, double *dm,
+ int mA, int mB, int n);
+void cdist_dice_bool(const char *XA, const char *XB, double *dm,
+ int mA, int mB, int n);
+void cdist_rogerstanimoto_bool(const char *XA, const char *XB, double *dm,
+ int mA, int mB, int n);
+void cdist_russellrao_bool(const char *XA, const char *XB, double *dm,
+ int mA, int mB, int n);
+void cdist_kulsinski_bool(const char *XA, const char *XB, double *dm,
+ int mA, int mB, int n);
+void cdist_sokalsneath_bool(const char *XA, const char *XB, double *dm,
+ int mA, int mB, int n);
+void cdist_sokalmichener_bool(const char *XA, const char *XB, double *dm,
+ int mA, int mB, int n);
+
+#endif
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/utilities/spatial/src/distance_wrap.c
--- /dev/null
+++ b/yt/utilities/spatial/src/distance_wrap.c
@@ -0,0 +1,1163 @@
+/**
+ * distance_wrap.c
+ *
+ * Author: Damian Eads
+ * Date: September 22, 2007 (moved to new file on June 8, 2008)
+ * Adapted for incorporation into Scipy, April 9, 2008.
+ *
+ * Copyright (c) 2007, Damian Eads. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the
+ * following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of the author nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <math.h>
+#include "distance.h"
+#include "Python.h"
+#include <numpy/arrayobject.h>
+#include <stdio.h>
+
+extern PyObject *cdist_euclidean_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_;
+ int mA, mB, n;
+ double *dm;
+ const double *XA, *XB;
+ if (!PyArg_ParseTuple(args, "O!O!O!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ XA = (const double*)XA_->data;
+ XB = (const double*)XB_->data;
+ dm = (double*)dm_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+
+ cdist_euclidean(XA, XB, dm, mA, mB, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *cdist_canberra_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_;
+ int mA, mB, n;
+ double *dm;
+ const double *XA, *XB;
+ if (!PyArg_ParseTuple(args, "O!O!O!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ XA = (const double*)XA_->data;
+ XB = (const double*)XB_->data;
+ dm = (double*)dm_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+
+ cdist_canberra(XA, XB, dm, mA, mB, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *cdist_bray_curtis_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_;
+ int mA, mB, n;
+ double *dm;
+ const double *XA, *XB;
+ if (!PyArg_ParseTuple(args, "O!O!O!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ XA = (const double*)XA_->data;
+ XB = (const double*)XB_->data;
+ dm = (double*)dm_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+
+ cdist_bray_curtis(XA, XB, dm, mA, mB, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+
+extern PyObject *cdist_mahalanobis_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *covinv_, *dm_;
+ int mA, mB, n;
+ double *dm;
+ const double *XA, *XB;
+ const double *covinv;
+ if (!PyArg_ParseTuple(args, "O!O!O!O!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &covinv_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ XA = (const double*)XA_->data;
+ XB = (const double*)XB_->data;
+ covinv = (const double*)covinv_->data;
+ dm = (double*)dm_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+
+ cdist_mahalanobis(XA, XB, covinv, dm, mA, mB, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+
+extern PyObject *cdist_chebyshev_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_;
+ int mA, mB, n;
+ double *dm;
+ const double *XA, *XB;
+ if (!PyArg_ParseTuple(args, "O!O!O!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ XA = (const double*)XA_->data;
+ XB = (const double*)XB_->data;
+ dm = (double*)dm_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+
+ cdist_chebyshev(XA, XB, dm, mA, mB, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+
+extern PyObject *cdist_cosine_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_, *normsA_, *normsB_;
+ int mA, mB, n;
+ double *dm;
+ const double *XA, *XB, *normsA, *normsB;
+ if (!PyArg_ParseTuple(args, "O!O!O!O!O!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &dm_,
+ &PyArray_Type, &normsA_,
+ &PyArray_Type, &normsB_)) {
+ return 0;
+ }
+ else {
+ XA = (const double*)XA_->data;
+ XB = (const double*)XB_->data;
+ dm = (double*)dm_->data;
+ normsA = (const double*)normsA_->data;
+ normsB = (const double*)normsB_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+
+ cdist_cosine(XA, XB, dm, mA, mB, n, normsA, normsB);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *cdist_seuclidean_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_, *var_;
+ int mA, mB, n;
+ double *dm;
+ const double *XA, *XB, *var;
+ if (!PyArg_ParseTuple(args, "O!O!O!O!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &var_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ XA = (const double*)XA_->data;
+ XB = (const double*)XB_->data;
+ dm = (double*)dm_->data;
+ var = (const double*)var_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+
+ cdist_seuclidean(XA, XB, var, dm, mA, mB, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *cdist_city_block_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_;
+ int mA, mB, n;
+ double *dm;
+ const double *XA, *XB;
+ if (!PyArg_ParseTuple(args, "O!O!O!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ XA = (const double*)XA_->data;
+ XB = (const double*)XB_->data;
+ dm = (double*)dm_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+
+ cdist_city_block(XA, XB, dm, mA, mB, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *cdist_hamming_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_;
+ int mA, mB, n;
+ double *dm;
+ const double *XA, *XB;
+ if (!PyArg_ParseTuple(args, "O!O!O!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ XA = (const double*)XA_->data;
+ XB = (const double*)XB_->data;
+ dm = (double*)dm_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+
+ cdist_hamming(XA, XB, dm, mA, mB, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *cdist_hamming_bool_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_;
+ int mA, mB, n;
+ double *dm;
+ const char *XA, *XB;
+ if (!PyArg_ParseTuple(args, "O!O!O!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ XA = (const char*)XA_->data;
+ XB = (const char*)XB_->data;
+ dm = (double*)dm_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+
+ cdist_hamming_bool(XA, XB, dm, mA, mB, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *cdist_jaccard_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_;
+ int mA, mB, n;
+ double *dm;
+ const double *XA, *XB;
+ if (!PyArg_ParseTuple(args, "O!O!O!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ XA = (const double*)XA_->data;
+ XB = (const double*)XB_->data;
+ dm = (double*)dm_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+
+ cdist_jaccard(XA, XB, dm, mA, mB, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *cdist_jaccard_bool_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_;
+ int mA, mB, n;
+ double *dm;
+ const char *XA, *XB;
+ if (!PyArg_ParseTuple(args, "O!O!O!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ XA = (const char*)XA_->data;
+ XB = (const char*)XB_->data;
+ dm = (double*)dm_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+
+ cdist_jaccard_bool(XA, XB, dm, mA, mB, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *cdist_minkowski_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_;
+ int mA, mB, n;
+ double *dm;
+ const double *XA, *XB;
+ double p;
+ if (!PyArg_ParseTuple(args, "O!O!O!d",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &dm_,
+ &p)) {
+ return 0;
+ }
+ else {
+ XA = (const double*)XA_->data;
+ XB = (const double*)XB_->data;
+ dm = (double*)dm_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+ cdist_minkowski(XA, XB, dm, mA, mB, n, p);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *cdist_weighted_minkowski_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_, *w_;
+ int mA, mB, n;
+ double *dm;
+ const double *XA, *XB, *w;
+ double p;
+ if (!PyArg_ParseTuple(args, "O!O!O!dO!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &dm_,
+ &p,
+ &PyArray_Type, &w_)) {
+ return 0;
+ }
+ else {
+ XA = (const double*)XA_->data;
+ XB = (const double*)XB_->data;
+ w = (const double*)w_->data;
+ dm = (double*)dm_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+ cdist_weighted_minkowski(XA, XB, dm, mA, mB, n, p, w);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *cdist_yule_bool_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_;
+ int mA, mB, n;
+ double *dm;
+ const char *XA, *XB;
+ if (!PyArg_ParseTuple(args, "O!O!O!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ XA = (const char*)XA_->data;
+ XB = (const char*)XB_->data;
+ dm = (double*)dm_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+
+ cdist_yule_bool(XA, XB, dm, mA, mB, n);
+ }
+ return Py_BuildValue("");
+}
+
+extern PyObject *cdist_matching_bool_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_;
+ int mA, mB, n;
+ double *dm;
+ const char *XA, *XB;
+ if (!PyArg_ParseTuple(args, "O!O!O!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ XA = (const char*)XA_->data;
+ XB = (const char*)XB_->data;
+ dm = (double*)dm_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+
+ cdist_matching_bool(XA, XB, dm, mA, mB, n);
+ }
+ return Py_BuildValue("");
+}
+
+extern PyObject *cdist_dice_bool_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_;
+ int mA, mB, n;
+ double *dm;
+ const char *XA, *XB;
+ if (!PyArg_ParseTuple(args, "O!O!O!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ XA = (const char*)XA_->data;
+ XB = (const char*)XB_->data;
+ dm = (double*)dm_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+
+ cdist_dice_bool(XA, XB, dm, mA, mB, n);
+ }
+ return Py_BuildValue("");
+}
+
+extern PyObject *cdist_rogerstanimoto_bool_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_;
+ int mA, mB, n;
+ double *dm;
+ const char *XA, *XB;
+ if (!PyArg_ParseTuple(args, "O!O!O!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ XA = (const char*)XA_->data;
+ XB = (const char*)XB_->data;
+ dm = (double*)dm_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+
+ cdist_rogerstanimoto_bool(XA, XB, dm, mA, mB, n);
+ }
+ return Py_BuildValue("");
+}
+
+extern PyObject *cdist_russellrao_bool_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_;
+ int mA, mB, n;
+ double *dm;
+ const char *XA, *XB;
+ if (!PyArg_ParseTuple(args, "O!O!O!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ XA = (const char*)XA_->data;
+ XB = (const char*)XB_->data;
+ dm = (double*)dm_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+
+ cdist_russellrao_bool(XA, XB, dm, mA, mB, n);
+ }
+ return Py_BuildValue("");
+}
+
+extern PyObject *cdist_kulsinski_bool_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_;
+ int mA, mB, n;
+ double *dm;
+ const char *XA, *XB;
+ if (!PyArg_ParseTuple(args, "O!O!O!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ XA = (const char*)XA_->data;
+ XB = (const char*)XB_->data;
+ dm = (double*)dm_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+
+ cdist_kulsinski_bool(XA, XB, dm, mA, mB, n);
+ }
+ return Py_BuildValue("");
+}
+
+extern PyObject *cdist_sokalmichener_bool_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_;
+ int mA, mB, n;
+ double *dm;
+ const char *XA, *XB;
+ if (!PyArg_ParseTuple(args, "O!O!O!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ XA = (const char*)XA_->data;
+ XB = (const char*)XB_->data;
+ dm = (double*)dm_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+
+ cdist_sokalmichener_bool(XA, XB, dm, mA, mB, n);
+ }
+ return Py_BuildValue("");
+}
+
+extern PyObject *cdist_sokalsneath_bool_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *XA_, *XB_, *dm_;
+ int mA, mB, n;
+ double *dm;
+ const char *XA, *XB;
+ if (!PyArg_ParseTuple(args, "O!O!O!",
+ &PyArray_Type, &XA_, &PyArray_Type, &XB_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ XA = (const char*)XA_->data;
+ XB = (const char*)XB_->data;
+ dm = (double*)dm_->data;
+ mA = XA_->dimensions[0];
+ mB = XB_->dimensions[0];
+ n = XA_->dimensions[1];
+
+ cdist_sokalsneath_bool(XA, XB, dm, mA, mB, n);
+ }
+ return Py_BuildValue("");
+}
+
+/***************************** pdist ***/
+
+extern PyObject *pdist_euclidean_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_;
+ int m, n;
+ double *dm;
+ const double *X;
+ if (!PyArg_ParseTuple(args, "O!O!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ X = (const double*)X_->data;
+ dm = (double*)dm_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_euclidean(X, dm, m, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *pdist_canberra_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_;
+ int m, n;
+ double *dm;
+ const double *X;
+ if (!PyArg_ParseTuple(args, "O!O!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ X = (const double*)X_->data;
+ dm = (double*)dm_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_canberra(X, dm, m, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *pdist_bray_curtis_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_;
+ int m, n;
+ double *dm;
+ const double *X;
+ if (!PyArg_ParseTuple(args, "O!O!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ X = (const double*)X_->data;
+ dm = (double*)dm_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_bray_curtis(X, dm, m, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+
+extern PyObject *pdist_mahalanobis_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *covinv_, *dm_;
+ int m, n;
+ double *dm;
+ const double *X;
+ const double *covinv;
+ if (!PyArg_ParseTuple(args, "O!O!O!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &covinv_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ X = (const double*)X_->data;
+ covinv = (const double*)covinv_->data;
+ dm = (double*)dm_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_mahalanobis(X, covinv, dm, m, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+
+extern PyObject *pdist_chebyshev_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_;
+ int m, n;
+ double *dm;
+ const double *X;
+ if (!PyArg_ParseTuple(args, "O!O!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ X = (const double*)X_->data;
+ dm = (double*)dm_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_chebyshev(X, dm, m, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+
+extern PyObject *pdist_cosine_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_, *norms_;
+ int m, n;
+ double *dm;
+ const double *X, *norms;
+ if (!PyArg_ParseTuple(args, "O!O!O!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &dm_,
+ &PyArray_Type, &norms_)) {
+ return 0;
+ }
+ else {
+ X = (const double*)X_->data;
+ dm = (double*)dm_->data;
+ norms = (const double*)norms_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_cosine(X, dm, m, n, norms);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *pdist_seuclidean_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_, *var_;
+ int m, n;
+ double *dm;
+ const double *X, *var;
+ if (!PyArg_ParseTuple(args, "O!O!O!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &var_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ X = (const double*)X_->data;
+ dm = (double*)dm_->data;
+ var = (const double*)var_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_seuclidean(X, var, dm, m, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *pdist_city_block_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_;
+ int m, n;
+ double *dm;
+ const double *X;
+ if (!PyArg_ParseTuple(args, "O!O!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ X = (const double*)X_->data;
+ dm = (double*)dm_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_city_block(X, dm, m, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *pdist_hamming_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_;
+ int m, n;
+ double *dm;
+ const double *X;
+ if (!PyArg_ParseTuple(args, "O!O!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ X = (const double*)X_->data;
+ dm = (double*)dm_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_hamming(X, dm, m, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *pdist_hamming_bool_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_;
+ int m, n;
+ double *dm;
+ const char *X;
+ if (!PyArg_ParseTuple(args, "O!O!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ X = (const char*)X_->data;
+ dm = (double*)dm_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_hamming_bool(X, dm, m, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *pdist_jaccard_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_;
+ int m, n;
+ double *dm;
+ const double *X;
+ if (!PyArg_ParseTuple(args, "O!O!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ X = (const double*)X_->data;
+ dm = (double*)dm_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_jaccard(X, dm, m, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *pdist_jaccard_bool_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_;
+ int m, n;
+ double *dm;
+ const char *X;
+ if (!PyArg_ParseTuple(args, "O!O!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ X = (const char*)X_->data;
+ dm = (double*)dm_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_jaccard_bool(X, dm, m, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *pdist_minkowski_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_;
+ int m, n;
+ double *dm, *X;
+ double p;
+ if (!PyArg_ParseTuple(args, "O!O!d",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &dm_,
+ &p)) {
+ return 0;
+ }
+ else {
+ X = (double*)X_->data;
+ dm = (double*)dm_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_minkowski(X, dm, m, n, p);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *pdist_weighted_minkowski_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_, *w_;
+ int m, n;
+ double *dm, *X;
+ const double *w;
+ double p;
+ if (!PyArg_ParseTuple(args, "O!O!dO!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &dm_,
+ &p,
+ &PyArray_Type, &w_)) {
+ return 0;
+ }
+ else {
+ X = (double*)X_->data;
+ dm = (double*)dm_->data;
+ w = (const double*)w_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_weighted_minkowski(X, dm, m, n, p, w);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+
+extern PyObject *pdist_yule_bool_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_;
+ int m, n;
+ double *dm;
+ const char *X;
+ if (!PyArg_ParseTuple(args, "O!O!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ X = (const char*)X_->data;
+ dm = (double*)dm_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_yule_bool(X, dm, m, n);
+ }
+ return Py_BuildValue("");
+}
+
+extern PyObject *pdist_matching_bool_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_;
+ int m, n;
+ double *dm;
+ const char *X;
+ if (!PyArg_ParseTuple(args, "O!O!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ X = (const char*)X_->data;
+ dm = (double*)dm_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_matching_bool(X, dm, m, n);
+ }
+ return Py_BuildValue("");
+}
+
+extern PyObject *pdist_dice_bool_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_;
+ int m, n;
+ double *dm;
+ const char *X;
+ if (!PyArg_ParseTuple(args, "O!O!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ X = (const char*)X_->data;
+ dm = (double*)dm_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_dice_bool(X, dm, m, n);
+ }
+ return Py_BuildValue("");
+}
+
+extern PyObject *pdist_rogerstanimoto_bool_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_;
+ int m, n;
+ double *dm;
+ const char *X;
+ if (!PyArg_ParseTuple(args, "O!O!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ X = (const char*)X_->data;
+ dm = (double*)dm_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_rogerstanimoto_bool(X, dm, m, n);
+ }
+ return Py_BuildValue("");
+}
+
+extern PyObject *pdist_russellrao_bool_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_;
+ int m, n;
+ double *dm;
+ const char *X;
+ if (!PyArg_ParseTuple(args, "O!O!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ X = (const char*)X_->data;
+ dm = (double*)dm_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_russellrao_bool(X, dm, m, n);
+ }
+ return Py_BuildValue("");
+}
+
+extern PyObject *pdist_kulsinski_bool_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_;
+ int m, n;
+ double *dm;
+ const char *X;
+ if (!PyArg_ParseTuple(args, "O!O!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ X = (const char*)X_->data;
+ dm = (double*)dm_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_kulsinski_bool(X, dm, m, n);
+ }
+ return Py_BuildValue("");
+}
+
+extern PyObject *pdist_sokalmichener_bool_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_;
+ int m, n;
+ double *dm;
+ const char *X;
+ if (!PyArg_ParseTuple(args, "O!O!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ X = (const char*)X_->data;
+ dm = (double*)dm_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_sokalmichener_bool(X, dm, m, n);
+ }
+ return Py_BuildValue("");
+}
+
+extern PyObject *pdist_sokalsneath_bool_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *X_, *dm_;
+ int m, n;
+ double *dm;
+ const char *X;
+ if (!PyArg_ParseTuple(args, "O!O!",
+ &PyArray_Type, &X_,
+ &PyArray_Type, &dm_)) {
+ return 0;
+ }
+ else {
+ X = (const char*)X_->data;
+ dm = (double*)dm_->data;
+ m = X_->dimensions[0];
+ n = X_->dimensions[1];
+
+ pdist_sokalsneath_bool(X, dm, m, n);
+ }
+ return Py_BuildValue("");
+}
+
+extern PyObject *to_squareform_from_vector_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *M_, *v_;
+ int n;
+ const double *v;
+ double *M;
+ if (!PyArg_ParseTuple(args, "O!O!",
+ &PyArray_Type, &M_,
+ &PyArray_Type, &v_)) {
+ return 0;
+ }
+ else {
+ M = (double*)M_->data;
+ v = (const double*)v_->data;
+ n = M_->dimensions[0];
+ dist_to_squareform_from_vector(M, v, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+extern PyObject *to_vector_from_squareform_wrap(PyObject *self, PyObject *args) {
+ PyArrayObject *M_, *v_;
+ int n;
+ double *v;
+ const double *M;
+ if (!PyArg_ParseTuple(args, "O!O!",
+ &PyArray_Type, &M_,
+ &PyArray_Type, &v_)) {
+ return 0;
+ }
+ else {
+ M = (const double*)M_->data;
+ v = (double*)v_->data;
+ n = M_->dimensions[0];
+ dist_to_vector_from_squareform(M, v, n);
+ }
+ return Py_BuildValue("d", 0.0);
+}
+
+
+static PyMethodDef _distanceWrapMethods[] = {
+ {"cdist_bray_curtis_wrap", cdist_bray_curtis_wrap, METH_VARARGS},
+ {"cdist_canberra_wrap", cdist_canberra_wrap, METH_VARARGS},
+ {"cdist_chebyshev_wrap", cdist_chebyshev_wrap, METH_VARARGS},
+ {"cdist_city_block_wrap", cdist_city_block_wrap, METH_VARARGS},
+ {"cdist_cosine_wrap", cdist_cosine_wrap, METH_VARARGS},
+ {"cdist_dice_bool_wrap", cdist_dice_bool_wrap, METH_VARARGS},
+ {"cdist_euclidean_wrap", cdist_euclidean_wrap, METH_VARARGS},
+ {"cdist_hamming_wrap", cdist_hamming_wrap, METH_VARARGS},
+ {"cdist_hamming_bool_wrap", cdist_hamming_bool_wrap, METH_VARARGS},
+ {"cdist_jaccard_wrap", cdist_jaccard_wrap, METH_VARARGS},
+ {"cdist_jaccard_bool_wrap", cdist_jaccard_bool_wrap, METH_VARARGS},
+ {"cdist_kulsinski_bool_wrap", cdist_kulsinski_bool_wrap, METH_VARARGS},
+ {"cdist_mahalanobis_wrap", cdist_mahalanobis_wrap, METH_VARARGS},
+ {"cdist_matching_bool_wrap", cdist_matching_bool_wrap, METH_VARARGS},
+ {"cdist_minkowski_wrap", cdist_minkowski_wrap, METH_VARARGS},
+ {"cdist_weighted_minkowski_wrap", cdist_weighted_minkowski_wrap, METH_VARARGS},
+ {"cdist_rogerstanimoto_bool_wrap", cdist_rogerstanimoto_bool_wrap, METH_VARARGS},
+ {"cdist_russellrao_bool_wrap", cdist_russellrao_bool_wrap, METH_VARARGS},
+ {"cdist_seuclidean_wrap", cdist_seuclidean_wrap, METH_VARARGS},
+ {"cdist_sokalmichener_bool_wrap", cdist_sokalmichener_bool_wrap, METH_VARARGS},
+ {"cdist_sokalsneath_bool_wrap", cdist_sokalsneath_bool_wrap, METH_VARARGS},
+ {"cdist_yule_bool_wrap", cdist_yule_bool_wrap, METH_VARARGS},
+ {"pdist_bray_curtis_wrap", pdist_bray_curtis_wrap, METH_VARARGS},
+ {"pdist_canberra_wrap", pdist_canberra_wrap, METH_VARARGS},
+ {"pdist_chebyshev_wrap", pdist_chebyshev_wrap, METH_VARARGS},
+ {"pdist_city_block_wrap", pdist_city_block_wrap, METH_VARARGS},
+ {"pdist_cosine_wrap", pdist_cosine_wrap, METH_VARARGS},
+ {"pdist_dice_bool_wrap", pdist_dice_bool_wrap, METH_VARARGS},
+ {"pdist_euclidean_wrap", pdist_euclidean_wrap, METH_VARARGS},
+ {"pdist_hamming_wrap", pdist_hamming_wrap, METH_VARARGS},
+ {"pdist_hamming_bool_wrap", pdist_hamming_bool_wrap, METH_VARARGS},
+ {"pdist_jaccard_wrap", pdist_jaccard_wrap, METH_VARARGS},
+ {"pdist_jaccard_bool_wrap", pdist_jaccard_bool_wrap, METH_VARARGS},
+ {"pdist_kulsinski_bool_wrap", pdist_kulsinski_bool_wrap, METH_VARARGS},
+ {"pdist_mahalanobis_wrap", pdist_mahalanobis_wrap, METH_VARARGS},
+ {"pdist_matching_bool_wrap", pdist_matching_bool_wrap, METH_VARARGS},
+ {"pdist_minkowski_wrap", pdist_minkowski_wrap, METH_VARARGS},
+ {"pdist_weighted_minkowski_wrap", pdist_weighted_minkowski_wrap, METH_VARARGS},
+ {"pdist_rogerstanimoto_bool_wrap", pdist_rogerstanimoto_bool_wrap, METH_VARARGS},
+ {"pdist_russellrao_bool_wrap", pdist_russellrao_bool_wrap, METH_VARARGS},
+ {"pdist_seuclidean_wrap", pdist_seuclidean_wrap, METH_VARARGS},
+ {"pdist_sokalmichener_bool_wrap", pdist_sokalmichener_bool_wrap, METH_VARARGS},
+ {"pdist_sokalsneath_bool_wrap", pdist_sokalsneath_bool_wrap, METH_VARARGS},
+ {"pdist_yule_bool_wrap", pdist_yule_bool_wrap, METH_VARARGS},
+ {"to_squareform_from_vector_wrap",
+ to_squareform_from_vector_wrap, METH_VARARGS},
+ {"to_vector_from_squareform_wrap",
+ to_vector_from_squareform_wrap, METH_VARARGS},
+ {NULL, NULL} /* Sentinel - marks the end of this structure */
+};
+
+#if PY_VERSION_HEX >= 0x03000000
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "_distance_wrap",
+ NULL,
+ -1,
+ _distanceWrapMethods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+PyObject *PyInit__distance_wrap(void)
+{
+ PyObject *m;
+
+ m = PyModule_Create(&moduledef);
+ import_array();
+
+ return m;
+}
+#else
+PyMODINIT_FUNC init_distance_wrap(void)
+{
+ (void) Py_InitModule("_distance_wrap", _distanceWrapMethods);
+ import_array(); // Must be present for NumPy. Called first after above line.
+}
+#endif
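
These wrappers assume preallocated, C-contiguous float64 output arrays and do no type or shape checking of their own; that is expected to be handled by the pure-Python distance module above them. A hedged usage sketch (the import path is an assumption based on this file's location in the tree):

    import numpy as np
    # Assumed path; the extension built from this file is named _distance_wrap.
    from yt.utilities.spatial import _distance_wrap

    XA = np.random.random((5, 3))    # mA x n, C-contiguous float64
    XB = np.random.random((7, 3))    # mB x n
    dm = np.zeros((5, 7))            # caller preallocates the mA x mB output
    _distance_wrap.cdist_euclidean_wrap(XA, XB, dm)

    X = np.random.random((6, 3))
    pdm = np.zeros(6 * 5 // 2)       # condensed output, m*(m-1)/2 entries
    _distance_wrap.pdist_euclidean_wrap(X, pdm)
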
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -638,8 +638,8 @@
transfer_function = None, fields = None,
sub_samples = 5, log_fields = None, volume = None,
pf = None, use_kd=True, no_ghost=False):
- ParallelAnalysisInterface.__init__(self)
- if pf is not None: self.pf = pf
+ ParallelAnalysisInterface.__init__(self)
+ if pf is not None: self.pf = pf
self.center = na.array(center, dtype='float64')
self.radius = radius
self.nside = nside
@@ -708,8 +708,8 @@
sub_samples = 5, log_fields = None, volume = None,
pf = None, use_kd=True, no_ghost=False,
rays_per_cell = 0.1, max_nside = 8192):
- ParallelAnalysisInterface.__init__(self)
- if pf is not None: self.pf = pf
+ ParallelAnalysisInterface.__init__(self)
+ if pf is not None: self.pf = pf
self.center = na.array(center, dtype='float64')
self.radius = radius
self.use_kd = use_kd
@@ -762,8 +762,8 @@
class StereoPairCamera(Camera):
def __init__(self, original_camera, relative_separation = 0.005):
- ParallelAnalysisInterface.__init__(self)
- self.original_camera = original_camera
+ ParallelAnalysisInterface.__init__(self)
+ self.original_camera = original_camera
self.relative_separation = relative_separation
def split(self):
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/visualization/volume_rendering/grid_partitioner.py
--- a/yt/visualization/volume_rendering/grid_partitioner.py
+++ b/yt/visualization/volume_rendering/grid_partitioner.py
@@ -177,6 +177,7 @@
self.brick_dimensions[i,:],
))
self.bricks = na.array(bricks, dtype='object')
+ f.close()
def reset_cast(self):
pass
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/visualization/volume_rendering/image_handling.py
--- a/yt/visualization/volume_rendering/image_handling.py
+++ b/yt/visualization/volume_rendering/image_handling.py
@@ -71,6 +71,7 @@
g = f['G'].value
b = f['B'].value
a = f['A'].value
+ f.close()
else:
print 'No support for fits import.'
return na.array([r,g,b,a]).swapaxes(0,2).swapaxes(0,1)
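
Both of the f.close() additions plug HDF5 handles that were previously left open. Where the installed h5py supports it, a context manager gives the same guarantee even on error paths; a sketch, not part of the diff (the file name is a placeholder):

    import h5py
    with h5py.File("image.h5", "r") as f:   # placeholder name
        r = f['R'].value                    # .value was the idiom of the day
    # the handle is closed here even if an exception was raised
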
diff -r 67fc87eea9cb66e5ce7fe308eb5859bc133993da -r 8521b78302398b431cb8fa664c100001a4781190 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -553,8 +553,8 @@
r,g,b,a = cmap(rel)
if alpha is None: alpha = a
self.add_gaussian(v, w, [r,g,b,alpha])
- print "Adding gaussian at %s with width %s and colors %s" % (
- v, w, (r,g,b,alpha))
+ mylog.debug("Adding gaussian at %s with width %s and colors %s" % (
+ v, w, (r,g,b,alpha)))
def add_layers(self, N, w=None, mi=None, ma=None, alpha = None,
colormap="gist_stern", col_bounds = None):
@@ -690,7 +690,7 @@
# Now we set up the scattering
scat = (johnson_filters[f]["Lchar"]**-4 / mscat)*anorm
tf = TransferFunction(rho_bounds)
- print "Adding: %s with relative scattering %s" % (f, scat)
+ mylog.debug("Adding: %s with relative scattering %s" % (f, scat))
tf.y *= 0.0; tf.y += scat
self.add_field_table(tf, 1, weight_field_id = 1)
self.link_channels(i+3, i+3)
https://bitbucket.org/yt_analysis/yt/changeset/6a91e1d32321/
changeset: 6a91e1d32321
branch: yt
user: MatthewTurk
date: 2011-11-09 19:48:08
summary: Fixing a few problems with the GDF and Enzo frontends under NumPy 1.6.1
affected #: 2 files
diff -r 8521b78302398b431cb8fa664c100001a4781190 -r 6a91e1d3232102b733e2d313ab537a1ce4c0434d yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -254,7 +254,9 @@
self.__pointer_handler(vv)
pbar.finish()
self._fill_arrays(ei, si, LE, RE, np)
- self.grids = na.array(self.grids, dtype='object')
+ temp_grids = na.empty(len(grids), dtype='object')
+ temp_grids[:] = self.grids
+ self.grids = temp_grids
self.filenames = fn
self._store_binary_hierarchy()
t2 = time.time()
diff -r 8521b78302398b431cb8fa664c100001a4781190 -r 6a91e1d3232102b733e2d313ab537a1ce4c0434d yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -100,12 +100,12 @@
# 'Chombo_global'
levels = f.listnames()[1:]
dxs=[]
- self.grids = []
+ self.grids = na.empty(self.num_grids, dtype='object')
for i, grid in enumerate(f['data'].keys()):
- self.grids.append(self.grid(i, self, f['grid_level'][i],
- f['grid_left_index'][i],
- f['grid_dimensions'][i]))
- self.grids[-1]._level_id = f['grid_level'][i]
+ self.grids[i] = self.grid(i, self, f['grid_level'][i],
+ f['grid_left_index'][i],
+ f['grid_dimensions'][i])
+ self.grids[i]._level_id = f['grid_level'][i]
dx = (self.parameter_file.domain_right_edge-
self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
@@ -116,7 +116,6 @@
self.grid_dimensions = f['grid_dimensions'][:]
self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
self.grid_particle_count = f['grid_particle_count'][:]
- self.grids = na.array(self.grids, dtype='object')
def _populate_grid_objects(self):
for g in self.grids:
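
The NumPy 1.6.1 problem being worked around: na.array(list_of_grids, dtype='object') can trip NumPy's shape inference when the grid objects look sequence-like, so the fix preallocates an object array and assigns into it, which never inspects the elements. A sketch of the pattern (the grid class is a hypothetical stand-in):

    import numpy as na               # yt's conventional alias for numpy

    class SomeGrid(object):          # hypothetical stand-in for an AMR grid
        def __init__(self, i):
            self.id = i
        def __len__(self):           # sequence-like, which confuses na.array
            return 3

    grids = [SomeGrid(i) for i in range(4)]
    temp_grids = na.empty(len(grids), dtype='object')
    temp_grids[:] = grids            # element-wise assignment, no inference
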
https://bitbucket.org/yt_analysis/yt/changeset/1fda5d8ba5a0/
changeset: 1fda5d8ba5a0
branch: yt
user: MatthewTurk
date: 2011-11-09 19:51:26
summary: Ensuring 32-bit grid dimensions
affected #: 1 file
diff -r 6a91e1d3232102b733e2d313ab537a1ce4c0434d -r 1fda5d8ba5a0373ce202c6e3241500db2389a13a yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -113,7 +113,7 @@
dxs.append(dx)
dx = na.array(dxs)
self.grid_left_edge = self.parameter_file.domain_left_edge + dx*f['grid_left_index'][:]
- self.grid_dimensions = f['grid_dimensions'][:]
+ self.grid_dimensions = f['grid_dimensions'][:].astype("int32")
self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
self.grid_particle_count = f['grid_particle_count'][:]
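
h5py returns integer datasets at whatever width they were stored, commonly int64, while downstream C code in yt expects 32-bit grid dimensions, hence the explicit cast. A minimal sketch (the file name is a placeholder):

    import h5py
    f = h5py.File("example.gdf", "r")
    dims = f['grid_dimensions'][:].astype("int32")   # e.g. int64 -> int32
    f.close()
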
https://bitbucket.org/yt_analysis/yt/changeset/81feded84333/
changeset: 81feded84333
branch: yt
user: MatthewTurk
date: 2011-11-09 20:33:37
summary: Merging for bugfix
affected #: 1 file
diff -r 1fda5d8ba5a0373ce202c6e3241500db2389a13a -r 81feded84333a3ed48e592f81ea7e62e94c96597 yt/data_objects/object_finding_mixin.py
--- a/yt/data_objects/object_finding_mixin.py
+++ b/yt/data_objects/object_finding_mixin.py
@@ -185,20 +185,21 @@
return self.grids[grid_i], grid_i
def get_periodic_box_grids(self, left_edge, right_edge):
- left_edge = na.array(left_edge)
- right_edge = na.array(right_edge)
mask = na.zeros(self.grids.shape, dtype='bool')
dl = self.parameter_file.domain_left_edge
dr = self.parameter_file.domain_right_edge
+ left_edge = na.array(left_edge)
+ right_edge = na.array(right_edge)
+ dw = dr - dl
+ left_dist = left_edge - dl
db = right_edge - left_edge
for off_x in [-1, 0, 1]:
nle = left_edge.copy()
- nre = left_edge.copy()
- nle[0] = dl[0] + (dr[0]-dl[0])*off_x + left_edge[0]
+ nle[0] = (dw[0]*off_x + dl[0]) + left_dist[0]
for off_y in [-1, 0, 1]:
- nle[1] = dl[1] + (dr[1]-dl[1])*off_y + left_edge[1]
+ nle[1] = (dw[1]*off_y + dl[1]) + left_dist[1]
for off_z in [-1, 0, 1]:
- nle[2] = dl[2] + (dr[2]-dl[2])*off_z + left_edge[2]
+ nle[2] = (dw[2]*off_z + dl[2]) + left_dist[2]
nre = nle + db
g, gi = self.get_box_grids(nle, nre)
mask[gi] = True
@@ -215,20 +216,21 @@
return self.grids[mask], na.where(mask)
def get_periodic_box_grids_below_level(self, left_edge, right_edge, level):
- left_edge = na.array(left_edge)
- right_edge = na.array(right_edge)
mask = na.zeros(self.grids.shape, dtype='bool')
dl = self.parameter_file.domain_left_edge
dr = self.parameter_file.domain_right_edge
+ left_edge = na.array(left_edge)
+ right_edge = na.array(right_edge)
+ dw = dr - dl
+ left_dist = left_edge - dl
db = right_edge - left_edge
for off_x in [-1, 0, 1]:
nle = left_edge.copy()
- nre = left_edge.copy()
- nle[0] = dl[0] + (dr[0]-dl[0])*off_x + left_edge[0]
+ nle[0] = (dw[0]*off_x + dl[0]) + left_dist[0]
for off_y in [-1, 0, 1]:
- nle[1] = dl[1] + (dr[1]-dl[1])*off_y + left_edge[1]
+ nle[1] = (dw[1]*off_y + dl[1]) + left_dist[1]
for off_z in [-1, 0, 1]:
- nle[2] = dl[2] + (dr[2]-dl[2])*off_z + left_edge[2]
+ nle[2] = (dw[2]*off_z + dl[2]) + left_dist[2]
nre = nle + db
g, gi = self.get_box_grids_below_level(nle, nre, level)
mask[gi] = True
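
The rewritten loops precompute the domain width dw and the box's offset from the domain left edge once, so each periodic image is just a whole-domain shift; they also drop the redundant nre = left_edge.copy(), since nre is recomputed from nle every iteration. A worked one-axis example with illustrative values:

    import numpy as np
    dl, dr = np.array([0.0]), np.array([1.0])   # domain edges (example)
    dw = dr - dl                                # domain width
    left_edge = np.array([0.9])
    left_dist = left_edge - dl
    for off_x in (-1, 0, 1):
        nle0 = (dw[0] * off_x + dl[0]) + left_dist[0]
        # off_x = -1 -> -0.1, 0 -> 0.9, +1 -> 1.9: the three periodic images
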
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.