[yt-svn] commit/yt: 46 new changesets
commits-noreply at bitbucket.org
Fri Nov 1 09:02:55 PDT 2013
46 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/29e24eb0f722/
Changeset: 29e24eb0f722
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-16 04:16:09
Summary: Missed a return statement for marching cubes.
Affected #: 1 file
diff -r 5df0d5742ff2e8abcb6dd032f82247af9c6f0409 -r 29e24eb0f722d4e4d907b58ed1398eee652a8bdf yt/utilities/lib/marching_cubes.pyx
--- a/yt/utilities/lib/marching_cubes.pyx
+++ b/yt/utilities/lib/marching_cubes.pyx
@@ -535,6 +535,7 @@
vertices = np.zeros((triangles.count*3,3), dtype='float64')
if do_sample == 0:
FillAndWipeTriangles(vertices, triangles.first)
+ return vertices
cdef int nskip
if do_sample == 1:
nskip = 1
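For context: the bug being fixed is simple fall-through. The do_sample == 0 path filled the vertex array and then kept executing the sampling code that follows, which only the sampling callers need. A rough pure-Python sketch of the same control flow (names and helpers are illustrative, not the actual Cython routine):

    import numpy as np

    def extract_isosurface(triangles, do_sample=0):
        # triangles: list of (3, 3) arrays, one row per vertex.
        vertices = np.zeros((len(triangles) * 3, 3), dtype="float64")
        for i, tri in enumerate(triangles):
            vertices[3 * i:3 * i + 3, :] = tri     # FillAndWipeTriangles analogue
        if do_sample == 0:
            return vertices                        # the missing early return
        nskip = 1 if do_sample == 1 else 3
        sample = vertices[::nskip, 0].copy()       # sampled path, reached only on request
        return vertices, sample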
https://bitbucket.org/yt_analysis/yt/commits/09fcbdcc5ae8/
Changeset: 09fcbdcc5ae8
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-19 00:18:29
Summary: Fixing NMSU-ART root level and a bug in initializer of IOHandler.
Affected #: 2 files
diff -r 29e24eb0f722d4e4d907b58ed1398eee652a8bdf -r 09fcbdcc5ae8b38bc37a5a95901d936303497086 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -459,8 +459,9 @@
for j in range(2):
for k in range(2):
ii = ((k*2)+j)*2+i
+ # Note: C order because our index converts C to F.
source[field][:,ii] = \
- dt[i::2,j::2,k::2].ravel(order="F")
+ dt[i::2,j::2,k::2].ravel(order="C")
oct_handler.fill_level(0, levels, cell_inds, file_inds, tr, source)
del source
# Now we continue with the additional levels.
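The order="F" to order="C" change above is easy to sanity-check with plain NumPy: for a strided sub-block, order="C" walks the last index fastest while order="F" walks the first index fastest, so the two flattenings differ as soon as the block has more than one cell per axis. A tiny illustration (toy data, not an actual ART octree):

    import numpy as np

    dt = np.arange(64, dtype="float64").reshape(4, 4, 4)
    block = dt[0::2, 1::2, 0::2]             # one of the eight child octants
    c_order = block.ravel(order="C")         # last index (k) varies fastest
    f_order = block.ravel(order="F")         # first index (i) varies fastest
    print(np.array_equal(c_order, f_order))  # False: the two fill orders disagree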
diff -r 29e24eb0f722d4e4d907b58ed1398eee652a8bdf -r 09fcbdcc5ae8b38bc37a5a95901d936303497086 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -36,10 +36,10 @@
masks = None
caching = True
- def __init__(self):
+ def __init__(self, *args, **kwargs):
self.cache = {}
self.masks = {}
- super(IOHandlerART, self).__init__()
+ super(IOHandlerART, self).__init__(*args, **kwargs)
def _read_fluid_selection(self, chunks, selector, fields, size):
# Chunks in this case will have affiliated domain subset objects
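The IOHandlerART change is the usual cooperative-constructor fix: accept *args/**kwargs and forward them, so arguments added to the base IOHandler signature (such as the parameter file) still reach it. A generic sketch of the pattern, with made-up class names rather than the real yt hierarchy:

    class BaseIOHandler(object):
        def __init__(self, pf):
            self.pf = pf

    class IOHandlerExample(BaseIOHandler):
        def __init__(self, *args, **kwargs):
            self.cache = {}
            self.masks = {}
            # Forward everything so required base-class arguments arrive intact.
            super(IOHandlerExample, self).__init__(*args, **kwargs)

    handler = IOHandlerExample("my_dataset")   # pf is passed through to the base class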
https://bitbucket.org/yt_analysis/yt/commits/b94c1e8965b4/
Changeset: b94c1e8965b4
Branch: yt-3.0
User: xarthisius
Date: 2013-10-12 12:15:13
Summary: [gdf] update wrt recent changes, projections and slices are working now
Affected #: 3 files
diff -r 5df0d5742ff2e8abcb6dd032f82247af9c6f0409 -r b94c1e8965b4174aadbd35d2d7bfd93d6d77ea52 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -16,35 +16,37 @@
import h5py
import numpy as np
import weakref
-from yt.funcs import *
+import os
+from yt.funcs import \
+ just_one, ensure_tuple
from yt.data_objects.grid_patch import \
- AMRGridPatch
+ AMRGridPatch
from yt.geometry.grid_geometry_handler import \
- GridGeometryHandler
+ GridGeometryHandler
from yt.data_objects.static_output import \
- StaticOutput
+ StaticOutput
from yt.utilities.lib import \
get_box_grids_level
-from yt.utilities.io_handler import \
- io_registry
from yt.utilities.definitions import \
mpc_conversion, sec_conversion
from .fields import GDFFieldInfo, KnownGDFFields
from yt.data_objects.field_info_container import \
- FieldInfoContainer, NullFunc
-import pdb
+ NullFunc
+
def _get_convert(fname):
def _conv(data):
- return data.convert(fname)
+ return 1.0 # data.convert(fname) FIXME
return _conv
+
class GDFGrid(AMRGridPatch):
_id_offset = 0
+
def __init__(self, id, hierarchy, level, start, dimensions):
- AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
- hierarchy = hierarchy)
+ AMRGridPatch.__init__(self, id, filename=hierarchy.hierarchy_filename,
+ hierarchy=hierarchy)
self.Parent = []
self.Children = []
self.Level = level
@@ -59,39 +61,41 @@
if len(self.Parent) > 0:
self.dds = self.Parent[0].dds / self.pf.refine_by
else:
- LE, RE = self.hierarchy.grid_left_edge[id,:], \
- self.hierarchy.grid_right_edge[id,:]
- self.dds = np.array((RE-LE)/self.ActiveDimensions)
+ LE, RE = self.hierarchy.grid_left_edge[id, :], \
+ self.hierarchy.grid_right_edge[id, :]
+ self.dds = np.array((RE - LE) / self.ActiveDimensions)
if self.pf.data_software != "piernik":
- if self.pf.dimensionality < 2: self.dds[1] = 1.0
- if self.pf.dimensionality < 3: self.dds[2] = 1.0
- self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+ if self.pf.dimensionality < 2:
+ self.dds[1] = 1.0
+ if self.pf.dimensionality < 3:
+ self.dds[2] = 1.0
+ self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = \
+ self.dds
- @property
- def filename(self):
- return None
class GDFHierarchy(GridGeometryHandler):
grid = GDFGrid
+ filtered_particle_types = []
def __init__(self, pf, data_style='grid_data_format'):
self.parameter_file = weakref.proxy(pf)
+ self.hierarchy_filename = self.parameter_file.parameter_filename
+ h5f = h5py.File(self.hierarchy_filename, 'r')
self.data_style = data_style
+ GridGeometryHandler.__init__(self, pf, data_style)
self.max_level = 10 # FIXME
# for now, the hierarchy file is the parameter file!
- self.hierarchy_filename = self.parameter_file.parameter_filename
self.directory = os.path.dirname(self.hierarchy_filename)
- self._fhandle = h5py.File(self.hierarchy_filename,'r')
- GridGeometryHandler.__init__(self,pf,data_style)
-
- self._fhandle.close()
+ h5f.close()
def _initialize_data_storage(self):
pass
def _detect_fields(self):
- self.field_list = self._fhandle['field_types'].keys()
+ h5f = h5py.File(self.hierarchy_filename, 'r')
+ self.field_list = h5f['field_types'].keys()
+ h5f.close()
def _setup_classes(self):
dd = self._get_data_reader_dict()
@@ -99,15 +103,17 @@
self.object_types.sort()
def _count_grids(self):
- self.num_grids = self._fhandle['/grid_parent_id'].shape[0]
+ h5f = h5py.File(self.hierarchy_filename, 'r')
+ self.num_grids = h5f['/grid_parent_id'].shape[0]
+ h5f.close()
def _parse_hierarchy(self):
- f = self._fhandle
+ h5f = h5py.File(self.hierarchy_filename, 'r')
dxs = []
self.grids = np.empty(self.num_grids, dtype='object')
- levels = (f['grid_level'][:]).copy()
- glis = (f['grid_left_index'][:]).copy()
- gdims = (f['grid_dimensions'][:]).copy()
+ levels = (h5f['grid_level'][:]).copy()
+ glis = (h5f['grid_left_index'][:]).copy()
+ gdims = (h5f['grid_dimensions'][:]).copy()
active_dims = ~((np.max(gdims, axis=0) == 1) &
(self.parameter_file.domain_dimensions == 1))
@@ -117,16 +123,18 @@
gdims[i])
self.grids[i]._level_id = levels[i]
- dx = (self.parameter_file.domain_right_edge-
- self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
- dx[active_dims] = dx[active_dims]/self.parameter_file.refine_by**(levels[i])
+ dx = (self.parameter_file.domain_right_edge -
+ self.parameter_file.domain_left_edge) / \
+ self.parameter_file.domain_dimensions
+ dx[active_dims] /= self.parameter_file.refine_by ** levels[i]
dxs.append(dx)
dx = np.array(dxs)
- self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
+ self.grid_left_edge = self.parameter_file.domain_left_edge + dx * glis
self.grid_dimensions = gdims.astype("int32")
- self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
- self.grid_particle_count = f['grid_particle_count'][:]
+ self.grid_right_edge = self.grid_left_edge + dx * self.grid_dimensions
+ self.grid_particle_count = h5f['grid_particle_count'][:]
del levels, glis, gdims
+ h5f.close()
def _populate_grid_objects(self):
mask = np.empty(self.grids.size, dtype='int32')
@@ -138,8 +146,8 @@
g.Children = self._get_grid_children(g)
for g1 in g.Children:
g1.Parent.append(g)
- get_box_grids_level(self.grid_left_edge[gi,:],
- self.grid_right_edge[gi,:],
+ get_box_grids_level(self.grid_left_edge[gi, :],
+ self.grid_right_edge[gi, :],
self.grid_levels[gi],
self.grid_left_edge, self.grid_right_edge,
self.grid_levels, mask)
@@ -158,32 +166,33 @@
Gets back all the grids between a left edge and right edge
"""
eps = np.finfo(np.float64).eps
- grid_i = np.where((np.all((self.grid_right_edge - left_edge) > eps, axis=1) \
- & np.all((right_edge - self.grid_left_edge) > eps, axis=1)) == True)
+ grid_i = np.where(np.all((self.grid_right_edge - left_edge) > eps, axis=1) &
+ np.all((right_edge - self.grid_left_edge) > eps, axis=1))
return self.grids[grid_i], grid_i
-
def _get_grid_children(self, grid):
mask = np.zeros(self.num_grids, dtype='bool')
grids, grid_ind = self._get_box_grids(grid.LeftEdge, grid.RightEdge)
mask[grid_ind] = True
return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
+
class GDFStaticOutput(StaticOutput):
_hierarchy_class = GDFHierarchy
_fieldinfo_fallback = GDFFieldInfo
_fieldinfo_known = KnownGDFFields
def __init__(self, filename, data_style='grid_data_format',
- storage_filename = None):
+ storage_filename=None):
StaticOutput.__init__(self, filename, data_style)
self.storage_filename = storage_filename
self.filename = filename
def _set_units(self):
"""
- Generates the conversion to various physical _units based on the parameter file
+ Generates the conversion to various physical _units
+ based on the parameter file
"""
self.units = {}
self.time_units = {}
@@ -192,16 +201,17 @@
self.time_units['1'] = 1
self.units['1'] = 1.0
self.units['cm'] = 1.0
- self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+ self.units['unitary'] = 1.0 / (self.domain_right_edge -
+ self.domain_left_edge).max()
for unit in mpc_conversion.keys():
- self.units[unit] = 1.0 * mpc_conversion[unit] / mpc_conversion["cm"]
+ self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
for unit in sec_conversion.keys():
self.time_units[unit] = 1.0 / sec_conversion[unit]
# This should be improved.
- self._handle = h5py.File(self.parameter_filename, "r")
- for field_name in self._handle["/field_types"]:
- current_field = self._handle["/field_types/%s" % field_name]
+ h5f = h5py.File(self.parameter_filename, "r")
+ for field_name in h5f["/field_types"]:
+ current_field = h5f["/field_types/%s" % field_name]
if 'field_to_cgs' in current_field.attrs:
self.units[field_name] = current_field.attrs['field_to_cgs']
else:
@@ -210,15 +220,16 @@
if type(current_field.attrs['field_units']) == str:
current_fields_unit = current_field.attrs['field_units']
else:
- current_fields_unit = just_one(current_field.attrs['field_units'])
+ current_fields_unit = \
+ just_one(current_field.attrs['field_units'])
else:
current_fields_unit = ""
- self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,
- units=current_fields_unit, projected_units="",
- convert_function=_get_convert(field_name))
+ self._fieldinfo_known.add_field(
+ field_name, function=NullFunc, take_log=False,
+ units=current_fields_unit, projected_units="",
+ convert_function=_get_convert(field_name))
- self._handle.close()
- del self._handle
+ h5f.close()
def _parse_parameter_file(self):
self._handle = h5py.File(self.parameter_filename, "r")
@@ -232,13 +243,15 @@
self.domain_right_edge = sp["domain_right_edge"][:]
self.domain_dimensions = sp["domain_dimensions"][:]
refine_by = sp["refine_by"]
- if refine_by is None: refine_by = 2
+ if refine_by is None:
+ refine_by = 2
self.refine_by = refine_by
self.dimensionality = sp["dimensionality"]
self.current_time = sp["current_time"]
self.unique_identifier = sp["unique_identifier"]
self.cosmological_simulation = sp["cosmological_simulation"]
- if sp["num_ghost_zones"] != 0: raise RuntimeError
+ if sp["num_ghost_zones"] != 0:
+ raise RuntimeError
self.num_ghost_zones = sp["num_ghost_zones"]
self.field_ordering = sp["field_ordering"]
self.boundary_conditions = sp["boundary_conditions"][:]
@@ -252,15 +265,16 @@
else:
self.current_redshift = self.omega_lambda = self.omega_matter = \
self.hubble_constant = self.cosmological_simulation = 0.0
- self.parameters['Time'] = 1.0 # Hardcode time conversion for now.
- self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
+ self.parameters['Time'] = 1.0 # Hardcode time conversion for now.
+ # Hardcode for now until field staggering is supported.
+ self.parameters["HydroMethod"] = 0
self._handle.close()
del self._handle
@classmethod
def _is_valid(self, *args, **kwargs):
try:
- fileh = h5py.File(args[0],'r')
+ fileh = h5py.File(args[0], 'r')
if "gridded_data_format" in fileh:
fileh.close()
return True
@@ -271,4 +285,3 @@
def __repr__(self):
return self.basename.rsplit(".", 1)[0]
-
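The tidied _get_box_grids test in the hunk above is the standard axis-aligned overlap check: a grid overlaps the query box exactly when, on every axis, its right edge lies beyond the box's left edge and the box's right edge lies beyond the grid's left edge. A standalone NumPy illustration with two toy grids (not a real hierarchy):

    import numpy as np

    grid_left_edge  = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
    grid_right_edge = np.array([[0.5, 0.5, 0.5], [1.0, 1.0, 1.0]])
    left_edge  = np.array([0.4, 0.4, 0.4])
    right_edge = np.array([0.6, 0.6, 0.6])

    eps = np.finfo(np.float64).eps
    grid_i = np.where(np.all((grid_right_edge - left_edge) > eps, axis=1) &
                      np.all((right_edge - grid_left_edge) > eps, axis=1))
    print(grid_i[0])                         # [0 1]: the box straddles both grids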
diff -r 5df0d5742ff2e8abcb6dd032f82247af9c6f0409 -r b94c1e8965b4174aadbd35d2d7bfd93d6d77ea52 yt/frontends/gdf/fields.py
--- a/yt/frontends/gdf/fields.py
+++ b/yt/frontends/gdf/fields.py
@@ -16,14 +16,8 @@
from yt.data_objects.field_info_container import \
FieldInfoContainer, \
FieldInfo, \
- ValidateParameter, \
- ValidateDataField, \
- ValidateProperty, \
- ValidateSpatial, \
- ValidateGridType, \
NullFunc, \
TranslationFunc
-import yt.fields.universal_fields
log_translation_dict = {"Density": "density",
"Pressure": "pressure"}
@@ -31,7 +25,7 @@
translation_dict = {"x-velocity": "velocity_x",
"y-velocity": "velocity_y",
"z-velocity": "velocity_z"}
-
+
# translation_dict = {"mag_field_x": "cell_centered_B_x ",
# "mag_field_y": "cell_centered_B_y ",
# "mag_field_z": "cell_centered_B_z "}
@@ -43,39 +37,39 @@
add_gdf_field = KnownGDFFields.add_field
add_gdf_field("density", function=NullFunc, take_log=True,
- units=r"\rm{g}/\rm{cm}^3",
- projected_units =r"\rm{g}/\rm{cm}^2")
+ units=r"\rm{g}/\rm{cm}^3",
+ projected_units=r"\rm{g}/\rm{cm}^2")
add_gdf_field("specific_energy", function=NullFunc, take_log=True,
- units=r"\rm{erg}/\rm{g}")
+ units=r"\rm{erg}/\rm{g}")
add_gdf_field("pressure", function=NullFunc, take_log=True,
- units=r"\rm{erg}/\rm{g}")
+ units=r"\rm{erg}/\rm{g}")
add_gdf_field("velocity_x", function=NullFunc, take_log=False,
- units=r"\rm{cm}/\rm{s}")
+ units=r"\rm{cm}/\rm{s}")
add_gdf_field("velocity_y", function=NullFunc, take_log=False,
- units=r"\rm{cm}/\rm{s}")
+ units=r"\rm{cm}/\rm{s}")
add_gdf_field("velocity_z", function=NullFunc, take_log=False,
- units=r"\rm{cm}/\rm{s}")
+ units=r"\rm{cm}/\rm{s}")
add_gdf_field("mag_field_x", function=NullFunc, take_log=False,
- units=r"\rm{cm}/\rm{s}")
+ units=r"\rm{cm}/\rm{s}")
add_gdf_field("mag_field_y", function=NullFunc, take_log=False,
- units=r"\rm{cm}/\rm{s}")
+ units=r"\rm{cm}/\rm{s}")
add_gdf_field("mag_field_z", function=NullFunc, take_log=False,
- units=r"\rm{cm}/\rm{s}")
+ units=r"\rm{cm}/\rm{s}")
-for f,v in log_translation_dict.items():
+for f, v in log_translation_dict.items():
add_field(f, TranslationFunc(v), take_log=True,
units=KnownGDFFields[v].get_units(),
projected_units=KnownGDFFields[v].get_projected_units())
-for f,v in translation_dict.items():
+for f, v in translation_dict.items():
add_field(f, TranslationFunc(v), take_log=False,
units=KnownGDFFields[v].get_units(),
projected_units=KnownGDFFields[v].get_projected_units())
diff -r 5df0d5742ff2e8abcb6dd032f82247af9c6f0409 -r b94c1e8965b4174aadbd35d2d7bfd93d6d77ea52 yt/frontends/gdf/io.py
--- a/yt/frontends/gdf/io.py
+++ b/yt/frontends/gdf/io.py
@@ -14,14 +14,20 @@
#-----------------------------------------------------------------------------
import numpy as np
+import h5py
+import exceptions
from yt.funcs import \
mylog
from yt.utilities.io_handler import \
BaseIOHandler
-def field_dname(grid_id, field_name):
- return "/data/grid_%010i/%s" % (grid_id, field_name)
+def _grid_dname(grid_id):
+ return "/data/grid_%010i" % grid_id
+
+
+def _field_dname(grid_id, field_name):
+ return "%s/%s" % (_grid_dname(grid_id), field_name)
# TODO all particle bits were removed
@@ -30,50 +36,84 @@
_offset_string = 'data:offsets=0'
_data_string = 'data:datatype=0'
- def __init__(self, pf, *args, **kwargs):
- # TODO check if _num_per_stride is needed
- self._num_per_stride = kwargs.pop("num_per_stride", 1000000)
- BaseIOHandler.__init__(self, *args, **kwargs)
- self.pf = pf
- self._handle = pf._handle
+ def _read_field_names(self, grid):
+ if grid.filename is None:
+ return []
+ print 'grid.filename = %', grid.filename
+ h5f = h5py.File(grid.filename, mode="r")
+ group = h5f[_grid_dname(grid.id)]
+ fields = []
+ for name, v in group.iteritems():
+ # NOTE: This won't work with 1D datasets.
+ if not hasattr(v, "shape"):
+ continue
+ elif len(v.dims) == 1:
+ fields.append(("io", str(name)))
+ else:
+ fields.append(("gas", str(name)))
+ h5f.close()
+ return fields
-
- def _read_data_set(self, grid, field):
- if self.pf.field_ordering == 1:
- data = self._handle[field_dname(grid.id, field)][:].swapaxes(0, 2)
- else:
- data = self._handle[field_dname(grid.id, field)][:, :, :]
- return data.astype("float64")
-
- def _read_data_slice(self, grid, field, axis, coord):
- slc = [slice(None), slice(None), slice(None)]
- slc[axis] = slice(coord, coord + 1)
- if self.pf.field_ordering == 1:
- data = self._handle[field_dname(grid.id, field)][:].swapaxes(0, 2)[slc]
- else:
- data = self._handle[field_dname(grid.id, field)][slc]
- return data.astype("float64")
+ @property
+ def _read_exception(self):
+ return (exceptions.KeyError, )
def _read_fluid_selection(self, chunks, selector, fields, size):
+ rv = {}
chunks = list(chunks)
+
+ if selector.__class__.__name__ == "GridSelector":
+ if not (len(chunks) == len(chunks[0].objs) == 1):
+ raise RuntimeError
+ grid = chunks[0].objs[0]
+ h5f = h5py.File(grid.filename, 'r')
+ gds = h5f.get(_grid_dname(grid.id))
+ for ftype, fname in fields:
+ if self.pf.field_ordering == 1:
+ rv[(ftype, fname)] = gds.get(fname).value.swapaxes(0, 2)
+ else:
+ rv[(ftype, fname)] = gds.get(fname).value
+ h5f.close()
+ return rv
+ if size is None:
+ size = sum((grid.count(selector) for chunk in chunks
+ for grid in chunk.objs))
+
if any((ftype != "gas" for ftype, fname in fields)):
raise NotImplementedError
- fhandle = self._handle
- rv = {}
+
for field in fields:
ftype, fname = field
- rv[field] = np.empty(
- size, dtype=fhandle[field_dname(0, fname)].dtype)
+ fsize = size
+ # check the dtype instead
+ rv[field] = np.empty(fsize, dtype="float64")
ngrids = sum(len(chunk.objs) for chunk in chunks)
mylog.debug("Reading %s cells of %s fields in %s blocks",
size, [fname for ftype, fname in fields], ngrids)
- for field in fields:
- ftype, fname = field
- ind = 0
- for chunk in chunks:
- for grid in chunk.objs:
- data = fhandle[field_dname(grid.id, fname)][:]
- if self.pf.field_ordering == 1:
- data = data.swapaxes(0, 2)
- ind += g.select(selector, data, rv[field], ind) # caches
+ ind = 0
+ for chunk in chunks:
+ fid = None
+ for grid in chunk.objs:
+ if grid.filename is None:
+ continue
+ if fid is None:
+ fid = h5py.h5f.open(grid.filename, h5py.h5f.ACC_RDONLY)
+ if self.pf.field_ordering == 1:
+ # check the dtype instead
+ data = np.empty(grid.ActiveDimensions[::-1],
+ dtype="float64")
+ data_view = data.swapaxes(0, 2)
+ else:
+ # check the dtype instead
+ data_view = data = np.empty(grid.ActiveDimensions,
+ dtype="float64")
+ for field in fields:
+ ftype, fname = field
+ dg = h5py.h5d.open(fid, _field_dname(grid.id, fname))
+ dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
+ # caches
+ nd = grid.select(selector, data_view, rv[field], ind)
+ ind += nd # I don't get that part, only last nd is added
+ if fid is not None:
+ fid.close()
return rv
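One detail in the rewritten _read_fluid_selection worth spelling out is the ordering trick: when field_ordering == 1 the buffer is allocated with the reversed shape, filled in the layout the file provides, and then exposed through a swapaxes(0, 2) view, so no copy is made. A minimal NumPy-only sketch (the real code fills the buffer via the low-level h5py read):

    import numpy as np

    dims = (8, 4, 2)                            # ActiveDimensions as (x, y, z)
    # Allocate with the reversed shape so the on-disk layout can be read
    # straight into the buffer ...
    data = np.empty(dims[::-1], dtype="float64")
    data[...] = np.arange(data.size).reshape(data.shape)   # stand-in for the HDF5 read
    # ... then swap axes to index it as (x, y, z) without copying.
    data_view = data.swapaxes(0, 2)
    print(data_view.shape)                      # (8, 4, 2)
    print(data_view.base is data)               # True: a view, not a copy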
https://bitbucket.org/yt_analysis/yt/commits/529acf3ab809/
Changeset: 529acf3ab809
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-19 05:08:46
Summary: Merged in xarthisius/yt-3.0 (pull request #116)
Update for GDF frontend
Affected #: 3 files
diff -r 09fcbdcc5ae8b38bc37a5a95901d936303497086 -r 529acf3ab809bbc3031a75197d3c8fffb443f6b2 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -16,35 +16,37 @@
import h5py
import numpy as np
import weakref
-from yt.funcs import *
+import os
+from yt.funcs import \
+ just_one, ensure_tuple
from yt.data_objects.grid_patch import \
- AMRGridPatch
+ AMRGridPatch
from yt.geometry.grid_geometry_handler import \
- GridGeometryHandler
+ GridGeometryHandler
from yt.data_objects.static_output import \
- StaticOutput
+ StaticOutput
from yt.utilities.lib import \
get_box_grids_level
-from yt.utilities.io_handler import \
- io_registry
from yt.utilities.definitions import \
mpc_conversion, sec_conversion
from .fields import GDFFieldInfo, KnownGDFFields
from yt.data_objects.field_info_container import \
- FieldInfoContainer, NullFunc
-import pdb
+ NullFunc
+
def _get_convert(fname):
def _conv(data):
- return data.convert(fname)
+ return 1.0 # data.convert(fname) FIXME
return _conv
+
class GDFGrid(AMRGridPatch):
_id_offset = 0
+
def __init__(self, id, hierarchy, level, start, dimensions):
- AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
- hierarchy = hierarchy)
+ AMRGridPatch.__init__(self, id, filename=hierarchy.hierarchy_filename,
+ hierarchy=hierarchy)
self.Parent = []
self.Children = []
self.Level = level
@@ -59,39 +61,41 @@
if len(self.Parent) > 0:
self.dds = self.Parent[0].dds / self.pf.refine_by
else:
- LE, RE = self.hierarchy.grid_left_edge[id,:], \
- self.hierarchy.grid_right_edge[id,:]
- self.dds = np.array((RE-LE)/self.ActiveDimensions)
+ LE, RE = self.hierarchy.grid_left_edge[id, :], \
+ self.hierarchy.grid_right_edge[id, :]
+ self.dds = np.array((RE - LE) / self.ActiveDimensions)
if self.pf.data_software != "piernik":
- if self.pf.dimensionality < 2: self.dds[1] = 1.0
- if self.pf.dimensionality < 3: self.dds[2] = 1.0
- self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+ if self.pf.dimensionality < 2:
+ self.dds[1] = 1.0
+ if self.pf.dimensionality < 3:
+ self.dds[2] = 1.0
+ self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = \
+ self.dds
- @property
- def filename(self):
- return None
class GDFHierarchy(GridGeometryHandler):
grid = GDFGrid
+ filtered_particle_types = []
def __init__(self, pf, data_style='grid_data_format'):
self.parameter_file = weakref.proxy(pf)
+ self.hierarchy_filename = self.parameter_file.parameter_filename
+ h5f = h5py.File(self.hierarchy_filename, 'r')
self.data_style = data_style
+ GridGeometryHandler.__init__(self, pf, data_style)
self.max_level = 10 # FIXME
# for now, the hierarchy file is the parameter file!
- self.hierarchy_filename = self.parameter_file.parameter_filename
self.directory = os.path.dirname(self.hierarchy_filename)
- self._fhandle = h5py.File(self.hierarchy_filename,'r')
- GridGeometryHandler.__init__(self,pf,data_style)
-
- self._fhandle.close()
+ h5f.close()
def _initialize_data_storage(self):
pass
def _detect_fields(self):
- self.field_list = self._fhandle['field_types'].keys()
+ h5f = h5py.File(self.hierarchy_filename, 'r')
+ self.field_list = h5f['field_types'].keys()
+ h5f.close()
def _setup_classes(self):
dd = self._get_data_reader_dict()
@@ -99,15 +103,17 @@
self.object_types.sort()
def _count_grids(self):
- self.num_grids = self._fhandle['/grid_parent_id'].shape[0]
+ h5f = h5py.File(self.hierarchy_filename, 'r')
+ self.num_grids = h5f['/grid_parent_id'].shape[0]
+ h5f.close()
def _parse_hierarchy(self):
- f = self._fhandle
+ h5f = h5py.File(self.hierarchy_filename, 'r')
dxs = []
self.grids = np.empty(self.num_grids, dtype='object')
- levels = (f['grid_level'][:]).copy()
- glis = (f['grid_left_index'][:]).copy()
- gdims = (f['grid_dimensions'][:]).copy()
+ levels = (h5f['grid_level'][:]).copy()
+ glis = (h5f['grid_left_index'][:]).copy()
+ gdims = (h5f['grid_dimensions'][:]).copy()
active_dims = ~((np.max(gdims, axis=0) == 1) &
(self.parameter_file.domain_dimensions == 1))
@@ -117,16 +123,18 @@
gdims[i])
self.grids[i]._level_id = levels[i]
- dx = (self.parameter_file.domain_right_edge-
- self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
- dx[active_dims] = dx[active_dims]/self.parameter_file.refine_by**(levels[i])
+ dx = (self.parameter_file.domain_right_edge -
+ self.parameter_file.domain_left_edge) / \
+ self.parameter_file.domain_dimensions
+ dx[active_dims] /= self.parameter_file.refine_by ** levels[i]
dxs.append(dx)
dx = np.array(dxs)
- self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
+ self.grid_left_edge = self.parameter_file.domain_left_edge + dx * glis
self.grid_dimensions = gdims.astype("int32")
- self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
- self.grid_particle_count = f['grid_particle_count'][:]
+ self.grid_right_edge = self.grid_left_edge + dx * self.grid_dimensions
+ self.grid_particle_count = h5f['grid_particle_count'][:]
del levels, glis, gdims
+ h5f.close()
def _populate_grid_objects(self):
mask = np.empty(self.grids.size, dtype='int32')
@@ -138,8 +146,8 @@
g.Children = self._get_grid_children(g)
for g1 in g.Children:
g1.Parent.append(g)
- get_box_grids_level(self.grid_left_edge[gi,:],
- self.grid_right_edge[gi,:],
+ get_box_grids_level(self.grid_left_edge[gi, :],
+ self.grid_right_edge[gi, :],
self.grid_levels[gi],
self.grid_left_edge, self.grid_right_edge,
self.grid_levels, mask)
@@ -158,32 +166,33 @@
Gets back all the grids between a left edge and right edge
"""
eps = np.finfo(np.float64).eps
- grid_i = np.where((np.all((self.grid_right_edge - left_edge) > eps, axis=1) \
- & np.all((right_edge - self.grid_left_edge) > eps, axis=1)) == True)
+ grid_i = np.where(np.all((self.grid_right_edge - left_edge) > eps, axis=1) &
+ np.all((right_edge - self.grid_left_edge) > eps, axis=1))
return self.grids[grid_i], grid_i
-
def _get_grid_children(self, grid):
mask = np.zeros(self.num_grids, dtype='bool')
grids, grid_ind = self._get_box_grids(grid.LeftEdge, grid.RightEdge)
mask[grid_ind] = True
return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
+
class GDFStaticOutput(StaticOutput):
_hierarchy_class = GDFHierarchy
_fieldinfo_fallback = GDFFieldInfo
_fieldinfo_known = KnownGDFFields
def __init__(self, filename, data_style='grid_data_format',
- storage_filename = None):
+ storage_filename=None):
StaticOutput.__init__(self, filename, data_style)
self.storage_filename = storage_filename
self.filename = filename
def _set_units(self):
"""
- Generates the conversion to various physical _units based on the parameter file
+ Generates the conversion to various physical _units
+ based on the parameter file
"""
self.units = {}
self.time_units = {}
@@ -192,16 +201,17 @@
self.time_units['1'] = 1
self.units['1'] = 1.0
self.units['cm'] = 1.0
- self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+ self.units['unitary'] = 1.0 / (self.domain_right_edge -
+ self.domain_left_edge).max()
for unit in mpc_conversion.keys():
- self.units[unit] = 1.0 * mpc_conversion[unit] / mpc_conversion["cm"]
+ self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
for unit in sec_conversion.keys():
self.time_units[unit] = 1.0 / sec_conversion[unit]
# This should be improved.
- self._handle = h5py.File(self.parameter_filename, "r")
- for field_name in self._handle["/field_types"]:
- current_field = self._handle["/field_types/%s" % field_name]
+ h5f = h5py.File(self.parameter_filename, "r")
+ for field_name in h5f["/field_types"]:
+ current_field = h5f["/field_types/%s" % field_name]
if 'field_to_cgs' in current_field.attrs:
self.units[field_name] = current_field.attrs['field_to_cgs']
else:
@@ -210,15 +220,16 @@
if type(current_field.attrs['field_units']) == str:
current_fields_unit = current_field.attrs['field_units']
else:
- current_fields_unit = just_one(current_field.attrs['field_units'])
+ current_fields_unit = \
+ just_one(current_field.attrs['field_units'])
else:
current_fields_unit = ""
- self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,
- units=current_fields_unit, projected_units="",
- convert_function=_get_convert(field_name))
+ self._fieldinfo_known.add_field(
+ field_name, function=NullFunc, take_log=False,
+ units=current_fields_unit, projected_units="",
+ convert_function=_get_convert(field_name))
- self._handle.close()
- del self._handle
+ h5f.close()
def _parse_parameter_file(self):
self._handle = h5py.File(self.parameter_filename, "r")
@@ -232,13 +243,15 @@
self.domain_right_edge = sp["domain_right_edge"][:]
self.domain_dimensions = sp["domain_dimensions"][:]
refine_by = sp["refine_by"]
- if refine_by is None: refine_by = 2
+ if refine_by is None:
+ refine_by = 2
self.refine_by = refine_by
self.dimensionality = sp["dimensionality"]
self.current_time = sp["current_time"]
self.unique_identifier = sp["unique_identifier"]
self.cosmological_simulation = sp["cosmological_simulation"]
- if sp["num_ghost_zones"] != 0: raise RuntimeError
+ if sp["num_ghost_zones"] != 0:
+ raise RuntimeError
self.num_ghost_zones = sp["num_ghost_zones"]
self.field_ordering = sp["field_ordering"]
self.boundary_conditions = sp["boundary_conditions"][:]
@@ -252,15 +265,16 @@
else:
self.current_redshift = self.omega_lambda = self.omega_matter = \
self.hubble_constant = self.cosmological_simulation = 0.0
- self.parameters['Time'] = 1.0 # Hardcode time conversion for now.
- self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
+ self.parameters['Time'] = 1.0 # Hardcode time conversion for now.
+ # Hardcode for now until field staggering is supported.
+ self.parameters["HydroMethod"] = 0
self._handle.close()
del self._handle
@classmethod
def _is_valid(self, *args, **kwargs):
try:
- fileh = h5py.File(args[0],'r')
+ fileh = h5py.File(args[0], 'r')
if "gridded_data_format" in fileh:
fileh.close()
return True
@@ -271,4 +285,3 @@
def __repr__(self):
return self.basename.rsplit(".", 1)[0]
-
diff -r 09fcbdcc5ae8b38bc37a5a95901d936303497086 -r 529acf3ab809bbc3031a75197d3c8fffb443f6b2 yt/frontends/gdf/fields.py
--- a/yt/frontends/gdf/fields.py
+++ b/yt/frontends/gdf/fields.py
@@ -16,14 +16,8 @@
from yt.data_objects.field_info_container import \
FieldInfoContainer, \
FieldInfo, \
- ValidateParameter, \
- ValidateDataField, \
- ValidateProperty, \
- ValidateSpatial, \
- ValidateGridType, \
NullFunc, \
TranslationFunc
-import yt.fields.universal_fields
log_translation_dict = {"Density": "density",
"Pressure": "pressure"}
@@ -31,7 +25,7 @@
translation_dict = {"x-velocity": "velocity_x",
"y-velocity": "velocity_y",
"z-velocity": "velocity_z"}
-
+
# translation_dict = {"mag_field_x": "cell_centered_B_x ",
# "mag_field_y": "cell_centered_B_y ",
# "mag_field_z": "cell_centered_B_z "}
@@ -43,39 +37,39 @@
add_gdf_field = KnownGDFFields.add_field
add_gdf_field("density", function=NullFunc, take_log=True,
- units=r"\rm{g}/\rm{cm}^3",
- projected_units =r"\rm{g}/\rm{cm}^2")
+ units=r"\rm{g}/\rm{cm}^3",
+ projected_units=r"\rm{g}/\rm{cm}^2")
add_gdf_field("specific_energy", function=NullFunc, take_log=True,
- units=r"\rm{erg}/\rm{g}")
+ units=r"\rm{erg}/\rm{g}")
add_gdf_field("pressure", function=NullFunc, take_log=True,
- units=r"\rm{erg}/\rm{g}")
+ units=r"\rm{erg}/\rm{g}")
add_gdf_field("velocity_x", function=NullFunc, take_log=False,
- units=r"\rm{cm}/\rm{s}")
+ units=r"\rm{cm}/\rm{s}")
add_gdf_field("velocity_y", function=NullFunc, take_log=False,
- units=r"\rm{cm}/\rm{s}")
+ units=r"\rm{cm}/\rm{s}")
add_gdf_field("velocity_z", function=NullFunc, take_log=False,
- units=r"\rm{cm}/\rm{s}")
+ units=r"\rm{cm}/\rm{s}")
add_gdf_field("mag_field_x", function=NullFunc, take_log=False,
- units=r"\rm{cm}/\rm{s}")
+ units=r"\rm{cm}/\rm{s}")
add_gdf_field("mag_field_y", function=NullFunc, take_log=False,
- units=r"\rm{cm}/\rm{s}")
+ units=r"\rm{cm}/\rm{s}")
add_gdf_field("mag_field_z", function=NullFunc, take_log=False,
- units=r"\rm{cm}/\rm{s}")
+ units=r"\rm{cm}/\rm{s}")
-for f,v in log_translation_dict.items():
+for f, v in log_translation_dict.items():
add_field(f, TranslationFunc(v), take_log=True,
units=KnownGDFFields[v].get_units(),
projected_units=KnownGDFFields[v].get_projected_units())
-for f,v in translation_dict.items():
+for f, v in translation_dict.items():
add_field(f, TranslationFunc(v), take_log=False,
units=KnownGDFFields[v].get_units(),
projected_units=KnownGDFFields[v].get_projected_units())
diff -r 09fcbdcc5ae8b38bc37a5a95901d936303497086 -r 529acf3ab809bbc3031a75197d3c8fffb443f6b2 yt/frontends/gdf/io.py
--- a/yt/frontends/gdf/io.py
+++ b/yt/frontends/gdf/io.py
@@ -14,14 +14,20 @@
#-----------------------------------------------------------------------------
import numpy as np
+import h5py
+import exceptions
from yt.funcs import \
mylog
from yt.utilities.io_handler import \
BaseIOHandler
-def field_dname(grid_id, field_name):
- return "/data/grid_%010i/%s" % (grid_id, field_name)
+def _grid_dname(grid_id):
+ return "/data/grid_%010i" % grid_id
+
+
+def _field_dname(grid_id, field_name):
+ return "%s/%s" % (_grid_dname(grid_id), field_name)
# TODO all particle bits were removed
@@ -30,50 +36,84 @@
_offset_string = 'data:offsets=0'
_data_string = 'data:datatype=0'
- def __init__(self, pf, *args, **kwargs):
- # TODO check if _num_per_stride is needed
- self._num_per_stride = kwargs.pop("num_per_stride", 1000000)
- BaseIOHandler.__init__(self, *args, **kwargs)
- self.pf = pf
- self._handle = pf._handle
+ def _read_field_names(self, grid):
+ if grid.filename is None:
+ return []
+ print 'grid.filename = %', grid.filename
+ h5f = h5py.File(grid.filename, mode="r")
+ group = h5f[_grid_dname(grid.id)]
+ fields = []
+ for name, v in group.iteritems():
+ # NOTE: This won't work with 1D datasets.
+ if not hasattr(v, "shape"):
+ continue
+ elif len(v.dims) == 1:
+ fields.append(("io", str(name)))
+ else:
+ fields.append(("gas", str(name)))
+ h5f.close()
+ return fields
-
- def _read_data_set(self, grid, field):
- if self.pf.field_ordering == 1:
- data = self._handle[field_dname(grid.id, field)][:].swapaxes(0, 2)
- else:
- data = self._handle[field_dname(grid.id, field)][:, :, :]
- return data.astype("float64")
-
- def _read_data_slice(self, grid, field, axis, coord):
- slc = [slice(None), slice(None), slice(None)]
- slc[axis] = slice(coord, coord + 1)
- if self.pf.field_ordering == 1:
- data = self._handle[field_dname(grid.id, field)][:].swapaxes(0, 2)[slc]
- else:
- data = self._handle[field_dname(grid.id, field)][slc]
- return data.astype("float64")
+ @property
+ def _read_exception(self):
+ return (exceptions.KeyError, )
def _read_fluid_selection(self, chunks, selector, fields, size):
+ rv = {}
chunks = list(chunks)
+
+ if selector.__class__.__name__ == "GridSelector":
+ if not (len(chunks) == len(chunks[0].objs) == 1):
+ raise RuntimeError
+ grid = chunks[0].objs[0]
+ h5f = h5py.File(grid.filename, 'r')
+ gds = h5f.get(_grid_dname(grid.id))
+ for ftype, fname in fields:
+ if self.pf.field_ordering == 1:
+ rv[(ftype, fname)] = gds.get(fname).value.swapaxes(0, 2)
+ else:
+ rv[(ftype, fname)] = gds.get(fname).value
+ h5f.close()
+ return rv
+ if size is None:
+ size = sum((grid.count(selector) for chunk in chunks
+ for grid in chunk.objs))
+
if any((ftype != "gas" for ftype, fname in fields)):
raise NotImplementedError
- fhandle = self._handle
- rv = {}
+
for field in fields:
ftype, fname = field
- rv[field] = np.empty(
- size, dtype=fhandle[field_dname(0, fname)].dtype)
+ fsize = size
+ # check the dtype instead
+ rv[field] = np.empty(fsize, dtype="float64")
ngrids = sum(len(chunk.objs) for chunk in chunks)
mylog.debug("Reading %s cells of %s fields in %s blocks",
size, [fname for ftype, fname in fields], ngrids)
- for field in fields:
- ftype, fname = field
- ind = 0
- for chunk in chunks:
- for grid in chunk.objs:
- data = fhandle[field_dname(grid.id, fname)][:]
- if self.pf.field_ordering == 1:
- data = data.swapaxes(0, 2)
- ind += g.select(selector, data, rv[field], ind) # caches
+ ind = 0
+ for chunk in chunks:
+ fid = None
+ for grid in chunk.objs:
+ if grid.filename is None:
+ continue
+ if fid is None:
+ fid = h5py.h5f.open(grid.filename, h5py.h5f.ACC_RDONLY)
+ if self.pf.field_ordering == 1:
+ # check the dtype instead
+ data = np.empty(grid.ActiveDimensions[::-1],
+ dtype="float64")
+ data_view = data.swapaxes(0, 2)
+ else:
+ # check the dtype instead
+ data_view = data = np.empty(grid.ActiveDimensions,
+ dtype="float64")
+ for field in fields:
+ ftype, fname = field
+ dg = h5py.h5d.open(fid, _field_dname(grid.id, fname))
+ dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
+ # caches
+ nd = grid.select(selector, data_view, rv[field], ind)
+ ind += nd # I don't get that part, only last nd is added
+ if fid is not None:
+ fid.close()
return rv
https://bitbucket.org/yt_analysis/yt/commits/ae37a5f23f9b/
Changeset: ae37a5f23f9b
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-18 23:57:43
Summary: Updating GDF to work.
Affected #: 2 files
diff -r 29e24eb0f722d4e4d907b58ed1398eee652a8bdf -r ae37a5f23f9be27c92fbac3bd7f970421448fdf0 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -67,10 +67,6 @@
if self.pf.dimensionality < 3: self.dds[2] = 1.0
self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
- @property
- def filename(self):
- return None
-
class GDFHierarchy(GridGeometryHandler):
grid = GDFGrid
@@ -151,7 +147,7 @@
self.max_level = self.grid_levels.max()
def _setup_derived_fields(self):
- self.derived_field_list = []
+ super(GDFHierarchy, self)._setup_derived_fields()
def _get_box_grids(self, left_edge, right_edge):
"""
@@ -163,6 +159,8 @@
return self.grids[grid_i], grid_i
+ def convert(self, unit):
+ return 1.0
def _get_grid_children(self, grid):
mask = np.zeros(self.num_grids, dtype='bool')
diff -r 29e24eb0f722d4e4d907b58ed1398eee652a8bdf -r ae37a5f23f9be27c92fbac3bd7f970421448fdf0 yt/frontends/gdf/io.py
--- a/yt/frontends/gdf/io.py
+++ b/yt/frontends/gdf/io.py
@@ -14,6 +14,7 @@
#-----------------------------------------------------------------------------
import numpy as np
+import h5py
from yt.funcs import \
mylog
from yt.utilities.io_handler import \
@@ -31,34 +32,14 @@
_data_string = 'data:datatype=0'
def __init__(self, pf, *args, **kwargs):
- # TODO check if _num_per_stride is needed
- self._num_per_stride = kwargs.pop("num_per_stride", 1000000)
- BaseIOHandler.__init__(self, *args, **kwargs)
- self.pf = pf
- self._handle = pf._handle
-
-
- def _read_data_set(self, grid, field):
- if self.pf.field_ordering == 1:
- data = self._handle[field_dname(grid.id, field)][:].swapaxes(0, 2)
- else:
- data = self._handle[field_dname(grid.id, field)][:, :, :]
- return data.astype("float64")
-
- def _read_data_slice(self, grid, field, axis, coord):
- slc = [slice(None), slice(None), slice(None)]
- slc[axis] = slice(coord, coord + 1)
- if self.pf.field_ordering == 1:
- data = self._handle[field_dname(grid.id, field)][:].swapaxes(0, 2)[slc]
- else:
- data = self._handle[field_dname(grid.id, field)][slc]
- return data.astype("float64")
+ super(IOHandlerGDFHDF5, self).__init__(pf)
+ # Now we cache the particle fields
def _read_fluid_selection(self, chunks, selector, fields, size):
chunks = list(chunks)
if any((ftype != "gas" for ftype, fname in fields)):
raise NotImplementedError
- fhandle = self._handle
+ fhandle = h5py.File(self.pf.parameter_filename, "r")
rv = {}
for field in fields:
ftype, fname = field
@@ -75,5 +56,6 @@
data = fhandle[field_dname(grid.id, fname)][:]
if self.pf.field_ordering == 1:
data = data.swapaxes(0, 2)
- ind += g.select(selector, data, rv[field], ind) # caches
+ ind += grid.select(selector, data, rv[field], ind) # caches
+ fhandle.close()
return rv
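The other change here is that the handler no longer keeps a long-lived pf._handle; the HDF5 file is opened inside _read_fluid_selection and closed before returning. A minimal sketch of that open-per-call pattern, using a context manager so the file is released even if the read raises (illustrative function, not the yt method):

    import h5py

    def read_field(filename, dset_path):
        with h5py.File(filename, "r") as fhandle:   # closed on success and on error
            data = fhandle[dset_path][:]
        return data.astype("float64")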
https://bitbucket.org/yt_analysis/yt/commits/c2fd25967f13/
Changeset: c2fd25967f13
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-19 10:00:24
Summary: Merging.
Affected #: 5 files
diff -r ae37a5f23f9be27c92fbac3bd7f970421448fdf0 -r c2fd25967f13bbce30893e657da162498309e8bd yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -459,8 +459,9 @@
for j in range(2):
for k in range(2):
ii = ((k*2)+j)*2+i
+ # Note: C order because our index converts C to F.
source[field][:,ii] = \
- dt[i::2,j::2,k::2].ravel(order="F")
+ dt[i::2,j::2,k::2].ravel(order="C")
oct_handler.fill_level(0, levels, cell_inds, file_inds, tr, source)
del source
# Now we continue with the additional levels.
diff -r ae37a5f23f9be27c92fbac3bd7f970421448fdf0 -r c2fd25967f13bbce30893e657da162498309e8bd yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -36,10 +36,10 @@
masks = None
caching = True
- def __init__(self):
+ def __init__(self, *args, **kwargs):
self.cache = {}
self.masks = {}
- super(IOHandlerART, self).__init__()
+ super(IOHandlerART, self).__init__(*args, **kwargs)
def _read_fluid_selection(self, chunks, selector, fields, size):
# Chunks in this case will have affiliated domain subset objects
diff -r ae37a5f23f9be27c92fbac3bd7f970421448fdf0 -r c2fd25967f13bbce30893e657da162498309e8bd yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -16,35 +16,37 @@
import h5py
import numpy as np
import weakref
-from yt.funcs import *
+import os
+from yt.funcs import \
+ just_one, ensure_tuple
from yt.data_objects.grid_patch import \
- AMRGridPatch
+ AMRGridPatch
from yt.geometry.grid_geometry_handler import \
- GridGeometryHandler
+ GridGeometryHandler
from yt.data_objects.static_output import \
- StaticOutput
+ StaticOutput
from yt.utilities.lib import \
get_box_grids_level
-from yt.utilities.io_handler import \
- io_registry
from yt.utilities.definitions import \
mpc_conversion, sec_conversion
from .fields import GDFFieldInfo, KnownGDFFields
from yt.data_objects.field_info_container import \
- FieldInfoContainer, NullFunc
-import pdb
+ NullFunc
+
def _get_convert(fname):
def _conv(data):
- return data.convert(fname)
+ return 1.0 # data.convert(fname) FIXME
return _conv
+
class GDFGrid(AMRGridPatch):
_id_offset = 0
+
def __init__(self, id, hierarchy, level, start, dimensions):
- AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
- hierarchy = hierarchy)
+ AMRGridPatch.__init__(self, id, filename=hierarchy.hierarchy_filename,
+ hierarchy=hierarchy)
self.Parent = []
self.Children = []
self.Level = level
@@ -59,35 +61,41 @@
if len(self.Parent) > 0:
self.dds = self.Parent[0].dds / self.pf.refine_by
else:
- LE, RE = self.hierarchy.grid_left_edge[id,:], \
- self.hierarchy.grid_right_edge[id,:]
- self.dds = np.array((RE-LE)/self.ActiveDimensions)
+ LE, RE = self.hierarchy.grid_left_edge[id, :], \
+ self.hierarchy.grid_right_edge[id, :]
+ self.dds = np.array((RE - LE) / self.ActiveDimensions)
if self.pf.data_software != "piernik":
- if self.pf.dimensionality < 2: self.dds[1] = 1.0
- if self.pf.dimensionality < 3: self.dds[2] = 1.0
- self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+ if self.pf.dimensionality < 2:
+ self.dds[1] = 1.0
+ if self.pf.dimensionality < 3:
+ self.dds[2] = 1.0
+ self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = \
+ self.dds
+
class GDFHierarchy(GridGeometryHandler):
grid = GDFGrid
+ filtered_particle_types = []
def __init__(self, pf, data_style='grid_data_format'):
self.parameter_file = weakref.proxy(pf)
+ self.hierarchy_filename = self.parameter_file.parameter_filename
+ h5f = h5py.File(self.hierarchy_filename, 'r')
self.data_style = data_style
+ GridGeometryHandler.__init__(self, pf, data_style)
self.max_level = 10 # FIXME
# for now, the hierarchy file is the parameter file!
- self.hierarchy_filename = self.parameter_file.parameter_filename
self.directory = os.path.dirname(self.hierarchy_filename)
- self._fhandle = h5py.File(self.hierarchy_filename,'r')
- GridGeometryHandler.__init__(self,pf,data_style)
-
- self._fhandle.close()
+ h5f.close()
def _initialize_data_storage(self):
pass
def _detect_fields(self):
- self.field_list = self._fhandle['field_types'].keys()
+ h5f = h5py.File(self.hierarchy_filename, 'r')
+ self.field_list = h5f['field_types'].keys()
+ h5f.close()
def _setup_classes(self):
dd = self._get_data_reader_dict()
@@ -95,15 +103,17 @@
self.object_types.sort()
def _count_grids(self):
- self.num_grids = self._fhandle['/grid_parent_id'].shape[0]
+ h5f = h5py.File(self.hierarchy_filename, 'r')
+ self.num_grids = h5f['/grid_parent_id'].shape[0]
+ h5f.close()
def _parse_hierarchy(self):
- f = self._fhandle
+ h5f = h5py.File(self.hierarchy_filename, 'r')
dxs = []
self.grids = np.empty(self.num_grids, dtype='object')
- levels = (f['grid_level'][:]).copy()
- glis = (f['grid_left_index'][:]).copy()
- gdims = (f['grid_dimensions'][:]).copy()
+ levels = (h5f['grid_level'][:]).copy()
+ glis = (h5f['grid_left_index'][:]).copy()
+ gdims = (h5f['grid_dimensions'][:]).copy()
active_dims = ~((np.max(gdims, axis=0) == 1) &
(self.parameter_file.domain_dimensions == 1))
@@ -113,16 +123,18 @@
gdims[i])
self.grids[i]._level_id = levels[i]
- dx = (self.parameter_file.domain_right_edge-
- self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
- dx[active_dims] = dx[active_dims]/self.parameter_file.refine_by**(levels[i])
+ dx = (self.parameter_file.domain_right_edge -
+ self.parameter_file.domain_left_edge) / \
+ self.parameter_file.domain_dimensions
+ dx[active_dims] /= self.parameter_file.refine_by ** levels[i]
dxs.append(dx)
dx = np.array(dxs)
- self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
+ self.grid_left_edge = self.parameter_file.domain_left_edge + dx * glis
self.grid_dimensions = gdims.astype("int32")
- self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
- self.grid_particle_count = f['grid_particle_count'][:]
+ self.grid_right_edge = self.grid_left_edge + dx * self.grid_dimensions
+ self.grid_particle_count = h5f['grid_particle_count'][:]
del levels, glis, gdims
+ h5f.close()
def _populate_grid_objects(self):
mask = np.empty(self.grids.size, dtype='int32')
@@ -134,8 +146,8 @@
g.Children = self._get_grid_children(g)
for g1 in g.Children:
g1.Parent.append(g)
- get_box_grids_level(self.grid_left_edge[gi,:],
- self.grid_right_edge[gi,:],
+ get_box_grids_level(self.grid_left_edge[gi, :],
+ self.grid_right_edge[gi, :],
self.grid_levels[gi],
self.grid_left_edge, self.grid_right_edge,
self.grid_levels, mask)
@@ -154,34 +166,33 @@
Gets back all the grids between a left edge and right edge
"""
eps = np.finfo(np.float64).eps
- grid_i = np.where((np.all((self.grid_right_edge - left_edge) > eps, axis=1) \
- & np.all((right_edge - self.grid_left_edge) > eps, axis=1)) == True)
+ grid_i = np.where(np.all((self.grid_right_edge - left_edge) > eps, axis=1) &
+ np.all((right_edge - self.grid_left_edge) > eps, axis=1))
return self.grids[grid_i], grid_i
- def convert(self, unit):
- return 1.0
-
def _get_grid_children(self, grid):
mask = np.zeros(self.num_grids, dtype='bool')
grids, grid_ind = self._get_box_grids(grid.LeftEdge, grid.RightEdge)
mask[grid_ind] = True
return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
+
class GDFStaticOutput(StaticOutput):
_hierarchy_class = GDFHierarchy
_fieldinfo_fallback = GDFFieldInfo
_fieldinfo_known = KnownGDFFields
def __init__(self, filename, data_style='grid_data_format',
- storage_filename = None):
+ storage_filename=None):
StaticOutput.__init__(self, filename, data_style)
self.storage_filename = storage_filename
self.filename = filename
def _set_units(self):
"""
- Generates the conversion to various physical _units based on the parameter file
+ Generates the conversion to various physical _units
+ based on the parameter file
"""
self.units = {}
self.time_units = {}
@@ -190,16 +201,17 @@
self.time_units['1'] = 1
self.units['1'] = 1.0
self.units['cm'] = 1.0
- self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+ self.units['unitary'] = 1.0 / (self.domain_right_edge -
+ self.domain_left_edge).max()
for unit in mpc_conversion.keys():
- self.units[unit] = 1.0 * mpc_conversion[unit] / mpc_conversion["cm"]
+ self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
for unit in sec_conversion.keys():
self.time_units[unit] = 1.0 / sec_conversion[unit]
# This should be improved.
- self._handle = h5py.File(self.parameter_filename, "r")
- for field_name in self._handle["/field_types"]:
- current_field = self._handle["/field_types/%s" % field_name]
+ h5f = h5py.File(self.parameter_filename, "r")
+ for field_name in h5f["/field_types"]:
+ current_field = h5f["/field_types/%s" % field_name]
if 'field_to_cgs' in current_field.attrs:
self.units[field_name] = current_field.attrs['field_to_cgs']
else:
@@ -208,15 +220,16 @@
if type(current_field.attrs['field_units']) == str:
current_fields_unit = current_field.attrs['field_units']
else:
- current_fields_unit = just_one(current_field.attrs['field_units'])
+ current_fields_unit = \
+ just_one(current_field.attrs['field_units'])
else:
current_fields_unit = ""
- self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,
- units=current_fields_unit, projected_units="",
- convert_function=_get_convert(field_name))
+ self._fieldinfo_known.add_field(
+ field_name, function=NullFunc, take_log=False,
+ units=current_fields_unit, projected_units="",
+ convert_function=_get_convert(field_name))
- self._handle.close()
- del self._handle
+ h5f.close()
def _parse_parameter_file(self):
self._handle = h5py.File(self.parameter_filename, "r")
@@ -230,13 +243,15 @@
self.domain_right_edge = sp["domain_right_edge"][:]
self.domain_dimensions = sp["domain_dimensions"][:]
refine_by = sp["refine_by"]
- if refine_by is None: refine_by = 2
+ if refine_by is None:
+ refine_by = 2
self.refine_by = refine_by
self.dimensionality = sp["dimensionality"]
self.current_time = sp["current_time"]
self.unique_identifier = sp["unique_identifier"]
self.cosmological_simulation = sp["cosmological_simulation"]
- if sp["num_ghost_zones"] != 0: raise RuntimeError
+ if sp["num_ghost_zones"] != 0:
+ raise RuntimeError
self.num_ghost_zones = sp["num_ghost_zones"]
self.field_ordering = sp["field_ordering"]
self.boundary_conditions = sp["boundary_conditions"][:]
@@ -250,15 +265,16 @@
else:
self.current_redshift = self.omega_lambda = self.omega_matter = \
self.hubble_constant = self.cosmological_simulation = 0.0
- self.parameters['Time'] = 1.0 # Hardcode time conversion for now.
- self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
+ self.parameters['Time'] = 1.0 # Hardcode time conversion for now.
+ # Hardcode for now until field staggering is supported.
+ self.parameters["HydroMethod"] = 0
self._handle.close()
del self._handle
@classmethod
def _is_valid(self, *args, **kwargs):
try:
- fileh = h5py.File(args[0],'r')
+ fileh = h5py.File(args[0], 'r')
if "gridded_data_format" in fileh:
fileh.close()
return True
@@ -269,4 +285,3 @@
def __repr__(self):
return self.basename.rsplit(".", 1)[0]
-
diff -r ae37a5f23f9be27c92fbac3bd7f970421448fdf0 -r c2fd25967f13bbce30893e657da162498309e8bd yt/frontends/gdf/fields.py
--- a/yt/frontends/gdf/fields.py
+++ b/yt/frontends/gdf/fields.py
@@ -16,14 +16,8 @@
from yt.data_objects.field_info_container import \
FieldInfoContainer, \
FieldInfo, \
- ValidateParameter, \
- ValidateDataField, \
- ValidateProperty, \
- ValidateSpatial, \
- ValidateGridType, \
NullFunc, \
TranslationFunc
-import yt.fields.universal_fields
log_translation_dict = {"Density": "density",
"Pressure": "pressure"}
@@ -31,7 +25,7 @@
translation_dict = {"x-velocity": "velocity_x",
"y-velocity": "velocity_y",
"z-velocity": "velocity_z"}
-
+
# translation_dict = {"mag_field_x": "cell_centered_B_x ",
# "mag_field_y": "cell_centered_B_y ",
# "mag_field_z": "cell_centered_B_z "}
@@ -43,39 +37,39 @@
add_gdf_field = KnownGDFFields.add_field
add_gdf_field("density", function=NullFunc, take_log=True,
- units=r"\rm{g}/\rm{cm}^3",
- projected_units =r"\rm{g}/\rm{cm}^2")
+ units=r"\rm{g}/\rm{cm}^3",
+ projected_units=r"\rm{g}/\rm{cm}^2")
add_gdf_field("specific_energy", function=NullFunc, take_log=True,
- units=r"\rm{erg}/\rm{g}")
+ units=r"\rm{erg}/\rm{g}")
add_gdf_field("pressure", function=NullFunc, take_log=True,
- units=r"\rm{erg}/\rm{g}")
+ units=r"\rm{erg}/\rm{g}")
add_gdf_field("velocity_x", function=NullFunc, take_log=False,
- units=r"\rm{cm}/\rm{s}")
+ units=r"\rm{cm}/\rm{s}")
add_gdf_field("velocity_y", function=NullFunc, take_log=False,
- units=r"\rm{cm}/\rm{s}")
+ units=r"\rm{cm}/\rm{s}")
add_gdf_field("velocity_z", function=NullFunc, take_log=False,
- units=r"\rm{cm}/\rm{s}")
+ units=r"\rm{cm}/\rm{s}")
add_gdf_field("mag_field_x", function=NullFunc, take_log=False,
- units=r"\rm{cm}/\rm{s}")
+ units=r"\rm{cm}/\rm{s}")
add_gdf_field("mag_field_y", function=NullFunc, take_log=False,
- units=r"\rm{cm}/\rm{s}")
+ units=r"\rm{cm}/\rm{s}")
add_gdf_field("mag_field_z", function=NullFunc, take_log=False,
- units=r"\rm{cm}/\rm{s}")
+ units=r"\rm{cm}/\rm{s}")
-for f,v in log_translation_dict.items():
+for f, v in log_translation_dict.items():
add_field(f, TranslationFunc(v), take_log=True,
units=KnownGDFFields[v].get_units(),
projected_units=KnownGDFFields[v].get_projected_units())
-for f,v in translation_dict.items():
+for f, v in translation_dict.items():
add_field(f, TranslationFunc(v), take_log=False,
units=KnownGDFFields[v].get_units(),
projected_units=KnownGDFFields[v].get_projected_units())
diff -r ae37a5f23f9be27c92fbac3bd7f970421448fdf0 -r c2fd25967f13bbce30893e657da162498309e8bd yt/frontends/gdf/io.py
--- a/yt/frontends/gdf/io.py
+++ b/yt/frontends/gdf/io.py
@@ -15,14 +15,19 @@
import numpy as np
import h5py
+import exceptions
from yt.funcs import \
mylog
from yt.utilities.io_handler import \
BaseIOHandler
-def field_dname(grid_id, field_name):
- return "/data/grid_%010i/%s" % (grid_id, field_name)
+def _grid_dname(grid_id):
+ return "/data/grid_%010i" % grid_id
+
+
+def _field_dname(grid_id, field_name):
+ return "%s/%s" % (_grid_dname(grid_id), field_name)
# TODO all particle bits were removed
@@ -31,31 +36,84 @@
_offset_string = 'data:offsets=0'
_data_string = 'data:datatype=0'
- def __init__(self, pf, *args, **kwargs):
- super(IOHandlerGDFHDF5, self).__init__(pf)
- # Now we cache the particle fields
+ def _read_field_names(self, grid):
+ if grid.filename is None:
+ return []
+ print 'grid.filename = %', grid.filename
+ h5f = h5py.File(grid.filename, mode="r")
+ group = h5f[_grid_dname(grid.id)]
+ fields = []
+ for name, v in group.iteritems():
+ # NOTE: This won't work with 1D datasets.
+ if not hasattr(v, "shape"):
+ continue
+ elif len(v.dims) == 1:
+ fields.append(("io", str(name)))
+ else:
+ fields.append(("gas", str(name)))
+ h5f.close()
+ return fields
+
+ @property
+ def _read_exception(self):
+ return (exceptions.KeyError, )
def _read_fluid_selection(self, chunks, selector, fields, size):
+ rv = {}
chunks = list(chunks)
+
+ if selector.__class__.__name__ == "GridSelector":
+ if not (len(chunks) == len(chunks[0].objs) == 1):
+ raise RuntimeError
+ grid = chunks[0].objs[0]
+ h5f = h5py.File(grid.filename, 'r')
+ gds = h5f.get(_grid_dname(grid.id))
+ for ftype, fname in fields:
+ if self.pf.field_ordering == 1:
+ rv[(ftype, fname)] = gds.get(fname).value.swapaxes(0, 2)
+ else:
+ rv[(ftype, fname)] = gds.get(fname).value
+ h5f.close()
+ return rv
+ if size is None:
+ size = sum((grid.count(selector) for chunk in chunks
+ for grid in chunk.objs))
+
if any((ftype != "gas" for ftype, fname in fields)):
raise NotImplementedError
- fhandle = h5py.File(self.pf.parameter_filename, "r")
- rv = {}
+
for field in fields:
ftype, fname = field
- rv[field] = np.empty(
- size, dtype=fhandle[field_dname(0, fname)].dtype)
+ fsize = size
+ # check the dtype instead
+ rv[field] = np.empty(fsize, dtype="float64")
ngrids = sum(len(chunk.objs) for chunk in chunks)
mylog.debug("Reading %s cells of %s fields in %s blocks",
size, [fname for ftype, fname in fields], ngrids)
- for field in fields:
- ftype, fname = field
- ind = 0
- for chunk in chunks:
- for grid in chunk.objs:
- data = fhandle[field_dname(grid.id, fname)][:]
- if self.pf.field_ordering == 1:
- data = data.swapaxes(0, 2)
- ind += grid.select(selector, data, rv[field], ind) # caches
- fhandle.close()
+ ind = 0
+ for chunk in chunks:
+ fid = None
+ for grid in chunk.objs:
+ if grid.filename is None:
+ continue
+ if fid is None:
+ fid = h5py.h5f.open(grid.filename, h5py.h5f.ACC_RDONLY)
+ if self.pf.field_ordering == 1:
+ # check the dtype instead
+ data = np.empty(grid.ActiveDimensions[::-1],
+ dtype="float64")
+ data_view = data.swapaxes(0, 2)
+ else:
+ # check the dtype instead
+ data_view = data = np.empty(grid.ActiveDimensions,
+ dtype="float64")
+ for field in fields:
+ ftype, fname = field
+ dg = h5py.h5d.open(fid, _field_dname(grid.id, fname))
+ dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
+ # caches
+ nd = grid.select(selector, data_view, rv[field], ind)
+ ind += nd # I don't get that part, only last nd is added
+ if fid is not None:
+ fid.close()
return rv
https://bitbucket.org/yt_analysis/yt/commits/aef987a4f9c6/
Changeset: aef987a4f9c6
Branch: yt-3.0
User: xarthisius
Date: 2013-10-19 10:53:28
Summary: Fix Subpackage 'yt.frontends.boxlib' configuration returned as 'yt.frontends.orion'
Affected #: 2 files
diff -r c2fd25967f13bbce30893e657da162498309e8bd -r aef987a4f9c6f597787d03fe26cfc09e44923178 yt/frontends/boxlib/__init__.py
--- a/yt/frontends/boxlib/__init__.py
+++ b/yt/frontends/boxlib/__init__.py
@@ -1,5 +1,5 @@
"""
-API for yt.frontends.orion
+API for yt.frontends.boxlib
diff -r c2fd25967f13bbce30893e657da162498309e8bd -r aef987a4f9c6f597787d03fe26cfc09e44923178 yt/frontends/boxlib/setup.py
--- a/yt/frontends/boxlib/setup.py
+++ b/yt/frontends/boxlib/setup.py
@@ -7,7 +7,7 @@
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
- config = Configuration('orion', parent_package, top_path)
+ config = Configuration('boxlib', parent_package, top_path)
config.make_config_py() # installs __config__.py
#config.make_svn_version_py()
return config
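
The mismatch fixed here stems from numpy.distutils registering the subpackage under whatever name is passed to Configuration, not under the directory name. A minimal, hedged setup.py sketch for a subpackage (the wiring under __main__ is the standard numpy.distutils pattern, not part of the diff):

    def configuration(parent_package='', top_path=None):
        from numpy.distutils.misc_util import Configuration
        # The name passed here determines the installed dotted path; a stale
        # 'orion' would install the boxlib directory as ...frontends.orion.
        config = Configuration('boxlib', parent_package, top_path)
        config.make_config_py()  # installs __config__.py
        return config

    if __name__ == '__main__':
        from numpy.distutils.core import setup
        setup(configuration=configuration)
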
https://bitbucket.org/yt_analysis/yt/commits/6354e5834da8/
Changeset: 6354e5834da8
Branch: yt-3.0
User: xarthisius
Date: 2013-10-21 11:35:19
Summary: Update MANIFEST.in to reflect changes in license filename, add CITATION too
Affected #: 1 file
diff -r aef987a4f9c6f597787d03fe26cfc09e44923178 -r 6354e5834da8f23e1e2e5dc799e332d1819d7e02 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
+include distribute_setup.py README* CREDITS COPYING.txt CITATION
recursive-include yt/gui/reason/html *.html *.png *.ico *.js
-recursive-include yt *.pyx *.pxd *.hh *.h README*
-recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE
\ No newline at end of file
+recursive-include yt *.pyx *.pxd *.h README*
+recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE kdtree2-README
https://bitbucket.org/yt_analysis/yt/commits/5583f1046fce/
Changeset: 5583f1046fce
Branch: yt-3.0
User: ngoldbaum
Date: 2013-10-06 10:56:28
Summary: derived quantity API improvements.
Affected #: 2 files
diff -r c86499f6891e4b191a4ac7cbcd44959ed0b9e245 -r 5583f1046fceb372ba8d5471600283f7ee3597e9 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -746,6 +746,8 @@
self.coords = None
self._grids = None
self.quantities = DerivedQuantityCollection(self)
+ for f in self.quantities.keys():
+ self.__dict__[camelcase_to_underscore(f)] = self.quantities[f]
def cut_region(self, field_cuts):
"""
diff -r c86499f6891e4b191a4ac7cbcd44959ed0b9e245 -r 5583f1046fceb372ba8d5471600283f7ee3597e9 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -14,7 +14,7 @@
#-----------------------------------------------------------------------------
import __builtin__
-import time, types, signal, inspect, traceback, sys, pdb, os
+import time, types, signal, inspect, traceback, sys, pdb, os, re
import contextlib
import warnings, struct, subprocess
import numpy as np
@@ -625,3 +625,7 @@
return
if not os.path.exists(my_dir):
only_on_root(os.makedirs, my_dir)
+
+def camelcase_to_underscore(name):
+ s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
+ return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
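
The helper added to yt/funcs.py is the standard two-pass regex CamelCase-to-snake_case converter; a short self-contained illustration (the sample names are derived-quantity names used only as examples):

    import re

    def camelcase_to_underscore(name):
        # Pass 1 breaks boundaries like "lQuantity" -> "l_Quantity"; pass 2
        # breaks lowercase/digit-to-uppercase boundaries; then lowercase.
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

    print(camelcase_to_underscore("TotalQuantity"))            # total_quantity
    print(camelcase_to_underscore("WeightedAverageQuantity"))  # weighted_average_quantity
    print(camelcase_to_underscore("AngularMomentumVector"))    # angular_momentum_vector
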
https://bitbucket.org/yt_analysis/yt/commits/1613797d60ad/
Changeset: 1613797d60ad
Branch: yt-3.0
User: ngoldbaum
Date: 2013-10-21 07:44:13
Summary: Merging with yt-3.0 tip.
Affected #: 151 files
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -15,6 +15,7 @@
yt/geometry/oct_visitors.c
yt/geometry/particle_deposit.c
yt/geometry/particle_oct_container.c
+yt/geometry/particle_smooth.c
yt/geometry/selection_routines.c
yt/utilities/amr_utils.c
yt/utilities/kdtree/forthonf2c.h
@@ -31,6 +32,7 @@
yt/utilities/lib/geometry_utils.c
yt/utilities/lib/Interpolators.c
yt/utilities/lib/kdtree.c
+yt/utilities/lib/mesh_utilities.c
yt/utilities/lib/misc_utilities.c
yt/utilities/lib/Octree.c
yt/utilities/lib/png_writer.c
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb doc/get_yt.sh
--- a/doc/get_yt.sh
+++ b/doc/get_yt.sh
@@ -263,8 +263,6 @@
YT_DEPS+=('python')
YT_DEPS+=('distribute')
YT_DEPS+=('libpng')
-YT_DEPS+=('freetype')
-YT_DEPS+=('hdf5')
YT_DEPS+=('numpy')
YT_DEPS+=('pygments')
YT_DEPS+=('jinja2')
@@ -275,6 +273,7 @@
YT_DEPS+=('h5py')
YT_DEPS+=('matplotlib')
YT_DEPS+=('cython')
+YT_DEPS+=('nose')
# Here is our dependency list for yt
log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/free
@@ -301,7 +300,6 @@
export HDF5_DIR=${DEST_DIR}
log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
pushd ${YT_DIR}
- echo $DEST_DIR > hdf5.cfg
log_cmd python setup.py develop
popd
log_cmd cp ${YT_DIR}/doc/activate ${DEST_DIR}/bin/activate
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -941,9 +941,7 @@
( ${HG_EXEC} pull 2>1 && ${HG_EXEC} up -C 2>1 ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
echo "Installing yt"
-echo $HDF5_DIR > hdf5.cfg
[ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
-[ $INST_FTYPE -eq 1 ] && echo $FTYPE_DIR > freetype.cfg
( export PATH=$DEST_DIR/bin:$PATH ; ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
touch done
cd $MY_PWD
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -103,5 +103,8 @@
TwoPointFunctions, \
FcnSet
+from .sunyaev_zeldovich.api import SZProjection
+
from .radmc3d_export.api import \
RadMC3DWriter
+
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -233,7 +233,8 @@
pmass_min, pmass_max = dd.quantities["Extrema"](
(ptype, "ParticleMassMsun"), non_zero = True)[0]
if pmass_min != pmass_max:
- raise YTRockstarMultiMassNotSupported
+ raise YTRockstarMultiMassNotSupported(pmass_min, pmass_max,
+ ptype)
particle_mass = pmass_min
# NOTE: We want to take our Msun and turn it into Msun/h . Its value
# should be such that dividing by little h gives the original value.
@@ -244,6 +245,7 @@
# Get total_particles in parallel.
tp = dd.quantities['TotalQuantity']((ptype, "particle_ones"))[0]
p['total_particles'] = int(tp)
+ mylog.warning("Total Particle Count: %0.3e", int(tp))
p['left_edge'] = tpf.domain_left_edge
p['right_edge'] = tpf.domain_right_edge
p['center'] = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -21,4 +21,5 @@
config.add_subpackage("star_analysis")
config.add_subpackage("two_point_functions")
config.add_subpackage("radmc3d_export")
+ config.add_subpackage("sunyaev_zeldovich")
return config
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -25,7 +25,7 @@
from yt.utilities.physical_constants import \
kpc_per_cm, \
sec_per_year
-from yt.data_objects.universal_fields import add_field
+from yt.fields.universal_fields import add_field
from yt.mods import *
def export_to_sunrise(pf, fn, star_particle_type, fc, fwidth, ncells_wide=None,
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/analysis_modules/sunyaev_zeldovich/api.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/api.py
@@ -0,0 +1,12 @@
+"""
+API for sunyaev_zeldovich
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from projection import SZProjection
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/analysis_modules/sunyaev_zeldovich/projection.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -0,0 +1,354 @@
+"""
+Projection class for the Sunyaev-Zeldovich effect. Requires SZpack (at least
+version 1.1.1) to be downloaded and installed:
+
+http://www.chluba.de/SZpack/
+
+For details on the computations involved please refer to the following references:
+
+Chluba, Nagai, Sazonov, Nelson, MNRAS, 2012, arXiv:1205.5778
+Chluba, Switzer, Nagai, Nelson, MNRAS, 2012, arXiv:1211.3206
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
+from yt.data_objects.image_array import ImageArray
+from yt.data_objects.field_info_container import add_field
+from yt.funcs import fix_axis, mylog, iterable, get_pbar
+from yt.utilities.definitions import inv_axis_names
+from yt.visualization.image_writer import write_fits, write_projection
+from yt.visualization.volume_rendering.camera import off_axis_projection
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+ communication_system, parallel_root_only
+import numpy as np
+
+I0 = 2*(kboltz*Tcmb)**3/((hcgs*clight)**2)*1.0e17
+
+try:
+ import SZpack
+except:
+ raise ImportError("SZpack not installed. It can be obtained from from http://www.chluba.de/SZpack/.")
+
+vlist = "xyz"
+
+def _t_squared(field, data):
+ return data["Density"]*data["TempkeV"]*data["TempkeV"]
+add_field("TSquared", function=_t_squared)
+
+def _beta_perp_squared(field, data):
+ return data["Density"]*data["VelocityMagnitude"]**2/clight/clight - data["BetaParSquared"]
+add_field("BetaPerpSquared", function=_beta_perp_squared)
+
+def _beta_par_squared(field, data):
+ return data["BetaPar"]**2/data["Density"]
+add_field("BetaParSquared", function=_beta_par_squared)
+
+def _t_beta_par(field, data):
+ return data["TempkeV"]*data["BetaPar"]
+add_field("TBetaPar", function=_t_beta_par)
+
+def _t_sz(field, data):
+ return data["Density"]*data["TempkeV"]
+add_field("TeSZ", function=_t_sz)
+
+class SZProjection(object):
+ r""" Initialize a SZProjection object.
+
+ Parameters
+ ----------
+ pf : parameter_file
+ The parameter file.
+ freqs : array_like
+ The frequencies (in GHz) at which to compute the SZ spectral distortion.
+ mue : float, optional
+ Mean molecular weight for determining the electron number density.
+ high_order : boolean, optional
+ Should we calculate high-order moments of velocity and temperature?
+
+ Examples
+ --------
+ >>> freqs = [90., 180., 240.]
+ >>> szprj = SZProjection(pf, freqs, high_order=True)
+ """
+ def __init__(self, pf, freqs, mue=1.143, high_order=False):
+
+ self.pf = pf
+ self.num_freqs = len(freqs)
+ self.high_order = high_order
+ self.freqs = np.array(freqs)
+ self.mueinv = 1./mue
+ self.xinit = hcgs*self.freqs*1.0e9/(kboltz*Tcmb)
+ self.freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs]
+ self.data = {}
+
+ self.units = {}
+ self.units["TeSZ"] = r"$\mathrm{keV}$"
+ self.units["Tau"] = None
+
+ self.display_names = {}
+ self.display_names["TeSZ"] = r"$\mathrm{T_e}$"
+ self.display_names["Tau"] = r"$\mathrm{\tau}$"
+
+ for f, field in zip(self.freqs, self.freq_fields):
+ self.units[field] = r"$\mathrm{MJy\ sr^{-1}}$"
+ self.display_names[field] = r"$\mathrm{\Delta{I}_{%d\ GHz}}$" % (int(f))
+
+ def on_axis(self, axis, center="c", width=(1, "unitary"), nx=800, source=None):
+ r""" Make an on-axis projection of the SZ signal.
+
+ Parameters
+ ----------
+ axis : integer or string
+ The axis of the simulation domain along which to make the SZprojection.
+ center : array_like or string, optional
+ The center of the projection.
+ width : float or tuple
+ The width of the projection.
+ nx : integer, optional
+ The dimensions on a side of the projection image.
+ source : yt.data_objects.api.AMRData, optional
+ If specified, this will be the data source used for selecting regions to project.
+
+ Examples
+ --------
+ >>> szprj.on_axis("y", center="max", width=(1.0, "mpc"), source=my_sphere)
+ """
+ axis = fix_axis(axis)
+
+ def _beta_par(field, data):
+ axis = data.get_field_parameter("axis")
+ # Load these, even though we will only use one
+ for ax in 'xyz':
+ data['%s-velocity' % ax]
+ vpar = data["Density"]*data["%s-velocity" % (vlist[axis])]
+ return vpar/clight
+ add_field("BetaPar", function=_beta_par)
+ self.pf.h._derived_fields_add(["BetaPar"])
+
+ proj = self.pf.h.proj("Density", axis, data_source=source)
+ proj.data_source.set_field_parameter("axis", axis)
+ frb = proj.to_frb(width, nx)
+ dens = frb["Density"]
+ Te = frb["TeSZ"]/dens
+ bpar = frb["BetaPar"]/dens
+ omega1 = frb["TSquared"]/dens/(Te*Te) - 1.
+ bperp2 = np.zeros((nx,nx))
+ sigma1 = np.zeros((nx,nx))
+ kappa1 = np.zeros((nx,nx))
+ if self.high_order:
+ bperp2 = frb["BetaPerpSquared"]/dens
+ sigma1 = frb["TBetaPar"]/dens/Te - bpar
+ kappa1 = frb["BetaParSquared"]/dens - bpar*bpar
+ tau = sigma_thompson*dens*self.mueinv/mh
+
+ nx,ny = frb.buff_size
+ self.bounds = frb.bounds
+ self.dx = (frb.bounds[1]-frb.bounds[0])/nx
+ self.dy = (frb.bounds[3]-frb.bounds[2])/ny
+ self.nx = nx
+
+ self._compute_intensity(tau, Te, bpar, omega1, sigma1, kappa1, bperp2)
+
+ def off_axis(self, L, center="c", width=(1, "unitary"), nx=800, source=None):
+ r""" Make an off-axis projection of the SZ signal.
+
+ Parameters
+ ----------
+ L : array_like
+ The normal vector of the projection.
+ center : array_like or string, optional
+ The center of the projection.
+ width : float or tuple
+ The width of the projection.
+ nx : integer, optional
+ The dimensions on a side of the projection image.
+ source : yt.data_objects.api.AMRData, optional
+ If specified, this will be the data source used for selecting regions to project.
+ Currently unsupported in yt 2.x.
+
+ Examples
+ --------
+ >>> L = np.array([0.5, 1.0, 0.75])
+ >>> szprj.off_axis(L, center="c", width=(2.0, "mpc"))
+ """
+ if iterable(width):
+ w = width[0]/self.pf.units[width[1]]
+ else:
+ w = width
+ if center == "c":
+ ctr = self.pf.domain_center
+ elif center == "max":
+ ctr = self.pf.h.find_max("Density")
+ else:
+ ctr = center
+
+ if source is not None:
+ mylog.error("Source argument is not currently supported for off-axis S-Z projections.")
+ raise NotImplementedError
+
+ def _beta_par(field, data):
+ vpar = data["Density"]*(data["x-velocity"]*L[0]+
+ data["y-velocity"]*L[1]+
+ data["z-velocity"]*L[2])
+ return vpar/clight
+ add_field("BetaPar", function=_beta_par)
+ self.pf.h._derived_fields_add(["BetaPar"])
+
+ dens = off_axis_projection(self.pf, ctr, L, w, nx, "Density")
+ Te = off_axis_projection(self.pf, ctr, L, w, nx, "TeSZ")/dens
+ bpar = off_axis_projection(self.pf, ctr, L, w, nx, "BetaPar")/dens
+ omega1 = off_axis_projection(self.pf, ctr, L, w, nx, "TSquared")/dens
+ omega1 = omega1/(Te*Te) - 1.
+ if self.high_order:
+ bperp2 = off_axis_projection(self.pf, ctr, L, w, nx, "BetaPerpSquared")/dens
+ sigma1 = off_axis_projection(self.pf, ctr, L, w, nx, "TBetaPar")/dens
+ sigma1 = sigma1/Te - bpar
+ kappa1 = off_axis_projection(self.pf, ctr, L, w, nx, "BetaParSquared")/dens
+ kappa1 -= bpar
+ else:
+ bperp2 = np.zeros((nx,nx))
+ sigma1 = np.zeros((nx,nx))
+ kappa1 = np.zeros((nx,nx))
+ tau = sigma_thompson*dens*self.mueinv/mh
+
+ self.bounds = np.array([-0.5*w, 0.5*w, -0.5*w, 0.5*w])
+ self.dx = w/nx
+ self.dy = w/nx
+ self.nx = nx
+
+ self._compute_intensity(tau, Te, bpar, omega1, sigma1, kappa1, bperp2)
+
+ def _compute_intensity(self, tau, Te, bpar, omega1, sigma1, kappa1, bperp2):
+
+ # Bad hack, but we get NaNs if we don't do something like this
+ small_beta = np.abs(bpar) < 1.0e-20
+ bpar[small_beta] = 1.0e-20
+
+ comm = communication_system.communicators[-1]
+
+ nx, ny = self.nx,self.nx
+ signal = np.zeros((self.num_freqs,nx,ny))
+ xo = np.zeros((self.num_freqs))
+
+ k = int(0)
+
+ start_i = comm.rank*nx/comm.size
+ end_i = (comm.rank+1)*nx/comm.size
+
+ pbar = get_pbar("Computing SZ signal.", nx*nx)
+
+ for i in xrange(start_i, end_i):
+ for j in xrange(ny):
+ xo[:] = self.xinit[:]
+ SZpack.compute_combo_means(xo, tau[i,j], Te[i,j],
+ bpar[i,j], omega1[i,j],
+ sigma1[i,j], kappa1[i,j], bperp2[i,j])
+ signal[:,i,j] = xo[:]
+ pbar.update(k)
+ k += 1
+
+ signal = comm.mpi_allreduce(signal)
+
+ pbar.finish()
+
+ for i, field in enumerate(self.freq_fields):
+ self.data[field] = ImageArray(I0*self.xinit[i]**3*signal[i,:,:])
+ self.data["Tau"] = ImageArray(tau)
+ self.data["TeSZ"] = ImageArray(Te)
+
+ @parallel_root_only
+ def write_fits(self, filename_prefix, clobber=True):
+ r""" Export images to a FITS file. Writes the SZ distortion in all
+ specified frequencies as well as the mass-weighted temperature and the
+ optical depth. Distance units are in kpc.
+
+ Parameters
+ ----------
+ filename_prefix : string
+ The prefix of the FITS filename.
+ clobber : boolean, optional
+ If the file already exists, do we overwrite?
+
+ Examples
+ --------
+ >>> szprj.write_fits("SZbullet", clobber=False)
+ """
+ coords = {}
+ coords["dx"] = self.dx*self.pf.units["kpc"]
+ coords["dy"] = self.dy*self.pf.units["kpc"]
+ coords["xctr"] = 0.0
+ coords["yctr"] = 0.0
+ coords["units"] = "kpc"
+ other_keys = {"Time" : self.pf.current_time}
+ write_fits(self.data, filename_prefix, clobber=clobber, coords=coords,
+ other_keys=other_keys)
+
+ @parallel_root_only
+ def write_png(self, filename_prefix):
+ r""" Export images to PNG files. Writes the SZ distortion in all
+ specified frequencies as well as the mass-weighted temperature and the
+ optical depth. Distance units are in kpc.
+
+ Parameters
+ ----------
+ filename_prefix : string
+ The prefix of the image filenames.
+
+ Examples
+ --------
+ >>> szprj.write_png("SZsloshing")
+ """
+ extent = tuple([bound*self.pf.units["kpc"] for bound in self.bounds])
+ for field, image in self.items():
+ filename=filename_prefix+"_"+field+".png"
+ label = self.display_names[field]
+ if self.units[field] is not None:
+ label += " ("+self.units[field]+")"
+ write_projection(image, filename, colorbar_label=label, take_log=False,
+ extent=extent, xlabel=r"$\mathrm{x\ (kpc)}$",
+ ylabel=r"$\mathrm{y\ (kpc)}$")
+
+ @parallel_root_only
+ def write_hdf5(self, filename):
+ r"""Export the set of S-Z fields to a set of HDF5 datasets.
+
+ Parameters
+ ----------
+ filename : string
+ This file will be opened in "write" mode.
+
+ Examples
+ --------
+ >>> szprj.write_hdf5("SZsloshing.h5")
+ """
+ import h5py
+ f = h5py.File(filename, "w")
+ for field, data in self.items():
+ f.create_dataset(field,data=data)
+ f.close()
+
+ def keys(self):
+ return self.data.keys()
+
+ def items(self):
+ return self.data.items()
+
+ def values(self):
+ return self.data.values()
+
+ def has_key(self, key):
+ return key in self.data.keys()
+
+ def __getitem__(self, key):
+ return self.data[key]
+
+ @property
+ def shape(self):
+ return (self.nx,self.nx)
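
Pulled together, the new SZProjection class is meant to be driven as its docstrings describe: build it from a parameter file and a list of frequencies, project on or off axis, then export the images. A condensed usage sketch (the dataset path is illustrative and SZpack must be installed for the import to succeed):

    import numpy as np
    from yt.mods import load
    from yt.analysis_modules.sunyaev_zeldovich.api import SZProjection

    pf = load("DD0010/moving7_0010")     # illustrative dataset
    freqs = [90., 180., 240.]            # GHz

    szprj = SZProjection(pf, freqs, high_order=True)
    szprj.on_axis("z", width=(1.0, "unitary"), nx=512)

    # Off-axis projection along an arbitrary line of sight.
    L = np.array([0.5, 1.0, 0.75])
    szprj.off_axis(L, width=(1.0, "unitary"), nx=512)

    # Results are image arrays keyed by "90_GHz", ..., plus "Tau" and "TeSZ".
    print(szprj.keys())
    szprj.write_fits("SZ_example")
    szprj.write_png("SZ_example")
    szprj.write_hdf5("SZ_example.h5")
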
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/analysis_modules/sunyaev_zeldovich/setup.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/setup.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('sunyaev_zeldovich', parent_package, top_path)
+ config.add_subpackage("tests")
+ config.make_config_py() # installs __config__.py
+ #config.make_svn_version_py()
+ return config
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -0,0 +1,139 @@
+"""
+Unit test the sunyaev_zeldovich analysis module.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.frontends.stream.api import load_uniform_grid
+from yt.funcs import get_pbar, mylog
+from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, \
+ mh, cm_per_km, kboltz, Tcmb, hcgs, clight, sigma_thompson
+from yt.testing import *
+from yt.utilities.answer_testing.framework import requires_pf, \
+ GenericArrayTest, data_dir_load, GenericImageTest
+try:
+ from yt.analysis_modules.sunyaev_zeldovich.projection import SZProjection, I0
+except ImportError:
+ pass
+import numpy as np
+try:
+ import SZpack
+except ImportError:
+ pass
+
+mue = 1./0.88
+freqs = np.array([30., 90., 240.])
+
+def setup():
+ """Test specific setup."""
+ from yt.config import ytcfg
+ ytcfg["yt", "__withintesting"] = "True"
+
+def full_szpack3d(pf, xo):
+ data = pf.h.grids[0]
+ dz = pf.h.get_smallest_dx()*pf.units["cm"]
+ nx,ny,nz = data["Density"].shape
+ dn = np.zeros((nx,ny,nz))
+ Dtau = sigma_thompson*data["Density"]/(mh*mue)*dz
+ Te = data["Temperature"]/K_per_keV
+ betac = data["z-velocity"]/clight
+ pbar = get_pbar("Computing 3-D cell-by-cell S-Z signal for comparison.", nx)
+ for i in xrange(nx):
+ pbar.update(i)
+ for j in xrange(ny):
+ for k in xrange(nz):
+ dn[i,j,k] = SZpack.compute_3d(xo, Dtau[i,j,k],
+ Te[i,j,k], betac[i,j,k],
+ 1.0, 0.0, 0.0, 1.0e-5)
+ pbar.finish()
+ return I0*xo**3*np.sum(dn, axis=2)
+
+def setup_cluster():
+
+ R = 1000.
+ r_c = 100.
+ rho_c = 1.673e-26
+ beta = 1.
+ T0 = 4.
+ nx,ny,nz = 16,16,16
+ c = 0.17
+ a_c = 30.
+ a = 200.
+ v0 = 300.*cm_per_km
+ ddims = (nx,ny,nz)
+
+ x, y, z = np.mgrid[-R:R:nx*1j,
+ -R:R:ny*1j,
+ -R:R:nz*1j]
+
+ r = np.sqrt(x**2+y**2+z**2)
+
+ dens = np.zeros(ddims)
+ dens = rho_c*(1.+(r/r_c)**2)**(-1.5*beta)
+ temp = T0*K_per_keV/(1.+r/a)*(c+r/a_c)/(1.+r/a_c)
+ velz = v0*temp/(T0*K_per_keV)
+
+ data = {}
+ data["Density"] = dens
+ data["Temperature"] = temp
+ data["x-velocity"] = np.zeros(ddims)
+ data["y-velocity"] = np.zeros(ddims)
+ data["z-velocity"] = velz
+
+ bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
+
+ L = 2*R*cm_per_kpc
+ dl = L/nz
+
+ pf = load_uniform_grid(data, ddims, L, bbox=bbox)
+
+ return pf
+
+@requires_module("SZpack")
+def test_projection():
+ pf = setup_cluster()
+ nx,ny,nz = pf.domain_dimensions
+ xinit = 1.0e9*hcgs*freqs/(kboltz*Tcmb)
+ szprj = SZProjection(pf, freqs, mue=mue, high_order=True)
+ szprj.on_axis(2, nx=nx)
+ deltaI = np.zeros((3,nx,ny))
+ for i in xrange(3):
+ deltaI[i,:,:] = full_szpack3d(pf, xinit[i])
+ yield assert_almost_equal, deltaI[i,:,:], szprj["%d_GHz" % int(freqs[i])], 6
+
+M7 = "DD0010/moving7_0010"
+@requires_module("SZpack")
+@requires_pf(M7)
+def test_M7_onaxis():
+ pf = data_dir_load(M7)
+ szprj = SZProjection(pf, freqs)
+ szprj.on_axis(2, nx=100)
+ def onaxis_array_func():
+ return szprj.data
+ def onaxis_image_func(filename_prefix):
+ szprj.write_png(filename_prefix)
+ for test in [GenericArrayTest(pf, onaxis_array_func),
+ GenericImageTest(pf, onaxis_image_func, 3)]:
+ test_M7_onaxis.__name__ = test.description
+ yield test
+
+@requires_module("SZpack")
+@requires_pf(M7)
+def test_M7_offaxis():
+ pf = data_dir_load(M7)
+ szprj = SZProjection(pf, freqs)
+ szprj.off_axis(np.array([0.1,-0.2,0.4]), nx=100)
+ def offaxis_array_func():
+ return szprj.data
+ def offaxis_image_func(filename_prefix):
+ szprj.write_png(filename_prefix)
+ for test in [GenericArrayTest(pf, offaxis_array_func),
+ GenericImageTest(pf, offaxis_image_func, 3)]:
+ test_M7_offaxis.__name__ = test.description
+ yield test
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -498,7 +498,7 @@
points[:, 2] = points[:, 2] / self.period[2]
fKD.qv_many = points.T
fKD.nn_tags = np.asfortranarray(np.empty((1, points.shape[0]), dtype='int64'))
- find_many_nn_nearest_neighbors()
+ fKD.find_many_nn_nearest_neighbors()
# The -1 is for fortran counting.
n = fKD.nn_tags[0,:] - 1
return n
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -52,7 +52,7 @@
notebook_password = '',
answer_testing_tolerance = '3',
answer_testing_bitwise = 'False',
- gold_standard_filename = 'gold310',
+ gold_standard_filename = 'gold311',
local_standard_filename = 'local001',
sketchfab_api_key = 'None',
thread_field_detection = 'False'
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -398,7 +398,8 @@
center, pf, field_parameters)
self.left_edge = np.array(left_edge)
self.level = level
- rdx = self.pf.domain_dimensions*self.pf.refine_by**level
+
+ rdx = self.pf.domain_dimensions*self.pf.relative_refinement(0, level)
rdx[np.where(dims - 2 * num_ghost_zones <= 1)] = 1 # issue 602
self.dds = self.pf.domain_width / rdx.astype("float64")
self.ActiveDimensions = np.array(dims, dtype='int32')
@@ -488,9 +489,11 @@
output_fields = [np.zeros(self.ActiveDimensions, dtype="float64")
for field in fields]
domain_dims = self.pf.domain_dimensions.astype("int64") \
- * self.pf.refine_by**self.level
+ * self.pf.relative_refinement(0, self.level)
for chunk in self._data_source.chunks(fields, "io"):
input_fields = [chunk[field] for field in fields]
+ # NOTE: This usage of "refine_by" is actually *okay*, because it's
+ # being used with respect to iref, which is *already* scaled!
fill_region(input_fields, output_fields, self.level,
self.global_startindex, chunk.icoords, chunk.ires,
domain_dims, self.pf.refine_by)
@@ -647,10 +650,12 @@
ls = self._initialize_level_state(fields)
for level in range(self.level + 1):
domain_dims = self.pf.domain_dimensions.astype("int64") \
- * self.pf.refine_by**level
+ * self.pf.relative_refinement(0, self.level)
for chunk in ls.data_source.chunks(fields, "io"):
chunk[fields[0]]
input_fields = [chunk[field] for field in fields]
+ # NOTE: This usage of "refine_by" is actually *okay*, because it's
+ # being used with respect to iref, which is *already* scaled!
fill_region(input_fields, ls.fields, ls.current_level,
ls.global_startindex, chunk.icoords,
chunk.ires, domain_dims, self.pf.refine_by)
@@ -682,14 +687,16 @@
def _update_level_state(self, level_state):
ls = level_state
if ls.current_level >= self.level: return
+ rf = float(self.pf.relative_refinement(
+ ls.current_level, ls.current_level + 1))
ls.current_level += 1
- ls.current_dx = self._base_dx / self.pf.refine_by**ls.current_level
+ ls.current_dx = self._base_dx / \
+ self.pf.relative_refinement(0, ls.current_level)
self._setup_data_source(ls)
LL = self.left_edge - self.pf.domain_left_edge
ls.old_global_startindex = ls.global_startindex
ls.global_startindex = np.rint(LL / ls.current_dx).astype('int64') - 1
ls.domain_iwidth = np.rint(self.pf.domain_width/ls.current_dx).astype('int64')
- rf = float(self.pf.refine_by)
input_left = (level_state.old_global_startindex + 0.5) * rf
width = (self.ActiveDimensions*self.dds)
output_dims = np.rint(width/level_state.current_dx+0.5).astype("int32") + 2
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -469,7 +469,6 @@
fd = self.pf.field_dependencies.get(field, None) or \
self.pf.field_dependencies.get(field[1], None)
if fd is None: continue
- fd = self.pf.field_dependencies[field]
requested = self._determine_fields(list(set(fd.requested)))
deps = [d for d in requested if d not in fields_to_get]
fields_to_get += deps
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -264,7 +264,7 @@
if not isinstance(item, tuple):
field = ("unknown", item)
finfo = self.pf._get_field_info(*field)
- mylog.debug("Guessing field %s is %s", item, finfo.name)
+ #mylog.debug("Guessing field %s is %s", item, finfo.name)
else:
field = item
finfo = self.pf._get_field_info(*field)
@@ -301,7 +301,8 @@
return self[item]
elif finfo is not None and finfo.particle_type:
if item == "Coordinates" or item[1] == "Coordinates" or \
- item == "Velocities" or item[1] == "Velocities":
+ item == "Velocities" or item[1] == "Velocities" or \
+ item == "Velocity" or item[1] == "Velocity":
# A vector
self[item] = np.ones((self.NumberOfParticles, 3))
else:
@@ -329,6 +330,8 @@
self.requested_parameters.append(param)
if param in ['bulk_velocity', 'center', 'normal']:
return np.random.random(3) * 1e-2
+ elif param in ['axis']:
+ return 0
else:
return 0.0
_num_ghost_zones = 0
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -56,6 +56,7 @@
self.pf = self.hierarchy.parameter_file # weakref already
self._child_mask = self._child_indices = self._child_index_mask = None
self.start_index = None
+ self.filename = filename
self._last_mask = None
self._last_count = -1
self._last_selector_id = None
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -113,7 +113,7 @@
_domain_ind = None
def select_blocks(self, selector):
- mask = self.oct_handler.mask(selector)
+ mask = self.oct_handler.mask(selector, domain_id = self.domain_id)
mask = self._reshape_vals(mask)
slicer = OctreeSubsetBlockSlice(self)
for i, sl in slicer:
@@ -271,12 +271,14 @@
@property
def LeftEdge(self):
- LE = self._fcoords[0,0,0,self.ind,:] - self._fwidth[0,0,0,self.ind,:]*0.5
+ LE = (self._fcoords[0,0,0,self.ind,:]
+ - self._fwidth[0,0,0,self.ind,:])*0.5
return LE
@property
def RightEdge(self):
- RE = self._fcoords[1,1,1,self.ind,:] + self._fwidth[1,1,1,self.ind,:]*0.5
+ RE = (self._fcoords[-1,-1,-1,self.ind,:]
+ + self._fwidth[-1,-1,-1,self.ind,:])*0.5
return RE
@property
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ /dev/null
@@ -1,197 +0,0 @@
-"""
-These are common particle deposition fields.
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-from yt.funcs import *
-from yt.data_objects.field_info_container import \
- FieldInfoContainer, \
- FieldInfo, \
- ValidateParameter, \
- ValidateDataField, \
- ValidateProperty, \
- ValidateSpatial, \
- ValidateGridType, \
- NullFunc, \
- TranslationFunc
-from yt.utilities.physical_constants import \
- mass_hydrogen_cgs, \
- mass_sun_cgs, \
- mh
-
-def _field_concat(fname):
- def _AllFields(field, data):
- v = []
- for ptype in data.pf.particle_types:
- data.pf._last_freq = (ptype, None)
- if ptype == "all" or \
- ptype in data.pf.known_filters:
- continue
- v.append(data[ptype, fname].copy())
- rv = np.concatenate(v, axis=0)
- return rv
- return _AllFields
-
-def _field_concat_slice(fname, axi):
- def _AllFields(field, data):
- v = []
- for ptype in data.pf.particle_types:
- data.pf._last_freq = (ptype, None)
- if ptype == "all" or \
- ptype in data.pf.known_filters:
- continue
- v.append(data[ptype, fname][:,axi])
- rv = np.concatenate(v, axis=0)
- return rv
- return _AllFields
-
-def particle_deposition_functions(ptype, coord_name, mass_name, registry):
- orig = set(registry.keys())
- def particle_count(field, data):
- pos = data[ptype, coord_name]
- d = data.deposit(pos, method = "count")
- return d
-
- registry.add_field(("deposit", "%s_count" % ptype),
- function = particle_count,
- validators = [ValidateSpatial()],
- display_name = "\\mathrm{%s Count}" % ptype,
- projection_conversion = '1')
-
- def particle_mass(field, data):
- pos = data[ptype, coord_name]
- d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
- return d
-
- registry.add_field(("deposit", "%s_mass" % ptype),
- function = particle_mass,
- validators = [ValidateSpatial()],
- display_name = "\\mathrm{%s Mass}" % ptype,
- units = r"\mathrm{g}",
- projected_units = r"\mathrm{g}\/\mathrm{cm}",
- projection_conversion = 'cm')
-
- def particle_density(field, data):
- pos = data[ptype, coord_name]
- d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
- d /= data["gas","CellVolume"]
- return d
-
- registry.add_field(("deposit", "%s_density" % ptype),
- function = particle_density,
- validators = [ValidateSpatial()],
- display_name = "\\mathrm{%s Density}" % ptype,
- units = r"\mathrm{g}/\mathrm{cm}^{3}",
- projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
- projection_conversion = 'cm')
-
- def particle_cic(field, data):
- pos = data[ptype, coord_name]
- d = data.deposit(pos, [data[ptype, mass_name]], method = "cic")
- d /= data["gas","CellVolume"]
- return d
-
- registry.add_field(("deposit", "%s_cic" % ptype),
- function = particle_cic,
- validators = [ValidateSpatial()],
- display_name = "\\mathrm{%s CIC Density}" % ptype,
- units = r"\mathrm{g}/\mathrm{cm}^{3}",
- projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
- projection_conversion = 'cm')
-
- # Now some translation functions.
-
- def particle_ones(field, data):
- return np.ones(data[ptype, mass_name].shape, dtype="float64")
-
- registry.add_field((ptype, "particle_ones"),
- function = particle_ones,
- particle_type = True,
- units = "")
-
- registry.add_field((ptype, "ParticleMass"),
- function = TranslationFunc((ptype, mass_name)),
- particle_type = True,
- units = r"\mathrm{g}")
-
- def _ParticleMassMsun(field, data):
- return data[ptype, mass_name].copy()
- def _conv_Msun(data):
- return 1.0/mass_sun_cgs
-
- registry.add_field((ptype, "ParticleMassMsun"),
- function = _ParticleMassMsun,
- convert_function = _conv_Msun,
- particle_type = True,
- units = r"\mathrm{M}_\odot")
-
- def particle_mesh_ids(field, data):
- pos = data[ptype, coord_name]
- ids = np.zeros(pos.shape[0], dtype="float64") - 1
- # This is float64 in name only. It will be properly cast inside the
- # deposit operation.
- #_ids = ids.view("float64")
- data.deposit(pos, [ids], method = "mesh_id")
- return ids
- registry.add_field((ptype, "mesh_id"),
- function = particle_mesh_ids,
- validators = [ValidateSpatial()],
- particle_type = True)
-
- return list(set(registry.keys()).difference(orig))
-
-
-def particle_scalar_functions(ptype, coord_name, vel_name, registry):
-
- # Now we have to set up the various velocity and coordinate things. In the
- # future, we'll actually invert this and use the 3-component items
- # elsewhere, and stop using these.
-
- # Note that we pass in _ptype here so that it's defined inside the closure.
- orig = set(registry.keys())
-
- def _get_coord_funcs(axi, _ptype):
- def _particle_velocity(field, data):
- return data[_ptype, vel_name][:,axi]
- def _particle_position(field, data):
- return data[_ptype, coord_name][:,axi]
- return _particle_velocity, _particle_position
- for axi, ax in enumerate("xyz"):
- v, p = _get_coord_funcs(axi, ptype)
- registry.add_field((ptype, "particle_velocity_%s" % ax),
- particle_type = True, function = v)
- registry.add_field((ptype, "particle_position_%s" % ax),
- particle_type = True, function = p)
-
- return list(set(registry.keys()).difference(orig))
-
-def particle_vector_functions(ptype, coord_names, vel_names, registry):
-
- # This will column_stack a set of scalars to create vector fields.
- orig = set(registry.keys())
-
- def _get_vec_func(_ptype, names):
- def particle_vectors(field, data):
- return np.column_stack([data[_ptype, name] for name in names])
- return particle_vectors
- registry.add_field((ptype, "Coordinates"),
- function=_get_vec_func(ptype, coord_names),
- particle_type=True)
- registry.add_field((ptype, "Velocities"),
- function=_get_vec_func(ptype, vel_names),
- particle_type=True)
-
- return list(set(registry.keys()).difference(orig))
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/data_objects/particle_unions.py
--- /dev/null
+++ b/yt/data_objects/particle_unions.py
@@ -0,0 +1,27 @@
+"""
+These are particle union objects. These essentially alias one particle to
+another, where the other can be one or several particle types.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.funcs import ensure_list
+
+class ParticleUnion(object):
+ def __init__(self, name, sub_types):
+ self.name = name
+ self.sub_types = ensure_list(sub_types)
+
+ def __iter__(self):
+ for st in self.sub_types:
+ yield st
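
ParticleUnion itself is only a named, iterable list of particle type names; the registration logic lives in StaticOutput.add_particle_union in the static_output.py hunk further down. A tiny sketch (the member type names are illustrative):

    from yt.data_objects.particle_unions import ParticleUnion

    pu = ParticleUnion("all", ["io", "stars"])   # "stars" is illustrative
    for ptype in pu:        # __iter__ yields the member type names
        print(ptype)
    print(pu.name)
    print(pu.sub_types)
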
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -29,6 +29,8 @@
FieldInfoContainer, NullFunc
from yt.data_objects.particle_filters import \
filter_registry
+from yt.data_objects.particle_unions import \
+ ParticleUnion
from yt.utilities.minimal_representation import \
MinimalStaticOutput
@@ -48,13 +50,17 @@
default_fluid_type = "gas"
fluid_types = ("gas","deposit")
- particle_types = ("all",)
+ particle_types = ("io",) # By default we have an 'all'
+ particle_types_raw = ("io",)
geometry = "cartesian"
coordinates = None
max_level = 99
storage_filename = None
_particle_mass_name = None
_particle_coordinates_name = None
+ _particle_velocity_name = None
+ particle_unions = None
+ known_filters = None
class __metaclass__(type):
def __init__(cls, name, b, d):
@@ -91,7 +97,8 @@
self.file_style = file_style
self.conversion_factors = {}
self.parameters = {}
- self.known_filters = {}
+ self.known_filters = self.known_filters or {}
+ self.particle_unions = self.particle_unions or {}
# path stuff
self.parameter_filename = str(filename)
@@ -213,6 +220,11 @@
raise RuntimeError("You should not instantiate StaticOutput.")
self._instantiated_hierarchy = self._hierarchy_class(
self, data_style=self.data_style)
+ # Now we do things that we need an instantiated hierarchy for
+ if "all" not in self.particle_types:
+ mylog.debug("Creating Particle Union 'all'")
+ pu = ParticleUnion("all", list(self.particle_types_raw))
+ self.add_particle_union(pu)
return self._instantiated_hierarchy
h = hierarchy # alias
@@ -308,12 +320,41 @@
return self._last_finfo
# We also should check "all" for particles, which can show up if you're
# mixing deposition/gas fields with particle fields.
- if guessing_type and ("all", fname) in self.field_info:
- self._last_freq = ("all", fname)
- self._last_finfo = self.field_info["all", fname]
- return self._last_finfo
+ if guessing_type:
+ for ftype in ("all", self.default_fluid_type):
+ if (ftype, fname) in self.field_info:
+ self._last_freq = (ftype, fname)
+ self._last_finfo = self.field_info[(ftype, fname)]
+ return self._last_finfo
raise YTFieldNotFound((ftype, fname), self)
+ def add_particle_union(self, union):
+ # No string lookups here, we need an actual union.
+ f = self.particle_fields_by_type
+ fields = set_intersection([f[s] for s in union
+ if s in self.particle_types_raw])
+ self.particle_types += (union.name,)
+ self.particle_unions[union.name] = union
+ fields = [ (union.name, field) for field in fields]
+ self.h.field_list.extend(fields)
+ # Give ourselves a chance to add them here, first, then...
+ # ...if we can't find them, we set them up as defaults.
+ self.h._setup_particle_types([union.name])
+ self.h._setup_unknown_fields(fields, self.field_info,
+ skip_removal = True)
+
+ def _setup_particle_type(self, ptype):
+ mylog.debug("Don't know what to do with %s", ptype)
+ return []
+
+ @property
+ def particle_fields_by_type(self):
+ fields = defaultdict(list)
+ for field in self.h.field_list:
+ if field[0] in self.particle_types_raw:
+ fields[field[0]].append(field[1])
+ return fields
+
@property
def ires_factor(self):
o2 = np.log2(self.refine_by)
@@ -321,6 +362,9 @@
raise RuntimeError
return int(o2)
+ def relative_refinement(self, l0, l1):
+ return self.refine_by**(l1-l0)
+
def _reconstruct_pf(*args, **kwargs):
pfs = ParameterFileStore()
pf = pfs.get_pf_hash(*args)
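
The new relative_refinement hook centralizes the level-to-level refinement factor so callers such as the covering-grid code above no longer hard-code refine_by**level; a frontend whose refinement varies by level can override the method. A minimal sketch of the default behaviour, using an illustrative class and root-grid size:

    class ExampleOutput(object):
        # Mirrors the default added to StaticOutput above; a frontend with
        # level-dependent refinement would override relative_refinement.
        refine_by = 2

        def relative_refinement(self, l0, l1):
            return self.refine_by ** (l1 - l0)

    pf = ExampleOutput()
    domain_dimensions = 32                      # illustrative root grid
    # Effective resolution of a level-3 covering grid:
    print(domain_dimensions * pf.relative_refinement(0, 3))   # 256
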
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ /dev/null
@@ -1,108 +0,0 @@
-from yt.testing import *
-import numpy as np
-from yt.data_objects.field_info_container import \
- FieldInfo
-import yt.data_objects.universal_fields
-from yt.utilities.definitions import \
- mpc_conversion, sec_conversion
-
-def setup():
- from yt.config import ytcfg
- ytcfg["yt","__withintesting"] = "True"
- np.seterr(all = 'ignore')
-
-_sample_parameters = dict(
- axis = 0,
- center = np.array((0.0, 0.0, 0.0)),
- bulk_velocity = np.array((0.0, 0.0, 0.0)),
- normal = np.array((0.0, 0.0, 1.0)),
- cp_x_vec = np.array((1.0, 0.0, 0.0)),
- cp_y_vec = np.array((0.0, 1.0, 0.0)),
- cp_z_vec = np.array((0.0, 0.0, 1.0)),
-)
-
-_base_fields = ["Density", "x-velocity", "y-velocity", "z-velocity"]
-
-def realistic_pf(fields, nprocs):
- np.random.seed(int(0x4d3d3d3))
- pf = fake_random_pf(16, fields = fields, nprocs = nprocs)
- pf.parameters["HydroMethod"] = "streaming"
- pf.parameters["Gamma"] = 5.0/3.0
- pf.parameters["EOSType"] = 1.0
- pf.parameters["EOSSoundSpeed"] = 1.0
- pf.conversion_factors["Time"] = 1.0
- pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
- pf.current_redshift = 0.0001
- pf.hubble_constant = 0.7
- pf.omega_matter = 0.27
- for unit in mpc_conversion:
- pf.units[unit+'h'] = pf.units[unit]
- pf.units[unit+'cm'] = pf.units[unit]
- pf.units[unit+'hcm'] = pf.units[unit]
- return pf
-
-class TestFieldAccess(object):
- description = None
-
- def __init__(self, field_name, nproc):
- # Note this should be a field name
- self.field_name = field_name
- self.description = "Accessing_%s_%s" % (field_name, nproc)
- self.nproc = nproc
-
- def __call__(self):
- field = FieldInfo[self.field_name]
- deps = field.get_dependencies()
- fields = list(set(deps.requested + _base_fields))
- skip_grids = False
- needs_spatial = False
- for v in field.validators:
- f = getattr(v, "fields", None)
- if f: fields += f
- if getattr(v, "ghost_zones", 0) > 0:
- skip_grids = True
- if hasattr(v, "ghost_zones"):
- needs_spatial = True
- pf = realistic_pf(fields, self.nproc)
- # This gives unequal sized grids as well as subgrids
- dd1 = pf.h.all_data()
- dd2 = pf.h.all_data()
- dd1.field_parameters.update(_sample_parameters)
- dd2.field_parameters.update(_sample_parameters)
- v1 = dd1[self.field_name]
- conv = field._convert_function(dd1) or 1.0
- if not field.particle_type:
- assert_equal(v1, dd1["gas", self.field_name])
- if not needs_spatial:
- assert_array_almost_equal_nulp(v1, conv*field._function(field, dd2), 4)
- if not skip_grids:
- for g in pf.h.grids:
- g.field_parameters.update(_sample_parameters)
- conv = field._convert_function(g) or 1.0
- v1 = g[self.field_name]
- g.clear_data()
- g.field_parameters.update(_sample_parameters)
- assert_array_almost_equal_nulp(v1, conv*field._function(field, g), 4)
-
-def test_all_fields():
- for field in FieldInfo:
- if isinstance(field, types.TupleType):
- fname = field[0]
- else:
- fname = field
- if fname.startswith("CuttingPlane"): continue
- if fname.startswith("particle"): continue
- if fname.startswith("CIC"): continue
- if fname.startswith("WeakLensingConvergence"): continue
- if fname.startswith("DensityPerturbation"): continue
- if fname.startswith("Matter_Density"): continue
- if fname.startswith("Overdensity"): continue
- if FieldInfo[field].particle_type: continue
- for nproc in [1, 4, 8]:
- test_all_fields.__name__ = "%s_%s" % (field, nproc)
- yield TestFieldAccess(field, nproc)
-
-if __name__ == "__main__":
- setup()
- for t in test_all_fields():
- t()
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/yt_analysis/yt/commits/7f29b0e8bf2b/
Changeset: 7f29b0e8bf2b
Branch: yt-3.0
User: ngoldbaum
Date: 2013-10-21 08:08:10
Summary: Moving the derived quantity functions to the quantities object.
Affected #: 1 file
diff -r 1613797d60ad38a2a6675c2219c3c5df526a29fb -r 7f29b0e8bf2b794fd2111e603c437a54412ba513 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -746,7 +746,8 @@
self._grids = None
self.quantities = DerivedQuantityCollection(self)
for f in self.quantities.keys():
- self.__dict__[camelcase_to_underscore(f)] = self.quantities[f]
+ self.quantities.__dict__[camelcase_to_underscore(f)] = \
+ self.quantities[f]
def cut_region(self, field_cuts):
"""
https://bitbucket.org/yt_analysis/yt/commits/aa075fc7dbbd/
Changeset: aa075fc7dbbd
Branch: yt-3.0
User: ngoldbaum
Date: 2013-10-21 10:18:11
Summary: Moving the derived quantities syntax magic into DerivedQuantityCollection.
Affected #: 2 files
diff -r 7f29b0e8bf2b794fd2111e603c437a54412ba513 -r aa075fc7dbbd4ff74832af3d461429a547e19c17 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -745,9 +745,6 @@
self.coords = None
self._grids = None
self.quantities = DerivedQuantityCollection(self)
- for f in self.quantities.keys():
- self.quantities.__dict__[camelcase_to_underscore(f)] = \
- self.quantities[f]
def cut_region(self, field_cuts):
"""
diff -r 7f29b0e8bf2b794fd2111e603c437a54412ba513 -r aa075fc7dbbd4ff74832af3d461429a547e19c17 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -88,8 +88,12 @@
class DerivedQuantityCollection(object):
functions = quantity_info
- def __init__(self, data_source):
- self.data_source = data_source
+ def __new__(cls, data_source, *args, **kwargs):
+ inst = object.__new__(cls)
+ inst.data_source = data_source
+ for f in inst.keys():
+ setattr(inst, camelcase_to_underscore(f), inst[f])
+ return inst
def __getitem__(self, key):
if key not in self.functions:
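
The __new__ override above attaches a snake_case attribute for every registered quantity at construction time, so dd.quantities.total_quantity(...) works alongside dd.quantities["TotalQuantity"](...). A standalone sketch of the same aliasing pattern with a hypothetical registry (not the actual quantity_info):

    import re

    def camelcase_to_underscore(name):
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

    # Hypothetical registry standing in for quantity_info.
    quantity_info = {
        "TotalQuantity": lambda data: sum(data),
        "WeightedAverageQuantity": lambda data: sum(data) / len(data),
    }

    class Collection(object):
        functions = quantity_info

        def __new__(cls, data_source, *args, **kwargs):
            inst = object.__new__(cls)
            inst.data_source = data_source
            # Alias every CamelCase key as a snake_case attribute.
            for f in inst.functions:
                setattr(inst, camelcase_to_underscore(f), inst[f])
            return inst

        def __getitem__(self, key):
            func = self.functions[key]
            return lambda *a, **kw: func(self.data_source, *a, **kw)

    q = Collection([1.0, 2.0, 3.0])
    print(q["TotalQuantity"]())   # 6.0
    print(q.total_quantity())     # 6.0, via the snake_case alias
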
https://bitbucket.org/yt_analysis/yt/commits/ea58f0906739/
Changeset: ea58f0906739
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-22 13:10:58
Summary: Merged in ngoldbaum/yt-3.0 (pull request #112)
Derived quantity syntax sugar
Affected #: 3 files
diff -r 6354e5834da8f23e1e2e5dc799e332d1819d7e02 -r ea58f090673982ed065dfcca187fad1535cf84ac yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -88,8 +88,12 @@
class DerivedQuantityCollection(object):
functions = quantity_info
- def __init__(self, data_source):
- self.data_source = data_source
+ def __new__(cls, data_source, *args, **kwargs):
+ inst = object.__new__(cls)
+ inst.data_source = data_source
+ for f in inst.keys():
+ setattr(inst, camelcase_to_underscore(f), inst[f])
+ return inst
def __getitem__(self, key):
if key not in self.functions:
diff -r 6354e5834da8f23e1e2e5dc799e332d1819d7e02 -r ea58f090673982ed065dfcca187fad1535cf84ac yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -14,7 +14,7 @@
#-----------------------------------------------------------------------------
import __builtin__
-import time, types, signal, inspect, traceback, sys, pdb, os
+import time, types, signal, inspect, traceback, sys, pdb, os, re
import contextlib
import warnings, struct, subprocess
import numpy as np
@@ -626,6 +626,10 @@
if not os.path.exists(my_dir):
only_on_root(os.makedirs, my_dir)
+def camelcase_to_underscore(name):
+ s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
+ return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
+
def set_intersection(some_list):
if len(some_list) == 0: return set([])
# This accepts a list of iterables, which we get the intersection of.
https://bitbucket.org/yt_analysis/yt/commits/d7852f590c02/
Changeset: d7852f590c02
Branch: yt-3.0
User: ChrisMalone
Date: 2013-10-18 21:22:06
Summary: provide factory function for slice plots
Affected #: 1 file
diff -r 29e24eb0f722d4e4d907b58ed1398eee652a8bdf -r d7852f590c025a626bb530466b16f332d7c80a6a yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1208,7 +1208,7 @@
ret += '<img src="data:image/png;base64,%s"><br>' % img
return ret
-class SlicePlot(PWViewerMPL):
+class OnAxisSlicePlot(PWViewerMPL):
r"""Creates a slice plot from a parameter file
Given a pf object, an axis to slice along, and a field name
@@ -1989,3 +1989,32 @@
yfrac
)
return axrect, caxrect
+
+def SlicePlot(pf, normal, fields, *args, **kwargs):
+ r"""
+ A factory function for
+ :class:`yt.visualization.plot_window.OnAxisSlicePlot`
+ and :class:`yt.visualization.plot_window.OffAxisSlicePlot` objects. This
+ essentially allows for a single entry point to both types of slice plots.
+
+ Parameters
+ ----------
+ pf : :class:`yt.data_objects.api.StaticOutput`
+ This is the parameter file object corresponding to the
+ simulation output to be plotted.
+
+ normal : int or one of 'x', 'y', 'z', or sequence of floats
+ This specifies the normal vector to the slice. If given as an integer
+ or a coordinate string (0=x, 1=y, 2=z), this function will return an
+ :class:`OnAxisSlicePlot` object. If given as a sequence of floats,
+ this is interpretted as an off-axis vector and an
+ :class:`OffAxisSlicePlot` object is returned.
+ fields : string
+ The name of the field(s) to be plotted.
+
+ """
+
+ if iterable(normal) and not isinstance(normal,str):
+ return OffAxisSlicePlot(pf, normal, fields, *args, **kwargs)
+ else:
+ return OnAxisSlicePlot(pf, normal, fields, *args, **kwargs)
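
With the factory in place, one entry point covers both plot types; the dispatch depends only on whether normal is an axis name/integer or a 3-vector. A short usage sketch, assuming SlicePlot is importable from yt.mods as in earlier releases (dataset and field names are illustrative):

    import numpy as np
    from yt.mods import load, SlicePlot

    pf = load("galaxy0030/galaxy0030")        # illustrative dataset

    # An axis name dispatches to OnAxisSlicePlot ...
    slc = SlicePlot(pf, "z", "Density", width=(20.0, "kpc"))
    slc.save()

    # ... while a 3-component normal dispatches to OffAxisSlicePlot.
    slc_off = SlicePlot(pf, np.array([0.4, 0.2, -0.1]), "Density",
                        north_vector=[0.2, -0.3, 0.1])
    slc_off.save()
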
https://bitbucket.org/yt_analysis/yt/commits/fb3cf0cdece6/
Changeset: fb3cf0cdece6
Branch: yt-3.0
User: ChrisMalone
Date: 2013-10-19 04:07:55
Summary: add docstring for ipython query and make backwards-compatible
Affected #: 1 file
diff -r d7852f590c025a626bb530466b16f332d7c80a6a -r fb3cf0cdece6778670669091d7569c0797033c8d yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1990,19 +1990,23 @@
)
return axrect, caxrect
-def SlicePlot(pf, normal, fields, *args, **kwargs):
+def SlicePlot(pf, normal=None, fields=None, axis=None, *args, **kwargs):
r"""
A factory function for
:class:`yt.visualization.plot_window.OnAxisSlicePlot`
and :class:`yt.visualization.plot_window.OffAxisSlicePlot` objects. This
- essentially allows for a single entry point to both types of slice plots.
+ essentially allows for a single entry point to both types of slice plots,
+ the distinction being determined by the specified normal vector to the
+ slice.
+
+ The returned plot object can be updated using one of the many helper
+ functions defined in PlotWindow.
Parameters
----------
pf : :class:`yt.data_objects.api.StaticOutput`
This is the parameter file object corresponding to the
simulation output to be plotted.
-
normal : int or one of 'x', 'y', 'z', or sequence of floats
This specifies the normal vector to the slice. If given as an integer
or a coordinate string (0=x, 1=y, 2=z), this function will return an
@@ -2010,11 +2014,125 @@
this is interpretted as an off-axis vector and an
:class:`OffAxisSlicePlot` object is returned.
fields : string
- The name of the field(s) to be plotted.
+ The name of the field(s) to be plotted.
+ axis : int or one of 'x', 'y', 'z'
+ An int corresponding to the axis to slice along (0=x, 1=y, 2=z)
+ or the axis name itself. If specified, this will replace normal.
+
+ The following are nominally keyword arguments passed onto the respective
+ slice plot objects generated by this function.
+
+ center : two or three-element vector of sequence floats, 'c', or 'center',
+ or 'max'
+ If set to 'c', 'center' or left blank, the plot is centered on the
+ middle of the domain. If set to 'max' or 'm', the center will be at
+ the point of highest density.
+ width : tuple or a float.
+ Width can have four different formats to support windows with variable
+ x and y widths. They are:
+
+ ================================== =======================
+ format example
+ ================================== =======================
+ (float, string) (10,'kpc')
+ ((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))
+ float 0.2
+ (float, float) (0.2, 0.3)
+ ================================== =======================
+
+ For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
+ wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
+ window that is 10 kiloparsecs wide along the x axis and 15
+ kiloparsecs wide along the y axis. In the other two examples, code
+ units are assumed, for example (0.2, 0.3) requests a plot that has an
+ x width of 0.2 and a y width of 0.3 in code units. If units are
+ provided the resulting plot axis labels will use the supplied units.
+ axes_unit : A string
+ The name of the unit for the tick labels on the x and y axes.
+ Defaults to None, which automatically picks an appropriate unit.
+ If axes_unit is '1', 'u', or 'unitary', it will not display the
+ units, and only show the axes name.
+ origin : string or length 1, 2, or 3 sequence of strings
+ The location of the origin of the plot coordinate system for
+ `OnAxisSlicePlot` objects; for `OffAxisSlicePlot` objects,
+ this parameter is discarded. This is represented by '-' separated
+ string or a tuple of strings. In the first index the y-location is
+ given by 'lower', 'upper', or 'center'. The second index is the
+ x-location, given as 'left', 'right', or 'center'. Finally, the
+ whether the origin is applied in 'domain' space, plot 'window' space
+ or 'native' simulation coordinate system is given. For example, both
+ 'upper-right-domain' and ['upper', 'right', 'domain'] both place the
+ origin in the upper right hand corner of domain space. If x or y are
+ not given, a value is inffered. For instance, 'left-domain'
+ corresponds to the lower-left hand corner of the simulation domain,
+ 'center-domain' corresponds to the center of the simulation domain,
+ or 'center-window' for the center of the plot window. Further
+ examples:
+
+ ================================== ============================
+ format example
+ ================================== ============================
+ '{space}' 'domain'
+ '{xloc}-{space}' 'left-window'
+ '{yloc}-{space}' 'upper-domain'
+ '{yloc}-{xloc}-{space}' 'lower-right-window'
+ ('{space}',) ('window',)
+ ('{xloc}', '{space}') ('right', 'domain')
+ ('{yloc}', '{space}') ('lower', 'window')
+ ('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window')
+ ================================== ============================
+ north-vector : a sequence of floats
+ A vector defining the 'up' direction in the `OffAxisSlicePlot`; not
+ used in OnAxisSlicePlots. This option sets the orientation of the
+ slicing plane. If not set, an arbitrary grid-aligned north-vector is
+ chosen.
+ fontsize : integer
+ The size of the fonts for the axis, colorbar, and tick labels.
+ field_parameters : dictionary
+ A dictionary of field parameters than can be accessed by derived
+ fields.
+
+ Raises
+ ------
+ AssertionError
+ If a proper normal axis is not specified via the normal or axis
+ keywords, and/or if a field to plot is not specified.
+
+ Examples
+ --------
+
+ >>> slc = SlicePlot(pf, "x", "Density", center=[0.2,0.3,0.4])
+ >>> slc = SlicePlot(pf, 2, "Temperature")
+ >>> slc = SlicePlot(pf, [0.4,0.2,-0.1], "Pressure",
+ north_vector=[0.2,-0.3,0.1])
"""
+ # Make sure we are passed a normal
+ # we check the axis keyword for backwards compatability
+ if normal is None: normal = axis
+ if normal is None:
+ raise AssertionError("Must pass a normal vector to the slice!")
+
+ # to keep positional ordering we had to make fields a keyword; make sure
+ # it is present
+ if fields is None:
+ raise AssertionError("Must pass field(s) to plot!")
+
+ # use an OnAxisSlicePlot where possible, e.g.:
+ # maybe someone passed normal=[0,0,1] when they should have just used "z"
+ if iterable(normal):
+ normal = np.array(normal)
+ np.divide(normal, np.dot(normal,normal), normal)
+ if np.count_nonzero(normal) == 1:
+ normal = ("x","y","z")[np.nonzero(normal)[0][0]]
if iterable(normal) and not isinstance(normal,str):
+ # OffAxisSlicePlot has hardcoded origin; remove it if in kwargs
+ if 'origin' in kwargs: del kwargs['origin']
+
return OffAxisSlicePlot(pf, normal, fields, *args, **kwargs)
else:
+ # north_vector not used in OnAxisSlicePlots; remove it if in kwargs
+ if 'north_vector' in kwargs: del kwargs['north_vector']
+
return OnAxisSlicePlot(pf, normal, fields, *args, **kwargs)
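For reference, a minimal usage sketch of the single entry point added above (illustrative only, not part of the commit; the dataset path and pf are placeholders):

# Sketch only: the factory dispatches on the type of `normal`.
from yt.mods import load, SlicePlot

pf = load("DD0010/moving7_0010")      # placeholder dataset

# string or integer normal -> axis-aligned slice
slc = SlicePlot(pf, "z", "Density")

# general 3-vector normal -> off-axis slice; north_vector orients the image
slc = SlicePlot(pf, [0.4, 0.2, -0.1], "Density",
                north_vector=[0.2, -0.3, 0.1])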
https://bitbucket.org/yt_analysis/yt/commits/81bb09b3c388/
Changeset: 81bb09b3c388
Branch: yt-3.0
User: ChrisMalone
Date: 2013-10-19 04:08:57
Summary: Merged yt_analysis/yt-3.0 into yt-3.0
Affected #: 2 files
diff -r fb3cf0cdece6778670669091d7569c0797033c8d -r 81bb09b3c388b425bb0bad93711ad2065cc4ebe5 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -459,8 +459,9 @@
for j in range(2):
for k in range(2):
ii = ((k*2)+j)*2+i
+ # Note: C order because our index converts C to F.
source[field][:,ii] = \
- dt[i::2,j::2,k::2].ravel(order="F")
+ dt[i::2,j::2,k::2].ravel(order="C")
oct_handler.fill_level(0, levels, cell_inds, file_inds, tr, source)
del source
# Now we continue with the additional levels.
diff -r fb3cf0cdece6778670669091d7569c0797033c8d -r 81bb09b3c388b425bb0bad93711ad2065cc4ebe5 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -36,10 +36,10 @@
masks = None
caching = True
- def __init__(self):
+ def __init__(self, *args, **kwargs):
self.cache = {}
self.masks = {}
- super(IOHandlerART, self).__init__()
+ super(IOHandlerART, self).__init__(*args, **kwargs)
def _read_fluid_selection(self, chunks, selector, fields, size):
# Chunks in this case will have affiliated domain subset objects
https://bitbucket.org/yt_analysis/yt/commits/1cceb8a34168/
Changeset: 1cceb8a34168
Branch: yt-3.0
User: ChrisMalone
Date: 2013-10-19 04:28:50
Summary: axis string will show up in iterable(); check this
Affected #: 1 file
diff -r 81bb09b3c388b425bb0bad93711ad2065cc4ebe5 -r 1cceb8a341680b282534df190239fddcb2b89baa yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -2119,13 +2119,14 @@
raise AssertionError("Must pass field(s) to plot!")
# use an OnAxisSlicePlot where possible, e.g.:
- # maybe someone passed normal=[0,0,1] when they should have just used "z"
- if iterable(normal):
+ # maybe someone passed normal=[0,0,0.2] when they should have just used "z"
+ if iterable(normal) and not isinstance(normal,str):
normal = np.array(normal)
np.divide(normal, np.dot(normal,normal), normal)
if np.count_nonzero(normal) == 1:
normal = ("x","y","z")[np.nonzero(normal)[0][0]]
-
+
+ # by now the normal should be properly set to get either a On/Off Axis plot
if iterable(normal) and not isinstance(normal,str):
# OffAxisSlicePlot has hardcoded origin; remove it if in kwargs
if 'origin' in kwargs: del kwargs['origin']
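The isinstance check added here matters because Python strings satisfy yt's iterable() test, so an axis name like "z" would otherwise fall into the off-axis branch. A self-contained illustration (not part of the commit):

# Strings pass yt's iterable() check, hence the extra isinstance guard.
from yt.funcs import iterable

print(iterable("z"))                                  # True
print(iterable([0, 0, 1]))                            # True
print(iterable("z") and not isinstance("z", str))     # False -> on-axis branch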
https://bitbucket.org/yt_analysis/yt/commits/5098e0baa93f/
Changeset: 5098e0baa93f
Branch: yt-3.0
User: ChrisMalone
Date: 2013-10-19 06:46:43
Summary: doc markup
Affected #: 1 file
diff -r 1cceb8a341680b282534df190239fddcb2b89baa -r 5098e0baa93f781965547cc74d37559dc7b30290 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -2083,7 +2083,7 @@
================================== ============================
north-vector : a sequence of floats
A vector defining the 'up' direction in the `OffAxisSlicePlot`; not
- used in OnAxisSlicePlots. This option sets the orientation of the
+ used in `OnAxisSlicePlot`. This option sets the orientation of the
slicing plane. If not set, an arbitrary grid-aligned north-vector is
chosen.
fontsize : integer
https://bitbucket.org/yt_analysis/yt/commits/1cd6c25a4723/
Changeset: 1cd6c25a4723
Branch: yt-3.0
User: ChrisMalone
Date: 2013-10-19 06:48:20
Summary: update API
Affected #: 2 files
diff -r 5098e0baa93f781965547cc74d37559dc7b30290 -r 1cd6c25a4723f9437a8f49be093d0ffae46f7233 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -143,7 +143,8 @@
get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
callback_registry, write_bitmap, write_image, \
apply_colormap, scale_image, write_projection, write_fits, \
- SlicePlot, OffAxisSlicePlot, ProjectionPlot, OffAxisProjectionPlot, \
+ SlicePlot, OnAxisSlicePlot, OffAxisSlicePlot, \
+ ProjectionPlot, OffAxisProjectionPlot, \
show_colormaps
from yt.visualization.volume_rendering.api import \
diff -r 5098e0baa93f781965547cc74d37559dc7b30290 -r 1cd6c25a4723f9437a8f49be093d0ffae46f7233 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -49,6 +49,7 @@
from plot_window import \
SlicePlot, \
+ OnAxisSlicePlot, \
OffAxisSlicePlot, \
ProjectionPlot, \
OffAxisProjectionPlot
https://bitbucket.org/yt_analysis/yt/commits/f02c4fb9ea30/
Changeset: f02c4fb9ea30
Branch: yt-3.0
User: ChrisMalone
Date: 2013-10-19 06:54:06
Summary: warn the user if we are neglecting parameters
Affected #: 1 file
diff -r 1cd6c25a4723f9437a8f49be093d0ffae46f7233 -r f02c4fb9ea305a38a2d736a69465a0306c71fce2 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -2129,11 +2129,19 @@
# by now the normal should be properly set to get either a On/Off Axis plot
if iterable(normal) and not isinstance(normal,str):
# OffAxisSlicePlot has hardcoded origin; remove it if in kwargs
- if 'origin' in kwargs: del kwargs['origin']
+ if 'origin' in kwargs:
+ msg = "Ignoring 'origin' keyword as it is ill-defined for " \
+ "an OffAxisSlicePlot object."
+ mylog.warn(msg)
+ del kwargs['origin']
return OffAxisSlicePlot(pf, normal, fields, *args, **kwargs)
else:
# north_vector not used in OnAxisSlicePlots; remove it if in kwargs
- if 'north_vector' in kwargs: del kwargs['north_vector']
+ if 'north_vector' in kwargs:
+ msg = "Ignoring 'north_vector' keyword as it is ill-defined for " \
+ "an OnAxisSlicePlot object."
+ mylog.warn(msg)
+ del kwargs['north_vector']
return OnAxisSlicePlot(pf, normal, fields, *args, **kwargs)
https://bitbucket.org/yt_analysis/yt/commits/c32db22f3650/
Changeset: c32db22f3650
Branch: yt-3.0
User: ChrisMalone
Date: 2013-10-19 06:55:32
Summary: rename to AxisAlignedSlicePlot
Affected #: 3 files
diff -r f02c4fb9ea305a38a2d736a69465a0306c71fce2 -r c32db22f3650b0a6926270d4b14ebaf1bfe14dc9 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -143,7 +143,7 @@
get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
callback_registry, write_bitmap, write_image, \
apply_colormap, scale_image, write_projection, write_fits, \
- SlicePlot, OnAxisSlicePlot, OffAxisSlicePlot, \
+ SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
ProjectionPlot, OffAxisProjectionPlot, \
show_colormaps
diff -r f02c4fb9ea305a38a2d736a69465a0306c71fce2 -r c32db22f3650b0a6926270d4b14ebaf1bfe14dc9 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -49,7 +49,7 @@
from plot_window import \
SlicePlot, \
- OnAxisSlicePlot, \
+ AxisAlignedSlicePlot, \
OffAxisSlicePlot, \
ProjectionPlot, \
OffAxisProjectionPlot
diff -r f02c4fb9ea305a38a2d736a69465a0306c71fce2 -r c32db22f3650b0a6926270d4b14ebaf1bfe14dc9 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1208,7 +1208,7 @@
ret += '<img src="data:image/png;base64,%s"><br>' % img
return ret
-class OnAxisSlicePlot(PWViewerMPL):
+class AxisAlignedSlicePlot(PWViewerMPL):
r"""Creates a slice plot from a parameter file
Given a pf object, an axis to slice along, and a field name
@@ -1993,7 +1993,7 @@
def SlicePlot(pf, normal=None, fields=None, axis=None, *args, **kwargs):
r"""
A factory function for
- :class:`yt.visualization.plot_window.OnAxisSlicePlot`
+ :class:`yt.visualization.plot_window.AxisAlignedSlicePlot`
and :class:`yt.visualization.plot_window.OffAxisSlicePlot` objects. This
essentially allows for a single entry point to both types of slice plots,
the distinction being determined by the specified normal vector to the
@@ -2010,7 +2010,7 @@
normal : int or one of 'x', 'y', 'z', or sequence of floats
This specifies the normal vector to the slice. If given as an integer
or a coordinate string (0=x, 1=y, 2=z), this function will return an
- :class:`OnAxisSlicePlot` object. If given as a sequence of floats,
+ :class:`AxisAlignedSlicePlot` object. If given as a sequence of floats,
this is interpretted as an off-axis vector and an
:class:`OffAxisSlicePlot` object is returned.
fields : string
@@ -2054,7 +2054,7 @@
units, and only show the axes name.
origin : string or length 1, 2, or 3 sequence of strings
The location of the origin of the plot coordinate system for
- `OnAxisSlicePlot` objects; for `OffAxisSlicePlot` objects,
+ `AxisAlignedSlicePlot` objects; for `OffAxisSlicePlot` objects,
this parameter is discarded. This is represented by '-' separated
string or a tuple of strings. In the first index the y-location is
given by 'lower', 'upper', or 'center'. The second index is the
@@ -2083,7 +2083,7 @@
================================== ============================
north-vector : a sequence of floats
A vector defining the 'up' direction in the `OffAxisSlicePlot`; not
- used in `OnAxisSlicePlot`. This option sets the orientation of the
+ used in `AxisAlignedSlicePlot`. This option sets the orientation of the
slicing plane. If not set, an arbitrary grid-aligned north-vector is
chosen.
fontsize : integer
@@ -2118,7 +2118,7 @@
if fields is None:
raise AssertionError("Must pass field(s) to plot!")
- # use an OnAxisSlicePlot where possible, e.g.:
+ # use an AxisAlignedSlicePlot where possible, e.g.:
# maybe someone passed normal=[0,0,0.2] when they should have just used "z"
if iterable(normal) and not isinstance(normal,str):
normal = np.array(normal)
@@ -2137,11 +2137,11 @@
return OffAxisSlicePlot(pf, normal, fields, *args, **kwargs)
else:
- # north_vector not used in OnAxisSlicePlots; remove it if in kwargs
+ # north_vector not used in AxisAlignedSlicePlots; remove it if in kwargs
if 'north_vector' in kwargs:
msg = "Ignoring 'north_vector' keyword as it is ill-defined for " \
- "an OnAxisSlicePlot object."
+ "an AxisAlignedSlicePlot object."
mylog.warn(msg)
del kwargs['north_vector']
- return OnAxisSlicePlot(pf, normal, fields, *args, **kwargs)
+ return AxisAlignedSlicePlot(pf, normal, fields, *args, **kwargs)
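After this rename, SlicePlot is purely a factory function and AxisAlignedSlicePlot is the concrete class it returns for axis-aligned normals; a hedged sketch (pf stands in for a previously loaded dataset):

# Sketch only: factory function vs. concrete class after the rename.
from yt.mods import SlicePlot, AxisAlignedSlicePlot

slc = SlicePlot(pf, "z", "Density")        # pf: placeholder dataset object
assert isinstance(slc, AxisAlignedSlicePlot)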
https://bitbucket.org/yt_analysis/yt/commits/34f821e4b5f2/
Changeset: 34f821e4b5f2
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-22 14:30:56
Summary: Merged in ChrisMalone/yt-3.0 (pull request #118)
Create a single entry-point to SlicePlot and OffAxisSlicePlot
Affected #: 3 files
diff -r ea58f090673982ed065dfcca187fad1535cf84ac -r 34f821e4b5f24d61dd946f19716dc65db01cf11a yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -143,7 +143,8 @@
get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
callback_registry, write_bitmap, write_image, \
apply_colormap, scale_image, write_projection, write_fits, \
- SlicePlot, OffAxisSlicePlot, ProjectionPlot, OffAxisProjectionPlot, \
+ SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
+ ProjectionPlot, OffAxisProjectionPlot, \
show_colormaps
from yt.visualization.volume_rendering.api import \
diff -r ea58f090673982ed065dfcca187fad1535cf84ac -r 34f821e4b5f24d61dd946f19716dc65db01cf11a yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -49,6 +49,7 @@
from plot_window import \
SlicePlot, \
+ AxisAlignedSlicePlot, \
OffAxisSlicePlot, \
ProjectionPlot, \
OffAxisProjectionPlot
diff -r ea58f090673982ed065dfcca187fad1535cf84ac -r 34f821e4b5f24d61dd946f19716dc65db01cf11a yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1208,7 +1208,7 @@
ret += '<img src="data:image/png;base64,%s"><br>' % img
return ret
-class SlicePlot(PWViewerMPL):
+class AxisAlignedSlicePlot(PWViewerMPL):
r"""Creates a slice plot from a parameter file
Given a pf object, an axis to slice along, and a field name
@@ -1989,3 +1989,159 @@
yfrac
)
return axrect, caxrect
+
+def SlicePlot(pf, normal=None, fields=None, axis=None, *args, **kwargs):
+ r"""
+ A factory function for
+ :class:`yt.visualization.plot_window.AxisAlignedSlicePlot`
+ and :class:`yt.visualization.plot_window.OffAxisSlicePlot` objects. This
+ essentially allows for a single entry point to both types of slice plots,
+ the distinction being determined by the specified normal vector to the
+ slice.
+
+ The returned plot object can be updated using one of the many helper
+ functions defined in PlotWindow.
+
+ Parameters
+ ----------
+ pf : :class:`yt.data_objects.api.StaticOutput`
+ This is the parameter file object corresponding to the
+ simulation output to be plotted.
+ normal : int or one of 'x', 'y', 'z', or sequence of floats
+ This specifies the normal vector to the slice. If given as an integer
+ or a coordinate string (0=x, 1=y, 2=z), this function will return an
+ :class:`AxisAlignedSlicePlot` object. If given as a sequence of floats,
+        this is interpreted as an off-axis vector and an
+ :class:`OffAxisSlicePlot` object is returned.
+ fields : string
+ The name of the field(s) to be plotted.
+ axis : int or one of 'x', 'y', 'z'
+ An int corresponding to the axis to slice along (0=x, 1=y, 2=z)
+ or the axis name itself. If specified, this will replace normal.
+
+ The following are nominally keyword arguments passed onto the respective
+ slice plot objects generated by this function.
+
+    center : two or three-element sequence of floats, 'c', 'center',
+ or 'max'
+ If set to 'c', 'center' or left blank, the plot is centered on the
+ middle of the domain. If set to 'max' or 'm', the center will be at
+ the point of highest density.
+ width : tuple or a float.
+ Width can have four different formats to support windows with variable
+ x and y widths. They are:
+
+ ================================== =======================
+ format example
+ ================================== =======================
+ (float, string) (10,'kpc')
+ ((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))
+ float 0.2
+ (float, float) (0.2, 0.3)
+ ================================== =======================
+
+ For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
+ wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
+ window that is 10 kiloparsecs wide along the x axis and 15
+ kiloparsecs wide along the y axis. In the other two examples, code
+ units are assumed, for example (0.2, 0.3) requests a plot that has an
+ x width of 0.2 and a y width of 0.3 in code units. If units are
+ provided the resulting plot axis labels will use the supplied units.
+ axes_unit : A string
+ The name of the unit for the tick labels on the x and y axes.
+ Defaults to None, which automatically picks an appropriate unit.
+ If axes_unit is '1', 'u', or 'unitary', it will not display the
+ units, and only show the axes name.
+ origin : string or length 1, 2, or 3 sequence of strings
+ The location of the origin of the plot coordinate system for
+ `AxisAlignedSlicePlot` objects; for `OffAxisSlicePlot` objects,
+        this parameter is discarded. This is represented by a '-' separated
+        string or a tuple of strings. In the first index the y-location is
+        given by 'lower', 'upper', or 'center'. The second index is the
+        x-location, given as 'left', 'right', or 'center'. Finally, whether
+        the origin is applied in 'domain' space, plot 'window' space
+        or 'native' simulation coordinate system is given. For example,
+        'upper-right-domain' and ['upper', 'right', 'domain'] both place the
+        origin in the upper right hand corner of domain space. If x or y are
+        not given, a value is inferred. For instance, 'left-domain'
+ corresponds to the lower-left hand corner of the simulation domain,
+ 'center-domain' corresponds to the center of the simulation domain,
+ or 'center-window' for the center of the plot window. Further
+ examples:
+
+ ================================== ============================
+ format example
+ ================================== ============================
+ '{space}' 'domain'
+ '{xloc}-{space}' 'left-window'
+ '{yloc}-{space}' 'upper-domain'
+ '{yloc}-{xloc}-{space}' 'lower-right-window'
+ ('{space}',) ('window',)
+ ('{xloc}', '{space}') ('right', 'domain')
+ ('{yloc}', '{space}') ('lower', 'window')
+ ('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window')
+ ================================== ============================
+ north-vector : a sequence of floats
+ A vector defining the 'up' direction in the `OffAxisSlicePlot`; not
+ used in `AxisAlignedSlicePlot`. This option sets the orientation of the
+ slicing plane. If not set, an arbitrary grid-aligned north-vector is
+ chosen.
+ fontsize : integer
+ The size of the fonts for the axis, colorbar, and tick labels.
+ field_parameters : dictionary
+        A dictionary of field parameters that can be accessed by derived
+ fields.
+
+ Raises
+ ------
+ AssertionError
+ If a proper normal axis is not specified via the normal or axis
+ keywords, and/or if a field to plot is not specified.
+
+ Examples
+ --------
+
+ >>> slc = SlicePlot(pf, "x", "Density", center=[0.2,0.3,0.4])
+ >>> slc = SlicePlot(pf, 2, "Temperature")
+ >>> slc = SlicePlot(pf, [0.4,0.2,-0.1], "Pressure",
+ north_vector=[0.2,-0.3,0.1])
+
+ """
+ # Make sure we are passed a normal
+    # we check the axis keyword for backwards compatibility
+ if normal is None: normal = axis
+ if normal is None:
+ raise AssertionError("Must pass a normal vector to the slice!")
+
+ # to keep positional ordering we had to make fields a keyword; make sure
+ # it is present
+ if fields is None:
+ raise AssertionError("Must pass field(s) to plot!")
+
+ # use an AxisAlignedSlicePlot where possible, e.g.:
+ # maybe someone passed normal=[0,0,0.2] when they should have just used "z"
+ if iterable(normal) and not isinstance(normal,str):
+ normal = np.array(normal)
+ np.divide(normal, np.dot(normal,normal), normal)
+ if np.count_nonzero(normal) == 1:
+ normal = ("x","y","z")[np.nonzero(normal)[0][0]]
+
+ # by now the normal should be properly set to get either a On/Off Axis plot
+ if iterable(normal) and not isinstance(normal,str):
+ # OffAxisSlicePlot has hardcoded origin; remove it if in kwargs
+ if 'origin' in kwargs:
+ msg = "Ignoring 'origin' keyword as it is ill-defined for " \
+ "an OffAxisSlicePlot object."
+ mylog.warn(msg)
+ del kwargs['origin']
+
+ return OffAxisSlicePlot(pf, normal, fields, *args, **kwargs)
+ else:
+ # north_vector not used in AxisAlignedSlicePlots; remove it if in kwargs
+ if 'north_vector' in kwargs:
+ msg = "Ignoring 'north_vector' keyword as it is ill-defined for " \
+ "an AxisAlignedSlicePlot object."
+ mylog.warn(msg)
+ del kwargs['north_vector']
+
+ return AxisAlignedSlicePlot(pf, normal, fields, *args, **kwargs)
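To make the width formats in the merged docstring concrete, a hedged usage sketch (pf is a placeholder dataset object, SlicePlot as imported from yt.mods; not part of the commit):

# Sketch only: the four accepted width formats.
slc = SlicePlot(pf, "x", "Density", width=(10, 'kpc'))                  # square, 10 kpc
slc = SlicePlot(pf, "x", "Density", width=((10, 'kpc'), (15, 'kpc')))   # 10 kpc x 15 kpc
slc = SlicePlot(pf, "x", "Density", width=0.2)                          # code units, square
slc = SlicePlot(pf, "x", "Density", width=(0.2, 0.3))                   # code units, rectangular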
https://bitbucket.org/yt_analysis/yt/commits/9d6a692bfef1/
Changeset: 9d6a692bfef1
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-22 16:16:07
Summary: Removing Tiger frontend.
Affected #: 9 files
diff -r 34f821e4b5f24d61dd946f19716dc65db01cf11a -r 9d6a692bfef164ffeb8cc9b61294d93cc153fbd8 yt/frontends/tiger/__init__.py
--- a/yt/frontends/tiger/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-API for yt.frontends.tiger
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
diff -r 34f821e4b5f24d61dd946f19716dc65db01cf11a -r 9d6a692bfef164ffeb8cc9b61294d93cc153fbd8 yt/frontends/tiger/api.py
--- a/yt/frontends/tiger/api.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""
-API for yt.frontends.tiger
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .data_structures import \
- TigerGrid, \
- TigerHierarchy, \
- TigerStaticOutput
-
-from .fields import \
- TigerFieldInfo, \
- add_tiger_field
-
-from .io import \
- IOHandlerTiger
diff -r 34f821e4b5f24d61dd946f19716dc65db01cf11a -r 9d6a692bfef164ffeb8cc9b61294d93cc153fbd8 yt/frontends/tiger/data_structures.py
--- a/yt/frontends/tiger/data_structures.py
+++ /dev/null
@@ -1,195 +0,0 @@
-"""
-TIGER-specific data structures
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.funcs import *
-from yt.data_objects.grid_patch import \
- AMRGridPatch
-from yt.geometry.grid_geometry_handler import \
- GridGeometryHandler
-from yt.data_objects.static_output import \
- StaticOutput
-
-from yt.data_objects.field_info_container import \
- FieldInfoContainer, NullFunc
-from .fields import TigerFieldInfo, KnownTigerFields
-
-class TigerGrid(AMRGridPatch):
- _id_offset = 0
-
- def __init__(self, id, hierarchy, left_edge, right_edge, left_dims, right_dims):
- AMRGridPatch.__init__(self, id, hierarchy = hierarchy)
- self.LeftEdge = left_edge
- self.RightEdge = right_edge
- self.Level = 0
- self.NumberOfParticles = 0
- self.left_dims = np.array(left_dims, dtype='int32')
- self.right_dims = np.array(right_dims, dtype='int32')
- self.ActiveDimensions = self.right_dims - self.left_dims
- self.Parent = None
- self.Children = []
-
- @property
- def child_mask(self):
- return np.ones(self.ActiveDimensions, dtype='int32')
-
- def __repr__(self):
- return "TigerGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
-
-class TigerHierarchy(GridGeometryHandler):
-
- grid = TigerGrid
-
- def __init__(self, pf, data_style):
- self.directory = pf.fullpath
- self.data_style = data_style
- GridGeometryHandler.__init__(self, pf, data_style)
-
- def _count_grids(self):
- # Tiger is unigrid
- self.ngdims = [i/j for i,j in
- izip(self.pf.root_size, self.pf.max_grid_size)]
- self.num_grids = np.prod(self.ngdims)
- self.max_level = 0
-
- def _setup_classes(self):
- dd = self._get_data_reader_dict()
- GridGeometryHandler._setup_classes(self, dd)
- self.object_types.sort()
-
- def _parse_hierarchy(self):
- grids = []
- # We need to fill in dims, LE, RE, level, count
- dims, LE, RE, levels, counts = [], [], [], [], []
- DLE = self.pf.domain_left_edge
- DRE = self.pf.domain_right_edge
- DW = DRE - DLE
- gds = DW / self.ngdims
- rd = [self.pf.root_size[i]-self.pf.max_grid_size[i] for i in range(3)]
- glx, gly, glz = np.mgrid[DLE[0]:DRE[0]-gds[0]:self.ngdims[0]*1j,
- DLE[1]:DRE[1]-gds[1]:self.ngdims[1]*1j,
- DLE[2]:DRE[2]-gds[2]:self.ngdims[2]*1j]
- gdx, gdy, gdz = np.mgrid[0:rd[0]:self.ngdims[0]*1j,
- 0:rd[1]:self.ngdims[1]*1j,
- 0:rd[2]:self.ngdims[2]*1j]
- LE, RE, levels, counts = [], [], [], []
- i = 0
- for glei, gldi in izip(izip(glx.flat, gly.flat, glz.flat),
- izip(gdx.flat, gdy.flat, gdz.flat)):
- gld = np.array(gldi)
- gle = np.array(glei)
- gre = gle + gds
- g = self.grid(i, self, gle, gre, gld, gld+self.pf.max_grid_size)
- grids.append(g)
- dims.append(self.pf.max_grid_size)
- LE.append(g.LeftEdge)
- RE.append(g.RightEdge)
- levels.append(g.Level)
- counts.append(g.NumberOfParticles)
- i += 1
- self.grids = np.empty(len(grids), dtype='object')
- for gi, g in enumerate(grids): self.grids[gi] = g
- self.grid_dimensions[:] = np.array(dims, dtype='int64')
- self.grid_left_edge[:] = np.array(LE, dtype='float64')
- self.grid_right_edge[:] = np.array(RE, dtype='float64')
- self.grid_levels.flat[:] = np.array(levels, dtype='int32')
- self.grid_particle_count.flat[:] = np.array(counts, dtype='int32')
-
- def _populate_grid_objects(self):
- # We don't need to do anything here
- for g in self.grids: g._setup_dx()
-
- def _detect_fields(self):
- self.file_mapping = {"Density" : "rhob",
- "Temperature" : "temp"}
-
- @property
- def field_list(self):
- return self.file_mapping.keys()
-
- def _setup_derived_fields(self):
- self.derived_field_list = []
-
-class TigerStaticOutput(StaticOutput):
- _hierarchy_class = TigerHierarchy
- _fieldinfo_fallback = TigerFieldInfo
- _fieldinfo_known = KnownTigerFields
-
- def __init__(self, rhobname, root_size, max_grid_size=128,
- data_style='tiger', storage_filename = None):
- StaticOutput.__init__(self, rhobname, data_style)
- self.storage_filename = storage_filename
- self.basename = rhobname[:-4]
- if not os.path.exists(self.basename + "rhob"):
- print "%s doesn't exist, don't know how to handle this!" % (
- self.basename + "rhob")
- raise IOError
- if not iterable(root_size): root_size = (root_size,) * 3
- self.root_size = root_size
- if not iterable(max_grid_size): max_grid_size = (max_grid_size,) * 3
- self.max_grid_size = max_grid_size
-
- self.field_info = FieldInfoContainer.create_with_fallback(
- self._fieldinfo_fallback)
-
- # We assume that we have basename + "rhob" and basename + "temp"
- # to get at our various parameters.
-
- # First we get our our header:
-
- header = [
- ('i', 'dummy0'),
- ('f', 'ZR'),
- ('f', 'OMEGA0'),
- ('f', 'FLAM0'),
- ('f', 'OMEGAB'),
- ('f', 'H0'),
- ('f', 'BOXL0'),
- ('i', 'dummy1'),
- ]
-
- h_fmt, h_key = zip(*header)
- header_string = "".join(h_fmt)
-
- fs = open(self.basename + "rhob")
- header_raw = read_struct(fs, header_string)
- self.parameters.update(dict(zip(h_key, header_raw)))
-
- if "InitialTime" not in self.parameters:
- self.current_time = 0.0
- self.unique_identifier = \
- int(os.stat(self.parameter_filename)[ST_CTIME])
- self.parameters['TopGridDimensions'] = root_size
- self.parameters['TopGridRank'] = 3
- self.units["Density"] = 1.0
- self.parameters['RefineBy'] = 2
-
- def _set_units(self):
- self.domain_left_edge = np.zeros(3, dtype='float64')
- self.domain_right_edge = np.ones(3, dtype='float64')
- self.units = {}
- self.time_units = {}
- self.time_units['1'] = 1
- self.units['1'] = 1.0
- self.units['cm'] = 1.0 # This is just plain false
- self.units['unitary'] = 1.0 / (self["DomainRightEdge"] - self["DomainLeftEdge"]).max()
-
- def _parse_parameter_file(self):
- pass
-
- @classmethod
- def _is_valid(self, *args, **kwargs):
- return os.path.exists(args[0] + "rhob")
-
-
diff -r 34f821e4b5f24d61dd946f19716dc65db01cf11a -r 9d6a692bfef164ffeb8cc9b61294d93cc153fbd8 yt/frontends/tiger/fields.py
--- a/yt/frontends/tiger/fields.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Tiger-specific fields
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.data_objects.field_info_container import \
- FieldInfoContainer, \
- FieldInfo, \
- ValidateParameter, \
- ValidateDataField, \
- ValidateProperty, \
- ValidateSpatial, \
- ValidateGridType
-import yt.fields.universal_fields
-
-KnownTigerFields = FieldInfoContainer()
-add_tiger_field = KnownTigerFields.add_field
-
-TigerFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_field = TigerFieldInfo.add_field
-
diff -r 34f821e4b5f24d61dd946f19716dc65db01cf11a -r 9d6a692bfef164ffeb8cc9b61294d93cc153fbd8 yt/frontends/tiger/io.py
--- a/yt/frontends/tiger/io.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""
-TIGER-specific IO functions
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.utilities.io_handler import \
- BaseIOHandler
-
-class IOHandlerTiger(BaseIOHandler):
- _data_style = "tiger"
- _offset = 36
-
- def __init__(self, *args, **kwargs):
- BaseIOHandler.__init__(self, *args, **kwargs)
- self._memmaps = {}
-
- def _read_data(self, grid, field):
- fn = grid.pf.basename + grid.hierarchy.file_mapping[field]
- LD = np.array(grid.left_dims, dtype='int64')
- SS = np.array(grid.ActiveDimensions, dtype='int64')
- RS = np.array(grid.pf.root_size, dtype='int64')
- data = au.read_tiger_section(fn, LD, SS, RS).astype("float64")
- return data
diff -r 34f821e4b5f24d61dd946f19716dc65db01cf11a -r 9d6a692bfef164ffeb8cc9b61294d93cc153fbd8 yt/frontends/tiger/setup.py
--- a/yt/frontends/tiger/setup.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
-
-
-def configuration(parent_package='', top_path=None):
- from numpy.distutils.misc_util import Configuration
- config = Configuration('tiger', parent_package, top_path)
- config.make_config_py() # installs __config__.py
- #config.make_svn_version_py()
- return config
diff -r 34f821e4b5f24d61dd946f19716dc65db01cf11a -r 9d6a692bfef164ffeb8cc9b61294d93cc153fbd8 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -84,15 +84,9 @@
from yt.frontends.flash.api import \
FLASHStaticOutput, FLASHFieldInfo, add_flash_field
-from yt.frontends.tiger.api import \
- TigerStaticOutput, TigerFieldInfo, add_tiger_field
-
from yt.frontends.artio.api import \
ARTIOStaticOutput, ARTIOFieldInfo, add_artio_field
-#from yt.frontends.artio2.api import \
-# Artio2StaticOutput
-
from yt.frontends.ramses.api import \
RAMSESStaticOutput, RAMSESFieldInfo, add_ramses_field
https://bitbucket.org/yt_analysis/yt/commits/2f6ffa08eaa0/
Changeset: 2f6ffa08eaa0
Branch: yt-3.0
User: xarthisius
Date: 2013-10-22 18:33:06
Summary: Remove tiger subpackage from yt.frontends
Affected #: 1 file
diff -r 9d6a692bfef164ffeb8cc9b61294d93cc153fbd8 -r 2f6ffa08eaa02cf8fc14f50312c21608977b473e yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -22,7 +22,6 @@
config.add_subpackage("ramses")
config.add_subpackage("sph")
config.add_subpackage("stream")
- config.add_subpackage("tiger")
config.add_subpackage("boxlib/tests")
config.add_subpackage("flash/tests")
config.add_subpackage("enzo/tests")
https://bitbucket.org/yt_analysis/yt/commits/ec501e2b7d68/
Changeset: ec501e2b7d68
Branch: yt-3.0
User: xarthisius
Date: 2013-10-22 18:48:51
Summary: Remove unused read_tiger_section
Affected #: 1 file
diff -r 2f6ffa08eaa02cf8fc14f50312c21608977b473e -r ec501e2b7d686ac302ee463d87a8a115eb46284f yt/utilities/lib/fortran_reader.pyx
--- a/yt/utilities/lib/fortran_reader.pyx
+++ b/yt/utilities/lib/fortran_reader.pyx
@@ -55,41 +55,9 @@
fread(buf, 1, bytes, f)
fclose(f)
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def read_tiger_section(
- char *fn,
- np.ndarray[np.int64_t, ndim=1] slab_start,
- np.ndarray[np.int64_t, ndim=1] slab_size,
- np.ndarray[np.int64_t, ndim=1] root_size,
- int offset = 36):
- cdef int strides[3]
- strides[0] = 1
- strides[1] = root_size[0] * strides[0]
- strides[2] = strides[1] * root_size[1] + 2
- cdef np.int64_t i, j, k
- cdef np.ndarray buffer = np.zeros(slab_size, dtype='float32', order='F')
- cdef FILE *f = fopen(fn, "rb")
- #for i in range(3): offset += strides[i] * slab_start[i]
- cdef np.int64_t pos = 0
- cdef np.int64_t moff = 0
- cdef float *data = <float *> buffer.data
- fseek(f, offset, 0)
- # If anybody wants to convert this loop to a SEEK_CUR, that'd be great.
- for i in range(slab_size[2]):
- for j in range(slab_size[1]):
- moff = (slab_start[0] ) * strides[0] \
- + (slab_start[1] + j) * strides[1] \
- + (slab_start[2] + i) * strides[2]
- #print offset + 4 * moff, pos
- fseek(f, offset + 4 * moff, SEEK_SET)
- fread(<void *> (data + pos), 4, slab_size[0], f)
- pos += slab_size[0]
- return buffer
-
def count_art_octs(char *fn, long offset,
int min_level, int max_level,
- int nhydro_vars,
+ int nhydro_vars,
level_info):
cdef int nchild = 8
cdef int i, Lev, next_record, nLevel
@@ -108,12 +76,12 @@
# Offset for one record header we just read
next_record = (nLevel * (next_record + 2*sizeof(int))) - sizeof(int)
fseek(f, next_record, SEEK_CUR)
- # Now we skip the second section
+ # Now we skip the second section
fread(&readin, sizeof(int), 1, f); FIX_LONG(readin)
nhydro_vars = next_record/4-2-3 #nhvar in daniel's code
#record length is normally 2 pad bytes, 8 + 2 hvars (the 2 is nchem)
# and then 3 vars, but we can find nhvars only here and not in other
- # file headers
+ # file headers
next_record = (2*sizeof(int) + readin) * (nLevel * nchild)
next_record -= sizeof(int)
fseek(f, next_record, SEEK_CUR)
@@ -121,13 +89,13 @@
fclose(f)
def read_art_tree(char *fn, long offset,
- int min_level, int max_level,
+ int min_level, int max_level,
np.ndarray[np.int64_t, ndim=2] oct_indices,
np.ndarray[np.int64_t, ndim=1] oct_levels,
np.ndarray[np.int64_t, ndim=2] oct_info):
# np.ndarray[np.int64_t, ndim=1] oct_mask,
# np.ndarray[np.int64_t, ndim=1] oct_parents,
-
+
# This accepts the filename of the ART header and an integer offset that
# points to the start of the record *following* the reading of iOctFree and
# nOct. For those following along at home, we only need to read:
@@ -162,7 +130,7 @@
#print ftell(f)
for ic1 in range(nLevel):
iOctMax = max(iOctMax, iOct)
- #print readin, iOct, nLevel, sizeof(int)
+ #print readin, iOct, nLevel, sizeof(int)
next_record = ftell(f)
fread(&readin, sizeof(int), 1, f); FIX_LONG(readin)
assert readin==52
@@ -185,30 +153,30 @@
fseek(f, next_record, SEEK_SET)
fread(&readin, sizeof(int), 1, f); FIX_LONG(readin)
assert readin==52
-
+
total_masked = 0
level_offsets.append(ftell(f))
-
+
#skip over the hydro variables
#find the length of one child section
#print 'measuring child record ',
- fread(&next_record, sizeof(int), 1, f);
+ fread(&next_record, sizeof(int), 1, f);
#print next_record,
FIX_LONG(next_record)
#print next_record
fseek(f,ftell(f)-sizeof(int),SEEK_SET) #rewind
#This is a sloppy fix; next_record is 64bit
- #and I don't think FIX_LONG(next_record) is working
+ #and I don't think FIX_LONG(next_record) is working
#correctly for 64bits
if next_record > 4294967296L:
next_record -= 4294967296L
assert next_record == 56
-
+
#find the length of all of the children section
child_record = ftell(f) + (next_record+2*sizeof(int))*nLevel*nchild
#print 'Skipping over hydro vars', ftell(f), child_record
fseek(f, child_record, SEEK_SET)
-
+
# for ic1 in range(nLevel * nchild):
# fread(&next_record, sizeof(int), 1, f); FIX_LONG(next_record)
# fread(&idc, sizeof(int), 1, f); FIX_LONG(idc); idc -= 1 + (128**3)
@@ -220,7 +188,7 @@
fclose(f)
return level_offsets
-def read_art_root_vars(char *fn, long root_grid_offset,
+def read_art_root_vars(char *fn, long root_grid_offset,
int nhydro_vars, int nx, int ny, int nz,
int ix, int iy, int iz, fields, var):
@@ -235,7 +203,7 @@
fseek(f, cell_record_size * my_offset, SEEK_CUR)
#(((C)*GridDimension[1]+(B))*GridDimension[0]+A)
for j in range(nhydro_vars):
- fread(&temp, sizeof(float), 1, f);
+ fread(&temp, sizeof(float), 1, f);
if j in fields:
FIX_FLOAT(temp)
var[l]=temp
@@ -243,7 +211,7 @@
fclose(f)
cdef void read_art_vars(FILE *f,
- int min_level, int max_level, int nhydro_vars,
+ int min_level, int max_level, int nhydro_vars,
int grid_level,long grid_id,long child_offset,
fields,
np.ndarray[np.int64_t, ndim=1] level_offsets,
@@ -273,7 +241,7 @@
@cython.cdivision(True)
@cython.boundscheck(False)
@cython.wraparound(False)
-def read_art_grid(int varindex,
+def read_art_grid(int varindex,
np.ndarray[np.int64_t, ndim=1] start_index,
np.ndarray[np.int32_t, ndim=1] grid_dims,
np.ndarray[np.float32_t, ndim=3] data,
https://bitbucket.org/yt_analysis/yt/commits/4c112d5ad497/
Changeset: 4c112d5ad497
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-22 23:47:37
Summary: A few minor updates for GadgetHDF5 data.
Affected #: 3 files
diff -r ec501e2b7d686ac302ee463d87a8a115eb46284f -r 4c112d5ad497f5f4c000530939d03ed4dde34cc1 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -128,7 +128,8 @@
function = TranslationFunc((ptype, "ParticleIDs")),
particle_type = True)
standard_particle_fields(self.field_info, ptype)
- _setup_particle_fields(self.field_info, ptype)
+ _setup_particle_fields(self.field_info, ptype,
+ self._particle_mass_name)
return [n for n, v in set(self.field_info.items()).difference(orig)]
class GadgetStaticOutput(ParticleStaticOutput):
@@ -286,6 +287,7 @@
_file_class = ParticleFile
_fieldinfo_fallback = GadgetHDF5FieldInfo
_fieldinfo_known = KnownGadgetHDF5Fields
+ _particle_mass_name = "Masses"
_suffix = ".hdf5"
def __init__(self, filename, data_style="gadget_hdf5",
diff -r ec501e2b7d686ac302ee463d87a8a115eb46284f -r 4c112d5ad497f5f4c000530939d03ed4dde34cc1 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -66,11 +66,8 @@
return data.convert(cf)
return _convert
-# TIPSY
-# =====
-
-def _setup_particle_fields(registry, ptype):
- registry.add_field((ptype, "Mass"), function=NullFunc,
+def _setup_particle_fields(registry, ptype, mass_name = "Mass"):
+ registry.add_field((ptype, mass_name), function=NullFunc,
particle_type = True,
convert_function=_get_conv("mass"),
units = r"\mathrm{g}")
@@ -80,7 +77,7 @@
units = r"\mathrm{cm}/\mathrm{s}")
# Note that we have to do this last so that TranslationFunc operates
# correctly.
- particle_deposition_functions(ptype, "Coordinates", "Mass",
+ particle_deposition_functions(ptype, "Coordinates", mass_name,
registry)
particle_scalar_functions(ptype, "Coordinates", "Velocities",
registry)
diff -r ec501e2b7d686ac302ee463d87a8a115eb46284f -r 4c112d5ad497f5f4c000530939d03ed4dde34cc1 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -94,7 +94,7 @@
del coords
if mask is None: continue
for field in field_list:
- if field in ("Mass", "Mass") and \
+ if field in ("Mass", "Masses") and \
ptype not in self.var_mass:
data = np.empty(mask.sum(), dtype="float64")
ind = self._known_ptypes.index(ptype)
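The mass_name argument introduced above lets each SPH frontend register its particle mass under its native field name; an illustrative call pattern mirroring the call site in data_structures.py (field_info stands in for the dataset's field registry, and the particle type names are placeholders):

# Sketch only, not part of the commit.
from yt.frontends.sph.fields import _setup_particle_fields

_setup_particle_fields(field_info, "PartType0", "Masses")   # Gadget HDF5
_setup_particle_fields(field_info, "Gas")                   # defaults to "Mass"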
https://bitbucket.org/yt_analysis/yt/commits/a068074be818/
Changeset: a068074be818
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-24 02:07:23
Summary: Setting particle_types_raw in ART frontend
Affected #: 1 file
diff -r 4c112d5ad497f5f4c000530939d03ed4dde34cc1 -r a068074be818374213c9a209eb681d84e2bd778a yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -121,6 +121,8 @@
self.parameter_file.particle_types = ["darkmatter", "stars"]
for specie in range(nspecies):
self.parameter_file.particle_types.append("specie%i" % specie)
+ self.parameter_file.particle_types_raw = tuple(
+ self.parameter_file.particle_types)
else:
self.parameter_file.particle_types = []
for ptype in self.parameter_file.particle_types:
https://bitbucket.org/yt_analysis/yt/commits/9024c7fa22dc/
Changeset: 9024c7fa22dc
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-24 21:41:29
Summary: Removing NotImplementedError for partial_coverage = 1.
Affected #: 1 file
diff -r a068074be818374213c9a209eb681d84e2bd778a -r 9024c7fa22dc05a28c3973faef2474a9567623f4 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -500,8 +500,6 @@
left_edge = (self.DLE[0], self.DLE[1], self.DLE[2]),
right_edge = (self.DRE[0], self.DRE[1], self.DRE[2]),
over_refine = self.oref)
- if self.partial_coverage == 1:
- raise NotImplementedError
cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
# domain_id = -1 here, because we want *every* oct
cdef OctVisitorData data
https://bitbucket.org/yt_analysis/yt/commits/788e19ee51e6/
Changeset: 788e19ee51e6
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-24 20:29:22
Summary: Skip empty block lists in kD-tree and use _get_field_info in grid patch.
Also supply optional argument for get_field_info to make it guess.
Affected #: 3 files
diff -r a068074be818374213c9a209eb681d84e2bd778a -r 788e19ee51e682974a4d576f0e0912611c92d454 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -259,7 +259,8 @@
new_field[1:,1:,:-1] += of
new_field[1:,1:,1:] += of
np.multiply(new_field, 0.125, new_field)
- if self.pf.field_info[field].take_log:
+ finfo = self.pf._get_field_info(field)
+ if finfo.take_log:
new_field = np.log10(new_field)
new_field[:,:, -1] = 2.0*new_field[:,:,-2] - new_field[:,:,-3]
@@ -269,7 +270,7 @@
new_field[-1,:,:] = 2.0*new_field[-2,:,:] - new_field[-3,:,:]
new_field[0,:,:] = 2.0*new_field[1,:,:] - new_field[2,:,:]
- if self.pf.field_info[field].take_log:
+ if finfo.take_log:
np.power(10.0, new_field, new_field)
else:
cg = self.retrieve_ghost_zones(1, field, smoothed=smoothed)
diff -r a068074be818374213c9a209eb681d84e2bd778a -r 788e19ee51e682974a4d576f0e0912611c92d454 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -291,7 +291,9 @@
_last_freq = (None, None)
_last_finfo = None
- def _get_field_info(self, ftype, fname):
+ def _get_field_info(self, ftype, fname = None):
+ if fname is None:
+ ftype, fname = "unknown", ftype
guessing_type = False
if ftype == "unknown" and self._last_freq[0] != None:
ftype = self._last_freq[0]
diff -r a068074be818374213c9a209eb681d84e2bd778a -r 788e19ee51e682974a4d576f0e0912611c92d454 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -83,6 +83,7 @@
grids = np.array([b for b, mask in self.data_source.blocks if b.Level == lvl])
gids = np.array([g.id for g in grids if g.Level == lvl],
dtype="int64")
+ if len(grids) == 0: continue
self.add_grids(grids)
def check_tree(self):
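With the optional fname above, a bare field name can now be passed and the field type falls back to "unknown" before being guessed; an illustrative sketch (pf is a placeholder loaded dataset, and the field/type names are placeholders too):

# Sketch only: both call forms now supported by _get_field_info.
finfo = pf._get_field_info("Density")          # ftype defaults to "unknown"
finfo = pf._get_field_info("gas", "Density")   # explicit (ftype, fname) still works
print(finfo.take_log)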
https://bitbucket.org/yt_analysis/yt/commits/9cd06e423304/
Changeset: 9cd06e423304
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-24 21:42:47
Summary: Merging
Affected #: 3 files
diff -r 9024c7fa22dc05a28c3973faef2474a9567623f4 -r 9cd06e4233044a84bc4f8c880a4e45417816f316 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -259,7 +259,8 @@
new_field[1:,1:,:-1] += of
new_field[1:,1:,1:] += of
np.multiply(new_field, 0.125, new_field)
- if self.pf.field_info[field].take_log:
+ finfo = self.pf._get_field_info(field)
+ if finfo.take_log:
new_field = np.log10(new_field)
new_field[:,:, -1] = 2.0*new_field[:,:,-2] - new_field[:,:,-3]
@@ -269,7 +270,7 @@
new_field[-1,:,:] = 2.0*new_field[-2,:,:] - new_field[-3,:,:]
new_field[0,:,:] = 2.0*new_field[1,:,:] - new_field[2,:,:]
- if self.pf.field_info[field].take_log:
+ if finfo.take_log:
np.power(10.0, new_field, new_field)
else:
cg = self.retrieve_ghost_zones(1, field, smoothed=smoothed)
diff -r 9024c7fa22dc05a28c3973faef2474a9567623f4 -r 9cd06e4233044a84bc4f8c880a4e45417816f316 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -291,7 +291,9 @@
_last_freq = (None, None)
_last_finfo = None
- def _get_field_info(self, ftype, fname):
+ def _get_field_info(self, ftype, fname = None):
+ if fname is None:
+ ftype, fname = "unknown", ftype
guessing_type = False
if ftype == "unknown" and self._last_freq[0] != None:
ftype = self._last_freq[0]
diff -r 9024c7fa22dc05a28c3973faef2474a9567623f4 -r 9cd06e4233044a84bc4f8c880a4e45417816f316 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -83,6 +83,7 @@
grids = np.array([b for b, mask in self.data_source.blocks if b.Level == lvl])
gids = np.array([g.id for g in grids if g.Level == lvl],
dtype="int64")
+ if len(grids) == 0: continue
self.add_grids(grids)
def check_tree(self):
https://bitbucket.org/yt_analysis/yt/commits/9885388169e7/
Changeset: 9885388169e7
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-24 22:31:25
Summary: Update load/save octree to work with partially-covered octrees.
Affected #: 3 files
diff -r 9cd06e4233044a84bc4f8c880a4e45417816f316 -r 9885388169e70b8641d0fb347b005d232d8be1ad yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1051,7 +1051,8 @@
left_edge = self.pf.domain_left_edge,
right_edge = self.pf.domain_right_edge,
octree = self.pf.octree_mask,
- over_refine = self.pf.over_refine_factor)
+ over_refine = self.pf.over_refine_factor,
+ partial_coverage = 1)
self.oct_handler = OctreeContainer.load_octree(header)
def _identify_base_chunk(self, dobj):
@@ -1113,7 +1114,10 @@
Parameters
----------
octree_mask : np.ndarray[uint8_t]
- This is a depth-first refinement mask for an Octree.
+ This is a depth-first refinement mask for an Octree. It should be of
+ size n_octs * 8, where each item is 1 for an oct-cell being refined and
+ 0 for it not being refined. Note that for over_refine_factors != 1,
+ the children count will still be 8, so this is always 8.
data : dict
A dictionary of 1D arrays. Note that these must of the size of the
number of "False" values in the ``octree_mask``.
diff -r 9cd06e4233044a84bc4f8c880a4e45417816f316 -r 9885388169e70b8641d0fb347b005d232d8be1ad yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -120,12 +120,15 @@
cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
ref_mask = header['octree']
cdef OctreeContainer obj = cls(header['dims'], header['left_edge'],
- header['right_edge'], over_refine = header['over_refine'])
+ header['right_edge'], over_refine = header['over_refine'],
+ partial_coverage = header['partial_coverage'])
# NOTE: We do not allow domain/file indices to be specified.
cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
cdef OctVisitorData data
obj.setup_data(&data, -1)
- obj.allocate_domains([ref_mask.shape[0]])
+ assert(ref_mask.shape[0] / 8.0 == <int>(ref_mask.shape[0]/8.0))
+ print ref_mask.shape[0], ref_mask.shape[0]/8.0
+ obj.allocate_domains([ref_mask.shape[0] / 8.0])
cdef int i, j, k, n
data.global_index = -1
data.level = 0
@@ -159,6 +162,7 @@
data.pos[0] = i
data.pos[1] = j
data.pos[2] = k
+ # Always visit covered
selector.recursively_visit_octs(
obj.root_mesh[i][j][k],
pos, dds, 0, oct_visitors.load_octree,
@@ -167,9 +171,10 @@
pos[1] += dds[1]
pos[0] += dds[0]
obj.nocts = cur.n_assigned
- if obj.nocts != ref_mask.size:
+ if obj.nocts * 8 != ref_mask.size:
print "SOMETHING WRONG", ref_mask.size, obj.nocts, obj.oref
- raise RuntimeError
+ raise KeyError(ref_mask.size, obj.nocts, obj.oref,
+ obj.partial_coverage)
return obj
cdef void setup_data(self, OctVisitorData *data, int domain_id = -1):
@@ -499,14 +504,16 @@
header = dict(dims = (self.nn[0], self.nn[1], self.nn[2]),
left_edge = (self.DLE[0], self.DLE[1], self.DLE[2]),
right_edge = (self.DRE[0], self.DRE[1], self.DRE[2]),
- over_refine = self.oref)
+ over_refine = self.oref,
+ partial_coverage = self.partial_coverage)
cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
# domain_id = -1 here, because we want *every* oct
cdef OctVisitorData data
self.setup_data(&data, -1)
cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
- ref_mask = np.zeros(self.nocts, dtype="uint8") - 1
+ ref_mask = np.zeros(self.nocts * 8, dtype="uint8") - 1
data.array = <void *> ref_mask.data
+ # Enforce partial_coverage here
self.visit_all_octs(selector, oct_visitors.store_octree, &data, 1)
header['octree'] = ref_mask
return header
diff -r 9cd06e4233044a84bc4f8c880a4e45417816f316 -r 9885388169e70b8641d0fb347b005d232d8be1ad yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -176,15 +176,15 @@
arr[o.domain - 1] += 1
cdef void store_octree(Oct *o, OctVisitorData *data, np.uint8_t selected):
- cdef np.uint8_t *arr
- if data.last != o.domain_ind:
- data.last = o.domain_ind
- arr = <np.uint8_t *> data.array
- if o.children == NULL:
- arr[data.index] = 0
- if o.children != NULL:
- arr[data.index] = 1
- data.index += 1
+ cdef np.uint8_t *arr, res, ii
+ ii = cind(data.ind[2], data.ind[1], data.ind[0])
+ arr = <np.uint8_t *> data.array
+ if o.children == NULL or o.children[ii] == NULL:
+ res = 0
+ else:
+ res = 1
+ arr[data.index] = res
+ data.index += 1
cdef void load_octree(Oct *o, OctVisitorData *data, np.uint8_t selected):
cdef void **p = <void **> data.array
@@ -192,21 +192,22 @@
cdef Oct* octs = <Oct*> p[1]
cdef np.int64_t *nocts = <np.int64_t*> p[2]
cdef np.int64_t *nfinest = <np.int64_t*> p[3]
- cdef int i
-
- if data.last != o.domain_ind:
- data.last = o.domain_ind
- if arr[data.index] == 0:
- o.children = NULL
- o.file_ind = nfinest[0]
- o.domain = 1
- nfinest[0] += 1
- if arr[data.index] == 1:
+ cdef int i, ii
+ ii = cind(data.ind[2], data.ind[1], data.ind[0])
+ if arr[data.index] == 0:
+ o.children = NULL
+ o.file_ind = nfinest[0]
+ o.domain = 1
+ nfinest[0] += 1
+ elif arr[data.index] == 1:
+ if o.children == NULL:
o.children = <Oct **> malloc(sizeof(Oct *) * 8)
for i in range(8):
- o.children[i] = &octs[nocts[0]]
- o.children[i].domain_ind = nocts[0]
- o.children[i].file_ind = -1
- o.children[i].domain = -1
- nocts[0] += 1
- data.index += 1
+ o.children[i] = NULL
+ o.children[ii] = &octs[nocts[0]]
+ o.children[ii].domain_ind = nocts[0]
+ o.children[ii].file_ind = -1
+ o.children[ii].domain = -1
+ o.children[ii].children = NULL
+ nocts[0] += 1
+ data.index += 1
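For orientation, a hedged sketch of the round trip these changes exercise (oct_handler stands in for an existing OctreeContainer; not part of the commit):

# Sketch only: header-based save/load round trip.
from yt.geometry.oct_container import OctreeContainer

header = oct_handler.save_octree()
# 'octree' is now a uint8 mask with one entry per child slot (n_octs * 8):
# 1 where that cell is refined into a child oct, 0 where it is a leaf.
assert header['octree'].size == oct_handler.nocts * 8

clone = OctreeContainer.load_octree(header)
assert clone.nocts == oct_handler.nocts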
https://bitbucket.org/yt_analysis/yt/commits/a0e03a112c73/
Changeset: a0e03a112c73
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-24 22:55:07
Summary: Attempting to thread partial_coverage through load_octree.
Affected #: 3 files
diff -r 9885388169e70b8641d0fb347b005d232d8be1ad -r a0e03a112c7395da8a123aaff688c135f50c4505 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1052,7 +1052,7 @@
right_edge = self.pf.domain_right_edge,
octree = self.pf.octree_mask,
over_refine = self.pf.over_refine_factor,
- partial_coverage = 1)
+ partial_coverage = self.pf.partial_coverage)
self.oct_handler = OctreeContainer.load_octree(header)
def _identify_base_chunk(self, dobj):
@@ -1102,7 +1102,7 @@
def load_octree(octree_mask, data, sim_unit_to_cm,
bbox=None, sim_time=0.0, periodicity=(True, True, True),
- over_refine_factor = 1):
+ over_refine_factor = 1, partial_coverage = 1):
r"""Load an octree mask into yt.
Octrees can be saved out by calling save_octree on an OctreeContainer.
@@ -1130,6 +1130,9 @@
periodicity : tuple of booleans
Determines whether the data will be treated as periodic along
each axis
+ partial_coverage : boolean
+ Whether or not an oct can be refined cell-by-cell, or whether all 8 get
+ refined.
"""
@@ -1176,6 +1179,7 @@
spf = StreamOctreeStaticOutput(handler)
spf.octree_mask = octree_mask
+ spf.partial_coverage = partial_coverage
spf.units["cm"] = sim_unit_to_cm
spf.units['1'] = 1.0
spf.units["unitary"] = 1.0
diff -r 9885388169e70b8641d0fb347b005d232d8be1ad -r a0e03a112c7395da8a123aaff688c135f50c4505 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -127,7 +127,6 @@
cdef OctVisitorData data
obj.setup_data(&data, -1)
assert(ref_mask.shape[0] / 8.0 == <int>(ref_mask.shape[0]/8.0))
- print ref_mask.shape[0], ref_mask.shape[0]/8.0
obj.allocate_domains([ref_mask.shape[0] / 8.0])
cdef int i, j, k, n
data.global_index = -1
diff -r 9885388169e70b8641d0fb347b005d232d8be1ad -r a0e03a112c7395da8a123aaff688c135f50c4505 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -177,7 +177,7 @@
cdef void store_octree(Oct *o, OctVisitorData *data, np.uint8_t selected):
cdef np.uint8_t *arr, res, ii
- ii = cind(data.ind[2], data.ind[1], data.ind[0])
+ ii = cind(data.ind[0], data.ind[1], data.ind[2])
arr = <np.uint8_t *> data.array
if o.children == NULL or o.children[ii] == NULL:
res = 0
@@ -193,7 +193,7 @@
cdef np.int64_t *nocts = <np.int64_t*> p[2]
cdef np.int64_t *nfinest = <np.int64_t*> p[3]
cdef int i, ii
- ii = cind(data.ind[2], data.ind[1], data.ind[0])
+ ii = cind(data.ind[0], data.ind[1], data.ind[2])
if arr[data.index] == 0:
o.children = NULL
o.file_ind = nfinest[0]
https://bitbucket.org/yt_analysis/yt/commits/210d9fd86ecd/
Changeset: 210d9fd86ecd
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-24 23:16:20
Summary: Fixing remaining issue. oref==0 does not currently work.
Affected #: 1 file
diff -r a0e03a112c7395da8a123aaff688c135f50c4505 -r 210d9fd86ecd652ed5fd034008ccab1cca0bec36 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -195,10 +195,13 @@
cdef int i, ii
ii = cind(data.ind[0], data.ind[1], data.ind[2])
if arr[data.index] == 0:
- o.children = NULL
- o.file_ind = nfinest[0]
- o.domain = 1
- nfinest[0] += 1
+ # We only want to do this once. Otherwise we end up with way too many
+ # nfinest for our tastes.
+ if o.file_ind == -1:
+ o.children = NULL
+ o.file_ind = nfinest[0]
+ o.domain = 1
+ nfinest[0] += 1
elif arr[data.index] == 1:
if o.children == NULL:
o.children = <Oct **> malloc(sizeof(Oct *) * 8)
https://bitbucket.org/yt_analysis/yt/commits/dd2c20258cd1/
Changeset: dd2c20258cd1
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-24 23:29:14
Summary: By setting oref and nz to 1 and 8 we visit all child *octs*.
Affected #: 1 file
diff -r 210d9fd86ecd652ed5fd034008ccab1cca0bec36 -r dd2c20258cd1f2a55d587349f666ef499ace72aa yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -131,6 +131,9 @@
cdef int i, j, k, n
data.global_index = -1
data.level = 0
+ # This is not something I terribly like, but it needs to be done.
+ data.oref = 1
+ data.nz = 8
cdef np.float64_t pos[3], dds[3]
# This dds is the oct-width
for i in range(3):
@@ -509,6 +512,8 @@
# domain_id = -1 here, because we want *every* oct
cdef OctVisitorData data
self.setup_data(&data, -1)
+ data.oref = 1
+ data.nz = 8
cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
ref_mask = np.zeros(self.nocts * 8, dtype="uint8") - 1
data.array = <void *> ref_mask.data
https://bitbucket.org/yt_analysis/yt/commits/a95df1ddb227/
Changeset: a95df1ddb227
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-24 23:47:54
Summary: This fixes a number of issues with oref == 0.
Among other things, particle deposition for oref == 0 now works correctly.
Affected #: 1 file
diff -r dd2c20258cd1f2a55d587349f666ef499ace72aa -r a95df1ddb227309f6d75bcd817be28d377119cbe yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -308,8 +308,9 @@
else:
next = NULL
if oinfo == NULL: return cur
+ cdef int ncells = (1 << self.oref)
cdef np.float64_t factor = 1.0 / (1 << (self.oref-1))
- if self.oref == 0: factor = 1.0
+ if self.oref == 0: factor = 2.0
for i in range(3):
# This will happen *after* we quit out, so we need to back out the
# last change to cp
@@ -319,13 +320,11 @@
cp[i] += dds[i]/2.0
# We don't normally need to change dds[i] as it has been halved
# from the oct width, thus making it already the cell width.
- # But, for some cases where the oref != 1, this needs to be
- # changed.
+        # But, since not everything has the cell width equal to half the
+ # width of the oct, we need to apply "factor".
oinfo.dds[i] = dds[i] * factor # Cell width
- oinfo.left_edge[i] = cp[i] - dds[i] # Center minus dds
oinfo.ipos[i] = ipos[i]
- if self.oref == 0:
- oinfo.dds[i] = dds[i] # Same here as elsewhere
+ oinfo.left_edge[i] = oinfo.ipos[i] * (oinfo.dds[i] * ncells) + self.DLE[i]
oinfo.level = level
return cur
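A worked check of the cell-width factor these lines encode (self-contained, illustrative only):

# dds at this point is half the oct width; cell width = oct_width / ncells.
for oref in (0, 1, 2):
    ncells = 1 << oref                                   # cells per oct edge
    factor = 2.0 if oref == 0 else 1.0 / (1 << (oref - 1))
    assert abs(factor - 2.0 / ncells) < 1e-15            # dds * factor == cell width
# left_edge = ipos * (cell_width * ncells) + DLE, i.e. ipos * oct_width + DLE.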
https://bitbucket.org/yt_analysis/yt/commits/d6fb32282181/
Changeset: d6fb32282181
Branch: yt-3.0
User: ngoldbaum
Date: 2013-10-25 00:47:01
Summary: Fixing the handling of the normal keyword in the new SlicePlot factory function.
Affected #: 1 file
diff -r 788e19ee51e682974a4d576f0e0912611c92d454 -r d6fb3228218105fccef16f36c4c8b7f9d8c9748d yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -2123,7 +2123,10 @@
if iterable(normal) and not isinstance(normal,str):
normal = np.array(normal)
np.divide(normal, np.dot(normal,normal), normal)
- if np.count_nonzero(normal) == 1:
+ elif isinstance(normal, str):
+ assert normal in ('x', 'y', 'z'), \
+ "The normal axis must be 'x', 'y', or 'z'"
+ elif np.count_nonzero(normal) == 1:
normal = ("x","y","z")[np.nonzero(normal)[0][0]]
# by now the normal should be properly set to get either a On/Off Axis plot
https://bitbucket.org/yt_analysis/yt/commits/efc7c71f4b7b/
Changeset: efc7c71f4b7b
Branch: yt-3.0
User: ngoldbaum
Date: 2013-10-25 03:40:26
Summary: Fixing the logic a bit.
Affected #: 1 file
diff -r d6fb3228218105fccef16f36c4c8b7f9d8c9748d -r efc7c71f4b7b656280a82234ecf43e7e800476a5 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -2121,14 +2121,12 @@
# use an AxisAlignedSlicePlot where possible, e.g.:
# maybe someone passed normal=[0,0,0.2] when they should have just used "z"
if iterable(normal) and not isinstance(normal,str):
- normal = np.array(normal)
- np.divide(normal, np.dot(normal,normal), normal)
- elif isinstance(normal, str):
- assert normal in ('x', 'y', 'z'), \
- "The normal axis must be 'x', 'y', or 'z'"
- elif np.count_nonzero(normal) == 1:
- normal = ("x","y","z")[np.nonzero(normal)[0][0]]
-
+ if np.count_nonzero(normal) == 1:
+ normal = ("x","y","z")[np.nonzero(normal)[0][0]]
+ else:
+ normal = np.array(normal)
+ np.divide(normal, np.dot(normal,normal), normal)
+
# by now the normal should be properly set to get either a On/Off Axis plot
if iterable(normal) and not isinstance(normal,str):
# OffAxisSlicePlot has hardcoded origin; remove it if in kwargs
https://bitbucket.org/yt_analysis/yt/commits/69034824f51e/
Changeset: 69034824f51e
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-25 16:59:17
Summary: Merged in ngoldbaum/yt-3.0 (pull request #125)
Fixing the handling of the normal keyword in the new SlicePlot factory function.
Affected #: 1 file
diff -r 9cd06e4233044a84bc4f8c880a4e45417816f316 -r 69034824f51e8ff3ac076e0ee3ded333c534d4ad yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -2121,11 +2121,12 @@
# use an AxisAlignedSlicePlot where possible, e.g.:
# maybe someone passed normal=[0,0,0.2] when they should have just used "z"
if iterable(normal) and not isinstance(normal,str):
- normal = np.array(normal)
- np.divide(normal, np.dot(normal,normal), normal)
- if np.count_nonzero(normal) == 1:
- normal = ("x","y","z")[np.nonzero(normal)[0][0]]
-
+ if np.count_nonzero(normal) == 1:
+ normal = ("x","y","z")[np.nonzero(normal)[0][0]]
+ else:
+ normal = np.array(normal)
+ np.divide(normal, np.dot(normal,normal), normal)
+
# by now the normal should be properly set to get either a On/Off Axis plot
if iterable(normal) and not isinstance(normal,str):
# OffAxisSlicePlot has hardcoded origin; remove it if in kwargs
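The net effect of these three changesets, as a standalone sketch (not the yt function itself; np.iterable stands in for yt.funcs.iterable): an axis-aligned vector collapses to the matching axis letter, and anything else is normalized and handed to the off-axis path.

    import numpy as np

    def coerce_normal(normal):
        if not isinstance(normal, str) and np.iterable(normal):
            if np.count_nonzero(normal) == 1:
                normal = ("x", "y", "z")[np.nonzero(normal)[0][0]]
            else:
                normal = np.array(normal, dtype="float64")
                np.divide(normal, np.dot(normal, normal), normal)  # as in the diff
        return normal

    coerce_normal([0, 0, 0.2])    # -> "z"
    coerce_normal([1.0, 1.0, 0])  # -> array([ 0.5,  0.5,  0. ])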
https://bitbucket.org/yt_analysis/yt/commits/d927877e4039/
Changeset: d927877e4039
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-25 17:59:18
Summary: This fixes detecting all added fields.
Affected #: 1 file
diff -r 69034824f51e8ff3ac076e0ee3ded333c534d4ad -r d927877e40391cb0f50d581a3e74f941482c333f yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -248,7 +248,7 @@
fi = self.parameter_file.field_info
# First we construct our list of fields to check
fields_to_check = []
- for field in fi.keys():
+ for field in fi:
finfo = fi[field]
# Explicitly defined
if isinstance(field, tuple):
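The one-line change from fi.keys() to fi works because a mapping-like container can define __iter__ to yield more than its own keys. The exact behavior of the field_info container is not shown in this diff, so the class below is only a hypothetical illustration of how the two iteration styles can diverge:

    class FallbackDict(dict):
        # dict subclass whose iteration also walks a fallback mapping
        def __init__(self, fallback, *args, **kwargs):
            super(FallbackDict, self).__init__(*args, **kwargs)
            self.fallback = fallback

        def __iter__(self):
            seen = set()
            for key in dict.__iter__(self):
                seen.add(key)
                yield key
            for key in self.fallback:
                if key not in seen:
                    yield key

    fi = FallbackDict({"Density": None}, {("gas", "Density"): None})
    list(fi.keys())  # [('gas', 'Density')]
    list(fi)         # [('gas', 'Density'), 'Density']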
https://bitbucket.org/yt_analysis/yt/commits/d9d68aac9f0c/
Changeset: d9d68aac9f0c
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-25 18:46:25
Summary: This adds a second derived field check after particle types have been created.
This addresses the case where a baryon field and a particle field are
combined: currently the ordering of field construction makes one
available and the other unavailable. See #601 for more details.
I would prefer the solution to be re-generating dependencies as needed,
but we are not yet able to do that.
Affected #: 4 files
diff -r d927877e40391cb0f50d581a3e74f941482c333f -r d9d68aac9f0c49a052b6d8285ae4b4cebe37c882 yt/fields/universal_fields.py
--- a/yt/fields/universal_fields.py
+++ b/yt/fields/universal_fields.py
@@ -375,7 +375,7 @@
convert_function=_convertCellMassCode)
def _TotalMass(field,data):
- return (data["gas","Density"]+data[("deposit", "particle_density")]) * \
+ return (data["gas","Density"]+data[("deposit", "all_density")]) * \
data["CellVolume"]
add_field("TotalMass", function=_TotalMass, units=r"\rm{g}")
add_field("TotalMassMsun", units=r"M_{\odot}",
diff -r d927877e40391cb0f50d581a3e74f941482c333f -r d9d68aac9f0c49a052b6d8285ae4b4cebe37c882 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -150,7 +150,6 @@
if fid: fid.close()
return rv
-
class IOHandlerPackedHDF5GhostZones(IOHandlerPackedHDF5):
_data_style = "enzo_packed_3d_gz"
diff -r d927877e40391cb0f50d581a3e74f941482c333f -r d9d68aac9f0c49a052b6d8285ae4b4cebe37c882 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -83,6 +83,9 @@
mylog.debug("Setting up particle fields")
self._setup_particle_types()
+ mylog.debug("Checking derived fields again")
+ self._derived_fields_add(self._check_later)
+
def __del__(self):
if self._data_file is not None:
self._data_file.close()
@@ -110,6 +113,7 @@
self._data_mode = None
self._max_locations = {}
self.num_grids = None
+ self._check_later = []
def _setup_classes(self, dd):
# Called by subclass
@@ -273,10 +277,12 @@
if fields_to_check is None:
fields_to_check = []
fi = self.parameter_file.field_info
+ self._check_later = []
for field in fields_to_check:
try:
fd = fi[field].get_dependencies(pf = self.parameter_file)
except Exception as e:
+ self._check_later.append(field)
if type(e) != YTFieldNotFound:
mylog.debug("Raises %s during field %s detection.",
str(type(e)), field)
diff -r d927877e40391cb0f50d581a3e74f941482c333f -r d9d68aac9f0c49a052b6d8285ae4b4cebe37c882 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -114,7 +114,7 @@
return None
def _read_chunk_data(self, chunk, fields):
- return None
+ return {}
def _read_particle_selection(self, chunks, selector, fields):
rv = {}
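A minimal sketch, with names of my own choosing rather than yt's, of the "check later" pattern this commit introduces: fields whose dependency lookup fails on the first pass are queued and re-checked once particle types have been set up.

    def detect_fields(field_info, fields_to_check):
        detected, check_later = [], []
        for field in fields_to_check:
            try:
                field_info[field].get_dependencies()
                detected.append(field)
            except KeyError:  # stands in for YTFieldNotFound and friends
                check_later.append(field)
        return detected, check_later

    # first pass: combinations of baryon and particle fields land in check_later;
    # after _setup_particle_types() the same routine is run again on that list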
https://bitbucket.org/yt_analysis/yt/commits/9bc960be45cb/
Changeset: 9bc960be45cb
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-25 19:32:15
Summary: Merged in MatthewTurk/yt-3.0 (pull request #124)
Fixes for loading and saving of octrees and oref == 0
Affected #: 3 files
diff -r d9d68aac9f0c49a052b6d8285ae4b4cebe37c882 -r 9bc960be45cb0664d924523552460752a67c5325 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1051,7 +1051,8 @@
left_edge = self.pf.domain_left_edge,
right_edge = self.pf.domain_right_edge,
octree = self.pf.octree_mask,
- over_refine = self.pf.over_refine_factor)
+ over_refine = self.pf.over_refine_factor,
+ partial_coverage = self.pf.partial_coverage)
self.oct_handler = OctreeContainer.load_octree(header)
def _identify_base_chunk(self, dobj):
@@ -1101,7 +1102,7 @@
def load_octree(octree_mask, data, sim_unit_to_cm,
bbox=None, sim_time=0.0, periodicity=(True, True, True),
- over_refine_factor = 1):
+ over_refine_factor = 1, partial_coverage = 1):
r"""Load an octree mask into yt.
Octrees can be saved out by calling save_octree on an OctreeContainer.
@@ -1113,7 +1114,10 @@
Parameters
----------
octree_mask : np.ndarray[uint8_t]
- This is a depth-first refinement mask for an Octree.
+ This is a depth-first refinement mask for an Octree. It should be of
+ size n_octs * 8, where each item is 1 for an oct-cell being refined and
+ 0 for it not being refined. Note that for over_refine_factors != 1,
+ the children count will still be 8, so this is always 8.
data : dict
A dictionary of 1D arrays. Note that these must be of the size of the
number of "False" values in the ``octree_mask``.
@@ -1126,6 +1130,9 @@
periodicity : tuple of booleans
Determines whether the data will be treated as periodic along
each axis
+ partial_coverage : boolean
+ Whether or not an oct can be refined cell-by-cell, or whether all 8 get
+ refined.
"""
@@ -1172,6 +1179,7 @@
spf = StreamOctreeStaticOutput(handler)
spf.octree_mask = octree_mask
+ spf.partial_coverage = partial_coverage
spf.units["cm"] = sim_unit_to_cm
spf.units['1'] = 1.0
spf.units["unitary"] = 1.0
diff -r d9d68aac9f0c49a052b6d8285ae4b4cebe37c882 -r 9bc960be45cb0664d924523552460752a67c5325 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -120,15 +120,20 @@
cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
ref_mask = header['octree']
cdef OctreeContainer obj = cls(header['dims'], header['left_edge'],
- header['right_edge'], over_refine = header['over_refine'])
+ header['right_edge'], over_refine = header['over_refine'],
+ partial_coverage = header['partial_coverage'])
# NOTE: We do not allow domain/file indices to be specified.
cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
cdef OctVisitorData data
obj.setup_data(&data, -1)
- obj.allocate_domains([ref_mask.shape[0]])
+ assert(ref_mask.shape[0] / 8.0 == <int>(ref_mask.shape[0]/8.0))
+ obj.allocate_domains([ref_mask.shape[0] / 8.0])
cdef int i, j, k, n
data.global_index = -1
data.level = 0
+ # This is not something I terribly like, but it needs to be done.
+ data.oref = 1
+ data.nz = 8
cdef np.float64_t pos[3], dds[3]
# This dds is the oct-width
for i in range(3):
@@ -159,6 +164,7 @@
data.pos[0] = i
data.pos[1] = j
data.pos[2] = k
+ # Always visit covered
selector.recursively_visit_octs(
obj.root_mesh[i][j][k],
pos, dds, 0, oct_visitors.load_octree,
@@ -167,9 +173,10 @@
pos[1] += dds[1]
pos[0] += dds[0]
obj.nocts = cur.n_assigned
- if obj.nocts != ref_mask.size:
+ if obj.nocts * 8 != ref_mask.size:
print "SOMETHING WRONG", ref_mask.size, obj.nocts, obj.oref
- raise RuntimeError
+ raise KeyError(ref_mask.size, obj.nocts, obj.oref,
+ obj.partial_coverage)
return obj
cdef void setup_data(self, OctVisitorData *data, int domain_id = -1):
@@ -301,8 +308,9 @@
else:
next = NULL
if oinfo == NULL: return cur
+ cdef int ncells = (1 << self.oref)
cdef np.float64_t factor = 1.0 / (1 << (self.oref-1))
- if self.oref == 0: factor = 1.0
+ if self.oref == 0: factor = 2.0
for i in range(3):
# This will happen *after* we quit out, so we need to back out the
# last change to cp
@@ -312,13 +320,11 @@
cp[i] += dds[i]/2.0
# We don't normally need to change dds[i] as it has been halved
# from the oct width, thus making it already the cell width.
- # But, for some cases where the oref != 1, this needs to be
- # changed.
+ # But, since not everything has the cell width equal to half the
+ # width of the oct, we need to apply "factor".
oinfo.dds[i] = dds[i] * factor # Cell width
- oinfo.left_edge[i] = cp[i] - dds[i] # Center minus dds
oinfo.ipos[i] = ipos[i]
- if self.oref == 0:
- oinfo.dds[i] = dds[i] # Same here as elsewhere
+ oinfo.left_edge[i] = oinfo.ipos[i] * (oinfo.dds[i] * ncells) + self.DLE[i]
oinfo.level = level
return cur
@@ -499,14 +505,18 @@
header = dict(dims = (self.nn[0], self.nn[1], self.nn[2]),
left_edge = (self.DLE[0], self.DLE[1], self.DLE[2]),
right_edge = (self.DRE[0], self.DRE[1], self.DRE[2]),
- over_refine = self.oref)
+ over_refine = self.oref,
+ partial_coverage = self.partial_coverage)
cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
# domain_id = -1 here, because we want *every* oct
cdef OctVisitorData data
self.setup_data(&data, -1)
+ data.oref = 1
+ data.nz = 8
cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
- ref_mask = np.zeros(self.nocts, dtype="uint8") - 1
+ ref_mask = np.zeros(self.nocts * 8, dtype="uint8") - 1
data.array = <void *> ref_mask.data
+ # Enforce partial_coverage here
self.visit_all_octs(selector, oct_visitors.store_octree, &data, 1)
header['octree'] = ref_mask
return header
diff -r d9d68aac9f0c49a052b6d8285ae4b4cebe37c882 -r 9bc960be45cb0664d924523552460752a67c5325 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -176,15 +176,15 @@
arr[o.domain - 1] += 1
cdef void store_octree(Oct *o, OctVisitorData *data, np.uint8_t selected):
- cdef np.uint8_t *arr
- if data.last != o.domain_ind:
- data.last = o.domain_ind
- arr = <np.uint8_t *> data.array
- if o.children == NULL:
- arr[data.index] = 0
- if o.children != NULL:
- arr[data.index] = 1
- data.index += 1
+ cdef np.uint8_t *arr, res, ii
+ ii = cind(data.ind[0], data.ind[1], data.ind[2])
+ arr = <np.uint8_t *> data.array
+ if o.children == NULL or o.children[ii] == NULL:
+ res = 0
+ else:
+ res = 1
+ arr[data.index] = res
+ data.index += 1
cdef void load_octree(Oct *o, OctVisitorData *data, np.uint8_t selected):
cdef void **p = <void **> data.array
@@ -192,21 +192,25 @@
cdef Oct* octs = <Oct*> p[1]
cdef np.int64_t *nocts = <np.int64_t*> p[2]
cdef np.int64_t *nfinest = <np.int64_t*> p[3]
- cdef int i
-
- if data.last != o.domain_ind:
- data.last = o.domain_ind
- if arr[data.index] == 0:
+ cdef int i, ii
+ ii = cind(data.ind[0], data.ind[1], data.ind[2])
+ if arr[data.index] == 0:
+ # We only want to do this once. Otherwise we end up with way too many
+ # nfinest for our tastes.
+ if o.file_ind == -1:
o.children = NULL
o.file_ind = nfinest[0]
o.domain = 1
nfinest[0] += 1
- if arr[data.index] == 1:
+ elif arr[data.index] == 1:
+ if o.children == NULL:
o.children = <Oct **> malloc(sizeof(Oct *) * 8)
for i in range(8):
- o.children[i] = &octs[nocts[0]]
- o.children[i].domain_ind = nocts[0]
- o.children[i].file_ind = -1
- o.children[i].domain = -1
- nocts[0] += 1
- data.index += 1
+ o.children[i] = NULL
+ o.children[ii] = &octs[nocts[0]]
+ o.children[ii].domain_ind = nocts[0]
+ o.children[ii].file_ind = -1
+ o.children[ii].domain = -1
+ o.children[ii].children = NULL
+ nocts[0] += 1
+ data.index += 1
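A hedged usage sketch of the load_octree signature and mask convention documented in the hunk above; the field name, mask, and unit values are made up, and whether this exact call runs depends on stream-frontend details not shown here:

    import numpy as np
    from yt.frontends.stream.data_structures import load_octree

    # one root oct, none of whose 8 cells is refined: 8 zeros in the depth-first
    # mask, so each field supplies 8 leaf values
    octree_mask = np.zeros(8, dtype="uint8")
    data = {"Density": np.arange(8, dtype="float64")}
    bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
    spf = load_octree(octree_mask, data, sim_unit_to_cm=1.0, bbox=bbox,
                      over_refine_factor=1, partial_coverage=1)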
https://bitbucket.org/yt_analysis/yt/commits/1773e02166ca/
Changeset: 1773e02166ca
Branch: yt-3.0
User: scopatz
Date: 2013-10-29 02:02:55
Summary: fixed up PyneMoabHex8 to use updated pyne mesh tagging capabilities.
Affected #: 2 files
diff -r 9bc960be45cb0664d924523552460752a67c5325 -r 1773e02166caa9bf1042c029325bc761cc0a60d9 yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -157,11 +157,12 @@
#self.field_list = self.pyne_mesh.mesh.getAllTags(
# self.pyne_mesh.mesh.rootSet)
# So we have to look at each entity.
- tags = set([])
- for ent in self.pyne_mesh.mesh.rootSet:
- for tag in self.pyne_mesh.mesh.getAllTags(ent):
- tags.add(tag.name)
- self.field_list = list(tags)
+ #tags = set([])
+ #for ent in self.pyne_mesh.mesh.rootSet:
+ # for tag in self.pyne_mesh.mesh.getAllTags(ent):
+ # tags.add(tag.name)
+ #self.field_list = list(tags)
+ self.field_list = self.pyne_mesh.tags.keys()
def _count_grids(self):
self.num_grids = 1
diff -r 9bc960be45cb0664d924523552460752a67c5325 -r 1773e02166caa9bf1042c029325bc761cc0a60d9 yt/frontends/moab/io.py
--- a/yt/frontends/moab/io.py
+++ b/yt/frontends/moab/io.py
@@ -74,3 +74,28 @@
for g in chunk.objs:
ind += g.select(selector, ds, rv[field], ind) # caches
return rv
+
+ def _read_fluid_selection(self, chunks, selector, fields, size):
+ chunks = list(chunks)
+ assert(len(chunks) == 1)
+ tags = {}
+ rv = {}
+ pyne_mesh = self.pf.pyne_mesh
+ mesh = pyne_mesh.mesh
+ for field in fields:
+ rv[field] = np.empty(size, dtype="float64")
+ from itaps import iBase
+ ve_idx_tag = mesh.getTagHandle('ve_idx')
+ ents = pyne_mesh.structured_set.getEntities(iBase.Type.region)
+ ve_idx = ve_idx_tag[ents]
+ ngrids = sum(len(chunk.objs) for chunk in chunks)
+ mylog.debug("Reading %s cells of %s fields in %s blocks",
+ size, [fname for ftype, fname in fields], ngrids)
+ for field in fields:
+ ftype, fname = field
+ ds = np.asarray(getattr(pyne_mesh, fname)[ve_idx], 'float64')
+ ind = 0
+ for chunk in chunks:
+ for g in chunk.objs:
+ ind += g.select(selector, ds, rv[field], ind) # caches
+ return rv
https://bitbucket.org/yt_analysis/yt/commits/62e9d46abd61/
Changeset: 62e9d46abd61
Branch: yt-3.0
User: scopatz
Date: 2013-10-29 02:54:24
Summary: rm'd some commented and extraneous code
Affected #: 2 files
diff -r 1773e02166caa9bf1042c029325bc761cc0a60d9 -r 62e9d46abd61ee4ff02a8a33f6b9b8bf34da7cd9 yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -152,16 +152,6 @@
vind, coords, self)]
def _detect_fields(self):
- # Currently, I don't know a better way to do this. This code, for
- # example, does not work:
- #self.field_list = self.pyne_mesh.mesh.getAllTags(
- # self.pyne_mesh.mesh.rootSet)
- # So we have to look at each entity.
- #tags = set([])
- #for ent in self.pyne_mesh.mesh.rootSet:
- # for tag in self.pyne_mesh.mesh.getAllTags(ent):
- # tags.add(tag.name)
- #self.field_list = list(tags)
self.field_list = self.pyne_mesh.tags.keys()
def _count_grids(self):
diff -r 1773e02166caa9bf1042c029325bc761cc0a60d9 -r 62e9d46abd61ee4ff02a8a33f6b9b8bf34da7cd9 yt/frontends/moab/io.py
--- a/yt/frontends/moab/io.py
+++ b/yt/frontends/moab/io.py
@@ -55,31 +55,6 @@
assert(len(chunks) == 1)
tags = {}
rv = {}
- mesh = self.pf.pyne_mesh.mesh
- for field in fields:
- ftype, fname = field
- rv[field] = np.empty(size, dtype="float64")
- tags[field] = mesh.getTagHandle(fname)
- from itaps import iBase
- ents = self.pf.pyne_mesh.structured_set.getEntities(
- iBase.Type.region)
- ngrids = sum(len(chunk.objs) for chunk in chunks)
- mylog.debug("Reading %s cells of %s fields in %s blocks",
- size, [fname for ftype, fname in fields], ngrids)
- for field in fields:
- ftype, fname = field
- ds = tags[field][ents]
- ind = 0
- for chunk in chunks:
- for g in chunk.objs:
- ind += g.select(selector, ds, rv[field], ind) # caches
- return rv
-
- def _read_fluid_selection(self, chunks, selector, fields, size):
- chunks = list(chunks)
- assert(len(chunks) == 1)
- tags = {}
- rv = {}
pyne_mesh = self.pf.pyne_mesh
mesh = pyne_mesh.mesh
for field in fields:
https://bitbucket.org/yt_analysis/yt/commits/75ffc619a1d7/
Changeset: 75ffc619a1d7
Branch: yt-3.0
User: scopatz
Date: 2013-10-29 03:31:50
Summary: further improvements to make field access faster
Affected #: 2 files
diff -r 62e9d46abd61ee4ff02a8a33f6b9b8bf34da7cd9 -r 75ffc619a1d7bee7cbfbe795e62107e4485e9a6f yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -141,9 +141,9 @@
def _initialize_mesh(self):
from itaps import iBase, iMesh
- ent = self.pyne_mesh.structured_set.getEntities(iBase.Type.vertex)
- coords = self.pyne_mesh.mesh.getVtxCoords(ent).astype("float64")
- vind = self.pyne_mesh.structured_set.getAdjEntIndices(
+ ents = self.pyne_mesh.mesh.rootSet.getEntities(iBase.Type.vertex)
+ coords = self.pyne_mesh.mesh.getVtxCoords(ents).astype("float64")
+ vind = self.pyne_mesh.mesh.rootSet.getAdjEntIndices(
iBase.Type.region, iMesh.Topology.hexahedron,
iBase.Type.vertex)[1].indices.data.astype("int64")
# Divide by float so it throws an error if it's not 8
@@ -192,8 +192,8 @@
def _parse_parameter_file(self):
from itaps import iBase
- ent = self.pyne_mesh.structured_set.getEntities(iBase.Type.vertex)
- coords = self.pyne_mesh.mesh.getVtxCoords(ent)
+ ents = self.pyne_mesh.mesh.rootSet.getEntities(iBase.Type.vertex)
+ coords = self.pyne_mesh.mesh.getVtxCoords(ents)
self.domain_left_edge = coords[0]
self.domain_right_edge = coords[-1]
self.domain_dimensions = self.domain_right_edge - self.domain_left_edge
diff -r 62e9d46abd61ee4ff02a8a33f6b9b8bf34da7cd9 -r 75ffc619a1d7bee7cbfbe795e62107e4485e9a6f yt/frontends/moab/io.py
--- a/yt/frontends/moab/io.py
+++ b/yt/frontends/moab/io.py
@@ -59,16 +59,12 @@
mesh = pyne_mesh.mesh
for field in fields:
rv[field] = np.empty(size, dtype="float64")
- from itaps import iBase
- ve_idx_tag = mesh.getTagHandle('ve_idx')
- ents = pyne_mesh.structured_set.getEntities(iBase.Type.region)
- ve_idx = ve_idx_tag[ents]
ngrids = sum(len(chunk.objs) for chunk in chunks)
mylog.debug("Reading %s cells of %s fields in %s blocks",
size, [fname for ftype, fname in fields], ngrids)
for field in fields:
ftype, fname = field
- ds = np.asarray(getattr(pyne_mesh, fname)[ve_idx], 'float64')
+ ds = np.asarray(getattr(pyne_mesh, fname)[:], 'float64')
ind = 0
for chunk in chunks:
for g in chunk.objs:
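These MOAB readers all follow the same chunked read pattern; a minimal, hypothetical sketch of it (get_source stands in for however the field array is obtained, e.g. getattr(pyne_mesh, fname)[:]):

    import numpy as np

    def read_fields(chunks, selector, fields, size, get_source):
        rv = {}
        for field in fields:
            rv[field] = np.empty(size, dtype="float64")
        for field in fields:
            ds = get_source(field)  # full 1D source array for this field
            ind = 0
            for chunk in chunks:
                for g in chunk.objs:
                    # select() copies the cells chosen by selector into rv[field]
                    # starting at offset ind and returns how many it wrote
                    ind += g.select(selector, ds, rv[field], ind)
        return rv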
https://bitbucket.org/yt_analysis/yt/commits/0ca385183a06/
Changeset: 0ca385183a06
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-30 18:01:45
Summary: This AssertionError can mask other frontends, and other errors.
Affected #: 1 file
diff -r 75ffc619a1d7bee7cbfbe795e62107e4485e9a6f -r 0ca385183a06b5fd4057a139ccc7e29db348124d yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -426,7 +426,7 @@
try:
amr_header_vals = read_attrs(fh, amr_header_struct, '>')
return True
- except AssertionError:
+ except:
return False
return False
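A hypothetical sketch (not the ART frontend itself) of the validity-check pattern this commit broadens: any failure while probing the file is treated as "not this format", so detection can move on to other frontends instead of aborting.

    def is_valid(path):
        try:
            with open(path, "rb") as fh:
                magic = fh.read(4)   # stands in for read_attrs(fh, header_struct)
            return magic == b"DEMO"  # made-up magic bytes, for illustration only
        except Exception:
            # the commit uses a bare "except:"; anything unexpected also means "no"
            return False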
https://bitbucket.org/yt_analysis/yt/commits/c225d6258e00/
Changeset: c225d6258e00
Branch: yt-3.0
User: MatthewTurk
Date: 2013-11-01 17:02:34
Summary: Merging
Affected #: 1 file
diff -r 0ca385183a06b5fd4057a139ccc7e29db348124d -r c225d6258e00fa8b7bd86d16412f59d36f09a2b7 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1124,106 +1124,6 @@
def __getitem__(self, key):
return self._groups[key]
- def nearest_neighbors_3D(self, haloID, num_neighbors=7, search_radius=.2):
- r"""For a halo its nearest neighbors in 3D using the kd tree.
-
- This will calculate the nearest neighbors of a halo, using the kD tree.
- Returns a list of the neighbors distances and ID with format
- [distance,haloID].
-
- Parameters
- ----------
- haloID : integer
- The halo to find neighbors for.
- num_neighbors : integer
- How many neighbors to search for. Default = 7.
- search_radius : float
- How far away to look for neighbors in code units. Default = 0.2.
-
- Examples
- --------
- >>> neighbors = halos.nearest_neighbors_3D(0)
- """
- period = self.pf.domain_right_edge - self.pf.domain_left_edge
- # Initialize the dataset of points from all the haloes
- dataset = []
- for group in self:
- p = Point()
- p.data = group.center_of_mass().tolist()
- p.haloID = group.id
- dataset.append(p)
- mylog.info('Building kd tree...')
- kd = buildKdHyperRectTree(dataset[:], 2 * num_neighbors)
- # make the neighbors object
- neighbors = Neighbors()
- neighbors.k = num_neighbors
- neighbors.points = []
- neighbors.minDistanceSquared = search_radius * search_radius
- mylog.info('Finding nearest neighbors...')
- getKNN(self[haloID].center_of_mass().tolist(), kd, neighbors, 0.,
- period.tolist())
- # convert the data in order to return something less perverse than a
- # Neighbors object, also root the distances
- n_points = []
- for n in neighbors.points:
- n_points.append([math.sqrt(n[0]), n[1].haloID])
- return n_points
-
- def nearest_neighbors_2D(self, haloID, num_neighbors=7, search_radius=.2,
- proj_dim=0):
- r"""For a halo its nearest neighbors in 2D using the kd tree.
-
- This will strip a dimension from consideration in the kD-tree, and then
- calculate all the nearest projected neighbors of a halo. Returns a
- list of the neighbors distances and ID with format [distance,haloID].
-
- Parameters
- ----------
- haloID : int
- The halo to find neighbors for.
- num_neighbors : int
- How many neighbors to search for. Default = 7.
- search_radius : float
- How far away to look for neighbors in code units. Default = 0.2.
- proj_dim : int
- Which dimension (0, 1, or 2) to project the halos into 2D.
- Default = 0.
-
- Examples
- --------
- >>> neighbors = halos.nearest_neighbors_2D(0)
- """
- # Set up a vector to multiply other
- # vectors by to project along proj_dim
- vec = np.array([1., 1., 1.])
- vec[proj_dim] = 0.
- period = self.pf.domain_right_edge - self.pf.domain_left_edge
- period = period * vec
- # Initialize the dataset of points from all the haloes
- dataset = []
- for group in self:
- p = Point()
- cm = group.center_of_mass() * vec
- p.data = cm.tolist()
- p.haloID = group.id
- dataset.append(p)
- mylog.info('Building kd tree...')
- kd = buildKdHyperRectTree(dataset[:], 2 * num_neighbors)
- # make the neighbors object
- neighbors = Neighbors()
- neighbors.k = num_neighbors
- neighbors.points = []
- neighbors.minDistanceSquared = search_radius * search_radius
- mylog.info('Finding nearest neighbors...')
- cm = self[haloID].center_of_mass() * vec
- getKNN(cm.tolist(), kd, neighbors, 0., period.tolist())
- # convert the data in order to return something less perverse than a
- # Neighbors object, also root the distances
- n_points = []
- for n in neighbors.points:
- n_points.append([math.sqrt(n[0]), n[1].haloID])
- return n_points
-
def write_out(self, filename, ellipsoid_data=False):
r"""Write out standard halo information to a text file.
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the yt-svn
mailing list