[Yt-svn] commit/yt: 2 new changesets
Bitbucket
commits-noreply at bitbucket.org
Fri May 27 08:17:59 PDT 2011
2 new changesets in yt:
http://bitbucket.org/yt_analysis/yt/changeset/5b1d7160e413/
changeset: 5b1d7160e413
branches:
user: MatthewTurk
date: 2011-05-27 17:15:47
summary: Fixes and improvements to the Stream IO system.
affected #: 2 files (546 bytes)
--- a/yt/frontends/stream/api.py Fri May 27 10:20:39 2011 -0400
+++ b/yt/frontends/stream/api.py Fri May 27 11:15:47 2011 -0400
@@ -27,7 +27,8 @@
from .data_structures import \
StreamGrid, \
StreamHierarchy, \
- StreamStaticOutput
+ StreamStaticOutput, \
+ StreamHandler
from .fields import \
StreamFieldContainer, \
--- a/yt/frontends/stream/data_structures.py Fri May 27 10:20:39 2011 -0400
+++ b/yt/frontends/stream/data_structures.py Fri May 27 11:15:47 2011 -0400
@@ -26,6 +26,7 @@
import weakref
import numpy as na
+from yt.utilities.io_handler import io_registry
from yt.funcs import *
from yt.config import ytcfg
from yt.data_objects.grid_patch import \
@@ -89,7 +90,7 @@
return [self.hierarchy.grids[cid - self._id_offset]
for cid in self._children_ids]
-class StreamHandlers(object):
+class StreamHandler(object):
def __init__(self, left_edges, right_edges, dimensions,
levels, parent_ids, particle_count, processor_ids,
fields):
@@ -104,8 +105,7 @@
self.fields = fields
def get_fields(self):
- field_list = set()
- for fl in self.fields.values(): field_list.update(fl.keys())
+ return self.fields.all_fields
class StreamHierarchy(AMRHierarchy):
@@ -115,7 +115,7 @@
self.data_style = data_style
self.float_type = 'float64'
self.parameter_file = weakref.proxy(pf) # for _obtain_enzo
- self.stream_handlers = pf.stream_handlers
+ self.stream_handler = pf.stream_handler
self.float_type = "float64"
self.directory = os.getcwd()
AMRHierarchy.__init__(self, pf, data_style)
@@ -124,17 +124,20 @@
pass
def _count_grids(self):
- self.num_grids = self.stream_handlers.num_grids
+ self.num_grids = self.stream_handler.num_grids
+
+ def _setup_unknown_fields(self):
+ pass
def _parse_hierarchy(self):
- self.grid_dimensions = self.stream_handlers.dimensions
- self.grid_left_edge[:] = self.stream_handlers.left_edges
- self.grid_right_edge[:] = self.stream_handlers.right_edges
- self.grid_levels[:] = self.stream_handlers.levels
- self.grid_procs = self.stream_handlers.processor_ids
- self.grid_particle_count[:] = self.stream_handlers.particle_count
+ self.grid_dimensions = self.stream_handler.dimensions
+ self.grid_left_edge[:] = self.stream_handler.left_edges
+ self.grid_right_edge[:] = self.stream_handler.right_edges
+ self.grid_levels[:] = self.stream_handler.levels
+ self.grid_procs = self.stream_handler.processor_ids
+ self.grid_particle_count[:] = self.stream_handler.particle_count
mylog.debug("Copying reverse tree")
- reverse_tree = self.enzo.hierarchy_information["GridParentIDs"].ravel().tolist()
+ reverse_tree = self.stream_handler.parent_ids.tolist()
# Initial setup:
mylog.debug("Reconstructing parent-child relationships")
self.grids = []
@@ -152,12 +155,12 @@
if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
grid.filename = None
grid._prepare_grid()
- grid.proc_num = self.grid_procs[i,0]
+ grid.proc_num = self.grid_procs[i]
self.grids = na.array(self.grids, dtype='object')
mylog.debug("Prepared")
def _initialize_grid_arrays(self):
- EnzoHierarchy._initialize_grid_arrays(self)
+ AMRHierarchy._initialize_grid_arrays(self)
self.grid_procs = na.zeros((self.num_grids,1),'int32')
def save_data(self, *args, **kwargs):
@@ -180,8 +183,22 @@
if field not in self.derived_field_list:
self.derived_field_list.append(field)
+ def _setup_classes(self):
+ dd = self._get_data_reader_dict()
+ AMRHierarchy._setup_classes(self, dd)
+ self.object_types.sort()
+
+ def _populate_grid_objects(self):
+ for g in self.grids:
+ g._setup_dx()
+ self.max_level = self.grid_levels.max()
+
+ def _setup_data_io(self):
+ self.io = io_registry[self.data_style](self.stream_handler)
+
class StreamStaticOutput(StaticOutput):
_hierarchy_class = StreamHierarchy
+ _fieldinfo_class = StreamFieldContainer
_data_style = 'stream'
def __init__(self, stream_handler):
@@ -190,6 +207,7 @@
#if conversion_override is None: conversion_override = {}
#self._conversion_override = conversion_override
+ self.stream_handler = stream_handler
StaticOutput.__init__(self, "InMemoryParameterFile", self._data_style)
self.field_info = self._fieldinfo_class()
@@ -203,7 +221,7 @@
self.refine_by = self.stream_handler.refine_by
self.dimensionality = self.stream_handler.dimensionality
self.domain_dimensions = self.stream_handler.domain_dimensions
- self.current_time = self.stream_handlers.simulation_time
+ self.current_time = self.stream_handler.simulation_time
if self.stream_handler.cosmology_simulation:
self.cosmological_simulation = 1
self.current_redshift = self.stream_handler.current_redshift
@@ -214,6 +232,9 @@
self.current_redshift = self.omega_lambda = self.omega_matter = \
self.hubble_constant = self.cosmological_simulation = 0.0
+ def _set_units(self):
+ pass
+
@classmethod
def _is_valid(cls, *args, **kwargs):
return False
http://bitbucket.org/yt_analysis/yt/changeset/bf1985f0e256/
changeset: bf1985f0e256
branches:
user: MatthewTurk
date: 2011-05-27 17:16:09
summary: Merging
affected #: 1 file (18 bytes)
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py Fri May 27 11:15:47 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py Fri May 27 11:16:09 2011 -0400
@@ -610,7 +610,7 @@
added_points = False
@parallel_blocking_call
- def write_out_means(self):
+ def write_out_means(self, fn = "%s.txt"):
r"""Writes out the weighted-average value for each function for
each dimension for each ruler length to a text file. The data is written
to files of the name 'function_name.txt' in the current working
@@ -621,7 +621,7 @@
>>> tpf.write_out_means()
"""
for fset in self._fsets:
- fp = self._write_on_root("%s.txt" % fset.function.__name__)
+ fp = self._write_on_root(fn % fset.function.__name__)
fset._avg_bin_hits()
line = "# length".ljust(sep)
line += "count".ljust(sep)
@@ -643,7 +643,7 @@
fp.close()
@parallel_root_only
- def write_out_arrays(self):
+ def write_out_arrays(self, fn = "%s.h5"):
r"""Writes out the raw probability bins and the bin edges to an HDF5 file
for each of the functions. The files are named
'function_name.txt' and saved in the current working directory.
@@ -654,7 +654,7 @@
"""
if self.mine == 0:
for fset in self._fsets:
- f = h5py.File("%s.h5" % fset.function.__name__, "w")
+ f = h5py.File(fn % fset.function.__name__, "w")
bin_names = []
prob_names = []
bin_counts = []
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this message because the commit notification service is enabled for the
recipient address of this email.
More information about the yt-svn
mailing list