[yt-svn] commit/yt: 5 new changesets
commits-noreply at bitbucket.org
Thu Dec 5 07:37:42 PST 2013
5 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/57b8f420f120/
Changeset: 57b8f420f120
Branch: yt-3.0
User: jzuhone
Date: 2013-12-04 22:20:56
Summary: Need to make this work for 2/3D datasets.
Affected #: 1 file
diff -r c851d95ab9862cf24c761044953f657840b1c4fb -r 57b8f420f120c4b0f6b753e18dcb11d00d6dadd6 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -186,7 +186,7 @@
dims = np.array(self.shape)
ndims = len(dims)
self.wcs.wcs.crpix = 0.5*(dims+1)
- self.wcs.wcs.cdelt = [1.,1.]
+ self.wcs.wcs.cdelt = [1.]*ndims
self.wcs.wcs.crval = 0.5*(dims+1)
self.wcs.wcs.cunit = ["pixel"]*ndims
self.wcs.wcs.ctype = ["LINEAR"]*ndims
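The fix generalizes the per-axis pixel scale so the same fallback WCS works for both 2D images and 3D cubes. A minimal standalone sketch of the patched logic, assuming astropy.wcs as the backend (the frontend imports it as pywcs):

import numpy as np
from astropy import wcs

def make_pixel_wcs(shape):
    # Size every per-axis WCS attribute by ndims, so 2D and 3D
    # datasets are handled uniformly; the old [1., 1.] broke 3D.
    dims = np.array(shape)
    ndims = len(dims)
    w = wcs.WCS(naxis=ndims)
    w.wcs.crpix = 0.5*(dims+1)
    w.wcs.cdelt = [1.0]*ndims
    w.wcs.crval = 0.5*(dims+1)
    w.wcs.cunit = ["pixel"]*ndims
    w.wcs.ctype = ["LINEAR"]*ndims
    return w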
https://bitbucket.org/yt_analysis/yt/commits/80931cca9b95/
Changeset: 80931cca9b95
Branch: yt-3.0
User: jzuhone
Date: 2013-12-04 23:23:06
Summary: Getting the Athena frontend up and running on 3.0.
Affected #: 3 files
diff -r 57b8f420f120c4b0f6b753e18dcb11d00d6dadd6 -r 80931cca9b9583718cb7022db7e6d8c44d149dea yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -26,6 +26,8 @@
StaticOutput
from yt.utilities.definitions import \
mpc_conversion, sec_conversion
+from yt.utilities.lib import \
+ get_box_grids_level
from .fields import AthenaFieldInfo, KnownAthenaFields
from yt.data_objects.field_info_container import \
@@ -109,7 +111,7 @@
self.hierarchy_filename = self.parameter_file.filename
#self.directory = os.path.dirname(self.hierarchy_filename)
self._fhandle = file(self.hierarchy_filename,'rb')
- AMRHierarchy.__init__(self, pf, data_style)
+ GridGeometryHandler.__init__(self, pf, data_style)
self._fhandle.close()
@@ -161,7 +163,7 @@
def _setup_classes(self):
dd = self._get_data_reader_dict()
- AMRHierarchy._setup_classes(self, dd)
+ GridGeometryHandler._setup_classes(self, dd)
self.object_types.sort()
def _count_grids(self):
@@ -305,13 +307,33 @@
for g in self.grids:
g._prepare_grid()
g._setup_dx()
+ self._reconstruct_parent_child()
+ """
for g in self.grids:
g.Children = self._get_grid_children(g)
for g1 in g.Children:
g1.Parent.append(g)
+ """
self.max_level = self.grid_levels.max()
+ def _reconstruct_parent_child(self):
+ mask = np.empty(len(self.grids), dtype='int32')
+ mylog.debug("First pass; identifying child grids")
+ for i, grid in enumerate(self.grids):
+ get_box_grids_level(self.grid_left_edge[i,:],
+ self.grid_right_edge[i,:],
+ self.grid_levels[i] + 1,
+ self.grid_left_edge, self.grid_right_edge,
+ self.grid_levels, mask)
+ #ids = np.where(mask.astype("bool")) # where is a tuple
+ #mask[ids] = True
+ grid.Children = [g for g in self.grids[mask.astype("bool")] if g.Level == grid.Level + 1]
+ mylog.debug("Second pass; identifying parents")
+ for i, grid in enumerate(self.grids): # Second pass
+ for child in grid.Children:
+ child.Parent.append(grid)
+
def _get_grid_children(self, grid):
mask = np.zeros(self.num_grids, dtype='bool')
grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
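The new _reconstruct_parent_child replaces the per-grid get_box_grids search with one masked pass per grid, then inverts the child relation in a second pass. A self-contained sketch of the same idea, with a plain NumPy box-overlap test standing in for yt's get_box_grids_level:

import numpy as np

def reconstruct_parent_child(left, right, level):
    # left, right: (N, 3) grid edges; level: (N,) refinement levels.
    # children[i] holds indices of grids one level finer that overlap
    # grid i; parents is the inverted relation (the second pass).
    n = len(level)
    children = [[] for _ in range(n)]
    parents = [[] for _ in range(n)]
    for i in range(n):
        overlap = np.all(right > left[i], axis=1) & \
                  np.all(left < right[i], axis=1)
        mask = overlap & (level == level[i] + 1)
        for j in np.where(mask)[0]:
            children[i].append(j)
            parents[j].append(i)
    return children, parents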
diff -r 57b8f420f120c4b0f6b753e18dcb11d00d6dadd6 -r 80931cca9b9583718cb7022db7e6d8c44d149dea yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -152,7 +152,7 @@
units=r"\rm{K}")
def _convertBfield(data):
- return np.sqrt(4*np.pi*data.convert("Density")*data.convert("x-velocity")**2)
+ return np.sqrt(4*np.pi*data.convert("Density")*data.convert("x-velocity")**2)
def _Bx(field, data):
return data['cell_centered_B_x']
add_field("Bx", function=_Bx, take_log=False,
diff -r 57b8f420f120c4b0f6b753e18dcb11d00d6dadd6 -r 80931cca9b9583718cb7022db7e6d8c44d149dea yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -15,6 +15,7 @@
from yt.utilities.io_handler import \
BaseIOHandler
import numpy as np
+from yt.funcs import mylog, defaultdict
class IOHandlerAthena(BaseIOHandler):
_data_style = "athena"
@@ -30,35 +31,45 @@
def _read_field_names(self,grid):
pass
- def _read_data(self,grid,field):
- f = file(grid.filename, 'rb')
- dtype, offsetr = grid.hierarchy._field_map[field]
- grid_ncells = np.prod(grid.ActiveDimensions)
- grid_dims = grid.ActiveDimensions
- grid0_ncells = np.prod(grid.hierarchy.grid_dimensions[0,:])
- read_table_offset = get_read_table_offset(f)
- if grid_ncells != grid0_ncells:
- offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
- if grid_ncells == grid0_ncells:
- offset = offsetr
- f.seek(read_table_offset+offset)
- if dtype == 'scalar':
- data = np.fromfile(f, dtype='>f4',
- count=grid_ncells).reshape(grid_dims,order='F').copy()
- if dtype == 'vector':
- data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
- if '_x' in field:
- data = data[0::3].reshape(grid_dims,order='F').copy()
- elif '_y' in field:
- data = data[1::3].reshape(grid_dims,order='F').copy()
- elif '_z' in field:
- data = data[2::3].reshape(grid_dims,order='F').copy()
- f.close()
- if grid.pf.field_ordering == 1:
- return data.T.astype("float64")
- else:
- return data.astype("float64")
-
+ def _read_chunk_data(self,chunk,fields):
+ data = {}
+ grids_by_file = defaultdict(list)
+ if len(chunk.objs) == 0: return data
+ field_list = set(f[1] for f in fields)
+ for grid in chunk.objs:
+ if grid.filename is None:
+ continue
+ f = open(grid.filename, "rb")
+ data[grid.id] = {}
+ grid_ncells = np.prod(grid.ActiveDimensions)
+ grid_dims = grid.ActiveDimensions
+ grid0_ncells = np.prod(grid.hierarchy.grid_dimensions[0,:])
+ read_table_offset = get_read_table_offset(f)
+ for field in self.pf.h.field_list:
+ dtype, offsetr = grid.hierarchy._field_map[field]
+ if grid_ncells != grid0_ncells:
+ offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
+ if grid_ncells == grid0_ncells:
+ offset = offsetr
+ f.seek(read_table_offset+offset)
+ if dtype == 'scalar':
+ v = np.fromfile(f, dtype='>f4',
+ count=grid_ncells).reshape(grid_dims,order='F').copy()
+ if dtype == 'vector':
+ v = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
+ if '_x' in field:
+ v = v[0::3].reshape(grid_dims,order='F').copy()
+ elif '_y' in field:
+ v = v[1::3].reshape(grid_dims,order='F').copy()
+ elif '_z' in field:
+ v = v[2::3].reshape(grid_dims,order='F').copy()
+ if grid.pf.field_ordering == 1:
+ data[grid.id][field] = v.T.astype("float64")
+ else:
+ data[grid.id][field] = v.astype("float64")
+ f.close()
+ return data
+
def _read_data_slice(self, grid, field, axis, coord):
sl = [slice(None), slice(None), slice(None)]
sl[axis] = slice(coord, coord + 1)
@@ -66,6 +77,27 @@
sl.reverse()
return self._read_data_set(grid, field)[sl]
+ def _read_fluid_selection(self, chunks, selector, fields, size):
+ chunks = list(chunks)
+ if any((ftype != "gas" for ftype, fname in fields)):
+ raise NotImplementedError
+ rv = {}
+ for field in fields:
+ rv[field] = np.empty(size, dtype="float64")
+ ng = sum(len(c.objs) for c in chunks)
+ mylog.debug("Reading %s cells of %s fields in %s grids",
+ size, [f2 for f1, f2 in fields], ng)
+ ind = 0
+ for chunk in chunks:
+ data = self._read_chunk_data(chunk, fields)
+ for g in chunk.objs:
+ for field in fields:
+ ftype, fname = field
+ ds = data[g.id].pop(fname)
+ nd = g.select(selector, ds, rv[field], ind) # caches
+ ind += nd
+ data.pop(g.id)
+ return rv
def get_read_table_offset(f):
line = f.readline()
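For reference, the vector branch in _read_chunk_data reads 3*ncells interleaved big-endian floats and strides out one component per field. A minimal sketch of that read, under the same on-disk layout assumptions:

import numpy as np

def read_vector_component(f, grid_dims, component):
    # Athena stores vectors interleaved as x0,y0,z0,x1,... in
    # big-endian float32; stride by 3 to extract one component,
    # then reshape in Fortran order to match the cell layout.
    ncells = int(np.prod(grid_dims))
    v = np.fromfile(f, dtype=">f4", count=3*ncells)
    return v[component::3].reshape(grid_dims, order="F").copy()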
https://bitbucket.org/yt_analysis/yt/commits/0c705833855d/
Changeset: 0c705833855d
Branch: yt-3.0
User: jzuhone
Date: 2013-12-04 23:24:28
Summary: Merging
Affected #: 17 files
diff -r 80931cca9b9583718cb7022db7e6d8c44d149dea -r 0c705833855d6ba302439b9a6c47a8cfce65edf1 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -53,6 +53,8 @@
if isinstance(arg, types.StringTypes):
if os.path.exists(arg):
valid_file.append(True)
+ elif arg.startswith("http"):
+ valid_file.append(True)
else:
if os.path.exists(os.path.join(ytcfg.get("yt", "test_data_dir"), arg)):
valid_file.append(True)
diff -r 80931cca9b9583718cb7022db7e6d8c44d149dea -r 0c705833855d6ba302439b9a6c47a8cfce65edf1 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -79,7 +79,7 @@
obj.__init__(filename, *args, **kwargs)
return obj
apath = os.path.abspath(filename)
- if not os.path.exists(apath): raise IOError(filename)
+ #if not os.path.exists(apath): raise IOError(filename)
if apath not in _cached_pfs:
obj = object.__new__(cls)
if obj._skip_cache is False:
diff -r 80931cca9b9583718cb7022db7e6d8c44d149dea -r 0c705833855d6ba302439b9a6c47a8cfce65edf1 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -90,9 +90,13 @@
... SlicePlot(pf, "x", "Density").save()
"""
- def __init__(self, outputs, parallel = True ,**kwargs):
+ def __init__(self, outputs, parallel = True, setup_function = None,
+ **kwargs):
self.tasks = AnalysisTaskProxy(self)
self.params = TimeSeriesParametersContainer(self)
+ if setup_function is None:
+ setup_function = lambda a: None
+ self._setup_function = setup_function
self._pre_outputs = outputs[:]
for type_name in data_object_registry:
setattr(self, type_name, functools.partial(
@@ -104,7 +108,9 @@
# We can make this fancier, but this works
for o in self._pre_outputs:
if isinstance(o, types.StringTypes):
- yield load(o,**self.kwargs)
+ pf = load(o, **self.kwargs)
+ self._setup_function(pf)
+ yield pf
else:
yield o
@@ -116,7 +122,8 @@
return TimeSeriesData(self._pre_outputs[key], self.parallel)
o = self._pre_outputs[key]
if isinstance(o, types.StringTypes):
- o = load(o,**self.kwargs)
+ o = load(o, **self.kwargs)
+ self._setup_function(o)
return o
def __len__(self):
@@ -163,7 +170,12 @@
This demonstrates how one might store results:
- >>> ts = TimeSeriesData.from_filenames("DD*/DD*.hierarchy")
+ >>> def print_time(pf):
+ ... print pf.current_time
+ ...
+ >>> ts = TimeSeriesData.from_filenames("DD*/DD*.hierarchy",
+ ... setup_function = print_time )
+ ...
>>> my_storage = {}
>>> for sto, pf in ts.piter(storage=my_storage):
... v, c = pf.h.find_max("Density")
@@ -215,7 +227,8 @@
return [v for k, v in sorted(return_values.items())]
@classmethod
- def from_filenames(cls, filenames, parallel = True, **kwargs):
+ def from_filenames(cls, filenames, parallel = True, setup_function = None,
+ **kwargs):
r"""Create a time series from either a filename pattern or a list of
filenames.
@@ -239,12 +252,19 @@
this is set to either True or an integer, it will be iterated with
1 or that integer number of processors assigned to each parameter
file provided to the loop.
+ setup_function : callable, accepts a pf
+ This function will be called whenever a parameter file is loaded.
Examples
--------
+ >>> def print_time(pf):
+ ... print pf.current_time
+ ...
>>> ts = TimeSeriesData.from_filenames(
- "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0")
+ ... "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0",
+ ... setup_function = print_time)
+ ...
>>> for pf in ts:
... SlicePlot(pf, "x", "Density").save()
@@ -262,7 +282,8 @@
else:
filenames = glob.glob(filenames)
filenames.sort()
- obj = cls(filenames[:], parallel = parallel, **kwargs)
+ obj = cls(filenames[:], parallel = parallel,
+ setup_function = setup_function, **kwargs)
return obj
@classmethod
diff -r 80931cca9b9583718cb7022db7e6d8c44d149dea -r 0c705833855d6ba302439b9a6c47a8cfce65edf1 yt/extern/progressbar/progressbar.py
--- a/yt/extern/progressbar/progressbar.py
+++ b/yt/extern/progressbar/progressbar.py
@@ -410,7 +410,9 @@
from IPython.display import Javascript, display
# First delete the node that held the progress bar from the page
js = """var element = document.getElementById('%s');
- element.parentNode.removeChild(element);""" % self.uuid
+ var parent = element.parentNode
+ parent.removeChild(element);
+ parent.parentElement.remove();""" % self.uuid
display(Javascript(js))
# Then also remove its trace from the cell output (so it doesn't get
diff -r 80931cca9b9583718cb7022db7e6d8c44d149dea -r 0c705833855d6ba302439b9a6c47a8cfce65edf1 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -459,3 +459,23 @@
validators=[ValidateParameter("normal"),
ValidateParameter("center")])
+
+def add_particle_average(registry, ptype, field_name,
+ weight = "particle_mass",
+ density = True):
+ def _pfunc_avg(field, data):
+ pos = data[ptype, "Coordinates"]
+ f = data[ptype, field_name]
+ wf = data[ptype, weight]
+ f *= wf
+ v = data.deposit(pos, [f], method = "sum")
+ w = data.deposit(pos, [wf], method = "sum")
+ v /= w
+ if density: v /= data["CellVolume"]
+ v[np.isnan(v)] = 0.0
+ return v
+ fn = ("deposit", "%s_avg_%s" % (ptype, field_name))
+ registry.add_field(fn, function=_pfunc_avg,
+ validators = [ValidateSpatial(0)],
+ particle_type = False)
+ return fn
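add_particle_average deposits the weighted particle quantity and the weight onto the mesh separately, divides them, and registers the result as a mesh field. A hedged usage sketch; the particle type, field name, and the pf.field_info registry handle are illustrative:

# Hypothetical usage: registers a mesh field holding the
# mass-weighted mean particle temperature per cell.
fn = add_particle_average(pf.field_info, "io", "particle_temperature",
                          weight = "particle_mass", density = False)
ad = pf.h.all_data()
print ad[fn]   # fn == ("deposit", "io_avg_particle_temperature")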
diff -r 80931cca9b9583718cb7022db7e6d8c44d149dea -r 0c705833855d6ba302439b9a6c47a8cfce65edf1 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -842,16 +842,22 @@
else:
self.current_redshift = self.omega_lambda = self.omega_matter = \
self.hubble_constant = self.cosmological_simulation = 0.0
- self.particle_types = ["io"]
+ self.particle_types = []
if self.parameters["NumberOfParticles"] > 0 and \
"AppendActiveParticleType" in self.parameters.keys():
# If this is the case, then we know we should have a DarkMatter
# particle type, and we don't need the "io" type.
- self.particle_types = ["DarkMatter"]
self.parameters["AppendActiveParticleType"].append("DarkMatter")
+ else:
+ # We do not have an "io" type for Enzo particles if the
+ # ActiveParticle machinery is on, as we simply will ignore any of
+ # the non-DarkMatter particles in that case. However, for older
+ # datasets, we call this particle type "io".
+ self.particle_types = ["io"]
for ptype in self.parameters.get("AppendActiveParticleType", []):
self.particle_types.append(ptype)
self.particle_types = tuple(self.particle_types)
+ self.particle_types_raw = self.particle_types
if self.dimensionality == 1:
self._setup_1d()
diff -r 80931cca9b9583718cb7022db7e6d8c44d149dea -r 0c705833855d6ba302439b9a6c47a8cfce65edf1 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -37,36 +37,43 @@
load
class EnzoSimulation(SimulationTimeSeries):
- r"""Class for creating TimeSeriesData object from an Enzo
- simulation parameter file.
+ r"""Initialize an Enzo Simulation object.
+
+ Upon creation, the parameter file is parsed and the time and redshift
+ are calculated and stored in all_outputs. A time units dictionary is
+ instantiated to allow for time outputs to be requested with physical
+ time units. The get_time_series can be used to generate a
+ TimeSeriesData object.
+
+ parameter_filename : str
+ The simulation parameter file.
+ find_outputs : bool
+ If True, subdirectories within the GlobalDir directory are
+ searched one by one for datasets. Time and redshift
+ information are gathered by temporarily instantiating each
+ dataset. This can be used when simulation data was created
+ in a non-standard way, making it difficult to guess the
+ corresponding time and redshift information.
+ Default: False.
+
+ Examples
+ --------
+ >>> from yt.mods import *
+ >>> es = EnzoSimulation("my_simulation.par")
+ >>> es.get_time_series()
+ >>> for pf in es:
+ ... print pf.current_time
+
+ >>> from yt.mods import *
+ >>> es = simulation("my_simulation.par", "Enzo")
+ >>> es.get_time_series()
+ >>> for pf in es:
+ ... print pf.current_time
+
"""
+
def __init__(self, parameter_filename, find_outputs=False):
- r"""Initialize an Enzo Simulation object.
- Upon creation, the parameter file is parsed and the time and redshift
- are calculated and stored in all_outputs. A time units dictionary is
- instantiated to allow for time outputs to be requested with physical
- time units. The get_time_series can be used to generate a
- TimeSeriesData object.
-
- parameter_filename : str
- The simulation parameter file.
- find_outputs : bool
- If True, subdirectories within the GlobalDir directory are
- searched one by one for datasets. Time and redshift
- information are gathered by temporarily instantiating each
- dataset. This can be used when simulation data was created
- in a non-standard way, making it difficult to guess the
- corresponding time and redshift information.
- Default: False.
-
- Examples
- --------
- >>> from yt.mods import *
- >>> es = ES.EnzoSimulation("my_simulation.par")
- >>> print es.all_outputs
-
- """
SimulationTimeSeries.__init__(self, parameter_filename,
find_outputs=find_outputs)
@@ -75,7 +82,7 @@
initial_redshift=None, final_redshift=None,
initial_cycle=None, final_cycle=None,
times=None, redshifts=None, tolerance=None,
- parallel=True):
+ parallel=True, setup_function=None):
"""
Instantiate a TimeSeriesData object for a set of outputs.
@@ -152,9 +159,15 @@
integer is supplied, the work will be divided into that
number of jobs.
Default: True.
+ setup_function : callable, accepts a pf
+ This function will be called whenever a parameter file is loaded.
Examples
--------
+
+ >>> from yt.mods import *
+ >>> es = simulation("my_simulation.par", "Enzo")
+
>>> es.get_time_series(initial_redshift=10, final_time=13.7,
time_units='Gyr', redshift_data=False)
@@ -166,9 +179,16 @@
>>> # after calling get_time_series
>>> for pf in es.piter():
- >>> pc = PlotCollection(pf, 'c')
- >>> pc.add_projection('Density', 0)
- >>> pc.save()
+ ... pc = PlotCollection(pf, 'c')
+ ... pc.add_projection('Density', 0)
+ ... pc.save()
+
+ >>> # An example using the setup_function keyword
+ >>> def print_time(pf):
+ ... print pf.current_time
+ >>> es.get_time_series(setup_function=print_time)
+ >>> for pf in es:
+ ... SlicePlot(pf, "x", "Density").save()
"""
@@ -242,7 +262,8 @@
if os.path.exists(output['filename']):
init_outputs.append(output['filename'])
- TimeSeriesData.__init__(self, outputs=init_outputs, parallel=parallel)
+ TimeSeriesData.__init__(self, outputs=init_outputs, parallel=parallel,
+ setup_function=setup_function)
mylog.info("%d outputs loaded into time series.", len(init_outputs))
def _parse_parameter_file(self):
diff -r 80931cca9b9583718cb7022db7e6d8c44d149dea -r 0c705833855d6ba302439b9a6c47a8cfce65edf1 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -35,6 +35,8 @@
from yt.utilities.io_handler import \
io_registry
from yt.utilities.physical_constants import cm_per_mpc
+from yt.utilities.exceptions import \
+ YTFITSHeaderNotUnderstood
from .fields import FITSFieldInfo, add_fits_field, KnownFITSFields
from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc, \
ValidateDataField, TranslationFunc
@@ -129,11 +131,14 @@
_handle = None
def __init__(self, filename, data_style='fits',
+ ignore_unit_names = False,
primary_header = None,
sky_conversion = None,
storage_filename = None,
- conversion_override = None):
+ conversion_override = None,
+ mask_nans = True):
+ self.mask_nans = mask_nans
if isinstance(filename, pyfits.HDUList):
self._handle = filename
fname = filename.filename()
@@ -155,7 +160,10 @@
self.wcs = pywcs.WCS(self.primary_header)
- if self.wcs.wcs.cunit[0].name in ["deg","arcsec","arcmin","mas"]:
+ name = getattr(self.wcs.wcs.cunit[0], "name", None)
+ if name is None and ignore_unit_names == False:
+ raise YTFITSHeaderNotUnderstood
+ if name in ["deg","arcsec","arcmin","mas"]:
self.sky_wcs = self.wcs.deepcopy()
if sky_conversion is None:
self._set_minimalist_wcs()
@@ -186,7 +194,7 @@
dims = np.array(self.shape)
ndims = len(dims)
self.wcs.wcs.crpix = 0.5*(dims+1)
- self.wcs.wcs.cdelt = [1.]*ndims
+ self.wcs.wcs.cdelt = [1.0]*ndims
self.wcs.wcs.crval = 0.5*(dims+1)
self.wcs.wcs.cunit = ["pixel"]*ndims
self.wcs.wcs.ctype = ["LINEAR"]*ndims
@@ -239,7 +247,8 @@
self.dimensionality = self.primary_header["naxis"]
self.geometry = "cartesian"
- self.domain_dimensions = np.array(self._handle[self.first_image].shape)
+ dims = self._handle[self.first_image].shape[::-1]
+ self.domain_dimensions = np.array(dims)
if self.dimensionality == 2:
self.domain_dimensions = np.append(self.domain_dimensions,
[int(1)])
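The domain_dimensions change reverses the array shape because FITS counts axes fastest-first (NAXIS1, NAXIS2, ...), the opposite of NumPy's shape ordering. A quick illustration, assuming astropy.io.fits (imported as pyfits in this frontend):

import numpy as np
from astropy.io import fits

hdu = fits.PrimaryHDU(np.zeros((128, 64)))         # NumPy shape (ny, nx)
print hdu.header["NAXIS1"], hdu.header["NAXIS2"]   # 64 128
dims = hdu.data.shape[::-1]                        # (64, 128), NAXIS order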
diff -r 80931cca9b9583718cb7022db7e6d8c44d149dea -r 0c705833855d6ba302439b9a6c47a8cfce65edf1 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -35,26 +35,6 @@
count_list, conv_factors):
pass
- def _read_data_set(self, grid, field):
- f = self._handle
- if self.pf.dimensionality == 2:
- nx,ny = f[field].data.tranpose().shape
- tr = f[field].data.transpose().reshape(nx,ny,1)
- elif self.pf.dimensionality == 3:
- tr = f[field].data.transpose()
- return tr.astype("float64")
-
- def _read_data_slice(self, grid, field, axis, coord):
- sl = [slice(None), slice(None), slice(None)]
- sl[axis] = slice(coord, coord + 1)
- f = self._handle
- if self.pf.dimensionality == 2:
- nx,ny = f[field].data.transpose().shape
- tr = f[field].data.transpose().reshape(nx,ny,1)[sl]
- elif self.pf.dimensionality == 3:
- tr = f[field].data.transpose()[sl]
- return tr.astype("float64")
-
def _read_fluid_selection(self, chunks, selector, fields, size):
chunks = list(chunks)
if any((ftype != "gas" for ftype, fname in fields)):
@@ -69,14 +49,13 @@
size, [f2 for f1, f2 in fields], ng)
for field in fields:
ftype, fname = field
- ds = f[fname].data.astype("float64")
+ ds = f[fname].data.astype("float64").transpose()
+ if self.pf.mask_nans:
+ ds[np.isnan(ds)] = 0.0
ind = 0
for chunk in chunks:
for g in chunk.objs:
if self.pf.dimensionality == 2:
- nx,ny = ds.transpose().shape
- data = ds.transpose().reshape(nx,ny,1)
- elif self.pf.dimensionality == 3:
- data = ds.transpose()
- ind += g.select(selector, data, rv[field], ind) # caches
+ ds.shape = ds.shape + (1,)
+ ind += g.select(selector, ds, rv[field], ind) # caches
return rv
diff -r 80931cca9b9583718cb7022db7e6d8c44d149dea -r 0c705833855d6ba302439b9a6c47a8cfce65edf1 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -72,9 +72,9 @@
def _detect_fields(self):
ncomp = self._handle["/unknown names"].shape[0]
- self.field_list = [s for s in self._handle["/unknown names"][:].flat]
+ self.field_list = [("gas", s) for s in self._handle["/unknown names"][:].flat]
if ("/particle names" in self._particle_handle) :
- self.field_list += ["particle_" + s[0].strip() for s
+ self.field_list += [("io", "particle_" + s[0].strip()) for s
in self._particle_handle["/particle names"][:]]
def _setup_classes(self):
@@ -176,26 +176,6 @@
g.dds[1] = DD
self.max_level = self.grid_levels.max()
- def _setup_derived_fields(self):
- super(FLASHHierarchy, self)._setup_derived_fields()
- [self.parameter_file.conversion_factors[field]
- for field in self.field_list]
- for field in self.field_list:
- if field not in self.derived_field_list:
- self.derived_field_list.append(field)
- if (field not in KnownFLASHFields and
- field.startswith("particle")) :
- self.parameter_file.field_info.add_field(
- field, function=NullFunc, take_log=False,
- validators = [ValidateDataField(field)],
- particle_type=True)
-
- for field in self.derived_field_list:
- f = self.parameter_file.field_info[field]
- if f._function.func_name == "_TranslationFunc":
- # Translating an already-converted field
- self.parameter_file.conversion_factors[field] = 1.0
-
class FLASHStaticOutput(StaticOutput):
_hierarchy_class = FLASHHierarchy
_fieldinfo_fallback = FLASHFieldInfo
diff -r 80931cca9b9583718cb7022db7e6d8c44d149dea -r 0c705833855d6ba302439b9a6c47a8cfce65edf1 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -20,11 +20,7 @@
NullFunc, \
TranslationFunc, \
FieldInfo, \
- ValidateParameter, \
- ValidateDataField, \
- ValidateProperty, \
- ValidateSpatial, \
- ValidateGridType
+ ValidateSpatial
import yt.fields.universal_fields
from yt.utilities.physical_constants import \
kboltz, mh, Na
@@ -241,7 +237,6 @@
if v not in KnownFLASHFields:
pfield = v.startswith("particle")
add_flash_field(v, function=NullFunc, take_log=False,
- validators = [ValidateDataField(v)],
particle_type = pfield)
if f.endswith("_Fraction") :
dname = "%s\/Fraction" % f.split("_")[0]
diff -r 80931cca9b9583718cb7022db7e6d8c44d149dea -r 0c705833855d6ba302439b9a6c47a8cfce65edf1 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -16,11 +16,25 @@
import numpy as np
import h5py
from yt.utilities.math_utils import prec_accum
+from itertools import groupby
from yt.utilities.io_handler import \
BaseIOHandler
from yt.utilities.logger import ytLogger as mylog
+# http://stackoverflow.com/questions/2361945/detecting-consecutive-integers-in-a-list
+def particle_sequences(grids):
+ g_iter = sorted(grids, key = lambda g: g.id)
+ for k, g in groupby(enumerate(g_iter), lambda (i,x):i-x.id):
+ seq = list(v[1] for v in g)
+ yield seq[0], seq[-1]
+
+def grid_sequences(grids):
+ g_iter = sorted(grids, key = lambda g: g.id)
+ for k, g in groupby(enumerate(g_iter), lambda (i,x):i-x.id):
+ seq = list(v[1] for v in g)
+ yield seq
+
class IOHandlerFLASH(BaseIOHandler):
_particle_reader = False
_data_style = "flash_hdf5"
@@ -43,6 +57,49 @@
count_list, conv_factors):
pass
+ def _read_particle_coords(self, chunks, ptf):
+ chunks = list(chunks)
+ f_part = self._particle_handle
+ p_ind = self.pf.h._particle_indices
+ px, py, pz = (self._particle_fields["particle_pos%s" % ax]
+ for ax in 'xyz')
+ p_fields = f_part["/tracer particles"]
+ assert(len(ptf) == 1)
+ ptype = ptf.keys()[0]
+ for chunk in chunks:
+ start = end = None
+ for g1, g2 in particle_sequences(chunk.objs):
+ start = p_ind[g1.id - g1._id_offset]
+ end = p_ind[g2.id - g2._id_offset + 1]
+ x = p_fields[start:end, px]
+ y = p_fields[start:end, py]
+ z = p_fields[start:end, pz]
+ yield ptype, (x, y, z)
+
+ def _read_particle_fields(self, chunks, ptf, selector):
+ chunks = list(chunks)
+ f_part = self._particle_handle
+ p_ind = self.pf.h._particle_indices
+ px, py, pz = (self._particle_fields["particle_pos%s" % ax]
+ for ax in 'xyz')
+ p_fields = f_part["/tracer particles"]
+ assert(len(ptf) == 1)
+ ptype = ptf.keys()[0]
+ field_list = ptf[ptype]
+ for chunk in chunks:
+ for g1, g2 in particle_sequences(chunk.objs):
+ start = p_ind[g1.id - g1._id_offset]
+ end = p_ind[g2.id - g2._id_offset + 1]
+ x = p_fields[start:end, px]
+ y = p_fields[start:end, py]
+ z = p_fields[start:end, pz]
+ mask = selector.select_points(x, y, z)
+ if mask is None: continue
+ for field in field_list:
+ fi = self._particle_fields[field]
+ data = p_fields[start:end, fi]
+ yield (ptype, field), data[mask]
+
def _read_data_set(self, grid, field):
f = self._handle
f_part = self._particle_handle
@@ -72,7 +129,6 @@
for field in fields:
ftype, fname = field
dt = f["/%s" % fname].dtype
- dt = prec_accum[dt]
if dt == "float32": dt = "float64"
rv[field] = np.empty(size, dtype=dt)
ng = sum(len(c.objs) for c in chunks)
@@ -83,9 +139,12 @@
ds = f["/%s" % fname]
ind = 0
for chunk in chunks:
- for g in chunk.objs:
- data = ds[g.id - g._id_offset,:,:,:].transpose()
- ind += g.select(selector, data, rv[field], ind) # caches
+ for gs in grid_sequences(chunk.objs):
+ start = gs[0].id - gs[0]._id_offset
+ end = gs[-1].id - gs[-1]._id_offset + 1
+ data = ds[start:end,:,:,:].transpose()
+ for i, g in enumerate(gs):
+ ind += g.select(selector, data[...,i], rv[field], ind)
return rv
def _read_chunk_data(self, chunk, fields):
@@ -97,8 +156,11 @@
ftype, fname = field
ds = f["/%s" % fname]
ind = 0
- for g in chunk.objs:
- data = ds[g.id - g._id_offset,:,:,:].transpose()
- rv[g.id][field] = data
+ for gs in grid_sequences(chunk.objs):
+ start = gs[0].id - gs[0]._id_offset
+ end = gs[-1].id - gs[-1]._id_offset + 1
+ data = ds[start:end,:,:,:].transpose()
+ for i, g in enumerate(gs):
+ rv[g.id][field] = data[...,i]
return rv
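The particle_sequences and grid_sequences helpers group consecutively numbered grids so contiguous HDF5 slabs (ds[start:end,:,:,:]) are read in one call instead of one read per grid. The enumerate/groupby trick from the linked answer, shown standalone (Python 2, matching the tuple-unpacking lambdas above):

from itertools import groupby

def consecutive_runs(ids):
    # Along a run of consecutive integers, index - value is
    # constant, so groupby splits exactly at the gaps.
    for k, run in groupby(enumerate(ids), lambda (i, v): i - v):
        run = [v for _, v in run]
        yield run[0], run[-1]

print list(consecutive_runs([0, 1, 2, 5, 6, 9]))
# [(0, 2), (5, 6), (9, 9)]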
diff -r 80931cca9b9583718cb7022db7e6d8c44d149dea -r 0c705833855d6ba302439b9a6c47a8cfce65edf1 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -20,6 +20,7 @@
import weakref
import struct
import glob
+import time
import os
from yt.utilities.fortran_utils import read_record
@@ -50,6 +51,11 @@
particle_deposition_functions, \
standard_particle_fields
+try:
+ import requests
+ import json
+except ImportError:
+ requests = None
class ParticleFile(object):
def __init__(self, pf, io, filename, file_id):
@@ -562,3 +568,79 @@
def _is_valid(self, *args, **kwargs):
# We do not allow load() of these files.
return False
+
+class HTTPParticleFile(ParticleFile):
+ pass
+
+class HTTPStreamStaticOutput(ParticleStaticOutput):
+ _hierarchy_class = ParticleGeometryHandler
+ _file_class = HTTPParticleFile
+ _fieldinfo_fallback = GadgetFieldInfo
+ _fieldinfo_known = KnownGadgetFields
+ _particle_mass_name = "Mass"
+ _particle_coordinates_name = "Coordinates"
+ _particle_velocity_name = "Velocities"
+ filename_template = ""
+
+ def __init__(self, base_url,
+ data_style = "http_particle_stream",
+ n_ref = 64, over_refine_factor=1):
+ if requests is None:
+ raise RuntimeError
+ self.base_url = base_url
+ self.n_ref = n_ref
+ self.over_refine_factor = over_refine_factor
+ super(HTTPStreamStaticOutput, self).__init__("", data_style)
+
+ def __repr__(self):
+ return self.base_url
+
+ def _parse_parameter_file(self):
+ self.dimensionality = 3
+ self.refine_by = 2
+ self.parameters["HydroMethod"] = "sph"
+
+ # Here's where we're going to grab the JSON index file
+ hreq = requests.get(self.base_url + "/yt_index.json")
+ if hreq.status_code != 200:
+ raise RuntimeError
+ header = json.loads(hreq.content)
+ header['particle_count'] = dict((int(k), header['particle_count'][k])
+ for k in header['particle_count'])
+ self.parameters = header
+
+ # Now we get what we need
+ self.domain_left_edge = np.array(header['domain_left_edge'], "float64")
+ self.domain_right_edge = np.array(header['domain_right_edge'], "float64")
+ nz = 1 << self.over_refine_factor
+ self.domain_dimensions = np.ones(3, "int32") * nz
+ self.periodicity = (True, True, True)
+
+ self.current_time = header['current_time']
+ self.unique_identifier = header.get("unique_identifier", time.time())
+ self.cosmological_simulation = int(header['cosmological_simulation'])
+ for attr in ('current_redshift', 'omega_lambda', 'omega_matter',
+ 'hubble_constant'):
+ setattr(self, attr, float(header[attr]))
+
+ self.file_count = header['num_files']
+
+ def _set_units(self):
+ length_unit = float(self.parameters['units']['length'])
+ time_unit = float(self.parameters['units']['time'])
+ mass_unit = float(self.parameters['units']['mass'])
+ density_unit = mass_unit / length_unit ** 3
+ velocity_unit = length_unit / time_unit
+ self._unit_base = {}
+ self._unit_base['cm'] = 1.0/length_unit
+ self._unit_base['s'] = 1.0/time_unit
+ super(HTTPStreamStaticOutput, self)._set_units()
+ self.conversion_factors["velocity"] = velocity_unit
+ self.conversion_factors["mass"] = mass_unit
+ self.conversion_factors["density"] = density_unit
+
+ @classmethod
+ def _is_valid(self, *args, **kwargs):
+ if args[0].startswith("http://"):
+ return True
+ return False
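_parse_parameter_file expects a yt_index.json document served under base_url. A hypothetical minimal index, written as the dict json.loads would return and carrying only the keys the parser reads (all values illustrative):

header = {
    "domain_left_edge": [0.0, 0.0, 0.0],
    "domain_right_edge": [1.0, 1.0, 1.0],
    "current_time": 0.0,
    "cosmological_simulation": 0,
    "current_redshift": 0.0, "omega_lambda": 0.7,
    "omega_matter": 0.3, "hubble_constant": 0.7,
    "num_files": 1,
    "particle_count": {"0": {"PartType0": 4096}},
    "units": {"length": 3.0857e24, "time": 3.156e16, "mass": 1.989e43},
    "field_list": [["PartType0", "Coordinates"]],
}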
diff -r 80931cca9b9583718cb7022db7e6d8c44d149dea -r 0c705833855d6ba302439b9a6c47a8cfce65edf1 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -28,6 +28,11 @@
from yt.geometry.oct_container import _ORDER_MAX
+try:
+ import requests
+except ImportError:
+ requests = None
+
CHUNKSIZE = 10000000
def _get_h5_handle(fn):
@@ -543,3 +548,90 @@
size = self._pdtypes[ptype].itemsize
pos += data_file.total_particles[ptype] * size
return field_offsets
+
+class IOHandlerHTTPStream(BaseIOHandler):
+ _data_style = "http_particle_stream"
+ _vector_fields = ("Coordinates", "Velocity", "Velocities")
+
+ def __init__(self, pf):
+ if requests is None:
+ raise RuntimeError
+ self._url = pf.base_url
+ # This should eventually manage the IO and cache it
+ self.total_bytes = 0
+ super(IOHandlerHTTPStream, self).__init__(pf)
+
+ def _open_stream(self, data_file, field):
+ # This does not actually stream yet!
+ ftype, fname = field
+ s = "%s/%s/%s/%s" % (self._url,
+ data_file.file_id, ftype, fname)
+ mylog.info("Loading URL %s", s)
+ resp = requests.get(s)
+ if resp.status_code != 200:
+ raise RuntimeError
+ self.total_bytes += len(resp.content)
+ return resp.content
+
+ def _identify_fields(self, data_file):
+ f = []
+ for ftype, fname in self.pf.parameters["field_list"]:
+ f.append((str(ftype), str(fname)))
+ return f
+
+ def _read_particle_coords(self, chunks, ptf):
+ chunks = list(chunks)
+ data_files = set([])
+ for chunk in chunks:
+ for obj in chunk.objs:
+ data_files.update(obj.data_files)
+ for data_file in data_files:
+ for ptype in ptf:
+ s = self._open_stream(data_file, (ptype, "Coordinates"))
+ c = np.frombuffer(s, dtype="float64")
+ c.shape = (c.shape[0]/3.0, 3)
+ yield ptype, (c[:,0], c[:,1], c[:,2])
+
+ def _read_particle_fields(self, chunks, ptf, selector):
+ # Now we have all the sizes, and we can allocate
+ data_files = set([])
+ for chunk in chunks:
+ for obj in chunk.objs:
+ data_files.update(obj.data_files)
+ for data_file in data_files:
+ for ptype, field_list in sorted(ptf.items()):
+ s = self._open_stream(data_file, (ptype, "Coordinates"))
+ c = np.frombuffer(s, dtype="float64")
+ c.shape = (c.shape[0]/3.0, 3)
+ mask = selector.select_points(
+ c[:,0], c[:,1], c[:,2])
+ del c
+ if mask is None: continue
+ for field in field_list:
+ s = self._open_stream(data_file, (ptype, field))
+ c = np.frombuffer(s, dtype="float64")
+ if field in self._vector_fields:
+ c.shape = (c.shape[0]/3.0, 3)
+ data = c[mask, ...]
+ yield (ptype, field), data
+
+ def _initialize_index(self, data_file, regions):
+ header = self.pf.parameters
+ ptypes = header["particle_count"][data_file.file_id].keys()
+ pcount = sum(header["particle_count"][data_file.file_id].values())
+ morton = np.empty(pcount, dtype='uint64')
+ ind = 0
+ for ptype in ptypes:
+ s = self._open_stream(data_file, (ptype, "Coordinates"))
+ c = np.frombuffer(s, dtype="float64")
+ c.shape = (c.shape[0]/3.0, 3)
+ regions.add_data_file(c, data_file.file_id)
+ morton[ind:ind+c.shape[0]] = compute_morton(
+ c[:,0], c[:,1], c[:,2],
+ data_file.pf.domain_left_edge,
+ data_file.pf.domain_right_edge)
+ ind += c.shape[0]
+ return morton
+
+ def _count_particles(self, data_file):
+ return self.pf.parameters["particle_count"][data_file.file_id]
diff -r 80931cca9b9583718cb7022db7e6d8c44d149dea -r 0c705833855d6ba302439b9a6c47a8cfce65edf1 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -181,8 +181,8 @@
# self.fields[g.id][fname] is the pattern here
pos = np.column_stack(self.fields[data_file.filename][
"particle_position_%s" % ax] for ax in 'xyz')
- if np.any(pos.min(axis=0) <= data_file.pf.domain_left_edge) or \
- np.any(pos.max(axis=0) >= data_file.pf.domain_right_edge):
+ if np.any(pos.min(axis=0) < data_file.pf.domain_left_edge) or \
+ np.any(pos.max(axis=0) > data_file.pf.domain_right_edge):
raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
data_file.pf.domain_left_edge,
data_file.pf.domain_right_edge)
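The comparison change accepts particles sitting exactly on the domain boundary; only strict overflow now raises YTDomainOverflow. For instance:

import numpy as np

left, right = np.zeros(3), np.ones(3)
pos = np.array([[0.0, 0.5, 1.0]])   # exactly on both edges
# The old test (<=, >=) flagged this as overflow; the new strict
# comparison accepts it.
print np.any(pos.min(axis=0) < left) or np.any(pos.max(axis=0) > right)
# False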
diff -r 80931cca9b9583718cb7022db7e6d8c44d149dea -r 0c705833855d6ba302439b9a6c47a8cfce65edf1 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -252,6 +252,7 @@
fi = self.parameter_file.field_info
# First we construct our list of fields to check
fields_to_check = []
+ fi_update = {}
for field in fi:
finfo = fi[field]
# Explicitly defined
@@ -268,9 +269,11 @@
for pt in self.parameter_file.particle_types:
new_fi = copy.copy(finfo)
new_fi.name = (pt, new_fi.name)
- fi[new_fi.name] = new_fi
+ fi_update[new_fi.name] = new_fi
new_fields.append(new_fi.name)
fields_to_check += new_fields
+ for field in fi_update:
+ fi[field] = fi_update[field]
return fields_to_check
def _derived_fields_add(self, fields_to_check = None):
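The fi_update staging dict matters because the loop iterates over fi itself; inserting the new (particle type, name) keys mid-iteration raises a RuntimeError in Python. A minimal illustration of the pitfall and the fix:

fi = {"Density": None}

# Buggy: grows the dict while iterating it.
# for field in fi: fi[("io", field)] = fi[field]
# -> RuntimeError: dictionary changed size during iteration

# The patch's approach: stage updates, apply after the loop.
fi_update = {}
for field in fi:
    fi_update[("io", field)] = fi[field]
fi.update(fi_update)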
diff -r 80931cca9b9583718cb7022db7e6d8c44d149dea -r 0c705833855d6ba302439b9a6c47a8cfce65edf1 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -311,3 +311,9 @@
v += "mass %0.3e. Multi-mass particles are not currently supported." % (
self.ma)
return v
+
+class YTFITSHeaderNotUnderstood(YTException):
+ def __str__(self):
+ return "This FITS header is not recognizable in its current form.\n" + \
+ "If you would like to force loading, specify: \n" + \
+ "ignore_unit_names = True"
https://bitbucket.org/yt_analysis/yt/commits/2b491501e39e/
Changeset: 2b491501e39e
Branch: yt-3.0
User: jzuhone
Date: 2013-12-05 03:17:27
Summary: Subtly refactoring the FITS frontend. For simplicity, "code units" shall always refer to pixel units, where the center of the left-most edge pixel is always (1,1,1).
Affected #: 1 file
diff -r 0c705833855d6ba302439b9a6c47a8cfce65edf1 -r 2b491501e39e15ec2810721b6634725c202057d3 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -41,6 +41,9 @@
from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc, \
ValidateDataField, TranslationFunc
+angle_units = ["deg","arcsec","arcmin","mas"]
+all_units = angle_units + mpc_conversion.keys()
+
class FITSGrid(AMRGridPatch):
_id_offset = 0
def __init__(self, id, hierarchy, level):
@@ -131,12 +134,12 @@
_handle = None
def __init__(self, filename, data_style='fits',
- ignore_unit_names = False,
primary_header = None,
sky_conversion = None,
storage_filename = None,
conversion_override = None,
- mask_nans = True):
+ mask_nans = True,
+ nprocs=1):
self.mask_nans = mask_nans
if isinstance(filename, pyfits.HDUList):
@@ -149,7 +152,7 @@
if h.is_image and h.data is not None:
self.first_image = i
break
-
+
if primary_header is None:
self.primary_header = self._handle[self.first_image].header
else:
@@ -158,30 +161,22 @@
if conversion_override is None: conversion_override = {}
self._conversion_override = conversion_override
- self.wcs = pywcs.WCS(self.primary_header)
-
- name = getattr(self.wcs.wcs.cunit[0], "name", None)
- if name is None and ignore_unit_names == False:
- raise YTFITSHeaderNotUnderstood
- if name in ["deg","arcsec","arcmin","mas"]:
- self.sky_wcs = self.wcs.deepcopy()
- if sky_conversion is None:
- self._set_minimalist_wcs()
- else:
- dims = np.array(self.shape)
- ndims = len(self.shape)
- new_unit = sky_conversion[1]
- new_deltx = np.abs(self.wcs.wcs.cdelt[0])*sky_conversion[0]
- new_delty = np.abs(self.wcs.wcs.cdelt[1])*sky_conversion[0]
- self.wcs.wcs.cdelt = [new_deltx, new_delty]
- self.wcs.wcs.crpix = 0.5*(dims+1)
- self.wcs.wcs.crval = [0.0]*2
- self.wcs.wcs.cunit = [new_unit]*2
- self.wcs.wcs.ctype = ["LINEAR"]*2
-
- if not all(key in self.primary_header for key in
- ["CRPIX1","CRVAL1","CDELT1","CUNIT1"]):
- self._set_minimalist_wcs()
+ self.wcs = pywcs.WCS(header=self.primary_header)
+
+ for i, unit in enumerate(self.wcs.wcs.cunit):
+ if unit in all_units:
+ self.file_unit = unit.name
+ idx = i
+ break
+ self.new_unit = None
+ self.pixel_scale = 1.0
+ if self.file_unit in angle_units:
+ if sky_conversion is not None:
+ self.new_unit = sky_conversion[1]
+ self.pixel_scale = np.abs(self.wcs.wcs.cdelt[idx])*sky_conversion[0]
+ elif self.file_unit in mpc_conversion:
+ self.new_unit = self.file_unit
+ self.pixel_scale = self.wcs.wcs.cdelt[idx]
StaticOutput.__init__(self, fname, data_style)
self.storage_filename = storage_filename
@@ -189,16 +184,6 @@
self.refine_by = 2
self._set_units()
- def _set_minimalist_wcs(self):
- mylog.warning("Could not determine WCS information. Using pixel units.")
- dims = np.array(self.shape)
- ndims = len(dims)
- self.wcs.wcs.crpix = 0.5*(dims+1)
- self.wcs.wcs.cdelt = [1.0]*ndims
- self.wcs.wcs.crval = 0.5*(dims+1)
- self.wcs.wcs.cunit = ["pixel"]*ndims
- self.wcs.wcs.ctype = ["LINEAR"]*ndims
-
def _set_units(self):
"""
Generates the conversion to various physical _units based on the parameter file
@@ -208,8 +193,7 @@
if len(self.parameters) == 0:
self._parse_parameter_file()
self.conversion_factors = defaultdict(lambda: 1.0)
- file_unit = self.wcs.wcs.cunit[0].name.lower()
- if file_unit in mpc_conversion:
+ if self.new_unit in mpc_conversion:
self._setup_getunits_units()
else:
self._setup_nounits_units()
@@ -226,9 +210,8 @@
pass
def _setup_getunits_units(self):
- file_unit = self.wcs.wcs.cunit[0].name.lower()
for unit in mpc_conversion.keys():
- self.units[unit] = mpc_conversion[unit]/mpc_conversion[file_unit]
+ self.units[unit] = self.pixel_scale*mpc_conversion[unit]/mpc_conversion[self.new_unit]
self.conversion_factors["Time"] = 1.0
def _setup_nounits_units(self):
@@ -252,22 +235,14 @@
if self.dimensionality == 2:
self.domain_dimensions = np.append(self.domain_dimensions,
[int(1)])
- ND = self.dimensionality
-
- le = [0.5]*ND
- re = [float(dim)+0.5 for dim in self.domain_dimensions]
- if ND == 2:
- xe, ye = self.wcs.wcs_pix2world([le[0],re[0]],
- [le[1],re[1]], 1)
- self.domain_left_edge = np.array([xe[0], ye[0], 0.0])
- self.domain_right_edge = np.array([xe[1], ye[1], 1.0])
- elif ND == 3:
- xe, ye, ze = world_edges = self.wcs.wcs_pix2world([le[0],re[0]],
- [le[1],re[1]],
- [le[2],re[2]], 1)
- self.domain_left_edge = np.array([xe[0], ye[0], ze[0]])
- self.domain_right_edge = np.array([xe[1], ye[1], ze[1]])
+
+ self.domain_left_edge = np.array([0.5]*3)
+ self.domain_right_edge = np.array([float(dim)+0.5 for dim in self.domain_dimensions])
+ if self.dimensionality == 2:
+ self.domain_left_edge[-1] = 0.0
+ self.domain_right_edge[-1] = 1.0
+
# Get the simulation time
try:
self.current_time = self.parameters["time"]
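Under the refactor, one code unit is one pixel and all physical conversions derive from pixel_scale and new_unit. A worked sketch of _setup_getunits_units with illustrative numbers (the mpc_conversion values mirror yt's per-Mpc table):

import numpy as np

# Suppose 0.5 arcsec pixels and 2.0 kpc per arcsec, i.e.
# sky_conversion = (2.0, "kpc").
cdelt, sky_conversion = 0.5, (2.0, "kpc")
pixel_scale = np.abs(cdelt)*sky_conversion[0]   # 1.0 kpc per pixel
new_unit = sky_conversion[1]

mpc_conversion = {"mpc": 1.0, "kpc": 1.0e3, "pc": 1.0e6}
units = dict((u, pixel_scale*mpc_conversion[u]/mpc_conversion[new_unit])
             for u in mpc_conversion)
print units["kpc"], units["mpc"]   # 1.0 0.001: one pixel is one kpc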
https://bitbucket.org/yt_analysis/yt/commits/c062561ed189/
Changeset: c062561ed189
Branch: yt-3.0
User: jzuhone
Date: 2013-12-05 03:18:58
Summary: Subtly refactoring the FITS frontend. For simplicity, "code units" will always refer to pixel units, where the center of the left-most edge pixel is always (1,1,1).
Affected #: 1 file
diff -r 2b491501e39e15ec2810721b6634725c202057d3 -r c062561ed189cf5493a7e38c79f4652189e32509 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -240,8 +240,8 @@
self.domain_right_edge = np.array([float(dim)+0.5 for dim in self.domain_dimensions])
if self.dimensionality == 2:
- self.domain_left_edge[-1] = 0.0
- self.domain_right_edge[-1] = 1.0
+ self.domain_left_edge[-1] = 0.5
+ self.domain_right_edge[-1] = 1.5
# Get the simulation time
try:
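With dim cells spanning [0.5, dim+0.5], cell centers fall on the integers 1..dim, and the degenerate axis of a 2D dataset now gets its single cell centered at 1.0, matching the convention in the summary. A quick check:

import numpy as np

dim = 4
edges = np.linspace(0.5, dim + 0.5, dim + 1)
centers = 0.5*(edges[:-1] + edges[1:])
print centers   # [ 1.  2.  3.  4.]: pixel centers on the integers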
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.