[yt-svn] commit/yt: 8 new changesets
commits-noreply at bitbucket.org
Sat Jun 6 08:29:34 PDT 2015
8 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/3f90860a1bbe/
Changeset: 3f90860a1bbe
Branch: yt
User: MatthewTurk
Date: 2015-05-21 21:38:54+00:00
Summary: Adding vector field specification for Gadget
Affected #: 1 file
diff -r c12349973350b9c9b2f4c60af9a7bc6b90ea42e2 -r 3f90860a1bbef349ff45e8e234bfa5c673d215b7 yt/frontends/gadget/io.py
--- a/yt/frontends/gadget/io.py
+++ b/yt/frontends/gadget/io.py
@@ -33,7 +33,9 @@
class IOHandlerGadgetBinary(BaseIOHandler):
_dataset_type = "gadget_binary"
- _vector_fields = ("Coordinates", "Velocity", "Velocities")
+ _vector_fields = (("Coordinates", 3),
+ ("Velocity", 3),
+ ("Velocities", 3))
# Particle types (Table 3 in GADGET-2 user guide)
#
@@ -54,6 +56,7 @@
_var_mass = None
def __init__(self, ds, *args, **kwargs):
+ self._vector_fields = dict(self._vector_fields)
self._fields = ds._field_spec
self._ptypes = ds._ptype_spec
super(IOHandlerGadgetBinary, self).__init__(ds, *args, **kwargs)
@@ -126,10 +129,11 @@
else:
dt = "float32"
if name in self._vector_fields:
- count *= 3
+ count *= self._vector_fields[name]
arr = np.fromfile(f, dtype=dt, count = count)
if name in self._vector_fields:
- arr = arr.reshape((count/3, 3), order="C")
+ factor = self._vector_fields[name]
+ arr = arr.reshape((count/factor, factor), order="C")
return arr.astype("float64")
def _initialize_index(self, data_file, regions):
@@ -177,7 +181,7 @@
offsets[(ptype, field)] = pos
any_ptypes = True
if field in self._vector_fields:
- pos += 3 * pcount[ptype] * fs
+ pos += self._vector_fields[field] * pcount[ptype] * fs
else:
pos += pcount[ptype] * fs
pos += 4
@@ -203,6 +207,8 @@
field, req = field
if req is ZeroMass:
if m > 0.0 : continue
+ elif isinstance(req, tuple) and ptype in req:
+ pass
elif req != ptype:
continue
field_list.append((ptype, field))
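For reference, a minimal standalone sketch of the generalized read path above: the per-field width from _vector_fields replaces the hard-coded 3 when reshaping the flat on-disk block (stand-in data; floor division used here for Python 3 compatibility):

    import numpy as np

    # _vector_fields now maps each field name to its per-particle width.
    vector_fields = dict((("Coordinates", 3), ("Velocity", 3), ("Velocities", 3)))
    name, count = "Coordinates", 12           # e.g. 4 particles * 3 components
    flat = np.arange(count, dtype="float32")  # stand-in for np.fromfile(...)
    factor = vector_fields[name]
    arr = flat.reshape((count // factor, factor), order="C")
    print(arr.shape)  # (4, 3)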
https://bitbucket.org/yt_analysis/yt/commits/d6fde921dac1/
Changeset: d6fde921dac1
Branch: yt
User: chummels
Date: 2015-05-25 20:31:29+00:00
Summary: Adding support for Gadget Nx4 fields that are not standard 3-component vector fields, for working with the Gadget format from the Dave+Oppenheimer group (group0000).
Affected #: 5 files
diff -r 3f90860a1bbef349ff45e8e234bfa5c673d215b7 -r d6fde921dac1baa3ef5610eca1842f7422a8f491 yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -136,6 +136,11 @@
self[item] = \
YTArray(np.ones((self.NumberOfParticles, 3)),
finfo.units, registry=self.ds.unit_registry)
+ elif "FourMetalFractions" in (item, item[1]):
+ self[item] = \
+ YTArray(np.ones((self.NumberOfParticles, 4)),
+ finfo.units, registry=self.ds.unit_registry)
+
else:
# Not a vector
self[item] = \
diff -r 3f90860a1bbef349ff45e8e234bfa5c673d215b7 -r d6fde921dac1baa3ef5610eca1842f7422a8f491 yt/frontends/gadget/definitions.py
--- a/yt/frontends/gadget/definitions.py
+++ b/yt/frontends/gadget/definitions.py
@@ -65,5 +65,22 @@
("Electron_Number_Density", "Gas"),
("HI_NumberDensity", "Gas"),
("SmoothingLength", "Gas"),
- )
+ ),
+ group0000 = ( "Coordinates",
+ "Velocities",
+ "ParticleIDs",
+ "Mass",
+ "Potential",
+ ("Temperature", "Gas"),
+ ("Density", "Gas"),
+ ("ElectronNumberDensity", "Gas"),
+ ("HI_NumberDensity", "Gas"),
+ ("SmoothingLength", "Gas"),
+ ("StarFormationRate", "Gas"),
+ ("DelayTime", "Gas"),
+ ("FourMetalFractions", ("Gas", "Stars")),
+ ("MaxTemperature", ("Gas", "Stars")),
+ ("NStarsSpawned", ("Gas", "Stars")),
+ ("StellarAge", "Stars")
+ ),
)
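Since group0000 above is the first spec to use tuple-valued requirements, here is a simplified standalone sketch of how such an entry is matched against a particle type, mirroring the isinstance(req, tuple) branch added to yt/frontends/gadget/io.py in the previous changeset:

    # req comes from a spec entry such as ("FourMetalFractions", ("Gas", "Stars")).
    def ptype_matches(req, ptype):
        if isinstance(req, tuple):        # e.g. ("Gas", "Stars")
            return ptype in req
        return req == ptype               # e.g. "Stars"

    print(ptype_matches(("Gas", "Stars"), "Stars"))  # True
    print(ptype_matches("Gas", "Stars"))             # False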
diff -r 3f90860a1bbef349ff45e8e234bfa5c673d215b7 -r d6fde921dac1baa3ef5610eca1842f7422a8f491 yt/frontends/gadget/fields.py
--- a/yt/frontends/gadget/fields.py
+++ b/yt/frontends/gadget/fields.py
@@ -21,10 +21,39 @@
class GadgetFieldInfo(SPHFieldInfo):
def setup_particle_fields(self, ptype, *args, **kwargs):
+
+ # setup some special fields that only make sense for SPH particles
+ if (ptype, "FourMetalFractions") in self.ds.field_list:
+ metal_fraction_names = ['C', 'O', 'Si', 'Fe']
+ for i in range(len(metal_fraction_names)):
+ def _Fraction_wrap(i):
+ def _Fraction(field,data):
+ return data[(ptype, 'FourMetalFractions')][:,i]
+ return _Fraction
+# def _Deposit_wrap(i):
+# def _deposit(field, data):
+# pos = data[ptype, coord_name]
+# mass = data[ptype, mass_name]
+# pos.convert_to_units("code_length")
+# mass.convert_to_units("code_mass")
+# d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
+# d = data.ds.arr(d, "code_mass")
+# d /= data["index", "cell_volume"]
+# return d
+# return _deposit
+
+ self.add_field( (ptype, metal_fraction_names[i]+"_fraction"),
+ function=_Fraction_wrap(i),
+ particle_type=True,
+ units="")
+
super(GadgetFieldInfo, self).setup_particle_fields(
ptype, *args, **kwargs)
- # setup some special fields that only make sense for SPH particles
+# self.add_field( ("deposit", "%s_%s" % (ptype, metal_fraction_names[i]+"Mass")),
+# function=_Deposit_wrap(i),
+# units="")
+
if ptype in ("PartType0", "Gas"):
self.setup_gas_particle_fields(ptype)
diff -r 3f90860a1bbef349ff45e8e234bfa5c673d215b7 -r d6fde921dac1baa3ef5610eca1842f7422a8f491 yt/frontends/gadget/io.py
--- a/yt/frontends/gadget/io.py
+++ b/yt/frontends/gadget/io.py
@@ -35,7 +35,8 @@
_dataset_type = "gadget_binary"
_vector_fields = (("Coordinates", 3),
("Velocity", 3),
- ("Velocities", 3))
+ ("Velocities", 3),
+ ("FourMetalFractions", 4))
# Particle types (Table 3 in GADGET-2 user guide)
#
diff -r 3f90860a1bbef349ff45e8e234bfa5c673d215b7 -r d6fde921dac1baa3ef5610eca1842f7422a8f491 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -166,7 +166,7 @@
fsize[field] += psize.get(field[0], 0)
for field in fields:
if field[1] in self._vector_fields:
- shape = (fsize[field], 3)
+ shape = (fsize[field], self._vector_fields[field[1]])
elif field[1] in self._array_fields:
shape = (fsize[field],)+self._array_fields[field[1]]
else:
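A small sketch of what the io_handler.py change above buys: read buffers are now sized from the per-field width, so an Nx4 block such as FourMetalFractions no longer has to pretend to be 3 wide (toy values below):

    import numpy as np

    vector_fields = {"Coordinates": 3, "Velocities": 3, "FourMetalFractions": 4}
    fsize = 10  # stand-in particle count for one field
    for fname in ("Coordinates", "FourMetalFractions"):
        shape = (fsize, vector_fields[fname])
        print(fname, np.empty(shape, dtype="float64").shape)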
https://bitbucket.org/yt_analysis/yt/commits/d858b8181259/
Changeset: d858b8181259
Branch: yt
User: chummels
Date: 2015-05-30 00:15:45+00:00
Summary: Adding further support for Gadget Nx4 fields to produce smoothed metal density fields.
Affected #: 2 files
diff -r d6fde921dac1baa3ef5610eca1842f7422a8f491 -r d858b81812593612ffd7c3ac2c074940dc88ab00 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -60,6 +60,10 @@
'Mg_fraction',
'Si_fraction',
'Fe_fraction',
+ 'C_density',
+ 'O_density',
+ 'Si_density',
+ 'Fe_density'
)
diff -r d6fde921dac1baa3ef5610eca1842f7422a8f491 -r d858b81812593612ffd7c3ac2c074940dc88ab00 yt/frontends/gadget/fields.py
--- a/yt/frontends/gadget/fields.py
+++ b/yt/frontends/gadget/fields.py
@@ -23,37 +23,40 @@
def setup_particle_fields(self, ptype, *args, **kwargs):
# setup some special fields that only make sense for SPH particles
+
+ # if the FourMetalFractions field (Nx4 field) exists, break
+ # it up into its four component metal fraction fields and
+ # corresponding metal density fields which later get smoothed
if (ptype, "FourMetalFractions") in self.ds.field_list:
metal_fraction_names = ['C', 'O', 'Si', 'Fe']
for i in range(len(metal_fraction_names)):
+
+ # add the metal fraction fields
def _Fraction_wrap(i):
def _Fraction(field,data):
return data[(ptype, 'FourMetalFractions')][:,i]
return _Fraction
-# def _Deposit_wrap(i):
-# def _deposit(field, data):
-# pos = data[ptype, coord_name]
-# mass = data[ptype, mass_name]
-# pos.convert_to_units("code_length")
-# mass.convert_to_units("code_mass")
-# d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
-# d = data.ds.arr(d, "code_mass")
-# d /= data["index", "cell_volume"]
-# return d
-# return _deposit
self.add_field( (ptype, metal_fraction_names[i]+"_fraction"),
function=_Fraction_wrap(i),
particle_type=True,
units="")
+ # add the metal density fields
+ def _Density_wrap(i):
+ def _Metal_density(field,data):
+ return data[(ptype, 'FourMetalFractions')][:,i] * \
+ data[(ptype, 'density')]
+ return _Metal_density
+
+ self.add_field( (ptype, metal_fraction_names[i]+"_density"),
+ function=_Density_wrap(i),
+ particle_type=True,
+ units="code_mass/code_length**3")
+
super(GadgetFieldInfo, self).setup_particle_fields(
ptype, *args, **kwargs)
-# self.add_field( ("deposit", "%s_%s" % (ptype, metal_fraction_names[i]+"Mass")),
-# function=_Deposit_wrap(i),
-# units="")
-
if ptype in ("PartType0", "Gas"):
self.setup_gas_particle_fields(ptype)
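Outside of yt's field machinery, the splitting above amounts to the following (random stand-in arrays; in the real derived fields the density factor carries code units):

    import numpy as np

    four_metal_fractions = np.random.random((5, 4))  # stand-in (ptype, 'FourMetalFractions')
    density = np.random.random(5)                    # stand-in (ptype, 'density')
    metal_names = ['C', 'O', 'Si', 'Fe']
    fields = {}
    for i, name in enumerate(metal_names):
        fields[name + '_fraction'] = four_metal_fractions[:, i]
        fields[name + '_density'] = four_metal_fractions[:, i] * density
    print(sorted(fields))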
https://bitbucket.org/yt_analysis/yt/commits/98b3bad6d406/
Changeset: 98b3bad6d406
Branch: yt
User: chummels
Date: 2015-05-30 00:16:33+00:00
Summary: Merging.
Affected #: 17 files
diff -r d858b81812593612ffd7c3ac2c074940dc88ab00 -r 98b3bad6d4065d85c5567f9ad186e4f109f4b20c .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -62,3 +62,4 @@
doc/source/reference/api/generated/*
doc/_temp/*
doc/source/bootcamp/.ipynb_checkpoints/
+dist
diff -r d858b81812593612ffd7c3ac2c074940dc88ab00 -r 98b3bad6d4065d85c5567f9ad186e4f109f4b20c .python-version
--- /dev/null
+++ b/.python-version
@@ -0,0 +1,1 @@
+2.7.9
diff -r d858b81812593612ffd7c3ac2c074940dc88ab00 -r 98b3bad6d4065d85c5567f9ad186e4f109f4b20c README
--- a/README
+++ b/README
@@ -20,4 +20,4 @@
For more information on installation, what to do if you run into problems, or
ways to help development, please visit our website.
-Enjoy!
+Enjoy!
\ No newline at end of file
diff -r d858b81812593612ffd7c3ac2c074940dc88ab00 -r 98b3bad6d4065d85c5567f9ad186e4f109f4b20c doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -116,7 +116,7 @@
the width of the smallest grid element in the simulation from the
last data snapshot (i.e. the one where time has evolved the
longest) in the time series:
- ``ds_last.index.get_smallest_dx() * ds_last['mpch']``.
+ ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``.
* ``total_particles``, if supplied, this is a pre-calculated
total number of dark matter
particles present in the simulation. For example, this is useful
diff -r d858b81812593612ffd7c3ac2c074940dc88ab00 -r 98b3bad6d4065d85c5567f9ad186e4f109f4b20c doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -469,6 +469,8 @@
first image in the primary file. If this is not the case,
yt will raise a warning and will not load this field.
+.. _additional_fits_options:
+
Additional Options
^^^^^^^^^^^^^^^^^^
@@ -570,6 +572,35 @@
``WCSAxes`` is still in an experimental state, but as its functionality improves it will be
utilized more here.
+``create_spectral_slabs``
+"""""""""""""""""""""""""
+
+.. note::
+
+ The following functionality requires the `spectral-cube <http://spectral-cube.readthedocs.org>`_
+ library to be installed.
+
+If you have a spectral intensity dataset of some sort, and would like to extract emission in
+particular slabs along the spectral axis of a certain width, ``create_spectral_slabs`` can be
+used to generate a dataset with these slabs as different fields. In this example, we use it
+to extract individual lines from an intensity cube:
+
+.. code-block:: python
+
+ slab_centers = {'13CN': (218.03117, 'GHz'),
+ 'CH3CH2CHO': (218.284256, 'GHz'),
+ 'CH3NH2': (218.40956, 'GHz')}
+ slab_width = (0.05, "GHz")
+ ds = create_spectral_slabs("intensity_cube.fits",
+ slab_centers, slab_width,
+ nan_mask=0.0)
+
+All keyword arguments to `create_spectral_slabs` are passed on to `load` when creating the dataset
+(see :ref:`additional_fits_options` above). In the returned dataset, the different slabs will be
+different fields, with the field names taken from the keys in ``slab_centers``. The WCS coordinates
+on the spectral axis are reset so that the center of the domain along this axis is zero, and the
+left and right edges of the domain along this axis are :math:`\pm` ``0.5*slab_width``.
+
Examples of Using FITS Data
^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff -r d858b81812593612ffd7c3ac2c074940dc88ab00 -r 98b3bad6d4065d85c5567f9ad186e4f109f4b20c doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -213,10 +213,31 @@
++++++++++++++++++++++++++++++++++++++
To install yt from source, you must make sure you have yt's dependencies
-installed on your system. These include: a C compiler, ``HDF5``, ``python``,
-``Cython``, ``NumPy``, ``matplotlib``, ``sympy``, and ``h5py``. From here, you
-can use ``pip`` (which comes with ``Python``) to install the latest stable
-version of yt:
+installed on your system.
+
+If you use a Linux OS, use your distro's package manager to install these yt
+dependencies on your system:
+
+- ``HDF5``
+- ``zeromq``
+- ``sqlite``
+- ``mercurial``
+
+Then install the required Python packages with ``pip``:
+
+.. code-block:: bash
+
+ $ pip install -r requirements.txt
+
+If you're using IPython notebooks, you can install its dependencies
+with ``pip`` as well:
+
+.. code-block:: bash
+
+ $ pip install -r optional-requirements.txt
+
+From here, you can use ``pip`` (which comes with ``Python``) to install the latest
+stable version of yt:
.. code-block:: bash
diff -r d858b81812593612ffd7c3ac2c074940dc88ab00 -r 98b3bad6d4065d85c5567f9ad186e4f109f4b20c doc/source/visualizing/sketchfab.rst
--- a/doc/source/visualizing/sketchfab.rst
+++ b/doc/source/visualizing/sketchfab.rst
@@ -56,7 +56,7 @@
import yt
ds = yt.load("/data/workshop2012/IsolatedGalaxy/galaxy0030/galaxy0030")
- sphere = ds.sphere("max", (1.0, "mpc"))
+ sphere = ds.sphere("max", (1.0, "Mpc"))
surface = ds.surface(sphere, "density", 1e-27)
This object, ``surface``, can be queried for values on the surface. For
@@ -172,7 +172,7 @@
trans = [1.0, 0.5]
filename = './surfaces'
- sphere = ds.sphere("max", (1.0, "mpc"))
+ sphere = ds.sphere("max", (1.0, "Mpc"))
for i,r in enumerate(rho):
surf = ds.surface(sphere, 'density', r)
surf.export_obj(filename, transparency = trans[i], color_field='temperature', plot_index = i)
@@ -248,7 +248,7 @@
return (data['density']*data['density']*np.sqrt(data['temperature']))
add_field("emissivity", function=_Emissivity, units=r"g*K/cm**6")
- sphere = ds.sphere("max", (1.0, "mpc"))
+ sphere = ds.sphere("max", (1.0, "Mpc"))
for i,r in enumerate(rho):
surf = ds.surface(sphere, 'density', r)
surf.export_obj(filename, transparency = trans[i],
diff -r d858b81812593612ffd7c3ac2c074940dc88ab00 -r 98b3bad6d4065d85c5567f9ad186e4f109f4b20c optional-requirements.txt
--- /dev/null
+++ b/optional-requirements.txt
@@ -0,0 +1,1 @@
+ipython[notebook]
\ No newline at end of file
diff -r d858b81812593612ffd7c3ac2c074940dc88ab00 -r 98b3bad6d4065d85c5567f9ad186e4f109f4b20c requirements.txt
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,6 @@
+numpy==1.9.2
+matplotlib==1.4.3
+Cython==0.22
+h5py==2.5.0
+nose==1.3.6
+sympy==0.7.6
diff -r d858b81812593612ffd7c3ac2c074940dc88ab00 -r 98b3bad6d4065d85c5567f9ad186e4f109f4b20c yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1232,9 +1232,8 @@
fglob = path.join(basedir, 'halos_%d.*.bin' % n)
files = glob.glob(fglob)
halos = self._get_halos_binary(files)
- #Jc = mass_sun_cgs/ ds['mpchcm'] * 1e5
Jc = 1.0
- length = 1.0 / ds['mpchcm']
+ length = 1.0 / ds['Mpchcm']
conv = dict(pos = np.array([length, length, length,
1, 1, 1]), # to unitary
r=1.0/ds['kpchcm'], # to unitary
diff -r d858b81812593612ffd7c3ac2c074940dc88ab00 -r 98b3bad6d4065d85c5567f9ad186e4f109f4b20c yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -729,7 +729,7 @@
>>> import yt
>>> ds = yt.load("RedshiftOutput0005")
- >>> sp = ds.sphere("max", (1.0, 'mpc'))
+ >>> sp = ds.sphere("max", (1.0, 'Mpc'))
>>> cr = ds.cut_region(sp, ["obj['temperature'] < 1e3"])
"""
_type_name = "cut_region"
diff -r d858b81812593612ffd7c3ac2c074940dc88ab00 -r 98b3bad6d4065d85c5567f9ad186e4f109f4b20c yt/fields/field_aliases.py
--- a/yt/fields/field_aliases.py
+++ b/yt/fields/field_aliases.py
@@ -141,12 +141,12 @@
("CellMassCode", "code_mass"),
("TotalMassMsun", "msun"),
("CellVolumeCode", "code_length"),
- ("CellVolumeMpc", "mpc**3"),
- ("ParticleSpecificAngularMomentumXKMSMPC","km/s/mpc"),
- ("ParticleSpecificAngularMomentumYKMSMPC","km/s/mpc"),
- ("ParticleSpecificAngularMomentumZKMSMPC","km/s/mpc"),
- ("RadiusMpc", "mpc"),
- ("ParticleRadiusMpc", "mpc"),
+ ("CellVolumeMpc", "Mpc**3"),
+ ("ParticleSpecificAngularMomentumXKMSMPC","km/s/Mpc"),
+ ("ParticleSpecificAngularMomentumYKMSMPC","km/s/Mpc"),
+ ("ParticleSpecificAngularMomentumZKMSMPC","km/s/Mpc"),
+ ("RadiusMpc", "Mpc"),
+ ("ParticleRadiusMpc", "Mpc"),
("ParticleRadiuskpc", "kpc"),
("Radiuskpc", "kpc"),
("ParticleRadiuskpch", "kpc"),
diff -r d858b81812593612ffd7c3ac2c074940dc88ab00 -r 98b3bad6d4065d85c5567f9ad186e4f109f4b20c yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -890,7 +890,7 @@
elif self.dimensionality == 2:
self._setup_2d()
- def set_code_units(self):
+ def _set_code_unit_attributes(self):
if self.cosmological_simulation:
k = self.cosmology_get_units()
# Now some CGS values
@@ -928,17 +928,6 @@
magnetic_unit = np.float64(magnetic_unit.in_cgs())
self.magnetic_unit = self.quan(magnetic_unit, "gauss")
- self._override_code_units()
-
- self.unit_registry.modify("code_magnetic", self.magnetic_unit)
- self.unit_registry.modify("code_length", self.length_unit)
- self.unit_registry.modify("code_mass", self.mass_unit)
- self.unit_registry.modify("code_time", self.time_unit)
- self.unit_registry.modify("code_velocity", self.velocity_unit)
- DW = self.arr(self.domain_right_edge - self.domain_left_edge, "code_length")
- self.unit_registry.add("unitary", float(DW.max() * DW.units.base_value),
- DW.units.dimensions)
-
def cosmology_get_units(self):
"""
Return an Enzo-fortran style dictionary of units to feed into custom
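The rename above moves the Enzo frontend onto the base-class hook pattern, with the override and unit-registry bookkeeping handled once in the shared base class. A hedged sketch of that pattern (class and attribute names below are illustrative stand-ins, not yt's exact base-class code):

    class DatasetSketch:
        def set_code_units(self):
            # shared bookkeeping (overrides, unit-registry updates) lives here,
            # which is why the removed lines above are gone from the frontend
            self._set_code_unit_attributes()

    class EnzoLikeDataset(DatasetSketch):
        def _set_code_unit_attributes(self):
            self.length_unit = 3.0857e24  # placeholder: cm per Mpc

    ds = EnzoLikeDataset()
    ds.set_code_units()
    print(ds.length_unit)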
diff -r d858b81812593612ffd7c3ac2c074940dc88ab00 -r 98b3bad6d4065d85c5567f9ad186e4f109f4b20c yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -165,7 +165,7 @@
units = self._determine_image_units(hdu.header, known_units)
try:
# Grab field name from btype
- fname = hdu.header["btype"].lower()
+ fname = hdu.header["btype"]
except KeyError:
# Try to guess the name from the units
fname = self._guess_name_from_units(units)
@@ -205,18 +205,6 @@
"the same dimensions as the primary and will not be " +
"available as a field.")
- # For line fields, we still read the primary field. Not sure how to extend this
- # For now, we pick off the first field from the field list.
- line_db = self.dataset.line_database
- primary_fname = self.field_list[0][1]
- for k, v in iteritems(line_db):
- mylog.info("Adding line field: %s at frequency %g GHz" % (k, v))
- self.field_list.append((self.dataset_type, k))
- self._ext_map[k] = self._ext_map[primary_fname]
- self._axis_map[k] = self._axis_map[primary_fname]
- self._file_map[k] = self._file_map[primary_fname]
- self.dataset.field_units[k] = self.dataset.field_units[primary_fname]
-
def _count_grids(self):
self.num_grids = self.ds.parameters["nprocs"]
@@ -242,19 +230,11 @@
bbox = np.array([[le,re] for le, re in zip(ds.domain_left_edge,
ds.domain_right_edge)])
dims = np.array(ds.domain_dimensions)
- # If we are creating a dataset of lines, only decompose along the position axes
- if len(ds.line_database) > 0:
- dims[ds.spec_axis] = 1
psize = get_psize(dims, self.num_grids)
gle, gre, shapes, slices = decompose_array(dims, psize, bbox)
self.grid_left_edge = self.ds.arr(gle, "code_length")
self.grid_right_edge = self.ds.arr(gre, "code_length")
self.grid_dimensions = np.array([shape for shape in shapes], dtype="int32")
- # If we are creating a dataset of lines, only decompose along the position axes
- if len(ds.line_database) > 0:
- self.grid_left_edge[:,ds.spec_axis] = ds.domain_left_edge[ds.spec_axis]
- self.grid_right_edge[:,ds.spec_axis] = ds.domain_right_edge[ds.spec_axis]
- self.grid_dimensions[:,ds.spec_axis] = ds.domain_dimensions[ds.spec_axis]
else:
self.grid_left_edge[0,:] = ds.domain_left_edge
self.grid_right_edge[0,:] = ds.domain_right_edge
@@ -322,8 +302,6 @@
nan_mask=None,
spectral_factor=1.0,
z_axis_decomp=False,
- line_database=None,
- line_width=None,
suppress_astropy_warnings=True,
parameters=None,
units_override=None):
@@ -336,19 +314,6 @@
self.z_axis_decomp = z_axis_decomp
self.spectral_factor = spectral_factor
- if line_width is not None:
- self.line_width = YTQuantity(line_width[0], line_width[1])
- self.line_units = line_width[1]
- mylog.info("For line folding, spectral_factor = 1.0")
- self.spectral_factor = 1.0
- else:
- self.line_width = None
-
- self.line_database = {}
- if line_database is not None:
- for k in line_database:
- self.line_database[k] = YTQuantity(line_database[k], self.line_units)
-
if suppress_astropy_warnings:
warnings.filterwarnings('ignore', module="astropy", append=True)
auxiliary_files = ensure_list(auxiliary_files)
@@ -361,13 +326,13 @@
self.nan_mask = {"all":nan_mask}
elif isinstance(nan_mask, dict):
self.nan_mask = nan_mask
- if isinstance(self.filenames[0], _astropy.pyfits.hdu.image._ImageBaseHDU):
- self._handle = FITSFileHandler(self.filenames[0])
- fn = "InMemoryFITSImage_%s" % (uuid.uuid4().hex)
+ self._handle = FITSFileHandler(self.filenames[0])
+ if (isinstance(self.filenames[0], _astropy.pyfits.hdu.image._ImageBaseHDU) or
+ isinstance(self.filenames[0], _astropy.pyfits.HDUList)):
+ fn = "InMemoryFITSFile_%s" % uuid.uuid4().hex
else:
- self._handle = FITSFileHandler(self.filenames[0])
fn = self.filenames[0]
- self._handle._fits_files = [self._handle]
+ self._handle._fits_files.append(self._handle)
if self.num_files > 1:
for fits_file in auxiliary_files:
if isinstance(fits_file, _astropy.pyfits.hdu.image._ImageBaseHDU):
@@ -540,20 +505,14 @@
# If nprocs is None, do some automatic decomposition of the domain
if self.specified_parameters["nprocs"] is None:
- if len(self.line_database) > 0:
- dims = 2
- else:
- dims = self.dimensionality
if self.z_axis_decomp:
nprocs = np.around(self.domain_dimensions[2]/8).astype("int")
else:
- nprocs = np.around(np.prod(self.domain_dimensions)/32**dims).astype("int")
+ nprocs = np.around(np.prod(self.domain_dimensions)/32**self.dimensionality).astype("int")
self.parameters["nprocs"] = max(min(nprocs, 512), 1)
else:
self.parameters["nprocs"] = self.specified_parameters["nprocs"]
- self.reversed = False
-
# Check to see if this data is in some kind of (Lat,Lon,Vel) format
self.spec_cube = False
x = 0
@@ -618,41 +577,23 @@
self._z0 = self.wcs.wcs.crval[self.spec_axis]
self.spec_unit = str(self.wcs.wcs.cunit[self.spec_axis])
- if self.line_width is not None:
- if self._dz < 0.0:
- self.reversed = True
- le = self.dims[self.spec_axis]+0.5
- else:
- le = 0.5
- self.line_width = self.line_width.in_units(self.spec_unit)
- self.freq_begin = (le-self._p0)*self._dz + self._z0
- # We now reset these so that they are consistent
- # with the new setup
- self._dz = np.abs(self._dz)
- self._p0 = 0.0
- self._z0 = 0.0
- nz = np.rint(self.line_width.value/self._dz).astype("int")
- self.line_width = self._dz*nz
- self.domain_left_edge[self.spec_axis] = -0.5*float(nz)
- self.domain_right_edge[self.spec_axis] = 0.5*float(nz)
- self.domain_dimensions[self.spec_axis] = nz
- else:
- if self.spectral_factor == "auto":
- self.spectral_factor = float(max(self.domain_dimensions[[self.lon_axis,
- self.lat_axis]]))
- self.spectral_factor /= self.domain_dimensions[self.spec_axis]
- mylog.info("Setting the spectral factor to %f" % (self.spectral_factor))
- Dz = self.domain_right_edge[self.spec_axis]-self.domain_left_edge[self.spec_axis]
- self.domain_right_edge[self.spec_axis] = self.domain_left_edge[self.spec_axis] + \
- self.spectral_factor*Dz
- self._dz /= self.spectral_factor
- self._p0 = (self._p0-0.5)*self.spectral_factor + 0.5
+ if self.spectral_factor == "auto":
+ self.spectral_factor = float(max(self.domain_dimensions[[self.lon_axis,
+ self.lat_axis]]))
+ self.spectral_factor /= self.domain_dimensions[self.spec_axis]
+ mylog.info("Setting the spectral factor to %f" % (self.spectral_factor))
+ Dz = self.domain_right_edge[self.spec_axis]-self.domain_left_edge[self.spec_axis]
+ self.domain_right_edge[self.spec_axis] = self.domain_left_edge[self.spec_axis] + \
+ self.spectral_factor*Dz
+ self._dz /= self.spectral_factor
+ self._p0 = (self._p0-0.5)*self.spectral_factor + 0.5
+
else:
self.wcs_2d = self.wcs
self.spec_axis = 2
self.spec_name = "z"
- self.spec_unit = "code length"
+ self.spec_unit = "code_length"
def spec2pixel(self, spec_value):
sv = self.arr(spec_value).in_units(self.spec_unit)
diff -r d858b81812593612ffd7c3ac2c074940dc88ab00 -r 98b3bad6d4065d85c5567f9ad186e4f109f4b20c yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -24,12 +24,6 @@
super(IOHandlerFITS, self).__init__(ds)
self.ds = ds
self._handle = ds._handle
- if self.ds.line_width is not None:
- self.line_db = self.ds.line_database
- self.dz = self.ds.line_width/self.domain_dimensions[self.ds.spec_axis]
- else:
- self.line_db = None
- self.dz = 1.
def _read_particles(self, fields_to_read, type, args, grid_list,
count_list, conv_factors):
@@ -79,32 +73,15 @@
dx = self.ds.domain_width/self.ds.domain_dimensions
for field in fields:
ftype, fname = field
- tmp_fname = fname
- if fname in self.ds.line_database:
- fname = self.ds.field_list[0][1]
f = self.ds.index._file_map[fname]
ds = f[self.ds.index._ext_map[fname]]
bzero, bscale = self.ds.index._scale_map[fname]
- fname = tmp_fname
ind = 0
for chunk in chunks:
for g in chunk.objs:
start = ((g.LeftEdge-self.ds.domain_left_edge)/dx).to_ndarray().astype("int")
end = start + g.ActiveDimensions
- if self.line_db is not None and fname in self.line_db:
- my_off = self.line_db.get(fname).in_units(self.ds.spec_unit).value
- my_off = my_off - 0.5*self.ds.line_width
- my_off = int((my_off-self.ds.freq_begin)/self.dz)
- my_off = max(my_off, 0)
- my_off = min(my_off, self.ds.dims[self.ds.spec_axis]-1)
- start[self.ds.spec_axis] += my_off
- end[self.ds.spec_axis] += my_off
- mylog.debug("Reading from " + str(start) + str(end))
slices = [slice(start[i],end[i]) for i in range(3)]
- if self.ds.reversed:
- new_start = self.ds.dims[self.ds.spec_axis]-1-start[self.ds.spec_axis]
- new_end = max(self.ds.dims[self.ds.spec_axis]-1-end[self.ds.spec_axis],0)
- slices[self.ds.spec_axis] = slice(new_start,new_end,-1)
if self.ds.dimensionality == 2:
nx, ny = g.ActiveDimensions[:2]
nz = 1
@@ -115,13 +92,6 @@
data = ds.data[idx,slices[2],slices[1],slices[0]].transpose()
else:
data = ds.data[slices[2],slices[1],slices[0]].transpose()
- if self.line_db is not None:
- nz1 = data.shape[self.ds.spec_axis]
- nz2 = g.ActiveDimensions[self.ds.spec_axis]
- if nz1 != nz2:
- old_data = data.copy()
- data = np.zeros(g.ActiveDimensions)
- data[:,:,nz2-nz1:] = old_data
if fname in self.ds.nan_mask:
data[np.isnan(data)] = self.ds.nan_mask[fname]
elif "all" in self.ds.nan_mask:
diff -r d858b81812593612ffd7c3ac2c074940dc88ab00 -r 98b3bad6d4065d85c5567f9ad186e4f109f4b20c yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -17,6 +17,8 @@
from yt.utilities.on_demand_imports import _astropy
from yt.funcs import mylog, get_image_suffix
from yt.visualization._mpl_imports import FigureCanvasAgg
+from yt.units.yt_array import YTQuantity, YTArray
+from yt.utilities.fits_image import FITSImageBuffer
import os
@@ -68,6 +70,70 @@
validators = [ValidateSpatial()],
display_name="Counts (%s-%s keV)" % (emin, emax))
+def create_spectral_slabs(filename, slab_centers, slab_width,
+ **kwargs):
+ r"""
+ Given a dictionary of spectral slab centers and a width in
+ spectral units, extract data from a spectral cube at these slab
+ centers and return a `FITSDataset` instance containing the different
+ slabs as separate yt fields. Useful for extracting individual
+ lines from a spectral cube and separating them out as different fields.
+
+ Requires the SpectralCube (http://spectral-cube.readthedocs.org)
+ library.
+
+ All keyword arguments will be passed on to the `FITSDataset` constructor.
+
+ Parameters
+ ----------
+ filename : string
+ The spectral cube FITS file to extract the data from.
+ slab_centers : dict of (float, string) tuples or YTQuantities
+ The centers of the slabs, where the keys are the names
+ of the new fields and the values are (float, string) tuples or
+ YTQuantities, specifying a value for each center and its unit.
+ slab_width : YTQuantity or (float, string) tuple
+ The width of the slab along the spectral axis.
+
+ Examples
+ --------
+ >>> slab_centers = {'13CN': (218.03117, 'GHz'),
+ ... 'CH3CH2CHO': (218.284256, 'GHz'),
+ ... 'CH3NH2': (218.40956, 'GHz')}
+ >>> slab_width = (0.05, "GHz")
+ >>> ds = create_spectral_slabs("intensity_cube.fits",
+ ... slab_centers, slab_width,
+ ... nan_mask=0.0)
+ """
+ from spectral_cube import SpectralCube
+ from yt.frontends.fits.api import FITSDataset
+ cube = SpectralCube.read(filename)
+ if not isinstance(slab_width, YTQuantity):
+ slab_width = YTQuantity(slab_width[0], slab_width[1])
+ slab_data = {}
+ field_units = cube.header.get("bunit", "dimensionless")
+ for k, v in slab_centers.items():
+ if not isinstance(v, YTQuantity):
+ slab_center = YTQuantity(v[0], v[1])
+ else:
+ slab_center = v
+ mylog.info("Adding slab field %s at %g %s" %
+ (k, slab_center.v, slab_center.units))
+ slab_lo = (slab_center-0.5*slab_width).to_astropy()
+ slab_hi = (slab_center+0.5*slab_width).to_astropy()
+ subcube = cube.spectral_slab(slab_lo, slab_hi)
+ slab_data[k] = YTArray(subcube.filled_data[:,:,:], field_units)
+ width = subcube.header["naxis3"]*cube.header["cdelt3"]
+ w = subcube.wcs.copy()
+ w.wcs.crpix[-1] = 0.5
+ w.wcs.crval[-1] = -0.5*width
+ fid = FITSImageBuffer(slab_data, wcs=w)
+ for hdu in fid:
+ hdu.header.pop("RESTFREQ", None)
+ hdu.header.pop("RESTFRQ", None)
+ ds = FITSDataset(fid, **kwargs)
+ return ds
+
def ds9_region(ds, reg, obj=None, field_parameters=None):
r"""
Create a data container from a ds9 region file. Requires the pyregion
diff -r d858b81812593612ffd7c3ac2c074940dc88ab00 -r 98b3bad6d4065d85c5567f9ad186e4f109f4b20c yt/utilities/file_handler.py
--- a/yt/utilities/file_handler.py
+++ b/yt/utilities/file_handler.py
@@ -54,12 +54,15 @@
class FITSFileHandler(HDF5FileHandler):
def __init__(self, filename):
from yt.utilities.on_demand_imports import _astropy
- if isinstance(filename, _astropy.pyfits.PrimaryHDU):
+ if isinstance(filename, _astropy.pyfits.hdu.image._ImageBaseHDU):
self.handle = _astropy.pyfits.HDUList(filename)
+ elif isinstance(filename, _astropy.pyfits.HDUList):
+ self.handle = filename
else:
self.handle = _astropy.pyfits.open(
filename, memmap=True, do_not_scale_image_data=True,
ignore_blank=True)
+ self._fits_files = []
def __del__(self):
for f in self._fits_files:
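A note on the many mpc -> Mpc fixes merged above: yt's unit names are case-sensitive, so lowercase "mpc" parses as a milliparsec while "Mpc" is a megaparsec, which is presumably why the docs and field aliases were corrected. A quick check:

    from yt.units.yt_array import YTQuantity

    print(YTQuantity(1.0, "Mpc").in_units("pc"))  # ~1e6 pc
    print(YTQuantity(1.0, "mpc").in_units("pc"))  # ~1e-3 pc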
https://bitbucket.org/yt_analysis/yt/commits/15ce90f03567/
Changeset: 15ce90f03567
Branch: yt
User: chummels
Date: 2015-06-05 23:42:26+00:00
Summary: Modularizing and cleaning up code for adding Gadget metal fields according to PR comments.
Affected #: 1 file
diff -r 98b3bad6d4065d85c5567f9ad186e4f109f4b20c -r 15ce90f035670659af4dcfbaf294fda72fa38988 yt/frontends/gadget/fields.py
--- a/yt/frontends/gadget/fields.py
+++ b/yt/frontends/gadget/fields.py
@@ -24,35 +24,8 @@
# setup some special fields that only make sense for SPH particles
- # if the FourMetalFractions field (Nx4 field) exists, break
- # it up into its four component metal fraction fields and
- # corresponding metal density fields which later get smoothed
if (ptype, "FourMetalFractions") in self.ds.field_list:
- metal_fraction_names = ['C', 'O', 'Si', 'Fe']
- for i in range(len(metal_fraction_names)):
-
- # add the metal fraction fields
- def _Fraction_wrap(i):
- def _Fraction(field,data):
- return data[(ptype, 'FourMetalFractions')][:,i]
- return _Fraction
-
- self.add_field( (ptype, metal_fraction_names[i]+"_fraction"),
- function=_Fraction_wrap(i),
- particle_type=True,
- units="")
-
- # add the metal density fields
- def _Density_wrap(i):
- def _Metal_density(field,data):
- return data[(ptype, 'FourMetalFractions')][:,i] * \
- data[(ptype, 'density')]
- return _Metal_density
-
- self.add_field( (ptype, metal_fraction_names[i]+"_density"),
- function=_Density_wrap(i),
- particle_type=True,
- units="code_mass/code_length**3")
+ self._setup_four_metal_fractions()
super(GadgetFieldInfo, self).setup_particle_fields(
ptype, *args, **kwargs)
@@ -60,6 +33,42 @@
if ptype in ("PartType0", "Gas"):
self.setup_gas_particle_fields(ptype)
+ def _setup_four_metal_fractions(self):
+ """
+ This function breaks the FourMetalFractions field (if present)
+ into its four component metal fraction fields and adds
+ corresponding metal density fields which will later get smoothed
+
+ This gets used with the Gadget group0000 format
+ as defined in the gadget_field_specs in frontends/gadget/definitions.py
+ """
+ metal_names = ['C', 'O', 'Si', 'Fe']
+ for i in range(len(metal_fraction_names)):
+ for i, metal_name in enumerate(metal_names)
+
+ # add the metal fraction fields
+ def _Fraction_wrap(i):
+ def _Fraction(field,data):
+ return data[(ptype, 'FourMetalFractions')][:,i]
+ return _Fraction
+
+ self.add_field( (ptype, metal_name+"_fraction"),
+ function=_Fraction_wrap(i),
+ particle_type=True,
+ units="")
+
+ # add the metal density fields
+ def _Density_wrap(i):
+ def _Metal_density(field,data):
+ return data[(ptype, 'FourMetalFractions')][:,i] * \
+ data[(ptype, 'density')]
+ return _Metal_density
+
+ self.add_field( (ptype, metal_name+"_density"),
+ function=_Density_wrap(i),
+ particle_type=True,
+ units="g/cm**3")
+
def setup_gas_particle_fields(self, ptype):
if (ptype, "ElectronAbundance") in self.ds.field_list:
def _temperature(field, data):
https://bitbucket.org/yt_analysis/yt/commits/ae0604509422/
Changeset: ae0604509422
Branch: yt
User: chummels
Date: 2015-06-05 23:50:16+00:00
Summary: Correcting a bug in the FourMetalFractions Gadget field.
Affected #: 1 file
diff -r 15ce90f035670659af4dcfbaf294fda72fa38988 -r ae0604509422b5c64862302370efb9c5169655e6 yt/frontends/gadget/fields.py
--- a/yt/frontends/gadget/fields.py
+++ b/yt/frontends/gadget/fields.py
@@ -25,7 +25,7 @@
# setup some special fields that only make sense for SPH particles
if (ptype, "FourMetalFractions") in self.ds.field_list:
- self._setup_four_metal_fractions()
+ self._setup_four_metal_fractions(ptype)
super(GadgetFieldInfo, self).setup_particle_fields(
ptype, *args, **kwargs)
@@ -33,7 +33,7 @@
if ptype in ("PartType0", "Gas"):
self.setup_gas_particle_fields(ptype)
- def _setup_four_metal_fractions(self):
+ def _setup_four_metal_fractions(self, ptype):
"""
This function breaks the FourMetalFractions field (if present)
into its four component metal fraction fields and adds
@@ -43,12 +43,11 @@
as defined in the gadget_field_specs in frontends/gadget/definitions.py
"""
metal_names = ['C', 'O', 'Si', 'Fe']
- for i in range(len(metal_fraction_names)):
- for i, metal_name in enumerate(metal_names)
+ for i, metal_name in enumerate(metal_names):
# add the metal fraction fields
def _Fraction_wrap(i):
- def _Fraction(field,data):
+ def _Fraction(field, data):
return data[(ptype, 'FourMetalFractions')][:,i]
return _Fraction
@@ -59,7 +58,7 @@
# add the metal density fields
def _Density_wrap(i):
- def _Metal_density(field,data):
+ def _Metal_density(field, data):
return data[(ptype, 'FourMetalFractions')][:,i] * \
data[(ptype, 'density')]
return _Metal_density
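The _Fraction_wrap(i)/_Density_wrap(i) factories that survive this cleanup exist because Python closures capture variables, not values; defining the inner function directly in the loop would make every derived field read the last column. A minimal illustration:

    naive = [lambda: i for i in range(4)]                     # all share one i
    wrapped = [(lambda i: (lambda: i))(i) for i in range(4)]  # each binds its own i
    print([f() for f in naive])    # [3, 3, 3, 3]
    print([f() for f in wrapped])  # [0, 1, 2, 3]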
https://bitbucket.org/yt_analysis/yt/commits/b37c9f299b51/
Changeset: b37c9f299b51
Branch: yt
User: chummels
Date: 2015-06-06 00:16:25+00:00
Summary: Generalizing vector_fields to deal with any number of dimensions, but defaulting to 3 dimensions.
Affected #: 1 file
diff -r ae0604509422b5c64862302370efb9c5169655e6 -r b37c9f299b510c79df8bbe6b781548ac3fa161f0 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -49,6 +49,10 @@
self._last_selector_counts = None
self._array_fields = {}
self._cached_fields = {}
+ # Make sure _vector_fields is a dict of fields and their dimension
+ # and assume all non-specified vector fields are 3D
+ if not isinstance(self._vector_fields, dict):
+ self._vector_fields = dict((field, 3) for field in self._vector_fields)
# We need a function for reading a list of sets
# and a function for *popping* from a queue all the appropriate sets
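The effect of the normalization above, in isolation: a legacy frontend that still declares _vector_fields as a plain sequence of names gets every entry treated as 3-component, while dict-style declarations (like the Gadget one earlier in this series) pass through with their stated widths:

    legacy = ("Coordinates", "Velocity", "Velocities")
    normalized = dict((field, 3) for field in legacy)
    print(normalized)  # {'Coordinates': 3, 'Velocity': 3, 'Velocities': 3}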
https://bitbucket.org/yt_analysis/yt/commits/242b9f739613/
Changeset: 242b9f739613
Branch: yt
User: chummels
Date: 2015-06-06 00:17:47+00:00
Summary: Merging.
Affected #: 20 files
diff -r b37c9f299b510c79df8bbe6b781548ac3fa161f0 -r 242b9f739613f32ed715b6ec2298476bb88d1f3f distribute_setup.py
--- a/distribute_setup.py
+++ /dev/null
@@ -1,541 +0,0 @@
-#!python
-"""Bootstrap distribute installation
-
-If you want to use setuptools in your package's setup.py, just include this
-file in the same directory with it, and add this to the top of your setup.py::
-
- from distribute_setup import use_setuptools
- use_setuptools()
-
-If you want to require a specific version of setuptools, set a download
-mirror, or use an alternate download directory, you can do so by supplying
-the appropriate options to ``use_setuptools()``.
-
-This file can also be run as a script to install or upgrade setuptools.
-"""
-import os
-import shutil
-import sys
-import time
-import fnmatch
-import tempfile
-import tarfile
-import optparse
-
-from distutils import log
-
-try:
- from site import USER_SITE
-except ImportError:
- USER_SITE = None
-
-try:
- import subprocess
-
- def _python_cmd(*args):
- args = (sys.executable,) + args
- return subprocess.call(args) == 0
-
-except ImportError:
- # will be used for python 2.3
- def _python_cmd(*args):
- args = (sys.executable,) + args
- # quoting arguments if windows
- if sys.platform == 'win32':
- def quote(arg):
- if ' ' in arg:
- return '"%s"' % arg
- return arg
- args = [quote(arg) for arg in args]
- return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
-
-DEFAULT_VERSION = "0.6.32"
-DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
-SETUPTOOLS_FAKED_VERSION = "0.6c11"
-
-SETUPTOOLS_PKG_INFO = """\
-Metadata-Version: 1.0
-Name: setuptools
-Version: %s
-Summary: xxxx
-Home-page: xxx
-Author: xxx
-Author-email: xxx
-License: xxx
-Description: xxx
-""" % SETUPTOOLS_FAKED_VERSION
-
-
-def _install(tarball, install_args=()):
- # extracting the tarball
- tmpdir = tempfile.mkdtemp()
- log.warn('Extracting in %s', tmpdir)
- old_wd = os.getcwd()
- try:
- os.chdir(tmpdir)
- tar = tarfile.open(tarball)
- _extractall(tar)
- tar.close()
-
- # going in the directory
- subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
- os.chdir(subdir)
- log.warn('Now working in %s', subdir)
-
- # installing
- log.warn('Installing Distribute')
- if not _python_cmd('setup.py', 'install', *install_args):
- log.warn('Something went wrong during the installation.')
- log.warn('See the error message above.')
- # exitcode will be 2
- return 2
- finally:
- os.chdir(old_wd)
- shutil.rmtree(tmpdir)
-
-
-def _build_egg(egg, tarball, to_dir):
- # extracting the tarball
- tmpdir = tempfile.mkdtemp()
- log.warn('Extracting in %s', tmpdir)
- old_wd = os.getcwd()
- try:
- os.chdir(tmpdir)
- tar = tarfile.open(tarball)
- _extractall(tar)
- tar.close()
-
- # going in the directory
- subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
- os.chdir(subdir)
- log.warn('Now working in %s', subdir)
-
- # building an egg
- log.warn('Building a Distribute egg in %s', to_dir)
- _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
-
- finally:
- os.chdir(old_wd)
- shutil.rmtree(tmpdir)
- # returning the result
- log.warn(egg)
- if not os.path.exists(egg):
- raise IOError('Could not build the egg.')
-
-
-def _do_download(version, download_base, to_dir, download_delay):
- egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
- % (version, sys.version_info[0], sys.version_info[1]))
- if not os.path.exists(egg):
- tarball = download_setuptools(version, download_base,
- to_dir, download_delay)
- _build_egg(egg, tarball, to_dir)
- sys.path.insert(0, egg)
- import setuptools
- setuptools.bootstrap_install_from = egg
-
-
-def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
- to_dir=os.curdir, download_delay=15, no_fake=True):
- # making sure we use the absolute path
- to_dir = os.path.abspath(to_dir)
- was_imported = 'pkg_resources' in sys.modules or \
- 'setuptools' in sys.modules
- try:
- try:
- import pkg_resources
- if not hasattr(pkg_resources, '_distribute'):
- if not no_fake:
- _fake_setuptools()
- raise ImportError
- except ImportError:
- return _do_download(version, download_base, to_dir, download_delay)
- try:
- pkg_resources.require("distribute>=" + version)
- return
- except pkg_resources.VersionConflict:
- e = sys.exc_info()[1]
- if was_imported:
- sys.stderr.write(
- "The required version of distribute (>=%s) is not available,\n"
- "and can't be installed while this script is running. Please\n"
- "install a more recent version first, using\n"
- "'easy_install -U distribute'."
- "\n\n(Currently using %r)\n" % (version, e.args[0]))
- sys.exit(2)
- else:
- del pkg_resources, sys.modules['pkg_resources'] # reload ok
- return _do_download(version, download_base, to_dir,
- download_delay)
- except pkg_resources.DistributionNotFound:
- return _do_download(version, download_base, to_dir,
- download_delay)
- finally:
- if not no_fake:
- _create_fake_setuptools_pkg_info(to_dir)
-
-
-def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
- to_dir=os.curdir, delay=15):
- """Download distribute from a specified location and return its filename
-
- `version` should be a valid distribute version number that is available
- as an egg for download under the `download_base` URL (which should end
- with a '/'). `to_dir` is the directory where the egg will be downloaded.
- `delay` is the number of seconds to pause before an actual download
- attempt.
- """
- # making sure we use the absolute path
- to_dir = os.path.abspath(to_dir)
- try:
- from urllib.request import urlopen
- except ImportError:
- from urllib2 import urlopen
- tgz_name = "distribute-%s.tar.gz" % version
- url = download_base + tgz_name
- saveto = os.path.join(to_dir, tgz_name)
- src = dst = None
- if not os.path.exists(saveto): # Avoid repeated downloads
- try:
- log.warn("Downloading %s", url)
- src = urlopen(url)
- # Read/write all in one block, so we don't create a corrupt file
- # if the download is interrupted.
- data = src.read()
- dst = open(saveto, "wb")
- dst.write(data)
- finally:
- if src:
- src.close()
- if dst:
- dst.close()
- return os.path.realpath(saveto)
-
-
-def _no_sandbox(function):
- def __no_sandbox(*args, **kw):
- try:
- from setuptools.sandbox import DirectorySandbox
- if not hasattr(DirectorySandbox, '_old'):
- def violation(*args):
- pass
- DirectorySandbox._old = DirectorySandbox._violation
- DirectorySandbox._violation = violation
- patched = True
- else:
- patched = False
- except ImportError:
- patched = False
-
- try:
- return function(*args, **kw)
- finally:
- if patched:
- DirectorySandbox._violation = DirectorySandbox._old
- del DirectorySandbox._old
-
- return __no_sandbox
-
-
-def _patch_file(path, content):
- """Will backup the file then patch it"""
- existing_content = open(path).read()
- if existing_content == content:
- # already patched
- log.warn('Already patched.')
- return False
- log.warn('Patching...')
- _rename_path(path)
- f = open(path, 'w')
- try:
- f.write(content)
- finally:
- f.close()
- return True
-
-_patch_file = _no_sandbox(_patch_file)
-
-
-def _same_content(path, content):
- return open(path).read() == content
-
-
-def _rename_path(path):
- new_name = path + '.OLD.%s' % time.time()
- log.warn('Renaming %s to %s', path, new_name)
- os.rename(path, new_name)
- return new_name
-
-
-def _remove_flat_installation(placeholder):
- if not os.path.isdir(placeholder):
- log.warn('Unknown installation at %s', placeholder)
- return False
- found = False
- for file in os.listdir(placeholder):
- if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
- found = True
- break
- if not found:
- log.warn('Could not locate setuptools*.egg-info')
- return
-
- log.warn('Moving elements out of the way...')
- pkg_info = os.path.join(placeholder, file)
- if os.path.isdir(pkg_info):
- patched = _patch_egg_dir(pkg_info)
- else:
- patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
-
- if not patched:
- log.warn('%s already patched.', pkg_info)
- return False
- # now let's move the files out of the way
- for element in ('setuptools', 'pkg_resources.py', 'site.py'):
- element = os.path.join(placeholder, element)
- if os.path.exists(element):
- _rename_path(element)
- else:
- log.warn('Could not find the %s element of the '
- 'Setuptools distribution', element)
- return True
-
-_remove_flat_installation = _no_sandbox(_remove_flat_installation)
-
-
-def _after_install(dist):
- log.warn('After install bootstrap.')
- placeholder = dist.get_command_obj('install').install_purelib
- _create_fake_setuptools_pkg_info(placeholder)
-
-
-def _create_fake_setuptools_pkg_info(placeholder):
- if not placeholder or not os.path.exists(placeholder):
- log.warn('Could not find the install location')
- return
- pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
- setuptools_file = 'setuptools-%s-py%s.egg-info' % \
- (SETUPTOOLS_FAKED_VERSION, pyver)
- pkg_info = os.path.join(placeholder, setuptools_file)
- if os.path.exists(pkg_info):
- log.warn('%s already exists', pkg_info)
- return
-
- log.warn('Creating %s', pkg_info)
- try:
- f = open(pkg_info, 'w')
- except EnvironmentError:
- log.warn("Don't have permissions to write %s, skipping", pkg_info)
- return
- try:
- f.write(SETUPTOOLS_PKG_INFO)
- finally:
- f.close()
-
- pth_file = os.path.join(placeholder, 'setuptools.pth')
- log.warn('Creating %s', pth_file)
- f = open(pth_file, 'w')
- try:
- f.write(os.path.join(os.curdir, setuptools_file))
- finally:
- f.close()
-
-_create_fake_setuptools_pkg_info = _no_sandbox(
- _create_fake_setuptools_pkg_info
-)
-
-
-def _patch_egg_dir(path):
- # let's check if it's already patched
- pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
- if os.path.exists(pkg_info):
- if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
- log.warn('%s already patched.', pkg_info)
- return False
- _rename_path(path)
- os.mkdir(path)
- os.mkdir(os.path.join(path, 'EGG-INFO'))
- pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
- f = open(pkg_info, 'w')
- try:
- f.write(SETUPTOOLS_PKG_INFO)
- finally:
- f.close()
- return True
-
-_patch_egg_dir = _no_sandbox(_patch_egg_dir)
-
-
-def _before_install():
- log.warn('Before install bootstrap.')
- _fake_setuptools()
-
-
-def _under_prefix(location):
- if 'install' not in sys.argv:
- return True
- args = sys.argv[sys.argv.index('install') + 1:]
- for index, arg in enumerate(args):
- for option in ('--root', '--prefix'):
- if arg.startswith('%s=' % option):
- top_dir = arg.split('root=')[-1]
- return location.startswith(top_dir)
- elif arg == option:
- if len(args) > index:
- top_dir = args[index + 1]
- return location.startswith(top_dir)
- if arg == '--user' and USER_SITE is not None:
- return location.startswith(USER_SITE)
- return True
-
-
-def _fake_setuptools():
- log.warn('Scanning installed packages')
- try:
- import pkg_resources
- except ImportError:
- # we're cool
- log.warn('Setuptools or Distribute does not seem to be installed.')
- return
- ws = pkg_resources.working_set
- try:
- setuptools_dist = ws.find(
- pkg_resources.Requirement.parse('setuptools', replacement=False)
- )
- except TypeError:
- # old distribute API
- setuptools_dist = ws.find(
- pkg_resources.Requirement.parse('setuptools')
- )
-
- if setuptools_dist is None:
- log.warn('No setuptools distribution found')
- return
- # detecting if it was already faked
- setuptools_location = setuptools_dist.location
- log.warn('Setuptools installation detected at %s', setuptools_location)
-
- # if --root or --preix was provided, and if
- # setuptools is not located in them, we don't patch it
- if not _under_prefix(setuptools_location):
- log.warn('Not patching, --root or --prefix is installing Distribute'
- ' in another location')
- return
-
- # let's see if its an egg
- if not setuptools_location.endswith('.egg'):
- log.warn('Non-egg installation')
- res = _remove_flat_installation(setuptools_location)
- if not res:
- return
- else:
- log.warn('Egg installation')
- pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
- if (os.path.exists(pkg_info) and
- _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
- log.warn('Already patched.')
- return
- log.warn('Patching...')
- # let's create a fake egg replacing setuptools one
- res = _patch_egg_dir(setuptools_location)
- if not res:
- return
- log.warn('Patching complete.')
- _relaunch()
-
-
-def _relaunch():
- log.warn('Relaunching...')
- # we have to relaunch the process
- # pip marker to avoid a relaunch bug
- _cmd1 = ['-c', 'install', '--single-version-externally-managed']
- _cmd2 = ['-c', 'install', '--record']
- if sys.argv[:3] == _cmd1 or sys.argv[:3] == _cmd2:
- sys.argv[0] = 'setup.py'
- args = [sys.executable] + sys.argv
- sys.exit(subprocess.call(args))
-
-
-def _extractall(self, path=".", members=None):
- """Extract all members from the archive to the current working
- directory and set owner, modification time and permissions on
- directories afterwards. `path' specifies a different directory
- to extract to. `members' is optional and must be a subset of the
- list returned by getmembers().
- """
- import copy
- import operator
- from tarfile import ExtractError
- directories = []
-
- if members is None:
- members = self
-
- for tarinfo in members:
- if tarinfo.isdir():
- # Extract directories with a safe mode.
- directories.append(tarinfo)
- tarinfo = copy.copy(tarinfo)
- tarinfo.mode = 448 # decimal for oct 0700
- self.extract(tarinfo, path)
-
- # Reverse sort directories.
- if sys.version_info < (2, 4):
- def sorter(dir1, dir2):
- return cmp(dir1.name, dir2.name)
- directories.sort(sorter)
- directories.reverse()
- else:
- directories.sort(key=operator.attrgetter('name'), reverse=True)
-
- # Set correct owner, mtime and filemode on directories.
- for tarinfo in directories:
- dirpath = os.path.join(path, tarinfo.name)
- try:
- self.chown(tarinfo, dirpath)
- self.utime(tarinfo, dirpath)
- self.chmod(tarinfo, dirpath)
- except ExtractError:
- e = sys.exc_info()[1]
- if self.errorlevel > 1:
- raise
- else:
- self._dbg(1, "tarfile: %s" % e)
-
-
-def _build_install_args(options):
- """
- Build the arguments to 'python setup.py install' on the distribute package
- """
- install_args = []
- if options.user_install:
- if sys.version_info < (2, 6):
- log.warn("--user requires Python 2.6 or later")
- raise SystemExit(1)
- install_args.append('--user')
- return install_args
-
-def _parse_args():
- """
- Parse the command line for options
- """
- parser = optparse.OptionParser()
- parser.add_option(
- '--user', dest='user_install', action='store_true', default=False,
- help='install in user site package (requires Python 2.6 or later)')
- parser.add_option(
- '--download-base', dest='download_base', metavar="URL",
- default=DEFAULT_URL,
- help='alternative URL from where to download the distribute package')
- options, args = parser.parse_args()
- # positional arguments are ignored
- return options
-
-def main(version=DEFAULT_VERSION):
- """Install or upgrade setuptools and EasyInstall"""
- options = _parse_args()
- tarball = download_setuptools(download_base=options.download_base)
- return _install(tarball, _build_install_args(options))
-
-if __name__ == '__main__':
- sys.exit(main())
diff -r b37c9f299b510c79df8bbe6b781548ac3fa161f0 -r 242b9f739613f32ed715b6ec2298476bb88d1f3f doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -1,18 +1,14 @@
#
# Hi there! Welcome to the yt installation script.
#
+# First things first, if you experience problems, please visit the Help
+# section at http://yt-project.org.
+#
# This script is designed to create a fully isolated Python installation
# with the dependencies you need to run yt.
#
-# There are a few options, but you only need to set *one* of them. And
-# that's the next one, DEST_DIR. But, if you want to use an existing HDF5
-# installation you can set HDF5_DIR, or if you want to use some other
-# subversion checkout of yt, you can set YT_DIR, too. (It'll already
-# check the current directory and one up.
-#
-# If you experience problems, please visit the Help section at
-# http://yt-project.org.
-#
+# There are a few options, but you only need to set *one* of them, which is
+# the next one, DEST_DIR:
DEST_SUFFIX="yt-`uname -m`"
DEST_DIR="`pwd`/${DEST_SUFFIX/ /}" # Installation location
@@ -23,16 +19,25 @@
DEST_DIR=${YT_DEST}
fi
+# What follows are some other options that you may or may not need to change.
+
# Here's where you put the HDF5 path if you like; otherwise it'll download it
# and install it on its own
#HDF5_DIR=
+# If you've got yt some other place, set this to point to it. The script will
+# already check the current directory and the one above it in the tree.
+YT_DIR=""
+
# If you need to supply arguments to the NumPy or SciPy build, supply them here
# This one turns on gfortran manually:
#NUMPY_ARGS="--fcompiler=gnu95"
# If you absolutely can't get the fortran to work, try this:
#NUMPY_ARGS="--fcompiler=fake"
+INST_PY3=0 # Install Python 3 along with Python 2. If this is turned
+ # on, all Python packages (including yt) will be installed
+ # in Python 3 (except Mercurial, which requires Python 2).
INST_HG=1 # Install Mercurial or not? If hg is not already
# installed, yt cannot be installed.
INST_ZLIB=1 # On some systems (Kraken) matplotlib has issues with
@@ -50,9 +55,6 @@
INST_ROCKSTAR=0 # Install the Rockstar halo finder?
INST_SCIPY=0 # Install scipy?
-# If you've got yt some other place, set this to point to it.
-YT_DIR=""
-
# If you need to pass anything to matplotlib, do so here.
MPL_SUPP_LDFLAGS=""
MPL_SUPP_CFLAGS=""
@@ -111,6 +113,7 @@
echo INST_SQLITE3=${INST_SQLITE3} >> ${CONFIG_FILE}
echo INST_PYX=${INST_PYX} >> ${CONFIG_FILE}
echo INST_0MQ=${INST_0MQ} >> ${CONFIG_FILE}
+ echo INST_PY3=${INST_PY3} >> ${CONFIG_FILE}
echo INST_ROCKSTAR=${INST_ROCKSTAR} >> ${CONFIG_FILE}
echo INST_SCIPY=${INST_SCIPY} >> ${CONFIG_FILE}
echo YT_DIR=${YT_DIR} >> ${CONFIG_FILE}
@@ -415,6 +418,10 @@
get_willwont ${INST_SQLITE3}
echo "be installing SQLite3"
+printf "%-15s = %s so I " "INST_PY3" "${INST_PY3}"
+get_willwont ${INST_PY3}
+echo "be installing Python 3"
+
printf "%-15s = %s so I " "INST_HG" "${INST_HG}"
get_willwont ${INST_HG}
echo "be installing Mercurial"
@@ -487,6 +494,13 @@
exit 1
}
+if [ $INST_PY3 -eq 1 ]
+then
+ PYTHON_EXEC='python3.4'
+else
+ PYTHON_EXEC='python2.7'
+fi
+
function do_setup_py
{
[ -e $1/done ] && return
@@ -501,21 +515,27 @@
[ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
touch $LIB/extracted
BUILD_ARGS=""
+ if [[ $LIB =~ .*mercurial.* ]]
+ then
+ PYEXE="python2.7"
+ else
+ PYEXE=${PYTHON_EXEC}
+ fi
case $LIB in
*h5py*)
pushd $LIB &> /dev/null
- ( ${DEST_DIR}/bin/python2.7 setup.py configure --hdf5=${HDF5_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
+ ( ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py configure --hdf5=${HDF5_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
popd &> /dev/null
;;
*numpy*)
- if [ -e ${DEST_DIR}/lib/python2.7/site-packages/numpy/__init__.py ]
+ if [ -e ${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages/numpy/__init__.py ]
then
- VER=$(${DEST_DIR}/bin/python -c 'from distutils.version import StrictVersion as SV; \
+ VER=$(${DEST_DIR}/bin/${PYTHON_EXEC} -c 'from distutils.version import StrictVersion as SV; \
import numpy; print SV(numpy.__version__) < SV("1.8.0")')
if [ $VER == "True" ]
then
echo "Removing previous NumPy instance (see issue #889)"
- rm -rf ${DEST_DIR}/lib/python2.7/site-packages/{numpy*,*.pth}
+ rm -rf ${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages/{numpy*,*.pth}
fi
fi
;;
@@ -523,8 +543,8 @@
;;
esac
cd $LIB
- ( ${DEST_DIR}/bin/python2.7 setup.py build ${BUILD_ARGS} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
- ( ${DEST_DIR}/bin/python2.7 setup.py install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+ ( ${DEST_DIR}/bin/${PYEXE} setup.py build ${BUILD_ARGS} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
+ ( ${DEST_DIR}/bin/${PYEXE} setup.py install 2>&1 ) 1>> ${LOG_FILE} || do_exit
touch done
cd ..
}
@@ -592,14 +612,15 @@
# Set paths to what they should be when yt is activated.
export PATH=${DEST_DIR}/bin:$PATH
export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
-export PYTHONPATH=${DEST_DIR}/lib/python2.7/site-packages
+export PYTHONPATH=${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages
mkdir -p ${DEST_DIR}/src
cd ${DEST_DIR}/src
+PYTHON2='Python-2.7.9'
+PYTHON3='Python-3.4.3'
CYTHON='Cython-0.22'
PYX='PyX-0.12.1'
-PYTHON='Python-2.7.9'
BZLIB='bzip2-1.0.6'
FREETYPE_VER='freetype-2.4.12'
H5PY='h5py-2.5.0'
@@ -620,11 +641,13 @@
TORNADO='tornado-4.0.2'
ZEROMQ='zeromq-4.0.5'
ZLIB='zlib-1.2.8'
+SETUPTOOLS='setuptools-16.0'
# Now we dump all our SHA512 files out.
echo '856220fa579e272ac38dcef091760f527431ff3b98df9af6e68416fcf77d9659ac5abe5c7dee41331f359614637a4ff452033085335ee499830ed126ab584267 Cython-0.22.tar.gz' > Cython-0.22.tar.gz.sha512
echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1 PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
echo 'a42f28ed8e49f04cf89e2ea7434c5ecbc264e7188dcb79ab97f745adf664dd9ab57f9a913543731635f90859536244ac37dca9adf0fc2aa1b215ba884839d160 Python-2.7.9.tgz' > Python-2.7.9.tgz.sha512
+echo '609cc82586fabecb25f25ecb410f2938e01d21cde85dd3f8824fe55c6edde9ecf3b7609195473d3fa05a16b9b121464f5414db1a0187103b78ea6edfa71684a7 Python-3.4.3.tgz' > Python-3.4.3.tgz.sha512
echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e blas.tar.gz' > blas.tar.gz.sha512
echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12 bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6 reason-js-20120623.zip' > reason-js-20120623.zip.sha512
@@ -646,6 +669,7 @@
echo '93591068dc63af8d50a7925d528bc0cccdd705232c529b6162619fe28dddaf115e8a460b1842877d35160bd7ed480c1bd0bdbec57d1f359085bd1814e0c1c242 tornado-4.0.2.tar.gz' > tornado-4.0.2.tar.gz.sha512
echo '0d928ed688ed940d460fa8f8d574a9819dccc4e030d735a8c7db71b59287ee50fa741a08249e356c78356b03c2174f2f2699f05aa7dc3d380ed47d8d7bab5408 zeromq-4.0.5.tar.gz' > zeromq-4.0.5.tar.gz.sha512
echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
+echo '38a89aad89dc9aa682dbfbca623e2f69511f5e20d4a3526c01aabbc7e93ae78f20aac566676b431e111540b41540a1c4f644ce4174e7ecf052318612075e02dc setuptools-16.0.tar.gz' > setuptools-16.0.tar.gz.sha512
# Individual processes
[ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
[ $INST_ZLIB -eq 1 ] && get_ytproject $ZLIB.tar.gz
@@ -660,10 +684,11 @@
[ $INST_SCIPY -eq 1 ] && get_ytproject $SCIPY.tar.gz
[ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
[ $INST_SCIPY -eq 1 ] && get_ytproject $LAPACK.tar.gz
-get_ytproject $PYTHON.tgz
+[ $INST_HG -eq 1 ] && get_ytproject $MERCURIAL.tar.gz
+[ $INST_PY3 -eq 1 ] && get_ytproject $PYTHON3.tgz
+get_ytproject $PYTHON2.tgz
get_ytproject $NUMPY.tar.gz
get_ytproject $MATPLOTLIB.tar.gz
-get_ytproject $MERCURIAL.tar.gz
get_ytproject $IPYTHON.tar.gz
get_ytproject $H5PY.tar.gz
get_ytproject $CYTHON.tar.gz
@@ -671,6 +696,7 @@
get_ytproject $NOSE.tar.gz
get_ytproject $PYTHON_HGLIB.tar.gz
get_ytproject $SYMPY.tar.gz
+get_ytproject $SETUPTOOLS.tar.gz
if [ $INST_BZLIB -eq 1 ]
then
if [ ! -e $BZLIB/done ]
@@ -787,11 +813,11 @@
fi
fi
-if [ ! -e $PYTHON/done ]
+if [ ! -e $PYTHON2/done ]
then
- echo "Installing Python. This may take a while, but don't worry. yt loves you."
- [ ! -e $PYTHON ] && tar xfz $PYTHON.tgz
- cd $PYTHON
+ echo "Installing Python 2. This may take a while, but don't worry. yt loves you."
+ [ ! -e $PYTHON2 ] && tar xfz $PYTHON2.tgz
+ cd $PYTHON2
( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -802,7 +828,30 @@
cd ..
fi
-export PYTHONPATH=${DEST_DIR}/lib/python2.7/site-packages/
+if [ $INST_PY3 -eq 1 ]
+then
+ if [ ! -e $PYTHON3/done ]
+ then
+ echo "Installing Python 3. Because two Pythons are better than one."
+ [ ! -e $PYTHON3 ] && tar xfz $PYTHON3.tgz
+ cd $PYTHON3
+ ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
+
+ ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
+ ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+ ( ln -sf ${DEST_DIR}/bin/python3.4 ${DEST_DIR}/bin/pyyt 2>&1 ) 1>> ${LOG_FILE}
+ ( ln -sf ${DEST_DIR}/bin/python3.4 ${DEST_DIR}/bin/python 2>&1 ) 1>> ${LOG_FILE}
+ ( ln -sf ${DEST_DIR}/bin/python3-config ${DEST_DIR}/bin/python-config 2>&1 ) 1>> ${LOG_FILE}
+ ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
+ touch done
+ cd ..
+ fi
+fi
+
+export PYTHONPATH=${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages/
+
+# Install setuptools
+do_setup_py $SETUPTOOLS
if [ $INST_HG -eq 1 ]
then
@@ -847,12 +896,10 @@
# This fixes problems with gfortran linking.
unset LDFLAGS
-
-echo "Installing distribute"
-( ${DEST_DIR}/bin/python2.7 ${YT_DIR}/distribute_setup.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
-
+
echo "Installing pip"
-( ${DEST_DIR}/bin/easy_install-2.7 pip 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( ${GETFILE} https://bootstrap.pypa.io/get-pip.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( ${DEST_DIR}/bin/${PYTHON_EXEC} get-pip.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
if [ $INST_SCIPY -eq 0 ]
then
@@ -986,13 +1033,14 @@
echo "Installing yt"
[ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
-( export PATH=$DEST_DIR/bin:$PATH ; ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( export PATH=$DEST_DIR/bin:$PATH ; ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
touch done
cd $MY_PWD
-if !( ( ${DEST_DIR}/bin/python2.7 -c "import readline" 2>&1 )>> ${LOG_FILE})
+if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import readline" 2>&1 )>> ${LOG_FILE}) || \
+ [[ "${MYOS##Darwin}" != "${MYOS}" && $INST_PY3 -eq 1 ]]
then
- if !( ( ${DEST_DIR}/bin/python2.7 -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
+ if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
then
echo "Installing pure-python readline"
( ${DEST_DIR}/bin/pip install gnureadline 2>&1 ) 1>> ${LOG_FILE}
diff -r b37c9f299b510c79df8bbe6b781548ac3fa161f0 -r 242b9f739613f32ed715b6ec2298476bb88d1f3f doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -47,10 +47,30 @@
frb = FixedResolutionBuffer(sl, (0.3, 0.5, 0.6, 0.8), (512, 512))
my_image = frb["density"]
-This resultant array can be saved out to disk or visualized using a
-hand-constructed Matplotlib image, for instance using
+This resulting image array may then be used in a hand-constructed Matplotlib figure, for instance using
:func:`~matplotlib.pyplot.imshow`.
+The buffer arrays can be saved out to disk in either HDF5 or FITS format:
+
+.. code-block:: python
+
+ frb.export_hdf5("my_images.h5", fields=["density","temperature"])
+ frb.export_fits("my_images.fits", fields=["density","temperature"],
+ clobber=True, units="kpc")
+
+In the FITS case, there is an option for setting the ``units`` of the coordinate system in
+the file. If you want to overwrite a file with the same name, set ``clobber=True``.
+
+The :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer` can even be exported
+as a 2D dataset itself, which may be operated on in the same way as any other dataset in yt:
+
+.. code-block:: python
+
+ ds_frb = frb.export_dataset(fields=["density","temperature"], nprocs=8)
+ sp = ds_frb.sphere("c", (100.,"kpc"))
+
+where the ``nprocs`` parameter can be used to decompose the image into ``nprocs`` grids.
+
.. _generating-profiles-and-histograms:
Profiles and Histograms
diff -r b37c9f299b510c79df8bbe6b781548ac3fa161f0 -r 242b9f739613f32ed715b6ec2298476bb88d1f3f setup.py
--- a/setup.py
+++ b/setup.py
@@ -13,11 +13,6 @@
sys.exit(1)
import setuptools
-from distutils.version import StrictVersion
-if StrictVersion(setuptools.__version__) < StrictVersion('0.7.0'):
- import distribute_setup
- distribute_setup.use_setuptools()
-
from distutils.command.build_py import build_py
from numpy.distutils.misc_util import appendpath
from numpy.distutils.command import install_data as np_install_data
diff -r b37c9f299b510c79df8bbe6b781548ac3fa161f0 -r 242b9f739613f32ed715b6ec2298476bb88d1f3f yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -16,12 +16,9 @@
import numpy as np
from yt.utilities.physical_constants import \
charge_proton_cgs, \
- cm_per_km, \
- km_per_cm, \
mass_electron_cgs, \
speed_of_light_cgs
-
def voigt(a,u):
"""
NAME:
@@ -140,67 +137,75 @@
k1 = k1.astype(np.float64).clip(0)
return k1
-def tau_profile(lam0, fval, gamma, vkms, column_density,
- deltav=None, delta_lambda=None,
+def tau_profile(lambda_0, f_value, gamma, v_doppler, column_density,
+ delta_v=None, delta_lambda=None,
lambda_bins=None, n_lambda=12000, dlambda=0.01):
- """
+ r"""
Create an optical depth vs. wavelength profile for an
absorption line using a voigt profile.
- :param lam0 (float): central wavelength (angstroms).
- :param fval (float): f-value.
- :param gamma (float): gamma value.
- :param vkms (float): doppler b-parameter.
- :param column_density (float): column density (cm^-2).
- :param deltav (float): velocity offset from lam0 (km/s).
- Default: None (no shift).
- :param delta_lambda (float): wavelength offset in angstroms.
- Default: None (no shift).
- :param lambda_bins (array): array of wavelengths in angstroms.
- Default: None
- :param n_lambda (float): size of lambda bins to create
- array if lambda_bins is None. Default: 12000
- :param dlambda (float): lambda bin width if lambda_bins is
- None. Default: 0.01
+
+ Parameters
+ ----------
+
+ lambda_0 : YTQuantity in length units
+ central wavelength.
+ f_value : float
+ absorption line f-value.
+ gamma : float
+ absorption line gamma value.
+ v_doppler : YTQuantity in velocity units
+ doppler b-parameter.
+ column_density : YTQuantity in (length units)^-2
+ column density.
+ delta_v : YTQuantity in velocity units
+ velocity offset from lambda_0.
+ Default: None (no shift).
+ delta_lambda : YTQuantity in length units
+ wavelength offset.
+ Default: None (no shift).
+ lambda_bins : YTArray in length units
+ wavelength array for line deposition. If None, one will be
+ created using n_lambda and dlambda.
+ Default: None.
+ n_lambda : int
+ size of lambda bins to create if lambda_bins is None.
+ Default: 12000.
+ dlambda : float
+ lambda bin width in angstroms if lambda_bins is None.
+ Default: 0.01.
+
"""
- ## constants
- me = mass_electron_cgs # grams mass electron
- e = charge_proton_cgs # esu
- c = speed_of_light_cgs * km_per_cm # km/s
- ccgs = speed_of_light_cgs # cm/s
-
- ## shift lam0 by deltav
- if deltav is not None:
- lam1 = lam0 * (1 + deltav / c)
+ ## shift lambda_0 by delta_v
+ if delta_v is not None:
+ lam1 = lambda_0 * (1 + delta_v / speed_of_light_cgs)
elif delta_lambda is not None:
- lam1 = lam0 + delta_lambda
+ lam1 = lambda_0 + delta_lambda
else:
- lam1 = lam0
+ lam1 = lambda_0
## conversions
- vdop = vkms * cm_per_km # in cm/s
- lam0cgs = lam0 / 1.e8 # rest wavelength in cm
- lam1cgs = lam1 / 1.e8 # line wavelength in cm
- nu1 = ccgs / lam1cgs # line freq in Hz
- nudop = vdop / ccgs * nu1 # doppler width in Hz
- lamdop = vdop / ccgs * lam1 # doppler width in Ang
+ nu1 = speed_of_light_cgs / lam1 # line freq in Hz
+ nudop = v_doppler / speed_of_light_cgs * nu1 # doppler width in Hz
+ lamdop = v_doppler / speed_of_light_cgs * lam1 # doppler width in Ang
## create wavelength
if lambda_bins is None:
lambda_bins = lam1 + \
np.arange(n_lambda, dtype=np.float) * dlambda - \
- n_lambda * dlambda / 2 # wavelength vector (angstroms)
- nua = ccgs / (lambda_bins / 1.e8) # frequency vector (Hz)
+ n_lambda * dlambda / 2 # wavelength vector (angstroms)
+ nua = (speed_of_light_cgs / lambda_bins) # frequency vector (Hz)
## tau_0
- tau_X = np.sqrt(np.pi) * e**2 / (me * ccgs) * \
- column_density * fval / vdop
- tau0 = tau_X * lam0cgs
+ tau_X = np.sqrt(np.pi) * charge_proton_cgs**2 / \
+ (mass_electron_cgs * speed_of_light_cgs) * \
+ column_density * f_value / v_doppler
+ tau0 = tau_X * lambda_0
# dimensionless frequency offset in units of doppler freq
- x = (nua - nu1) / nudop
- a = gamma / (4 * np.pi * nudop) # damping parameter
- phi = voigt(a, x) # profile
- tauphi = tau0 * phi # profile scaled with tau0
+ x = ((nua - nu1) / nudop).in_units("")
+ a = (gamma / (4 * np.pi * nudop)).in_units("s") # damping parameter
+ phi = voigt(a, x) # line profile
+ tauphi = (tau0 * phi).in_units("") # profile scaled with tau0
return (lambda_bins, tauphi)
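For reference, a minimal sketch of calling the refactored ``tau_profile`` with unit-aware inputs. The line parameters below are illustrative values for H I Lyman-alpha, not taken from this changeset:

.. code-block:: python

   import numpy as np
   from yt.units.yt_array import YTArray, YTQuantity
   from yt.analysis_modules.absorption_spectrum.absorption_line import tau_profile

   bins = YTArray(np.linspace(1210.0, 1220.0, 1000), "angstrom")
   lambda_bins, tau = tau_profile(
       YTQuantity(1215.67, "angstrom"),   # central wavelength
       0.4164,                            # f-value
       6.265e8,                           # gamma
       YTQuantity(20.0, "km/s"),          # doppler b-parameter
       YTQuantity(1.0e14, "cm**-2"),      # column density
       lambda_bins=bins)
   flux = np.exp(-tau)                    # optical depth -> normalized flux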
diff -r b37c9f299b510c79df8bbe6b781548ac3fa161f0 -r 242b9f739613f32ed715b6ec2298476bb88d1f3f yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -4,7 +4,7 @@
"""
-from __future__ import print_function
+
from __future__ import absolute_import
#-----------------------------------------------------------------------------
@@ -21,13 +21,12 @@
from .absorption_line import tau_profile
from yt.funcs import get_pbar, mylog
-from yt.units.yt_array import YTArray
+from yt.units.yt_array import YTArray, YTQuantity
from yt.utilities.physical_constants import \
- amu_cgs, boltzmann_constant_cgs, \
- speed_of_light_cgs, km_per_cm
+ boltzmann_constant_cgs, \
+ speed_of_light_cgs
from yt.utilities.on_demand_imports import _astropy
-speed_of_light_kms = speed_of_light_cgs * km_per_cm
pyfits = _astropy.pyfits
class AbsorptionSpectrum(object):
@@ -49,8 +48,10 @@
self.tau_field = None
self.flux_field = None
self.spectrum_line_list = None
- self.lambda_bins = np.linspace(lambda_min, lambda_max, n_lambda)
- self.bin_width = (lambda_max - lambda_min) / float(n_lambda - 1)
+ self.lambda_bins = YTArray(np.linspace(lambda_min, lambda_max, n_lambda),
+ "angstrom")
+ self.bin_width = YTQuantity((lambda_max - lambda_min) /
+ float(n_lambda - 1), "angstrom")
self.line_list = []
self.continuum_list = []
@@ -76,8 +77,10 @@
mass of atom in amu.
"""
self.line_list.append({'label': label, 'field_name': field_name,
- 'wavelength': wavelength, 'f_value': f_value,
- 'gamma': gamma, 'atomic_mass': atomic_mass,
+ 'wavelength': YTQuantity(wavelength, "angstrom"),
+ 'f_value': f_value,
+ 'gamma': gamma,
+ 'atomic_mass': YTQuantity(atomic_mass, "amu"),
'label_threshold': label_threshold})
def add_continuum(self, label, field_name, wavelength,
@@ -209,16 +212,15 @@
# include factor of (1 + z) because our velocity is in proper frame.
delta_lambda += line['wavelength'] * (1 + field_data['redshift']) * \
field_data['velocity_los'] / speed_of_light_cgs
- thermal_b = km_per_cm * np.sqrt((2 * boltzmann_constant_cgs *
- field_data['temperature']) /
- (amu_cgs * line['atomic_mass']))
- thermal_b.convert_to_cgs()
+ thermal_b = np.sqrt((2 * boltzmann_constant_cgs *
+ field_data['temperature']) /
+ line['atomic_mass'])
center_bins = np.digitize((delta_lambda + line['wavelength']),
self.lambda_bins)
# ratio of line width to bin width
width_ratio = ((line['wavelength'] + delta_lambda) * \
- thermal_b / speed_of_light_kms / self.bin_width).value
+ thermal_b / speed_of_light_cgs / self.bin_width).in_units("").d
if (width_ratio < 1.0).any():
mylog.warn(("%d out of %d line components are unresolved, " +
@@ -240,11 +242,13 @@
my_bin_ratio = spectrum_bin_ratio
while True:
lambda_bins, line_tau = \
- tau_profile(line['wavelength'], line['f_value'],
- line['gamma'], thermal_b[lixel],
- column_density[lixel],
- delta_lambda=delta_lambda[lixel],
- lambda_bins=self.lambda_bins[left_index[lixel]:right_index[lixel]])
+ tau_profile(
+ line['wavelength'], line['f_value'],
+ line['gamma'], thermal_b[lixel].in_units("km/s"),
+ column_density[lixel],
+ delta_lambda=delta_lambda[lixel],
+ lambda_bins=self.lambda_bins[left_index[lixel]:right_index[lixel]])
+
# Widen wavelength window until optical depth reaches a max value at the ends.
if (line_tau[0] < max_tau and line_tau[-1] < max_tau) or \
(left_index[lixel] <= 0 and right_index[lixel] >= self.n_lambda):
@@ -260,7 +264,7 @@
if line['label_threshold'] is not None and \
column_density[lixel] >= line['label_threshold']:
if use_peculiar_velocity:
- peculiar_velocity = km_per_cm * field_data['velocity_los'][lixel]
+ peculiar_velocity = field_data['velocity_los'][lixel].in_units("km/s")
else:
peculiar_velocity = 0.0
self.spectrum_line_list.append({'label': line['label'],
@@ -271,7 +275,7 @@
'redshift': field_data['redshift'][lixel],
'v_pec': peculiar_velocity})
pbar.update(i)
- pbar.finish()
+ pbar.finish()
del column_density, delta_lambda, thermal_b, \
center_bins, width_ratio, left_index, right_index
@@ -280,7 +284,7 @@
"""
Write out list of spectral lines.
"""
- print("Writing spectral line list: %s." % filename)
+ mylog.info("Writing spectral line list: %s." % filename)
self.spectrum_line_list.sort(key=lambda obj: obj['wavelength'])
f = open(filename, 'w')
f.write('#%-14s %-14s %-12s %-12s %-12s %-12s\n' %
@@ -295,7 +299,7 @@
"""
Write spectrum to an ascii file.
"""
- print("Writing spectrum to ascii file: %s." % filename)
+ mylog.info("Writing spectrum to ascii file: %s." % filename)
f = open(filename, 'w')
f.write("# wavelength[A] tau flux\n")
for i in range(self.lambda_bins.size):
@@ -307,7 +311,7 @@
"""
Write spectrum to a fits file.
"""
- print("Writing spectrum to fits file: %s." % filename)
+ mylog.info("Writing spectrum to fits file: %s." % filename)
col1 = pyfits.Column(name='wavelength', format='E', array=self.lambda_bins)
col2 = pyfits.Column(name='flux', format='E', array=self.flux_field)
cols = pyfits.ColDefs([col1, col2])
@@ -319,7 +323,7 @@
Write spectrum to an hdf5 file.
"""
- print("Writing spectrum to hdf5 file: %s." % filename)
+ mylog.info("Writing spectrum to hdf5 file: %s." % filename)
output = h5py.File(filename, 'w')
output.create_dataset('wavelength', data=self.lambda_bins)
output.create_dataset('tau', data=self.tau_field)
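For orientation, a minimal sketch of driving the now unit-aware ``AbsorptionSpectrum``. The H I line parameters are illustrative; the usual ``make_spectrum()`` entry point is unchanged by this diff and omitted here:

.. code-block:: python

   from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum

   # wavelength window in angstroms: lambda_min, lambda_max, n_lambda
   sp = AbsorptionSpectrum(1150.0, 1250.0, 10000)
   # label, field_name, wavelength [angstrom], f-value, gamma, atomic mass [amu]
   sp.add_line("HI Lya", "H_number_density", 1215.67, 0.4164, 6.265e8, 1.00794)
   # internally, wavelength and atomic_mass are now stored as YTQuantities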
diff -r b37c9f299b510c79df8bbe6b781548ac3fa161f0 -r 242b9f739613f32ed715b6ec2298476bb88d1f3f yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -1,9 +1,14 @@
-from __future__ import print_function
+import h5py
import numpy as np
-import h5py
-from yt.analysis_modules.absorption_spectrum.absorption_line \
- import voigt
-from yt.utilities.on_demand_imports import _scipy
+
+from yt.analysis_modules.absorption_spectrum.absorption_line import \
+ voigt
+from yt.funcs import \
+ mylog
+from yt.units.yt_array import \
+ YTArray
+from yt.utilities.on_demand_imports import \
+ _scipy
optimize = _scipy.optimize
@@ -79,6 +84,10 @@
absorption profiles. Same size as x.
"""
+ # convert to NumPy array if we have a YTArray
+ if isinstance(x, YTArray):
+ x = x.d
+
#Empty dictionary for fitted lines
allSpeciesLines = {}
@@ -1007,6 +1016,5 @@
f.create_dataset("{0}/b".format(ion),data=params['b'])
f.create_dataset("{0}/z".format(ion),data=params['z'])
f.create_dataset("{0}/complex".format(ion),data=params['group#'])
- print('Writing spectrum fit to {0}'.format(file_name))
+ mylog.info('Writing spectrum fit to {0}'.format(file_name))
f.close()
-
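The guard added above matters because SciPy's optimizers expect plain ndarrays. A minimal sketch of the conversion (values illustrative):

.. code-block:: python

   import numpy as np
   from yt.units.yt_array import YTArray

   x = YTArray(np.linspace(1150.0, 1250.0, 5), "angstrom")
   if isinstance(x, YTArray):
       x = x.d            # .d exposes the raw ndarray, dropping units
   assert isinstance(x, np.ndarray)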
diff -r b37c9f299b510c79df8bbe6b781548ac3fa161f0 -r 242b9f739613f32ed715b6ec2298476bb88d1f3f yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -23,7 +23,8 @@
ValidateSpatial
from yt.units.yt_array import \
- uconcatenate
+ uconcatenate, \
+ ucross
from yt.utilities.math_utils import \
get_sph_r_component, \
@@ -122,10 +123,8 @@
units = "g")
def particle_density(field, data):
- pos = data[ptype, coord_name]
- mass = data[ptype, mass_name]
- pos.convert_to_units("code_length")
- mass.convert_to_units("code_mass")
+ pos = data[ptype, coord_name].convert_to_units("code_length")
+ mass = data[ptype, mass_name].convert_to_units("code_mass")
d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
d = data.ds.arr(d, "code_mass")
d /= data["index", "cell_volume"]
@@ -248,17 +247,26 @@
units = "cm / s",
particle_type=True)
+def get_angular_momentum_components(ptype, data, spos, svel):
+ if data.has_field_parameter("normal"):
+ normal = data.get_field_parameter("normal")
+ else:
+ normal = data.ds.arr([0.0, 0.0, 1.0], "code_length") # default to simulation axis
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"]).T
+ vel = data.ds.arr([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
+ return pos, vel, normal, bv
+
def standard_particle_fields(registry, ptype,
spos = "particle_position_%s",
svel = "particle_velocity_%s"):
# This function will set things up based on the scalar fields and standard
# yt field names.
+ # data.get_field_parameter("bulk_velocity") defaults to YTArray([0,0,0] cm/s)
def _particle_velocity_magnitude(field, data):
""" M{|v|} """
bulk_velocity = data.get_field_parameter("bulk_velocity")
- if bulk_velocity is None:
- bulk_velocity = np.zeros(3)
return np.sqrt((data[ptype, svel % 'x'] - bulk_velocity[0])**2
+ (data[ptype, svel % 'y'] - bulk_velocity[1])**2
+ (data[ptype, svel % 'z'] - bulk_velocity[2])**2 )
@@ -274,19 +282,13 @@
Calculate the specific angular momentum of a particle. Returns a vector for each
particle.
"""
- if data.has_field_parameter("bulk_velocity"):
- bv = data.get_field_parameter("bulk_velocity")
- else: bv = np.zeros(3, dtype=np.float64)
- xv = data[ptype, svel % 'x'] - bv[0]
- yv = data[ptype, svel % 'y'] - bv[1]
- zv = data[ptype, svel % 'z'] - bv[2]
center = data.get_field_parameter('center')
- coords = self.ds.arr([data[ptype, spos % d] for d in 'xyz'],
- dtype=np.float64, units='cm')
- new_shape = tuple([3] + [1]*(len(coords.shape)-1))
- r_vec = coords - np.reshape(center,new_shape)
- v_vec = self.ds.arr([xv,yv,zv], dtype=np.float64, units='cm/s')
- return np.cross(r_vec, v_vec, axis=0).T
+ pos, vel, normal, bv = get_angular_momentum_components(ptype, data, spos, svel)
+ L, r_vec, v_vec = modify_reference_frame(center, normal, P=pos, V=vel)
+ # adding in the unit registry allows us to have a reference to the dataset
+ # and thus we will always get the correct units after applying the cross product.
+ return -ucross(r_vec, v_vec, registry=data.ds.unit_registry)
+
registry.add_field((ptype, "particle_specific_angular_momentum"),
function=_particle_specific_angular_momentum,
@@ -294,87 +296,29 @@
units="cm**2/s",
validators=[ValidateParameter("center")])
- def _particle_specific_angular_momentum_x(field, data):
- if data.has_field_parameter("bulk_velocity"):
- bv = data.get_field_parameter("bulk_velocity")
- else: bv = np.zeros(3, dtype=np.float64)
- center = data.get_field_parameter('center')
- y = data[ptype, spos % "y"] - center[1]
- z = data[ptype, spos % "z"] - center[2]
- yv = data[ptype, svel % "y"] - bv[1]
- zv = data[ptype, svel % "z"] - bv[2]
- return yv*z - zv*y
-
- registry.add_field((ptype, "particle_specific_angular_momentum_x"),
- function=_particle_specific_angular_momentum_x,
- particle_type=True,
- units="cm**2/s",
- validators=[ValidateParameter("center")])
-
- def _particle_specific_angular_momentum_y(field, data):
- if data.has_field_parameter("bulk_velocity"):
- bv = data.get_field_parameter("bulk_velocity")
- else: bv = np.zeros(3, dtype=np.float64)
- center = data.get_field_parameter('center')
- x = data[ptype, spos % "x"] - center[0]
- z = data[ptype, spos % "z"] - center[2]
- xv = data[ptype, svel % "x"] - bv[0]
- zv = data[ptype, svel % "z"] - bv[2]
- return -(xv*z - zv*x)
-
- registry.add_field((ptype, "particle_specific_angular_momentum_y"),
- function=_particle_specific_angular_momentum_y,
- particle_type=True,
- units="cm**2/s",
- validators=[ValidateParameter("center")])
-
- def _particle_specific_angular_momentum_z(field, data):
- if data.has_field_parameter("bulk_velocity"):
- bv = data.get_field_parameter("bulk_velocity")
- else: bv = np.zeros(3, dtype=np.float64)
- center = data.get_field_parameter('center')
- x = data[ptype, spos % "x"] - center[0]
- y = data[ptype, spos % "y"] - center[1]
- xv = data[ptype, svel % "x"] - bv[0]
- yv = data[ptype, svel % "y"] - bv[1]
- return xv*y - yv*x
-
- registry.add_field((ptype, "particle_specific_angular_momentum_z"),
- function=_particle_specific_angular_momentum_z,
- particle_type=True,
- units="cm**2/s",
- validators=[ValidateParameter("center")])
-
- create_magnitude_field(registry, "particle_specific_angular_momentum",
- "cm**2/s", ftype=ptype, particle_type=True)
-
- def _particle_angular_momentum_x(field, data):
- return data[ptype, "particle_mass"] * \
- data[ptype, "particle_specific_angular_momentum_x"]
- registry.add_field((ptype, "particle_angular_momentum_x"),
- function=_particle_angular_momentum_x,
- units="g*cm**2/s", particle_type=True,
- validators=[ValidateParameter('center')])
-
- def _particle_angular_momentum_y(field, data):
- return data[ptype, "particle_mass"] * \
- data[ptype, "particle_specific_angular_momentum_y"]
- registry.add_field((ptype, "particle_angular_momentum_y"),
- function=_particle_angular_momentum_y,
- units="g*cm**2/s", particle_type=True,
- validators=[ValidateParameter('center')])
-
- def _particle_angular_momentum_z(field, data):
- return data[ptype, "particle_mass"] * \
- data[ptype, "particle_specific_angular_momentum_z"]
- registry.add_field((ptype, "particle_angular_momentum_z"),
- function=_particle_angular_momentum_z,
- units="g*cm**2/s", particle_type=True,
- validators=[ValidateParameter('center')])
+ def _get_spec_ang_mom_comp(axi, ax, _ptype):
+ def _particle_specific_angular_momentum_component(field, data):
+ return data[_ptype, "particle_specific_angular_momentum"][:, axi]
+ def _particle_angular_momentum_component(field, data):
+ return data[_ptype, "particle_mass"] * \
+ data[ptype, "particle_specific_angular_momentum_%s" % ax]
+ return _particle_specific_angular_momentum_component, \
+ _particle_angular_momentum_component
+ for axi, ax in enumerate("xyz"):
+ f, v = _get_spec_ang_mom_comp(axi, ax, ptype)
+ registry.add_field(
+ (ptype, "particle_specific_angular_momentum_%s" % ax),
+ particle_type = True, function=f, units="cm**2/s",
+ validators=[ValidateParameter("center")]
+ )
+ registry.add_field((ptype, "particle_angular_momentum_%s" % ax),
+ function=v, units="g*cm**2/s", particle_type=True,
+ validators=[ValidateParameter('center')])
def _particle_angular_momentum(field, data):
- return (data[ptype, "particle_mass"] *
- data[ptype, "particle_specific_angular_momentum"].T).T
+ am = data[ptype, "particle_mass"] * data[ptype, "particle_specific_angular_momentum"].T
+ return am.T
+
registry.add_field((ptype, "particle_angular_momentum"),
function=_particle_angular_momentum,
particle_type=True,
@@ -409,9 +353,7 @@
"""
normal = data.get_field_parameter('normal')
center = data.get_field_parameter('center')
- pos = spos
- pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
- pos = pos.T
+ pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"]).T
L, pos = modify_reference_frame(center, normal, P=pos)
return pos
@@ -422,81 +364,6 @@
units="cm",
validators=[ValidateParameter("normal"), ValidateParameter("center")])
- def _particle_position_relative_x(field, data):
- """The x component of the particle positions in a rotated reference
- frame
-
- Relative to the coordinate system defined by the *normal* vector and
- *center* field parameters.
-
- Note that the orientation of the x and y axes are arbitrary.
- """
- normal = data.get_field_parameter('normal')
- center = data.get_field_parameter('center')
- pos = spos
- pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
- pos = pos.T
- L, pos, = modify_reference_frame(center, normal, P=pos)
- pos = pos.T
- return pos[0]
-
- registry.add_field(
- (ptype, "particle_position_relative_x"),
- function=_particle_position_relative_x,
- particle_type=True,
- units="cm",
- validators=[ValidateParameter("normal"), ValidateParameter("center")])
-
- def _particle_position_relative_y(field, data):
- """The y component of the particle positions in a rotated reference
- frame
-
- Relative to the coordinate system defined by the *normal* vector and
- *center* field parameters.
-
- Note that the orientation of the x and y axes are arbitrary.
- """
- normal = data.get_field_parameter('normal')
- center = data.get_field_parameter('center')
- pos = spos
- pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
- pos = pos.T
- L, pos = modify_reference_frame(center, normal, P=pos)
- pos = pos.T
- return pos[1]
-
- registry.add_field((ptype, "particle_position_relative_y"),
- function=_particle_position_relative_y,
- particle_type=True, units="cm",
- validators=[ValidateParameter("normal"),
- ValidateParameter("center")])
-
-
- def _particle_position_relative_z(field, data):
- """The z component of the particle positions in a rotated reference
- frame
-
- Relative to the coordinate system defined by the *normal* vector and
- *center* field parameters.
-
- Note that the orientation of the x and y axes are arbitrary.
- """
- normal = data.get_field_parameter('normal')
- center = data.get_field_parameter('center')
-
- pos = spos
- pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
- pos = pos.T
- L, pos = modify_reference_frame(center, normal, P=pos)
- pos = pos.T
- return pos[2]
-
- registry.add_field((ptype, "particle_position_relative_z"),
- function=_particle_position_relative_z,
- particle_type=True, units="cm",
- validators=[ValidateParameter("normal"),
- ValidateParameter("center")])
-
def _particle_velocity_relative(field, data):
"""The vector particle velocities in an arbitrary coordinate system
@@ -508,10 +375,7 @@
normal = data.get_field_parameter('normal')
center = data.get_field_parameter('center')
bv = data.get_field_parameter("bulk_velocity")
- vel = svel
- vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
- vel = vel - np.reshape(bv, (3, 1))
- vel = vel.T
+ vel = data.ds.arr([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate("xyz")]).T
L, vel = modify_reference_frame(center, normal, V=vel)
return vel
@@ -521,83 +385,22 @@
validators=[ValidateParameter("normal"),
ValidateParameter("center")])
- def _particle_velocity_relative_x(field, data):
- """The x component of the particle velocities in an arbitrary coordinate
- system
- Relative to the coordinate system defined by the *normal* vector,
- *bulk_velocity* vector and *center* field parameters.
+ def _get_coord_funcs_relative(axi, _ptype):
+ def _particle_pos_rel(field, data):
+ return data[_ptype, "particle_position_relative"][:, axi]
+ def _particle_vel_rel(field, data):
+ return data[_ptype, "particle_velocity_relative"][:, axi]
+ return _particle_vel_rel, _particle_pos_rel
+ for axi, ax in enumerate("xyz"):
+ v, p = _get_coord_funcs_relative(axi, ptype)
+ registry.add_field((ptype, "particle_velocity_relative_%s" % ax),
+ particle_type = True, function = v,
+ units = "code_velocity")
+ registry.add_field((ptype, "particle_position_relative_%s" % ax),
+ particle_type = True, function = p,
+ units = "code_length")
- Note that the orientation of the x and y axes are arbitrary.
- """
- normal = data.get_field_parameter('normal')
- center = data.get_field_parameter('center')
- bv = data.get_field_parameter("bulk_velocity")
- vel = svel
- vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
- vel = vel - np.reshape(bv, (3, 1))
- vel = vel.T
- L, vel = modify_reference_frame(center, normal, V=vel)
- vel = vel.T
- return vel[0]
-
- registry.add_field((ptype, "particle_velocity_relative_x"),
- function=_particle_velocity_relative_x,
- particle_type=True, units="cm/s",
- validators=[ValidateParameter("normal"),
- ValidateParameter("center")])
-
- def _particle_velocity_relative_y(field, data):
- """The y component of the particle velocities in an arbitrary coordinate
- system
-
- Relative to the coordinate system defined by the *normal* vector,
- *bulk_velocity* vector and *center* field parameters.
-
- Note that the orientation of the x and y axes are arbitrary.
- """
- normal = data.get_field_parameter('normal')
- center = data.get_field_parameter('center')
- bv = data.get_field_parameter('bulk_velocity')
- vel = svel
- vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
- vel = vel - np.reshape(bv, (3, 1))
- vel = vel.T
- L, vel = modify_reference_frame(center, normal, V=vel)
- vel = vel.T
- return vel[1]
-
- registry.add_field((ptype, "particle_velocity_relative_y"),
- function=_particle_velocity_relative_y,
- particle_type=True, units="cm/s",
- validators=[ValidateParameter("normal"),
- ValidateParameter("center")])
-
- def _particle_velocity_relative_z(field, data):
- """The z component of the particle velocities in an arbitrary coordinate
- system
-
- Relative to the coordinate system defined by the *normal* vector,
- *bulk_velocity* vector and *center* field parameters.
-
- Note that the orientation of the x and y axes are arbitrary.
- """
- normal = data.get_field_parameter('normal')
- center = data.get_field_parameter('center')
- bv = data.get_field_parameter("bulk_velocity")
- vel = svel
- vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
- bv = vel - np.reshape(bv, (3, 1))
- vel = vel.T
- L, vel = modify_reference_frame(center, normal, V=vel)
- vel = vel.T
- return vel[2]
-
- registry.add_field((ptype, "particle_velocity_relative_z"),
- function=_particle_velocity_relative_z,
- particle_type=True, units="cm/s",
- validators=[ValidateParameter("normal"),
- ValidateParameter("center")])
# this is just particle radius but we add it with an alias for the sake of
# consistent naming
@@ -625,8 +428,7 @@
"""
normal = data.get_field_parameter("normal")
center = data.get_field_parameter("center")
- pos = spos
- pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
+ pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
pos = pos - np.reshape(center, (3, 1))
return data.ds.arr(get_sph_theta(pos, normal), "")
@@ -655,8 +457,7 @@
"""
normal = data.get_field_parameter("normal")
center = data.get_field_parameter("center")
- pos = spos
- pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
+ pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
pos = pos - np.reshape(center, (3, 1))
return data.ds.arr(get_sph_phi(pos, normal), "")
@@ -687,10 +488,8 @@
normal = data.get_field_parameter('normal')
center = data.get_field_parameter('center')
bv = data.get_field_parameter("bulk_velocity")
- pos = spos
- pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
- vel = svel
- vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
+ pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
+ vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
theta = get_sph_theta(pos, center)
phi = get_sph_phi(pos, center)
pos = pos - np.reshape(center, (3, 1))
@@ -732,10 +531,8 @@
normal = data.get_field_parameter('normal')
center = data.get_field_parameter('center')
bv = data.get_field_parameter("bulk_velocity")
- pos = spos
- pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
- vel = svel
- vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
+ pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
+ vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
theta = get_sph_theta(pos, center)
phi = get_sph_phi(pos, center)
pos = pos - np.reshape(center, (3, 1))
@@ -769,8 +566,8 @@
normal = data.get_field_parameter('normal')
center = data.get_field_parameter('center')
bv = data.get_field_parameter("bulk_velocity")
- pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
- vel = YTArray([data[ptype, svel % ax] for ax in "xyz"])
+ pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
+ vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
phi = get_sph_phi(pos, center)
pos = pos - np.reshape(center, (3, 1))
vel = vel - np.reshape(bv, (3, 1))
@@ -802,7 +599,7 @@
"""
normal = data.get_field_parameter("normal")
center = data.get_field_parameter('center')
- pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
+ pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
pos = pos - np.reshape(center, (3, 1))
return data.ds.arr(get_cyl_r(pos, normal),
'code_length')
@@ -822,7 +619,7 @@
"""
normal = data.get_field_parameter("normal")
center = data.get_field_parameter('center')
- pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
+ pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
pos = pos - np.reshape(center, (3, 1))
return data.ds.arr(get_cyl_theta(pos, normal), "")
@@ -841,7 +638,7 @@
"""
normal = data.get_field_parameter("normal")
center = data.get_field_parameter('center')
- pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
+ pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
pos = pos - np.reshape(center, (3, 1))
return data.ds.arr(get_cyl_z(pos, normal),
'code_length')
@@ -862,10 +659,8 @@
normal = data.get_field_parameter('normal')
center = data.get_field_parameter('center')
bv = data.get_field_parameter("bulk_velocity")
- pos = spos
- pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
- vel = svel
- vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
+ pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
+ vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
theta = get_cyl_theta(pos, center)
pos = pos - np.reshape(center, (3, 1))
vel = vel - np.reshape(bv, (3, 1))
@@ -888,10 +683,8 @@
normal = data.get_field_parameter('normal')
center = data.get_field_parameter('center')
bv = data.get_field_parameter("bulk_velocity")
- pos = spos
- pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
- vel = svel
- vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
+ pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
+ vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
theta = get_cyl_theta(pos, center)
pos = pos - np.reshape(center, (3, 1))
vel = vel - np.reshape(bv, (3, 1))
@@ -924,10 +717,8 @@
normal = data.get_field_parameter('normal')
center = data.get_field_parameter('center')
bv = data.get_field_parameter("bulk_velocity")
- pos = spos
- pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
- vel = svel
- vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
+ pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
+ vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
pos = pos - np.reshape(center, (3, 1))
vel = vel - np.reshape(bv, (3, 1))
cylz = get_cyl_z_component(vel, normal)
@@ -1048,3 +839,4 @@
units = "g/cm**3")
return [field_name]
+
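A note on the pattern used in the particle-field refactor above: the per-component fields are produced by small factories (``_get_spec_ang_mom_comp``, ``_get_coord_funcs_relative``) instead of nine hand-written closures. Passing the loop variable through a function argument is what pins it; a minimal standalone sketch:

.. code-block:: python

   import numpy as np

   def make_component_getter(axi):
       # binding axi through an argument gives each closure its own copy
       def getter(vectors):
           return vectors[:, axi]
       return getter

   getters = [make_component_getter(axi) for axi in range(3)]
   vec = np.arange(6.0).reshape(2, 3)
   print([g(vec) for g in getters])   # columns 0, 1, 2

   # Closures defined directly in the loop would all see the loop
   # variable's final value, since Python closures bind late.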
diff -r b37c9f299b510c79df8bbe6b781548ac3fa161f0 -r 242b9f739613f32ed715b6ec2298476bb88d1f3f yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -709,7 +709,7 @@
pdata = pdata_ftype
# This will update the stream handler too
assign_particle_data(sds, pdata)
-
+
return sds
def load_amr_grids(grid_data, domain_dimensions,
diff -r b37c9f299b510c79df8bbe6b781548ac3fa161f0 -r 242b9f739613f32ed715b6ec2298476bb88d1f3f yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1224,6 +1224,19 @@
v = validate_numpy_wrapper_units(v, arrs)
return v
+def ucross(arr1, arr2, registry=None):
+ """Applies the cross product to two YT arrays.
+
+ This wrapper around numpy.cross preserves units.
+ See the documentation of numpy.cross for full
+ details.
+ """
+
+ v = np.cross(arr1, arr2)
+ units = arr1.units * arr2.units
+ arr = YTArray(v, units, registry=registry)
+ return arr
+
def uintersect1d(arr1, arr2, assume_unique=False):
"""Find the sorted unique elements of the two input arrays.
diff -r b37c9f299b510c79df8bbe6b781548ac3fa161f0 -r 242b9f739613f32ed715b6ec2298476bb88d1f3f yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -301,22 +301,37 @@
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00]])
"""
- if (L == np.array([0, 0, 1.])).all():
- # Whew! Nothing to do!
- if V is None:
+ # First translate the positions to center of mass reference frame.
+ if P is not None:
+ P = P - CoM
+
+ # is the L vector pointing in the Z direction?
+ if np.inner(L[:2], L[:2]) == 0.0:
+ # we check this explicitly because np.inner(LL, LL) is used in a
+ # denominator below; in this branch we only need to decide
+ # whether to flip the z axis
+ if L[2] < 0.0:
+ # this is just a simple flip in direction of the z axis
+ if P is not None:
+ P = -P
+ if V is not None:
+ V = -V
+
+ # return the values
+ if V is None and P is not None:
return L, P
- if P is None:
+ elif P is None and V is not None:
return L, V
else:
return L, P, V
- # First translate the positions to center of mass reference frame.
- if P is not None:
- P = P - CoM
+
+ # Normal vector is not aligned with simulation Z axis
+ # Therefore we are going to have to apply a rotation
# Now find the angle between modified L and the x-axis.
LL = L.copy()
- LL[2] = 0.
- theta = np.arccos(np.inner(LL, [1.,0,0])/np.inner(LL,LL)**.5)
- if L[1] < 0:
+ LL[2] = 0.0
+ theta = np.arccos(np.inner(LL, [1.0, 0.0, 0.0]) / np.inner(LL, LL) ** 0.5)
+ if L[1] < 0.0:
theta = -theta
# Now rotate all the position, velocity, and L vectors by this much around
# the z axis.
@@ -326,16 +341,18 @@
V = rotate_vector_3D(V, 2, theta)
L = rotate_vector_3D(L, 2, theta)
# Now find the angle between L and the z-axis.
- theta = np.arccos(np.inner(L, [0,0,1])/np.inner(L,L)**.5)
+ theta = np.arccos(np.inner(L, [0.0, 0.0, 1.0]) / np.inner(L, L) ** 0.5)
# This time we rotate around the y axis.
if P is not None:
P = rotate_vector_3D(P, 1, theta)
if V is not None:
V = rotate_vector_3D(V, 1, theta)
L = rotate_vector_3D(L, 1, theta)
- if V is None:
+
+ # return the values
+ if V is None and P is not None:
return L, P
- if P is None:
+ elif P is None and V is not None:
return L, V
else:
return L, P, V
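A minimal sketch of what the rewritten ``modify_reference_frame`` does, assuming the keyword signature ``modify_reference_frame(CoM, L, P=None, V=None)`` implied by the callers above (values illustrative):

.. code-block:: python

   import numpy as np
   from yt.utilities.math_utils import modify_reference_frame

   CoM = np.zeros(3)
   L = np.array([1.0, 0.0, 1.0]) / np.sqrt(2.0)   # tilted angular momentum axis
   P = np.array([[1.0, 0.0, 0.0]])                # one particle position

   # rotate about z, then about y, so that L ends up along +z; positions
   # are first shifted to the center-of-mass frame and rotated the same way
   L_new, P_new = modify_reference_frame(CoM, L, P=P)
   print(L_new)   # ~ [0, 0, 1]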
diff -r b37c9f299b510c79df8bbe6b781548ac3fa161f0 -r 242b9f739613f32ed715b6ec2298476bb88d1f3f yt/utilities/physical_ratios.py
--- a/yt/utilities/physical_ratios.py
+++ b/yt/utilities/physical_ratios.py
@@ -16,7 +16,9 @@
mass_sun_grams = 1.98841586e33
temp_sun_kelvin = 5870.0
luminosity_sun_ergs_per_sec = 3.8270e33
-metallicity_sun = 0.02041
+
+# Consistent with solar abundances used in Cloudy
+metallicity_sun = 0.01295
# Conversion Factors: X au * mpc_per_au = Y mpc
# length
diff -r b37c9f299b510c79df8bbe6b781548ac3fa161f0 -r 242b9f739613f32ed715b6ec2298476bb88d1f3f yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -54,6 +54,7 @@
self._type_name = "CuttingPlane"
else:
self._type_name = viewer._plot_type
+ self.aspect = window_plot._aspect
self.font_properties = font_properties
self.font_color = font_color
diff -r b37c9f299b510c79df8bbe6b781548ac3fa161f0 -r 242b9f739613f32ed715b6ec2298476bb88d1f3f yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -20,6 +20,7 @@
from yt.utilities.lib.pixelization_routines import \
pixelize_cylinder
from yt.utilities.lib.api import add_points_to_greyscale_image
+from yt.frontends.stream.api import load_uniform_grid
from . import _MPL
import numpy as np
@@ -73,13 +74,13 @@
To make a projection and then several images, you can generate a
single FRB and then access multiple fields:
- >>> proj = ds.proj(0, "Density")
+ >>> proj = ds.proj(0, "density")
>>> frb1 = FixedResolutionBuffer(proj, (0.2, 0.3, 0.4, 0.5),
- (1024, 1024))
- >>> print frb1["Density"].max()
- 1.0914e-9
- >>> print frb1["Temperature"].max()
- 104923.1
+ ... (1024, 1024))
+ >>> print frb1["density"].max()
+ 1.0914e-9 g/cm**3
+ >>> print frb1["temperature"].max()
+ 104923.1 K
"""
_exclude_fields = ('pz','pdz','dx','x','y','z',
'r', 'dr', 'phi', 'dphi', 'theta', 'dtheta',
@@ -289,7 +290,7 @@
These fields will be pixelized and output.
"""
import h5py
- if fields is None: fields = self.data.keys()
+ if fields is None: fields = list(self.data.keys())
output = h5py.File(filename, "a")
for field in fields:
output.create_dataset(field,data=self[field])
@@ -307,30 +308,68 @@
filename : string
The name of the FITS file to be written.
fields : list of strings
- These fields will be pixelized and output.
+ These fields will be pixelized and output. If ``None``, the keys of the
+ FRB will be used.
clobber : boolean
If the file exists, this governs whether we will overwrite.
other_keys : dictionary, optional
A set of header keys and values to write into the FITS header.
units : string, optional
- the length units that the coordinates are written in, default 'cm'
- If units are set to "deg" then assume that sky coordinates are
- requested.
+ the length units that the coordinates are written in, default 'cm'.
"""
from yt.utilities.fits_image import FITSImageBuffer
- extra_fields = ['x','y','z','px','py','pz','pdx','pdy','pdz','weight_field']
- if fields is None:
- fields = [field[-1] for field in self.data_source.field_data
- if field not in extra_fields]
+ if fields is None: fields = list(self.data.keys())
fib = FITSImageBuffer(self, fields=fields, units=units)
if other_keys is not None:
for k,v in other_keys.items():
fib.update_all_headers(k,v)
fib.writeto(filename, clobber=clobber)
-
+
+ def export_dataset(self, fields=None, nprocs=1):
+ r"""Export a set of pixelized fields to an in-memory dataset that can be
+ analyzed as any other in yt. Unit information and other parameters (e.g.,
+ geometry, current_time, etc.) will be taken from the parent dataset.
+
+ Parameters
+ ----------
+ fields : list of strings, optional
+ These fields will be pixelized and output. If ``None``, the keys of the
+ FRB will be used.
+ nprocs : integer, optional
+ If greater than 1, the data will be decomposed into this number of subarrays.
+
+ Examples
+ --------
+ >>> import yt
+ >>> ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
+ >>> slc = ds.slice(2, 0.0)
+ >>> frb = slc.to_frb((500.,"kpc"), 500)
+ >>> ds2 = frb.export_dataset(fields=["density","temperature"], nprocs=32)
+ """
+ nx, ny = self.buff_size
+ data = {}
+ if fields is None:
+ fields = list(self.keys())
+ for field in fields:
+ arr = self[field]
+ data[field] = (arr.d.T.reshape(nx,ny,1), str(arr.units))
+ bounds = [b.in_units("code_length").v for b in self.bounds]
+ bbox = np.array([[bounds[0],bounds[1]],[bounds[2],bounds[3]],[0.,1.]])
+ return load_uniform_grid(data, [nx,ny,1],
+ length_unit=self.ds.length_unit,
+ bbox=bbox,
+ sim_time=self.ds.current_time.in_units("s").v,
+ mass_unit=self.ds.mass_unit,
+ time_unit=self.ds.time_unit,
+ velocity_unit=self.ds.velocity_unit,
+ magnetic_unit=self.ds.magnetic_unit,
+ periodicity=(False,False,False),
+ geometry=self.ds.geometry,
+ nprocs=nprocs)
+
@property
def limits(self):
rv = dict(x = None, y = None, z = None)
This diff is so big that we needed to truncate the remainder.
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.