[yt-svn] commit/yt: 5 new changesets
commits-noreply at bitbucket.org
commits-noreply at bitbucket.org
Thu Aug 20 09:09:06 PDT 2015
5 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/159dbc0fc78f/
Changeset: 159dbc0fc78f
Branch: yt
User: RicardaBeckmann
Date: 2015-08-04 12:24:29+00:00
Summary: fixed units for ramses with boxlen not equal to 1
Affected #: 1 file
diff -r fffa77d2fdc205b54fb87befaa50ade850f826d6 -r 159dbc0fc78f368fd95b901053fd8a06ca9834a7 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -495,8 +495,8 @@
#still needs to be folded in, as shown below!
boxlen=self.parameters['boxlen']
- length_unit = self.parameters['unit_l'] * boxlen
- density_unit = self.parameters['unit_d']/ boxlen**3
+ length_unit = self.parameters['unit_l']
+ density_unit = self.parameters['unit_d']
# In the mass unit, the factors of boxlen cancel back out, so this
#is equivalent to unit_d*unit_l**3
@@ -518,13 +518,13 @@
self.density_unit = self.quan(density_unit, 'g/cm**3')
self.magnetic_unit = self.quan(magnetic_unit, "gauss")
+ self.pressure_unit = self.quan(pressure_unit, 'dyne/cm**2')
+ self.time_unit = self.quan(time_unit, "s")
self.length_unit = self.quan(length_unit * boxlen, "cm")
self.mass_unit = self.quan(mass_unit, "g")
- self.time_unit = self.quan(time_unit, "s")
self.velocity_unit = self.quan(length_unit, 'cm') / self.time_unit
- self.temperature_unit = (self.velocity_unit**2 * mp *
- mean_molecular_weight_factor / kb)
- self.pressure_unit = self.quan(pressure_unit, 'dyne/cm**2')
+ self.temperature_unit = (self.pressure_unit/self.density_unit*mp/kb)
+
def _parse_parameter_file(self):
# hardcoded for now
https://bitbucket.org/yt_analysis/yt/commits/fd65cad798fe/
Changeset: fd65cad798fe
Branch: yt
User: RicardaBeckmann
Date: 2015-08-12 13:48:14+00:00
Summary: fixed ramses units for boxlen not equal to 1
Affected #: 1 file
diff -r 159dbc0fc78f368fd95b901053fd8a06ca9834a7 -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -491,26 +491,17 @@
"""
Generates the conversion to various physical _units based on the parameter file
"""
- #Please note that for all units given in the info file, the boxlen
- #still needs to be folded in, as shown below!
boxlen=self.parameters['boxlen']
length_unit = self.parameters['unit_l']
density_unit = self.parameters['unit_d']
+ time_unit = self.parameters['unit_t']
- # In the mass unit, the factors of boxlen cancel back out, so this
- #is equivalent to unit_d*unit_l**3
-
- mass_unit = density_unit * length_unit**3
-
- # Cosmological runs are done in lookback conformal time.
- # To convert to proper time, the time unit is calculated from
- # the expansion factor. This is not yet done here!
-
- time_unit = self.parameters['unit_t']
+ mass_unit = density_unit * length_unit**3
magnetic_unit = np.sqrt(4*np.pi * mass_unit /
(time_unit**2 * length_unit))
pressure_unit = density_unit * (length_unit / time_unit)**2
+
# TODO:
# Generalize the temperature field to account for ionization
# For now assume an atomic ideal gas with cosmic abundances (x_H = 0.76)
@@ -520,11 +511,11 @@
self.magnetic_unit = self.quan(magnetic_unit, "gauss")
self.pressure_unit = self.quan(pressure_unit, 'dyne/cm**2')
self.time_unit = self.quan(time_unit, "s")
- self.length_unit = self.quan(length_unit * boxlen, "cm")
self.mass_unit = self.quan(mass_unit, "g")
self.velocity_unit = self.quan(length_unit, 'cm') / self.time_unit
- self.temperature_unit = (self.pressure_unit/self.density_unit*mp/kb)
-
+ self.temperature_unit = (self.velocity_unit**2*mp*
+ mean_molecular_weight_factor/kb).in_units('K')
+ self.length_unit = self.quan(length_unit * boxlen, "cm")
def _parse_parameter_file(self):
# hardcoded for now
https://bitbucket.org/yt_analysis/yt/commits/4eaf2e5d154b/
Changeset: 4eaf2e5d154b
Branch: yt
User: RicardaBeckmann
Date: 2015-08-12 13:49:05+00:00
Summary: merged in newest yt
Affected #: 26 files
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -42,7 +42,18 @@
$ pip install mpi4py
-Once that has been installed, you're all done! You just need to launch your
+If you have an Anaconda installation of yt and there is no MPI library on the
+system you are using try:
+
+.. code-block:: bash
+
+ $ conda install mpi4py
+
+This will install `MPICH2 <https://www.mpich.org/>`_ and will interfere with
+other MPI libraries that are already installed. Therefore, it is preferable to
+use the ``pip`` installation method.
+
+Once mpi4py has been installed, you're all done! You just need to launch your
scripts with ``mpirun`` (or equivalent) and signal to yt that you want to
run them in parallel by invoking the ``yt.enable_parallelism()`` function in
your script. In general, that's all it takes to get a speed benefit on a
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c doc/source/cookbook/tests/test_cookbook.py
--- a/doc/source/cookbook/tests/test_cookbook.py
+++ b/doc/source/cookbook/tests/test_cookbook.py
@@ -34,7 +34,7 @@
try:
subprocess.check_call(cmd)
result = True
- except subprocess.CalledProcessError, e:
+ except subprocess.CalledProcessError as e:
print(("Stdout output:\n", e.output))
result = False
assert result
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c doc/source/index.rst
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -11,6 +11,18 @@
:ref:`sample data for each format <getting-sample-data>` with
:ref:`instructions on how to load and examine each data type <examining-data>`.
+.. raw:: html
+
+ <form action="search.html" method="get" _lpchecked="1">
+ <div class="form-group">
+ <input type="text" name="q" class="form-control" placeholder="Search" style="width: 70%">
+ </div>
+ <input type="hidden" name="check_keywords" value="yes">
+ <input type="hidden" name="area" value="default">
+ </form>
+
+
+
Table of Contents
-----------------
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -39,6 +39,12 @@
have the the necessary compilers installed (e.g. the ``build-essentials``
package on debian and ubuntu).
+.. note::
+ See `Parallel Computation
+ <http://yt-project.org/docs/dev/analyzing/parallel_computation.html>`_
+ for a discussion on using yt in parallel.
+
+
.. _branches-of-yt:
Branches of yt: ``yt``, ``stable``, and ``yt-2.x``
@@ -201,7 +207,8 @@
bash Miniconda-3.3.0-Linux-x86_64.sh
-Make sure that the Anaconda ``bin`` directory is in your path, and then issue:
+For both the Anaconda and Miniconda installations, make sure that the Anaconda
+``bin`` directory is in your path, and then issue:
.. code-block:: bash
@@ -209,6 +216,34 @@
which will install yt along with all of its dependencies.
+Obtaining Source Code
+^^^^^^^^^^^^^^^^^^^^^
+
+There are two ways to get the yt source code when using an Anaconda
+installation.
+
+Option 1:
+
+Clone the yt repository with:
+
+.. code-block:: bash
+
+ hg clone https://bitbucket.org/yt_analysis/yt
+
+Once inside the yt directory, update to the appropriate branch and
+run ``setup.py``. For example, the following commands will allow you
+to see the tip of the development branch.
+
+.. code-block:: bash
+
+ hg up yt
+ python setup.py develop
+
+This will make sure you are running a version of yt corresponding to the
+most up-to-date source code.
+
+Option 2:
+
Recipes to build conda packages for yt are available at
https://github.com/conda/conda-recipes. To build the yt conda recipe, first
clone the conda-recipes repository
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c doc/source/reference/command-line.rst
--- a/doc/source/reference/command-line.rst
+++ b/doc/source/reference/command-line.rst
@@ -42,9 +42,9 @@
~~~~~~~~~~~~~~~~~~~~~~
The :code:`yt` command-line tool allows you to access some of yt's basic
-funcionality without opening a python interpreter. The tools is a collection of
+functionality without opening a python interpreter. The tools is a collection of
subcommands. These can quickly making plots of slices and projections through a
-dataset, updating yt's codebase, print basic statistics about a dataset, laucnh
+dataset, updating yt's codebase, print basic statistics about a dataset, launch
an IPython notebook session, and more. To get a quick list of what is
available, just type:
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c doc/source/reference/python_introduction.rst
--- a/doc/source/reference/python_introduction.rst
+++ b/doc/source/reference/python_introduction.rst
@@ -34,7 +34,7 @@
called on it. ``dir()`` will return the available commands and objects that
can be directly called, and ``dir(something)`` will return information about
all the commands that ``something`` provides. This probably sounds a bit
-opaque, but it will become clearer with time -- it's also probably heldsul to
+opaque, but it will become clearer with time -- it's also probably helpful to
call ``help`` on any or all of the objects we create during this orientation.
To start up Python, at your prompt simply type:
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c setup.py
--- a/setup.py
+++ b/setup.py
@@ -114,7 +114,7 @@
# End snippet
######
-VERSION = "3.3-dev"
+VERSION = "3.3.dev0"
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -669,7 +669,7 @@
registry.add_field(
(ptype, "particle_velocity_cylindrical_radius"),
- function=_particle_velocity_spherical_radius,
+ function=_particle_velocity_cylindrical_radius,
particle_type=True,
units="cm/s",
validators=[ValidateParameter("normal"), ValidateParameter("center")])
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -28,10 +28,7 @@
OctreeSubset
from yt.geometry.oct_container import \
ARTOctreeContainer
-from .fields import \
- ARTFieldInfo
-from yt.utilities.definitions import \
- mpc_conversion
+from .fields import ARTFieldInfo
from yt.utilities.io_handler import \
io_registry
from yt.utilities.lib.misc_utilities import \
@@ -50,14 +47,10 @@
from .io import b2t
from .io import a2b
-from yt.utilities.definitions import \
- mpc_conversion, sec_conversion
from yt.utilities.io_handler import \
io_registry
from yt.fields.field_info_container import \
FieldInfoContainer, NullFunc
-from yt.utilities.physical_constants import \
- mass_hydrogen_cgs, sec_per_Gyr
class ARTIndex(OctreeIndex):
@@ -300,6 +293,7 @@
self.iOctFree, self.nOct = fpu.read_vector(f, 'i', '>')
self.child_grid_offset = f.tell()
self.parameters.update(amr_header_vals)
+ amr_header_vals = None
# estimate the root level
float_center, fl, iocts, nocts, root_level = _read_art_level_info(
f,
@@ -347,18 +341,18 @@
# setup standard simulation params yt expects to see
self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
- self.omega_lambda = amr_header_vals['Oml0']
- self.omega_matter = amr_header_vals['Om0']
- self.hubble_constant = amr_header_vals['hubble']
- self.min_level = amr_header_vals['min_level']
- self.max_level = amr_header_vals['max_level']
+ self.omega_lambda = self.parameters['Oml0']
+ self.omega_matter = self.parameters['Om0']
+ self.hubble_constant = self.parameters['hubble']
+ self.min_level = self.parameters['min_level']
+ self.max_level = self.parameters['max_level']
if self.limit_level is not None:
self.max_level = min(
- self.limit_level, amr_header_vals['max_level'])
+ self.limit_level, self.parameters['max_level'])
if self.force_max_level is not None:
self.max_level = self.force_max_level
self.hubble_time = 1.0/(self.hubble_constant*100/3.08568025e19)
- self.current_time = b2t(self.parameters['t']) * sec_per_Gyr
+ self.current_time = self.quan(b2t(self.parameters['t']), 'Gyr')
self.gamma = self.parameters["gamma"]
mylog.info("Max level is %02i", self.max_level)
@@ -600,7 +594,7 @@
# self.max_level = self.force_max_level
self.hubble_time = 1.0/(self.hubble_constant*100/3.08568025e19)
self.parameters['t'] = a2b(self.parameters['aexpn'])
- self.current_time = b2t(self.parameters['t']) * sec_per_Gyr
+ self.current_time = self.quan(b2t(self.parameters['t']), 'Gyr')
self.gamma = self.parameters["gamma"]
mylog.info("Max level is %02i", self.max_level)
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -110,7 +110,7 @@
]
star_struct = [
- ('>d', ('tdum', 'adum')),
+ ('>d', ('t_stars', 'a_stars')),
('>i', 'nstars'),
('>d', ('ws_old', 'ws_oldi')),
('>f', 'particle_mass'),
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -55,7 +55,7 @@
("particle_mass", ("code_mass", [], None)),
("particle_index", ("", [], None)),
("particle_species", ("", ["particle_type"], None)),
- ("particle_creation_time", ("code_time", [], None)),
+ ("particle_creation_time", ("Gyr", [], None)),
("particle_mass_initial", ("code_mass", [], None)),
("particle_metallicity1", ("", [], None)),
("particle_metallicity2", ("", [], None)),
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -31,6 +31,7 @@
from yt.utilities.physical_constants import sec_per_year
from yt.utilities.lib.geometry_utils import compute_morton
from yt.geometry.oct_container import _ORDER_MAX
+from yt.units.yt_array import YTQuantity
class IOHandlerART(BaseIOHandler):
@@ -300,18 +301,19 @@
def interpolate_ages(data, file_stars, interp_tb=None, interp_ages=None,
current_time=None):
if interp_tb is None:
- tdum, adum = read_star_field(file_stars,
- field="tdum")
+ t_stars, a_stars = read_star_field(file_stars,
+ field="t_stars")
# timestamp of file should match amr timestamp
if current_time:
- tdiff = b2t(tdum)-current_time/(sec_per_year*1e9)
- if np.abs(tdiff) < 1e-4:
+ tdiff = YTQuantity(b2t(t_stars), 'Gyr') - current_time.in_units('Gyr')
+ if np.abs(tdiff) > 1e-4:
mylog.info("Timestamp mismatch in star " +
- "particle header")
+ "particle header: %s", tdiff)
mylog.info("Interpolating ages")
interp_tb, interp_ages = b2t(data)
+ interp_tb = YTArray(interp_tb, 'Gyr')
+ interp_ages = YTArray(interp_ages, 'Gyr')
temp = np.interp(data, interp_tb, interp_ages)
- temp *= 1.0e9*sec_per_year
return interp_tb, interp_ages, temp
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -241,19 +241,18 @@
index['particle_r'] = 11
index['particle_mdeut'] = 12
index['particle_n'] = 13
- index['particle_mdot'] = 14,
+ index['particle_mdot'] = 14
index['particle_burnstate'] = 15
- elif len(line.strip().split()) == 18:
+ elif (len(line.strip().split()) == 18 or len(line.strip().split()) == 19):
# these are the newer style, add luminosity as well
index['particle_mlast'] = 10
index['particle_r'] = 11
index['particle_mdeut'] = 12
index['particle_n'] = 13
- index['particle_mdot'] = 14,
- index['particle_burnstate'] = 15,
+ index['particle_mdot'] = 14
+ index['particle_burnstate'] = 15
index['particle_luminosity']= 16
-
else:
# give a warning if none of the above apply:
mylog.warning('Warning - could not figure out particle output file')
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -264,7 +264,11 @@
# or the byte swapped equivalents (65536 and 134217728).
# The int32 following the header (first 4+256 bytes) must equal this
# number.
- (rhead,) = struct.unpack('<I',f.read(4))
+ try:
+ (rhead,) = struct.unpack('<I',f.read(4))
+ except struct.error:
+ f.close()
+ return False, 1
# Use value to check endianess
if rhead == 256:
endianswap = '<'
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c yt/frontends/stream/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/stream/tests/test_outputs.py
@@ -0,0 +1,45 @@
+"""
+Tests for loading in-memory datasets
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import shutil
+import tempfile
+import unittest
+
+from yt.testing import assert_raises
+from yt.utilities.answer_testing.framework import data_dir_load
+from yt.utilities.exceptions import YTOutputNotIdentified
+
+class TestEmptyLoad(unittest.TestCase):
+
+ def setUp(self):
+ self.tmpdir = tempfile.mkdtemp()
+ self.curdir = os.getcwd()
+ os.chdir(self.tmpdir)
+
+ # create 0 byte file
+ open("empty_file", "a")
+
+ # create empty directory
+ os.makedirs("empty_directory")
+
+ def tearDown(self):
+ os.chdir(self.curdir)
+ shutil.rmtree(self.tmpdir)
+
+ def test_load_empty_file(self):
+ assert_raises(YTOutputNotIdentified, data_dir_load, "not_a_file")
+ assert_raises(YTOutputNotIdentified, data_dir_load, "empty_file")
+ assert_raises(YTOutputNotIdentified, data_dir_load, "empty_directory")
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c yt/frontends/tipsy/data_structures.py
--- a/yt/frontends/tipsy/data_structures.py
+++ b/yt/frontends/tipsy/data_structures.py
@@ -255,7 +255,7 @@
f.seek(0, os.SEEK_SET)
#Read in the header
t, n, ndim, ng, nd, ns = struct.unpack("<diiiii", f.read(28))
- except IOError:
+ except (IOError, struct.error):
return False, 1
endianswap = "<"
#Check Endianness
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -351,7 +351,8 @@
np.int64_t domain_ind
):
- cdef int i, j, k, ii
+ cdef int i, j, k
+ cdef np.uint64_t ii
cdef int ind[3]
cdef np.float64_t rpos[3]
cdef np.float64_t rdds[3][2]
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -446,7 +446,7 @@
@cython.wraparound(False)
@cython.cdivision(True)
cdef int fill_mask_selector(self, np.float64_t left_edge[3],
- np.float64_t right_edge[3],
+ np.float64_t right_edge[3],
np.float64_t dds[3], int dim[3],
np.ndarray[np.uint8_t, ndim=3, cast=True] child_mask,
np.ndarray[np.uint8_t, ndim=3] mask,
@@ -603,9 +603,12 @@
return mask.view("bool")
def __hash__(self):
+ # https://bitbucket.org/yt_analysis/yt/issues/1052/field-access-tests-fail-under-python3
+ # http://www.eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx
cdef np.int64_t hash_val = 0
for v in self._hash_vals() + self._base_hash():
- hash_val ^= hash(v)
+ # FNV hash cf. http://www.isthe.com/chongo/tech/comp/fnv/index.html
+ hash_val = (hash_val * 16777619) ^ hash(v)
return hash_val
def _hash_vals(self):
@@ -1107,7 +1110,7 @@
def _hash_vals(self):
return (("norm_vec[0]", self.norm_vec[0]),
- ("norm_vec[1]", self.norm_vec[1]),
+ ("norm_vec[1]", self.norm_vec[1]),
("norm_vec[2]", self.norm_vec[2]),
("d", self.d))
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c yt/units/tests/test_units.py
--- a/yt/units/tests/test_units.py
+++ b/yt/units/tests/test_units.py
@@ -21,6 +21,7 @@
assert_approx_equal, assert_array_almost_equal_nulp, \
assert_allclose, assert_raises
from nose.tools import assert_true
+import operator
from sympy import Symbol
from yt.testing import fake_random_ds
@@ -30,7 +31,7 @@
# functions
from yt.units.unit_object import get_conversion_factor
# classes
-from yt.units.unit_object import Unit, UnitParseError
+from yt.units.unit_object import Unit, UnitParseError, InvalidUnitOperation
# objects
from yt.units.unit_lookup_table import \
default_unit_symbol_lut, unit_prefixes, prefixable_units
@@ -441,3 +442,10 @@
yield assert_true, u4.is_code_unit
yield assert_true, not u5.is_code_unit
yield assert_true, not u6.is_code_unit
+
+def test_temperature_offsets():
+ u1 = Unit('degC')
+ u2 = Unit('degF')
+
+ assert_raises(InvalidUnitOperation, operator.mul, u1, u2)
+ assert_raises(InvalidUnitOperation, operator.div, u1, u2)
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -1009,7 +1009,18 @@
V = YTQuantity(1.0, "statV")
V_mks = V.to_equivalent("V", "SI")
yield assert_array_almost_equal, V_mks.v, 1.0e8*V.v/speed_of_light_cm_per_s
-
+
+def test_ytarray_coercion():
+ a = YTArray([1, 2, 3], 'cm')
+ q = YTQuantity(3, 'cm')
+ na = np.array([1, 2, 3])
+
+ assert_isinstance(a*q, YTArray)
+ assert_isinstance(q*na, YTArray)
+ assert_isinstance(q*3, YTQuantity)
+ assert_isinstance(q*np.float64(3), YTQuantity)
+ assert_isinstance(q*np.array(3), YTQuantity)
+
def test_numpy_wrappers():
a1 = YTArray([1, 2, 3], 'cm')
a2 = YTArray([2, 3, 4, 5, 6], 'cm')
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -290,9 +290,9 @@
base_offset = 0.0
if self.base_offset or u.base_offset:
- if u.dimensions is dims.temperature and self.is_dimensionless:
+ if u.dimensions is temperature and self.is_dimensionless:
base_offset = u.base_offset
- elif self.dimensions is dims.temperature and u.is_dimensionless:
+ elif self.dimensions is temperature and u.is_dimensionless:
base_offset = self.base_offset
else:
raise InvalidUnitOperation("Quantities with units of Farhenheit "
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1085,7 +1085,7 @@
if ret_class is YTQuantity:
# This happens if you do ndarray * YTQuantity. Explicitly
# casting to YTArray avoids creating a YTQuantity with size > 1
- return YTArray(np.array(out_arr, unit))
+ return YTArray(np.array(out_arr), unit)
return ret_class(np.array(out_arr, copy=False), unit)
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -388,7 +388,7 @@
--------
>>> co = Cosmology()
- >>> print co.t_from_z(4.e17)
+ >>> print co.z_from_t(4.e17)
"""
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c yt/utilities/hierarchy_inspection.py
--- a/yt/utilities/hierarchy_inspection.py
+++ b/yt/utilities/hierarchy_inspection.py
@@ -32,6 +32,9 @@
counters = [Counter(mro) for mro in mros]
+ if len(counters) == 0:
+ return counters
+
count = reduce(lambda x, y: x + y, counters)
return [x for x in count.keys() if count[x] == 1]
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c yt/utilities/quantities.py
--- a/yt/utilities/quantities.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""
-Some old field names.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-from yt.units.yt_array import YTArray
-from yt.units.unit_object import Unit
-from yt.utilities.exceptions import YTUnitOperationError
-
-
-class Quantity(YTArray):
- """
- A physical quantity. Attaches units to a scalar.
-
- """
- def __new__(cls, input_array, input_units=None):
- if isinstance(input_array, Quantity):
- return input_array
-
- # Input array is an already formed ndarray instance
- # We first cast to be our class type
- obj = np.asarray(input_array).view(cls)
-
- # Restrict the array to a scalar.
- if obj.size != 1:
- raise ValueError("A Quantity can contain only one element. The "
- "caller provided the array %s with %s elements."
- % (obj, obj.size))
-
- return YTArray.__new__(cls, input_array, input_units)
diff -r fd65cad798fec7b8feefd5b55278dc3d7a21e0a6 -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -290,7 +290,7 @@
self.axes_drawn = True
#=============================================================================
-
+
def axis_box_yt(self, plot, units=None, bare_axes=False,
tickcolor=None, xlabel=None, ylabel=None, **kwargs):
r"""Wrapper around DualEPS.axis_box to automatically fill in the
@@ -355,7 +355,32 @@
_ylabel = 'y (%s)' % (units)
if tickcolor == None:
_tickcolor = pyx.color.cmyk.white
- elif isinstance(plot, (ProfilePlot, PhasePlot)):
+ elif isinstance(plot, ProfilePlot):
+ subplot = plot.axes.values()[0]
+ # limits for axes
+ xlimits = subplot.get_xlim()
+ _xrange = (YTQuantity(xlimits[0], 'm'), YTQuantity(xlimits[1], 'm')) # unit hardcoded but afaik it is not used anywhere so it doesn't matter
+ if list(plot.axes.ylim.viewvalues())[0][0] is None:
+ ylimits = subplot.get_ylim()
+ else:
+ ylimits = list(plot.axes.ylim.viewvalues())[0]
+ _yrange = (YTQuantity(ylimits[0], 'm'), YTQuantity(ylimits[1], 'm')) # unit hardcoded but afaik it is not used anywhere so it doesn't matter
+ # axis labels
+ xaxis = subplot.xaxis
+ _xlabel = pyxize_label(xaxis.label.get_text())
+ yaxis = subplot.yaxis
+ _ylabel = pyxize_label(yaxis.label.get_text())
+ # set log if necessary
+ if subplot.get_xscale() == "log":
+ _xlog = True
+ else:
+ _xlog = False
+ if subplot.get_yscale() == "log":
+ _ylog = True
+ else:
+ _ylog = False
+ _tickcolor = None
+ elif isinstance(plot, PhasePlot):
k = plot.plots.keys()[0]
_xrange = plot[k].axes.get_xlim()
_yrange = plot[k].axes.get_ylim()
@@ -502,10 +527,7 @@
_p1 = plot.plots[self.field].figure
force_square = True
elif isinstance(plot, ProfilePlot):
- plot._redraw_image()
- # Remove colorbar
- _p1 = plot._figure
- _p1.delaxes(_p1.axes[1])
+ _p1 = plot.figures.items()[0][1]
elif isinstance(plot, np.ndarray):
fig = plt.figure()
iplot = plt.figimage(plot)
@@ -689,6 +711,9 @@
>>> d.colorbar_yt(p)
>>> d.save_fig()
"""
+
+ if isinstance(plot, ProfilePlot):
+ raise RuntimeError("When using ProfilePlots you must either set yt_nocbar=True or provide colorbar flags so that the profiles don't have colorbars")
_cmap = None
if field != None:
self.field = plot.data_source._determine_fields(field)[0]
@@ -1108,6 +1133,7 @@
if yaxis_flags[index] != None:
yaxis = yaxis_flags[index]
if _yt:
+ this_plot._setup_plots()
if xlabels != None:
xlabel = xlabels[i]
else:
https://bitbucket.org/yt_analysis/yt/commits/d95710885486/
Changeset: d95710885486
Branch: yt
User: RicardaBeckmann
Date: 2015-08-12 13:53:52+00:00
Summary: added comments to ramses unit calculation
Affected #: 1 file
diff -r 4eaf2e5d154b7231a8965489f1fe76b41327d41c -r d957108854865b34279e44f29c2f50304adfbe74 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -491,12 +491,13 @@
"""
Generates the conversion to various physical _units based on the parameter file
"""
-
+ # loading the units from the info file
boxlen=self.parameters['boxlen']
length_unit = self.parameters['unit_l']
density_unit = self.parameters['unit_d']
time_unit = self.parameters['unit_t']
+ # calculating derived units (except velocity and temperature, done below)
mass_unit = density_unit * length_unit**3
magnetic_unit = np.sqrt(4*np.pi * mass_unit /
(time_unit**2 * length_unit))
@@ -515,6 +516,8 @@
self.velocity_unit = self.quan(length_unit, 'cm') / self.time_unit
self.temperature_unit = (self.velocity_unit**2*mp*
mean_molecular_weight_factor/kb).in_units('K')
+
+    # Only the length unit gets scaled by a factor of boxlen
self.length_unit = self.quan(length_unit * boxlen, "cm")
def _parse_parameter_file(self):
https://bitbucket.org/yt_analysis/yt/commits/e0b16d0403a7/
Changeset: e0b16d0403a7
Branch: yt
User: chummels
Date: 2015-08-20 16:08:57+00:00
Summary: Merged in RicardaBeckmann/yt (pull request #1694)
Fixing units for ramses when boxlen>1.
Affected #: 1 file
diff -r cfc98fddd9d791464330adf166d4229749dab22c -r e0b16d0403a7812a8a302f87327ab0fe5ff07728 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -491,26 +491,18 @@
"""
Generates the conversion to various physical _units based on the parameter file
"""
- #Please note that for all units given in the info file, the boxlen
- #still needs to be folded in, as shown below!
+ # loading the units from the info file
+ boxlen=self.parameters['boxlen']
+ length_unit = self.parameters['unit_l']
+ density_unit = self.parameters['unit_d']
+ time_unit = self.parameters['unit_t']
- boxlen=self.parameters['boxlen']
- length_unit = self.parameters['unit_l'] * boxlen
- density_unit = self.parameters['unit_d']/ boxlen**3
-
- # In the mass unit, the factors of boxlen cancel back out, so this
- #is equivalent to unit_d*unit_l**3
-
- mass_unit = density_unit * length_unit**3
-
- # Cosmological runs are done in lookback conformal time.
- # To convert to proper time, the time unit is calculated from
- # the expansion factor. This is not yet done here!
-
- time_unit = self.parameters['unit_t']
+ # calculating derived units (except velocity and temperature, done below)
+ mass_unit = density_unit * length_unit**3
magnetic_unit = np.sqrt(4*np.pi * mass_unit /
(time_unit**2 * length_unit))
pressure_unit = density_unit * (length_unit / time_unit)**2
+
# TODO:
# Generalize the temperature field to account for ionization
# For now assume an atomic ideal gas with cosmic abundances (x_H = 0.76)
@@ -518,13 +510,15 @@
self.density_unit = self.quan(density_unit, 'g/cm**3')
self.magnetic_unit = self.quan(magnetic_unit, "gauss")
+ self.pressure_unit = self.quan(pressure_unit, 'dyne/cm**2')
+ self.time_unit = self.quan(time_unit, "s")
+ self.mass_unit = self.quan(mass_unit, "g")
+ self.velocity_unit = self.quan(length_unit, 'cm') / self.time_unit
+ self.temperature_unit = (self.velocity_unit**2*mp*
+ mean_molecular_weight_factor/kb).in_units('K')
+
+    # Only the length unit gets scaled by a factor of boxlen
self.length_unit = self.quan(length_unit * boxlen, "cm")
- self.mass_unit = self.quan(mass_unit, "g")
- self.time_unit = self.quan(time_unit, "s")
- self.velocity_unit = self.quan(length_unit, 'cm') / self.time_unit
- self.temperature_unit = (self.velocity_unit**2 * mp *
- mean_molecular_weight_factor / kb)
- self.pressure_unit = self.quan(pressure_unit, 'dyne/cm**2')
def _parse_parameter_file(self):
# hardcoded for now
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the yt-svn
mailing list