[yt-svn] commit/yt: 8 new changesets
commits-noreply at bitbucket.org
commits-noreply at bitbucket.org
Mon Oct 5 11:47:24 PDT 2015
8 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/5fdef9e225b4/
Changeset: 5fdef9e225b4
Branch: stable
User: nmearl
Date: 2015-09-03 13:55:28+00:00
Summary: Backporting PR #1668 https://bitbucket.org/yt_analysis/yt/pull-requests/1668
Affected #: 2 files
diff -r 694464438371a38369fefff2e181f3209e08e859 -r 5fdef9e225b4742d060412a46e2546482016d53b doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1257,8 +1257,8 @@
.. _specifying-cosmology-tipsy:
-Specifying Tipsy Cosmological Parameters
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Specifying Tipsy Cosmological Parameters and Setting Default Units
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Cosmological parameters can be specified to Tipsy to enable computation of
default units. The parameters recognized are of this form:
@@ -1270,5 +1270,27 @@
'omega_matter': 0.272,
'hubble_constant': 0.702}
-These will be used set the units, if they are specified.
+If you wish to set the default units directly, you can do so by using the
+``unit_base`` keyword in the load statement.
+ .. code-block:: python
+
+ import yt
+    ds = yt.load(filename, unit_base={'length': (1.0, 'Mpc')})
+
+
+Loading Cosmological Simulations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you are not using a parameter file (i.e. non-Gasoline users), then you must
+use keyword ``cosmology_parameters`` when loading your data set to indicate to
+yt that it is a cosmological data set. If you do not wish to set any
+non-default cosmological parameters, you may pass an empty dictionary.
+
+ .. code-block:: python
+
+ import yt
+ ds = yt.load(filename, cosmology_parameters={})
+
+
+
diff -r 694464438371a38369fefff2e181f3209e08e859 -r 5fdef9e225b4742d060412a46e2546482016d53b yt/frontends/tipsy/data_structures.py
--- a/yt/frontends/tipsy/data_structures.py
+++ b/yt/frontends/tipsy/data_structures.py
@@ -32,6 +32,7 @@
from yt.utilities.physical_constants import \
G, \
cm_per_kpc
+from yt import YTQuantity
from .fields import \
TipsyFieldInfo
@@ -167,9 +168,9 @@
self.domain_dimensions = np.ones(3, "int32") * nz
periodic = self.parameters.get('bPeriodic', True)
period = self.parameters.get('dPeriod', None)
- comoving = self.parameters.get('bComove', False)
self.periodicity = (periodic, periodic, periodic)
- if comoving and period is None:
+ self.comoving = self.parameters.get('bComove', False)
+ if self.comoving and period is None:
period = 1.0
if self.bounding_box is None:
if periodic and period is not None:
@@ -186,7 +187,9 @@
self.domain_left_edge = bbox[:,0]
self.domain_right_edge = bbox[:,1]
- if comoving:
+ # If the cosmology parameters dictionary got set when data is
+ # loaded, we can assume it's a cosmological data set
+ if self.comoving or self._cosmology_parameters is not None:
cosm = self._cosmology_parameters or {}
self.scale_factor = hvals["time"]#In comoving simulations, time stores the scale factor a
self.cosmological_simulation = 1
@@ -224,8 +227,15 @@
self.length_unit = self.quan(lu, 'kpc')*self.scale_factor
self.mass_unit = self.quan(mu, 'Msun')
density_unit = self.mass_unit/ (self.length_unit/self.scale_factor)**3
- # Gasoline's hubble constant, dHubble0, is stored units of proper code time.
- self.hubble_constant *= np.sqrt(G.in_units('kpc**3*Msun**-1*s**-2')*density_unit).value/(3.2407793e-18)
+
+ # If self.comoving is set, we know this is a gasoline data set,
+ # and we do the conversion on the hubble constant.
+ if self.comoving:
+            # Gasoline's hubble constant, dHubble0, is stored in units of
+            # proper code time.
+ self.hubble_constant *= np.sqrt(G.in_units(
+ 'kpc**3*Msun**-1*s**-2') * density_unit).value / (
+ 3.2407793e-18)
cosmo = Cosmology(self.hubble_constant,
self.omega_matter, self.omega_lambda)
self.current_time = cosmo.hubble_time(self.current_redshift)
@@ -237,6 +247,24 @@
density_unit = self.mass_unit / self.length_unit**3
self.time_unit = 1.0 / np.sqrt(G * density_unit)
+ # If unit base is defined by the user, override all relevant units
+ if self._unit_base is not None:
+ length = self._unit_base.get('length', self.length_unit)
+ length = self.quan(*length) if isinstance(length, tuple) else self.quan(length)
+ self.length_unit = length
+
+ mass = self._unit_base.get('mass', self.mass_unit)
+ mass = self.quan(*mass) if isinstance(mass, tuple) else self.quan(mass)
+ self.mass_unit = mass
+
+ density_unit = self.mass_unit / self.length_unit**3
+ self.time_unit = 1.0 / np.sqrt(G * density_unit)
+
+ time = self._unit_base.get('time', self.time_unit)
+ time = self.quan(*time) if isinstance(time, tuple) else self.quan(time)
+ self.time_unit = time
+
+
@staticmethod
def _validate_header(filename):
'''
https://bitbucket.org/yt_analysis/yt/commits/47a5dfb9c1e4/
Changeset: 47a5dfb9c1e4
Branch: stable
User: ngoldbaum
Date: 2015-09-11 02:15:27+00:00
Summary: Backporting PR #1742 https://bitbucket.org/yt_analysis/yt/pull-requests/1742
Affected #: 1 file
diff -r 5fdef9e225b4742d060412a46e2546482016d53b -r 47a5dfb9c1e4cdf9e84a4b2d7fd6812873863e07 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -84,6 +84,9 @@
* :func:`~yt.testing.assert_equal` can operate on arrays.
* :func:`~yt.testing.assert_almost_equal` can operate on arrays and accepts a
relative allowable difference.
+* :func:`~yt.testing.assert_allclose_units` raises an error if two arrays are
+ not equal up to a desired absolute or relative tolerance. This wraps numpy's
+ assert_allclose to correctly verify unit consistency as well.
* :func:`~yt.testing.amrspace` provides the ability to create AMR grid
structures.
* :func:`~yt.testing.expand_keywords` provides the ability to iterate over
@@ -99,9 +102,10 @@
#. Inside that directory, create a new python file prefixed with ``test_`` and
including the name of the functionality.
#. Inside that file, create one or more routines prefixed with ``test_`` that
- accept no arguments. These should ``yield`` a set of values of the form
- ``function``, ``arguments``. For example ``yield assert_equal, 1.0, 1.0``
- would evaluate that 1.0 equaled 1.0.
+ accept no arguments. These should ``yield`` a tuple of the form
+ ``function``, ``argument_one``, ``argument_two``, etc. For example
+ ``yield assert_equal, 1.0, 1.0`` would be captured by nose as a test that
+ asserts that 1.0 is equal to 1.0.
#. Use ``fake_random_ds`` to test on datasets, and be sure to test for
several combinations of ``nproc``, so that domain decomposition can be
tested as well.
@@ -113,6 +117,53 @@
``yt/data_objects/tests/test_covering_grid.py``, which covers a great deal of
functionality.
+Debugging failing tests
+^^^^^^^^^^^^^^^^^^^^^^^
+
+When writing new tests, often one exposes bugs or writes a test incorrectly,
+causing an exception to be raised or a failed test. To help debug issues like
+this, ``nose`` can drop into a debugger whenever a test fails or raises an
+exception. This can be accomplished by passing ``--pdb`` and ``--pdb-failures``
+to the ``nosetests`` executable. These options will drop into the pdb debugger
+whenever an error is raised or a failure happens, respectively. Inside the
+debugger you can interactively print out variables and go up and down the call
+stack to determine the context for your failure or error.
+
+.. code-block:: bash
+
+ nosetests --pdb --pdb-failures
+
+In addition, one can debug more crudely using print statements. To do this,
+you can add print statements to the code as normal. However, the test runner
+will capture all print output by default. To ensure that output gets printed
+to your terminal while the tests are running, pass ``-s`` to the ``nosetests``
+executable.
+
+Lastly, to quickly debug a specific failing test, it is best to only run that
+one test during your testing session. This can be accomplished by explicitly
+passing the name of the test function or class to ``nosetests``, as in the
+following example:
+
+.. code-block:: bash
+
+ $ nosetests yt.visualization.tests.test_plotwindow:TestSetWidth
+
+This nosetests invocation will only run the tests defined by the
+``TestSetWidth`` class.
+
+Finally, to determine which test is failing while the tests are running, it helps
+to run the tests in "verbose" mode. This can be done by passing the ``-v`` option
+to the ``nosetests`` executable.
+
+All of the above ``nosetests`` options can be combined. So, for example to run
+the ``TestSetWidth`` tests with verbose output, letting the output of print
+statements come out on the terminal prompt, and enabling pdb debugging on errors
+or test failures, one would do:
+
+.. code-block:: bash
+
+ $ nosetests --pdb --pdb-failures -v -s yt.visualization.tests.test_plotwindow:TestSetWidth
+
.. _answer_testing:
Answer Testing
@@ -122,8 +173,8 @@
^^^^^^^^^^^^^^^^^^^^^^^
Answer tests test **actual data**, and many operations on that data, to make
-sure that answers don't drift over time. This is how we will be testing
-frontends, as opposed to operations, in yt.
+sure that answers don't drift over time. This is how we test frontends, as
+opposed to operations, in yt.
.. _run_answer_testing:
@@ -133,20 +184,104 @@
The very first step is to make a directory and copy over the data against which
you want to test. Currently, we test:
+NMSU ART
+~~~~~~~~
+
+* ``D9p_500/10MpcBox_HartGal_csf_a0.500.d``
+
+ARTIO
+~~~~~
+
+* ``sizmbhloz-clref04SNth-rs9_a0.9011/sizmbhloz-clref04SNth-rs9_a0.9011.art``
+
+Athena
+~~~~~~
+
+* ``ShockCloud/id0/Cloud.0050.vtk``
+* ``MHDBlast/id0/Blast.0100.vtk``
+* ``RamPressureStripping/id0/rps.0062.vtk``
+* ``MHDSloshing/virgo_low_res.0054.vtk``
+
+Boxlib
+~~~~~~
+
+* ``RadAdvect/plt00000``
+* ``RadTube/plt00500``
+* ``StarParticles/plrd01000``
+
+Chombo
+~~~~~~
+
+* ``TurbBoxLowRes/data.0005.3d.hdf5``
+* ``GaussianCloud/data.0077.3d.hdf5``
+* ``IsothermalSphere/data.0000.3d.hdf5``
+* ``ZeldovichPancake/plt32.2d.hdf5``
+* ``KelvinHelmholtz/data.0004.hdf5``
+
+Enzo
+~~~~
+
* ``DD0010/moving7_0010`` (available in ``tests/`` in the yt distribution)
* ``IsolatedGalaxy/galaxy0030/galaxy0030``
+* ``enzo_tiny_cosmology/DD0046/DD0046``
+* ``enzo_cosmology_plus/DD0046/DD0046``
+
+FITS
+~~~~
+
+* ``radio_fits/grs-50-cube.fits``
+* ``UnigridData/velocity_field_20.fits``
+
+FLASH
+~~~~~
+
* ``WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030``
* ``GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300``
-* ``TurbBoxLowRes/data.0005.3d.hdf5``
-* ``GaussianCloud/data.0077.3d.hdf5``
-* ``RadAdvect/plt00000``
-* ``RadTube/plt00500``
+
+Gadget
+~~~~~~
+
+* ``IsothermalCollapse/snap_505``
+* ``IsothermalCollapse/snap_505.hdf5``
+* ``GadgetDiskGalaxy/snapshot_200.hdf5``
+
+Halo Catalog
+~~~~~~~~~~~~
+
+* ``owls_fof_halos/groups_001/group_001.0.hdf5``
+* ``owls_fof_halos/groups_008/group_008.0.hdf5``
+* ``gadget_fof_halos/groups_005/fof_subhalo_tab_005.0.hdf5``
+* ``gadget_fof_halos/groups_042/fof_subhalo_tab_042.0.hdf5``
+* ``rockstar_halos/halos_0.0.bin``
+
+MOAB
+~~~~
+
+* ``c5/c5.h5m``
+
+
+RAMSES
+~~~~~~
+
+* ``output_00080/info_00080.txt``
+
+Tipsy
+~~~~~
+
+* ``halo1e11_run1.00400/halo1e11_run1.00400``
+* ``agora_1e11.00400/agora_1e11.00400``
+* ``TipsyGalaxy/galaxy.00300``
+
+OWLS
+~~~~
+
+* ``snapshot_033/snap_033.0.hdf5``
These datasets are available at http://yt-project.org/data/.
Next, modify the file ``~/.yt/config`` to include a section ``[yt]``
with the parameter ``test_data_dir``. Set this to point to the
-directory with the test data you want to compare. Here is an example
+directory with the test data you want to test with. Here is an example
config file:
.. code-block:: none
@@ -154,47 +289,45 @@
[yt]
test_data_dir = /Users/tomservo/src/yt-data
-More data will be added over time. To run the tests, you can import the yt
-module and invoke ``yt.run_nose()`` with a new keyword argument:
+More data will be added over time. To run the answer tests, you must first
+generate a set of test answers locally on a "known good" revision, then update
+to the revision you want to test, and run the tests again using the locally
+stored answers.
-.. code-block:: python
-
- import yt
- yt.run_nose(run_answer_tests=True)
-
-If you have installed yt using ``python setup.py develop`` you can also
-optionally invoke nose using the ``nosetests`` command line interface:
+Let's focus on running the answer tests for a single frontend. It's possible to
+run the answer tests for **all** the frontends, but due to the large number of
+test datasets we currently use, this is not normally done except on the yt
+project's continuous integration server.
.. code-block:: bash
$ cd $YT_HG
- $ nosetests --with-answer-testing
+ $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store frontends.tipsy
-In either case, the current gold standard results will be downloaded from the
-rackspace cloud and compared to what is generated locally. The results from a
-nose testing session are pretty straightforward to understand, the results for
-each test are printed directly to STDOUT. If a test passes, nose prints a
-period, F if a test fails, and E if the test encounters an exception or errors
-out for some reason. If you want to also run tests for the 'big' datasets,
-then you can use the ``answer_big_data`` keyword argument:
-
-.. code-block:: python
-
- import yt
- yt.run_nose(run_answer_tests=True, answer_big_data=True)
-
-or, in the base directory of the yt mercurial repository:
+This command will create a set of local answers from the tipsy frontend tests
+and store them in ``$HOME/Documents/test`` (this can but does not have to be the
+same directory as the ``test_data_dir`` configuration variable defined in your
+``.yt/config`` file). To run the tipsy frontend's answer tests using a different
+yt changeset, update to that changeset, recompile if necessary, and run the
+tests using the following command:
.. code-block:: bash
- $ nosetests --with-answer-testing --answer-big-data
+ $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test frontends.tipsy
-It's also possible to only run the answer tests for one frontend. For example,
-to run only the enzo answers tests, one can do,
+The results from a nose testing session are pretty straightforward to
+understand, the results for each test are printed directly to STDOUT. If a test
+passes, nose prints a period, F if a test fails, and E if the test encounters an
+exception or errors out for some reason. Explicit descriptions for each test
+are also printed if you pass ``-v`` to the ``nosetests`` executable. If you
+want to also run tests for the 'big' datasets, then you will need to pass
+``--answer-big-data`` to ``nosetests``. For example, to run the tests for the
+OWLS frontend, do the following:
.. code-block:: bash
- $ nosetests --with-answer-testing yt.frontends.enzo
+ $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store --answer-big-data frontends.owls
+
How to Write Answer Tests
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -260,38 +393,21 @@
directory.
* Create a new routine that operates similarly to the routines you can see
- in Enzo's outputs.
+ in Enzo's output tests.
* This routine should test a number of different fields and data objects.
* The test routine itself should be decorated with
- ``@requires_ds(file_name)`` This decorate can accept the argument
- ``big_data`` for if this data is too big to run all the time.
+ ``@requires_ds(path_to_test_dataset)``. This decorator can accept the
+ argument ``big_data=True`` if the test is expensive.
- * There are ``small_patch_amr`` and ``big_patch_amr`` routines that
- you can yield from to execute a bunch of standard tests. This is where
- you should start, and then yield additional tests that stress the
- outputs in whatever ways are necessary to ensure functionality.
+ * There are ``small_patch_amr`` and ``big_patch_amr`` routines that you can
+ yield from to execute a bunch of standard tests. In addition we have created
+ ``sph_answer`` which is more suited for particle SPH datasets. This is where
+ you should start, and then yield additional tests that stress the outputs in
+ whatever ways are necessary to ensure functionality.
* **All tests should be yielded!**
If you are adding to a frontend that has a few tests already, skip the first
two steps.
-
-How to Upload Answers
-^^^^^^^^^^^^^^^^^^^^^
-
-To upload answers you can execute this command:
-
-.. code-block:: bash
-
- $ nosetests --with-answer-testing frontends/enzo/ --answer-store --answer-name=whatever
-
-The current version of the gold standard can be found in the variable
-``_latest`` inside ``yt/utilities/answer_testing/framework.py`` As of
-the time of this writing, it is ``gold007`` Note that the name of the
-suite of results is now disconnected from the dataset's name, so you
-can upload multiple outputs with the same name and not collide.
-
-To upload answers, you **must** have the package boto installed, and you
-**must** have an Amazon key provided by Matt. Contact Matt for these keys.
https://bitbucket.org/yt_analysis/yt/commits/616cdbad53a4/
Changeset: 616cdbad53a4
Branch: stable
User: ngoldbaum
Date: 2015-09-10 15:49:48+00:00
Summary: Fixing a typo in the particle filter docs
Affected #: 1 file
diff -r 47a5dfb9c1e4cdf9e84a4b2d7fd6812873863e07 -r 616cdbad53a4fdca65d930738aa35cddb2617eef doc/source/analyzing/filtering.rst
--- a/doc/source/analyzing/filtering.rst
+++ b/doc/source/analyzing/filtering.rst
@@ -111,7 +111,7 @@
.. code-block:: python
- @yt.particle_filter(requires=["particle_type], filtered_type='all')
+ @yt.particle_filter(requires=["particle_type"], filtered_type='all')
def stars(pfilter, data):
filter = data[(pfilter.filtered_type, "particle_type")] == 2
return filter
https://bitbucket.org/yt_analysis/yt/commits/9334f87a59b8/
Changeset: 9334f87a59b8
Branch: stable
User: ngoldbaum
Date: 2015-09-15 20:49:59+00:00
Summary: [testing] Avoid test failure if test_data_dir isn't configured
Affected #: 1 file
diff -r 616cdbad53a4fdca65d930738aa35cddb2617eef -r 9334f87a59b88662b9523d93ad0a4cf1df4be567 yt/frontends/stream/tests/test_outputs.py
--- a/yt/frontends/stream/tests/test_outputs.py
+++ b/yt/frontends/stream/tests/test_outputs.py
@@ -19,7 +19,7 @@
import unittest
from yt.testing import assert_raises
-from yt.utilities.answer_testing.framework import data_dir_load
+from yt.convenience import load
from yt.utilities.exceptions import YTOutputNotIdentified
class TestEmptyLoad(unittest.TestCase):
@@ -40,6 +40,6 @@
shutil.rmtree(self.tmpdir)
def test_load_empty_file(self):
- assert_raises(YTOutputNotIdentified, data_dir_load, "not_a_file")
- assert_raises(YTOutputNotIdentified, data_dir_load, "empty_file")
- assert_raises(YTOutputNotIdentified, data_dir_load, "empty_directory")
+ assert_raises(YTOutputNotIdentified, load, "not_a_file")
+ assert_raises(YTOutputNotIdentified, load, "empty_file")
+ assert_raises(YTOutputNotIdentified, load, "empty_directory")
https://bitbucket.org/yt_analysis/yt/commits/219bd0434020/
Changeset: 219bd0434020
Branch: stable
User: MatthewTurk
Date: 2015-09-16 19:19:11+00:00
Summary: If the parent has not had its dx set up yet, set it up here.
Affected #: 1 file
diff -r 9334f87a59b88662b9523d93ad0a4cf1df4be567 -r 219bd0434020864f2dc117803e1a445b85fc2185 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -136,6 +136,8 @@
# that dx=dy=dz, at least here. We probably do elsewhere.
id = self.id - self._id_offset
if self.Parent is not None:
+ if not hasattr(self.Parent, 'dds'):
+ self.Parent._setup_dx()
self.dds = self.Parent.dds.ndarray_view() / self.ds.refine_by
else:
LE, RE = self.index.grid_left_edge[id,:], \
https://bitbucket.org/yt_analysis/yt/commits/4af9f8a26a3f/
Changeset: 4af9f8a26a3f
Branch: stable
User: ngoldbaum
Date: 2015-09-17 18:46:56+00:00
Summary: Ensure plots are valid after invalidating the figure
Affected #: 1 file
diff -r 219bd0434020864f2dc117803e1a445b85fc2185 -r 4af9f8a26a3fb5effcc4cb7a3721951e736934e6 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -52,6 +52,7 @@
args[0].plots[field].figure = None
args[0].plots[field].axes = None
args[0].plots[field].cax = None
+ args[0]._setup_plots()
return rv
return newfunc
https://bitbucket.org/yt_analysis/yt/commits/2c04b44a4371/
Changeset: 2c04b44a4371
Branch: stable
User: ngoldbaum
Date: 2015-09-18 19:28:46+00:00
Summary: Ensure ARTIOIndex.get_smallest_dx() returns a quantity with units
Affected #: 1 file
diff -r 4af9f8a26a3fb5effcc4cb7a3721951e736934e6 -r 2c04b44a4371302581c47edb79e294614e806172 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -176,7 +176,8 @@
"""
Returns (in code units) the smallest cell size in the simulation.
"""
- return 1.0/(2**self.max_level)
+ return (self.dataset.domain_width /
+ (self.dataset.domain_dimensions * 2**(self.max_level))).min()
def convert(self, unit):
return self.dataset.conversion_factors[unit]
https://bitbucket.org/yt_analysis/yt/commits/96b3f211c60d/
Changeset: 96b3f211c60d
Branch: stable
User: atmyers
Date: 2015-09-18 21:14:20+00:00
Summary: removing some dead code from the Chombo frontend
Affected #: 1 file
diff -r 2c04b44a4371302581c47edb79e294614e806172 -r 96b3f211c60dc6fb4405080e9470231d4bcb8fcb yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -185,27 +185,10 @@
for alias in aliases:
self.alias((ptype, alias), (ptype, f), units = output_units)
- # We'll either have particle_position or particle_position_[xyz]
- if (ptype, "particle_position") in self.field_list or \
- (ptype, "particle_position") in self.field_aliases:
- particle_scalar_functions(ptype,
- "particle_position", "particle_velocity",
- self)
- else:
- # We need to check to make sure that there's a "known field" that
- # overlaps with one of the vector fields. For instance, if we are
- # in the Stream frontend, and we have a set of scalar position
- # fields, they will overlap with -- and be overridden by -- the
- # "known" vector field that the frontend creates. So the easiest
- # thing to do is to simply remove the on-disk field (which doesn't
- # exist) and replace it with a derived field.
- if (ptype, "particle_position") in self and \
- self[ptype, "particle_position"]._function == NullFunc:
- self.pop((ptype, "particle_position"))
- particle_vector_functions(ptype,
- ["particle_position_%s" % ax for ax in 'xyz'],
- ["particle_velocity_%s" % ax for ax in 'xyz'],
- self)
+ ppos_fields = ["particle_position_%s" % ax for ax in 'xyz']
+ pvel_fields = ["particle_velocity_%s" % ax for ax in 'xyz']
+ particle_vector_functions(ptype, ppos_fields, pvel_fields, self)
+
particle_deposition_functions(ptype, "particle_position",
"particle_mass", self)
standard_particle_fields(self, ptype)
@@ -219,7 +202,7 @@
self.add_output_field(field,
units = self.ds.field_units.get(field, ""),
particle_type = True)
- self.setup_smoothed_fields(ptype,
+ self.setup_smoothed_fields(ptype,
num_neighbors=num_neighbors,
ftype=ftype)
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the yt-svn
mailing list