[yt-svn] commit/yt: 14 new changesets
commits-noreply at bitbucket.org
commits-noreply at bitbucket.org
Thu May 8 16:56:19 PDT 2014
14 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/4e1b073f6e17/
Changeset: 4e1b073f6e17
Branch: yt-3.0
User: hegan
Date: 2014-05-07 19:18:18
Summary: Fixed bug in printing angular momentum vector in cookbook recipe
Affected #: 1 file
diff -r 2baafb904700f2eaccb6af2f6d545db6c2a286ca -r 4e1b073f6e17b5a8999cc2f6c2c548d1ca6629ff doc/source/cookbook/aligned_cutting_plane.py
--- a/doc/source/cookbook/aligned_cutting_plane.py
+++ b/doc/source/cookbook/aligned_cutting_plane.py
@@ -11,7 +11,7 @@
# Get the angular momentum vector for the sphere.
L = sp.quantities.angular_momentum_vector()
-print "Angular momentum vector: %s" % (L)
+print "Angular momentum vector: {0}".format(L)
# Create an OffAxisSlicePlot on the object with the L vector as its normal
p = OffAxisSlicePlot(pf, L, "density", sp.center, (25, "kpc"))
https://bitbucket.org/yt_analysis/yt/commits/ddd5a64d63b7/
Changeset: ddd5a64d63b7
Branch: yt-3.0
User: hegan
Date: 2014-05-07 19:43:15
Summary: Fixed units for disk creation
Affected #: 1 file
diff -r 4e1b073f6e17b5a8999cc2f6c2c548d1ca6629ff -r ddd5a64d63b71ad2d2469851fc4dc03cdfa0d02e doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -14,7 +14,8 @@
# thing! This is a convenience parameter that prepares an object that covers
# the whole domain. Note, though, that it will load on demand and not before!
data_source = pf.disk([0.5, 0.5, 0.5], [0., 0., 1.],
- 8./pf.units['kpc'], 1./pf.units['kpc'])
+ pf.quan(8,'code_length'),
+ pf.quan(1., 'code_length'))
# Now we set some sane min/max values between which we want to find contours.
# This is how we tell the clump finder what to look for -- it won't look for
https://bitbucket.org/yt_analysis/yt/commits/9d14420edf23/
Changeset: 9d14420edf23
Branch: yt-3.0
User: hegan
Date: 2014-05-07 21:19:10
Summary: Removed because this is outdated.
Affected #: 1 file
diff -r ddd5a64d63b71ad2d2469851fc4dc03cdfa0d02e -r 9d14420edf2328907b39ad6b1d2029e463ac7d65 doc/source/cookbook/halo_finding.py
--- a/doc/source/cookbook/halo_finding.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""
-This script shows the simplest way of getting halo information. For more
-information, see :ref:`halo_finding`.
-"""
-from yt.mods import * # set up our namespace
-
-pf = load("Enzo_64/DD0043/data0043")
-
-halos = HaloFinder(pf)
-halos.write_out("%s_halos.txt" % pf)
https://bitbucket.org/yt_analysis/yt/commits/7d3caaedd6db/
Changeset: 7d3caaedd6db
Branch: yt-3.0
User: hegan
Date: 2014-05-07 21:21:35
Summary: Removed because this is outdated.
Affected #: 1 file
diff -r 9d14420edf2328907b39ad6b1d2029e463ac7d65 -r 7d3caaedd6dbe25a23a16c4ecad577209366d1f7 doc/source/cookbook/halo_mass_info.py
--- a/doc/source/cookbook/halo_mass_info.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""
-Title: Halo Mass Info
-Description: This recipe finds halos and then prints out information about
- them. Note that this recipe will take advantage of multiple CPUs
- if executed with mpirun and supplied the --parallel command line
- argument.
-Outputs: [RedshiftOutput0006_halo_info.txt]
-"""
-from yt.mods import *
-
-fn = "Enzo_64/RD0006/RedshiftOutput0006" # parameter file to load
-pf = load(fn) # load data
-
-# First we run our halo finder to identify all the halos in the dataset. This
-# can take arguments, but the default are pretty sane.
-halos = HaloFinder(pf)
-
-f = open("%s_halo_info.txt" % pf, "w")
-
-# Now, for every halo, we get the baryon data and examine it.
-for halo in halos:
- # The halo has a property called 'get_sphere' that obtains a sphere
- # centered on the point of maximum density (or the center of mass, if that
- # argument is supplied) and with the radius the maximum particle radius of
- # that halo.
- sphere = halo.get_sphere()
- # We use the quantities[] method to get the total mass in baryons and in
- # particles.
- baryon_mass, particle_mass = sphere.quantities["TotalQuantity"](
- ["cell_mass", "particle_mass"])
- # Now we print out this information, along with the ID.
- f.write("Total mass in HOP group %s is %0.5e (gas = %0.5e / particles = %0.5e)\n" % \
- (halo.id, baryon_mass + particle_mass, baryon_mass, particle_mass))
-f.close()
https://bitbucket.org/yt_analysis/yt/commits/f046313464bb/
Changeset: f046313464bb
Branch: yt-3.0
User: hegan
Date: 2014-05-07 21:27:21
Summary: Updated to reflect new halo annotating
Affected #: 1 file
diff -r 7d3caaedd6dbe25a23a16c4ecad577209366d1f7 -r f046313464bb6cce9921fccf1bc7706ce6841298 doc/source/cookbook/halo_particle_plotting.py
--- a/doc/source/cookbook/halo_particle_plotting.py
+++ b/doc/source/cookbook/halo_particle_plotting.py
@@ -4,11 +4,13 @@
"""
from yt.mods import * # set up our namespace
-pf = load("Enzo_64/DD0043/data0043")
+data_pf = load("Enzo_64/RD0006/RedshiftOutput0006")
-halos = HaloFinder(pf)
+halo_pf = load('rockstar_halos/halos_0.0.bin')
+
+hc - HaloCatalog(halos_pf = halo_pf)
+hc.load()
p = ProjectionPlot(pf, "x", "density")
-p.annotate_hop_circles(halos)
-p.annotate_hop_particles(halos, max_number=100)
+p.annotate_halos(hc)
p.save()
https://bitbucket.org/yt_analysis/yt/commits/bb9ba48d89dc/
Changeset: bb9ba48d89dc
Branch: yt-3.0
User: hegan
Date: 2014-05-07 21:29:26
Summary: Updated to reflect new halo annotating
Affected #: 1 file
diff -r f046313464bb6cce9921fccf1bc7706ce6841298 -r bb9ba48d89dca4bf6e170e91d9f71be432ff90be doc/source/cookbook/halo_plotting.py
--- a/doc/source/cookbook/halo_plotting.py
+++ b/doc/source/cookbook/halo_plotting.py
@@ -4,10 +4,13 @@
"""
from yt.mods import * # set up our namespace
-pf = load("Enzo_64/DD0043/data0043")
+data_pf = load("Enzo_64/RD0006/RedshiftOutput0006")
-halos = HaloFinder(pf)
+halo_pf = load('rockstar_halos/halos_0.0.bin')
-p = ProjectionPlot(pf, "z", "density")
-p.annotate_hop_circles(halos)
+hc - HaloCatalog(halos_pf = halo_pf)
+hc.load()
+
+p = ProjectionPlot(pf, "x", "density")
+p.annotate_halos(hc)
p.save()
https://bitbucket.org/yt_analysis/yt/commits/4fd1e2a98dad/
Changeset: 4fd1e2a98dad
Branch: yt-3.0
User: hegan
Date: 2014-05-07 21:29:45
Summary: These aren't two separate methods anymore
Affected #: 1 file
diff -r bb9ba48d89dca4bf6e170e91d9f71be432ff90be -r 4fd1e2a98dad5dfc1e5c66124831d4f1a9380a9c doc/source/cookbook/halo_particle_plotting.py
--- a/doc/source/cookbook/halo_particle_plotting.py
+++ /dev/null
@@ -1,16 +0,0 @@
-"""
-This is a simple mechanism for overplotting the particles belonging only to
-halos. For more information, see :ref:`halo_finding`.
-"""
-from yt.mods import * # set up our namespace
-
-data_pf = load("Enzo_64/RD0006/RedshiftOutput0006")
-
-halo_pf = load('rockstar_halos/halos_0.0.bin')
-
-hc - HaloCatalog(halos_pf = halo_pf)
-hc.load()
-
-p = ProjectionPlot(pf, "x", "density")
-p.annotate_halos(hc)
-p.save()
https://bitbucket.org/yt_analysis/yt/commits/9aa1e590554e/
Changeset: 9aa1e590554e
Branch: yt-3.0
User: hegan
Date: 2014-05-07 22:32:14
Summary: typo
Affected #: 1 file
diff -r 4fd1e2a98dad5dfc1e5c66124831d4f1a9380a9c -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c doc/source/yt3differences.rst
--- a/doc/source/yt3differences.rst
+++ b/doc/source/yt3differences.rst
@@ -27,7 +27,7 @@
FieldName)``.
* Previously, yt would use "Enzo-isms" for field names. We now very
specifically define fields as lowercase with underscores. For instance,
- what used to be ``VelocityMagnitude`` would not be ``velocity_magnitude``.
+ what used to be ``VelocityMagnitude`` would now be ``velocity_magnitude``.
* Particles are either named by their type or default to the type ``io``.
* Axis names are now at the *end* of field names, not the beginning.
``x-velocity`` is now ``velocity_x``.
https://bitbucket.org/yt_analysis/yt/commits/256f8dad8684/
Changeset: 256f8dad8684
Branch: yt-3.0
User: hegan
Date: 2014-05-08 18:03:50
Summary: merged in yt-3.0
Affected #: 108 files
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -15,11 +15,12 @@
details on the relative differences between these halo finders see
:ref:`halo_finding`.
-.. code-block::
- from yt.mods import *
- from yt.analysis_modules.halo_analysis.api import HaloCatalog
- data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
- hc = HaloCatalog(data_pf=data_pf, finder_method='hop')
+.. code-block:: python
+
+ from yt.mods import *
+ from yt.analysis_modules.halo_analysis.api import HaloCatalog
+ data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
+ hc = HaloCatalog(data_pf=data_pf, finder_method='hop')
A halo catalog may also be created from already run rockstar outputs.
This method is not implemented for previously run friends-of-friends or
@@ -28,9 +29,10 @@
only specify the file output by the processor with ID 0. Note that the
argument for supplying a rockstar output is `halos_pf`, not `data_pf`.
-.. code-block::
- halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
- hc = HaloCatalog(halos_pf=halos_pf)
+.. code-block:: python
+
+ halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
+ hc = HaloCatalog(halos_pf=halos_pf)
Although supplying only the binary output of the rockstar halo finder
is sufficient for creating a halo catalog, it is not possible to find
@@ -38,10 +40,11 @@
with the dataset from which they were found, supply arguments to both
halos_pf and data_pf.
-.. code-block::
- halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
- data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
- hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf)
+.. code-block:: python
+
+ halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
+ data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
+ hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf)
A data container can also be supplied via keyword data_source,
associated with either dataset, to control the spatial region in
@@ -72,9 +75,9 @@
An example of adding a filter:
-.. code-block::
+.. code-block:: python
- hc.add_filter('quantity_value', 'particle_mass', '>', 1E13, 'Msun')
+ hc.add_filter('quantity_value', 'particle_mass', '>', 1E13, 'Msun')
Currently quantity_value is the only available filter, but more can be
added by the user by defining a function that accepts a halo object as
@@ -85,20 +88,21 @@
An example of defining your own filter:
-.. code-block::
- def my_filter_function(halo):
-
- # Define condition for filter
- filter_value = True
-
- # Return a boolean value
- return filter_value
+.. code-block:: python
- # Add your filter to the filter registry
- add_filter("my_filter", my_filter_function)
+ def my_filter_function(halo):
+
+ # Define condition for filter
+ filter_value = True
+
+ # Return a boolean value
+ return filter_value
- # ... Later on in your script
- hc.add_filter("my_filter")
+ # Add your filter to the filter registry
+ add_filter("my_filter", my_filter_function)
+
+ # ... Later on in your script
+ hc.add_filter("my_filter")
Quantities
----------
@@ -118,25 +122,26 @@
An example of adding a quantity:
-.. code-block::
- hc.add_quantity('center_of_mass')
+.. code-block:: python
+
+ hc.add_quantity('center_of_mass')
An example of defining your own quantity:
-.. code-block::
+.. code-block:: python
- def my_quantity_function(halo):
- # Define quantity to return
- quantity = 5
-
- return quantity
+ def my_quantity_function(halo):
+ # Define quantity to return
+ quantity = 5
+
+ return quantity
- # Add your filter to the filter registry
- add_quantity('my_quantity', my_quantity_function)
+ # Add your filter to the filter registry
+ add_quantity('my_quantity', my_quantity_function)
- # ... Later on in your script
- hc.add_quantity("my_quantity")
+ # ... Later on in your script
+ hc.add_quantity("my_quantity")
Callbacks
---------
@@ -150,8 +155,9 @@
An example of using a pre-defined callback where we create a sphere for
each halo with a radius that is twice the saved “radius”.
-.. code-block::
- hc.add_callback("sphere", factor=2.0)
+.. code-block:: python
+
+ hc.add_callback("sphere", factor=2.0)
Currently available callbacks are located in
yt/analysis_modules/halo_analysis/halo_callbacks.py. New callbacks may
@@ -161,19 +167,19 @@
An example of defining your own callback:
-.. code-block::
+.. code-block:: python
- def my_callback_function(halo):
- # Perform some callback actions here
- x = 2
- halo.x_val = x
+ def my_callback_function(halo):
+ # Perform some callback actions here
+ x = 2
+ halo.x_val = x
- # Add the callback to the callback registry
- add_callback('my_callback', my_callback_function)
+ # Add the callback to the callback registry
+ add_callback('my_callback', my_callback_function)
- # ... Later on in your script
- hc.add_callback("my_callback")
+ # ... Later on in your script
+ hc.add_callback("my_callback")
Running Analysis
================
@@ -181,8 +187,9 @@
After all callbacks, quantities, and filters have been added, the
analysis begins with a call to HaloCatalog.create.
-.. code-block::
- hc.create()
+.. code-block:: python
+
+ hc.create()
The save_halos keyword determines whether the actual Halo objects
are saved after analysis on them has completed or whether just the
@@ -206,13 +213,14 @@
standard call to load. Any side data, such as profiles, can be reloaded
with a load_profiles callback and a call to HaloCatalog.load.
-.. code-block::
- hpf = load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
- hc = HaloCatalog(halos_pf=hpf,
- output_dir="halo_catalogs/catalog_0046")
- hc.add_callback("load_profiles", output_dir="profiles",
- filename="virial_profiles")
- hc.load()
+.. code-block:: python
+
+ hpf = load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
+ hc = HaloCatalog(halos_pf=hpf,
+ output_dir="halo_catalogs/catalog_0046")
+ hc.add_callback("load_profiles", output_dir="profiles",
+ filename="virial_profiles")
+ hc.load()
Summary
=======
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
--- a/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
+++ b/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
@@ -1,7 +1,7 @@
{
"metadata": {
"name": "",
- "signature": "sha256:9e7ac626b3609cf5f3fb2d4ebc6e027ed923ab1c22f0acc212e42fc7535e3205"
+ "signature": "sha256:b7541e0167001c6dd74306c8490385ace7bdb0533a829286f0505c0b24c67f16"
},
"nbformat": 3,
"nbformat_minor": 0,
@@ -296,6 +296,166 @@
"language": "python",
"metadata": {},
"outputs": []
+ },
+ {
+ "cell_type": "heading",
+ "level": 3,
+ "metadata": {},
+ "source": [
+ "Round-Trip Conversions to and from AstroPy's Units System"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Finally, a `YTArray` or `YTQuantity` may be converted to an [AstroPy quantity](http://astropy.readthedocs.org/en/latest/units/), which is a NumPy array or a scalar associated with units from AstroPy's units system. You may use this facility if you have AstroPy installed. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Some examples of converting from AstroPy units to yt:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "from astropy import units as u\n",
+ "x = 42.0 * u.meter\n",
+ "y = YTQuantity(x)\n",
+ "y2 = YTQuantity.from_astropy(x) # Another way to create the quantity"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "print x, type(x)\n",
+ "print y, type(y)\n",
+ "print y2, type(y2)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "a = np.random.random(size=10) * u.km/u.s\n",
+ "b = YTArray(a)\n",
+ "b2 = YTArray.from_astropy(a) # Another way to create the quantity"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "print a, type(a)\n",
+ "print b, type(b)\n",
+ "print b2, type(b2)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "It also works the other way around, converting a `YTArray` or `YTQuantity` to an AstroPy quantity via the method `to_astropy`. For arrays:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "temp = dd[\"temperature\"]\n",
+ "atemp = temp.to_astropy()"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "print temp, type(temp)\n",
+ "print atemp, type(atemp)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "and quantities:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "from yt.utilities.physical_constants import kboltz\n",
+ "kb = kboltz.to_astropy()"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "print kboltz, type(kboltz)\n",
+ "print kb, type(kb)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a sanity check, you can show that it works round-trip:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "k1 = kboltz.to_astropy()\n",
+ "k2 = YTQuantity(kb)\n",
+ "print k1 == k2"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "c = YTArray(a)\n",
+ "d = c.to_astropy()\n",
+ "print a == d"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
}
],
"metadata": {}
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 doc/source/cookbook/aligned_cutting_plane.py
--- a/doc/source/cookbook/aligned_cutting_plane.py
+++ b/doc/source/cookbook/aligned_cutting_plane.py
@@ -1,12 +1,12 @@
-from yt.mods import *
+import yt
# Load the dataset.
-pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
# Create a 1 kpc radius sphere, centered on the max density. Note that this
# sphere is very small compared to the size of our final plot, and it has a
# non-axially aligned L vector.
-sp = pf.sphere("center", (15.0, "kpc"))
+sp = ds.sphere("center", (15.0, "kpc"))
# Get the angular momentum vector for the sphere.
L = sp.quantities.angular_momentum_vector()
@@ -14,5 +14,5 @@
print "Angular momentum vector: {0}".format(L)
# Create an OffAxisSlicePlot on the object with the L vector as its normal
-p = OffAxisSlicePlot(pf, L, "density", sp.center, (25, "kpc"))
+p = yt.OffAxisSlicePlot(ds, L, "density", sp.center, (25, "kpc"))
p.save()
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -1,26 +1,28 @@
-## Using AMRKDTree Homogenized Volumes to examine large datasets at lower resolution.
+# Using AMRKDTree Homogenized Volumes to examine large datasets
+# at lower resolution.
# In this example we will show how to use the AMRKDTree to take a simulation
# with 8 levels of refinement and only use levels 0-3 to render the dataset.
# We begin by loading up yt, and importing the AMRKDTree
+import numpy as np
-from yt.mods import *
+import yt
from yt.utilities.amr_kdtree.api import AMRKDTree
# Load up a data and print out the maximum refinement level
-pf = load('IsolatedGalaxy/galaxy0030/galaxy0030')
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-kd = AMRKDTree(pf)
+kd = AMRKDTree(ds)
# Print out the total volume of all the bricks
print kd.count_volume()
# Print out the number of cells
print kd.count_cells()
-tf = ColorTransferFunction((-30, -22))
-cam = pf.h.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256,
- tf, volume=kd)
-tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5], colormap = 'RdBu_r')
+tf = yt.ColorTransferFunction((-30, -22))
+cam = ds.h.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256,
+ tf, volume=kd)
+tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5], colormap='RdBu_r')
cam.snapshot("v1.png", clip_ratio=6.0)
# This rendering is okay, but lets say I'd like to improve it, and I don't want
@@ -28,7 +30,7 @@
# generate a low resolution version of the AMRKDTree and pass that in to the
# camera. We do this by specifying a maximum refinement level of 3.
-kd_low_res = AMRKDTree(pf, l_max=3)
+kd_low_res = AMRKDTree(ds, max_level=3)
print kd_low_res.count_volume()
print kd_low_res.count_cells()
@@ -42,21 +44,21 @@
# rendering until we find something we like.
tf.clear()
-tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
- alpha=np.ones(4,dtype='float64'), colormap = 'RdBu_r')
+tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
+ alpha=np.ones(4, dtype='float64'), colormap='RdBu_r')
cam.snapshot("v2.png", clip_ratio=6.0)
# This looks better. Now let's try turning on opacity.
-tf.grey_opacity=True
+tf.grey_opacity = True
cam.snapshot("v4.png", clip_ratio=6.0)
# That seemed to pick out som interesting structures. Now let's bump up the
# opacity.
tf.clear()
-tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
- alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
+tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
+ alpha=10.0 * np.ones(4, dtype='float64'), colormap='RdBu_r')
cam.snapshot("v3.png", clip_ratio=6.0)
# This looks pretty good, now lets go back to the full resolution AMRKDTree
@@ -65,4 +67,3 @@
cam.snapshot("v4.png", clip_ratio=6.0)
# This looks great!
-
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 doc/source/cookbook/average_value.py
--- a/doc/source/cookbook/average_value.py
+++ b/doc/source/cookbook/average_value.py
@@ -1,12 +1,12 @@
-from yt.mods import *
+import yt
-pf = load("IsolatedGalaxy/galaxy0030/galaxy0030") # load data
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") # load data
field = "temperature" # The field to average
-weight = "cell_mass" # The weight for the average
+weight = "cell_mass" # The weight for the average
-dd = pf.h.all_data() # This is a region describing the entire box,
- # but note it doesn't read anything in yet!
+dd = ds.h.all_data() # This is a region describing the entire box,
+ # but note it doesn't read anything in yet!
# We now use our 'quantities' call to get the average quantity
average_value = dd.quantities["WeightedAverageQuantity"](field, weight)
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 doc/source/cookbook/boolean_data_objects.py
--- a/doc/source/cookbook/boolean_data_objects.py
+++ b/doc/source/cookbook/boolean_data_objects.py
@@ -1,23 +1,23 @@
-from yt.mods import * # set up our namespace
+import yt
-pf = load("Enzo_64/DD0043/data0043") # load data
+ds = yt.load("Enzo_64/DD0043/data0043") # load data
# Make a few data ojbects to start.
-re1 = pf.region([0.5, 0.5, 0.5], [0.4, 0.4, 0.4], [0.6, 0.6, 0.6])
-re2 = pf.region([0.5, 0.5, 0.5], [0.5, 0.5, 0.5], [0.6, 0.6, 0.6])
-sp1 = pf.sphere([0.5, 0.5, 0.5], 0.05)
-sp2 = pf.sphere([0.1, 0.2, 0.3], 0.1)
+re1 = ds.region([0.5, 0.5, 0.5], [0.4, 0.4, 0.4], [0.6, 0.6, 0.6])
+re2 = ds.region([0.5, 0.5, 0.5], [0.5, 0.5, 0.5], [0.6, 0.6, 0.6])
+sp1 = ds.sphere([0.5, 0.5, 0.5], 0.05)
+sp2 = ds.sphere([0.1, 0.2, 0.3], 0.1)
# The "AND" operator. This will make a region identical to re2.
-bool1 = pf.boolean([re1, "AND", re2])
+bool1 = ds.boolean([re1, "AND", re2])
xp = bool1["particle_position_x"]
# The "OR" operator. This will make a region identical to re1.
-bool2 = pf.boolean([re1, "OR", re2])
+bool2 = ds.boolean([re1, "OR", re2])
# The "NOT" operator. This will make a region like re1, but with the corner
# that re2 covers cut out.
-bool3 = pf.boolean([re1, "NOT", re2])
+bool3 = ds.boolean([re1, "NOT", re2])
# Disjoint regions can be combined with the "OR" operator.
-bool4 = pf.boolean([sp1, "OR", sp2])
+bool4 = ds.boolean([sp1, "OR", sp2])
# Find oddly-shaped overlapping regions.
-bool5 = pf.boolean([re2, "AND", sp1])
+bool5 = ds.boolean([re2, "AND", sp1])
# Nested logic with parentheses.
# This is re1 with the oddly-shaped region cut out.
-bool6 = pf.boolean([re1, "NOT", "(", re1, "AND", sp1, ")"])
+bool6 = ds.boolean([re1, "NOT", "(", re1, "AND", sp1, ")"])
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -1,43 +1,43 @@
-from yt.mods import * # set up our namespace
+import numpy as np
+
+import yt
# Follow the simple_volume_rendering cookbook for the first part of this.
-pf = load("IsolatedGalaxy/galaxy0030/galaxy0030") # load data
-dd = pf.h.all_data()
-mi, ma = dd.quantities["Extrema"]("density")[0]
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") # load data
+dd = ds.all_data()
+mi, ma = dd.quantities["Extrema"]("density")
# Set up transfer function
-tf = ColorTransferFunction((np.log10(mi), np.log10(ma)))
+tf = yt.ColorTransferFunction((np.log10(mi), np.log10(ma)))
tf.add_layers(6, w=0.05)
# Set up camera paramters
-c = [0.5, 0.5, 0.5] # Center
-L = [1, 1, 1] # Normal Vector
-W = 1.0 # Width
-Nvec = 512 # Pixels on a side
+c = [0.5, 0.5, 0.5] # Center
+L = [1, 1, 1] # Normal Vector
+W = 1.0 # Width
+Nvec = 512 # Pixels on a side
# Specify a north vector, which helps with rotations.
-north_vector = [0.,0.,1.]
+north_vector = [0., 0., 1.]
# Find the maximum density location, store it in max_c
-v,max_c = pf.h.find_max('density')
+v, max_c = ds.find_max('density')
# Initialize the Camera
-cam = pf.h.camera(c, L, W, (Nvec,Nvec), tf, north_vector=north_vector)
+cam = ds.camera(c, L, W, (Nvec, Nvec), tf, north_vector=north_vector)
frame = 0
# Do a rotation over 5 frames
-for i, snapshot in enumerate(cam.rotation(np.pi, 5, clip_ratio = 8.0)):
+for i, snapshot in enumerate(cam.rotation(np.pi, 5, clip_ratio=8.0)):
snapshot.write_png('camera_movement_%04i.png' % frame)
frame += 1
# Move to the maximum density location over 5 frames
-for i, snapshot in enumerate(cam.move_to(max_c, 5, clip_ratio = 8.0)):
+for i, snapshot in enumerate(cam.move_to(max_c, 5, clip_ratio=8.0)):
snapshot.write_png('camera_movement_%04i.png' % frame)
frame += 1
# Zoom in by a factor of 10 over 5 frames
-for i, snapshot in enumerate(cam.zoomin(10.0, 5, clip_ratio = 8.0)):
+for i, snapshot in enumerate(cam.zoomin(10.0, 5, clip_ratio=8.0)):
snapshot.write_png('camera_movement_%04i.png' % frame)
- frame += 1
-
-
+ frame += 1
\ No newline at end of file
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 doc/source/cookbook/contours_on_slice.py
--- a/doc/source/cookbook/contours_on_slice.py
+++ b/doc/source/cookbook/contours_on_slice.py
@@ -1,13 +1,13 @@
-from yt.mods import * # set up our namespace
+import yt
# first add density contours on a density slice
-pf = load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150") # load data
-p = SlicePlot(pf, "x", "density")
+pf = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150") # load data
+p = yt.SlicePlot(pf, "x", "density")
p.annotate_contour("density")
p.save()
# then add temperature contours on the same densty slice
-pf = load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150") # load data
-p = SlicePlot(pf, "x", "density")
+pf = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150") # load data
+p = yt.SlicePlot(pf, "x", "density")
p.annotate_contour("temperature")
p.save(str(pf)+'_T_contour')
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 doc/source/cookbook/extract_fixed_resolution_data.py
--- a/doc/source/cookbook/extract_fixed_resolution_data.py
+++ b/doc/source/cookbook/extract_fixed_resolution_data.py
@@ -1,25 +1,25 @@
-from yt.mods import *
+import yt
# For this example we will use h5py to write to our output file.
import h5py
-pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
level = 2
-dims = pf.domain_dimensions * pf.refine_by**level
+dims = ds.domain_dimensions * ds.refine_by**level
# Now, we construct an object that describes the data region and structure we
# want
-cube = pf.covering_grid(2, # The level we are willing to extract to; higher
- # levels than this will not contribute to the data!
- left_edge=[0.0, 0.0, 0.0],
- # And any fields to preload (this is optional!)
- dims = dims,
- fields=["density"])
+cube = ds.covering_grid(2, # The level we are willing to extract to; higher
+ # levels than this will not contribute to the data!
+ left_edge=[0.0, 0.0, 0.0],
+ # And any fields to preload (this is optional!)
+ dims=dims,
+ fields=["density"])
# Now we open our output file using h5py
# Note that we open with 'w' which will overwrite existing files!
-f = h5py.File("my_data.h5", "w")
+f = h5py.File("my_data.h5", "w")
# We create a dataset at the root note, calling it density...
f.create_dataset("/density", data=cube["density"])
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -1,27 +1,30 @@
-# set up our namespace
-from yt.mods import *
-from yt.analysis_modules.level_sets.api import *
+import numpy as np
-fn = "IsolatedGalaxy/galaxy0030/galaxy0030" # parameter file to load
-field = "density" # this is the field we look for contours over -- we could do
- # this over anything. Other common choices are 'AveragedDensity'
- # and 'Dark_Matter_Density'.
-step = 2.0 # This is the multiplicative interval between contours.
+import yt
+from yt.analysis_modules.level_sets.api import (Clump, find_clumps,
+ get_lowest_clumps)
-pf = load(fn) # load data
+fn = "IsolatedGalaxy/galaxy0030/galaxy0030" # parameter file to load
+# this is the field we look for contours over -- we could do
+# this over anything. Other common choices are 'AveragedDensity'
+# and 'Dark_Matter_Density'.
+field = "density"
+
+step = 2.0 # This is the multiplicative interval between contours.
+
+ds = yt.load(fn) # load data
# We want to find clumps over the entire dataset, so we'll just grab the whole
# thing! This is a convenience parameter that prepares an object that covers
# the whole domain. Note, though, that it will load on demand and not before!
-data_source = pf.disk([0.5, 0.5, 0.5], [0., 0., 1.],
- pf.quan(8,'code_length'),
- pf.quan(1., 'code_length'))
+data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
+ (8., 'kpc'), (1., 'kpc'))
# Now we set some sane min/max values between which we want to find contours.
# This is how we tell the clump finder what to look for -- it won't look for
# contours connected below or above these threshold values.
-c_min = 10**np.floor(np.log10(data_source[field]).min() )
-c_max = 10**np.floor(np.log10(data_source[field]).max()+1)
+c_min = 10**np.floor(np.log10(data_source[field]).min())
+c_max = 10**np.floor(np.log10(data_source[field]).max() + 1)
# keep only clumps with at least 20 cells
function = 'self.data[\'%s\'].size > 20' % field
@@ -39,13 +42,13 @@
# As it goes, it appends the information about all the sub-clumps to the
# master-clump. Among different ways we can examine it, there's a convenience
# function for outputting the full index to a file.
-f = open('%s_clump_index.txt' % pf,'w')
-amods.level_sets.write_clump_index(master_clump,0,f)
+f = open('%s_clump_index.txt' % ds, 'w')
+yt.amods.level_sets.write_clump_index(master_clump, 0, f)
f.close()
# We can also output some handy information, as well.
-f = open('%s_clumps.txt' % pf,'w')
-amods.level_sets.write_clumps(master_clump,0,f)
+f = open('%s_clumps.txt' % ds, 'w')
+yt.amods.level_sets.write_clumps(master_clump, 0, f)
f.close()
# We can traverse the clump index to get a list of all of the 'leaf' clumps
@@ -53,7 +56,7 @@
# If you'd like to visualize these clumps, a list of clumps can be supplied to
# the "clumps" callback on a plot. First, we create a projection plot:
-prj = ProjectionPlot(pf, 2, field, center='c', width=(20,'kpc'))
+prj = yt.ProjectionPlot(ds, 2, field, center='c', width=(20, 'kpc'))
# Next we annotate the plot with contours on the borders of the clumps
prj.annotate_clumps(leaf_clumps)
@@ -63,7 +66,7 @@
# We can also save the clump object to disk to read in later so we don't have
# to spend a lot of time regenerating the clump objects.
-pf.h.save_object(master_clump, 'My_clumps')
+ds.h.save_object(master_clump, 'My_clumps')
# Later, we can read in the clump object like so,
-master_clump = pf.h.load_object('My_clumps')
+master_clump = ds.load_object('My_clumps')
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 doc/source/cookbook/fit_spectrum.py
--- a/doc/source/cookbook/fit_spectrum.py
+++ b/doc/source/cookbook/fit_spectrum.py
@@ -1,25 +1,19 @@
-import os
-import sys
-import h5py
+import yt
+from yt.analysis_modules.cosmological_observation.light_ray.api import LightRay
+from yt.analysis_modules.api import AbsorptionSpectrum
+from yt.analysis_modules.absorption_spectrum.api import generate_total_fit
-from yt.mods import *
-from yt.analysis_modules.cosmological_observation.light_ray.api import \
- LightRay
-from yt.analysis_modules.api import AbsorptionSpectrum
-from yt.analysis_modules.absorption_spectrum.api import \
- generate_total_fit
+# Define and add a field to simulate OVI based on a constant relationship to HI
+def _OVI_NumberDensity(field, data):
+ return data['HI_NumberDensity']
-# Define and add a field to simulate OVI based on
-# a constant relationship to HI
-def _OVI_NumberDensity(field,data):
- return data['HI_NumberDensity']
def _convertOVI(data):
return 4.9E-4*.2
-add_field('my_OVI_NumberDensity',
- function=_OVI_NumberDensity,
- convert_function=_convertOVI)
+yt.add_field('my_OVI_NumberDensity',
+ function=_OVI_NumberDensity,
+ convert_function=_convertOVI)
 # Define species and associated parameters to add to continuum
@@ -29,33 +23,33 @@
# (as in the OVI doublet), 'numLines' will be equal to the number
# of lines, and f,gamma, and wavelength will have multiple values.
-HI_parameters = {'name':'HI',
- 'field' : 'HI_NumberDensity',
- 'f': [.4164],
- 'Gamma':[6.265E8],
- 'wavelength':[1215.67],
- 'mass': 1.00794,
- 'numLines':1,
- 'maxN': 1E22, 'minN':1E11,
- 'maxb': 300, 'minb':1,
- 'maxz': 6, 'minz':0,
- 'init_b':30,
- 'init_N':1E14}
+HI_parameters = {'name': 'HI',
+ 'field': 'HI_NumberDensity',
+ 'f': [.4164],
+ 'Gamma': [6.265E8],
+ 'wavelength': [1215.67],
+ 'mass': 1.00794,
+ 'numLines': 1,
+ 'maxN': 1E22, 'minN': 1E11,
+ 'maxb': 300, 'minb': 1,
+ 'maxz': 6, 'minz': 0,
+ 'init_b': 30,
+ 'init_N': 1E14}
-OVI_parameters = {'name':'OVI',
- 'field' : 'my_OVI_NumberDensity',
- 'f':[.1325,.06580],
- 'Gamma':[4.148E8,4.076E8],
- 'wavelength':[1031.9261,1037.6167],
- 'mass': 15.9994,
- 'numLines':2,
- 'maxN':1E17,'minN':1E11,
- 'maxb':300, 'minb':1,
- 'maxz':6, 'minz':0,
- 'init_b':20,
- 'init_N':1E12}
+OVI_parameters = {'name': 'OVI',
+ 'field': 'my_OVI_NumberDensity',
+ 'f': [.1325, .06580],
+ 'Gamma': [4.148E8, 4.076E8],
+ 'wavelength': [1031.9261, 1037.6167],
+ 'mass': 15.9994,
+ 'numLines': 2,
+ 'maxN': 1E17, 'minN': 1E11,
+ 'maxb': 300, 'minb': 1,
+ 'maxz': 6, 'minz': 0,
+ 'init_b': 20,
+ 'init_N': 1E12}
-species_dicts = {'HI':HI_parameters,'OVI':OVI_parameters}
+species_dicts = {'HI': HI_parameters, 'OVI': OVI_parameters}
# Create a LightRay object extending from z = 0 to z = 0.1
# and use only the redshift dumps.
@@ -63,7 +57,7 @@
'Enzo', 0.0, 0.1,
use_minimum_datasets=True,
time_data=False
- )
+ )
# Get all fields that need to be added to the light ray
fields = ['temperature']
@@ -80,34 +74,32 @@
get_los_velocity=True,
njobs=-1)
-# Create an AbsorptionSpectrum object extending from
+# Create an AbsorptionSpectrum object extending from
# lambda = 900 to lambda = 1800, with 10000 pixels
sp = AbsorptionSpectrum(900.0, 1400.0, 50000)
# Iterate over species
-for s,params in species_dicts.iteritems():
-
- #Iterate over transitions for a single species
+for s, params in species_dicts.iteritems():
+ # Iterate over transitions for a single species
for i in range(params['numLines']):
-
- #Add the lines to the spectrum
- sp.add_line(s, params['field'],
- params['wavelength'][i], params['f'][i],
- params['Gamma'][i], params['mass'],
- label_threshold=1.e10)
+ # Add the lines to the spectrum
+ sp.add_line(s, params['field'],
+ params['wavelength'][i], params['f'][i],
+ params['Gamma'][i], params['mass'],
+ label_threshold=1.e10)
# Make and save spectrum
-wavelength, flux = sp.make_spectrum('lightray.h5',
- output_file='spectrum.h5',
- line_list_file='lines.txt',
- use_peculiar_velocity=True)
+wavelength, flux = sp.make_spectrum('lightray.h5',
+ output_file='spectrum.h5',
+ line_list_file='lines.txt',
+ use_peculiar_velocity=True)
-#Define order to fit species in
-order_fits = ['OVI','HI']
+# Define order to fit species in
+order_fits = ['OVI', 'HI']
# Fit spectrum and save fit
fitted_lines, fitted_flux = generate_total_fit(wavelength,
- flux, order_fits, species_dicts,
- output_file='spectrum_fit.h5')
+ flux, order_fits, species_dicts,
+ output_file='spectrum_fit.h5')
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 doc/source/cookbook/free_free_field.py
--- a/doc/source/cookbook/free_free_field.py
+++ b/doc/source/cookbook/free_free_field.py
@@ -1,40 +1,42 @@
-from yt.mods import *
-from yt.utilities.physical_constants import mp # Need to grab the proton mass from the
- # constants database
+import numpy as np
+import yt
+# Need to grab the proton mass from the constants database
+from yt.utilities.physical_constants import mp
# Define the emission field
-keVtoerg = 1.602e-9 # Convert energy in keV to energy in erg
-KtokeV = 8.617e-08 # Convert degrees Kelvin to degrees keV
+keVtoerg = 1.602e-9 # Convert energy in keV to energy in erg
+KtokeV = 8.617e-08 # Convert degrees Kelvin to degrees keV
sqrt3 = np.sqrt(3.)
-expgamma = 1.78107241799 # Exponential of Euler's constant
+expgamma = 1.78107241799 # Exponential of Euler's constant
-def _FreeFree_Emission(field, data) :
- if data.has_field_parameter("Z") :
+def _FreeFree_Emission(field, data):
+
+ if data.has_field_parameter("Z"):
Z = data.get_field_parameter("Z")
- else :
- Z = 1.077 # Primordial H/He plasma
+ else:
+ Z = 1.077 # Primordial H/He plasma
- if data.has_field_parameter("mue") :
+ if data.has_field_parameter("mue"):
mue = data.get_field_parameter("mue")
- else :
- mue = 1./0.875 # Primordial H/He plasma
+ else:
+ mue = 1./0.875 # Primordial H/He plasma
- if data.has_field_parameter("mui") :
+ if data.has_field_parameter("mui"):
mui = data.get_field_parameter("mui")
- else :
- mui = 1./0.8125 # Primordial H/He plasma
+ else:
+ mui = 1./0.8125 # Primordial H/He plasma
- if data.has_field_parameter("Ephoton") :
+ if data.has_field_parameter("Ephoton"):
Ephoton = data.get_field_parameter("Ephoton")
- else :
- Ephoton = 1.0 # in keV
+ else:
+ Ephoton = 1.0 # in keV
- if data.has_field_parameter("photon_emission") :
+ if data.has_field_parameter("photon_emission"):
photon_emission = data.get_field_parameter("photon_emission")
- else :
- photon_emission = False # Flag for energy or photon emission
+ else:
+ photon_emission = False # Flag for energy or photon emission
n_e = data["density"]/(mue*mp)
n_i = data["density"]/(mui*mp)
@@ -50,24 +52,25 @@
eps_E = 1.64e-20*Z*Z*n_e*n_i/np.sqrt(data["temperature"]) * \
np.exp(-Ephoton/kT)*g_ff
- if photon_emission: eps_E /= (Ephoton*keVtoerg)
+ if photon_emission:
+ eps_E /= (Ephoton*keVtoerg)
return eps_E
-add_field("FreeFree_Emission", function=_FreeFree_Emission)
+yt.add_field("FreeFree_Emission", function=_FreeFree_Emission)
# Define the luminosity derived quantity
-
-def _FreeFreeLuminosity(data) :
+def _FreeFreeLuminosity(data):
return (data["FreeFree_Emission"]*data["cell_volume"]).sum()
-def _combFreeFreeLuminosity(data, luminosity) :
+
+def _combFreeFreeLuminosity(data, luminosity):
return luminosity.sum()
-add_quantity("FreeFree_Luminosity", function=_FreeFreeLuminosity,
- combine_function=_combFreeFreeLuminosity, n_ret = 1)
+yt.add_quantity("FreeFree_Luminosity", function=_FreeFreeLuminosity,
+ combine_function=_combFreeFreeLuminosity, n_ret=1)
-pf = load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
+pf = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
sphere = pf.sphere(pf.domain_center, (100., "kpc"))
@@ -75,8 +78,8 @@
print "L_E (1 keV, primordial) = ", sphere.quantities["FreeFree_Luminosity"]()
-# The defaults for the field assume a H/He primordial plasma. Let's set the appropriate
-# parameters for a pure hydrogen plasma.
+# The defaults for the field assume a H/He primordial plasma.
+# Let's set the appropriate parameters for a pure hydrogen plasma.
sphere.set_field_parameter("mue", 1.0)
sphere.set_field_parameter("mui", 1.0)
@@ -90,10 +93,9 @@
print "L_E (10 keV, pure hydrogen) = ", sphere.quantities["FreeFree_Luminosity"]()
-# Finally, let's set the flag for photon emission, to get the total number of photons
-# emitted at this energy:
+# Finally, let's set the flag for photon emission, to get the total number
+# of photons emitted at this energy:
sphere.set_field_parameter("photon_emission", True)
print "L_ph (10 keV, pure hydrogen) = ", sphere.quantities["FreeFree_Luminosity"]()
-
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 doc/source/cookbook/global_phase_plots.py
--- a/doc/source/cookbook/global_phase_plots.py
+++ b/doc/source/cookbook/global_phase_plots.py
@@ -1,14 +1,14 @@
-from yt.mods import * # set up our namespace
+import yt
# load the dataset
-pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
# This is an object that describes the entire box
-ad = pf.h.all_data()
+ad = ds.h.all_data()
-# We plot the average VelocityMagnitude (mass-weighted) in our object
+# We plot the average VelocityMagnitude (mass-weighted) in our object
# as a function of Density and temperature
-plot = PhasePlot(ad, "density","temperature","velocity_magnitude")
+plot = yt.PhasePlot(ad, "density", "temperature", "velocity_magnitude")
# save the plot
plot.save()
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 doc/source/cookbook/hse_field.py
--- a/doc/source/cookbook/hse_field.py
+++ b/doc/source/cookbook/hse_field.py
@@ -1,121 +1,134 @@
-from yt.mods import *
+import numpy as np
+import yt
-# Define the components of the gravitational acceleration vector field by taking the
-# gradient of the gravitational potential
+# Define the components of the gravitational acceleration vector field by
+# taking the gradient of the gravitational potential
-def _Grav_Accel_x(field, data) :
+
+def _Grav_Accel_x(field, data):
# We need to set up stencils
- sl_left = slice(None,-2,None)
- sl_right = slice(2,None,None)
+ sl_left = slice(None, -2, None)
+ sl_right = slice(2, None, None)
div_fac = 2.0
dx = div_fac * data['dx'].flat[0]
- gx = data["Grav_Potential"][sl_right,1:-1,1:-1]/dx
- gx -= data["Grav_Potential"][sl_left, 1:-1,1:-1]/dx
+ gx = data["gravitational_potential"][sl_right, 1:-1, 1:-1]/dx
+ gx -= data["gravitational_potential"][sl_left, 1:-1, 1:-1]/dx
- new_field = np.zeros(data["Grav_Potential"].shape, dtype='float64')
- new_field[1:-1,1:-1,1:-1] = -gx
+ new_field = np.zeros(data["gravitational_potential"].shape,
+ dtype='float64')
+ new_field[1:-1, 1:-1, 1:-1] = -gx
return new_field
-def _Grav_Accel_y(field, data) :
+
+def _Grav_Accel_y(field, data):
# We need to set up stencils
- sl_left = slice(None,-2,None)
- sl_right = slice(2,None,None)
+ sl_left = slice(None, -2, None)
+ sl_right = slice(2, None, None)
div_fac = 2.0
dy = div_fac * data['dy'].flat[0]
- gy = data["Grav_Potential"][1:-1,sl_right,1:-1]/dy
- gy -= data["Grav_Potential"][1:-1,sl_left ,1:-1]/dy
+ gy = data["gravitational_potential"][1:-1, sl_right, 1:-1]/dy
+ gy -= data["gravitational_potential"][1:-1, sl_left, 1:-1]/dy
- new_field = np.zeros(data["Grav_Potential"].shape, dtype='float64')
- new_field[1:-1,1:-1,1:-1] = -gy
+ new_field = np.zeros(data["gravitational_potential"].shape,
+ dtype='float64')
+ new_field[1:-1, 1:-1, 1:-1] = -gy
return new_field
-def _Grav_Accel_z(field, data) :
+
+def _Grav_Accel_z(field, data):
# We need to set up stencils
- sl_left = slice(None,-2,None)
- sl_right = slice(2,None,None)
+ sl_left = slice(None, -2, None)
+ sl_right = slice(2, None, None)
div_fac = 2.0
dz = div_fac * data['dz'].flat[0]
- gz = data["Grav_Potential"][1:-1,1:-1,sl_right]/dz
- gz -= data["Grav_Potential"][1:-1,1:-1,sl_left ]/dz
+ gz = data["gravitational_potential"][1:-1, 1:-1, sl_right]/dz
+ gz -= data["gravitational_potential"][1:-1, 1:-1, sl_left]/dz
- new_field = np.zeros(data["Grav_Potential"].shape, dtype='float64')
- new_field[1:-1,1:-1,1:-1] = -gz
+ new_field = np.zeros(data["gravitational_potential"].shape,
+ dtype='float64')
+ new_field[1:-1, 1:-1, 1:-1] = -gz
return new_field
+
# Define the components of the pressure gradient field
-def _Grad_Pressure_x(field, data) :
+
+def _Grad_Pressure_x(field, data):
# We need to set up stencils
- sl_left = slice(None,-2,None)
- sl_right = slice(2,None,None)
+ sl_left = slice(None, -2, None)
+ sl_right = slice(2, None, None)
div_fac = 2.0
dx = div_fac * data['dx'].flat[0]
- px = data["pressure"][sl_right,1:-1,1:-1]/dx
- px -= data["pressure"][sl_left, 1:-1,1:-1]/dx
+ px = data["pressure"][sl_right, 1:-1, 1:-1]/dx
+ px -= data["pressure"][sl_left, 1:-1, 1:-1]/dx
new_field = np.zeros(data["pressure"].shape, dtype='float64')
- new_field[1:-1,1:-1,1:-1] = px
+ new_field[1:-1, 1:-1, 1:-1] = px
return new_field
-def _Grad_Pressure_y(field, data) :
+
+def _Grad_Pressure_y(field, data):
# We need to set up stencils
- sl_left = slice(None,-2,None)
- sl_right = slice(2,None,None)
+ sl_left = slice(None, -2, None)
+ sl_right = slice(2, None, None)
div_fac = 2.0
dy = div_fac * data['dy'].flat[0]
- py = data["pressure"][1:-1,sl_right,1:-1]/dy
- py -= data["pressure"][1:-1,sl_left ,1:-1]/dy
+ py = data["pressure"][1:-1, sl_right, 1:-1]/dy
+ py -= data["pressure"][1:-1, sl_left, 1:-1]/dy
new_field = np.zeros(data["pressure"].shape, dtype='float64')
- new_field[1:-1,1:-1,1:-1] = py
+ new_field[1:-1, 1:-1, 1:-1] = py
return new_field
-def _Grad_Pressure_z(field, data) :
+
+def _Grad_Pressure_z(field, data):
# We need to set up stencils
- sl_left = slice(None,-2,None)
- sl_right = slice(2,None,None)
+ sl_left = slice(None, -2, None)
+ sl_right = slice(2, None, None)
div_fac = 2.0
dz = div_fac * data['dz'].flat[0]
- pz = data["pressure"][1:-1,1:-1,sl_right]/dz
- pz -= data["pressure"][1:-1,1:-1,sl_left ]/dz
+ pz = data["pressure"][1:-1, 1:-1, sl_right]/dz
+ pz -= data["pressure"][1:-1, 1:-1, sl_left]/dz
new_field = np.zeros(data["pressure"].shape, dtype='float64')
- new_field[1:-1,1:-1,1:-1] = pz
+ new_field[1:-1, 1:-1, 1:-1] = pz
return new_field
+
# Define the "degree of hydrostatic equilibrium" field
-def _HSE(field, data) :
+
+def _HSE(field, data):
gx = data["density"]*data["Grav_Accel_x"]
gy = data["density"]*data["Grav_Accel_y"]
@@ -131,36 +144,37 @@
# Now add the fields to the database
-add_field("Grav_Accel_x", function=_Grav_Accel_x, take_log=False,
- validators=[ValidateSpatial(1,["Grav_Potential"])])
+yt.add_field("Grav_Accel_x", function=_Grav_Accel_x, take_log=False,
+ validators=[yt.ValidateSpatial(1, ["gravitational_potential"])])
-add_field("Grav_Accel_y", function=_Grav_Accel_y, take_log=False,
- validators=[ValidateSpatial(1,["Grav_Potential"])])
+yt.add_field("Grav_Accel_y", function=_Grav_Accel_y, take_log=False,
+ validators=[yt.ValidateSpatial(1, ["gravitational_potential"])])
-add_field("Grav_Accel_z", function=_Grav_Accel_z, take_log=False,
- validators=[ValidateSpatial(1,["Grav_Potential"])])
+yt.add_field("Grav_Accel_z", function=_Grav_Accel_z, take_log=False,
+ validators=[yt.ValidateSpatial(1, ["gravitational_potential"])])
-add_field("Grad_Pressure_x", function=_Grad_Pressure_x, take_log=False,
- validators=[ValidateSpatial(1,["pressure"])])
+yt.add_field("Grad_Pressure_x", function=_Grad_Pressure_x, take_log=False,
+ validators=[yt.ValidateSpatial(1, ["pressure"])])
-add_field("Grad_Pressure_y", function=_Grad_Pressure_y, take_log=False,
- validators=[ValidateSpatial(1,["pressure"])])
+yt.add_field("Grad_Pressure_y", function=_Grad_Pressure_y, take_log=False,
+ validators=[yt.ValidateSpatial(1, ["pressure"])])
-add_field("Grad_Pressure_z", function=_Grad_Pressure_z, take_log=False,
- validators=[ValidateSpatial(1,["pressure"])])
+yt.add_field("Grad_Pressure_z", function=_Grad_Pressure_z, take_log=False,
+ validators=[yt.ValidateSpatial(1, ["pressure"])])
-add_field("HSE", function=_HSE, take_log=False)
+yt.add_field("HSE", function=_HSE, take_log=False)
-# Open two files, one at the beginning and the other at a later time when there's a
-# lot of sloshing going on.
+# Open two files, one at the beginning and the other at a later time when
+# there's a lot of sloshing going on.
-pfi = load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0000")
-pff = load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0350")
+dsi = yt.load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0000")
+dsf = yt.load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0350")
-# Sphere objects centered at the cluster potential minimum with a radius of 200 kpc
+# Sphere objects centered at the cluster potential minimum with a radius
+# of 200 kpc
-sphere_i = pfi.h.sphere(pfi.domain_center, (200, "kpc"))
-sphere_f = pff.h.sphere(pff.domain_center, (200, "kpc"))
+sphere_i = dsi.h.sphere(dsi.domain_center, (200, "kpc"))
+sphere_f = dsf.h.sphere(dsf.domain_center, (200, "kpc"))
# Average "degree of hydrostatic equilibrium" in these spheres
@@ -170,10 +184,13 @@
print "Degree of hydrostatic equilibrium initially: ", hse_i
print "Degree of hydrostatic equilibrium later: ", hse_f
-# Just for good measure, take slices through the center of the domain of the two files
+# Just for good measure, take slices through the center of the domains
+# of the two files
-slc_i = SlicePlot(pfi, 2, ["density","HSE"], center=pfi.domain_center, width=(1.0, "mpc"))
-slc_f = SlicePlot(pff, 2, ["density","HSE"], center=pff.domain_center, width=(1.0, "mpc"))
+slc_i = yt.SlicePlot(dsi, 2, ["density", "HSE"], center=dsi.domain_center,
+ width=(1.0, "mpc"))
+slc_f = yt.SlicePlot(dsf, 2, ["density", "HSE"], center=dsf.domain_center,
+ width=(1.0, "mpc"))
slc_i.save("initial")
slc_f.save("final")
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -282,7 +282,7 @@
.. code-block:: python
- ( "Gas", "Halo", "Disk", "Bulge", "Stars", "Bndry" )
+ ( "Gas", "Halo", "Disk", "Bulge", "Stars", "Bndry" )
You can specify alternate names, but note that this may cause problems with the
field specification if none of the names match old names.
@@ -300,23 +300,23 @@
.. code-block:: python
- default = (('Npart', 6, 'i'),
- ('Massarr', 6, 'd'),
- ('Time', 1, 'd'),
- ('Redshift', 1, 'd'),
- ('FlagSfr', 1, 'i'),
- ('FlagFeedback', 1, 'i'),
- ('Nall', 6, 'i'),
- ('FlagCooling', 1, 'i'),
- ('NumFiles', 1, 'i'),
- ('BoxSize', 1, 'd'),
- ('Omega0', 1, 'd'),
- ('OmegaLambda', 1, 'd'),
- ('HubbleParam', 1, 'd'),
- ('FlagAge', 1, 'i'),
- ('FlagMEtals', 1, 'i'),
- ('NallHW', 6, 'i'),
- ('unused', 16, 'i'))
+ default = (('Npart', 6, 'i'),
+ ('Massarr', 6, 'd'),
+ ('Time', 1, 'd'),
+ ('Redshift', 1, 'd'),
+ ('FlagSfr', 1, 'i'),
+ ('FlagFeedback', 1, 'i'),
+ ('Nall', 6, 'i'),
+ ('FlagCooling', 1, 'i'),
+ ('NumFiles', 1, 'i'),
+ ('BoxSize', 1, 'd'),
+ ('Omega0', 1, 'd'),
+ ('OmegaLambda', 1, 'd'),
+ ('HubbleParam', 1, 'd'),
+ ('FlagAge', 1, 'i'),
+ ('FlagMEtals', 1, 'i'),
+ ('NallHW', 6, 'i'),
+ ('unused', 16, 'i'))
These items will all be accessible inside the object ``pf.parameters``, which
is a dictionary. You can add combinations of new items, specified in the same
@@ -371,7 +371,7 @@
.. code-block:: python
- ds = load("./halo1e11_run1.00400")
+ ds = load("./halo1e11_run1.00400")
.. _specifying-cosmology-tipsy:
@@ -390,6 +390,11 @@
These will be used set the units, if they are specified.
+Using yt to view and analyze Tipsy outputs from Gasoline
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+.. notebook:: tipsy_and_yt.ipynb
+
.. _loading-artio-data:
ARTIO Data
@@ -409,7 +414,7 @@
.. code-block:: python
- ds = load("./A11QR1/s11Qzm1h2_a1.0000.art")
+ ds = load("./A11QR1/s11Qzm1h2_a1.0000.art")
.. _loading-art-data:
@@ -551,24 +556,24 @@
.. code-block:: python
- from yt.mods import *
- ds = load("m33_hi.fits")
- ds.print_stats()
+ from yt.mods import *
+ ds = load("m33_hi.fits")
+ ds.print_stats()
.. parsed-literal::
- level # grids # cells # cells^3
- ----------------------------------------------
- 0 512 981940800 994
- ----------------------------------------------
- 512 981940800
+ level # grids # cells # cells^3
+ ----------------------------------------------
+ 0 512 981940800 994
+ ----------------------------------------------
+ 512 981940800
yt will generate its own domain decomposition, but the number of grids can be
set manually by passing the ``nprocs`` parameter to the ``load`` call:
.. code-block:: python
- ds = load("m33_hi.fits", nprocs=1024)
+ ds = load("m33_hi.fits", nprocs=1024)
Making the Most of `yt` for FITS Data
+++++++++++++++++++++++++++++++++++++
@@ -591,12 +596,12 @@
.. code-block:: python
- import astropy.io.fits as pyfits
- f = pyfits.open("xray_flux_image.fits", mode="update")
- f[0].header["BUNIT"] = "cts/s/pixel"
- f[0].header["BTYPE"] = "flux"
- f.flush()
- f.close()
+ import astropy.io.fits as pyfits
+ f = pyfits.open("xray_flux_image.fits", mode="update")
+ f[0].header["BUNIT"] = "cts/s/pixel"
+ f[0].header["BTYPE"] = "flux"
+ f.flush()
+ f.close()
FITS Coordinates
++++++++++++++++
@@ -646,7 +651,7 @@
.. code-block:: python
- ds = load("flux.fits", auxiliary_files=["temp.fits","metal.fits"])
+ ds = load("flux.fits", auxiliary_files=["temp.fits","metal.fits"])
The image blocks in each of these files will be loaded as a separate field,
provided they have the same dimensions as the image blocks in the main file.
@@ -676,13 +681,13 @@
single floating-point number (applies to all fields) or a Python dictionary
containing different mask values for different fields:
-.. code-block::
+.. code-block:: python
- # passing a single float
- ds = load("m33_hi.fits", nan_mask=0.0)
+ # passing a single float
+ ds = load("m33_hi.fits", nan_mask=0.0)
- # passing a dict
- ds = load("m33_hi.fits", nan_mask={"intensity":-1.0,"temperature":0.0})
+ # passing a dict
+ ds = load("m33_hi.fits", nan_mask={"intensity":-1.0,"temperature":0.0})
Generally, AstroPy may generate a lot of warnings about individual FITS
files, many of which you may want to ignore. If you want to see these
@@ -793,9 +798,9 @@
.. code-block:: python
- for g in grid_data:
- g["number_of_particles"] = 100000
- g["particle_position_x"] = np.random.random((g["number_of_particles"]))
+ for g in grid_data:
+ g["number_of_particles"] = 100000
+ g["particle_position_x"] = np.random.random((g["number_of_particles"]))
.. rubric:: Caveats
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 doc/source/examining/tipsy_and_yt.ipynb
--- /dev/null
+++ b/doc/source/examining/tipsy_and_yt.ipynb
@@ -0,0 +1,195 @@
+{
+ "metadata": {
+ "name": "",
+ "signature": "sha256:a80c1b224c121c67e57acfa9183c5660a332a37556a492e230476b424827885f"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+ {
+ "cells": [
+ {
+ "cell_type": "heading",
+ "level": 1,
+ "metadata": {},
+ "source": [
+ "Using yt to view and analyze Tipsy outputs from Gasoline"
+ ]
+ },
+ {
+ "cell_type": "heading",
+ "level": 2,
+ "metadata": {},
+ "source": [
+ "Loading Files"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Alright, let's start with some basics. Before we do anything, we will need to load a snapshot. You can do this using the ```load``` convenience function. yt will autodetect that you have a tipsy snapshot, and automatically set itself up appropriately."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "from yt.mods import *"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+      "We will be looking at a fairly low resolution dataset. In the next cell, the `ds` object has an attribute called `n_ref` that tells the oct-tree how many particles to refine on. The default is 64, but we'll get prettier plots (at the expense of a deeper tree) with 8. Just passing the argument `n_ref=8` to load does this for us."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ ">This dataset is available for download at http://yt-project.org/data/TipsyGalaxy.tar.gz (10 MB)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "ds = load('TipsyGalaxy/galaxy.00300', n_ref=8)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We now have a `TipsyDataset` object called `ds`. Let's see what fields it has."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "ds.field_list"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+      "`yt` also defines so-called \"derived\" fields. These fields are functions of the on-disk fields that live in the `field_list`. There is a `derived_field_list` attribute attached to the `Dataset` object - let's take a look at the derived fields in this dataset:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "ds.derived_field_list"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+      "All of the fields in the `field_list` are arrays containing the values for the associated particles. These haven't been smoothed or gridded in any way. We can grab the array-data for these particles using `ds.all_data()`. For example, let's take a look at a temperature-colored scatterplot of the gas particles in this output."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "%matplotlib inline\n",
+ "import matplotlib.pyplot as plt"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "dd = ds.all_data()\n",
+ "xcoord = dd['Gas','Coordinates'][:,0].v\n",
+ "ycoord = dd['Gas','Coordinates'][:,1].v\n",
+ "logT = np.log10(dd['Gas','Temperature'])\n",
+ "plt.scatter(xcoord, ycoord, c=logT, s=2*logT, marker='o', edgecolor='none', vmin=2, vmax=6)\n",
+ "plt.xlim(-20,20)\n",
+ "plt.ylim(-20,20)\n",
+ "cb = plt.colorbar()\n",
+ "cb.set_label('$\\log_{10}$ Temperature')\n",
+ "plt.gcf().set_size_inches(15,10)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "heading",
+ "level": 2,
+ "metadata": {},
+ "source": [
+ "Making Smoothed Images"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "`yt` will automatically generate smoothed versions of these fields that you can use to plot. Let's make a temperature slice and a density projection."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "SlicePlot(ds, 'z', ('gas','density'), width=(40, 'kpc'), center='m')"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "ProjectionPlot(ds, 'z', ('gas','density'), width=(40, 'kpc'), center='m')"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Not only are the values in the tipsy snapshot read and automatically smoothed, the auxiliary files that have physical significance are also smoothed. Let's look at a slice of Iron mass fraction."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "SlicePlot(ds, 'z', ('gas', 'FeMassFrac'), width=(40, 'kpc'), center='m')"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": []
+ }
+ ],
+ "metadata": {}
+ }
+ ]
+}
\ No newline at end of file
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -200,10 +200,10 @@
.. autosummary::
:toctree: generated/
- ~yt.frontends.halo_catalogs.data_structures.RockstarBinaryFile
- ~yt.frontends.halo_catalogs.data_structures.RockstarDataset
- ~yt.frontends.halo_catalogs.fields.RockstarFieldInfo
- ~yt.frontends.halo_catalogs.io.IOHandlerRockstarBinary
+ ~yt.frontends.halo_catalogs.rockstar.data_structures.RockstarBinaryFile
+ ~yt.frontends.halo_catalogs.rockstar.data_structures.RockstarDataset
+ ~yt.frontends.halo_catalogs.rockstar.fields.RockstarFieldInfo
+ ~yt.frontends.halo_catalogs.rockstar.io.IOHandlerRockstarBinary
MOAB
^^^^
@@ -313,7 +313,7 @@
~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder
~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder
~yt.analysis_modules.halo_finding.halo_objects.parallelHF
- ~yt.analysis_modules.halo_finding.rockstar.api.RockstarHaloFinder
+ ~yt.analysis_modules.halo_finding.rockstar.rockstar.RockstarHaloFinder
You can also operate on the Halo and HAloList objects themselves:
@@ -616,11 +616,8 @@
~yt.visualization.plot_modifications.ArrowCallback
~yt.visualization.plot_modifications.ClumpContourCallback
~yt.visualization.plot_modifications.ContourCallback
- ~yt.visualization.plot_modifications.CoordAxesCallback
~yt.visualization.plot_modifications.CuttingQuiverCallback
~yt.visualization.plot_modifications.GridBoundaryCallback
- ~yt.visualization.plot_modifications.HopCircleCallback
- ~yt.visualization.plot_modifications.HopParticleCallback
~yt.visualization.plot_modifications.LabelCallback
~yt.visualization.plot_modifications.LinePlotCallback
~yt.visualization.plot_modifications.MarkerAnnotateCallback
@@ -630,7 +627,6 @@
~yt.visualization.plot_modifications.SphereCallback
~yt.visualization.plot_modifications.TextLabelCallback
~yt.visualization.plot_modifications.TitleCallback
- ~yt.visualization.plot_modifications.UnitBoundaryCallback
~yt.visualization.plot_modifications.VelocityCallback
Function List
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,11 @@
import distribute_setup
distribute_setup.use_setuptools()
-from distutils.command.build_py import build_py
+try:
+ from distutils.command.build_py import build_py_2to3 \
+ as build_py
+except ImportError:
+ from distutils.command.build_py import build_py
from numpy.distutils.misc_util import appendpath
from numpy.distutils.command import install_data as np_install_data
from numpy.distutils import log
@@ -100,11 +104,11 @@
needs_cython = True
if needs_cython:
- print "Cython is a build-time requirement for the source tree of yt."
- print "Please either install yt from a provided, release tarball,"
- print "or install Cython (version 0.16 or higher)."
- print "You may be able to accomplish this by typing:"
- print " pip install -U Cython"
+ print("Cython is a build-time requirement for the source tree of yt.")
+ print("Please either install yt from a provided, release tarball,")
+ print("or install Cython (version 0.16 or higher).")
+ print("You may be able to accomplish this by typing:")
+ print(" pip install -U Cython")
sys.exit(1)
######
@@ -176,12 +180,12 @@
shell=True)
if (get_changeset.stderr.read() != ""):
- print "Error in obtaining current changeset of the Mercurial repository"
+ print("Error in obtaining current changeset of the Mercurial repository")
changeset = None
- changeset = get_changeset.stdout.read().strip()
+ changeset = get_changeset.stdout.read().strip().decode("UTF-8")
if (not re.search("^[0-9a-f]{12}", changeset)):
- print "Current changeset of the Mercurial repository is malformed"
+ print("Current changeset of the Mercurial repository is malformed")
changeset = None
return changeset
@@ -215,7 +219,7 @@
with open(os.path.join(target_dir, '__hg_version__.py'), 'w') as fobj:
fobj.write("hg_version = '%s'\n" % changeset)
- build_py.run(self)
+ build_py.run(self)
def configuration(parent_package='', top_path=None):
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 tests/runall.py
--- a/tests/runall.py
+++ b/tests/runall.py
@@ -87,7 +87,7 @@
keys = set(registry_entries())
tests_to_run += [t for t in new_tests if t in keys]
tests = list(set(tests_to_run))
- print "\n ".join(tests)
+ print ("\n ".join(tests))
sys.exit(0)
# Load the test pf and make sure it's good.
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 yt/analysis_modules/halo_finding/api.py
--- a/yt/analysis_modules/halo_finding/api.py
+++ b/yt/analysis_modules/halo_finding/api.py
@@ -13,7 +13,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-from halo_objects import \
+from .halo_objects import \
Halo, \
HOPHalo, \
parallelHOPHalo, \
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 yt/analysis_modules/halo_finding/fof/EnzoFOF.c
--- a/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
+++ b/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
@@ -173,14 +173,37 @@
__declspec(dllexport)
#endif
-void initEnzoFOF(void)
+PyMODINIT_FUNC
+#if PY_MAJOR_VERSION >= 3
+#define _RETVAL m
+PyInit_EnzoFOF(void)
+#else
+#define _RETVAL
+initEnzoFOF(void)
+#endif
{
PyObject *m, *d;
+#if PY_MAJOR_VERSION >= 3
+ static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "EnzoFOF", /* m_name */
+ "EnzoFOF Module", /* m_doc */
+ -1, /* m_size */
+ _FOFMethods, /* m_methods */
+ NULL, /* m_reload */
+ NULL, /* m_traverse */
+ NULL, /* m_clear */
+ NULL, /* m_free */
+ };
+ m = PyModule_Create(&moduledef);
+#else
m = Py_InitModule("EnzoFOF", _FOFMethods);
+#endif
d = PyModule_GetDict(m);
_FOFerror = PyErr_NewException("EnzoFOF.FOFerror", NULL, NULL);
PyDict_SetItemString(d, "error", _FOFerror);
import_array();
+ return _RETVAL;
}
/*
diff -r 9aa1e590554e34bc2712a040d0e1dd9d7dcd935c -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -24,6 +24,7 @@
import os
import os.path as path
from collections import defaultdict
+from yt.extern.six import add_metaclass
from yt.funcs import *
@@ -50,12 +51,13 @@
ParallelAnalysisInterface, \
parallel_blocking_call
+
+@add_metaclass(ParallelDummy)
class Halo(object):
"""
A data source that returns particle information about the members of a
HOP-identified halo.
"""
- __metaclass__ = ParallelDummy # This will proxy up our methods
_distributed = False
_processing = False
_owner = 0
@@ -491,39 +493,39 @@
# all the parameters except for the center of mass.
com = self.center_of_mass()
position = [self["particle_position_x"],
- self["particle_position_y"],
- self["particle_position_z"]]
+ self["particle_position_y"],
+ self["particle_position_z"]]
# Locate the furthest particle from com, its vector length and index
- DW = np.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
- position = [position[0] - com[0],
- position[1] - com[1],
- position[2] - com[2]]
- # different cases of particles being on other side of boundary
- for axis in range(np.size(DW)):
- cases = np.array([position[axis],
- position[axis] + DW[axis],
- position[axis] - DW[axis]])
+ DW = np.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
+ position = [position[0] - com[0],
+ position[1] - com[1],
+ position[2] - com[2]]
+ # different cases of particles being on other side of boundary
+ for axis in range(np.size(DW)):
+ cases = np.array([position[axis],
+ position[axis] + DW[axis],
+ position[axis] - DW[axis]])
# pick out the smallest absolute distance from com
position[axis] = np.choose(np.abs(cases).argmin(axis=0), cases)
- # find the furthest particle's index
- r = np.sqrt(position[0]**2 +
- position[1]**2 +
- position[2]**2)
+ # find the furthest particle's index
+ r = np.sqrt(position[0]**2 +
+ position[1]**2 +
+ position[2]**2)
A_index = r.argmax()
mag_A = r.max()
# designate the A vector
- A_vector = (position[0][A_index],
- position[1][A_index],
- position[2][A_index])
+ A_vector = (position[0][A_index],
+ position[1][A_index],
+ position[2][A_index])
# designate the e0 unit vector
e0_vector = A_vector / mag_A
# locate the tB particle position by finding the max B
- e0_vector_copy = np.empty((np.size(position[0]), 3), dtype='float64')
+ e0_vector_copy = np.empty((np.size(position[0]), 3), dtype='float64')
for i in range(3):
e0_vector_copy[:, i] = e0_vector[i]
rr = np.array([position[0],
- position[1],
- position[2]]).T # Similar to tB_vector in old code.
+ position[1],
+ position[2]]).T # Similar to tB_vector in old code.
tC_vector = np.cross(e0_vector_copy, rr)
te2 = tC_vector.copy()
for dim in range(3):
@@ -954,7 +956,7 @@
Examples
--------
>>> params = halos[0].get_ellipsoid_parameters()
- """
+ """
basic_parameters = self._get_ellipsoid_parameters_basic_loadedhalo()
toreturn = [self.center_of_mass()]
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/yt_analysis/yt/commits/1c361a58104a/
Changeset: 1c361a58104a
Branch: yt-3.0
User: hegan
Date: 2014-05-08 18:21:45
Summary: dens -> density
Affected #: 1 file
diff -r 256f8dad8684ed08441bbf42f1163420b4e98ee2 -r 1c361a58104a6e756e9c6965e874489d83022022 doc/source/cookbook/simple_contour_in_slice.py
--- a/doc/source/cookbook/simple_contour_in_slice.py
+++ b/doc/source/cookbook/simple_contour_in_slice.py
@@ -4,20 +4,20 @@
pf = load("Sedov_3d/sedov_hdf5_chk_0002")
# Make a traditional slice plot.
-sp = SlicePlot(pf,"x","dens")
+sp = SlicePlot(pf,"x","density")
# Overlay the slice plot with thick red contours of density.
-sp.annotate_contour("dens", ncont=3, clim=(1e-2,1e-1), label=True,
+sp.annotate_contour("density", ncont=3, clim=(1e-2,1e-1), label=True,
plot_args={"colors": "red",
"linewidths": 2})
# What about some nice temperature contours in blue?
-sp.annotate_contour("temp", ncont=3, clim=(1e-8,1e-6), label=True,
+sp.annotate_contour("temperature", ncont=3, clim=(1e-8,1e-6), label=True,
plot_args={"colors": "blue",
"linewidths": 2})
# This is the plot object.
-po = sp.plots["dens"]
+po = sp.plots["density"]
# Turn off the colormap image, leaving just the contours.
po.axes.images[0].set_visible(False)
https://bitbucket.org/yt_analysis/yt/commits/405984f5131c/
Changeset: 405984f5131c
Branch: yt-3.0
User: hegan
Date: 2014-05-08 18:24:00
Summary: printing of angular momentum vector
Affected #: 1 file
diff -r 1c361a58104a6e756e9c6965e874489d83022022 -r 405984f5131c81689b13d587ab73268c138e0cce doc/source/cookbook/simple_off_axis_projection.py
--- a/doc/source/cookbook/simple_off_axis_projection.py
+++ b/doc/source/cookbook/simple_off_axis_projection.py
@@ -11,7 +11,7 @@
# Get the angular momentum vector for the sphere.
L = sp.quantities["AngularMomentumVector"]()
-print "Angular momentum vector: %s" % (L)
+print "Angular momentum vector: {0}".format(L)
# Create an OffAxisSlicePlot on the object with the L vector as its normal
p = OffAxisProjectionPlot(pf, L, "density", sp.center, (25, "kpc"))
https://bitbucket.org/yt_analysis/yt/commits/9c44441a395f/
Changeset: 9c44441a395f
Branch: yt-3.0
User: hegan
Date: 2014-05-08 18:28:52
Summary: Pressure-> pressure, VorticitySquared-> vorticity_squared
Affected #: 1 file
diff -r 405984f5131c81689b13d587ab73268c138e0cce -r 9c44441a395f4417cef3af459073170065a9004e doc/source/cookbook/simple_slice_with_multiple_fields.py
--- a/doc/source/cookbook/simple_slice_with_multiple_fields.py
+++ b/doc/source/cookbook/simple_slice_with_multiple_fields.py
@@ -4,5 +4,5 @@
pf = load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
# Create density slices of several fields along the x axis
-SlicePlot(pf, 'x', ['density','temperature','Pressure','VorticitySquared'],
+SlicePlot(pf, 'x', ['density','temperature','pressure','vorticity_squared'],
width = (800.0, 'kpc')).save()
https://bitbucket.org/yt_analysis/yt/commits/554200054447/
Changeset: 554200054447
Branch: yt-3.0
User: hegan
Date: 2014-05-08 18:42:59
Summary: units for region args
Affected #: 1 file
diff -r 9c44441a395f4417cef3af459073170065a9004e -r 554200054447b86e5fa0ed208e964c3abd7b11ef doc/source/cookbook/thin_slice_projection.py
--- a/doc/source/cookbook/thin_slice_projection.py
+++ b/doc/source/cookbook/thin_slice_projection.py
@@ -17,10 +17,9 @@
right_corner = pf.domain_right_edge
# Now adjust the size of the region along the line of sight (x axis).
-depth = 10.0 # in Mpc
-left_corner[0] = center[0] - 0.5 * depth / pf.units['mpc']
-left_corner[0] = center[0] + 0.5 * depth / pf.units['mpc']
-
+depth = pf.quan(10.0,'Mpc')
+left_corner[0] = center[0] - 0.5 * depth
+left_corner[0] = center[0] + 0.5 * depth
# Create the region
region = pf.region(center, left_corner, right_corner)
https://bitbucket.org/yt_analysis/yt/commits/91fafe916170/
Changeset: 91fafe916170
Branch: yt-3.0
User: MatthewTurk
Date: 2014-05-09 01:56:11
Summary: Merged in hegan/yt/yt-3.0 (pull request #888)
Cookbook Recipe Updates
Affected #: 11 files
diff -r 721b37f6b4378a5487d0208dfd0897eace5db5a1 -r 91fafe9161705f1bfc26276508b19cdfcbb2d476 doc/source/cookbook/halo_finding.py
--- a/doc/source/cookbook/halo_finding.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""
-This script shows the simplest way of getting halo information. For more
-information, see :ref:`halo_finding`.
-"""
-import yt
-
-ds = yt.load("Enzo_64/DD0043/data0043")
-
-halos = yt.HaloFinder(ds)
-halos.write_out("%s_halos.txt" % ds)
diff -r 721b37f6b4378a5487d0208dfd0897eace5db5a1 -r 91fafe9161705f1bfc26276508b19cdfcbb2d476 doc/source/cookbook/halo_mass_info.py
--- a/doc/source/cookbook/halo_mass_info.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""
-Title: Halo Mass Info
-Description: This recipe finds halos and then prints out information about
- them. Note that this recipe will take advantage of multiple CPUs
- if executed with mpirun and supplied the --parallel command line
- argument.
-Outputs: [RedshiftOutput0006_halo_info.txt]
-"""
-from yt.mods import *
-
-fn = "Enzo_64/RD0006/RedshiftOutput0006" # parameter file to load
-pf = load(fn) # load data
-
-# First we run our halo finder to identify all the halos in the dataset. This
-# can take arguments, but the default are pretty sane.
-halos = HaloFinder(pf)
-
-f = open("%s_halo_info.txt" % pf, "w")
-
-# Now, for every halo, we get the baryon data and examine it.
-for halo in halos:
- # The halo has a property called 'get_sphere' that obtains a sphere
- # centered on the point of maximum density (or the center of mass, if that
- # argument is supplied) and with the radius the maximum particle radius of
- # that halo.
- sphere = halo.get_sphere()
- # We use the quantities[] method to get the total mass in baryons and in
- # particles.
- baryon_mass, particle_mass = sphere.quantities["TotalQuantity"](
- ["cell_mass", "particle_mass"])
- # Now we print out this information, along with the ID.
- f.write("Total mass in HOP group %s is %0.5e (gas = %0.5e / particles = %0.5e)\n" % \
- (halo.id, baryon_mass + particle_mass, baryon_mass, particle_mass))
-f.close()
diff -r 721b37f6b4378a5487d0208dfd0897eace5db5a1 -r 91fafe9161705f1bfc26276508b19cdfcbb2d476 doc/source/cookbook/halo_particle_plotting.py
--- a/doc/source/cookbook/halo_particle_plotting.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-This is a simple mechanism for overplotting the particles belonging only to
-halos. For more information, see :ref:`halo_finding`.
-"""
-from yt.mods import * # set up our namespace
-
-pf = load("Enzo_64/DD0043/data0043")
-
-halos = HaloFinder(pf)
-
-p = ProjectionPlot(pf, "x", "density")
-p.annotate_hop_circles(halos)
-p.annotate_hop_particles(halos, max_number=100)
-p.save()
diff -r 721b37f6b4378a5487d0208dfd0897eace5db5a1 -r 91fafe9161705f1bfc26276508b19cdfcbb2d476 doc/source/cookbook/halo_plotting.py
--- a/doc/source/cookbook/halo_plotting.py
+++ b/doc/source/cookbook/halo_plotting.py
@@ -4,10 +4,13 @@
"""
from yt.mods import * # set up our namespace
-pf = load("Enzo_64/DD0043/data0043")
+data_pf = load("Enzo_64/RD0006/RedshiftOutput0006")
-halos = HaloFinder(pf)
+halo_pf = load('rockstar_halos/halos_0.0.bin')
-p = ProjectionPlot(pf, "z", "density")
-p.annotate_hop_circles(halos)
+hc = HaloCatalog(halos_pf = halo_pf)
+hc.load()
+
+p = ProjectionPlot(data_pf, "x", "density")
+p.annotate_halos(hc)
p.save()
diff -r 721b37f6b4378a5487d0208dfd0897eace5db5a1 -r 91fafe9161705f1bfc26276508b19cdfcbb2d476 doc/source/cookbook/simple_contour_in_slice.py
--- a/doc/source/cookbook/simple_contour_in_slice.py
+++ b/doc/source/cookbook/simple_contour_in_slice.py
@@ -4,20 +4,20 @@
pf = load("Sedov_3d/sedov_hdf5_chk_0002")
# Make a traditional slice plot.
-sp = SlicePlot(pf,"x","dens")
+sp = SlicePlot(pf,"x","density")
# Overlay the slice plot with thick red contours of density.
-sp.annotate_contour("dens", ncont=3, clim=(1e-2,1e-1), label=True,
+sp.annotate_contour("density", ncont=3, clim=(1e-2,1e-1), label=True,
plot_args={"colors": "red",
"linewidths": 2})
# What about some nice temperature contours in blue?
-sp.annotate_contour("temp", ncont=3, clim=(1e-8,1e-6), label=True,
+sp.annotate_contour("temperature", ncont=3, clim=(1e-8,1e-6), label=True,
plot_args={"colors": "blue",
"linewidths": 2})
# This is the plot object.
-po = sp.plots["dens"]
+po = sp.plots["density"]
# Turn off the colormap image, leaving just the contours.
po.axes.images[0].set_visible(False)
diff -r 721b37f6b4378a5487d0208dfd0897eace5db5a1 -r 91fafe9161705f1bfc26276508b19cdfcbb2d476 doc/source/cookbook/simple_off_axis_projection.py
--- a/doc/source/cookbook/simple_off_axis_projection.py
+++ b/doc/source/cookbook/simple_off_axis_projection.py
@@ -11,7 +11,7 @@
# Get the angular momentum vector for the sphere.
L = sp.quantities["AngularMomentumVector"]()
-print "Angular momentum vector: %s" % (L)
+print "Angular momentum vector: {0}".format(L)
# Create an OffAxisSlicePlot on the object with the L vector as its normal
p = OffAxisProjectionPlot(pf, L, "density", sp.center, (25, "kpc"))
diff -r 721b37f6b4378a5487d0208dfd0897eace5db5a1 -r 91fafe9161705f1bfc26276508b19cdfcbb2d476 doc/source/cookbook/simple_slice_with_multiple_fields.py
--- a/doc/source/cookbook/simple_slice_with_multiple_fields.py
+++ b/doc/source/cookbook/simple_slice_with_multiple_fields.py
@@ -4,5 +4,5 @@
pf = load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
# Create density slices of several fields along the x axis
-SlicePlot(pf, 'x', ['density','temperature','Pressure','VorticitySquared'],
+SlicePlot(pf, 'x', ['density','temperature','pressure','vorticity_squared'],
width = (800.0, 'kpc')).save()
diff -r 721b37f6b4378a5487d0208dfd0897eace5db5a1 -r 91fafe9161705f1bfc26276508b19cdfcbb2d476 doc/source/cookbook/thin_slice_projection.py
--- a/doc/source/cookbook/thin_slice_projection.py
+++ b/doc/source/cookbook/thin_slice_projection.py
@@ -17,10 +17,9 @@
right_corner = pf.domain_right_edge
# Now adjust the size of the region along the line of sight (x axis).
-depth = 10.0 # in Mpc
-left_corner[0] = center[0] - 0.5 * depth / pf.units['mpc']
-left_corner[0] = center[0] + 0.5 * depth / pf.units['mpc']
-
+depth = pf.quan(10.0,'Mpc')
+left_corner[0] = center[0] - 0.5 * depth
+left_corner[0] = center[0] + 0.5 * depth
# Create the region
region = pf.region(center, left_corner, right_corner)
diff -r 721b37f6b4378a5487d0208dfd0897eace5db5a1 -r 91fafe9161705f1bfc26276508b19cdfcbb2d476 doc/source/yt3differences.rst
--- a/doc/source/yt3differences.rst
+++ b/doc/source/yt3differences.rst
@@ -27,7 +27,7 @@
FieldName)``.
* Previously, yt would use "Enzo-isms" for field names. We now very
specifically define fields as lowercase with underscores. For instance,
- what used to be ``VelocityMagnitude`` would not be ``velocity_magnitude``.
+ what used to be ``VelocityMagnitude`` would now be ``velocity_magnitude``.
* Particles are either named by their type or default to the type ``io``.
* Axis names are now at the *end* of field names, not the beginning.
``x-velocity`` is now ``velocity_x``.
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the yt-svn
mailing list