[yt-svn] commit/yt: jzuhone: Merged in ngoldbaum/yt/yt-3.0 (pull request #979)

commits-noreply at bitbucket.org
Sat Jun 28 09:40:45 PDT 2014


1 new commit in yt:

https://bitbucket.org/yt_analysis/yt/commits/15bf4f304a0e/
Changeset:   15bf4f304a0e
Branch:      yt-3.0
User:        jzuhone
Date:        2014-06-28 18:40:36
Summary:     Merged in ngoldbaum/yt/yt-3.0 (pull request #979)

Fixing a number of docs build issues.
Affected #:  18 files

diff -r dc6a369b263c68adaf02af5d14effa934aed93d8 -r 15bf4f304a0e8b46658a6a7a1b887e4cc60722bb doc/source/cookbook/calculating_information.rst
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -57,3 +57,12 @@
 serial the operation ``for pf in ts:`` would also have worked identically.
 
 .. yt_cookbook:: time_series.py
+
+Complex Derived Fields
+~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe estimates the ratio of gravitational and pressure forces in a galaxy
+cluster simulation.  This shows how to create and work with vector derived 
+fields.
+
+.. yt_cookbook:: hse_field.py
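
The recipe referenced above builds vector fields component-by-component with
the ``yt.derived_field`` decorator. As a minimal, illustrative sketch of that
decorator (the field name and formula here are made up for illustration and
are not part of the recipe):

    import numpy as np
    import yt

    # Register a new field; the recipe uses this same decorator for the
    # acceleration and pressure-gradient components.
    @yt.derived_field(name="momentum_density_x", units="g/(cm**2*s)",
                      take_log=False)
    def _momentum_density_x(field, data):
        # Combine two existing fields cell by cell; units are tracked for us.
        return data["density"] * data["velocity_x"]

    ds = yt.load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0350")
    print ds.all_data()["momentum_density_x"].max()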

diff -r dc6a369b263c68adaf02af5d14effa934aed93d8 -r 15bf4f304a0e8b46658a6a7a1b887e4cc60722bb doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -36,7 +36,7 @@
 axes.  To focus on what's happening in the x-y plane, we make an additional
 Temperature slice for the bottom-right subpanel.
 
-.. yt-cookbook:: multiplot_2x2_coordaxes_slice.py
+.. yt_cookbook:: multiplot_2x2_coordaxes_slice.py
 
 Multi-Plot Slice and Projections
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

diff -r dc6a369b263c68adaf02af5d14effa934aed93d8 -r 15bf4f304a0e8b46658a6a7a1b887e4cc60722bb doc/source/cookbook/fits_xray_images.rst
--- a/doc/source/cookbook/fits_xray_images.rst
+++ b/doc/source/cookbook/fits_xray_images.rst
@@ -1,6 +1,6 @@
 .. _xray_fits:
 
 FITS X-ray Images in yt
-----------------------
+-----------------------
 
-.. notebook:: fits_xray_images.ipynb
\ No newline at end of file
+.. notebook:: fits_xray_images.ipynb

diff -r dc6a369b263c68adaf02af5d14effa934aed93d8 -r 15bf4f304a0e8b46658a6a7a1b887e4cc60722bb doc/source/cookbook/hse_field.py
--- a/doc/source/cookbook/hse_field.py
+++ b/doc/source/cookbook/hse_field.py
@@ -7,8 +7,10 @@
 # Define the components of the gravitational acceleration vector field by
 # taking the gradient of the gravitational potential
 
-@yt.derived_field(name='grav_accel_x', units='cm/s**2', take_log=False)
-def grav_accel_x(field, data):
+@yt.derived_field(name='gravitational_acceleration_x',
+                  units='cm/s**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["gravitational_potential"])])
+def gravitational_acceleration_x(field, data):
 
     # We need to set up stencils
 
@@ -16,20 +18,22 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dx = div_fac * data['dx'].flat[0]
+    dx = div_fac * data['dx'][0]
 
     gx = data["gravitational_potential"][sl_right, 1:-1, 1:-1]/dx
     gx -= data["gravitational_potential"][sl_left, 1:-1, 1:-1]/dx
 
     new_field = np.zeros(data["gravitational_potential"].shape,
-                         dtype='float64')*gx.unit_array
+                         dtype='float64')*gx.uq
     new_field[1:-1, 1:-1, 1:-1] = -gx
 
     return new_field
 
 
-@yt.derived_field(name='grav_accel_y', units='cm/s**2', take_log=False)
-def grav_accel_y(field, data):
+@yt.derived_field(name='gravitational_acceleration_y',
+                  units='cm/s**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["gravitational_potential"])])
+def gravitational_acceleration_y(field, data):
 
     # We need to set up stencils
 
@@ -37,20 +41,23 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dy = div_fac * data['dy'].flat[0]
+    dy = div_fac * data['dy'].flatten()[0]
 
     gy = data["gravitational_potential"][1:-1, sl_right, 1:-1]/dy
     gy -= data["gravitational_potential"][1:-1, sl_left, 1:-1]/dy
 
     new_field = np.zeros(data["gravitational_potential"].shape,
-                         dtype='float64')*gx.unit_array
+                         dtype='float64')*gy.uq
+
     new_field[1:-1, 1:-1, 1:-1] = -gy
 
     return new_field
 
 
-@yt.derived_field(name='grav_accel_z', units='cm/s**2', take_log=False)
-def grav_accel_z(field, data):
+@yt.derived_field(name='gravitational_acceleration_z',
+                  units='cm/s**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["gravitational_potential"])])
+def gravitational_acceleration_z(field, data):
 
     # We need to set up stencils
 
@@ -58,13 +65,13 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dz = div_fac * data['dz'].flat[0]
+    dz = div_fac * data['dz'].flatten()[0]
 
     gz = data["gravitational_potential"][1:-1, 1:-1, sl_right]/dz
     gz -= data["gravitational_potential"][1:-1, 1:-1, sl_left]/dz
 
     new_field = np.zeros(data["gravitational_potential"].shape,
-                         dtype='float64')*gx.unit_array
+                         dtype='float64')*gz.uq
     new_field[1:-1, 1:-1, 1:-1] = -gz
 
     return new_field
@@ -73,7 +80,8 @@
 # Define the components of the pressure gradient field
 
 
-@yt.derived_field(name='grad_pressure_x', units='g/(cm*s)**2', take_log=False)
+@yt.derived_field(name='grad_pressure_x', units='g/(cm*s)**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["pressure"])])
 def grad_pressure_x(field, data):
 
     # We need to set up stencils
@@ -82,18 +90,19 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dx = div_fac * data['dx'].flat[0]
+    dx = div_fac * data['dx'].flatten()[0]
 
     px = data["pressure"][sl_right, 1:-1, 1:-1]/dx
     px -= data["pressure"][sl_left, 1:-1, 1:-1]/dx
 
-    new_field = np.zeros(data["pressure"].shape, dtype='float64')*px.unit_array
+    new_field = np.zeros(data["pressure"].shape, dtype='float64')*px.uq
     new_field[1:-1, 1:-1, 1:-1] = px
 
     return new_field
 
 
-@yt.derived_field(name='grad_pressure_y', units='g/(cm*s)**2', take_log=False)
+@yt.derived_field(name='grad_pressure_y', units='g/(cm*s)**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["pressure"])])
 def grad_pressure_y(field, data):
 
     # We need to set up stencils
@@ -102,18 +111,19 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dy = div_fac * data['dy'].flat[0]
+    dy = div_fac * data['dy'].flatten()[0]
 
     py = data["pressure"][1:-1, sl_right, 1:-1]/dy
     py -= data["pressure"][1:-1, sl_left, 1:-1]/dy
 
-    new_field = np.zeros(data["pressure"].shape, dtype='float64')*px.unit_array
+    new_field = np.zeros(data["pressure"].shape, dtype='float64')*py.uq
     new_field[1:-1, 1:-1, 1:-1] = py
 
     return new_field
 
 
-@yt.derived_field(name='grad_pressure_z', units='g/(cm*s)**2', take_log=False)
+@yt.derived_field(name='grad_pressure_z', units='g/(cm*s)**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["pressure"])])
 def grad_pressure_z(field, data):
 
     # We need to set up stencils
@@ -122,12 +132,12 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dz = div_fac * data['dz'].flat[0]
+    dz = div_fac * data['dz'].flatten()[0]
 
     pz = data["pressure"][1:-1, 1:-1, sl_right]/dz
     pz -= data["pressure"][1:-1, 1:-1, sl_left]/dz
 
-    new_field = np.zeros(data["pressure"].shape, dtype='float64')*px.unit_array
+    new_field = np.zeros(data["pressure"].shape, dtype='float64')*pz.uq
     new_field[1:-1, 1:-1, 1:-1] = pz
 
     return new_field
@@ -135,49 +145,29 @@
 
 # Define the "degree of hydrostatic equilibrium" field
 
-@yt.derived_field(name='HSE', units=None, take_log=False)
+@yt.derived_field(name='HSE', units=None, take_log=False,
+                  display_name='Hydrostatic Equilibrium')
 def HSE(field, data):
 
-    gx = data["density"]*data["Grav_Accel_x"]
-    gy = data["density"]*data["Grav_Accel_y"]
-    gz = data["density"]*data["Grav_Accel_z"]
+    gx = data["density"]*data["gravitational_acceleration_x"]
+    gy = data["density"]*data["gravitational_acceleration_y"]
+    gz = data["density"]*data["gravitational_acceleration_z"]
 
-    hx = data["Grad_Pressure_x"] - gx
-    hy = data["Grad_Pressure_y"] - gy
-    hz = data["Grad_Pressure_z"] - gz
+    hx = data["grad_pressure_x"] - gx
+    hy = data["grad_pressure_y"] - gy
+    hz = data["grad_pressure_z"] - gz
 
-    h = np.sqrt((hx*hx+hy*hy+hz*hz)/(gx*gx+gy*gy+gz*gz))*gx.unit_array
+    h = np.sqrt((hx*hx+hy*hy+hz*hz)/(gx*gx+gy*gy+gz*gz))
 
     return h
 
 
-# Open two files, one at the beginning and the other at a later time when
-# there's a lot of sloshing going on.
+# Open a dataset from when there's a lot of sloshing going on.
 
-dsi = yt.load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0000")
-dsf = yt.load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0350")
+ds = yt.load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0350")
 
-# Sphere objects centered at the cluster potential minimum with a radius
-# of 200 kpc
 
-sphere_i = dsi.sphere(dsi.domain_center, (200, "kpc"))
-sphere_f = dsf.sphere(dsf.domain_center, (200, "kpc"))
+# Take a slice through the center of the domain
+slc = yt.SlicePlot(ds, 2, ["density", "HSE"], width=(1, 'Mpc'))
 
-# Average "degree of hydrostatic equilibrium" in these spheres
-
-hse_i = sphere_i.quantities["WeightedAverageQuantity"]("HSE", "cell_mass")
-hse_f = sphere_f.quantities["WeightedAverageQuantity"]("HSE", "cell_mass")
-
-print "Degree of hydrostatic equilibrium initially: ", hse_i
-print "Degree of hydrostatic equilibrium later: ", hse_f
-
-# Just for good measure, take slices through the center of the domains
-# of the two files
-
-slc_i = yt.SlicePlot(dsi, 2, ["density", "HSE"], center=dsi.domain_center,
-                     width=(1.0, "Mpc"))
-slc_f = yt.SlicePlot(dsf, 2, ["density", "HSE"], center=dsf.domain_center,
-                     width=(1.0, "Mpc"))
-
-slc_i.save("initial")
-slc_f.save("final")
+slc.save("hse")
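
Once the script above has registered the fields, "HSE" behaves like any other
yt field. A hedged sketch of an extra analysis step one might append to the
script (the snake_case derived-quantity call is assumed from yt-3.0's
interface; it is not part of the pull request):

    # Mass-weighted average of the hydrostatic-equilibrium deviation inside a
    # 200 kpc sphere centered on the domain (mirrors the sphere used by the
    # older version of the recipe removed above).
    sp = ds.sphere(ds.domain_center, (200, "kpc"))
    hse_mean = sp.quantities.weighted_average_quantity("HSE", "cell_mass")
    print "Mean deviation from hydrostatic equilibrium: ", hse_mean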

diff -r dc6a369b263c68adaf02af5d14effa934aed93d8 -r 15bf4f304a0e8b46658a6a7a1b887e4cc60722bb doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -51,7 +51,7 @@
 
 If you are developing new functionality, it is sometimes more convenient to use
 the Nose command line interface, ``nosetests``. You can run the unit tests
-using `no`qsetets` by navigating to the base directory of the yt mercurial
+using ``nose`` by navigating to the base directory of the yt mercurial
 repository and invoking ``nosetests``:
 
 .. code-block:: bash

diff -r dc6a369b263c68adaf02af5d14effa934aed93d8 -r 15bf4f304a0e8b46658a6a7a1b887e4cc60722bb doc/source/examining/Loading_Generic_Array_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Array_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Array_Data.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:cd145d8cadbf1a0065d0f9fb4ea107c215fcd53245b3bb7d29303af46f063552"
+  "signature": "sha256:5fc7783d6c99659c353a35348bb21210fcb7572d5357f32dd61755d4a7f8fe6c"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -443,7 +443,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "f = pyfits.open(data_dir+\"/UnigridData/velocity_field_20.fits.gz\")\n",
+      "f = pyfits.open(data_dir+\"/UnigridData/velocity_field_20.fits\")\n",
       "f.info()"
      ],
      "language": "python",
@@ -462,7 +462,7 @@
      "collapsed": false,
      "input": [
       "data = {}\n",
-      "for hdu in f[1:]:\n",
+      "for hdu in f:\n",
       "    name = hdu.name.lower()\n",
       "    data[name] = (hdu.data,\"km/s\")\n",
       "print data.keys()"

diff -r dc6a369b263c68adaf02af5d14effa934aed93d8 -r 15bf4f304a0e8b46658a6a7a1b887e4cc60722bb doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -711,11 +711,13 @@
 ``spectral_factor``
 ~~~~~~~~~~~~~~~~~~~
 
-Often, the aspect ratio of 3D spectral cubes can be far from unity. Because yt sets the pixel
-scale as the ``code_length``, certain visualizations (such as volume renderings) may look extended
-or distended in ways that are undesirable. To adjust the width in ``code_length`` of the spectral
- axis, set ``spectral_factor`` equal to a constant which gives the desired scaling,
- or set it to ``"auto"`` to make the width the same as the largest axis in the sky plane.
+Often, the aspect ratio of 3D spectral cubes can be far from unity. Because yt
+sets the pixel scale as the ``code_length``, certain visualizations (such as
+volume renderings) may look extended or distended in ways that are
+undesirable. To adjust the width in ``code_length`` of the spectral axis, set
+``spectral_factor`` equal to a constant which gives the desired scaling, or set
+it to ``"auto"`` to make the width the same as the largest axis in the sky
+plane.
 
 Miscellaneous Tools for Use with FITS Data
 ++++++++++++++++++++++++++++++++++++++++++
@@ -792,11 +794,11 @@
 PyNE Data
 ---------
 
-.. _loading-numpy-array:
-
 Generic Array Data
 ------------------
 
+See :ref:`loading-numpy-array` for more detail.
+
 Even if your data is not strictly related to fields commonly used in
 astrophysical codes or your code is not supported yet, you can still feed it to
 ``yt`` to use its advanced visualization and analysis facilities. The only
@@ -848,6 +850,8 @@
 Generic AMR Data
 ----------------
 
+See :ref:`loading-numpy-array` for more detail.
+
 It is possible to create native ``yt`` parameter file from Python's dictionary
 that describes set of rectangular patches of data of possibly varying
 resolution. 
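
As a brief, hedged sketch of the loading interfaces this section describes
(array contents are arbitrary; the ``load_uniform_grid`` keywords follow
yt-3.0's generic-data loader as an assumption here, and ``spectral_factor`` is
likewise assumed to be a load-time keyword for FITS cubes, as the paragraph
above describes):

    import numpy as np
    import yt

    # Generic array data: wrap an in-memory NumPy array as a yt dataset.
    arr = np.random.random((64, 64, 64))
    data = {"density": (arr, "g/cm**3")}
    ds = yt.load_uniform_grid(data, arr.shape, length_unit="Mpc")
    yt.SlicePlot(ds, "z", "density").save()

    # FITS spectral cube with the spectral axis scaled to match the sky plane.
    # (hypothetical file name; keyword assumed per the docs above)
    # cube = yt.load("my_cube.fits", spectral_factor="auto")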

diff -r dc6a369b263c68adaf02af5d14effa934aed93d8 -r 15bf4f304a0e8b46658a6a7a1b887e4cc60722bb doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -120,6 +120,8 @@
 .. python-script::
    
    from yt.mods import *
+   from yt.analysis_modules.halo_analysis.halo_catalog import HaloCatalog
+
    data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
    halos_pf = load('rockstar_halos/halos_0.0.bin')
 

diff -r dc6a369b263c68adaf02af5d14effa934aed93d8 -r 15bf4f304a0e8b46658a6a7a1b887e4cc60722bb doc/source/visualizing/_images/mapserver.png
Binary file doc/source/visualizing/_images/mapserver.png has changed

diff -r dc6a369b263c68adaf02af5d14effa934aed93d8 -r 15bf4f304a0e8b46658a6a7a1b887e4cc60722bb doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -478,8 +478,7 @@
     :ref:`cookbook-amrkdtree_to_uniformgrid`.
 
 System Requirements
--------------------
-.. versionadded:: 3.0
++++++++++++++++++++
 
 Nvidia graphics card - The memory limit of the graphics card sets the limit
                        on the size of the data source.
@@ -490,7 +489,7 @@
 the common/inc samples shipped with CUDA. The following shows an example
 in bash with CUDA 5.5 installed in /usr/local :
 
-export CUDA_SAMPLES=/usr/local/cuda-5.5/samples/common/inc
+    export CUDA_SAMPLES=/usr/local/cuda-5.5/samples/common/inc
 
 PyCUDA must also be installed to use Theia. 
 
@@ -503,13 +502,13 @@
 
 
 Tutorial
---------
-.. versionadded:: 3.0
+++++++++
 
 Currently rendering only works on uniform grids. Here is an example
 on a 1024 cube of float32 scalars.
 
 .. code-block:: python
+
    from yt.visualization.volume_rendering.theia.scene import TheiaScene
    from yt.visualization.volume_rendering.algorithms.front_to_back import FrontToBackRaycaster
    import numpy as np
@@ -528,28 +527,27 @@
 .. _the-theiascene-interface:
 
 The TheiaScene Interface
---------------------
-.. versionadded:: 3.0
+++++++++++++++++++++++++
 
 A TheiaScene object has been created to provide a high level entry point for
-controlling the raycaster's view onto the data. The class  
-:class:`~yt.visualization.volume_rendering.theia.TheiaScene` encapsulates
- a Camera object and a TheiaSource that intern encapsulates
-a volume. The :class:`~yt.visualization.volume_rendering.theia.Camera`
-provides controls for rotating, translating, and zooming into the volume.
-Using the :class:`~yt.visualization.volume_rendering.theia.TheiaSource`
-automatically transfers the volume to the graphic's card texture memory.
+controlling the raycaster's view onto the data. The class
+:class:`~yt.visualization.volume_rendering.theia.TheiaScene` encapsulates a
+Camera object and a TheiaSource that intern encapsulates a volume. The
+:class:`~yt.visualization.volume_rendering.theia.Camera` provides controls for
+rotating, translating, and zooming into the volume.  Using the
+:class:`~yt.visualization.volume_rendering.theia.TheiaSource` automatically
+transfers the volume to the graphic's card texture memory.
 
 Example Cookbooks
----------------
++++++++++++++++++
 
 OpenGL Example for interactive volume rendering:
 :ref:`cookbook-opengl_volume_rendering`.
 
-OpenGL Stereoscopic Example :
 .. warning::  Frame rate will suffer significantly from stereoscopic rendering.
               ~2x slower since the volume must be rendered twice.
-:ref:`cookbook-opengl_stereo_volume_rendering`.
+
+OpenGL Stereoscopic Example: :ref:`cookbook-opengl_stereo_volume_rendering`.
 
 Pseudo-Realtime video rendering with ffmpeg :
 :ref:`cookbook-ffmpeg_volume_rendering`.

diff -r dc6a369b263c68adaf02af5d14effa934aed93d8 -r 15bf4f304a0e8b46658a6a7a1b887e4cc60722bb yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -201,7 +201,8 @@
             if self.suppress_logging:
                 old_level = int(ytcfg.get("yt","loglevel"))
                 mylog.setLevel(40)
-            dd_first = self.data_series[0].all_data()
+            ds_first = self.data_series[0]
+            dd_first = ds_first.all_data()
             fd = dd_first._determine_fields(field)[0]
             if field not in self.particle_fields:
                 if self.data_series[0].field_info[fd].particle_type:

diff -r dc6a369b263c68adaf02af5d14effa934aed93d8 -r 15bf4f304a0e8b46658a6a7a1b887e4cc60722bb yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -156,7 +156,8 @@
 
     def _create_intensity(self, i):
         def _intensity(field, data):
-            w = np.abs(data["v_los"]-self.vmid[i])/self.dv
+            vlos = data["v_los"]
+            w = np.abs(vlos-self.vmid[i])/self.dv.in_units(vlos.units)
             w = 1.-w
             w[w < 0.0] = 0.0
             return data[self.field]*w
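
The fix above converts ``self.dv`` into the units of the line-of-sight
velocity before dividing, so the weight ``w`` comes out dimensionless. A
standalone sketch of the same ``in_units`` idiom (the quantities below are
arbitrary):

    import numpy as np
    import yt

    v_los = yt.YTArray(np.linspace(-500.0, 500.0, 5), "km/s")
    vmid = yt.YTQuantity(100.0, "km/s")
    dv = yt.YTQuantity(2.0e6, "cm/s")   # 20 km/s, expressed in cgs

    # Put dv in the same units as v_los so the ratio is a plain
    # dimensionless array rather than one carrying a mixed-unit factor.
    w = np.abs(v_los - vmid) / dv.in_units(v_los.units)
    print w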

diff -r dc6a369b263c68adaf02af5d14effa934aed93d8 -r 15bf4f304a0e8b46658a6a7a1b887e4cc60722bb yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -283,7 +283,8 @@
                     else :
                         pval = val
                     if vn in self.parameters and self.parameters[vn] != pval:
-                        mylog.warning("{0} {1} overwrites a simulation scalar of the same name".format(hn[:-1],vn)) 
+                        mylog.info("{0} {1} overwrites a simulation "
+                                   "scalar of the same name".format(hn[:-1],vn))
                     self.parameters[vn] = pval
         if self._flash_version == 7:
             for hn in hns:
@@ -300,7 +301,8 @@
                     else :
                         pval = val
                     if vn in self.parameters and self.parameters[vn] != pval:
-                        mylog.warning("{0} {1} overwrites a simulation scalar of the same name".format(hn[:-1],vn))
+                        mylog.info("{0} {1} overwrites a simulation "
+                                   "scalar of the same name".format(hn[:-1],vn))
                     self.parameters[vn] = pval
         
         # Determine block size
@@ -363,7 +365,7 @@
         try:
             self.gamma = self.parameters["gamma"]
         except:
-            mylog.warning("Cannot find Gamma")
+            mylog.info("Cannot find Gamma")
             pass
 
         # Get the simulation time

diff -r dc6a369b263c68adaf02af5d14effa934aed93d8 -r 15bf4f304a0e8b46658a6a7a1b887e4cc60722bb yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -756,7 +756,6 @@
     yield assert_array_equal, yt_arr, YTArray(yt_arr.to_astropy())
     yield assert_equal, yt_quan, YTQuantity(yt_quan.to_astropy())
 
-
 def test_subclass():
 
     class YTASubclass(YTArray):

diff -r dc6a369b263c68adaf02af5d14effa934aed93d8 -r 15bf4f304a0e8b46658a6a7a1b887e4cc60722bb yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -501,7 +501,7 @@
         streamplot_args = {'x': X, 'y': Y, 'u':pixX, 'v': pixY,
                            'density': self.dens}
         streamplot_args.update(self.plot_args)
-        plot._axes.streamplot(**self.streamplot_args)
+        plot._axes.streamplot(**streamplot_args)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
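
The one-line fix above drops a stray ``self.`` so that the locally merged
keyword dict is what actually reaches matplotlib. A small standalone sketch of
the same "merge defaults with user options, then expand" pattern (values are
arbitrary):

    import numpy as np
    import matplotlib.pyplot as plt

    x = np.linspace(0.0, 1.0, 32)
    y = np.linspace(0.0, 1.0, 32)
    X, Y = np.meshgrid(x, y)
    u = -np.sin(np.pi * Y)
    v = np.cos(np.pi * X)

    fig, ax = plt.subplots()

    # Build the defaults, let caller-supplied options override them, and pass
    # the *local* dict (not an attribute) on to streamplot.
    streamplot_args = {"x": x, "y": y, "u": u, "v": v, "density": 1.0}
    plot_args = {"color": "black"}      # stands in for self.plot_args
    streamplot_args.update(plot_args)
    ax.streamplot(**streamplot_args)

    fig.savefig("streamlines.png")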

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


