[yt-svn] commit/yt: 13 new changesets

commits-noreply at bitbucket.org
Wed Feb 3 09:17:28 PST 2016


13 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/7aae2972d12f/
Changeset:   7aae2972d12f
Branch:      yt
User:        atmyers
Date:        2016-01-18 22:23:03+00:00
Summary:     Adding the displacement fields into the vertex array.
Affected #:  1 file

diff -r 8717f785d8fd0da987e2a881918748a105b6f8f3 -r 7aae2972d12f2a78d58018e95e397314c6287eaf yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -27,7 +27,7 @@
     ExodusIIFieldInfo
 from .util import \
     load_info_records, sanitize_string
-
+from yt.funcs import iterable
 
 class ExodusIIUnstructuredMesh(UnstructuredMesh):
     _index_offset = 1
@@ -63,6 +63,7 @@
     def __init__(self,
                  filename,
                  step=0,
+                 displacement_factor=0.0,
                  dataset_type='exodus_ii',
                  storage_filename=None,
                  units_override=None):
@@ -70,6 +71,11 @@
         self.parameter_filename = filename
         self.fluid_types += self._get_fluid_types()
         self.step = step
+        if iterable(displacement_factor):
+            assert(len(displacement_factor) == 3)
+        else:
+            displacement_factor = [displacement_factor]*3
+        self.displacement_factor = np.array(displacement_factor)
         super(ExodusIIDataset, self).__init__(filename, dataset_type,
                                               units_override=units_override)
         self.index_filename = filename
@@ -222,11 +228,20 @@
 
         mylog.info("Loading coordinates")
         if "coord" not in self._vars:
-            return np.array([self._vars["coord%s" % ax][:]
-                             for ax in coord_axes]).transpose().copy()
+            coords = np.array([self._vars["coord%s" % ax][:]
+                               for ax in coord_axes]).transpose().copy()
         else:
-            return np.array([coord for coord in
-                             self._vars["coord"][:]]).transpose().copy()
+            coords = np.array([coord for coord in
+                               self._vars["coord"][:]]).transpose().copy()
+
+        for i, ax in enumerate(coord_axes):
+            if "disp_%s" % ax in self.parameters['nod_names']:
+                ind = self.parameters['nod_names'].index("disp_%s" % ax)
+                offset = self._vars['vals_nod_var%d' % (ind + 1)][self.step]
+                coords[:, i] += self.displacement_factor[i]*offset
+
+        return coords
+
 
     def _read_connectivity(self):
         """


https://bitbucket.org/yt_analysis/yt/commits/2d025072675e/
Changeset:   2d025072675e
Branch:      yt
User:        atmyers
Date:        2016-01-18 23:27:54+00:00
Summary:     allow the displacement factor to be set on a mesh-by-mesh basis
Affected #:  1 file

diff -r 7aae2972d12f2a78d58018e95e397314c6287eaf -r 2d025072675eec3af8bf8332b05c696eeb59bbe1 yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -42,10 +42,16 @@
 
     def _initialize_mesh(self):
         coords = self.ds._read_coordinates()
-        self.meshes = [ExodusIIUnstructuredMesh(
-            mesh_id, self.index_filename, conn_ind, coords, self)
-                       for mesh_id, conn_ind in
-                       enumerate(self.ds._read_connectivity())]
+        connectivity = self.ds._read_connectivity()
+        self.meshes = []
+        for mesh_id, conn_ind in enumerate(connectivity):
+            displaced_coords = self.ds._apply_displacement(coords, mesh_id)
+            mesh = ExodusIIUnstructuredMesh(mesh_id, 
+                                            self.index_filename,
+                                            conn_ind, 
+                                            displaced_coords, 
+                                            self)
+            self.meshes.append(mesh)
 
     def _detect_output_fields(self):
         elem_names = self.dataset.parameters['elem_names']
@@ -63,7 +69,7 @@
     def __init__(self,
                  filename,
                  step=0,
-                 displacement_factor=0.0,
+                 displacements=None,
                  dataset_type='exodus_ii',
                  storage_filename=None,
                  units_override=None):
@@ -71,11 +77,10 @@
         self.parameter_filename = filename
         self.fluid_types += self._get_fluid_types()
         self.step = step
-        if iterable(displacement_factor):
-            assert(len(displacement_factor) == 3)
+        if displacements is None:
+            self.displacements = {}
         else:
-            displacement_factor = [displacement_factor]*3
-        self.displacement_factor = np.array(displacement_factor)
+            self.displacements = displacements
         super(ExodusIIDataset, self).__init__(filename, dataset_type,
                                               units_override=units_override)
         self.index_filename = filename
@@ -233,16 +238,30 @@
         else:
             coords = np.array([coord for coord in
                                self._vars["coord"][:]]).transpose().copy()
+        return coords
 
+    def _apply_displacement(self, coords, mesh_id):
+        
+        mesh_name = "connect%d" % (mesh_id + 1)
+        if mesh_name not in self.displacements:
+            new_coords = coords.copy()
+            return new_coords
+
+        new_coords = np.zeros_like(coords)
+        fac = self.displacements[mesh_name]
+
+        if not iterable(fac):
+            fac = [fac]*self.dimensionality
+
+        coord_axes = 'xyz'[:self.dimensionality]
         for i, ax in enumerate(coord_axes):
             if "disp_%s" % ax in self.parameters['nod_names']:
                 ind = self.parameters['nod_names'].index("disp_%s" % ax)
                 offset = self._vars['vals_nod_var%d' % (ind + 1)][self.step]
-                coords[:, i] += self.displacement_factor[i]*offset
+                new_coords[:, i] = coords[:, i] + fac[i]*offset
 
-        return coords
-
-
+        return new_coords
+        
     def _read_connectivity(self):
         """
         Loads the connectivity data for the mesh
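
The per-mesh control added here keys on the Exodus II block names
("connect1", "connect2", ...). A standalone sketch of that dispatch, with
illustrative inputs in place of a real file:

    import numpy as np

    def apply_displacement(coords, mesh_id, displacements, disp_fields, ndim=3):
        # Meshes without an entry in the displacements dict come back
        # unchanged (as a copy), mirroring _apply_displacement above.
        mesh_name = "connect%d" % (mesh_id + 1)
        if mesh_name not in displacements:
            return coords.copy()
        fac = displacements[mesh_name]
        if not np.iterable(fac):
            fac = [fac] * ndim
        new_coords = coords.copy()
        for i in range(ndim):
            if disp_fields[i] is not None:
                new_coords[:, i] = coords[:, i] + fac[i] * disp_fields[i]
        return new_coords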


https://bitbucket.org/yt_analysis/yt/commits/96aa17132500/
Changeset:   96aa17132500
Branch:      yt
User:        atmyers
Date:        2016-01-19 00:12:06+00:00
Summary:     correctly compute domain boundaries when displacements are applied.
Affected #:  1 file

diff -r 2d025072675eec3af8bf8332b05c696eeb59bbe1 -r 96aa17132500f307a40e2d643d1daa7dfdf44d26 yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -112,8 +112,7 @@
         self.parameters['num_meshes'] = self._vars['eb_status'].shape[0]
         self.parameters['elem_names'] = self._get_elem_names()
         self.parameters['nod_names'] = self._get_nod_names()
-        self.domain_left_edge = self._load_domain_edge(0)
-        self.domain_right_edge = self._load_domain_edge(1)
+        self.domain_left_edge, self.domain_right_edge = self._load_domain_edge()
 
         # set up pseudo-3D for lodim datasets here
         if self.dimensionality == 2:
@@ -272,17 +271,22 @@
             connectivity.append(self._vars["connect%d" % (i+1)][:].astype("i8"))
         return connectivity
 
-    def _load_domain_edge(self, domain_idx):
+    def _load_domain_edge(self):
         """
         Loads the boundaries for the domain edge
 
-        Parameters:
-        - domain_idx: 0 corresponds to the left edge, 1 corresponds to the right edge
         """
-        if domain_idx == 0:
-            return self._read_coordinates().min(axis=0)
-        if domain_idx == 1:
-            return self._read_coordinates().max(axis=0)
+        
+        coords = self._read_coordinates()
+        connectivity = self._read_connectivity()
+
+        mi = 1e300
+        ma = -1e300
+        for mesh_id, _ in enumerate(connectivity):
+            displaced_coords = self._apply_displacement(coords, mesh_id)
+            mi = np.minimum(displaced_coords.min(axis=0), mi)
+            ma = np.maximum(displaced_coords.max(axis=0), ma)
+        return mi, ma
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
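
With per-mesh displacements, the domain bounds become the elementwise
min/max over every displaced coordinate set. A short sketch of that
reduction (illustrative, not the yt API):

    import numpy as np

    def load_domain_edge(displaced_coord_sets):
        # Accumulate elementwise bounds over all (displaced) meshes.
        mi = np.full(3, np.inf)
        ma = np.full(3, -np.inf)
        for coords in displaced_coord_sets:
            mi = np.minimum(coords.min(axis=0), mi)
            ma = np.maximum(coords.max(axis=0), ma)
        return mi, ma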


https://bitbucket.org/yt_analysis/yt/commits/1ac7b456b11a/
Changeset:   1ac7b456b11a
Branch:      yt
User:        atmyers
Date:        2016-01-19 00:24:11+00:00
Summary:     Adding a docstring.
Affected #:  1 file

diff -r 96aa17132500f307a40e2d643d1daa7dfdf44d26 -r 1ac7b456b11abf29494954e9a61c2eff0b7f0cc1 yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -73,7 +73,59 @@
                  dataset_type='exodus_ii',
                  storage_filename=None,
                  units_override=None):
+        """
 
+        A class used to represent an on-disk ExodusII dataset. The initializer takes 
+        two extra optional parameters, "step" and "displacements."
+
+        Parameters
+        ----------
+
+        step : integer
+            The step tells which time index to slice at. It raises an error if
+            the index is larger than the number of time outputs in the ExodusII
+            file. Passing step=-1 picks out the last dataframe. 
+            Default is 0.
+
+        displacements : dictionary
+            This is a dictionary of scale factors that will be applied to the 
+            displacement fields in the ExodusII file. If no displacement fields
+            are present, then this dictionary is ignored. You can specify 
+            separate scale factors for each mesh. The scale factors can either be
+            floats, in which case the same scale will be applied to each dimension,
+            or they can be iterables of 3 floats, in which case you can specify
+            anisotropic scale factors.
+
+        Examples
+        --------
+
+        This will load the Dataset at time index '0' with displacements turned off.
+
+        >>> import yt
+        >>> ds = yt.load("MOOSE_sample_data/mps_out.e")
+
+        This will load the Dataset at the final index with displacements turned off.
+
+        >>> import yt
+        >>> ds = yt.load("MOOSE_sample_data/mps_out.e", step=-1)
+
+        This will load the Dataset at index 10, scaling the 2nd mesh
+        by a factor of 5.0 in each direction.
+
+        >>> import yt
+        >>> ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
+                         displacements={'connect2': 5.0})
+        
+        This will load the Dataset at index 10, scaling the 2nd mesh
+        by a factor of 5.0 in each direction and the 1st mesh by an
+        anisotropic scale factor.
+
+        >>> import yt
+        >>> ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
+                         displacements={'connect1': [1.0, 2.0, 3.0],
+                                        'connect2': 5.0})
+
+        """
         self.parameter_filename = filename
         self.fluid_types += self._get_fluid_types()
         self.step = step


https://bitbucket.org/yt_analysis/yt/commits/bd1fced7cfb1/
Changeset:   bd1fced7cfb1
Branch:      yt
User:        atmyers
Date:        2016-01-23 07:52:11+00:00
Summary:     Adding an explanation of the displacement fields to the loading data docs
Affected #:  1 file

diff -r 1ac7b456b11abf29494954e9a61c2eff0b7f0cc1 -r bd1fced7cfb1c75f95d7c8bc3970320779b20a9b doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -330,12 +330,17 @@
 Exodus II Data
 --------------
 
+.. note::
+   To load Exodus II data, you need to have the
+   `netcdf4 <http://unidata.github.io/netcdf4-python/>`_ python interface installed.
+
 Exodus II is a file format for Finite Element datasets that is used by the MOOSE
 framework for file IO. Support for this format (and for unstructured mesh data in 
-general) is a new feature as of yt 3.3, so while we aim to fully support it, we also expect 
-there to be some buggy features at present. Currently, yt can visualize first-order
-mesh types only (4-node quads, 8-node hexes, 3-node triangles, and 4-node tetrahedra).
-Development of higher-order visualization capability is a work in progress.
+general) is a new feature as of yt 3.3, so while we aim to fully support it, we 
+also expect there to be some buggy features at present. Currently, yt can visualize 
+quads, hexes, triangles, and tetrahedral element types at first order. Additionally,
+there is experimental support for the high-order visualization of 20-node hex elements.
+Development of more high-order visualization capability is a work in progress.
 
 To load an Exodus II dataset, you can use the ``yt.load`` command on the Exodus II
 file:
@@ -348,14 +353,15 @@
 Because Exodus II datasets can have multiple steps (which can correspond to time steps, 
 picard iterations, non-linear solve iterations, etc...), you can also specify a step
 argument when you load an Exodus II data that defines the index at which to look when
-you read data from the file.
+you read data from the file. Omitting this argument is the same as passing in 0, and
+setting ``step=-1`` selects the last time output in the file.
 
 You can access the connectivity information directly by doing:
 
 .. code-block:: python
     
    import yt
-   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=-1)
    print(ds.index.meshes[0].connectivity_coords)
    print(ds.index.meshes[0].connectivity_indices)
    print(ds.index.meshes[1].connectivity_coords)
@@ -368,7 +374,7 @@
 .. code-block:: python
     
    import yt
-   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
    print(ds.field_list)
 
 This will give you a list of field names like ``('connect1', 'diffused')`` and 
@@ -380,7 +386,7 @@
 .. code-block:: python
     
    import yt
-   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
    ad = ds.all_data()  # geometric selection, this just grabs everything
    print(ad['connect1', 'convected'])
 
@@ -390,7 +396,7 @@
 .. code-block:: python
 
    import yt
-   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
    ad = ds.all_data()
    print(ad['connect1', 'convected'].shape)
 
@@ -401,7 +407,7 @@
 .. code-block:: python
 
    import yt
-   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
    ad = ds.all_data()
    print(ad['connect1', 'vertex_x'])
 
@@ -411,7 +417,7 @@
 .. code-block:: python
 
    import yt
-   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
    ad = ds.all_data()
    print(ad['connect1', 'conv_indicator'].shape)
 
@@ -420,6 +426,50 @@
 For information about visualizing unstructured mesh data, including Exodus II datasets, 
 please see :ref:`unstructured-mesh-slices` and :ref:`unstructured_mesh_rendering`. 
 
+Displacement Fields
+^^^^^^^^^^^^^^^^^^^
+
+Displacement fields can be toggled on or off when a dataset is loaded by passing in 
+an optional dictionary to the ``yt.load`` command. This feature is turned off by
+default, meaning that a dataset loaded as 
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/mps_out.e")
+
+will not include the displacements in the vertex positions. The displacements can
+be turned on separately for each mesh in the file. For example, the following
+code snippet turns displacements on for the second mesh, but not the first:
+
+.. code-block:: python
+
+    import yt
+    ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
+                 displacements={'connect2': 1.0})
+
+The displacements can also be scaled by an arbitrary factor before they are 
+added in to the vertex positions. To blow them up by a factor of 10.0, we do
+
+.. code-block:: python
+
+    import yt
+    ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
+                 displacements={'connect2': 10.0})
+
+Finally, we can also scale each dimension independently. This snippet:
+
+.. code-block:: python
+
+    import yt
+    ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
+                  displacements={'connect1': [1.0, 2.0, 3.0],
+                                 'connect2': 5.0})
+
+turns displacements on for both meshes, scaling those in the 2nd mesh
+by a factor of 5 and those in the first by 1.0, 2.0, and 3.0 in the
+x, y, and z directions, respectively.
+
 
 FITS Data
 ---------


https://bitbucket.org/yt_analysis/yt/commits/9ce3bacf5e53/
Changeset:   9ce3bacf5e53
Branch:      yt
User:        atmyers
Date:        2016-01-28 06:07:27+00:00
Summary:     adding more description of displacement fields
Affected #:  1 file

diff -r bd1fced7cfb1c75f95d7c8bc3970320779b20a9b -r 9ce3bacf5e533165ba29154f8a2a70b8c10c0dba doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -429,9 +429,17 @@
 Displacement Fields
 ^^^^^^^^^^^^^^^^^^^
 
-Displacement fields can be toggled on or off when a dataset is loaded by passing in 
-an optional dictionary to the ``yt.load`` command. This feature is turned off by
-default, meaning that a dataset loaded as 
+Finite element codes often solve for the displacement of each vertex from its 
+original position as a node variable, rather than updating the actual vertex 
+positions with time. For analysis and visualization, it is often useful to turn 
+these displacements on or off, and to be able to scale them arbitrarily to 
+emphasize certain features of the solution. To allow this, if ``yt`` detects 
+displacement fields in an Exodus II dataset (using the convention that they
+will be named ``disp_x``, ``disp_y``, etc...), it will optionally add these to 
+the mesh vertex positions for the purposes of visualization. Displacement fields 
+can be controlled when a dataset is loaded by passing in an optional dictionary 
+to the ``yt.load`` command. This feature is turned off by default, meaning that 
+a dataset loaded as 
 
 .. code-block:: python
 


https://bitbucket.org/yt_analysis/yt/commits/96af7a0cf1c6/
Changeset:   96af7a0cf1c6
Branch:      yt
User:        atmyers
Date:        2016-01-28 06:28:23+00:00
Summary:     adding an answer test for Exodus II displacement fields.
Affected #:  1 file

diff -r 9ce3bacf5e533165ba29154f8a2a70b8c10c0dba -r 96af7a0cf1c68c7341dc8a53ecfa89bd0980c07d yt/frontends/exodus_ii/tests/test_outputs.py
--- a/yt/frontends/exodus_ii/tests/test_outputs.py
+++ b/yt/frontends/exodus_ii/tests/test_outputs.py
@@ -18,7 +18,10 @@
     assert_array_equal, \
     requires_file
 from yt.utilities.answer_testing.framework import \
-    data_dir_load
+    data_dir_load, \
+    requires_ds, \
+    GenericArrayTest
+
 
 out = "ExodusII/out.e"
 
@@ -69,3 +72,17 @@
     field_list = [('connect1', 'forced')]
     yield assert_equal, str(ds), "gold.e"
     yield assert_array_equal, ds.field_list, field_list 
+
+big_data = "MOOSE_sample_data/mps_out.e"
+
+
+@requires_ds(big_data)
+def test_displacement_fields():
+    displacement_dicts =[{'connect2':5.0},
+                         {'connect1': [1.0, 2.0, 3.0], 'connect2': 5.0}]
+    for disp in displacement_dicts:
+        ds = data_dir_load(big_data, displacements=disp)
+        for mesh in ds.index.meshes:
+            def array_func():
+                return mesh.connectivity_coords
+            yield GenericArrayTest(ds, array_func, 12)
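
Outside the answer-testing framework, the same behavior can be spot-checked
by comparing vertex coordinates with displacements on and off. A sketch,
assuming the MOOSE_sample_data file is available locally and its displacement
fields are nonzero at the chosen step:

    import numpy as np
    import yt

    base = yt.load("MOOSE_sample_data/mps_out.e", step=10)
    disp = yt.load("MOOSE_sample_data/mps_out.e", step=10,
                   displacements={'connect2': 5.0})

    # connect1 has no entry in the dict, so its vertices are untouched;
    # connect2's vertices should move by 5x its displacement fields.
    assert np.array_equal(base.index.meshes[0].connectivity_coords,
                          disp.index.meshes[0].connectivity_coords)
    assert not np.array_equal(base.index.meshes[1].connectivity_coords,
                              disp.index.meshes[1].connectivity_coords)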


https://bitbucket.org/yt_analysis/yt/commits/abe1ef6f58b2/
Changeset:   abe1ef6f58b2
Branch:      yt
User:        atmyers
Date:        2016-02-02 00:49:15+00:00
Summary:     changing the API for displacement fields to a more useful form
Affected #:  3 files

diff -r 96af7a0cf1c68c7341dc8a53ecfa89bd0980c07d -r abe1ef6f58b2d050be81c106ad8aac4eca0b326b doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -447,36 +447,39 @@
    ds = yt.load("MOOSE_sample_data/mps_out.e")
 
 will not include the displacements in the vertex positions. The displacements can
-be turned on separately for each mesh in the file. For example, the following
-code snippet turns displacements on for the second mesh, but not the first:
+be turned on separately for each mesh in the file by passing in a dictionary 
+of (scale, offset) tuples for the meshes you want to enable displacements for. 
+For example, the following code snippet turns displacements on for the second 
+mesh, but not the first:
 
 .. code-block:: python
 
     import yt
     ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
-                 displacements={'connect2': 1.0})
+                 displacements={'connect2': (1.0, [0.0, 0.0, 0.0])})
 
 The displacements can also be scaled by an arbitrary factor before they are 
-added in to the vertex positions. To blow them up by a factor of 10.0, we do
+added in to the vertex positions. The following code turns on displacements
+for both ``connect1`` and ``connect2``, scaling the former by a factor of 5.0
+and the latter by a factor of 10.0:
 
 .. code-block:: python
 
     import yt
     ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
-                 displacements={'connect2': 10.0})
+                 displacements={'connect1': (5.0, [0.0, 0.0, 0.0]),
+                                'connect2': (10.0, [0.0, 0.0, 0.0])})
 
-Finally, we can also scale each dimension independently. This snippet:
+Finally, we can also apply an arbitrary offset to the mesh vertices after 
+the scale factor is applied. For example, the following code scales all
+displacements in the second mesh by a factor of 5.0, and then shifts
+each vertex in the mesh by 1.0 unit in the z-direction:
 
 .. code-block:: python
 
     import yt
     ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
-                  displacements={'connect1': [1.0, 2.0, 3.0],
-                                 'connect2': 5.0})
-
-turns displacements on for both meshes, scaling those in the 2nd mesh
-by a factor of 5 and those in the first by 1.0, 2.0, and 3.0 in the
-x, y, and z directions, respectively.
+                  displacements={'connect2': (5.0, [0.0, 0.0, 1.0])})
 
 
 FITS Data

diff -r 96af7a0cf1c68c7341dc8a53ecfa89bd0980c07d -r abe1ef6f58b2d050be81c106ad8aac4eca0b326b yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -87,14 +87,24 @@
             file. Passing step=-1 picks out the last dataframe. 
             Default is 0.
 
-        displacements : dictionary
-            This is a dictionary of scale factors that will be applied to the 
-            displacement fields in the ExodusII file. If no displacement fields
-            are present, then this dictionary is ignored. You can specify 
-            separate scale factors for each mesh. The scale factors can either be
-            floats, in which case the same scale will be applied to each dimension,
-            or they can be iterables of 3 floats, in which case you can specify
-            anisotropic scale factors.
+        displacements : dictionary of tuples
+            This is a dictionary that controls whether or not displacement fields
+            will be used with the meshes in this dataset. The keys of the
+            displacements dictionary should be the names of meshes in the file 
+            (e.g., "connect1", "connect2", etc... ), while the values should be 
+            tuples of the form (scale, offset), where "scale" is a floating point
+            value and "offset" is an array-like with one component for each spatial
+            dimension in the dataset. When the displacements for a given mesh are
+            turned on, the coordinates of the vertices in that mesh get transformed
+            as: 
+
+                  vertex_x = vertex_x + disp_x*scale + offset_x
+                  vertex_y = vertex_y + disp_y*scale + offset_y
+                  vertex_z = vertex_z + disp_z*scale + offset_z
+
+            If no displacement fields (assumed to be named 'disp_x', 'disp_y',
+            etc...) are detected in the output file, then this dictionary is
+            ignored.
 
         Examples
         --------
@@ -109,21 +119,28 @@
         >>> import yt
         >>> ds = yt.load("MOOSE_sample_data/mps_out.e", step=-1)
 
-        This will load the Dataset at index 10, scaling the 2nd mesh
-        by a factor of 5.0 in each direction.
+        This will load the Dataset at index 10, turning on displacement fields for 
+        the 2nd mesh without applying any scale or offset:
 
         >>> import yt
         >>> ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
-                         displacements={'connect2': 5.0})
-        
-        This will load the Dataset at index 10, scaling the 2nd mesh
-        by a factor of 5.0 in each direction and the 1st mesh by an
-        anisotropic scale factor.
+                         displacements={'connect2': (1.0, [0.0, 0.0, 0.0])})
+
+        This will load the Dataset at index 10, scaling the displacements
+        in the 2nd mesh by a factor of 5 while not applying an offset:
 
         >>> import yt
         >>> ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
-                         displacements={'connect1': [1.0, 2.0, 3.0],
-                                        'connect2': 5.0})
+                         displacements={'connect2': (5.0, [0.0, 0.0, 0.0])})
+        
+        This will load the Dataset at index 10, scaling the displacements for
+        the 2nd mesh by a factor of 5.0 and shifting all the vertices in 
+        the first mesh by 1.0 unit in the z direction.
+
+        >>> import yt
+        >>> ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
+                         displacements={'connect1': (0.0, [0.0, 0.0, 1.0]),
+                                        'connect2': (5.0, [0.0, 0.0, 0.0])})
 
         """
         self.parameter_filename = filename
@@ -299,17 +316,15 @@
             return new_coords
 
         new_coords = np.zeros_like(coords)
-        fac = self.displacements[mesh_name]
-
-        if not iterable(fac):
-            fac = [fac]*self.dimensionality
+        fac = self.displacements[mesh_name][0]
+        offset = self.displacements[mesh_name][1]
 
         coord_axes = 'xyz'[:self.dimensionality]
         for i, ax in enumerate(coord_axes):
             if "disp_%s" % ax in self.parameters['nod_names']:
                 ind = self.parameters['nod_names'].index("disp_%s" % ax)
-                offset = self._vars['vals_nod_var%d' % (ind + 1)][self.step]
-                new_coords[:, i] = coords[:, i] + fac[i]*offset
+                disp = self._vars['vals_nod_var%d' % (ind + 1)][self.step]
+                new_coords[:, i] = coords[:, i] + fac*disp + offset[i]
 
         return new_coords
         

diff -r 96af7a0cf1c68c7341dc8a53ecfa89bd0980c07d -r abe1ef6f58b2d050be81c106ad8aac4eca0b326b yt/frontends/exodus_ii/tests/test_outputs.py
--- a/yt/frontends/exodus_ii/tests/test_outputs.py
+++ b/yt/frontends/exodus_ii/tests/test_outputs.py
@@ -78,8 +78,9 @@
 
 @requires_ds(big_data)
 def test_displacement_fields():
-    displacement_dicts =[{'connect2':5.0},
-                         {'connect1': [1.0, 2.0, 3.0], 'connect2': 5.0}]
+    displacement_dicts =[{'connect2': (5.0, [0.0, 0.0, 0.0])},
+                         {'connect1': (1.0, [1.0, 2.0, 3.0]), 
+                          'connect2': (0.0, [0.0, 0.0, 0.0])}]
     for disp in displacement_dicts:
         ds = data_dir_load(big_data, displacements=disp)
         for mesh in ds.index.meshes:
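
The new transform is the same for every axis: vertex + disp*scale + offset.
A standalone sketch of that update (illustrative names, not the yt API):

    import numpy as np

    def apply_scale_offset(coords, disp_fields, scale, offset):
        # coords: (n_verts, ndim); disp_fields[i]: per-vertex displacement
        # along axis i, or None when the file has no disp field there.
        new_coords = coords.copy()
        for i, disp in enumerate(disp_fields):
            if disp is not None:
                new_coords[:, i] = coords[:, i] + scale * disp + offset[i]
        return new_coords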


https://bitbucket.org/yt_analysis/yt/commits/74fb4618dc19/
Changeset:   74fb4618dc19
Branch:      yt
User:        atmyers
Date:        2016-02-02 00:50:33+00:00
Summary:     merging
Affected #:  196 files

diff -r abe1ef6f58b2d050be81c106ad8aac4eca0b326b -r 74fb4618dc190331fa9aac4b4826b5b9ae8e22c6 doc/extensions/notebook_sphinxext.py
--- a/doc/extensions/notebook_sphinxext.py
+++ /dev/null
@@ -1,241 +0,0 @@
-import errno
-import os
-import shutil
-import string
-import re
-import tempfile
-import uuid
-from sphinx.util.compat import Directive
-from docutils import nodes
-from docutils.parsers.rst import directives
-from IPython.config import Config
-from IPython.nbconvert import html, python
-from IPython.nbformat import current as nbformat
-from runipy.notebook_runner import NotebookRunner, NotebookError
-
-class NotebookDirective(Directive):
-    """Insert an evaluated notebook into a document
-
-    This uses runipy and nbconvert to transform a path to an unevaluated notebook
-    into html suitable for embedding in a Sphinx document.
-    """
-    required_arguments = 1
-    optional_arguments = 1
-    option_spec = {'skip_exceptions': directives.flag}
-    final_argument_whitespace = True
-
-    def run(self): # check if there are spaces in the notebook name
-        nb_path = self.arguments[0]
-        if ' ' in nb_path: raise ValueError(
-            "Due to issues with docutils stripping spaces from links, white "
-            "space is not allowed in notebook filenames '{0}'".format(nb_path))
-        # check if raw html is supported
-        if not self.state.document.settings.raw_enabled:
-            raise self.warning('"%s" directive disabled.' % self.name)
-
-        cwd = os.getcwd()
-        tmpdir = tempfile.mkdtemp()
-        os.chdir(tmpdir)
-
-        # get path to notebook
-        nb_filename = self.arguments[0]
-        nb_basename = os.path.basename(nb_filename)
-        rst_file = self.state_machine.document.attributes['source']
-        rst_dir = os.path.abspath(os.path.dirname(rst_file))
-        nb_abs_path = os.path.abspath(os.path.join(rst_dir, nb_filename))
-
-        # Move files around.
-        rel_dir = os.path.relpath(rst_dir, setup.confdir)
-        dest_dir = os.path.join(setup.app.builder.outdir, rel_dir)
-        dest_path = os.path.join(dest_dir, nb_basename)
-
-        image_dir, image_rel_dir = make_image_dir(setup, rst_dir)
-
-        # Ensure destination build directory exists
-        thread_safe_mkdir(os.path.dirname(dest_path))
-
-        # Copy unevaluated notebook
-        shutil.copyfile(nb_abs_path, dest_path)
-
-        # Construct paths to versions getting copied over
-        dest_path_eval = string.replace(dest_path, '.ipynb', '_evaluated.ipynb')
-        dest_path_script = string.replace(dest_path, '.ipynb', '.py')
-        rel_path_eval = string.replace(nb_basename, '.ipynb', '_evaluated.ipynb')
-        rel_path_script = string.replace(nb_basename, '.ipynb', '.py')
-
-        # Create python script version
-        script_text = nb_to_python(nb_abs_path)
-        f = open(dest_path_script, 'w')
-        f.write(script_text.encode('utf8'))
-        f.close()
-
-        skip_exceptions = 'skip_exceptions' in self.options
-
-        ret = evaluate_notebook(
-            nb_abs_path, dest_path_eval, skip_exceptions=skip_exceptions)
-
-        try:
-            evaluated_text, resources = ret
-            evaluated_text = write_notebook_output(
-                resources, image_dir, image_rel_dir, evaluated_text)
-        except ValueError:
-            # This happens when a notebook raises an unhandled exception
-            evaluated_text = ret
-
-        # Create link to notebook and script files
-        link_rst = "(" + \
-                   formatted_link(nb_basename) + "; " + \
-                   formatted_link(rel_path_eval) + "; " + \
-                   formatted_link(rel_path_script) + \
-                   ")"
-
-        self.state_machine.insert_input([link_rst], rst_file)
-
-        # create notebook node
-        attributes = {'format': 'html', 'source': 'nb_path'}
-        nb_node = notebook_node('', evaluated_text, **attributes)
-        (nb_node.source, nb_node.line) = \
-            self.state_machine.get_source_and_line(self.lineno)
-
-        # add dependency
-        self.state.document.settings.record_dependencies.add(nb_abs_path)
-
-        # clean up
-        os.chdir(cwd)
-        shutil.rmtree(tmpdir, True)
-
-        return [nb_node]
-
-
-class notebook_node(nodes.raw):
-    pass
-
-def nb_to_python(nb_path):
-    """convert notebook to python script"""
-    exporter = python.PythonExporter()
-    output, resources = exporter.from_filename(nb_path)
-    return output
-
-def nb_to_html(nb_path):
-    """convert notebook to html"""
-    c = Config({'ExtractOutputPreprocessor':{'enabled':True}})
-
-    exporter = html.HTMLExporter(template_file='full', config=c)
-    notebook = nbformat.read(open(nb_path), 'json')
-    output, resources = exporter.from_notebook_node(notebook)
-    header = output.split('<head>', 1)[1].split('</head>',1)[0]
-    body = output.split('<body>', 1)[1].split('</body>',1)[0]
-
-    # http://imgur.com/eR9bMRH
-    header = header.replace('<style', '<style scoped="scoped"')
-    header = header.replace('body {\n  overflow: visible;\n  padding: 8px;\n}\n',
-                            '')
-    header = header.replace("code,pre{", "code{")
-
-    # Filter out styles that conflict with the sphinx theme.
-    filter_strings = [
-        'navbar',
-        'body{',
-        'alert{',
-        'uneditable-input{',
-        'collapse{',
-    ]
-
-    filter_strings.extend(['h%s{' % (i+1) for i in range(6)])
-
-    line_begin = [
-        'pre{',
-        'p{margin'
-    ]
-
-    filterfunc = lambda x: not any([s in x for s in filter_strings])
-    header_lines = filter(filterfunc, header.split('\n'))
-
-    filterfunc = lambda x: not any([x.startswith(s) for s in line_begin])
-    header_lines = filter(filterfunc, header_lines)
-
-    header = '\n'.join(header_lines)
-
-    # concatenate raw html lines
-    lines = ['<div class="ipynotebook">']
-    lines.append(header)
-    lines.append(body)
-    lines.append('</div>')
-    return '\n'.join(lines), resources
-
-def evaluate_notebook(nb_path, dest_path=None, skip_exceptions=False):
-    # Create evaluated version and save it to the dest path.
-    notebook = nbformat.read(open(nb_path), 'json')
-    nb_runner = NotebookRunner(notebook, pylab=False)
-    try:
-        nb_runner.run_notebook(skip_exceptions=skip_exceptions)
-    except NotebookError as e:
-        print('')
-        print(e)
-        # Return the traceback, filtering out ANSI color codes.
-        # http://stackoverflow.com/questions/13506033/filtering-out-ansi-escape-sequences
-        return "Notebook conversion failed with the " \
-               "following traceback: \n%s" % \
-            re.sub(r'\\033[\[\]]([0-9]{1,2}([;@][0-9]{0,2})*)*[mKP]?', '',
-                   str(e))
-
-    if dest_path is None:
-        dest_path = 'temp_evaluated.ipynb'
-    nbformat.write(nb_runner.nb, open(dest_path, 'w'), 'json')
-    ret = nb_to_html(dest_path)
-    if dest_path is 'temp_evaluated.ipynb':
-        os.remove(dest_path)
-    return ret
-
-def formatted_link(path):
-    return "`%s <%s>`__" % (os.path.basename(path), path)
-
-def visit_notebook_node(self, node):
-    self.visit_raw(node)
-
-def depart_notebook_node(self, node):
-    self.depart_raw(node)
-
-def setup(app):
-    setup.app = app
-    setup.config = app.config
-    setup.confdir = app.confdir
-
-    app.add_node(notebook_node,
-                 html=(visit_notebook_node, depart_notebook_node))
-
-    app.add_directive('notebook', NotebookDirective)
-
-    retdict = dict(
-        version='0.1',
-        parallel_read_safe=True,
-        parallel_write_safe=True
-    )
-
-    return retdict
-
-def make_image_dir(setup, rst_dir):
-    image_dir = setup.app.builder.outdir + os.path.sep + '_images'
-    rel_dir = os.path.relpath(setup.confdir, rst_dir)
-    image_rel_dir = rel_dir + os.path.sep + '_images'
-    thread_safe_mkdir(image_dir)
-    return image_dir, image_rel_dir
-
-def write_notebook_output(resources, image_dir, image_rel_dir, evaluated_text):
-    my_uuid = uuid.uuid4().hex
-
-    for output in resources['outputs']:
-        new_name = image_dir + os.path.sep + my_uuid + output
-        new_relative_name = image_rel_dir + os.path.sep + my_uuid + output
-        evaluated_text = evaluated_text.replace(output, new_relative_name)
-        with open(new_name, 'wb') as f:
-            f.write(resources['outputs'][output])
-    return evaluated_text
-
-def thread_safe_mkdir(dirname):
-    try:
-        os.makedirs(dirname)
-    except OSError as e:
-        if e.errno != errno.EEXIST:
-            raise
-        pass

diff -r abe1ef6f58b2d050be81c106ad8aac4eca0b326b -r 74fb4618dc190331fa9aac4b4826b5b9ae8e22c6 doc/extensions/notebookcell_sphinxext.py
--- a/doc/extensions/notebookcell_sphinxext.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import os
-import shutil
-import io
-import tempfile
-from sphinx.util.compat import Directive
-from docutils.parsers.rst import directives
-from IPython.nbformat import current
-from notebook_sphinxext import \
-    notebook_node, visit_notebook_node, depart_notebook_node, \
-    evaluate_notebook, make_image_dir, write_notebook_output
-
-
-class NotebookCellDirective(Directive):
-    """Insert an evaluated notebook cell into a document
-
-    This uses runipy and nbconvert to transform an inline python
-    script into html suitable for embedding in a Sphinx document.
-    """
-    required_arguments = 0
-    optional_arguments = 1
-    has_content = True
-    option_spec = {'skip_exceptions': directives.flag}
-
-    def run(self):
-        # check if raw html is supported
-        if not self.state.document.settings.raw_enabled:
-            raise self.warning('"%s" directive disabled.' % self.name)
-
-        cwd = os.getcwd()
-        tmpdir = tempfile.mkdtemp()
-        os.chdir(tmpdir)
-
-        rst_file = self.state_machine.document.attributes['source']
-        rst_dir = os.path.abspath(os.path.dirname(rst_file))
-
-        image_dir, image_rel_dir = make_image_dir(setup, rst_dir)
-
-        # Construct notebook from cell content
-        content = "\n".join(self.content)
-        with open("temp.py", "w") as f:
-            f.write(content)
-
-        convert_to_ipynb('temp.py', 'temp.ipynb')
-
-        skip_exceptions = 'skip_exceptions' in self.options
-
-        evaluated_text, resources = evaluate_notebook(
-            'temp.ipynb', skip_exceptions=skip_exceptions)
-
-        evaluated_text = write_notebook_output(
-            resources, image_dir, image_rel_dir, evaluated_text)
-
-        # create notebook node
-        attributes = {'format': 'html', 'source': 'nb_path'}
-        nb_node = notebook_node('', evaluated_text, **attributes)
-        (nb_node.source, nb_node.line) = \
-            self.state_machine.get_source_and_line(self.lineno)
-
-        # clean up
-        os.chdir(cwd)
-        shutil.rmtree(tmpdir, True)
-
-        return [nb_node]
-
-def setup(app):
-    setup.app = app
-    setup.config = app.config
-    setup.confdir = app.confdir
-
-    app.add_node(notebook_node,
-                 html=(visit_notebook_node, depart_notebook_node))
-
-    app.add_directive('notebook-cell', NotebookCellDirective)
-
-    retdict = dict(
-        version='0.1',
-        parallel_read_safe=True,
-        parallel_write_safe=True
-    )
-
-    return retdict
-
-def convert_to_ipynb(py_file, ipynb_file):
-    with io.open(py_file, 'r', encoding='utf-8') as f:
-        notebook = current.reads(f.read(), format='py')
-    with io.open(ipynb_file, 'w', encoding='utf-8') as f:
-        current.write(notebook, f, format='ipynb')

diff -r abe1ef6f58b2d050be81c106ad8aac4eca0b326b -r 74fb4618dc190331fa9aac4b4826b5b9ae8e22c6 doc/extensions/numpydocmod/__init__.py
--- a/doc/extensions/numpydocmod/__init__.py
+++ /dev/null
@@ -1,1 +0,0 @@
-from numpydoc import setup

diff -r abe1ef6f58b2d050be81c106ad8aac4eca0b326b -r 74fb4618dc190331fa9aac4b4826b5b9ae8e22c6 doc/extensions/numpydocmod/comment_eater.py
--- a/doc/extensions/numpydocmod/comment_eater.py
+++ /dev/null
@@ -1,158 +0,0 @@
-from cStringIO import StringIO
-import compiler
-import inspect
-import textwrap
-import tokenize
-
-from compiler_unparse import unparse
-
-
-class Comment(object):
-    """ A comment block.
-    """
-    is_comment = True
-    def __init__(self, start_lineno, end_lineno, text):
-        # int : The first line number in the block. 1-indexed.
-        self.start_lineno = start_lineno
-        # int : The last line number. Inclusive!
-        self.end_lineno = end_lineno
-        # str : The text block including '#' character but not any leading spaces.
-        self.text = text
-
-    def add(self, string, start, end, line):
-        """ Add a new comment line.
-        """
-        self.start_lineno = min(self.start_lineno, start[0])
-        self.end_lineno = max(self.end_lineno, end[0])
-        self.text += string
-
-    def __repr__(self):
-        return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
-            self.end_lineno, self.text)
-
-
-class NonComment(object):
-    """ A non-comment block of code.
-    """
-    is_comment = False
-    def __init__(self, start_lineno, end_lineno):
-        self.start_lineno = start_lineno
-        self.end_lineno = end_lineno
-
-    def add(self, string, start, end, line):
-        """ Add lines to the block.
-        """
-        if string.strip():
-            # Only add if not entirely whitespace.
-            self.start_lineno = min(self.start_lineno, start[0])
-            self.end_lineno = max(self.end_lineno, end[0])
-
-    def __repr__(self):
-        return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
-            self.end_lineno)
-
-
-class CommentBlocker(object):
-    """ Pull out contiguous comment blocks.
-    """
-    def __init__(self):
-        # Start with a dummy.
-        self.current_block = NonComment(0, 0)
-
-        # All of the blocks seen so far.
-        self.blocks = []
-
-        # The index mapping lines of code to their associated comment blocks.
-        self.index = {}
-
-    def process_file(self, file):
-        """ Process a file object.
-        """
-        for token in tokenize.generate_tokens(file.next):
-            self.process_token(*token)
-        self.make_index()
-
-    def process_token(self, kind, string, start, end, line):
-        """ Process a single token.
-        """
-        if self.current_block.is_comment:
-            if kind == tokenize.COMMENT:
-                self.current_block.add(string, start, end, line)
-            else:
-                self.new_noncomment(start[0], end[0])
-        else:
-            if kind == tokenize.COMMENT:
-                self.new_comment(string, start, end, line)
-            else:
-                self.current_block.add(string, start, end, line)
-
-    def new_noncomment(self, start_lineno, end_lineno):
-        """ We are transitioning from a noncomment to a comment.
-        """
-        block = NonComment(start_lineno, end_lineno)
-        self.blocks.append(block)
-        self.current_block = block
-
-    def new_comment(self, string, start, end, line):
-        """ Possibly add a new comment.
-        
-        Only adds a new comment if this comment is the only thing on the line.
-        Otherwise, it extends the noncomment block.
-        """
-        prefix = line[:start[1]]
-        if prefix.strip():
-            # Oops! Trailing comment, not a comment block.
-            self.current_block.add(string, start, end, line)
-        else:
-            # A comment block.
-            block = Comment(start[0], end[0], string)
-            self.blocks.append(block)
-            self.current_block = block
-
-    def make_index(self):
-        """ Make the index mapping lines of actual code to their associated
-        prefix comments.
-        """
-        for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
-            if not block.is_comment:
-                self.index[block.start_lineno] = prev
-
-    def search_for_comment(self, lineno, default=None):
-        """ Find the comment block just before the given line number.
-
-        Returns None (or the specified default) if there is no such block.
-        """
-        if not self.index:
-            self.make_index()
-        block = self.index.get(lineno, None)
-        text = getattr(block, 'text', default)
-        return text
-
-
-def strip_comment_marker(text):
-    """ Strip # markers at the front of a block of comment text.
-    """
-    lines = []
-    for line in text.splitlines():
-        lines.append(line.lstrip('#'))
-    text = textwrap.dedent('\n'.join(lines))
-    return text
-
-
-def get_class_traits(klass):
-    """ Yield all of the documentation for trait definitions on a class object.
-    """
-    # FIXME: gracefully handle errors here or in the caller?
-    source = inspect.getsource(klass)
-    cb = CommentBlocker()
-    cb.process_file(StringIO(source))
-    mod_ast = compiler.parse(source)
-    class_ast = mod_ast.node.nodes[0]
-    for node in class_ast.code.nodes:
-        # FIXME: handle other kinds of assignments?
-        if isinstance(node, compiler.ast.Assign):
-            name = node.nodes[0].name
-            rhs = unparse(node.expr).strip()
-            doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
-            yield name, rhs, doc
-

diff -r abe1ef6f58b2d050be81c106ad8aac4eca0b326b -r 74fb4618dc190331fa9aac4b4826b5b9ae8e22c6 doc/extensions/numpydocmod/compiler_unparse.py
--- a/doc/extensions/numpydocmod/compiler_unparse.py
+++ /dev/null
@@ -1,860 +0,0 @@
-""" Turn compiler.ast structures back into executable python code.
-
-    The unparse method takes a compiler.ast tree and transforms it back into
-    valid python code.  It is incomplete and currently only works for
-    import statements, function calls, function definitions, assignments, and
-    basic expressions.
-
-    Inspired by python-2.5-svn/Demo/parser/unparse.py
-
-    fixme: We may want to move to using _ast trees because the compiler for
-           them is about 6 times faster than compiler.compile.
-"""
-
-import sys
-import cStringIO
-from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
-
-def unparse(ast, single_line_functions=False):
-    s = cStringIO.StringIO()
-    UnparseCompilerAst(ast, s, single_line_functions)
-    return s.getvalue().lstrip()
-
-op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
-                  'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
-
-class UnparseCompilerAst:
-    """ Methods in this class recursively traverse an AST and
-        output source code for the abstract syntax; original formatting
-        is disregarded.
-    """
-
-    #########################################################################
-    # object interface.
-    #########################################################################
-
-    def __init__(self, tree, file = sys.stdout, single_line_functions=False):
-        """ Unparser(tree, file=sys.stdout) -> None.
-
-            Print the source for tree to file.
-        """
-        self.f = file
-        self._single_func = single_line_functions
-        self._do_indent = True
-        self._indent = 0
-        self._dispatch(tree)
-        self._write("\n")
-        self.f.flush()
-
-    #########################################################################
-    # Unparser private interface.
-    #########################################################################
-
-    ### format, output, and dispatch methods ################################
-
-    def _fill(self, text = ""):
-        "Indent a piece of text, according to the current indentation level"
-        if self._do_indent:
-            self._write("\n"+"    "*self._indent + text)
-        else:
-            self._write(text)
-
-    def _write(self, text):
-        "Append a piece of text to the current line."
-        self.f.write(text)
-
-    def _enter(self):
-        "Print ':', and increase the indentation."
-        self._write(": ")
-        self._indent += 1
-
-    def _leave(self):
-        "Decrease the indentation level."
-        self._indent -= 1
-
-    def _dispatch(self, tree):
-        "_dispatcher function, _dispatching tree type T to method _T."
-        if isinstance(tree, list):
-            for t in tree:
-                self._dispatch(t)
-            return
-        meth = getattr(self, "_"+tree.__class__.__name__)
-        if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
-            return
-        meth(tree)
-
-
-    #########################################################################
-    # compiler.ast unparsing methods.
-    #
-    # There should be one method per concrete grammar type. They are
-    # organized in alphabetical order.
-    #########################################################################
-
-    def _Add(self, t):
-        self.__binary_op(t, '+')
-
-    def _And(self, t):
-        self._write(" (")
-        for i, node in enumerate(t.nodes):
-            self._dispatch(node)
-            if i != len(t.nodes)-1:
-                self._write(") and (")
-        self._write(")")
-               
-    def _AssAttr(self, t):
-        """ Handle assigning an attribute of an object
-        """
-        self._dispatch(t.expr)
-        self._write('.'+t.attrname)
- 
-    def _Assign(self, t):
-        """ Expression Assignment such as "a = 1".
-
-            This only handles assignment in expressions.  Keyword assignment
-            is handled separately.
-        """
-        self._fill()
-        for target in t.nodes:
-            self._dispatch(target)
-            self._write(" = ")
-        self._dispatch(t.expr)
-        if not self._do_indent:
-            self._write('; ')
-
-    def _AssName(self, t):
-        """ Name on left hand side of expression.
-
-            Treat just like a name on the right side of an expression.
-        """
-        self._Name(t)
-
-    def _AssTuple(self, t):
-        """ Tuple on left hand side of an expression.
-        """
-
-        # _write each elements, separated by a comma.
-        for element in t.nodes[:-1]:
-            self._dispatch(element)
-            self._write(", ")
-
-        # Handle the last one without writing comma
-        last_element = t.nodes[-1]
-        self._dispatch(last_element)
-
-    def _AugAssign(self, t):
-        """ +=,-=,*=,/=,**=, etc. operations
-        """
-        
-        self._fill()
-        self._dispatch(t.node)
-        self._write(' '+t.op+' ')
-        self._dispatch(t.expr)
-        if not self._do_indent:
-            self._write(';')
-            
-    def _Bitand(self, t):
-        """ Bit and operation.
-        """
-        
-        for i, node in enumerate(t.nodes):
-            self._write("(")
-            self._dispatch(node)
-            self._write(")")
-            if i != len(t.nodes)-1:
-                self._write(" & ")
-                
-    def _Bitor(self, t):
-        """ Bit or operation
-        """
-        
-        for i, node in enumerate(t.nodes):
-            self._write("(")
-            self._dispatch(node)
-            self._write(")")
-            if i != len(t.nodes)-1:
-                self._write(" | ")
-                
-    def _CallFunc(self, t):
-        """ Function call.
-        """
-        self._dispatch(t.node)
-        self._write("(")
-        comma = False
-        for e in t.args:
-            if comma: self._write(", ")
-            else: comma = True
-            self._dispatch(e)
-        if t.star_args:
-            if comma: self._write(", ")
-            else: comma = True
-            self._write("*")
-            self._dispatch(t.star_args)
-        if t.dstar_args:
-            if comma: self._write(", ")
-            else: comma = True
-            self._write("**")
-            self._dispatch(t.dstar_args)
-        self._write(")")
-
-    def _Compare(self, t):
-        self._dispatch(t.expr)
-        for op, expr in t.ops:
-            self._write(" " + op + " ")
-            self._dispatch(expr)
-
-    def _Const(self, t):
-        """ A constant value such as an integer value, 3, or a string, "hello".
-        """
-        self._dispatch(t.value)
-
-    def _Decorators(self, t):
-        """ Handle function decorators (eg. @has_units)
-        """
-        for node in t.nodes:
-            self._dispatch(node)
-
-    def _Dict(self, t):
-        self._write("{")
-        for  i, (k, v) in enumerate(t.items):
-            self._dispatch(k)
-            self._write(": ")
-            self._dispatch(v)
-            if i < len(t.items)-1:
-                self._write(", ")
-        self._write("}")
-
-    def _Discard(self, t):
-        """ Node for when return value is ignored such as in "foo(a)".
-        """
-        self._fill()
-        self._dispatch(t.expr)
-
-    def _Div(self, t):
-        self.__binary_op(t, '/')
-
-    def _Ellipsis(self, t):
-        self._write("...")
-
-    def _From(self, t):
-        """ Handle "from xyz import foo, bar as baz".
-        """
-        # fixme: Are From and ImportFrom handled differently?
-        self._fill("from ")
-        self._write(t.modname)
-        self._write(" import ")
-        for i, (name,asname) in enumerate(t.names):
-            if i != 0:
-                self._write(", ")
-            self._write(name)
-            if asname is not None:
-                self._write(" as "+asname)
-                
-    def _Function(self, t):
-        """ Handle function definitions
-        """
-        if t.decorators is not None:
-            self._fill("@")
-            self._dispatch(t.decorators)
-        self._fill("def "+t.name + "(")
-        defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
-        for i, arg in enumerate(zip(t.argnames, defaults)):
-            self._write(arg[0])
-            if arg[1] is not None:
-                self._write('=')
-                self._dispatch(arg[1])
-            if i < len(t.argnames)-1:
-                self._write(', ')
-        self._write(")")
-        if self._single_func:
-            self._do_indent = False
-        self._enter()
-        self._dispatch(t.code)
-        self._leave()
-        self._do_indent = True
-
-    def _Getattr(self, t):
-        """ Handle getting an attribute of an object
-        """
-        if isinstance(t.expr, (Div, Mul, Sub, Add)):
-            self._write('(')
-            self._dispatch(t.expr)
-            self._write(')')
-        else:
-            self._dispatch(t.expr)
-            
-        self._write('.'+t.attrname)
-        
-    def _If(self, t):
-        self._fill()
-        
-        for i, (compare,code) in enumerate(t.tests):
-            if i == 0:
-                self._write("if ")
-            else:
-                self._write("elif ")
-            self._dispatch(compare)
-            self._enter()
-            self._fill()
-            self._dispatch(code)
-            self._leave()
-            self._write("\n")
-
-        if t.else_ is not None:
-            self._write("else")
-            self._enter()
-            self._fill()
-            self._dispatch(t.else_)
-            self._leave()
-            self._write("\n")
-            
-    def _IfExp(self, t):
-        self._dispatch(t.then)
-        self._write(" if ")
-        self._dispatch(t.test)
-
-        if t.else_ is not None:
-            self._write(" else (")
-            self._dispatch(t.else_)
-            self._write(")")
-
-    def _Import(self, t):
-        """ Handle "import xyz.foo".
-        """
-        self._fill("import ")
-        
-        for i, (name,asname) in enumerate(t.names):
-            if i != 0:
-                self._write(", ")
-            self._write(name)
-            if asname is not None:
-                self._write(" as "+asname)
-
-    def _Keyword(self, t):
-        """ Keyword value assignment within function calls and definitions.
-        """
-        self._write(t.name)
-        self._write("=")
-        self._dispatch(t.expr)
-        
-    def _List(self, t):
-        self._write("[")
-        for  i,node in enumerate(t.nodes):
-            self._dispatch(node)
-            if i < len(t.nodes)-1:
-                self._write(", ")
-        self._write("]")
-
-    def _Module(self, t):
-        if t.doc is not None:
-            self._dispatch(t.doc)
-        self._dispatch(t.node)
-
-    def _Mul(self, t):
-        self.__binary_op(t, '*')
-
-    def _Name(self, t):
-        self._write(t.name)
-
-    def _NoneType(self, t):
-        self._write("None")
-        
-    def _Not(self, t):
-        self._write('not (')
-        self._dispatch(t.expr)
-        self._write(')')
-        
-    def _Or(self, t):
-        self._write(" (")
-        for i, node in enumerate(t.nodes):
-            self._dispatch(node)
-            if i != len(t.nodes)-1:
-                self._write(") or (")
-        self._write(")")
-                
-    def _Pass(self, t):
-        self._write("pass\n")
-
-    def _Printnl(self, t):
-        self._fill("print ")
-        if t.dest:
-            self._write(">> ")
-            self._dispatch(t.dest)
-            self._write(", ")
-        comma = False
-        for node in t.nodes:
-            if comma: self._write(', ')
-            else: comma = True
-            self._dispatch(node)
-
-    def _Power(self, t):
-        self.__binary_op(t, '**')
-
-    def _Return(self, t):
-        self._fill("return ")
-        if t.value:
-            if isinstance(t.value, Tuple):
-                text = ', '.join([ name.name for name in t.value.asList() ])
-                self._write(text)
-            else:
-                self._dispatch(t.value)
-            if not self._do_indent:
-                self._write('; ')
-
-    def _Slice(self, t):
-        self._dispatch(t.expr)
-        self._write("[")
-        if t.lower:
-            self._dispatch(t.lower)
-        self._write(":")
-        if t.upper:
-            self._dispatch(t.upper)
-        #if t.step:
-        #    self._write(":")
-        #    self._dispatch(t.step)
-        self._write("]")
-
-    def _Sliceobj(self, t):
-        for i, node in enumerate(t.nodes):
-            if i != 0:
-                self._write(":")
-            if not (isinstance(node, Const) and node.value is None):
-                self._dispatch(node)
-
-    def _Stmt(self, tree):
-        for node in tree.nodes:
-            self._dispatch(node)
-
-    def _Sub(self, t):
-        self.__binary_op(t, '-')
-
-    def _Subscript(self, t):
-        self._dispatch(t.expr)
-        self._write("[")
-        for i, value in enumerate(t.subs):
-            if i != 0:
-                self._write(",")
-            self._dispatch(value)
-        self._write("]")
-
-    def _TryExcept(self, t):
-        self._fill("try")
-        self._enter()
-        self._dispatch(t.body)
-        self._leave()
-
-        for handler in t.handlers:
-            self._fill('except ')
-            self._dispatch(handler[0])
-            if handler[1] is not None:
-                self._write(', ')
-                self._dispatch(handler[1])
-            self._enter()
-            self._dispatch(handler[2])
-            self._leave()
-            
-        if t.else_:
-            self._fill("else")
-            self._enter()
-            self._dispatch(t.else_)
-            self._leave()
-
-    def _Tuple(self, t):
-
-        if not t.nodes:
-            # Empty tuple.
-            self._write("()")
-        else:
-            self._write("(")
-
-            # _write each elements, separated by a comma.
-            for element in t.nodes[:-1]:
-                self._dispatch(element)
-                self._write(", ")
-
-            # Handle the last one without writing comma
-            last_element = t.nodes[-1]
-            self._dispatch(last_element)
-
-            self._write(")")
-            
-    def _UnaryAdd(self, t):
-        self._write("+")
-        self._dispatch(t.expr)
-        
-    def _UnarySub(self, t):
-        self._write("-")
-        self._dispatch(t.expr)        
-
-    def _With(self, t):
-        self._fill('with ')
-        self._dispatch(t.expr)
-        if t.vars:
-            self._write(' as ')
-            self._dispatch(t.vars.name)
-        self._enter()
-        self._dispatch(t.body)
-        self._leave()
-        self._write('\n')
-        
-    def _int(self, t):
-        self._write(repr(t))
-
-    def __binary_op(self, t, symbol):
-        # Check if parenthesis are needed on left side and then dispatch
-        has_paren = False
-        left_class = str(t.left.__class__)
-        if (left_class in op_precedence.keys() and
-            op_precedence[left_class] < op_precedence[str(t.__class__)]):
-            has_paren = True
-        if has_paren:
-            self._write('(')
-        self._dispatch(t.left)
-        if has_paren:
-            self._write(')')
-        # Write the appropriate symbol for operator
-        self._write(symbol)
-        # Check if parenthesis are needed on the right side and then dispatch
-        has_paren = False
-        right_class = str(t.right.__class__)
-        if (right_class in op_precedence.keys() and
-            op_precedence[right_class] < op_precedence[str(t.__class__)]):
-            has_paren = True
-        if has_paren:
-            self._write('(')
-        self._dispatch(t.right)
-        if has_paren:
-            self._write(')')
-
-    def _float(self, t):
-        # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
-        # We prefer str here.
-        self._write(str(t))
-
-    def _str(self, t):
-        self._write(repr(t))
-        
-    def _tuple(self, t):
-        self._write(str(t))
-
-    #########################################################################
-    # These are the methods from the _ast modules unparse.
-    #
-    # As our needs to handle more advanced code increase, we may want to
-    # modify some of the methods below so that they work for compiler.ast.
-    #########################################################################
-
-#    # stmt
-#    def _Expr(self, tree):
-#        self._fill()
-#        self._dispatch(tree.value)
-#
-#    def _Import(self, t):
-#        self._fill("import ")
-#        first = True
-#        for a in t.names:
-#            if first:
-#                first = False
-#            else:
-#                self._write(", ")
-#            self._write(a.name)
-#            if a.asname:
-#                self._write(" as "+a.asname)
-#
-##    def _ImportFrom(self, t):
-##        self._fill("from ")
-##        self._write(t.module)
-##        self._write(" import ")
-##        for i, a in enumerate(t.names):
-##            if i == 0:
-##                self._write(", ")
-##            self._write(a.name)
-##            if a.asname:
-##                self._write(" as "+a.asname)
-##        # XXX(jpe) what is level for?
-##
-#
-#    def _Break(self, t):
-#        self._fill("break")
-#
-#    def _Continue(self, t):
-#        self._fill("continue")
-#
-#    def _Delete(self, t):
-#        self._fill("del ")
-#        self._dispatch(t.targets)
-#
-#    def _Assert(self, t):
-#        self._fill("assert ")
-#        self._dispatch(t.test)
-#        if t.msg:
-#            self._write(", ")
-#            self._dispatch(t.msg)
-#
-#    def _Exec(self, t):
-#        self._fill("exec ")
-#        self._dispatch(t.body)
-#        if t.globals:
-#            self._write(" in ")
-#            self._dispatch(t.globals)
-#        if t.locals:
-#            self._write(", ")
-#            self._dispatch(t.locals)
-#
-#    def _Print(self, t):
-#        self._fill("print ")
-#        do_comma = False
-#        if t.dest:
-#            self._write(">>")
-#            self._dispatch(t.dest)
-#            do_comma = True
-#        for e in t.values:
-#            if do_comma:self._write(", ")
-#            else:do_comma=True
-#            self._dispatch(e)
-#        if not t.nl:
-#            self._write(",")
-#
-#    def _Global(self, t):
-#        self._fill("global")
-#        for i, n in enumerate(t.names):
-#            if i != 0:
-#                self._write(",")
-#            self._write(" " + n)
-#
-#    def _Yield(self, t):
-#        self._fill("yield")
-#        if t.value:
-#            self._write(" (")
-#            self._dispatch(t.value)
-#            self._write(")")
-#
-#    def _Raise(self, t):
-#        self._fill('raise ')
-#        if t.type:
-#            self._dispatch(t.type)
-#        if t.inst:
-#            self._write(", ")
-#            self._dispatch(t.inst)
-#        if t.tback:
-#            self._write(", ")
-#            self._dispatch(t.tback)
-#
-#
-#    def _TryFinally(self, t):
-#        self._fill("try")
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#
-#        self._fill("finally")
-#        self._enter()
-#        self._dispatch(t.finalbody)
-#        self._leave()
-#
-#    def _excepthandler(self, t):
-#        self._fill("except ")
-#        if t.type:
-#            self._dispatch(t.type)
-#        if t.name:
-#            self._write(", ")
-#            self._dispatch(t.name)
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#
-#    def _ClassDef(self, t):
-#        self._write("\n")
-#        self._fill("class "+t.name)
-#        if t.bases:
-#            self._write("(")
-#            for a in t.bases:
-#                self._dispatch(a)
-#                self._write(", ")
-#            self._write(")")
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#
-#    def _FunctionDef(self, t):
-#        self._write("\n")
-#        for deco in t.decorators:
-#            self._fill("@")
-#            self._dispatch(deco)
-#        self._fill("def "+t.name + "(")
-#        self._dispatch(t.args)
-#        self._write(")")
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#
-#    def _For(self, t):
-#        self._fill("for ")
-#        self._dispatch(t.target)
-#        self._write(" in ")
-#        self._dispatch(t.iter)
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#        if t.orelse:
-#            self._fill("else")
-#            self._enter()
-#            self._dispatch(t.orelse)
-#            self._leave
-#
-#    def _While(self, t):
-#        self._fill("while ")
-#        self._dispatch(t.test)
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#        if t.orelse:
-#            self._fill("else")
-#            self._enter()
-#            self._dispatch(t.orelse)
-#            self._leave
-#
-#    # expr
-#    def _Str(self, tree):
-#        self._write(repr(tree.s))
-##
-#    def _Repr(self, t):
-#        self._write("`")
-#        self._dispatch(t.value)
-#        self._write("`")
-#
-#    def _Num(self, t):
-#        self._write(repr(t.n))
-#
-#    def _ListComp(self, t):
-#        self._write("[")
-#        self._dispatch(t.elt)
-#        for gen in t.generators:
-#            self._dispatch(gen)
-#        self._write("]")
-#
-#    def _GeneratorExp(self, t):
-#        self._write("(")
-#        self._dispatch(t.elt)
-#        for gen in t.generators:
-#            self._dispatch(gen)
-#        self._write(")")
-#
-#    def _comprehension(self, t):
-#        self._write(" for ")
-#        self._dispatch(t.target)
-#        self._write(" in ")
-#        self._dispatch(t.iter)
-#        for if_clause in t.ifs:
-#            self._write(" if ")
-#            self._dispatch(if_clause)
-#
-#    def _IfExp(self, t):
-#        self._dispatch(t.body)
-#        self._write(" if ")
-#        self._dispatch(t.test)
-#        if t.orelse:
-#            self._write(" else ")
-#            self._dispatch(t.orelse)
-#
-#    unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
-#    def _UnaryOp(self, t):
-#        self._write(self.unop[t.op.__class__.__name__])
-#        self._write("(")
-#        self._dispatch(t.operand)
-#        self._write(")")
-#
-#    binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
-#                    "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
-#                    "FloorDiv":"//", "Pow": "**"}
-#    def _BinOp(self, t):
-#        self._write("(")
-#        self._dispatch(t.left)
-#        self._write(")" + self.binop[t.op.__class__.__name__] + "(")
-#        self._dispatch(t.right)
-#        self._write(")")
-#
-#    boolops = {_ast.And: 'and', _ast.Or: 'or'}
-#    def _BoolOp(self, t):
-#        self._write("(")
-#        self._dispatch(t.values[0])
-#        for v in t.values[1:]:
-#            self._write(" %s " % self.boolops[t.op.__class__])
-#            self._dispatch(v)
-#        self._write(")")
-#
-#    def _Attribute(self,t):
-#        self._dispatch(t.value)
-#        self._write(".")
-#        self._write(t.attr)
-#
-##    def _Call(self, t):
-##        self._dispatch(t.func)
-##        self._write("(")
-##        comma = False
-##        for e in t.args:
-##            if comma: self._write(", ")
-##            else: comma = True
-##            self._dispatch(e)
-##        for e in t.keywords:
-##            if comma: self._write(", ")
-##            else: comma = True
-##            self._dispatch(e)
-##        if t.starargs:
-##            if comma: self._write(", ")
-##            else: comma = True
-##            self._write("*")
-##            self._dispatch(t.starargs)
-##        if t.kwargs:
-##            if comma: self._write(", ")
-##            else: comma = True
-##            self._write("**")
-##            self._dispatch(t.kwargs)
-##        self._write(")")
-#
-#    # slice
-#    def _Index(self, t):
-#        self._dispatch(t.value)
-#
-#    def _ExtSlice(self, t):
-#        for i, d in enumerate(t.dims):
-#            if i != 0:
-#                self._write(': ')
-#            self._dispatch(d)
-#
-#    # others
-#    def _arguments(self, t):
-#        first = True
-#        nonDef = len(t.args)-len(t.defaults)
-#        for a in t.args[0:nonDef]:
-#            if first:first = False
-#            else: self._write(", ")
-#            self._dispatch(a)
-#        for a,d in zip(t.args[nonDef:], t.defaults):
-#            if first:first = False
-#            else: self._write(", ")
-#            self._dispatch(a),
-#            self._write("=")
-#            self._dispatch(d)
-#        if t.vararg:
-#            if first:first = False
-#            else: self._write(", ")
-#            self._write("*"+t.vararg)
-#        if t.kwarg:
-#            if first:first = False
-#            else: self._write(", ")
-#            self._write("**"+t.kwarg)
-#
-##    def _keyword(self, t):
-##        self._write(t.arg)
-##        self._write("=")
-##        self._dispatch(t.value)
-#
-#    def _Lambda(self, t):
-#        self._write("lambda ")
-#        self._dispatch(t.args)
-#        self._write(": ")
-#        self._dispatch(t.body)
-
-
-

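The module deleted above is a visitor-style unparser: ``_dispatch`` routes each AST node to a handler method named after the node's class, while helpers like ``_write`` and ``_fill`` manage output and indentation. A minimal standalone sketch of that dispatch pattern (the node classes here are illustrative stand-ins, not part of this patch):

    class Name:
        def __init__(self, name):
            self.name = name

    class Add:
        def __init__(self, left, right):
            self.left, self.right = left, right

    class MiniUnparser:
        def __init__(self):
            self.out = []

        def _dispatch(self, node):
            # Route each node to the method named after its class,
            # just as the removed unparser does.
            getattr(self, '_' + type(node).__name__)(node)

        def _write(self, text):
            self.out.append(text)

        def _Name(self, t):
            self._write(t.name)

        def _Add(self, t):
            self._dispatch(t.left)
            self._write(' + ')
            self._dispatch(t.right)

    u = MiniUnparser()
    u._dispatch(Add(Name('a'), Name('b')))
    print(''.join(u.out))  # prints: a + b
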
diff -r abe1ef6f58b2d050be81c106ad8aac4eca0b326b -r 74fb4618dc190331fa9aac4b4826b5b9ae8e22c6 doc/extensions/numpydocmod/docscrape.py
--- a/doc/extensions/numpydocmod/docscrape.py
+++ /dev/null
@@ -1,500 +0,0 @@
-"""Extract reference documentation from the NumPy source tree.
-
-"""
-
-import inspect
-import textwrap
-import re
-import pydoc
-from StringIO import StringIO
-from warnings import warn
-
-class Reader(object):
-    """A line-based string reader.
-
-    """
-    def __init__(self, data):
-        """
-        Parameters
-        ----------
-        data : str
-           String with lines separated by '\n'.
-
-        """
-        if isinstance(data,list):
-            self._str = data
-        else:
-            self._str = data.split('\n') # store string as list of lines
-
-        self.reset()
-
-    def __getitem__(self, n):
-        return self._str[n]
-
-    def reset(self):
-        self._l = 0 # current line nr
-
-    def read(self):
-        if not self.eof():
-            out = self[self._l]
-            self._l += 1
-            return out
-        else:
-            return ''
-
-    def seek_next_non_empty_line(self):
-        for l in self[self._l:]:
-            if l.strip():
-                break
-            else:
-                self._l += 1
-
-    def eof(self):
-        return self._l >= len(self._str)
-
-    def read_to_condition(self, condition_func):
-        start = self._l
-        for line in self[start:]:
-            if condition_func(line):
-                return self[start:self._l]
-            self._l += 1
-            if self.eof():
-                return self[start:self._l+1]
-        return []
-
-    def read_to_next_empty_line(self):
-        self.seek_next_non_empty_line()
-        def is_empty(line):
-            return not line.strip()
-        return self.read_to_condition(is_empty)
-
-    def read_to_next_unindented_line(self):
-        def is_unindented(line):
-            return (line.strip() and (len(line.lstrip()) == len(line)))
-        return self.read_to_condition(is_unindented)
-
-    def peek(self,n=0):
-        if self._l + n < len(self._str):
-            return self[self._l + n]
-        else:
-            return ''
-
-    def is_empty(self):
-        return not ''.join(self._str).strip()
-
-
-class NumpyDocString(object):
-    def __init__(self, docstring, config={}):
-        docstring = textwrap.dedent(docstring).split('\n')
-
-        self._doc = Reader(docstring)
-        self._parsed_data = {
-            'Signature': '',
-            'Summary': [''],
-            'Extended Summary': [],
-            'Parameters': [],
-            'Returns': [],
-            'Raises': [],
-            'Warns': [],
-            'Other Parameters': [],
-            'Attributes': [],
-            'Methods': [],
-            'See Also': [],
-            'Notes': [],
-            'Warnings': [],
-            'References': '',
-            'Examples': '',
-            'index': {}
-            }
-
-        self._parse()
-
-    def __getitem__(self,key):
-        return self._parsed_data[key]
-
-    def __setitem__(self,key,val):
-        if not self._parsed_data.has_key(key):
-            warn("Unknown section %s" % key)
-        else:
-            self._parsed_data[key] = val
-
-    def _is_at_section(self):
-        self._doc.seek_next_non_empty_line()
-
-        if self._doc.eof():
-            return False
-
-        l1 = self._doc.peek().strip()  # e.g. Parameters
-
-        if l1.startswith('.. index::'):
-            return True
-
-        l2 = self._doc.peek(1).strip() #    ---------- or ==========
-        return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
-
-    def _strip(self,doc):
-        i = 0
-        j = 0
-        for i,line in enumerate(doc):
-            if line.strip(): break
-
-        for j,line in enumerate(doc[::-1]):
-            if line.strip(): break
-
-        return doc[i:len(doc)-j]
-
-    def _read_to_next_section(self):
-        section = self._doc.read_to_next_empty_line()
-
-        while not self._is_at_section() and not self._doc.eof():
-            if not self._doc.peek(-1).strip(): # previous line was empty
-                section += ['']
-
-            section += self._doc.read_to_next_empty_line()
-
-        return section
-
-    def _read_sections(self):
-        while not self._doc.eof():
-            data = self._read_to_next_section()
-            name = data[0].strip()
-
-            if name.startswith('..'): # index section
-                yield name, data[1:]
-            elif len(data) < 2:
-                yield StopIteration
-            else:
-                yield name, self._strip(data[2:])
-
-    def _parse_param_list(self,content):
-        r = Reader(content)
-        params = []
-        while not r.eof():
-            header = r.read().strip()
-            if ' : ' in header:
-                arg_name, arg_type = header.split(' : ')[:2]
-            else:
-                arg_name, arg_type = header, ''
-
-            desc = r.read_to_next_unindented_line()
-            desc = dedent_lines(desc)
-
-            params.append((arg_name,arg_type,desc))
-
-        return params
-
-
-    _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
-                           r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
-    def _parse_see_also(self, content):
-        """
-        func_name : Descriptive text
-            continued text
-        another_func_name : Descriptive text
-        func_name1, func_name2, :meth:`func_name`, func_name3
-
-        """
-        items = []
-
-        def parse_item_name(text):
-            """Match ':role:`name`' or 'name'"""
-            m = self._name_rgx.match(text)
-            if m:
-                g = m.groups()
-                if g[1] is None:
-                    return g[3], None
-                else:
-                    return g[2], g[1]
-            raise ValueError("%s is not a item name" % text)
-
-        def push_item(name, rest):
-            if not name:
-                return
-            name, role = parse_item_name(name)
-            items.append((name, list(rest), role))
-            del rest[:]
-
-        current_func = None
-        rest = []
-
-        for line in content:
-            if not line.strip(): continue
-
-            m = self._name_rgx.match(line)
-            if m and line[m.end():].strip().startswith(':'):
-                push_item(current_func, rest)
-                current_func, line = line[:m.end()], line[m.end():]
-                rest = [line.split(':', 1)[1].strip()]
-                if not rest[0]:
-                    rest = []
-            elif not line.startswith(' '):
-                push_item(current_func, rest)
-                current_func = None
-                if ',' in line:
-                    for func in line.split(','):
-                        if func.strip():
-                            push_item(func, [])
-                elif line.strip():
-                    current_func = line
-            elif current_func is not None:
-                rest.append(line.strip())
-        push_item(current_func, rest)
-        return items
-
-    def _parse_index(self, section, content):
-        """
-        .. index: default
-           :refguide: something, else, and more
-
-        """
-        def strip_each_in(lst):
-            return [s.strip() for s in lst]
-
-        out = {}
-        section = section.split('::')
-        if len(section) > 1:
-            out['default'] = strip_each_in(section[1].split(','))[0]
-        for line in content:
-            line = line.split(':')
-            if len(line) > 2:
-                out[line[1]] = strip_each_in(line[2].split(','))
-        return out
-
-    def _parse_summary(self):
-        """Grab signature (if given) and summary"""
-        if self._is_at_section():
-            return
-
-        summary = self._doc.read_to_next_empty_line()
-        summary_str = " ".join([s.strip() for s in summary]).strip()
-        if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
-            self['Signature'] = summary_str
-            if not self._is_at_section():
-                self['Summary'] = self._doc.read_to_next_empty_line()
-        else:
-            self['Summary'] = summary
-
-        if not self._is_at_section():
-            self['Extended Summary'] = self._read_to_next_section()
-
-    def _parse(self):
-        self._doc.reset()
-        self._parse_summary()
-
-        for (section,content) in self._read_sections():
-            if not section.startswith('..'):
-                section = ' '.join([s.capitalize() for s in section.split(' ')])
-            if section in ('Parameters', 'Returns', 'Raises', 'Warns',
-                           'Other Parameters', 'Attributes', 'Methods'):
-                self[section] = self._parse_param_list(content)
-            elif section.startswith('.. index::'):
-                self['index'] = self._parse_index(section, content)
-            elif section == 'See Also':
-                self['See Also'] = self._parse_see_also(content)
-            else:
-                self[section] = content
-
-    # string conversion routines
-
-    def _str_header(self, name, symbol='-'):
-        return [name, len(name)*symbol]
-
-    def _str_indent(self, doc, indent=4):
-        out = []
-        for line in doc:
-            out += [' '*indent + line]
-        return out
-
-    def _str_signature(self):
-        if self['Signature']:
-            return [self['Signature'].replace('*','\*')] + ['']
-        else:
-            return ['']
-
-    def _str_summary(self):
-        if self['Summary']:
-            return self['Summary'] + ['']
-        else:
-            return []
-
-    def _str_extended_summary(self):
-        if self['Extended Summary']:
-            return self['Extended Summary'] + ['']
-        else:
-            return []
-
-    def _str_param_list(self, name):
-        out = []
-        if self[name]:
-            out += self._str_header(name)
-            for param,param_type,desc in self[name]:
-                out += ['%s : %s' % (param, param_type)]
-                out += self._str_indent(desc)
-            out += ['']
-        return out
-
-    def _str_section(self, name):
-        out = []
-        if self[name]:
-            out += self._str_header(name)
-            out += self[name]
-            out += ['']
-        return out
-
-    def _str_see_also(self, func_role):
-        if not self['See Also']: return []
-        out = []
-        out += self._str_header("See Also")
-        last_had_desc = True
-        for func, desc, role in self['See Also']:
-            if role:
-                link = ':%s:`%s`' % (role, func)
-            elif func_role:
-                link = ':%s:`%s`' % (func_role, func)
-            else:
-                link = "`%s`_" % func
-            if desc or last_had_desc:
-                out += ['']
-                out += [link]
-            else:
-                out[-1] += ", %s" % link
-            if desc:
-                out += self._str_indent([' '.join(desc)])
-                last_had_desc = True
-            else:
-                last_had_desc = False
-        out += ['']
-        return out
-
-    def _str_index(self):
-        idx = self['index']
-        out = []
-        out += ['.. index:: %s' % idx.get('default','')]
-        for section, references in idx.iteritems():
-            if section == 'default':
-                continue
-            out += ['   :%s: %s' % (section, ', '.join(references))]
-        return out
-
-    def __str__(self, func_role=''):
-        out = []
-        out += self._str_signature()
-        out += self._str_summary()
-        out += self._str_extended_summary()
-        for param_list in ('Parameters', 'Returns', 'Other Parameters',
-                           'Raises', 'Warns'):
-            out += self._str_param_list(param_list)
-        out += self._str_section('Warnings')
-        out += self._str_see_also(func_role)
-        for s in ('Notes','References','Examples'):
-            out += self._str_section(s)
-        for param_list in ('Attributes', 'Methods'):
-            out += self._str_param_list(param_list)
-        out += self._str_index()
-        return '\n'.join(out)
-
-
-def indent(str,indent=4):
-    indent_str = ' '*indent
-    if str is None:
-        return indent_str
-    lines = str.split('\n')
-    return '\n'.join(indent_str + l for l in lines)
-
-def dedent_lines(lines):
-    """Deindent a list of lines maximally"""
-    return textwrap.dedent("\n".join(lines)).split("\n")
-
-def header(text, style='-'):
-    return text + '\n' + style*len(text) + '\n'
-
-
-class FunctionDoc(NumpyDocString):
-    def __init__(self, func, role='func', doc=None, config={}):
-        self._f = func
-        self._role = role # e.g. "func" or "meth"
-
-        if doc is None:
-            if func is None:
-                raise ValueError("No function or docstring given")
-            doc = inspect.getdoc(func) or ''
-        NumpyDocString.__init__(self, doc)
-
-        if not self['Signature'] and func is not None:
-            func, func_name = self.get_func()
-            try:
-                # try to read signature
-                argspec = inspect.getargspec(func)
-                argspec = inspect.formatargspec(*argspec)
-                argspec = argspec.replace('*','\*')
-                signature = '%s%s' % (func_name, argspec)
-            except TypeError, e:
-                signature = '%s()' % func_name
-            self['Signature'] = signature
-
-    def get_func(self):
-        func_name = getattr(self._f, '__name__', self.__class__.__name__)
-        if inspect.isclass(self._f):
-            func = getattr(self._f, '__call__', self._f.__init__)
-        else:
-            func = self._f
-        return func, func_name
-
-    def __str__(self):
-        out = ''
-
-        func, func_name = self.get_func()
-        signature = self['Signature'].replace('*', '\*')
-
-        roles = {'func': 'function',
-                 'meth': 'method'}
-
-        if self._role:
-            if not roles.has_key(self._role):
-                print("Warning: invalid role %s" % self._role)
-            out += '.. %s:: %s\n    \n\n' % (roles.get(self._role,''),
-                                             func_name)
-
-        out += super(FunctionDoc, self).__str__(func_role=self._role)
-        return out
-
-
-class ClassDoc(NumpyDocString):
-    def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
-                 config={}):
-        if not inspect.isclass(cls) and cls is not None:
-            raise ValueError("Expected a class or None, but got %r" % cls)
-        self._cls = cls
-
-        if modulename and not modulename.endswith('.'):
-            modulename += '.'
-        self._mod = modulename
-
-        if doc is None:
-            if cls is None:
-                raise ValueError("No class or documentation string given")
-            doc = pydoc.getdoc(cls)
-
-        NumpyDocString.__init__(self, doc)
-
-        if config.get('show_class_members', True):
-            if not self['Methods']:
-                self['Methods'] = [(name, '', '')
-                                   for name in sorted(self.methods)]
-            if not self['Attributes']:
-                self['Attributes'] = [(name, '', '')
-                                      for name in sorted(self.properties)]
-
-    @property
-    def methods(self):
-        if self._cls is None:
-            return []
-        return [name for name,func in inspect.getmembers(self._cls)
-                if not name.startswith('_') and callable(func)]
-
-    @property
-    def properties(self):
-        if self._cls is None:
-            return []
-        return [name for name,func in inspect.getmembers(self._cls)
-                if not name.startswith('_') and func is None]

This diff is so big that we needed to truncate the remainder.

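The ``docscrape.py`` module removed above parses NumPy-style docstrings into named sections ('Signature', 'Summary', 'Parameters', and so on). A hypothetical usage sketch, assuming the removed module were still importable (note it is Python 2 code, e.g. the ``StringIO`` import):

    from docscrape import NumpyDocString  # hypothetical: the module removed above

    doc = NumpyDocString("""
    Compute the arithmetic mean.

    Parameters
    ----------
    x : array_like
        Input values.

    Returns
    -------
    m : float
        The mean of x.
    """)

    print(doc['Parameters'])  # [('x', 'array_like', ['Input values.'])]
    print(doc['Returns'])     # [('m', 'float', ['The mean of x.'])]
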
https://bitbucket.org/yt_analysis/yt/commits/67b3aeb748b7/
Changeset:   67b3aeb748b7
Branch:      yt
User:        atmyers
Date:        2016-02-02 01:03:40+00:00
Summary:     adding an example of using displacement fields to the mesh rendering docs.
Affected #:  1 file

diff -r 74fb4618dc190331fa9aac4b4826b5b9ae8e22c6 -r 67b3aeb748b7daca1cd95b70659030dd3a597e9e doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -248,6 +248,41 @@
     sc.annotate_mesh_lines()
     sc.save()
 
+The dataset in the above example contains displacement fields, so this is a good
+opportunity to demonstrate their use. The following example is exactly like the
+one above, except that we scale the displacements by a factor of 10.0 and
+additionally offset the mesh by 0.01 units in the x-direction:
+
+    import yt
+
+    # We load the last time frame
+    ds = yt.load("MOOSE_sample_data/mps_out.e", step=-1,
+                 displacements={'connect2': (10.0, [0.01, 0.0, 0.0])})
+
+    # create a default scene
+    sc = yt.create_scene(ds, ("connect2", "temp"))
+
+    # override the default colormap. This time we also override
+    # the default color bounds
+    ms = sc.get_source(0)
+    ms.cmap = 'hot'
+    ms.color_bounds = (500.0, 1700.0)
+
+    # adjust the camera position and orientation
+    cam = sc.camera
+    camera_position = ds.arr([-1.0, 1.0, -0.5], 'code_length')
+    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    cam.width = ds.arr([0.05, 0.05, 0.05], 'code_length')
+    cam.set_position(camera_position, north_vector)
+    
+    # increase the default resolution
+    cam.resolution = (800, 800)
+
+    # render, draw the element boundaries, and save
+    sc.render()
+    sc.annotate_mesh_lines()
+    sc.save()
+
 As with other volume renderings in yt, you can swap out different lenses. Here is 
 an example that uses a "perspective" lens, for which the rays diverge from the 
 camera position according to some opening angle:

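For reference, the ``displacements`` keyword used above maps each mesh block name to a (scale, offset) tuple; any mesh left out of the dictionary is rendered undisplaced. A hypothetical setting that treats the two blocks differently (values illustrative):

    displacements = {
        'connect1': (1.0,  [0.0, 0.0, 0.0]),   # raw displacements, no offset
        'connect2': (10.0, [0.01, 0.0, 0.0]),  # exaggerate 10x, nudge in x
    }
    # ds = yt.load("MOOSE_sample_data/mps_out.e", step=-1, displacements=displacements)
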

https://bitbucket.org/yt_analysis/yt/commits/e93cac126e9c/
Changeset:   e93cac126e9c
Branch:      yt
User:        atmyers
Date:        2016-02-02 02:29:10+00:00
Summary:     remove unused import
Affected #:  1 file

diff -r 67b3aeb748b7daca1cd95b70659030dd3a597e9e -r e93cac126e9c9c4f3602e91da1968f13b07bd099 yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -27,7 +27,7 @@
     ExodusIIFieldInfo
 from .util import \
     load_info_records, sanitize_string
-from yt.funcs import iterable
+
 
 class ExodusIIUnstructuredMesh(UnstructuredMesh):
     _index_offset = 1


https://bitbucket.org/yt_analysis/yt/commits/91a5b65ff969/
Changeset:   91a5b65ff969
Branch:      yt
User:        atmyers
Date:        2016-02-02 06:15:00+00:00
Summary:     fixing new docs
Affected #:  1 file

diff -r e93cac126e9c9c4f3602e91da1968f13b07bd099 -r 91a5b65ff969c79758a2c1a2062c0c3ac5a952b2 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -253,6 +253,8 @@
 above, except we scale the displacements by a factor of a 10.0, and additionally 
 add an offset to the mesh by 1.0 unit in the x-direction:
 
+.. python-script::
+
     import yt
 
     # We load the last time frame


https://bitbucket.org/yt_analysis/yt/commits/5b35020ba8fb/
Changeset:   5b35020ba8fb
Branch:      yt
User:        MatthewTurk
Date:        2016-02-03 17:17:10+00:00
Summary:     Merged in atmyers/yt (pull request #1946)

Displacement Fields for ExodusII Datasets.
Affected #:  4 files

diff -r 03adb8fb971da512e3b17bfb39ad595eb59819ec -r 5b35020ba8fbcdda7134a48c6edfd019368e9248 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -330,12 +330,17 @@
 Exodus II Data
 --------------
 
+.. note::
+   To load Exodus II data, you need to have the `netcdf4 <http://unidata.github.io/
+   netcdf4-python/>`_ python interface installed.
+
 Exodus II is a file format for Finite Element datasets that is used by the MOOSE
 framework for file IO. Support for this format (and for unstructured mesh data in 
-general) is a new feature as of yt 3.3, so while we aim to fully support it, we also expect 
-there to be some buggy features at present. Currently, yt can visualize first-order
-mesh types only (4-node quads, 8-node hexes, 3-node triangles, and 4-node tetrahedra).
-Development of higher-order visualization capability is a work in progress.
+general) is a new feature as of yt 3.3, so while we aim to fully support it, we 
+also expect there to be some buggy features at present. Currently, yt can visualize 
+quads, hexes, triangles, and tetrahedral element types at first order. Additionally,
+there is experimental support for the high-order visualization of 20-node hex elements.
+Development of more high-order visualization capability is a work in progress.
 
 To load an Exodus II dataset, you can use the ``yt.load`` command on the Exodus II
 file:
@@ -348,14 +353,15 @@
 Because Exodus II datasets can have multiple steps (which can correspond to time steps, 
 picard iterations, non-linear solve iterations, etc...), you can also specify a step
 argument when you load an Exodus II data that defines the index at which to look when
-you read data from the file.
+you read data from the file. Omitting this argument is the same as passing in 0, and
+setting ``step=-1`` selects the last time output in the file.
 
 You can access the connectivity information directly by doing:
 
 .. code-block:: python
     
    import yt
-   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=-1)
    print(ds.index.meshes[0].connectivity_coords)
    print(ds.index.meshes[0].connectivity_indices)
    print(ds.index.meshes[1].connectivity_coords)
@@ -368,7 +374,7 @@
 .. code-block:: python
     
    import yt
-   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
    print(ds.field_list)
 
 This will give you a list of field names like ``('connect1', 'diffused')`` and 
@@ -380,7 +386,7 @@
 .. code-block:: python
     
    import yt
-   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
    ad = ds.all_data()  # geometric selection, this just grabs everything
    print(ad['connect1', 'convected'])
 
@@ -390,7 +396,7 @@
 .. code-block:: python
 
    import yt
-   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
    ad = ds.all_data()
    print(ad['connect1', 'convected'].shape)
 
@@ -401,7 +407,7 @@
 .. code-block:: python
 
    import yt
-   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
    ad = ds.all_data()
    print(ad['connect1', 'vertex_x'])
 
@@ -411,7 +417,7 @@
 .. code-block:: python
 
    import yt
-   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
    ad = ds.all_data()
    print(ad['connect1', 'conv_indicator'].shape)
 
@@ -420,6 +426,61 @@
 For information about visualizing unstructured mesh data, including Exodus II datasets, 
 please see :ref:`unstructured-mesh-slices` and :ref:`unstructured_mesh_rendering`. 
 
+Displacement Fields
+^^^^^^^^^^^^^^^^^^^
+
+Finite element codes often solve for the displacement of each vertex from its 
+original position as a node variable, rather than updating the actual vertex 
+positions with time. For analysis and visualization, it is often useful to turn 
+these displacements on or off, and to be able to scale them arbitrarily to 
+emphasize certain features of the solution. To allow this, if ``yt`` detects 
+displacement fields in an Exodus II dataset (using the convention that they will
+be named ``disp_x``, ``disp_y``, etc...), it will optionally add these to
+the mesh vertex positions for the purposes of visualization. Displacement fields
+can be controlled when a dataset is loaded by passing in an optional dictionary 
+to the ``yt.load`` command. This feature is turned off by default, meaning that 
+a dataset loaded as 
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/mps_out.e")
+
+will not include the displacements in the vertex positions. The displacements can
+be turned on separately for each mesh in the file by passing in a dictionary of
+(scale, offset) pairs for the meshes you want to enable displacements for.
+For example, the following code snippet turns displacements on for the second 
+mesh, but not the first:
+
+.. code-block:: python
+
+    import yt
+    ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
+                 displacements={'connect2': (1.0, [0.0, 0.0, 0.0])})
+
+The displacements can also be scaled by an arbitrary factor before they are
+added to the vertex positions. The following code turns on displacements
+for both ``connect1`` and ``connect2``, scaling the former by a factor of 5.0
+and the latter by a factor of 10.0:
+
+.. code-block:: python
+
+    import yt
+    ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
+                 displacements={'connect1': (5.0, [0.0, 0.0, 0.0]),
+                                'connect2': (10.0, [0.0, 0.0, 0.0])})
+
+Finally, we can also apply an arbitrary offset to the mesh vertices after 
+the scale factor is applied. For example, the following code scales all
+displacements in the second mesh by a factor of 5.0, and then shifts
+each vertex in the mesh by 1.0 unit in the z-direction:
+
+.. code-block:: python
+
+    import yt
+    ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
+                  displacements={'connect2': (5.0, [0.0, 0.0, 1.0])})
+
 
 FITS Data
 ---------

diff -r 03adb8fb971da512e3b17bfb39ad595eb59819ec -r 5b35020ba8fbcdda7134a48c6edfd019368e9248 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -248,6 +248,43 @@
     sc.annotate_mesh_lines()
     sc.save()
 
+The dataset in the above example contains displacement fields, so this is a good
+opportunity to demonstrate their use. The following example is exactly like the
+one above, except that we scale the displacements by a factor of 10.0 and
+additionally offset the mesh by 0.01 units in the x-direction:
+
+.. python-script::
+
+    import yt
+
+    # We load the last time frame
+    ds = yt.load("MOOSE_sample_data/mps_out.e", step=-1,
+                 displacements={'connect2': (10.0, [0.01, 0.0, 0.0])})
+
+    # create a default scene
+    sc = yt.create_scene(ds, ("connect2", "temp"))
+
+    # override the default colormap. This time we also override
+    # the default color bounds
+    ms = sc.get_source(0)
+    ms.cmap = 'hot'
+    ms.color_bounds = (500.0, 1700.0)
+
+    # adjust the camera position and orientation
+    cam = sc.camera
+    camera_position = ds.arr([-1.0, 1.0, -0.5], 'code_length')
+    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    cam.width = ds.arr([0.05, 0.05, 0.05], 'code_length')
+    cam.set_position(camera_position, north_vector)
+    
+    # increase the default resolution
+    cam.resolution = (800, 800)
+
+    # render, draw the element boundaries, and save
+    sc.render()
+    sc.annotate_mesh_lines()
+    sc.save()
+
 As with other volume renderings in yt, you can swap out different lenses. Here is 
 an example that uses a "perspective" lens, for which the rays diverge from the 
 camera position according to some opening angle:

diff -r 03adb8fb971da512e3b17bfb39ad595eb59819ec -r 5b35020ba8fbcdda7134a48c6edfd019368e9248 yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -42,10 +42,16 @@
 
     def _initialize_mesh(self):
         coords = self.ds._read_coordinates()
-        self.meshes = [ExodusIIUnstructuredMesh(
-            mesh_id, self.index_filename, conn_ind, coords, self)
-                       for mesh_id, conn_ind in
-                       enumerate(self.ds._read_connectivity())]
+        connectivity = self.ds._read_connectivity()
+        self.meshes = []
+        for mesh_id, conn_ind in enumerate(connectivity):
+            displaced_coords = self.ds._apply_displacement(coords, mesh_id)
+            mesh = ExodusIIUnstructuredMesh(mesh_id, 
+                                            self.index_filename,
+                                            conn_ind, 
+                                            displaced_coords, 
+                                            self)
+            self.meshes.append(mesh)
 
     def _detect_output_fields(self):
         elem_names = self.dataset.parameters['elem_names']
@@ -63,13 +69,87 @@
     def __init__(self,
                  filename,
                  step=0,
+                 displacements=None,
                  dataset_type='exodus_ii',
                  storage_filename=None,
                  units_override=None):
+        """
 
+        A class used to represent an on-disk ExodusII dataset. The initializer takes 
+        two extra optional parameters, "step" and "displacements."
+
+        Parameters
+        ----------
+
+        step : integer
+            The step tells which time index to slice at. An error is raised if
+            the index is larger than the number of time outputs in the ExodusII
+            file. Passing step=-1 picks out the last time output.
+            Default is 0.
+
+        displacements : dictionary of tuples
+            This is a dictionary that controls whether or not displacement fields
+            will be used with the meshes in this dataset. The keys of the
+            displacements dictionary should be the names of meshes in the file
+            (e.g., "connect1", "connect2", etc... ), while the values should be 
+            tuples of the form (scale, offset), where "scale" is a floating point
+            value and "offset" is an array-like with one component for each spatial
+            dimension in the dataset. When the displacements for a given mesh are
+            turned on, the coordinates of the vertices in that mesh get transformed
+            as: 
+
+                  vertex_x = vertex_x + disp_x*scale + offset_x
+                  vertex_y = vertex_y + disp_y*scale + offset_y
+                  vertex_z = vertex_z + disp_z*scale + offset_z
+
+            If no displacement fields (assumed to be named 'disp_x', 'disp_y',
+            etc...) are detected in the output file, then this dictionary is
+            ignored.
+
+        Examples
+        --------
+
+        This will load the Dataset at time index '0' with displacements turned off.
+
+        >>> import yt
+        >>> ds = yt.load("MOOSE_sample_data/mps_out.e")
+
+        This will load the Dataset at the final index with displacements turned off.
+
+        >>> import yt
+        >>> ds = yt.load("MOOSE_sample_data/mps_out.e", step=-1)
+
+        This will load the Dataset at index 10, turning on displacement fields for 
+        the 2nd mesh without applying any scale or offset:
+
+        >>> import yt
+        >>> ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
+                         displacements={'connect2': (1.0, [0.0, 0.0, 0.0])})
+
+        This will load the Dataset at index 10, scaling the displacements
+        in the 2nd mesh by a factor of 5 while not applying an offset:
+
+        >>> import yt
+        >>> ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
+                         displacements={'connect2': (5.0, [0.0, 0.0, 0.0])})
+        
+        This will load the Dataset at index 10, scaling the displacements for
+        the 2nd mesh by a factor of 5.0 and shifting all the vertices in 
+        the first mesh by 1.0 unit in the z direction:
+
+        >>> import yt
+        >>> ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
+                         displacements={'connect1': (0.0, [0.0, 0.0, 1.0]),
+                                        'connect2': (5.0, [0.0, 0.0, 0.0])})
+
+        """
         self.parameter_filename = filename
         self.fluid_types += self._get_fluid_types()
         self.step = step
+        if displacements is None:
+            self.displacements = {}
+        else:
+            self.displacements = displacements
         super(ExodusIIDataset, self).__init__(filename, dataset_type,
                                               units_override=units_override)
         self.index_filename = filename
@@ -102,8 +182,7 @@
         self.parameters['num_meshes'] = self._vars['eb_status'].shape[0]
         self.parameters['elem_names'] = self._get_elem_names()
         self.parameters['nod_names'] = self._get_nod_names()
-        self.domain_left_edge = self._load_domain_edge(0)
-        self.domain_right_edge = self._load_domain_edge(1)
+        self.domain_left_edge, self.domain_right_edge = self._load_domain_edge()
 
         # set up pseudo-3D for lodim datasets here
         if self.dimensionality == 2:
@@ -223,12 +302,33 @@
 
         mylog.info("Loading coordinates")
         if "coord" not in self._vars:
-            return np.array([self._vars["coord%s" % ax][:]
-                             for ax in coord_axes]).transpose().copy()
+            coords = np.array([self._vars["coord%s" % ax][:]
+                               for ax in coord_axes]).transpose().copy()
         else:
-            return np.array([coord for coord in
-                             self._vars["coord"][:]]).transpose().copy()
+            coords = np.array([coord for coord in
+                               self._vars["coord"][:]]).transpose().copy()
+        return coords
 
+    def _apply_displacement(self, coords, mesh_id):
+        
+        mesh_name = "connect%d" % (mesh_id + 1)
+        if mesh_name not in self.displacements:
+            new_coords = coords.copy()
+            return new_coords
+
+        new_coords = np.zeros_like(coords)
+        fac = self.displacements[mesh_name][0]
+        offset = self.displacements[mesh_name][1]
+
+        coord_axes = 'xyz'[:self.dimensionality]
+        for i, ax in enumerate(coord_axes):
+            if "disp_%s" % ax in self.parameters['nod_names']:
+                ind = self.parameters['nod_names'].index("disp_%s" % ax)
+                disp = self._vars['vals_nod_var%d' % (ind + 1)][self.step]
+                new_coords[:, i] = coords[:, i] + fac*disp + offset[i]
+
+        return new_coords
+        
     def _read_connectivity(self):
         """
         Loads the connectivity data for the mesh
@@ -239,17 +339,22 @@
             connectivity.append(self._vars["connect%d" % (i+1)][:].astype("i8"))
         return connectivity
 
-    def _load_domain_edge(self, domain_idx):
+    def _load_domain_edge(self):
         """
         Loads the boundaries for the domain edge
 
-        Parameters:
-        - domain_idx: 0 corresponds to the left edge, 1 corresponds to the right edge
         """
-        if domain_idx == 0:
-            return self._read_coordinates().min(axis=0)
-        if domain_idx == 1:
-            return self._read_coordinates().max(axis=0)
+        
+        coords = self._read_coordinates()
+        connectivity = self._read_connectivity()
+
+        mi = 1e300
+        ma = -1e300
+        for mesh_id, _ in enumerate(connectivity):
+            displaced_coords = self._apply_displacement(coords, mesh_id)
+            mi = np.minimum(displaced_coords.min(axis=0), mi)
+            ma = np.maximum(displaced_coords.max(axis=0), ma)
+        return mi, ma
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

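A condensed, standalone sketch of the per-mesh transformation that ``_apply_displacement`` implements above (inputs illustrative; the real method reads the ``disp_*`` node variables from the file at the requested step):

    import numpy as np

    def apply_displacement(coords, disp, scale, offset):
        # new = old + scale*disp + offset, applied axis by axis,
        # mirroring _apply_displacement above.
        new_coords = coords.copy()
        for i in range(coords.shape[1]):
            new_coords[:, i] = coords[:, i] + scale*disp[:, i] + offset[i]
        return new_coords

    coords = np.array([[0.0, 0.0, 0.0],
                       [1.0, 1.0, 1.0]])
    disp   = np.array([[0.1, 0.0, 0.0],
                       [0.2, 0.0, 0.0]])
    print(apply_displacement(coords, disp, 5.0, [0.0, 0.0, 1.0]))
    # [[0.5 0.  1. ]
    #  [2.  1.  2. ]]
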
diff -r 03adb8fb971da512e3b17bfb39ad595eb59819ec -r 5b35020ba8fbcdda7134a48c6edfd019368e9248 yt/frontends/exodus_ii/tests/test_outputs.py
--- a/yt/frontends/exodus_ii/tests/test_outputs.py
+++ b/yt/frontends/exodus_ii/tests/test_outputs.py
@@ -18,7 +18,10 @@
     assert_array_equal, \
     requires_file
 from yt.utilities.answer_testing.framework import \
-    data_dir_load
+    data_dir_load, \
+    requires_ds, \
+    GenericArrayTest
+
 
 out = "ExodusII/out.e"
 
@@ -69,3 +72,18 @@
     field_list = [('connect1', 'forced')]
     yield assert_equal, str(ds), "gold.e"
     yield assert_array_equal, ds.field_list, field_list 
+
+big_data = "MOOSE_sample_data/mps_out.e"
+
+
+@requires_ds(big_data)
+def test_displacement_fields():
+    displacement_dicts =[{'connect2': (5.0, [0.0, 0.0, 0.0])},
+                         {'connect1': (1.0, [1.0, 2.0, 3.0]), 
+                          'connect2': (0.0, [0.0, 0.0, 0.0])}]
+    for disp in displacement_dicts:
+        ds = data_dir_load(big_data, displacements=disp)
+        for mesh in ds.index.meshes:
+            def array_func(mesh=mesh):  # bind this mesh at definition time
+                return mesh.connectivity_coords
+            yield GenericArrayTest(ds, array_func, 12)

Repository URL: https://bitbucket.org/yt_analysis/yt/
