[yt-svn] commit/yt: 4 new changesets

commits-noreply at bitbucket.org
Wed Sep 7 11:31:36 PDT 2016


4 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/9e6996751749/
Changeset:   9e6996751749
Branch:      yt
User:        xarthisius
Date:        2016-08-15 19:57:35+00:00
Summary:     [opt] Pass list of fields instead of single field to get_vertex_centered_data
Affected #:  5 files

diff -r 56c0b53b459c4cb6c6a3bdf67634e142ec4c0f52 -r 9e6996751749e1859cce6dec78fc7cb94d09f745 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -1123,12 +1123,15 @@
                                        mask, sample_values = None,
                                        sample_type = "face",
                                        no_ghost = False):
-        vals = grid.get_vertex_centered_data(field, no_ghost = no_ghost)
+        # TODO: check if multiple fields can be passed here
+        vals = grid.get_vertex_centered_data([field], no_ghost=no_ghost)[field]
         if sample_values is not None:
-            svals = grid.get_vertex_centered_data(sample_values)
+            # TODO: is no_ghost=False correct here?
+            svals = grid.get_vertex_centered_data([sample_values])[sample_values]
         else:
             svals = None
-        sample_type = {"face":1, "vertex":2}[sample_type]
+
+        sample_type = {"face": 1, "vertex": 2}[sample_type]
         my_verts = march_cubes_grid(value, vals, mask, grid.LeftEdge,
                                     grid.dds, svals, sample_type)
         return my_verts
@@ -1200,15 +1203,21 @@
 
     def _calculate_flux_in_grid(self, grid, mask,
             field_x, field_y, field_z, fluxing_field = None):
-        vals = grid.get_vertex_centered_data(self.surface_field)
+
+        vc_fields = [self.surface_field, field_x, field_y, field_z]
+        if fluxing_field is not None:
+            vc_fields.append(fluxing_field)
+
+        vc_data = grid.get_vertex_centered_data(vc_fields)
         if fluxing_field is None:
-            ff = np.ones(vals.shape, dtype="float64")
+            ff = np.ones_like(vc_data[self.surface_field], dtype="float64")
         else:
-            ff = grid.get_vertex_centered_data(fluxing_field)
-        xv, yv, zv = [grid.get_vertex_centered_data(f)
-                      for f in [field_x, field_y, field_z]]
-        return march_cubes_grid_flux(self.field_value, vals, xv, yv, zv,
-                    ff, mask, grid.LeftEdge, grid.dds)
+            ff = vc_data[fluxing_field]
+
+        return march_cubes_grid_flux(
+            self.field_value, vc_data[self.surface_field], vc_data[field_x],
+            vc_data[field_y], vc_data[field_z], ff, mask, grid.LeftEdge,
+            grid.dds)
 
     @property
     def triangles(self):

diff -r 56c0b53b459c4cb6c6a3bdf67634e142ec4c0f52 -r 9e6996751749e1859cce6dec78fc7cb94d09f745 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1595,13 +1595,18 @@
 
     def _extract_isocontours_from_grid(self, grid, mask, field, value,
                                        sample_values=None):
-        vals = grid.get_vertex_centered_data(field, no_ghost=False)
+        vc_fields = [field]
         if sample_values is not None:
-            svals = grid.get_vertex_centered_data(sample_values)
-        else:
+            vc_fields.append(sample_values)
+
+        vc_data = grid.get_vertex_centered_data(vc_fields, no_ghost=False)
+        try:
+            svals = vc_data[sample_values]
+        except KeyError:
             svals = None
-        my_verts = march_cubes_grid(value, vals, mask, grid.LeftEdge,
-                                    grid.dds, svals)
+
+        my_verts = march_cubes_grid(value, vc_data[field], mask,
+            grid.LeftEdge, grid.dds, svals)
         return my_verts
 
     def calculate_isocontour_flux(self, field, value,
@@ -1673,15 +1678,21 @@
 
     def _calculate_flux_in_grid(self, grid, mask, field, value,
                     field_x, field_y, field_z, fluxing_field = None):
-        vals = grid.get_vertex_centered_data(field)
+        
+        vc_fields = [field, field_x, field_y, field_z]
+        if fluxing_field is not None:
+            vc_fields.append(fluxing_field)
+
+        vc_data = grid.get_vertex_centered_data(vc_fields)
+
         if fluxing_field is None:
-            ff = np.ones(vals.shape, dtype="float64")
+            ff = np.ones_like(vc_data[field], dtype="float64")
         else:
-            ff = grid.get_vertex_centered_data(fluxing_field)
-        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in
-                     [field_x, field_y, field_z]]
-        return march_cubes_grid_flux(value, vals, xv, yv, zv,
-                    ff, mask, grid.LeftEdge, grid.dds)
+            ff = vc_data[fluxing_field]
+
+        return march_cubes_grid_flux(value, vc_data[field], vc_data[field_x],
+            vc_data[field_y], vc_data[field_z], ff, mask, grid.LeftEdge,
+            grid.dds)
 
     def extract_connected_sets(self, field, num_levels, min_val, max_val,
                                log_space=True, cumulative=True):

diff -r 56c0b53b459c4cb6c6a3bdf67634e142ec4c0f52 -r 9e6996751749e1859cce6dec78fc7cb94d09f745 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -251,33 +251,39 @@
         cube._base_grid = self
         return cube
 
-    def get_vertex_centered_data(self, field, smoothed=True, no_ghost=False):
-        new_field = np.zeros(self.ActiveDimensions + 1, dtype='float64')
+    def get_vertex_centered_data(self, fields, smoothed=True, no_ghost=False):
+        # Make sure the field list has only unique entries
+        fields = list(set(fields))
+        new_fields = {}
+        for field in fields:
+            new_fields[field] = np.zeros(self.ActiveDimensions + 1, dtype='float64')
 
         if no_ghost:
-            # Ensure we have the native endianness in this array.  Avoid making
-            # a copy if possible.
-            old_field = np.asarray(self[field], dtype="=f8")
-            # We'll use the ghost zone routine, which will naturally
-            # extrapolate here.
-            input_left = np.array([0.5, 0.5, 0.5], dtype="float64")
-            output_left = np.array([0.0, 0.0, 0.0], dtype="float64")
-            # rf = 1 here
-            ghost_zone_interpolate(1, old_field, input_left,
-                                   new_field, output_left)
+            for field in fields:
+                # Ensure we have the native endianness in this array.  Avoid making
+                # a copy if possible.
+                old_field = np.asarray(self[field], dtype="=f8")
+                # We'll use the ghost zone routine, which will naturally
+                # extrapolate here.
+                input_left = np.array([0.5, 0.5, 0.5], dtype="float64")
+                output_left = np.array([0.0, 0.0, 0.0], dtype="float64")
+                # rf = 1 here
+                ghost_zone_interpolate(1, old_field, input_left,
+                                       new_fields[field], output_left)
         else:
-            cg = self.retrieve_ghost_zones(1, field, smoothed=smoothed)
-            np.add(new_field, cg[field][1: ,1: ,1: ], new_field)
-            np.add(new_field, cg[field][:-1,1: ,1: ], new_field)
-            np.add(new_field, cg[field][1: ,:-1,1: ], new_field)
-            np.add(new_field, cg[field][1: ,1: ,:-1], new_field)
-            np.add(new_field, cg[field][:-1,1: ,:-1], new_field)
-            np.add(new_field, cg[field][1: ,:-1,:-1], new_field)
-            np.add(new_field, cg[field][:-1,:-1,1: ], new_field)
-            np.add(new_field, cg[field][:-1,:-1,:-1], new_field)
-            np.multiply(new_field, 0.125, new_field)
+            cg = self.retrieve_ghost_zones(1, fields, smoothed=smoothed)
+            for field in fields:
+                np.add(new_fields[field], cg[field][1: ,1: ,1: ], new_fields[field])
+                np.add(new_fields[field], cg[field][:-1,1: ,1: ], new_fields[field])
+                np.add(new_fields[field], cg[field][1: ,:-1,1: ], new_fields[field])
+                np.add(new_fields[field], cg[field][1: ,1: ,:-1], new_fields[field])
+                np.add(new_fields[field], cg[field][:-1,1: ,:-1], new_fields[field])
+                np.add(new_fields[field], cg[field][1: ,:-1,:-1], new_fields[field])
+                np.add(new_fields[field], cg[field][:-1,:-1,1: ], new_fields[field])
+                np.add(new_fields[field], cg[field][:-1,:-1,:-1], new_fields[field])
+                np.multiply(new_fields[field], 0.125, new_fields[field])
 
-        return new_field
+        return new_fields
 
     def select_icoords(self, dobj):
         mask = self._get_selector_mask(dobj.selector)

diff -r 56c0b53b459c4cb6c6a3bdf67634e142ec4c0f52 -r 9e6996751749e1859cce6dec78fc7cb94d09f745 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -304,10 +304,13 @@
             dds = self.current_vcds[self.current_saved_grids.index(grid)]
         else:
             dds = []
+            vcd = grid.get_vertex_centered_data(self.fields, smoothed=True,
+                                                no_ghost=self.no_ghost)
             for i, field in enumerate(self.fields):
-                vcd = grid.get_vertex_centered_data(field, smoothed=True, no_ghost=self.no_ghost).astype('float64')
-                if self.log_fields[i]: vcd = np.log10(vcd)
-                dds.append(vcd)
+                if self.log_fields[i]:
+                    dds.append(np.log10(vcd[field].astype('float64')))
+                else:
+                    dds.append(vcd[field].astype('float64'))
                 self.current_saved_grids.append(grid)
                 self.current_vcds.append(dds)
 

diff -r 56c0b53b459c4cb6c6a3bdf67634e142ec4c0f52 -r 9e6996751749e1859cce6dec78fc7cb94d09f745 yt/utilities/tests/test_interpolators.py
--- a/yt/utilities/tests/test_interpolators.py
+++ b/yt/utilities/tests/test_interpolators.py
@@ -74,10 +74,10 @@
     ds = fake_random_ds(16)
 
     g = ds.index.grids[0]
+    vec = g.get_vertex_centered_data(['x', 'y', 'z'], no_ghost=True)
     for i, ax in enumerate('xyz'):
         xc = g[ax]
 
-        xv = g.get_vertex_centered_data(ax, no_ghost=True)
         tf = lin.TrilinearFieldInterpolator(xc,
                 (g.LeftEdge[0] + g.dds[0]/2.0,
                     g.RightEdge[0] - g.dds[0]/2.0,
@@ -97,6 +97,6 @@
                                   xz, np.array([0.0, 0.0, 0.0], dtype="f8"))
 
         ii = (lx, ly, lz)[i]
-        yield assert_array_equal, ii, xv
+        yield assert_array_equal, ii, vec[ax]
         yield assert_array_equal, ii, xi
         yield assert_array_equal, ii, xz
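
For reference, a minimal usage sketch of the API after this changeset, mirroring the updated test above (fake_random_ds and the field names are illustrative; this is not code from the commit itself). Passing a list means ghost zones are retrieved once per grid rather than once per field:

    from yt.testing import fake_random_ds

    ds = fake_random_ds(16)
    grid = ds.index.grids[0]

    # A list of fields now returns a dict keyed by field; each value is the
    # vertex-centered array for that field, shaped ActiveDimensions + 1.
    vc_data = grid.get_vertex_centered_data([('gas', 'density'), ('gas', 'velocity_x')],
                                            no_ghost=True)
    density_verts = vc_data[('gas', 'density')]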


https://bitbucket.org/yt_analysis/yt/commits/748e83119dd9/
Changeset:   748e83119dd9
Branch:      yt
User:        xarthisius
Date:        2016-08-24 18:50:29+00:00
Summary:     Retain API compatibility, add DeprecationWarning
Affected #:  2 files

diff -r 9e6996751749e1859cce6dec78fc7cb94d09f745 -r 748e83119dd9ba0b98af888db613d8b69e54e558 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -13,8 +13,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import warnings
 import weakref
 import numpy as np
+from six import string_types
 
 from yt.data_objects.data_containers import \
     YTFieldData, \
@@ -252,6 +254,15 @@
         return cube
 
     def get_vertex_centered_data(self, fields, smoothed=True, no_ghost=False):
+        _old_api = isinstance(fields, string_types)
+        if _old_api:
+            message = (
+                'get_vertex_centered_data() requires list of fields, rather than '
+                'a single string as an argument.'
+            )
+            warnings.warn(message, DeprecationWarning, stacklevel=2)
+            fields = [fields]
+
         # Make sure the field list has only unique entries
         fields = list(set(fields))
         new_fields = {}
@@ -283,6 +294,8 @@
                 np.add(new_fields[field], cg[field][:-1,:-1,:-1], new_fields[field])
                 np.multiply(new_fields[field], 0.125, new_fields[field])
 
+        if _old_api:
+            return new_fields[fields[0]]
         return new_fields
 
     def select_icoords(self, dobj):

diff -r 9e6996751749e1859cce6dec78fc7cb94d09f745 -r 748e83119dd9ba0b98af888db613d8b69e54e558 yt/utilities/tests/test_interpolators.py
--- a/yt/utilities/tests/test_interpolators.py
+++ b/yt/utilities/tests/test_interpolators.py
@@ -1,3 +1,4 @@
+import warnings
 import numpy as np
 
 from yt.testing import \
@@ -100,3 +101,17 @@
         yield assert_array_equal, ii, vec[ax]
         yield assert_array_equal, ii, xi
         yield assert_array_equal, ii, xz
+
+
+def test_get_vertex_centered_data():
+    ds = fake_random_ds(16)
+    g = ds.index.grids[0]
+
+    vec_list = g.get_vertex_centered_data(['density'], no_ghost=True)
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        vec_str = g.get_vertex_centered_data('density', no_ghost=True)
+        assert len(w) == 1
+        assert issubclass(w[-1].category, DeprecationWarning)
+        assert 'requires list of fields' in str(w[-1].message)
+    assert_array_equal(vec_list['density'], vec_str)
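
To illustrate the compatibility shim above: an old single-string call still works but emits a DeprecationWarning and returns a bare array, so call sites migrate by wrapping the field in a list and indexing the returned dict. A minimal before/after sketch (using fake_random_ds, as the tests do):

    from yt.testing import fake_random_ds

    ds = fake_random_ds(16)
    grid = ds.index.grids[0]

    # Old style (deprecated): single string, bare array returned, warning emitted.
    vals_old = grid.get_vertex_centered_data('density', no_ghost=True)

    # New style: list of fields, dict returned, indexed by the field name.
    vals_new = grid.get_vertex_centered_data(['density'], no_ghost=True)['density']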


https://bitbucket.org/yt_analysis/yt/commits/7eb2ba55b84d/
Changeset:   7eb2ba55b84d
Branch:      yt
User:        xarthisius
Date:        2016-08-24 18:53:37+00:00
Summary:     Field can be a tuple
Affected #:  2 files

diff -r 748e83119dd9ba0b98af888db613d8b69e54e558 -r 7eb2ba55b84d117f2b79b9ef6429205723a7ddae yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -254,11 +254,11 @@
         return cube
 
     def get_vertex_centered_data(self, fields, smoothed=True, no_ghost=False):
-        _old_api = isinstance(fields, string_types)
+        _old_api = isinstance(fields, (string_types, tuple))
         if _old_api:
             message = (
                 'get_vertex_centered_data() requires list of fields, rather than '
-                'a single string as an argument.'
+                'a single field as an argument.'
             )
             warnings.warn(message, DeprecationWarning, stacklevel=2)
             fields = [fields]

diff -r 748e83119dd9ba0b98af888db613d8b69e54e558 -r 7eb2ba55b84d117f2b79b9ef6429205723a7ddae yt/utilities/tests/test_interpolators.py
--- a/yt/utilities/tests/test_interpolators.py
+++ b/yt/utilities/tests/test_interpolators.py
@@ -107,11 +107,13 @@
     ds = fake_random_ds(16)
     g = ds.index.grids[0]
 
-    vec_list = g.get_vertex_centered_data(['density'], no_ghost=True)
+    vec_list = g.get_vertex_centered_data([('gas', 'density')], no_ghost=True)
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter('always')
         vec_str = g.get_vertex_centered_data('density', no_ghost=True)
         assert len(w) == 1
         assert issubclass(w[-1].category, DeprecationWarning)
         assert 'requires list of fields' in str(w[-1].message)
-    assert_array_equal(vec_list['density'], vec_str)
+    vec_tuple = g.get_vertex_centered_data(('gas', 'density'), no_ghost=True) 
+    assert_array_equal(vec_list[('gas', 'density')], vec_str)
+    assert_array_equal(vec_list[('gas', 'density')], vec_tuple)
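
A short sketch of the tuple handling this changeset adds (again with fake_random_ds, as in the test): a bare field tuple is treated as one old-style field and returns a plain array with a DeprecationWarning, while wrapping it in a list selects the new dict-returning API.

    from numpy.testing import assert_array_equal
    from yt.testing import fake_random_ds

    ds = fake_random_ds(16)
    g = ds.index.grids[0]

    # Bare tuple -> old API path: DeprecationWarning, bare array.
    arr = g.get_vertex_centered_data(('gas', 'density'), no_ghost=True)

    # List of tuples -> new API path: dict keyed by the field tuple.
    dct = g.get_vertex_centered_data([('gas', 'density')], no_ghost=True)
    assert_array_equal(arr, dct[('gas', 'density')])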


https://bitbucket.org/yt_analysis/yt/commits/efbc6a51b132/
Changeset:   efbc6a51b132
Branch:      yt
User:        ngoldbaum
Date:        2016-09-07 18:31:08+00:00
Summary:     Merged in xarthisius/yt (pull request #2341)

[opt] Pass a list of fields instead of a single field to get_vertex_centered_data()
Affected #:  5 files

diff -r c81f66373903260f489c508f89f95f2196cd265e -r efbc6a51b1322148be1f956bc2e3b1f73a5b9198 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -1132,12 +1132,15 @@
                                        mask, sample_values = None,
                                        sample_type = "face",
                                        no_ghost = False):
-        vals = grid.get_vertex_centered_data(field, no_ghost = no_ghost)
+        # TODO: check if multiple fields can be passed here
+        vals = grid.get_vertex_centered_data([field], no_ghost=no_ghost)[field]
         if sample_values is not None:
-            svals = grid.get_vertex_centered_data(sample_values)
+            # TODO: is no_ghost=False correct here?
+            svals = grid.get_vertex_centered_data([sample_values])[sample_values]
         else:
             svals = None
-        sample_type = {"face":1, "vertex":2}[sample_type]
+
+        sample_type = {"face": 1, "vertex": 2}[sample_type]
         my_verts = march_cubes_grid(value, vals, mask, grid.LeftEdge,
                                     grid.dds, svals, sample_type)
         return my_verts
@@ -1209,15 +1212,21 @@
 
     def _calculate_flux_in_grid(self, grid, mask,
             field_x, field_y, field_z, fluxing_field = None):
-        vals = grid.get_vertex_centered_data(self.surface_field)
+
+        vc_fields = [self.surface_field, field_x, field_y, field_z]
+        if fluxing_field is not None:
+            vc_fields.append(fluxing_field)
+
+        vc_data = grid.get_vertex_centered_data(vc_fields)
         if fluxing_field is None:
-            ff = np.ones(vals.shape, dtype="float64")
+            ff = np.ones_like(vc_data[self.surface_field], dtype="float64")
         else:
-            ff = grid.get_vertex_centered_data(fluxing_field)
-        xv, yv, zv = [grid.get_vertex_centered_data(f)
-                      for f in [field_x, field_y, field_z]]
-        return march_cubes_grid_flux(self.field_value, vals, xv, yv, zv,
-                    ff, mask, grid.LeftEdge, grid.dds)
+            ff = vc_data[fluxing_field]
+
+        return march_cubes_grid_flux(
+            self.field_value, vc_data[self.surface_field], vc_data[field_x],
+            vc_data[field_y], vc_data[field_z], ff, mask, grid.LeftEdge,
+            grid.dds)
 
     @property
     def triangles(self):

diff -r c81f66373903260f489c508f89f95f2196cd265e -r efbc6a51b1322148be1f956bc2e3b1f73a5b9198 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1696,13 +1696,18 @@
 
     def _extract_isocontours_from_grid(self, grid, mask, field, value,
                                        sample_values=None):
-        vals = grid.get_vertex_centered_data(field, no_ghost=False)
+        vc_fields = [field]
         if sample_values is not None:
-            svals = grid.get_vertex_centered_data(sample_values)
-        else:
+            vc_fields.append(sample_values)
+
+        vc_data = grid.get_vertex_centered_data(vc_fields, no_ghost=False)
+        try:
+            svals = vc_data[sample_values]
+        except KeyError:
             svals = None
-        my_verts = march_cubes_grid(value, vals, mask, grid.LeftEdge,
-                                    grid.dds, svals)
+
+        my_verts = march_cubes_grid(value, vc_data[field], mask,
+            grid.LeftEdge, grid.dds, svals)
         return my_verts
 
     def calculate_isocontour_flux(self, field, value,
@@ -1774,15 +1779,21 @@
 
     def _calculate_flux_in_grid(self, grid, mask, field, value,
                     field_x, field_y, field_z, fluxing_field = None):
-        vals = grid.get_vertex_centered_data(field)
+        
+        vc_fields = [field, field_x, field_y, field_z]
+        if fluxing_field is not None:
+            vc_fields.append(fluxing_field)
+
+        vc_data = grid.get_vertex_centered_data(vc_fields)
+
         if fluxing_field is None:
-            ff = np.ones(vals.shape, dtype="float64")
+            ff = np.ones_like(vc_data[field], dtype="float64")
         else:
-            ff = grid.get_vertex_centered_data(fluxing_field)
-        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in
-                     [field_x, field_y, field_z]]
-        return march_cubes_grid_flux(value, vals, xv, yv, zv,
-                    ff, mask, grid.LeftEdge, grid.dds)
+            ff = vc_data[fluxing_field]
+
+        return march_cubes_grid_flux(value, vc_data[field], vc_data[field_x],
+            vc_data[field_y], vc_data[field_z], ff, mask, grid.LeftEdge,
+            grid.dds)
 
     def extract_connected_sets(self, field, num_levels, min_val, max_val,
                                log_space=True, cumulative=True):

diff -r c81f66373903260f489c508f89f95f2196cd265e -r efbc6a51b1322148be1f956bc2e3b1f73a5b9198 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -13,8 +13,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import warnings
 import weakref
 import numpy as np
+from six import string_types
 
 from yt.data_objects.data_containers import \
     YTSelectionContainer
@@ -252,33 +254,50 @@
         cube._base_grid = self
         return cube
 
-    def get_vertex_centered_data(self, field, smoothed=True, no_ghost=False):
-        new_field = np.zeros(self.ActiveDimensions + 1, dtype='float64')
+    def get_vertex_centered_data(self, fields, smoothed=True, no_ghost=False):
+        _old_api = isinstance(fields, (string_types, tuple))
+        if _old_api:
+            message = (
+                'get_vertex_centered_data() requires list of fields, rather than '
+                'a single field as an argument.'
+            )
+            warnings.warn(message, DeprecationWarning, stacklevel=2)
+            fields = [fields]
+
+        # Make sure the field list has only unique entries
+        fields = list(set(fields))
+        new_fields = {}
+        for field in fields:
+            new_fields[field] = np.zeros(self.ActiveDimensions + 1, dtype='float64')
 
         if no_ghost:
-            # Ensure we have the native endianness in this array.  Avoid making
-            # a copy if possible.
-            old_field = np.asarray(self[field], dtype="=f8")
-            # We'll use the ghost zone routine, which will naturally
-            # extrapolate here.
-            input_left = np.array([0.5, 0.5, 0.5], dtype="float64")
-            output_left = np.array([0.0, 0.0, 0.0], dtype="float64")
-            # rf = 1 here
-            ghost_zone_interpolate(1, old_field, input_left,
-                                   new_field, output_left)
+            for field in fields:
+                # Ensure we have the native endianness in this array.  Avoid making
+                # a copy if possible.
+                old_field = np.asarray(self[field], dtype="=f8")
+                # We'll use the ghost zone routine, which will naturally
+                # extrapolate here.
+                input_left = np.array([0.5, 0.5, 0.5], dtype="float64")
+                output_left = np.array([0.0, 0.0, 0.0], dtype="float64")
+                # rf = 1 here
+                ghost_zone_interpolate(1, old_field, input_left,
+                                       new_fields[field], output_left)
         else:
-            cg = self.retrieve_ghost_zones(1, field, smoothed=smoothed)
-            np.add(new_field, cg[field][1: ,1: ,1: ], new_field)
-            np.add(new_field, cg[field][:-1,1: ,1: ], new_field)
-            np.add(new_field, cg[field][1: ,:-1,1: ], new_field)
-            np.add(new_field, cg[field][1: ,1: ,:-1], new_field)
-            np.add(new_field, cg[field][:-1,1: ,:-1], new_field)
-            np.add(new_field, cg[field][1: ,:-1,:-1], new_field)
-            np.add(new_field, cg[field][:-1,:-1,1: ], new_field)
-            np.add(new_field, cg[field][:-1,:-1,:-1], new_field)
-            np.multiply(new_field, 0.125, new_field)
+            cg = self.retrieve_ghost_zones(1, fields, smoothed=smoothed)
+            for field in fields:
+                np.add(new_fields[field], cg[field][1: ,1: ,1: ], new_fields[field])
+                np.add(new_fields[field], cg[field][:-1,1: ,1: ], new_fields[field])
+                np.add(new_fields[field], cg[field][1: ,:-1,1: ], new_fields[field])
+                np.add(new_fields[field], cg[field][1: ,1: ,:-1], new_fields[field])
+                np.add(new_fields[field], cg[field][:-1,1: ,:-1], new_fields[field])
+                np.add(new_fields[field], cg[field][1: ,:-1,:-1], new_fields[field])
+                np.add(new_fields[field], cg[field][:-1,:-1,1: ], new_fields[field])
+                np.add(new_fields[field], cg[field][:-1,:-1,:-1], new_fields[field])
+                np.multiply(new_fields[field], 0.125, new_fields[field])
 
-        return new_field
+        if _old_api:
+            return new_fields[fields[0]]
+        return new_fields
 
     def select_icoords(self, dobj):
         mask = self._get_selector_mask(dobj.selector)

diff -r c81f66373903260f489c508f89f95f2196cd265e -r efbc6a51b1322148be1f956bc2e3b1f73a5b9198 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -304,10 +304,13 @@
             dds = self.current_vcds[self.current_saved_grids.index(grid)]
         else:
             dds = []
+            vcd = grid.get_vertex_centered_data(self.fields, smoothed=True,
+                                                no_ghost=self.no_ghost)
             for i, field in enumerate(self.fields):
-                vcd = grid.get_vertex_centered_data(field, smoothed=True, no_ghost=self.no_ghost).astype('float64')
-                if self.log_fields[i]: vcd = np.log10(vcd)
-                dds.append(vcd)
+                if self.log_fields[i]:
+                    dds.append(np.log10(vcd[field].astype('float64')))
+                else:
+                    dds.append(vcd[field].astype('float64'))
                 self.current_saved_grids.append(grid)
                 self.current_vcds.append(dds)
 

diff -r c81f66373903260f489c508f89f95f2196cd265e -r efbc6a51b1322148be1f956bc2e3b1f73a5b9198 yt/utilities/tests/test_interpolators.py
--- a/yt/utilities/tests/test_interpolators.py
+++ b/yt/utilities/tests/test_interpolators.py
@@ -1,3 +1,4 @@
+import warnings
 import numpy as np
 
 from yt.testing import \
@@ -74,10 +75,10 @@
     ds = fake_random_ds(16)
 
     g = ds.index.grids[0]
+    vec = g.get_vertex_centered_data(['x', 'y', 'z'], no_ghost=True)
     for i, ax in enumerate('xyz'):
         xc = g[ax]
 
-        xv = g.get_vertex_centered_data(ax, no_ghost=True)
         tf = lin.TrilinearFieldInterpolator(xc,
                 (g.LeftEdge[0] + g.dds[0]/2.0,
                     g.RightEdge[0] - g.dds[0]/2.0,
@@ -97,6 +98,22 @@
                                   xz, np.array([0.0, 0.0, 0.0], dtype="f8"))
 
         ii = (lx, ly, lz)[i]
-        yield assert_array_equal, ii, xv
+        yield assert_array_equal, ii, vec[ax]
         yield assert_array_equal, ii, xi
         yield assert_array_equal, ii, xz
+
+
+def test_get_vertex_centered_data():
+    ds = fake_random_ds(16)
+    g = ds.index.grids[0]
+
+    vec_list = g.get_vertex_centered_data([('gas', 'density')], no_ghost=True)
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        vec_str = g.get_vertex_centered_data('density', no_ghost=True)
+        assert len(w) == 1
+        assert issubclass(w[-1].category, DeprecationWarning)
+        assert 'requires list of fields' in str(w[-1].message)
+    vec_tuple = g.get_vertex_centered_data(('gas', 'density'), no_ghost=True) 
+    assert_array_equal(vec_list[('gas', 'density')], vec_str)
+    assert_array_equal(vec_list[('gas', 'density')], vec_tuple)
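
As a usage note on the merged optimization, here is a sketch of the batched consumer pattern the kd-tree change above switches to: one get_vertex_centered_data() call collects every vertex-centered field, then per-field post-processing (log scaling here) works on the returned dict. The dataset, field list, and log flags below are illustrative, not taken from the diff.

    import numpy as np
    from yt.testing import fake_random_ds

    ds = fake_random_ds(16)
    grid = ds.index.grids[0]

    fields = [('gas', 'density'), ('gas', 'velocity_x')]
    log_fields = [True, False]

    # One batched call replaces one get_vertex_centered_data() call per field.
    vcd = grid.get_vertex_centered_data(fields, smoothed=True, no_ghost=True)
    dds = []
    for field, take_log in zip(fields, log_fields):
        arr = vcd[field].astype('float64')
        dds.append(np.log10(arr) if take_log else arr)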

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


