[yt-svn] commit/yt: 10 new changesets

Bitbucket commits-noreply at bitbucket.org
Tue Oct 30 13:34:35 PDT 2012


10 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/92234a1bc9ec/
changeset:   92234a1bc9ec
branch:      yt
user:        samskillman
date:        2012-10-26 00:46:53
summary:     Move FRBs to creating ImageArrays, decorate with basic information.
affected #:  1 file

diff -r 56c2d60a99c72bb9cf58f1c1f264787999ba7c01 -r 92234a1bc9ec7e60b862e053703ae0b22aa6952c yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -29,6 +29,7 @@
     y_dict, \
     axis_names
 from .volume_rendering.api import off_axis_projection
+from yt.data_objects.image_array import ImageArray
 import _MPL
 import numpy as np
 import weakref
@@ -133,8 +134,9 @@
                              self.bounds, int(self.antialias),
                              self._period, int(self.periodic),
                              ).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia
 
     def __setitem__(self, item, val):
         self.data[item] = val
@@ -145,6 +147,28 @@
             if f not in exclude:
                 self[f]
 
+    def _get_info(self, item):
+        info = {}
+        info['data_source'] = str(self.data_source)
+        info['axis'] = self.data_source.axis
+        info['field'] = str(item)
+        info['units'] = self.data_source.pf.field_info[item].get_units()
+        info['xlim'] = self.bounds[:2]
+        info['ylim'] = self.bounds[2:]
+        info['length_to_cm'] = self.data_source.pf['cm']
+        info['projected_units'] = \
+                self.data_source.pf.field_info[item].get_projected_units()
+        info['center'] = self.data_source.center
+        try:
+            info['coord'] = self.data_source.coord
+        except AttributeError:
+            pass
+        try:
+            info['weight_field'] = self.data_source.weight_field
+        except AttributeError:
+            pass
+        return info
+
     def convert_to_pixel(self, coords):
         r"""This function converts coordinates in code-space to pixel-space.
 

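A minimal sketch of the new behavior (the dataset path and field are placeholders; FixedResolutionBuffer is the class defined in the module patched above): frb[field] now comes back as an ImageArray whose .info dict carries the metadata assembled by _get_info().

    from yt.mods import load
    from yt.visualization.fixed_resolution import FixedResolutionBuffer

    pf = load("DD0010/moving7_0010")     # any dataset will do
    sl = pf.h.slice(0, 0.5)              # slice along x through the center
    frb = FixedResolutionBuffer(sl, (0.0, 1.0, 0.0, 1.0), (256, 256))
    img = frb["Density"]                 # an ImageArray now, not a bare ndarray
    print img.info['axis'], img.info['units'], img.info['xlim']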


https://bitbucket.org/yt_analysis/yt/changeset/f06057cc09ed/
changeset:   f06057cc09ed
branch:      yt
user:        samskillman
date:        2012-10-26 00:48:20
summary:     Fix up streamlines a bit.
affected #:  2 files

diff -r 92234a1bc9ec7e60b862e053703ae0b22aa6952c -r f06057cc09ed9769046b349914ebeb60475ee5d3 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -749,7 +749,7 @@
         self.dts = np.empty_like(positions[:,0])
         self.dts[:-1] = np.sqrt(np.sum((self.positions[1:]-
                                         self.positions[:-1])**2,axis=1))
-        self.dts[-1] = self.dts[-1]
+        self.dts[-1] = self.dts[-2]
         self.ts = np.add.accumulate(self.dts)
         self._set_center(self.positions[0])
         self.set_field_parameter('center', self.positions[0])


diff -r 92234a1bc9ec7e60b862e053703ae0b22aa6952c -r f06057cc09ed9769046b349914ebeb60475ee5d3 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -118,7 +118,9 @@
         if length is None:
             length = np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
         self.length = length
-        self.steps = int(length/dx)
+        self.steps = int(length/dx)+1
+        # Fix up the dx.
+        self.dx = 1.0*self.length/self.steps
         self.streamlines = np.zeros((self.N,self.steps,3), dtype='float64')
         self.magnitudes = None
         if self.get_magnitude:

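Why both hunks: in data_containers.py the old line was a self-assignment no-op, so the final segment length is now copied from its neighbor instead. In streamlines.py, int(length/dx) can undershoot the requested length, so the step count is rounded up and dx is recomputed to tile the length exactly. A worked example with illustrative numbers:

    length, dx = 1.0, 0.3
    int(length / dx)              # 3 steps cover only 0.9 of the length
    steps = int(length / dx) + 1  # 4 steps, as in the fix
    dx = 1.0 * length / steps     # 0.25, and 4 * 0.25 == 1.0 exactly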


https://bitbucket.org/yt_analysis/yt/changeset/24c207c7f548/
changeset:   24c207c7f548
branch:      yt
user:        samskillman
date:        2012-10-26 00:48:33
summary:     Merging
affected #:  20 files



diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 setup.py
--- a/setup.py
+++ b/setup.py
@@ -154,7 +154,11 @@
             'amr adaptivemeshrefinement',
         entry_points={'console_scripts': [
                             'yt = yt.utilities.command_line:run_main',
-                       ]},
+                      ],
+                      'nose.plugins.0.10': [
+                            'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
+                      ]
+        },
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
         url="http://yt-project.org/",

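With this entry point registered (it takes effect after reinstalling yt), nose discovers the answer-testing plugin. A sketch of the invocation, assuming nose's standard --with-<plugin-name> enabling switch:

    nosetests --with-answer-testing yt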

diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -54,6 +54,7 @@
     pasteboard_repo = '',
     reconstruct_hierarchy = 'False',
     test_storage_dir = '/does/not/exist',
+    test_data_dir = '/does/not/exist',
     enzo_db = '',
     hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',

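This new option points the answer-testing machinery at a local directory of test datasets (see data_dir_load/can_run_pf later in this batch). A hedged sketch of setting it in the [yt] section of the yt configuration file (~/.yt/config in this era), with a hypothetical path:

    [yt]
    test_data_dir = /data/yt_test_data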

diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -5,7 +5,7 @@
 Affiliation: KIPAC/SLAC/Stanford
 Author: Britton Smith <Britton.Smith at colorado.edu>
 Affiliation: University of Colorado at Boulder
-Author: Geoffrey So <gsiisg at gmail.com> (AMREllipsoidBase)
+Author: Geoffrey So <gsiisg at gmail.com>
 Affiliation: UCSD Physics/CASS
 Homepage: http://yt-project.org/
 License:
@@ -71,7 +71,7 @@
 def force_array(item, shape):
     try:
         sh = item.shape
-        return item
+        return item.copy()
     except AttributeError:
         if item:
             return np.ones(shape, dtype='bool')
@@ -3502,10 +3502,7 @@
         for gi, g in enumerate(grids): self._grids[gi] = g
 
     def _is_fully_enclosed(self, grid):
-        r = np.abs(grid._corners - self.center)
-        r = np.minimum(r, np.abs(self.DW[None,:]-r))
-        corner_radius = np.sqrt((r**2.0).sum(axis=1))
-        return np.all(corner_radius <= self.radius)
+        return False
 
     @restore_grid_state # Pains me not to decorate with cache_mask here
     def _get_cut_mask(self, grid, field=None):
@@ -3531,17 +3528,45 @@
                  pf=None, **kwargs):
         """
         By providing a *center*,*A*,*B*,*C*,*e0*,*tilt* we
-        can define a ellipsoid of any proportion.  Only cells whose centers are
-        within the ellipsoid will be selected.
+        can define an ellipsoid of any proportion.  Only cells whose
+        centers are within the ellipsoid will be selected.
+
+        Parameters
+        ----------
+        center : array_like
+            The center of the ellipsoid.
+        A : float
+            The magnitude of the largest semi-axis of the ellipsoid.
+        B : float
+            The magnitude of the medium semi-axis of the ellipsoid.
+        C : float
+            The magnitude of the smallest semi-axis of the ellipsoid.
+        e0 : array_like (automatically normalized)
+            The direction of the largest semi-axis of the ellipsoid.
+        tilt : float
+            After rotating about the z-axis to align e0 with x in the x-y
+            plane, and then rotating about the y-axis to align e0 completely
+            with the x-axis, tilt is the angle in radians remaining to
+            rotate about the x-axis to align both e1 with the y-axis and e2
+            with the z-axis.
+        Examples
+        --------
+        >>> pf = load("DD####/DD####")
+        >>> c = [0.5,0.5,0.5]
+        >>> ell = pf.h.ellipsoid(c, 0.1, 0.1, 0.1, np.array([0.1, 0.1, 0.1]), 0.2)
         """
+
         AMR3DData.__init__(self, np.array(center), fields, pf, **kwargs)
+        # make sure the magnitudes of semi-major axes are in order
+        if A<B or B<C:
+            raise YTEllipsoidOrdering(pf, A, B, C)
         # make sure the smallest side is not smaller than dx
         if C < self.hierarchy.get_smallest_dx():
             raise YTSphereTooSmall(pf, C, self.hierarchy.get_smallest_dx())
         self._A = A
         self._B = B
         self._C = C
-        self._e0 = e0
+        self._e0 = e0 = e0 / (e0**2.0).sum()**0.5
         self._tilt = tilt
         
         # find the t1 angle needed to rotate about z axis to align e0 to x


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/data_objects/tests/test_boolean_regions.py
--- /dev/null
+++ b/yt/data_objects/tests/test_boolean_regions.py
@@ -0,0 +1,353 @@
+from yt.testing import *
+from yt.data_objects.api import add_field
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+    def _ID(field, data):
+        width = data.pf.domain_right_edge - data.pf.domain_left_edge
+        min_dx = 1.0/8192
+        delta = width / min_dx
+        x = data['x'] - min_dx / 2.
+        y = data['y'] - min_dx / 2.
+        z = data['z'] - min_dx / 2.
+        xi = x / min_dx
+        yi = y / min_dx
+        zi = z / min_dx
+        index = xi + delta[0] * (yi + delta[1] * zi)
+        index = index.astype('int64')
+        return index
+
+    add_field("ID", function=_ID)
+
+def test_boolean_spheres_no_overlap():
+    r"""Test to make sure that boolean objects (spheres, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping spheres. This also checks that the original spheres
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        sp1 = pf.h.sphere([0.25, 0.25, 0.25], 0.15)
+        sp2 = pf.h.sphere([0.75, 0.75, 0.75], 0.15)
+        # Store the original indices
+        i1 = sp1['ID']
+        i1.sort()
+        i2 = sp2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([sp1, "AND", sp2]) # empty
+        bo2 = pf.h.boolean([sp1, "NOT", sp2]) # only sp1
+        bo3 = pf.h.boolean([sp1, "OR", sp2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = sp1['ID']
+        new_i1.sort()
+        new_i2 = sp2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+ 
+def test_boolean_spheres_overlap():
+    r"""Test to make sure that boolean objects (spheres, overlap)
+    behave the way we expect.
+
+    Test overlapping spheres.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        sp1 = pf.h.sphere([0.45, 0.45, 0.45], 0.15)
+        sp2 = pf.h.sphere([0.55, 0.55, 0.55], 0.15)
+        # Get indices of both.
+        i1 = sp1['ID']
+        i2 = sp2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([sp1, "AND", sp2]) # overlap (a lens)
+        bo2 = pf.h.boolean([sp1, "NOT", sp2]) # sp1 - sp2 (sphere with bite)
+        bo3 = pf.h.boolean([sp1, "OR", sp2]) # combination (H2)
+        # Now make sure the indices also behave as we expect.
+        lens = np.intersect1d(i1, i2)
+        apple = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, lens
+        yield assert_array_equal, b2, apple
+        yield assert_array_equal, b3, both
+
+def test_boolean_regions_no_overlap():
+    r"""Test to make sure that boolean objects (regions, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping regions. This also checks that the original regions
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re1 = pf.h.region([0.25]*3, [0.2]*3, [0.3]*3)
+        re2 = pf.h.region([0.65]*3, [0.6]*3, [0.7]*3)
+        # Store the original indices
+        i1 = re1['ID']
+        i1.sort()
+        i2 = re2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([re1, "AND", re2]) # empty
+        bo2 = pf.h.boolean([re1, "NOT", re2]) # only re1
+        bo3 = pf.h.boolean([re1, "OR", re2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = re1['ID']
+        new_i1.sort()
+        new_i2 = re2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1 
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_regions_overlap():
+    r"""Test to make sure that boolean objects (regions, overlap)
+    behave the way we expect.
+
+    Test overlapping regions.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re1 = pf.h.region([0.55]*3, [0.5]*3, [0.6]*3)
+        re2 = pf.h.region([0.6]*3, [0.55]*3, [0.65]*3)
+        # Get indices of both.
+        i1 = re1['ID']
+        i2 = re2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([re1, "AND", re2]) # overlap (small cube)
+        bo2 = pf.h.boolean([re1, "NOT", re2]) # re1 - re2 (large cube with bite)
+        bo3 = pf.h.boolean([re1, "OR", re2]) # combination (merged large cubes)
+        # Now make sure the indices also behave as we expect.
+        cube = np.intersect1d(i1, i2)
+        bite_cube = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, cube
+        yield assert_array_equal, b2, bite_cube
+        yield assert_array_equal, b3, both
+
+def test_boolean_cylinders_no_overlap():
+    r"""Test to make sure that boolean objects (cylinders, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping cylinders. This also checks that the original cylinders
+    don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        cyl1 = pf.h.disk([0.25]*3, [1, 0, 0], 0.1, 0.1)
+        cyl2 = pf.h.disk([0.75]*3, [1, 0, 0], 0.1, 0.1)
+        # Store the original indices
+        i1 = cyl1['ID']
+        i1.sort()
+        i2 = cyl2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([cyl1, "AND", cyl2]) # empty
+        bo2 = pf.h.boolean([cyl1, "NOT", cyl2]) # only cyl1
+        bo3 = pf.h.boolean([cyl1, "OR", cyl2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = cyl1['ID']
+        new_i1.sort()
+        new_i2 = cyl2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_cylinders_overlap():
+    r"""Test to make sure that boolean objects (cylinders, overlap)
+    behave the way we expect.
+
+    Test overlapping cylinders.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        cyl1 = pf.h.disk([0.45]*3, [1, 0, 0], 0.2, 0.2)
+        cyl2 = pf.h.disk([0.55]*3, [1, 0, 0], 0.2, 0.2)
+        # Get indices of both.
+        i1 = cyl1['ID']
+        i2 = cyl2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([cyl1, "AND", cyl2]) # overlap (vertically extended lens)
+        bo2 = pf.h.boolean([cyl1, "NOT", cyl2]) # cyl1 - cyl2 (disk minus a bite)
+        bo3 = pf.h.boolean([cyl1, "OR", cyl2]) # combination (merged disks)
+        # Now make sure the indices also behave as we expect.
+        vlens = np.intersect1d(i1, i2)
+        bite_disk = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, vlens
+        yield assert_array_equal, b2, bite_disk
+        yield assert_array_equal, b3, both
+
+def test_boolean_ellipsoids_no_overlap():
+    r"""Test to make sure that boolean objects (ellipsoids, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping ellipsoids. This also checks that the original
+    ellipsoids don't change as part of constructing the booleans.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        ell1 = pf.h.ellipsoid([0.25]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        ell2 = pf.h.ellipsoid([0.75]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        # Store the original indices
+        i1 = ell1['ID']
+        i1.sort()
+        i2 = ell2['ID']
+        i2.sort()
+        ii = np.concatenate((i1, i2))
+        ii.sort()
+        # Make some booleans
+        bo1 = pf.h.boolean([ell1, "AND", ell2]) # empty
+        bo2 = pf.h.boolean([ell1, "NOT", ell2]) # only ell1
+        bo3 = pf.h.boolean([ell1, "OR", ell2]) # combination
+        # This makes sure the original containers didn't change.
+        new_i1 = ell1['ID']
+        new_i1.sort()
+        new_i2 = ell2['ID']
+        new_i2.sort()
+        yield assert_array_equal, new_i1, i1 
+        yield assert_array_equal, new_i2, i2
+        # Now make sure the indices also behave as we expect.
+        empty = np.array([])
+        yield assert_array_equal, bo1['ID'], empty
+        b2 = bo2['ID']
+        b2.sort()
+        yield assert_array_equal, b2, i1
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b3, ii
+
+def test_boolean_ellipsoids_overlap():
+    r"""Test to make sure that boolean objects (ellipsoids, overlap)
+    behave the way we expect.
+
+    Test overlapping ellipsoids.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        ell1 = pf.h.ellipsoid([0.45]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        ell2 = pf.h.ellipsoid([0.55]*3, 0.05, 0.05, 0.05, np.array([0.1]*3),
+            np.array([0.1]*3))
+        # Get indices of both.
+        i1 = ell1['ID']
+        i2 = ell2['ID']
+        # Make some booleans
+        bo1 = pf.h.boolean([ell1, "AND", ell2]) # overlap
+        bo2 = pf.h.boolean([ell1, "NOT", ell2]) # ell1 - ell2
+        bo3 = pf.h.boolean([ell1, "OR", ell2]) # combination
+        # Now make sure the indices also behave as we expect.
+        overlap = np.intersect1d(i1, i2)
+        diff = np.setdiff1d(i1, i2)
+        both = np.union1d(i1, i2)
+        b1 = bo1['ID']
+        b1.sort()
+        b2 = bo2['ID']
+        b2.sort()
+        b3 = bo3['ID']
+        b3.sort()
+        yield assert_array_equal, b1, overlap
+        yield assert_array_equal, b2, diff
+        yield assert_array_equal, b3, both
+
+def test_boolean_mix_periodicity():
+    r"""Test that a hybrid boolean region behaves as we expect.
+
+    This also tests nested logic and that periodicity works.
+    """
+    for n in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=n)
+        pf.h
+        re = pf.h.region([0.5]*3, [0.0]*3, [1]*3) # whole thing
+        sp = pf.h.sphere([0.95]*3, 0.3) # wraps around
+        cyl = pf.h.disk([0.05]*3, [1,1,1], 0.1, 0.4) # wraps around
+        # Get original indices
+        rei = re['ID']
+        spi = sp['ID']
+        cyli = cyl['ID']
+        # Make some booleans
+        # whole box minus spherical bites at corners
+        bo1 = pf.h.boolean([re, "NOT", sp])
+        # sphere plus cylinder
+        bo2 = pf.h.boolean([sp, "OR", cyl])
+        # a jumble, the region minus the sp+cyl
+        bo3 = pf.h.boolean([re, "NOT", "(", sp, "OR", cyl, ")"])
+        # Now make sure the indices also behave as we expect.
+        expect = np.setdiff1d(rei, spi)
+        ii = bo1['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+        #
+        expect = np.union1d(spi, cyli)
+        ii = bo2['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+        #
+        expect = np.union1d(spi, cyli)
+        expect = np.setdiff1d(rei, expect)
+        ii = bo3['ID']
+        ii.sort()
+        yield assert_array_equal, expect, ii
+

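Two notes on the machinery these tests lean on. The _ID field from setup() gives every cell a unique integer index, so membership can be checked with plain set operations (np.intersect1d, np.setdiff1d, np.union1d). The boolean grammar itself, sketched in one place (containers are operands; "AND", "OR", "NOT", "(", ")" are literal strings, and parentheses nest, as in test_boolean_mix_periodicity above):

    sp = pf.h.sphere([0.5]*3, 0.25)
    re = pf.h.region([0.5]*3, [0.4]*3, [0.6]*3)
    bo = pf.h.boolean([re, "NOT", "(", sp, "AND", re, ")"])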

diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/data_objects/tests/test_ellipsoid.py
--- /dev/null
+++ b/yt/data_objects/tests/test_ellipsoid.py
@@ -0,0 +1,35 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","loglevel"] = "50"
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_ellipsoid():
+    # We decompose in different ways
+    cs = [np.array([0.5, 0.5, 0.5]),
+          np.array([0.1, 0.2, 0.3]),
+          np.array([0.8, 0.8, 0.8])]
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs)
+        min_dx = 2.0/pf.domain_dimensions
+        ABC = np.random.random((3, 12)) * 0.1
+        e0s = np.random.random((3, 12))
+        tilts = np.random.random(12)
+        ABC[:,0] = 0.1
+        for i in range(12):
+            for c in cs:
+                A, B, C = reversed(sorted(ABC[:,i]))
+                A = max(A, min_dx[0])
+                B = max(B, min_dx[1])
+                C = max(C, min_dx[2])
+                e0 = e0s[:,i]
+                tilt = tilts[i]
+                ell = pf.h.ellipsoid(c, A, B, C, e0, tilt)
+                yield assert_equal, np.all(ell["Radius"] <= A), True
+                p = np.array([ell[ax] for ax in 'xyz'])
+                v  = np.zeros_like(ell["Radius"])
+                v += (((p - c[:,None]) * ell._e0[:,None]).sum(axis=0) / ell._A)**2
+                v += (((p - c[:,None]) * ell._e1[:,None]).sum(axis=0) / ell._B)**2
+                v += (((p - c[:,None]) * ell._e2[:,None]).sum(axis=0) / ell._C)**2
+                yield assert_equal, np.all(np.sqrt(v) <= 1.0), True


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/data_objects/tests/test_extract_regions.py
--- /dev/null
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -0,0 +1,53 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_cut_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        r = dd.cut_region( [ "grid['Temperature'] > 0.5",
+                             "grid['Density'] < 0.75",
+                             "grid['x-velocity'] > 0.25" ])
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        t2 = (r["Temperature"] < 0.75)
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+
+def test_extract_region():
+    # We decompose in different ways
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs,
+            fields = ("Density", "Temperature", "x-velocity"))
+        # We'll test two objects
+        dd = pf.h.all_data()
+        t = ( (dd["Temperature"] > 0.5 ) 
+            & (dd["Density"] < 0.75 )
+            & (dd["x-velocity"] > 0.25 ) )
+        r = dd.extract_region(t)
+        yield assert_equal, np.all(r["Temperature"] > 0.5), True
+        yield assert_equal, np.all(r["Density"] < 0.75), True
+        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
+        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
+        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
+        t2 = (r["Temperature"] < 0.75)
+        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
+        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
+        t3 = (r["Temperature"] < 0.75)
+        r3 = r.extract_region( t3 )
+        yield assert_equal, np.sort(r3["Temperature"]), np.sort(r["Temperature"][t3])
+        yield assert_equal, np.all(r3["Temperature"] < 0.75), True


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/data_objects/tests/test_ortho_rays.py
--- a/yt/data_objects/tests/test_ortho_rays.py
+++ b/yt/data_objects/tests/test_ortho_rays.py
@@ -21,5 +21,5 @@
                    (np.abs(my_all[axes[my_axes[1]]] - ocoord[1]) <= 
                     0.5 * dx[my_axes[1]])
 
-        assert_equal(my_oray['Density'].sum(),
-                     my_all['Density'][my_cells].sum())
+        yield assert_equal, my_oray['Density'].sum(), \
+                            my_all['Density'][my_cells].sum()


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/data_objects/tests/test_rays.py
--- a/yt/data_objects/tests/test_rays.py
+++ b/yt/data_objects/tests/test_rays.py
@@ -1,31 +1,33 @@
 from yt.testing import *
 
 def test_ray():
-    pf = fake_random_pf(64, nprocs=8)
-    dx = (pf.domain_right_edge - pf.domain_left_edge) / \
-      pf.domain_dimensions
+    for nproc in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs=nproc)
+        dx = (pf.domain_right_edge - pf.domain_left_edge) / \
+          pf.domain_dimensions
 
-    p1 = np.random.random(3)
-    p2 = np.random.random(3)
+        p1 = np.random.random(3)
+        p2 = np.random.random(3)
 
-    my_ray = pf.h.ray(p1, p2)
-    assert_rel_equal(my_ray['dts'].sum(), 1.0, 14)
-    ray_cells = my_ray['dts'] > 0
+        my_ray = pf.h.ray(p1, p2)
+        assert_rel_equal(my_ray['dts'].sum(), 1.0, 14)
+        ray_cells = my_ray['dts'] > 0
 
-    # find cells intersected by the ray
-    my_all = pf.h.all_data()
-    
-    dt = np.abs(dx / (p2 - p1))
-    tin  = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
-                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
-                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
-    tout = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
-                           [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
-                           [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
-    tin = tin.max(axis=0)
-    tout = tout.min(axis=0)
-    my_cells = (tin < tout) & (tin < 1) & (tout > 0)
+        # find cells intersected by the ray
+        my_all = pf.h.all_data()
+        
+        dt = np.abs(dx / (p2 - p1))
+        tin  = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] - 0.5 * dt[0]],
+                               [(my_all['y'] - p1[1]) / (p2 - p1)[1] - 0.5 * dt[1]],
+                               [(my_all['z'] - p1[2]) / (p2 - p1)[2] - 0.5 * dt[2]]])
+        tout = np.concatenate([[(my_all['x'] - p1[0]) / (p2 - p1)[0] + 0.5 * dt[0]],
+                               [(my_all['y'] - p1[1]) / (p2 - p1)[1] + 0.5 * dt[1]],
+                               [(my_all['z'] - p1[2]) / (p2 - p1)[2] + 0.5 * dt[2]]])
+        tin = tin.max(axis=0)
+        tout = tout.min(axis=0)
+        my_cells = (tin < tout) & (tin < 1) & (tout > 0)
 
-    assert_rel_equal(ray_cells.sum(), my_cells.sum(), 14)
-    assert_rel_equal(my_ray['Density'][ray_cells].sum(),
-                     my_all['Density'][my_cells].sum(), 14)
+        yield assert_rel_equal, ray_cells.sum(), my_cells.sum(), 14
+        yield assert_rel_equal, my_ray['Density'][ray_cells].sum(), \
+                                my_all['Density'][my_cells].sum(), 14
+        yield assert_rel_equal, my_ray['dts'].sum(), 1.0, 14

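The membership test above is the standard slab method; restated compactly for one cell (names local to this sketch):

    # Parametrize the ray as p(t) = p1 + t * (p2 - p1), with t in [0, 1].
    # Per axis, the ray lies inside the cell's slab on [tin_ax, tout_ax];
    # the ray hits the cell iff the three intervals overlap within [0, 1]:
    #     tin  = max(tin_x, tin_y, tin_z)
    #     tout = min(tout_x, tout_y, tout_z)
    #     hit  = (tin < tout) and (tin < 1) and (tout > 0)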

diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -258,8 +258,11 @@
 
         """
         if isinstance(filenames, types.StringTypes):
+            pattern = filenames
             filenames = glob.glob(filenames)
             filenames.sort()
+            if len(filenames) == 0:
+                raise YTNoFilenamesMatchPattern(pattern)
         obj = cls(filenames[:], parallel = parallel)
         return obj
 

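A sketch of the guarded path (TimeSeriesData.from_filenames is the classmethod this hunk sits in; the glob pattern is illustrative):

    from yt.mods import TimeSeriesData
    ts = TimeSeriesData.from_filenames("DD*/DD????")
    # An empty glob now raises YTNoFilenamesMatchPattern("DD*/DD????")
    # instead of quietly constructing an empty time series.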

diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/frontends/enzo/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -0,0 +1,51 @@
+"""
+Enzo frontend tests using moving7
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load
+from yt.frontends.enzo.api import EnzoStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
+           "particle_density")
+
+m7 = "DD0010/moving7_0010"
+@requires_pf(m7)
+def test_moving7():
+    pf = data_dir_load(m7)
+    yield assert_equal, str(pf), "moving7_0010"
+    for test in small_patch_amr(m7, _fields):
+        yield test
+
+g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
+@requires_pf(g30, big_data=True)
+def test_galaxy0030():
+    pf = data_dir_load(g30)
+    yield assert_equal, str(pf), "galaxy0030"
+    for test in big_patch_amr(g30, _fields):
+        yield test


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -29,6 +29,15 @@
     assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
 
 def assert_rel_equal(a1, a2, decimals):
+    # We have nan checks in here because occasionally we have fields that get
+    # weighted without non-zero weights.  I'm looking at you, particle fields!
+    if isinstance(a1, np.ndarray):
+        assert(a1.size == a2.size)
+        # Mask out NaNs
+        a1[np.isnan(a1)] = 1.0
+        a2[np.isnan(a2)] = 1.0
+    elif np.isnan(a1) and np.isnan(a2):
+        return True
     return assert_almost_equal(a1/a2, 1.0, decimals)
 
 def amrspace(extent, levels=7, cells=8):

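A small illustration of the new NaN handling, including its one caveat:

    import numpy as np
    from yt.testing import assert_rel_equal

    a = np.array([1.0, np.nan])
    b = np.array([1.0, np.nan])
    assert_rel_equal(a, b, 10)  # passes: NaNs are masked to 1.0 on both sides
    # Caveat: the masking happens in place, so the callers' arrays are mutated.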

diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/utilities/answer_testing/__init__.py
--- a/yt/utilities/answer_testing/__init__.py
+++ b/yt/utilities/answer_testing/__init__.py
@@ -22,10 +22,3 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
-import runner
-import output_tests
-from runner import RegressionTestRunner
-
-from output_tests import RegressionTest, SingleOutputTest, \
-    MultipleOutputTest, YTStaticOutputTest, create_test




diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/utilities/answer_testing/default_tests.py
--- a/yt/utilities/answer_testing/default_tests.py
+++ b/yt/utilities/answer_testing/default_tests.py
@@ -67,3 +67,4 @@
         for field in sorted(self.result):
             for p1, p2 in zip(self.result[field], old_result[field]):
                 self.compare_data_arrays(p1, p2, self.tolerance)
+


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/utilities/answer_testing/framework.py
--- /dev/null
+++ b/yt/utilities/answer_testing/framework.py
@@ -0,0 +1,396 @@
+"""
+Answer Testing using Nose as a starting point
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import logging
+import os
+import hashlib
+import contextlib
+import urllib2
+import cPickle
+
+from nose.plugins import Plugin
+from yt.testing import *
+from yt.config import ytcfg
+from yt.mods import *
+import time
+
+from yt.utilities.logger import disable_stream_logging
+from yt.utilities.command_line import get_yt_version
+
+mylog = logging.getLogger('nose.plugins.answer-testing')
+run_big_data = False
+
+_latest = "gold001"
+_url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
+
+class AnswerTesting(Plugin):
+    name = "answer-testing"
+
+    def options(self, parser, env=os.environ):
+        super(AnswerTesting, self).options(parser, env=env)
+        parser.add_option("--answer-compare", dest="compare_name",
+            default=_latest, help="The name against which we will compare")
+        parser.add_option("--answer-big-data", dest="big_data",
+            default=False, help="Should we run against big data, too?",
+            action="store_true")
+        parser.add_option("--answer-name", dest="this_name",
+            default=None,
+            help="The name we'll call this set of tests")
+        parser.add_option("--answer-store", dest="store_results",
+            default=False, action="store_true")
+
+    def configure(self, options, conf):
+        super(AnswerTesting, self).configure(options, conf)
+        if not self.enabled:
+            return
+        disable_stream_logging()
+        try:
+            my_hash = get_yt_version()
+        except:
+            my_hash = "UNKNOWN%s" % (time.time())
+        if options.this_name is None: options.this_name = my_hash
+        from yt.config import ytcfg
+        ytcfg["yt","__withintesting"] = "True"
+        AnswerTestingTest.result_storage = \
+            self.result_storage = defaultdict(dict)
+        if options.compare_name is not None:
+            # Now we grab from our S3 store
+            if options.compare_name == "latest":
+                options.compare_name = _latest
+            AnswerTestingTest.reference_storage = \
+                AnswerTestOpener(options.compare_name)
+        self.answer_name = options.this_name
+        self.store_results = options.store_results
+        global run_big_data
+        run_big_data = options.big_data
+
+    def finalize(self, result):
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        if self.store_results is False: return
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in self.result_storage:
+            rs = cPickle.dumps(self.result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
+class AnswerTestOpener(object):
+    def __init__(self, reference_name):
+        self.reference_name = reference_name
+        self.cache = {}
+
+    def get(self, pf_name, default = None):
+        if pf_name in self.cache: return self.cache[pf_name]
+        url = _url_path % (self.reference_name, pf_name)
+        try:
+            resp = urllib2.urlopen(url)
+            # This is dangerous, but we have a controlled S3 environment
+            data = resp.read()
+            rv = cPickle.loads(data)
+        except urllib2.HTTPError as ex:
+            # Log the miss before failing; the old ordering left these lines dead.
+            mylog.warning("Missing %s (%s)", url, ex)
+            raise YTNoOldAnswer(url)
+        self.cache[pf_name] = rv
+        return rv
+
+@contextlib.contextmanager
+def temp_cwd(cwd):
+    oldcwd = os.getcwd()
+    os.chdir(cwd)
+    yield
+    os.chdir(oldcwd)
+
+def can_run_pf(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    with temp_cwd(path):
+        try:
+            load(pf_fn)
+        except:
+            return False
+    return AnswerTestingTest.result_storage is not None
+
+def data_dir_load(pf_fn):
+    path = ytcfg.get("yt", "test_data_dir")
+    with temp_cwd(path):
+        pf = load(pf_fn)
+        pf.h
+        return pf
+
+class AnswerTestingTest(object):
+    reference_storage = None
+    def __init__(self, pf_fn):
+        self.pf = data_dir_load(pf_fn)
+
+    def __call__(self):
+        nv = self.run()
+        if self.reference_storage is not None:
+            dd = self.reference_storage.get(str(self.pf))
+            if dd is None: raise YTNoOldAnswer(str(self.pf))
+            ov = dd[self.description]
+            self.compare(nv, ov)
+        else:
+            ov = None
+        self.result_storage[str(self.pf)][self.description] = nv
+
+    def compare(self, new_result, old_result):
+        raise RuntimeError
+
+    def create_obj(self, pf, obj_type):
+        # obj_type should be tuple of
+        #  ( obj_name, ( args ) )
+        if obj_type is None:
+            return pf.h.all_data()
+        cls = getattr(pf.h, obj_type[0])
+        obj = cls(*obj_type[1])
+        return obj
+
+    @property
+    def sim_center(self):
+        """
+        This returns the center of the domain.
+        """
+        return 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
+
+    @property
+    def max_dens_location(self):
+        """
+        This is a helper function to return the location of the most dense
+        point.
+        """
+        return self.pf.h.find_max("Density")[1]
+
+    @property
+    def entire_simulation(self):
+        """
+        Return an unsorted array of values that cover the entire domain.
+        """
+        return self.pf.h.all_data()
+
+    @property
+    def description(self):
+        obj_type = getattr(self, "obj_type", None)
+        if obj_type is None:
+            oname = "all"
+        else:
+            oname = "_".join((str(s) for s in obj_type))
+        args = [self._type_name, str(self.pf), oname]
+        args += [str(getattr(self, an)) for an in self._attrs]
+        return "_".join(args)
+        
+class FieldValuesTest(AnswerTestingTest):
+    _type_name = "FieldValues"
+    _attrs = ("field", )
+
+    def __init__(self, pf_fn, field, obj_type = None):
+        super(FieldValuesTest, self).__init__(pf_fn)
+        self.obj_type = obj_type
+        self.field = field
+
+    def run(self):
+        obj = self.create_obj(self.pf, self.obj_type)
+        avg = obj.quantities["WeightedAverageQuantity"](self.field,
+                             weight="Ones")
+        (mi, ma), = obj.quantities["Extrema"](self.field)
+        return np.array([avg, mi, ma])
+
+    def compare(self, new_result, old_result):
+        assert_equal(new_result, old_result)
+
+class ProjectionValuesTest(AnswerTestingTest):
+    _type_name = "ProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(ProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        return proj.field_data
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class PixelizedProjectionValuesTest(AnswerTestingTest):
+    _type_name = "PixelizedProjectionValues"
+    _attrs = ("field", "axis", "weight_field")
+
+    def __init__(self, pf_fn, axis, field, weight_field = None,
+                 obj_type = None):
+        super(PixelizedProjectionValuesTest, self).__init__(pf_fn)
+        self.axis = axis
+        self.field = field
+        self.weight_field = weight_field
+        self.obj_type = obj_type
+
+    def run(self):
+        if self.obj_type is not None:
+            obj = self.create_obj(self.pf, self.obj_type)
+        else:
+            obj = None
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field,
+                              data_source = obj)
+        frb = proj.to_frb((1.0, 'unitary'), 256)
+        frb[self.field]
+        if self.weight_field is not None: frb[self.weight_field]
+        d = frb.data
+        d.update( dict( (("%s_sum" % f, proj[f].sum(dtype="float64"))
+                         for f in proj.field_data.keys()) ) )
+        return d
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_rel_equal(new_result[k], old_result[k], 10)
+
+class GridValuesTest(AnswerTestingTest):
+    _type_name = "GridValues"
+    _attrs = ("field",)
+
+    def __init__(self, pf_fn, field):
+        super(GridValuesTest, self).__init__(pf_fn)
+        self.field = field
+
+    def run(self):
+        hashes = {}
+        for g in self.pf.h.grids:
+            hashes[g.id] = hashlib.md5(g[self.field].tostring()).hexdigest()
+            g.clear_data()
+        return hashes
+
+    def compare(self, new_result, old_result):
+        assert(len(new_result) == len(old_result))
+        for k in new_result:
+            assert (k in old_result)
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class GridHierarchyTest(AnswerTestingTest):
+    _type_name = "GridHierarchy"
+    _attrs = ()
+
+    def run(self):
+        result = {}
+        result["grid_dimensions"] = self.pf.h.grid_dimensions
+        result["grid_left_edges"] = self.pf.h.grid_left_edge
+        result["grid_right_edges"] = self.pf.h.grid_right_edge
+        result["grid_levels"] = self.pf.h.grid_levels
+        result["grid_particle_count"] = self.pf.h.grid_particle_count
+        return result
+
+    def compare(self, new_result, old_result):
+        for k in new_result:
+            assert_equal(new_result[k], old_result[k])
+
+class ParentageRelationshipsTest(AnswerTestingTest):
+    _type_name = "ParentageRelationships"
+    _attrs = ()
+    def run(self):
+        result = {}
+        result["parents"] = []
+        result["children"] = []
+        for g in self.pf.h.grids:
+            p = g.Parent
+            if p is None:
+                result["parents"].append(None)
+            elif hasattr(p, "id"):
+                result["parents"].append(p.id)
+            else:
+                result["parents"].append([pg.id for pg in p])
+            result["children"].append([c.id for c in g.Children])
+        return result
+
+    def compare(self, new_result, old_result):
+        for newp, oldp in zip(new_result["parents"], old_result["parents"]):
+            assert(newp == oldp)
+        for newc, oldc in zip(new_result["children"], old_result["children"]):
+            assert(newc == oldc)
+
+def requires_pf(pf_fn, big_data = False):
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        return func
+    if big_data and not run_big_data:
+        return ffalse
+    elif not can_run_pf(pf_fn):
+        return ffalse
+    else:
+        return ftrue
+
+def small_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)
+
+def big_patch_amr(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)

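Putting the plugin's options together; the --answer-* flags are as defined in options() above, --with-answer-testing is assumed to be nose's standard enabling switch for a plugin named "answer-testing", and the store run requires boto credentials for the yt-answer-tests bucket:

    # compare a run against the current gold standard
    nosetests --with-answer-testing --answer-compare=gold001 yt/frontends/enzo
    # generate and upload a new named answer set, including big data
    nosetests --with-answer-testing --answer-store --answer-name=mine \
              --answer-big-data yt/frontends/enzo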

diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py
+++ b/yt/utilities/answer_testing/output_tests.py
@@ -29,14 +29,12 @@
 # We first create our dictionary of tests to run.  This starts out empty, and
 # as tests are imported it will be filled.
 if "TestRegistry" not in locals():
-    print "Initializing TestRegistry"
     class TestRegistry(dict):
         def __new__(cls, *p, **k):
             if not '_the_instance' in cls.__dict__:
                 cls._the_instance = dict.__new__(cls)
                 return cls._the_instance
 if "test_registry" not in locals():
-    print "Initializing test_registry"
     test_registry = TestRegistry()
 
 # The exceptions we raise, related to the character of the failure.


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -146,3 +146,29 @@
     def __str__(self):
         return "You must create an API key before uploading.  See " + \
                "https://data.yt-project.org/getting_started.html"
+
+class YTNoFilenamesMatchPattern(YTException):
+    def __init__(self, pattern):
+        self.pattern = pattern
+
+    def __str__(self):
+        return "No filenames were found to match the pattern: " + \
+               "'%s'" % (self.pattern)
+
+class YTNoOldAnswer(YTException):
+    def __init__(self, path):
+        self.path = path
+
+    def __str__(self):
+        return "There is no old answer available.\n" + \
+               str(self.path)
+
+class YTEllipsoidOrdering(YTException):
+    def __init__(self, pf, A, B, C):
+        YTException.__init__(self, pf)
+        self._A = A
+        self._B = B
+        self._C = C
+
+    def __str__(self):
+        return "Must have A>=B>=C"


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -7,6 +7,8 @@
 Affiliation: UC Berkeley
 Author: Stephen Skory <s at skory.us>
 Affiliation: UC San Diego
+Author: Anthony Scopatz <scopatz at gmail.com>
+Affiliation: The University of Chicago
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2008-2011 Matthew Turk, JS Oishi, Stephen Skory.  All Rights Reserved.
@@ -211,7 +213,7 @@
 class ContourCallback(PlotCallback):
     _type_name = "contour"
     def __init__(self, field, ncont=5, factor=4, clim=None,
-                 plot_args = None):
+                 plot_args = None, label = False, label_args = None):
         """
         annotate_contour(self, field, ncont=5, factor=4, take_log=False, clim=None,
                          plot_args = None):
@@ -230,6 +232,10 @@
         self.clim = clim
         if plot_args is None: plot_args = {'colors':'k'}
         self.plot_args = plot_args
+        self.label = label
+        if label_args is None:
+            label_args = {}
+        self.label_args = label_args
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -288,12 +294,16 @@
             self.clim = (np.log10(self.clim[0]), np.log10(self.clim[1]))
         
         if self.clim is not None: 
-            self.ncont = np.linspace(self.clim[0], self.clim[1], ncont)
+            self.ncont = np.linspace(self.clim[0], self.clim[1], self.ncont)
         
-        plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
+        cset = plot._axes.contour(xi,yi,zi,self.ncont, **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
+        
+        if self.label:
+            plot._axes.clabel(cset, **self.label_args)
+        
 
 class GridBoundaryCallback(PlotCallback):
     _type_name = "grids"
@@ -440,30 +450,6 @@
         plot._axes.set_xlabel(self.label)
         plot._axes.set_ylabel(self.label)
 
-class TimeCallback(PlotCallback):
-    _type_name = "time"
-    def __init__(self, format_code='10.7e'):
-        """
-        This annotates the plot with the current simulation time.
-        For now, the time is displayed in seconds.
-        *format_code* can be optionally set, allowing a custom 
-        c-style format code for the time display.
-        """
-        self.format_code = format_code
-        PlotCallback.__init__(self)
-    
-    def __call__(self, plot):
-        current_time = plot.pf.current_time/plot.pf['Time']
-        timestring = format(current_time,self.format_code)
-        base = timestring[:timestring.find('e')]
-        exponent = timestring[timestring.find('e')+1:]
-        if exponent[0] == '+':
-            exponent = exponent[1:]
-        timestring = r'$t\/=\/'+base+''+r'\times\,10^{'+exponent+r'}\, \rm{s}$'
-        from mpl_toolkits.axes_grid1.anchored_artists import AnchoredText
-        at = AnchoredText(timestring, prop=dict(size=12), frameon=True, loc=4)
-        plot._axes.add_artist(at)
-
 def get_smallest_appropriate_unit(v, pf):
     max_nu = 1e30
     good_u = None
@@ -1103,3 +1089,152 @@
     def __call__(self,plot):
         plot._axes.set_title(self.title)
 
+class FlashRayDataCallback(PlotCallback):
+    _type_name = "flash_ray_data"
+    def __init__(self, cmap_name='bone', sample=None):
+        """ 
+        annotate_flash_ray_data(cmap_name='bone', sample=None)
+
+        Adds ray trace data to the plot.  *cmap_name* is the name of the color map 
+        ('bone', 'jet', 'hot', etc).  *sample* dictates the amount of down sampling 
+        to do to prevent all of the rays from being  plotted.  This may be None 
+        (plot all rays, default), an integer (step size), or a slice object.
+        """
+        self.cmap_name = cmap_name
+        self.sample = sample if isinstance(sample, slice) else slice(None, None, sample)
+
+    def __call__(self, plot):
+        ray_data = plot.data.pf._handle["RayData"][:]
+        idx = ray_data[:,0].argsort(kind="mergesort")
+        ray_data = ray_data[idx]
+
+        tags = ray_data[:,0]
+        coords = ray_data[:,1:3]
+        power = ray_data[:,4]
+        power /= power.max()
+        cx, cy = self.convert_to_plot(plot, coords.T)
+        coords[:,0], coords[:,1] = cx, cy
+        splitidx = np.argwhere(0 < (tags[1:] - tags[:-1])) + 1
+        coords = np.split(coords, splitidx.flat)[self.sample]
+        power = np.split(power, splitidx.flat)[self.sample]
+        cmap = matplotlib.cm.get_cmap(self.cmap_name)
+
+        plot._axes.hold(True)
+        colors = [cmap(p.max()) for p in power]
+        lc = matplotlib.collections.LineCollection(coords, colors=colors)
+        plot._axes.add_collection(lc)
+        plot._axes.hold(False)
+
+
+class TimestampCallback(PlotCallback):
+    _type_name = "timestamp"
+    _time_conv = {
+          'as': 1e-18,
+          'attosec': 1e-18,
+          'attosecond': 1e-18,
+          'attoseconds': 1e-18,
+          'fs': 1e-15,
+          'femtosec': 1e-15,
+          'femtosecond': 1e-15,
+          'femtoseconds': 1e-15,
+          'ps': 1e-12,
+          'picosec': 1e-12,
+          'picosecond': 1e-12,
+          'picoseconds': 1e-12,
+          'ns': 1e-9,
+          'nanosec': 1e-9,
+          'nanosecond':1e-9,
+          'nanoseconds' : 1e-9,
+          'us': 1e-6,
+          'microsec': 1e-6,
+          'microsecond': 1e-6,
+          'microseconds': 1e-6,
+          'ms': 1e-3,
+          'millisec': 1e-3,
+          'millisecond': 1e-3,
+          'milliseconds': 1e-3,
+          's': 1.0,
+          'sec': 1.0,
+          'second':1.0,
+          'seconds': 1.0,
+          'm': 60.0,
+          'min': 60.0,
+          'minute': 60.0,
+          'minutes': 60.0,
+          'h': 3600.0,
+          'hour': 3600.0,
+          'hours': 3600.0,
+          'd': 86400.0,
+          'day': 86400.0,
+          'days': 86400.0,
+          'y': 86400.0*365.25,
+          'year': 86400.0*365.25,
+          'years': 86400.0*365.25,
+          'ev': 1e-9 * 7.6e-8 / 6.03,
+          'kev': 1e-12 * 7.6e-8 / 6.03,
+          'mev': 1e-15 * 7.6e-8 / 6.03,
+          }
+
+    def __init__(self, x, y, units=None, format="{time:.3G} {units}", **kwargs):
+        """ 
+        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}", **kwargs)
+
+        Adds the current time to the plot at point given by *x* and *y*.  If *units* 
+        is given ('s', 'ms', 'ns', etc), it will convert the time to this basis.  If 
+        *units* is None, it will attempt to figure out the correct value by which to 
+        scale.  The *format* keyword is a template string that will be evaluated and 
+        displayed on the plot.  All other *kwargs* will be passed to the text() 
+        method on the plot axes.  See matplotlib's text() functions for more 
+        information.
+        """
+        self.x = x
+        self.y = y
+        self.format = format
+        self.units = units
+        self.kwargs = {'color': 'w'}
+        self.kwargs.update(kwargs)
+
+    def __call__(self, plot):
+        if self.units is None:
+            t = plot.data.pf.current_time
+            scale_keys = ['as', 'fs', 'ps', 'ns', 'us', 'ms', 's']
+            self.units = 's'
+            for k in scale_keys:
+                if t < self._time_conv[k]:
+                    break
+                self.units = k
+        t = plot.data.pf.current_time / self._time_conv[self.units.lower()]
+        if self.units == 'us':
+            self.units = '$\\mu s$'
+        s = self.format.format(time=t, units=self.units)
+        plot._axes.hold(True)
+        plot._axes.text(self.x, self.y, s, **self.kwargs)
+        plot._axes.hold(False)
+
+
+class MaterialBoundaryCallback(ContourCallback):
+    _type_name = "material_boundary"
+    def __init__(self, field='targ', ncont=1, factor=4, clim=(0.9, 1.0), **kwargs):
+        """ 
+        annotate_material_boundary(field='targ', ncont=1, factor=4,
+                                   clim=(0.9, 1.0), **kwargs)
+
+        Add the limiting contours of *field* to the plot.  Nominally, *field*
+        is the target material but may be any other field present in the
+        hierarchy.  The number of contours generated is given by *ncont*,
+        *factor* governs the number of points used in the interpolation, and
+        *clim* gives the (lower, upper) limits for contouring.  For this to
+        truly be the boundary, *clim* should be close to the edge; for
+        example, the default is (0.9, 1.0) for 'targ', which is defined on
+        the range [0.0, 1.0].  All other *kwargs* will be passed to the
+        contour() method on the plot axes.  See matplotlib for more information.
+        """
+        plot_args = {'colors': 'w'}
+        plot_args.update(kwargs)
+        super(MaterialBoundaryCallback, self).__init__(field=field, ncont=ncont,
+                                                       factor=factor, clim=clim,
+                                                       plot_args=plot_args)
+
+    def __call__(self, plot):
+        super(MaterialBoundaryCallback, self).__call__(plot)
+


diff -r f06057cc09ed9769046b349914ebeb60475ee5d3 -r 24c207c7f548823bfe5a14c82c4a199eb5dae8b6 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1449,7 +1449,7 @@
             yield self.snapshot()
 
 def allsky_projection(pf, center, radius, nside, field, weight = None,
-                      inner_radius = 10, rotation = None):
+                      inner_radius = 10, rotation = None, source = None):
     r"""Project through a parameter file, through an allsky-method
     decomposition from HEALpix, and return the image plane.
 
@@ -1484,6 +1484,9 @@
         If supplied, the vectors will be rotated by this.  You can construct
         this by, for instance, calling np.array([v1,v2,v3]) where those are the
         three reference planes of an orthogonal frame (see ortho_find).
+    source : data container, default None
+        If supplied, this gives the data source from which the all-sky
+        projection pulls its data.
 
     Returns
     -------
@@ -1527,12 +1530,20 @@
     positions += inner_radius * dx * vs
     vs *= radius
     uv = np.ones(3, dtype='float64')
-    grids = pf.h.sphere(center, radius)._grids
+    if source is not None:
+        grids = source._grids
+    else:
+        grids = pf.h.sphere(center, radius)._grids
     sampler = ProjectionSampler(positions, vs, center, (0.0, 0.0, 0.0, 0.0),
                                 image, uv, uv, np.zeros(3, dtype='float64'))
     pb = get_pbar("Sampling ", len(grids))
     for i,grid in enumerate(grids):
-        data = [grid[field] * grid.child_mask.astype('float64')
+        if source is not None:
+            data = [grid[field] * source._get_cut_mask(grid) * \
+                grid.child_mask.astype('float64')
+                for field in fields]
+        else:
+            data = [grid[field] * grid.child_mask.astype('float64')
                 for field in fields]
         pg = PartitionedGrid(
             grid.id, data,
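
A hedged sketch of the new source keyword, assuming pf is an already-loaded
parameter file; the sphere here stands in for any data container:

    from yt.visualization.volume_rendering.camera import allsky_projection
    sp = pf.h.sphere([0.5, 0.5, 0.5], 0.3)
    # restrict the all-sky projection to the cells the sphere selects
    image = allsky_projection(pf, [0.5, 0.5, 0.5], 0.3, 64, "Density",
                              source=sp)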



https://bitbucket.org/yt_analysis/yt/changeset/30892e3e9006/
changeset:   30892e3e9006
branch:      yt
user:        MatthewTurk
date:        2012-10-26 13:25:21
summary:     Sum the dts in a given cell.  It's important to note that this does not (yet)
correctly convey when a cell has been visited multiple times.
affected #:  2 files

diff -r 189d2eace2e91e9ac7aeae9593fe465b50b6cf92 -r 30892e3e900620f83eeb338202309c488bcff06e yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -788,11 +788,10 @@
             ci = ((pos - grid.LeftEdge)/grid.dds).astype('int')
             for j in range(3):
                 ci[j] = min(ci[j], grid.ActiveDimensions[j]-1)
-            if mask[ci[0], ci[1], ci[2]]:
-                continue
+            if not mask[ci[0], ci[1], ci[2]]:
+                ts[ci[0], ci[1], ci[2]] = self.ts[i]
             mask[ci[0], ci[1], ci[2]] = 1
-            dts[ci[0], ci[1], ci[2]] = self.dts[i]
-            ts[ci[0], ci[1], ci[2]] = self.ts[i]
+            dts[ci[0], ci[1], ci[2]] += self.dts[i]
         self._dts[grid.id] = dts
         self._ts[grid.id] = ts
         return mask
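
The change in miniature, with made-up visits: when a path crosses the same
cell more than once, the dt contributions are now summed instead of the
first visit winning:

    import numpy as np
    dts = np.zeros(4)
    visits = [(1, 0.2), (2, 0.1), (1, 0.3)]  # (cell index, dt) pairs
    for ci, dt in visits:
        dts[ci] += dt
    # dts[1] == 0.5: both crossings of cell 1 contribute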


diff -r 189d2eace2e91e9ac7aeae9593fe465b50b6cf92 -r 30892e3e900620f83eeb338202309c488bcff06e yt/data_objects/tests/test_streamlines.py
--- /dev/null
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -0,0 +1,22 @@
+from yt.testing import *
+from yt.visualization.api import Streamlines
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+_fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
+
+def test_streamlines():
+    # We decompose in different ways
+    cs = np.mgrid[0.47:0.53:2j,0.47:0.53:2j,0.47:0.53:2j]
+    cs = np.array([a.ravel() for a in cs]).T
+    length = (1.0/128) * 16 # 16 half-widths of a cell
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs, fields = _fields)
+        streams = Streamlines(pf, cs)
+        streams.integrate_through_volume()
+        for path in (streams.path(i) for i in range(8)):
+            yield assert_rel_equal, path['dts'].sum(), 1.0, 14
+            assert_rel_equal(path['dts'].sum(), 1.0, 14)
+            yield assert_array_less, path['t'], 1.0



https://bitbucket.org/yt_analysis/yt/changeset/6ae44687aeca/
changeset:   6ae44687aeca
branch:      yt
user:        MatthewTurk
date:        2012-10-26 13:26:33
summary:     Merging with Sam's changes
affected #:  3 files

diff -r 30892e3e900620f83eeb338202309c488bcff06e -r 6ae44687aeca2f8cae9269865be39c9a8cfffe3a yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -749,7 +749,7 @@
         self.dts = np.empty_like(positions[:,0])
         self.dts[:-1] = np.sqrt(np.sum((self.positions[1:]-
                                         self.positions[:-1])**2,axis=1))
-        self.dts[-1] = self.dts[-1]
+        self.dts[-1] = self.dts[-2]
         self.ts = np.add.accumulate(self.dts)
         self._set_center(self.positions[0])
         self.set_field_parameter('center', self.positions[0])


diff -r 30892e3e900620f83eeb338202309c488bcff06e -r 6ae44687aeca2f8cae9269865be39c9a8cfffe3a yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -29,6 +29,7 @@
     y_dict, \
     axis_names
 from .volume_rendering.api import off_axis_projection
+from yt.data_objects.image_array import ImageArray
 import _MPL
 import numpy as np
 import weakref
@@ -133,8 +134,9 @@
                              self.bounds, int(self.antialias),
                              self._period, int(self.periodic),
                              ).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
     def __setitem__(self, item, val):
         self.data[item] = val
@@ -145,6 +147,28 @@
             if f not in exclude:
                 self[f]
 
+    def _get_info(self, item):
+        info = {}
+        info['data_source'] = self.data_source.__str__()  
+        info['axis'] = self.data_source.axis
+        info['field'] = str(item)
+        info['units'] = self.data_source.pf.field_info[item].get_units()
+        info['xlim'] = self.bounds[:2]
+        info['ylim'] = self.bounds[2:]
+        info['length_to_cm'] = self.data_source.pf['cm']
+        info['projected_units'] = \
+                self.data_source.pf.field_info[item].get_projected_units()
+        info['center'] = self.data_source.center
+        try:
+            info['coord'] = self.data_source.coord
+        except AttributeError:
+            pass
+        try:
+            info['weight_field'] = self.data_source.weight_field
+        except AttributeError:
+            pass
+        return info
+
     def convert_to_pixel(self, coords):
         r"""This function converts coordinates in code-space to pixel-space.
 


diff -r 30892e3e900620f83eeb338202309c488bcff06e -r 6ae44687aeca2f8cae9269865be39c9a8cfffe3a yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -118,7 +118,9 @@
         if length is None:
             length = np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
         self.length = length
-        self.steps = int(length/dx)
+        self.steps = int(length/dx)+1
+        # Fix up the dx.
+        self.dx = 1.0*self.length/self.steps
         self.streamlines = np.zeros((self.N,self.steps,3), dtype='float64')
         self.magnitudes = None
         if self.get_magnitude:
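
The off-by-one in numbers, with made-up values:

    length, dx = 1.0, 0.3
    steps = int(length / dx)        # 3 steps cover only 0.9 of the length
    steps = int(length / dx) + 1    # 4 steps overshoot slightly, so...
    dx = 1.0 * length / steps       # ...dx becomes 0.25 and steps * dx == length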



https://bitbucket.org/yt_analysis/yt/changeset/a67f87642c83/
changeset:   a67f87642c83
branch:      yt
user:        MatthewTurk
date:        2012-10-26 13:43:09
summary:     Switch out the call to Streamlines for a call to the hierarchy.  Add a path
normalization option to get back the correct dts in a streamline.  Tests all
pass.
affected #:  3 files

diff -r 6ae44687aeca2f8cae9269865be39c9a8cfffe3a -r a67f87642c8306f18102c905f4cc3c663e1782b8 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -709,7 +709,7 @@
     _type_name = "streamline"
     _con_args = ('positions')
     sort_by = 't'
-    def __init__(self, positions, fields=None, pf=None, **kwargs):
+    def __init__(self, positions, length = 1.0, fields=None, pf=None, **kwargs):
         """
         This is a streamline, which is a set of points defined as
         being parallel to some vector field.
@@ -725,6 +725,8 @@
         ----------
         positions : array-like
             List of streamline positions
+        length : float
+            The magnitude of the distance; dts will be divided by this
         fields : list of strings, optional
             If you want the object to pre-retrieve a set of fields, supply them
             here.  This is not necessary.
@@ -750,6 +752,8 @@
         self.dts[:-1] = np.sqrt(np.sum((self.positions[1:]-
                                         self.positions[:-1])**2,axis=1))
         self.dts[-1] = self.dts[-2]
+        self.length = length
+        self.dts /= length
         self.ts = np.add.accumulate(self.dts)
         self._set_center(self.positions[0])
         self.set_field_parameter('center', self.positions[0])


diff -r 6ae44687aeca2f8cae9269865be39c9a8cfffe3a -r a67f87642c8306f18102c905f4cc3c663e1782b8 yt/data_objects/tests/test_streamlines.py
--- a/yt/data_objects/tests/test_streamlines.py
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -14,9 +14,8 @@
     length = (1.0/128) * 16 # 16 half-widths of a cell
     for nprocs in [1, 2, 4, 8]:
         pf = fake_random_pf(64, nprocs = nprocs, fields = _fields)
-        streams = Streamlines(pf, cs)
+        streams = Streamlines(pf, cs, length=length)
         streams.integrate_through_volume()
         for path in (streams.path(i) for i in range(8)):
             yield assert_rel_equal, path['dts'].sum(), 1.0, 14
-            assert_rel_equal(path['dts'].sum(), 1.0, 14)
-            yield assert_array_less, path['t'], 1.0
+            yield assert_equal, np.all(path['t'] <= (1.0 + 1e-10)), True


diff -r 6ae44687aeca2f8cae9269865be39c9a8cfffe3a -r a67f87642c8306f18102c905f4cc3c663e1782b8 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -208,5 +208,6 @@
         >>> matplotlib.pylab.semilogy(stream['t'], stream['Density'], '-x')
         
         """
-        return AMRStreamlineBase(self.streamlines[streamline_id], pf=self.pf)
+        return self.pf.h.streamline(self.streamlines[streamline_id],
+                                    length = self.length)
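
In use, the normalization means a path's dts sum to one whatever the physical
length; a sketch mirroring the test above, with pf, cs, and length assumed to
exist already:

    streams = Streamlines(pf, cs, length=length)
    streams.integrate_through_volume()
    path = streams.path(0)
    # path['dts'].sum() is ~1.0 and path['t'] climbs from 0 toward 1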
         



https://bitbucket.org/yt_analysis/yt/changeset/b54dc03a34ee/
changeset:   b54dc03a34ee
branch:      yt
user:        MatthewTurk
date:        2012-10-26 14:12:59
summary:     Allow cells to appear multiple times in a streamline.  This can probably be
vectorized.
affected #:  2 files

diff -r a67f87642c8306f18102c905f4cc3c663e1782b8 -r b54dc03a34eec3fe3b472dcb993bd93dbd5f8f54 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -772,30 +772,30 @@
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = np.logical_and(self._get_cut_mask(grid),
-                              grid.child_mask)
-        if field == 'dts': return self._dts[grid.id][mask]
-        if field == 't': return self._ts[grid.id][mask]
-        return grid[field][mask]
+        # No child masking here; it happens inside the mask cut
+        mask = self._get_cut_mask(grid) 
+        if field == 'dts': return self._dts[grid.id]
+        if field == 't': return self._ts[grid.id]
+        return grid[field].flat[mask]
         
     @cache_mask
     def _get_cut_mask(self, grid):
-        mask = np.zeros(grid.ActiveDimensions, dtype='int')
-        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         #pdb.set_trace()
         points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
                          np.all(self.positions <= grid.RightEdge, axis=1) 
         pids = np.where(points_in_grid)[0]
-        for i, pos in zip(pids, self.positions[points_in_grid]):
+        mask = np.zeros(points_in_grid.sum(), dtype='int')
+        dts = np.zeros(points_in_grid.sum(), dtype='float64')
+        ts = np.zeros(points_in_grid.sum(), dtype='float64')
+        for mi, (i, pos) in enumerate(zip(pids, self.positions[points_in_grid])):
             if not points_in_grid[i]: continue
             ci = ((pos - grid.LeftEdge)/grid.dds).astype('int')
+            if grid.child_mask[ci[0], ci[1], ci[2]] == 0: continue
             for j in range(3):
                 ci[j] = min(ci[j], grid.ActiveDimensions[j]-1)
-            if not mask[ci[0], ci[1], ci[2]]:
-                ts[ci[0], ci[1], ci[2]] = self.ts[i]
-            mask[ci[0], ci[1], ci[2]] = 1
-            dts[ci[0], ci[1], ci[2]] += self.dts[i]
+            mask[mi] = np.ravel_multi_index(ci, grid.ActiveDimensions)
+            dts[mi] = self.dts[i]
+            ts[mi] = self.ts[i]
         self._dts[grid.id] = dts
         self._ts[grid.id] = ts
         return mask
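
A hedged sketch of the vectorization the summary hints at; names follow the
loop above, it drops child-masked points outright rather than leaving zero
entries for them, and it is untested:

    pos = self.positions[points_in_grid]
    ci = ((pos - grid.LeftEdge) / grid.dds).astype('int')
    ci = np.minimum(ci, np.array(grid.ActiveDimensions) - 1)
    keep = grid.child_mask[ci[:, 0], ci[:, 1], ci[:, 2]] != 0
    mask = np.ravel_multi_index(ci[keep].T, grid.ActiveDimensions)
    dts = self.dts[pids[keep]]
    ts = self.ts[pids[keep]]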


diff -r a67f87642c8306f18102c905f4cc3c663e1782b8 -r b54dc03a34eec3fe3b472dcb993bd93dbd5f8f54 yt/data_objects/tests/test_streamlines.py
--- a/yt/data_objects/tests/test_streamlines.py
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -19,3 +19,4 @@
         for path in (streams.path(i) for i in range(8)):
             yield assert_rel_equal, path['dts'].sum(), 1.0, 14
             yield assert_equal, np.all(path['t'] <= (1.0 + 1e-10)), True
+            path["Density"]



https://bitbucket.org/yt_analysis/yt/changeset/3d8fd5d2b393/
changeset:   3d8fd5d2b393
branch:      yt
user:        samskillman
date:        2012-10-30 20:46:57
summary:     Fixing weight field issue.
affected #:  1 file

diff -r b54dc03a34eec3fe3b472dcb993bd93dbd5f8f54 -r 3d8fd5d2b393a0f07df381ec534d88f64bad97c1 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -164,7 +164,9 @@
         except AttributeError:
             pass
         try:
-            info['weight_field'] = self.data_source.weight_field
+            weight = self.data_source.weight_field
+            if weight is None: weight = 'None'
+            info['weight_field'] = weight
         except AttributeError:
             pass
         return info
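
A minimal sketch of reading the metadata the FRB now attaches, assuming proj
is a projection object on a loaded parameter file:

    frb = proj.to_frb((1.0, 'unitary'), 64)
    img = frb['Density']             # an ImageArray rather than a bare ndarray
    print img.info['weight_field']   # the weight used for the projection, if any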



https://bitbucket.org/yt_analysis/yt/changeset/3972786cc04e/
changeset:   3972786cc04e
branch:      yt
user:        samskillman
date:        2012-10-30 21:21:35
summary:     Adding tests for FRB-generated ImageArrays, and adding a test_slice.py that does not yet check the slice's values for correctness.
affected #:  3 files

diff -r 3d8fd5d2b393a0f07df381ec534d88f64bad97c1 -r 3972786cc04e9e66747a2f49196675e45ee66408 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -1,6 +1,4 @@
 from yt.testing import *
-from yt.data_objects.profiles import \
-    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
 
 def setup():
     from yt.config import ytcfg
@@ -32,8 +30,30 @@
                 yield assert_equal, np.unique(proj["py"]), uc[yax]
                 yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
+                frb = proj.to_frb((1.0,'unitary'), 64)
+                for proj_field in ['Ones', 'Density']:
+                    yield assert_equal, frb[proj_field].info['data_source'], \
+                            proj.__str__()
+                    yield assert_equal, frb[proj_field].info['axis'], \
+                            ax
+                    yield assert_equal, frb[proj_field].info['field'], \
+                            proj_field
+                    yield assert_equal, frb[proj_field].info['units'], \
+                            pf.field_info[proj_field].get_units()
+                    yield assert_equal, frb[proj_field].info['xlim'], \
+                            frb.bounds[:2]
+                    yield assert_equal, frb[proj_field].info['ylim'], \
+                            frb.bounds[2:]
+                    yield assert_equal, frb[proj_field].info['length_to_cm'], \
+                            pf['cm']
+                    yield assert_equal, frb[proj_field].info['center'], \
+                            proj.center
+                    yield assert_equal, frb[proj_field].info['weight_field'], \
+                            wf
             # wf == None
             yield assert_equal, wf, None
             v1 = proj["Density"].sum()
             v2 = (dd["Density"] * dd["d%s" % an]).sum()
             yield assert_rel_equal, v1, v2, 10
+
+


diff -r 3d8fd5d2b393a0f07df381ec534d88f64bad97c1 -r 3972786cc04e9e66747a2f49196675e45ee66408 yt/data_objects/tests/test_slice.py
--- /dev/null
+++ b/yt/data_objects/tests/test_slice.py
@@ -0,0 +1,55 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_slice():
+    for nprocs in [8, 1]:
+        # We want to test both 1 proc and 8 procs, to make sure that
+        # parallelism isn't broken
+        pf = fake_random_pf(64, nprocs = nprocs)
+        dims = pf.domain_dimensions
+        xn, yn, zn = pf.domain_dimensions
+        xi, yi, zi = pf.domain_left_edge + 1.0/(pf.domain_dimensions * 2)
+        xf, yf, zf = pf.domain_right_edge - 1.0/(pf.domain_dimensions * 2)
+        coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
+        uc = [np.unique(c) for c in coords]
+        slc_pos = 0.5
+        # Some simple slice tests with single grids
+        for ax, an in enumerate("xyz"):
+            xax = x_dict[ax]
+            yax = y_dict[ax]
+            for wf in ["Density", None]:
+                slc = pf.h.slice(ax, slc_pos, ["Ones", "Density"])
+                yield assert_equal, slc["Ones"].sum(), slc["Ones"].size
+                yield assert_equal, slc["Ones"].min(), 1.0
+                yield assert_equal, slc["Ones"].max(), 1.0
+                yield assert_equal, np.unique(slc["px"]), uc[xax]
+                yield assert_equal, np.unique(slc["py"]), uc[yax]
+                yield assert_equal, np.unique(slc["pdx"]), 1.0/(dims[xax]*2.0)
+                yield assert_equal, np.unique(slc["pdy"]), 1.0/(dims[yax]*2.0)
+                frb = slc.to_frb((1.0,'unitary'), 64)
+                for slc_field in ['Ones', 'Density']:
+                    yield assert_equal, frb[slc_field].info['data_source'], \
+                            slc.__str__()
+                    yield assert_equal, frb[slc_field].info['axis'], \
+                            ax
+                    yield assert_equal, frb[slc_field].info['field'], \
+                            slc_field
+                    yield assert_equal, frb[slc_field].info['units'], \
+                            pf.field_info[slc_field].get_units()
+                    yield assert_equal, frb[slc_field].info['xlim'], \
+                            frb.bounds[:2]
+                    yield assert_equal, frb[slc_field].info['ylim'], \
+                            frb.bounds[2:]
+                    yield assert_equal, frb[slc_field].info['length_to_cm'], \
+                            pf['cm']
+                    yield assert_equal, frb[slc_field].info['center'], \
+                            slc.center
+                    yield assert_equal, frb[slc_field].info['coord'], \
+                            slc_pos
+            # wf == None
+            yield assert_equal, wf, None
+
+


diff -r 3d8fd5d2b393a0f07df381ec534d88f64bad97c1 -r 3972786cc04e9e66747a2f49196675e45ee66408 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -164,9 +164,7 @@
         except AttributeError:
             pass
         try:
-            weight = self.data_source.weight_field
-            if weight is None: weight = 'None'
-            info['weight_field'] = weight
+            info['weight_field'] = self.data_source.weight_field
         except AttributeError:
             pass
         return info



https://bitbucket.org/yt_analysis/yt/changeset/7211b81e0ac6/
changeset:   7211b81e0ac6
branch:      yt
user:        MatthewTurk
date:        2012-10-30 21:34:32
summary:     Merged in samskillman/yt (pull request #317)
affected #:  6 files

diff -r 8db15e870d38beaf2ee597db41f076dc0b946aa4 -r 7211b81e0ac61db6682f987a2838caa6bb743e16 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -709,7 +709,7 @@
     _type_name = "streamline"
     _con_args = ('positions')
     sort_by = 't'
-    def __init__(self, positions, fields=None, pf=None, **kwargs):
+    def __init__(self, positions, length = 1.0, fields=None, pf=None, **kwargs):
         """
         This is a streamline, which is a set of points defined as
         being parallel to some vector field.
@@ -725,6 +725,8 @@
         ----------
         positions : array-like
             List of streamline positions
+        length : float
+            The magnitude of the distance; dts will be divided by this
         fields : list of strings, optional
             If you want the object to pre-retrieve a set of fields, supply them
             here.  This is not necessary.
@@ -749,7 +751,9 @@
         self.dts = np.empty_like(positions[:,0])
         self.dts[:-1] = np.sqrt(np.sum((self.positions[1:]-
                                         self.positions[:-1])**2,axis=1))
-        self.dts[-1] = self.dts[-1]
+        self.dts[-1] = self.dts[-2]
+        self.length = length
+        self.dts /= length
         self.ts = np.add.accumulate(self.dts)
         self._set_center(self.positions[0])
         self.set_field_parameter('center', self.positions[0])
@@ -768,31 +772,30 @@
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = np.logical_and(self._get_cut_mask(grid),
-                              grid.child_mask)
-        if field == 'dts': return self._dts[grid.id][mask]
-        if field == 't': return self._ts[grid.id][mask]
-        return grid[field][mask]
+        # No child masking here; it happens inside the mask cut
+        mask = self._get_cut_mask(grid) 
+        if field == 'dts': return self._dts[grid.id]
+        if field == 't': return self._ts[grid.id]
+        return grid[field].flat[mask]
         
     @cache_mask
     def _get_cut_mask(self, grid):
-        mask = np.zeros(grid.ActiveDimensions, dtype='int')
-        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         #pdb.set_trace()
         points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
                          np.all(self.positions <= grid.RightEdge, axis=1) 
         pids = np.where(points_in_grid)[0]
-        for i, pos in zip(pids, self.positions[points_in_grid]):
+        mask = np.zeros(points_in_grid.sum(), dtype='int')
+        dts = np.zeros(points_in_grid.sum(), dtype='float64')
+        ts = np.zeros(points_in_grid.sum(), dtype='float64')
+        for mi, (i, pos) in enumerate(zip(pids, self.positions[points_in_grid])):
             if not points_in_grid[i]: continue
             ci = ((pos - grid.LeftEdge)/grid.dds).astype('int')
+            if grid.child_mask[ci[0], ci[1], ci[2]] == 0: continue
             for j in range(3):
                 ci[j] = min(ci[j], grid.ActiveDimensions[j]-1)
-            if mask[ci[0], ci[1], ci[2]]:
-                continue
-            mask[ci[0], ci[1], ci[2]] = 1
-            dts[ci[0], ci[1], ci[2]] = self.dts[i]
-            ts[ci[0], ci[1], ci[2]] = self.ts[i]
+            mask[mi] = np.ravel_multi_index(ci, grid.ActiveDimensions)
+            dts[mi] = self.dts[i]
+            ts[mi] = self.ts[i]
         self._dts[grid.id] = dts
         self._ts[grid.id] = ts
         return mask


diff -r 8db15e870d38beaf2ee597db41f076dc0b946aa4 -r 7211b81e0ac61db6682f987a2838caa6bb743e16 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -1,6 +1,4 @@
 from yt.testing import *
-from yt.data_objects.profiles import \
-    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
 
 def setup():
     from yt.config import ytcfg
@@ -32,8 +30,30 @@
                 yield assert_equal, np.unique(proj["py"]), uc[yax]
                 yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
+                frb = proj.to_frb((1.0,'unitary'), 64)
+                for proj_field in ['Ones', 'Density']:
+                    yield assert_equal, frb[proj_field].info['data_source'], \
+                            proj.__str__()
+                    yield assert_equal, frb[proj_field].info['axis'], \
+                            ax
+                    yield assert_equal, frb[proj_field].info['field'], \
+                            proj_field
+                    yield assert_equal, frb[proj_field].info['units'], \
+                            pf.field_info[proj_field].get_units()
+                    yield assert_equal, frb[proj_field].info['xlim'], \
+                            frb.bounds[:2]
+                    yield assert_equal, frb[proj_field].info['ylim'], \
+                            frb.bounds[2:]
+                    yield assert_equal, frb[proj_field].info['length_to_cm'], \
+                            pf['cm']
+                    yield assert_equal, frb[proj_field].info['center'], \
+                            proj.center
+                    yield assert_equal, frb[proj_field].info['weight_field'], \
+                            wf
             # wf == None
             yield assert_equal, wf, None
             v1 = proj["Density"].sum()
             v2 = (dd["Density"] * dd["d%s" % an]).sum()
             yield assert_rel_equal, v1, v2, 10
+
+


diff -r 8db15e870d38beaf2ee597db41f076dc0b946aa4 -r 7211b81e0ac61db6682f987a2838caa6bb743e16 yt/data_objects/tests/test_slice.py
--- /dev/null
+++ b/yt/data_objects/tests/test_slice.py
@@ -0,0 +1,55 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_slice():
+    for nprocs in [8, 1]:
+        # We want to test both 1 proc and 8 procs, to make sure that
+        # parallelism isn't broken
+        pf = fake_random_pf(64, nprocs = nprocs)
+        dims = pf.domain_dimensions
+        xn, yn, zn = pf.domain_dimensions
+        xi, yi, zi = pf.domain_left_edge + 1.0/(pf.domain_dimensions * 2)
+        xf, yf, zf = pf.domain_right_edge - 1.0/(pf.domain_dimensions * 2)
+        coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
+        uc = [np.unique(c) for c in coords]
+        slc_pos = 0.5
+        # Some simple slice tests with single grids
+        for ax, an in enumerate("xyz"):
+            xax = x_dict[ax]
+            yax = y_dict[ax]
+            for wf in ["Density", None]:
+                slc = pf.h.slice(ax, slc_pos, ["Ones", "Density"])
+                yield assert_equal, slc["Ones"].sum(), slc["Ones"].size
+                yield assert_equal, slc["Ones"].min(), 1.0
+                yield assert_equal, slc["Ones"].max(), 1.0
+                yield assert_equal, np.unique(slc["px"]), uc[xax]
+                yield assert_equal, np.unique(slc["py"]), uc[yax]
+                yield assert_equal, np.unique(slc["pdx"]), 1.0/(dims[xax]*2.0)
+                yield assert_equal, np.unique(slc["pdy"]), 1.0/(dims[yax]*2.0)
+                frb = slc.to_frb((1.0,'unitary'), 64)
+                for slc_field in ['Ones', 'Density']:
+                    yield assert_equal, frb[slc_field].info['data_source'], \
+                            slc.__str__()
+                    yield assert_equal, frb[slc_field].info['axis'], \
+                            ax
+                    yield assert_equal, frb[slc_field].info['field'], \
+                            slc_field
+                    yield assert_equal, frb[slc_field].info['units'], \
+                            pf.field_info[slc_field].get_units()
+                    yield assert_equal, frb[slc_field].info['xlim'], \
+                            frb.bounds[:2]
+                    yield assert_equal, frb[slc_field].info['ylim'], \
+                            frb.bounds[2:]
+                    yield assert_equal, frb[slc_field].info['length_to_cm'], \
+                            pf['cm']
+                    yield assert_equal, frb[slc_field].info['center'], \
+                            slc.center
+                    yield assert_equal, frb[slc_field].info['coord'], \
+                            slc_pos
+            # wf == None
+            yield assert_equal, wf, None
+
+


diff -r 8db15e870d38beaf2ee597db41f076dc0b946aa4 -r 7211b81e0ac61db6682f987a2838caa6bb743e16 yt/data_objects/tests/test_streamlines.py
--- /dev/null
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -0,0 +1,22 @@
+from yt.testing import *
+from yt.visualization.api import Streamlines
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+_fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
+
+def test_streamlines():
+    # We decompose in different ways
+    cs = np.mgrid[0.47:0.53:2j,0.47:0.53:2j,0.47:0.53:2j]
+    cs = np.array([a.ravel() for a in cs]).T
+    length = (1.0/128) * 16 # 16 half-widths of a cell
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs, fields = _fields)
+        streams = Streamlines(pf, cs, length=length)
+        streams.integrate_through_volume()
+        for path in (streams.path(i) for i in range(8)):
+            yield assert_rel_equal, path['dts'].sum(), 1.0, 14
+            yield assert_equal, np.all(path['t'] <= (1.0 + 1e-10)), True
+            path["Density"]


diff -r 8db15e870d38beaf2ee597db41f076dc0b946aa4 -r 7211b81e0ac61db6682f987a2838caa6bb743e16 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -29,6 +29,7 @@
     y_dict, \
     axis_names
 from .volume_rendering.api import off_axis_projection
+from yt.data_objects.image_array import ImageArray
 import _MPL
 import numpy as np
 import weakref
@@ -133,8 +134,9 @@
                              self.bounds, int(self.antialias),
                              self._period, int(self.periodic),
                              ).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
     def __setitem__(self, item, val):
         self.data[item] = val
@@ -145,6 +147,28 @@
             if f not in exclude:
                 self[f]
 
+    def _get_info(self, item):
+        info = {}
+        info['data_source'] = self.data_source.__str__()  
+        info['axis'] = self.data_source.axis
+        info['field'] = str(item)
+        info['units'] = self.data_source.pf.field_info[item].get_units()
+        info['xlim'] = self.bounds[:2]
+        info['ylim'] = self.bounds[2:]
+        info['length_to_cm'] = self.data_source.pf['cm']
+        info['projected_units'] = \
+                self.data_source.pf.field_info[item].get_projected_units()
+        info['center'] = self.data_source.center
+        try:
+            info['coord'] = self.data_source.coord
+        except AttributeError:
+            pass
+        try:
+            info['weight_field'] = self.data_source.weight_field
+        except AttributeError:
+            pass
+        return info
+
     def convert_to_pixel(self, coords):
         r"""This function converts coordinates in code-space to pixel-space.
 


diff -r 8db15e870d38beaf2ee597db41f076dc0b946aa4 -r 7211b81e0ac61db6682f987a2838caa6bb743e16 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -118,7 +118,9 @@
         if length is None:
             length = np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
         self.length = length
-        self.steps = int(length/dx)
+        self.steps = int(length/dx)+1
+        # Fix up the dx.
+        self.dx = 1.0*self.length/self.steps
         self.streamlines = np.zeros((self.N,self.steps,3), dtype='float64')
         self.magnitudes = None
         if self.get_magnitude:
@@ -206,5 +208,6 @@
         >>> matplotlib.pylab.semilogy(stream['t'], stream['Density'], '-x')
         
         """
-        return AMRStreamlineBase(self.streamlines[streamline_id], pf=self.pf)
+        return self.pf.h.streamline(self.streamlines[streamline_id],
+                                    length = self.length)

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


