[yt-svn] commit/yt: 5 new changesets

Bitbucket commits-noreply at bitbucket.org
Fri Nov 9 14:48:30 PST 2012


5 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/269d51fb1aa7/
changeset:   269d51fb1aa7
branch:      yt
user:        samskillman
date:        2012-11-09 20:12:10
summary:     Attempting to add local storage of results.
affected #:  1 file

diff -r 5c4bcee82cc5fc730aa3c8d478db82eae801f6a9 -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -36,6 +36,7 @@
 from yt.mods import *
 from yt.data_objects.static_output import StaticOutput
 import cPickle
+import shelve
 
 from yt.utilities.logger import disable_stream_logging
 from yt.utilities.command_line import get_yt_version
@@ -61,6 +62,8 @@
             help="The name we'll call this set of tests")
         parser.add_option("--answer-store", dest="store_results",
             default=False, action="store_true")
+        parser.add_option("--local-store", dest="store_local_results",
+            default=False)
 
     def configure(self, options, conf):
         super(AnswerTesting, self).configure(options, conf)
@@ -82,35 +85,33 @@
             # Now we grab from our S3 store
             if options.compare_name == "latest":
                 options.compare_name = _latest
+        if options.store_local_results:
             AnswerTestingTest.reference_storage = \
-                AnswerTestOpener(options.compare_name)
+                self.storage = AnswerTestLocalStorage(options.compare_name)
+        else:
+            AnswerTestingTest.reference_storage = \
+                self.storage = AnswerTestCloudStorage(options.compare_name)
+
         self.answer_name = options.this_name
         self.store_results = options.store_results
+        self.store_local_results = options.store_local_results
         global run_big_data
         run_big_data = options.big_data
 
     def finalize(self, result):
-        # This is where we dump our result storage up to Amazon, if we are able
-        # to.
         if self.store_results is False: return
-        import boto
-        from boto.s3.key import Key
-        c = boto.connect_s3()
-        bucket = c.get_bucket("yt-answer-tests")
-        for pf_name in self.result_storage:
-            rs = cPickle.dumps(self.result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
-            if tk is not None: tk.delete()
-            k = Key(bucket)
-            k.key = "%s_%s" % (self.answer_name, pf_name)
-            k.set_contents_from_string(rs)
-            k.set_acl("public-read")
+        self.storage.dump(self.result_storage, result)        
 
-class AnswerTestOpener(object):
+class AnswerTestStorage(object):
     def __init__(self, reference_name):
         self.reference_name = reference_name
         self.cache = {}
+    def dump(self, result_storage, result):
+        pass
+    def get(self, pf_name, default=None):
+        pass
 
+class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
         if pf_name in self.cache: return self.cache[pf_name]
         url = _url_path % (self.reference_name, pf_name)
@@ -126,6 +127,49 @@
         self.cache[pf_name] = rv
         return rv
 
+    def dump(self, result_storage, result):
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in result_storage:
+            rs = cPickle.dumps(result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.reference_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.reference_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
+class AnswerTestLocalStorage(AnswerTestStorage):
+    def __init__(self, reference_name):
+        self.reference_name = reference_name
+        self.cache = {}
+
+    def dump(self, result_storage, result):
+        # Store data using shelve
+        if self.store_results is False: return
+        ds = shelve.open(self.reference_name, protocol=-1)
+        for pf_name in result_storage:
+            answer_name = "%s_%s" % (self.reference_name, pf_name)
+            if name in ds:
+                mylog.info("Overwriting %s", answer_name)
+            ds[answer_name] = result_storage[pf_name]
+        ds.close()
+
+    def get(self, pf_name, default=None):
+        # Read data using shelve
+        answer_name = "%s_%s" % (self.reference_name, pf_name)
+        ds = shelve.open(self.reference_name, protocol=-1)
+        try:
+            result = ds[name]
+        except KeyError:
+            result = default
+        ds.close()
+        return result
+
 @contextlib.contextmanager
 def temp_cwd(cwd):
     oldcwd = os.getcwd()
@@ -164,6 +208,7 @@
 
 class AnswerTestingTest(object):
     reference_storage = None
+    result_storage = None
     prefix = ""
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)
@@ -172,7 +217,7 @@
         nv = self.run()
         if self.reference_storage is not None:
             dd = self.reference_storage.get(self.storage_name)
-            if dd is None: raise YTNoOldAnswer()
+            if dd is None: raise YTNoOldAnswer(self.storage_name)
             ov = dd[self.description]
             self.compare(nv, ov)
         else:


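For context on the new local store: shelve gives a persistent, dictionary-like object on disk, which is all dump() and get() need. A minimal sketch of the round trip, with an illustrative file name and payload (not from the commit):

    import shelve

    result_storage = {"DD0010": {"field_values": [1.0, 2.0]}}
    reference_name = "local-answers"

    # dump(): persist each parameter file's results under a composite key
    ds = shelve.open(reference_name, protocol=-1)
    for pf_name in result_storage:
        ds["%s_%s" % (reference_name, pf_name)] = result_storage[pf_name]
    ds.close()

    # get(): reopen the shelf and read an answer back, defaulting on a miss
    ds = shelve.open(reference_name, protocol=-1)
    try:
        result = ds["%s_%s" % (reference_name, "DD0010")]
    except KeyError:
        result = None
    ds.close()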

https://bitbucket.org/yt_analysis/yt/changeset/578f64273856/
changeset:   578f64273856
branch:      yt
user:        samskillman
date:        2012-11-09 20:12:32
summary:     Merging
affected #:  14 files

diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -84,6 +84,7 @@
     ValidateSpatial, \
     ValidateGridType, \
     add_field, \
+    add_grad, \
     derived_field
 
 from particle_trajectories import \


diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -58,6 +58,66 @@
                 return function
             return create_function
         self[name] = DerivedField(name, function, **kwargs)
+        
+    def add_grad(self, field, **kwargs):
+        """
+        Creates the partial derivative of a given field. This function will
+        autogenerate the names of the gradient fields.
+
+        """
+        sl = slice(2,None,None)
+        sr = slice(None,-2,None)
+        
+        def _gradx(f, data):
+            grad = data[field][sl,1:-1,1:-1] - data[field][sr,1:-1,1:-1]
+            grad /= 2.0*data["dx"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _grady(f, data):
+            grad = data[field][1:-1,sl,1:-1] - data[field][1:-1,sr,1:-1]
+            grad /= 2.0*data["dy"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _gradz(f, data):
+            grad = data[field][1:-1,1:-1,sl] - data[field][1:-1,1:-1,sr]
+            grad /= 2.0*data["dz"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+        
+        d_kwargs = kwargs.copy()
+        if "display_name" in kwargs: del d_kwargs["display_name"]
+        
+        for ax in "xyz":
+            if "display_name" in kwargs:
+                disp_name = r"%s\_%s" % (kwargs["display_name"], ax)
+            else:
+                disp_name = r"\partial %s/\partial %s" % (field, ax)
+            name = "Grad_%s_%s" % (field, ax)
+            self[name] = DerivedField(name, function=eval('_grad%s' % ax),
+                         take_log=False, validators=[ValidateSpatial(1,[field])],
+                         display_name = disp_name, **d_kwargs)
+        
+        def _grad(f, data) :
+            a = np.power(data["Grad_%s_x" % field],2)
+            b = np.power(data["Grad_%s_y" % field],2)
+            c = np.power(data["Grad_%s_z" % field],2)
+            norm = np.sqrt(a+b+c)
+            return norm
+
+        if "display_name" in kwargs:
+            disp_name = kwargs["display_name"]
+        else:
+            disp_name = r"\Vert\nabla %s\Vert" % (field)   
+        name = "Grad_%s" % field           
+        self[name] = DerivedField(name, function=_grad, take_log=False,
+                                  display_name = disp_name, **d_kwargs)
+        mylog.info("Added new fields: Grad_%s_x, Grad_%s_y, Grad_%s_z, Grad_%s" \
+                   % (field, field, field, field))
 
     def has_key(self, key):
         # This gets used a lot
@@ -96,6 +156,7 @@
 
 FieldInfo = FieldInfoContainer()
 add_field = FieldInfo.add_field
+add_grad = FieldInfo.add_grad
 
 def derived_field(**kwargs):
     def inner_decorator(function):

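For context, the new add_grad entry point registers four derived fields per input field: Grad_<field>_x, Grad_<field>_y, Grad_<field>_z, and the magnitude Grad_<field>. A hedged usage sketch (the dataset path is illustrative):

>>> from yt.mods import *
>>> add_grad("Density")          # registers the four gradient fields
>>> pf = load("DD0010/DD0010")
>>> dd = pf.h.all_data()
>>> dd["Grad_Density_x"]         # partial derivative of Density along x
>>> dd["Grad_Density"]           # Euclidean norm of the gradient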

diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -382,17 +382,19 @@
         """
         Prints out (stdout) relevant information about the simulation
         """
-        header = "%3s\t%6s\t%14s" % ("level","# grids", "# cells")
+        header = "%3s\t%6s\t%14s\t%14s" % ("level","# grids", "# cells",
+                                           "# cells^3")
         print header
         print "%s" % (len(header.expandtabs())*"-")
         for level in xrange(MAXLEVEL):
             if (self.level_stats['numgrids'][level]) == 0:
                 break
-            print "% 3i\t% 6i\t% 14i" % \
+            print "% 3i\t% 6i\t% 14i\t% 14i" % \
                   (level, self.level_stats['numgrids'][level],
-                   self.level_stats['numcells'][level])
+                   self.level_stats['numcells'][level],
+                   self.level_stats['numcells'][level]**(1./3))
             dx = self.select_grids(level)[0].dds[0]
-        print "-" * 28
+        print "-" * 46
         print "   \t% 6i\t% 14i" % (self.level_stats['numgrids'].sum(), self.level_stats['numcells'].sum())
         print "\n"
         try:


diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -30,7 +30,8 @@
       StreamStaticOutput, \
       StreamHandler, \
       load_uniform_grid, \
-      load_amr_grids
+      load_amr_grids, \
+      refine_amr
 
 from .fields import \
       KnownStreamFields, \


diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -44,6 +44,8 @@
     decompose_array, get_psize
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
+from yt.utilities.flagging_methods import \
+    FlaggingGrid
 
 from .fields import \
     StreamFieldInfo, \
@@ -495,3 +497,68 @@
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf
+
+def refine_amr(base_pf, refinement_criteria, fluid_operators, max_level,
+               callback = None):
+    r"""Given a base parameter file, repeatedly apply refinement criteria and
+    fluid operators until a maximum level is reached.
+
+    Parameters
+    ----------
+    base_pf : StaticOutput
+        This is any static output.  It can also be a stream static output, for
+        instance as returned by load_uniform_grid.
+    refinement_criteria : list of :class:`~yt.utilities.flagging_methods.FlaggingMethod`

+        These criteria will be applied in sequence to identify cells that need
+        to be refined.
+    fluid_operators : list of :class:`~yt.utilities.initial_conditions.FluidOperator`
+        These fluid operators will be applied in sequence to all resulting
+        grids.
+    max_level : int
+        The maximum level to which the data will be refined
+    callback : function, optional
+        A function that will be called at the beginning of each refinement
+        cycle, with the current parameter file.
+
+    Examples
+    --------
+    >>> domain_dims = (32, 32, 32)
+    >>> data = np.zeros(domain_dims) + 0.25
+    >>> fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})]
+    >>> rc = [fm.flagging_method_registry["overdensity"](8.0)]
+    >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
+    >>> pf = refine_amr(ug, rc, fo, 5)
+    """
+    last_gc = base_pf.h.num_grids
+    cur_gc = -1
+    pf = base_pf    
+    while pf.h.max_level < max_level and last_gc != cur_gc:
+        mylog.info("Refining another level.  Current max level: %s",
+                  pf.h.max_level)
+        last_gc = pf.h.grids.size
+        for m in fluid_operators: m.apply(pf)
+        if callback is not None: callback(pf)
+        grid_data = []
+        for g in pf.h.grids:
+            gd = dict( left_edge = g.LeftEdge,
+                       right_edge = g.RightEdge,
+                       level = g.Level,
+                       dimensions = g.ActiveDimensions )
+            for field in pf.h.field_list:
+                gd[field] = g[field]
+            grid_data.append(gd)
+            if g.Level < pf.h.max_level: continue
+            fg = FlaggingGrid(g, refinement_criteria)
+            nsg = fg.find_subgrids()
+            for sg in nsg:
+                LE = sg.left_index * g.dds
+                dims = sg.dimensions * pf.refine_by
+                grid = pf.h.smoothed_covering_grid(g.Level + 1, LE, dims)
+                gd = dict(left_edge = LE, right_edge = grid.right_edge,
+                          level = g.Level + 1, dimensions = dims)
+                for field in pf.h.field_list:
+                    gd[field] = grid[field]
+                grid_data.append(gd)
+        pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0)
+        cur_gc = pf.h.num_grids
+    return pf

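The docstring example above references the aliases ic and fm without imports; a self-contained version, assuming the natural module aliases for where CoredSphere and the flagging registry live in this changeset:

    import numpy as np
    import yt.utilities.initial_conditions as ic
    import yt.utilities.flagging_methods as fm
    from yt.frontends.stream.api import load_uniform_grid, refine_amr

    domain_dims = (32, 32, 32)
    data = np.zeros(domain_dims) + 0.25
    fo = [ic.CoredSphere(0.05, 0.3, [0.7, 0.4, 0.75],
                         {"Density": (0.25, 100.0)})]
    rc = [fm.flagging_method_registry["overdensity"](8.0)]
    ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
    pf = refine_amr(ug, rc, fo, 5)   # refine until level 5 or no new grids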

diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -58,7 +58,7 @@
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     data_object_registry, \
-    derived_field, add_field, FieldInfo, \
+    derived_field, add_field, add_grad, FieldInfo, \
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \


diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -498,6 +498,22 @@
                         pf_fn, axis, field, weight_field,
                         ds)
 
+def standard_small_simulation(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [None]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)
+                    
 class ShockTubeTest(object):
     def __init__(self, data_file, solution_file, fields, 
                  left_edges, right_edges, rtol, atol):

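standard_small_simulation is a nose test generator; a hedged sketch of wiring it into a frontend test module (the dataset path and field names are illustrative):

    from yt.utilities.answer_testing.framework import \
        standard_small_simulation

    _fields = ("Density", "Temperature")

    def test_small_output():
        # nose runs each yielded AnswerTestingTest as a separate test case
        for test in standard_small_simulation("DD0010/DD0010", _fields):
            yield test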

diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -172,3 +172,11 @@
 
     def __str__(self):
         return "Must have A>=B>=C"
+
+class EnzoTestOutputFileNonExistent(YTException):
+    def __init__(self, testname):
+        self.testname = testname
+
+    def __str__(self):
+        return "Enzo test output file (OutputLog) not generated for: " + \
+            "'%s'" % (self.testname) + ".\nTest did not complete."


diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -24,15 +24,10 @@
 """
 
 import numpy as np # For modern purposes
+from yt.utilities.lib import grow_flagging_field
 
 flagging_method_registry = {}
 
-def flag_cells(grid, methods):
-    flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
-    for method in methods:
-        flagged |= method(grid)
-    return flagged
-
 class FlaggingMethod(object):
     _skip_add = False
     class __metaclass__(type):
@@ -46,6 +41,144 @@
     def __init__(self, over_density):
         self.over_density = over_density
 
-    def __call__(self, pf, grid):
-        rho = grid["Density"] / (pf.refine_by**grid.Level)
+    def __call__(self, grid):
+        rho = grid["Density"] / (grid.pf.refine_by**grid.Level)
         return (rho > self.over_density)
+
+class FlaggingGrid(object):
+    def __init__(self, grid, methods):
+        self.grid = grid
+        flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
+        for method in methods:
+            flagged |= method(self.grid)
+        self.flagged = grow_flagging_field(flagged)
+        self.subgrids = []
+        self.left_index = grid.get_global_startindex()
+        self.dimensions = grid.ActiveDimensions.copy()
+
+    def find_subgrids(self):
+        if not np.any(self.flagged): return []
+        psg = ProtoSubgrid(self.flagged, self.left_index, self.dimensions)
+        sgl = [psg]
+        index = 0
+        while index < len(sgl):
+            psg = sgl[index]
+            psg.shrink()
+            if psg.dimensions.prod() == 0:
+                sgl[index] = None
+                continue
+            while not psg.acceptable:
+                new_psgs = []
+                for i, dim in enumerate(np.argsort(psg.dimensions)[::-1]):
+                    new_psgs = psg.find_by_zero_signature(dim)
+                    if len(new_psgs) > 1:
+                        break
+                if len(new_psgs) <= 1:
+                    new_psgs = psg.find_by_second_derivative()
+                psg = new_psgs[0]
+                sgl[index] = psg 
+                sgl.extend(new_psgs[1:])
+                psg.shrink()
+            index += 1
+        return sgl
+
+
+# Much or most of this is directly translated from Enzo
+class ProtoSubgrid(object):
+
+    def __init__(self, flagged_base, left_index, dimensions, offset = (0,0,0)):
+        self.left_index = left_index.copy()
+        self.dimensions = dimensions.copy()
+        self.flagged = flagged_base[offset[0]:offset[0]+dimensions[0],
+                                    offset[1]:offset[1]+dimensions[1],
+                                    offset[2]:offset[2]+dimensions[2]]
+        self.compute_signatures()
+
+    def compute_signatures(self):
+        self.sigs = []
+        for dim in range(3):
+            d1 = (dim + 1) % 3
+            d2 = (dim == 0)
+            self.sigs.append(self.flagged.sum(axis=d1).sum(axis=d2))
+
+    @property
+    def acceptable(self):
+        return float(self.flagged.sum()) / self.flagged.size > 0.2
+
+    def shrink(self):
+        new_ind = []
+        for dim in range(3):
+            sig = self.sigs[dim]
+            new_start = 0
+            while sig[new_start] == 0:
+                new_start += 1
+            new_end = sig.size 
+            while sig[new_end - 1] == 0:
+                new_end -= 1
+            self.dimensions[dim] = new_end - new_start
+            self.left_index[dim] += new_start
+            new_ind.append((new_start, new_end))
+        self.flagged = self.flagged[new_ind[0][0]:new_ind[0][1],
+                                    new_ind[1][0]:new_ind[1][1],
+                                    new_ind[2][0]:new_ind[2][1]]
+        self.compute_signatures()
+
+    def find_by_zero_signature(self, dim):
+        sig = self.sigs[dim]
+        grid_ends = np.zeros((sig.size, 2))
+        ng = 0
+        i = 0
+        while i < sig.size:
+            if sig[i] != 0:
+                grid_ends[ng, 0] = i
+                while i < sig.size and sig[i] != 0:
+                    i += 1
+                grid_ends[ng, 1] = i - 1
+                ng += 1
+            i += 1
+        new_grids = []
+        for si, ei in grid_ends[:ng,:]:
+            li = self.left_index.copy()
+            dims = self.dimensions.copy()
+            li[dim] += si
+            dims[dim] = ei - si
+            offset = [0,0,0]
+            offset[dim] = si
+            new_grids.append(ProtoSubgrid(self.flagged, li, dims, offset))
+        return new_grids
+
+    def find_by_second_derivative(self):
+        max_strength = 0
+        max_axis = -1
+        max_ind = -1
+        for dim in range(3):
+            sig = self.sigs[dim]
+            sd = sig[:-2] - 2.0*sig[1:-1] + sig[2:]
+            grid_ends = np.zeros((sig.size, 2))
+            ng = 0
+            center = int((self.flagged.shape[dim] - 1) / 2)
+            strength = zero_strength = 0
+            for i in range(1, sig.size-2):
+                # Note that sd is offset by one
+                if sd[i-1] * sd[i] < 0:
+                    strength = np.abs(sd[i-1] - sd[i])
+                    if strength > zero_strength or \
+                       (strength == zero_strength and np.abs(center - i) < np.abs(zero_cross -i )):
+                        zero_strength = strength
+                        zero_cross = i
+            if zero_strength > max_strength:
+                max_axis = dim
+                max_ind = zero_cross
+        dims = self.dimensions.copy()
+        li = self.left_index.copy()
+        dims[max_axis] = zero_cross
+        psg1 = ProtoSubgrid(self.flagged, li, dims)
+        li[max_axis] += zero_cross
+        dims[max_axis] = self.dimensions[max_axis] - zero_cross
+        offset = np.zeros(3)
+        offset[max_axis] = zero_cross
+        psg2 = ProtoSubgrid(self.flagged, li, dims, offset)
+        return [psg1, psg2]
+
+    def __str__(self):
+        return "LI: (%s) DIMS: (%s)" % (self.left_index, self.dimensions)

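A hedged sketch of how these classes are driven, mirroring what refine_amr does internally; the uniform-grid setup and the 0.75 threshold are illustrative:

    import numpy as np
    from yt.frontends.stream.api import load_uniform_grid
    from yt.utilities.flagging_methods import \
        FlaggingGrid, flagging_method_registry

    data = np.random.random((32, 32, 32))
    pf = load_uniform_grid({'Density': data}, (32, 32, 32), 1.0)
    methods = [flagging_method_registry["overdensity"](0.75)]
    for g in pf.h.grids:
        fg = FlaggingGrid(g, methods)     # flag cells, grown by one zone
        for sg in fg.find_subgrids():     # signature-based subgrid splitting
            if sg is not None:            # find_subgrids can leave None slots
                print sg                  # "LI: (...) DIMS: (...)"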

diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/utilities/initial_conditions.py
--- /dev/null
+++ b/yt/utilities/initial_conditions.py
@@ -0,0 +1,80 @@
+"""
+Painting zones in a grid
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+class FluidOperator(object):
+    def apply(self, pf):
+        for g in pf.h.grids: self(g)
+
+class TopHatSphere(FluidOperator):
+    def __init__(self, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.sqrt(r, r)
+        ind = (r <= self.radius)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, val in self.fields.iteritems():
+            grid[field][ind] = val
+
+class CoredSphere(FluidOperator):
+    def __init__(self, core_radius, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        self.core_radius = core_radius
+
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        r2 = self.radius**2
+        cr2 = self.core_radius**2
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.maximum(r, cr2, r)
+        ind = (r <= r2)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, (outer_val, inner_val) in self.fields.iteritems():
+            val = ((r[ind] - cr2) / (r2 - cr2))**0.5 * (outer_val - inner_val)
+            grid[field][ind] = val + inner_val
+
+class RandomFluctuation(FluidOperator):
+    def __init__(self, fields):
+        self.fields = fields
+
+    def __call__(self, grid, sub_select = None):
+        if sub_select is None:
+            sub_select = Ellipsis
+        for field, mag in self.fields.iteritems():
+            vals = grid[field][sub_select]
+            rc = 1.0 + (np.random.random(vals.shape) - 0.5) * mag
+            grid[field][sub_select] *= rc

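A hedged usage sketch for the fluid operators, painted onto an illustrative uniform grid (all values are made up):

    import numpy as np
    from yt.frontends.stream.api import load_uniform_grid
    from yt.utilities.initial_conditions import \
        TopHatSphere, RandomFluctuation

    data = np.zeros((32, 32, 32)) + 0.25
    pf = load_uniform_grid({'Density': data}, (32, 32, 32), 1.0)
    # Set Density to 10.0 inside a sphere of radius 0.1 at the center
    TopHatSphere(0.1, [0.5, 0.5, 0.5], {"Density": 10.0}).apply(pf)
    # Multiply Density by 1 +/- 0.005 everywhere
    RandomFluctuation({"Density": 0.01}).apply(pf)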

diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -291,3 +291,25 @@
     # Return out unique values
     return best_dim, split, less_ids.view("bool"), greater_ids.view("bool")
 
+
+def grow_flagging_field(oofield):
+    cdef np.ndarray[np.uint8_t, ndim=3] ofield = oofield.astype("uint8")
+    cdef np.ndarray[np.uint8_t, ndim=3] nfield
+    nfield = np.zeros_like(ofield)
+    cdef int i, j, k, ni, nj, nk
+    cdef int oi, oj, ok
+    for ni in range(ofield.shape[0]):
+        for nj in range(ofield.shape[1]):
+            for nk in range(ofield.shape[2]):
+                for oi in range(3):
+                    i = ni + (oi - 1)
+                    if i < 0 or i >= ofield.shape[0]: continue
+                    for oj in range(3):
+                        j = nj + (oj - 1)
+                        if j < 0 or j >= ofield.shape[1]: continue
+                        for ok in range(3):
+                            k = nk + (ok - 1)
+                            if k < 0 or k >= ofield.shape[2]: continue
+                            if ofield[i, j, k] == 1:
+                                nfield[ni, nj, nk] = 1
+    return nfield.astype("bool")

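For reference, a pure-NumPy equivalent of the Cython routine above: a cell ends up flagged if anything in its 3x3x3 neighborhood was flagged, with neighborhoods clipped (not wrapped) at the domain edges.

    import numpy as np

    def grow_flagging_field_np(ofield):
        f = np.asarray(ofield, dtype="bool")
        # Pad with False so edge neighborhoods clip instead of wrapping
        p = np.zeros(np.array(f.shape) + 2, dtype="bool")
        p[1:-1, 1:-1, 1:-1] = f
        n = np.zeros_like(f)
        for oi in range(3):
            for oj in range(3):
                for ok in range(3):
                    n |= p[oi:oi + f.shape[0],
                           oj:oj + f.shape[1],
                           ok:ok + f.shape[2]]
        return n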

diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/utilities/tests/test_flagging_methods.py
--- a/yt/utilities/tests/test_flagging_methods.py
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -9,4 +9,4 @@
 def test_over_density():
     od_flag = flagging_method_registry["overdensity"](0.75) 
     criterion = (pf.h.grids[0]["Density"] > 0.75)
-    assert( np.all( od_flag(pf, pf.h.grids[0]) == criterion) )
+    assert( np.all( od_flag(pf.h.grids[0]) == criterion) )


diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -633,6 +633,9 @@
         y0, y1 = plot.ylim
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
+
+        extent = [xx0,xx1,yy0,yy1]
+
         plot._axes.hold(True)
 
         px_index = x_dict[plot.data.axis]
@@ -662,7 +665,7 @@
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, len(self.clumps)+1,
-                                     **self.plot_args)
+                                     extent=extent,**self.plot_args)
         plot._axes.hold(False)
 
 class ArrowCallback(PlotCallback):


diff -r 269d51fb1aa78b291b05fe2a144ccf0bcffc3956 -r 578f64273856f2e6dba89a1c393d375875347313 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -213,7 +213,7 @@
     _vector_info = None
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
-                 periodic=True, origin='center-window', oblique=False):
+                 periodic=True, origin='center-window', oblique=False, fontsize=15):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
         
@@ -257,6 +257,7 @@
         self.antialias = True
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
+        self.fontsize = fontsize
         if self.data_source.center is not None and oblique == False:
             center = [self.data_source.center[i] for i in range(len(self.data_source.center)) if i != self.data_source.axis]
             self.set_center(center)
@@ -802,8 +803,10 @@
                 labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
                           r'$\rm{Image\/y'+axes_unit_label+'}$']
 
-            self.plots[f].axes.set_xlabel(labels[0])
-            self.plots[f].axes.set_ylabel(labels[1])
+            self.plots[f].axes.set_xlabel(labels[0],fontsize=self.fontsize)
+            self.plots[f].axes.set_ylabel(labels[1],fontsize=self.fontsize)
+
+            self.plots[f].axes.tick_params(labelsize=self.fontsize)
 
             field_name = self.data_source.pf.field_info[f].display_name
 
@@ -827,7 +830,9 @@
                     raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
                 label = field_name+r'$\/\/('+md['units']+r')$'
 
-            self.plots[f].cb.set_label(label)
+            self.plots[f].cb.set_label(label,fontsize=self.fontsize)
+
+            self.plots[f].cb.ax.tick_params(labelsize=self.fontsize)
 
             self.run_callbacks(f)
 
@@ -954,7 +959,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 origin='center-window'):
+                 origin='center-window', fontsize=15):
         r"""Creates a slice plot from a parameter file
         
         Given a pf object, an axis to slice along, and a field name
@@ -1010,6 +1015,8 @@
              to the bottom-left hand corner of the simulation domain, 'center-domain',
              corresponding the center of the simulation domain, or 'center-window' for 
              the center of the plot window.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
              
         Examples
         --------
@@ -1036,7 +1043,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window'):
+                 weight_field=None, max_level=None, origin='center-window', fontsize=15):
         r"""Creates a projection plot from a parameter file
         
         Given a pf object, an axis to project along, and a field name
@@ -1096,6 +1103,8 @@
             The name of the weighting field.  Set to None for no weight.
         max_level: int
             The maximum level to project to.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         
         Examples
         --------
@@ -1121,7 +1130,7 @@
     _frb_generator = ObliqueFixedResolutionBuffer
 
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
-                 axes_unit=None, north_vector=None):
+                 axes_unit=None, north_vector=None, fontsize=15):
         r"""Creates an off axis slice plot from a parameter file
 
         Given a pf object, a normal vector defining a slicing plane, and
@@ -1158,7 +1167,8 @@
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
             set, an arbitrary grid-aligned north-vector is chosen.
-
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         """
         (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
         cutting = pf.h.cutting(normal,center,fields=fields,north_vector=north_vector)
@@ -1197,7 +1207,7 @@
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
                  depth=(1,'unitary'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 
-                 le=None, re=None, interpolated=False):
+                 le=None, re=None, interpolated=False, fontsize=15):
         r"""Creates an off axis projection plot from a parameter file
 
         Given a pf object, a normal vector to project along, and


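A hedged example of the new fontsize keyword, assuming the usual SlicePlot entry point exported by yt.mods (the dataset path is illustrative):

>>> from yt.mods import *
>>> pf = load("DD0010/DD0010")
>>> p = SlicePlot(pf, 'x', 'Density', fontsize=20)
>>> p.save()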

https://bitbucket.org/yt_analysis/yt/changeset/27a4ab8ac1cc/
changeset:   27a4ab8ac1cc
branch:      yt
user:        samskillman
date:        2012-11-09 23:27:20
summary:     Updating framework.py to allow for local storage.  Now controlled by --local-store=[False/True]
affected #:  1 file

diff -r 578f64273856f2e6dba89a1c393d375875347313 -r 27a4ab8ac1ccbac0fe5053128a490e4a75f15bd9 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -81,31 +81,36 @@
             self.result_storage = defaultdict(dict)
         if options.compare_name == "SKIP":
             options.compare_name = None
-        if options.compare_name is not None:
-            # Now we grab from our S3 store
-            if options.compare_name == "latest":
-                options.compare_name = _latest
+        elif options.compare_name == "latest":
+            options.compare_name = _latest
+
+        # We only either store or test.
         if options.store_local_results:
             AnswerTestingTest.reference_storage = \
-                self.storage = AnswerTestLocalStorage(options.compare_name)
+                self.storage = \
+                    AnswerTestLocalStorage("%s/%s" % \
+                        (os.path.realpath(options.output_dir), options.compare_name), not options.store_results)
         else:
             AnswerTestingTest.reference_storage = \
-                self.storage = AnswerTestCloudStorage(options.compare_name)
+                self.storage = AnswerTestCloudStorage(options.compare_name, not options.store_results)
 
+        self.storage_compare_name = options.compare_name
+        self.output_dir = options.output_dir
         self.answer_name = options.this_name
         self.store_results = options.store_results
         self.store_local_results = options.store_local_results
         global run_big_data
         run_big_data = options.big_data
 
-    def finalize(self, result):
+    def finalize(self):
         if self.store_results is False: return
-        self.storage.dump(self.result_storage, result)        
+        self.storage.dump(self.result_storage)        
 
 class AnswerTestStorage(object):
-    def __init__(self, reference_name):
+    def __init__(self, reference_name, read=True):
         self.reference_name = reference_name
         self.cache = {}
+        self.read = read
     def dump(self, result_storage, result):
         pass
     def get(self, pf_name, default=None):
@@ -113,6 +118,7 @@
 
 class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
+        if not self.read: return default
         if pf_name in self.cache: return self.cache[pf_name]
         url = _url_path % (self.reference_name, pf_name)
         try:
@@ -127,7 +133,8 @@
         self.cache[pf_name] = rv
         return rv
 
-    def dump(self, result_storage, result):
+    def dump(self, result_storage):
+        if self.read: return
         # This is where we dump our result storage up to Amazon, if we are able
         # to.
         import boto
@@ -144,27 +151,24 @@
             k.set_acl("public-read")
 
 class AnswerTestLocalStorage(AnswerTestStorage):
-    def __init__(self, reference_name):
-        self.reference_name = reference_name
-        self.cache = {}
-
-    def dump(self, result_storage, result):
+    def dump(self, result_storage):
+        if self.read: return 
         # Store data using shelve
-        if self.store_results is False: return
         ds = shelve.open(self.reference_name, protocol=-1)
         for pf_name in result_storage:
-            answer_name = "%s_%s" % (self.reference_name, pf_name)
+            answer_name = "%s" % pf_name
             if name in ds:
                 mylog.info("Overwriting %s", answer_name)
             ds[answer_name] = result_storage[pf_name]
         ds.close()
 
     def get(self, pf_name, default=None):
+        if not self.read: return default
         # Read data using shelve
-        answer_name = "%s_%s" % (self.reference_name, pf_name)
+        answer_name = "%s" % pf_name
         ds = shelve.open(self.reference_name, protocol=-1)
         try:
-            result = ds[name]
+            result = ds[answer_name]
         except KeyError:
             result = default
         ds.close()
@@ -215,7 +219,8 @@
 
     def __call__(self):
         nv = self.run()
-        if self.reference_storage is not None:
+        if self.reference_storage is not None and \
+           self.reference_storage.read:
             dd = self.reference_storage.get(self.storage_name)
             if dd is None: raise YTNoOldAnswer(self.storage_name)
             ov = dd[self.description]


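With the new read flag, a storage object acts either as a writer (store runs) or a reader (comparison runs), never both. A hedged sketch; the path and dataset key are illustrative:

    from yt.utilities.answer_testing.framework import AnswerTestLocalStorage

    writer = AnswerTestLocalStorage("/tmp/answers/gold-001", read=False)
    print writer.get("DD0010")    # None: a writer never reads answers

    reader = AnswerTestLocalStorage("/tmp/answers/gold-001", read=True)
    reader.dump({})               # no-op: a reader never writes answers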

https://bitbucket.org/yt_analysis/yt/changeset/4ea60322c0c4/
changeset:   4ea60322c0c4
branch:      yt
user:        samskillman
date:        2012-11-09 23:47:06
summary:     Quick fix.  Now both local and cloud work.
affected #:  1 file

diff -r 27a4ab8ac1ccbac0fe5053128a490e4a75f15bd9 -r 4ea60322c0c434459a522973f9f2be732ed66555 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -85,7 +85,7 @@
             options.compare_name = _latest
 
         # We only either store or test.
-        if options.store_local_results:
+        if options.store_local_results == 'True':
             AnswerTestingTest.reference_storage = \
                 self.storage = \
                     AnswerTestLocalStorage("%s/%s" % \
@@ -219,8 +219,7 @@
 
     def __call__(self):
         nv = self.run()
-        if self.reference_storage is not None and \
-           self.reference_storage.read:
+        if self.reference_storage is not None and self.reference_storage.read:
             dd = self.reference_storage.get(self.storage_name)
             if dd is None: raise YTNoOldAnswer(self.storage_name)
             ov = dd[self.description]

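The comparison against the literal string 'True' above is forced by optparse: an option declared without action="store_true" delivers its value as a string, and any non-empty string is truthy. A minimal illustration:

    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option("--local-store", dest="store_local_results",
                      default=False)
    opts, _ = parser.parse_args(["--local-store=False"])
    # The value is the string 'False', which is truthy, so a bare
    # `if opts.store_local_results:` would wrongly enable local storage.
    assert opts.store_local_results == 'False'
    assert bool(opts.store_local_results)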


https://bitbucket.org/yt_analysis/yt/changeset/4d55b13a56ab/
changeset:   4d55b13a56ab
branch:      yt
user:        MatthewTurk
date:        2012-11-09 23:48:29
summary:     Merged in samskillman/yt (pull request #332)
affected #:  1 file

diff -r 5f6163709ed86827de0b2db599113dba8f1b1e96 -r 4d55b13a56ab0a70c0c5922f4e392e1edb9546ea yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -36,6 +36,7 @@
 from yt.mods import *
 from yt.data_objects.static_output import StaticOutput
 import cPickle
+import shelve
 
 from yt.utilities.logger import disable_stream_logging
 from yt.utilities.command_line import get_yt_version
@@ -61,6 +62,8 @@
             help="The name we'll call this set of tests")
         parser.add_option("--answer-store", dest="store_results",
             default=False, action="store_true")
+        parser.add_option("--local-store", dest="store_local_results",
+            default=False)
 
     def configure(self, options, conf):
         super(AnswerTesting, self).configure(options, conf)
@@ -78,40 +81,44 @@
             self.result_storage = defaultdict(dict)
         if options.compare_name == "SKIP":
             options.compare_name = None
-        if options.compare_name is not None:
-            # Now we grab from our S3 store
-            if options.compare_name == "latest":
-                options.compare_name = _latest
+        elif options.compare_name == "latest":
+            options.compare_name = _latest
+
+        # We only either store or test.
+        if options.store_local_results == 'True':
             AnswerTestingTest.reference_storage = \
-                AnswerTestOpener(options.compare_name)
+                self.storage = \
+                    AnswerTestLocalStorage("%s/%s" % \
+                        (os.path.realpath(options.output_dir), options.compare_name), not options.store_results)
+        else:
+            AnswerTestingTest.reference_storage = \
+                self.storage = AnswerTestCloudStorage(options.compare_name, not options.store_results)
+
+        self.storage_compare_name = options.compare_name
+        self.output_dir = options.output_dir
         self.answer_name = options.this_name
         self.store_results = options.store_results
+        self.store_local_results = options.store_local_results
         global run_big_data
         run_big_data = options.big_data
 
-    def finalize(self, result):
-        # This is where we dump our result storage up to Amazon, if we are able
-        # to.
+    def finalize(self):
         if self.store_results is False: return
-        import boto
-        from boto.s3.key import Key
-        c = boto.connect_s3()
-        bucket = c.get_bucket("yt-answer-tests")
-        for pf_name in self.result_storage:
-            rs = cPickle.dumps(self.result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
-            if tk is not None: tk.delete()
-            k = Key(bucket)
-            k.key = "%s_%s" % (self.answer_name, pf_name)
-            k.set_contents_from_string(rs)
-            k.set_acl("public-read")
+        self.storage.dump(self.result_storage)        
 
-class AnswerTestOpener(object):
-    def __init__(self, reference_name):
+class AnswerTestStorage(object):
+    def __init__(self, reference_name, read=True):
         self.reference_name = reference_name
         self.cache = {}
+        self.read = read
+    def dump(self, result_storage, result):
+        pass
+    def get(self, pf_name, default=None):
+        pass
 
+class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
+        if not self.read: return default
         if pf_name in self.cache: return self.cache[pf_name]
         url = _url_path % (self.reference_name, pf_name)
         try:
@@ -126,6 +133,47 @@
         self.cache[pf_name] = rv
         return rv
 
+    def dump(self, result_storage):
+        if self.read: return
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in result_storage:
+            rs = cPickle.dumps(result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.reference_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.reference_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
+class AnswerTestLocalStorage(AnswerTestStorage):
+    def dump(self, result_storage):
+        if self.read: return 
+        # Store data using shelve
+        ds = shelve.open(self.reference_name, protocol=-1)
+        for pf_name in result_storage:
+            answer_name = "%s" % pf_name
+            if name in ds:
+                mylog.info("Overwriting %s", answer_name)
+            ds[answer_name] = result_storage[pf_name]
+        ds.close()
+
+    def get(self, pf_name, default=None):
+        if not self.read: return default
+        # Read data using shelve
+        answer_name = "%s" % pf_name
+        ds = shelve.open(self.reference_name, protocol=-1)
+        try:
+            result = ds[answer_name]
+        except KeyError:
+            result = default
+        ds.close()
+        return result
+
 @contextlib.contextmanager
 def temp_cwd(cwd):
     oldcwd = os.getcwd()
@@ -164,15 +212,16 @@
 
 class AnswerTestingTest(object):
     reference_storage = None
+    result_storage = None
     prefix = ""
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)
 
     def __call__(self):
         nv = self.run()
-        if self.reference_storage is not None:
+        if self.reference_storage is not None and self.reference_storage.read:
             dd = self.reference_storage.get(self.storage_name)
-            if dd is None: raise YTNoOldAnswer()
+            if dd is None: raise YTNoOldAnswer(self.storage_name)
             ov = dd[self.description]
             self.compare(nv, ov)
         else:

Repository URL: https://bitbucket.org/yt_analysis/yt/
