[yt-svn] commit/yt-3.0: 5 new changesets

commits-noreply at bitbucket.org
Mon Sep 9 13:21:35 PDT 2013


5 new commits in yt-3.0:

https://bitbucket.org/yt_analysis/yt-3.0/commits/6cdf51a81849/
Changeset:   6cdf51a81849
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-23 20:56:47
Summary:     Initial creation of ChunkDataCache object.
Affected #:  1 file

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 6cdf51a81849b0a3179f0200df58182c2e72c6cf yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -607,3 +607,31 @@
             cdt[ind:ind+gdt.size] = gdt
             ind += gt.size
         return cdt
+
+class ChunkDataCache(object):
+    def __init__(self, base_iter, preload_fields, geometry_handler,
+                 max_length = 256):
+        # At some point, max_length should instead become a heuristic function,
+        # potentially looking at estimated memory usage.  Note that this never
+        # initializes the iterator; it assumes the iterator is already created,
+        # and it calls next() on it.
+        self.base_iter = base_iter
+        self.queue = []
+        self.max_length = max_length
+        self.preload_fields = preload_fields
+        self.geometry_handler = geometry_handler
+
+    def __iter__(self):
+        return self
+    
+    def next(self):
+        if len(self.queue) == 0:
+            for i in range(self.max_length):
+                try:
+                    self.queue.append(self.base_iter.next())
+                except StopIteration:
+                    break
+        if len(self.queue) == 0:
+            # If it's still zero ...
+            raise StopIteration
+        return self.queue.pop(0)
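
As a quick illustration of the behavior this first version implements:
the cache batches its underlying iterator, refilling an internal queue
with up to max_length items at a time and draining it one item per
next() call, so iteration order is preserved. A hypothetical smoke test
(a minimal sketch; preload_fields and geometry_handler are stored but
unused at this stage):

    from yt.geometry.geometry_handler import ChunkDataCache

    # Any pre-created iterator works; items are pulled in batches of 4.
    cache = ChunkDataCache(iter(range(10)), preload_fields=[],
                           geometry_handler=None, max_length=4)
    print list(cache)  # [0, 1, ..., 9] -- refilled in batches of 4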


https://bitbucket.org/yt_analysis/yt-3.0/commits/21835af6a5dd/
Changeset:   21835af6a5dd
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-23 22:54:43
Summary:     Implementing caching that works for Enzo.
Affected #:  6 files

diff -r 6cdf51a81849b0a3179f0200df58182c2e72c6cf -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -47,7 +47,7 @@
     ParameterFileStore
 from .derived_quantities import DerivedQuantityCollection
 from .field_info_container import \
-    NeedsGridType
+    NeedsGridType, ValidateSpatial
 import yt.geometry.selection_routines
 
 def force_array(item, shape):
@@ -92,6 +92,7 @@
     _con_args = ()
     _skip_add = False
     _container_fields = ()
+    _field_cache = None
 
     class __metaclass__(type):
         def __init__(cls, name, b, d):
@@ -192,13 +193,17 @@
         Returns a single field.  Will add if necessary.
         """
         f = self._determine_fields(key)[0]
-        if f not in self.field_data:
+        if f not in self.field_data and key not in self.field_data:
             if f in self._container_fields:
                 self.field_data[f] = self._generate_container_field(f)
                 return self.field_data[f]
             else:
                 self.get_data(f)
-        return self.field_data[f]
+        # Note that this is less succinct so that we can account for the case
+        # when there are, for example, no elements in the object.
+        rv = self.field_data.get(f, None)
+        if rv is None: rv = self.field_data[key]
+        return rv
 
     def __setitem__(self, key, val):
         """
@@ -249,10 +254,14 @@
         rv = np.empty(self.ires.size, dtype="float64")
         ind = 0
         if ngz == 0:
+            deps = self._identify_dependencies([field], spatial = True)
+            deps = self._determine_fields(deps)
             for io_chunk in self.chunks([], "io", cache = False):
-                for i,chunk in enumerate(self.chunks(field, "spatial", ngz = 0)):
-                    ind += self._current_chunk.objs[0].select(
-                            self.selector, self[field], rv, ind)
+                for i,chunk in enumerate(self.chunks([], "spatial", ngz = 0,
+                                                    preload_fields = deps)):
+                    o = self._current_chunk.objs[0]
+                    with o._activate_cache():
+                        ind += o.select(self.selector, self[field], rv, ind)
         else:
             chunks = self.hierarchy._chunk(self, "spatial", ngz = ngz)
             for i, chunk in enumerate(chunks):
@@ -454,12 +463,18 @@
                 # NOTE: we yield before releasing the context
                 yield self
 
-    def _identify_dependencies(self, fields_to_get):
+    def _identify_dependencies(self, fields_to_get, spatial = False):
         inspected = 0
         fields_to_get = fields_to_get[:]
         for field in itertools.cycle(fields_to_get):
             if inspected >= len(fields_to_get): break
             inspected += 1
+            fi = self.pf._get_field_info(*field)
+            if not spatial and any(
+                    isinstance(v, ValidateSpatial) for v in fi.validators):
+                # We don't want to pre-fetch anything that's spatial, as that
+                # will be done later.
+                continue
             fd = self.pf.field_dependencies.get(field, None) or \
                  self.pf.field_dependencies.get(field[1], None)
             if fd is None: continue
@@ -570,6 +585,25 @@
         self._current_chunk = old_chunk
         self._locked = old_locked
 
+    @contextmanager
+    def _activate_cache(self):
+        cache = self._field_cache or {}
+        old_fields = {}
+        for field in (f for f in cache if f in self.field_data):
+            old_fields[field] = self.field_data[field]
+        self.field_data.update(cache)
+        yield
+        for field in cache:
+            self.field_data.pop(field)
+            if field in old_fields:
+                self.field_data[field] = old_fields.pop(field)
+        self._field_cache = None
+
+    def _initialize_cache(self, cache):
+        # Wipe out what came before
+        self._field_cache = {}
+        self._field_cache.update(cache)
+
     @property
     def icoords(self):
         if self._current_chunk is None:
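
The _activate_cache context manager added above follows a
save/patch/restore pattern: any fields the cache would shadow are saved
first, the cached fields are patched into field_data for the duration
of the block, and the originals are restored on exit. A standalone
sketch of the same pattern, assuming plain dicts (not yt code):

    from contextlib import contextmanager

    @contextmanager
    def activate_cache(field_data, cache):
        # Save fields the cache would shadow, patch the cache in,
        # then undo the patch once the block exits.
        old_fields = {}
        for field in (f for f in cache if f in field_data):
            old_fields[field] = field_data[field]
        field_data.update(cache)
        yield
        for field in cache:
            field_data.pop(field)
            if field in old_fields:
                field_data[field] = old_fields.pop(field)

    data = {"Density": 1.0}
    with activate_cache(data, {"Density": 2.0, "Temperature": 3.0}):
        assert data["Temperature"] == 3.0  # cached field visible
    assert data == {"Density": 1.0}        # original state restored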

diff -r 6cdf51a81849b0a3179f0200df58182c2e72c6cf -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -201,6 +201,7 @@
 
     _strip_path = False
     grid = EnzoGrid
+    _preload_implemented = True
 
     def __init__(self, pf, data_style):
         

diff -r 6cdf51a81849b0a3179f0200df58182c2e72c6cf -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -196,6 +196,8 @@
             elif g.filename is None:
                 continue
             grids_by_file[g.filename].append(g.id)
+        #if len(chunk.objs) == 1 and len(grids_by_file) > 0:
+        #    raise RuntimeError
         sets = [fname for ftype, fname in fields]
         for filename in grids_by_file:
             nodes = grids_by_file[filename]

diff -r 6cdf51a81849b0a3179f0200df58182c2e72c6cf -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -615,11 +615,12 @@
         # potentially looking at estimated memory usage.  Note that this never
         # initializes the iterator; it assumes the iterator is already created,
         # and it calls next() on it.
-        self.base_iter = base_iter
+        self.base_iter = base_iter.__iter__()
         self.queue = []
         self.max_length = max_length
         self.preload_fields = preload_fields
         self.geometry_handler = geometry_handler
+        self.cache = {}
 
     def __iter__(self):
         return self
@@ -631,7 +632,11 @@
                     self.queue.append(self.base_iter.next())
                 except StopIteration:
                     break
-        if len(self.queue) == 0:
             # If it's still zero ...
-            raise StopIteration
-        return self.queue.pop(0)
+            if len(self.queue) == 0: raise StopIteration
+            chunk = YTDataChunk(None, "cache", self.queue, cache=False)
+            self.cache = self.geometry_handler.io._read_chunk_data(
+                chunk, self.preload_fields)
+        g = self.queue.pop(0)
+        g._initialize_cache(self.cache.pop(g.id, {}))
+        return g

diff -r 6cdf51a81849b0a3179f0200df58182c2e72c6cf -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -35,7 +35,8 @@
 from yt.arraytypes import blankRecordArray
 from yt.config import ytcfg
 from yt.data_objects.field_info_container import NullFunc
-from yt.geometry.geometry_handler import GeometryHandler, YTDataChunk
+from yt.geometry.geometry_handler import \
+    GeometryHandler, YTDataChunk, ChunkDataCache
 from yt.utilities.definitions import MAXLEVEL
 from yt.utilities.physical_constants import sec_per_year
 from yt.utilities.io_handler import io_registry
@@ -47,6 +48,7 @@
 
 class GridGeometryHandler(GeometryHandler):
     float_type = 'float64'
+    _preload_implemented = False
 
     def _setup_geometry(self):
         mylog.debug("Counting grids.")
@@ -256,7 +258,7 @@
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         yield YTDataChunk(dobj, "all", gobjs, dobj.size, cache)
         
-    def _chunk_spatial(self, dobj, ngz, sort = None):
+    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         if sort in ("+level", "level"):
             giter = sorted(gobjs, key = g.Level)
@@ -264,7 +266,9 @@
             giter = sorted(gobjs, key = -g.Level)
         elif sort is None:
             giter = gobjs
-        for i,og in enumerate(giter):
+        if self._preload_implemented and preload_fields is not None and ngz == 0:
+            giter = ChunkDataCache(list(giter), preload_fields, self)
+        for i, og in enumerate(giter):
             if ngz > 0:
                 g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
             else:
@@ -284,3 +288,4 @@
             gs = gfiles[fn]
             yield YTDataChunk(dobj, "io", gs, self._count_selection(dobj, gs),
                               cache = cache)
+

diff -r 6cdf51a81849b0a3179f0200df58182c2e72c6cf -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -118,6 +118,9 @@
     def _read_exception(self):
         return None
 
+    def _read_chunk_data(self, chunk, fields):
+        return None
+
 class IOHandlerExtracted(BaseIOHandler):
 
     _data_style = 'extracted'
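
The base-class _read_chunk_data stub above returns None; the contract
implied by ChunkDataCache.next (which calls self.cache.pop(g.id, {}))
is a dict mapping grid id to a per-grid {field: array} dict. A
hypothetical minimal override, with _read_field_for_grid standing in
for a frontend's real disk I/O:

    def _read_chunk_data(self, chunk, fields):
        # Sketch only: read every requested field for every grid in
        # the chunk in one pass, keyed by grid id for ChunkDataCache.
        rv = {}
        for g in chunk.objs:
            rv[g.id] = dict((f, self._read_field_for_grid(g, f))
                            for f in fields)
        return rv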


https://bitbucket.org/yt_analysis/yt-3.0/commits/b311ba8d5d5d/
Changeset:   b311ba8d5d5d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-09 20:52:41
Summary:     Merging from the frontends test
Affected #:  101 files

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -17,3 +17,4 @@
 tabel = tabel at slac.stanford.edu
 sername=kayleanelson = kaylea.nelson at yale.edu
 kayleanelson = kaylea.nelson at yale.edu
+jcforbes at ucsc.edu = jforbes at ucolick.org

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -419,7 +419,7 @@
 echo "be installing ZeroMQ"
 
 printf "%-15s = %s so I " "INST_ROCKSTAR" "${INST_ROCKSTAR}"
-get_willwont ${INST_0MQ}
+get_willwont ${INST_ROCKSTAR}
 echo "be installing Rockstar"
 
 echo
@@ -832,8 +832,8 @@
 	    echo "Building BLAS"
 	    cd BLAS
 	    gfortran -O2 -fPIC -fno-second-underscore -c *.f
-	    ar r libfblas.a *.o &>> ${LOG_FILE}
-	    ranlib libfblas.a 1>> ${LOG_FILE}
+	    ( ar r libfblas.a *.o 2>&1 ) 1>> ${LOG_FILE}
+	    ( ranlib libfblas.a 2>&1 ) 1>> ${LOG_FILE}
 	    rm -rf *.o
 	    touch done
 	    cd ..
@@ -844,7 +844,7 @@
 	    echo "Building LAPACK"
 	    cd $LAPACK/
 	    cp INSTALL/make.inc.gfortran make.inc
-	    make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
+	    ( make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 2>&1 ) 1>> ${LOG_FILE} || do_exit
 	    touch done
 	    cd ..
 	fi
@@ -877,6 +877,11 @@
 mkdir -p ${DEST_DIR}/src/$MATPLOTLIB
 echo "[directories]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
 echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+if [ `uname` = "Darwin" ]
+then
+   echo "[gui_support]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+   echo "macosx = False" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+fi
 do_setup_py $MATPLOTLIB
 if [ -n "${OLD_LDFLAGS}" ]
 then
@@ -943,10 +948,10 @@
 touch done
 cd $MY_PWD
 
-if !(${DEST_DIR}/bin/python2.7 -c "import readline" >> ${LOG_FILE})
+if !( ( ${DEST_DIR}/bin/python2.7 -c "import readline" 2>&1 )>> ${LOG_FILE})
 then
     echo "Installing pure-python readline"
-    ${DEST_DIR}/bin/pip install readline 1>> ${LOG_FILE}
+    ( ${DEST_DIR}/bin/pip install readline 2>&1 ) 1>> ${LOG_FILE}
 fi
 
 if [ $INST_ENZO -eq 1 ]

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1062,8 +1062,9 @@
     def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
-        *dm_only* is True (default), only run it on the dark matter particles, otherwise
-        on all particles.  Returns an iterable collection of *HopGroup* items.
+        *dm_only* is True (default), only run it on the dark matter particles, 
+        otherwise on all particles.  Returns an iterable collection of 
+        *HopGroup* items.
         """
         self._data_source = data_source
         self.dm_only = dm_only
@@ -2215,11 +2216,11 @@
                 self.comm.mpi_bcast(self.bucket_bounds)
             my_bounds = self.bucket_bounds[self.comm.rank]
             LE, RE = my_bounds[0], my_bounds[1]
-            self._data_source = self.hierarchy.region_strict([0.] * 3, LE, RE)
+            self._data_source = self.hierarchy.region([0.] * 3, LE, RE)
         # If this isn't parallel, define the region as an AMRRegionStrict so
         # particle IO works.
         if self.comm.size == 1:
-            self._data_source = self.hierarchy.periodic_region_strict([0.5] * 3,
+            self._data_source = self.hierarchy.region([0.5] * 3,
                 LE, RE)
         # get the average spacing between particles for this region
         # The except is for the serial case where the full box is what we want.
@@ -2305,8 +2306,7 @@
                 np.zeros(3, dtype='float64'))
         # If we're using a subvolume, we now re-divide.
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
-                ds_RE)
+            self._data_source = pf.h.region([0.] * 3, ds_LE, ds_RE)
             # Cut up the volume.
             padded, LE, RE, self._data_source = \
                 self.partition_hierarchy_3d(ds=self._data_source,
@@ -2503,7 +2503,7 @@
         # object representing the entire domain and sum it "lazily" with
         # Derived Quantities.
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE, ds_RE)
+            self._data_source = pf.h.region([0.] * 3, ds_LE, ds_RE)
         else:
             self._data_source = pf.h.all_data()
         self.padding = padding  # * pf["unitary"] # This should be clevererer
@@ -2599,7 +2599,7 @@
             linking_length = np.abs(link)
         self.padding = padding
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
+            self._data_source = pf.h.region([0.] * 3, ds_LE,
                 ds_RE)
         else:
             self._data_source = pf.h.all_data()

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -54,8 +54,8 @@
 import glob
 
 from yt.funcs import *
-from yt.utilities.pykdtree import KDTree
-import yt.utilities.pydot as pydot
+from yt.extern.pykdtree import KDTree
+import yt.extern.pydot as pydot
 
 # We don't currently use this, but we may again find a use for it in the
 # future.

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -36,7 +36,7 @@
     HaloProfiler
 from yt.convenience import load
 from yt.utilities.logger import ytLogger as mylog
-import yt.utilities.pydot as pydot
+import yt.extern.pydot as pydot
 from yt.utilities.spatial import cKDTree
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelDummy, \

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -811,10 +811,10 @@
                     need_per = True
                     break
 
-            if need_per:
-                region = self.pf.h.periodic_region(halo['center'], leftEdge, rightEdge)
-            else:
-                region = self.pf.h.region(halo['center'], leftEdge, rightEdge)
+            # We use the same type of region regardless.  The selection will be
+            # correct, but we need the need_per variable for projection
+            # shifting.
+            region = self.pf.h.region(halo['center'], leftEdge, rightEdge)
 
             # Make projections.
             if not isinstance(axes, types.ListType): axes = list([axes])
@@ -1254,7 +1254,7 @@
                 mylog.error("Output directory exists, but is not a directory: %s." % my_output_dir)
                 raise IOError(my_output_dir)
         else:
-            os.mkdir(my_output_dir)
+            os.makedirs(my_output_dir)
 
 def _shift_projections(pf, projections, oldCenter, newCenter, axis):
     """

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -159,8 +159,7 @@
             # This ds business below has to do with changes made for halo
             # finding on subvolumes and serves no purpose here except
             # compatibility. This is not the best policy, if I'm honest.
-            ds = pf.h.periodic_region_strict([0.]*3, self.left_edge, 
-                self.right_edge)
+            ds = pf.h.region([0.]*3, self.left_edge, self.right_edge)
             padded, self.LE, self.RE, self.ds = \
             self.partition_hierarchy_3d(ds = ds, padding=0.,
                 rank_ratio = self.vol_ratio)

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,7 +62,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold010',
+    gold_standard_filename = 'gold310',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None',
     thread_field_detection = 'False'

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -58,8 +58,19 @@
     candidates = []
     args = [os.path.expanduser(arg) if isinstance(arg, types.StringTypes)
             else arg for arg in args]
-    valid_file = [os.path.exists(arg) if isinstance(arg, types.StringTypes) 
-            else False for arg in args]
+    valid_file = []
+    for argno, arg in enumerate(args):
+        if isinstance(arg, types.StringTypes):
+            if os.path.exists(arg):
+                valid_file.append(True)
+            else:
+                if os.path.exists(os.path.join(ytcfg.get("yt", "test_data_dir"), arg)):
+                    valid_file.append(True)
+                    args[argno] = os.path.join(ytcfg.get("yt", "test_data_dir"), arg)
+                else:
+                    valid_file.append(False)
+        else:
+            valid_file.append(False)
     if not any(valid_file):
         try:
             from yt.data_objects.time_series import TimeSeriesData
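
The effect of the yt/convenience.py change above: a filename that does
not exist relative to the working directory is now retried under the
configured test_data_dir before being rejected. An illustration, with a
hypothetical config path and dataset name:

    from yt.config import ytcfg
    from yt.convenience import load

    ytcfg["yt", "test_data_dir"] = "/data/yt_test"
    # Tries ./IsolatedGalaxy/galaxy0030/galaxy0030 first, then
    # /data/yt_test/IsolatedGalaxy/galaxy0030/galaxy0030.
    pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")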

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -36,12 +36,12 @@
     NeedsProperty, \
     NeedsParameter
 import yt.geometry.particle_deposit as particle_deposit
+import yt.geometry.particle_smooth as particle_smooth
 from yt.funcs import *
 
 class OctreeSubset(YTSelectionContainer):
     _spatial = True
     _num_ghost_zones = 0
-    _num_zones = 2
     _type_name = 'octree_subset'
     _skip_add = True
     _con_args = ('base_region', 'domain', 'pf')
@@ -49,7 +49,8 @@
     _domain_offset = 0
     _num_octs = -1
 
-    def __init__(self, base_region, domain, pf):
+    def __init__(self, base_region, domain, pf, over_refine_factor = 1):
+        self._num_zones = 1 << (over_refine_factor)
         self.field_data = YTFieldData()
         self.field_parameters = {}
         self.domain = domain
@@ -126,6 +127,7 @@
 
     def deposit(self, positions, fields = None, method = None):
         # Here we perform our particle deposition.
+        if fields is None: fields = []
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
@@ -145,6 +147,29 @@
         if vals is None: return
         return np.asfortranarray(vals)
 
+    def smooth(self, positions, fields = None, method = None):
+        # Here we perform our particle deposition.
+        if fields is None: fields = []
+        cls = getattr(particle_smooth, "%s_smooth" % method, None)
+        if cls is None:
+            raise YTParticleDepositionNotImplemented(method)
+        nz = self.nz
+        nvals = (nz, nz, nz, (self.domain_ind >= 0).sum())
+        if fields is None: fields = []
+        op = cls(nvals, len(fields), 64)
+        op.initialize()
+        mylog.debug("Smoothing %s particles into %s Octs",
+            positions.shape[0], nvals[-1])
+        op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
+            self.domain_id, self._domain_offset, self.pf.periodicity)
+        vals = op.finalize()
+        if vals is None: return
+        if isinstance(vals, list):
+            vals = [np.asfortranarray(v) for v in vals]
+        else:
+            vals = np.asfortranarray(vals)
+        return vals
+
     def select_icoords(self, dobj):
         d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id,
                                      num_octs = self._num_octs)
@@ -206,8 +231,10 @@
     _type_name = 'indexed_octree_subset'
     _con_args = ('data_files', 'pf', 'min_ind', 'max_ind')
     domain_id = -1
-    def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0):
+    def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0,
+                 over_refine_factor = 1):
         # The first attempt at this will not work in parallel.
+        self._num_zones = 1 << (over_refine_factor)
         self.data_files = data_files
         self.field_data = YTFieldData()
         self.field_parameters = {}
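
The smooth method added above mirrors the existing deposit entry point:
the method string is resolved against yt.geometry.particle_smooth
(looking up "%s_smooth" % method), and list-valued results are
converted to Fortran order field by field. A hedged call sketch; the
method name here is illustrative and must exist in particle_smooth:

    # obj is an OctreeSubset; positions is an (N, 3) particle
    # coordinate array and fields carries per-particle arrays.
    vals = obj.smooth(positions, fields=[particle_mass],
                      method="simple")  # resolves to simple_smooth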

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -122,16 +122,6 @@
                 int(self.periodic), DLE, DRE)
         return (0, args)
 
-class ParticleIOHandlerRegionStrict(ParticleIOHandlerRegion):
-    _source_type = "region_strict"
-
-class ParticleIOHandlerPeriodicRegion(ParticleIOHandlerRegion):
-    periodic = True
-    _source_type = "periodic_region"
-
-class ParticleIOHandlerPeriodicRegionStrict(ParticleIOHandlerPeriodicRegion):
-    _source_type = "periodic_region_strict"
-
 class ParticleIOHandlerSphere(ParticleIOHandlerImplemented):
     _source_type = "sphere"
 

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/data_objects/tests/test_cutting_plane.py
--- a/yt/data_objects/tests/test_cutting_plane.py
+++ b/yt/data_objects/tests/test_cutting_plane.py
@@ -1,5 +1,6 @@
 from yt.testing import *
 import os
+import tempfile
 
 def setup():
     from yt.config import ytcfg
@@ -7,7 +8,10 @@
 
 def teardown_func(fns):
     for fn in fns:
-        os.remove(fn)
+        try:
+            os.remove(fn)
+        except OSError:
+            pass
 
 def test_cutting_plane():
     for nprocs in [8, 1]:
@@ -23,7 +27,9 @@
         yield assert_equal, cut["Ones"].min(), 1.0
         yield assert_equal, cut["Ones"].max(), 1.0
         pw = cut.to_pw()
-        fns += pw.save()
+        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+        os.close(tmpfd)
+        fns += pw.save(name=tmpname)
         frb = cut.to_frb((1.0,'unitary'), 64)
         for cut_field in ['Ones', 'Density']:
             yield assert_equal, frb[cut_field].info['data_source'], \

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/data_objects/tests/test_image_array.py
--- a/yt/data_objects/tests/test_image_array.py
+++ b/yt/data_objects/tests/test_image_array.py
@@ -1,130 +1,94 @@
-from yt.testing import *
-from yt.data_objects.image_array import ImageArray
 import numpy as np
 import os
 import tempfile
 import shutil
+import unittest
+from yt.data_objects.image_array import ImageArray
+from yt.testing import \
+    assert_equal
+
 
 def setup():
     from yt.config import ytcfg
-    ytcfg["yt","__withintesting"] = "True"
-    np.seterr(all = 'ignore')
+    ytcfg["yt", "__withintesting"] = "True"
+    np.seterr(all='ignore')
+
+
+def dummy_image(kstep, nlayers):
+    im = np.zeros([64, 128, nlayers])
+    for i in xrange(im.shape[0]):
+        for k in xrange(im.shape[2]):
+            im[i, :, k] = np.linspace(0.0, kstep * k, im.shape[1])
+    return im
+
 
 def test_rgba_rescale():
-    im = np.zeros([64,128,4])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
-    im_arr = ImageArray(im)
+    im_arr = ImageArray(dummy_image(10.0, 4))
 
     new_im = im_arr.rescale(inline=False)
-    yield assert_equal, im_arr[:,:,:3].max(), 2*10.
-    yield assert_equal, im_arr[:,:,3].max(), 3*10.
-    yield assert_equal, new_im[:,:,:3].sum(axis=2).max(), 1.0 
-    yield assert_equal, new_im[:,:,3].max(), 1.0
+    yield assert_equal, im_arr[:, :, :3].max(), 2 * 10.
+    yield assert_equal, im_arr[:, :, 3].max(), 3 * 10.
+    yield assert_equal, new_im[:, :, :3].sum(axis=2).max(), 1.0
+    yield assert_equal, new_im[:, :, 3].max(), 1.0
 
     im_arr.rescale()
-    yield assert_equal, im_arr[:,:,:3].sum(axis=2).max(), 1.0
-    yield assert_equal, im_arr[:,:,3].max(), 1.0
+    yield assert_equal, im_arr[:, :, :3].sum(axis=2).max(), 1.0
+    yield assert_equal, im_arr[:, :, 3].max(), 1.0
 
-def test_image_array_hdf5():
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
 
-    im = np.zeros([64,128,3])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+class TestImageArray(unittest.TestCase):
 
-    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-        'width':0.245, 'units':'cm', 'type':'rendering'}
+    tmpdir = None
+    curdir = None
 
-    im_arr = ImageArray(im, info=myinfo)
-    im_arr.save('test_3d_ImageArray')
+    def setUp(self):
+        self.tmpdir = tempfile.mkdtemp()
+        self.curdir = os.getcwd()
+        os.chdir(self.tmpdir)
 
-    im = np.zeros([64,128])
-    for i in xrange(im.shape[0]):
-        im[i,:] = np.linspace(0.,0.3*k, im.shape[1])
+    def test_image_array_hdf5(self):
+        myinfo = {'field': 'dinosaurs', 'east_vector': np.array([1., 0., 0.]),
+                  'north_vector': np.array([0., 0., 1.]),
+                  'normal_vector': np.array([0., 1., 0.]),
+                  'width': 0.245, 'units': 'cm', 'type': 'rendering'}
 
-    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-        'width':0.245, 'units':'cm', 'type':'rendering'}
+        im_arr = ImageArray(dummy_image(0.3, 3), info=myinfo)
+        im_arr.save('test_3d_ImageArray')
 
-    im_arr = ImageArray(im, info=myinfo)
-    im_arr.save('test_2d_ImageArray')
+        im = np.zeros([64, 128])
+        for i in xrange(im.shape[0]):
+            im[i, :] = np.linspace(0., 0.3 * 2, im.shape[1])
 
-    os.chdir(curdir)
-    # clean up
-    shutil.rmtree(tmpdir)
+        myinfo = {'field': 'dinosaurs', 'east_vector': np.array([1., 0., 0.]),
+                  'north_vector': np.array([0., 0., 1.]),
+                  'normal_vector': np.array([0., 1., 0.]),
+                  'width': 0.245, 'units': 'cm', 'type': 'rendering'}
 
-def test_image_array_rgb_png():
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
+        im_arr = ImageArray(im, info=myinfo)
+        im_arr.save('test_2d_ImageArray')
 
-    im = np.zeros([64,128,3])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
+    def test_image_array_rgb_png(self):
+        im_arr = ImageArray(dummy_image(10.0, 3))
+        im_arr.write_png('standard.png')
 
-    im_arr = ImageArray(im)
-    im_arr.write_png('standard.png')
+    def test_image_array_rgba_png(self):
+        im_arr = ImageArray(dummy_image(10.0, 4))
+        im_arr.write_png('standard.png')
+        im_arr.write_png('non-scaled.png', rescale=False)
+        im_arr.write_png('black_bg.png', background='black')
+        im_arr.write_png('white_bg.png', background='white')
+        im_arr.write_png('green_bg.png', background=[0., 1., 0., 1.])
+        im_arr.write_png('transparent_bg.png', background=None)
 
-def test_image_array_rgba_png():
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
+    def test_image_array_background(self):
+        im_arr = ImageArray(dummy_image(10.0, 4))
+        im_arr.rescale()
+        new_im = im_arr.add_background_color([1., 0., 0., 1.], inline=False)
+        new_im.write_png('red_bg.png')
+        im_arr.add_background_color('black')
+        im_arr.write_png('black_bg2.png')
 
-    im = np.zeros([64,128,4])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
-
-    im_arr = ImageArray(im)
-    im_arr.write_png('standard.png')
-    im_arr.write_png('non-scaled.png', rescale=False)
-    im_arr.write_png('black_bg.png', background='black')
-    im_arr.write_png('white_bg.png', background='white')
-    im_arr.write_png('green_bg.png', background=[0.,1.,0.,1.])
-    im_arr.write_png('transparent_bg.png', background=None)
-
-
-def test_image_array_background():
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
-
-    im = np.zeros([64,128,4])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
-
-    im_arr = ImageArray(im)
-    im_arr.rescale()
-    new_im = im_arr.add_background_color([1.,0.,0.,1.], inline=False)
-    new_im.write_png('red_bg.png')
-    im_arr.add_background_color('black')
-    im_arr.write_png('black_bg2.png')
- 
-    os.chdir(curdir)
-    # clean up
-    shutil.rmtree(tmpdir)
-
-
-
-
-
-
-
-
-
-
-
-
-
+    def tearDown(self):
+        os.chdir(self.curdir)
+        # clean up
+        shutil.rmtree(self.tmpdir)

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -1,5 +1,6 @@
 from yt.testing import *
 import os
+import tempfile
 
 def setup():
     from yt.config import ytcfg
@@ -7,7 +8,10 @@
 
 def teardown_func(fns):
     for fn in fns:
-        os.remove(fn)
+        try:
+            os.remove(fn)
+        except OSError:
+            pass
 
 def test_projection():
     for nprocs in [8, 1]:
@@ -37,7 +41,9 @@
                 yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
                 pw = proj.to_pw()
-                fns += pw.save()
+                tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+                os.close(tmpfd)
+                fns += pw.save(name=tmpname)
                 frb = proj.to_frb((1.0,'unitary'), 64)
                 for proj_field in ['Ones', 'Density']:
                     yield assert_equal, frb[proj_field].info['data_source'], \

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -27,6 +27,7 @@
 """
 import os
 import numpy as np
+import tempfile
 from nose.tools import raises
 from yt.testing import \
     fake_random_pf, assert_equal, assert_array_equal
@@ -42,7 +43,10 @@
 
 def teardown_func(fns):
     for fn in fns:
-        os.remove(fn)
+        try:
+            os.remove(fn)
+        except OSError:
+            pass
 
 
 def test_slice():
@@ -72,7 +76,9 @@
                 yield assert_equal, np.unique(slc["pdx"]), 0.5 / dims[xax]
                 yield assert_equal, np.unique(slc["pdy"]), 0.5 / dims[yax]
                 pw = slc.to_pw()
-                fns += pw.save()
+                tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+                os.close(tmpfd)
+                fns += pw.save(name=tmpname)
                 frb = slc.to_frb((1.0, 'unitary'), 64)
                 for slc_field in ['Ones', 'Density']:
                     yield assert_equal, frb[slc_field].info['data_source'], \

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -347,6 +347,7 @@
         # Figure out the starting and stopping times and redshift.
         self._calculate_simulation_bounds()
         # Get all possible datasets.
+        self.all_time_outputs = []
         self._get_all_outputs(find_outputs=find_outputs)
         
         self.print_key_parameters()

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/extern/__init__.py
--- /dev/null
+++ b/yt/extern/__init__.py
@@ -0,0 +1,4 @@
+"""
+This packages contains python packages that are bundled with yt
+and are developed by 3rd party upstream.
+"""

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt-3.0/commits/9833389da475/
Changeset:   9833389da475
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-09 21:14:15
Summary:     Adding preload_fields to _chunk_spatial definitions.
Affected #:  4 files

diff -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 -r 9833389da47525b9bbb506221743bb6793db9ed5 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -171,7 +171,7 @@
         # as well as the referring data source
         yield YTDataChunk(dobj, "all", oobjs, None)
 
-    def _chunk_spatial(self, dobj, ngz, sort = None):
+    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
         sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for i,og in enumerate(sobjs):
             if ngz > 0:

diff -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 -r 9833389da47525b9bbb506221743bb6793db9ed5 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -379,7 +379,7 @@
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         yield YTDataChunk(dobj, "all", oobjs, None)
 
-    def _chunk_spatial(self, dobj, ngz):
+    def _chunk_spatial(self, dobj, ngz, preload_fields = None):
         if ngz > 0:
             raise NotImplementedError
         sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)

diff -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 -r 9833389da47525b9bbb506221743bb6793db9ed5 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -344,7 +344,7 @@
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         yield YTDataChunk(dobj, "all", oobjs, None)
 
-    def _chunk_spatial(self, dobj, ngz, sort = None):
+    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
         sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for i,og in enumerate(sobjs):
             if ngz > 0:

diff -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 -r 9833389da47525b9bbb506221743bb6793db9ed5 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -158,7 +158,7 @@
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         yield YTDataChunk(dobj, "all", oobjs, None)
 
-    def _chunk_spatial(self, dobj, ngz, sort = None):
+    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
         sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         # We actually do not really use the data files except as input to the
         # ParticleOctreeSubset.
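
These four one-line changes keep every _chunk_spatial override
call-compatible with the grid geometry handler: frontends without
preloading accept the keyword and ignore it. Without them, a generic
call like the following sketch would raise TypeError on ART, ARTIO,
RAMSES, or particle data:

    for chunk in geometry_handler._chunk_spatial(dobj, ngz=0,
                                                 preload_fields=deps):
        pass  # preloading only takes effect where it is implemented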


https://bitbucket.org/yt_analysis/yt-3.0/commits/e2a577d25a69/
Changeset:   e2a577d25a69
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-09 22:12:24
Summary:     Merging a few updates for patch IO.
Affected #:  4 files

diff -r 9833389da47525b9bbb506221743bb6793db9ed5 -r e2a577d25a69e15fe713eed31626052448a2ca7a yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -718,6 +718,7 @@
         cdef np.ndarray[np.float32_t, ndim=2] source
         cdef np.ndarray[np.float64_t, ndim=1] dest
         cdef int n, status, i, di, num_oct_levels, nf, ngv, max_level
+        cdef int j, oct_ind, level
         cdef np.int64_t sfc
         cdef np.float64_t val
         cdef artio_fileset_handle *handle = self.artio_handle.handle

diff -r 9833389da47525b9bbb506221743bb6793db9ed5 -r e2a577d25a69e15fe713eed31626052448a2ca7a yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -275,6 +275,7 @@
     def __init__(self, filename, data_style="OWLS", n_ref = 64,
                  over_refine_factor = 1):
         self.storage_filename = None
+        filename = os.path.abspath(filename)
         super(OWLSStaticOutput, self).__init__(
                                filename, data_style,
                                unit_base = None, n_ref = n_ref,

diff -r 9833389da47525b9bbb506221743bb6793db9ed5 -r e2a577d25a69e15fe713eed31626052448a2ca7a yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -394,7 +394,7 @@
         return np.array([avg, mi, ma])
 
     def compare(self, new_result, old_result):
-        err_msg = "Field values for %s not equal." % self.field
+        err_msg = "Field values for %s not equal." % (self.field,)
         if self.decimals is None:
             assert_equal(new_result, old_result,
                          err_msg=err_msg, verbose=True)

diff -r 9833389da47525b9bbb506221743bb6793db9ed5 -r e2a577d25a69e15fe713eed31626052448a2ca7a yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1112,16 +1112,18 @@
         type = self._plot_type
         if type in ['Projection','OffAxisProjection']:
             weight = self.data_source.weight_field
+            if weight is not None:
+                weight = weight.replace(' ', '_')
         if 'Cutting' in self.data_source.__class__.__name__:
             type = 'OffAxisSlice'
         for k, v in self.plots.iteritems():
             if isinstance(k, types.TupleType):
                 k = k[1]
             if axis:
-                n = "%s_%s_%s_%s" % (name, type, axis, k)
+                n = "%s_%s_%s_%s" % (name, type, axis, k.replace(' ', '_'))
             else:
                 # for cutting planes
-                n = "%s_%s_%s" % (name, type, k)
+                n = "%s_%s_%s" % (name, type, k.replace(' ', '_'))
             if weight:
                 if isinstance(weight, tuple):
                     weight = weight[1]
@@ -1848,8 +1850,17 @@
 class WindowPlotMPL(ImagePlotMPL):
     def __init__(
             self, data, cbname, cmap, extent, aspect, zlim, size, fontsize,
-                figure, axes, cax):
-        fsize, axrect, caxrect = self._get_best_layout(size, fontsize)
+            figure, axes, cax):
+        self._draw_colorbar = True
+        self._draw_axes = True
+        self._cache_layout(size, fontsize)
+
+        # Make room for a colorbar
+        self.input_size = size
+        self.fsize = [size[0] + self._cbar_inches[self._draw_colorbar], size[1]]
+
+        # Compute layout
+        axrect, caxrect = self._get_best_layout(fontsize)
         if np.any(np.array(axrect) < 0):
             msg = 'The axis ratio of the requested plot is very narrow. ' \
                   'There is a good chance the plot will not look very good, ' \
@@ -1859,7 +1870,7 @@
             axrect  = (0.07, 0.10, 0.80, 0.80)
             caxrect = (0.87, 0.10, 0.04, 0.80)
         ImagePlotMPL.__init__(
-            self, fsize, axrect, caxrect, zlim, figure, axes, cax)
+            self, self.fsize, axrect, caxrect, zlim, figure, axes, cax)
         self._init_image(data, cbname, cmap, extent, aspect)
         self.image.axes.ticklabel_format(scilimits=(-2,3))
         if cbname == 'linear':
@@ -1867,31 +1878,74 @@
             self.cb.formatter.set_powerlimits((-2,3))
             self.cb.update_ticks()
 
-    def _get_best_layout(self, size, fontsize=18):
-        aspect = 1.0*size[0]/size[1]
-        fontscale = fontsize / 18.0
+    def _toggle_axes(self, choice):
+        self._draw_axes = choice
+        self.axes.get_xaxis().set_visible(choice)
+        self.axes.get_yaxis().set_visible(choice)
+        axrect, caxrect = self._get_best_layout()
+        self.axes.set_position(axrect)
+        self.cax.set_position(caxrect)
 
-        # add room for a colorbar
-        cbar_inches = fontscale*0.7
-        newsize = [size[0] + cbar_inches, size[1]]
+    def _toggle_colorbar(self, choice):
+        self._draw_colorbar = choice
+        self.cax.set_visible(choice)
+        self.fsize = [self.input_size[0] + self._cbar_inches[choice], self.input_size[1]]
+        axrect, caxrect = self._get_best_layout()
+        self.axes.set_position(axrect)
+        self.cax.set_position(caxrect)
+
+    def hide_axes(self):
+        self._toggle_axes(False)
+        return self
+
+    def show_axes(self):
+        self._toggle_axes(True)
+        return self
+
+    def hide_colorbar(self):
+        self._toggle_colorbar(False)
+        return self
+
+    def show_colorbar(self):
+        self._toggle_colorbar(True)
+        return self
+
+    def _cache_layout(self, size, fontsize):
+        self._cbar_inches = {}
+        self._text_buffx = {}
+        self._text_bottomy = {}
+        self._text_topy = {}
+
+        self._aspect = 1.0*size[0]/size[1]
+        self._fontscale = fontsize / 18.0
+
+        # Leave room for a colorbar, if we are drawing it.
+        self._cbar_inches[True] = self._fontscale*0.7
+        self._cbar_inches[False] = 0
 
         # add buffers for text, and a bit of whitespace on top
-        text_buffx = fontscale * 1.0/(newsize[0])
-        text_bottomy = fontscale * 0.7/size[1]
-        text_topy = fontscale * 0.3/size[1]
+        self._text_buffx[True] = self._fontscale * 1.0/(size[0] + self._cbar_inches[True])
+        self._text_bottomy[True] = self._fontscale * 0.7/size[1]
+        self._text_topy[True] = self._fontscale * 0.3/size[1]
 
+        # No buffer for text if we're not drawing axes
+        self._text_buffx[False] = 0
+        self._text_bottomy[False] = 0
+        self._text_topy[False] = 0
+
+    def _get_best_layout(self, fontsize=18):
         # calculate how much room the colorbar takes
-        cbar_frac = cbar_inches/newsize[0]
+        cbar_frac = self._cbar_inches[self._draw_colorbar]/self.fsize[0]
 
         # Calculate y fraction, then use to make x fraction.
-        yfrac = 1.0-text_bottomy-text_topy
-        ysize = yfrac*size[1]
-        xsize = aspect*ysize
-        xfrac = xsize/newsize[0]
+        yfrac = 1.0-self._text_bottomy[self._draw_axes]-self._text_topy[self._draw_axes]
+        ysize = yfrac*self.fsize[1]
+        xsize = self._aspect*ysize
+        xfrac = xsize/self.fsize[0]
 
         # Now make sure it all fits!
-        xbig = xfrac + text_buffx + 2.0*cbar_frac
-        ybig = yfrac + text_bottomy + text_topy
+        xbig = xfrac + self._text_buffx[self._draw_axes] + 2.0*cbar_frac
+        ybig = yfrac + self._text_bottomy[self._draw_axes] + self._text_topy[self._draw_axes]
 
         if xbig > 1:
             xsize /= xbig
@@ -1899,9 +1953,20 @@
         if ybig > 1:
             xsize /= ybig
             ysize /= ybig
-        xfrac = xsize/newsize[0]
-        yfrac = ysize/newsize[1]
+        xfrac = xsize/self.fsize[0]
+        yfrac = ysize/self.fsize[1]
 
-        axrect = (text_buffx, text_bottomy, xfrac, yfrac )
-        caxrect = (text_buffx+xfrac, text_bottomy, cbar_frac/4., yfrac )
-        return newsize, axrect, caxrect
+        axrect = (
+            self._text_buffx[self._draw_axes],
+            self._text_bottomy[self._draw_axes],
+            xfrac,
+            yfrac
+        )
+
+        caxrect = (
+            self._text_buffx[self._draw_axes]+xfrac,
+            self._text_bottomy[self._draw_axes],
+            cbar_frac/4.,
+            yfrac
+        )
+        return axrect, caxrect
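
The WindowPlotMPL rework above caches both layout variants (with and
without the colorbar or axes) up front, so the new toggles can switch
between them without recomputing font metrics. Each toggle returns
self, so calls chain; a hypothetical use on a plot-window object:

    # slc is assumed to be an existing slice PlotWindow; per-field
    # WindowPlotMPL instances live in its .plots dict.
    plot = slc.plots["Density"]
    plot.hide_colorbar().hide_axes()  # bare image, layout recomputed
    plot.show_colorbar()              # colorbar back, axes still hidden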

Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


