[yt-svn] commit/yt: 28 new changesets

commits-noreply at bitbucket.org
Thu Jul 16 09:28:36 PDT 2015


28 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/2ed5df6ba44b/
Changeset:   2ed5df6ba44b
Branch:      yt
User:        jzuhone
Date:        2015-07-13 21:03:56+00:00
Summary:     Python 3 bugfix
Affected #:  1 file

diff -r c014ad925d83b6f1a7d2124cd941861a87e20d19 -r 2ed5df6ba44bb080034d0377a650ae3c0507de0a yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -535,7 +535,7 @@
             self.profiles[i] = \
                 create_profile(p.data_source, p.x_field,
                                n_bins=len(p.x_bins)-1,
-                               fields=p.field_map.values(),
+                               fields=list(p.field_map.values()),
                                weight_field=p.weight_field,
                                accumulation=p.accumulation,
                                fractional=p.fractional,
@@ -1155,7 +1155,7 @@
         self.profile = create_profile(
             p.data_source,
             [p.x_field, p.y_field],
-            p.field_map.values(),
+            list(p.field_map.values()),
             n_bins=[len(p.x_bins)-1, len(p.y_bins)-1],
             weight_field=p.weight_field,
             units=units,
@@ -1210,7 +1210,7 @@
         self.profile = create_profile(
             p.data_source,
             [p.x_field, p.y_field],
-            p.field_map.values(),
+            list(p.field_map.values()),
             n_bins=[len(p.x_bins)-1, len(p.y_bins)-1],
             weight_field=p.weight_field,
             units=units,

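For context: on Python 3, dict.values() returns a view rather than a list, so code that indexes the result or hands it to an API expecting a sequence needs an explicit list() wrap. A minimal sketch of the difference (field names illustrative):

    field_map = {"cell_mass": ("gas", "cell_mass")}
    fields = list(field_map.values())  # concrete list on both Python 2 and 3
    print(fields[0])                   # indexing the bare dict_values view fails on Python 3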

https://bitbucket.org/yt_analysis/yt/commits/dcf373241152/
Changeset:   dcf373241152
Branch:      yt
User:        jzuhone
Date:        2015-07-14 13:45:59+00:00
Summary:     Unified 2/3 codebase for colormaps
Affected #:  1 file

diff -r 2ed5df6ba44bb080034d0377a650ae3c0507de0a -r dcf37324115296430a818e9bd4b8de2b4ffbc2ae yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -11,7 +11,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 import numpy as np
-from yt.extern.six.moves import zip as izip
 
 import matplotlib
 import matplotlib.colors as cc
@@ -86,9 +85,9 @@
                 194.5*_vs**2.88+99.72*np.exp(-77.24*(_vs-0.742)**2.0)
               + 45.40*_vs**0.089+10.0)/255.0
 
-cdict = {'red':zip(_vs,_kamae_red,_kamae_red),
-         'green':zip(_vs,_kamae_grn,_kamae_grn),
-         'blue':zip(_vs,_kamae_blu,_kamae_blu)}
+cdict = {'red':np.transpose([_vs,_kamae_red,_kamae_red]),
+         'green':np.transpose([_vs,_kamae_grn,_kamae_grn]),
+         'blue':np.transpose([_vs,_kamae_blu,_kamae_blu])}
 add_cmap('kamae', cdict)
 
 # This one is a simple black & green map
@@ -151,9 +150,9 @@
 _vs = np.linspace(0,1,256)
 for k,v in list(_cm.color_map_luts.items()):
     if k not in yt_colormaps and k not in mcm.cmap_d:
-        cdict = { 'red': zip(_vs,v[0],v[0]),
-                  'green': zip(_vs,v[1],v[1]),
-                  'blue': zip(_vs,v[2],v[2]) }
+        cdict = { 'red': np.transpose([_vs,v[0],v[0]]),
+                  'green': np.transpose([_vs,v[1],v[1]]),
+                  'blue': np.transpose([_vs,v[2],v[2]]) }
         add_cmap(k, cdict)
 
 def _extract_lookup_table(cmap_name):
@@ -393,9 +392,9 @@
     #   Second number is the (0..1) number to interpolate to when coming *from below*
     #   Third number is the (0..1) number to interpolate to when coming *from above*
     _vs = np.linspace(0,1,256)
-    cdict = {'red':   zip(_vs, cmap[:,0], cmap[:,0]),
-             'green': zip(_vs, cmap[:,1], cmap[:,1]),
-             'blue':  zip(_vs, cmap[:,2], cmap[:,2])}
+    cdict = {'red':   np.transpose([_vs, cmap[:,0], cmap[:,0]]),
+             'green': np.transpose([_vs, cmap[:,1], cmap[:,1]]),
+             'blue':  np.transpose([_vs, cmap[:,2], cmap[:,2]])}
 
     if name is not None:
         add_cmap(name, cdict)

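The zip-to-np.transpose change is needed because Python 3's zip() returns a one-shot iterator, while matplotlib's colormap machinery expects segment data it can index and re-read; an (N, 3) array of (x, y0, y1) rows works on both interpreters. A minimal sketch:

    import numpy as np

    _vs = np.linspace(0.0, 1.0, 4)
    channel = _vs**2
    # zip(_vs, channel, channel) is exhausted after one pass on Python 3;
    # np.transpose builds a reusable (N, 3) array of (x, y0, y1) rows.
    segments = np.transpose([_vs, channel, channel])
    print(segments.shape)  # (4, 3)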

https://bitbucket.org/yt_analysis/yt/commits/111d46ff2faa/
Changeset:   111d46ff2faa
Branch:      yt
User:        jzuhone
Date:        2015-07-14 13:58:46+00:00
Summary:     If we re-create the profile, we need to make sure we do it with the current log settings
Affected #:  1 file

diff -r dcf37324115296430a818e9bd4b8de2b4ffbc2ae -r 111d46ff2faa66de17edf22a0ce4b14707806515 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -530,6 +530,7 @@
                 xma = xmax
             extrema = {p.x_field: ((xmi, str(p.x.units)), (xma, str(p.x.units)))}
             units = {p.x_field: str(p.x.units)}
+            logs = {p.x_field: self.x_log}
             for field in p.field_map.values():
                 units[field] = str(p.field_data[field].units)
             self.profiles[i] = \
@@ -539,6 +540,7 @@
                                weight_field=p.weight_field,
                                accumulation=p.accumulation,
                                fractional=p.fractional,
+                               logs=logs,
                                extrema=extrema, units=units)
         return self
 
@@ -1146,6 +1148,8 @@
         extrema = {p.x_field: ((xmin, str(p.x.units)), (xmax, str(p.x.units))),
                    p.y_field: ((p.y_bins.min(), str(p.y.units)),
                                (p.y_bins.max(), str(p.y.units)))}
+        logs = {p.x_field: self.x_log,
+                p.y_field: self.y_log}
         deposition = getattr(self.profile, "deposition", None)
         if deposition is None:
             additional_kwargs = {'accumulation': p.accumulation,
@@ -1160,6 +1164,7 @@
             weight_field=p.weight_field,
             units=units,
             extrema=extrema,
+            logs=logs,
             **additional_kwargs)
         for field in zunits:
             self.profile.set_field_unit(field, zunits[field])
@@ -1201,6 +1206,8 @@
         extrema = {p.x_field: ((p.x_bins.min(), str(p.x.units)),
                                (p.x_bins.max(), str(p.x.units))),
                    p.y_field: ((ymin, str(p.y.units)), (ymax, str(p.y.units)))}
+        logs = {p.x_field: self.x_log,
+                p.y_field: self.y_log}
         deposition = getattr(self.profile, "deposition", None)
         if deposition is None:
             additional_kwargs = {'accumulation': p.accumulation,
@@ -1215,6 +1222,7 @@
             weight_field=p.weight_field,
             units=units,
             extrema=extrema,
+            logs=logs,
             **additional_kwargs)
         for field in zunits:
             self.profile.set_field_unit(field, zunits[field])


https://bitbucket.org/yt_analysis/yt/commits/9bf5604e45e3/
Changeset:   9bf5604e45e3
Branch:      yt
User:        jzuhone
Date:        2015-07-14 15:44:10+00:00
Summary:     Decode key for Python 3
Affected #:  1 file

diff -r 111d46ff2faa66de17edf22a0ce4b14707806515 -r 9bf5604e45e38f48e9840aa0ce436b0a5a9db25f yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -76,7 +76,7 @@
         for key, val in self._handle.attrs.items():
             if key.startswith('component_'):
                 comp_number = int(re.match('component_(\d+)', key).groups()[0])
-                field_dict[val] = comp_number
+                field_dict[val.decode('utf-8')] = comp_number
         self._field_dict = field_dict
         return self._field_dict
 

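This reflects h5py's behavior on Python 3, where string-typed HDF5 attributes come back as bytes and would otherwise become bytes keys in the field dictionary. A minimal sketch (attribute value illustrative):

    val = b"density"                     # what an HDF5 string attribute yields on Python 3
    field_dict = {}
    field_dict[val.decode("utf-8")] = 0  # decode to str so keys match Python 2 behavior
    print(list(field_dict))              # ['density']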

https://bitbucket.org/yt_analysis/yt/commits/92d1e3dd94ee/
Changeset:   92d1e3dd94ee
Branch:      yt
User:        jzuhone
Date:        2015-07-14 16:16:26+00:00
Summary:     Fix Ramses for Python 3
Affected #:  1 file

diff -r 9bf5604e45e38f48e9840aa0ce436b0a5a9db25f -r 92d1e3dd94ee3264fcb0a90d713258cf87098a58 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -20,7 +20,10 @@
     BaseIOHandler
 from yt.utilities.logger import ytLogger as mylog
 import yt.utilities.fortran_utils as fpu
-from yt.extern.six.moves import cStringIO
+try:
+    from cStringIO import StringIO as IO
+except ImportError:
+    from io import BytesIO as IO
 
 class IOHandlerRAMSES(BaseIOHandler):
     _dataset_type = "ramses"
@@ -37,7 +40,7 @@
                 f = open(subset.domain.hydro_fn, "rb")
                 # This contains the boundary information, so we skim through
                 # and pick off the right vectors
-                content = cStringIO(f.read())
+                content = IO(f.read())
                 rv = subset.fill(content, fields, selector)
                 for ft, f in fields:
                     d = rv.pop(f)

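The try/except picks an in-memory binary buffer on either interpreter: cStringIO is gone on Python 3, and a file opened in "rb" mode yields bytes, hence BytesIO. A runnable sketch of the same pattern:

    try:
        from cStringIO import StringIO as IO   # Python 2: fast byte-string buffer
    except ImportError:
        from io import BytesIO as IO           # Python 3: bytes-only buffer

    content = IO(b"fortran record payload")    # wrap raw file bytes for stream-style reads
    print(content.read(7))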

https://bitbucket.org/yt_analysis/yt/commits/79e0f1a2c7c2/
Changeset:   79e0f1a2c7c2
Branch:      yt
User:        xarthisius
Date:        2015-07-13 20:23:25+00:00
Summary:     [owls] sort ParticleFiles by filename, fixes py3 issue
Affected #:  1 file

diff -r c014ad925d83b6f1a7d2124cd941861a87e20d19 -r 79e0f1a2c7c21367179eb8c6a781b391e0de164e yt/frontends/owls/io.py
--- a/yt/frontends/owls/io.py
+++ b/yt/frontends/owls/io.py
@@ -70,7 +70,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in sorted(data_files):
+        for data_file in sorted(data_files, key=lambda x: x.filename):
             f = _get_h5_handle(data_file.filename)
             # This double-reads
             for ptype, field_list in sorted(ptf.items()):
@@ -88,7 +88,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in sorted(data_files):
+        for data_file in sorted(data_files, key=lambda x: x.filename):
             f = _get_h5_handle(data_file.filename)
             for ptype, field_list in sorted(ptf.items()):
                 if data_file.total_particles[ptype] == 0:

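Python 3 dropped the arbitrary default ordering between unrelated objects, so sorted(data_files) raises TypeError unless the class defines __lt__; keying the sort on the filename restores a deterministic order. A minimal sketch with a hypothetical stand-in class:

    class DataFile(object):                    # hypothetical stand-in for yt's data files
        def __init__(self, filename):
            self.filename = filename

    files = {DataFile("snap_002.h5"), DataFile("snap_001.h5")}
    ordered = sorted(files, key=lambda x: x.filename)
    print([df.filename for df in ordered])     # ['snap_001.h5', 'snap_002.h5']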

https://bitbucket.org/yt_analysis/yt/commits/0c147ea5ec53/
Changeset:   0c147ea5ec53
Branch:      yt
User:        xarthisius
Date:        2015-07-14 13:21:31+00:00
Summary:     [py3] use cmp_to_key for custom sort()
Affected #:  1 file

diff -r 79e0f1a2c7c21367179eb8c6a781b391e0de164e -r 0c147ea5ec5358116a4dcc05c302a8bbe863d98f yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -22,6 +22,7 @@
 import glob
 import os
 import os.path as path
+from functools import cmp_to_key
 from collections import defaultdict
 from yt.extern.six import add_metaclass
 from yt.extern.six.moves import zip as izip
@@ -39,7 +40,7 @@
     TINY
 from yt.utilities.physical_ratios import \
      rho_crit_g_cm3_h2
-    
+
 from .hop.EnzoHop import RunHOP
 from .fof.EnzoFOF import RunFOF
 
@@ -138,9 +139,9 @@
         c[2] = self["particle_position_z"] - self.ds.domain_left_edge[2]
         com = []
         for i in range(3):
-            # A halo is likely periodic around a boundary if the distance 
+            # A halo is likely periodic around a boundary if the distance
             # between the max and min particle
-            # positions are larger than half the box. 
+            # positions are larger than half the box.
             # So skip the rest if the converse is true.
             # Note we might make a change here when periodicity-handling is
             # fully implemented.
@@ -444,7 +445,7 @@
         Msun2g = mass_sun_cgs
         rho_crit = rho_crit * ((1.0 + z) ** 3.0)
         # Get some pertinent information about the halo.
-        self.mass_bins = self.ds.arr(np.zeros(self.bin_count + 1, 
+        self.mass_bins = self.ds.arr(np.zeros(self.bin_count + 1,
                                               dtype='float64'),'Msun')
         dist = np.empty(thissize, dtype='float64')
         cen = self.center_of_mass()
@@ -475,7 +476,7 @@
         self.overdensity = self.mass_bins * Msun2g / \
             (4./3. * math.pi * rho_crit * \
             (self.radial_bins )**3.0)
-        
+
     def _get_ellipsoid_parameters_basic(self):
         np.seterr(all='ignore')
         # check if there are 4 particles to form an ellipsoid
@@ -501,7 +502,7 @@
         for axis in range(np.size(DW)):
             cases = np.array([position[axis],
                                 position[axis] + DW[axis],
-                              position[axis] - DW[axis]])        
+                              position[axis] - DW[axis]])
             # pick out the smallest absolute distance from com
             position[axis] = np.choose(np.abs(cases).argmin(axis=0), cases)
         # find the furthest particle's index
@@ -571,7 +572,7 @@
     _name = "RockstarHalo"
     # See particle_mask
     _radjust = 4.
-    
+
     def maximum_density(self):
         r"""Not implemented."""
         return -1
@@ -635,11 +636,11 @@
     def get_ellipsoid_parameters(self):
         r"""Calculate the parameters that describe the ellipsoid of
         the particles that constitute the halo.
-        
+
         Parameters
         ----------
         None
-        
+
         Returns
         -------
         tuple : (cm, mag_A, mag_B, mag_C, e0_vector, tilt)
@@ -650,7 +651,7 @@
               #. mag_C as a float.
               #. e0_vector as an array.
               #. tilt as a float.
-        
+
         Examples
         --------
         >>> params = halos[0].get_ellipsoid_parameters()
@@ -662,22 +663,22 @@
             basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
         toreturn.extend(updated)
         return tuple(toreturn)
-    
+
     def get_ellipsoid(self):
         r"""Returns an ellipsoidal data object.
-        
+
         This will generate a new, empty ellipsoidal data object for this
         halo.
-        
+
         Parameters
         ----------
         None.
-        
+
         Returns
         -------
         ellipsoid : `yt.data_objects.data_containers.YTEllipsoidBase`
             The ellipsoidal data object.
-        
+
         Examples
         --------
         >>> ell = halos[0].get_ellipsoid()
@@ -686,7 +687,7 @@
         ell = self.data.ds.ellipsoid(ep[0], ep[1], ep[2], ep[3],
             ep[4], ep[5])
         return ell
-    
+
 class HOPHalo(Halo):
     _name = "HOPHalo"
     pass
@@ -763,14 +764,14 @@
             self.size, key)
         if field_data is not None:
             if key == 'particle_index':
-                #this is an index for turning data sorted by particle index 
+                #this is an index for turning data sorted by particle index
                 #into the same order as the fields on disk
                 self._pid_sort = field_data.argsort().argsort()
             #convert to YTArray using the data from disk
             if key == 'particle_mass':
                 field_data = self.ds.arr(field_data, 'Msun')
             else:
-                field_data = self.ds.arr(field_data, 
+                field_data = self.ds.arr(field_data,
                     self.ds._get_field_info('unknown',key).units)
             self._saved_fields[key] = field_data
             return self._saved_fields[key]
@@ -856,21 +857,21 @@
             basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
         toreturn.extend(updated)
         return tuple(toreturn)
-    
+
     def get_ellipsoid(self):
-        r"""Returns an ellipsoidal data object.        
+        r"""Returns an ellipsoidal data object.
         This will generate a new, empty ellipsoidal data object for this
         halo.
-        
+
         Parameters
         ----------
         None.
-        
+
         Returns
         -------
         ellipsoid : `yt.data_objects.data_containers.YTEllipsoidBase`
             The ellipsoidal data object.
-        
+
         Examples
         --------
         >>> ell = halos[0].get_ellipsoid()
@@ -947,11 +948,11 @@
     def maximum_density(self):
         r"""Undefined for text halos."""
         return -1
-    
+
     def maximum_density_location(self):
         r"""Undefined, default to CoM"""
         return self.center_of_mass()
-    
+
     def get_size(self):
         # Have to just get it from the sphere.
         return self["particle_position_x"].size
@@ -964,8 +965,8 @@
     def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
-        *dm_only* is True (default), only run it on the dark matter particles, 
-        otherwise on all particles.  Returns an iterable collection of 
+        *dm_only* is True (default), only run it on the dark matter particles,
+        otherwise on all particles.  Returns an iterable collection of
         *HopGroup* items.
         """
         self._data_source = data_source
@@ -1051,7 +1052,7 @@
         ellipsoid_data : bool.
             Whether to print the ellipsoidal information to the file.
             Default = False.
-        
+
         Examples
         --------
         >>> halos.write_out("HopAnalysis.out")
@@ -1144,10 +1145,10 @@
     _halo_dt = np.dtype([('id', np.int64), ('pos', (np.float32, 6)),
         ('corevel', (np.float32, 3)), ('bulkvel', (np.float32, 3)),
         ('m', np.float32), ('r', np.float32), ('child_r', np.float32),
-        ('vmax_r', np.float32), 
+        ('vmax_r', np.float32),
         ('mgrav', np.float32), ('vmax', np.float32),
         ('rvmax', np.float32), ('rs', np.float32),
-        ('klypin_rs', np.float32), 
+        ('klypin_rs', np.float32),
         ('vrms', np.float32), ('J', (np.float32, 3)),
         ('energy', np.float32), ('spin', np.float32),
         ('alt_m', (np.float32, 4)), ('Xoff', np.float32),
@@ -1221,9 +1222,9 @@
         """
         Read the out_*.list text file produced
         by Rockstar into memory."""
-        
+
         ds = self.ds
-        # In order to read the binary data, we need to figure out which 
+        # In order to read the binary data, we need to figure out which
         # binary files belong to this output.
         basedir = os.path.dirname(self.out_list)
         s = self.out_list.split('_')[-1]
@@ -1523,12 +1524,14 @@
                 id += 1
 
         def haloCmp(h1, h2):
+            def cmp(a, b):
+                return (a > b) - (a < b)
             c = cmp(h1.total_mass(), h2.total_mass())
             if c != 0:
                 return -1 * c
             if c == 0:
                 return cmp(h1.center_of_mass()[0], h2.center_of_mass()[0])
-        self._groups.sort(haloCmp)
+        self._groups.sort(key=cmp_to_key(haloCmp))
         sorted_max_dens = {}
         for i, halo in enumerate(self._groups):
             if halo.id in self._max_dens:
@@ -1873,7 +1876,7 @@
 
 class LoadTextHaloes(GenericHaloFinder, TextHaloList):
     r"""Load a text file of halos.
-    
+
     Like LoadHaloes, but when all that is available is a plain
     text file. This assumes the text file has the 3-positions of halos
     along with a radius. The halo objects created are spheres.
@@ -1882,7 +1885,7 @@
     ----------
     fname : String
         The name of the text file to read in.
-    
+
     columns : dict
         A dict listing the column name : column number pairs for data
         in the text file. It is zero-based (like Python).
@@ -1890,7 +1893,7 @@
         Any column name outside of ['x', 'y', 'z', 'r'] will be attached
         to each halo object in the supplementary dict 'supp'. See
         example.
-    
+
     comment : String
         If the first character of a line is equal to this, the line is
         skipped. Default = "#".
@@ -1915,7 +1918,7 @@
     Parameters
     ----------
     fname : String
-        The name of the Rockstar file to read in. Default = 
+        The name of the Rockstar file to read in. Default =
         "rockstar_halos/out_0.list'.
 
     Examples

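Python 3 removed both the builtin cmp() and the cmp= argument to sort(), which is why the commit re-creates cmp() locally and routes the comparator through functools.cmp_to_key. A minimal sketch of the same adaptation:

    from functools import cmp_to_key

    def cmp(a, b):                    # re-created locally, as in the diff above
        return (a > b) - (a < b)

    def by_mass_desc(m1, m2):         # three-way comparator: heaviest first
        return -cmp(m1, m2)

    masses = [3.0, 1.0, 2.0]
    masses.sort(key=cmp_to_key(by_mass_desc))
    print(masses)                     # [3.0, 2.0, 1.0]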

https://bitbucket.org/yt_analysis/yt/commits/3f8f108118f9/
Changeset:   3f8f108118f9
Branch:      yt
User:        jzuhone
Date:        2015-07-14 18:09:29+00:00
Summary:     Merge
Affected #:  2 files

diff -r 92d1e3dd94ee3264fcb0a90d713258cf87098a58 -r 3f8f108118f90a45492abb63786e59662c7deb71 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -22,6 +22,7 @@
 import glob
 import os
 import os.path as path
+from functools import cmp_to_key
 from collections import defaultdict
 from yt.extern.six import add_metaclass
 from yt.extern.six.moves import zip as izip
@@ -39,7 +40,7 @@
     TINY
 from yt.utilities.physical_ratios import \
      rho_crit_g_cm3_h2
-    
+
 from .hop.EnzoHop import RunHOP
 from .fof.EnzoFOF import RunFOF
 
@@ -138,9 +139,9 @@
         c[2] = self["particle_position_z"] - self.ds.domain_left_edge[2]
         com = []
         for i in range(3):
-            # A halo is likely periodic around a boundary if the distance 
+            # A halo is likely periodic around a boundary if the distance
             # between the max and min particle
-            # positions are larger than half the box. 
+            # positions are larger than half the box.
             # So skip the rest if the converse is true.
             # Note we might make a change here when periodicity-handling is
             # fully implemented.
@@ -444,7 +445,7 @@
         Msun2g = mass_sun_cgs
         rho_crit = rho_crit * ((1.0 + z) ** 3.0)
         # Get some pertinent information about the halo.
-        self.mass_bins = self.ds.arr(np.zeros(self.bin_count + 1, 
+        self.mass_bins = self.ds.arr(np.zeros(self.bin_count + 1,
                                               dtype='float64'),'Msun')
         dist = np.empty(thissize, dtype='float64')
         cen = self.center_of_mass()
@@ -475,7 +476,7 @@
         self.overdensity = self.mass_bins * Msun2g / \
             (4./3. * math.pi * rho_crit * \
             (self.radial_bins )**3.0)
-        
+
     def _get_ellipsoid_parameters_basic(self):
         np.seterr(all='ignore')
         # check if there are 4 particles to form an ellipsoid
@@ -501,7 +502,7 @@
         for axis in range(np.size(DW)):
             cases = np.array([position[axis],
                                 position[axis] + DW[axis],
-                              position[axis] - DW[axis]])        
+                              position[axis] - DW[axis]])
             # pick out the smallest absolute distance from com
             position[axis] = np.choose(np.abs(cases).argmin(axis=0), cases)
         # find the furthest particle's index
@@ -571,7 +572,7 @@
     _name = "RockstarHalo"
     # See particle_mask
     _radjust = 4.
-    
+
     def maximum_density(self):
         r"""Not implemented."""
         return -1
@@ -635,11 +636,11 @@
     def get_ellipsoid_parameters(self):
         r"""Calculate the parameters that describe the ellipsoid of
         the particles that constitute the halo.
-        
+
         Parameters
         ----------
         None
-        
+
         Returns
         -------
         tuple : (cm, mag_A, mag_B, mag_C, e0_vector, tilt)
@@ -650,7 +651,7 @@
               #. mag_C as a float.
               #. e0_vector as an array.
               #. tilt as a float.
-        
+
         Examples
         --------
         >>> params = halos[0].get_ellipsoid_parameters()
@@ -662,22 +663,22 @@
             basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
         toreturn.extend(updated)
         return tuple(toreturn)
-    
+
     def get_ellipsoid(self):
         r"""Returns an ellipsoidal data object.
-        
+
         This will generate a new, empty ellipsoidal data object for this
         halo.
-        
+
         Parameters
         ----------
         None.
-        
+
         Returns
         -------
         ellipsoid : `yt.data_objects.data_containers.YTEllipsoidBase`
             The ellipsoidal data object.
-        
+
         Examples
         --------
         >>> ell = halos[0].get_ellipsoid()
@@ -686,7 +687,7 @@
         ell = self.data.ds.ellipsoid(ep[0], ep[1], ep[2], ep[3],
             ep[4], ep[5])
         return ell
-    
+
 class HOPHalo(Halo):
     _name = "HOPHalo"
     pass
@@ -763,14 +764,14 @@
             self.size, key)
         if field_data is not None:
             if key == 'particle_index':
-                #this is an index for turning data sorted by particle index 
+                #this is an index for turning data sorted by particle index
                 #into the same order as the fields on disk
                 self._pid_sort = field_data.argsort().argsort()
             #convert to YTArray using the data from disk
             if key == 'particle_mass':
                 field_data = self.ds.arr(field_data, 'Msun')
             else:
-                field_data = self.ds.arr(field_data, 
+                field_data = self.ds.arr(field_data,
                     self.ds._get_field_info('unknown',key).units)
             self._saved_fields[key] = field_data
             return self._saved_fields[key]
@@ -856,21 +857,21 @@
             basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
         toreturn.extend(updated)
         return tuple(toreturn)
-    
+
     def get_ellipsoid(self):
-        r"""Returns an ellipsoidal data object.        
+        r"""Returns an ellipsoidal data object.
         This will generate a new, empty ellipsoidal data object for this
         halo.
-        
+
         Parameters
         ----------
         None.
-        
+
         Returns
         -------
         ellipsoid : `yt.data_objects.data_containers.YTEllipsoidBase`
             The ellipsoidal data object.
-        
+
         Examples
         --------
         >>> ell = halos[0].get_ellipsoid()
@@ -947,11 +948,11 @@
     def maximum_density(self):
         r"""Undefined for text halos."""
         return -1
-    
+
     def maximum_density_location(self):
         r"""Undefined, default to CoM"""
         return self.center_of_mass()
-    
+
     def get_size(self):
         # Have to just get it from the sphere.
         return self["particle_position_x"].size
@@ -964,8 +965,8 @@
     def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
-        *dm_only* is True (default), only run it on the dark matter particles, 
-        otherwise on all particles.  Returns an iterable collection of 
+        *dm_only* is True (default), only run it on the dark matter particles,
+        otherwise on all particles.  Returns an iterable collection of
         *HopGroup* items.
         """
         self._data_source = data_source
@@ -1051,7 +1052,7 @@
         ellipsoid_data : bool.
             Whether to print the ellipsoidal information to the file.
             Default = False.
-        
+
         Examples
         --------
         >>> halos.write_out("HopAnalysis.out")
@@ -1144,10 +1145,10 @@
     _halo_dt = np.dtype([('id', np.int64), ('pos', (np.float32, 6)),
         ('corevel', (np.float32, 3)), ('bulkvel', (np.float32, 3)),
         ('m', np.float32), ('r', np.float32), ('child_r', np.float32),
-        ('vmax_r', np.float32), 
+        ('vmax_r', np.float32),
         ('mgrav', np.float32), ('vmax', np.float32),
         ('rvmax', np.float32), ('rs', np.float32),
-        ('klypin_rs', np.float32), 
+        ('klypin_rs', np.float32),
         ('vrms', np.float32), ('J', (np.float32, 3)),
         ('energy', np.float32), ('spin', np.float32),
         ('alt_m', (np.float32, 4)), ('Xoff', np.float32),
@@ -1221,9 +1222,9 @@
         """
         Read the out_*.list text file produced
         by Rockstar into memory."""
-        
+
         ds = self.ds
-        # In order to read the binary data, we need to figure out which 
+        # In order to read the binary data, we need to figure out which
         # binary files belong to this output.
         basedir = os.path.dirname(self.out_list)
         s = self.out_list.split('_')[-1]
@@ -1523,12 +1524,14 @@
                 id += 1
 
         def haloCmp(h1, h2):
+            def cmp(a, b):
+                return (a > b) - (a < b)
             c = cmp(h1.total_mass(), h2.total_mass())
             if c != 0:
                 return -1 * c
             if c == 0:
                 return cmp(h1.center_of_mass()[0], h2.center_of_mass()[0])
-        self._groups.sort(haloCmp)
+        self._groups.sort(key=cmp_to_key(haloCmp))
         sorted_max_dens = {}
         for i, halo in enumerate(self._groups):
             if halo.id in self._max_dens:
@@ -1873,7 +1876,7 @@
 
 class LoadTextHaloes(GenericHaloFinder, TextHaloList):
     r"""Load a text file of halos.
-    
+
     Like LoadHaloes, but when all that is available is a plain
     text file. This assumes the text file has the 3-positions of halos
     along with a radius. The halo objects created are spheres.
@@ -1882,7 +1885,7 @@
     ----------
     fname : String
         The name of the text file to read in.
-    
+
     columns : dict
         A dict listing the column name : column number pairs for data
         in the text file. It is zero-based (like Python).
@@ -1890,7 +1893,7 @@
         Any column name outside of ['x', 'y', 'z', 'r'] will be attached
         to each halo object in the supplementary dict 'supp'. See
         example.
-    
+
     comment : String
         If the first character of a line is equal to this, the line is
         skipped. Default = "#".
@@ -1915,7 +1918,7 @@
     Parameters
     ----------
     fname : String
-        The name of the Rockstar file to read in. Default = 
+        The name of the Rockstar file to read in. Default =
         "rockstar_halos/out_0.list'.
 
     Examples

diff -r 92d1e3dd94ee3264fcb0a90d713258cf87098a58 -r 3f8f108118f90a45492abb63786e59662c7deb71 yt/frontends/owls/io.py
--- a/yt/frontends/owls/io.py
+++ b/yt/frontends/owls/io.py
@@ -70,7 +70,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in sorted(data_files):
+        for data_file in sorted(data_files, key=lambda x: x.filename):
             f = _get_h5_handle(data_file.filename)
             # This double-reads
             for ptype, field_list in sorted(ptf.items()):
@@ -88,7 +88,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in sorted(data_files):
+        for data_file in sorted(data_files, key=lambda x: x.filename):
             f = _get_h5_handle(data_file.filename)
             for ptype, field_list in sorted(ptf.items()):
                 if data_file.total_particles[ptype] == 0:


https://bitbucket.org/yt_analysis/yt/commits/ee13f761abdd/
Changeset:   ee13f761abdd
Branch:      yt
User:        jzuhone
Date:        2015-07-15 13:28:01+00:00
Summary:     Don't pass *args and **kwargs to object()
Affected #:  1 file

diff -r 3f8f108118f90a45492abb63786e59662c7deb71 -r ee13f761abdd6c4957b1017f7b0eab5fe1c947ed yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -130,7 +130,7 @@
     def __new__(cls, outputs, *args, **kwargs):
         if isinstance(outputs, string_types):
             outputs = get_filenames_from_glob_pattern(outputs)
-        ret = super(DatasetSeries, cls).__new__(cls, *args, **kwargs)
+        ret = super(DatasetSeries, cls).__new__(cls)
         try:
             ret._pre_outputs = outputs[:]
         except TypeError:

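On Python 3, object.__new__ raises TypeError when an overridden __new__ forwards extra arguments to it (Python 2 was more permissive), so only the class is passed through. A minimal sketch with a hypothetical stand-in:

    class SeriesDemo(object):                       # hypothetical stand-in for DatasetSeries
        def __new__(cls, outputs, *args, **kwargs):
            # super(...).__new__(cls, *args, **kwargs) raises TypeError on Python 3
            ret = super(SeriesDemo, cls).__new__(cls)
            ret._pre_outputs = outputs[:]
            return ret

    series = SeriesDemo(["DD0000", "DD0001"], parallel=True)
    print(series._pre_outputs)                      # ['DD0000', 'DD0001']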

https://bitbucket.org/yt_analysis/yt/commits/b140e78916d4/
Changeset:   b140e78916d4
Branch:      yt
User:        jzuhone
Date:        2015-07-15 13:28:52+00:00
Summary:     We no longer need this recipe--I created it, it's ancient, and doesn't work with 3.0
Affected #:  1 file

diff -r ee13f761abdd6c4957b1017f7b0eab5fe1c947ed -r b140e78916d4149442f09362fb979b5fdb86890c doc/source/cookbook/free_free_field.py
--- a/doc/source/cookbook/free_free_field.py
+++ /dev/null
@@ -1,105 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import numpy as np
-import yt
-# Need to grab the proton mass from the constants database
-from yt.utilities.physical_constants import mp
-
-exit()
-# Define the emission field
-
-keVtoerg = 1.602e-9  # Convert energy in keV to energy in erg
-KtokeV = 8.617e-08  # Convert degrees Kelvin to degrees keV
-sqrt3 = np.sqrt(3.)
-expgamma = 1.78107241799  # Exponential of Euler's constant
-
-
-def _FreeFree_Emission(field, data):
-
-    if data.has_field_parameter("Z"):
-        Z = data.get_field_parameter("Z")
-    else:
-        Z = 1.077  # Primordial H/He plasma
-
-    if data.has_field_parameter("mue"):
-        mue = data.get_field_parameter("mue")
-    else:
-        mue = 1./0.875  # Primordial H/He plasma
-
-    if data.has_field_parameter("mui"):
-        mui = data.get_field_parameter("mui")
-    else:
-        mui = 1./0.8125  # Primordial H/He plasma
-
-    if data.has_field_parameter("Ephoton"):
-        Ephoton = data.get_field_parameter("Ephoton")
-    else:
-        Ephoton = 1.0  # in keV
-
-    if data.has_field_parameter("photon_emission"):
-        photon_emission = data.get_field_parameter("photon_emission")
-    else:
-        photon_emission = False  # Flag for energy or photon emission
-
-    n_e = data["density"]/(mue*mp)
-    n_i = data["density"]/(mui*mp)
-    kT = data["temperature"]*KtokeV
-
-    # Compute the Gaunt factor
-
-    g_ff = np.zeros(kT.shape)
-    g_ff[Ephoton/kT > 1.] = np.sqrt((3./np.pi)*kT[Ephoton/kT > 1.]/Ephoton)
-    g_ff[Ephoton/kT < 1.] = (sqrt3/np.pi)*np.log((4./expgamma) *
-                                                 kT[Ephoton/kT < 1.]/Ephoton)
-
-    eps_E = 1.64e-20*Z*Z*n_e*n_i/np.sqrt(data["temperature"]) * \
-        np.exp(-Ephoton/kT)*g_ff
-
-    if photon_emission:
-        eps_E /= (Ephoton*keVtoerg)
-
-    return eps_E
-
-yt.add_field("FreeFree_Emission", function=_FreeFree_Emission)
-
-# Define the luminosity derived quantity
-def _FreeFreeLuminosity(data):
-    return (data["FreeFree_Emission"]*data["cell_volume"]).sum()
-
-
-def _combFreeFreeLuminosity(data, luminosity):
-    return luminosity.sum()
-
-yt.add_quantity("FreeFree_Luminosity", function=_FreeFreeLuminosity,
-                combine_function=_combFreeFreeLuminosity, n_ret=1)
-
-ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
-
-sphere = ds.sphere(ds.domain_center, (100., "kpc"))
-
-# Print out the total luminosity at 1 keV for the sphere
-
-print("L_E (1 keV, primordial) = ", sphere.quantities["FreeFree_Luminosity"]())
-
-# The defaults for the field assume a H/He primordial plasma.
-# Let's set the appropriate parameters for a pure hydrogen plasma.
-
-sphere.set_field_parameter("mue", 1.0)
-sphere.set_field_parameter("mui", 1.0)
-sphere.set_field_parameter("Z", 1.0)
-
-print("L_E (1 keV, pure hydrogen) = ", sphere.quantities["FreeFree_Luminosity"]())
-
-# Now let's print the luminosity at an energy of E = 10 keV
-
-sphere.set_field_parameter("Ephoton", 10.0)
-
-print("L_E (10 keV, pure hydrogen) = ", sphere.quantities["FreeFree_Luminosity"]())
-
-# Finally, let's set the flag for photon emission, to get the total number
-# of photons emitted at this energy:
-
-sphere.set_field_parameter("photon_emission", True)
-
-print("L_ph (10 keV, pure hydrogen) = ", sphere.quantities["FreeFree_Luminosity"]())


https://bitbucket.org/yt_analysis/yt/commits/793a9fe1209a/
Changeset:   793a9fe1209a
Branch:      yt
User:        jzuhone
Date:        2015-07-15 13:32:09+00:00
Summary:     Unified codebase for cookbook scripts
Affected #:  4 files

diff -r b140e78916d4149442f09362fb979b5fdb86890c -r 793a9fe1209a7bed24123b00b27d5cafb962f3e8 doc/source/cookbook/fit_spectrum.py
--- a/doc/source/cookbook/fit_spectrum.py
+++ b/doc/source/cookbook/fit_spectrum.py
@@ -10,10 +10,10 @@
 def _OVI_number_density(field, data):
     return data['H_number_density']*2.0
 
-# Define a function that will accept a ds and add the new field 
+# Define a function that will accept a ds and add the new field
 # defined above.  This will be given to the LightRay below.
 def setup_ds(ds):
-    ds.add_field("O_p5_number_density", 
+    ds.add_field(("gas","O_p5_number_density"),
                  function=_OVI_number_density,
                  units="cm**-3")
 
@@ -62,7 +62,7 @@
 
 # Get all fields that need to be added to the light ray
 fields = ['temperature']
-for s, params in species_dicts.iteritems():
+for s, params in species_dicts.items():
     fields.append(params['field'])
 
 # Make a light ray, and set njobs to -1 to use one core
@@ -79,7 +79,7 @@
 sp = AbsorptionSpectrum(900.0, 1400.0, 50000)
 
 # Iterate over species
-for s, params in species_dicts.iteritems():
+for s, params in species_dicts.items():
     # Iterate over transitions for a single species
     for i in range(params['numLines']):
         # Add the lines to the spectrum

diff -r b140e78916d4149442f09362fb979b5fdb86890c -r 793a9fe1209a7bed24123b00b27d5cafb962f3e8 doc/source/cookbook/simulation_analysis.py
--- a/doc/source/cookbook/simulation_analysis.py
+++ b/doc/source/cookbook/simulation_analysis.py
@@ -2,11 +2,11 @@
 yt.enable_parallelism()
 import collections
 
-# Enable parallelism in the script (assuming it was called with 
+# Enable parallelism in the script (assuming it was called with
 # `mpirun -np <n_procs>` )
 yt.enable_parallelism()
 
-# By using wildcards such as ? and * with the load command, we can load up a 
+# By using wildcards such as ? and * with the load command, we can load up a
 # Time Series containing all of these datasets simultaneously.
 ts = yt.load('enzo_tiny_cosmology/DD????/DD????')
 
@@ -16,7 +16,7 @@
 # Create an empty dictionary
 data = {}
 
-# Iterate through each dataset in the Time Series (using piter allows it 
+# Iterate through each dataset in the Time Series (using piter allows it
 # to happen in parallel automatically across available processors)
 for ds in ts.piter():
     ad = ds.all_data()
@@ -31,6 +31,6 @@
 # Print out all the values we calculated.
 print("Dataset      Redshift        Density Min      Density Max")
 print("---------------------------------------------------------")
-for key, val in od.iteritems(): 
+for key, val in od.items(): 
     print("%s       %05.3f          %5.3g g/cm^3   %5.3g g/cm^3" % \
            (key, val[1], val[0][0], val[0][1]))

diff -r b140e78916d4149442f09362fb979b5fdb86890c -r 793a9fe1209a7bed24123b00b27d5cafb962f3e8 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -1011,7 +1011,7 @@
 
     """
     f = h5py.File(file_name, 'w')
-    for ion, params in lineDic.iteritems():
+    for ion, params in lineDic.items():
         f.create_dataset("{0}/N".format(ion),data=params['N'])
         f.create_dataset("{0}/b".format(ion),data=params['b'])
         f.create_dataset("{0}/z".format(ion),data=params['z'])

diff -r b140e78916d4149442f09362fb979b5fdb86890c -r 793a9fe1209a7bed24123b00b27d5cafb962f3e8 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -343,7 +343,7 @@
             del output["object"]
 
         # Combine results from each slice.
-        all_slices = all_storage.keys()
+        all_slices = list(all_storage.keys())
         all_slices.sort()
         for my_slice in all_slices:
             if save_slice_images:

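dict.iteritems() exists only on Python 2; dict.items() runs on both (a list there, a lightweight view on Python 3) and is the unified spelling used throughout these scripts. A minimal sketch:

    species_dicts = {"OVI": {"field": "O_p5_number_density"}}
    for s, params in species_dicts.items():   # .iteritems() is Python 2 only
        print(s, params["field"])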

https://bitbucket.org/yt_analysis/yt/commits/cf1d78d7041b/
Changeset:   cf1d78d7041b
Branch:      yt
User:        jzuhone
Date:        2015-07-15 13:38:18+00:00
Summary:     Unified codebase for clump finding
Affected #:  1 file

diff -r 793a9fe1209a7bed24123b00b27d5cafb962f3e8 -r cf1d78d7041ba1863cab798809cbc09231723732 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -20,7 +20,8 @@
 from yt.fields.derived_field import \
     ValidateSpatial
 from yt.funcs import mylog
-    
+from yt.extern.six import string_types
+
 from .clump_info_items import \
     clump_info_registry
 from .clump_validators import \
@@ -268,7 +269,7 @@
 
 def write_clump_index(clump, level, fh):
     top = False
-    if not isinstance(fh, file):
+    if isinstance(fh, string_types):
         fh = open(fh, "w")
         top = True
     for q in range(level):
@@ -285,7 +286,7 @@
 
 def write_clumps(clump, level, fh):
     top = False
-    if not isinstance(fh, file):
+    if isinstance(fh, string_types):
         fh = open(fh, "w")
         top = True
     if ((clump.children is None) or (len(clump.children) == 0)):

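Python 3 has no `file` builtin, so the old isinstance(fh, file) check raises NameError there; testing whether the argument is a path string, as the commit does via six's string_types, works on both interpreters. A self-contained sketch of the same check:

    try:
        string_types = (str, unicode)   # Python 2
    except NameError:
        string_types = (str,)           # Python 3: `file` and `unicode` builtins are gone

    def open_for_write(fh):
        if isinstance(fh, string_types):   # a filename was passed, not an open handle
            fh = open(fh, "w")
        return fh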

https://bitbucket.org/yt_analysis/yt/commits/d8828a093b4a/
Changeset:   d8828a093b4a
Branch:      yt
User:        jzuhone
Date:        2015-07-15 13:41:48+00:00
Summary:     Unified codebase
Affected #:  1 file

diff -r cf1d78d7041ba1863cab798809cbc09231723732 -r d8828a093b4a3524a6b6c4375806dbdebb57b8a3 doc/source/cookbook/time_series.py
--- a/doc/source/cookbook/time_series.py
+++ b/doc/source/cookbook/time_series.py
@@ -1,6 +1,7 @@
 import yt
 import matplotlib.pyplot as plt
 import numpy as np
+from IPython import embed
 
 # Enable parallelism in the script (assuming it was called with
 # `mpirun -np <n_procs>` )
@@ -12,7 +13,7 @@
 
 storage = {}
 
-# By using the piter() function, we can iterate on every dataset in 
+# By using the piter() function, we can iterate on every dataset in
 # the TimeSeries object.  By using the storage keyword, we can populate
 # a dictionary where the dataset is the key, and sto.result is the value
 # for later use when the loop is complete.
@@ -25,13 +26,13 @@
     sphere = ds.sphere("c", (100., "kpc"))
     # Calculate the entropy within that sphere
     entr = sphere["entropy"].sum()
-    # Store the current time and sphere entropy for this dataset in our 
+    # Store the current time and sphere entropy for this dataset in our
     # storage dictionary as a tuple
     store.result = (ds.current_time.in_units('Gyr'), entr)
 
 # Convert the storage dictionary values to a Nx2 array, so the can be easily
 # plotted
-arr = np.array(storage.values())
+arr = np.array(list(storage.values()))
 
 # Plot up the results: time versus entropy
 plt.semilogy(arr[:,0], arr[:,1], 'r-')


https://bitbucket.org/yt_analysis/yt/commits/77fff3da85f3/
Changeset:   77fff3da85f3
Branch:      yt
User:        jzuhone
Date:        2015-07-15 14:01:42+00:00
Summary:     Unified codebase
Affected #:  1 file

diff -r d8828a093b4a3524a6b6c4375806dbdebb57b8a3 -r 77fff3da85f3c4e95bc2ee9cb63165d13a722f0d yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -19,6 +19,7 @@
 from yt.visualization._mpl_imports import FigureCanvasAgg
 from yt.units.yt_array import YTQuantity, YTArray
 from yt.utilities.fits_image import FITSImageData
+from io import BytesIO
 
 import os
 
@@ -255,12 +256,12 @@
 
     def _repr_html_(self):
         ret = ''
-        for k, v in self.plots.iteritems():
+        for k, v in self.plots.items():
             canvas = FigureCanvasAgg(v)
-            f = StringIO()
+            f = BytesIO()
             canvas.print_figure(f)
             f.seek(0)
-            img = base64.b64encode(f.read())
+            img = base64.b64encode(f.read()).decode()
             ret += r'<img style="max-width:100%%;max-height:100%%;" ' \
                    r'src="data:image/png;base64,%s"><br>' % img
         return ret

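Two Python 3 details meet here: the Agg canvas writes binary PNG data (hence BytesIO rather than StringIO), and base64.b64encode returns bytes, which must be decoded before interpolation into an HTML string. A minimal sketch (payload illustrative):

    import base64
    from io import BytesIO

    buf = BytesIO(b"\x89PNG fake image payload")   # stand-in for the Agg canvas output
    img = base64.b64encode(buf.read()).decode()    # b64encode returns bytes on Python 3
    html = '<img src="data:image/png;base64,%s">' % img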

https://bitbucket.org/yt_analysis/yt/commits/1b1e464fe2df/
Changeset:   1b1e464fe2df
Branch:      yt
User:        jzuhone
Date:        2015-07-15 14:21:06+00:00
Summary:     Unified codebase
Affected #:  1 file

diff -r 77fff3da85f3c4e95bc2ee9cb63165d13a722f0d -r 1b1e464fe2df4d73e20f889d8d7766cb82a728a7 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -32,7 +32,7 @@
     def _read_particle_coords(self, chunks, ptf):
         pdata = self.ds._handle[self.ds.first_image].data
         assert(len(ptf) == 1)
-        ptype = ptf.keys()[0]
+        ptype = list(ptf.keys())[0]
         x = np.asarray(pdata.field("X"), dtype="=f8")
         y = np.asarray(pdata.field("Y"), dtype="=f8")
         z = np.ones(x.shape)
@@ -43,7 +43,7 @@
     def _read_particle_fields(self, chunks, ptf, selector):
         pdata = self.ds._handle[self.ds.first_image].data
         assert(len(ptf) == 1)
-        ptype = ptf.keys()[0]
+        ptype = list(ptf.keys())[0]
         field_list = ptf[ptype]
         x = np.asarray(pdata.field("X"), dtype="=f8")
         y = np.asarray(pdata.field("Y"), dtype="=f8")


https://bitbucket.org/yt_analysis/yt/commits/6ae70176ba5c/
Changeset:   6ae70176ba5c
Branch:      yt
User:        jzuhone
Date:        2015-07-15 14:21:27+00:00
Summary:     Fixing some problems I introduced earlier
Affected #:  1 file

diff -r 1b1e464fe2df4d73e20f889d8d7766cb82a728a7 -r 6ae70176ba5cdde2b43f66842f075664b7e415de yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -222,7 +222,7 @@
             plot_spec = [plot_spec.copy() for p in profiles]
 
         ProfilePlot._initialize_instance(self, profiles, label, plot_spec, y_log)
-
+        
     @validate_plot
     def save(self, name=None, suffix=None):
         r"""
@@ -530,7 +530,10 @@
                 xma = xmax
             extrema = {p.x_field: ((xmi, str(p.x.units)), (xma, str(p.x.units)))}
             units = {p.x_field: str(p.x.units)}
-            logs = {p.x_field: self.x_log}
+            if self.x_log is None:
+                logs = None
+            else:
+                logs = {p.x_field: self.x_log}
             for field in p.field_map.values():
                 units[field] = str(p.field_data[field].units)
             self.profiles[i] = \
@@ -1148,8 +1151,14 @@
         extrema = {p.x_field: ((xmin, str(p.x.units)), (xmax, str(p.x.units))),
                    p.y_field: ((p.y_bins.min(), str(p.y.units)),
                                (p.y_bins.max(), str(p.y.units)))}
-        logs = {p.x_field: self.x_log,
-                p.y_field: self.y_log}
+        if self.x_log is not None or self.y_log is not None:
+            logs = {}
+        else:
+            logs = None
+        if self.x_log is not None:
+            logs[p.x_field] = self.x_log
+        if self.y_log is not None:
+            logs[p.y_field] = self.y_log
         deposition = getattr(self.profile, "deposition", None)
         if deposition is None:
             additional_kwargs = {'accumulation': p.accumulation,
@@ -1206,8 +1215,14 @@
         extrema = {p.x_field: ((p.x_bins.min(), str(p.x.units)),
                                (p.x_bins.max(), str(p.x.units))),
                    p.y_field: ((ymin, str(p.y.units)), (ymax, str(p.y.units)))}
-        logs = {p.x_field: self.x_log,
-                p.y_field: self.y_log}
+        if self.x_log is not None or self.y_log is not None:
+            logs = {}
+        else:
+            logs = None
+        if self.x_log is not None:
+            logs[p.x_field] = self.x_log
+        if self.y_log is not None:
+            logs[p.y_field] = self.y_log
         deposition = getattr(self.profile, "deposition", None)
         if deposition is None:
             additional_kwargs = {'accumulation': p.accumulation,


https://bitbucket.org/yt_analysis/yt/commits/42208efdce8b/
Changeset:   42208efdce8b
Branch:      yt
User:        jzuhone
Date:        2015-07-15 15:22:01+00:00
Summary:     Unified codebase
Affected #:  1 file

diff -r 6ae70176ba5cdde2b43f66842f075664b7e415de -r 42208efdce8b0421996c44949d5b620bc629c87d yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -27,7 +27,6 @@
 import numpy as np
 import weakref
 import re
-import string
 
 class FixedResolutionBuffer(object):
     r"""
@@ -178,13 +177,13 @@
             pstr = m.string[m.start()+1:m.end()-1]
             segments = fname.split("_")
             for i,s in enumerate(segments):
-                segments[i] = string.capitalize(s)
+                segments[i] = s.capitalize()
                 if s == pstr:
                     ipstr = i
             element = segments[ipstr-1]
             roman = pnum2rom[pstr[1:]]
             label = element + '\ ' + roman + '\ ' + \
-                string.join(segments[ipstr+1:], '\ ')
+                '\ '.join(segments[ipstr+1:])
         else:
             label = fname
         return label

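Python 3's string module no longer provides function versions of capitalize() and join(); the str methods are the portable spelling. A minimal sketch:

    segments = ["o", "vi", "emission"]
    segments = [s.capitalize() for s in segments]   # was string.capitalize(s)
    label = '\\ '.join(segments)                    # was string.join(segments[...], '\\ ')
    print(label)                                    # O\ Vi\ Emission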

https://bitbucket.org/yt_analysis/yt/commits/0e986fadfefd/
Changeset:   0e986fadfefd
Branch:      yt
User:        jzuhone
Date:        2015-07-15 18:03:52+00:00
Summary:     Add a note about this only working in Python 2
Affected #:  1 file

diff -r 42208efdce8b0421996c44949d5b620bc629c87d -r 0e986fadfefda5a699418b870dd200011a8f1b7a doc/source/analyzing/analysis_modules/SZ_projections.ipynb
--- a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
+++ b/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:2cc168b2c1737c67647aa29892c0213e7a58233fa53c809f9cd975a4306e9bc8"
+  "signature": "sha256:487383ec23a092310522ec25bd02ad2eb16a3402c5ed3d2b103d33fe17697b3c"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -70,6 +70,13 @@
      ]
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "<font color='red'>**NOTE**</font>: Currently, use of the SZpack library to create S-Z projections in yt is limited to Python 2.x."
+     ]
+    },
+    {
      "cell_type": "heading",
      "level": 2,
      "metadata": {},


https://bitbucket.org/yt_analysis/yt/commits/a3f89cae255a/
Changeset:   a3f89cae255a
Branch:      yt
User:        jzuhone
Date:        2015-07-15 16:42:56+00:00
Summary:     Halo catalog and rockstar unified codebase
Affected #:  2 files

diff -r 42208efdce8b0421996c44949d5b620bc629c87d -r a3f89cae255a6e180c6b4bc192f6b9d95fe232f8 yt/frontends/halo_catalog/io.py
--- a/yt/frontends/halo_catalog/io.py
+++ b/yt/frontends/halo_catalog/io.py
@@ -39,7 +39,7 @@
         data_files = set([])
         # Only support halo reading for now.
         assert(len(ptf) == 1)
-        assert(ptf.keys()[0] == "halos")
+        assert(list(ptf.keys())[0] == "halos")
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
@@ -57,7 +57,7 @@
         data_files = set([])
         # Only support halo reading for now.
         assert(len(ptf) == 1)
-        assert(ptf.keys()[0] == "halos")
+        assert(list(ptf.keys())[0] == "halos")
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)

diff -r 42208efdce8b0421996c44949d5b620bc629c87d -r a3f89cae255a6e180c6b4bc192f6b9d95fe232f8 yt/frontends/rockstar/io.py
--- a/yt/frontends/rockstar/io.py
+++ b/yt/frontends/rockstar/io.py
@@ -28,6 +28,7 @@
 from yt.utilities.lib.geometry_utils import compute_morton
 
 from yt.geometry.oct_container import _ORDER_MAX
+from operator import attrgetter
 
 class IOHandlerRockstarBinary(BaseIOHandler):
     _dataset_type = "rockstar_binary"
@@ -45,12 +46,11 @@
         data_files = set([])
         # Only support halo reading for now.
         assert(len(ptf) == 1)
-        assert(ptf.keys()[0] == "halos")
+        assert(list(ptf.keys())[0] == "halos")
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        
-        for data_file in sorted(data_files):
+        for data_file in sorted(data_files,key=attrgetter("filename")):
             pcount = data_file.header['num_halos']
             with open(data_file.filename, "rb") as f:
                 f.seek(data_file._position_offset, os.SEEK_SET)
@@ -66,11 +66,11 @@
         data_files = set([])
         # Only support halo reading for now.
         assert(len(ptf) == 1)
-        assert(ptf.keys()[0] == "halos")
+        assert(list(ptf.keys())[0] == "halos")
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in sorted(data_files):
+        for data_file in sorted(data_files,key=attrgetter("filename")):
             pcount = data_file.header['num_halos']
             with open(data_file.filename, "rb") as f:
                 for ptype, field_list in sorted(ptf.items()):

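operator.attrgetter("filename") is the functional twin of the key=lambda x: x.filename used in the earlier owls fix, again giving otherwise-unorderable objects a stable sort on Python 3. A minimal sketch with a hypothetical stand-in:

    from operator import attrgetter

    class DataFile(object):                    # hypothetical stand-in for rockstar data files
        def __init__(self, filename):
            self.filename = filename

    files = {DataFile("halos_1.bin"), DataFile("halos_0.bin")}
    ordered = sorted(files, key=attrgetter("filename"))
    print([df.filename for df in ordered])     # ['halos_0.bin', 'halos_1.bin']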

https://bitbucket.org/yt_analysis/yt/commits/32a7d4ac0f9b/
Changeset:   32a7d4ac0f9b
Branch:      yt
User:        jzuhone
Date:        2015-07-15 17:44:39+00:00
Summary:     Better way of handling these
Affected #:  2 files

diff -r a3f89cae255a6e180c6b4bc192f6b9d95fe232f8 -r 32a7d4ac0f9b6b138703e0e2ab86b06b5920c058 yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -12,15 +12,18 @@
 
 import numpy as np
 import base64
-from yt.extern.six.moves import StringIO
+from yt.extern.six import PY3
 from yt.fields.derived_field import ValidateSpatial
 from yt.utilities.on_demand_imports import _astropy
 from yt.funcs import mylog, get_image_suffix
 from yt.visualization._mpl_imports import FigureCanvasAgg
 from yt.units.yt_array import YTQuantity, YTArray
 from yt.utilities.fits_image import FITSImageData
-from io import BytesIO
-
+if PY3:
+    from io import BytesIO as IO
+else:
+    from yt.extern.six.moves import StringIO as IO
+    
 import os
 
 def _make_counts(emin, emax):
@@ -258,7 +261,7 @@
         ret = ''
         for k, v in self.plots.items():
             canvas = FigureCanvasAgg(v)
-            f = BytesIO()
+            f = IO()
             canvas.print_figure(f)
             f.seek(0)
             img = base64.b64encode(f.read()).decode()

diff -r a3f89cae255a6e180c6b4bc192f6b9d95fe232f8 -r 32a7d4ac0f9b6b138703e0e2ab86b06b5920c058 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -20,10 +20,12 @@
     BaseIOHandler
 from yt.utilities.logger import ytLogger as mylog
 import yt.utilities.fortran_utils as fpu
-try:
+from yt.extern.six import PY3
+
+if PY3:
+    from io import BytesIO as IO
+else:
     from cStringIO import StringIO as IO
-except ImportError:
-    from io import BytesIO as IO
 
 class IOHandlerRAMSES(BaseIOHandler):
     _dataset_type = "ramses"


https://bitbucket.org/yt_analysis/yt/commits/06f11431edb6/
Changeset:   06f11431edb6
Branch:      yt
User:        jzuhone
Date:        2015-07-15 18:06:00+00:00
Summary:     Merge
Affected #:  1 file

diff -r 32a7d4ac0f9b6b138703e0e2ab86b06b5920c058 -r 06f11431edb6c114f5f13ca56dc79df3ff17d77e doc/source/analyzing/analysis_modules/SZ_projections.ipynb
--- a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
+++ b/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:2cc168b2c1737c67647aa29892c0213e7a58233fa53c809f9cd975a4306e9bc8"
+  "signature": "sha256:487383ec23a092310522ec25bd02ad2eb16a3402c5ed3d2b103d33fe17697b3c"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -70,6 +70,13 @@
      ]
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "<font color='red'>**NOTE**</font>: Currently, use of the SZpack library to create S-Z projections in yt is limited to Python 2.x."
+     ]
+    },
+    {
      "cell_type": "heading",
      "level": 2,
      "metadata": {},


https://bitbucket.org/yt_analysis/yt/commits/2cde834170ca/
Changeset:   2cde834170ca
Branch:      yt
User:        jzuhone
Date:        2015-07-15 20:34:31+00:00
Summary:     Keep this as unit object so we can retain unit registries
Affected #:  1 file

diff -r 111d46ff2faa66de17edf22a0ce4b14707806515 -r 2cde834170caa48af78ef852ff084173accf615a yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -122,7 +122,7 @@
         for key in fields:
             if key not in exclude_fields:
                 if hasattr(img_data[key], "units"):
-                    self.field_units[key] = str(img_data[key].units)
+                    self.field_units[key] = img_data[key].units
                 else:
                     self.field_units[key] = "dimensionless"
                 mylog.info("Making a FITS image of field %s" % key)


https://bitbucket.org/yt_analysis/yt/commits/46df01c5b71d/
Changeset:   46df01c5b71d
Branch:      yt
User:        jzuhone
Date:        2015-07-15 20:35:17+00:00
Summary:     Merge
Affected #:  19 files

diff -r 2cde834170caa48af78ef852ff084173accf615a -r 46df01c5b71de9acf35065b7e536462887afb8f1 doc/source/analyzing/analysis_modules/SZ_projections.ipynb
--- a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
+++ b/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:2cc168b2c1737c67647aa29892c0213e7a58233fa53c809f9cd975a4306e9bc8"
+  "signature": "sha256:487383ec23a092310522ec25bd02ad2eb16a3402c5ed3d2b103d33fe17697b3c"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -70,6 +70,13 @@
      ]
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "<font color='red'>**NOTE**</font>: Currently, use of the SZpack library to create S-Z projections in yt is limited to Python 2.x."
+     ]
+    },
+    {
      "cell_type": "heading",
      "level": 2,
      "metadata": {},

diff -r 2cde834170caa48af78ef852ff084173accf615a -r 46df01c5b71de9acf35065b7e536462887afb8f1 doc/source/cookbook/fit_spectrum.py
--- a/doc/source/cookbook/fit_spectrum.py
+++ b/doc/source/cookbook/fit_spectrum.py
@@ -10,10 +10,10 @@
 def _OVI_number_density(field, data):
     return data['H_number_density']*2.0
 
-# Define a function that will accept a ds and add the new field 
+# Define a function that will accept a ds and add the new field
 # defined above.  This will be given to the LightRay below.
 def setup_ds(ds):
-    ds.add_field("O_p5_number_density", 
+    ds.add_field(("gas","O_p5_number_density"),
                  function=_OVI_number_density,
                  units="cm**-3")
 
@@ -62,7 +62,7 @@
 
 # Get all fields that need to be added to the light ray
 fields = ['temperature']
-for s, params in species_dicts.iteritems():
+for s, params in species_dicts.items():
     fields.append(params['field'])
 
 # Make a light ray, and set njobs to -1 to use one core
@@ -79,7 +79,7 @@
 sp = AbsorptionSpectrum(900.0, 1400.0, 50000)
 
 # Iterate over species
-for s, params in species_dicts.iteritems():
+for s, params in species_dicts.items():
     # Iterate over transitions for a single species
     for i in range(params['numLines']):
         # Add the lines to the spectrum

diff -r 2cde834170caa48af78ef852ff084173accf615a -r 46df01c5b71de9acf35065b7e536462887afb8f1 doc/source/cookbook/free_free_field.py
--- a/doc/source/cookbook/free_free_field.py
+++ /dev/null
@@ -1,105 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import numpy as np
-import yt
-# Need to grab the proton mass from the constants database
-from yt.utilities.physical_constants import mp
-
-exit()
-# Define the emission field
-
-keVtoerg = 1.602e-9  # Convert energy in keV to energy in erg
-KtokeV = 8.617e-08  # Convert degrees Kelvin to degrees keV
-sqrt3 = np.sqrt(3.)
-expgamma = 1.78107241799  # Exponential of Euler's constant
-
-
-def _FreeFree_Emission(field, data):
-
-    if data.has_field_parameter("Z"):
-        Z = data.get_field_parameter("Z")
-    else:
-        Z = 1.077  # Primordial H/He plasma
-
-    if data.has_field_parameter("mue"):
-        mue = data.get_field_parameter("mue")
-    else:
-        mue = 1./0.875  # Primordial H/He plasma
-
-    if data.has_field_parameter("mui"):
-        mui = data.get_field_parameter("mui")
-    else:
-        mui = 1./0.8125  # Primordial H/He plasma
-
-    if data.has_field_parameter("Ephoton"):
-        Ephoton = data.get_field_parameter("Ephoton")
-    else:
-        Ephoton = 1.0  # in keV
-
-    if data.has_field_parameter("photon_emission"):
-        photon_emission = data.get_field_parameter("photon_emission")
-    else:
-        photon_emission = False  # Flag for energy or photon emission
-
-    n_e = data["density"]/(mue*mp)
-    n_i = data["density"]/(mui*mp)
-    kT = data["temperature"]*KtokeV
-
-    # Compute the Gaunt factor
-
-    g_ff = np.zeros(kT.shape)
-    g_ff[Ephoton/kT > 1.] = np.sqrt((3./np.pi)*kT[Ephoton/kT > 1.]/Ephoton)
-    g_ff[Ephoton/kT < 1.] = (sqrt3/np.pi)*np.log((4./expgamma) *
-                                                 kT[Ephoton/kT < 1.]/Ephoton)
-
-    eps_E = 1.64e-20*Z*Z*n_e*n_i/np.sqrt(data["temperature"]) * \
-        np.exp(-Ephoton/kT)*g_ff
-
-    if photon_emission:
-        eps_E /= (Ephoton*keVtoerg)
-
-    return eps_E
-
-yt.add_field("FreeFree_Emission", function=_FreeFree_Emission)
-
-# Define the luminosity derived quantity
-def _FreeFreeLuminosity(data):
-    return (data["FreeFree_Emission"]*data["cell_volume"]).sum()
-
-
-def _combFreeFreeLuminosity(data, luminosity):
-    return luminosity.sum()
-
-yt.add_quantity("FreeFree_Luminosity", function=_FreeFreeLuminosity,
-                combine_function=_combFreeFreeLuminosity, n_ret=1)
-
-ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
-
-sphere = ds.sphere(ds.domain_center, (100., "kpc"))
-
-# Print out the total luminosity at 1 keV for the sphere
-
-print("L_E (1 keV, primordial) = ", sphere.quantities["FreeFree_Luminosity"]())
-
-# The defaults for the field assume a H/He primordial plasma.
-# Let's set the appropriate parameters for a pure hydrogen plasma.
-
-sphere.set_field_parameter("mue", 1.0)
-sphere.set_field_parameter("mui", 1.0)
-sphere.set_field_parameter("Z", 1.0)
-
-print("L_E (1 keV, pure hydrogen) = ", sphere.quantities["FreeFree_Luminosity"]())
-
-# Now let's print the luminosity at an energy of E = 10 keV
-
-sphere.set_field_parameter("Ephoton", 10.0)
-
-print("L_E (10 keV, pure hydrogen) = ", sphere.quantities["FreeFree_Luminosity"]())
-
-# Finally, let's set the flag for photon emission, to get the total number
-# of photons emitted at this energy:
-
-sphere.set_field_parameter("photon_emission", True)
-
-print("L_ph (10 keV, pure hydrogen) = ", sphere.quantities["FreeFree_Luminosity"]())

diff -r 2cde834170caa48af78ef852ff084173accf615a -r 46df01c5b71de9acf35065b7e536462887afb8f1 doc/source/cookbook/simulation_analysis.py
--- a/doc/source/cookbook/simulation_analysis.py
+++ b/doc/source/cookbook/simulation_analysis.py
@@ -2,11 +2,11 @@
 yt.enable_parallelism()
 import collections
 
-# Enable parallelism in the script (assuming it was called with 
+# Enable parallelism in the script (assuming it was called with
 # `mpirun -np <n_procs>` )
 yt.enable_parallelism()
 
-# By using wildcards such as ? and * with the load command, we can load up a 
+# By using wildcards such as ? and * with the load command, we can load up a
 # Time Series containing all of these datasets simultaneously.
 ts = yt.load('enzo_tiny_cosmology/DD????/DD????')
 
@@ -16,7 +16,7 @@
 # Create an empty dictionary
 data = {}
 
-# Iterate through each dataset in the Time Series (using piter allows it 
+# Iterate through each dataset in the Time Series (using piter allows it
 # to happen in parallel automatically across available processors)
 for ds in ts.piter():
     ad = ds.all_data()
@@ -31,6 +31,6 @@
 # Print out all the values we calculated.
 print("Dataset      Redshift        Density Min      Density Max")
 print("---------------------------------------------------------")
-for key, val in od.iteritems(): 
+for key, val in od.items(): 
     print("%s       %05.3f          %5.3g g/cm^3   %5.3g g/cm^3" % \
            (key, val[1], val[0][0], val[0][1]))

diff -r 2cde834170caa48af78ef852ff084173accf615a -r 46df01c5b71de9acf35065b7e536462887afb8f1 doc/source/cookbook/time_series.py
--- a/doc/source/cookbook/time_series.py
+++ b/doc/source/cookbook/time_series.py
@@ -1,6 +1,7 @@
 import yt
 import matplotlib.pyplot as plt
 import numpy as np
+from IPython import embed
 
 # Enable parallelism in the script (assuming it was called with
 # `mpirun -np <n_procs>` )
@@ -12,7 +13,7 @@
 
 storage = {}
 
-# By using the piter() function, we can iterate on every dataset in 
+# By using the piter() function, we can iterate on every dataset in
 # the TimeSeries object.  By using the storage keyword, we can populate
 # a dictionary where the dataset is the key, and sto.result is the value
 # for later use when the loop is complete.
@@ -25,13 +26,13 @@
     sphere = ds.sphere("c", (100., "kpc"))
     # Calculate the entropy within that sphere
     entr = sphere["entropy"].sum()
-    # Store the current time and sphere entropy for this dataset in our 
+    # Store the current time and sphere entropy for this dataset in our
     # storage dictionary as a tuple
     store.result = (ds.current_time.in_units('Gyr'), entr)
 
 # Convert the storage dictionary values to a Nx2 array, so the can be easily
 # plotted
-arr = np.array(storage.values())
+arr = np.array(list(storage.values()))
 
 # Plot up the results: time versus entropy
 plt.semilogy(arr[:,0], arr[:,1], 'r-')
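
The list() wrapper matters because Python 3 dict views are not sequences;
NumPy wraps the view itself in a 0-d object array instead of building a
numeric one:

    import numpy as np

    storage = {0: (1.0, 10.0), 1: (2.0, 20.0)}
    bad = np.array(storage.values())         # Python 3: 0-d object array
    good = np.array(list(storage.values()))  # shape (2, 2) float array
    print(bad.shape, good.shape)             # -> () (2, 2)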

diff -r 2cde834170caa48af78ef852ff084173accf615a -r 46df01c5b71de9acf35065b7e536462887afb8f1 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -1011,7 +1011,7 @@
 
     """
     f = h5py.File(file_name, 'w')
-    for ion, params in lineDic.iteritems():
+    for ion, params in lineDic.items():
         f.create_dataset("{0}/N".format(ion),data=params['N'])
         f.create_dataset("{0}/b".format(ion),data=params['b'])
         f.create_dataset("{0}/z".format(ion),data=params['z'])

diff -r 2cde834170caa48af78ef852ff084173accf615a -r 46df01c5b71de9acf35065b7e536462887afb8f1 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -343,7 +343,7 @@
             del output["object"]
 
         # Combine results from each slice.
-        all_slices = all_storage.keys()
+        all_slices = list(all_storage.keys())
         all_slices.sort()
         for my_slice in all_slices:
             if save_slice_images:

diff -r 2cde834170caa48af78ef852ff084173accf615a -r 46df01c5b71de9acf35065b7e536462887afb8f1 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -22,6 +22,7 @@
 import glob
 import os
 import os.path as path
+from functools import cmp_to_key
 from collections import defaultdict
 from yt.extern.six import add_metaclass
 from yt.extern.six.moves import zip as izip
@@ -39,7 +40,7 @@
     TINY
 from yt.utilities.physical_ratios import \
      rho_crit_g_cm3_h2
-    
+
 from .hop.EnzoHop import RunHOP
 from .fof.EnzoFOF import RunFOF
 
@@ -138,9 +139,9 @@
         c[2] = self["particle_position_z"] - self.ds.domain_left_edge[2]
         com = []
         for i in range(3):
-            # A halo is likely periodic around a boundary if the distance 
+            # A halo is likely periodic around a boundary if the distance
             # between the max and min particle
-            # positions are larger than half the box. 
+            # positions are larger than half the box.
             # So skip the rest if the converse is true.
             # Note we might make a change here when periodicity-handling is
             # fully implemented.
@@ -444,7 +445,7 @@
         Msun2g = mass_sun_cgs
         rho_crit = rho_crit * ((1.0 + z) ** 3.0)
         # Get some pertinent information about the halo.
-        self.mass_bins = self.ds.arr(np.zeros(self.bin_count + 1, 
+        self.mass_bins = self.ds.arr(np.zeros(self.bin_count + 1,
                                               dtype='float64'),'Msun')
         dist = np.empty(thissize, dtype='float64')
         cen = self.center_of_mass()
@@ -475,7 +476,7 @@
         self.overdensity = self.mass_bins * Msun2g / \
             (4./3. * math.pi * rho_crit * \
             (self.radial_bins )**3.0)
-        
+
     def _get_ellipsoid_parameters_basic(self):
         np.seterr(all='ignore')
         # check if there are 4 particles to form an ellipsoid
@@ -501,7 +502,7 @@
         for axis in range(np.size(DW)):
             cases = np.array([position[axis],
                                 position[axis] + DW[axis],
-                              position[axis] - DW[axis]])        
+                              position[axis] - DW[axis]])
             # pick out the smallest absolute distance from com
             position[axis] = np.choose(np.abs(cases).argmin(axis=0), cases)
         # find the furthest particle's index
@@ -571,7 +572,7 @@
     _name = "RockstarHalo"
     # See particle_mask
     _radjust = 4.
-    
+
     def maximum_density(self):
         r"""Not implemented."""
         return -1
@@ -635,11 +636,11 @@
     def get_ellipsoid_parameters(self):
         r"""Calculate the parameters that describe the ellipsoid of
         the particles that constitute the halo.
-        
+
         Parameters
         ----------
         None
-        
+
         Returns
         -------
         tuple : (cm, mag_A, mag_B, mag_C, e0_vector, tilt)
@@ -650,7 +651,7 @@
               #. mag_C as a float.
               #. e0_vector as an array.
               #. tilt as a float.
-        
+
         Examples
         --------
         >>> params = halos[0].get_ellipsoid_parameters()
@@ -662,22 +663,22 @@
             basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
         toreturn.extend(updated)
         return tuple(toreturn)
-    
+
     def get_ellipsoid(self):
         r"""Returns an ellipsoidal data object.
-        
+
         This will generate a new, empty ellipsoidal data object for this
         halo.
-        
+
         Parameters
         ----------
         None.
-        
+
         Returns
         -------
         ellipsoid : `yt.data_objects.data_containers.YTEllipsoidBase`
             The ellipsoidal data object.
-        
+
         Examples
         --------
         >>> ell = halos[0].get_ellipsoid()
@@ -686,7 +687,7 @@
         ell = self.data.ds.ellipsoid(ep[0], ep[1], ep[2], ep[3],
             ep[4], ep[5])
         return ell
-    
+
 class HOPHalo(Halo):
     _name = "HOPHalo"
     pass
@@ -763,14 +764,14 @@
             self.size, key)
         if field_data is not None:
             if key == 'particle_index':
-                #this is an index for turning data sorted by particle index 
+                #this is an index for turning data sorted by particle index
                 #into the same order as the fields on disk
                 self._pid_sort = field_data.argsort().argsort()
             #convert to YTArray using the data from disk
             if key == 'particle_mass':
                 field_data = self.ds.arr(field_data, 'Msun')
             else:
-                field_data = self.ds.arr(field_data, 
+                field_data = self.ds.arr(field_data,
                     self.ds._get_field_info('unknown',key).units)
             self._saved_fields[key] = field_data
             return self._saved_fields[key]
@@ -856,21 +857,21 @@
             basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
         toreturn.extend(updated)
         return tuple(toreturn)
-    
+
     def get_ellipsoid(self):
-        r"""Returns an ellipsoidal data object.        
+        r"""Returns an ellipsoidal data object.
         This will generate a new, empty ellipsoidal data object for this
         halo.
-        
+
         Parameters
         ----------
         None.
-        
+
         Returns
         -------
         ellipsoid : `yt.data_objects.data_containers.YTEllipsoidBase`
             The ellipsoidal data object.
-        
+
         Examples
         --------
         >>> ell = halos[0].get_ellipsoid()
@@ -947,11 +948,11 @@
     def maximum_density(self):
         r"""Undefined for text halos."""
         return -1
-    
+
     def maximum_density_location(self):
         r"""Undefined, default to CoM"""
         return self.center_of_mass()
-    
+
     def get_size(self):
         # Have to just get it from the sphere.
         return self["particle_position_x"].size
@@ -964,8 +965,8 @@
     def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
-        *dm_only* is True (default), only run it on the dark matter particles, 
-        otherwise on all particles.  Returns an iterable collection of 
+        *dm_only* is True (default), only run it on the dark matter particles,
+        otherwise on all particles.  Returns an iterable collection of
         *HopGroup* items.
         """
         self._data_source = data_source
@@ -1051,7 +1052,7 @@
         ellipsoid_data : bool.
             Whether to print the ellipsoidal information to the file.
             Default = False.
-        
+
         Examples
         --------
         >>> halos.write_out("HopAnalysis.out")
@@ -1144,10 +1145,10 @@
     _halo_dt = np.dtype([('id', np.int64), ('pos', (np.float32, 6)),
         ('corevel', (np.float32, 3)), ('bulkvel', (np.float32, 3)),
         ('m', np.float32), ('r', np.float32), ('child_r', np.float32),
-        ('vmax_r', np.float32), 
+        ('vmax_r', np.float32),
         ('mgrav', np.float32), ('vmax', np.float32),
         ('rvmax', np.float32), ('rs', np.float32),
-        ('klypin_rs', np.float32), 
+        ('klypin_rs', np.float32),
         ('vrms', np.float32), ('J', (np.float32, 3)),
         ('energy', np.float32), ('spin', np.float32),
         ('alt_m', (np.float32, 4)), ('Xoff', np.float32),
@@ -1221,9 +1222,9 @@
         """
         Read the out_*.list text file produced
         by Rockstar into memory."""
-        
+
         ds = self.ds
-        # In order to read the binary data, we need to figure out which 
+        # In order to read the binary data, we need to figure out which
         # binary files belong to this output.
         basedir = os.path.dirname(self.out_list)
         s = self.out_list.split('_')[-1]
@@ -1523,12 +1524,14 @@
                 id += 1
 
         def haloCmp(h1, h2):
+            def cmp(a, b):
+                return (a > b) - (a < b)
             c = cmp(h1.total_mass(), h2.total_mass())
             if c != 0:
                 return -1 * c
             if c == 0:
                 return cmp(h1.center_of_mass()[0], h2.center_of_mass()[0])
-        self._groups.sort(haloCmp)
+        self._groups.sort(key=cmp_to_key(haloCmp))
         sorted_max_dens = {}
         for i, halo in enumerate(self._groups):
             if halo.id in self._max_dens:
@@ -1873,7 +1876,7 @@
 
 class LoadTextHaloes(GenericHaloFinder, TextHaloList):
     r"""Load a text file of halos.
-    
+
     Like LoadHaloes, but when all that is available is a plain
     text file. This assumes the text file has the 3-positions of halos
     along with a radius. The halo objects created are spheres.
@@ -1882,7 +1885,7 @@
     ----------
     fname : String
         The name of the text file to read in.
-    
+
     columns : dict
         A dict listing the column name : column number pairs for data
         in the text file. It is zero-based (like Python).
@@ -1890,7 +1893,7 @@
         Any column name outside of ['x', 'y', 'z', 'r'] will be attached
         to each halo object in the supplementary dict 'supp'. See
         example.
-    
+
     comment : String
         If the first character of a line is equal to this, the line is
         skipped. Default = "#".
@@ -1915,7 +1918,7 @@
     Parameters
     ----------
     fname : String
-        The name of the Rockstar file to read in. Default = 
+        The name of the Rockstar file to read in. Default =
         "rockstar_halos/out_0.list'.
 
     Examples
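
Python 3 removed both the cmp() builtin and the cmp= argument to list.sort(),
which is what the cmp_to_key change above works around. A self-contained
sketch of the same descending-mass sort with a position tie-break, using
plain dicts to stand in for the real Halo objects:

    from functools import cmp_to_key

    def cmp(a, b):
        return (a > b) - (a < b)

    def halo_cmp(h1, h2):
        c = cmp(h1["mass"], h2["mass"])
        if c != 0:
            return -1 * c                 # heavier halos sort first
        return cmp(h1["x"], h2["x"])      # tie-break on x position

    halos = [{"mass": 1.0, "x": 0.2},
             {"mass": 5.0, "x": 0.1},
             {"mass": 1.0, "x": 0.1}]
    halos.sort(key=cmp_to_key(halo_cmp))
    # -> mass 5.0 first, then the mass-1.0 pair ordered by x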

diff -r 2cde834170caa48af78ef852ff084173accf615a -r 46df01c5b71de9acf35065b7e536462887afb8f1 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -20,7 +20,8 @@
 from yt.fields.derived_field import \
     ValidateSpatial
 from yt.funcs import mylog
-    
+from yt.extern.six import string_types
+
 from .clump_info_items import \
     clump_info_registry
 from .clump_validators import \
@@ -268,7 +269,7 @@
 
 def write_clump_index(clump, level, fh):
     top = False
-    if not isinstance(fh, file):
+    if isinstance(fh, string_types):
         fh = open(fh, "w")
         top = True
     for q in range(level):
@@ -285,7 +286,7 @@
 
 def write_clumps(clump, level, fh):
     top = False
-    if not isinstance(fh, file):
+    if isinstance(fh, string_types):
         fh = open(fh, "w")
         top = True
     if ((clump.children is None) or (len(clump.children) == 0)):
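
Python 3 also drops the file builtin that the old isinstance check relied on,
so the test is inverted to "is this a path string?". A sketch of the
filename-or-handle pattern, assuming six for string_types:

    from six import string_types

    def write_report(fh):
        top = False
        if isinstance(fh, string_types):   # got a path, open it ourselves
            fh = open(fh, "w")
            top = True
        fh.write("clump report\n")
        if top:                            # only close what we opened
            fh.close()

    write_report("report.txt")             # a path or an open handle works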

diff -r 2cde834170caa48af78ef852ff084173accf615a -r 46df01c5b71de9acf35065b7e536462887afb8f1 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -130,7 +130,7 @@
     def __new__(cls, outputs, *args, **kwargs):
         if isinstance(outputs, string_types):
             outputs = get_filenames_from_glob_pattern(outputs)
-        ret = super(DatasetSeries, cls).__new__(cls, *args, **kwargs)
+        ret = super(DatasetSeries, cls).__new__(cls)
         try:
             ret._pre_outputs = outputs[:]
         except TypeError:
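
The dropped *args/**kwargs are a Python 3 requirement: object.__new__ raises
TypeError if it receives extra arguments, since those belong to __init__. A
minimal sketch of the corrected pattern:

    class Series(object):
        def __new__(cls, outputs, *args, **kwargs):
            # Do not forward *args/**kwargs; object.__new__ rejects them.
            return super(Series, cls).__new__(cls)

        def __init__(self, outputs, parallel=True):
            self.outputs = outputs
            self.parallel = parallel

    s = Series([1, 2, 3], parallel=False)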

diff -r 2cde834170caa48af78ef852ff084173accf615a -r 46df01c5b71de9acf35065b7e536462887afb8f1 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -76,7 +76,7 @@
         for key, val in self._handle.attrs.items():
             if key.startswith('component_'):
                 comp_number = int(re.match('component_(\d+)', key).groups()[0])
-                field_dict[val] = comp_number
+                field_dict[val.decode('utf-8')] = comp_number
         self._field_dict = field_dict
         return self._field_dict
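
The .decode('utf-8') handles h5py on Python 3, where attribute strings come
back as bytes. A short sketch, assuming h5py is installed (it writes a
scratch file):

    import h5py

    with h5py.File("scratch.h5", "w") as f:
        f.attrs["component_0"] = b"density"

    with h5py.File("scratch.h5", "r") as f:
        val = f.attrs["component_0"]       # bytes on Python 3
        name = val.decode("utf-8") if isinstance(val, bytes) else val
        print(name)                        # -> density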
 

diff -r 2cde834170caa48af78ef852ff084173accf615a -r 46df01c5b71de9acf35065b7e536462887afb8f1 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -32,7 +32,7 @@
     def _read_particle_coords(self, chunks, ptf):
         pdata = self.ds._handle[self.ds.first_image].data
         assert(len(ptf) == 1)
-        ptype = ptf.keys()[0]
+        ptype = list(ptf.keys())[0]
         x = np.asarray(pdata.field("X"), dtype="=f8")
         y = np.asarray(pdata.field("Y"), dtype="=f8")
         z = np.ones(x.shape)
@@ -43,7 +43,7 @@
     def _read_particle_fields(self, chunks, ptf, selector):
         pdata = self.ds._handle[self.ds.first_image].data
         assert(len(ptf) == 1)
-        ptype = ptf.keys()[0]
+        ptype = list(ptf.keys())[0]
         field_list = ptf[ptype]
         x = np.asarray(pdata.field("X"), dtype="=f8")
         y = np.asarray(pdata.field("Y"), dtype="=f8")

diff -r 2cde834170caa48af78ef852ff084173accf615a -r 46df01c5b71de9acf35065b7e536462887afb8f1 yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -12,14 +12,18 @@
 
 import numpy as np
 import base64
-from yt.extern.six.moves import StringIO
+from yt.extern.six import PY3
 from yt.fields.derived_field import ValidateSpatial
 from yt.utilities.on_demand_imports import _astropy
 from yt.funcs import mylog, get_image_suffix
 from yt.visualization._mpl_imports import FigureCanvasAgg
 from yt.units.yt_array import YTQuantity, YTArray
 from yt.utilities.fits_image import FITSImageData
-
+if PY3:
+    from io import BytesIO as IO
+else:
+    from yt.extern.six.moves import StringIO as IO
+    
 import os
 
 def _make_counts(emin, emax):
@@ -255,12 +259,12 @@
 
     def _repr_html_(self):
         ret = ''
-        for k, v in self.plots.iteritems():
+        for k, v in self.plots.items():
             canvas = FigureCanvasAgg(v)
-            f = StringIO()
+            f = IO()
             canvas.print_figure(f)
             f.seek(0)
-            img = base64.b64encode(f.read())
+            img = base64.b64encode(f.read()).decode()
             ret += r'<img style="max-width:100%%;max-height:100%%;" ' \
                    r'src="data:image/png;base64,%s"><br>' % img
         return ret

diff -r 2cde834170caa48af78ef852ff084173accf615a -r 46df01c5b71de9acf35065b7e536462887afb8f1 yt/frontends/halo_catalog/io.py
--- a/yt/frontends/halo_catalog/io.py
+++ b/yt/frontends/halo_catalog/io.py
@@ -39,7 +39,7 @@
         data_files = set([])
         # Only support halo reading for now.
         assert(len(ptf) == 1)
-        assert(ptf.keys()[0] == "halos")
+        assert(list(ptf.keys())[0] == "halos")
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
@@ -57,7 +57,7 @@
         data_files = set([])
         # Only support halo reading for now.
         assert(len(ptf) == 1)
-        assert(ptf.keys()[0] == "halos")
+        assert(list(ptf.keys())[0] == "halos")
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)

diff -r 2cde834170caa48af78ef852ff084173accf615a -r 46df01c5b71de9acf35065b7e536462887afb8f1 yt/frontends/owls/io.py
--- a/yt/frontends/owls/io.py
+++ b/yt/frontends/owls/io.py
@@ -70,7 +70,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in sorted(data_files):
+        for data_file in sorted(data_files, key=lambda x: x.filename):
             f = _get_h5_handle(data_file.filename)
             # This double-reads
             for ptype, field_list in sorted(ptf.items()):
@@ -88,7 +88,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in sorted(data_files):
+        for data_file in sorted(data_files, key=lambda x: x.filename):
             f = _get_h5_handle(data_file.filename)
             for ptype, field_list in sorted(ptf.items()):
                 if data_file.total_particles[ptype] == 0:
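
sorted(data_files) worked on Python 2 because arbitrary objects had a
fallback ordering; Python 3 raises TypeError instead, hence the explicit
key. A sketch (the rockstar change below does the same with
operator.attrgetter):

    from operator import attrgetter

    class DataFile(object):
        def __init__(self, filename):
            self.filename = filename

    files = {DataFile("b.h5"), DataFile("a.h5")}
    for df in sorted(files, key=lambda x: x.filename):
        print(df.filename)                 # a.h5, then b.h5

    # Equivalent, as in yt/frontends/rockstar/io.py:
    # sorted(files, key=attrgetter("filename"))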

diff -r 2cde834170caa48af78ef852ff084173accf615a -r 46df01c5b71de9acf35065b7e536462887afb8f1 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -20,7 +20,12 @@
     BaseIOHandler
 from yt.utilities.logger import ytLogger as mylog
 import yt.utilities.fortran_utils as fpu
-from yt.extern.six.moves import cStringIO
+from yt.extern.six import PY3
+
+if PY3:
+    from io import BytesIO as IO
+else:
+    from cStringIO import StringIO as IO
 
 class IOHandlerRAMSES(BaseIOHandler):
     _dataset_type = "ramses"
@@ -37,7 +42,7 @@
                 f = open(subset.domain.hydro_fn, "rb")
                 # This contains the boundary information, so we skim through
                 # and pick off the right vectors
-                content = cStringIO(f.read())
+                content = IO(f.read())
                 rv = subset.fill(content, fields, selector)
                 for ft, f in fields:
                     d = rv.pop(f)

diff -r 2cde834170caa48af78ef852ff084173accf615a -r 46df01c5b71de9acf35065b7e536462887afb8f1 yt/frontends/rockstar/io.py
--- a/yt/frontends/rockstar/io.py
+++ b/yt/frontends/rockstar/io.py
@@ -28,6 +28,7 @@
 from yt.utilities.lib.geometry_utils import compute_morton
 
 from yt.geometry.oct_container import _ORDER_MAX
+from operator import attrgetter
 
 class IOHandlerRockstarBinary(BaseIOHandler):
     _dataset_type = "rockstar_binary"
@@ -45,12 +46,11 @@
         data_files = set([])
         # Only support halo reading for now.
         assert(len(ptf) == 1)
-        assert(ptf.keys()[0] == "halos")
+        assert(list(ptf.keys())[0] == "halos")
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        
-        for data_file in sorted(data_files):
+        for data_file in sorted(data_files,key=attrgetter("filename")):
             pcount = data_file.header['num_halos']
             with open(data_file.filename, "rb") as f:
                 f.seek(data_file._position_offset, os.SEEK_SET)
@@ -66,11 +66,11 @@
         data_files = set([])
         # Only support halo reading for now.
         assert(len(ptf) == 1)
-        assert(ptf.keys()[0] == "halos")
+        assert(list(ptf.keys())[0] == "halos")
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in sorted(data_files):
+        for data_file in sorted(data_files,key=attrgetter("filename")):
             pcount = data_file.header['num_halos']
             with open(data_file.filename, "rb") as f:
                 for ptype, field_list in sorted(ptf.items()):

diff -r 2cde834170caa48af78ef852ff084173accf615a -r 46df01c5b71de9acf35065b7e536462887afb8f1 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -27,7 +27,6 @@
 import numpy as np
 import weakref
 import re
-import string
 
 class FixedResolutionBuffer(object):
     r"""
@@ -178,13 +177,13 @@
             pstr = m.string[m.start()+1:m.end()-1]
             segments = fname.split("_")
             for i,s in enumerate(segments):
-                segments[i] = string.capitalize(s)
+                segments[i] = s.capitalize()
                 if s == pstr:
                     ipstr = i
             element = segments[ipstr-1]
             roman = pnum2rom[pstr[1:]]
             label = element + '\ ' + roman + '\ ' + \
-                string.join(segments[ipstr+1:], '\ ')
+                '\ '.join(segments[ipstr+1:])
         else:
             label = fname
         return label
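
Python 3 removed the string-module function forms; the str methods
substituted above are the direct replacements:

    segments = ["fe", "XVII", "emission"]
    print(segments[0].capitalize())    # was string.capitalize(segments[0])
    print(" ".join(segments[1:]))      # was string.join(segments[1:], " ")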

diff -r 2cde834170caa48af78ef852ff084173accf615a -r 46df01c5b71de9acf35065b7e536462887afb8f1 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -222,7 +222,7 @@
             plot_spec = [plot_spec.copy() for p in profiles]
 
         ProfilePlot._initialize_instance(self, profiles, label, plot_spec, y_log)
-
+        
     @validate_plot
     def save(self, name=None, suffix=None):
         r"""
@@ -530,7 +530,10 @@
                 xma = xmax
             extrema = {p.x_field: ((xmi, str(p.x.units)), (xma, str(p.x.units)))}
             units = {p.x_field: str(p.x.units)}
-            logs = {p.x_field: self.x_log}
+            if self.x_log is None:
+                logs = None
+            else:
+                logs = {p.x_field: self.x_log}
             for field in p.field_map.values():
                 units[field] = str(p.field_data[field].units)
             self.profiles[i] = \
@@ -1148,8 +1151,14 @@
         extrema = {p.x_field: ((xmin, str(p.x.units)), (xmax, str(p.x.units))),
                    p.y_field: ((p.y_bins.min(), str(p.y.units)),
                                (p.y_bins.max(), str(p.y.units)))}
-        logs = {p.x_field: self.x_log,
-                p.y_field: self.y_log}
+        if self.x_log is not None or self.y_log is not None:
+            logs = {}
+        else:
+            logs = None
+        if self.x_log is not None:
+            logs[p.x_field] = self.x_log
+        if self.y_log is not None:
+            logs[p.y_field] = self.y_log
         deposition = getattr(self.profile, "deposition", None)
         if deposition is None:
             additional_kwargs = {'accumulation': p.accumulation,
@@ -1206,8 +1215,14 @@
         extrema = {p.x_field: ((p.x_bins.min(), str(p.x.units)),
                                (p.x_bins.max(), str(p.x.units))),
                    p.y_field: ((ymin, str(p.y.units)), (ymax, str(p.y.units)))}
-        logs = {p.x_field: self.x_log,
-                p.y_field: self.y_log}
+        if self.x_log is not None or self.y_log is not None:
+            logs = {}
+        else:
+            logs = None
+        if self.x_log is not None:
+            logs[p.x_field] = self.x_log
+        if self.y_log is not None:
+            logs[p.y_field] = self.y_log
         deposition = getattr(self.profile, "deposition", None)
         if deposition is None:
             additional_kwargs = {'accumulation': p.accumulation,
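
The reworked logs handling implements one pattern in both two-axis hunks:
pass logs=None to create_profile unless the user explicitly set an axis
scaling, so the profile's own defaults survive. Distilled into a hypothetical
helper:

    def build_logs(x_field, y_field, x_log=None, y_log=None):
        if x_log is None and y_log is None:
            return None                # let create_profile pick defaults
        logs = {}
        if x_log is not None:
            logs[x_field] = x_log
        if y_log is not None:
            logs[y_field] = y_log
        return logs

    print(build_logs("density", "temperature"))              # None
    print(build_logs("density", "temperature", x_log=True))  # {'density': True}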


https://bitbucket.org/yt_analysis/yt/commits/f2357819d9dc/
Changeset:   f2357819d9dc
Branch:      yt
User:        jzuhone
Date:        2015-07-16 02:33:28+00:00
Summary:     If we transpose, it won't write it
Affected #:  1 file

diff -r 06f11431edb6c114f5f13ca56dc79df3ff17d77e -r f2357819d9dcbdcdb3cdd830c04dd7bb45a31290 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -170,7 +170,7 @@
         bitmap_array = np.concatenate([bitmap_array.astype('uint8'),
                                        alpha_channel], axis=-1)
     if transpose:
-        bitmap_array = bitmap_array.swapaxes(0,1)
+        bitmap_array = bitmap_array.swapaxes(0,1).copy(order="C")
     if filename is not None:
         pw.write_png(bitmap_array, filename)
     else:
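
The terse summary ("If we transpose, it won't write it") comes down to
contiguity: swapaxes returns a non-contiguous view, and a PNG writer that
expects a C-contiguous buffer (as the summary suggests) rejects it. A quick
demonstration:

    import numpy as np

    bitmap = np.zeros((4, 3, 4), dtype="uint8")   # RGBA image
    view = bitmap.swapaxes(0, 1)
    print(view.flags["C_CONTIGUOUS"])             # False: just a view
    fixed = bitmap.swapaxes(0, 1).copy(order="C")
    print(fixed.flags["C_CONTIGUOUS"])            # True: safe to write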


https://bitbucket.org/yt_analysis/yt/commits/1c6090614f12/
Changeset:   1c6090614f12
Branch:      yt
User:        jzuhone
Date:        2015-07-16 02:33:44+00:00
Summary:     Merge
Affected #:  1 file

diff -r f2357819d9dcbdcdb3cdd830c04dd7bb45a31290 -r 1c6090614f120b59430da4e8aefa76bed532724f yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -122,7 +122,7 @@
         for key in fields:
             if key not in exclude_fields:
                 if hasattr(img_data[key], "units"):
-                    self.field_units[key] = str(img_data[key].units)
+                    self.field_units[key] = img_data[key].units
                 else:
                     self.field_units[key] = "dimensionless"
                 mylog.info("Making a FITS image of field %s" % key)


https://bitbucket.org/yt_analysis/yt/commits/a3f85d2c4e87/
Changeset:   a3f85d2c4e87
Branch:      yt
User:        jzuhone
Date:        2015-07-16 14:36:05+00:00
Summary:     We have a simple way to define gradient fields now, so I'm removing this confusion
Affected #:  1 file

diff -r 1c6090614f120b59430da4e8aefa76bed532724f -r a3f85d2c4e874560d4fe590847a653e302a2baba doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -174,7 +174,7 @@
 
 Field plugins can be loaded dynamically, although at present this is not
 particularly useful.  Plans for extending field plugins to dynamically load, to
-enable simple definition of common types (gradient, divergence, etc), and to
+enable simple definition of common types (divergence, curl, etc), and to
 more verbosely describe available fields, have been put in place for future
 versions.
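
The "simple way" the summary alludes to is presumably
Dataset.add_gradient_fields from yt 3.x; a hedged sketch against a standard
sample dataset (the path is a placeholder for whatever you have locally):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    grad_fields = ds.add_gradient_fields(("gas", "density"))
    # Adds ("gas", "density_gradient_x"), ..._y, ..._z and ..._magnitude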
 


https://bitbucket.org/yt_analysis/yt/commits/3abb8256a83f/
Changeset:   3abb8256a83f
Branch:      yt
User:        jzuhone
Date:        2015-07-16 16:16:28+00:00
Summary:     Remove this line
Affected #:  1 file

diff -r a3f85d2c4e874560d4fe590847a653e302a2baba -r 3abb8256a83fa94721882e0066c65f08b0f32398 doc/source/cookbook/time_series.py
--- a/doc/source/cookbook/time_series.py
+++ b/doc/source/cookbook/time_series.py
@@ -1,7 +1,6 @@
 import yt
 import matplotlib.pyplot as plt
 import numpy as np
-from IPython import embed
 
 # Enable parallelism in the script (assuming it was called with
 # `mpirun -np <n_procs>` )


https://bitbucket.org/yt_analysis/yt/commits/ad366f1dd932/
Changeset:   ad366f1dd932
Branch:      yt
User:        chummels
Date:        2015-07-16 16:28:25+00:00
Summary:     Merged in jzuhone/yt (pull request #1638)

[bugfix] Bugfixes for Python 3
Affected #:  23 files

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf doc/source/analyzing/analysis_modules/SZ_projections.ipynb
--- a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
+++ b/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:2cc168b2c1737c67647aa29892c0213e7a58233fa53c809f9cd975a4306e9bc8"
+  "signature": "sha256:487383ec23a092310522ec25bd02ad2eb16a3402c5ed3d2b103d33fe17697b3c"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -70,6 +70,13 @@
      ]
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "<font color='red'>**NOTE**</font>: Currently, use of the SZpack library to create S-Z projections in yt is limited to Python 2.x."
+     ]
+    },
+    {
      "cell_type": "heading",
      "level": 2,
      "metadata": {},

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -174,7 +174,7 @@
 
 Field plugins can be loaded dynamically, although at present this is not
 particularly useful.  Plans for extending field plugins to dynamically load, to
-enable simple definition of common types (gradient, divergence, etc), and to
+enable simple definition of common types (divergence, curl, etc), and to
 more verbosely describe available fields, have been put in place for future
 versions.
 

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf doc/source/cookbook/fit_spectrum.py
--- a/doc/source/cookbook/fit_spectrum.py
+++ b/doc/source/cookbook/fit_spectrum.py
@@ -10,10 +10,10 @@
 def _OVI_number_density(field, data):
     return data['H_number_density']*2.0
 
-# Define a function that will accept a ds and add the new field 
+# Define a function that will accept a ds and add the new field
 # defined above.  This will be given to the LightRay below.
 def setup_ds(ds):
-    ds.add_field("O_p5_number_density", 
+    ds.add_field(("gas","O_p5_number_density"),
                  function=_OVI_number_density,
                  units="cm**-3")
 
@@ -62,7 +62,7 @@
 
 # Get all fields that need to be added to the light ray
 fields = ['temperature']
-for s, params in species_dicts.iteritems():
+for s, params in species_dicts.items():
     fields.append(params['field'])
 
 # Make a light ray, and set njobs to -1 to use one core
@@ -79,7 +79,7 @@
 sp = AbsorptionSpectrum(900.0, 1400.0, 50000)
 
 # Iterate over species
-for s, params in species_dicts.iteritems():
+for s, params in species_dicts.items():
     # Iterate over transitions for a single species
     for i in range(params['numLines']):
         # Add the lines to the spectrum

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf doc/source/cookbook/free_free_field.py
--- a/doc/source/cookbook/free_free_field.py
+++ /dev/null
@@ -1,105 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import numpy as np
-import yt
-# Need to grab the proton mass from the constants database
-from yt.utilities.physical_constants import mp
-
-exit()
-# Define the emission field
-
-keVtoerg = 1.602e-9  # Convert energy in keV to energy in erg
-KtokeV = 8.617e-08  # Convert degrees Kelvin to degrees keV
-sqrt3 = np.sqrt(3.)
-expgamma = 1.78107241799  # Exponential of Euler's constant
-
-
-def _FreeFree_Emission(field, data):
-
-    if data.has_field_parameter("Z"):
-        Z = data.get_field_parameter("Z")
-    else:
-        Z = 1.077  # Primordial H/He plasma
-
-    if data.has_field_parameter("mue"):
-        mue = data.get_field_parameter("mue")
-    else:
-        mue = 1./0.875  # Primordial H/He plasma
-
-    if data.has_field_parameter("mui"):
-        mui = data.get_field_parameter("mui")
-    else:
-        mui = 1./0.8125  # Primordial H/He plasma
-
-    if data.has_field_parameter("Ephoton"):
-        Ephoton = data.get_field_parameter("Ephoton")
-    else:
-        Ephoton = 1.0  # in keV
-
-    if data.has_field_parameter("photon_emission"):
-        photon_emission = data.get_field_parameter("photon_emission")
-    else:
-        photon_emission = False  # Flag for energy or photon emission
-
-    n_e = data["density"]/(mue*mp)
-    n_i = data["density"]/(mui*mp)
-    kT = data["temperature"]*KtokeV
-
-    # Compute the Gaunt factor
-
-    g_ff = np.zeros(kT.shape)
-    g_ff[Ephoton/kT > 1.] = np.sqrt((3./np.pi)*kT[Ephoton/kT > 1.]/Ephoton)
-    g_ff[Ephoton/kT < 1.] = (sqrt3/np.pi)*np.log((4./expgamma) *
-                                                 kT[Ephoton/kT < 1.]/Ephoton)
-
-    eps_E = 1.64e-20*Z*Z*n_e*n_i/np.sqrt(data["temperature"]) * \
-        np.exp(-Ephoton/kT)*g_ff
-
-    if photon_emission:
-        eps_E /= (Ephoton*keVtoerg)
-
-    return eps_E
-
-yt.add_field("FreeFree_Emission", function=_FreeFree_Emission)
-
-# Define the luminosity derived quantity
-def _FreeFreeLuminosity(data):
-    return (data["FreeFree_Emission"]*data["cell_volume"]).sum()
-
-
-def _combFreeFreeLuminosity(data, luminosity):
-    return luminosity.sum()
-
-yt.add_quantity("FreeFree_Luminosity", function=_FreeFreeLuminosity,
-                combine_function=_combFreeFreeLuminosity, n_ret=1)
-
-ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
-
-sphere = ds.sphere(ds.domain_center, (100., "kpc"))
-
-# Print out the total luminosity at 1 keV for the sphere
-
-print("L_E (1 keV, primordial) = ", sphere.quantities["FreeFree_Luminosity"]())
-
-# The defaults for the field assume a H/He primordial plasma.
-# Let's set the appropriate parameters for a pure hydrogen plasma.
-
-sphere.set_field_parameter("mue", 1.0)
-sphere.set_field_parameter("mui", 1.0)
-sphere.set_field_parameter("Z", 1.0)
-
-print("L_E (1 keV, pure hydrogen) = ", sphere.quantities["FreeFree_Luminosity"]())
-
-# Now let's print the luminosity at an energy of E = 10 keV
-
-sphere.set_field_parameter("Ephoton", 10.0)
-
-print("L_E (10 keV, pure hydrogen) = ", sphere.quantities["FreeFree_Luminosity"]())
-
-# Finally, let's set the flag for photon emission, to get the total number
-# of photons emitted at this energy:
-
-sphere.set_field_parameter("photon_emission", True)
-
-print("L_ph (10 keV, pure hydrogen) = ", sphere.quantities["FreeFree_Luminosity"]())

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf doc/source/cookbook/simulation_analysis.py
--- a/doc/source/cookbook/simulation_analysis.py
+++ b/doc/source/cookbook/simulation_analysis.py
@@ -2,11 +2,11 @@
 yt.enable_parallelism()
 import collections
 
-# Enable parallelism in the script (assuming it was called with 
+# Enable parallelism in the script (assuming it was called with
 # `mpirun -np <n_procs>` )
 yt.enable_parallelism()
 
-# By using wildcards such as ? and * with the load command, we can load up a 
+# By using wildcards such as ? and * with the load command, we can load up a
 # Time Series containing all of these datasets simultaneously.
 ts = yt.load('enzo_tiny_cosmology/DD????/DD????')
 
@@ -16,7 +16,7 @@
 # Create an empty dictionary
 data = {}
 
-# Iterate through each dataset in the Time Series (using piter allows it 
+# Iterate through each dataset in the Time Series (using piter allows it
 # to happen in parallel automatically across available processors)
 for ds in ts.piter():
     ad = ds.all_data()
@@ -31,6 +31,6 @@
 # Print out all the values we calculated.
 print("Dataset      Redshift        Density Min      Density Max")
 print("---------------------------------------------------------")
-for key, val in od.iteritems(): 
+for key, val in od.items(): 
     print("%s       %05.3f          %5.3g g/cm^3   %5.3g g/cm^3" % \
            (key, val[1], val[0][0], val[0][1]))

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf doc/source/cookbook/time_series.py
--- a/doc/source/cookbook/time_series.py
+++ b/doc/source/cookbook/time_series.py
@@ -12,7 +12,7 @@
 
 storage = {}
 
-# By using the piter() function, we can iterate on every dataset in 
+# By using the piter() function, we can iterate on every dataset in
 # the TimeSeries object.  By using the storage keyword, we can populate
 # a dictionary where the dataset is the key, and sto.result is the value
 # for later use when the loop is complete.
@@ -25,13 +25,13 @@
     sphere = ds.sphere("c", (100., "kpc"))
     # Calculate the entropy within that sphere
     entr = sphere["entropy"].sum()
-    # Store the current time and sphere entropy for this dataset in our 
+    # Store the current time and sphere entropy for this dataset in our
     # storage dictionary as a tuple
     store.result = (ds.current_time.in_units('Gyr'), entr)
 
 # Convert the storage dictionary values to a Nx2 array, so the can be easily
 # plotted
-arr = np.array(storage.values())
+arr = np.array(list(storage.values()))
 
 # Plot up the results: time versus entropy
 plt.semilogy(arr[:,0], arr[:,1], 'r-')

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -1011,7 +1011,7 @@
 
     """
     f = h5py.File(file_name, 'w')
-    for ion, params in lineDic.iteritems():
+    for ion, params in lineDic.items():
         f.create_dataset("{0}/N".format(ion),data=params['N'])
         f.create_dataset("{0}/b".format(ion),data=params['b'])
         f.create_dataset("{0}/z".format(ion),data=params['z'])

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -343,7 +343,7 @@
             del output["object"]
 
         # Combine results from each slice.
-        all_slices = all_storage.keys()
+        all_slices = list(all_storage.keys())
         all_slices.sort()
         for my_slice in all_slices:
             if save_slice_images:

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -22,6 +22,7 @@
 import glob
 import os
 import os.path as path
+from functools import cmp_to_key
 from collections import defaultdict
 from yt.extern.six import add_metaclass
 from yt.extern.six.moves import zip as izip
@@ -39,7 +40,7 @@
     TINY
 from yt.utilities.physical_ratios import \
      rho_crit_g_cm3_h2
-    
+
 from .hop.EnzoHop import RunHOP
 from .fof.EnzoFOF import RunFOF
 
@@ -138,9 +139,9 @@
         c[2] = self["particle_position_z"] - self.ds.domain_left_edge[2]
         com = []
         for i in range(3):
-            # A halo is likely periodic around a boundary if the distance 
+            # A halo is likely periodic around a boundary if the distance
             # between the max and min particle
-            # positions are larger than half the box. 
+            # positions are larger than half the box.
             # So skip the rest if the converse is true.
             # Note we might make a change here when periodicity-handling is
             # fully implemented.
@@ -444,7 +445,7 @@
         Msun2g = mass_sun_cgs
         rho_crit = rho_crit * ((1.0 + z) ** 3.0)
         # Get some pertinent information about the halo.
-        self.mass_bins = self.ds.arr(np.zeros(self.bin_count + 1, 
+        self.mass_bins = self.ds.arr(np.zeros(self.bin_count + 1,
                                               dtype='float64'),'Msun')
         dist = np.empty(thissize, dtype='float64')
         cen = self.center_of_mass()
@@ -475,7 +476,7 @@
         self.overdensity = self.mass_bins * Msun2g / \
             (4./3. * math.pi * rho_crit * \
             (self.radial_bins )**3.0)
-        
+
     def _get_ellipsoid_parameters_basic(self):
         np.seterr(all='ignore')
         # check if there are 4 particles to form an ellipsoid
@@ -501,7 +502,7 @@
         for axis in range(np.size(DW)):
             cases = np.array([position[axis],
                                 position[axis] + DW[axis],
-                              position[axis] - DW[axis]])        
+                              position[axis] - DW[axis]])
             # pick out the smallest absolute distance from com
             position[axis] = np.choose(np.abs(cases).argmin(axis=0), cases)
         # find the furthest particle's index
@@ -571,7 +572,7 @@
     _name = "RockstarHalo"
     # See particle_mask
     _radjust = 4.
-    
+
     def maximum_density(self):
         r"""Not implemented."""
         return -1
@@ -635,11 +636,11 @@
     def get_ellipsoid_parameters(self):
         r"""Calculate the parameters that describe the ellipsoid of
         the particles that constitute the halo.
-        
+
         Parameters
         ----------
         None
-        
+
         Returns
         -------
         tuple : (cm, mag_A, mag_B, mag_C, e0_vector, tilt)
@@ -650,7 +651,7 @@
               #. mag_C as a float.
               #. e0_vector as an array.
               #. tilt as a float.
-        
+
         Examples
         --------
         >>> params = halos[0].get_ellipsoid_parameters()
@@ -662,22 +663,22 @@
             basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
         toreturn.extend(updated)
         return tuple(toreturn)
-    
+
     def get_ellipsoid(self):
         r"""Returns an ellipsoidal data object.
-        
+
         This will generate a new, empty ellipsoidal data object for this
         halo.
-        
+
         Parameters
         ----------
         None.
-        
+
         Returns
         -------
         ellipsoid : `yt.data_objects.data_containers.YTEllipsoidBase`
             The ellipsoidal data object.
-        
+
         Examples
         --------
         >>> ell = halos[0].get_ellipsoid()
@@ -686,7 +687,7 @@
         ell = self.data.ds.ellipsoid(ep[0], ep[1], ep[2], ep[3],
             ep[4], ep[5])
         return ell
-    
+
 class HOPHalo(Halo):
     _name = "HOPHalo"
     pass
@@ -763,14 +764,14 @@
             self.size, key)
         if field_data is not None:
             if key == 'particle_index':
-                #this is an index for turning data sorted by particle index 
+                #this is an index for turning data sorted by particle index
                 #into the same order as the fields on disk
                 self._pid_sort = field_data.argsort().argsort()
             #convert to YTArray using the data from disk
             if key == 'particle_mass':
                 field_data = self.ds.arr(field_data, 'Msun')
             else:
-                field_data = self.ds.arr(field_data, 
+                field_data = self.ds.arr(field_data,
                     self.ds._get_field_info('unknown',key).units)
             self._saved_fields[key] = field_data
             return self._saved_fields[key]
@@ -856,21 +857,21 @@
             basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
         toreturn.extend(updated)
         return tuple(toreturn)
-    
+
     def get_ellipsoid(self):
-        r"""Returns an ellipsoidal data object.        
+        r"""Returns an ellipsoidal data object.
         This will generate a new, empty ellipsoidal data object for this
         halo.
-        
+
         Parameters
         ----------
         None.
-        
+
         Returns
         -------
         ellipsoid : `yt.data_objects.data_containers.YTEllipsoidBase`
             The ellipsoidal data object.
-        
+
         Examples
         --------
         >>> ell = halos[0].get_ellipsoid()
@@ -947,11 +948,11 @@
     def maximum_density(self):
         r"""Undefined for text halos."""
         return -1
-    
+
     def maximum_density_location(self):
         r"""Undefined, default to CoM"""
         return self.center_of_mass()
-    
+
     def get_size(self):
         # Have to just get it from the sphere.
         return self["particle_position_x"].size
@@ -964,8 +965,8 @@
     def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
-        *dm_only* is True (default), only run it on the dark matter particles, 
-        otherwise on all particles.  Returns an iterable collection of 
+        *dm_only* is True (default), only run it on the dark matter particles,
+        otherwise on all particles.  Returns an iterable collection of
         *HopGroup* items.
         """
         self._data_source = data_source
@@ -1051,7 +1052,7 @@
         ellipsoid_data : bool.
             Whether to print the ellipsoidal information to the file.
             Default = False.
-        
+
         Examples
         --------
         >>> halos.write_out("HopAnalysis.out")
@@ -1144,10 +1145,10 @@
     _halo_dt = np.dtype([('id', np.int64), ('pos', (np.float32, 6)),
         ('corevel', (np.float32, 3)), ('bulkvel', (np.float32, 3)),
         ('m', np.float32), ('r', np.float32), ('child_r', np.float32),
-        ('vmax_r', np.float32), 
+        ('vmax_r', np.float32),
         ('mgrav', np.float32), ('vmax', np.float32),
         ('rvmax', np.float32), ('rs', np.float32),
-        ('klypin_rs', np.float32), 
+        ('klypin_rs', np.float32),
         ('vrms', np.float32), ('J', (np.float32, 3)),
         ('energy', np.float32), ('spin', np.float32),
         ('alt_m', (np.float32, 4)), ('Xoff', np.float32),
@@ -1221,9 +1222,9 @@
         """
         Read the out_*.list text file produced
         by Rockstar into memory."""
-        
+
         ds = self.ds
-        # In order to read the binary data, we need to figure out which 
+        # In order to read the binary data, we need to figure out which
         # binary files belong to this output.
         basedir = os.path.dirname(self.out_list)
         s = self.out_list.split('_')[-1]
@@ -1523,12 +1524,14 @@
                 id += 1
 
         def haloCmp(h1, h2):
+            def cmp(a, b):
+                return (a > b) - (a < b)
             c = cmp(h1.total_mass(), h2.total_mass())
             if c != 0:
                 return -1 * c
             if c == 0:
                 return cmp(h1.center_of_mass()[0], h2.center_of_mass()[0])
-        self._groups.sort(haloCmp)
+        self._groups.sort(key=cmp_to_key(haloCmp))
         sorted_max_dens = {}
         for i, halo in enumerate(self._groups):
             if halo.id in self._max_dens:
@@ -1873,7 +1876,7 @@
 
 class LoadTextHaloes(GenericHaloFinder, TextHaloList):
     r"""Load a text file of halos.
-    
+
     Like LoadHaloes, but when all that is available is a plain
     text file. This assumes the text file has the 3-positions of halos
     along with a radius. The halo objects created are spheres.
@@ -1882,7 +1885,7 @@
     ----------
     fname : String
         The name of the text file to read in.
-    
+
     columns : dict
         A dict listing the column name : column number pairs for data
         in the text file. It is zero-based (like Python).
@@ -1890,7 +1893,7 @@
         Any column name outside of ['x', 'y', 'z', 'r'] will be attached
         to each halo object in the supplementary dict 'supp'. See
         example.
-    
+
     comment : String
         If the first character of a line is equal to this, the line is
         skipped. Default = "#".
@@ -1915,7 +1918,7 @@
     Parameters
     ----------
     fname : String
-        The name of the Rockstar file to read in. Default = 
+        The name of the Rockstar file to read in. Default =
         "rockstar_halos/out_0.list'.
 
     Examples

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -20,7 +20,8 @@
 from yt.fields.derived_field import \
     ValidateSpatial
 from yt.funcs import mylog
-    
+from yt.extern.six import string_types
+
 from .clump_info_items import \
     clump_info_registry
 from .clump_validators import \
@@ -268,7 +269,7 @@
 
 def write_clump_index(clump, level, fh):
     top = False
-    if not isinstance(fh, file):
+    if isinstance(fh, string_types):
         fh = open(fh, "w")
         top = True
     for q in range(level):
@@ -285,7 +286,7 @@
 
 def write_clumps(clump, level, fh):
     top = False
-    if not isinstance(fh, file):
+    if isinstance(fh, string_types):
         fh = open(fh, "w")
         top = True
     if ((clump.children is None) or (len(clump.children) == 0)):

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -130,7 +130,7 @@
     def __new__(cls, outputs, *args, **kwargs):
         if isinstance(outputs, string_types):
             outputs = get_filenames_from_glob_pattern(outputs)
-        ret = super(DatasetSeries, cls).__new__(cls, *args, **kwargs)
+        ret = super(DatasetSeries, cls).__new__(cls)
         try:
             ret._pre_outputs = outputs[:]
         except TypeError:
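
Under Python 3, object.__new__() raises "takes exactly one argument"
when an overridden __new__ forwards extra constructor arguments to it,
so the *args/**kwargs are dropped from the super() call; __init__ still
receives them through the normal instantiation machinery. A sketch with
illustrative names:

    class Series(object):
        def __new__(cls, outputs, *args, **kwargs):
            # object.__new__ accepts only the class on Python 3; the
            # remaining arguments are __init__'s business.
            ret = super(Series, cls).__new__(cls)
            ret._pre_outputs = outputs[:]
            return ret

        def __init__(self, outputs, parallel=True):
            self.parallel = parallel

    s = Series(["a.h5", "b.h5"], parallel=False)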

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -76,7 +76,7 @@
         for key, val in self._handle.attrs.items():
             if key.startswith('component_'):
                 comp_number = int(re.match('component_(\d+)', key).groups()[0])
-                field_dict[val] = comp_number
+                field_dict[val.decode('utf-8')] = comp_number
         self._field_dict = field_dict
         return self._field_dict
 
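h5py returns HDF5 string attribute values as bytes under Python 3, so
using them directly as dictionary keys would no longer match the str
field names used elsewhere; hence the .decode('utf-8'). The same logic
against a stand-in attrs mapping (the component names are invented):

    import re

    # Stand-in for an h5py file's .attrs; string values arrive as
    # bytes on Python 3.
    attrs = {"component_0": b"density", "component_1": b"pressure"}

    field_dict = {}
    for key, val in attrs.items():
        m = re.match(r"component_(\d+)", key)
        if m is None:
            continue
        if isinstance(val, bytes):
            val = val.decode("utf-8")  # bytes -> str so name lookups work
        field_dict[val] = int(m.groups()[0])
    # field_dict == {"density": 0, "pressure": 1}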

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -32,7 +32,7 @@
     def _read_particle_coords(self, chunks, ptf):
         pdata = self.ds._handle[self.ds.first_image].data
         assert(len(ptf) == 1)
-        ptype = ptf.keys()[0]
+        ptype = list(ptf.keys())[0]
         x = np.asarray(pdata.field("X"), dtype="=f8")
         y = np.asarray(pdata.field("Y"), dtype="=f8")
         z = np.ones(x.shape)
@@ -43,7 +43,7 @@
     def _read_particle_fields(self, chunks, ptf, selector):
         pdata = self.ds._handle[self.ds.first_image].data
         assert(len(ptf) == 1)
-        ptype = ptf.keys()[0]
+        ptype = list(ptf.keys())[0]
         field_list = ptf[ptype]
         x = np.asarray(pdata.field("X"), dtype="=f8")
         y = np.asarray(pdata.field("Y"), dtype="=f8")
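
dict.keys() returns a view object in Python 3, and views cannot be
indexed, so ptf.keys()[0] raises a TypeError; list(ptf.keys())[0] works
on both versions. For the single-key case there is also a list-free
spelling:

    ptf = {"io": ["particle_position_x"]}
    assert len(ptf) == 1

    ptype = list(ptf.keys())[0]  # the portable fix used in these diffs
    ptype = next(iter(ptf))      # equivalent, without building a list

The same one-line change recurs in the halo_catalog and rockstar
readers below.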

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -12,14 +12,18 @@
 
 import numpy as np
 import base64
-from yt.extern.six.moves import StringIO
+from yt.extern.six import PY3
 from yt.fields.derived_field import ValidateSpatial
 from yt.utilities.on_demand_imports import _astropy
 from yt.funcs import mylog, get_image_suffix
 from yt.visualization._mpl_imports import FigureCanvasAgg
 from yt.units.yt_array import YTQuantity, YTArray
 from yt.utilities.fits_image import FITSImageData
-
+if PY3:
+    from io import BytesIO as IO
+else:
+    from yt.extern.six.moves import StringIO as IO
+    
 import os
 
 def _make_counts(emin, emax):
@@ -255,12 +259,12 @@
 
     def _repr_html_(self):
         ret = ''
-        for k, v in self.plots.iteritems():
+        for k, v in self.plots.items():
             canvas = FigureCanvasAgg(v)
-            f = StringIO()
+            f = IO()
             canvas.print_figure(f)
             f.seek(0)
-            img = base64.b64encode(f.read())
+            img = base64.b64encode(f.read()).decode()
             ret += r'<img style="max-width:100%%;max-height:100%%;" ' \
                    r'src="data:image/png;base64,%s"><br>' % img
         return ret
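
Two Python 3 issues meet in _repr_html_: print_figure emits binary PNG
data, which only a BytesIO can hold (io.StringIO is text-only), and
base64.b64encode returns bytes that must be decoded before
interpolation into an HTML string. A self-contained sketch with a bare
matplotlib figure:

    import base64
    from io import BytesIO

    import matplotlib
    matplotlib.use("Agg")               # non-interactive backend
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.plot([0, 1], [1, 0])

    buf = BytesIO()                     # PNG bytes, so not StringIO
    fig.canvas.print_figure(buf)
    buf.seek(0)
    img = base64.b64encode(buf.read()).decode()  # bytes -> str for HTML
    html = '<img src="data:image/png;base64,%s">' % img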

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf yt/frontends/halo_catalog/io.py
--- a/yt/frontends/halo_catalog/io.py
+++ b/yt/frontends/halo_catalog/io.py
@@ -39,7 +39,7 @@
         data_files = set([])
         # Only support halo reading for now.
         assert(len(ptf) == 1)
-        assert(ptf.keys()[0] == "halos")
+        assert(list(ptf.keys())[0] == "halos")
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
@@ -57,7 +57,7 @@
         data_files = set([])
         # Only support halo reading for now.
         assert(len(ptf) == 1)
-        assert(ptf.keys()[0] == "halos")
+        assert(list(ptf.keys())[0] == "halos")
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf yt/frontends/owls/io.py
--- a/yt/frontends/owls/io.py
+++ b/yt/frontends/owls/io.py
@@ -70,7 +70,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in sorted(data_files):
+        for data_file in sorted(data_files, key=lambda x: x.filename):
             f = _get_h5_handle(data_file.filename)
             # This double-reads
             for ptype, field_list in sorted(ptf.items()):
@@ -88,7 +88,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in sorted(data_files):
+        for data_file in sorted(data_files, key=lambda x: x.filename):
             f = _get_h5_handle(data_file.filename)
             for ptype, field_list in sorted(ptf.items()):
                 if data_file.total_particles[ptype] == 0:
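
Python 2 would sort arbitrary objects by falling back to comparing
types and ids; Python 3 raises "TypeError: unorderable types" instead,
so sorted(data_files) needs an explicit key. Keying on the filename
also makes the read order deterministic. With a hypothetical DataFile:

    from operator import attrgetter

    class DataFile(object):
        def __init__(self, filename):
            self.filename = filename

    files = {DataFile("snap_001.hdf5"), DataFile("snap_000.hdf5")}

    # Python 3 cannot order DataFile instances without a key.
    for df in sorted(files, key=attrgetter("filename")):
        print(df.filename)

attrgetter("filename") and key=lambda x: x.filename are
interchangeable; the rockstar reader below uses the former.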

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -20,7 +20,12 @@
     BaseIOHandler
 from yt.utilities.logger import ytLogger as mylog
 import yt.utilities.fortran_utils as fpu
-from yt.extern.six.moves import cStringIO
+from yt.extern.six import PY3
+
+if PY3:
+    from io import BytesIO as IO
+else:
+    from cStringIO import StringIO as IO
 
 class IOHandlerRAMSES(BaseIOHandler):
     _dataset_type = "ramses"
@@ -37,7 +42,7 @@
                 f = open(subset.domain.hydro_fn, "rb")
                 # This contains the boundary information, so we skim through
                 # and pick off the right vectors
-                content = cStringIO(f.read())
+                content = IO(f.read())
                 rv = subset.fill(content, fields, selector)
                 for ft, f in fields:
                     d = rv.pop(f)
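
The same PY3 switch appears here because cStringIO is gone in Python 3
and the RAMSES hydro files are read as raw bytes, which io.StringIO
would reject; on Python 2, cStringIO.StringIO handles bytes and is the
fast choice. The conditional import, sketched with sys.version_info in
place of six's PY3 flag:

    import sys

    if sys.version_info[0] >= 3:
        from io import BytesIO as IO
    else:
        from cStringIO import StringIO as IO

    # A seekable in-memory view over raw file contents.
    content = IO(b"\x00\x01\x02\x03")
    content.seek(2)
    data = content.read()  # b'\x02\x03' on Python 3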

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf yt/frontends/rockstar/io.py
--- a/yt/frontends/rockstar/io.py
+++ b/yt/frontends/rockstar/io.py
@@ -28,6 +28,7 @@
 from yt.utilities.lib.geometry_utils import compute_morton
 
 from yt.geometry.oct_container import _ORDER_MAX
+from operator import attrgetter
 
 class IOHandlerRockstarBinary(BaseIOHandler):
     _dataset_type = "rockstar_binary"
@@ -45,12 +46,11 @@
         data_files = set([])
         # Only support halo reading for now.
         assert(len(ptf) == 1)
-        assert(ptf.keys()[0] == "halos")
+        assert(list(ptf.keys())[0] == "halos")
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        
-        for data_file in sorted(data_files):
+        for data_file in sorted(data_files,key=attrgetter("filename")):
             pcount = data_file.header['num_halos']
             with open(data_file.filename, "rb") as f:
                 f.seek(data_file._position_offset, os.SEEK_SET)
@@ -66,11 +66,11 @@
         data_files = set([])
         # Only support halo reading for now.
         assert(len(ptf) == 1)
-        assert(ptf.keys()[0] == "halos")
+        assert(list(ptf.keys())[0] == "halos")
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in sorted(data_files):
+        for data_file in sorted(data_files,key=attrgetter("filename")):
             pcount = data_file.header['num_halos']
             with open(data_file.filename, "rb") as f:
                 for ptype, field_list in sorted(ptf.items()):

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -122,7 +122,7 @@
         for key in fields:
             if key not in exclude_fields:
                 if hasattr(img_data[key], "units"):
-                    self.field_units[key] = str(img_data[key].units)
+                    self.field_units[key] = img_data[key].units
                 else:
                     self.field_units[key] = "dimensionless"
                 mylog.info("Making a FITS image of field %s" % key)

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -11,7 +11,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 import numpy as np
-from yt.extern.six.moves import zip as izip
 
 import matplotlib
 import matplotlib.colors as cc
@@ -86,9 +85,9 @@
                 194.5*_vs**2.88+99.72*np.exp(-77.24*(_vs-0.742)**2.0)
               + 45.40*_vs**0.089+10.0)/255.0
 
-cdict = {'red':zip(_vs,_kamae_red,_kamae_red),
-         'green':zip(_vs,_kamae_grn,_kamae_grn),
-         'blue':zip(_vs,_kamae_blu,_kamae_blu)}
+cdict = {'red':np.transpose([_vs,_kamae_red,_kamae_red]),
+         'green':np.transpose([_vs,_kamae_grn,_kamae_grn]),
+         'blue':np.transpose([_vs,_kamae_blu,_kamae_blu])}
 add_cmap('kamae', cdict)
 
 # This one is a simple black & green map
@@ -151,9 +150,9 @@
 _vs = np.linspace(0,1,256)
 for k,v in list(_cm.color_map_luts.items()):
     if k not in yt_colormaps and k not in mcm.cmap_d:
-        cdict = { 'red': zip(_vs,v[0],v[0]),
-                  'green': zip(_vs,v[1],v[1]),
-                  'blue': zip(_vs,v[2],v[2]) }
+        cdict = { 'red': np.transpose([_vs,v[0],v[0]]),
+                  'green': np.transpose([_vs,v[1],v[1]]),
+                  'blue': np.transpose([_vs,v[2],v[2]]) }
         add_cmap(k, cdict)
 
 def _extract_lookup_table(cmap_name):
@@ -393,9 +392,9 @@
     #   Second number is the (0..1) number to interpolate to when coming *from below*
     #   Third number is the (0..1) number to interpolate to when coming *from above*
     _vs = np.linspace(0,1,256)
-    cdict = {'red':   zip(_vs, cmap[:,0], cmap[:,0]),
-             'green': zip(_vs, cmap[:,1], cmap[:,1]),
-             'blue':  zip(_vs, cmap[:,2], cmap[:,2])}
+    cdict = {'red':   np.transpose([_vs, cmap[:,0], cmap[:,0]]),
+             'green': np.transpose([_vs, cmap[:,1], cmap[:,1]]),
+             'blue':  np.transpose([_vs, cmap[:,2], cmap[:,2]])}
 
     if name is not None:
         add_cmap(name, cdict)
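
zip() became a one-shot iterator in Python 3, but matplotlib's
LinearSegmentedColormap needs each cdict channel to be a reusable
sequence of (x, below, above) rows; np.transpose of the three
equal-length arrays yields an (N, 3) array that serves on both
versions. Reduced to essentials:

    import numpy as np
    from matplotlib.colors import LinearSegmentedColormap

    _vs = np.linspace(0, 1, 256)
    ramp = _vs**2                       # an arbitrary channel profile

    # N rows of (x, value_from_below, value_from_above); an exhausted
    # zip() iterator here is what broke under Python 3.
    cdict = {'red':   np.transpose([_vs, ramp, ramp]),
             'green': np.transpose([_vs, ramp, ramp]),
             'blue':  np.transpose([_vs, ramp, ramp])}
    cmap = LinearSegmentedColormap('demo', cdict, 256)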

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -27,7 +27,6 @@
 import numpy as np
 import weakref
 import re
-import string
 
 class FixedResolutionBuffer(object):
     r"""
@@ -178,13 +177,13 @@
             pstr = m.string[m.start()+1:m.end()-1]
             segments = fname.split("_")
             for i,s in enumerate(segments):
-                segments[i] = string.capitalize(s)
+                segments[i] = s.capitalize()
                 if s == pstr:
                     ipstr = i
             element = segments[ipstr-1]
             roman = pnum2rom[pstr[1:]]
             label = element + '\ ' + roman + '\ ' + \
-                string.join(segments[ipstr+1:], '\ ')
+                '\ '.join(segments[ipstr+1:])
         else:
             label = fname
         return label
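
The string module's function forms of the str methods
(string.capitalize, string.join, and friends) were deleted in Python 3;
only the methods remain. The replacement pattern:

    segments = "fe_number_density".split("_")

    # string.capitalize(s) -> s.capitalize()
    # string.join(seq, sep) -> sep.join(seq)
    segments = [s.capitalize() for s in segments]
    label = r'\ '.join(segments)  # backslash-spaces for mathtext, as above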

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -170,7 +170,7 @@
         bitmap_array = np.concatenate([bitmap_array.astype('uint8'),
                                        alpha_channel], axis=-1)
     if transpose:
-        bitmap_array = bitmap_array.swapaxes(0,1)
+        bitmap_array = bitmap_array.swapaxes(0,1).copy(order="C")
     if filename is not None:
         pw.write_png(bitmap_array, filename)
     else:
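
swapaxes returns a strided view rather than a rearranged buffer, and a
PNG writer that walks raw memory expects C-contiguous data;
.copy(order="C") materializes it. The difference is visible in the
array flags:

    import numpy as np

    bitmap = np.zeros((64, 128, 4), dtype="uint8")

    view = bitmap.swapaxes(0, 1)
    print(view.flags["C_CONTIGUOUS"])   # False: same buffer, swapped strides

    safe = bitmap.swapaxes(0, 1).copy(order="C")
    print(safe.flags["C_CONTIGUOUS"])   # True: fit for a raw-memory writer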

diff -r b7b8cdbad2505dea1391bddbe3ab6e5d65f2e9b6 -r ad366f1dd932031937b1a7b59937fa3f0a278ecf yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -222,7 +222,7 @@
             plot_spec = [plot_spec.copy() for p in profiles]
 
         ProfilePlot._initialize_instance(self, profiles, label, plot_spec, y_log)
-
+        
     @validate_plot
     def save(self, name=None, suffix=None):
         r"""
@@ -530,15 +530,20 @@
                 xma = xmax
             extrema = {p.x_field: ((xmi, str(p.x.units)), (xma, str(p.x.units)))}
             units = {p.x_field: str(p.x.units)}
+            if self.x_log is None:
+                logs = None
+            else:
+                logs = {p.x_field: self.x_log}
             for field in p.field_map.values():
                 units[field] = str(p.field_data[field].units)
             self.profiles[i] = \
                 create_profile(p.data_source, p.x_field,
                                n_bins=len(p.x_bins)-1,
-                               fields=p.field_map.values(),
+                               fields=list(p.field_map.values()),
                                weight_field=p.weight_field,
                                accumulation=p.accumulation,
                                fractional=p.fractional,
+                               logs=logs,
                                extrema=extrema, units=units)
         return self
 
@@ -1146,6 +1151,14 @@
         extrema = {p.x_field: ((xmin, str(p.x.units)), (xmax, str(p.x.units))),
                    p.y_field: ((p.y_bins.min(), str(p.y.units)),
                                (p.y_bins.max(), str(p.y.units)))}
+        if self.x_log is not None or self.y_log is not None:
+            logs = {}
+        else:
+            logs = None
+        if self.x_log is not None:
+            logs[p.x_field] = self.x_log
+        if self.y_log is not None:
+            logs[p.y_field] = self.y_log
         deposition = getattr(self.profile, "deposition", None)
         if deposition is None:
             additional_kwargs = {'accumulation': p.accumulation,
@@ -1155,11 +1168,12 @@
         self.profile = create_profile(
             p.data_source,
             [p.x_field, p.y_field],
-            p.field_map.values(),
+            list(p.field_map.values()),
             n_bins=[len(p.x_bins)-1, len(p.y_bins)-1],
             weight_field=p.weight_field,
             units=units,
             extrema=extrema,
+            logs=logs,
             **additional_kwargs)
         for field in zunits:
             self.profile.set_field_unit(field, zunits[field])
@@ -1201,6 +1215,14 @@
         extrema = {p.x_field: ((p.x_bins.min(), str(p.x.units)),
                                (p.x_bins.max(), str(p.x.units))),
                    p.y_field: ((ymin, str(p.y.units)), (ymax, str(p.y.units)))}
+        if self.x_log is not None or self.y_log is not None:
+            logs = {}
+        else:
+            logs = None
+        if self.x_log is not None:
+            logs[p.x_field] = self.x_log
+        if self.y_log is not None:
+            logs[p.y_field] = self.y_log
         deposition = getattr(self.profile, "deposition", None)
         if deposition is None:
             additional_kwargs = {'accumulation': p.accumulation,
@@ -1210,11 +1232,12 @@
         self.profile = create_profile(
             p.data_source,
             [p.x_field, p.y_field],
-            p.field_map.values(),
+            list(p.field_map.values()),
             n_bins=[len(p.x_bins)-1, len(p.y_bins)-1],
             weight_field=p.weight_field,
             units=units,
             extrema=extrema,
+            logs=logs,
             **additional_kwargs)
         for field in zunits:
             self.profile.set_field_unit(field, zunits[field])
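
Besides the list() wrappers, these profile_plotter hunks thread the
plot's x_log/y_log settings back into create_profile through its logs
keyword, so re-binning (after set_xlim and the like) no longer resets
an axis to its default scaling; logs=None keeps the old behavior. How
the keyword is used (the dataset name is a placeholder for any sample
data):

    import yt

    ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")
    ad = ds.all_data()

    # logs maps each bin field to True (log-spaced bins) or False (linear).
    profile = yt.create_profile(ad, "density", ["temperature"],
                                logs={"density": False})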

Repository URL: https://bitbucket.org/yt_analysis/yt/
