[yt-svn] commit/yt: 7 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Thu Jul 9 09:32:42 PDT 2015


7 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/70a4ac2df84d/
Changeset:   70a4ac2df84d
Branch:      yt
User:        jzuhone
Date:        2015-06-20 05:23:17+00:00
Summary:     Enabling the Athena frontend to read Fargo 3D data. Engaging in a massive simplification of the Python 2/3 string handling. I hate VTK.
Affected #:  2 files

diff -r dc2467c4eae70b9f185adf2cba8f61b95f65fea0 -r 70a4ac2df84d744e57bae8f1c42add6b12c1b26f yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -35,6 +35,40 @@
 from yt.units.yt_array import YTQuantity
 from yt.utilities.decompose import \
     decompose_array, get_psize
+from sys import version
+
+def chk23(strin):
+    if version < '3':
+        return strin
+    else:
+        return strin.encode('utf-8')
+
+def str23(strin):
+    if version < '3':
+        return strin
+    else:
+        if isinstance(strin, list):
+            return [s.decode('utf-8') for s in strin]
+        else:
+            return strin.decode('utf-8')
+
+def check_readline(fl):
+    line = fl.readline()
+    chk = chk23("SCALARS")
+    if chk in line and not line.startswith(chk):
+        line = line[line.find(chk):]
+    chk = chk23("VECTORS")
+    if chk in line and not line.startswith(chk):
+        line = line[line.find(chk):]
+    return line
+
+def check_break(line):
+    splitup = line.strip().split()
+    do_break = chk23('SCALAR') in splitup
+    do_break = (chk23('VECTOR') in splitup) & do_break
+    do_break = (chk23('TABLE') in splitup) & do_break
+    do_break = (len(line) == 0) & do_break
+    return do_break
 
 def _get_convert(fname):
     def _conv(data):
@@ -77,92 +111,51 @@
         return "AthenaGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
 
 def parse_line(line, grid):
-    from sys import version
     # grid is a dictionary
-    from sys import version
     splitup = line.strip().split()
-    if version < '3':
-        if "vtk" in splitup:
-            grid['vtk_version'] = splitup[-1]
-        elif "time=" in splitup:
-            time_index = splitup.index("time=")
-            grid['time'] = float(splitup[time_index+1].rstrip(','))
-            grid['level'] = int(splitup[time_index+3].rstrip(','))
-            grid['domain'] = int(splitup[time_index+5].rstrip(','))                        
-        elif "DIMENSIONS" in splitup:
-            grid['dimensions'] = np.array(splitup[-3:]).astype('int')
-        elif "ORIGIN" in splitup:
-            grid['left_edge'] = np.array(splitup[-3:]).astype('float64')
-        elif "SPACING" in splitup:
-            grid['dds'] = np.array(splitup[-3:]).astype('float64')
-        elif "CELL_DATA" in splitup:
-            grid["ncells"] = int(splitup[-1])
-        elif "SCALARS" in splitup:
-            field = splitup[1]
-            grid['read_field'] = field
-            grid['read_type'] = 'scalar'
-        elif "VECTORS" in splitup:
-            field = splitup[1]
-            grid['read_field'] = field
-            grid['read_type'] = 'vector'
-    else:
-        if b"vtk" in splitup:
-            grid['vtk_version'] = splitup[-1].decode('utf-8')
-        elif b"time=" in splitup:
-            time_index = splitup.index(b"time=")
-            field = splitup[time_index+1].decode('utf-8')
-            field = field.rstrip(',')
-            grid['time'] = float(field)
-            field = splitup[time_index+3].decode('utf-8')
-            field = field.rstrip(',')
-            grid['level'] = int(field)
-            field = splitup[time_index+5].decode('utf-8')
-            field = field.rstrip(',')
-            grid['domain'] = int(field)                        
-        elif b"DIMENSIONS" in splitup:
-            field = splitup[-3:]
-            for i in range(0,len(field)):
-                field[i] = field[i].decode('utf-8')
-            grid['dimensions'] = np.array(field).astype('int')
-        elif b"ORIGIN" in splitup:
-            field = splitup[-3:]
-            for i in range(0,len(field)):
-                field[i] = field[i].decode('utf-8')
-            grid['left_edge'] = np.array(field).astype('float64')
-        elif b"SPACING" in splitup:
-            field = splitup[-3:]
-            for i in range(0,len(field)):
-                field[i] = field[i].decode('utf-8')
-            grid['dds'] = np.array(field).astype('float64')
-        elif b"CELL_DATA" in splitup:
-            grid["ncells"] = int(splitup[-1].decode('utf-8'))
-        elif b"SCALARS" in splitup:
-            field = splitup[1].decode('utf-8')
-            grid['read_field'] = field
-            grid['read_type'] = 'scalar'
-        elif b"VECTORS" in splitup:
-            field = splitup[1].decode('utf-8')
-            grid['read_field'] = field
-            grid['read_type'] = 'vector'
-
+    if chk23("vtk") in splitup:
+        grid['vtk_version'] = str23(splitup[-1])
+    elif chk23("time=") in splitup:
+        time_index = splitup.index(chk23("time="))
+        grid['time'] = float(str23(splitup[time_index+1]).rstrip(','))
+        grid['level'] = int(str23(splitup[time_index+3]).rstrip(','))
+        grid['domain'] = int(str23(splitup[time_index+5]).rstrip(','))
+    elif chk23("DIMENSIONS") in splitup:
+        grid['dimensions'] = np.array(str23(splitup[-3:])).astype('int')
+    elif chk23("ORIGIN") in splitup:
+        grid['left_edge'] = np.array(str23(splitup[-3:])).astype('float64')
+    elif chk23("SPACING") in splitup:
+        grid['dds'] = np.array(str23(splitup[-3:])).astype('float64')
+    elif chk23("CELL_DATA") in splitup or chk23("POINT_DATA") in splitup:
+        grid["ncells"] = int(str23(splitup[-1]))
+    elif chk23("SCALARS") in splitup:
+        field = str23(splitup[1])
+        grid['read_field'] = field
+        grid['read_type'] = 'scalar'
+    elif chk23("VECTORS") in splitup:
+        field = str23(splitup[1])
+        grid['read_field'] = field
+        grid['read_type'] = 'vector'
+    elif chk23("time") in splitup:
+        time_index = splitup.index(chk23("time"))
+        grid['time'] = float(str23(splitup[time_index+1]))
+    
 class AthenaHierarchy(GridIndex):
 
     grid = AthenaGrid
     _dataset_type='athena'
     _data_file = None
-    
+
     def __init__(self, ds, dataset_type='athena'):
-        from sys import version
         self.dataset = weakref.proxy(ds)
         self.directory = os.path.dirname(self.dataset.filename)
         self.dataset_type = dataset_type
         # for now, the index file is the dataset!
         self.index_filename = os.path.join(os.getcwd(), self.dataset.filename)
-        #self.directory = os.path.dirname(self.index_filename)
         if version < '3':
             self._fhandle = file(self.index_filename,'rb')
         else:
-            self._fhandle = open(self.index_filename,'rb')            
+            self._fhandle = open(self.index_filename,'rb')
         GridIndex.__init__(self, ds, dataset_type)
 
         self._fhandle.close()
@@ -170,39 +163,19 @@
     def _detect_output_fields(self):
         field_map = {}
         f = open(self.index_filename,'rb')
-        from sys import version
-        def chk23(strin):
-            if version < '3':
-                return strin
-            else:
-                return strin.encode('utf-8')
-        def check_readline(fl):
-            line = fl.readline()
-            chk = chk23("SCALARS")
-            if chk in line and not line.startswith(chk):
-                line = line[line.find(chk):]
-            chk = chk23("VECTORS")
-            if chk in line and not line.startswith(chk):
-                line = line[line.find(chk):]
-            return line
         line = check_readline(f)
         chkwhile = chk23('')
         while line != chkwhile:
             splitup = line.strip().split()
             chkd = chk23("DIMENSIONS")
             chkc = chk23("CELL_DATA")
+            chkp = chk23("POINT_DATA")
             if chkd in splitup:
-                field = splitup[-3:]
-                if version >= '3':
-                    for i in range(0,len(field)):
-                        field[i] = field[i].decode('utf-8')
+                field = str23(splitup[-3:])
                 grid_dims = np.array(field).astype('int')
                 line = check_readline(f)
-            elif chkc in splitup:
-                if version < '3':
-                    grid_ncells = int(splitup[-1])
-                else:
-                    grid_ncells = int(splitup[-1].decode('utf-8'))
+            elif chkc in splitup or chkp in splitup:
+                grid_ncells = int(str23(splitup[-1]))
                 line = check_readline(f)
                 if np.prod(grid_dims) != grid_ncells:
                     grid_dims -= 1
@@ -221,32 +194,23 @@
             chks = chk23('SCALARS')
             chkv = chk23('VECTORS')
             if chks in line and chks not in splitup:
-                splitup = line[line.find(chks):].strip().split()
-                if version >='3':
-                    splitup = splitup.decode('utf-8')
+                splitup = str23(line[line.find(chks):].strip().split())
             if chkv in line and chkv not in splitup:
-                splitup = line[line.find(chkv):].strip().split()
-                if version >='3':
-                    splitup = splitup.decode('utf-8')
+                splitup = str23(line[line.find(chkv):].strip().split())
             if chks in splitup:
-                if version < '3':
-                    field = ("athena", splitup[1])
-                else:
-                    field = ("athena", splitup[1].decode('utf-8'))
+                field = ("athena", str23(splitup[1]))
+                dtype = str23(splitup[-1]).lower()
                 if not read_table:
                     line = check_readline(f) # Read the lookup table line
                     read_table = True
-                field_map[field] = ('scalar', f.tell() - read_table_offset)
+                field_map[field] = ('scalar', f.tell() - read_table_offset, dtype)
                 read_table=False
-
             elif chkv in splitup:
-                if version < '3':
-                    field = splitup[1]
-                else:
-                    field = splitup[1].decode('utf-8')
+                field = str23(splitup[1])
+                dtype = str23(splitup[-1]).lower()
                 for ax in 'xyz':
                     field_map[("athena","%s_%s" % (field, ax))] =\
-                            ('vector', f.tell() - read_table_offset)
+                            ('vector', f.tell() - read_table_offset, dtype)
             line = check_readline(f)
 
         f.close()
@@ -266,13 +230,7 @@
         line = f.readline()
         while grid['read_field'] is None:
             parse_line(line, grid)
-            if "SCALAR" in line.strip().split():
-                break
-            if "VECTOR" in line.strip().split():
-                break
-            if 'TABLE' in line.strip().split():
-                break
-            if len(line) == 0: break
+            if check_break(line): break
             line = f.readline()
         f.close()
 
@@ -282,7 +240,7 @@
             grid['dimensions'] -= 1
             grid['dimensions'][grid['dimensions']==0]=1
         if np.prod(grid['dimensions']) != grid['ncells']:
-            mylog.error('product of dimensions %i not equal to number of cells %i' % 
+            mylog.error('product of dimensions %i not equal to number of cells %i' %
                   (np.prod(grid['dimensions']), grid['ncells']))
             raise TypeError
 
@@ -292,10 +250,10 @@
         if dataset_dir.endswith("id0"):
             dname = "id0/"+dname
             dataset_dir = dataset_dir[:-3]
-                        
+
         gridlistread = glob.glob(os.path.join(dataset_dir, 'id*/%s-id*%s' % (dname[4:-9],dname[-9:])))
         gridlistread.insert(0,self.index_filename)
-        if 'id0' in dname :
+        if 'id0' in dname:
             gridlistread += glob.glob(os.path.join(dataset_dir, 'id*/lev*/%s*-lev*%s' % (dname[4:-9],dname[-9:])))
         else :
             gridlistread += glob.glob(os.path.join(dataset_dir, 'lev*/%s*-lev*%s' % (dname[:-9],dname[-9:])))
@@ -318,26 +276,35 @@
             line = f.readline()
             while gridread['read_field'] is None:
                 parse_line(line, gridread)
-                if "SCALAR" in line.strip().split():
-                    break
-                if "VECTOR" in line.strip().split():
-                    break 
-                if 'TABLE' in line.strip().split():
-                    break
-                if len(line) == 0: break
+                splitup = line.strip().split()
+                if chk23('X_COORDINATES') in splitup:
+                    gridread['left_edge'] = np.zeros(3)
+                    gridread['dds'] = np.zeros(3)
+                    v = np.fromfile(f, dtype='>f8', count=2)
+                    gridread['left_edge'][0] = v[0]-0.5*(v[1]-v[0])
+                    gridread['dds'][0] = v[1]-v[0]
+                if chk23('Y_COORDINATES') in splitup:
+                    v = np.fromfile(f, dtype='>f8', count=2)
+                    gridread['left_edge'][1] = v[0]-0.5*(v[1]-v[0])
+                    gridread['dds'][1] = v[1]-v[0]
+                if chk23('Z_COORDINATES') in splitup:
+                    v = np.fromfile(f, dtype='>f8', count=2)
+                    gridread['left_edge'][2] = v[0]-0.5*(v[1]-v[0])
+                    gridread['dds'][2] = v[1]-v[0]
+                if check_break(line): break
                 line = f.readline()
             f.close()
-            levels[j] = gridread['level']
+            levels[j] = gridread.get('level', 0)
             glis[j,0] = gridread['left_edge'][0]
             glis[j,1] = gridread['left_edge'][1]
             glis[j,2] = gridread['left_edge'][2]
-            # It seems some datasets have a mismatch between ncells and 
+            # It seems some datasets have a mismatch between ncells and
             # the actual grid dimensions.
             if np.prod(gridread['dimensions']) != gridread['ncells']:
                 gridread['dimensions'] -= 1
                 gridread['dimensions'][gridread['dimensions']==0]=1
             if np.prod(gridread['dimensions']) != gridread['ncells']:
-                mylog.error('product of dimensions %i not equal to number of cells %i' % 
+                mylog.error('product of dimensions %i not equal to number of cells %i' %
                       (np.prod(gridread['dimensions']), gridread['ncells']))
                 raise TypeError
             gdims[j,0] = gridread['dimensions'][0]
@@ -346,7 +313,7 @@
             # Setting dds=1 for non-active dimensions in 1D/2D datasets
             gridread['dds'][gridread['dimensions']==1] = 1.
             gdds[j,:] = gridread['dds']
-            
+
             j=j+1
 
         gres = glis + gdims*gdds
@@ -357,10 +324,10 @@
         new_dre = np.max(gres,axis=0)
         self.dataset.domain_right_edge[:] = np.round(new_dre, decimals=12)[:]
         self.dataset.domain_width = \
-                (self.dataset.domain_right_edge - 
+                (self.dataset.domain_right_edge -
                  self.dataset.domain_left_edge)
         self.dataset.domain_center = \
-                0.5*(self.dataset.domain_left_edge + 
+                0.5*(self.dataset.domain_left_edge +
                      self.dataset.domain_right_edge)
         self.dataset.domain_dimensions = \
                 np.round(self.dataset.domain_width/gdds[0]).astype('int')
@@ -434,7 +401,6 @@
                                                         dx*self.grid_dimensions,
                                                         decimals=12),
                                                "code_length")
-        
         if self.dataset.dimensionality <= 2:
             self.grid_right_edge[:,2] = dre[2]
         if self.dataset.dimensionality == 1:
@@ -486,7 +452,8 @@
 
     def __init__(self, filename, dataset_type='athena',
                  storage_filename=None, parameters=None,
-                 units_override=None, nprocs=1):
+                 units_override=None, nprocs=1,
+                 geometry="cartesian"):
         self.fluid_types += ("athena",)
         self.nprocs = nprocs
         if parameters is None:
@@ -509,6 +476,7 @@
             storage_filename = '%s.yt' % filename.split('/')[-1]
         self.storage_filename = storage_filename
         self.backup_filename = self.filename[:-4] + "_backup.gdf"
+        self.geometry = geometry
         # Unfortunately we now have to mandate that the index gets 
         # instantiated so that we can make sure we have the correct left 
         # and right domain edges.
@@ -543,13 +511,22 @@
         line = self._handle.readline()
         while grid['read_field'] is None:
             parse_line(line, grid)
-            if "SCALAR" in line.strip().split():
-                break
-            if "VECTOR" in line.strip().split():
-                break
-            if 'TABLE' in line.strip().split():
-                break
-            if len(line) == 0: break
+            splitup = line.strip().split()
+            if chk23('X_COORDINATES') in splitup:
+                grid['left_edge'] = np.zeros(3)
+                grid['dds'] = np.zeros(3)
+                v = np.fromfile(self._handle, dtype='>f8', count=2)
+                grid['left_edge'][0] = v[0]-0.5*(v[1]-v[0])
+                grid['dds'][0] = v[1]-v[0]
+            if chk23('Y_COORDINATES') in splitup:
+                v = np.fromfile(self._handle, dtype='>f8', count=2)
+                grid['left_edge'][1] = v[0]-0.5*(v[1]-v[0])
+                grid['dds'][1] = v[1]-v[0]
+            if chk23('Z_COORDINATES') in splitup:
+                v = np.fromfile(self._handle, dtype='>f8', count=2)
+                grid['left_edge'][2] = v[0]-0.5*(v[1]-v[0])
+                grid['dds'][2] = v[1]-v[0]
+            if check_break(line): break
             line = self._handle.readline()
 
         self.domain_left_edge = grid['left_edge']
@@ -608,7 +585,6 @@
             self.parameters["Gamma"] = self.specified_parameters["gamma"]
         else:
             self.parameters["Gamma"] = 5./3. 
-        self.geometry = self.specified_parameters.get("geometry", "cartesian")
         self._handle.close()
 
     @classmethod

diff -r dc2467c4eae70b9f185adf2cba8f61b95f65fea0 -r 70a4ac2df84d744e57bae8f1c42add6b12c1b26f yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -16,8 +16,11 @@
            BaseIOHandler
 import numpy as np
 from yt.funcs import mylog, defaultdict
+from .data_structures import chk23
 
-float_size = np.dtype(">f4").itemsize
+float_size = {"float":np.dtype(">f4").itemsize,
+              "double":np.dtype(">f8").itemsize}
+
 axis_list = ["_x","_y","_z"]
 
 class IOHandlerAthena(BaseIOHandler):
@@ -48,24 +51,28 @@
             grid0_ncells = np.prod(grid.index.grids[0].read_dims)
             read_table_offset = get_read_table_offset(f)
             for field in fields:
-                dtype, offsetr = grid.index._field_map[field]
+                ftype, offsetr, dtype = grid.index._field_map[field]
                 if grid_ncells != grid0_ncells:
                     offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
                 if grid_ncells == grid0_ncells:
                     offset = offsetr
                 offset = int(offset) # Casting to be certain.
-                file_offset = grid.file_offset[2]*read_dims[0]*read_dims[1]*float_size
+                file_offset = grid.file_offset[2]*read_dims[0]*read_dims[1]*float_size[dtype]
                 xread = slice(grid.file_offset[0],grid.file_offset[0]+grid_dims[0])
                 yread = slice(grid.file_offset[1],grid.file_offset[1]+grid_dims[1])
                 f.seek(read_table_offset+offset+file_offset)
-                if dtype == 'scalar':
+                if dtype == 'float':
+                    dt = '>f4'
+                elif dtype == 'double':
+                    dt = '>f8'
+                if ftype == 'scalar':
                     f.seek(read_table_offset+offset+file_offset)
-                    v = np.fromfile(f, dtype='>f4',
+                    v = np.fromfile(f, dtype=dt,
                                     count=grid_ncells).reshape(read_dims,order='F')
-                if dtype == 'vector':
+                if ftype == 'vector':
                     vec_offset = axis_list.index(field[-1][-2:])
                     f.seek(read_table_offset+offset+3*file_offset)
-                    v = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
+                    v = np.fromfile(f, dtype=dt, count=3*grid_ncells)
                     v = v[vec_offset::3].reshape(read_dims,order='F')
                 if grid.ds.field_ordering == 1:
                     data[grid.id][field] = v[xread,yread,:].T.astype("float64")
@@ -108,11 +115,9 @@
     line = f.readline()
     while True:
         splitup = line.strip().split()
-        if version < '3':
-            chk = 'CELL_DATA'
-        else:
-            chk = b'CELL_DATA'
-        if chk in splitup:
+        chkc = chk23('CELL_DATA')
+        chkp = chk23('POINT_DATA')
+        if chkc in splitup or chkp in splitup:
             f.readline()
             read_table_offset = f.tell()
             break


https://bitbucket.org/yt_analysis/yt/commits/e2cbb4b47393/
Changeset:   e2cbb4b47393
Branch:      yt
User:        jzuhone
Date:        2015-06-20 05:35:18+00:00
Summary:     Fix setting Athena geometry. Fix Python 2/3-isms in the coordinate handlers.
Affected #:  4 files

diff -r 70a4ac2df84d744e57bae8f1c42add6b12c1b26f -r e2cbb4b47393461224a095bc81ab1a041825bd71 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -470,13 +470,13 @@
                                   "and will be removed in a future release. Use units_override instead.")
                     already_warned = True
                 units_override[k] = self.specified_parameters.pop(k)
+        self.geometry = geometry
         Dataset.__init__(self, filename, dataset_type, units_override=units_override)
         self.filename = filename
         if storage_filename is None:
             storage_filename = '%s.yt' % filename.split('/')[-1]
         self.storage_filename = storage_filename
         self.backup_filename = self.filename[:-4] + "_backup.gdf"
-        self.geometry = geometry
         # Unfortunately we now have to mandate that the index gets 
         # instantiated so that we can make sure we have the correct left 
         # and right domain edges.

diff -r 70a4ac2df84d744e57bae8f1c42add6b12c1b26f -r e2cbb4b47393461224a095bc81ab1a041825bd71 yt/geometry/coordinates/cylindrical_coordinates.py
--- a/yt/geometry/coordinates/cylindrical_coordinates.py
+++ b/yt/geometry/coordinates/cylindrical_coordinates.py
@@ -143,7 +143,7 @@
         rv = {self.axis_id['r']: ('theta', 'z'),
               self.axis_id['z']: ('x', 'y'),
               self.axis_id['theta']: ('r', 'z')}
-        for i in rv.keys():
+        for i in list(rv.keys()):
             rv[self.axis_name[i]] = rv[i]
             rv[self.axis_name[i].upper()] = rv[i]
         self._image_axis_name = rv

diff -r 70a4ac2df84d744e57bae8f1c42add6b12c1b26f -r e2cbb4b47393461224a095bc81ab1a041825bd71 yt/geometry/coordinates/geographic_coordinates.py
--- a/yt/geometry/coordinates/geographic_coordinates.py
+++ b/yt/geometry/coordinates/geographic_coordinates.py
@@ -239,7 +239,7 @@
                  ('R', 'z'),
               self.axis_id['altitude']:
                  ('longitude', 'latitude')}
-        for i in rv.keys():
+        for i in list(rv.keys()):
             rv[self.axis_name[i]] = rv[i]
             rv[self.axis_name[i].capitalize()] = rv[i]
         self._image_axis_name = rv

diff -r 70a4ac2df84d744e57bae8f1c42add6b12c1b26f -r e2cbb4b47393461224a095bc81ab1a041825bd71 yt/geometry/coordinates/spherical_coordinates.py
--- a/yt/geometry/coordinates/spherical_coordinates.py
+++ b/yt/geometry/coordinates/spherical_coordinates.py
@@ -186,7 +186,7 @@
         rv = {self.axis_id['r']: ('theta', 'phi'),
               self.axis_id['theta']: ('x / \\sin(\\theta)', 'y / \\sin(\\theta)'),
               self.axis_id['phi']: ('R', 'z')}
-        for i in rv.keys():
+        for i in list(rv.keys()):
             rv[self.axis_name[i]] = rv[i]
             rv[self.axis_name[i].capitalize()] = rv[i]
         self._image_axis_name = rv


https://bitbucket.org/yt_analysis/yt/commits/bdff1527ffe5/
Changeset:   bdff1527ffe5
Branch:      yt
User:        jzuhone
Date:        2015-06-20 05:52:39+00:00
Summary:     This was working just fine before, and it was already documented this way, so we'll change it back
Affected #:  1 file

diff -r e2cbb4b47393461224a095bc81ab1a041825bd71 -r bdff1527ffe552f9d3c78e92c2ced9105159b08f yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -452,8 +452,7 @@
 
     def __init__(self, filename, dataset_type='athena',
                  storage_filename=None, parameters=None,
-                 units_override=None, nprocs=1,
-                 geometry="cartesian"):
+                 units_override=None, nprocs=1):
         self.fluid_types += ("athena",)
         self.nprocs = nprocs
         if parameters is None:
@@ -470,7 +469,6 @@
                                   "and will be removed in a future release. Use units_override instead.")
                     already_warned = True
                 units_override[k] = self.specified_parameters.pop(k)
-        self.geometry = geometry
         Dataset.__init__(self, filename, dataset_type, units_override=units_override)
         self.filename = filename
         if storage_filename is None:
@@ -584,7 +582,8 @@
         if "gamma" in self.specified_parameters:
             self.parameters["Gamma"] = self.specified_parameters["gamma"]
         else:
-            self.parameters["Gamma"] = 5./3. 
+            self.parameters["Gamma"] = 5./3.
+        self.geometry = self.specified_parameters.get("geometry", "cartesian")
         self._handle.close()
 
     @classmethod


https://bitbucket.org/yt_analysis/yt/commits/0f49db3f1197/
Changeset:   0f49db3f1197
Branch:      yt
User:        jzuhone
Date:        2015-06-20 05:55:25+00:00
Summary:     Make a note of this in the docs
Affected #:  1 file

diff -r bdff1527ffe552f9d3c78e92c2ced9105159b08f -r 0f49db3f11976bdbe5d5dd2458beea8dcbfd89bc doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -104,7 +104,8 @@
 -----------
 
 Athena 4.x VTK data is *mostly* supported and cared for by John
-ZuHone. Both uniform grid and SMR datasets are supported.
+ZuHone. Both uniform grid and SMR datasets are supported. yt also 
+recognizes Fargo3D data written to VTK files as Athena data. 
 
 Loading Athena datasets is slightly different depending on whether
 your dataset came from a serial or a parallel run. If the data came


https://bitbucket.org/yt_analysis/yt/commits/c90d73b3658b/
Changeset:   c90d73b3658b
Branch:      yt
User:        jzuhone
Date:        2015-06-20 05:56:36+00:00
Summary:     Let's be more cautious
Affected #:  1 file

diff -r 0f49db3f11976bdbe5d5dd2458beea8dcbfd89bc -r c90d73b3658b1fff1b36bb6af2735f01634fff2f doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -104,8 +104,11 @@
 -----------
 
 Athena 4.x VTK data is *mostly* supported and cared for by John
-ZuHone. Both uniform grid and SMR datasets are supported. yt also 
-recognizes Fargo3D data written to VTK files as Athena data. 
+ZuHone. Both uniform grid and SMR datasets are supported. 
+
+.. note: 
+   yt also recognizes Fargo3D data written to VTK files as 
+   Athena data, but support for Fargo3D data is preliminary. 
 
 Loading Athena datasets is slightly different depending on whether
 your dataset came from a serial or a parallel run. If the data came


https://bitbucket.org/yt_analysis/yt/commits/18284d0aa5f0/
Changeset:   18284d0aa5f0
Branch:      yt
User:        jzuhone
Date:        2015-06-20 15:24:59+00:00
Summary:     Using six for version discrimination
Affected #:  2 files

diff -r c90d73b3658b1fff1b36bb6af2735f01634fff2f -r 18284d0aa5f08c0bb2152a354711d606c3ebcbe0 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -24,27 +24,25 @@
     GridIndex
 from yt.data_objects.static_output import \
            Dataset
-from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
 from yt.utilities.lib.misc_utilities import \
     get_box_grids_level
 from yt.geometry.geometry_handler import \
     YTDataChunk
+from yt.extern.six import PY3
 
 from .fields import AthenaFieldInfo
 from yt.units.yt_array import YTQuantity
 from yt.utilities.decompose import \
     decompose_array, get_psize
-from sys import version
 
 def chk23(strin):
-    if version < '3':
+    if PY3:
         return strin
     else:
         return strin.encode('utf-8')
 
 def str23(strin):
-    if version < '3':
+    if PY3:
         return strin
     else:
         if isinstance(strin, list):
@@ -152,7 +150,7 @@
         self.dataset_type = dataset_type
         # for now, the index file is the dataset!
         self.index_filename = os.path.join(os.getcwd(), self.dataset.filename)
-        if version < '3':
+        if PY3:
             self._fhandle = file(self.index_filename,'rb')
         else:
             self._fhandle = open(self.index_filename,'rb')

diff -r c90d73b3658b1fff1b36bb6af2735f01634fff2f -r 18284d0aa5f08c0bb2152a354711d606c3ebcbe0 yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -111,7 +111,6 @@
         return rv
 
 def get_read_table_offset(f):
-    from sys import version
     line = f.readline()
     while True:
         splitup = line.strip().split()


https://bitbucket.org/yt_analysis/yt/commits/8a24aa8bb9e8/
Changeset:   8a24aa8bb9e8
Branch:      yt
User:        chummels
Date:        2015-07-09 16:32:31+00:00
Summary:     Merged in jzuhone/yt-3.x (pull request #1619)

Supporting Fargo3D data, cleaning up Athena frontend
Affected #:  6 files

diff -r 4d383ff42e245a683b65a87e648cc7276e927a04 -r 8a24aa8bb9e815acc4737f5d64d74be75472fb10 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -104,7 +104,11 @@
 -----------
 
 Athena 4.x VTK data is *mostly* supported and cared for by John
-ZuHone. Both uniform grid and SMR datasets are supported.
+ZuHone. Both uniform grid and SMR datasets are supported. 
+
+.. note: 
+   yt also recognizes Fargo3D data written to VTK files as 
+   Athena data, but support for Fargo3D data is preliminary. 
 
 Loading Athena datasets is slightly different depending on whether
 your dataset came from a serial or a parallel run. If the data came

diff -r 4d383ff42e245a683b65a87e648cc7276e927a04 -r 8a24aa8bb9e815acc4737f5d64d74be75472fb10 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -24,18 +24,50 @@
     GridIndex
 from yt.data_objects.static_output import \
            Dataset
-from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
 from yt.utilities.lib.misc_utilities import \
     get_box_grids_level
 from yt.geometry.geometry_handler import \
     YTDataChunk
+from yt.extern.six import PY3
 
 from .fields import AthenaFieldInfo
 from yt.units.yt_array import YTQuantity
 from yt.utilities.decompose import \
     decompose_array, get_psize
 
+def chk23(strin):
+    if not PY3:
+        return strin
+    else:
+        return strin.encode('utf-8')
+
+def str23(strin):
+    if not PY3:
+        return strin
+    else:
+        if isinstance(strin, list):
+            return [s.decode('utf-8') for s in strin]
+        else:
+            return strin.decode('utf-8')
+
+def check_readline(fl):
+    line = fl.readline()
+    chk = chk23("SCALARS")
+    if chk in line and not line.startswith(chk):
+        line = line[line.find(chk):]
+    chk = chk23("VECTORS")
+    if chk in line and not line.startswith(chk):
+        line = line[line.find(chk):]
+    return line
+
+def check_break(line):
+    splitup = line.strip().split()
+    do_break = chk23('SCALAR') in splitup
+    do_break = (chk23('VECTOR') in splitup) | do_break
+    do_break = (chk23('TABLE') in splitup) | do_break
+    do_break = (len(line) == 0) | do_break
+    return do_break
+
 def _get_convert(fname):
     def _conv(data):
         return data.convert(fname)
@@ -77,92 +109,51 @@
         return "AthenaGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
 
 def parse_line(line, grid):
-    from sys import version
     # grid is a dictionary
-    from sys import version
     splitup = line.strip().split()
-    if version < '3':
-        if "vtk" in splitup:
-            grid['vtk_version'] = splitup[-1]
-        elif "time=" in splitup:
-            time_index = splitup.index("time=")
-            grid['time'] = float(splitup[time_index+1].rstrip(','))
-            grid['level'] = int(splitup[time_index+3].rstrip(','))
-            grid['domain'] = int(splitup[time_index+5].rstrip(','))                        
-        elif "DIMENSIONS" in splitup:
-            grid['dimensions'] = np.array(splitup[-3:]).astype('int')
-        elif "ORIGIN" in splitup:
-            grid['left_edge'] = np.array(splitup[-3:]).astype('float64')
-        elif "SPACING" in splitup:
-            grid['dds'] = np.array(splitup[-3:]).astype('float64')
-        elif "CELL_DATA" in splitup:
-            grid["ncells"] = int(splitup[-1])
-        elif "SCALARS" in splitup:
-            field = splitup[1]
-            grid['read_field'] = field
-            grid['read_type'] = 'scalar'
-        elif "VECTORS" in splitup:
-            field = splitup[1]
-            grid['read_field'] = field
-            grid['read_type'] = 'vector'
-    else:
-        if b"vtk" in splitup:
-            grid['vtk_version'] = splitup[-1].decode('utf-8')
-        elif b"time=" in splitup:
-            time_index = splitup.index(b"time=")
-            field = splitup[time_index+1].decode('utf-8')
-            field = field.rstrip(',')
-            grid['time'] = float(field)
-            field = splitup[time_index+3].decode('utf-8')
-            field = field.rstrip(',')
-            grid['level'] = int(field)
-            field = splitup[time_index+5].decode('utf-8')
-            field = field.rstrip(',')
-            grid['domain'] = int(field)                        
-        elif b"DIMENSIONS" in splitup:
-            field = splitup[-3:]
-            for i in range(0,len(field)):
-                field[i] = field[i].decode('utf-8')
-            grid['dimensions'] = np.array(field).astype('int')
-        elif b"ORIGIN" in splitup:
-            field = splitup[-3:]
-            for i in range(0,len(field)):
-                field[i] = field[i].decode('utf-8')
-            grid['left_edge'] = np.array(field).astype('float64')
-        elif b"SPACING" in splitup:
-            field = splitup[-3:]
-            for i in range(0,len(field)):
-                field[i] = field[i].decode('utf-8')
-            grid['dds'] = np.array(field).astype('float64')
-        elif b"CELL_DATA" in splitup:
-            grid["ncells"] = int(splitup[-1].decode('utf-8'))
-        elif b"SCALARS" in splitup:
-            field = splitup[1].decode('utf-8')
-            grid['read_field'] = field
-            grid['read_type'] = 'scalar'
-        elif b"VECTORS" in splitup:
-            field = splitup[1].decode('utf-8')
-            grid['read_field'] = field
-            grid['read_type'] = 'vector'
-
+    if chk23("vtk") in splitup:
+        grid['vtk_version'] = str23(splitup[-1])
+    elif chk23("time=") in splitup:
+        time_index = splitup.index(chk23("time="))
+        grid['time'] = float(str23(splitup[time_index+1]).rstrip(','))
+        grid['level'] = int(str23(splitup[time_index+3]).rstrip(','))
+        grid['domain'] = int(str23(splitup[time_index+5]).rstrip(','))
+    elif chk23("DIMENSIONS") in splitup:
+        grid['dimensions'] = np.array(str23(splitup[-3:])).astype('int')
+    elif chk23("ORIGIN") in splitup:
+        grid['left_edge'] = np.array(str23(splitup[-3:])).astype('float64')
+    elif chk23("SPACING") in splitup:
+        grid['dds'] = np.array(str23(splitup[-3:])).astype('float64')
+    elif chk23("CELL_DATA") in splitup or chk23("POINT_DATA") in splitup:
+        grid["ncells"] = int(str23(splitup[-1]))
+    elif chk23("SCALARS") in splitup:
+        field = str23(splitup[1])
+        grid['read_field'] = field
+        grid['read_type'] = 'scalar'
+    elif chk23("VECTORS") in splitup:
+        field = str23(splitup[1])
+        grid['read_field'] = field
+        grid['read_type'] = 'vector'
+    elif chk23("time") in splitup:
+        time_index = splitup.index(chk23("time"))
+        grid['time'] = float(str23(splitup[time_index+1]))
+    
 class AthenaHierarchy(GridIndex):
 
     grid = AthenaGrid
     _dataset_type='athena'
     _data_file = None
-    
+
     def __init__(self, ds, dataset_type='athena'):
-        from sys import version
         self.dataset = weakref.proxy(ds)
         self.directory = os.path.dirname(self.dataset.filename)
         self.dataset_type = dataset_type
         # for now, the index file is the dataset!
         self.index_filename = os.path.join(os.getcwd(), self.dataset.filename)
-        #self.directory = os.path.dirname(self.index_filename)
-        if version < '3':
+        if not PY3:
             self._fhandle = file(self.index_filename,'rb')
         else:
-            self._fhandle = open(self.index_filename,'rb')            
+            self._fhandle = open(self.index_filename,'rb')
         GridIndex.__init__(self, ds, dataset_type)
 
         self._fhandle.close()
@@ -170,39 +161,19 @@
     def _detect_output_fields(self):
         field_map = {}
         f = open(self.index_filename,'rb')
-        from sys import version
-        def chk23(strin):
-            if version < '3':
-                return strin
-            else:
-                return strin.encode('utf-8')
-        def check_readline(fl):
-            line = fl.readline()
-            chk = chk23("SCALARS")
-            if chk in line and not line.startswith(chk):
-                line = line[line.find(chk):]
-            chk = chk23("VECTORS")
-            if chk in line and not line.startswith(chk):
-                line = line[line.find(chk):]
-            return line
         line = check_readline(f)
         chkwhile = chk23('')
         while line != chkwhile:
             splitup = line.strip().split()
             chkd = chk23("DIMENSIONS")
             chkc = chk23("CELL_DATA")
+            chkp = chk23("POINT_DATA")
             if chkd in splitup:
-                field = splitup[-3:]
-                if version >= '3':
-                    for i in range(0,len(field)):
-                        field[i] = field[i].decode('utf-8')
+                field = str23(splitup[-3:])
                 grid_dims = np.array(field).astype('int')
                 line = check_readline(f)
-            elif chkc in splitup:
-                if version < '3':
-                    grid_ncells = int(splitup[-1])
-                else:
-                    grid_ncells = int(splitup[-1].decode('utf-8'))
+            elif chkc in splitup or chkp in splitup:
+                grid_ncells = int(str23(splitup[-1]))
                 line = check_readline(f)
                 if np.prod(grid_dims) != grid_ncells:
                     grid_dims -= 1
@@ -221,32 +192,23 @@
             chks = chk23('SCALARS')
             chkv = chk23('VECTORS')
             if chks in line and chks not in splitup:
-                splitup = line[line.find(chks):].strip().split()
-                if version >='3':
-                    splitup = splitup.decode('utf-8')
+                splitup = str23(line[line.find(chks):].strip().split())
             if chkv in line and chkv not in splitup:
-                splitup = line[line.find(chkv):].strip().split()
-                if version >='3':
-                    splitup = splitup.decode('utf-8')
+                splitup = str23(line[line.find(chkv):].strip().split())
             if chks in splitup:
-                if version < '3':
-                    field = ("athena", splitup[1])
-                else:
-                    field = ("athena", splitup[1].decode('utf-8'))
+                field = ("athena", str23(splitup[1]))
+                dtype = str23(splitup[-1]).lower()
                 if not read_table:
                     line = check_readline(f) # Read the lookup table line
                     read_table = True
-                field_map[field] = ('scalar', f.tell() - read_table_offset)
+                field_map[field] = ('scalar', f.tell() - read_table_offset, dtype)
                 read_table=False
-
             elif chkv in splitup:
-                if version < '3':
-                    field = splitup[1]
-                else:
-                    field = splitup[1].decode('utf-8')
+                field = str23(splitup[1])
+                dtype = str23(splitup[-1]).lower()
                 for ax in 'xyz':
                     field_map[("athena","%s_%s" % (field, ax))] =\
-                            ('vector', f.tell() - read_table_offset)
+                            ('vector', f.tell() - read_table_offset, dtype)
             line = check_readline(f)
 
         f.close()
@@ -266,13 +228,7 @@
         line = f.readline()
         while grid['read_field'] is None:
             parse_line(line, grid)
-            if "SCALAR" in line.strip().split():
-                break
-            if "VECTOR" in line.strip().split():
-                break
-            if 'TABLE' in line.strip().split():
-                break
-            if len(line) == 0: break
+            if check_break(line): break
             line = f.readline()
         f.close()
 
@@ -282,7 +238,7 @@
             grid['dimensions'] -= 1
             grid['dimensions'][grid['dimensions']==0]=1
         if np.prod(grid['dimensions']) != grid['ncells']:
-            mylog.error('product of dimensions %i not equal to number of cells %i' % 
+            mylog.error('product of dimensions %i not equal to number of cells %i' %
                   (np.prod(grid['dimensions']), grid['ncells']))
             raise TypeError
 
@@ -292,10 +248,10 @@
         if dataset_dir.endswith("id0"):
             dname = "id0/"+dname
             dataset_dir = dataset_dir[:-3]
-                        
+
         gridlistread = glob.glob(os.path.join(dataset_dir, 'id*/%s-id*%s' % (dname[4:-9],dname[-9:])))
         gridlistread.insert(0,self.index_filename)
-        if 'id0' in dname :
+        if 'id0' in dname:
             gridlistread += glob.glob(os.path.join(dataset_dir, 'id*/lev*/%s*-lev*%s' % (dname[4:-9],dname[-9:])))
         else :
             gridlistread += glob.glob(os.path.join(dataset_dir, 'lev*/%s*-lev*%s' % (dname[:-9],dname[-9:])))
@@ -318,26 +274,35 @@
             line = f.readline()
             while gridread['read_field'] is None:
                 parse_line(line, gridread)
-                if "SCALAR" in line.strip().split():
-                    break
-                if "VECTOR" in line.strip().split():
-                    break 
-                if 'TABLE' in line.strip().split():
-                    break
-                if len(line) == 0: break
+                splitup = line.strip().split()
+                if chk23('X_COORDINATES') in splitup:
+                    gridread['left_edge'] = np.zeros(3)
+                    gridread['dds'] = np.zeros(3)
+                    v = np.fromfile(f, dtype='>f8', count=2)
+                    gridread['left_edge'][0] = v[0]-0.5*(v[1]-v[0])
+                    gridread['dds'][0] = v[1]-v[0]
+                if chk23('Y_COORDINATES') in splitup:
+                    v = np.fromfile(f, dtype='>f8', count=2)
+                    gridread['left_edge'][1] = v[0]-0.5*(v[1]-v[0])
+                    gridread['dds'][1] = v[1]-v[0]
+                if chk23('Z_COORDINATES') in splitup:
+                    v = np.fromfile(f, dtype='>f8', count=2)
+                    gridread['left_edge'][2] = v[0]-0.5*(v[1]-v[0])
+                    gridread['dds'][2] = v[1]-v[0]
+                if check_break(line): break
                 line = f.readline()
             f.close()
-            levels[j] = gridread['level']
+            levels[j] = gridread.get('level', 0)
             glis[j,0] = gridread['left_edge'][0]
             glis[j,1] = gridread['left_edge'][1]
             glis[j,2] = gridread['left_edge'][2]
-            # It seems some datasets have a mismatch between ncells and 
+            # It seems some datasets have a mismatch between ncells and
             # the actual grid dimensions.
             if np.prod(gridread['dimensions']) != gridread['ncells']:
                 gridread['dimensions'] -= 1
                 gridread['dimensions'][gridread['dimensions']==0]=1
             if np.prod(gridread['dimensions']) != gridread['ncells']:
-                mylog.error('product of dimensions %i not equal to number of cells %i' % 
+                mylog.error('product of dimensions %i not equal to number of cells %i' %
                       (np.prod(gridread['dimensions']), gridread['ncells']))
                 raise TypeError
             gdims[j,0] = gridread['dimensions'][0]
@@ -346,7 +311,7 @@
             # Setting dds=1 for non-active dimensions in 1D/2D datasets
             gridread['dds'][gridread['dimensions']==1] = 1.
             gdds[j,:] = gridread['dds']
-            
+
             j=j+1
 
         gres = glis + gdims*gdds
@@ -357,10 +322,10 @@
         new_dre = np.max(gres,axis=0)
         self.dataset.domain_right_edge[:] = np.round(new_dre, decimals=12)[:]
         self.dataset.domain_width = \
-                (self.dataset.domain_right_edge - 
+                (self.dataset.domain_right_edge -
                  self.dataset.domain_left_edge)
         self.dataset.domain_center = \
-                0.5*(self.dataset.domain_left_edge + 
+                0.5*(self.dataset.domain_left_edge +
                      self.dataset.domain_right_edge)
         self.dataset.domain_dimensions = \
                 np.round(self.dataset.domain_width/gdds[0]).astype('int')
@@ -434,7 +399,6 @@
                                                         dx*self.grid_dimensions,
                                                         decimals=12),
                                                "code_length")
-        
         if self.dataset.dimensionality <= 2:
             self.grid_right_edge[:,2] = dre[2]
         if self.dataset.dimensionality == 1:
@@ -543,13 +507,22 @@
         line = self._handle.readline()
         while grid['read_field'] is None:
             parse_line(line, grid)
-            if "SCALAR" in line.strip().split():
-                break
-            if "VECTOR" in line.strip().split():
-                break
-            if 'TABLE' in line.strip().split():
-                break
-            if len(line) == 0: break
+            splitup = line.strip().split()
+            if chk23('X_COORDINATES') in splitup:
+                grid['left_edge'] = np.zeros(3)
+                grid['dds'] = np.zeros(3)
+                v = np.fromfile(self._handle, dtype='>f8', count=2)
+                grid['left_edge'][0] = v[0]-0.5*(v[1]-v[0])
+                grid['dds'][0] = v[1]-v[0]
+            if chk23('Y_COORDINATES') in splitup:
+                v = np.fromfile(self._handle, dtype='>f8', count=2)
+                grid['left_edge'][1] = v[0]-0.5*(v[1]-v[0])
+                grid['dds'][1] = v[1]-v[0]
+            if chk23('Z_COORDINATES') in splitup:
+                v = np.fromfile(self._handle, dtype='>f8', count=2)
+                grid['left_edge'][2] = v[0]-0.5*(v[1]-v[0])
+                grid['dds'][2] = v[1]-v[0]
+            if check_break(line): break
             line = self._handle.readline()
 
         self.domain_left_edge = grid['left_edge']
@@ -607,7 +580,7 @@
         if "gamma" in self.specified_parameters:
             self.parameters["Gamma"] = self.specified_parameters["gamma"]
         else:
-            self.parameters["Gamma"] = 5./3. 
+            self.parameters["Gamma"] = 5./3.
         self.geometry = self.specified_parameters.get("geometry", "cartesian")
         self._handle.close()
 

diff -r 4d383ff42e245a683b65a87e648cc7276e927a04 -r 8a24aa8bb9e815acc4737f5d64d74be75472fb10 yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -16,8 +16,11 @@
            BaseIOHandler
 import numpy as np
 from yt.funcs import mylog, defaultdict
+from .data_structures import chk23
 
-float_size = np.dtype(">f4").itemsize
+float_size = {"float":np.dtype(">f4").itemsize,
+              "double":np.dtype(">f8").itemsize}
+
 axis_list = ["_x","_y","_z"]
 
 class IOHandlerAthena(BaseIOHandler):
@@ -48,24 +51,28 @@
             grid0_ncells = np.prod(grid.index.grids[0].read_dims)
             read_table_offset = get_read_table_offset(f)
             for field in fields:
-                dtype, offsetr = grid.index._field_map[field]
+                ftype, offsetr, dtype = grid.index._field_map[field]
                 if grid_ncells != grid0_ncells:
                     offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
                 if grid_ncells == grid0_ncells:
                     offset = offsetr
                 offset = int(offset) # Casting to be certain.
-                file_offset = grid.file_offset[2]*read_dims[0]*read_dims[1]*float_size
+                file_offset = grid.file_offset[2]*read_dims[0]*read_dims[1]*float_size[dtype]
                 xread = slice(grid.file_offset[0],grid.file_offset[0]+grid_dims[0])
                 yread = slice(grid.file_offset[1],grid.file_offset[1]+grid_dims[1])
                 f.seek(read_table_offset+offset+file_offset)
-                if dtype == 'scalar':
+                if dtype == 'float':
+                    dt = '>f4'
+                elif dtype == 'double':
+                    dt = '>f8'
+                if ftype == 'scalar':
                     f.seek(read_table_offset+offset+file_offset)
-                    v = np.fromfile(f, dtype='>f4',
+                    v = np.fromfile(f, dtype=dt,
                                     count=grid_ncells).reshape(read_dims,order='F')
-                if dtype == 'vector':
+                if ftype == 'vector':
                     vec_offset = axis_list.index(field[-1][-2:])
                     f.seek(read_table_offset+offset+3*file_offset)
-                    v = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
+                    v = np.fromfile(f, dtype=dt, count=3*grid_ncells)
                     v = v[vec_offset::3].reshape(read_dims,order='F')
                 if grid.ds.field_ordering == 1:
                     data[grid.id][field] = v[xread,yread,:].T.astype("float64")
@@ -104,15 +111,12 @@
         return rv
 
 def get_read_table_offset(f):
-    from sys import version
     line = f.readline()
     while True:
         splitup = line.strip().split()
-        if version < '3':
-            chk = 'CELL_DATA'
-        else:
-            chk = b'CELL_DATA'
-        if chk in splitup:
+        chkc = chk23('CELL_DATA')
+        chkp = chk23('POINT_DATA')
+        if chkc in splitup or chkp in splitup:
             f.readline()
             read_table_offset = f.tell()
             break

diff -r 4d383ff42e245a683b65a87e648cc7276e927a04 -r 8a24aa8bb9e815acc4737f5d64d74be75472fb10 yt/geometry/coordinates/cylindrical_coordinates.py
--- a/yt/geometry/coordinates/cylindrical_coordinates.py
+++ b/yt/geometry/coordinates/cylindrical_coordinates.py
@@ -143,7 +143,7 @@
         rv = {self.axis_id['r']: ('theta', 'z'),
               self.axis_id['z']: ('x', 'y'),
               self.axis_id['theta']: ('r', 'z')}
-        for i in rv.keys():
+        for i in list(rv.keys()):
             rv[self.axis_name[i]] = rv[i]
             rv[self.axis_name[i].upper()] = rv[i]
         self._image_axis_name = rv

diff -r 4d383ff42e245a683b65a87e648cc7276e927a04 -r 8a24aa8bb9e815acc4737f5d64d74be75472fb10 yt/geometry/coordinates/geographic_coordinates.py
--- a/yt/geometry/coordinates/geographic_coordinates.py
+++ b/yt/geometry/coordinates/geographic_coordinates.py
@@ -239,7 +239,7 @@
                  ('R', 'z'),
               self.axis_id['altitude']:
                  ('longitude', 'latitude')}
-        for i in rv.keys():
+        for i in list(rv.keys()):
             rv[self.axis_name[i]] = rv[i]
             rv[self.axis_name[i].capitalize()] = rv[i]
         self._image_axis_name = rv

diff -r 4d383ff42e245a683b65a87e648cc7276e927a04 -r 8a24aa8bb9e815acc4737f5d64d74be75472fb10 yt/geometry/coordinates/spherical_coordinates.py
--- a/yt/geometry/coordinates/spherical_coordinates.py
+++ b/yt/geometry/coordinates/spherical_coordinates.py
@@ -186,7 +186,7 @@
         rv = {self.axis_id['r']: ('theta', 'phi'),
               self.axis_id['theta']: ('x / \\sin(\\theta)', 'y / \\sin(\\theta)'),
               self.axis_id['phi']: ('R', 'z')}
-        for i in rv.keys():
+        for i in list(rv.keys()):
             rv[self.axis_name[i]] = rv[i]
             rv[self.axis_name[i].capitalize()] = rv[i]
         self._image_axis_name = rv

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list