[Yt-svn] commit/yt: 8 new changesets

Bitbucket commits-noreply@bitbucket.org
Thu Nov 10 07:28:43 PST 2011


8 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/6cee606e6753/
changeset:   6cee606e6753
branch:      yt
user:        MatthewTurk
date:        2011-11-02 20:26:32
summary:     reshaping with order='F' is the same as the previous operation
affected #:  1 file

diff -r 16e8d749a806df28e253f4ca4d6c8275cb99a8e8 -r 6cee606e67539cb1fb38b5e5584741defb76b9be yt/frontends/nyx/io.py
--- a/yt/frontends/nyx/io.py
+++ b/yt/frontends/nyx/io.py
@@ -125,7 +125,7 @@
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
         field = na.fromfile(inFile, count=nElements, dtype=dtype)
-        field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
+        field = field.reshape(grid.ActiveDimensions, order='F')
 
         # @todo: we can/should also check against the max and min in the header
         # file
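
As a quick numpy sanity check of the equivalence the summary describes (the
dimensions below are an arbitrary stand-in for grid.ActiveDimensions):

    import numpy as na  # yt's historical numpy alias

    dims = na.array([4, 3, 2])          # stand-in for grid.ActiveDimensions
    flat = na.arange(dims.prod(), dtype='float64')

    old = flat.reshape(dims[::-1]).swapaxes(0, 2)   # previous operation
    new = flat.reshape(dims, order='F')             # new spelling

    assert old.shape == new.shape
    assert (old == new).all()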



https://bitbucket.org/yt_analysis/yt/changeset/f88cc9e384e6/
changeset:   f88cc9e384e6
branch:      yt
user:        MatthewTurk
date:        2011-11-02 20:48:32
summary:     For all grids that are fully contained in an object and that have
no child grids, this provides a substantial speedup.  For instance, on a Nyx
unigrid run, I saw a 66% speedup when calculating the Extrema of Density.
This should help unigrid calculations the most.  Ideally we would also move to
a handful of specialized functions with a better understanding of the geometry.
affected #:  1 file

diff -r 6cee606e67539cb1fb38b5e5584741defb76b9be -r f88cc9e384e6aadd32f23150329e31d81275eb20 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -2352,11 +2352,13 @@
             f = grid[field]
             return na.array([f[i,:][pointI] for i in range(3)])
         else:
+            tr = grid[field]
+            if tr.size == 1: # dx, dy, dz, cellvolume
+                tr = tr * na.ones(grid.ActiveDimensions, dtype='float64')
+            if len(grid.Children) == 0 and self._is_fully_enclosed(grid):
+                return tr.ravel()
             pointI = self._get_point_indices(grid)
-            if grid[field].size == 1: # dx, dy, dz, cellvolume
-                t = grid[field] * na.ones(grid.ActiveDimensions, dtype='float64')
-                return t[pointI].ravel()
-            return grid[field][pointI].ravel()
+            return tr[pointI].ravel()
 
     def _flush_data_to_grids(self, field, default_val, dtype='float32'):
         """



https://bitbucket.org/yt_analysis/yt/changeset/461d2821835a/
changeset:   461d2821835a
branch:      yt
user:        MatthewTurk
date:        2011-11-02 22:18:24
summary:     A fix for the case in which grids overlap.
affected #:  1 file

diff -r f88cc9e384e6aadd32f23150329e31d81275eb20 -r 461d2821835a742ad7db41ded6d18c47aa7c4e52 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -2355,7 +2355,8 @@
             tr = grid[field]
             if tr.size == 1: # dx, dy, dz, cellvolume
                 tr = tr * na.ones(grid.ActiveDimensions, dtype='float64')
-            if len(grid.Children) == 0 and self._is_fully_enclosed(grid):
+            if len(grid.Children) == 0 and grid.OverlappingSiblings is None \
+                and self._is_fully_enclosed(grid):
                 return tr.ravel()
             pointI = self._get_point_indices(grid)
             return tr[pointI].ravel()



https://bitbucket.org/yt_analysis/yt/changeset/d169a86ac9ff/
changeset:   d169a86ac9ff
branch:      yt
user:        MatthewTurk
date:        2011-11-02 22:25:27
summary:     Backporting the Nyx IO fix to Orion.
affected #:  1 file

diff -r 461d2821835a742ad7db41ded6d18c47aa7c4e52 -r d169a86ac9ff74bd79383681634519b03dbed6fd yt/frontends/orion/io.py
--- a/yt/frontends/orion/io.py
+++ b/yt/frontends/orion/io.py
@@ -111,7 +111,7 @@
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
         field = na.fromfile(inFile,count=nElements,dtype=dtype)
-        field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
+        field = field.reshape(grid.ActiveDimensions, order='F')
 
         # we can/should also check against the max and min in the header file
 



https://bitbucket.org/yt_analysis/yt/changeset/b005b512b743/
changeset:   b005b512b743
branch:      yt
user:        MatthewTurk
date:        2011-11-02 23:14:09
summary:     Adding a new read_and_seek helper for seeking to the start of a
chunk, reading a line, and then seeking relative to that chunk before reading
the data.
affected #:  2 files

diff -r d169a86ac9ff74bd79383681634519b03dbed6fd -r b005b512b743929946b196e312e009d9588af6c8 yt/frontends/nyx/io.py
--- a/yt/frontends/nyx/io.py
+++ b/yt/frontends/nyx/io.py
@@ -28,7 +28,7 @@
 
 import os
 import numpy as na
-from yt.utilities.amr_utils import read_castro_particles
+from yt.utilities.amr_utils import read_castro_particles, read_and_seek
 from yt.utilities.io_handler import BaseIOHandler
 
 from definitions import fab_header_pattern, nyx_particle_field_names, \
@@ -57,80 +57,24 @@
         if field in nyx_particle_field_names:
             return self._read_particle_field(grid, field)
         filen = os.path.expanduser(grid.filename[field])
-        off = grid._offset[field]
-        inFile = open(filen, 'rb')
-        inFile.seek(off)
-        header = inFile.readline()
-        header.strip()
-
-        """
-        if grid._paranoid:
-            mylog.warn("Castro Native reader: Paranoid read mode.")
-            header_re = re.compile(fab_header_pattern)
-            bytesPerReal, endian, start, stop, centerType, nComponents = \
-                headerRe.search(header).groups()
-
-            # we will build up a dtype string, starting with endian.
-            # @todo: this code is ugly.
-            bytesPerReal = int(bytesPerReal)
-            if bytesPerReal == int(endian[0]):
-                dtype = '<'
-            elif bytesPerReal == int(endian[-1]):
-                dtype = '>'
-            else:
-                raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
-
-            dtype += ('f%i' % bytesPerReal)  # always a floating point
-
-            # determine size of FAB
-            start = na.array(map(int, start.split(',')))
-            stop = na.array(map(int, stop.split(',')))
-
-            gridSize = stop - start + 1
-
-            error_count = 0
-            if (start != grid.start).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid start." % grid.filename
-                error_count += 1
-            if (stop != grid.stop).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid stop." % grid.filename
-                error_count += 1
-            if (gridSize != grid.ActiveDimensions).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid dimensions." % grid.filename
-                error_count += 1
-            if bytesPerReal != grid.hierarchy._bytesPerReal:
-                print "Paranoia Error: Cell_H and %s do not agree on bytes per real number." % grid.filename
-                error_count += 1
-            if (bytesPerReal == grid.hierarchy._bytesPerReal and dtype != grid.hierarchy._dtype):
-                print "Paranoia Error: Cell_H and %s do not agree on endianness." % grid.filename
-                error_count += 1
-
-            if error_count > 0:
-                raise RunTimeError("Paranoia unveiled %i differences between Cell_H and %s." % (error_count, grid.filename))
-        else:
-        """
-        start = grid.start_index
-        stop = grid.stop_index
-        dtype = grid.hierarchy._dtype
+        offset1 = grid._offset[field]
+        # one field has nElements * bytesPerReal bytes and is located
+        # nElements * bytesPerReal * field_index from the offset location
         bytesPerReal = grid.hierarchy._bytesPerReal
 
+        fieldname = yt_to_nyx_fields_dict.get(field, field)
+        field_index = grid.field_indexes[fieldname]
         nElements = grid.ActiveDimensions.prod()
+        offset2 = int(nElements*bytesPerReal*field_index)
 
-        # one field has nElements * bytesPerReal bytes and is located
-        # nElements * bytesPerReal * field_index from the offset location
-        if yt_to_nyx_fields_dict.has_key(field):
-            fieldname = yt_to_nyx_fields_dict[field]
-        else:
-            fieldname = field
-        field_index = grid.field_indexes[fieldname]
-        inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile, count=nElements, dtype=dtype)
+        dtype = grid.hierarchy._dtype
+        field = na.empty(nElements, dtype=grid.hierarchy._dtype)
+        read_and_seek(filen, offset1, offset2, field, nElements * bytesPerReal)
         field = field.reshape(grid.ActiveDimensions, order='F')
 
         # @todo: we can/should also check against the max and min in the header
         # file
 
-        inFile.close()
         return field
 
     def _read_data_slice(self, grid, field, axis, coord):


diff -r d169a86ac9ff74bd79383681634519b03dbed6fd -r b005b512b743929946b196e312e009d9588af6c8 yt/utilities/_amr_utils/fortran_reader.pyx
--- a/yt/utilities/_amr_utils/fortran_reader.pyx
+++ b/yt/utilities/_amr_utils/fortran_reader.pyx
@@ -48,6 +48,21 @@
     int fseek(FILE *stream, long offset, int whence)
     size_t fread(void *ptr, size_t size, size_t nmemb, FILE *stream)
     long ftell(FILE *stream)
+    size_t getline(char **lineptr, size_t *n, FILE *stream)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def read_and_seek(char *filename, int offset1, int offset2,
+                  np.ndarray buffer, int bytes):
+    cdef FILE *f = fopen(filename, "rb")
+    cdef void *buf = <void *> buffer.data
+    cdef char *line[1024] # long enough, I suppose
+    cdef size_t n = 1024
+    fseek(f, offset1, SEEK_SET)
+    getline(line, &n, f)
+    fseek(f, offset2, SEEK_CUR)
+    fread(buf, 1, bytes, f)
+    fclose(f)
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
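
In plain-Python terms the new read_and_seek helper behaves roughly like the
sketch below (an illustration of the intended semantics, not the Cython
implementation; read_and_seek_py is a made-up name):

    import numpy as na

    def read_and_seek_py(filename, offset1, offset2, count, dtype):
        # Seek to the start of the FAB chunk, skip its ASCII header line,
        # then seek past the preceding fields before reading the data.
        with open(filename, 'rb') as f:
            f.seek(offset1)
            f.readline()        # the FAB header line
            f.seek(offset2, 1)  # relative to the current position
            return na.fromfile(f, count=count, dtype=dtype)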



https://bitbucket.org/yt_analysis/yt/changeset/9ef068d6a13a/
changeset:   9ef068d6a13a
branch:      yt
user:        MatthewTurk
date:        2011-11-02 23:28:29
summary:     Fixing bug in Nyx field definitions
affected #:  1 file

diff -r b005b512b743929946b196e312e009d9588af6c8 -r 9ef068d6a13a234a227db5c525ed6c302105a605 yt/frontends/nyx/fields.py
--- a/yt/frontends/nyx/fields.py
+++ b/yt/frontends/nyx/fields.py
@@ -46,13 +46,13 @@
 # Density
 add_field("density", function=lambda a, b: None, take_log=True,
           validators=[ValidateDataField("density")],
-          units=r"\rm{g}} / \rm{cm}^3",
-          projected_units =r"\rm{g}} / \rm{cm}^2")
-nyx_fields["density"]._projected_units =r"\rm{g}} / \rm{cm}^2"
+          units=r"\rm{g} / \rm{cm}^3",
+          projected_units =r"\rm{g} / \rm{cm}^2")
+nyx_fields["density"]._projected_units =r"\rm{g} / \rm{cm}^2"
 
 add_field("Density", function=lambda a, b: b["density"], take_log=True,
-          units=r"\rm{g}} / \rm{cm}^3",
-          projected_units =r"\rm{g}} / \rm{cm}^2")
+          units=r"\rm{g} / \rm{cm}^3",
+          projected_units =r"\rm{g} / \rm{cm}^2")
 
 # Particle mass in units of $ M_{\odot}
 def _convertParticleMassMsun(data):
@@ -64,8 +64,8 @@
           particle_type=True, convert_function=_convertParticleMassMsun, take_log=True, units=r"\rm{M_{\odot}}")
           
 add_field("Dark_Matter_Density", function=lambda a, b: b["particle_mass_density"], take_log=True,
-          units=r"\rm{g}} / \rm{cm}^3",particle_type=True,
-          projected_units =r"\rm{g}} / \rm{cm}^2")
+          units=r"\rm{g} / \rm{cm}^3",particle_type=True,
+          projected_units =r"\rm{g} / \rm{cm}^2")
 
 
 # Energy Density



https://bitbucket.org/yt_analysis/yt/changeset/f1455a42317b/
changeset:   f1455a42317b
branch:      yt
user:        MatthewTurk
date:        2011-11-04 13:06:03
summary:     Fixing a crazy-bad bug that I made, where I was allocating
char *line[1024] (an array of 1024 pointers, not a 1024-byte buffer).  Moved
to malloc/free, like in all of the examples.
affected #:  1 file

diff -r 9ef068d6a13a234a227db5c525ed6c302105a605 -r f1455a42317ba7e612842567bb8f62b9d720ac48 yt/utilities/_amr_utils/fortran_reader.pyx
--- a/yt/utilities/_amr_utils/fortran_reader.pyx
+++ b/yt/utilities/_amr_utils/fortran_reader.pyx
@@ -28,6 +28,7 @@
 cimport cython
 
 from stdio cimport fopen, fclose, FILE
+cimport libc.stdlib as stdlib
 
 #cdef inline int imax(int i0, int i1):
     #if i0 > i1: return i0
@@ -56,10 +57,11 @@
                   np.ndarray buffer, int bytes):
     cdef FILE *f = fopen(filename, "rb")
     cdef void *buf = <void *> buffer.data
-    cdef char *line[1024] # long enough, I suppose
+    cdef char *line = <char *> stdlib.malloc(sizeof(char)*1024)
     cdef size_t n = 1024
     fseek(f, offset1, SEEK_SET)
-    getline(line, &n, f)
+    getline(&line, &n, f)
+    stdlib.free(line)
     fseek(f, offset2, SEEK_CUR)
     fread(buf, 1, bytes, f)
     fclose(f)



https://bitbucket.org/yt_analysis/yt/changeset/906ce654ac6e/
changeset:   906ce654ac6e
branch:      yt
user:        MatthewTurk
date:        2011-11-08 16:51:57
summary:     Getting rid of getline and moving to fgets.  Closes #319.
affected #:  1 file

diff -r f1455a42317ba7e612842567bb8f62b9d720ac48 -r 906ce654ac6ed5498c6dbd1b57bee1e82ea25caa yt/utilities/_amr_utils/fortran_reader.pyx
--- a/yt/utilities/_amr_utils/fortran_reader.pyx
+++ b/yt/utilities/_amr_utils/fortran_reader.pyx
@@ -49,7 +49,7 @@
     int fseek(FILE *stream, long offset, int whence)
     size_t fread(void *ptr, size_t size, size_t nmemb, FILE *stream)
     long ftell(FILE *stream)
-    size_t getline(char **lineptr, size_t *n, FILE *stream)
+    char *fgets(char *s, int size, FILE *stream)
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -57,11 +57,10 @@
                   np.ndarray buffer, int bytes):
     cdef FILE *f = fopen(filename, "rb")
     cdef void *buf = <void *> buffer.data
-    cdef char *line = <char *> stdlib.malloc(sizeof(char)*1024)
-    cdef size_t n = 1024
+    cdef char line[1024]
+    cdef size_t n = 1023
     fseek(f, offset1, SEEK_SET)
-    getline(&line, &n, f)
-    stdlib.free(line)
+    fgets(line, n, f)
     fseek(f, offset2, SEEK_CUR)
     fread(buf, 1, bytes, f)
     fclose(f)

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


