[yt-svn] commit/yt: 4 new changesets

commits-noreply at bitbucket.org
Wed Jul 10 10:05:24 PDT 2013


4 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/75f379501f60/
Changeset:   75f379501f60
Branch:      yt
User:        ngoldbaum
Date:        2013-07-09 04:31:47
Summary:     This should make it so StreamStaticOutput.__init__ only runs once.
Affected #:  1 file

diff -r 213d534593fb056814ce918c902e4fc3ea3360e9 -r 75f379501f60899606b419886e4650afafdd0a3f yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -55,9 +55,11 @@
             mylog.debug("Registering: %s as %s", name, cls)
 
     def __new__(cls, filename=None, *args, **kwargs):
+        from yt.frontends.stream.data_structures import StreamHandler
         if not isinstance(filename, types.StringTypes):
             obj = object.__new__(cls)
-            obj.__init__(filename, *args, **kwargs)
+            if not isinstance(filename, StreamHandler):
+                obj.__init__(filename, *args, **kwargs)
             return obj
         apath = os.path.abspath(filename)
         if not os.path.exists(apath): raise IOError(filename)

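For context, the double initialization comes from Python's construction
protocol: when __new__ returns an instance of cls, type.__call__ invokes
__init__ on that object automatically, so the explicit obj.__init__ call
above adds a second run. A minimal sketch (hypothetical class name,
Python 2 like the code above):

    class Output(object):
        def __new__(cls, filename=None, *args, **kwargs):
            obj = object.__new__(cls)
            obj.__init__(filename, *args, **kwargs)  # first run, explicit
            return obj  # Python now calls obj.__init__ a second time

        def __init__(self, filename=None):
            print "__init__ running for %s" % filename

    Output("handler")  # prints twice without a guard like the one above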

https://bitbucket.org/yt_analysis/yt/commits/475d4521d1ab/
Changeset:   475d4521d1ab
Branch:      yt
User:        ngoldbaum
Date:        2013-07-09 04:38:15
Summary:     Merged yt_analysis/yt into yt
Affected #:  4 files

diff -r 75f379501f60899606b419886e4650afafdd0a3f -r 475d4521d1ab9fe8a3c708c0d1e1b03d83825c88 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3703,7 +3703,8 @@
         self.left_edge = np.array(left_edge)
         self.level = level
         rdx = self.pf.domain_dimensions*self.pf.refine_by**level
-        self.dds = self.pf.domain_width/rdx.astype("float64")
+        rdx[np.where(dims - 2 * num_ghost_zones <= 1)] = 1   # issue 602
+        self.dds = self.pf.domain_width / rdx.astype("float64")
         self.ActiveDimensions = np.array(dims, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
         self._num_ghost_zones = num_ghost_zones

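The new clamp appears to target flat, effectively-2D datasets (issue 602):
an axis that is only one cell wide after ghost zones would otherwise get a
cell spacing far smaller than the domain, so the grid would not span that
degenerate dimension. A rough sketch with made-up numbers:

    import numpy as np

    # hypothetical inputs mirroring the hunk above
    domain_dimensions = np.array([8, 8, 1])   # one cell thick along z
    domain_width = np.array([1.0, 1.0, 1.0])
    refine_by, level, num_ghost_zones = 2, 2, 0
    dims = np.array([32, 32, 1])

    rdx = domain_dimensions * refine_by**level           # [32, 32, 4]
    rdx[np.where(dims - 2 * num_ghost_zones <= 1)] = 1   # clamp the flat axis
    dds = domain_width / rdx.astype("float64")           # dds[2] == 1.0
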
diff -r 75f379501f60899606b419886e4650afafdd0a3f -r 475d4521d1ab9fe8a3c708c0d1e1b03d83825c88 yt/utilities/fortran_utils.py
--- /dev/null
+++ b/yt/utilities/fortran_utils.py
@@ -0,0 +1,243 @@
+"""
+Utilities for reading Fortran files.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import struct
+import numpy as np
+import os
+
+def read_attrs(f, attrs, endian='='):
+    r"""This function accepts a file pointer and reads from that file pointer
+    according to a definition of attributes, returning a dictionary.
+
+    Fortran unformatted files provide the total byte size at the beginning and end
+    of a record.  By correlating the components of that record with attribute
+    names, we construct a dictionary that gets returned.  Note that this
+    function is used for reading sequentially-written records.  If you have
+    many values that were written in a single record, see read_record.
+
+    Parameters
+    ----------
+    f : File object
+        An open file object.  Should have been opened in mode rb.
+    attrs : iterable of iterables
+        This object should be an iterable in one of these formats:
+        [ (attr_name, count, struct type), ... ]
+        [ ((name1, name2, name3), count, vector type), ... ]
+        [ ((name1, name2, name3), count, 'type type type'), ... ]
+    endian : str
+        '=' is native, '>' is big, '<' is little endian
+
+    Returns
+    -------
+    values : dict
+        This will return a dict of iterables of the components of the values in
+        the file.
+
+    Examples
+    --------
+
+    >>> header = [ ("ncpu", 1, "i"), ("nfiles", 2, "i") ]
+    >>> f = open("fort.3", "rb")
+    >>> rv = read_attrs(f, header)
+    """
+    net_format = endian
+    for a, n, t in attrs:
+        for end in '@=<>':
+            t = t.replace(end,'')
+        net_format += "".join(["I"] + ([t] * n) + ["I"])
+    size = struct.calcsize(net_format)
+    vals = list(struct.unpack(net_format, f.read(size)))
+    vv = {}
+    for a, n, t in attrs:
+        for end in '@=<>':
+            t = t.replace(end,'')
+        if type(a)==tuple:
+            n = len(a)
+        s1 = vals.pop(0)
+        v = [vals.pop(0) for i in range(n)]
+        s2 = vals.pop(0)
+        assert(s1 == s2)
+        if n == 1: v = v[0]
+        if type(a)==tuple:
+            assert len(a) == len(v)
+            for k,val in zip(a,v):
+                vv[k]=val
+        else:
+            vv[a] = v
+    return vv
+
+def read_vector(f, d, endian='='):
+    r"""This function accepts a file pointer and reads from that file pointer
+    a vector of values.
+
+    Parameters
+    ----------
+    f : File object
+        An open file object.  Should have been opened in mode rb.
+    d : data type
+        This is the datatype (from the struct module) that we should read.
+    endian : str
+        '=' is native, '>' is big, '<' is little endian
+
+    Returns
+    -------
+    tr : numpy.ndarray
+        This is the vector of values read from the file.
+
+    Examples
+    --------
+
+    >>> f = open("fort.3", "rb")
+    >>> rv = read_vector(f, 'd')
+    """
+    pad_fmt = "%sI" % (endian)
+    pad_size = struct.calcsize(pad_fmt)
+    vec_len = struct.unpack(pad_fmt,f.read(pad_size))[0] # bytes
+    vec_fmt = "%s%s" % (endian, d)
+    vec_size = struct.calcsize(vec_fmt)
+    if vec_len % vec_size != 0:
+        print "fmt = '%s' ; length = %s ; size = %s" % (vec_fmt, vec_len, vec_size)
+        raise RuntimeError
+    vec_num = vec_len / vec_size
+    if isinstance(f, file): # Needs to be explicitly a file
+        tr = np.fromfile(f, vec_fmt, count=vec_num)
+    else:
+        tr = np.fromstring(f.read(vec_len), vec_fmt, count=vec_num)
+    vec_len2 = struct.unpack(pad_fmt,f.read(pad_size))[0]
+    assert(vec_len == vec_len2)
+    return tr
+
+def skip(f, n=1, endian='='):
+    r"""This function accepts a file pointer and skips a Fortran unformatted
+    record. Optionally check that the skip was done correctly by checking 
+    the pad bytes.
+
+    Parameters
+    ----------
+    f : File object
+        An open file object.  Should have been opened in mode rb.
+    n : int
+        Number of records to skip.
+    endian : str
+        '=' is native, '>' is big, '<' is little endian
+
+    Returns
+    -------
+    skipped : int
+        The number of 4-byte elements in the skipped records.
+
+    Examples
+    --------
+
+    >>> f = open("fort.3", "rb")
+    >>> skip(f, 3)
+    """
+    skipped = 0
+    fmt = endian + "I"
+    fmt_size = struct.calcsize(fmt)
+    for i in range(n):
+        # the leading pad gives the record's byte size
+        s1 = struct.unpack(fmt, f.read(fmt_size))[0]
+        f.seek(s1, os.SEEK_CUR)
+        # read (rather than re-use) the trailing pad so the check is real
+        s2 = struct.unpack(fmt, f.read(fmt_size))[0]
+        assert s1 == s2
+        skipped += s1 / fmt_size
+    return skipped
+
+def peek_record_size(f, endian='='):
+    r"""This function accepts a file handle, returns the size of the next
+    record, and then rewinds the file to its previous position.
+
+    Parameters
+    ----------
+    f : File object
+        An open file object.  Should have been opened in mode rb.
+    endian : str
+        '=' is native, '>' is big, '<' is little endian
+
+    Returns
+    -------
+    Number of bytes in the next record
+    """
+    pos = f.tell()
+    fmt = endian + "I"
+    s = struct.unpack(fmt, f.read(struct.calcsize(fmt)))
+    f.seek(pos)
+    return s[0]
+
+def read_record(f, rspec, endian='='):
+    r"""This function accepts a file pointer and reads from that file pointer
+    a single "record" with different components.
+
+    Fortran unformatted files provide the total byte size at the beginning and end
+    of a record.  By correlating the components of that record with attribute
+    names, we construct a dictionary that gets returned.
+
+    Parameters
+    ----------
+    f : File object
+        An open file object.  Should have been opened in mode rb.
+    rspec : iterable of iterables
+        This object should be an iterable of the format [ (attr_name, count,
+        struct type), ... ].
+    endian : str
+        '=' is native, '>' is big, '<' is little endian
+
+    Returns
+    -------
+    values : dict
+        This will return a dict of iterables of the components of the values in
+        the file.
+
+    Examples
+    --------
+
+    >>> header = [ ("ncpu", 1, "i"), ("nfiles", 2, "i") ]
+    >>> f = open("fort.3", "rb")
+    >>> rv = read_record(f, header)
+    """
+    vv = {}
+    net_format = endian + "I"
+    for a, n, t in rspec:
+        t = t if len(t)==1 else t[-1]
+        net_format += "%s%s"%(n, t)
+    net_format += "I"
+    size = struct.calcsize(net_format)
+    vals = list(struct.unpack(net_format, f.read(size)))
+    s1, s2 = vals.pop(0), vals.pop(-1)
+    if s1 != s2:
+        print "S1 = %s ; S2 = %s ; SIZE = %s" % (s1, s2, size)
+        raise RuntimeError
+    pos = 0
+    for a, n, t in rspec:
+        vv[a] = vals[pos:pos+n]
+        pos += n
+    return vv
+

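As a quick sanity check of the framing read_attrs expects (a 4-byte record
size before and after each payload), here is a sketch of a round-trip
against an in-memory buffer instead of a real Fortran file (Python 2;
StringIO stands in for an open file object):

    import struct
    from cStringIO import StringIO

    from yt.utilities.fortran_utils import read_attrs

    # two sequential records: one int ("ncpu"), then two ints ("nfiles")
    buf = struct.pack("=IiI", 4, 16, 4)
    buf += struct.pack("=IiiI", 8, 3, 7, 8)

    rv = read_attrs(StringIO(buf), [("ncpu", 1, "i"), ("nfiles", 2, "i")])
    # rv == {'ncpu': 16, 'nfiles': [3, 7]}
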
diff -r 75f379501f60899606b419886e4650afafdd0a3f -r 475d4521d1ab9fe8a3c708c0d1e1b03d83825c88 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -26,7 +26,13 @@
 gravitational_constant_cgs  = 6.67428e-8  # cm^3 g^-1 s^-2
 planck_constant_cgs   = 6.62606896e-27  # erg s
 stefan_boltzmann_constant_cgs = 5.67051e-5 # erg cm^-2 s^-1 K^-4
-rho_crit_now = 1.8788e-29  # g times h^2 (critical mass for closure, Cosmology)
+# The following value was calculated assuming H = 100 km/s/Mpc.
+# To get the correct value for your cosmological parameters,
+# you'll need to multiply through by h^2
+# [where h = H / (100 km/s/Mpc)].  See the Overdensity field in
+# yt.data_objects.universal_fields.
+rho_crit_now = 1.8788e-29  # g/cm^3 (cosmological critical density)
+
 
 # Misc. Approximations
 mass_mean_atomic_cosmology = 1.22

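To make the h^2 scaling in the new comment concrete, for an illustrative
h = 0.704:

    h = 0.704                     # H0 / (100 km/s/Mpc), illustrative value
    rho_crit = 1.8788e-29 * h**2  # ~9.31e-30 g/cm^3 for this cosmology
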
diff -r 75f379501f60899606b419886e4650afafdd0a3f -r 475d4521d1ab9fe8a3c708c0d1e1b03d83825c88 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -3,6 +3,7 @@
 import sys
 import os.path
 import glob
+import platform
 
 
 # snatched from PyTables
@@ -23,6 +24,8 @@
 # snatched from PyTables
 def get_default_dirs():
     default_header_dirs = []
+    default_library_dirs = []
+
     add_from_path("CPATH", default_header_dirs)
     add_from_path("C_INCLUDE_PATH", default_header_dirs)
     add_from_flags("CPPFLAGS", "-I", default_header_dirs)
@@ -30,12 +33,21 @@
         ['/usr/include', '/usr/local/include', '/usr/X11']
     )
 
-    default_library_dirs = []
+    _archs = ['lib64', 'lib']
+    if platform.system() == 'Linux':
+        distname, version, did = platform.linux_distribution()
+        if distname in ('Ubuntu', 'Debian'):
+            _archs.extend(
+                ['lib/x86_64-linux-gnu',
+                 'lib/i686-linux-gnu',
+                 'lib/i386-linux-gnu']
+            )
+
     add_from_flags("LDFLAGS", "-L", default_library_dirs)
     default_library_dirs.extend(
         os.path.join(_tree, _arch)
-        for _tree in ('/', '/usr', '/usr/local', '/usr/X11')
-        for _arch in ('lib64', 'lib')
+        for _tree in ('/usr', '/usr/local', '/usr/X11', '/')
+        for _arch in _archs
     )
     return default_header_dirs, default_library_dirs
 
@@ -59,6 +71,14 @@
 
 
 def check_prefix(inc_dir, lib_dir):
+    if platform.system() == 'Linux':
+        distname, version, did = platform.linux_distribution()
+        if distname in ('Ubuntu', 'Debian'):
+            print("Since you are using a multiarch distro it's hard to detect")
+            print("whether the library matches the header file. We will assume")
+            print("it does. If you encounter any build failures, please use")
+            print("proper cfg files to provide paths to the dependencies.")
+            return (inc_dir, lib_dir)
     prefix = os.path.commonprefix([inc_dir, lib_dir]).rstrip('/\\')
     if prefix is not '' and prefix == os.path.dirname(inc_dir):
         return (inc_dir, lib_dir)
@@ -69,20 +89,29 @@
 
 
 def get_location_from_ctypes(header, library):
+    yt_inst = os.environ.get('YT_DEST')
+    if yt_inst is not None:
+        # since we prefer installation via script, make sure
+        # that the YT_DEST path takes precedence above all else
+        return (os.path.join(yt_inst, 'include'), os.path.join(yt_inst, 'lib'))
+
     try:
         import ctypes
         import ctypes.util
     except ImportError:
         return (None, None)
 
+    target_inc, target_libdir = None, None
     default_header_dirs, default_library_dirs = get_default_dirs()
-    target_inc, target_libdir = None, None
     for inc_prefix in default_header_dirs:
         if os.path.isfile(os.path.join(inc_prefix, header)):
             target_inc = inc_prefix
 
     target_libfile = ctypes.util.find_library(library)
-    if target_libfile is not None and os.path.isfile(target_libfile):
+    if None in (target_inc, target_libfile):
+        # either header or lib was not found, abort now
+        return (None, None)
+    if os.path.isfile(target_libfile):
         return check_prefix(target_inc, os.path.dirname(target_libfile))
     for lib_dir in default_library_dirs:
         try:


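The net effect of the get_location_from_ctypes change is that an
install-script environment short-circuits all header/library probing.
A hedged sketch (the /opt/yt path is made up):

    import os
    os.environ['YT_DEST'] = '/opt/yt'  # normally set by the install script

    from yt.utilities.setup import get_location_from_ctypes
    print get_location_from_ctypes('png.h', 'png')
    # -> ('/opt/yt/include', '/opt/yt/lib'), no probing needed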
https://bitbucket.org/yt_analysis/yt/commits/0203a30a0271/
Changeset:   0203a30a0271
Branch:      yt
User:        ngoldbaum
Date:        2013-07-10 19:02:12
Summary:     Duck typing to avoid an import.
Affected #:  1 file

diff -r 475d4521d1ab9fe8a3c708c0d1e1b03d83825c88 -r 0203a30a0271d5714ea7c02f9766341f9b2946dc yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -55,10 +55,13 @@
             mylog.debug("Registering: %s as %s", name, cls)
 
     def __new__(cls, filename=None, *args, **kwargs):
-        from yt.frontends.stream.data_structures import StreamHandler
         if not isinstance(filename, types.StringTypes):
             obj = object.__new__(cls)
-            if not isinstance(filename, StreamHandler):
+            # The Stream frontend uses a StreamHandler object to pass metadata
+            # to __init__.
+            is_stream = (hasattr(filename, 'get_fields') and
+                         hasattr(filename, 'get_particle_type'))
+            if not is_stream:
                 obj.__init__(filename, *args, **kwargs)
             return obj
         apath = os.path.abspath(filename)

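With the hasattr probe, any object exposing the stream-handler interface is
accepted, and static_output no longer imports from the stream frontend at
all. A made-up stand-in illustrates the idea:

    class FakeHandler(object):           # hypothetical, not a yt class
        def get_fields(self):
            return []
        def get_particle_type(self, name):
            return False

    fh = FakeHandler()
    is_stream = (hasattr(fh, 'get_fields') and
                 hasattr(fh, 'get_particle_type'))
    assert is_stream  # quacks like a StreamHandler, so __init__ is deferred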

https://bitbucket.org/yt_analysis/yt/commits/a745781d8c7d/
Changeset:   a745781d8c7d
Branch:      yt
User:        MatthewTurk
Date:        2013-07-10 19:04:54
Summary:     Merged in ngoldbaum/yt (pull request #550)

Fixing a bug that causes StaticOutput.__init__() to run twice for Stream datasets
Affected #:  1 file

diff -r 577bb3ce6fc3e6d71130be84d03f46201f8cac7b -r a745781d8c7dde89973c1e8cac7da07aa564036e yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -57,7 +57,12 @@
     def __new__(cls, filename=None, *args, **kwargs):
         if not isinstance(filename, types.StringTypes):
             obj = object.__new__(cls)
-            obj.__init__(filename, *args, **kwargs)
+            # The Stream frontend uses a StreamHandler object to pass metadata
+            # to __init__.
+            is_stream = (hasattr(filename, 'get_fields') and
+                         hasattr(filename, 'get_particle_type'))
+            if not is_stream:
+                obj.__init__(filename, *args, **kwargs)
             return obj
         apath = os.path.abspath(filename)
         if not os.path.exists(apath): raise IOError(filename)

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
