[yt-svn] commit/yt: 14 new changesets

commits-noreply at bitbucket.org
Thu Aug 13 09:10:54 PDT 2015


14 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/21cb3a3fabd0/
Changeset:   21cb3a3fabd0
Branch:      yt
User:        jzuhone
Date:        2015-07-20 17:23:45+00:00
Summary:     Adding savetxt and loadtxt convenience functions.
Affected #:  2 files

diff -r a23b54180c2a47222c325fe74e5fe11f004e4f9a -r 21cb3a3fabd05f20266e4107f5bbf788493240c2 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -103,7 +103,9 @@
     YTQuantity, \
     uconcatenate, \
     uintersect1d, \
-    uunion1d
+    uunion1d, \
+    loadtxt, \
+    savetxt
 
 from yt.fields.api import \
     field_plugins, \
@@ -169,4 +171,3 @@
 # Import some helpful math utilities
 from yt.utilities.math_utils import \
     ortho_find, quartiles, periodic_position
-

diff -r a23b54180c2a47222c325fe74e5fe11f004e4f9a -r 21cb3a3fabd05f20266e4107f5bbf788493240c2 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1227,7 +1227,7 @@
 def ucross(arr1,arr2, registry=None):
     """Applies the cross product to two YT arrays.
 
-    This wrapper around numpy.cross preserves units.  
+    This wrapper around numpy.cross preserves units.
     See the documentation of numpy.cross for full
     details.
     """
@@ -1308,3 +1308,32 @@
     else:
         raise RuntimeError("Undefined operation for a YTArray subclass. "
                            "Received operand types (%s) and (%s)" % (cls1, cls2))
+
+def loadtxt(fname, dtype='float', delimiter='\t', usecols=None):
+    f = open(fname, 'r')
+    line = f.readline()
+    f.close()
+    u = line.strip().split()[1:]
+    if usecols is None:
+        units = u
+    else:
+        units = []
+        for col in usecols:
+            units.append(u[col])
+    arrays = np.loadtxt(fname, dtype=dtype, comments="#",
+                        delimiter=delimiter, converters=None,
+                        unpack=True, usecols=usecols, ndmin=0)
+    return tuple([YTArray(arr, unit) for arr, unit in zip(arrays, units)])
+
+def savetxt(fname, arrays, fmt='%.18e', delimiter='\t', newline='\n'):
+    if not isinstance(arrays, list):
+        arrays = [arrays]
+    units = []
+    for array in arrays:
+        if hasattr(array, "units"):
+            units.append(str(array.units))
+        else:
+            units.append("dimensionless")
+    header = "\t".join(units)
+    np.savetxt(fname, np.transpose(arrays), header=header,
+               fmt=fmt, delimiter=delimiter, newline=newline)

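A minimal round trip with the convenience functions as first added in this changeset might look like the following sketch (the filename "arrays.dat" and the random test data are illustrative):

    import numpy as np
    import yt
    from yt.units.yt_array import YTArray

    # Two YTArrays written as tab-delimited columns; the first (comment) line
    # of the file records the unit string of each column.
    a = YTArray(np.random.random(10), "kpc")
    b = YTArray(np.random.random(10), "Msun")
    yt.savetxt("arrays.dat", [a, b])

    # Reading them back recovers the units from that header line.
    a2, b2 = yt.loadtxt("arrays.dat")
    print(a2.units, b2.units)  # expected: kpc Msun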

https://bitbucket.org/yt_analysis/yt/commits/08f5c3ebbe9c/
Changeset:   08f5c3ebbe9c
Branch:      yt
User:        jzuhone
Date:        2015-07-23 13:26:26+00:00
Summary:     Merging
Affected #:  5 files

diff -r 21cb3a3fabd05f20266e4107f5bbf788493240c2 -r 08f5c3ebbe9c4c7139a3aeb69d9d600f50cd123f doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -641,7 +641,7 @@
 TORNADO='tornado-4.0.2'
 ZEROMQ='zeromq-4.0.5'
 ZLIB='zlib-1.2.8'
-SETUPTOOLS='setuptools-16.0'
+SETUPTOOLS='setuptools-18.0.1'
 
 # Now we dump all our SHA512 files out.
 echo '856220fa579e272ac38dcef091760f527431ff3b98df9af6e68416fcf77d9659ac5abe5c7dee41331f359614637a4ff452033085335ee499830ed126ab584267  Cython-0.22.tar.gz' > Cython-0.22.tar.gz.sha512
@@ -669,7 +669,7 @@
 echo '93591068dc63af8d50a7925d528bc0cccdd705232c529b6162619fe28dddaf115e8a460b1842877d35160bd7ed480c1bd0bdbec57d1f359085bd1814e0c1c242  tornado-4.0.2.tar.gz' > tornado-4.0.2.tar.gz.sha512
 echo '0d928ed688ed940d460fa8f8d574a9819dccc4e030d735a8c7db71b59287ee50fa741a08249e356c78356b03c2174f2f2699f05aa7dc3d380ed47d8d7bab5408  zeromq-4.0.5.tar.gz' > zeromq-4.0.5.tar.gz.sha512
 echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
-echo '38a89aad89dc9aa682dbfbca623e2f69511f5e20d4a3526c01aabbc7e93ae78f20aac566676b431e111540b41540a1c4f644ce4174e7ecf052318612075e02dc  setuptools-16.0.tar.gz' > setuptools-16.0.tar.gz.sha512
+echo '9b318ce2ee2cf787929dcb886d76c492b433e71024fda9452d8b4927652a298d6bd1bdb7a4c73883a98e100024f89b46ea8aa14b250f896e549e6dd7e10a6b41  setuptools-18.0.1.tar.gz' > setuptools-18.0.1.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_ytproject $ZLIB.tar.gz

diff -r 21cb3a3fabd05f20266e4107f5bbf788493240c2 -r 08f5c3ebbe9c4c7139a3aeb69d9d600f50cd123f yt/frontends/tipsy/data_structures.py
--- a/yt/frontends/tipsy/data_structures.py
+++ b/yt/frontends/tipsy/data_structures.py
@@ -180,7 +180,7 @@
                 self.domain_left_edge = None
                 self.domain_right_edge = None
         else: 
-            bbox = self.arr(self.bounding_box, 'code_length', dtype="float64")
+            bbox = np.array(self.bounding_box, dtype="float64")
             if bbox.shape == (2, 3):
                 bbox = bbox.transpose()
             self.domain_left_edge = bbox[:,0]

diff -r 21cb3a3fabd05f20266e4107f5bbf788493240c2 -r 08f5c3ebbe9c4c7139a3aeb69d9d600f50cd123f yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -603,20 +603,24 @@
         return mask.view("bool")
 
     def __hash__(self):
-        return hash(("mine", self._hash_vals()) +
-                    ("base", self._base_hash()))
+        cdef np.int64_t hash_val = 0
+        for v in self._hash_vals() + self._base_hash():
+            hash_val ^= hash(v)
+        return hash_val
 
     def _hash_vals(self):
         raise NotImplementedError
 
     def _base_hash(self):
-        return (self.min_level, self.max_level, self.overlap_cells,
-                self.periodicity[0],
-                self.periodicity[1],
-                self.periodicity[2],
-                self.domain_width[0],
-                self.domain_width[1],
-                self.domain_width[2])
+        return (("min_level", self.min_level),
+                ("max_level", self.max_level),
+                ("overlap_cells", self.overlap_cells),
+                ("periodicity[0]", self.periodicity[0]),
+                ("periodicity[1]", self.periodicity[1]),
+                ("periodicity[2]", self.periodicity[2]),
+                ("domain_width[0]", self.domain_width[0]),
+                ("domain_width[1]", self.domain_width[1]),
+                ("domain_width[2]", self.domain_width[2]))
 
 
 cdef class PointSelector(SelectorObject):
@@ -670,7 +674,9 @@
             return 0
 
     def _hash_vals(self):
-        return (self.p[0], self.p[1], self.p[2])
+        return (("p[0]", self.p[0]),
+                ("p[1]", self.p[1]),
+                ("p[2]", self.p[2]))
 
 point_selector = PointSelector
 
@@ -771,8 +777,11 @@
         return 1
 
     def _hash_vals(self):
-        return (self.radius, self.radius2,
-                self.center[0], self.center[1], self.center[2])
+        return (("radius", self.radius),
+                ("radius2", self.radius2),
+                ("center[0]", self.center[0]),
+                ("center[1]", self.center[1]),
+                ("center[2]", self.center[2]))
 
 sphere_selector = SphereSelector
 
@@ -867,8 +876,12 @@
         return 1
 
     def _hash_vals(self):
-        return (self.left_edge[0], self.left_edge[1], self.left_edge[2],
-                self.right_edge[0], self.right_edge[1], self.right_edge[2])
+        return (("left_edge[0]", self.left_edge[0]),
+                ("left_edge[1]", self.left_edge[1]),
+                ("left_edge[2]", self.left_edge[2]),
+                ("right_edge[0]", self.right_edge[0]),
+                ("right_edge[1]", self.right_edge[1]),
+                ("right_edge[2]", self.right_edge[2]))
 
 region_selector = RegionSelector
 
@@ -899,7 +912,10 @@
         return 1
 
     def _hash_vals(self):
-        return self._conditionals
+        t = ()
+        for i, c in enumerate(self._conditionals):
+            t += ("conditional[%s]" % i, c)
+        return ("conditionals", t)
 
 cut_region_selector = CutRegionSelector
 
@@ -1005,9 +1021,15 @@
         return 0
 
     def _hash_vals(self):
-        return (self.norm_vec[0], self.norm_vec[1], self.norm_vec[2],
-                self.center[0], self.center[1], self.center[2],
-                self.radius, self.radius2, self.height)
+        return (("norm_vec[0]", self.norm_vec[0]),
+                ("norm_vec[1]", self.norm_vec[1]),
+                ("norm_vec[2]", self.norm_vec[2]),
+                ("center[0]", self.center[0]),
+                ("center[1]", self.center[1]),
+                ("center[2]", self.center[2]),
+                ("radius", self.radius),
+                ("radius2", self.radius2),
+                ("height", self.height))
 
 disk_selector = DiskSelector
 
@@ -1084,8 +1106,10 @@
         return 1
 
     def _hash_vals(self):
-        return (self.norm_vec[0], self.norm_vec[1], self.norm_vec[2],
-                self.d)
+        return (("norm_vec[0]", self.norm_vec[0]),
+                ("norm_vec[1]", self.norm_vec[1]), 
+                ("norm_vec[2]", self.norm_vec[2]),
+                ("d", self.d))
 
 cutting_selector = CuttingPlaneSelector
 
@@ -1173,7 +1197,8 @@
         return 0
 
     def _hash_vals(self):
-        return (self.axis, self.coord)
+        return (("axis", self.axis),
+                ("coord", self.coord))
 
 slice_selector = SliceSelector
 
@@ -1271,7 +1296,11 @@
         return 0
 
     def _hash_vals(self):
-        return (self.px_ax, self.py_ax, self.px, self.py, self.axis)
+        return (("px_ax", self.px_ax),
+                ("py_ax", self.py_ax),
+                ("px", self.px),
+                ("py", self.py),
+                ("axis", self.axis))
 
 ortho_ray_selector = OrthoRaySelector
 
@@ -1495,9 +1524,15 @@
         return self.select_bbox(left_edge, right_edge)
 
     def _hash_vals(self):
-        return (self.p1[0], self.p1[1], self.p1[2],
-                self.p2[0], self.p2[1], self.p2[2],
-                self.vec[0], self.vec[1], self.vec[2])
+        return (("p1[0]", self.p1[0]),
+                ("p1[1]", self.p1[1]),
+                ("p1[2]", self.p1[2]),
+                ("p2[0]", self.p2[0]),
+                ("p2[1]", self.p2[1]),
+                ("p2[2]", self.p2[2]),
+                ("vec[0]", self.vec[0]),
+                ("vec[1]", self.vec[1]),
+                ("vec[2]", self.vec[2]))
 
 ray_selector = RaySelector
 
@@ -1622,11 +1657,21 @@
         return 0
 
     def _hash_vals(self):
-        return (self.vec[0][0], self.vec[0][1], self.vec[0][2],
-                self.vec[1][0], self.vec[1][1], self.vec[1][2],
-                self.vec[2][0], self.vec[2][1], self.vec[2][2],
-                self.mag[0], self.mag[1], self.mag[2],
-                self.center[0], self.center[1], self.center[2])
+        return (("vec[0][0]", self.vec[0][0]),
+                ("vec[0][1]", self.vec[0][1]),
+                ("vec[0][2]", self.vec[0][2]),
+                ("vec[1][0]", self.vec[1][0]),
+                ("vec[1][1]", self.vec[1][1]),
+                ("vec[1][2]", self.vec[1][2]),
+                ("vec[2][0]", self.vec[2][0]),
+                ("vec[2][1]", self.vec[2][1]),
+                ("vec[2][2]", self.vec[2][2]),
+                ("mag[0]", self.mag[0]),
+                ("mag[1]", self.mag[1]),
+                ("mag[2]", self.mag[2]),
+                ("center[0]", self.center[0]),
+                ("center[1]", self.center[1]),
+                ("center[2]", self.center[2]))
 
 ellipsoid_selector = EllipsoidSelector
 

diff -r 21cb3a3fabd05f20266e4107f5bbf788493240c2 -r 08f5c3ebbe9c4c7139a3aeb69d9d600f50cd123f yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -349,8 +349,8 @@
     def __init__(self):
         self.comm = communication_system.communicators[-1]
         self.size = self.comm.size
-        self.ranks = range(self.size)
-        self.available_ranks = range(self.size)
+        self.ranks = list(range(self.size))
+        self.available_ranks = list(range(self.size))
         self.workgroups = []
 
     def add_workgroup(self, size=None, ranks=None, name=None):
@@ -1259,7 +1259,7 @@
         self.num_items = len(items)
         self.items = items
         assert(self.num_items >= self.comm.size)
-        self.owned = range(self.comm.size)
+        self.owned = list(range(self.comm.size))
         self.pointer = 0
         if parallel_capable:
             communication_system.push_with_ids([self.comm.rank])

diff -r 21cb3a3fabd05f20266e4107f5bbf788493240c2 -r 08f5c3ebbe9c4c7139a3aeb69d9d600f50cd123f yt/visualization/tests/test_filters.py
--- a/yt/visualization/tests/test_filters.py
+++ b/yt/visualization/tests/test_filters.py
@@ -22,7 +22,10 @@
         cls.frb = p.to_frb((1, 'unitary'), 64)
 
     def teardown(self):
-        del self.frb["density"]
+        try:
+            del self.frb["density"]
+        except KeyError:
+            pass
 
     def test_white_noise_filter(self):
         self.frb.apply_white_noise()

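The __hash__ change above replaces hashing a flat tuple of values with an XOR over per-field hashes, where each value is tagged with its field name. A plain-Python sketch of the same scheme (the selector values below are illustrative, not the actual Cython classes):

    # Combine labeled hash values the way SelectorObject.__hash__ now does:
    # hash each ("name", value) pair individually and XOR the results.
    def combined_hash(hash_vals, base_hash):
        hash_val = 0
        for v in hash_vals + base_hash:
            hash_val ^= hash(v)
        return hash_val

    # Illustrative values only.
    point_vals = (("p[0]", 0.5), ("p[1]", 0.5), ("p[2]", 0.5))
    base_vals = (("min_level", 0), ("max_level", 99), ("overlap_cells", 1))

    # Tagging each value with its name keeps, say, a radius of 0.5 from
    # contributing the same hash as a center coordinate of 0.5.
    print(combined_hash(point_vals, base_vals))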

https://bitbucket.org/yt_analysis/yt/commits/d2d8b6ffd3d6/
Changeset:   d2d8b6ffd3d6
Branch:      yt
User:        jzuhone
Date:        2015-07-24 13:26:05+00:00
Summary:     Some new functionality and some docstrings. Still not quite finished.
Affected #:  1 file

diff -r 08f5c3ebbe9c4c7139a3aeb69d9d600f50cd123f -r d2d8b6ffd3d6f67fe0c917f728d8f3a2cc56a6fa yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1310,6 +1310,8 @@
                            "Received operand types (%s) and (%s)" % (cls1, cls2))
 
 def loadtxt(fname, dtype='float', delimiter='\t', usecols=None):
+    r"""
+    """
     f = open(fname, 'r')
     line = f.readline()
     f.close()
@@ -1325,7 +1327,40 @@
                         unpack=True, usecols=usecols, ndmin=0)
     return tuple([YTArray(arr, unit) for arr, unit in zip(arrays, units)])
 
-def savetxt(fname, arrays, fmt='%.18e', delimiter='\t', newline='\n'):
+def savetxt(fname, arrays, fmt='%.18e', delimiter='\t', newline='\n',
+            header='', footer='', comments='#'):
+    r"""
+    
+    Parameters
+    ----------
+    fname : string
+        The file to write the YTArrays to.
+    arrays : list of YTArrays or single YTArray
+        The array(s) to write to the file.
+    fmt : str or sequence of strs, optional
+        A single format (%10.5f), or a sequence of formats. 
+    delimiter : string, optional
+        String or character separating columns.
+    newline : string, optional
+        String or character separating lines.
+    header : string, optional
+        String that will be written at the beginning of the file, before the
+        unit header.
+    footer : string, optional
+        String that will be written at the end of the file.
+    comments : str, optional
+        String that will be prepended to the ``header`` and ``footer`` strings,
+        to mark them as comments. Default: '# ',  as expected by e.g.
+        ``yt.loadtxt``.
+    
+    Examples
+    --------
+    >>> sp = ds.sphere("c", (100,"kpc"))
+    >>> a = sphere["density"]
+    >>> b = sphere["temperature"]
+    >>> c = sphere["velocity_x"]
+    >>> yt.savetxt("sphere.dat", [a,b,c], header='My sphere stuff', delimiter="\t")
+    """
     if not isinstance(arrays, list):
         arrays = [arrays]
     units = []
@@ -1334,6 +1369,7 @@
             units.append(str(array.units))
         else:
             units.append("dimensionless")
-    header = "\t".join(units)
+    header = header + newline + delimiter.join(units)
     np.savetxt(fname, np.transpose(arrays), header=header,
-               fmt=fmt, delimiter=delimiter, newline=newline)
+               fmt=fmt, delimiter=delimiter, footer=footer,
+               newline=newline, comments=comments)


https://bitbucket.org/yt_analysis/yt/commits/587bad9c5f8f/
Changeset:   587bad9c5f8f
Branch:      yt
User:        jzuhone
Date:        2015-07-27 21:21:12+00:00
Summary:     More work on savetxt and loadtxt
Affected #:  1 file

diff -r d2d8b6ffd3d6f67fe0c917f728d8f3a2cc56a6fa -r 587bad9c5f8f0815fc4bd1e8444c5c0a2cb6e1f6 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1309,48 +1309,76 @@
         raise RuntimeError("Undefined operation for a YTArray subclass. "
                            "Received operand types (%s) and (%s)" % (cls1, cls2))
 
-def loadtxt(fname, dtype='float', delimiter='\t', usecols=None):
+def loadtxt(fname, dtype='float', delimiter='\t', usecols=None, skiprows=0,
+            comments='#'):
     r"""
+    Load YTArrays with unit information from a text file. Each row in the
+    text file must have the same number of values.
+
+    Parameters
+    ----------
+    fname : str
+        Filename to read. 
+    dtype : data-type, optional
+        Data-type of the resulting array; default: float.
+    delimiter : str, optional
+        The string used to separate values.  By default, this is any
+        whitespace.
+    usecols : sequence, optional
+        Which columns to read, with 0 being the first.  For example,
+        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
+        The default, None, results in all columns being read.
+    skiprows : int, optional
+        Skip the first `skiprows` lines; default: 0.
+    comments : str, optional
+        The character used to indicate the start of a comment;
+        default: '#'.
+
+    Examples
+    --------
+    >>> temp, velx = yt.loadtxt("sphere.dat", usecols=(1,2), delimiter="\t")
     """
     f = open(fname, 'r')
-    line = f.readline()
+    next_one = False
+    for line in f.readlines():
+        words = line.strip.split()
+        if next_one:
+            units = words[1:]
+            break
+        elif words[1] == "Units":
+            next_one = True
     f.close()
-    u = line.strip().split()[1:]
-    if usecols is None:
-        units = u
-    else:
-        units = []
-        for col in usecols:
-            units.append(u[col])
+    if usecols is not None:
+        units = [units[col] for col in usecols]
     arrays = np.loadtxt(fname, dtype=dtype, comments="#",
                         delimiter=delimiter, converters=None,
-                        unpack=True, usecols=usecols, ndmin=0)
+                        unpack=True, usecols=usecols, ndmin=0,
+                        skiprows=skiprows, comments=comments)
     return tuple([YTArray(arr, unit) for arr, unit in zip(arrays, units)])
 
-def savetxt(fname, arrays, fmt='%.18e', delimiter='\t', newline='\n',
-            header='', footer='', comments='#'):
+def savetxt(fname, arrays, fmt='%.18e', delimiter='\t', header='',
+            footer='', comments='#'):
     r"""
+    Write YTArrays with unit information to a text file.
     
     Parameters
     ----------
-    fname : string
+    fname : str
         The file to write the YTArrays to.
     arrays : list of YTArrays or single YTArray
         The array(s) to write to the file.
     fmt : str or sequence of strs, optional
         A single format (%10.5f), or a sequence of formats. 
-    delimiter : string, optional
+    delimiter : str, optional
         String or character separating columns.
-    newline : string, optional
-        String or character separating lines.
-    header : string, optional
+    header : str, optional
         String that will be written at the beginning of the file, before the
         unit header.
-    footer : string, optional
+    footer : str, optional
         String that will be written at the end of the file.
     comments : str, optional
         String that will be prepended to the ``header`` and ``footer`` strings,
-        to mark them as comments. Default: '# ',  as expected by e.g.
+        to mark them as comments. Default: '# ', as expected by e.g.
         ``yt.loadtxt``.
     
     Examples
@@ -1369,7 +1397,7 @@
             units.append(str(array.units))
         else:
             units.append("dimensionless")
-    header = header + newline + delimiter.join(units)
+    header = header + "%s Units\n" % comments + delimiter.join(units)
     np.savetxt(fname, np.transpose(arrays), header=header,
                fmt=fmt, delimiter=delimiter, footer=footer,
-               newline=newline, comments=comments)
+               newline='\n', comments=comments)


https://bitbucket.org/yt_analysis/yt/commits/4eb92f965d57/
Changeset:   4eb92f965d57
Branch:      yt
User:        jzuhone
Date:        2015-07-28 02:16:33+00:00
Summary:     Fixing bugs
Affected #:  1 file

diff -r 587bad9c5f8f0815fc4bd1e8444c5c0a2cb6e1f6 -r 4eb92f965d57778558adfe0a7e675ff690d6bb38 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1309,8 +1309,7 @@
         raise RuntimeError("Undefined operation for a YTArray subclass. "
                            "Received operand types (%s) and (%s)" % (cls1, cls2))
 
-def loadtxt(fname, dtype='float', delimiter='\t', usecols=None, skiprows=0,
-            comments='#'):
+def loadtxt(fname, dtype='float', delimiter='\t', usecols=None, comments='#'):
     r"""
     Load YTArrays with unit information from a text file. Each row in the
     text file must have the same number of values.
@@ -1328,8 +1327,6 @@
         Which columns to read, with 0 being the first.  For example,
         ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
         The default, None, results in all columns being read.
-    skiprows : int, optional
-        Skip the first `skiprows` lines; default: 0.
     comments : str, optional
         The character used to indicate the start of a comment;
         default: '#'.
@@ -1341,19 +1338,19 @@
     f = open(fname, 'r')
     next_one = False
     for line in f.readlines():
-        words = line.strip.split()
-        if next_one:
-            units = words[1:]
-            break
-        elif words[1] == "Units":
-            next_one = True
+        words = line.strip().split()
+        if len(words) > 1 and words[0] == comments:
+            if next_one:
+                units = words[1:]
+                break
+            elif words[1] == "Units":
+                next_one = True
     f.close()
     if usecols is not None:
         units = [units[col] for col in usecols]
-    arrays = np.loadtxt(fname, dtype=dtype, comments="#",
+    arrays = np.loadtxt(fname, dtype=dtype, comments=comments,
                         delimiter=delimiter, converters=None,
-                        unpack=True, usecols=usecols, ndmin=0,
-                        skiprows=skiprows, comments=comments)
+                        unpack=True, usecols=usecols, ndmin=0)
     return tuple([YTArray(arr, unit) for arr, unit in zip(arrays, units)])
 
 def savetxt(fname, arrays, fmt='%.18e', delimiter='\t', header='',
@@ -1397,7 +1394,9 @@
             units.append(str(array.units))
         else:
             units.append("dimensionless")
-    header = header + "%s Units\n" % comments + delimiter.join(units)
+    if header != '':
+        header += '\n'
+    header += " Units\n " + '\t'.join(units)
     np.savetxt(fname, np.transpose(arrays), header=header,
                fmt=fmt, delimiter=delimiter, footer=footer,
                newline='\n', comments=comments)

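With the header format settled by this fix, a file written by yt.savetxt carries the user header, a "Units" marker line, and a tab-separated row of unit strings, all prefixed by the comment character. A sketch (filename and data are illustrative):

    import numpy as np
    import yt
    from yt.units.yt_array import YTArray

    dens = YTArray(np.random.random(5), "g/cm**3")
    temp = YTArray(np.random.random(5), "K")
    yt.savetxt("sphere.dat", [dens, temp], header="My sphere stuff")

    # The file now begins with:
    #   #My sphere stuff
    #   # Units
    #   # g/cm**3<tab>K
    # followed by the tab-delimited data rows; loadtxt looks for the "Units"
    # marker and reads the next comment line as the per-column units.
    print(open("sphere.dat").read())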

https://bitbucket.org/yt_analysis/yt/commits/5e40f3aa73d2/
Changeset:   5e40f3aa73d2
Branch:      yt
User:        jzuhone
Date:        2015-07-28 02:34:22+00:00
Summary:     Simpler way to do this
Affected #:  1 file

diff -r 4eb92f965d57778558adfe0a7e675ff690d6bb38 -r 5e40f3aa73d29c6e8bdf75d9066b36b386cdad09 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1339,12 +1339,11 @@
     next_one = False
     for line in f.readlines():
         words = line.strip().split()
-        if len(words) > 1 and words[0] == comments:
-            if next_one:
-                units = words[1:]
-                break
-            elif words[1] == "Units":
-                next_one = True
+        if next_one:
+            units = words[1:]
+            break
+        if len(words) == 2 and words[1] == "Units":
+            next_one = True
     f.close()
     if usecols is not None:
         units = [units[col] for col in usecols]
@@ -1377,7 +1376,7 @@
         String that will be prepended to the ``header`` and ``footer`` strings,
         to mark them as comments. Default: '# ', as expected by e.g.
         ``yt.loadtxt``.
-    
+
     Examples
     --------
     >>> sp = ds.sphere("c", (100,"kpc"))


https://bitbucket.org/yt_analysis/yt/commits/f79e2c55e6ce/
Changeset:   f79e2c55e6ce
Branch:      yt
User:        jzuhone
Date:        2015-07-28 14:30:21+00:00
Summary:     Testing loading and saving arrays to ASCII
Affected #:  1 file

diff -r 5e40f3aa73d29c6e8bdf75d9066b36b386cdad09 -r f79e2c55e6ce6b8ff415b3e18bcd182765cce2eb yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -35,7 +35,7 @@
     YTArray, YTQuantity, \
     unary_operators, binary_operators, \
     uconcatenate, uintersect1d, \
-    uunion1d
+    uunion1d, loadtxt, savetxt
 from yt.utilities.exceptions import \
     YTUnitOperationError, YTUfuncUnitError
 from yt.testing import fake_random_ds, requires_module
@@ -1050,3 +1050,22 @@
     yield assert_true, ret == 0.5
     yield assert_true, ret.units.is_dimensionless
     yield assert_true, ret.units.base_value == 1.0
+
+def test_load_and_save():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    a = YTArray(np.random.random(10), "kpc")
+    b = YTArray(np.random.random(10), "Msun")
+    c = YTArray(np.random.random(10), "km/s")
+
+    savetxt("arrays.dat", [a,b,c], delimiter=",")
+
+    d, e = loadtxt("arrays.dat", usecols=(1,2), delimiter=",")
+
+    yield assert_array_equal, b, d
+    yield assert_array_equal, c, e
+
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)


https://bitbucket.org/yt_analysis/yt/commits/5c4b1a712543/
Changeset:   5c4b1a712543
Branch:      yt
User:        jzuhone
Date:        2015-08-10 17:27:11+00:00
Summary:     Catching a rare case where fractions are handled sloppily
Affected #:  1 file

diff -r f79e2c55e6ce6b8ff415b3e18bcd182765cce2eb -r 5c4b1a712543833d8d5a32d56212f4c70ddc6211 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -111,6 +111,8 @@
                     field_units = field_units.replace(unit, known_units[unit])
                     n += 1
             if n != len(units): field_units = "dimensionless"
+            if field_units[0] == "/":
+                field_units = "1%s" % field_units
             return field_units
         except KeyError:
             return "dimensionless"

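A standalone illustration of the case this guards against (the helper name below is made up for the example): FITS unit strings that are bare fractions, such as "/s", now get an explicit leading "1" so they parse cleanly.

    # Unit strings beginning with "/" (a bare fraction) get a leading "1",
    # mirroring the two-line check added above.
    def normalize_fraction(field_units):
        if field_units[0] == "/":
            field_units = "1%s" % field_units
        return field_units

    print(normalize_fraction("/s"))     # -> "1/s"
    print(normalize_fraction("erg/s"))  # unchanged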

https://bitbucket.org/yt_analysis/yt/commits/3dcc127e24e6/
Changeset:   3dcc127e24e6
Branch:      yt
User:        jzuhone
Date:        2015-08-11 15:14:56+00:00
Summary:     Adding docs for reading/writing HDF5 and ASCII
Affected #:  1 file

diff -r 5c4b1a712543833d8d5a32d56212f4c70ddc6211 -r 3dcc127e24e6c0631102d56f4788fdce639fd661 doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:5d881061b9e82bd9df5d3598983c8ddc5fbec35e3bf7ae4524430dc558e27489"
+  "signature": "sha256:0dbaef644354e4d0191367f8f90e6dfd0d3d527925ef0331e1ef381c9099a8cd"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -516,9 +516,147 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Saving and Loading `YTArray`s to/from disk"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "`YTArray`s can be written to disk, to be loaded again to be used in yt or in a different context later. There are two formats that can be written to/read from: HDF5 and ASCII.  \n",
+      "\n",
+      "To write to HDF5, use `write_hdf5`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "my_dens = YTArray(np.random.random(10), 'Msun/kpc**3')\n",
+      "my_temp = YTArray(np.random.random(10), 'K')\n",
+      "my_dens.write_hdf5(\"my_data.h5\", dataset_name=\"density\")\n",
+      "my_temp.write_hdf5(\"my_data.h5\", dataset_name=\"temperature\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Where we used the `dataset_name` keyword argument to create a separate dataset for each array in the same file.\n",
+      "\n",
+      "We can use the `from_hdf5` classmethod to read the data back in:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "read_dens = YTArray.from_hdf5(\"my_data.h5\", dataset_name=\"density\")\n",
+      "print read_dens\n",
+      "print my_dens"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can use the `info` keyword argument to `write_hdf5` to write some additional data to the file, which will be stored as attributes of the dataset:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "my_vels = YTArray(np.random.normal(10), 'km/s')\n",
+      "info = {\"source\":\"galaxy cluster\",\"user\":\"jzuhone\"}\n",
+      "my_vels.write_hdf5(\"my_data.h5\", dataset_name=\"velocity\", info=info)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To write one or more `YTArray`s to an ASCII text file, use `yt.savetxt`, which works a lot like NumPy's `savetxt`, except with units:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import yt\n",
+      "a = YTArray(np.random.random(size=10), \"cm\")\n",
+      "b = YTArray(np.random.random(size=10), \"g\")\n",
+      "c = YTArray(np.random.random(size=10), \"s\")\n",
+      "yt.savetxt(\"my_data.dat\", [a,b,c], header='My cool data', footer='Data is over', delimiter=\"\\t\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The file we wrote can then be easily used in other contexts, such as plotting in Gnuplot, or loading into a spreadsheet, or just for causal examination. We can quickly check it here:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%%bash \n",
+      "more my_data.dat"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "You can see that the header comes first, and then right before the data we have a subheader marking the units of each column. The footer comes after the data. "
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "`yt.loadtxt` can be used to read the same data with units back in, or read data that has been generated from some other source. Just make sure it's in the format above. `loadtxt` can also selectively read from particular columns in the file with the `usecols` keyword argument:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "bb, cc = yt.loadtxt(\"my_data.dat\", usecols=(1,2), delimiter=\"\\t\")\n",
+      "print bb\n",
+      "print b\n",
+      "print\n",
+      "print cc\n",
+      "print c"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/fd2005bf98ed/
Changeset:   fd2005bf98ed
Branch:      yt
User:        jzuhone
Date:        2015-08-11 16:53:35+00:00
Summary:     Handling malformed units headers. If we can't read the header we issue a warning and read in the data anyway, making it dimensionless.
Affected #:  1 file

diff -r 3dcc127e24e6c0631102d56f4788fdce639fd661 -r fd2005bf98edc689b7ab43e6f55d3e4aef115e21 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -40,6 +40,7 @@
 from sympy import Rational
 from yt.units.unit_lookup_table import unit_prefixes, prefixable_units
 from yt.units.equivalencies import equivalence_registry
+from yt.utilities.logger import ytLogger as mylog
 
 NULL_UNIT = Unit()
 
@@ -1337,19 +1338,37 @@
     """
     f = open(fname, 'r')
     next_one = False
+    units = []
+    num_cols = -1
     for line in f.readlines():
         words = line.strip().split()
-        if next_one:
-            units = words[1:]
-            break
-        if len(words) == 2 and words[1] == "Units":
-            next_one = True
+        if len(words) == 0:
+            continue
+        if line[0] == comments:
+            if next_one:
+                units = words[1:]
+            if len(words) == 2 and words[1] == "Units":
+                next_one = True
+        else:
+            # Here we catch the first line of numbers
+            try:
+                float(words[0])
+                num_cols = len(words)
+                break
+            except ValueError:
+                mylog.warning("Unrecognized character at beginning of line: \"%s\"." % line[0])
     f.close()
-    if usecols is not None:
-        units = [units[col] for col in usecols]
+    if len(units) != num_cols:
+        mylog.warning("Malformed or incomplete units header. Arrays will be "
+                      "dimensionless!")
+        print(num_cols)
+        units = ["dimensionless"]*num_cols
     arrays = np.loadtxt(fname, dtype=dtype, comments=comments,
                         delimiter=delimiter, converters=None,
                         unpack=True, usecols=usecols, ndmin=0)
+    if usecols is not None:
+        units = [units[col] for col in usecols]
+    mylog.info("Array units: %s" % ", ".join(units))
     return tuple([YTArray(arr, unit) for arr, unit in zip(arrays, units)])
 
 def savetxt(fname, arrays, fmt='%.18e', delimiter='\t', header='',

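The fallback added here means a text file without yt's "Units" header can still be loaded; the arrays simply come back dimensionless. A sketch of that behavior (filename and data are illustrative):

    import numpy as np
    import yt

    # A plain two-column file with no unit header, e.g. written by np.savetxt.
    np.savetxt("plain.dat", np.random.random((5, 2)), delimiter="\t")

    # yt.loadtxt warns about the malformed/missing units header and returns
    # dimensionless YTArrays instead of raising.
    x, y = yt.loadtxt("plain.dat", delimiter="\t")
    print(x.units)  # expected: dimensionless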

https://bitbucket.org/yt_analysis/yt/commits/d7ee5246b724/
Changeset:   d7ee5246b724
Branch:      yt
User:        jzuhone
Date:        2015-08-11 16:55:21+00:00
Summary:     Making this a tad more precise
Affected #:  1 file

diff -r fd2005bf98edc689b7ab43e6f55d3e4aef115e21 -r d7ee5246b7242597c434c61da583e202a9f33df3 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1352,7 +1352,8 @@
         else:
             # Here we catch the first line of numbers
             try:
-                float(words[0])
+                for word in words:
+                    float(word)
                 num_cols = len(words)
                 break
             except ValueError:


https://bitbucket.org/yt_analysis/yt/commits/d86ada9797e3/
Changeset:   d86ada9797e3
Branch:      yt
User:        jzuhone
Date:        2015-08-11 16:55:54+00:00
Summary:     Shouldn't include the print statement
Affected #:  1 file

diff -r d7ee5246b7242597c434c61da583e202a9f33df3 -r d86ada9797e3d5a19e753487e0847fde1497d10e yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1362,7 +1362,6 @@
     if len(units) != num_cols:
         mylog.warning("Malformed or incomplete units header. Arrays will be "
                       "dimensionless!")
-        print(num_cols)
         units = ["dimensionless"]*num_cols
     arrays = np.loadtxt(fname, dtype=dtype, comments=comments,
                         delimiter=delimiter, converters=None,


https://bitbucket.org/yt_analysis/yt/commits/a733d43ebb15/
Changeset:   a733d43ebb15
Branch:      yt
User:        jzuhone
Date:        2015-08-12 02:31:29+00:00
Summary:     This wasn't done right
Affected #:  1 file

diff -r d86ada9797e3d5a19e753487e0847fde1497d10e -r a733d43ebb15b3ce47d36ff606a13a4eed627089 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1352,9 +1352,10 @@
         else:
             # Here we catch the first line of numbers
             try:
-                for word in words:
+                col_words = line.strip().split(delimiter)
+                for word in col_words:
                     float(word)
-                num_cols = len(words)
+                num_cols = len(col_words)
                 break
             except ValueError:
                 mylog.warning("Unrecognized character at beginning of line: \"%s\"." % line[0])

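A short illustration of the problem this fixes (the sample line is made up): splitting a comma-delimited data line on whitespace yields a single token, so both the float check and the column count in the previous version went wrong for non-whitespace delimiters.

    line = "0.1,0.2,0.3\n"
    print(len(line.strip().split()))     # 1 -- whitespace split sees one token
    print(len(line.strip().split(",")))  # 3 -- splitting on the actual delimiter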

https://bitbucket.org/yt_analysis/yt/commits/ef1a228a21cc/
Changeset:   ef1a228a21cc
Branch:      yt
User:        MatthewTurk
Date:        2015-08-13 16:10:44+00:00
Summary:     Merged in jzuhone/yt (pull request #1692)

Adding loadtxt and savetxt convenience functions.
Affected #:  5 files

diff -r 3e6809ef522e9b61ac781688fa3dd3d4930792a9 -r ef1a228a21cc2a8736606107e929806ca6d0c60d doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:5d881061b9e82bd9df5d3598983c8ddc5fbec35e3bf7ae4524430dc558e27489"
+  "signature": "sha256:0dbaef644354e4d0191367f8f90e6dfd0d3d527925ef0331e1ef381c9099a8cd"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -516,9 +516,147 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Saving and Loading `YTArray`s to/from disk"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "`YTArray`s can be written to disk, to be loaded again to be used in yt or in a different context later. There are two formats that can be written to/read from: HDF5 and ASCII.  \n",
+      "\n",
+      "To write to HDF5, use `write_hdf5`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "my_dens = YTArray(np.random.random(10), 'Msun/kpc**3')\n",
+      "my_temp = YTArray(np.random.random(10), 'K')\n",
+      "my_dens.write_hdf5(\"my_data.h5\", dataset_name=\"density\")\n",
+      "my_temp.write_hdf5(\"my_data.h5\", dataset_name=\"temperature\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Where we used the `dataset_name` keyword argument to create a separate dataset for each array in the same file.\n",
+      "\n",
+      "We can use the `from_hdf5` classmethod to read the data back in:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "read_dens = YTArray.from_hdf5(\"my_data.h5\", dataset_name=\"density\")\n",
+      "print read_dens\n",
+      "print my_dens"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can use the `info` keyword argument to `write_hdf5` to write some additional data to the file, which will be stored as attributes of the dataset:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "my_vels = YTArray(np.random.normal(10), 'km/s')\n",
+      "info = {\"source\":\"galaxy cluster\",\"user\":\"jzuhone\"}\n",
+      "my_vels.write_hdf5(\"my_data.h5\", dataset_name=\"velocity\", info=info)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To write one or more `YTArray`s to an ASCII text file, use `yt.savetxt`, which works a lot like NumPy's `savetxt`, except with units:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import yt\n",
+      "a = YTArray(np.random.random(size=10), \"cm\")\n",
+      "b = YTArray(np.random.random(size=10), \"g\")\n",
+      "c = YTArray(np.random.random(size=10), \"s\")\n",
+      "yt.savetxt(\"my_data.dat\", [a,b,c], header='My cool data', footer='Data is over', delimiter=\"\\t\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The file we wrote can then be easily used in other contexts, such as plotting in Gnuplot, or loading into a spreadsheet, or just for causal examination. We can quickly check it here:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%%bash \n",
+      "more my_data.dat"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "You can see that the header comes first, and then right before the data we have a subheader marking the units of each column. The footer comes after the data. "
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "`yt.loadtxt` can be used to read the same data with units back in, or read data that has been generated from some other source. Just make sure it's in the format above. `loadtxt` can also selectively read from particular columns in the file with the `usecols` keyword argument:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "bb, cc = yt.loadtxt(\"my_data.dat\", usecols=(1,2), delimiter=\"\\t\")\n",
+      "print bb\n",
+      "print b\n",
+      "print\n",
+      "print cc\n",
+      "print c"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

diff -r 3e6809ef522e9b61ac781688fa3dd3d4930792a9 -r ef1a228a21cc2a8736606107e929806ca6d0c60d yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -103,7 +103,9 @@
     YTQuantity, \
     uconcatenate, \
     uintersect1d, \
-    uunion1d
+    uunion1d, \
+    loadtxt, \
+    savetxt
 
 from yt.fields.api import \
     field_plugins, \
@@ -169,4 +171,3 @@
 # Import some helpful math utilities
 from yt.utilities.math_utils import \
     ortho_find, quartiles, periodic_position
-

diff -r 3e6809ef522e9b61ac781688fa3dd3d4930792a9 -r ef1a228a21cc2a8736606107e929806ca6d0c60d yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -111,6 +111,8 @@
                     field_units = field_units.replace(unit, known_units[unit])
                     n += 1
             if n != len(units): field_units = "dimensionless"
+            if field_units[0] == "/":
+                field_units = "1%s" % field_units
             return field_units
         except KeyError:
             return "dimensionless"

diff -r 3e6809ef522e9b61ac781688fa3dd3d4930792a9 -r ef1a228a21cc2a8736606107e929806ca6d0c60d yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -35,7 +35,7 @@
     YTArray, YTQuantity, \
     unary_operators, binary_operators, \
     uconcatenate, uintersect1d, \
-    uunion1d
+    uunion1d, loadtxt, savetxt
 from yt.utilities.exceptions import \
     YTUnitOperationError, YTUfuncUnitError
 from yt.testing import fake_random_ds, requires_module
@@ -1061,3 +1061,22 @@
     yield assert_true, ret == 0.5
     yield assert_true, ret.units.is_dimensionless
     yield assert_true, ret.units.base_value == 1.0
+
+def test_load_and_save():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    a = YTArray(np.random.random(10), "kpc")
+    b = YTArray(np.random.random(10), "Msun")
+    c = YTArray(np.random.random(10), "km/s")
+
+    savetxt("arrays.dat", [a,b,c], delimiter=",")
+
+    d, e = loadtxt("arrays.dat", usecols=(1,2), delimiter=",")
+
+    yield assert_array_equal, b, d
+    yield assert_array_equal, c, e
+
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)

diff -r 3e6809ef522e9b61ac781688fa3dd3d4930792a9 -r ef1a228a21cc2a8736606107e929806ca6d0c60d yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -40,6 +40,7 @@
 from sympy import Rational
 from yt.units.unit_lookup_table import unit_prefixes, prefixable_units
 from yt.units.equivalencies import equivalence_registry
+from yt.utilities.logger import ytLogger as mylog
 
 NULL_UNIT = Unit()
 
@@ -1229,7 +1230,7 @@
 def ucross(arr1,arr2, registry=None):
     """Applies the cross product to two YT arrays.
 
-    This wrapper around numpy.cross preserves units.  
+    This wrapper around numpy.cross preserves units.
     See the documentation of numpy.cross for full
     details.
     """
@@ -1310,3 +1311,113 @@
     else:
         raise RuntimeError("Undefined operation for a YTArray subclass. "
                            "Received operand types (%s) and (%s)" % (cls1, cls2))
+
+def loadtxt(fname, dtype='float', delimiter='\t', usecols=None, comments='#'):
+    r"""
+    Load YTArrays with unit information from a text file. Each row in the
+    text file must have the same number of values.
+
+    Parameters
+    ----------
+    fname : str
+        Filename to read. 
+    dtype : data-type, optional
+        Data-type of the resulting array; default: float.
+    delimiter : str, optional
+        The string used to separate values.  By default, this is any
+        whitespace.
+    usecols : sequence, optional
+        Which columns to read, with 0 being the first.  For example,
+        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
+        The default, None, results in all columns being read.
+    comments : str, optional
+        The character used to indicate the start of a comment;
+        default: '#'.
+
+    Examples
+    --------
+    >>> temp, velx = yt.loadtxt("sphere.dat", usecols=(1,2), delimiter="\t")
+    """
+    f = open(fname, 'r')
+    next_one = False
+    units = []
+    num_cols = -1
+    for line in f.readlines():
+        words = line.strip().split()
+        if len(words) == 0:
+            continue
+        if line[0] == comments:
+            if next_one:
+                units = words[1:]
+            if len(words) == 2 and words[1] == "Units":
+                next_one = True
+        else:
+            # Here we catch the first line of numbers
+            try:
+                col_words = line.strip().split(delimiter)
+                for word in col_words:
+                    float(word)
+                num_cols = len(col_words)
+                break
+            except ValueError:
+                mylog.warning("Unrecognized character at beginning of line: \"%s\"." % line[0])
+    f.close()
+    if len(units) != num_cols:
+        mylog.warning("Malformed or incomplete units header. Arrays will be "
+                      "dimensionless!")
+        units = ["dimensionless"]*num_cols
+    arrays = np.loadtxt(fname, dtype=dtype, comments=comments,
+                        delimiter=delimiter, converters=None,
+                        unpack=True, usecols=usecols, ndmin=0)
+    if usecols is not None:
+        units = [units[col] for col in usecols]
+    mylog.info("Array units: %s" % ", ".join(units))
+    return tuple([YTArray(arr, unit) for arr, unit in zip(arrays, units)])
+
+def savetxt(fname, arrays, fmt='%.18e', delimiter='\t', header='',
+            footer='', comments='#'):
+    r"""
+    Write YTArrays with unit information to a text file.
+    
+    Parameters
+    ----------
+    fname : str
+        The file to write the YTArrays to.
+    arrays : list of YTArrays or single YTArray
+        The array(s) to write to the file.
+    fmt : str or sequence of strs, optional
+        A single format (%10.5f), or a sequence of formats. 
+    delimiter : str, optional
+        String or character separating columns.
+    header : str, optional
+        String that will be written at the beginning of the file, before the
+        unit header.
+    footer : str, optional
+        String that will be written at the end of the file.
+    comments : str, optional
+        String that will be prepended to the ``header`` and ``footer`` strings,
+        to mark them as comments. Default: '# ', as expected by e.g.
+        ``yt.loadtxt``.
+
+    Examples
+    --------
+    >>> sp = ds.sphere("c", (100,"kpc"))
+    >>> a = sphere["density"]
+    >>> b = sphere["temperature"]
+    >>> c = sphere["velocity_x"]
+    >>> yt.savetxt("sphere.dat", [a,b,c], header='My sphere stuff', delimiter="\t")
+    """
+    if not isinstance(arrays, list):
+        arrays = [arrays]
+    units = []
+    for array in arrays:
+        if hasattr(array, "units"):
+            units.append(str(array.units))
+        else:
+            units.append("dimensionless")
+    if header != '':
+        header += '\n'
+    header += " Units\n " + '\t'.join(units)
+    np.savetxt(fname, np.transpose(arrays), header=header,
+               fmt=fmt, delimiter=delimiter, footer=footer,
+               newline='\n', comments=comments)

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

