[yt-svn] commit/yt: ngoldbaum: Merged in jzuhone/yt-3.x (pull request #1884)

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Tue Dec 8 08:58:28 PST 2015


1 new commit in yt:

https://bitbucket.org/yt_analysis/yt/commits/da35da6e1f39/
Changeset:   da35da6e1f39
Branch:      yt
User:        ngoldbaum
Date:        2015-12-08 16:58:18+00:00
Summary:     Merged in jzuhone/yt-3.x (pull request #1884)

Read/write YTArrays from/to groups within HDF5 files
Affected #:  3 files

diff -r 753456ba481faf74ba9e4f7e6def423537be8b32 -r da35da6e1f39944797ea2b76df26ac69a606cbb8 doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:0dbaef644354e4d0191367f8f90e6dfd0d3d527925ef0331e1ef381c9099a8cd"
+  "signature": "sha256:6d823c3543f4183db8d28ad5003183515a69ce533fcfff00d92db0372afc3930"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -529,8 +529,21 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "`YTArray`s can be written to disk, to be loaded again to be used in yt or in a different context later. There are two formats that can be written to/read from: HDF5 and ASCII.  \n",
-      "\n",
+      "`YTArray`s can be written to disk, to be loaded again to be used in yt or in a different context later. There are two formats that can be written to/read from: HDF5 and ASCII.  "
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 4,
+     "metadata": {},
+     "source": [
+      "HDF5"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
       "To write to HDF5, use `write_hdf5`:"
      ]
     },
@@ -591,6 +604,38 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
+      "If you want to read/write a dataset from/to a specific group within the HDF5 file, use the `group_name` keyword:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "my_vels.write_hdf5(\"data_in_group.h5\", dataset_name=\"velocity\", info=info, group_name=\"/data/fields\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "where we have used the standard HDF5 slash notation for writing a group hierarchy (e.g., a group within a group)."
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 4,
+     "metadata": {},
+     "source": [
+      "ASCII"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
       "To write one or more `YTArray`s to an ASCII text file, use `yt.savetxt`, which works a lot like NumPy's `savetxt`, except with units:"
      ]
     },

diff -r 753456ba481faf74ba9e4f7e6def423537be8b32 -r da35da6e1f39944797ea2b76df26ac69a606cbb8 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -925,6 +925,12 @@
     yield assert_equal, warr, iarr
     yield assert_equal, warr.units.registry['code_length'], iarr.units.registry['code_length']
 
+    warr.write_hdf5('test.h5', dataset_name="test_dset", group_name='/arrays/test_group')
+
+    giarr = YTArray.from_hdf5('test.h5', dataset_name="test_dset", group_name='/arrays/test_group')
+
+    yield assert_equal, warr, giarr
+
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
 

diff -r 753456ba481faf74ba9e4f7e6def423537be8b32 -r da35da6e1f39944797ea2b76df26ac69a606cbb8 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -719,8 +719,8 @@
     # End unit conversion methods
     #
 
-    def write_hdf5(self, filename, dataset_name=None, info=None):
-        r"""Writes ImageArray to hdf5 file.
+    def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None):
+        r"""Writes a YTArray to an HDF5 file.
 
         Parameters
         ----------
@@ -733,16 +733,17 @@
         info: dictionary
             A dictionary of supplementary info to write to append as attributes
             to the dataset.
+
+        group_name: string
+            An optional group to write the arrays to. If not specified, the arrays
+            are datasets at the top level by default.
 
         Examples
         --------
         >>> a = YTArray([1,2,3], 'cm')
-
         >>> myinfo = {'field':'dinosaurs', 'type':'field_data'}
-
         >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
         ...              info=myinfo)
-
         """
         import h5py
         from yt.extern.six.moves import cPickle as pickle
@@ -756,8 +757,15 @@
             dataset_name = 'array_data'
 
         f = h5py.File(filename)
-        if dataset_name in f.keys():
-            d = f[dataset_name]
+        if group_name is not None:
+            if group_name in f:
+                g = f[group_name]
+            else:
+                g = f.create_group(group_name)
+        else:
+            g = f
+        if dataset_name in g.keys():
+            d = g[dataset_name]
             # Overwrite without deleting if we can get away with it.
             if d.shape == self.shape and d.dtype == self.dtype:
                 d[:] = self
@@ -765,16 +773,16 @@
                     del d.attrs[k]
             else:
                 del f[dataset_name]
-                d = f.create_dataset(dataset_name, data=self)
+                d = g.create_dataset(dataset_name, data=self)
         else:
-            d = f.create_dataset(dataset_name, data=self)
+            d = g.create_dataset(dataset_name, data=self)
 
         for k, v in info.items():
             d.attrs[k] = v
         f.close()
 
     @classmethod
-    def from_hdf5(cls, filename, dataset_name=None):
+    def from_hdf5(cls, filename, dataset_name=None, group_name=None):
         r"""Attempts to read in and convert a dataset in an HDF5 file into a YTArray.
 
         Parameters
@@ -786,6 +794,10 @@
             The name of the dataset to read from.  If the dataset has a units
             attribute, attempt to infer units as well.
 
+        group_name: string
+            An optional group to read the arrays from. If not specified, the arrays
+            are datasets at the top level by default.
+
         """
         import h5py
         from yt.extern.six.moves import cPickle as pickle
@@ -794,7 +806,11 @@
             dataset_name = 'array_data'
 
         f = h5py.File(filename)
-        dataset = f[dataset_name]
+        if group_name is not None:
+            g = f[group_name]
+        else:
+            g = f
+        dataset = g[dataset_name]
         data = dataset[:]
         units = dataset.attrs.get('units', '')
         if 'unit_registry' in dataset.attrs.keys():

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list