[yt-svn] commit/yt: 5 new changesets

commits-noreply at bitbucket.org
Fri Nov 18 08:09:44 PST 2016


5 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/4efd2f0531f4/
Changeset:   4efd2f0531f4
Branch:      yt
User:        ngoldbaum
Date:        2016-10-28 16:07:48+00:00
Summary:     miscellaneous style cleanups in OWLS io.py
Affected #:  1 file

diff -r d15775018c266934317e4021fad305b4d37e08c5 -r 4efd2f0531f40e8ff918fde742864851fa37c8b0 yt/frontends/owls/io.py
--- a/yt/frontends/owls/io.py
+++ b/yt/frontends/owls/io.py
@@ -172,16 +172,16 @@
 
             # only want particle data
             #--------------------------------------
-            if not key.startswith("PartType"): continue
+            if not key.startswith("PartType"):
+                continue
 
             # particle data group
             #--------------------------------------
             g = f[key]
-            if cname not in g: continue
+            if cname not in g:
+                continue
 
             # note str => not unicode!
-
-            #ptype = int(key[8:])
             ptype = str(key)
             if ptype not in self.var_mass:
                 fields.append((ptype, mname))
@@ -199,14 +199,14 @@
                     # Vector of metallicity
                     for i in range(g[k].shape[1]):
                         fields.append((ptype, "Metallicity_%02i" % i))
-                elif k == "ChemistryAbundances" and len(g[k].shape)>1:
+                elif k == "ChemistryAbundances" and len(g[k].shape) > 1:
                     for i in range(g[k].shape[1]):
                         fields.append((ptype, "Chemistry_%03i" % i))
                 else:
                     kk = k
-                    if not hasattr(g[kk], "shape"): continue
+                    if not hasattr(g[kk], "shape"):
+                        continue
                     fields.append((ptype, str(kk)))
 
-
         f.close()
         return fields, {}
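
For context, these cleanups follow PEP 8, which discourages compound
statements (a guard clause and its body sharing one line). A minimal
before/after sketch; "keys" and "process" are placeholder names, not yt
API:

    # discouraged: guard clause and body on one line
    for key in keys:
        if not key.startswith("PartType"): continue
        process(key)

    # preferred (what this commit does): the body gets its own suite
    for key in keys:
        if not key.startswith("PartType"):
            continue
        process(key)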


https://bitbucket.org/yt_analysis/yt/commits/d93abf83bf1a/
Changeset:   d93abf83bf1a
Branch:      yt
User:        ngoldbaum
Date:        2016-10-28 16:08:24+00:00
Summary:     Identify unknown vector fields during field detection for gadget. Closes #1294.
Affected #:  1 file

diff -r 4efd2f0531f40e8ff918fde742864851fa37c8b0 -r d93abf83bf1ac7bf7a9bcf55a3933ead6afdd867 yt/frontends/owls/io.py
--- a/yt/frontends/owls/io.py
+++ b/yt/frontends/owls/io.py
@@ -206,6 +206,8 @@
                     kk = k
                     if not hasattr(g[kk], "shape"):
                         continue
+                    if len(g[kk].shape) > 1:
+                        self._vector_fields[kk] = g[kk].shape[1]
                     fields.append((ptype, str(kk)))
 
         f.close()
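
The new check records how many components each multi-dimensional dataset
carries. Note that _vector_fields is declared as a tuple at class level;
the assignment "self._vector_fields[kk] = g[kk].shape[1]" implies it is
converted to a mutable mapping (field name -> component count) during
handler initialization. A hedged standalone sketch of the detection rule,
where find_vector_fields and the snapshot filename are illustrative names,
not yt API:

    import h5py

    def find_vector_fields(group):
        """Map each multi-component dataset name to its component count."""
        vector_fields = {}
        for name, dset in group.items():
            # sub-groups (e.g. ElementAbundance) have no shape; skip them
            if not hasattr(dset, "shape"):
                continue
            if len(dset.shape) > 1:
                # an (N, M) dataset stores M components per particle
                vector_fields[name] = dset.shape[1]
        return vector_fields

    # usage (hypothetical filename):
    # with h5py.File("snapshot_000.hdf5", "r") as f:
    #     print(find_vector_fields(f["PartType0"]))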


https://bitbucket.org/yt_analysis/yt/commits/d8d6be6d4ad1/
Changeset:   d8d6be6d4ad1
Branch:      yt
User:        ngoldbaum
Date:        2016-10-28 16:19:01+00:00
Summary:     Make IOHandlerOWLS subclass IOHandlerGadgetHDF5 instead of the other way around

This makes the inheritance hierarchy clearer to someone who is unaware of
the development history. While it's true that we supported OWLS before
generic Gadget HDF5 data, that historical artifact shouldn't be encoded in
the class hierarchy. Since OWLS data is a subset of Gadget HDF5 data, it's
more logical to keep the bulk of the implementation in the generic Gadget
HDF5 handler.
Affected #:  4 files
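
In outline, the change flips the subclass relationship; a schematic sketch
(details are in the diff below):

    # before: the generic handler inherited from the OWLS-specific one
    class IOHandlerOWLS(BaseIOHandler):
        _dataset_type = "OWLS"
        # ... full implementation lived here ...

    class IOHandlerGadgetHDF5(IOHandlerOWLS):
        _dataset_type = "gadget_hdf5"

    # after: the generic Gadget HDF5 handler carries the implementation
    # and OWLS only overrides the dataset type tag
    class IOHandlerGadgetHDF5(BaseIOHandler):
        _dataset_type = "gadget_hdf5"
        # ... full implementation lives here ...

    class IOHandlerOWLS(IOHandlerGadgetHDF5):
        _dataset_type = "OWLS"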

diff -r d93abf83bf1ac7bf7a9bcf55a3933ead6afdd867 -r d8d6be6d4ad11dca18cb3ccbd6a2cbd491a57ad4 yt/frontends/gadget/definitions.py
--- a/yt/frontends/gadget/definitions.py
+++ b/yt/frontends/gadget/definitions.py
@@ -84,3 +84,12 @@
                    ("StellarAge", "Stars")
     ),
 )
+
+gadget_hdf5_ptypes  = (
+    "PartType0",
+    "PartType1",
+    "PartType2",
+    "PartType3",
+    "PartType4",
+    "PartType5"
+)

diff -r d93abf83bf1ac7bf7a9bcf55a3933ead6afdd867 -r d8d6be6d4ad11dca18cb3ccbd6a2cbd491a57ad4 yt/frontends/gadget/io.py
--- a/yt/frontends/gadget/io.py
+++ b/yt/frontends/gadget/io.py
@@ -19,18 +19,202 @@
 import os
 
 from yt.extern.six import string_types
-from yt.frontends.owls.io import \
-    IOHandlerOWLS
 from yt.utilities.io_handler import \
     BaseIOHandler
 from yt.utilities.lib.geometry_utils import \
     compute_morton
 from yt.utilities.logger import ytLogger as mylog
+from yt.utilities.on_demand_imports import _h5py as h5py
 
-from .data_structures import _get_gadget_format
+from .data_structures import \
+    _get_gadget_format
 
-class IOHandlerGadgetHDF5(IOHandlerOWLS):
+from .definitions import \
+    gadget_hdf5_ptypes
+
+def _get_h5_handle(fn):
+    try:
+        f = h5py.File(fn, "r")
+    except IOError:
+        print("ERROR OPENING %s" % (fn))
+        if os.path.exists(fn):
+            print("FILENAME EXISTS")
+        else:
+            print("FILENAME DOES NOT EXIST")
+        raise
+    return f
+
+class IOHandlerGadgetHDF5(BaseIOHandler):
     _dataset_type = "gadget_hdf5"
+    _vector_fields = ("Coordinates", "Velocity", "Velocities")
+    _known_ptypes = gadget_hdf5_ptypes
+    _var_mass = None
+    _element_names = ('Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen',
+                       'Neon', 'Magnesium', 'Silicon', 'Iron' )
+
+
+    @property
+    def var_mass(self):
+        if self._var_mass is None:
+            vm = []
+            for i, v in enumerate(self.ds["Massarr"]):
+                if v == 0:
+                    vm.append(self._known_ptypes[i])
+            self._var_mass = tuple(vm)
+        return self._var_mass
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        raise NotImplementedError
+
+    def _read_particle_coords(self, chunks, ptf):
+        # This will read chunks and yield the results.
+        chunks = list(chunks)
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files, key=lambda x: x.filename):
+            f = _get_h5_handle(data_file.filename)
+            # This double-reads
+            for ptype, field_list in sorted(ptf.items()):
+                if data_file.total_particles[ptype] == 0:
+                    continue
+                x = f["/%s/Coordinates" % ptype][:,0].astype("float64")
+                y = f["/%s/Coordinates" % ptype][:,1].astype("float64")
+                z = f["/%s/Coordinates" % ptype][:,2].astype("float64")
+                yield ptype, (x, y, z)
+            f.close()
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        # Now we have all the sizes, and we can allocate
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files, key=lambda x: x.filename):
+            f = _get_h5_handle(data_file.filename)
+            for ptype, field_list in sorted(ptf.items()):
+                if data_file.total_particles[ptype] == 0:
+                    continue
+                g = f["/%s" % ptype]
+                coords = g["Coordinates"][:].astype("float64")
+                mask = selector.select_points(
+                            coords[:,0], coords[:,1], coords[:,2], 0.0)
+                del coords
+                if mask is None: continue
+                for field in field_list:
+
+                    if field in ("Mass", "Masses") and \
+                        ptype not in self.var_mass:
+                        data = np.empty(mask.sum(), dtype="float64")
+                        ind = self._known_ptypes.index(ptype)
+                        data[:] = self.ds["Massarr"][ind]
+
+                    elif field in self._element_names:
+                        rfield = 'ElementAbundance/' + field
+                        data = g[rfield][:][mask,...]
+                    elif field.startswith("Metallicity_"):
+                        col = int(field.rsplit("_", 1)[-1])
+                        data = g["Metallicity"][:,col][mask]
+                    elif field.startswith("Chemistry_"):
+                        col = int(field.rsplit("_", 1)[-1])
+                        data = g["ChemistryAbundances"][:,col][mask]
+                    else:
+                        data = g[field][:][mask,...]
+
+                    yield (ptype, field), data
+            f.close()
+
+    def _initialize_index(self, data_file, regions):
+        index_ptype = self.index_ptype
+        f = _get_h5_handle(data_file.filename)
+        if index_ptype == "all":
+            pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
+            keys = f.keys()
+        else:
+            pt = int(index_ptype[-1])
+            pcount = f["/Header"].attrs["NumPart_ThisFile"][pt]
+            keys = [index_ptype]
+        morton = np.empty(pcount, dtype='uint64')
+        ind = 0
+        for key in keys:
+            if not key.startswith("PartType"): continue
+            if "Coordinates" not in f[key]: continue
+            ds = f[key]["Coordinates"]
+            dt = ds.dtype.newbyteorder("N") # Native
+            pos = np.empty(ds.shape, dtype=dt)
+            pos[:] = ds
+            regions.add_data_file(pos, data_file.file_id,
+                                  data_file.ds.filter_bbox)
+            morton[ind:ind+pos.shape[0]] = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.ds.domain_left_edge,
+                data_file.ds.domain_right_edge,
+                data_file.ds.filter_bbox)
+            ind += pos.shape[0]
+        f.close()
+        return morton
+
+    def _count_particles(self, data_file):
+        f = _get_h5_handle(data_file.filename)
+        pcount = f["/Header"].attrs["NumPart_ThisFile"][:]
+        f.close()
+        npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount))
+        return npart
+
+
+    def _identify_fields(self, data_file):
+        f = _get_h5_handle(data_file.filename)
+        fields = []
+        cname = self.ds._particle_coordinates_name  # Coordinates
+        mname = self.ds._particle_mass_name  # Mass
+
+        # loop over all keys in OWLS hdf5 file
+        #--------------------------------------------------
+        for key in f.keys():
+
+            # only want particle data
+            #--------------------------------------
+            if not key.startswith("PartType"):
+                continue
+
+            # particle data group
+            #--------------------------------------
+            g = f[key]
+            if cname not in g:
+                continue
+
+            # note str => not unicode!
+            ptype = str(key)
+            if ptype not in self.var_mass:
+                fields.append((ptype, mname))
+
+            # loop over all keys in PartTypeX group
+            #----------------------------------------
+            for k in g.keys():
+
+                if k == 'ElementAbundance':
+                    gp = g[k]
+                    for j in gp.keys():
+                        kk = j
+                        fields.append((ptype, str(kk)))
+                elif k == 'Metallicity' and len(g[k].shape) > 1:
+                    # Vector of metallicity
+                    for i in range(g[k].shape[1]):
+                        fields.append((ptype, "Metallicity_%02i" % i))
+                elif k == "ChemistryAbundances" and len(g[k].shape) > 1:
+                    for i in range(g[k].shape[1]):
+                        fields.append((ptype, "Chemistry_%03i" % i))
+                else:
+                    kk = k
+                    if not hasattr(g[kk], "shape"):
+                        continue
+                    if len(g[kk].shape) > 1:
+                        self._vector_fields[kk] = g[kk].shape[1]
+                    fields.append((ptype, str(kk)))
+
+        f.close()
+        return fields, {}
 
 ZeroMass = object()
 

diff -r d93abf83bf1ac7bf7a9bcf55a3933ead6afdd867 -r d8d6be6d4ad11dca18cb3ccbd6a2cbd491a57ad4 yt/frontends/owls/definitions.py
--- a/yt/frontends/owls/definitions.py
+++ b/yt/frontends/owls/definitions.py
@@ -13,6 +13,3 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
-
-ghdf5_ptypes  = ("PartType0", "PartType1", "PartType2", "PartType3",
-                 "PartType4", "PartType5")

diff -r d93abf83bf1ac7bf7a9bcf55a3933ead6afdd867 -r d8d6be6d4ad11dca18cb3ccbd6a2cbd491a57ad4 yt/frontends/owls/io.py
--- a/yt/frontends/owls/io.py
+++ b/yt/frontends/owls/io.py
@@ -15,200 +15,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.utilities.on_demand_imports import _h5py as h5py
-import numpy as np
-import os
-
-from yt.utilities.io_handler import \
-    BaseIOHandler
-from yt.utilities.lib.geometry_utils import \
-    compute_morton
-
-from .definitions import \
-    ghdf5_ptypes
-
-CHUNKSIZE = 10000000
-
-def _get_h5_handle(fn):
-    try:
-        f = h5py.File(fn, "r")
-    except IOError:
-        print("ERROR OPENING %s" % (fn))
-        if os.path.exists(fn):
-            print("FILENAME EXISTS")
-        else:
-            print("FILENAME DOES NOT EXIST")
-        raise
-    return f
-
-class IOHandlerOWLS(BaseIOHandler):
-    _dataset_type = "OWLS"
-    _vector_fields = ("Coordinates", "Velocity", "Velocities")
-    _known_ptypes = ghdf5_ptypes
-    _var_mass = None
-    _element_names = ('Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen',
-                       'Neon', 'Magnesium', 'Silicon', 'Iron' )
-
-
-    @property
-    def var_mass(self):
-        if self._var_mass is None:
-            vm = []
-            for i, v in enumerate(self.ds["Massarr"]):
-                if v == 0:
-                    vm.append(self._known_ptypes[i])
-            self._var_mass = tuple(vm)
-        return self._var_mass
-
-    def _read_fluid_selection(self, chunks, selector, fields, size):
-        raise NotImplementedError
-
-    def _read_particle_coords(self, chunks, ptf):
-        # This will read chunks and yield the results.
-        chunks = list(chunks)
-        data_files = set([])
-        for chunk in chunks:
-            for obj in chunk.objs:
-                data_files.update(obj.data_files)
-        for data_file in sorted(data_files, key=lambda x: x.filename):
-            f = _get_h5_handle(data_file.filename)
-            # This double-reads
-            for ptype, field_list in sorted(ptf.items()):
-                if data_file.total_particles[ptype] == 0:
-                    continue
-                x = f["/%s/Coordinates" % ptype][:,0].astype("float64")
-                y = f["/%s/Coordinates" % ptype][:,1].astype("float64")
-                z = f["/%s/Coordinates" % ptype][:,2].astype("float64")
-                yield ptype, (x, y, z)
-            f.close()
-
-    def _read_particle_fields(self, chunks, ptf, selector):
-        # Now we have all the sizes, and we can allocate
-        data_files = set([])
-        for chunk in chunks:
-            for obj in chunk.objs:
-                data_files.update(obj.data_files)
-        for data_file in sorted(data_files, key=lambda x: x.filename):
-            f = _get_h5_handle(data_file.filename)
-            for ptype, field_list in sorted(ptf.items()):
-                if data_file.total_particles[ptype] == 0:
-                    continue
-                g = f["/%s" % ptype]
-                coords = g["Coordinates"][:].astype("float64")
-                mask = selector.select_points(
-                            coords[:,0], coords[:,1], coords[:,2], 0.0)
-                del coords
-                if mask is None: continue
-                for field in field_list:
-
-                    if field in ("Mass", "Masses") and \
-                        ptype not in self.var_mass:
-                        data = np.empty(mask.sum(), dtype="float64")
-                        ind = self._known_ptypes.index(ptype)
-                        data[:] = self.ds["Massarr"][ind]
+from yt.frontends.gadget.io import \
+    IOHandlerGadgetHDF5
 
-                    elif field in self._element_names:
-                        rfield = 'ElementAbundance/' + field
-                        data = g[rfield][:][mask,...]
-                    elif field.startswith("Metallicity_"):
-                        col = int(field.rsplit("_", 1)[-1])
-                        data = g["Metallicity"][:,col][mask]
-                    elif field.startswith("Chemistry_"):
-                        col = int(field.rsplit("_", 1)[-1])
-                        data = g["ChemistryAbundances"][:,col][mask]
-                    else:
-                        data = g[field][:][mask,...]
-
-                    yield (ptype, field), data
-            f.close()
-
-    def _initialize_index(self, data_file, regions):
-        index_ptype = self.index_ptype
-        f = _get_h5_handle(data_file.filename)
-        if index_ptype == "all":
-            pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
-            keys = f.keys()
-        else:
-            pt = int(index_ptype[-1])
-            pcount = f["/Header"].attrs["NumPart_ThisFile"][pt]
-            keys = [index_ptype]
-        morton = np.empty(pcount, dtype='uint64')
-        ind = 0
-        for key in keys:
-            if not key.startswith("PartType"): continue
-            if "Coordinates" not in f[key]: continue
-            ds = f[key]["Coordinates"]
-            dt = ds.dtype.newbyteorder("N") # Native
-            pos = np.empty(ds.shape, dtype=dt)
-            pos[:] = ds
-            regions.add_data_file(pos, data_file.file_id,
-                                  data_file.ds.filter_bbox)
-            morton[ind:ind+pos.shape[0]] = compute_morton(
-                pos[:,0], pos[:,1], pos[:,2],
-                data_file.ds.domain_left_edge,
-                data_file.ds.domain_right_edge,
-                data_file.ds.filter_bbox)
-            ind += pos.shape[0]
-        f.close()
-        return morton
-
-    def _count_particles(self, data_file):
-        f = _get_h5_handle(data_file.filename)
-        pcount = f["/Header"].attrs["NumPart_ThisFile"][:]
-        f.close()
-        npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount))
-        return npart
-
-
-    def _identify_fields(self, data_file):
-        f = _get_h5_handle(data_file.filename)
-        fields = []
-        cname = self.ds._particle_coordinates_name  # Coordinates
-        mname = self.ds._particle_mass_name  # Mass
-
-        # loop over all keys in OWLS hdf5 file
-        #--------------------------------------------------
-        for key in f.keys():
-
-            # only want particle data
-            #--------------------------------------
-            if not key.startswith("PartType"):
-                continue
-
-            # particle data group
-            #--------------------------------------
-            g = f[key]
-            if cname not in g:
-                continue
-
-            # note str => not unicode!
-            ptype = str(key)
-            if ptype not in self.var_mass:
-                fields.append((ptype, mname))
-
-            # loop over all keys in PartTypeX group
-            #----------------------------------------
-            for k in g.keys():
-
-                if k == 'ElementAbundance':
-                    gp = g[k]
-                    for j in gp.keys():
-                        kk = j
-                        fields.append((ptype, str(kk)))
-                elif k == 'Metallicity' and len(g[k].shape) > 1:
-                    # Vector of metallicity
-                    for i in range(g[k].shape[1]):
-                        fields.append((ptype, "Metallicity_%02i" % i))
-                elif k == "ChemistryAbundances" and len(g[k].shape) > 1:
-                    for i in range(g[k].shape[1]):
-                        fields.append((ptype, "Chemistry_%03i" % i))
-                else:
-                    kk = k
-                    if not hasattr(g[kk], "shape"):
-                        continue
-                    if len(g[kk].shape) > 1:
-                        self._vector_fields[kk] = g[kk].shape[1]
-                    fields.append((ptype, str(kk)))
-
-        f.close()
-        return fields, {}
+class IOHandlerOWLS(IOHandlerGadgetHDF5):
+    _dataset_type = "OWLS"


https://bitbucket.org/yt_analysis/yt/commits/38566c0b4455/
Changeset:   38566c0b4455
Branch:      yt
User:        ngoldbaum
Date:        2016-11-08 21:48:54+00:00
Summary:     Removing unnecessary _get_h5_handle helper.

This function just prints errors directly to stdout when opening the Gadget
HDF5 file with h5py fails. The same information is already part of the
exception h5py raises, so these print statements are redundant.

It is better to get the handle directly from h5py and drop the helper
function entirely.
Affected #:  1 file
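
For reference, h5py's own exception already names the failing file, so the
replacement is a direct open. A hedged illustration (the exact message text
varies by h5py version):

    import h5py

    f = h5py.File("no_such_file.hdf5", "r")
    # raises IOError (an alias of OSError on Python 3) with a message along
    # the lines of:
    #   Unable to open file (unable to open file: name = 'no_such_file.hdf5',
    #   errno = 2, error message = 'No such file or directory', ...)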

diff -r d8d6be6d4ad11dca18cb3ccbd6a2cbd491a57ad4 -r 38566c0b44554d244f7f044895ba0f002d8ca691 yt/frontends/gadget/io.py
--- a/yt/frontends/gadget/io.py
+++ b/yt/frontends/gadget/io.py
@@ -32,17 +32,6 @@
 from .definitions import \
     gadget_hdf5_ptypes
 
-def _get_h5_handle(fn):
-    try:
-        f = h5py.File(fn, "r")
-    except IOError:
-        print("ERROR OPENING %s" % (fn))
-        if os.path.exists(fn):
-            print("FILENAME EXISTS")
-        else:
-            print("FILENAME DOES NOT EXIST")
-        raise
-    return f
 
 class IOHandlerGadgetHDF5(BaseIOHandler):
     _dataset_type = "gadget_hdf5"
@@ -74,7 +63,7 @@
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
         for data_file in sorted(data_files, key=lambda x: x.filename):
-            f = _get_h5_handle(data_file.filename)
+            f = h5py.File(data_file.filename, "r")
             # This double-reads
             for ptype, field_list in sorted(ptf.items()):
                 if data_file.total_particles[ptype] == 0:
@@ -92,7 +81,7 @@
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
         for data_file in sorted(data_files, key=lambda x: x.filename):
-            f = _get_h5_handle(data_file.filename)
+            f = h5py.File(data_file.filename, "r")
             for ptype, field_list in sorted(ptf.items()):
                 if data_file.total_particles[ptype] == 0:
                     continue
@@ -127,7 +116,7 @@
 
     def _initialize_index(self, data_file, regions):
         index_ptype = self.index_ptype
-        f = _get_h5_handle(data_file.filename)
+        f = h5py.File(data_file.filename, "r")
         if index_ptype == "all":
             pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
             keys = f.keys()
@@ -156,7 +145,7 @@
         return morton
 
     def _count_particles(self, data_file):
-        f = _get_h5_handle(data_file.filename)
+        f = h5py.File(data_file.filename, "r")
         pcount = f["/Header"].attrs["NumPart_ThisFile"][:]
         f.close()
         npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount))
@@ -164,7 +153,7 @@
 
 
     def _identify_fields(self, data_file):
-        f = _get_h5_handle(data_file.filename)
+        f = h5py.File(data_file.filename, "r")
         fields = []
         cname = self.ds._particle_coordinates_name  # Coordinates
         mname = self.ds._particle_mass_name  # Mass


https://bitbucket.org/yt_analysis/yt/commits/f89d0e732f1f/
Changeset:   f89d0e732f1f
Branch:      yt
User:        MatthewTurk
Date:        2016-11-18 16:09:16+00:00
Summary:     Merged in ngoldbaum/yt (pull request #2428)

Make IOHandlerOWLS subclass IOHandlerGadgetHDF5. Closes #1294.
Affected #:  4 files

diff -r 76e74bb22aa019850724f490f23607d18dc628c0 -r f89d0e732f1f4f4d030fde1aca7e4ddff1500f7b yt/frontends/gadget/definitions.py
--- a/yt/frontends/gadget/definitions.py
+++ b/yt/frontends/gadget/definitions.py
@@ -84,3 +84,12 @@
                    ("StellarAge", "Stars")
     ),
 )
+
+gadget_hdf5_ptypes  = (
+    "PartType0",
+    "PartType1",
+    "PartType2",
+    "PartType3",
+    "PartType4",
+    "PartType5"
+)

diff -r 76e74bb22aa019850724f490f23607d18dc628c0 -r f89d0e732f1f4f4d030fde1aca7e4ddff1500f7b yt/frontends/gadget/io.py
--- a/yt/frontends/gadget/io.py
+++ b/yt/frontends/gadget/io.py
@@ -19,18 +19,191 @@
 import os
 
 from yt.extern.six import string_types
-from yt.frontends.owls.io import \
-    IOHandlerOWLS
 from yt.utilities.io_handler import \
     BaseIOHandler
 from yt.utilities.lib.geometry_utils import \
     compute_morton
 from yt.utilities.logger import ytLogger as mylog
+from yt.utilities.on_demand_imports import _h5py as h5py
 
-from .data_structures import _get_gadget_format
+from .data_structures import \
+    _get_gadget_format
 
-class IOHandlerGadgetHDF5(IOHandlerOWLS):
+from .definitions import \
+    gadget_hdf5_ptypes
+
+
+class IOHandlerGadgetHDF5(BaseIOHandler):
     _dataset_type = "gadget_hdf5"
+    _vector_fields = ("Coordinates", "Velocity", "Velocities")
+    _known_ptypes = gadget_hdf5_ptypes
+    _var_mass = None
+    _element_names = ('Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen',
+                       'Neon', 'Magnesium', 'Silicon', 'Iron' )
+
+
+    @property
+    def var_mass(self):
+        if self._var_mass is None:
+            vm = []
+            for i, v in enumerate(self.ds["Massarr"]):
+                if v == 0:
+                    vm.append(self._known_ptypes[i])
+            self._var_mass = tuple(vm)
+        return self._var_mass
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        raise NotImplementedError
+
+    def _read_particle_coords(self, chunks, ptf):
+        # This will read chunks and yield the results.
+        chunks = list(chunks)
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files, key=lambda x: x.filename):
+            f = h5py.File(data_file.filename, "r")
+            # This double-reads
+            for ptype, field_list in sorted(ptf.items()):
+                if data_file.total_particles[ptype] == 0:
+                    continue
+                x = f["/%s/Coordinates" % ptype][:,0].astype("float64")
+                y = f["/%s/Coordinates" % ptype][:,1].astype("float64")
+                z = f["/%s/Coordinates" % ptype][:,2].astype("float64")
+                yield ptype, (x, y, z)
+            f.close()
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        # Now we have all the sizes, and we can allocate
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files, key=lambda x: x.filename):
+            f = h5py.File(data_file.filename, "r")
+            for ptype, field_list in sorted(ptf.items()):
+                if data_file.total_particles[ptype] == 0:
+                    continue
+                g = f["/%s" % ptype]
+                coords = g["Coordinates"][:].astype("float64")
+                mask = selector.select_points(
+                            coords[:,0], coords[:,1], coords[:,2], 0.0)
+                del coords
+                if mask is None: continue
+                for field in field_list:
+
+                    if field in ("Mass", "Masses") and \
+                        ptype not in self.var_mass:
+                        data = np.empty(mask.sum(), dtype="float64")
+                        ind = self._known_ptypes.index(ptype)
+                        data[:] = self.ds["Massarr"][ind]
+
+                    elif field in self._element_names:
+                        rfield = 'ElementAbundance/' + field
+                        data = g[rfield][:][mask,...]
+                    elif field.startswith("Metallicity_"):
+                        col = int(field.rsplit("_", 1)[-1])
+                        data = g["Metallicity"][:,col][mask]
+                    elif field.startswith("Chemistry_"):
+                        col = int(field.rsplit("_", 1)[-1])
+                        data = g["ChemistryAbundances"][:,col][mask]
+                    else:
+                        data = g[field][:][mask,...]
+
+                    yield (ptype, field), data
+            f.close()
+
+    def _initialize_index(self, data_file, regions):
+        index_ptype = self.index_ptype
+        f = h5py.File(data_file.filename, "r")
+        if index_ptype == "all":
+            pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
+            keys = f.keys()
+        else:
+            pt = int(index_ptype[-1])
+            pcount = f["/Header"].attrs["NumPart_ThisFile"][pt]
+            keys = [index_ptype]
+        morton = np.empty(pcount, dtype='uint64')
+        ind = 0
+        for key in keys:
+            if not key.startswith("PartType"): continue
+            if "Coordinates" not in f[key]: continue
+            ds = f[key]["Coordinates"]
+            dt = ds.dtype.newbyteorder("N") # Native
+            pos = np.empty(ds.shape, dtype=dt)
+            pos[:] = ds
+            regions.add_data_file(pos, data_file.file_id,
+                                  data_file.ds.filter_bbox)
+            morton[ind:ind+pos.shape[0]] = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.ds.domain_left_edge,
+                data_file.ds.domain_right_edge,
+                data_file.ds.filter_bbox)
+            ind += pos.shape[0]
+        f.close()
+        return morton
+
+    def _count_particles(self, data_file):
+        f = h5py.File(data_file.filename, "r")
+        pcount = f["/Header"].attrs["NumPart_ThisFile"][:]
+        f.close()
+        npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount))
+        return npart
+
+
+    def _identify_fields(self, data_file):
+        f = h5py.File(data_file.filename, "r")
+        fields = []
+        cname = self.ds._particle_coordinates_name  # Coordinates
+        mname = self.ds._particle_mass_name  # Mass
+
+        # loop over all keys in OWLS hdf5 file
+        #--------------------------------------------------
+        for key in f.keys():
+
+            # only want particle data
+            #--------------------------------------
+            if not key.startswith("PartType"):
+                continue
+
+            # particle data group
+            #--------------------------------------
+            g = f[key]
+            if cname not in g:
+                continue
+
+            # note str => not unicode!
+            ptype = str(key)
+            if ptype not in self.var_mass:
+                fields.append((ptype, mname))
+
+            # loop over all keys in PartTypeX group
+            #----------------------------------------
+            for k in g.keys():
+
+                if k == 'ElementAbundance':
+                    gp = g[k]
+                    for j in gp.keys():
+                        kk = j
+                        fields.append((ptype, str(kk)))
+                elif k == 'Metallicity' and len(g[k].shape) > 1:
+                    # Vector of metallicity
+                    for i in range(g[k].shape[1]):
+                        fields.append((ptype, "Metallicity_%02i" % i))
+                elif k == "ChemistryAbundances" and len(g[k].shape) > 1:
+                    for i in range(g[k].shape[1]):
+                        fields.append((ptype, "Chemistry_%03i" % i))
+                else:
+                    kk = k
+                    if not hasattr(g[kk], "shape"):
+                        continue
+                    if len(g[kk].shape) > 1:
+                        self._vector_fields[kk] = g[kk].shape[1]
+                    fields.append((ptype, str(kk)))
+
+        f.close()
+        return fields, {}
 
 ZeroMass = object()
 

diff -r 76e74bb22aa019850724f490f23607d18dc628c0 -r f89d0e732f1f4f4d030fde1aca7e4ddff1500f7b yt/frontends/owls/definitions.py
--- a/yt/frontends/owls/definitions.py
+++ b/yt/frontends/owls/definitions.py
@@ -13,6 +13,3 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
-
-ghdf5_ptypes  = ("PartType0", "PartType1", "PartType2", "PartType3",
-                 "PartType4", "PartType5")

diff -r 76e74bb22aa019850724f490f23607d18dc628c0 -r f89d0e732f1f4f4d030fde1aca7e4ddff1500f7b yt/frontends/owls/io.py
--- a/yt/frontends/owls/io.py
+++ b/yt/frontends/owls/io.py
@@ -15,198 +15,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.utilities.on_demand_imports import _h5py as h5py
-import numpy as np
-import os
-
-from yt.utilities.io_handler import \
-    BaseIOHandler
-from yt.utilities.lib.geometry_utils import \
-    compute_morton
-
-from .definitions import \
-    ghdf5_ptypes
-
-CHUNKSIZE = 10000000
-
-def _get_h5_handle(fn):
-    try:
-        f = h5py.File(fn, "r")
-    except IOError:
-        print("ERROR OPENING %s" % (fn))
-        if os.path.exists(fn):
-            print("FILENAME EXISTS")
-        else:
-            print("FILENAME DOES NOT EXIST")
-        raise
-    return f
-
-class IOHandlerOWLS(BaseIOHandler):
-    _dataset_type = "OWLS"
-    _vector_fields = ("Coordinates", "Velocity", "Velocities")
-    _known_ptypes = ghdf5_ptypes
-    _var_mass = None
-    _element_names = ('Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen',
-                       'Neon', 'Magnesium', 'Silicon', 'Iron' )
-
-
-    @property
-    def var_mass(self):
-        if self._var_mass is None:
-            vm = []
-            for i, v in enumerate(self.ds["Massarr"]):
-                if v == 0:
-                    vm.append(self._known_ptypes[i])
-            self._var_mass = tuple(vm)
-        return self._var_mass
-
-    def _read_fluid_selection(self, chunks, selector, fields, size):
-        raise NotImplementedError
-
-    def _read_particle_coords(self, chunks, ptf):
-        # This will read chunks and yield the results.
-        chunks = list(chunks)
-        data_files = set([])
-        for chunk in chunks:
-            for obj in chunk.objs:
-                data_files.update(obj.data_files)
-        for data_file in sorted(data_files, key=lambda x: x.filename):
-            f = _get_h5_handle(data_file.filename)
-            # This double-reads
-            for ptype, field_list in sorted(ptf.items()):
-                if data_file.total_particles[ptype] == 0:
-                    continue
-                x = f["/%s/Coordinates" % ptype][:,0].astype("float64")
-                y = f["/%s/Coordinates" % ptype][:,1].astype("float64")
-                z = f["/%s/Coordinates" % ptype][:,2].astype("float64")
-                yield ptype, (x, y, z)
-            f.close()
-
-    def _read_particle_fields(self, chunks, ptf, selector):
-        # Now we have all the sizes, and we can allocate
-        data_files = set([])
-        for chunk in chunks:
-            for obj in chunk.objs:
-                data_files.update(obj.data_files)
-        for data_file in sorted(data_files, key=lambda x: x.filename):
-            f = _get_h5_handle(data_file.filename)
-            for ptype, field_list in sorted(ptf.items()):
-                if data_file.total_particles[ptype] == 0:
-                    continue
-                g = f["/%s" % ptype]
-                coords = g["Coordinates"][:].astype("float64")
-                mask = selector.select_points(
-                            coords[:,0], coords[:,1], coords[:,2], 0.0)
-                del coords
-                if mask is None: continue
-                for field in field_list:
-
-                    if field in ("Mass", "Masses") and \
-                        ptype not in self.var_mass:
-                        data = np.empty(mask.sum(), dtype="float64")
-                        ind = self._known_ptypes.index(ptype)
-                        data[:] = self.ds["Massarr"][ind]
+from yt.frontends.gadget.io import \
+    IOHandlerGadgetHDF5
 
-                    elif field in self._element_names:
-                        rfield = 'ElementAbundance/' + field
-                        data = g[rfield][:][mask,...]
-                    elif field.startswith("Metallicity_"):
-                        col = int(field.rsplit("_", 1)[-1])
-                        data = g["Metallicity"][:,col][mask]
-                    elif field.startswith("Chemistry_"):
-                        col = int(field.rsplit("_", 1)[-1])
-                        data = g["ChemistryAbundances"][:,col][mask]
-                    else:
-                        data = g[field][:][mask,...]
-
-                    yield (ptype, field), data
-            f.close()
-
-    def _initialize_index(self, data_file, regions):
-        index_ptype = self.index_ptype
-        f = _get_h5_handle(data_file.filename)
-        if index_ptype == "all":
-            pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
-            keys = f.keys()
-        else:
-            pt = int(index_ptype[-1])
-            pcount = f["/Header"].attrs["NumPart_ThisFile"][pt]
-            keys = [index_ptype]
-        morton = np.empty(pcount, dtype='uint64')
-        ind = 0
-        for key in keys:
-            if not key.startswith("PartType"): continue
-            if "Coordinates" not in f[key]: continue
-            ds = f[key]["Coordinates"]
-            dt = ds.dtype.newbyteorder("N") # Native
-            pos = np.empty(ds.shape, dtype=dt)
-            pos[:] = ds
-            regions.add_data_file(pos, data_file.file_id,
-                                  data_file.ds.filter_bbox)
-            morton[ind:ind+pos.shape[0]] = compute_morton(
-                pos[:,0], pos[:,1], pos[:,2],
-                data_file.ds.domain_left_edge,
-                data_file.ds.domain_right_edge,
-                data_file.ds.filter_bbox)
-            ind += pos.shape[0]
-        f.close()
-        return morton
-
-    def _count_particles(self, data_file):
-        f = _get_h5_handle(data_file.filename)
-        pcount = f["/Header"].attrs["NumPart_ThisFile"][:]
-        f.close()
-        npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount))
-        return npart
-
-
-    def _identify_fields(self, data_file):
-        f = _get_h5_handle(data_file.filename)
-        fields = []
-        cname = self.ds._particle_coordinates_name  # Coordinates
-        mname = self.ds._particle_mass_name  # Mass
-
-        # loop over all keys in OWLS hdf5 file
-        #--------------------------------------------------
-        for key in f.keys():
-
-            # only want particle data
-            #--------------------------------------
-            if not key.startswith("PartType"): continue
-
-            # particle data group
-            #--------------------------------------
-            g = f[key]
-            if cname not in g: continue
-
-            # note str => not unicode!
-
-            #ptype = int(key[8:])
-            ptype = str(key)
-            if ptype not in self.var_mass:
-                fields.append((ptype, mname))
-
-            # loop over all keys in PartTypeX group
-            #----------------------------------------
-            for k in g.keys():
-
-                if k == 'ElementAbundance':
-                    gp = g[k]
-                    for j in gp.keys():
-                        kk = j
-                        fields.append((ptype, str(kk)))
-                elif k == 'Metallicity' and len(g[k].shape) > 1:
-                    # Vector of metallicity
-                    for i in range(g[k].shape[1]):
-                        fields.append((ptype, "Metallicity_%02i" % i))
-                elif k == "ChemistryAbundances" and len(g[k].shape)>1:
-                    for i in range(g[k].shape[1]):
-                        fields.append((ptype, "Chemistry_%03i" % i))
-                else:
-                    kk = k
-                    if not hasattr(g[kk], "shape"): continue
-                    fields.append((ptype, str(kk)))
-
-
-        f.close()
-        return fields, {}
+class IOHandlerOWLS(IOHandlerGadgetHDF5):
+    _dataset_type = "OWLS"

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving this
because you have the commit notification service enabled for this
repository.


