[yt-svn] commit/yt: 3 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Sat Apr 5 10:03:41 PDT 2014


3 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/1d85e363ce61/
Changeset:   1d85e363ce61
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-04 18:17:19
Summary:     Attempting to enable field_dtypes passed to load() for Tipsy.
Affected #:  2 files

diff -r ae9632bccb40e04af7487d17d86f8e37ac143e11 -r 1d85e363ce61d487b405bbbf704f2595ed198db4 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -43,6 +43,8 @@
     gadget_header_specs, \
     gadget_field_specs, \
     gadget_ptype_specs
+from .io import \
+    IOHandlerTipsyBinary
 
 try:
     import requests
@@ -384,7 +386,9 @@
                  n_ref=64, over_refine_factor=1):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
-        success, self.endian = self._validate_header(filename)
+        if field_dtypes is None:
+            field_dtypes = {}
+        success, self.endian = self._validate_header(filename, field_dtypes)
         if not success:
             print "SOMETHING HAS GONE WRONG.  NBODIES != SUM PARTICLES."
             print "%s != (%s == %s + %s + %s)" % (
@@ -400,8 +404,6 @@
 
         # My understanding is that dtypes are set on a field by field basis,
         # not on a (particle type, field) basis
-        if field_dtypes is None:
-            field_dtypes = {}
         self._field_dtypes = field_dtypes
 
         self._unit_base = unit_base or {}
@@ -520,7 +522,7 @@
         self.time_unit = 1.0 / np.sqrt(G * density_unit)
 
     @staticmethod
-    def _validate_header(filename):
+    def _validate_header(filename, field_dtypes):
         '''
         This method automatically detects whether the tipsy file is big/little endian
         and is not corrupt/invalid.  It returns a tuple of (Valid, endianswap) where
@@ -544,11 +546,16 @@
             endianswap = ">"
             f.seek(0)
             t, n, ndim, ng, nd, ns = struct.unpack(">diiiii", f.read(28))
+        # Now we construct the sizes of each of the particles.
+        dtypes = IOHandlerTipsyBinary._compute_dtypes(field_dtypes, endianswap)
         #Catch for 4 byte padding
-        if (fs == 32+48*ng+36*nd+44*ns):
+        gas_size = dtypes["Gas"].itemsize
+        dm_size = dtypes["DarkMatter"].itemsize
+        star_size = dtypes["Stars"].itemsize
+        if (fs == 32+gas_size*ng+dm_size*nd+star_size*ns):
             f.read(4)
         #File is borked if this is true
-        elif (fs != 28+48*ng+36*nd+44*ns):
+        elif (fs != 28+gas_size*ng+dm_size*nd+star_size*ns):
             f.close()
             return False, 0
         f.close()
@@ -556,7 +563,8 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        return TipsyDataset._validate_header(args[0])[0]
+        field_dtypes = kwargs.get("field_dtypes", {})
+        return TipsyDataset._validate_header(args[0], field_dtypes)[0]
 
 class HTTPParticleFile(ParticleFile):
     pass

diff -r ae9632bccb40e04af7487d17d86f8e37ac143e11 -r 1d85e363ce61d487b405bbbf704f2595ed198db4 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -392,7 +392,7 @@
                 "DarkMatter",
                 "Stars" )
 
-    _aux_fields = []
+    _aux_fields = None
     _fields = ( ("Gas", "Mass"),
                 ("Gas", "Coordinates"),
                 ("Gas", "Velocities"),
@@ -415,6 +415,10 @@
                 ("Stars", "Phi")
               )
 
+    def __init__(self, *args, **kwargs):
+        self._aux_fields = []
+        super(IOHandlerTipsyBinary, self).__init__(*args, **kwargs)
+
     def _read_fluid_selection(self, chunks, selector, fields, size):
         raise NotImplementedError
 
@@ -623,6 +627,22 @@
         }
         return npart
 
+    @classmethod
+    def _compute_dtypes(cls, field_dtypes, endian = "<"):
+        pds = {}
+        for ptype, field in cls._fields:
+            dtbase = field_dtypes.get(field, 'f')
+            ff = "%s%s" % (endian, dtbase)
+            if field in cls._vector_fields:
+                dt = (field, [('x', ff), ('y', ff), ('z', ff)])
+            else:
+                dt = (field, ff)
+            pds.setdefault(ptype, []).append(dt)
+        pdtypes = {}
+        for ptype in pds:
+            pdtypes[ptype] = np.dtype(pds[ptype])
+        return pdtypes
+
     def _create_dtypes(self, data_file):
         # We can just look at the particle counts.
         self._header_offset = data_file.pf._header_offset
@@ -632,19 +652,10 @@
         tp = data_file.total_particles
         aux_filenames = glob.glob(data_file.filename+'.*') # Find out which auxiliaries we have
         self._aux_fields = [f[1+len(data_file.filename):] for f in aux_filenames]
+        self._pdtypes = self._compute_dtypes(data_file.pf._field_dtypes)
         for ptype, field in self._fields:
-            pfields = []
             if tp[ptype] == 0: continue
-            dtbase = data_file.pf._field_dtypes.get(field, 'f')
-            ff = "%s%s" % (data_file.pf.endian, dtbase)
-            if field in self._vector_fields:
-                dt = (field, [('x', ff), ('y', ff), ('z', ff)])
-            else:
-                dt = (field, ff)
-            pds.setdefault(ptype, []).append(dt)
             field_list.append((ptype, field))
-        for ptype in pds:
-            self._pdtypes[ptype] = np.dtype(pds[ptype])
         if any(["Gas"==f[0] for f in field_list]): #Add the auxiliary fields to each ptype we have
             field_list += [("Gas",a) for a in self._aux_fields] 
         if any(["DarkMatter"==f[0] for f in field_list]):


https://bitbucket.org/yt_analysis/yt/commits/a93ce7b329e2/
Changeset:   a93ce7b329e2
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-04 18:30:44
Summary:     Fixing some endianness drops.
Affected #:  1 file

diff -r 1d85e363ce61d487b405bbbf704f2595ed198db4 -r a93ce7b329e2543c3b81f6b42c00c4f1a46374e4 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -652,9 +652,13 @@
         tp = data_file.total_particles
         aux_filenames = glob.glob(data_file.filename+'.*') # Find out which auxiliaries we have
         self._aux_fields = [f[1+len(data_file.filename):] for f in aux_filenames]
-        self._pdtypes = self._compute_dtypes(data_file.pf._field_dtypes)
+        self._pdtypes = self._compute_dtypes(data_file.pf._field_dtypes,
+                                             data_file.pf.endian)
         for ptype, field in self._fields:
-            if tp[ptype] == 0: continue
+            if tp[ptype] == 0:
+                # We do not want our _pdtypes to have empty particles.
+                self._pdtypes.pop(ptype, None)
+                continue
             field_list.append((ptype, field))
         if any(["Gas"==f[0] for f in field_list]): #Add the auxiliary fields to each ptype we have
             field_list += [("Gas",a) for a in self._aux_fields] 


https://bitbucket.org/yt_analysis/yt/commits/b701010572ea/
Changeset:   b701010572ea
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-05 19:03:35
Summary:     Merged in MatthewTurk/yt/yt-3.0 (pull request #795)

Enable field_dtypes in load() for Tipsy
Affected #:  2 files

diff -r c08048dc7f10b4c23ce34322599988ad23fe1d45 -r b701010572eab88beb722c9ba88e9cb75bae8d58 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -43,6 +43,8 @@
     gadget_header_specs, \
     gadget_field_specs, \
     gadget_ptype_specs
+from .io import \
+    IOHandlerTipsyBinary
 
 try:
     import requests
@@ -384,7 +386,9 @@
                  n_ref=64, over_refine_factor=1):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
-        success, self.endian = self._validate_header(filename)
+        if field_dtypes is None:
+            field_dtypes = {}
+        success, self.endian = self._validate_header(filename, field_dtypes)
         if not success:
             print "SOMETHING HAS GONE WRONG.  NBODIES != SUM PARTICLES."
             print "%s != (%s == %s + %s + %s)" % (
@@ -400,8 +404,6 @@
 
         # My understanding is that dtypes are set on a field by field basis,
         # not on a (particle type, field) basis
-        if field_dtypes is None:
-            field_dtypes = {}
         self._field_dtypes = field_dtypes
 
         self._unit_base = unit_base or {}
@@ -520,7 +522,7 @@
         self.time_unit = 1.0 / np.sqrt(G * density_unit)
 
     @staticmethod
-    def _validate_header(filename):
+    def _validate_header(filename, field_dtypes):
         '''
         This method automatically detects whether the tipsy file is big/little endian
         and is not corrupt/invalid.  It returns a tuple of (Valid, endianswap) where
@@ -544,11 +546,16 @@
             endianswap = ">"
             f.seek(0)
             t, n, ndim, ng, nd, ns = struct.unpack(">diiiii", f.read(28))
+        # Now we construct the sizes of each of the particles.
+        dtypes = IOHandlerTipsyBinary._compute_dtypes(field_dtypes, endianswap)
         #Catch for 4 byte padding
-        if (fs == 32+48*ng+36*nd+44*ns):
+        gas_size = dtypes["Gas"].itemsize
+        dm_size = dtypes["DarkMatter"].itemsize
+        star_size = dtypes["Stars"].itemsize
+        if (fs == 32+gas_size*ng+dm_size*nd+star_size*ns):
             f.read(4)
         #File is borked if this is true
-        elif (fs != 28+48*ng+36*nd+44*ns):
+        elif (fs != 28+gas_size*ng+dm_size*nd+star_size*ns):
             f.close()
             return False, 0
         f.close()
@@ -556,7 +563,8 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        return TipsyDataset._validate_header(args[0])[0]
+        field_dtypes = kwargs.get("field_dtypes", {})
+        return TipsyDataset._validate_header(args[0], field_dtypes)[0]
 
 class HTTPParticleFile(ParticleFile):
     pass

diff -r c08048dc7f10b4c23ce34322599988ad23fe1d45 -r b701010572eab88beb722c9ba88e9cb75bae8d58 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -392,7 +392,7 @@
                 "DarkMatter",
                 "Stars" )
 
-    _aux_fields = []
+    _aux_fields = None
     _fields = ( ("Gas", "Mass"),
                 ("Gas", "Coordinates"),
                 ("Gas", "Velocities"),
@@ -415,6 +415,10 @@
                 ("Stars", "Phi")
               )
 
+    def __init__(self, *args, **kwargs):
+        self._aux_fields = []
+        super(IOHandlerTipsyBinary, self).__init__(*args, **kwargs)
+
     def _read_fluid_selection(self, chunks, selector, fields, size):
         raise NotImplementedError
 
@@ -623,6 +627,22 @@
         }
         return npart
 
+    @classmethod
+    def _compute_dtypes(cls, field_dtypes, endian = "<"):
+        pds = {}
+        for ptype, field in cls._fields:
+            dtbase = field_dtypes.get(field, 'f')
+            ff = "%s%s" % (endian, dtbase)
+            if field in cls._vector_fields:
+                dt = (field, [('x', ff), ('y', ff), ('z', ff)])
+            else:
+                dt = (field, ff)
+            pds.setdefault(ptype, []).append(dt)
+        pdtypes = {}
+        for ptype in pds:
+            pdtypes[ptype] = np.dtype(pds[ptype])
+        return pdtypes
+
     def _create_dtypes(self, data_file):
         # We can just look at the particle counts.
         self._header_offset = data_file.pf._header_offset
@@ -632,19 +652,14 @@
         tp = data_file.total_particles
         aux_filenames = glob.glob(data_file.filename+'.*') # Find out which auxiliaries we have
         self._aux_fields = [f[1+len(data_file.filename):] for f in aux_filenames]
+        self._pdtypes = self._compute_dtypes(data_file.pf._field_dtypes,
+                                             data_file.pf.endian)
         for ptype, field in self._fields:
-            pfields = []
-            if tp[ptype] == 0: continue
-            dtbase = data_file.pf._field_dtypes.get(field, 'f')
-            ff = "%s%s" % (data_file.pf.endian, dtbase)
-            if field in self._vector_fields:
-                dt = (field, [('x', ff), ('y', ff), ('z', ff)])
-            else:
-                dt = (field, ff)
-            pds.setdefault(ptype, []).append(dt)
+            if tp[ptype] == 0:
+                # We do not want our _pdtypes to have empty particles.
+                self._pdtypes.pop(ptype, None)
+                continue
             field_list.append((ptype, field))
-        for ptype in pds:
-            self._pdtypes[ptype] = np.dtype(pds[ptype])
         if any(["Gas"==f[0] for f in field_list]): #Add the auxiliary fields to each ptype we have
             field_list += [("Gas",a) for a in self._aux_fields] 
         if any(["DarkMatter"==f[0] for f in field_list]):

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list