[yt-svn] commit/yt: 7 new changesets

commits-noreply at bitbucket.org
Mon Nov 2 11:20:31 PST 2015


7 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/fdbbedfeaa31/
Changeset:   fdbbedfeaa31
Branch:      stable
User:        xarthisius
Date:        2015-09-22 21:42:26+00:00
Summary:     Backporting PR #1735 https://bitbucket.org/yt_analysis/yt/pull-requests/1735
Affected #:  2 files

diff -r d20a02c6232921a4105a5b32e1df4de08be0fc87 -r fdbbedfeaa312ac79c555d28f62250a86f828a56 yt/frontends/tipsy/data_structures.py
--- a/yt/frontends/tipsy/data_structures.py
+++ b/yt/frontends/tipsy/data_structures.py
@@ -72,7 +72,7 @@
                  bounding_box=None,
                  units_override=None):
         # Because Tipsy outputs don't have a fixed domain boundary, one can
-        # specify a bounding box which effectively gives a domain_left_edge 
+        # specify a bounding box which effectively gives a domain_left_edge
         # and domain_right_edge
         self.bounding_box = bounding_box
         self.filter_bbox = (bounding_box is not None)
@@ -180,7 +180,7 @@
             else:
                 self.domain_left_edge = None
                 self.domain_right_edge = None
-        else: 
+        else:
             bbox = np.array(self.bounding_box, dtype="float64")
             if bbox.shape == (2, 3):
                 bbox = bbox.transpose()
@@ -227,7 +227,6 @@
             self.length_unit = self.quan(lu, 'kpc')*self.scale_factor
             self.mass_unit = self.quan(mu, 'Msun')
             density_unit = self.mass_unit/ (self.length_unit/self.scale_factor)**3
-
             # If self.comoving is set, we know this is a gasoline data set,
             # and we do the conversion on the hubble constant.
             if self.comoving:
@@ -270,7 +269,7 @@
         '''
         This method automatically detects whether the tipsy file is big/little endian
         and is not corrupt/invalid.  It returns a tuple of (Valid, endianswap) where
-        Valid is a boolean that is true if the file is a tipsy file, and endianswap is 
+        Valid is a boolean that is true if the file is a tipsy file, and endianswap is
         the endianness character '>' or '<'.
         '''
         try:

diff -r d20a02c6232921a4105a5b32e1df4de08be0fc87 -r fdbbedfeaa312ac79c555d28f62250a86f828a56 yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -17,52 +17,53 @@
 
 import glob
 import numpy as np
+from numpy.lib.recfunctions import append_fields
 import os
 
-from yt.geometry.oct_container import \
-    _ORDER_MAX
 from yt.utilities.io_handler import \
     BaseIOHandler
 from yt.utilities.lib.geometry_utils import \
     compute_morton
 from yt.utilities.logger import ytLogger as \
     mylog
-    
+
 CHUNKSIZE = 10000000
 
+
 class IOHandlerTipsyBinary(BaseIOHandler):
     _dataset_type = "tipsy"
     _vector_fields = ("Coordinates", "Velocity", "Velocities")
 
-    _pdtypes = None # dtypes, to be filled in later
+    _pdtypes = None  # dtypes, to be filled in later
+    _aux_pdtypes = None  # auxiliary files' dtypes
 
-    _ptypes = ( "Gas",
-                "DarkMatter",
-                "Stars" )
-    _chunksize = 64*64*64
+    _ptypes = ("Gas",
+               "DarkMatter",
+               "Stars")
+    _chunksize = 64 * 64 * 64
 
     _aux_fields = None
-    _fields = ( ("Gas", "Mass"),
-                ("Gas", "Coordinates"),
-                ("Gas", "Velocities"),
-                ("Gas", "Density"),
-                ("Gas", "Temperature"),
-                ("Gas", "Epsilon"),
-                ("Gas", "Metals"),
-                ("Gas", "Phi"),
-                ("DarkMatter", "Mass"),
-                ("DarkMatter", "Coordinates"),
-                ("DarkMatter", "Velocities"),
-                ("DarkMatter", "Epsilon"),
-                ("DarkMatter", "Phi"),
-                ("Stars", "Mass"),
-                ("Stars", "Coordinates"),
-                ("Stars", "Velocities"),
-                ("Stars", "Metals"),
-                ("Stars", "FormationTime"),
-                ("Stars", "Epsilon"),
-                ("Stars", "Phi")
-              )
+    _fields = (("Gas", "Mass"),
+               ("Gas", "Coordinates"),
+               ("Gas", "Velocities"),
+               ("Gas", "Density"),
+               ("Gas", "Temperature"),
+               ("Gas", "Epsilon"),
+               ("Gas", "Metals"),
+               ("Gas", "Phi"),
+               ("DarkMatter", "Mass"),
+               ("DarkMatter", "Coordinates"),
+               ("DarkMatter", "Velocities"),
+               ("DarkMatter", "Epsilon"),
+               ("DarkMatter", "Phi"),
+               ("Stars", "Mass"),
+               ("Stars", "Coordinates"),
+               ("Stars", "Velocities"),
+               ("Stars", "Metals"),
+               ("Stars", "FormationTime"),
+               ("Stars", "Epsilon"),
+               ("Stars", "Phi")
+               )
 
     def __init__(self, *args, **kwargs):
         self._aux_fields = []
@@ -71,50 +72,6 @@
     def _read_fluid_selection(self, chunks, selector, fields, size):
         raise NotImplementedError
 
-    def _read_aux_fields(self, field, mask, data_file):
-        """
-        Read in auxiliary files from gasoline/pkdgrav.
-        This method will automatically detect the format of the file.
-        """
-        filename = data_file.filename+'.'+field
-        dtype = None
-        # We need to do some fairly ugly detection to see what format the auxiliary
-        # files are in.  They can be either ascii or binary, and the binary files can be
-        # either floats, ints, or doubles.  We're going to use a try-catch cascade to
-        # determine the format.
-        try:#ASCII
-            auxdata = np.genfromtxt(filename, skip_header=1)
-            if auxdata.size != np.sum(data_file.total_particles.values()):
-                print("Error reading auxiliary tipsy file")
-                raise RuntimeError 
-        except ValueError:#binary/xdr
-            f = open(filename, 'rb')
-            l = struct.unpack(data_file.ds.endian+"i", f.read(4))[0]
-            if l != np.sum(data_file.total_particles.values()):
-                print("Error reading auxiliary tipsy file")
-                raise RuntimeError
-            dtype = 'd'
-            if field in ('iord', 'igasorder', 'grp'):#These fields are integers
-                dtype = 'i'
-            try:# If we try loading doubles by default, we can catch an exception and try floats next
-                auxdata = np.array(struct.unpack(data_file.ds.endian+(l*dtype), f.read()))
-            except struct.error:
-                f.seek(4)
-                dtype = 'f'
-                try:
-                    auxdata = np.array(struct.unpack(data_file.ds.endian+(l*dtype), f.read()))
-                except struct.error: # None of the binary attempts to read succeeded
-                    print("Error reading auxiliary tipsy file")
-                    raise RuntimeError
-
-        # Use the mask to slice out the appropriate particle type data
-        if mask.size == data_file.total_particles['Gas']:
-            return auxdata[:data_file.total_particles['Gas']]
-        elif mask.size == data_file.total_particles['DarkMatter']:
-            return auxdata[data_file.total_particles['Gas']:-data_file.total_particles['DarkMatter']]
-        else:
-            return auxdata[-data_file.total_particles['Stars']:]
-
     def _fill_fields(self, fields, vals, mask, data_file):
         if mask is None:
             size = 0
@@ -123,27 +80,26 @@
         rv = {}
         for field in fields:
             mylog.debug("Allocating %s values for %s", size, field)
-            if field in self._aux_fields: #Read each of the auxiliary fields
-                rv[field] = self._read_aux_fields(field, mask, data_file)
-            elif field in self._vector_fields:
+            if field in self._vector_fields:
                 rv[field] = np.empty((size, 3), dtype="float64")
-                if size == 0: continue
-                rv[field][:,0] = vals[field]['x'][mask]
-                rv[field][:,1] = vals[field]['y'][mask]
-                rv[field][:,2] = vals[field]['z'][mask]
+                if size == 0:
+                    continue
+                rv[field][:, 0] = vals[field]['x'][mask]
+                rv[field][:, 1] = vals[field]['y'][mask]
+                rv[field][:, 2] = vals[field]['z'][mask]
             else:
                 rv[field] = np.empty(size, dtype="float64")
-                if size == 0: continue
+                if size == 0:
+                    continue
                 rv[field][:] = vals[field][mask]
             if field == "Coordinates":
                 eps = np.finfo(rv[field].dtype).eps
                 for i in range(3):
-                  rv[field][:,i] = np.clip(rv[field][:,i],
-                      self.domain_left_edge[i] + eps,
-                      self.domain_right_edge[i] - eps)
+                    rv[field][:, i] = np.clip(rv[field][:, i],
+                                              self.domain_left_edge[i] + eps,
+                                              self.domain_right_edge[i] - eps)
         return rv
 
-
     def _read_particle_coords(self, chunks, ptf):
         data_files = set([])
         for chunk in chunks:
@@ -153,14 +109,16 @@
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
-            for ptype, field_list in sorted(ptf.items(), key=lambda a: poff[a[0]]):
+            for ptype, field_list in sorted(ptf.items(),
+                                            key=lambda a: poff[a[0]]):
                 f.seek(poff[ptype], os.SEEK_SET)
                 total = 0
                 while total < tp[ptype]:
-                    p = np.fromfile(f, self._pdtypes[ptype],
-                            count=min(self._chunksize, tp[ptype] - total))
+                    count = min(self._chunksize, tp[ptype] - total)
+                    p = np.fromfile(f, self._pdtypes[ptype], count=count)
                     total += p.size
-                    d = [p["Coordinates"][ax].astype("float64") for ax in 'xyz']
+                    d = [p["Coordinates"][ax].astype("float64")
+                         for ax in 'xyz']
                     del p
                     yield ptype, d
 
@@ -172,24 +130,68 @@
                 data_files.update(obj.data_files)
         for data_file in sorted(data_files):
             poff = data_file.field_offsets
+            aux_fields_offsets = \
+                self._calculate_particle_offsets_aux(data_file)
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
-            for ptype, field_list in sorted(ptf.items(), key=lambda a: poff[a[0]]):
+
+            # we need to open all aux files for chunking to work
+            aux_fh = {}
+            for afield in self._aux_fields:
+                aux_fh[afield] = open(data_file.filename + '.' + afield, 'rb')
+
+            for ptype, field_list in sorted(ptf.items(),
+                                            key=lambda a: poff[a[0]]):
                 f.seek(poff[ptype], os.SEEK_SET)
+                afields = list(set(field_list).intersection(self._aux_fields))
+                for afield in afields:
+                    aux_fh[afield].seek(
+                        aux_fields_offsets[afield][ptype][0], os.SEEK_SET)
+
                 total = 0
                 while total < tp[ptype]:
-                    p = np.fromfile(f, self._pdtypes[ptype],
-                        count=min(self._chunksize, tp[ptype] - total))
+                    count = min(self._chunksize, tp[ptype] - total)
+                    p = np.fromfile(f, self._pdtypes[ptype], count=count)
+
+                    auxdata = []
+                    for afield in afields:
+                        if isinstance(self._aux_pdtypes[afield], np.dtype):
+                            auxdata.append(
+                                np.fromfile(aux_fh[afield],
+                                            self._aux_pdtypes[afield],
+                                            count=count)
+                            )
+                        else:
+                            aux_fh[afield].seek(0, os.SEEK_SET)
+                            sh = aux_fields_offsets[afield][ptype][0] + total
+                            sf = aux_fields_offsets[afield][ptype][1] + \
+                                tp[ptype] - count
+                            if tp[ptype] > 0:
+                                aux = np.genfromtxt(
+                                    aux_fh[afield], skip_header=sh,
+                                    skip_footer=sf
+                                )
+                                if aux.ndim < 1:
+                                    aux = np.array([aux])
+                                auxdata.append(aux)
+
                     total += p.size
+                    if afields:
+                        p = append_fields(p, afields, auxdata)
                     mask = selector.select_points(
                         p["Coordinates"]['x'].astype("float64"),
                         p["Coordinates"]['y'].astype("float64"),
                         p["Coordinates"]['z'].astype("float64"), 0.0)
-                    if mask is None: continue
+                    if mask is None:
+                        continue
                     tf = self._fill_fields(field_list, p, mask, data_file)
                     for field in field_list:
                         yield (ptype, field), tf.pop(field)
+
+            # close all file handles
             f.close()
+            for fh in list(aux_fh.values()):
+                fh.close()
 
     def _update_domain(self, data_file):
         '''
@@ -201,24 +203,25 @@
         ind = 0
         # Check to make sure that the domain hasn't already been set
         # by the parameter file
-        if np.all(np.isfinite(ds.domain_left_edge)) and np.all(np.isfinite(ds.domain_right_edge)):
+        if np.all(np.isfinite(ds.domain_left_edge)) and \
+                np.all(np.isfinite(ds.domain_right_edge)):
             return
         with open(data_file.filename, "rb") as f:
             ds.domain_left_edge = 0
             ds.domain_right_edge = 0
             f.seek(ds._header_offset)
-            mi =   np.array([1e30, 1e30, 1e30], dtype="float64")
-            ma =  -np.array([1e30, 1e30, 1e30], dtype="float64")
+            mi = np.array([1e30, 1e30, 1e30], dtype="float64")
+            ma = -np.array([1e30, 1e30, 1e30], dtype="float64")
             for iptype, ptype in enumerate(self._ptypes):
                 # We'll just add the individual types separately
                 count = data_file.total_particles[ptype]
-                if count == 0: continue
-                start, stop = ind, ind + count
+                if count == 0:
+                    continue
+                stop = ind + count
                 while ind < stop:
                     c = min(CHUNKSIZE, stop - ind)
-                    pp = np.fromfile(f, dtype = self._pdtypes[ptype],
-                                     count = c)
-                    eps = np.finfo(pp["Coordinates"]["x"].dtype).eps
+                    pp = np.fromfile(f, dtype=self._pdtypes[ptype],
+                                     count=c)
                     np.minimum(mi, [pp["Coordinates"]["x"].min(),
                                     pp["Coordinates"]["y"].min(),
                                     pp["Coordinates"]["z"].min()], mi)
@@ -234,7 +237,7 @@
         ds.domain_right_edge = ds.arr(ma, 'code_length')
         ds.domain_width = DW = ds.domain_right_edge - ds.domain_left_edge
         ds.unit_registry.add("unitary", float(DW.max() * DW.units.base_value),
-                                 DW.units.dimensions)
+                             DW.units.dimensions)
 
     def _initialize_index(self, data_file, regions):
         ds = data_file.ds
@@ -242,7 +245,6 @@
                           dtype="uint64")
         ind = 0
         DLE, DRE = ds.domain_left_edge, ds.domain_right_edge
-        dx = (DRE - DLE) / (2**_ORDER_MAX)
         self.domain_left_edge = DLE.in_units("code_length").ndarray_view()
         self.domain_right_edge = DRE.in_units("code_length").ndarray_view()
         with open(data_file.filename, "rb") as f:
@@ -250,28 +252,29 @@
             for iptype, ptype in enumerate(self._ptypes):
                 # We'll just add the individual types separately
                 count = data_file.total_particles[ptype]
-                if count == 0: continue
-                start, stop = ind, ind + count
+                if count == 0:
+                    continue
+                stop = ind + count
                 while ind < stop:
                     c = min(CHUNKSIZE, stop - ind)
-                    pp = np.fromfile(f, dtype = self._pdtypes[ptype],
-                                     count = c)
+                    pp = np.fromfile(f, dtype=self._pdtypes[ptype],
+                                     count=c)
                     mis = np.empty(3, dtype="float64")
                     mas = np.empty(3, dtype="float64")
                     for axi, ax in enumerate('xyz'):
                         mi = pp["Coordinates"][ax].min()
                         ma = pp["Coordinates"][ax].max()
-                        mylog.debug("Spanning: %0.3e .. %0.3e in %s", mi, ma, ax)
+                        mylog.debug(
+                            "Spanning: %0.3e .. %0.3e in %s", mi, ma, ax)
                         mis[axi] = mi
                         mas[axi] = ma
                     pos = np.empty((pp.size, 3), dtype="float64")
                     for i, ax in enumerate("xyz"):
-                        eps = np.finfo(pp["Coordinates"][ax].dtype).eps
-                        pos[:,i] = pp["Coordinates"][ax]
+                        pos[:, i] = pp["Coordinates"][ax]
                     regions.add_data_file(pos, data_file.file_id,
                                           data_file.ds.filter_bbox)
-                    morton[ind:ind+c] = compute_morton(
-                        pos[:,0], pos[:,1], pos[:,2],
+                    morton[ind:ind + c] = compute_morton(
+                        pos[:, 0], pos[:, 1], pos[:, 2],
                         DLE, DRE, data_file.ds.filter_bbox)
                     ind += c
         mylog.info("Adding %0.3e particles", morton.size)
@@ -286,7 +289,7 @@
         return npart
 
     @classmethod
-    def _compute_dtypes(cls, field_dtypes, endian = "<"):
+    def _compute_dtypes(cls, field_dtypes, endian="<"):
         pds = {}
         for ptype, field in cls._fields:
             dtbase = field_dtypes.get(field, 'f')
@@ -304,27 +307,50 @@
     def _create_dtypes(self, data_file):
         # We can just look at the particle counts.
         self._header_offset = data_file.ds._header_offset
-        self._pdtypes = {}
-        pds = {}
-        field_list = []
-        tp = data_file.total_particles
-        aux_filenames = glob.glob(data_file.filename+'.*') # Find out which auxiliaries we have
-        self._aux_fields = [f[1+len(data_file.filename):] for f in aux_filenames]
         self._pdtypes = self._compute_dtypes(data_file.ds._field_dtypes,
                                              data_file.ds.endian)
+        self._field_list = []
         for ptype, field in self._fields:
-            if tp[ptype] == 0:
+            if data_file.total_particles[ptype] == 0:
                 # We do not want out _pdtypes to have empty particles.
                 self._pdtypes.pop(ptype, None)
                 continue
-            field_list.append((ptype, field))
-        if any(["Gas"==f[0] for f in field_list]): #Add the auxiliary fields to each ptype we have
-            field_list += [("Gas",a) for a in self._aux_fields]
-        if any(["DarkMatter"==f[0] for f in field_list]):
-            field_list += [("DarkMatter",a) for a in self._aux_fields]
-        if any(["Stars"==f[0] for f in field_list]):
-            field_list += [("Stars",a) for a in self._aux_fields]
-        self._field_list = field_list
+            self._field_list.append((ptype, field))
+
+        # Find out which auxiliaries we have and what is their format
+        tot_parts = np.sum(data_file.total_particles.values())
+        endian = data_file.ds.endian
+        self._aux_pdtypes = {}
+        self._aux_fields = [f.rsplit('.')[-1]
+                            for f in glob.glob(data_file.filename + '.*')]
+        for afield in self._aux_fields:
+            filename = data_file.filename + '.' + afield
+            # We need to do some fairly ugly detection to see what format the
+            # auxiliary files are in.  They can be either ascii or binary, and
+            # the binary files can be either floats, ints, or doubles.  We're
+            # going to use a try-catch cascade to determine the format.
+            filesize = os.stat(filename).st_size
+            if np.fromfile(filename, np.dtype(endian + 'i4'),
+                           count=1) != tot_parts:
+                with open(filename) as f:
+                    if int(f.readline()) != tot_parts:
+                        raise RuntimeError
+                self._aux_pdtypes[afield] = "ascii"
+            elif (filesize - 4) / 8 == tot_parts:
+                self._aux_pdtypes[afield] = np.dtype([('aux', endian + 'd')])
+            elif (filesize - 4) / 4 == tot_parts:
+                if afield.startswith("i"):
+                    self._aux_pdtypes[afield] = np.dtype([('aux', endian + 'i')])
+                else:
+                    self._aux_pdtypes[afield] = np.dtype([('aux', endian + 'f')])
+            else:
+                raise RuntimeError
+
+        # Add the auxiliary fields to each ptype we have
+        for ptype in self._ptypes:
+            if any([ptype == field[0] for field in self._field_list]):
+                self._field_list += \
+                    [(ptype, afield) for afield in self._aux_fields]
         return self._field_list
 
     def _identify_fields(self, data_file):
@@ -335,7 +361,29 @@
         pos = data_file.ds._header_offset
         for ptype in self._ptypes:
             field_offsets[ptype] = pos
-            if data_file.total_particles[ptype] == 0: continue
+            if data_file.total_particles[ptype] == 0:
+                continue
             size = self._pdtypes[ptype].itemsize
             pos += data_file.total_particles[ptype] * size
         return field_offsets
+
+    def _calculate_particle_offsets_aux(self, data_file):
+        aux_fields_offsets = {}
+        tp = data_file.total_particles
+        for afield in self._aux_fields:
+            aux_fields_offsets[afield] = {}
+            if isinstance(self._aux_pdtypes[afield], np.dtype):
+                pos = 4  # i4
+                for ptype in self._ptypes:
+                    aux_fields_offsets[afield][ptype] = (pos, 0)
+                    if data_file.total_particles[ptype] == 0:
+                        continue
+                    size = np.dtype(self._aux_pdtypes[afield]).itemsize
+                    pos += data_file.total_particles[ptype] * size
+            else:
+                aux_fields_offsets[afield].update(
+                    {'Gas': (1, tp["DarkMatter"] + tp["Stars"]),
+                     'DarkMatter': (1 + tp["Gas"], tp["Stars"]),
+                     'Stars': (1 + tp["DarkMatter"] + tp["Gas"], 0)}
+                )
+        return aux_fields_offsets

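For readers skimming the diff: the size-based format detection added to _create_dtypes reduces to the following standalone sketch (hypothetical helper name and arguments, not part of the commit; the real code stores the result in self._aux_pdtypes). The (filesize - 4) arithmetic accounts for the leading int32 particle count in binary files.

    import os
    import numpy as np

    def detect_aux_format(filename, tot_parts, endian="<"):
        # Gasoline/pkdgrav auxiliary files are either ASCII (first line
        # holds the particle count) or binary (an int32 count followed by
        # one double, int, or float per particle).
        filesize = os.stat(filename).st_size
        count = np.fromfile(filename, np.dtype(endian + "i4"), count=1)
        if count != tot_parts:
            # Not binary: fall back to ASCII and verify the header line.
            with open(filename) as f:
                if int(f.readline()) != tot_parts:
                    raise RuntimeError("unrecognized auxiliary file")
            return "ascii"
        if (filesize - 4) / 8 == tot_parts:
            return np.dtype([("aux", endian + "d")])  # doubles
        if (filesize - 4) / 4 == tot_parts:
            # Integer fields such as iord, igasorder, grp start with "i".
            afield = filename.rsplit(".")[-1]
            char = "i" if afield.startswith("i") else "f"
            return np.dtype([("aux", endian + char)])
        raise RuntimeError("unrecognized auxiliary file size")
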

https://bitbucket.org/yt_analysis/yt/commits/d6167462a041/
Changeset:   d6167462a041
Branch:      stable
User:        xarthisius
Date:        2015-10-05 19:04:13+00:00
Summary:     Backporting PR #1766 https://bitbucket.org/yt_analysis/yt/pull-requests/1766
Affected #:  1 file

diff -r fdbbedfeaa312ac79c555d28f62250a86f828a56 -r d6167462a04145b3244aa1eb4e1326e009876a19 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -198,15 +198,24 @@
                 ei = start_e
                 for cn, Z in zip(number_of_photons[ibegin:iend], metalZ[ibegin:iend]):
                     if cn == 0: continue
+                    # The rather verbose form of the next few statements is a
+                    # result of code optimization and shouldn't be changed
+                    # without checking for performance degradation. See
+                    # https://bitbucket.org/yt_analysis/yt/pull-requests/1766
+                    # for details.
                     if self.method == "invert_cdf":
-                        cumspec = cumspec_c + Z*cumspec_m
-                        cumspec /= cumspec[-1]
+                        cumspec = cumspec_c
+                        cumspec += Z * cumspec_m
+                        norm_factor = 1.0 / cumspec[-1]
+                        cumspec *= norm_factor
                         randvec = np.random.uniform(size=cn)
                         randvec.sort()
                         cell_e = np.interp(randvec, cumspec, ebins)
                     elif self.method == "accept_reject":
-                        tot_spec = cspec.d+Z*mspec.d
-                        tot_spec /= tot_spec.sum()
+                        tot_spec = cspec.d
+                        tot_spec += Z * mspec.d
+                        norm_factor = 1.0 / tot_spec.sum()
+                        tot_spec *= norm_factor
                         eidxs = np.random.choice(nchan, size=cn, p=tot_spec)
                         cell_e = emid[eidxs]
                     energies[ei:ei+cn] = cell_e

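The comment added above refers to a standard NumPy idiom: augmented assignment reuses an existing buffer where a plain binary expression allocates a temporary, and multiplying by a precomputed reciprocal is cheaper than an elementwise division. A small illustration with stand-in arrays (not part of the commit):

    import numpy as np

    a = np.random.random(10**6)
    b = np.random.random(10**6)
    Z = 0.3

    # Allocating form: "a + Z*b" and the division each build a temporary.
    out = a + Z * b
    out /= out[-1]

    # In-place form, as in the diff: the buffer is reused and the division
    # becomes a single reciprocal plus an elementwise multiply.
    out = a            # caution: aliases a, so a must be rebuilt before reuse
    out += Z * b
    norm_factor = 1.0 / out[-1]
    out *= norm_factor
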

https://bitbucket.org/yt_analysis/yt/commits/63eca5e094aa/
Changeset:   63eca5e094aa
Branch:      stable
User:        ngoldbaum
Date:        2015-10-22 05:35:09+00:00
Summary:     Backporting PR #1787 https://bitbucket.org/yt_analysis/yt/pull-requests/1787
Affected #:  3 files

diff -r d6167462a04145b3244aa1eb4e1326e009876a19 -r 63eca5e094aa8e2296152fb6101ea9ebc6577a80 yt/frontends/tipsy/data_structures.py
--- a/yt/frontends/tipsy/data_structures.py
+++ b/yt/frontends/tipsy/data_structures.py
@@ -23,6 +23,7 @@
 
 from yt.frontends.sph.data_structures import \
     ParticleDataset
+from yt.funcs import deprecate
 from yt.geometry.particle_geometry_handler import \
     ParticleIndex
 from yt.data_objects.static_output import \
@@ -32,7 +33,6 @@
 from yt.utilities.physical_constants import \
     G, \
     cm_per_kpc
-from yt import YTQuantity
 
 from .fields import \
     TipsyFieldInfo
@@ -169,12 +169,14 @@
         periodic = self.parameters.get('bPeriodic', True)
         period = self.parameters.get('dPeriod', None)
         self.periodicity = (periodic, periodic, periodic)
-        self.comoving = self.parameters.get('bComove', False)
-        if self.comoving and period is None:
+        self.cosmological_simulation = float(self.parameters.get(
+            'bComove', self._cosmology_parameters is not None))
+        if self.cosmological_simulation and period is None:
             period = 1.0
         if self.bounding_box is None:
             if periodic and period is not None:
-                # If we are periodic, that sets our domain width to either 1 or dPeriod.
+                # If we are periodic, that sets our domain width to
+                # either 1 or dPeriod.
                 self.domain_left_edge = np.zeros(3, "float64") - 0.5*period
                 self.domain_right_edge = np.zeros(3, "float64") + 0.5*period
             else:
@@ -189,19 +191,22 @@
 
         # If the cosmology parameters dictionary got set when data is
         # loaded, we can assume it's a cosmological data set
-        if self.comoving or self._cosmology_parameters is not None:
+        if self.cosmological_simulation == 1.0:
             cosm = self._cosmology_parameters or {}
-            self.scale_factor = hvals["time"]#In comoving simulations, time stores the scale factor a
-            self.cosmological_simulation = 1
-            dcosm = dict(current_redshift=(1.0/self.scale_factor)-1.0,
-                         omega_lambda=self.parameters.get('dLambda', cosm.get('omega_lambda',0.0)),
-                         omega_matter=self.parameters.get('dOmega0', cosm.get('omega_matter',0.0)),
-                         hubble_constant=self.parameters.get('dHubble0', cosm.get('hubble_constant',1.0)))
+            # In comoving simulations, time stores the scale factor a
+            self.scale_factor = hvals["time"]
+            dcosm = dict(
+                current_redshift=(1.0/self.scale_factor)-1.0,
+                omega_lambda=self.parameters.get(
+                    'dLambda', cosm.get('omega_lambda',0.0)),
+                omega_matter=self.parameters.get(
+                    'dOmega0', cosm.get('omega_matter',0.0)),
+                hubble_constant=self.parameters.get(
+                    'dHubble0', cosm.get('hubble_constant',1.0)))
             for param in dcosm.keys():
                 pval = dcosm[param]
                 setattr(self, param, pval)
         else:
-            self.cosmological_simulation = 0.0
             kpc_unit = self.parameters.get('dKpcUnit', 1.0)
             self._unit_base['cm'] = 1.0 / (kpc_unit * cm_per_kpc)
 
@@ -220,50 +225,57 @@
         super(TipsyDataset, self)._set_derived_attrs()
 
     def _set_code_unit_attributes(self):
+        # First try to set units based on parameter file
         if self.cosmological_simulation:
             mu = self.parameters.get('dMsolUnit', 1.)
+            self.mass_unit = self.quan(mu, 'Msun')
             lu = self.parameters.get('dKpcUnit', 1000.)
             # In cosmological runs, lengths are stored as length*scale_factor
             self.length_unit = self.quan(lu, 'kpc')*self.scale_factor
-            self.mass_unit = self.quan(mu, 'Msun')
-            density_unit = self.mass_unit/ (self.length_unit/self.scale_factor)**3
-            # If self.comoving is set, we know this is a gasoline data set,
-            # and we do the conversion on the hubble constant.
-            if self.comoving:
-                # Gasoline's hubble constant, dHubble0, is stored units of
-                # proper code time.
-                self.hubble_constant *= np.sqrt(G.in_units(
-                    'kpc**3*Msun**-1*s**-2') * density_unit).value / (
-                    3.2407793e-18)
-            cosmo = Cosmology(self.hubble_constant,
-                              self.omega_matter, self.omega_lambda)
-            self.current_time = cosmo.hubble_time(self.current_redshift)
+            density_unit = self.mass_unit / (self.length_unit / self.scale_factor)**3
+            if 'dHubble0' in self.parameters:
+                # Gasoline's internal hubble constant, dHubble0, is stored in
+                # units of proper code time
+                self.hubble_constant *= np.sqrt(G * density_unit)
+                # Finally, we scale the hubble constant by 100 km/s/Mpc
+                self.hubble_constant /= self.quan(100, 'km/s/Mpc')
+                # If we leave it as a YTQuantity, the cosmology object
+                # used below will add units back on.
+                self.hubble_constant = self.hubble_constant.in_units("").d
         else:
             mu = self.parameters.get('dMsolUnit', 1.0)
             self.mass_unit = self.quan(mu, 'Msun')
             lu = self.parameters.get('dKpcUnit', 1.0)
             self.length_unit = self.quan(lu, 'kpc')
-            density_unit = self.mass_unit / self.length_unit**3
-        self.time_unit = 1.0 / np.sqrt(G * density_unit)
 
         # If unit base is defined by the user, override all relevant units
         if self._unit_base is not None:
-            length = self._unit_base.get('length', self.length_unit)
-            length = self.quan(*length) if isinstance(length, tuple) else self.quan(length)
-            self.length_unit = length
+            for my_unit in ["length", "mass", "time"]:
+                if my_unit in self._unit_base:
+                    my_val = self._unit_base[my_unit]
+                    my_val = \
+                      self.quan(*my_val) if isinstance(my_val, tuple) \
+                      else self.quan(my_val)
+                    setattr(self, "%s_unit" % my_unit, my_val)
 
-            mass = self._unit_base.get('mass', self.mass_unit)
-            mass = self.quan(*mass) if isinstance(mass, tuple) else self.quan(mass)
-            self.mass_unit = mass
+        # Finally, set the dependent units
+        if self.cosmological_simulation:
+            cosmo = Cosmology(self.hubble_constant,
+                              self.omega_matter, self.omega_lambda)
+            self.current_time = cosmo.hubble_time(self.current_redshift)
+            # mass units are rho_crit(z=0) * domain volume
+            mu = cosmo.critical_density(0.0) * \
+              (1 + self.current_redshift)**3 * self.length_unit**3
+            self.mass_unit = self.quan(mu.in_units("Msun"), "Msun")
+            density_unit = self.mass_unit / (self.length_unit / self.scale_factor)**3
+            # need to do this again because we've modified the hubble constant
+            self.unit_registry.modify("h", self.hubble_constant)
+        else:
+            density_unit = self.mass_unit / self.length_unit**3
 
-            density_unit = self.mass_unit / self.length_unit**3
+        if not hasattr(self, "time_unit"):
             self.time_unit = 1.0 / np.sqrt(G * density_unit)
 
-            time = self._unit_base.get('time', self.time_unit)
-            time = self.quan(*time) if isinstance(time, tuple) else self.quan(time)
-            self.time_unit = time
-
-
     @staticmethod
     def _validate_header(filename):
         '''
@@ -305,3 +317,8 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
         return TipsyDataset._validate_header(args[0])[0]
+
+    @property
+    @deprecate(replacement='cosmological_simulation')
+    def comoving(self):
+        return self.cosmological_simulation == 1.0

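The dHubble0 conversion above turns gasoline's internal Hubble parameter, stored in units of proper code time, into the dimensionless little-h: multiply by sqrt(G * density_unit) to restore physical frequency units, then divide by 100 km/s/Mpc. A stand-in check (hypothetical parameter values, mirroring the diff's arithmetic):

    import numpy as np
    import yt.units as u
    from yt.utilities.physical_constants import G

    length_unit = 1000.0 * u.kpc          # dKpcUnit (made-up value)
    mass_unit = 1.0e16 * u.Msun           # dMsolUnit (made-up value)
    density_unit = mass_unit / length_unit**3

    dHubble0 = 2.894405                   # gasoline's internal value (made up)
    hubble_constant = dHubble0 * np.sqrt(G * density_unit)   # now has 1/s units
    hubble_constant = (hubble_constant / (100 * u.km / u.s / u.Mpc)).in_units("")
    print(hubble_constant)                # dimensionless little-h
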
diff -r d6167462a04145b3244aa1eb4e1326e009876a19 -r 63eca5e094aa8e2296152fb6101ea9ebc6577a80 yt/frontends/tipsy/tests/test_outputs.py
--- a/yt/frontends/tipsy/tests/test_outputs.py
+++ b/yt/frontends/tipsy/tests/test_outputs.py
@@ -39,7 +39,7 @@
                                 hubble_constant = 0.702)
     kwargs = dict(field_dtypes = {"Coordinates": "d"},
                   cosmology_parameters = cosmology_parameters,
-                  unit_base = {'length': (1.0/60.0, "Mpccm/h")},
+                  unit_base = {'length': (60.0, "Mpccm/h")},
                   n_ref = 64)
     ds = data_dir_load(pkdgrav, TipsyDataset, (), kwargs)
     yield assert_equal, str(ds), "halo1e11_run1.00400"
@@ -70,7 +70,7 @@
                                 omega_matter = 0.272,
                                 hubble_constant = 0.702)
     kwargs = dict(cosmology_parameters = cosmology_parameters,
-                  unit_base = {'length': (1.0/60.0, "Mpccm/h")},
+                  unit_base = {'length': (60.0, "Mpccm/h")},
                   n_ref = 64)
     ds = data_dir_load(gasoline, TipsyDataset, (), kwargs)
     yield assert_equal, str(ds), "agora_1e11.00400"

diff -r d6167462a04145b3244aa1eb4e1326e009876a19 -r 63eca5e094aa8e2296152fb6101ea9ebc6577a80 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -221,24 +221,29 @@
     if ytcfg.getint("yt", "__topcomm_parallel_rank") > 0: return
     mylog.info(*args)
 
-def deprecate(func):
-    """
-    This decorator issues a deprecation warning.
+def deprecate(replacement):
+    def real_deprecate(func):
+        """
+        This decorator issues a deprecation warning.
 
-    This can be used like so:
+        This can be used like so:
 
-    .. code-block:: python
+        .. code-block:: python
 
-       @deprecate
-       def some_really_old_function(...):
+        @deprecate("new_function")
+        def some_really_old_function(...):
 
-    """
-    @wraps(func)
-    def run_func(*args, **kwargs):
-        warnings.warn("%s has been deprecated and may be removed without notice!" \
-                % func.__name__, DeprecationWarning, stacklevel=2)
-        func(*args, **kwargs)
-    return run_func
+        """
+        @wraps(func)
+        def run_func(*args, **kwargs):
+            message = "%s has been deprecated and may be removed without notice!"
+            if replacement is not None:
+                message += " Use %s instead." % replacement
+            warnings.warn(message % func.__name__, DeprecationWarning,
+                          stacklevel=2)
+            func(*args, **kwargs)
+        return run_func
+    return real_deprecate
 
 def pdb_run(func):
     """

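With the decorator now parameterized, call sites move from bare @deprecate to @deprecate(replacement=...), as the new comoving property above demonstrates. A minimal usage sketch (hypothetical function names):

    import warnings
    from yt.funcs import deprecate

    @deprecate(replacement="new_helper")
    def old_helper():
        pass

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        old_helper()

    # caught[0].message reads: "old_helper has been deprecated and may be
    # removed without notice! Use new_helper instead."
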

https://bitbucket.org/yt_analysis/yt/commits/605b6a92a057/
Changeset:   605b6a92a057
Branch:      stable
User:        chummels
Date:        2015-10-10 03:12:47+00:00
Summary:     LightRay was using the reverse of the LOS velocity.
Affected #:  1 file

diff -r 63eca5e094aa8e2296152fb6101ea9ebc6577a80 -r 605b6a92a057240171259c313d2a58f448501509 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -442,7 +442,8 @@
                     sub_vel = ds.arr([sub_ray['velocity_x'],
                                       sub_ray['velocity_y'],
                                       sub_ray['velocity_z']])
-                    sub_data['velocity_los'].extend((np.rollaxis(sub_vel, 1) *
+                    # line of sight velocity is reversed relative to ray
+                    sub_data['velocity_los'].extend(-1*(np.rollaxis(sub_vel, 1) *
                                                      line_of_sight).sum(axis=1)[asort])
                     del sub_vel
 

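The sign fix is easy to verify by hand: project the velocity components onto the sight line and negate. A stand-in example with two cells (hypothetical values, not part of the commit):

    import numpy as np

    line_of_sight = np.array([0.0, 0.0, 1.0])   # unit vector along the ray
    sub_vel = np.array([[10.0, 20.0],           # velocity_x for two cells
                        [ 0.0,  0.0],           # velocity_y
                        [ 5.0, -5.0]])          # velocity_z

    # np.rollaxis(sub_vel, 1) reshapes the (3, N) component stack to (N, 3)
    # rows; the row-wise product with the sight line summed over axis=1 is a
    # per-cell dot product, and the leading -1 applies the corrected sign.
    velocity_los = -1 * (np.rollaxis(sub_vel, 1) * line_of_sight).sum(axis=1)
    print(velocity_los)   # [-5.  5.]
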

https://bitbucket.org/yt_analysis/yt/commits/2f7fe0d6cc6c/
Changeset:   2f7fe0d6cc6c
Branch:      stable
User:        ngoldbaum
Date:        2015-10-16 00:30:13+00:00
Summary:     Document --answer-name
Affected #:  1 file

diff -r 605b6a92a057240171259c313d2a58f448501509 -r 2f7fe0d6cc6cd63e3c71cec97f7c732b4f1d8166 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -302,18 +302,19 @@
 .. code-block:: bash
 
    $ cd $YT_HG
-   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store frontends.tipsy
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store --answer-name=local-tipsy frontends.tipsy
 
 This command will create a set of local answers from the tipsy frontend tests
 and store them in ``$HOME/Documents/test`` (this can but does not have to be the
 same directory as the ``test_data_dir`` configuration variable defined in your
-``.yt/config`` file). To run the tipsy frontend's answer tests using a different
-yt changeset, update to that changeset, recompile if necessary, and run the
-tests using the following command:
+``.yt/config`` file) in a file named ``local-tipsy``. To run the tipsy
+frontend's answer tests using a different yt changeset, update to that
+changeset, recompile if necessary, and run the tests using the following
+command:
 
 .. code-block:: bash
 
-   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test frontends.tipsy
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-name=local-tipsy frontends.tipsy
 
 The results from a nose testing session are pretty straightforward to
 understand, the results for each test are printed directly to STDOUT.  If a test


https://bitbucket.org/yt_analysis/yt/commits/95c7c7406db0/
Changeset:   95c7c7406db0
Branch:      stable
User:        ngoldbaum
Date:        2015-10-21 18:17:14+00:00
Summary:     Fix loading athena data in python3 with provided parameters
Affected #:  2 files

diff -r 2f7fe0d6cc6cd63e3c71cec97f7c732b4f1d8166 -r 95c7c7406db09f526164b4ffd70e13a7bd4cf365 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -460,7 +460,7 @@
             units_override = {}
         # This is for backwards-compatibility
         already_warned = False
-        for k,v in self.specified_parameters.items():
+        for k, v in list(self.specified_parameters.items()):
             if k.endswith("_unit") and k not in units_override:
                 if not already_warned:
                     mylog.warning("Supplying unit conversions from the parameters dict is deprecated, "+

diff -r 2f7fe0d6cc6cd63e3c71cec97f7c732b4f1d8166 -r 95c7c7406db09f526164b4ffd70e13a7bd4cf365 yt/frontends/athena/tests/test_outputs.py
--- a/yt/frontends/athena/tests/test_outputs.py
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -23,6 +23,8 @@
 from yt.config import ytcfg
 from yt.convenience import load
 
+import yt.units as u
+
 _fields_cloud = ("scalar[0]", "density", "total_energy")
 
 cloud = "ShockCloud/id0/Cloud.0050.vtk"
@@ -62,9 +64,9 @@
 
 sloshing = "MHDSloshing/virgo_low_res.0054.vtk"
 
-uo_sloshing = {"length_unit":(1.0,"Mpc"),
-               "time_unit":(1.0,"Myr"),
-               "mass_unit":(1.0e14,"Msun")}
+uo_sloshing = {"length_unit": (1.0,"Mpc"),
+               "time_unit": (1.0,"Myr"),
+               "mass_unit": (1.0e14,"Msun")}
 
 @requires_file(sloshing)
 def test_nprocs():
@@ -77,6 +79,11 @@
     sp2 = ds2.sphere("c", (100.,"kpc"))
     prj2 = ds1.proj("density",0)
 
+    ds3 = load(sloshing, parameters=uo_sloshing)
+    assert_equal(ds3.length_unit, u.Mpc)
+    assert_equal(ds3.time_unit, u.Myr)
+    assert_equal(ds3.mass_unit, 1e14*u.Msun)
+
     yield assert_equal, sp1.quantities.extrema("pressure"), sp2.quantities.extrema("pressure")
     yield assert_allclose_units, sp1.quantities.total_quantity("pressure"), sp2.quantities.total_quantity("pressure")
     for ax in "xyz":

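The list(...) wrapper matters under Python 3, where dict.items() is a live view of the dictionary: adding or removing keys while iterating raises RuntimeError, whereas list() snapshots the pairs first. A minimal reproduction (stand-in dict):

    params = {"length_unit": (1.0, "Mpc"), "time_unit": (1.0, "Myr")}

    try:
        for k, v in params.items():
            if k.endswith("_unit"):
                del params[k]          # mutating mid-iteration
    except RuntimeError as err:
        print(err)                     # dictionary changed size during iteration

    params = {"length_unit": (1.0, "Mpc"), "time_unit": (1.0, "Myr")}
    for k, v in list(params.items()):  # snapshot first, as the diff does
        if k.endswith("_unit"):
            del params[k]              # safe
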

https://bitbucket.org/yt_analysis/yt/commits/831567c08d6b/
Changeset:   831567c08d6b
Branch:      stable
User:        jzuhone
Date:        2015-10-25 14:18:24+00:00
Summary:     Backporting PR #1831 https://bitbucket.org/yt_analysis/yt/pull-requests/1831
Affected #:  2 files

diff -r 95c7c7406db09f526164b4ffd70e13a7bd4cf365 -r 831567c08d6b1caa6055ddc59d096518edcb30f7 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -492,12 +492,15 @@
     def set_code_units(self):
         super(AthenaDataset, self).set_code_units()
         mag_unit = getattr(self, "magnetic_unit", None)
+        vel_unit = getattr(self, "velocity_unit", None)
         if mag_unit is None:
             self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
                                          (self.time_unit**2 * self.length_unit))
         self.magnetic_unit.convert_to_units("gauss")
-
         self.unit_registry.modify("code_magnetic", self.magnetic_unit)
+        if vel_unit is None:
+            self.velocity_unit = self.length_unit/self.time_unit
+        self.unit_registry.modify("code_velocity", self.velocity_unit)
 
     def _parse_parameter_file(self):
         self._handle = open(self.parameter_filename, "rb")

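The velocity default added above is the dimensionally natural one, code_velocity = code_length / code_time, and the existing magnetic default follows the Gaussian-units relation shown in the diff. A stand-in check with yt unit symbols (hypothetical values, not Athena-specific):

    import numpy as np
    import yt.units as u

    mass_unit = 1.0e14 * u.Msun
    length_unit = 1.0 * u.Mpc
    time_unit = 1.0 * u.Myr

    velocity_unit = length_unit / time_unit
    magnetic_unit = np.sqrt(4 * np.pi * mass_unit /
                            (time_unit**2 * length_unit))
    print(velocity_unit.in_units("km/s"))
    print(magnetic_unit.in_units("gauss"))
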
diff -r 95c7c7406db09f526164b4ffd70e13a7bd4cf365 -r 831567c08d6b1caa6055ddc59d096518edcb30f7 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -201,7 +201,7 @@
                 else:
                     current_field_units = \
                         just_one(current_field.attrs['field_units'])
-                self.field_units[field_name] = current_field_units
+                self.field_units[field_name] = current_field_units.decode("utf8")
             else:
                 self.field_units[field_name] = ""

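The .decode("utf8") is needed because, under Python 3, h5py hands back string-typed attributes as bytes, while downstream unit parsing expects str. A short sketch (hypothetical file and group names):

    import h5py
    import numpy as np

    with h5py.File("demo_units.h5", "w") as f:
        grp = f.create_group("density")
        # h5py stores this as a fixed-length byte string
        grp.attrs["field_units"] = np.string_("g/cm**3")

    with h5py.File("demo_units.h5", "r") as f:
        raw = f["density"].attrs["field_units"]
        print(type(raw))            # <class 'bytes'> under Python 3
        print(raw.decode("utf8"))   # 'g/cm**3'
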
Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


