[yt-svn] commit/yt: xarthisius: Merged in jzuhone/yt-3.x/yt-3.0 (pull request #1001)

commits-noreply at bitbucket.org
Fri Jul 18 08:09:16 PDT 2014


1 new commit in yt:

https://bitbucket.org/yt_analysis/yt/commits/0dd4980aa84e/
Changeset:   0dd4980aa84e
Branch:      yt-3.0
User:        xarthisius
Date:        2014-07-18 17:09:07
Summary:     Merged in jzuhone/yt-3.x/yt-3.0 (pull request #1001)

Some GDF writing work
Affected #:  12 files

diff -r d44a659ea62cc86fb53c327e17e9b70d6e5f53ef -r 0dd4980aa84e80807a030f12b77f07ae9902a935 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -45,6 +45,8 @@
     parallel_objects, parallel_root_only, ParallelAnalysisInterface
 from yt.units.unit_object import Unit
 import yt.geometry.particle_deposit as particle_deposit
+from yt.utilities.grid_data_format.writer import write_to_gdf
+from yt.frontends.stream.api import load_uniform_grid
 
 from yt.fields.field_exceptions import \
     NeedsGridType,\
@@ -571,6 +573,10 @@
     def LeftEdge(self):
         return self.left_edge
 
+    @property
+    def RightEdge(self):
+        return self.right_edge
+
     def deposit(self, positions, fields = None, method = None):
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
         if cls is None:
@@ -581,6 +587,47 @@
         vals = op.finalize()
         return vals.reshape(self.ActiveDimensions, order="C")
 
+    def write_to_gdf(self, gdf_path, fields, nprocs=1, field_units=None,
+                     **kwargs):
+        r"""
+        Write the covering grid data to a GDF file.
+
+        Parameters
+        ----------
+        gdf_path : string
+            Pathname of the GDF file to write.
+        fields : list of strings
+            Fields to write to the GDF file.
+        nprocs : integer, optional
+            Split the covering grid into *nprocs* subgrids before
+            writing to the GDF file. Default: 1
+        field_units : dictionary, optional
+            Dictionary of units to convert fields to. If not set, fields are
+            in their default units.
+        All remaining keyword arguments are passed to
+        yt.utilities.grid_data_format.writer.write_to_gdf.
+
+        Examples
+        --------
+        >>> cube.write_to_gdf("clumps.h5", ["density","temperature"], nprocs=16,
+        ...                   clobber=True)
+        """
+        data = {}
+        for field in fields:
+            if field_units is not None and field in field_units:
+                units = field_units[field]
+            else:
+                units = str(self[field].units)
+            data[field] = (self[field].in_units(units).v, units)
+        le = self.left_edge.v
+        re = self.right_edge.v
+        bbox = np.array([[l,r] for l,r in zip(le, re)])
+        ds = load_uniform_grid(data, self.ActiveDimensions, bbox=bbox,
+                               length_unit=self.pf.length_unit, time_unit=self.pf.time_unit,
+                               mass_unit=self.pf.mass_unit, nprocs=nprocs,
+                               sim_time=self.pf.current_time.v)
+        write_to_gdf(ds, gdf_path, **kwargs)
+
 class YTArbitraryGridBase(YTCoveringGridBase):
     """A 3D region with arbitrary bounds and dimensions.
 

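For reference, a minimal usage sketch of the new covering-grid method
(the dataset path and fields are illustrative; any grid dataset would do):

    import yt

    ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")

    # A level-2 covering grid spanning the full domain.
    cube = ds.covering_grid(level=2, left_edge=ds.domain_left_edge,
                            dims=ds.domain_dimensions * 2**2)

    # Write two fields to GDF, splitting the cube into 16 subgrids and
    # overwriting any existing file; extra keywords are forwarded to
    # yt.utilities.grid_data_format.writer.write_to_gdf.
    cube.write_to_gdf("sloshing.h5", ["density", "temperature"],
                      nprocs=16, clobber=True)
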
diff -r d44a659ea62cc86fb53c327e17e9b70d6e5f53ef -r 0dd4980aa84e80807a030f12b77f07ae9902a935 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -362,6 +362,7 @@
         if storage_filename is None:
             storage_filename = '%s.yt' % filename.split('/')[-1]
         self.storage_filename = storage_filename
+        self.backup_filename = self.filename[:-4] + "_backup.gdf"
         # Unfortunately we now have to mandate that the index gets 
         # instantiated so that we can make sure we have the correct left 
         # and right domain edges.

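The Athena change above derives the GDF backup file name by stripping the
4-character ".vtk" suffix; a quick sketch with an illustrative file name:

    filename = "kh.0010.vtk"
    backup_filename = filename[:-4] + "_backup.gdf"
    # backup_filename == "kh.0010_backup.gdf"
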
diff -r d44a659ea62cc86fb53c327e17e9b70d6e5f53ef -r 0dd4980aa84e80807a030f12b77f07ae9902a935 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -63,7 +63,7 @@
                 self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = \
             self.dds
-
+        self.dds = self.pf.arr(self.dds, "code_length")
 
 class GDFHierarchy(GridIndex):
 
@@ -186,17 +186,26 @@
             elif 'field_units' in current_field.attrs:
                 field_units = current_field.attrs['field_units']
                 if isinstance(field_units, types.StringTypes):
-                    current_fields_unit = current_field.attrs['field_units']
+                    current_field_units = current_field.attrs['field_units']
                 else:
-                    current_fields_unit = \
+                    current_field_units = \
                         just_one(current_field.attrs['field_units'])
                 self.field_units[field_name] = current_field_units
             else:
-                current_fields_unit = ""
+                self.field_units[field_name] = ""
+
+        if "dataset_units" in h5f:
+            for unit_name in h5f["/dataset_units"]:
+                current_unit = h5f["/dataset_units/%s" % unit_name]
+                value = current_unit.value
+                unit = current_unit.attrs["unit"]
+                setattr(self, unit_name, self.quan(value,unit))
+        else:
+            self.length_unit = self.quan(1.0, "cm")
+            self.mass_unit = self.quan(1.0, "g")
+            self.time_unit = self.quan(1.0, "s")
+
         h5f.close()
-        self.length_unit = self.quan(1.0, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.time_unit = self.quan(1.0, "s")
 
     def _parse_parameter_file(self):
         self._handle = h5py.File(self.parameter_filename, "r")

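The new unit-reading branch expects a "/dataset_units" group in which each
unit is a scalar HDF5 dataset carrying its unit string in a "unit"
attribute. A sketch of reading that layout directly with h5py (the file
name is illustrative; .value was the h5py scalar accessor of this era):

    import h5py

    h5f = h5py.File("example.gdf", "r")
    units = {}
    if "dataset_units" in h5f:
        for unit_name in h5f["/dataset_units"]:
            current_unit = h5f["/dataset_units/%s" % unit_name]
            units[unit_name] = (current_unit.value,
                                current_unit.attrs["unit"])
    h5f.close()
    # e.g. units["length_unit"] -> (1.0, "Mpc")
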
diff -r d44a659ea62cc86fb53c327e17e9b70d6e5f53ef -r 0dd4980aa84e80807a030f12b77f07ae9902a935 yt/frontends/gdf/fields.py
--- a/yt/frontends/gdf/fields.py
+++ b/yt/frontends/gdf/fields.py
@@ -25,8 +25,9 @@
 class GDFFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("density", ("g/cm**3", ["density"], None)),
-        ("specific_energy", ("erg / g", ["thermal_energy"], None)),
-        ("pressure", ("", ["pressure"], None)),
+        ("specific_energy", ("erg/g", ["thermal_energy"], None)),
+        ("pressure", ("erg/cm**3", ["pressure"], None)),
+        ("temperature", ("K", ["temperature"], None)),
         ("velocity_x", ("cm/s", ["velocity_x"], None)),
         ("velocity_y", ("cm/s", ["velocity_y"], None)),
         ("velocity_z", ("cm/s", ["velocity_z"], None)),

diff -r d44a659ea62cc86fb53c327e17e9b70d6e5f53ef -r 0dd4980aa84e80807a030f12b77f07ae9902a935 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -337,8 +337,8 @@
 
     def _set_code_unit_attributes(self):
         base_units = self.stream_handler.code_units
-        attrs = ('length_unit', 'mass_unit', 'time_unit', 'velocity_unit')
-        cgs_units = ('cm', 'g', 's', 'cm/s')
+        attrs = ('length_unit', 'mass_unit', 'time_unit', 'velocity_unit', 'magnetic_unit')
+        cgs_units = ('cm', 'g', 's', 'cm/s', 'gauss')
         for unit, attr, cgs_unit in zip(base_units, attrs, cgs_units):
             if isinstance(unit, basestring):
                 uq = self.quan(1.0, unit)
@@ -512,7 +512,8 @@
 
 def load_uniform_grid(data, domain_dimensions, length_unit=None, bbox=None,
                       nprocs=1, sim_time=0.0, mass_unit=None, time_unit=None,
-                      velocity_unit=None, periodicity=(True, True, True),
+                      velocity_unit=None, magnetic_unit=None,
+                      periodicity=(True, True, True),
                       geometry = "cartesian"):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
@@ -551,6 +552,8 @@
         Unit to use for times.  Defaults to unitless.
     velocity_unit : string
         Unit to use for velocities.  Defaults to unitless.
+    magnetic_unit : string
+        Unit to use for magnetic fields. Defaults to unitless.
     periodicity : tuple of booleans
         Determines whether the data will be treated as periodic along
         each axis
@@ -640,6 +643,8 @@
         time_unit = 'code_time'
     if velocity_unit is None:
         velocity_unit = 'code_velocity'
+    if magnetic_unit is None:
+        magnetic_unit = 'code_magnetic'
 
     handler = StreamHandler(
         grid_left_edges,
@@ -651,7 +656,7 @@
         np.zeros(nprocs).reshape((nprocs,1)),
         sfh,
         field_units,
-        (length_unit, mass_unit, time_unit, velocity_unit),
+        (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
         particle_types=particle_types,
         periodicity=periodicity
     )
@@ -685,7 +690,8 @@
 def load_amr_grids(grid_data, domain_dimensions,
                    field_units=None, bbox=None, sim_time=0.0, length_unit=None,
                    mass_unit=None, time_unit=None, velocity_unit=None,
-                   periodicity=(True, True, True), geometry = "cartesian"):
+                   magnetic_unit=None, periodicity=(True, True, True),
+                   geometry = "cartesian"):
     r"""Load a set of grids of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
     This should allow a sequence of grids of varying resolution of data to be
@@ -723,6 +729,8 @@
         Unit to use for times.  Defaults to unitless.
     velocity_unit : string or float
         Unit to use for velocities.  Defaults to unitless.
+    magnetic_unit : string or float
+        Unit to use for magnetic fields.  Defaults to unitless.
     bbox : array_like (xdim:zdim, LE:RE), optional
         Size of computational domain in units specified by length_unit.
         Defaults to a cubic unit-length domain.
@@ -806,6 +814,8 @@
         time_unit = 'code_time'
     if velocity_unit is None:
         velocity_unit = 'code_velocity'
+    if magnetic_unit is None:
+        magnetic_unit = 'code_magnetic'
 
     handler = StreamHandler(
         grid_left_edges,
@@ -817,7 +827,7 @@
         np.zeros(ngrids).reshape((ngrids,1)),
         sfh,
         field_units,
-        (length_unit, mass_unit, time_unit, velocity_unit),
+        (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
         particle_types=set_particle_types(grid_data[0])
     )
 
@@ -969,7 +979,8 @@
 
 def load_particles(data, length_unit = None, bbox=None,
                    sim_time=0.0, mass_unit = None, time_unit = None,
-                   velocity_unit=None, periodicity=(True, True, True),
+                   velocity_unit=None, magnetic_unit=None,
+                   periodicity=(True, True, True),
                    n_ref = 64, over_refine_factor = 1, geometry = "cartesian"):
     r"""Load a set of particles into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamParticleHandler`.
@@ -997,6 +1008,10 @@
         Conversion factor from simulation mass units to grams
     time_unit : float
         Conversion factor from simulation time units to seconds
+    velocity_unit : float
+        Conversion factor from simulation velocity units to cm/s
+    magnetic_unit : float
+        Conversion factor from simulation magnetic units to gauss
     bbox : array_like (xdim:zdim, LE:RE), optional
         Size of computational domain in units sim_unit_to_cm
     sim_time : float, optional
@@ -1057,6 +1072,8 @@
         time_unit = 'code_time'
     if velocity_unit is None:
         velocity_unit = 'code_velocity'
+    if magnetic_unit is None:
+        magnetic_unit = 'code_magnetic'
 
     # I'm not sure we need any of this.
     handler = StreamHandler(
@@ -1069,7 +1086,7 @@
         np.zeros(nprocs).reshape((nprocs,1)),
         sfh,
         field_units,
-        (length_unit, mass_unit, time_unit, velocity_unit),
+        (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
         particle_types=particle_types,
         periodicity=periodicity
     )
@@ -1141,7 +1158,8 @@
 def load_hexahedral_mesh(data, connectivity, coordinates,
                          length_unit = None, bbox=None, sim_time=0.0,
                          mass_unit = None, time_unit = None,
-                         velocity_unit = None, periodicity=(True, True, True),
+                         velocity_unit = None, magnetic_unit = None,
+                         periodicity=(True, True, True),
                          geometry = "cartesian"):
     r"""Load a hexahedral mesh of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
@@ -1210,6 +1228,8 @@
         time_unit = 'code_time'
     if velocity_unit is None:
         velocity_unit = 'code_velocity'
+    if magnetic_unit is None:
+        magnetic_unit = 'code_magnetic'
 
     # I'm not sure we need any of this.
     handler = StreamHandler(
@@ -1222,7 +1242,7 @@
         np.zeros(nprocs).reshape((nprocs,1)),
         sfh,
         field_units,
-        (length_unit, mass_unit, time_unit, velocity_unit),
+        (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
         particle_types=particle_types,
         periodicity=periodicity
     )

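A minimal sketch of the new magnetic_unit keyword to load_uniform_grid
(the data values are illustrative; plain arrays are interpreted in code
units, so with magnetic_unit="gauss" the field below is in gauss):

    import numpy as np
    from yt.frontends.stream.api import load_uniform_grid

    data = {"density": np.random.random((32, 32, 32)),
            "magnetic_field_z": np.ones((32, 32, 32))}
    ds = load_uniform_grid(data, (32, 32, 32),
                           length_unit="Mpc",
                           magnetic_unit="gauss")
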
diff -r d44a659ea62cc86fb53c327e17e9b70d6e5f53ef -r 0dd4980aa84e80807a030f12b77f07ae9902a935 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -409,3 +409,10 @@
             """ % (self.field,)
         r += "\n".join([c for c in self.conditions])
         return r
+
+class YTGDFAlreadyExists(Exception):
+    def __init__(self, filename):
+        self.filename = filename
+
+    def __str__(self):
+        return "A file already exists at %s and clobber=False." % self.filename
\ No newline at end of file

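A sketch of handling the new exception when writing to a path that may
already exist (the dataset path and output file name are illustrative):

    import yt
    from yt.utilities.exceptions import YTGDFAlreadyExists
    from yt.utilities.grid_data_format.writer import write_to_gdf

    ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")

    try:
        write_to_gdf(ds, "clumps.h5")
    except YTGDFAlreadyExists:
        # The file exists and clobber defaulted to False;
        # explicitly allow overwriting.
        write_to_gdf(ds, "clumps.h5", clobber=True)
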
diff -r d44a659ea62cc86fb53c327e17e9b70d6e5f53ef -r 0dd4980aa84e80807a030f12b77f07ae9902a935 yt/utilities/grid_data_format/writer.py
--- a/yt/utilities/grid_data_format/writer.py
+++ b/yt/utilities/grid_data_format/writer.py
@@ -18,11 +18,12 @@
 import numpy as np
 
 from yt import __version__ as yt_version
-
+from yt.utilities.exceptions import YTGDFAlreadyExists
 
 def write_to_gdf(pf, gdf_path, data_author=None, data_comment=None,
-                 particle_type_name="dark_matter"):
-    """
+                 dataset_units=None, particle_type_name="dark_matter",
+                 clobber=False):
+    r"""
     Write a parameter file to the given path in the Grid Data Format.
 
     Parameters
@@ -31,11 +32,38 @@
         The yt data to write out.
     gdf_path : string
         The path of the file to output.
+    data_author : string, optional
+        The name of the author who wrote the data. Default: None.
+    data_comment : string, optional
+        A descriptive comment. Default: None.
+    dataset_units : dictionary, optional
+        A dictionary of (value, unit) tuples to set the default units
+        of the dataset. Keys can be:
+            "length_unit"
+            "time_unit"
+            "mass_unit"
+            "velocity_unit"
+            "magnetic_unit"
+        If not specified, these will carry over from the parent
+        dataset.
+    particle_type_name : string, optional
+        The name of the particle type in the dataset. Default: "dark_matter"
+    clobber : boolean, optional
+        Whether or not to overwrite an existing file. If False and the
+        file exists, an exception is raised. Default: False.
 
+    Examples
+    --------
+    >>> dataset_units = {"length_unit":(1.0,"Mpc"),
+    ...                  "time_unit":(1.0,"Myr")}
+    >>> write_to_gdf(ds, "clumps.h5", data_author="John ZuHone",
+    ...              dataset_units=dataset_units,
+    ...              data_comment="My Really Cool Dataset", clobber=True)
     """
 
     f = _create_new_gdf(pf, gdf_path, data_author, data_comment,
-                        particle_type_name)
+                        dataset_units=dataset_units,
+                        particle_type_name=particle_type_name, clobber=clobber)
 
     # now add the fields one-by-one
     for field_name in pf.field_list:
@@ -102,7 +130,7 @@
 
     # grab the display name and units from the field info container.
     display_name = fi.display_name
-    units = fi.get_units()
+    units = fi.units
 
     # check that they actually contain something...
     if display_name:
@@ -113,8 +141,6 @@
         sg.attrs["field_units"] = units
     else:
         sg.attrs["field_units"] = "None"
-    # @todo: the values must be in CGS already right?
-    sg.attrs["field_to_cgs"] = 1.0
     # @todo: is this always true?
     sg.attrs["staggering"] = 0
 
@@ -134,21 +160,22 @@
         # Check if this is a real field or particle data.
         grid.get_data(field_name)
         if fi.particle_type:  # particle data
-            pt_group[field_name] = grid[field_name]
+            pt_group[field_name] = grid[field_name].in_units(units)
         else:  # a field
-            grid_group[field_name] = grid[field_name]
+            grid_group[field_name] = grid[field_name].in_units(units)
 
 
 def _create_new_gdf(pf, gdf_path, data_author=None, data_comment=None,
-                    particle_type_name="dark_matter"):
+                    dataset_units=None, particle_type_name="dark_matter",
+                    clobber=False):
+
     # Make sure we have the absolute path to the file first
     gdf_path = os.path.abspath(gdf_path)
 
-    # Stupid check -- is the file already there?
-    # @todo: make this a specific exception/error.
-    if os.path.exists(gdf_path):
-        raise IOError("A file already exists in the location: %s. Please \
-                      provide a new one or remove that file." % gdf_path)
+    # Is the file already there? If so, are we allowing
+    # clobbering?
+    if os.path.exists(gdf_path) and not clobber:
+        raise YTGDFAlreadyExists(gdf_path)
 
     ###
     # Create and open the file with h5py
@@ -184,13 +211,27 @@
     g.attrs["field_ordering"] = 0
     # @todo: not yet supported by yt.
     g.attrs["boundary_conditions"] = np.array([0, 0, 0, 0, 0, 0], 'int32')
-
     if pf.cosmological_simulation:
         g.attrs["current_redshift"] = pf.current_redshift
         g.attrs["omega_matter"] = pf.omega_matter
         g.attrs["omega_lambda"] = pf.omega_lambda
         g.attrs["hubble_constant"] = pf.hubble_constant
 
+    if dataset_units is None:
+        dataset_units = {}
+
+    g = f.create_group("dataset_units")
+    for u in ["length","time","mass","velocity","magnetic"]:
+        unit_name = u+"_unit"
+        if unit_name in dataset_units:
+            value, units = dataset_units[unit_name]
+        else:
+            attr = getattr(pf, unit_name)
+            value = float(attr)
+            units = str(attr.units)
+        d = g.create_dataset(unit_name, data=value)
+        d.attrs["unit"] = units
+
     ###
     # "field_types" group
     ###
@@ -212,7 +253,7 @@
     f["grid_left_index"] = np.array(
         [grid.get_global_startindex() for grid in pf.index.grids]
     ).reshape(pf.index.grid_dimensions.shape[0], 3)
-    f["grid_level"] = pf.index.grid_levels
+    f["grid_level"] = pf.index.grid_levels.flat
     # @todo: Fill with proper values
     f["grid_parent_id"] = -np.ones(pf.index.grid_dimensions.shape[0])
     f["grid_particle_count"] = pf.index.grid_particle_count

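A round-trip sketch of the new dataset_units keyword: override the stored
length and time units, then reload the file through the GDF frontend
(paths and values are illustrative):

    import yt
    from yt.utilities.grid_data_format.writer import write_to_gdf

    ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")
    dataset_units = {"length_unit": (1.0, "Mpc"),
                     "time_unit": (1.0, "Myr")}
    write_to_gdf(ds, "sloshing_gdf.h5", dataset_units=dataset_units,
                 clobber=True)

    # The units come back via the new reader branch above.
    ds_gdf = yt.load("sloshing_gdf.h5")
    # ds_gdf.length_unit -> 1.0 Mpc
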
Repository URL: https://bitbucket.org/yt_analysis/yt/
