[yt-svn] commit/yt: MatthewTurk: Merged in hyschive/yt-hyschive (pull request #2163)
commits-noreply at bitbucket.org
commits-noreply at bitbucket.org
Wed May 11 11:30:16 PDT 2016
1 new commit in yt:
https://bitbucket.org/yt_analysis/yt/commits/b61ebaeaae2e/
Changeset: b61ebaeaae2e
Branch: yt
User: MatthewTurk
Date: 2016-05-11 18:30:09+00:00
Summary: Merged in hyschive/yt-hyschive (pull request #2163)
Updating the _skeleton frontend
Affected #: 4 files
diff -r 87f89bdc4c237e8319a35335c5f8875c517f63c8 -r b61ebaeaae2e05c7e2aa6757ff23465c5cf2681d doc/source/developing/creating_frontend.rst
--- a/doc/source/developing/creating_frontend.rst
+++ b/doc/source/developing/creating_frontend.rst
@@ -34,7 +34,8 @@
`yt-dev <http://lists.spacepope.org/listinfo.cgi/yt-dev-spacepope.org>`_!
To get started, make a new directory in ``yt/frontends`` with the name
-of your code. Copying the contents of the ``yt/frontends/_skeleton``
+of your code and add the name into ``yt/frontends/api.py``.
+Copying the contents of the ``yt/frontends/_skeleton``
directory will add a lot of boilerplate for the required classes and
methods that are needed. In particular, you'll have to create a
subclass of ``Dataset`` in the data_structures.py file. This subclass
diff -r 87f89bdc4c237e8319a35335c5f8875c517f63c8 -r b61ebaeaae2e05c7e2aa6757ff23465c5cf2681d yt/frontends/_skeleton/data_structures.py
--- a/yt/frontends/_skeleton/data_structures.py
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -14,6 +14,8 @@
#-----------------------------------------------------------------------------
import os
+import numpy as np
+import weakref
from yt.data_objects.grid_patch import \
AMRGridPatch
@@ -25,15 +27,12 @@
class SkeletonGrid(AMRGridPatch):
_id_offset = 0
- def __init__(self, id, index, level, start, dimensions):
+ def __init__(self, id, index, level):
AMRGridPatch.__init__(self, id, filename=index.index_filename,
index=index)
- self.Parent = []
+ self.Parent = None
self.Children = []
self.Level = level
- self.start_index = start.copy()
- self.stop_index = self.start_index + dimensions
- self.ActiveDimensions = dimensions.copy()
def __repr__(self):
return "SkeletonGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
@@ -43,14 +42,17 @@
def __init__(self, ds, dataset_type='skeleton'):
self.dataset_type = dataset_type
+ self.dataset = weakref.proxy(ds)
# for now, the index file is the dataset!
self.index_filename = self.dataset.parameter_filename
self.directory = os.path.dirname(self.index_filename)
+ # float type for the simulation edges and must be float64 now
+ self.float_type = np.float64
GridIndex.__init__(self, ds, dataset_type)
def _detect_output_fields(self):
# This needs to set a self.field_list that contains all the available,
- # on-disk fields.
+ # on-disk fields. No derived fields should be defined here.
# NOTE: Each should be a tuple, where the first element is the on-disk
# fluid type or particle type. Convention suggests that the on-disk
# fluid type is usually the dataset_type and the on-disk particle type
@@ -69,7 +71,7 @@
# self.grid_particle_count (N, 1) <= int
# self.grid_levels (N, 1) <= int
# self.grids (N, 1) <= grid objects
- #
+ # self.max_level = self.grid_levels.max()
pass
def _populate_grid_objects(self):
@@ -94,6 +96,8 @@
Dataset.__init__(self, filename, dataset_type,
units_override=units_override)
self.storage_filename = storage_filename
+ # refinement factor between a grid and its subgrid
+ # self.refine_by = 2
def _set_code_unit_attributes(self):
# This is where quantities are created that represent the various
@@ -114,10 +118,11 @@
def _parse_parameter_file(self):
# This needs to set up the following items. Note that these are all
# assumed to be in code units; domain_left_edge and domain_right_edge
- # will be updated to be in code units at a later time. This includes
- # the cosmological parameters.
+ # will be converted to YTArray automatically at a later time.
+ # This includes the cosmological parameters.
#
- # self.unique_identifier
+ # self.unique_identifier <= unique identifier for the dataset
+ # being read (e.g., UUID or ST_CTIME)
# self.parameters <= full of code-specific items of use
# self.domain_left_edge <= array of float64
# self.domain_right_edge <= array of float64
diff -r 87f89bdc4c237e8319a35335c5f8875c517f63c8 -r b61ebaeaae2e05c7e2aa6757ff23465c5cf2681d yt/frontends/_skeleton/fields.py
--- a/yt/frontends/_skeleton/fields.py
+++ b/yt/frontends/_skeleton/fields.py
@@ -31,13 +31,14 @@
# ( "name", ("units", ["fields", "to", "alias"], # "display_name")),
)
- def __init__(self, ds):
- super(SkeletonFieldInfo, self).__init__(ds)
+ def __init__(self, ds, field_list):
+ super(SkeletonFieldInfo, self).__init__(ds, field_list)
# If you want, you can check self.field_list
def setup_fluid_fields(self):
# Here we do anything that might need info about the dataset.
- # You can use self.alias, self.add_output_field and self.add_field .
+ # You can use self.alias, self.add_output_field (for on-disk fields)
+ # and self.add_field (for derived fields).
pass
def setup_particle_fields(self, ptype):
diff -r 87f89bdc4c237e8319a35335c5f8875c517f63c8 -r b61ebaeaae2e05c7e2aa6757ff23465c5cf2681d yt/frontends/_skeleton/io.py
--- a/yt/frontends/_skeleton/io.py
+++ b/yt/frontends/_skeleton/io.py
@@ -42,9 +42,18 @@
# dict gets returned at the end and it should be flat, with selected
# data. Note that if you're reading grid data, you might need to
# special-case a grid selector object.
+ # Also note that "chunks" is a generator for multiple chunks, each of
+ # which contains a list of grids. The returned numpy arrays should be
+ # in 64-bit float and contiguous along the z direction. Therefore, for
+ # a C-like input array with the dimension [x][y][z] or a
+ # Fortran-like input array with the dimension (z,y,x), a matrix
+ # transpose is required (e.g., using np_array.transpose() or
+ # np_array.swapaxes(0,2)).
pass
def _read_chunk_data(self, chunk, fields):
- # This reads the data from a single chunk, and is only used for
- # caching.
+ # This reads the data from a single chunk without doing any selection,
+ # and is only used for caching data that might be used by multiple
+ # different selectors later. For instance, this can speed up ghost zone
+ # computation.
pass
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this message because you have the commit-notification service enabled
for this repository, and you are listed as a recipient for it.
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://lists.spacepope.org/pipermail/yt-svn-spacepope.org/attachments/20160511/9e49fbd4/attachment.html>
More information about the yt-svn
mailing list