[yt-svn] commit/yt: 2 new changesets

commits-noreply at bitbucket.org
Sat Apr 19 01:29:10 PDT 2014


2 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/836ea7cd85a9/
Changeset:   836ea7cd85a9
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-18 20:52:45
Summary:     Updating skeleton frontend.
Affected #:  5 files

diff -r ceff32eb3b9ffea19fa635ae58e3b3fd1c01681e -r 836ea7cd85a92a6cc0c7d21221a633e97678d677 yt/frontends/_skeleton/api.py
--- a/yt/frontends/_skeleton/api.py
+++ b/yt/frontends/_skeleton/api.py
@@ -20,7 +20,7 @@
 
 from .fields import \
       SkeletonFieldInfo, \
-      add_flash_field
+      add_skeleton_field
 
 from .io import \
       IOHandlerSkeleton

diff -r ceff32eb3b9ffea19fa635ae58e3b3fd1c01681e -r 836ea7cd85a92a6cc0c7d21221a633e97678d677 yt/frontends/_skeleton/data_structures.py
--- a/yt/frontends/_skeleton/data_structures.py
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -13,36 +13,30 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
-import stat
 import numpy as np
-import weakref
 
-from yt.funcs import *
 from yt.data_objects.grid_patch import \
     AMRGridPatch
-from yt.data_objects.index import \
-    AMRHierarchy
+from yt.geometry.grid_geometry_handler import \
+    GridIndex
 from yt.data_objects.static_output import \
     Dataset
-from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
-from yt.utilities.io_handler import \
-    io_registry
-from yt.utilities.physical_constants import cm_per_mpc
-from .fields import SkeletonFieldInfo, add_flash_field, KnownSkeletonFields
-from yt.fields.field_info_container import \
-    FieldInfoContainer, NullFunc, ValidateDataField, TranslationFunc
+from yt.utilities.lib.misc_utilities import \
+    get_box_grids_level
 
 class SkeletonGrid(AMRGridPatch):
     _id_offset = 0
-    #__slots__ = ["_level_id", "stop_index"]
-    def __init__(self, id, index, level):
-        AMRGridPatch.__init__(self, id, filename = index.index_filename,
-                              index = index)
-        self.Parent = None
+    def __init__(self, id, index, level, start, dimensions):
+        AMRGridPatch.__init__(self, id, filename=index.index_filename,
+                              index=index)
+        self.Parent = []
         self.Children = []
         self.Level = level
+        self.start_index = start.copy()
+        self.stop_index = self.start_index + dimensions
+        self.ActiveDimensions = dimensions.copy()
 
     def __repr__(self):
         return "SkeletonGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
@@ -50,7 +44,6 @@
 class SkeletonHierarchy(AMRHierarchy):
 
     grid = SkeletonGrid
-    float_type = np.float64
     
     def __init__(self, pf, dataset_type='skeleton'):
         self.dataset_type = dataset_type
@@ -66,6 +59,10 @@
     def _detect_output_fields(self):
         # This needs to set a self.field_list that contains all the available,
         # on-disk fields.
+        # NOTE: Each should be a tuple, where the first element is the on-disk
+        # fluid type or particle type.  Convention suggests that the on-disk
+        # fluid type is usually the dataset_type and the on-disk particle type
+        # (for a single population of particles) is "io".
         pass
     
     def _count_grids(self):
@@ -96,30 +93,34 @@
 
 class SkeletonDataset(Dataset):
     _index_class = SkeletonHierarchy
-    _fieldinfo_fallback = SkeletonFieldInfo
-    _fieldinfo_known = KnownSkeletonFields
-    _handle = None
+    _field_info_class = SkeletonFieldInfo
     
-    def __init__(self, filename, dataset_type='skeleton',
-                 storage_filename = None,
-                 conversion_override = None):
-
-        if conversion_override is None: conversion_override = {}
-        self._conversion_override = conversion_override
-
+    def __init__(self, filename, dataset_type='skeleton', storage_filename=None):
+        self.fluid_types += ('skeleton',)
         Dataset.__init__(self, filename, dataset_type)
         self.storage_filename = storage_filename
 
-    def _set_units(self):
-        # This needs to set up the dictionaries that convert from code units to
-        # CGS.  The needed items are listed in the second entry:
-        #   self.time_units         <= sec_conversion
-        #   self.conversion_factors <= mpc_conversion
-        #   self.units              <= On-disk fields
+    def _set_code_unit_attributes(self):
+        # This is where quantities are created that represent the various
+        # on-disk units.  These are the currently available quantities which
+        # should be set, along with examples of how to set them to standard
+        # values.
+        #
+        # self.length_unit = self.quan(1.0, "cm")
+        # self.mass_unit = self.quan(1.0, "g")
+        # self.time_unit = self.quan(1.0, "s")
+        #
+        # These can also be set:
+        # self.velocity_unit = self.quan(1.0, "cm/s")
+        # self.magnetic_unit = self.quan(1.0, "gauss")
         pass
 
     def _parse_parameter_file(self):
-        # This needs to set up the following items:
+        # This needs to set up the following items.  Note that these are all
+        # assumed to be in code units; domain_left_edge and domain_right_edge
+        # will be converted into unitful arrays at a later time.  This includes
+        # the cosmological parameters.
         #
         #   self.unique_identifier
         #   self.parameters             <= full of code-specific items of use
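
For anyone adapting this skeleton, a minimal sketch of how the stubs in
data_structures.py might be filled in follows.  The parameter keys, the
_read_header helper, and the field names are hypothetical; the attribute
names themselves are the standard Dataset and hierarchy ones referenced
in the comments above.

    import os
    import numpy as np
    from yt.frontends._skeleton.data_structures import \
        SkeletonDataset, SkeletonHierarchy

    def _read_header(filename):
        # Hypothetical stand-in for parsing a real on-disk header.
        return {"unit_l": 1.0, "unit_m": 1.0, "unit_t": 1.0,
                "dims": (64, 64, 64), "time": 0.0}

    class MyCodeHierarchy(SkeletonHierarchy):
        def _detect_output_fields(self):
            # (on-disk type, field name) tuples; the fluid type matches
            # dataset_type and a single particle population is "io".
            self.field_list = [("skeleton", "dens"), ("skeleton", "velx"),
                               ("io", "particle_mass")]

    class MyCodeDataset(SkeletonDataset):
        _index_class = MyCodeHierarchy

        def _set_code_unit_attributes(self):
            # Wrap raw conversion factors from the header as quantities.
            self.length_unit = self.quan(self.parameters["unit_l"], "cm")
            self.mass_unit = self.quan(self.parameters["unit_m"], "g")
            self.time_unit = self.quan(self.parameters["unit_t"], "s")

        def _parse_parameter_file(self):
            self.parameters.update(_read_header(self.parameter_filename))
            self.unique_identifier = \
                int(os.stat(self.parameter_filename).st_ctime)
            self.dimensionality = 3
            self.domain_left_edge = np.zeros(3, dtype="float64")
            self.domain_right_edge = np.ones(3, dtype="float64")
            self.domain_dimensions = \
                np.array(self.parameters["dims"], dtype="int64")
            self.periodicity = (True, True, True)
            self.current_time = self.parameters["time"]
            # A non-cosmological dataset zeroes the cosmology attributes:
            self.cosmological_simulation = 0
            self.current_redshift = 0.0
            self.omega_lambda = 0.0
            self.omega_matter = 0.0
            self.hubble_constant = 0.0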

diff -r ceff32eb3b9ffea19fa635ae58e3b3fd1c01681e -r 836ea7cd85a92a6cc0c7d21221a633e97678d677 yt/frontends/_skeleton/definitions.py
--- a/yt/frontends/_skeleton/definitions.py
+++ b/yt/frontends/_skeleton/definitions.py
@@ -0,0 +1,1 @@
+# This file is often empty.  It can hold definitions related to a frontend.

diff -r ceff32eb3b9ffea19fa635ae58e3b3fd1c01681e -r 836ea7cd85a92a6cc0c7d21221a633e97678d677 yt/frontends/_skeleton/fields.py
--- a/yt/frontends/_skeleton/fields.py
+++ b/yt/frontends/_skeleton/fields.py
@@ -13,79 +13,35 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import numpy as np
+from yt.funcs import mylog
 from yt.fields.field_info_container import \
-    FieldInfoContainer, \
-    NullFunc, \
-    TranslationFunc, \
-    FieldInfo, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType
-from yt.utilities.physical_constants import \
-    kboltz
+    FieldInfoContainer
 
-# The first field container is where any fields that exist on disk go, along
-# with their conversion factors, display names, etc.
+# We need to specify which fields we might have in our dataset.  The field info
+# container subclass here will define which fields it knows about.  There are
+# optional methods on it that get called and can be overridden.
 
-KnownSkeletonFields = FieldInfoContainer()
-add_skeleton_field = KnownSkeletonFields.add_field
+class SkeletonFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+        # Each entry here is of the form
+        # ( "name", ("units", ["fields", "to", "alias"], # "display_name")),
+    )
 
-SkeletonFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_field = SkeletonFieldInfo.add_field
+    known_particle_fields = (
+        # Identical form to above
+        # ( "name", ("units", ["fields", "to", "alias"], # "display_name")),
+    )
 
-# Often, we want to translate between fields on disk and fields in yt.  This
-# construct shows how to do that.  Note that we use TranslationFunc.
+    def __init__(self, pf):
+        super(SkeletonFieldInfo, self).__init__(pf)
+        # If you want, you can check self.field_list
 
-translation_dict = {"x-velocity": "velx",
-                    "y-velocity": "vely",
-                    "z-velocity": "velz",
-                    "Density": "dens",
-                    "Temperature": "temp",
-                    "Pressure" : "pres", 
-                    "Grav_Potential" : "gpot",
-                    "particle_position_x" : "particle_posx",
-                    "particle_position_y" : "particle_posy",
-                    "particle_position_z" : "particle_posz",
-                    "particle_velocity_x" : "particle_velx",
-                    "particle_velocity_y" : "particle_vely",
-                    "particle_velocity_z" : "particle_velz",
-                    "particle_index" : "particle_tag",
-                    "Electron_Fraction" : "elec",
-                    "HI_Fraction" : "h   ",
-                    "HD_Fraction" : "hd  ",
-                    "HeI_Fraction": "hel ",
-                    "HeII_Fraction": "hep ",
-                    "HeIII_Fraction": "hepp",
-                    "HM_Fraction": "hmin",
-                    "HII_Fraction": "hp  ",
-                    "H2I_Fraction": "htwo",
-                    "H2II_Fraction": "htwp",
-                    "DI_Fraction": "deut",
-                    "DII_Fraction": "dplu",
-                    "ParticleMass": "particle_mass",
-                    "Flame_Fraction": "flam"}
+    def setup_fluid_fields(self):
+        # Here we do anything that might need info about the parameter file.
+        # You can use self.alias, self.add_output_field, and self.add_field.
+        pass
 
-for f,v in translation_dict.items():
-    if v not in KnownSkeletonFields:
-        pfield = v.startswith("particle")
-        add_skeleton_field(v, function=NullFunc, take_log=False,
-                  validators = [ValidateDataField(v)],
-                  particle_type = pfield)
-    if f.endswith("_Fraction") :
-        dname = "%s\/Fraction" % f.split("_")[0]
-    else :
-        dname = f                    
-    ff = KnownSkeletonFields[v]
-    pfield = f.startswith("particle")
-    add_field(f, TranslationFunc(v),
-              take_log=KnownSkeletonFields[v].take_log,
-              units = ff.units, display_name=dname,
-              particle_type = pfield)
-
-# Here's an example of adding a new field:
-
-add_skeleton_field("dens", function=NullFunc, take_log=True,
-                convert_function=_get_convert("dens"),
-                units=r"g / cm**3")
+    def setup_particle_fields(self, ptype):
+        # This will get called for every particle type.
+        pass
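
To make the entry format above concrete, a filled-in container for a
hypothetical frontend might look like the following; the on-disk names
("dens", "velx") and their units are invented, and the derived field is
only an example of what setup_fluid_fields can do:

    from yt.fields.field_info_container import FieldInfoContainer

    class MyCodeFieldInfo(FieldInfoContainer):
        known_other_fields = (
            # ("on-disk name", ("units", [aliases], "display name")),
            ("dens", ("g/cm**3", ["density"], None)),
            ("velx", ("cm/s", ["velocity_x"], r"v_{x}")),
        )

        known_particle_fields = (
            ("particle_mass", ("g", [], None)),
        )

        def setup_fluid_fields(self):
            # Anything needing the parameter file happens here; add_field,
            # add_output_field, and alias are all available.
            def _momentum_x(field, data):
                return data["gas", "density"] * data["gas", "velocity_x"]
            self.add_field(("gas", "momentum_x"), function=_momentum_x,
                           units="g/cm**2/s")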

diff -r ceff32eb3b9ffea19fa635ae58e3b3fd1c01681e -r 836ea7cd85a92a6cc0c7d21221a633e97678d677 yt/frontends/_skeleton/io.py
--- a/yt/frontends/_skeleton/io.py
+++ b/yt/frontends/_skeleton/io.py
@@ -23,12 +23,31 @@
     _particle_reader = False
     _dataset_type = "skeleton"
 
-    def _read_data(self, grid, field):
-        # This must return the array, of size/shape grid.ActiveDimensions, that
-        # corresponds to 'field'.
+    def _read_particle_coords(self, chunks, ptf):
+        # This needs to *yield* a series of tuples of (ptype, (x, y, z)).
+        # chunks is a list of chunks, and ptf is a dict where the keys are
+        # ptypes and the values are lists of fields.
         pass
 
-    def _read_data_slice(self, grid, field, axis, coord):
-        # If this is not implemented, the IO handler will just slice a
-        # _read_data item.
+    def _read_particle_fields(self, chunks, ptf, selector):
+        # This gets called after the arrays have been allocated.  It needs to
+        # yield ((ptype, field), data) where data is the masked result of
+        # reading ptype, field and applying the selector to the data read in.
+        # Selector objects have a .select_points(x,y,z) that returns a mask, so
+        # you need to do your masking here.
         pass
+
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        # This needs to allocate a set of arrays inside a dictionary, where the
+        # keys are the (ftype, fname) tuples and the values are arrays that
+        # have been masked using whatever selector method is appropriate.  The
+        # dict gets returned at the end and it should be flat, with selected
+        # data.  Note that if you're reading grid data, you might need to
+        # special-case a grid selector object.
+        pass
+
+    def _read_chunk_data(self, chunk, fields):
+        # This reads the data from a single chunk, and is only used for
+        # caching.
+        pass
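
As a concrete illustration of the selection pattern described above,
modeled on what other grid frontends of this era do: the HDF5 layout
("/grid_%04i/<field>", "/particles/<ptype>/positions") is invented, and
the handler is assumed to keep a reference to the dataset as self.pf.

    import h5py
    import numpy as np
    from yt.frontends._skeleton.io import IOHandlerSkeleton

    class MyCodeIOHandler(IOHandlerSkeleton):
        def _read_fluid_selection(self, chunks, selector, fields, size):
            # One flat output array per (ftype, fname), filled grid by grid.
            chunks = list(chunks)
            rv = {}
            offsets = {}
            for field in fields:
                rv[field] = np.empty(size, dtype="float64")
                offsets[field] = 0
            with h5py.File(self.pf.parameter_filename, "r") as f:
                for chunk in chunks:
                    for grid in chunk.objs:
                        for field in fields:
                            ftype, fname = field
                            data = f["/grid_%04i/%s" % (grid.id, fname)][()]
                            # grid.select copies only the selected zones
                            # into rv and returns how many it copied.
                            offsets[field] += grid.select(
                                selector, data, rv[field], offsets[field])
            return rv

        def _read_particle_coords(self, chunks, ptf):
            # Yield (ptype, (x, y, z)) for each requested particle type.
            with h5py.File(self.pf.parameter_filename, "r") as f:
                for ptype in ptf:
                    pos = f["/particles/%s/positions" % ptype][()]
                    yield ptype, (pos[:, 0], pos[:, 1], pos[:, 2])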


https://bitbucket.org/yt_analysis/yt/commits/0c0a0b6f8dca/
Changeset:   0c0a0b6f8dca
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-04-19 10:29:05
Summary:     Merged in MatthewTurk/yt/yt-3.0 (pull request #840)

Updating skeleton frontend.
Affected #:  5 files

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org.  You are receiving
this because you have the commit notification service enabled for the
recipient of this email.


