[yt-svn] commit/yt: 4 new changesets

commits-noreply at bitbucket.org
Tue Sep 2 15:52:26 PDT 2014


4 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/8e90e5456725/
Changeset:   8e90e5456725
Branch:      yt
User:        astrofrog
Date:        2014-09-02 12:30:04
Summary:     Fixed unit handling in load_octree
Affected #:  1 file

diff -r 2c3d2e84074ac8260feb8f056b0720c3a7442e82 -r 8e90e545672581ded88684de7bfda3d68288a612 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1197,6 +1197,14 @@
         Size of computational domain in units sim_unit_to_cm
     sim_time : float, optional
         The simulation time in seconds
+    mass_unit : string
+        Unit to use for masses.  Defaults to unitless.
+    time_unit : string
+        Unit to use for times.  Defaults to unitless.
+    velocity_unit : string
+        Unit to use for velocities.  Defaults to unitless.
+    magnetic_unit : string
+        Unit to use for magnetic fields. Defaults to unitless.
     periodicity : tuple of booleans
         Determines whether the data will be treated as periodic along
         each axis
@@ -1369,8 +1377,11 @@
     _field_info_class = StreamFieldInfo
     _dataset_type = "stream_octree"
 
-def load_octree(octree_mask, data, sim_unit_to_cm,
-                bbox=None, sim_time=0.0, periodicity=(True, True, True),
+def load_octree(octree_mask, data,
+                bbox=None, sim_time=0.0, length_unit=None,
+                mass_unit=None, time_unit=None,
+                velocity_unit=None, magnetic_unit=None,
+                periodicity=(True, True, True),
                 over_refine_factor = 1, partial_coverage = 1):
     r"""Load an octree mask into yt.
 
@@ -1390,12 +1401,20 @@
     data : dict
         A dictionary of 1D arrays.  Note that these must of the size of the
         number of "False" values in the ``octree_mask``.
-    sim_unit_to_cm : float
-        Conversion factor from simulation units to centimeters
     bbox : array_like (xdim:zdim, LE:RE), optional
-        Size of computational domain in units sim_unit_to_cm
+        Size of computational domain in units of length
     sim_time : float, optional
         The simulation time in seconds
+    length_unit : string
+        Unit to use for lengths.  Defaults to unitless.
+    mass_unit : string
+        Unit to use for masses.  Defaults to unitless.
+    time_unit : string
+        Unit to use for times.  Defaults to unitless.
+    velocity_unit : string
+        Unit to use for velocities.  Defaults to unitless.
+    magnetic_unit : string
+        Unit to use for magnetic fields. Defaults to unitless.
     periodicity : tuple of booleans
         Determines whether the data will be treated as periodic along
         each axis
@@ -1415,6 +1434,7 @@
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
     update_field_names(data)
 
+    field_units, data = unitify_data(data)
     sfh = StreamDictFieldHandler()
     
     particle_types = set_particle_types(data)
@@ -1424,6 +1444,17 @@
     grid_right_edges = domain_right_edge
     grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
 
+    if length_unit is None:
+        length_unit = 'code_length'
+    if mass_unit is None:
+        mass_unit = 'code_mass'
+    if time_unit is None:
+        time_unit = 'code_time'
+    if velocity_unit is None:
+        velocity_unit = 'code_velocity'
+    if magnetic_unit is None:
+        magnetic_unit = 'code_magnetic'
+
     # I'm not sure we need any of this.
     handler = StreamHandler(
         grid_left_edges,
@@ -1434,6 +1465,8 @@
         np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
         np.zeros(nprocs).reshape((nprocs,1)),
         sfh,
+        field_units,
+        (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
         particle_types=particle_types,
         periodicity=periodicity
     )
@@ -1450,12 +1483,6 @@
     sds = StreamOctreeDataset(handler)
     sds.octree_mask = octree_mask
     sds.partial_coverage = partial_coverage
-    sds.units["cm"] = sim_unit_to_cm
-    sds.units['1'] = 1.0
-    sds.units["unitary"] = 1.0
-    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
     sds.over_refine_factor = over_refine_factor
-    for unit in mpc_conversion.keys():
-        sds.units[unit] = mpc_conversion[unit] * box_in_mpc
 
     return sds
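
For context, a minimal usage sketch of the updated signature (not part of this
commit). The mask and data below are illustrative placeholders following the
docstring conventions above; the unit keywords are the new additions and fall
back to code units when left as None.

import numpy as np
from yt.frontends.stream.data_structures import load_octree

# Illustrative only: a value of 8 marks a refined oct, 0 a leaf oct; see the
# docstring above for the exact mask/data sizing convention.
octree_mask = np.array([8, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.uint8)
data = {"density": np.random.random(8)}

bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])

# The old sim_unit_to_cm argument is gone; units are passed explicitly and
# default to "code_length", "code_mass", etc. when omitted.
ds = load_octree(octree_mask, data, bbox=bbox,
                 length_unit="Mpc", mass_unit="Msun",
                 time_unit="Myr", velocity_unit="km/s")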


https://bitbucket.org/yt_analysis/yt/commits/6f6f243af3d3/
Changeset:   6f6f243af3d3
Branch:      yt
User:        astrofrog
Date:        2014-09-02 12:43:46
Summary:     Added type check for octree_mask
Affected #:  1 file

diff -r 8e90e545672581ded88684de7bfda3d68288a612 -r 6f6f243af3d3d059713e40811f25b09407904039 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1424,6 +1424,9 @@
 
     """
 
+    if not isinstance(octree_mask, np.ndarray) or octree_mask.dtype != np.uint8:
+        raise TypeError("octree_mask should be a Numpy array with type uint8")
+
     nz = (1 << (over_refine_factor))
     domain_dimensions = np.array([nz, nz, nz])
     nprocs = 1
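
A short illustration (again, not from the commit) of what the new check guards
against, reusing the call shape sketched under the previous changeset:

import numpy as np

mask_list = [8, 0, 0, 0, 0, 0, 0, 0, 0]   # a plain Python list
mask_int64 = np.array(mask_list)          # dtype defaults to int64, not uint8

# Passing either of the above to load_octree now fails immediately with
#   TypeError: octree_mask should be a Numpy array with type uint8
# rather than producing a confusing error deeper in the octree construction.

# The accepted form:
octree_mask = np.array(mask_list, dtype=np.uint8)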


https://bitbucket.org/yt_analysis/yt/commits/5479e1f7d829/
Changeset:   5479e1f7d829
Branch:      yt
User:        MatthewTurk
Date:        2014-09-02 13:38:55
Summary:     Fixing a few lingering issues with load_octree
Affected #:  2 files

diff -r 6f6f243af3d3d059713e40811f25b09407904039 -r 5479e1f7d8294c15e72f3a7e1e5db8500256e9fe yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1282,7 +1282,6 @@
         self.field_data = YTFieldData()
         self.field_parameters = {}
         self.ds = ds
-        self.index = self.ds.index
         self.oct_handler = oct_handler
         self._last_mask = None
         self._last_selector_id = None

diff -r 6f6f243af3d3d059713e40811f25b09407904039 -r 5479e1f7d8294c15e72f3a7e1e5db8500256e9fe yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -243,7 +243,9 @@
         for chunk in chunks:
             assert(len(chunk.objs) == 1)
             for subset in chunk.objs:
-                field_vals = self.fields[subset.domain_id -
-                                    subset._domain_offset]
+                field_vals = {}
+                for field in fields:
+                    field_vals[field] = self.fields[
+                        subset.domain_id - subset._domain_offset][field]
                 subset.fill(field_vals, rv, selector, ind)
         return rv


https://bitbucket.org/yt_analysis/yt/commits/0e8cce58116d/
Changeset:   0e8cce58116d
Branch:      yt
User:        MatthewTurk
Date:        2014-09-03 00:51:51
Summary:     Merging PR 1179
Affected #:  14 files

diff -r 5479e1f7d8294c15e72f3a7e1e5db8500256e9fe -r 0e8cce58116d6de321400d39ed6319396b95254b doc/source/cookbook/custom_colorbar_tickmarks.rst
--- a/doc/source/cookbook/custom_colorbar_tickmarks.rst
+++ b/doc/source/cookbook/custom_colorbar_tickmarks.rst
@@ -1,4 +1,4 @@
-Custom Colorabar Tickmarks
---------------------------
+Custom Colorbar Tickmarks
+-------------------------
 
 .. notebook:: custom_colorbar_tickmarks.ipynb

diff -r 5479e1f7d8294c15e72f3a7e1e5db8500256e9fe -r 0e8cce58116d6de321400d39ed6319396b95254b yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -513,6 +513,9 @@
         fill, gen = self.index._split_fields(fields_to_get)
         particles = []
         for field in gen:
+            if field[0] == 'deposit':
+                fill.append(field)
+                continue
             finfo = self.ds._get_field_info(*field)
             try:
                 finfo.check_available(self)
@@ -743,6 +746,13 @@
             self.right_edge + level_state.current_dx)
         level_state.data_source.min_level = level_state.current_level
         level_state.data_source.max_level = level_state.current_level
+        self._pdata_source = self.ds.region(
+            self.center,
+            self.left_edge - level_state.current_dx,
+            self.right_edge + level_state.current_dx)
+        self._pdata_source.min_level = level_state.current_level
+        self._pdata_source.max_level = level_state.current_level
+
 
     def _fill_fields(self, fields):
         ls = self._initialize_level_state(fields)

diff -r 5479e1f7d8294c15e72f3a7e1e5db8500256e9fe -r 0e8cce58116d6de321400d39ed6319396b95254b yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -34,6 +34,7 @@
 
 def test_slice():
     fns = []
+    grid_eps = np.finfo(np.float64).eps
     for nprocs in [8, 1]:
         # We want to test both 1 proc and 8 procs, to make sure that
         # parallelism isn't broken
@@ -52,6 +53,7 @@
             yax = ds.coordinates.y_axis[ax]
             for wf in ["density", None]:
                 slc = ds.slice(ax, slc_pos)
+                shifted_slc = ds.slice(ax, slc_pos + grid_eps)
                 yield assert_equal, slc["ones"].sum(), slc["ones"].size
                 yield assert_equal, slc["ones"].min(), 1.0
                 yield assert_equal, slc["ones"].max(), 1.0
@@ -66,6 +68,7 @@
                     p.save(name=tmpname)
                     fns.append(tmpname)
                 frb = slc.to_frb((1.0, 'unitary'), 64)
+                shifted_frb = shifted_slc.to_frb((1.0, 'unitary'), 64)
                 for slc_field in ['ones', 'density']:
                     fi = ds._get_field_info(slc_field)
                     yield assert_equal, frb[slc_field].info['data_source'], \
@@ -84,6 +87,8 @@
                         slc.center
                     yield assert_equal, frb[slc_field].info['coord'], \
                         slc_pos
+                    yield assert_equal, frb[slc_field], \
+                        shifted_frb[slc_field]
             # wf == None
             yield assert_equal, wf, None
     teardown_func(fns)

diff -r 5479e1f7d8294c15e72f3a7e1e5db8500256e9fe -r 0e8cce58116d6de321400d39ed6319396b95254b yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -125,6 +125,33 @@
              display_name = "\\mathrm{%s CIC Density}" % ptype_dn,
              units = "g/cm**3")
 
+    def _get_density_weighted_deposit_field(fname, units, method):
+        def _deposit_field(field, data):
+            """
+            Create a grid field for particle quantities weighted by particle
+            mass, using cloud-in-cell deposit.
+            """
+            pos = data[ptype, "particle_position"]
+            # Get back into density
+            pden = data[ptype, 'particle_mass']
+            top = data.deposit(pos, [data[(ptype, fname)]*pden], method=method)
+            bottom = data.deposit(pos, [pden], method=method)
+            top[bottom == 0] = 0.0
+            bnz = bottom.nonzero()
+            top[bnz] /= bottom[bnz]
+            d = data.ds.arr(top, input_units=units)
+            return d
+        return _deposit_field
+
+    for ax in 'xyz':
+        for method, name in zip(("cic", "sum"), ("cic", "nn")):
+            function = _get_density_weighted_deposit_field(
+                "particle_velocity_%s" % ax, "cm/s", method)
+            registry.add_field(
+                ("deposit", ("%s_"+name+"_velocity_%s") % (ptype, ax)),
+                function=function, units="cm/s", take_log=False,
+                validators=[ValidateSpatial(0)])
+
     # Now some translation functions.
 
     def particle_ones(field, data):
@@ -490,31 +517,6 @@
               validators=[ValidateParameter("normal"), 
                           ValidateParameter("center")])
 
-    def _get_cic_field(fname, units):
-        def _cic_particle_field(field, data):
-            """
-            Create a grid field for particle quantities weighted by particle
-            mass, using cloud-in-cell deposit.
-            """
-            pos = data[ptype, "particle_position"]
-            # Get back into density
-            pden = data[ptype, 'particle_mass'] / data["index", "cell_volume"] 
-            top = data.deposit(pos, [data[('all', particle_field)]*pden],
-                               method = 'cic')
-            bottom = data.deposit(pos, [pden], method = 'cic')
-            top[bottom == 0] = 0.0
-            bnz = bottom.nonzero()
-            top[bnz] /= bottom[bnz]
-            d = data.ds.arr(top, input_units = units)
-            return top
-
-    for ax in 'xyz':
-        registry.add_field(
-            ("deposit", "%s_cic_velocity_%s" % (ptype, ax)),
-            function=_get_cic_field(svel % ax, "cm/s"),
-            units = "cm/s", take_log=False,
-            validators=[ValidateSpatial(0)])
-
 def add_particle_average(registry, ptype, field_name, 
                          weight = "particle_mass",
                          density = True):
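
In the particle_fields.py hunk above, the removed _get_cic_field block appears
to have been broken (the inner closure was never returned), and the replacement
registers mass-weighted velocity deposit fields for both cloud-in-cell ("cic")
and plain-sum ("nn") deposition. A hedged access sketch, with a placeholder
dataset path and assuming an "all" particle type is registered:

import yt

# Placeholder path; any dataset with particle masses and velocities will do.
ds = yt.load("output_00080/info_00080.txt")
ad = ds.all_data()

# New fields registered above, one per particle type and axis:
#   ("deposit", "<ptype>_cic_velocity_<ax>")  -- method="cic"
#   ("deposit", "<ptype>_nn_velocity_<ax>")   -- method="sum"
vx_cic = ad[("deposit", "all_cic_velocity_x")]
vx_nn = ad[("deposit", "all_nn_velocity_x")]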

diff -r 5479e1f7d8294c15e72f3a7e1e5db8500256e9fe -r 0e8cce58116d6de321400d39ed6319396b95254b yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -25,6 +25,33 @@
     cdef extern from "alloca.h":
         void *alloca(int)
 
+cdef extern from "cosmology.h":
+    ctypedef struct CosmologyParameters "CosmologyParameters" :
+        pass
+    CosmologyParameters *cosmology_allocate()
+    void cosmology_free(CosmologyParameters *c)
+    void cosmology_set_fixed(CosmologyParameters *c)
+
+    void cosmology_set_OmegaM(CosmologyParameters *c, double value)
+    void cosmology_set_OmegaB(CosmologyParameters *c, double value)
+    void cosmology_set_OmegaL(CosmologyParameters *c, double value)
+    void cosmology_set_h(CosmologyParameters *c, double value)
+    void cosmology_set_DeltaDC(CosmologyParameters *c, double value)
+
+    double abox_from_auni(CosmologyParameters *c, double a) nogil
+    double tcode_from_auni(CosmologyParameters *c, double a) nogil
+    double tphys_from_auni(CosmologyParameters *c, double a) nogil
+
+    double auni_from_abox(CosmologyParameters *c, double v) nogil
+    double auni_from_tcode(CosmologyParameters *c, double v) nogil
+    double auni_from_tphys(CosmologyParameters *c, double v) nogil
+
+    double abox_from_tcode(CosmologyParameters *c, double tcode) nogil
+    double tcode_from_abox(CosmologyParameters *c, double abox) nogil
+
+    double tphys_from_abox(CosmologyParameters *c, double abox) nogil
+    double tphys_from_tcode(CosmologyParameters *c, double tcode) nogil
+
 cdef extern from "artio.h":
     ctypedef struct artio_fileset_handle "artio_fileset" :
         pass
@@ -138,6 +165,10 @@
     cdef public object parameters 
     cdef artio_fileset_handle *handle
 
+    # cosmology parameter for time unit conversion
+    cdef CosmologyParameters *cosmology
+    cdef float tcode_to_years
+
     # common attributes
     cdef public int num_grid
     cdef int64_t num_root_cells
@@ -178,6 +209,19 @@
         self.sfc_min = 0
         self.sfc_max = self.num_root_cells-1
 
+        # initialize cosmology module
+        if self.parameters.has_key("abox"):
+            self.cosmology = cosmology_allocate()
+            cosmology_set_OmegaM(self.cosmology, self.parameters['OmegaM'][0])
+            cosmology_set_OmegaL(self.cosmology, self.parameters['OmegaL'][0])
+            cosmology_set_OmegaB(self.cosmology, self.parameters['OmegaB'][0])
+            cosmology_set_h(self.cosmology, self.parameters['hubble'][0])
+            cosmology_set_DeltaDC(self.cosmology, self.parameters['DeltaDC'][0])
+            cosmology_set_fixed(self.cosmology)
+        else:
+            self.cosmology = NULL
+            self.tcode_to_years = self.parameters['time_unit'][0]/(365.25*86400)
+
         # grid detection
         self.min_level = 0
         self.max_level = self.parameters['grid_max_level'][0]
@@ -239,6 +283,8 @@
         if self.primary_variables : free(self.primary_variables)
         if self.secondary_variables : free(self.secondary_variables)
 
+        if self.cosmology : cosmology_free(self.cosmology)
+
         if self.handle : artio_fileset_close(self.handle)
   
     def read_parameters(self) :
@@ -288,6 +334,100 @@
 
             self.parameters[key] = parameter
 
+    def abox_from_auni(self, np.float64_t a):
+        if self.cosmology:
+            return abox_from_auni(self.cosmology, a)
+        else:
+            raise RuntimeError("abox_from_auni called for non-cosmological ARTIO fileset!")
+
+    def tcode_from_auni(self, np.float64_t a):
+        if self.cosmology:
+            return tcode_from_auni(self.cosmology, a)
+        else:
+            raise RuntimeError("tcode_from_auni called for non-cosmological ARTIO fileset!")
+
+    def tphys_from_auni(self, np.float64_t a):
+        if self.cosmology:
+            return tphys_from_auni(self.cosmology, a)
+        else:
+            raise RuntimeError("tphys_from_auni called for non-cosmological ARTIO fileset!")
+
+    def auni_from_abox(self, np.float64_t v):
+        if self.cosmology:
+            return auni_from_abox(self.cosmology, v)
+        else:
+            raise RuntimeError("auni_from_abox called for non-cosmological ARTIO fileset!")
+
+    def auni_from_tcode(self, np.float64_t v):
+        if self.cosmology:
+            return auni_from_tcode(self.cosmology, v)
+        else:
+            raise RuntimeError("auni_from_tcode called for non-cosmological ARTIO fileset!")
+
+    @cython.wraparound(False)
+    @cython.boundscheck(False)
+    def auni_from_tcode_array(self, np.ndarray[np.float64_t] tcode):
+        cdef int i, nauni
+        cdef np.ndarray[np.float64_t, ndim=1] auni
+        cdef CosmologyParameters *cosmology = self.cosmology
+        if not cosmology:
+            raise RuntimeError("auni_from_tcode_array called for non-cosmological ARTIO fileset!")
+        auni = np.empty_like(tcode)
+        nauni = auni.shape[0]
+        with nogil:
+            for i in range(nauni):
+                auni[i] = auni_from_tcode(self.cosmology, tcode[i])
+        return auni
+
+    def auni_from_tphys(self, np.float64_t v):
+        if self.cosmology:
+            return auni_from_tphys(self.cosmology, v)
+        else:
+            raise RuntimeError("auni_from_tphys called for non-cosmological ARTIO fileset!")
+
+    def abox_from_tcode(self, np.float64_t abox):
+        if self.cosmology:
+            return abox_from_tcode(self.cosmology, abox)
+        else:
+            raise RuntimeError("abox_from_tcode called for non-cosmological ARTIO fileset!")
+
+    def tcode_from_abox(self, np.float64_t abox):
+        if self.cosmology:
+            return tcode_from_abox(self.cosmology, abox)
+        else:
+            raise RuntimeError("tcode_from_abox called for non-cosmological ARTIO fileset!")
+
+    def tphys_from_abox(self, np.float64_t abox):
+        if self.cosmology:
+            return tphys_from_abox(self.cosmology, abox)
+        else:
+            raise RuntimeError("tphys_from_abox called for non-cosmological ARTIO fileset!")
+
+    def tphys_from_tcode(self, np.float64_t tcode):
+        if self.cosmology:
+            return tphys_from_tcode(self.cosmology, tcode)
+        else:
+            return self.tcode_to_years*tcode
+
+    @cython.wraparound(False)
+    @cython.boundscheck(False)
+    def tphys_from_tcode_array(self, np.ndarray[np.float64_t] tcode):
+        cdef int i, ntphys
+        cdef np.ndarray[np.float64_t, ndim=1] tphys
+        cdef CosmologyParameters *cosmology = self.cosmology
+        tphys = np.empty_like(tcode)
+        ntphys = tcode.shape[0]
+
+        if cosmology:
+            tphys = np.empty_like(tcode)
+            ntphys = tcode.shape[0]
+            with nogil:
+                for i in range(ntphys):
+                    tphys[i] = tphys_from_tcode(cosmology, tcode[i])
+            return tphys
+        else:
+            return tcode*self.tcode_to_years
+
 #    @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)

diff -r 5479e1f7d8294c15e72f3a7e1e5db8500256e9fe -r 0e8cce58116d6de321400d39ed6319396b95254b yt/frontends/artio/artio_headers/cosmology.c
--- /dev/null
+++ b/yt/frontends/artio/artio_headers/cosmology.c
@@ -0,0 +1,524 @@
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+
+
+#ifndef ERROR
+#include <stdio.h>
+#define ERROR(msg) { fprintf(stderr,"%s\n",msg); exit(1); }
+#endif
+
+#ifndef ASSERT
+#include <stdio.h>
+#define ASSERT(exp) { if(!(exp)) { fprintf(stderr,"Failed assertion %s, line: %d\n",#exp,__LINE__); exit(1); } }
+#endif
+
+#ifndef HEAPALLOC
+#include <stdlib.h>
+#define HEAPALLOC(type,size)	(type *)malloc((size)*sizeof(type))
+#endif
+
+#ifndef NEWARR
+#include <stdlib.h>
+#define NEWARR(size)   HEAPALLOC(double,size)
+#endif
+
+#ifndef DELETE
+#include <stdlib.h>
+#define DELETE(ptr)    free(ptr)
+#endif
+
+
+#include "cosmology.h"
+
+
+struct CosmologyParametersStruct
+{
+  int set;
+  int ndex;
+  int size;
+  double *la;
+  double *aUni;
+  double *aBox;
+  double *tCode;
+  double *tPhys;
+  double *dPlus;
+  double *qPlus;
+  double aLow;
+  double tCodeOffset;
+
+  double OmegaM;
+  double OmegaD;
+  double OmegaB;
+  double OmegaL;
+  double OmegaK;
+  double OmegaR;
+  double h;
+  double DeltaDC;
+  int flat;
+  double Omh2;
+  double Obh2;
+};
+
+void cosmology_clear_table(CosmologyParameters *c);
+void cosmology_fill_table(CosmologyParameters *c, double amin, double amax);
+void cosmology_fill_table_abox(CosmologyParameters *c, int istart, int n);
+
+CosmologyParameters *cosmology_allocate() {
+	CosmologyParameters *c = HEAPALLOC(CosmologyParameters,1);
+	if ( c != NULL ) {
+		memset(c, 0, sizeof(CosmologyParameters));
+
+		c->ndex = 200;
+		c->aLow = 1.0e-2;
+	}
+	return c;
+}
+
+void cosmology_free(CosmologyParameters *c) {
+	cosmology_clear_table(c);
+	DELETE(c);
+}
+
+int cosmology_is_set(CosmologyParameters *c)
+{
+  return (c->OmegaM>0.0 && c->OmegaB>0.0 && c->h>0.0);
+}
+
+
+void cosmology_fail_on_reset(const char *name, double old_value, double new_value)
+{
+  char str[150];
+  sprintf(str,"Trying to change %s from %lg to %lg...\nCosmology has been fixed and cannot be changed.\n",name,old_value,new_value);
+  ERROR(str);
+}
+
+
+void cosmology_set_OmegaM(CosmologyParameters *c, double v)
+{
+  if(v < 1.0e-3) v = 1.0e-3;
+  if(fabs(c->OmegaM-v) > 1.0e-5)
+    {
+      if(c->set) cosmology_fail_on_reset("OmegaM",c->OmegaM,v);
+      c->OmegaM = v;
+      c->flat = (fabs(c->OmegaM+c->OmegaL-1.0) > 1.0e-5) ? 0 : 1;
+      cosmology_clear_table(c);
+    }
+}
+
+
+void cosmology_set_OmegaL(CosmologyParameters *c, double v)
+{
+  if(fabs(c->OmegaL-v) > 1.0e-5)
+    {
+      if(c->set) cosmology_fail_on_reset("OmegaL",c->OmegaL,v);
+      c->OmegaL = v;
+      c->flat = (fabs(c->OmegaM+c->OmegaL-1.0) > 1.0e-5) ? 0 : 1;
+      cosmology_clear_table(c);
+    }
+}
+
+
+void cosmology_set_OmegaB(CosmologyParameters *c, double v)
+{
+  if(v < 0.0) v = 0.0;
+  if(fabs(c->OmegaB-v) > 1.0e-5)
+    {
+      if(c->set) cosmology_fail_on_reset("OmegaB",c->OmegaB,v);
+      c->OmegaB = v;
+      cosmology_clear_table(c);
+    }
+}
+
+
+void cosmology_set_h(CosmologyParameters *c, double v)
+{
+  if(fabs(c->h-v) > 1.0e-5)
+    {
+      if(c->set) cosmology_fail_on_reset("h",c->h,v);
+      c->h = v;
+      cosmology_clear_table(c);
+    }
+}
+
+
+void cosmology_set_DeltaDC(CosmologyParameters *c, double v)
+{
+  if(fabs(c->DeltaDC-v) > 1.0e-3)
+    {
+      if(c->set) cosmology_fail_on_reset("DeltaDC",c->DeltaDC,v);
+      c->DeltaDC = v;
+      cosmology_clear_table(c);
+    }
+}
+
+
+void cosmology_init(CosmologyParameters *c)
+{
+  if(c->size == 0) /* reset only if the state is dirty */
+    {
+      if(!cosmology_is_set(c)) ERROR("Not all of the required cosmological parameters have been set; the minimum required set is (OmegaM,OmegaB,h).");
+
+      if(c->OmegaB > c->OmegaM) c->OmegaB = c->OmegaM;
+      c->OmegaD = c->OmegaM - c->OmegaB;
+      if(c->flat)
+	{
+	  c->OmegaK = 0.0;
+	  c->OmegaL = 1.0 - c->OmegaM;
+	}
+      else
+	{
+	  c->OmegaK = 1.0 - (c->OmegaM+c->OmegaL);
+	}
+      c->OmegaR = 4.166e-5/(c->h*c->h);
+
+      c->Omh2 = c->OmegaM*c->h*c->h;
+      c->Obh2 = c->OmegaB*c->h*c->h;
+
+      cosmology_fill_table(c,c->aLow,1.0);
+
+      c->tCodeOffset = 0.0;  /*  Do need to set it to zero first */
+#ifndef NATIVE_TCODE_NORMALIZATION
+      c->tCodeOffset = 0.0 - tCode(c,inv_aBox(c,1.0));
+#endif
+    }      
+}
+
+
+void cosmology_set_fixed(CosmologyParameters *c)
+{
+  cosmology_init(c);
+  c->set = 1;
+}
+
+
+double cosmology_mu(CosmologyParameters *c, double a)
+{
+  return sqrt(((a*a*c->OmegaL+c->OmegaK)*a+c->OmegaM)*a+c->OmegaR);
+}
+
+
+double cosmology_dc_factor(CosmologyParameters *c, double dPlus)
+{
+  double dc = 1.0 + dPlus*c->DeltaDC;
+  return 1.0/pow((dc>0.001)?dc:0.001,1.0/3.0);
+}
+
+
+void cosmology_fill_table_integrate(CosmologyParameters *c, double a, double y[], double f[])
+{
+  double mu = cosmology_mu(c, a);
+  double abox = a*cosmology_dc_factor(c, y[2]);
+  
+  f[0] = a/(abox*abox*mu);
+  f[1] = a/mu;
+  f[2] = y[3]/(a*mu);
+  f[3] = 1.5*c->OmegaM*y[2]/mu;
+}
+
+
+void cosmology_fill_table_piece(CosmologyParameters *c, int istart, int n)
+{
+  int i, j;
+  double tPhysUnit = (3.0856775813e17/(365.25*86400))/c->h;  /* 1/H0 in Julian years */
+
+  double x, aeq = c->OmegaR/c->OmegaM;
+  double tCodeFac = 1.0/sqrt(aeq);
+  double tPhysFac = tPhysUnit*aeq*sqrt(aeq)/sqrt(c->OmegaM);
+
+  double da, a0, y0[4], y1[4];
+  double f1[4], f2[4], f3[4], f4[4];
+
+  for(i=istart; i<n; i++)
+    {
+      c->aUni[i] = pow(10.0,c->la[i]);
+    }
+
+  /*
+  //  Small a regime, use analytical formulae for matter + radiation model
+  */  
+  for(i=istart; c->aUni[i]<(c->aLow+1.0e-9) && i<n; i++)
+    {
+      x = c->aUni[i]/aeq;
+
+      c->tPhys[i] = tPhysFac*2*x*x*(2+sqrt(x+1))/(3*pow(1+sqrt(x+1),2.0));
+      c->dPlus[i] = aeq*(x + 2.0/3.0 + (6*sqrt(1+x)+(2+3*x)*log(x)-2*(2+3*x)*log(1+sqrt(1+x)))/(log(64.0)-9));  /* long last term is the decaying mode generated after euality; it is very small for x > 10, I keep ot just for completeness; */
+      c->qPlus[i] = c->aUni[i]*cosmology_mu(c,c->aUni[i])*(1 + ((2+6*x)/(x*sqrt(1+x))+3*log(x)-6*log(1+sqrt(1+x)))/(log(64)-9)); /* this is a^2*dDPlus/dt/H0 */
+
+      c->aBox[i] = c->aUni[i]*cosmology_dc_factor(c,c->dPlus[i]);
+      c->tCode[i] = 1.0 - tCodeFac*asinh(sqrt(aeq/c->aBox[i]));
+    }
+  
+  /*
+  //  Large a regime, solve ODEs
+  */
+  ASSERT(i > 0);
+
+  tCodeFac = 0.5*sqrt(c->OmegaM);
+  tPhysFac = tPhysUnit;
+
+  y1[0] = c->tCode[i-1]/tCodeFac;
+  y1[1] = c->tPhys[i-1]/tPhysFac;
+  y1[2] = c->dPlus[i-1];
+  y1[3] = c->qPlus[i-1];
+
+  for(; i<n; i++)
+    {
+      a0 = c->aUni[i-1];
+      da = c->aUni[i] - a0;
+
+      /*  RK4 integration */
+      for(j=0; j<4; j++) y0[j] = y1[j];
+      cosmology_fill_table_integrate(c, a0,y1,f1);
+
+      for(j=0; j<4; j++) y1[j] = y0[j] + 0.5*da*f1[j];
+      cosmology_fill_table_integrate(c, a0+0.5*da,y1,f2);
+
+      for(j=0; j<4; j++) y1[j] = y0[j] + 0.5*da*f2[j];
+      cosmology_fill_table_integrate(c, a0+0.5*da,y1,f3);
+
+      for(j=0; j<4; j++) y1[j] = y0[j] + da*f3[j];
+      cosmology_fill_table_integrate(c, a0+da,y1,f4);
+
+      for(j=0; j<4; j++) y1[j] = y0[j] + da*(f1[j]+2*f2[j]+2*f3[j]+f4[j])/6.0;
+
+      c->tCode[i] = tCodeFac*y1[0];
+      c->tPhys[i] = tPhysFac*y1[1];
+      c->dPlus[i] = y1[2];
+      c->qPlus[i] = y1[3];
+
+      c->aBox[i] = c->aUni[i]*cosmology_dc_factor(c,c->dPlus[i]);
+    }
+} 
+
+
+void cosmology_fill_table(CosmologyParameters *c, double amin, double amax)
+{
+  int i, imin, imax, iold;
+  double dla = 1.0/c->ndex;
+  double lamin, lamax;
+  double *old_la = c->la;
+  double *old_aUni = c->aUni;
+  double *old_aBox = c->aBox;
+  double *old_tCode = c->tCode;
+  double *old_tPhys = c->tPhys;
+  double *old_dPlus = c->dPlus;
+  double *old_qPlus = c->qPlus;
+  int old_size = c->size;
+
+  if(amin > c->aLow) amin = c->aLow;
+  lamin = dla*floor(c->ndex*log10(amin));
+  lamax = dla*ceil(c->ndex*log10(amax));
+
+  c->size = 1 + (int)(0.5+c->ndex*(lamax-lamin)); 
+  ASSERT(fabs(lamax-lamin-dla*(c->size-1)) < 1.0e-14);
+
+  c->la = NEWARR(c->size);     ASSERT(c->la != NULL);
+  c->aUni = NEWARR(c->size);   ASSERT(c->aUni != NULL);
+  c->aBox = NEWARR(c->size);   ASSERT(c->aBox != NULL);
+  c->tCode = NEWARR(c->size);  ASSERT(c->tCode != NULL);
+  c->tPhys = NEWARR(c->size);  ASSERT(c->tPhys != NULL);
+  c->dPlus = NEWARR(c->size);  ASSERT(c->dPlus != NULL);
+  c->qPlus = NEWARR(c->size);  ASSERT(c->qPlus != NULL);
+
+  /*
+  //  New log10(aUni) table
+  */
+  for(i=0; i<c->size; i++)
+    {
+      c->la[i] = lamin + dla*i;
+    }
+
+  if(old_size == 0)
+    {
+      /*
+      //  Filling the table for the first time
+      */
+      cosmology_fill_table_piece(c,0,c->size);
+    }
+  else
+    {
+      /*
+      //  Find if we need to expand the lower end
+      */
+      if(lamin < old_la[0])
+	{
+	  imin = (int)(0.5+c->ndex*(old_la[0]-lamin));
+	  ASSERT(fabs(old_la[0]-lamin-dla*imin) < 1.0e-14);
+	}
+      else imin = 0;
+
+      /*
+      //  Find if we need to expand the upper end
+      */
+      if(lamax > old_la[old_size-1])
+	{
+	  imax = (int)(0.5+c->ndex*(old_la[old_size-1]-lamin));
+	  ASSERT(fabs(old_la[old_size-1]-lamin-dla*imax) < 1.0e-14);
+	}
+      else imax = c->size - 1;
+  
+      /*
+      //  Re-use the rest
+      */
+      if(lamin > old_la[0])
+	{
+	  iold = (int)(0.5+c->ndex*(lamin-old_la[0]));
+	  ASSERT(fabs(lamin-old_la[0]-dla*iold) < 1.0e-14);
+	}
+      else iold = 0;
+
+      memcpy(c->aUni+imin,old_aUni+iold,sizeof(double)*(imax-imin+1));
+      memcpy(c->aBox+imin,old_aBox+iold,sizeof(double)*(imax-imin+1));
+      memcpy(c->tCode+imin,old_tCode+iold,sizeof(double)*(imax-imin+1));
+      memcpy(c->tPhys+imin,old_tPhys+iold,sizeof(double)*(imax-imin+1));
+      memcpy(c->dPlus+imin,old_dPlus+iold,sizeof(double)*(imax-imin+1));
+      memcpy(c->qPlus+imin,old_qPlus+iold,sizeof(double)*(imax-imin+1));
+
+      DELETE(old_la);
+      DELETE(old_aUni);
+      DELETE(old_aBox);
+      DELETE(old_tCode);
+      DELETE(old_tPhys);
+      DELETE(old_dPlus);
+      DELETE(old_qPlus);
+
+      /*
+      //  Fill in additional pieces
+      */
+      if(imin > 0) cosmology_fill_table_piece(c,0,imin);
+      if(imax < c->size-1) cosmology_fill_table_piece(c,imax,c->size);
+    }
+}
+
+
+void cosmology_clear_table(CosmologyParameters *c)
+{
+  if(c->size > 0)
+    {
+      DELETE(c->la);
+      DELETE(c->aUni);
+      DELETE(c->aBox);
+      DELETE(c->tCode);
+      DELETE(c->tPhys);
+      DELETE(c->dPlus);
+      DELETE(c->qPlus);
+
+      c->size = 0;
+      c->la = NULL;
+      c->aUni = NULL;
+      c->aBox = NULL;
+      c->tCode = NULL;
+      c->tPhys = NULL;
+      c->dPlus = NULL;
+      c->qPlus = NULL;
+    }
+}
+
+
+void cosmology_check_range(CosmologyParameters *c, double a)
+{
+  ASSERT((a > 1.0e-9) && (a < 1.0e9));
+
+  if(c->size == 0) cosmology_init(c);
+
+  if(a < c->aUni[0])
+    {
+      cosmology_fill_table(c,a,c->aUni[c->size-1]);
+    }
+
+  if(a > c->aUni[c->size-1])
+    {
+      cosmology_fill_table(c,c->aUni[0],a);
+    }
+}
+
+
+void cosmology_set_thread_safe_range(CosmologyParameters *c, double amin, double amax)
+{
+  cosmology_check_range(c, amin);
+  cosmology_check_range(c, amax);
+}
+
+
+double cosmology_get_value_from_table(CosmologyParameters *c, double a, double table[])
+{
+  int idx = (int)(c->ndex*(log10(a)-c->la[0]));
+
+  ASSERT(idx>=0 && idx<c->size);
+
+  /*
+  //  Do it as a function of aUni rather than la to ensure exact inversion
+  */
+  return table[idx] + (table[idx+1]-table[idx])/(c->aUni[idx+1]-c->aUni[idx])*(a-c->aUni[idx]);
+}
+
+
+int cosmology_find_index(CosmologyParameters *c, double v, double table[])
+{
+  int ic, il = 0;
+  int ih = c->size - 1;
+
+  if(v < table[0])
+    {
+      return -1;
+    }
+  if(v > table[c->size-1])
+    {
+      return c->size + 1;
+    }
+
+  while((ih-il) > 1)
+    {
+      ic = (il+ih)/2;
+      if(v > table[ic]) /* special, not fully optimal form to avoid checking that il < c->size-1 */
+	il = ic;
+      else
+	ih = ic;
+    }
+
+  ASSERT(il+1 < c->size);
+
+  return il;
+}
+
+
+/*
+//  Direct and inverse functions
+*/
+#define DEFINE_FUN(name,offset)			\
+double name(CosmologyParameters *c, double a) \
+{ \
+  cosmology_check_range(c,a); \
+  return cosmology_get_value_from_table(c,a,c->name) + offset; \
+} \
+double inv_##name(CosmologyParameters *c, double v) \
+{ \
+  int idx; \
+  double *table; \
+  if(c->size == 0) cosmology_init(c); \
+  v -= offset; \
+  table = c->name; \
+  idx = cosmology_find_index(c,v,table); \
+  while(idx < 0) \
+    { \
+      cosmology_check_range(c,0.5*c->aUni[0]); \
+      table = c->name; \
+      idx = cosmology_find_index(c,v,table); \
+    } \
+  while(idx > c->size) \
+    { \
+      cosmology_check_range(c,2.0*c->aUni[c->size-1]); \
+      table = c->name; \
+      idx = cosmology_find_index(c,v,table); \
+    } \
+  return c->aUni[idx] + (c->aUni[idx+1]-c->aUni[idx])/(table[idx+1]-table[idx])*(v-table[idx]); \
+}
+
+DEFINE_FUN(aBox,0.0);
+DEFINE_FUN(tCode,c->tCodeOffset);
+DEFINE_FUN(tPhys,0.0);
+DEFINE_FUN(dPlus,0.0);
+DEFINE_FUN(qPlus,0.0);
+
+#undef DEFINE_FUN

diff -r 5479e1f7d8294c15e72f3a7e1e5db8500256e9fe -r 0e8cce58116d6de321400d39ed6319396b95254b yt/frontends/artio/artio_headers/cosmology.h
--- /dev/null
+++ b/yt/frontends/artio/artio_headers/cosmology.h
@@ -0,0 +1,101 @@
+#ifndef __COSMOLOGY_H__
+#define __COSMOLOGY_H__
+
+typedef struct CosmologyParametersStruct CosmologyParameters;
+
+#define COSMOLOGY_DECLARE_PRIMARY_PARAMETER(name) \
+void cosmology_set_##name(CosmologyParameters *c, double value)
+
+#define cosmology_set(c,name,value)	\
+cosmology_set_##name(c,value)
+
+COSMOLOGY_DECLARE_PRIMARY_PARAMETER(OmegaM);
+COSMOLOGY_DECLARE_PRIMARY_PARAMETER(OmegaB);
+COSMOLOGY_DECLARE_PRIMARY_PARAMETER(OmegaL);
+COSMOLOGY_DECLARE_PRIMARY_PARAMETER(h);
+COSMOLOGY_DECLARE_PRIMARY_PARAMETER(DeltaDC);
+
+#undef COSMOLOGY_DECLARE_PRIMARY_PARAMETER
+
+CosmologyParameters *cosmology_allocate();
+void cosmology_free(CosmologyParameters *c);
+
+/*
+//  Check that all required cosmological parameters have been set.
+//  The minimum set is OmegaM, OmegaB, and h. By default, zero OmegaL,
+//  OmegaK, and the DC mode are assumed.
+*/
+int cosmology_is_set(CosmologyParameters *c);
+
+
+/*
+//  Freeze the cosmology and forbid any further changes to it.
+//  In codes that include user-customizable segments (like plugins),
+//  this function van be used for insuring that a user does not
+//  change the cosmology in mid-run.
+*/
+void cosmology_set_fixed(CosmologyParameters *c);
+
+
+/*
+//  Manual initialization. This does not need to be called, 
+//  the initialization is done automatically on the first call
+//  to a relevant function.
+*/
+void cosmology_init(CosmologyParameters *c);
+
+
+/*
+//  Set the range of global scale factors for thread-safe
+//  calls to direct functions until the argument leaves the range.
+*/
+void cosmology_set_thread_safe_range(CosmologyParameters *c, double amin, double amax);
+
+/*
+//  Direct functions take the global cosmological scale factor as the argument.
+//  These functionsare are thread-safe if called with the argument in the
+//  range set by a prior call to cosmology_set_thread_safe_range(...).
+//  Calling them with the argument outside that range is ok, but breaks
+//  thread-safety assurance.
+*/
+
+#define DEFINE_FUN(name)         \
+double name(CosmologyParameters *c, double a); \
+double inv_##name(CosmologyParameters *c, double v);
+
+DEFINE_FUN(aBox);
+DEFINE_FUN(tCode);
+DEFINE_FUN(tPhys);
+DEFINE_FUN(dPlus);
+DEFINE_FUN(qPlus); /* Q+ = a^2 dD+/(H0 dt) */
+
+#undef DEFINE_FUN
+
+/*
+//  Conversion macros
+*/
+#define  abox_from_auni(c,a)   aBox(c,a)
+#define tcode_from_auni(c,a)  tCode(c,a)
+#define tphys_from_auni(c,a)  tPhys(c,a)
+#define dplus_from_auni(c,a)  dPlus(c,a)
+
+#define auni_from_abox(c,v)   inv_aBox(c,v)
+#define auni_from_tcode(c,v)  inv_tCode(c,v)
+#define auni_from_tphys(c,v)  inv_tPhys(c,v)
+#define auni_from_dplus(c,v)  inv_dPlus(c,v)
+
+#define abox_from_tcode(c,tcode)   aBox(c,inv_tCode(c,tcode))
+#define tcode_from_abox(c,abox)    tCode(c,inv_aBox(c,abox))
+
+#define tphys_from_abox(c,abox)    tPhys(c,inv_aBox(c,abox))
+#define tphys_from_tcode(c,tcode)  tPhys(c,inv_tCode(c,tcode))
+#define dplus_from_tcode(c,tcode)  dPlus(c,inv_tCode(c,tcode))
+
+/*
+//  Hubble parameter in km/s/Mpc; defined as macro so that it can be
+//  undefined if needed to avoid the name clash.
+*/
+double cosmology_mu(CosmologyParameters *c, double a);
+#define Hubble(c,a) (100*c->h*cosmology_mu(c,a)/(a*a))
+
+#endif /* __COSMOLOGY_H__ */

diff -r 5479e1f7d8294c15e72f3a7e1e5db8500256e9fe -r 0e8cce58116d6de321400d39ed6319396b95254b yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -26,8 +26,7 @@
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 from .fields import \
-    ARTIOFieldInfo, \
-    b2t
+    ARTIOFieldInfo
 from yt.fields.particle_fields import \
     standard_particle_fields
 
@@ -387,37 +386,40 @@
             self.particle_types = ()
         self.particle_types_raw = self.particle_types
 
-        self.current_time = b2t(self.artio_parameters["tl"][0])
+        self.current_time = self.quan(self._handle.tphys_from_tcode(self.artio_parameters["tl"][0]),"yr")
 
         # detect cosmology
         if "abox" in self.artio_parameters:
+            self.cosmological_simulation = True
+
             abox = self.artio_parameters["abox"][0]
-            self.cosmological_simulation = True
             self.omega_lambda = self.artio_parameters["OmegaL"][0]
             self.omega_matter = self.artio_parameters["OmegaM"][0]
             self.hubble_constant = self.artio_parameters["hubble"][0]
-            self.current_redshift = 1.0/self.artio_parameters["abox"][0] - 1.0
+            self.current_redshift = 1.0/self.artio_parameters["auni"][0] - 1.0
+            self.current_redshift_box = 1.0/abox - 1.0
 
             self.parameters["initial_redshift"] =\
                 1.0 / self.artio_parameters["auni_init"][0] - 1.0
             self.parameters["CosmologyInitialRedshift"] =\
                 self.parameters["initial_redshift"]
-        else:
-            self.cosmological_simulation = False
 
-        #units
-        if self.cosmological_simulation:
             self.parameters['unit_m'] = self.artio_parameters["mass_unit"][0]
             self.parameters['unit_t'] =\
                 self.artio_parameters["time_unit"][0] * abox**2
             self.parameters['unit_l'] =\
                 self.artio_parameters["length_unit"][0] * abox
+
+            if self.artio_parameters["DeltaDC"][0] != 0:
+                mylog.warn("DeltaDC != 0, which implies auni != abox.  Be sure you understand which expansion parameter is appropriate for your use! (Gnedin, Kravtsov, & Rudd 2011)")
         else:
+            self.cosmological_simulation = False
+
             self.parameters['unit_l'] = self.artio_parameters["length_unit"][0]
             self.parameters['unit_t'] = self.artio_parameters["time_unit"][0]
             self.parameters['unit_m'] = self.artio_parameters["mass_unit"][0]
 
-        # hard coded assumption of 3D periodicity (add to dataset)
+        # hard coded assumption of 3D periodicity
         self.periodicity = (True, True, True)
 
     @classmethod

diff -r 5479e1f7d8294c15e72f3a7e1e5db8500256e9fe -r 0e8cce58116d6de321400d39ed6319396b95254b yt/frontends/artio/fields.py
--- a/yt/frontends/artio/fields.py
+++ b/yt/frontends/artio/fields.py
@@ -19,6 +19,8 @@
 from yt.funcs import mylog
 from yt.fields.field_info_container import \
     FieldInfoContainer
+from yt.fields.field_detector import \
+    FieldDetector
 from yt.units.yt_array import \
     YTArray
 
@@ -62,7 +64,8 @@
         ("MASS", ("code_mass", ["particle_mass"], None)),
         ("PID", ("", ["particle_index"], None)),
         ("SPECIES", ("", ["particle_type"], None)),
-        ("BIRTH_TIME", ("code_time", ["creation_time"], None)),
+        ("BIRTH_TIME", ("", [], None)),  # code-units defined as dimensionless to 
+                                         # avoid incorrect conversion 
         ("INITIAL_MASS", ("code_mass", ["initial_mass"], None)),
         ("METALLICITY_SNIa", ("", ["metallicity_snia"], None)),
         ("METALLICITY_SNII", ("", ["metallicity_snii"], None)),
@@ -111,123 +114,35 @@
                        take_log=True)
 
     def setup_particle_fields(self, ptype):
+        if ptype == "STAR":
+            def _creation_time(field,data):
+                # this test is necessary to avoid passing invalid tcode values 
+                # to the function tphys_from_tcode during field detection
+                # (1.0 is not a valid tcode value)
+                if isinstance(data, FieldDetector):
+                    return data["STAR","BIRTH_TIME"]
+                return YTArray(data.ds._handle.tphys_from_tcode_array(data["STAR","BIRTH_TIME"]),"yr")
 
-        def _particle_age(field, data):
-            return b2t(data[ptype,"creation_time"])
-        self.add_field((ptype, "particle_age"), function=_particle_age, units="s",
-                  particle_type=True)
+            def _age(field, data):
+                if isinstance(data, FieldDetector):
+                    return data["STAR","creation_time"]
+                return data.ds.current_time - data["STAR","creation_time"]
+
+            self.add_field((ptype, "creation_time"), function=_creation_time, units="yr",
+                        particle_type=True)
+            self.add_field((ptype, "age"), function=_age, units="yr",
+                        particle_type=True)
+
+            if self.ds.cosmological_simulation:
+                def _creation_redshift(field,data):
+                    # this test is necessary to avoid passing invalid tcode values 
+                    # to the function auni_from_tcode during field detection
+                    # (1.0 is not a valid tcode value)
+                    if isinstance(data, FieldDetector):
+                        return data["STAR","BIRTH_TIME"]
+                    return 1.0/data.ds._handle.auni_from_tcode_array(data["STAR","BIRTH_TIME"]) - 1.0
+
+                self.add_field((ptype, "creation_redshift"), function=_creation_redshift,
+                        particle_type=True)
 
         super(ARTIOFieldInfo, self).setup_particle_fields(ptype)
-
-#stolen from frontends/art/
-#All of these functions are to convert from hydro time var to
-#proper time
-sqrt = np.sqrt
-sign = np.sign
-
-
-def find_root(f, a, b, tol=1e-6):
-    c = (a+b)/2.0
-    last = -np.inf
-    assert(sign(f(a)) != sign(f(b)))
-    while np.abs(f(c)-last) > tol:
-        last = f(c)
-        if sign(last) == sign(f(b)):
-            b = c
-        else:
-            a = c
-        c = (a+b)/2.0
-    return c
-
-
-def quad(fintegrand, xmin, xmax, n=1e4):
-    spacings = np.logspace(np.log10(xmin), np.log10(xmax), n)
-    integrand_arr = fintegrand(spacings)
-    val = np.trapz(integrand_arr, dx=np.diff(spacings))
-    return val
-
-
-def a2b(at, Om0=0.27, Oml0=0.73, h=0.700):
-    def f_a2b(x):
-        val = 0.5*sqrt(Om0) / x**3.0
-        val /= sqrt(Om0/x**3.0 + Oml0 + (1.0-Om0-Oml0)/x**2.0)
-        return val
-    #val, err = si.quad(f_a2b,1,at)
-    val = quad(f_a2b, 1, at)
-    return val
-
-
-def b2a(bt, **kwargs):
-    #converts code time into expansion factor
-    #if Om0 ==1and OmL == 0 then b2a is (1 / (1-td))**2
-    #if bt < -190.0 or bt > -.10:  raise 'bt outside of range'
-    f_b2a = lambda at: a2b(at, **kwargs)-bt
-    return find_root(f_b2a, 1e-4, 1.1)
-    #return so.brenth(f_b2a,1e-4,1.1)
-    #return brent.brent(f_b2a)
-
-
-def a2t(at, Om0=0.27, Oml0=0.73, h=0.700):
-    integrand = lambda x: 1./(x*sqrt(Oml0+Om0*x**-3.0))
-    #current_time,err = si.quad(integrand,0.0,at,epsabs=1e-6,epsrel=1e-6)
-    current_time = quad(integrand, 1e-4, at)
-    #spacings = np.logspace(-5,np.log10(at),1e5)
-    #integrand_arr = integrand(spacings)
-    #current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
-    current_time *= 9.779/h
-    return current_time
-
-
-def b2t(tb, n=1e2, logger=None, **kwargs):
-    tb = np.array(tb)
-    if len(np.atleast_1d(tb)) == 1: 
-        return a2t(b2a(tb))
-    if tb.shape == ():
-        return None 
-    if len(tb) < n:
-        n = len(tb)
-    age_min = a2t(b2a(tb.max(), **kwargs), **kwargs)
-    age_max = a2t(b2a(tb.min(), **kwargs), **kwargs)
-    tbs = -1.*np.logspace(np.log10(-tb.min()),
-                          np.log10(-tb.max()), n)
-    ages = []
-    for i, tbi in enumerate(tbs):
-        ages += a2t(b2a(tbi)),
-        if logger:
-            logger(i)
-    ages = np.array(ages)
-    fb2t = np.interp(tb, tbs, ages)
-    #fb2t = interp1d(tbs,ages)
-    return fb2t*1e9*31556926
-
-
-def spread_ages(ages, logger=None, spread=.0e7*365*24*3600):
-    #stars are formed in lumps; spread out the ages linearly
-    da = np.diff(ages)
-    assert np.all(da <= 0)
-    #ages should always be decreasing, and ordered so
-    agesd = np.zeros(ages.shape)
-    idx, = np.where(da < 0)
-    idx += 1
-    #mark the right edges
-    #spread this age evenly out to the next age
-    lidx = 0
-    lage = 0
-    for i in idx:
-        n = i-lidx
-        #n stars affected
-        rage = ages[i]
-        lage = max(rage-spread, 0.0)
-        agesd[lidx:i] = np.linspace(lage, rage, n)
-        lidx = i
-        #lage=rage
-        if logger:
-            logger(i)
-    #we didn't get the last iter
-    i = ages.shape[0]-1
-    n = i-lidx
-    #n stars affected
-    rage = ages[i]
-    lage = max(rage-spread, 0.0)
-    agesd[lidx:i] = np.linspace(lage, rage, n)
-    return agesd
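
To summarize the ARTIO field changes above: the pure-Python b2t/a2t helpers are
dropped in favour of the new Cython cosmology bindings, and STAR particles gain
creation_time and age fields in years, plus creation_redshift for cosmological
runs. A hedged usage sketch with a placeholder dataset path:

import yt

ds = yt.load("sizmbhloz-clref04SNth-rs9_a0.9011/sizmbhloz-clref04SNth-rs9_a0.9011.art")
ad = ds.all_data()

t_form = ad[("STAR", "creation_time")]      # physical formation time, in yr
ages = ad[("STAR", "age")]                  # ds.current_time - creation_time
z_form = ad[("STAR", "creation_redshift")]  # cosmological runs only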

diff -r 5479e1f7d8294c15e72f3a7e1e5db8500256e9fe -r 0e8cce58116d6de321400d39ed6319396b95254b yt/frontends/stream/tests/test_stream_amrgrids.py
--- /dev/null
+++ b/yt/frontends/stream/tests/test_stream_amrgrids.py
@@ -0,0 +1,28 @@
+from yt.testing import *
+import numpy as np
+from yt.utilities.exceptions import YTIntDomainOverflow
+
+from yt import load_amr_grids, ProjectionPlot
+
+def test_qt_overflow():
+    grid_data = []
+
+    grid_dict = {}
+
+    grid_dict['left_edge'] = [-1.0, -1.0, -1.0]
+    grid_dict['right_edge'] = [1.0, 1.0, 1.0]
+    grid_dict['dimensions'] = [8, 8, 8]
+    grid_dict['level'] = 0
+
+    grid_dict['density'] = np.ones((8,8,8))
+
+    grid_data.append(grid_dict)
+
+    domain_dimensions = np.array([8, 8, 8])
+
+    spf = load_amr_grids(grid_data, domain_dimensions)
+
+    def make_proj():
+        p = ProjectionPlot(spf, 'x', ["density"], center='c', origin='native')
+        return p
+    yield assert_raises, YTIntDomainOverflow, make_proj

diff -r 5479e1f7d8294c15e72f3a7e1e5db8500256e9fe -r 0e8cce58116d6de321400d39ed6319396b95254b yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -36,6 +36,10 @@
     long int lrint(double x) nogil
     double fabs(double x) nogil
 
+# use this as an epsilon test for grids aligned with selector
+# define here to avoid the gil later
+cdef np.float64_t grid_eps = np.finfo(np.float64).eps
+
 # These routines are separated into a couple different categories:
 #
 #   * Routines for identifying intersections of an object with a bounding box
@@ -111,20 +115,19 @@
         self.min_level = getattr(dobj, "min_level", 0)
         self.max_level = getattr(dobj, "max_level", 99)
         self.overlap_cells = 0
-
-        for i in range(3) :
-            ds = getattr(dobj, 'ds', None)
-            if ds is None:
-                for i in range(3):
-                    # NOTE that this is not universal.
-                    self.domain_width[i] = 1.0
-                    self.periodicity[i] = False
-            else:
-                DLE = _ensure_code(ds.domain_left_edge)
-                DRE = _ensure_code(ds.domain_right_edge)
-                for i in range(3):
-                    self.domain_width[i] = DRE[i] - DLE[i]
-                    self.periodicity[i] = ds.periodicity[i]
+        
+        ds = getattr(dobj, 'ds', None)
+        if ds is None:
+            for i in range(3):
+                # NOTE that this is not universal.
+                self.domain_width[i] = 1.0
+                self.periodicity[i] = False
+        else:
+            DLE = _ensure_code(ds.domain_left_edge)
+            DRE = _ensure_code(ds.domain_right_edge)
+            for i in range(3):
+                self.domain_width[i] = DRE[i] - DLE[i]
+                self.periodicity[i] = ds.periodicity[i]
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -802,14 +805,9 @@
         cdef np.float64_t h, d, r2, temp, spos
         cdef int i, j, k
         h = d = 0
-        for ax in range(3):
-            temp = 1e30
-            for i in range(3):
-                if self.periodicity[ax] == 0 and i != 1: continue
-                spos = pos[ax] + (i-1)*self.domain_width[ax]
-                if fabs(spos - self.center[ax]) < fabs(temp):
-                    temp = spos - self.center[ax]
-            h += temp * self.norm_vec[ax]
+        for i in range(3):
+            temp = self.difference(pos[i], self.center[i], i)
+            h += temp * self.norm_vec[i]
             d += temp*temp
         r2 = (d - h*h)
         if fabs(h) <= self.height and r2 <= self.radius2: return 1
@@ -1022,7 +1020,7 @@
     @cython.cdivision(True)
     cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil:
         if pos[self.axis] + 0.5*dds[self.axis] > self.coord \
-           and pos[self.axis] - 0.5*dds[self.axis] <= self.coord:
+           and pos[self.axis] - 0.5*dds[self.axis] - grid_eps <= self.coord:
             return 1
         return 0
 
@@ -1044,7 +1042,7 @@
     @cython.cdivision(True)
     cdef int select_bbox(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3]) nogil:
-        if left_edge[self.axis] <= self.coord < right_edge[self.axis]:
+        if left_edge[self.axis] - grid_eps <= self.coord < right_edge[self.axis]:
             return 1
         return 0
 

diff -r 5479e1f7d8294c15e72f3a7e1e5db8500256e9fe -r 0e8cce58116d6de321400d39ed6319396b95254b yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -313,6 +313,15 @@
         return "Particle bounds %s and %s exceed domain bounds %s and %s" % (
             self.mi, self.ma, self.dle, self.dre)
 
+class YTIntDomainOverflow(YTException):
+    def __init__(self, dims, dd):
+        self.dims = dims
+        self.dd = dd
+
+    def __str__(self):
+        return "Integer domain overflow: %s in %s" % (
+            self.dims, self.dd)
+
 class YTIllDefinedFilter(YTException):
     def __init__(self, filter, s1, s2):
         self.filter = filter
@@ -415,4 +424,4 @@
         self.filename = filename
 
     def __str__(self):
-        return "A file already exists at %s and clobber=False." % self.filename
\ No newline at end of file
+        return "A file already exists at %s and clobber=False." % self.filename

diff -r 5479e1f7d8294c15e72f3a7e1e5db8500256e9fe -r 0e8cce58116d6de321400d39ed6319396b95254b yt/utilities/lib/QuadTree.pyx
--- a/yt/utilities/lib/QuadTree.pyx
+++ b/yt/utilities/lib/QuadTree.pyx
@@ -25,6 +25,8 @@
 from cython.operator cimport dereference as deref, preincrement as inc
 from fp_utils cimport fmax
 
+from yt.utilities.exceptions import YTIntDomainOverflow
+
 cdef extern from "stdlib.h":
     # NOTE that size_t might not be int
     void *alloca(int)
@@ -108,6 +110,7 @@
     cdef QTN_combine *combine
     cdef np.float64_t bounds[4]
     cdef np.float64_t dds[2]
+    cdef np.int64_t last_dims[2]
 
     def __cinit__(self, np.ndarray[np.int64_t, ndim=1] top_grid_dims,
                   int nvals, bounds, style = "integrate"):
@@ -246,13 +249,15 @@
     def get_args(self):
         return (self.top_grid_dims[0], self.top_grid_dims[1], self.nvals)
 
-    cdef void add_to_position(self,
+    cdef int add_to_position(self,
                  int level, np.int64_t pos[2],
                  np.float64_t *val,
                  np.float64_t weight_val, skip = 0):
         cdef int i, j, L
         cdef QuadTreeNode *node
         node = self.find_on_root_level(pos, level)
+        if node == NULL:
+            return -1
         cdef np.int64_t fac
         for L in range(level):
             if node.children[0][0] == NULL:
@@ -263,8 +268,9 @@
             i = (pos[0] >= fac*(2*node.pos[0]+1))
             j = (pos[1] >= fac*(2*node.pos[1]+1))
             node = node.children[i][j]
-        if skip == 1: return
+        if skip == 1: return 0
         self.combine(node, val, weight_val, self.nvals)
+        return 0
             
     @cython.cdivision(True)
     cdef QuadTreeNode *find_on_root_level(self, np.int64_t pos[2], int level):
@@ -273,8 +279,12 @@
         cdef np.int64_t i, j
         i = <np.int64_t> (pos[0] / self.po2[level])
         j = <np.int64_t> (pos[1] / self.po2[level])
+        if i > self.top_grid_dims[0] or i < 0 or \
+           j > self.top_grid_dims[1] or j < 0:
+            self.last_dims[0] = i
+            self.last_dims[1] = j
+            return NULL
         return self.root_nodes[i][j]
-        
     
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -322,13 +332,17 @@
             np.ndarray[np.int64_t, ndim=1] pys,
             np.ndarray[np.int64_t, ndim=1] level):
         cdef int num = pxs.shape[0]
-        cdef int p
+        cdef int p, rv
         cdef cnp.float64_t *vals
         cdef cnp.int64_t pos[2]
         for p in range(num):
             pos[0] = pxs[p]
             pos[1] = pys[p]
-            self.add_to_position(level[p], pos, NULL, 0.0, 1)
+            rv = self.add_to_position(level[p], pos, NULL, 0.0, 1)
+            if rv == -1:
+                raise YTIntDomainOverflow(
+                    (self.last_dims[0], self.last_dims[1]),
+                    (self.top_grid_dims[0], self.top_grid_dims[1]))
         return
 
     @cython.boundscheck(False)

diff -r 5479e1f7d8294c15e72f3a7e1e5db8500256e9fe -r 0e8cce58116d6de321400d39ed6319396b95254b yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -649,7 +649,7 @@
             del nz
         else:
             nim = im
-        ax = self._pylab.imshow(nim[:,:,:3]/nim[:,:,:3].max(), origin='lower')
+        ax = self._pylab.imshow(nim[:,:,:3]/nim[:,:,:3].max(), origin='upper')
         return ax
 
     def draw(self):
@@ -747,6 +747,9 @@
         image = ImageArray(self._render(double_check, num_threads, 
                                         image, sampler),
                            info=self.get_information())
+
+        # flip it up/down to handle how the png orientation is donetest.png
+        image = image[:,::-1,:]
         self.save_image(image, fn=fn, clip_ratio=clip_ratio, 
                        transparent=transparent)
         return image

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


