[yt-svn] commit/yt: 20 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Wed Sep 7 11:17:30 PDT 2016


20 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/2dbe7ded95f2/
Changeset:   2dbe7ded95f2
Branch:      yt
User:        Astrodude87
Date:        2016-08-23 10:20:54+00:00
Summary:     Fixed Ramses particle ages and added deposit methods
Affected #:  3 files

diff -r 0a704f6bd0096052e97d0c338266b7c4d2ea2379 -r 2dbe7ded95f27f9e3677b9ca0fdd9e42db320c32 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -175,6 +175,17 @@
                 function=function, units=unit_system["velocity"], take_log=False,
                 validators=[ValidateSpatial(0)])
 
+    try : 
+        for method, name in zip(("cic", "sum"), ("cic", "nn")):
+            function = _get_density_weighted_deposit_field(
+                "particle_age", "s", method)
+            registry.add_field(
+                ("deposit", ("%s_"+name+"_age") % (ptype)),
+                function=function, units=unit_system["time"], take_log=False,
+                  validators=[ValidateSpatial(0)])
+    except :
+        print " No particle age available. "
+
     # Now some translation functions.
 
     def particle_ones(field, data):

diff -r 0a704f6bd0096052e97d0c338266b7c4d2ea2379 -r 2dbe7ded95f27f9e3677b9ca0fdd9e42db320c32 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -615,7 +615,6 @@
                 dom, mi, ma = f.readline().split()
                 self.hilbert_indices[int(dom)] = (float(mi), float(ma))
         self.parameters.update(rheader)
-        self.current_time = self.parameters['time'] * self.parameters['unit_t']
         self.domain_left_edge = np.zeros(3, dtype='float64')
         self.domain_dimensions = np.ones(3, dtype='int32') * \
                         2**(self.min_level+1)
@@ -638,6 +637,93 @@
         self.max_level = rheader['levelmax'] - self.min_level - 1
         f.close()
 
+        def dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0):
+           return np.sqrt( aexp_tau**3 * (O_mat_0 + O_vac_0*aexp_tau**3 + O_k_0*aexp_tau) )
+
+        def dadt(aexp_t,O_mat_0,O_vac_0,O_k_0):
+           return np.sqrt( (1./aexp_t)*(O_mat_0 + O_vac_0*aexp_t**3 + O_k_0*aexp_t) )
+
+        def friedman(O_mat_0, O_vac_0, O_k_0):
+            alpha = 1.e-5
+            aexp_min = 1.e-3
+            aexp_tau = 1.
+            aexp_t = 1.
+            tau = 0.
+            t = 0.
+            nstep = 0
+            ntable = 100
+            aexp_out = np.zeros([ntable+1])
+            hexp_out = np.zeros([ntable+1])
+            tau_out = np.zeros([ntable+1])
+            t_out = np.zeros([ntable+1])
+
+            while aexp_tau >= aexp_min or aexp_t >= aexp_min:
+              nstep = nstep + 1
+              dtau = alpha * aexp_tau / dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)   
+              aexp_tau_pre = aexp_tau - dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)*dtau/2.0
+              aexp_tau = aexp_tau - dadtau(aexp_tau_pre,O_mat_0,O_vac_0,O_k_0)*dtau
+              tau = tau - dtau
+
+              dt = alpha * aexp_t / dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)
+              aexp_t_pre = aexp_t - dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)*dt/2.0
+              aexp_t = aexp_t - dadt(aexp_t_pre,O_mat_0,O_vac_0,O_k_0)*dt
+              t = t - dt
+
+            age_tot=-t
+            nskip=nstep/ntable
+
+            aexp_tau = 1.
+            aexp_t = 1.
+            tau = 0.
+            t = 0.
+            nstep = 0
+
+            n_out = 0
+            t_out[n_out] = t
+            tau_out[n_out] = tau
+            aexp_out[n_out] = aexp_tau
+            hexp_out[n_out] = dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)/aexp_tau
+ 
+            while aexp_tau >= aexp_min or aexp_t >= aexp_min:
+              nstep = nstep + 1
+              dtau = alpha * aexp_tau / dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)
+              aexp_tau_pre = aexp_tau - dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)*dtau/2.0
+              aexp_tau = aexp_tau - dadtau(aexp_tau_pre,O_mat_0,O_vac_0,O_k_0)*dtau
+              tau = tau - dtau
+
+              dt = alpha * aexp_t / dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)
+              aexp_t_pre = aexp_t - dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)*dt/2.0
+              aexp_t = aexp_t - dadt(aexp_t_pre,O_mat_0,O_vac_0,O_k_0)*dt
+              t = t - dt
+
+              if nstep % nskip == 0:
+                n_out = n_out + 1
+                t_out[n_out] = t
+                tau_out[n_out] = tau
+                aexp_out[n_out] = aexp_tau
+                hexp_out[n_out] = dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)/aexp_tau
+           
+            n_out = ntable
+            t_out[n_out] = t
+            tau_out[n_out] = tau
+            aexp_out[n_out] = aexp_tau
+            hexp_out[n_out] = dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)/aexp_tau
+
+            return aexp_out, hexp_out, tau_out, t_out, ntable, age_tot
+
+        self.aexp_frw, self.hexp_frw, self.tau_frw, self.t_frw, self.n_frw, self.time_tot = \
+          friedman( self.omega_matter, self.omega_lambda, 1. - self.omega_matter - self.omega_lambda )
+        i = 1
+        self.aexp = 1./(1. + self.current_redshift)
+        while self.aexp_frw[i] > self.aexp and i < self.n_frw:
+          i = i + 1
+
+        self.time_simu = self.t_frw[i  ]*(self.aexp-self.aexp_frw[i-1])/(self.aexp_frw[i]-self.aexp_frw[i-1])+ \
+                         self.t_frw[i-1]*(self.aexp-self.aexp_frw[i  ])/(self.aexp_frw[i-1]-self.aexp_frw[i])
+ 
+        self.current_time = (self.time_tot + self.time_simu)/(self.hubble_constant*1e7/3.08e24)/self.parameters['unit_t']
+
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
         if not os.path.basename(args[0]).startswith("info_"): return False

diff -r 0a704f6bd0096052e97d0c338266b7c4d2ea2379 -r 2dbe7ded95f27f9e3677b9ca0fdd9e42db320c32 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -101,4 +101,19 @@
             tr[field] = fpu.read_vector(f, dt)
             if field[1].startswith("particle_position"):
                 np.divide(tr[field], subset.domain.ds["boxlen"], tr[field])
+            if field[1] == "particle_age":
+              t_frw = subset.domain.ds.t_frw
+              tau_frw = subset.domain.ds.tau_frw
+              tsim = subset.domain.ds.time_simu
+              h100 = subset.domain.ds.hubble_constant
+              for ipart, age in enumerate(tr[field]):
+                 if age < 0.:
+                   i = 1
+                   while tau_frw[i] > age and i < subset.domain.ds.n_frw:
+                     i = i + 1 
+
+                   t = t_frw[i  ]*(age-tau_frw[i-1])/(tau_frw[i]-tau_frw[i-1])+ \
+                       t_frw[i-1]*(age-tau_frw[i  ])/(tau_frw[i-1]-tau_frw[i])
+                   newage = (tsim-t)/(h100*1e7/3.08e24)/subset.domain.ds['unit_t']
+                   tr[field][ipart] = newage
         return tr


https://bitbucket.org/yt_analysis/yt/commits/f55344206d51/
Changeset:   f55344206d51
Branch:      yt
User:        Astrodude87
Date:        2016-08-23 13:20:29+00:00
Summary:     Optimized the age method
Affected #:  2 files

diff -r 2dbe7ded95f27f9e3677b9ca0fdd9e42db320c32 -r f55344206d51ec701f6192a21fce20ca104772ed yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -651,11 +651,6 @@
             tau = 0.
             t = 0.
             nstep = 0
-            ntable = 100
-            aexp_out = np.zeros([ntable+1])
-            hexp_out = np.zeros([ntable+1])
-            tau_out = np.zeros([ntable+1])
-            t_out = np.zeros([ntable+1])
 
             while aexp_tau >= aexp_min or aexp_t >= aexp_min:
               nstep = nstep + 1
@@ -670,7 +665,15 @@
               t = t - dt
 
             age_tot=-t
-            nskip=nstep/ntable
+            ntable = 1000
+            if nstep < ntable :
+              ntable = nstep
+              alpha = alpha / 2.
+
+            tau_out = np.zeros([ntable+1])
+            t_out = np.zeros([ntable+1])
+            # (sampling the first half of the table more finely than second half):
+            delta_tau = 20.*tau/ntable/11.
 
             aexp_tau = 1.
             aexp_t = 1.
@@ -681,9 +684,27 @@
             n_out = 0
             t_out[n_out] = t
             tau_out[n_out] = tau
-            aexp_out[n_out] = aexp_tau
-            hexp_out[n_out] = dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)/aexp_tau
- 
+
+            next_tau = tau + delta_tau/10.
+
+            while n_out < ntable/2 : 
+              nstep = nstep + 1
+              dtau = alpha * aexp_tau / dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)
+              aexp_tau_pre = aexp_tau - dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)*dtau/2.0
+              aexp_tau = aexp_tau - dadtau(aexp_tau_pre,O_mat_0,O_vac_0,O_k_0)*dtau
+              tau = tau - dtau
+
+              dt = alpha * aexp_t / dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)
+              aexp_t_pre = aexp_t - dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)*dt/2.0
+              aexp_t = aexp_t - dadt(aexp_t_pre,O_mat_0,O_vac_0,O_k_0)*dt
+              t = t - dt
+
+              if tau < next_tau:
+                n_out = n_out + 1
+                t_out[n_out] = t
+                tau_out[n_out] = tau
+                next_tau = next_tau + delta_tau/10.
+
             while aexp_tau >= aexp_min or aexp_t >= aexp_min:
               nstep = nstep + 1
               dtau = alpha * aexp_tau / dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)
@@ -696,30 +717,28 @@
               aexp_t = aexp_t - dadt(aexp_t_pre,O_mat_0,O_vac_0,O_k_0)*dt
               t = t - dt
 
-              if nstep % nskip == 0:
+              if tau < next_tau:
                 n_out = n_out + 1
                 t_out[n_out] = t
                 tau_out[n_out] = tau
-                aexp_out[n_out] = aexp_tau
-                hexp_out[n_out] = dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)/aexp_tau
-           
+                next_tau = next_tau + delta_tau
+
             n_out = ntable
             t_out[n_out] = t
             tau_out[n_out] = tau
-            aexp_out[n_out] = aexp_tau
-            hexp_out[n_out] = dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)/aexp_tau
 
-            return aexp_out, hexp_out, tau_out, t_out, ntable, age_tot
+            return tau_out, t_out, delta_tau, ntable, age_tot
 
-        self.aexp_frw, self.hexp_frw, self.tau_frw, self.t_frw, self.n_frw, self.time_tot = \
+        self.tau_frw, self.t_frw, self.dtau, self.n_frw, self.time_tot = \
           friedman( self.omega_matter, self.omega_lambda, 1. - self.omega_matter - self.omega_lambda )
-        i = 1
-        self.aexp = 1./(1. + self.current_redshift)
-        while self.aexp_frw[i] > self.aexp and i < self.n_frw:
-          i = i + 1
 
-        self.time_simu = self.t_frw[i  ]*(self.aexp-self.aexp_frw[i-1])/(self.aexp_frw[i]-self.aexp_frw[i-1])+ \
-                         self.t_frw[i-1]*(self.aexp-self.aexp_frw[i  ])/(self.aexp_frw[i-1]-self.aexp_frw[i])
+        age = self.parameters['time']
+        iage = 1 + int(10.*age/self.dtau)
+        if iage > self.n_frw/2:
+          iage = self.n_frw/2 + (iage - self.n_frw/2 )/10
+
+        self.time_simu = self.t_frw[iage  ]*(age-self.tau_frw[iage-1])/(self.tau_frw[iage]-self.tau_frw[iage-1])+ \
+                         self.t_frw[iage-1]*(age-self.tau_frw[iage  ])/(self.tau_frw[iage-1]-self.tau_frw[iage])
  
         self.current_time = (self.time_tot + self.time_simu)/(self.hubble_constant*1e7/3.08e24)/self.parameters['unit_t']
 

diff -r 2dbe7ded95f27f9e3677b9ca0fdd9e42db320c32 -r f55344206d51ec701f6192a21fce20ca104772ed yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -106,14 +106,14 @@
               tau_frw = subset.domain.ds.tau_frw
               tsim = subset.domain.ds.time_simu
               h100 = subset.domain.ds.hubble_constant
+              nOver2 = subset.domain.ds.n_frw/2
               for ipart, age in enumerate(tr[field]):
                  if age < 0.:
-                   i = 1
-                   while tau_frw[i] > age and i < subset.domain.ds.n_frw:
-                     i = i + 1 
-
-                   t = t_frw[i  ]*(age-tau_frw[i-1])/(tau_frw[i]-tau_frw[i-1])+ \
-                       t_frw[i-1]*(age-tau_frw[i  ])/(tau_frw[i-1]-tau_frw[i])
+                   iage = 1 + int(10.*age/subset.domain.ds.dtau)
+                   if iage > nOver2:
+                     iage = nOver2 + (iage - nOver2)/10
+                   t = t_frw[iage  ]*(age-tau_frw[iage-1])/(tau_frw[iage]-tau_frw[iage-1])+ \
+                       t_frw[iage-1]*(age-tau_frw[iage  ])/(tau_frw[iage-1]-tau_frw[iage])
                    newage = (tsim-t)/(h100*1e7/3.08e24)/subset.domain.ds['unit_t']
-                   tr[field][ipart] = newage
+                   tr[field][ipart] = np.max([0.,newage])
         return tr


https://bitbucket.org/yt_analysis/yt/commits/02281d943344/
Changeset:   02281d943344
Branch:      yt
User:        Astrodude87
Date:        2016-08-23 14:02:51+00:00
Summary:     Merged with main
Affected #:  170 files

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5160,4 +5160,38 @@
 954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
 f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3
 079e456c38a87676472a458210077e2be325dc85 last_gplv3
+ca6e536c15a60070e6988fd472dc771a1897e170 yt-2.0
+882c41eed5dd4a3cdcbb567bcb79b833e46b1f42 yt-2.0.1
+a2b3521b1590c25029ca0bc602ad6cb7ae7b8ba2 yt-2.1
+41bd8aacfbc81fa66d7a3f2cd2880f10c3e237a4 yt-2.2
+3836676ee6307f9caf5ccdb0f0dd373676a68535 yt-2.3
+076cec2c57d2e4b508babbfd661f5daa1e34ec80 yt-2.4
+bd285a9a8a643ebb7b47b543e9343da84cd294c5 yt-2.5
+34a5e6774ceb26896c9d767563951d185a720774 yt-2.5.1
+2197c101413723de13e1d0dea153b182342ff719 yt-2.5.2
+59aa6445b5f4a26ecb2449f913c7f2b5fee04bee yt-2.5.3
+4da03e5f00b68c3a52107ff75ce48b09360b30c2 yt-2.5.4
+21c0314cee16242b6685e42a74d16f7a993c9a88 yt-2.5.5
+053487f48672b8fd5c43af992e92bc2f2499f31f yt-2.6
+d43ff9d8e20f2d2b8f31f4189141d2521deb341b yt-2.6.1
+f1e22ef9f3a225f818c43262e6ce9644e05ffa21 yt-2.6.2
+816186f16396a16853810ac9ebcde5057d8d5b1a yt-2.6.3
 f327552a6ede406b82711fb800ebcd5fe692d1cb yt-3.0a4
+73a9f749157260c8949f05c07715305aafa06408 yt-3.0.0
+0cf350f11a551f5a5b4039a70e9ff6d98342d1da yt-3.0.1
+511887af4c995a78fe606e58ce8162c88380ecdc yt-3.0.2
+fd7cdc4836188a3badf81adb477bcc1b9632e485 yt-3.1.0
+28733726b2a751e774c8b7ae46121aa57fd1060f yt-3.2
+425ff6dc64a8eb92354d7e6091653a397c068167 yt-3.2.1
+425ff6dc64a8eb92354d7e6091653a397c068167 yt-3.2.1
+0000000000000000000000000000000000000000 yt-3.2.1
+0000000000000000000000000000000000000000 yt-3.2.1
+f7ca21c7b3fdf25d2ccab139849ae457597cfd5c yt-3.2.1
+a7896583c06585be66de8404d76ad5bc3d2caa9a yt-3.2.2
+80aff0c49f40e04f00d7b39149c7fc297b8ed311 yt-3.2.3
+80aff0c49f40e04f00d7b39149c7fc297b8ed311 yt-3.2.3
+0000000000000000000000000000000000000000 yt-3.2.3
+0000000000000000000000000000000000000000 yt-3.2.3
+83d2c1e9313e7d83eb5b96888451ff2646fd8ff3 yt-3.2.3
+7edbfde96c3d55b227194394f46c0b2e6ed2b961 yt-3.3.0
+9bc3d0e9b750c923d44d73c447df64fc431f5838 yt-3.3.1

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -301,7 +301,15 @@
 This downloads that new forked repository to your local machine, so that you
 can access it, read it, make modifications, etc.  It will put the repository in
 a local directory of the same name as the repository in the current working
-directory.  You can see any past state of the code by using the hg log command.
+directory. You should also run the following command, to make sure you are at
+the "yt" branch, and not other ones like "stable" (this will be important
+later when you want to submit your pull requests):
+
+.. code-block:: bash
+
+   $ hg update yt
+
+You can see any past state of the code by using the hg log command.
 For example, the following command would show you the last 5 changesets
 (modifications to the code) that were submitted to that repository.
 

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -21,6 +21,7 @@
                 Daniel Fenn (df11c at my.fsu.edu)
                 John Forces (jforbes at ucolick.org)
                 Adam Ginsburg (keflavich at gmail.com)
+                Austin Gilbert (augilbert4 at gmail.com)
                 Sam Geen (samgeen at gmail.com)
                 Nathan Goldbaum (goldbaum at ucolick.org)
                 William Gray (graywilliamj at gmail.com)

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt setupext.py CONTRIBUTING.rst
+include README* CREDITS COPYING.txt CITATION  setupext.py CONTRIBUTING.rst
 include yt/visualization/mapserver/html/map_index.html
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/extensions/config_help.py
--- /dev/null
+++ b/doc/extensions/config_help.py
@@ -0,0 +1,34 @@
+import re
+import subprocess
+from docutils import statemachine
+from sphinx.util.compat import Directive
+
+def setup(app):
+    app.add_directive('config_help', GetConfigHelp)
+    setup.app = app
+    setup.config = app.config
+    setup.confdir = app.confdir
+
+    retdict = dict(
+        version='1.0',
+        parallel_read_safe=True,
+        parallel_write_safe=True
+    )
+
+    return retdict
+
+class GetConfigHelp(Directive):
+    required_arguments = 1
+    optional_arguments = 0
+    final_argument_whitespace = True
+
+    def run(self):
+        rst_file = self.state_machine.document.attributes['source']
+        data = subprocess.check_output(
+            self.arguments[0].split(" ") + ['-h']).decode('utf8').split('\n')
+        ind = next((i for i, val in enumerate(data)
+                    if re.match('\s{0,3}\{.*\}\s*$', val)))
+        lines = ['.. code-block:: none', ''] + data[ind + 1:]
+        self.state_machine.insert_input(
+            statemachine.string2lines("\n".join(lines)), rst_file)
+        return []

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -49,8 +49,8 @@
                     # in Python 3 (except Mercurial, which requires Python 2).
 INST_HG=1           # Install Mercurial or not?  If hg is not already
                     # installed, yt cannot be installed from source.
-INST_UNSTRUCTURED=0 # Install dependencies needed for unstructured mesh 
-                    # rendering?
+INST_EMBREE=0       # Install dependencies needed for Embree-accelerated 
+                    # ray tracing
 
 # These options control whether low-level system libraries are installed
 # they are necessary for building yt's dependencies from source and are 
@@ -75,6 +75,7 @@
 INST_H5PY=1     # Install h5py?
 INST_ASTROPY=0  # Install astropy?
 INST_NOSE=1     # Install nose?
+INST_NETCDF4=0  # Install netcdf4 and its python bindings?
 
 # These options allow you to customize the builds of yt dependencies.
 # They are only used if INST_CONDA=0.
@@ -115,7 +116,10 @@
         echo
         echo "    $ source deactivate"
         echo
-        echo "or install yt into your current environment"
+        echo "or install yt into your current environment with:"
+        echo
+        echo "    $ conda install -c conda-forge yt"
+        echo
         exit 1
     fi
     DEST_SUFFIX="yt-conda"
@@ -484,21 +488,19 @@
     ( $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
-# set paths needed for unstructured mesh rendering support
+# set paths needed for Embree
 
-if [ $INST_UNSTRUCTURED -ne 0 ]
+if [ $INST_EMBREE -ne 0 ]
 then
     if [ $INST_YT_SOURCE -eq 0 ]
     then
-        echo "yt must be compiled from source to install support for"
-        echo "unstructured mesh rendering. Please set INST_YT_SOURCE to 1"
-        echo "and re-run the install script."
+        echo "yt must be compiled from source to install Embree support."
+        echo "Please set INST_YT_SOURCE to 1 and re-run the install script."
         exit 1
     fi
     if [ $INST_CONDA -eq 0 ]
     then
-        echo "unstructured mesh rendering support has not yet been implemented"
-        echo "for INST_CONDA=0."
+        echo "Embree support has not yet been implemented for INST_CONDA=0."
         exit 1
     fi
     if [ `uname` = "Darwin" ]
@@ -510,8 +512,8 @@
         EMBREE="embree-2.8.0.x86_64.linux"
         EMBREE_URL="https://github.com/embree/embree/releases/download/v2.8.0/$EMBREE.tar.gz"
     else
-        echo "Unstructured mesh rendering is not supported on this platform."
-        echo "Set INST_UNSTRUCTURED=0 and re-run the install script."
+        echo "Embree is not supported on this platform."
+        echo "Set INST_EMBREE=0 and re-run the install script."
         exit 1
     fi
     PYEMBREE_URL="https://github.com/scopatz/pyembree/archive/master.zip"
@@ -528,6 +530,17 @@
     fi
 fi
 
+if [ $INST_NETCDF4 -ne 0 ]
+then
+    if [ $INST_CONDA -eq 0 ]
+    then
+        echo "This script can only install netcdf4 through conda."
+        echo "Please set INST_CONDA to 1"
+        echo "and re-run the install script"
+        exit 1
+    fi
+fi
+
 echo
 echo
 echo "========================================================================"
@@ -557,9 +570,9 @@
 get_willwont ${INST_HG}
 echo "be installing Mercurial"
 
-printf "%-18s = %s so I " "INST_UNSTRUCTURED" "${INST_UNSTRUCTURED}"
-get_willwont ${INST_UNSTRUCTURED}
-echo "be installing unstructured mesh rendering"
+printf "%-18s = %s so I " "INST_EMBREE" "${INST_EMBREE}"
+get_willwont ${INST_EMBREE}
+echo "be installing Embree"
 
 if [ $INST_CONDA -eq 0 ]
 then
@@ -1411,7 +1424,7 @@
     fi
     YT_DEPS+=('sympy')
 
-    if [ $INST_UNSTRUCTURED -eq 1 ]
+    if [ $INST_NETCDF4 -eq 1 ]
     then
         YT_DEPS+=('netcdf4')   
     fi
@@ -1425,14 +1438,21 @@
         log_cmd conda install --yes ${YT_DEP}
     done
 
+    if [ $INST_PY3 -eq 1 ]
+    then
+        echo "Installing mercurial"
+        log_cmd conda create -y -n py27 python=2.7 mercurial
+        log_cmd ln -s ${DEST_DIR}/envs/py27/bin/hg ${DEST_DIR}/bin
+    fi
+
     log_cmd pip install python-hglib
 
     log_cmd hg clone https://bitbucket.org/yt_analysis/yt_conda ${DEST_DIR}/src/yt_conda
     
-    if [ $INST_UNSTRUCTURED -eq 1 ]
+    if [ $INST_EMBREE -eq 1 ]
     then
         
-        echo "Installing embree"
+        echo "Installing Embree"
         if [ ! -d ${DEST_DIR}/src ]
         then
             mkdir ${DEST_DIR}/src
@@ -1479,22 +1499,15 @@
         fi
     fi
 
-    if [ $INST_PY3 -eq 1 ]
-    then
-        echo "Installing mercurial"
-        log_cmd conda create -y -n py27 python=2.7 mercurial
-        log_cmd ln -s ${DEST_DIR}/envs/py27/bin/hg ${DEST_DIR}/bin
-    fi
-
     if [ $INST_YT_SOURCE -eq 0 ]
     then
         echo "Installing yt"
-        log_cmd conda install --yes yt
+        log_cmd conda install -c conda-forge --yes yt
     else
         echo "Building yt from source"
         YT_DIR="${DEST_DIR}/src/yt-hg"
         log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
-        if [ $INST_UNSTRUCTURED -eq 1 ]
+        if [ $INST_EMBREE -eq 1 ]
         then
             echo $DEST_DIR > ${YT_DIR}/embree.cfg
         fi

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/_static/apiKey01.jpg
Binary file doc/source/_static/apiKey01.jpg has changed

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/_static/apiKey02.jpg
Binary file doc/source/_static/apiKey02.jpg has changed

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/_static/apiKey03.jpg
Binary file doc/source/_static/apiKey03.jpg has changed

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/_static/apiKey04.jpg
Binary file doc/source/_static/apiKey04.jpg has changed

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -116,7 +116,12 @@
 Continuum features with optical depths that follow a power law can also be
 added.  Like adding lines, you must specify details like the wavelength
 and the field in the dataset and LightRay that is tied to this feature.
-Below, we will add H Lyman continuum.
+The wavelength refers to the location at which the continuum begins to be 
+applied to the dataset, and as it moves to lower wavelength values, the 
+optical depth value decreases according to the defined power law.  The 
+normalization value is the column density of the linked field which results
+in an optical depth of 1 at the defined wavelength.  Below, we add the hydrogen 
+Lyman continuum.
 
 .. code-block:: python
 
@@ -131,7 +136,7 @@
 Making the Spectrum
 ^^^^^^^^^^^^^^^^^^^
 
-Once all the lines and continuum are added, it is time to make a spectrum out
+Once all the lines and continua are added, it is time to make a spectrum out
 of some light ray data.
 
 .. code-block:: python

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -121,8 +121,8 @@
 allows for robust (grid-independent, shape-independent, and noise-
 resilient) tracking of substructure. The code is prepackaged with yt,
 but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead
-developer is Peter Behroozi, and the methods are described in `Behroozi
-et al. 2011 <http://arxiv.org/abs/1110.4372>`_.
+developer is Peter Behroozi, and the methods are described in
+`Behroozi et al. 2011 <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_.
 In order to run the Rockstar halo finder in yt, make sure you've
 :ref:`installed it so that it can integrate with yt <rockstar-installation>`.
 
@@ -192,6 +192,8 @@
 Inside the ``outbase`` directory there is a text file named ``datasets.txt``
 that records the connection between ds names and the Rockstar file names.
 
+.. _rockstar-installation:
+
 Installing Rockstar
 """""""""""""""""""
 

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -99,9 +99,9 @@
    To work out the following examples, you should install
    `AtomDB <http://www.atomdb.org>`_ and get the files from the
    `xray_data <http://yt-project.org/data/xray_data.tar.gz>`_ auxiliary
-   data package (see the ``xray_data`` `README <xray_data_README.html>`_
-   for details on the latter). Make sure that in what follows you
-   specify the full path to the locations of these files.
+   data package (see the :ref:`xray_data_README` for details on the latter). 
+   Make sure that in what follows you specify the full path to the locations 
+   of these files.
 
 To generate photons from this dataset, we have several different things
 we need to set up. The first is a standard yt data object. It could

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/analyzing/analysis_modules/radial_column_density.rst
--- a/doc/source/analyzing/analysis_modules/radial_column_density.rst
+++ /dev/null
@@ -1,93 +0,0 @@
-.. _radial-column-density:
-
-Radial Column Density
-=====================
-.. sectionauthor:: Stephen Skory <s at skory.us>
-.. versionadded:: 2.3
-
-.. note::
-
-    As of :code:`yt-3.0`, the radial column density analysis module is not
-    currently functional.  This functionality is still available in
-    :code:`yt-2.x`.  If you would like to use these features in :code:`yt-3.x`,
-    help is needed to port them over.  Contact the yt-users mailing list if you
-    are interested in doing this.
-
-This module allows the calculation of column densities around a point over a
-field such as ``NumberDensity`` or ``Density``.
-This uses :ref:`healpix_volume_rendering` to interpolate column densities
-on the grid cells.
-
-Details
--------
-
-This module allows the calculation of column densities around a single point.
-For example, this is useful for looking at the gas around a radiating source.
-Briefly summarized, the calculation is performed by first creating a number
-of HEALPix shells around the central point.
-Next, the value of the column density at cell centers is found by
-linearly interpolating the values on the inner and outer shell.
-This is added as derived field, which can be used like any other derived field.
-
-Basic Example
--------------
-
-In this simple example below, the radial column density for the field
-``NumberDensity`` is calculated and added as a derived field named
-``RCDNumberDensity``.
-The calculations will use the starting point of (x, y, z) = (0.5, 0.5, 0.5) and
-go out to a maximum radius of 0.5 in code units.
-Due to the way normalization is handled in HEALPix, the column density
-calculation can extend out only as far as the nearest face of the volume.
-For example, with a center point of (0.2, 0.3, 0.4), the column density
-is calculated out to only a radius of 0.2.
-The column density will be output as zero (0.0) outside the maximum radius.
-Just like a real number column density, when the derived is added using
-``add_field``, we give the units as :math:`1/\rm{cm}^2`.
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.radial_column_density.api import *
-  ds = load("data0030")
-
-  rcdnumdens = RadialColumnDensity(ds, 'NumberDensity', [0.5, 0.5, 0.5],
-    max_radius = 0.5)
-  def _RCDNumberDensity(field, data, rcd = rcdnumdens):
-      return rcd._build_derived_field(data)
-  add_field('RCDNumberDensity', _RCDNumberDensity, units=r'1/\rm{cm}^2')
-
-  dd = ds.all_data()
-  print(dd['RCDNumberDensity'])
-
-The field ``RCDNumberDensity`` can be used just like any other derived field
-in yt.
-
-Additional Parameters
----------------------
-
-Each of these parameters is added to the call to ``RadialColumnDensity()``,
-just like ``max_radius`` is used above.
-
-  * ``steps`` : integer - Because this implementation uses linear
-    interpolation to calculate the column
-    density at each cell, the accuracy of the solution goes up as the number of
-    HEALPix surfaces is increased.
-    The ``steps`` parameter controls the number of HEALPix surfaces, and a larger
-    number is more accurate, but slower. Default = 10.
-
-  * ``base`` : string - This controls where the surfaces are placed, with
-    linear "lin" or logarithmic "log" spacing. The inner-most
-    surface is always set to the size of the smallest cell.
-    Default = "lin".
-
-  * ``Nside`` : int
-    The resolution of column density calculation as performed by
-    HEALPix. Higher numbers mean higher quality. Max = 8192.
-    Default = 32.
-
-  * ``ang_divs`` : imaginary integer
-    This number controls the gridding of the HEALPix projection onto
-    the spherical surfaces. Higher numbers mean higher quality.
-    Default = 800j.
-

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/analyzing/analysis_modules/xray_data_README.rst
--- a/doc/source/analyzing/analysis_modules/xray_data_README.rst
+++ b/doc/source/analyzing/analysis_modules/xray_data_README.rst
@@ -1,3 +1,5 @@
+.. _xray_data_README:
+
 Auxiliary Data Files for use with yt's Photon Simulator
 =======================================================
 

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -21,7 +21,7 @@
 * Derived Quantities (total mass, angular momentum, etc) (:ref:`creating_derived_quantities`,
   :ref:`derived-quantities`)
 * 1-, 2-, and 3-D profiles (:ref:`generating-profiles-and-histograms`)
-* Halo finding (:ref:`halo_finding`)
+* Halo analysis (:ref:`halo-analysis`)
 * Volume rendering (:ref:`volume_rendering`)
 * Isocontours & flux calculations (:ref:`extracting-isocontour-information`)
 
@@ -194,7 +194,7 @@
 
 The following operations use spatial decomposition:
 
-* :ref:`halo_finding`
+* :ref:`halo-analysis`
 * :ref:`volume_rendering`
 
 Grid Decomposition
@@ -501,7 +501,7 @@
 subtle art in estimating the amount of memory needed for halo finding, but a
 rule of thumb is that the HOP halo finder is the most memory intensive
 (:func:`HaloFinder`), and Friends of Friends (:func:`FOFHaloFinder`) being the
-most memory-conservative. For more information, see :ref:`halo_finding`.
+most memory-conservative. For more information, see :ref:`halo-analysis`.
 
 **Volume Rendering**
 

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/analyzing/saving_data.rst
--- a/doc/source/analyzing/saving_data.rst
+++ b/doc/source/analyzing/saving_data.rst
@@ -1,4 +1,4 @@
-.. _saving_data
+.. _saving_data:
 
 Saving Reloadable Data
 ======================

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -31,7 +31,8 @@
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
               'sphinx.ext.pngmath', 'sphinx.ext.viewcode',
-              'sphinx.ext.napoleon', 'yt_cookbook', 'yt_colormaps']
+              'sphinx.ext.napoleon', 'yt_cookbook', 'yt_colormaps',
+              'config_help']
 
 if not on_rtd:
     extensions.append('sphinx.ext.autosummary')
@@ -67,9 +68,9 @@
 # built documents.
 #
 # The short X.Y version.
-version = '3.3-dev'
+version = '3.4-dev'
 # The full version, including alpha/beta/rc tags.
-release = '3.3-dev'
+release = '3.4-dev'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/cookbook/calculating_information.rst
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -56,6 +56,16 @@
 
 .. yt_cookbook:: simulation_analysis.py
 
+Smoothed Fields
+~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to create a smoothed field,
+corresponding to a user-created derived field, using the
+:meth:`~yt.fields.particle_fields.add_volume_weighted_smoothed_field` method.
+See :ref:`gadget-notebook` for how to work with Gadget data.
+
+.. yt_cookbook:: smoothed_field.py
+
 
 .. _cookbook-time-series-analysis:
 
@@ -93,16 +103,6 @@
 
 .. yt_cookbook:: hse_field.py
 
-Smoothed Fields
-~~~~~~~~~~~~~~~
-
-This recipe demonstrates how to create a smoothed field,
-corresponding to a user-created derived field, using the
-:meth:`~yt.fields.particle_fields.add_volume_weighted_smoothed_field` method.
-See :ref:`gadget-notebook` for how to work with Gadget data.
-
-.. yt_cookbook:: smoothed_field.py
-
 Using Particle Filters to Calculate Star Formation Rates
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -303,6 +303,26 @@
 
 .. yt_cookbook:: vol-annotated.py
 
+.. _cookbook-vol-points:
+
+Volume Rendering with Points
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to make a volume rendering composited with point
+sources. This could represent star or dark matter particles, for example.
+
+.. yt_cookbook:: vol-points.py
+
+.. _cookbook-vol-lines:
+
+Volume Rendering with Lines
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to make a volume rendering composited with line
+sources.
+
+.. yt_cookbook:: vol-lines.py
+
 .. _cookbook-opengl_vr:
 
 Advanced Interactive Data Visualization

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -65,10 +65,13 @@
 
 .. yt_cookbook:: light_ray.py
 
+.. _cookbook-single-dataset-light-ray:
+
+Single Dataset Light Ray
+~~~~~~~~~~~~~~~~~~~~~~~~
+
 This script demonstrates how to make a light ray from a single dataset.
 
-.. _cookbook-single-dataset-light-ray:
-
 .. yt_cookbook:: single_dataset_light_ray.py
 
 Creating and Fitting Absorption Spectra

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/cookbook/embedded_webm_animation.ipynb
--- a/doc/source/cookbook/embedded_webm_animation.ipynb
+++ /dev/null
@@ -1,137 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "This example shows how to embed an animation produced by `matplotlib` into an IPython notebook.  This example makes use of `matplotlib`'s [animation toolkit](http://matplotlib.org/api/animation_api.html) to transform individual frames into a final rendered movie.  \n",
-    "\n",
-    "Matplotlib uses [`ffmpeg`](http://www.ffmpeg.org/) to generate the movie, so you must install `ffmpeg` for this example to work correctly.  Usually the best way to install `ffmpeg` is using your system's package manager."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import yt\n",
-    "from matplotlib import animation"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "First, we need to construct a function that will embed the video produced by ffmpeg directly into the notebook document. This makes use of the [HTML5 video tag](http://www.w3schools.com/html/html5_video.asp) and the WebM video format.  WebM is supported by Chrome, Firefox, and Opera, but not Safari and Internet Explorer.  If you have trouble viewing the video you may need to use a different video format.  Since this uses `libvpx` to construct the frames, you will need to ensure that ffmpeg has been compiled with `libvpx` support."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "from tempfile import NamedTemporaryFile\n",
-    "import base64\n",
-    "\n",
-    "VIDEO_TAG = \"\"\"<video controls>\n",
-    " <source src=\"data:video/x-webm;base64,{0}\" type=\"video/webm\">\n",
-    " Your browser does not support the video tag.\n",
-    "</video>\"\"\"\n",
-    "\n",
-    "def anim_to_html(anim):\n",
-    "    if not hasattr(anim, '_encoded_video'):\n",
-    "        with NamedTemporaryFile(suffix='.webm') as f:\n",
-    "            anim.save(f.name, fps=6, extra_args=['-vcodec', 'libvpx'])\n",
-    "            video = open(f.name, \"rb\").read()\n",
-    "        anim._encoded_video = base64.b64encode(video)\n",
-    "    \n",
-    "    return VIDEO_TAG.format(anim._encoded_video.decode('ascii'))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Next, we define a function to actually display the video inline in the notebook."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "from IPython.display import HTML\n",
-    "\n",
-    "def display_animation(anim):\n",
-    "    plt.close(anim._fig)\n",
-    "    return HTML(anim_to_html(anim))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Finally, we set up the animation itsself.  We use yt to load the data and create each frame and use matplotlib to stitch the frames together.  Note that we customize the plot a bit by calling the `set_zlim` function.  Customizations only need to be applied to the first frame - they will carry through to the rest.\n",
-    "\n",
-    "This may take a while to run, be patient."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import matplotlib.pyplot as plt\n",
-    "from matplotlib.backends.backend_agg import FigureCanvasAgg\n",
-    "\n",
-    "prj = yt.ProjectionPlot(yt.load('Enzo_64/DD0000/data0000'), 0, 'density', weight_field='density',width=(180,'Mpccm'))\n",
-    "prj.set_zlim('density',1e-32,1e-26)\n",
-    "fig = prj.plots['density'].figure\n",
-    "\n",
-    "# animation function.  This is called sequentially\n",
-    "def animate(i):\n",
-    "    ds = yt.load('Enzo_64/DD%04i/data%04i' % (i,i))\n",
-    "    prj._switch_ds(ds)\n",
-    "\n",
-    "# call the animator.  blit=True means only re-draw the parts that have changed.\n",
-    "anim = animation.FuncAnimation(fig, animate, frames=44, interval=200, blit=False)\n",
-    "\n",
-    "# call our new function to display the animation\n",
-    "display_animation(anim)"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 2",
-   "language": "python",
-   "name": "python2"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 2
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.10"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/cookbook/embedded_webm_animation.rst
--- a/doc/source/cookbook/embedded_webm_animation.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Making animations using matplotlib and ffmpeg
----------------------------------------------
-
-.. notebook:: embedded_webm_animation.ipynb

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/cookbook/index.rst
--- a/doc/source/cookbook/index.rst
+++ b/doc/source/cookbook/index.rst
@@ -41,7 +41,6 @@
 
    notebook_tutorial
    custom_colorbar_tickmarks
-   embedded_webm_animation
    gadget_notebook
    owls_notebook
    ../visualizing/transfer_function_helper

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -1,22 +1,20 @@
 import yt
-import numpy as np
-from yt.visualization.volume_rendering.api import BoxSource, CoordinateVectorSource
 
 # Load the dataset.
 ds = yt.load("Enzo_64/DD0043/data0043")
-sc = yt.create_scene(ds, ('gas','density'))
-sc.get_source(0).transfer_function.grey_opacity=True
+sc = yt.create_scene(ds, ('gas', 'density'))
 
-sc.annotate_domain(ds)
-sc.render()
-sc.save("%s_vr_domain.png" % ds)
+# You may need to adjust the alpha values to get a rendering with good contrast
+# For annotate_domain, the fourth color value is alpha.
 
-sc.annotate_grids(ds)
-sc.render()
-sc.save("%s_vr_grids.png" % ds)
+# Draw the domain boundary
+sc.annotate_domain(ds, color=[1, 1, 1, 0.01])
+sc.save("%s_vr_domain.png" % ds, sigma_clip=4)
 
-# Here we can draw the coordinate vectors on top of the image by processing
-# it through the camera. Then save it out.
-sc.annotate_axes()
-sc.render()
-sc.save("%s_vr_coords.png" % ds)
+# Draw the grid boundaries
+sc.annotate_grids(ds, alpha=0.01)
+sc.save("%s_vr_grids.png" % ds, sigma_clip=4)
+
+# Draw a coordinate axes triad
+sc.annotate_axes(alpha=0.01)
+sc.save("%s_vr_coords.png" % ds, sigma_clip=4)

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/cookbook/single_dataset_light_ray.py
--- a/doc/source/cookbook/single_dataset_light_ray.py
+++ b/doc/source/cookbook/single_dataset_light_ray.py
@@ -8,9 +8,12 @@
 
 # With a single dataset, a start_position and
 # end_position or trajectory must be given.
-# Trajectory should be given as (r, theta, phi)
-lr.make_light_ray(start_position=[0., 0., 0.],
-                  end_position=[1., 1., 1.],
+# These positions can be defined as xyz coordinates,
+# but here we just use the two opposite corners of the 
+# simulation box.  Alternatively, trajectory should 
+# be given as (r, theta, phi)
+lr.make_light_ray(start_position=ds.domain_left_edge,
+                  end_position=ds.domain_right_edge,
                   solution_filename='lightraysolution.txt',
                   data_filename='lightray.h5',
                   fields=['temperature', 'density'])

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/cookbook/vol-annotated.py
--- a/doc/source/cookbook/vol-annotated.py
+++ b/doc/source/cookbook/vol-annotated.py
@@ -1,75 +1,29 @@
-#!/usr/bin/env python
+import yt
 
-import numpy as np
-import pylab
+ds = yt.load('Enzo_64/DD0043/data0043')
 
-import yt
-import yt.visualization.volume_rendering.old_camera as vr
+sc = yt.create_scene(ds, lens_type='perspective')
 
-ds = yt.load("maestro_subCh_plt00248")
+source = sc[0]
 
-dd = ds.all_data()
+source.set_field('density')
+source.set_log(True)
 
-# field in the dataset we will visualize
-field = ('boxlib', 'radial_velocity')
+# Set up the camera parameters: focus, width, resolution, and image orientation
+sc.camera.focus = ds.domain_center
+sc.camera.resolution = 1024
+sc.camera.north_vector = [0, 0, 1]
+sc.camera.position = [1.7, 1.7, 1.7]
 
-# the values we wish to highlight in the rendering.  We'll put a Gaussian
-# centered on these with width sigma
-vals = [-1.e7, -5.e6, -2.5e6, 2.5e6, 5.e6, 1.e7]
-sigma = 2.e5
+# You may need to adjust the alpha values to get an image with good contrast.
+# For the annotate_domain call, the fourth value in the color tuple is the
+# alpha value.
+sc.annotate_axes(alpha=.02)
+sc.annotate_domain(ds, color=[1, 1, 1, .01])
 
-mi, ma = min(vals), max(vals)
+text_string = "T = {} Gyr".format(float(ds.current_time.to('Gyr')))
 
-# Instantiate the ColorTransferfunction.
-tf =  yt.ColorTransferFunction((mi, ma))
-
-for v in vals:
-    tf.sample_colormap(v, sigma**2, colormap="coolwarm")
-
-
-# volume rendering requires periodic boundaries.  This dataset has
-# solid walls.  We need to hack it for now (this will be fixed in
-# a later yt)
-ds.periodicity = (True, True, True)
-
-
-# Set up the camera parameters: center, looking direction, width, resolution
-c = np.array([0.0, 0.0, 0.0])
-L = np.array([1.0, 1.0, 1.2])
-W = 1.5*ds.domain_width
-N = 720
-
-# +z is "up" for our dataset
-north=[0.0,0.0,1.0]
-
-# Create a camera object
-cam = vr.Camera(c, L, W, N, transfer_function=tf, ds=ds,
-                no_ghost=False, north_vector=north,
-                fields = [field], log_fields = [False])
-
-im = cam.snapshot()
-
-# add an axes triad
-cam.draw_coordinate_vectors(im)
-
-# add the domain box to the image
-nim = cam.draw_domain(im)
-
-# increase the contrast -- for some reason, the enhance default
-# to save_annotated doesn't do the trick
-max_val = im[:,:,:3].std() * 4.0
-nim[:,:,:3] /= max_val
-
-# we want to write the simulation time on the figure, so create a
-# figure and annotate it
-f = pylab.figure()
-
-pylab.text(0.2, 0.85, "{:.3g} s".format(float(ds.current_time.d)),
-           transform=f.transFigure, color="white")
-
-# tell the camera to use our figure
-cam._render_figure = f
-
-# save annotated -- this added the transfer function values,
-# and the clear_fig=False ensures it writes onto our existing figure.
-cam.save_annotated("vol_annotated.png", nim, dpi=145, clear_fig=False)
+# save an annotated version of the volume rendering including a representation
+# of the transfer function and a nice label showing the simulation time.
+sc.save_annotated("vol_annotated.png", sigma_clip=6,
+                  text_annotate=[[(.1, 1.05), text_string]])

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/cookbook/vol-lines.py
--- /dev/null
+++ b/doc/source/cookbook/vol-lines.py
@@ -0,0 +1,22 @@
+import yt
+import numpy as np
+from yt.visualization.volume_rendering.api import LineSource
+from yt.units import kpc
+
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+sc = yt.create_scene(ds)
+
+np.random.seed(1234567)
+
+nlines = 50
+vertices = (np.random.random([nlines, 2, 3]) - 0.5) * 200 * kpc
+colors = np.random.random([nlines, 4])
+colors[:, 3] = 0.1
+
+lines = LineSource(vertices, colors)
+sc.add_source(lines)
+
+sc.camera.width = 300*kpc
+
+sc.save(sigma_clip=4.0)

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/cookbook/vol-points.py
--- /dev/null
+++ b/doc/source/cookbook/vol-points.py
@@ -0,0 +1,29 @@
+import yt
+import numpy as np
+from yt.visualization.volume_rendering.api import PointSource
+from yt.units import kpc
+
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+sc = yt.create_scene(ds)
+
+np.random.seed(1234567)
+
+npoints = 1000
+
+# Random particle positions
+vertices = np.random.random([npoints, 3])*200*kpc
+
+# Random colors
+colors = np.random.random([npoints, 4])
+
+# Set alpha value to something that produces a good contrast with the volume
+# rendering
+colors[:, 3] = 0.1
+
+points = PointSource(vertices, colors=colors)
+sc.add_source(points)
+
+sc.camera.width = 300*kpc
+
+sc.save(sigma_clip=5)

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/cookbook/yt_gadget_owls_analysis.ipynb
--- a/doc/source/cookbook/yt_gadget_owls_analysis.ipynb
+++ b/doc/source/cookbook/yt_gadget_owls_analysis.ipynb
@@ -20,7 +20,7 @@
    "source": [
     "The first thing you will need to run these examples is a working installation of yt.  The author or these examples followed the instructions under \"Get yt: from source\" at http://yt-project.org/ to install an up to date development version of yt.\n",
     "\n",
-    "Next you should set the default ``test_data_dir`` in the ``.yt/config`` file in your home directory.  Note that you may have to create the directory and file if it doesn't exist already.\n",
+    "Next you should set the default ``test_data_dir`` in the ``~/.config/yt/ytrc`` file in your home directory.  Note that you may have to create the directory and file if they don't exist already.\n",
     "\n",
     "> [yt]\n",
     "\n",

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/developing/building_the_docs.rst
--- a/doc/source/developing/building_the_docs.rst
+++ b/doc/source/developing/building_the_docs.rst
@@ -176,6 +176,7 @@
 .. _Sphinx: http://sphinx-doc.org/
 .. _pandoc: http://johnmacfarlane.net/pandoc/
 .. _ffmpeg: http://www.ffmpeg.org/
+.. _IPython: https://ipython.org/
 
 You will also need the full yt suite of `yt test data
 <http://yt-project.org/data/>`_, including the larger datasets that are not used

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/developing/extensions.rst
--- a/doc/source/developing/extensions.rst
+++ b/doc/source/developing/extensions.rst
@@ -3,7 +3,7 @@
 Extension Packages
 ==================
 
-.. note:: For some additional discussion, see :ref:`YTEP-0029
+.. note:: For some additional discussion, see `YTEP-0029
           <http://ytep.readthedocs.io/en/latest/YTEPs/YTEP-0029.html>`_, where
           this plan was designed.
 

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -103,7 +103,7 @@
    accept no arguments. The test function should do some work that tests some
    functionality and should also verify that the results are correct using
    assert statements or functions.  
-# Tests can ``yield`` a tuple of the form ``function``, ``argument_one``,
+#. Tests can ``yield`` a tuple of the form ``function``, ``argument_one``,
    ``argument_two``, etc.  For example ``yield assert_equal, 1.0, 1.0`` would be
    captured by nose as a test that asserts that 1.0 is equal to 1.0.
 #. Use ``fake_random_ds`` to test on datasets, and be sure to test for
@@ -285,15 +285,12 @@
 
 These datasets are available at http://yt-project.org/data/.
 
-Next, modify the file ``~/.yt/config`` to include a section ``[yt]``
-with the parameter ``test_data_dir``.  Set this to point to the
-directory with the test data you want to test with.  Here is an example
-config file:
+Next, add the config parameter ``test_data_dir`` pointing to the 
+directory with the test data you want to test with, e.g.:
 
 .. code-block:: none
 
-   [yt]
-   test_data_dir = /Users/tomservo/src/yt-data
+   $ yt config set yt test_data_dir /Users/tomservo/src/yt-data
 
 More data will be added over time.  To run the answer tests, you must first
 generate a set of test answers locally on a "known good" revision, then update
@@ -313,7 +310,7 @@
 This command will create a set of local answers from the tipsy frontend tests
 and store them in ``$HOME/Documents/test`` (this can but does not have to be the
 same directory as the ``test_data_dir`` configuration variable defined in your
-``.yt/config`` file) in a file named ``local-tipsy``. To run the tipsy
+``~/.config/yt/ytrc`` file) in a file named ``local-tipsy``. To run the tipsy
 frontend's answer tests using a different yt changeset, update to that
 changeset, recompile if necessary, and run the tests using the following
 command:
@@ -487,7 +484,7 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 Before any code is added to or modified in the yt codebase, each incoming
 changeset is run against all available unit and answer tests on our `continuous
-integration server <http://tests.yt-project.org>`_. While unit tests are
+integration server <https://tests.yt-project.org>`_. While unit tests are
 autodiscovered by `nose <http://nose.readthedocs.org/en/latest/>`_ itself,
 answer tests require definition of which set of tests constitute to a given
 answer. Configuration for the integration server is stored in

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/examining/Loading_Generic_Array_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Array_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Array_Data.ipynb
@@ -41,7 +41,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "import yt\n",
@@ -58,7 +60,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "arr = np.random.random(size=(64,64,64))"
@@ -74,7 +78,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "data = dict(density = (arr, \"g/cm**3\"))\n",
@@ -118,7 +124,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -140,7 +148,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "posx_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
@@ -167,7 +177,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -193,7 +205,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "import h5py\n",
@@ -213,7 +227,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "print (f.keys())"
@@ -229,7 +245,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "units = [\"gauss\",\"gauss\",\"gauss\", \"g/cm**3\", \"erg/cm**3\", \"K\", \n",
@@ -246,7 +264,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "data = {k:(v.value,u) for (k,v), u in zip(f.items(),units)}\n",
@@ -256,7 +276,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "ds = yt.load_uniform_grid(data, data[\"Density\"][0].shape, length_unit=250.*cm_per_kpc, bbox=bbox, nprocs=8, \n",
@@ -273,7 +295,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "prj = yt.ProjectionPlot(ds, \"z\", [\"z-velocity\",\"Temperature\",\"Bx\"], weight_field=\"Density\")\n",
@@ -299,7 +323,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "#Find the min and max of the field\n",
@@ -313,29 +339,15 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Create a Transfer Function that goes from the minimum to the maximum of the data:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "tf = yt.ColorTransferFunction((mi, ma), grey_opacity=False)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
     "Define the properties and size of the `camera` viewport:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "# Choose a vector representing the viewing direction.\n",
@@ -358,24 +370,41 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
-    "cam = ds.camera(c, L, W, Npixels, tf, fields=['Temperature'],\n",
-    "                north_vector=[0,0,1], steady_north=True, \n",
-    "                sub_samples=5, log_fields=[False])\n",
+    "sc = yt.create_scene(ds, 'Temperature')\n",
+    "dd = ds.all_data()\n",
     "\n",
-    "cam.transfer_function.map_to_colormap(mi,ma, \n",
-    "                                      scale=15.0, colormap='algae')"
+    "source = sc[0]\n",
+    "\n",
+    "source.log_field = False\n",
+    "\n",
+    "tf = yt.ColorTransferFunction((mi, ma), grey_opacity=False)\n",
+    "tf.map_to_colormap(mi, ma, scale=15.0, colormap='algae')\n",
+    "\n",
+    "source.set_transfer_function(tf)\n",
+    "\n",
+    "sc.add_source(source)\n",
+    "\n",
+    "cam = sc.add_camera()\n",
+    "cam.width = W\n",
+    "cam.center = c\n",
+    "cam.normal_vector = L\n",
+    "cam.north_vector = [0, 0, 1]"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
-    "cam.show()"
+    "sc.show(sigma_clip=4)"
    ]
   },
   {
@@ -395,7 +424,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "import astropy.io.fits as pyfits\n",
@@ -412,7 +443,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "f = pyfits.open(data_dir+\"/UnigridData/velocity_field_20.fits\")\n",
@@ -429,7 +462,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "data = {}\n",
@@ -449,7 +484,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "data[\"velocity_x\"] = data.pop(\"x-velocity\")\n",
@@ -467,7 +504,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "ds = yt.load_uniform_grid(data, data[\"velocity_x\"][0].shape, length_unit=(1.0,\"Mpc\"))\n",
@@ -495,7 +534,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "grid_data = [\n",
@@ -520,7 +561,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "for g in grid_data: \n",
@@ -538,7 +581,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "grid_data[0][\"number_of_particles\"] = 0 # Set no particles in the top-level grid\n",
@@ -561,7 +606,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "ds = yt.load_amr_grids(grid_data, [32, 32, 32])"
@@ -577,7 +624,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -613,7 +662,7 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 3.0
+    "version": 3
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
@@ -625,4 +674,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
\ No newline at end of file
+}

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -325,7 +325,7 @@
   of length 1.0 in "code length" which may produce strange results for volume
   quantities.
 
-.. _loading-fits-data:
+.. _loading-exodusii-data:
 
 Exodus II Data
 --------------
@@ -481,6 +481,7 @@
     ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
                   displacements={'connect2': (5.0, [0.0, 0.0, 1.0])})
 
+.. _loading-fits-data:
 
 FITS Data
 ---------
@@ -1042,6 +1043,8 @@
 
 yt will utilize length, mass and time to set up all other units.
 
+.. _loading-gamer-data:
+
 GAMER Data
 ----------
 

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/faq/index.rst
--- a/doc/source/faq/index.rst
+++ b/doc/source/faq/index.rst
@@ -388,10 +388,10 @@
 To make things easier to load these sample datasets, you can add the parent
 directory to your downloaded sample data to your *yt path*.
 If you set the option ``test_data_dir``, in the section ``[yt]``,
-in ``~/.yt/config``, yt will search this path for them.
+in ``~/.config/yt/ytrc``, yt will search this path for them.
 
 This means you can download these datasets to ``/big_drive/data_for_yt`` , add
-the appropriate item to ``~/.yt/config``, and no matter which directory you are
+the appropriate item to ``~/.config/yt/ytrc``, and no matter which directory you are
 in when running yt, it will also check in *that* directory.
 
 
@@ -437,12 +437,11 @@
 hand, you may want it to output a lot more, since you can't figure out exactly what's going
 wrong, and you want to output some debugging information. The yt log level can be
 changed using the :ref:`configuration-file`, either by setting it in the
-``$HOME/.yt/config`` file:
+``$HOME/.config/yt/ytrc`` file:
 
 .. code-block:: bash
 
-   [yt]
-   loglevel = 10 # This sets the log level to "DEBUG"
+   $ yt config set yt loglevel 10  # This sets the log level to "DEBUG"
 
 which would produce debug (as well as info, warning, and error) messages, or at runtime:
 

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/index.rst
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -133,6 +133,16 @@
      <tr valign="top"><td width="25%"><p>
+           <a href="sharing_data.html">Sharing Data</a>
+         </p>
+       </td>
+       <td width="75%">
+         <p class="linkdescr">The yt Hub</p>
+       </td>
+     </tr>
+     <tr valign="top">
+       <td width="25%">
+         <p><a href="reference/index.html">Reference Materials</a></p></td>
@@ -185,6 +195,7 @@
    analyzing/analysis_modules/index
    examining/index
    developing/index
+   sharing_data
    reference/index
    faq/index
    Getting Help <help/index>

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/intro/index.rst
--- a/doc/source/intro/index.rst
+++ b/doc/source/intro/index.rst
@@ -105,7 +105,7 @@
 use external libraries or codes.  While they are installed with yt, they are
 not loaded by default in every session so you have to call them specifically.
 Examples include :ref:`halo analysis <halo-analysis>` (including
-:ref:`halo finding <halo_finding>`, :ref:`merger trees <merger_tree>`,
+:ref:`halo finding <halo-analysis>`, :ref:`merger trees <merger_tree>`,
 :ref:`halo mass functions <halo_mass_function>`), :ref:`synthetic observations
 <synthetic-observations>` (including :ref:`cosmological light cones
 <light-cone-generator>`, :ref:`cosmological light rays <light-ray-generator>`,

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/quickstart/6)_Volume_Rendering.ipynb
--- a/doc/source/quickstart/6)_Volume_Rendering.ipynb
+++ b/doc/source/quickstart/6)_Volume_Rendering.ipynb
@@ -106,21 +106,21 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 2",
+   "display_name": "Python 3",
    "language": "python",
-   "name": "python2"
+   "name": "python3"
   },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 2
+    "version": 3
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.10"
+   "pygments_lexer": "ipython3",
+   "version": "3.5.1"
   }
  },
  "nbformat": 4,

diff -r f55344206d51ec701f6192a21fce20ca104772ed -r 02281d9433440bd3f437ae948190829d35108f33 doc/source/reference/changelog.rst
--- a/doc/source/reference/changelog.rst
+++ b/doc/source/reference/changelog.rst
@@ -11,6 +11,316 @@
 The `CREDITS file <http://bitbucket.org/yt_analysis/yt/src/yt/CREDITS>`_ contains the
 most up-to-date list of everyone who has contributed to the yt source code.
 
+Version 3.3
+-----------
+
+Version 3.3 is the first major release of yt since July 2015. It includes more
+than 3000 commits from 41 contributors, including 12 new contributors.
+
+Major enhancements
+^^^^^^^^^^^^^^^^^^
+
+* Raw and processed data from selections, projections, profiles and so forth can
+  now be saved in a ytdata format and loaded back in by yt. See 
+  :ref:`saving_data`.
+* Totally re-worked volume rendering API. The old API is still available for users
+  who prefer it, however. See :ref:`volume_rendering`.
+* Support for unstructured mesh visualization. See 
+  :ref:`unstructured-mesh-slices` and :ref:`unstructured_mesh_rendering`.
+* Interactive Data Visualization for AMR and unstructured mesh datasets. See
+  :ref:`interactive_data_visualization`.
+* Several new colormaps, including a new default, 'arbre'. The other new
+  colormaps are named 'octarine', 'kelp', and 'dusk'. All these new colormaps
+  were generated using the `viscm package
+  <https://github.com/matplotlib/viscm>`_ and should do a better job of
+  representing the data for colorblind viewers and when printed out in
+  grayscale. See :ref:`colormaps` for more detail.
+* New frontends for the :ref:`ExodusII <loading-exodusii-data>`, 
+  :ref:`GAMER <loading-gamer-data>`, and :ref:`Gizmo <loading-gizmo-data>` data 
+  formats.
+* The unit system associated with a dataset is now customizable, defaulting to
+  CGS. See :ref:`unit_systems`.
+* Enhancements and usability improvements for analysis modules, especially the
+  ``absorption_spectrum``, ``photon_simulator``, and ``light_ray`` modules. See
+  :ref:`synthetic-observations`.
+* Data objects can now be created via an alternative Numpy-like API. See
+  :ref:`quickly-selecting-data`.
+* A line integral convolution plot modification. See
+  :ref:`annotate-line-integral-convolution`.
+* Many speed optimizations, including to the volume rendering, units, tests,
+  covering grids, the absorption spectrum and photon simulator analysis modules,
+  and ghost zone generation.
+* Packaging and release-related improvements: better install and setup scripts,
+  automated PR backporting.
+* Readability improvements to the codebase, including linting, removing dead
+  code, and refactoring much of the Cython.
+* Improvements to the CI infrastructure, including more extensible answer tests
+  and automated testing for Python 3 and Windows.
+* Numerous documentation improvements, including formatting tweaks, bugfixes,
+  and many new cookbook recipes.
+* Support for geographic (lat/lon) coordinates.
+* Several improvements for SPH codes, including alternative smoothing kernels,
+  an ``add_smoothed_particle_field`` function, and particle type-aware octree
+  construction for Gadget data.
+* Roundtrip conversions between Pint and yt units.
+* Added halo data containers for gadget_fof frontend.
+* Enabled support for spherical datasets in the BoxLib frontend.
+* Many new tests have been added.
+* Better hashing for Selector objects.
+
+Minor enhancements and bugfixes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Fixed many bugs related to Python 3 compatibility
+* Fixed bugs related to compatibility issues with newer versions of numpy
+* Added the ability to export data objects to a Pandas dataframe
+* Added support for the fabs ufunc to YTArray
+* Fixed two licensing issues
+* Fixed a number of bugs related to Windows compatibility.
+* We now avoid hard-to-decipher tracebacks when loading empty files or
+  directories
+* Fixed a bug related to ART star particle creation time field
+* Fixed a bug caused by using the wrong int type for indexing in particle deposit
+* Fixed a NameError bug in comparing temperature units with offsets
+* Fixed an API bug in YTArray casting during coercion from YTQuantity
+* Added loadtxt and savetxt convenience functions for ``YTArray``
+* Fixed an issue caused by not sorting species names with Enzo
+* Fixed a units bug for RAMSES when ``boxlen > 1``.
+* Fixed ``process_chunk`` function for non-cartesian geometry.
+* Added ``scale_factor`` attribute to cosmological simulation datasets
+* Fixed a bug where "center" vectors are used instead of "normal" vectors in
+  get_sph_phi(), etc.
+* Fixed issues involving invalid FRBs when users called _setup_plots in their
+  scripts
+* Added a ``text_args`` keyword to ``annotate_scale()`` callback
+* Added a print_stats function for RAMSES
+* Fixed a number of bugs in the Photon Simulator
+* Added support for particle fields to the [Min,Max]Location derived quantities
+* Fixed some units bugs for Gadget cosmology simulations
+* Fixed a bug with Gadget/GIZMO StarFormationRate units
+* Fixed an issue in TimeSeriesData where all the filenames were getting passed
+  to ``load`` on each processor.
+* Fixed a units bug in the Tipsy frontend
+* Ensured that ARTIOIndex.get_smallest_dx() returns a quantity with units
+* Ensured that plots are valid after invalidating the figure
+* Fixed a bug regarding code unit labels
+* Fixed a bug with reading Tipsy Aux files
+* Added an effective redshift field to the Light Ray analysis module for use in
+  AbsorptionSpectrum
+* Fixed a bug with the redshift calculation in LightRay analysis module
+* Fixed a bug in the Orion frontend when you had more than 10 on-disk particle
+  fields in the file
+* Detect more types of ART files
+* Update derived_field_list in add_volume_weighted_smoothed_field
+* Fixed casting issues for 1D and 2D Enzo simulations
+* Avoid type indirection when setting up data object entry points
+* Fixed issues with SIMPUT files
+* Fixed loading athena data in python3 with provided parameters
+* Tipsy cosmology unit fixes
+* Fixed bad unit labels for compound units
+* Making the xlim and ylim of the PhasePlot plot axes controllable
+* Adding grid_arrays to grid_container
+* An Athena and a GDF bugfix
+* A small bugfix and some small enhancements for sunyaev_zeldovich
+* Defer to coordinate handlers for width
+* Make array_like_field return same units as get_data
+* Fixing bug in ray "dts" and "t" fields
+* Check against string_types not str
+* Closed a loophole that allowed improper LightRay use
+* Enabling AbsorptionSpectrum to deposit unresolved spectral lines
+* Fixed an ART byte/string/array issue
+* Changing AbsorptionSpectrum attribute lambda_bins to be lambda_field for
+  consistency
+* No longer require user to save to disk when generating an AbsorptionSpectrum
+* ParticlePlot FRBs can now use save_as_dataset and save attributes properly
+* Added checks to assure ARTIO creates a metal_density field from existing metal
+  fields.
+* Added mask to LightRay to assure output elements have non-zero density (a
+  problem in some SPH datasets)
+* Added a "fields" attribute to datasets
+* Updated the TransferFunctionHelper to work with new profiles
+* Fixed a bug where the field_units kwarg to load_amr_grids didn't do anything
+* Changed photon_simulator's output file structure
+* Fixed a bug related to setting output_units.
+* Implemented ptp operation.
+* Added effects of transverse doppler redshift to LightRay
+* Fixed a casting error for float and int64 multiplication in sdf class
+* Added ability to read and write YTArrays to and from groups within HDF5 files
+* Made ftype of "on-disk" stream fields "stream"
+* Fixed a strings decoding issue in the photon simulator
+* Fixed an incorrect docstring in load_uniform_grid
+* Made PlotWindow show/hide helpers for axes and colorbar return self
+* Made Profile objects store field metadata.
+* Ensured GDF unit names are strings
+* Taught off_axis_projection about its resolution keyword.
+* Reintroduced sanitize_width for polar/cyl coordinates.
+* We now fail early when load_uniform_grid is passed data with an incorrect shape
+* Replaced progress bar with tqdm
+* Fixed redshift scaling of "Overdensity" field in yt-2.x
+* Fixed several bugs in the eps_writer
+* Fixed bug affecting 2D BoxLib simulations.
+* Implemented to_json and from_json for the UnitRegistry object
+* Fixed a number of issues with ds.find_field_values_at_point[s]
+* Fixed a bug where sunrise_exporter was using wrong imports
+* Import HUGE from utilities.physical_ratios
+* Fixed bug in ARTIO table look ups
+* Adding support for longitude and latitude
+* Adding halo data containers for gadget_fof frontend.
+* Can now compare YTArrays without copying them
+* Fixed several bugs related to active particle datasets
+* Angular_momentum_vector now only includes space for particle fields if they
+  exist.
+* Image comparison tests now print a meaningful error message if they fail.
+* Fixed numpy 1.11 compatibility issues.
+* Changed _skip_cache to be True by default.
+* Enable support for spherical datasets in the BoxLib frontend.
+* Fixed a bug in add_deposited_particle_field.
+* Fixed issues with input sanitization in the point data object.
+* Fixed a copy/paste error introduced by refactoring WeightedMeanParticleField
+* Fixed many formatting issues in the docs build
+* Now avoid creating particle unions for particle types that have no common
+  fields
+* Patched ParticlePlot to work with filtered particle fields.
+* Fixed a couple corner cases in gadget_fof frontend
+* We now properly normalise all normal vectors in functions that take a normal
+  vector (for e.g get_sph_theta)
+* Fixed a bug where the transfer function features were not always getting
+  cleared properly.
+* Made the Chombo frontend is_valid method smarter.
+* Added a get_hash() function to yt/funcs.py which returns a hash for a file
+* Added Sievert to the default unit symbol table
+* Corrected an issue with periodic "wiggle" in AbsorptionSpectrum instances
+* Made ``ds.field_list`` sorted by default
+* Bug fixes for the Nyx frontend
+* Fixed a bug where the index needed to be created before calling derived
+  quantities
+* Made latex_repr a property, computed on-demand
+* Fixed a bug in off-axis slice deposition
+* Fixed a bug with some types of octree block traversal
+* Ensured that mpi operations retain ImageArray type instead of downgrading to
+  YTArray parent class
+* Added a call to _setup_plots in the custom colorbar tickmark example
+* Fixed two minor bugs in save_annotated
+* Added ability to specify that DatasetSeries is not a mixed data type
+* Fixed a memory leak in ARTIO
+* Fixed copy/paste error in to_frb method.
+* Ensured that particle dataset max_level is consistent with the index max_level
+* Fixed an issue where fields were getting added multiple times to
+  field_info.field_list
+* Enhanced annotate_ray and annotate_arrow callbacks
+* Added GDF answer tests
+* Made the YTFieldTypeNotFound exception more informative
+* Added a new function, fake_vr_orientation_test_ds(), for use in testing
+* Ensured that instances of subclasses of YTArray have the correct type
+* Re-enabled max_level for projections, ProjectionPlot, and OffAxisProjectionPlot
+* Fixed a bug in the Orion 2 field definitions
+* Fixed a bug caused by matplotlib not being added to install_requires
+* Edited PhasePlot class to have an annotate_title method
+* Implemented annotate_cell_edges
+* Handled KeyboardInterrupt in volume rendering Cython loop
+* Made old halo finders now accept ptype
+* Updated the latex commands in yt cheatsheet
+* Fixed a circular dependency loop bug in abar field definition for FLASH
+  datasets
+* Added neutral species aliases as described in YTEP 0003
+* Fixed a logging issue: don't create a StreamHandler unless we will use it
+* Correcting how theta and phi are calculated in
+  ``_particle_velocity_spherical_radius``,
+  ``_particle_velocity_spherical_theta``,
+  ``_particle_velocity_cylindrical_radius``, and
+  ``_particle_velocity_cylindrical_theta``
+* Fixed a bug related to the field dictionary in ``load_particles``
+* Allowed for the special case of supplying width as a tuple of tuples
+* Made yt compile with MSVC on Windows
+* Fixed a bug involving mask for dt in octree
+* Merged the get_yt.sh and install_script.sh into one
+* Added tests for the install script
+* Allowed using axis names instead of dimensions for spherical pixelization
+* Fixed a bug where close() wasn't being called in HDF5FileHandler
+* Enhanced commandline image upload/delete
+* Added get_brewer_cmap to get brewer colormaps without importing palettable at
+  the top level
+* Fixed a bug where a parallel_root_only function was getting called inside
+  another parallel_root_only function
+* Exit the install script early if python can't import '_ssl' module
+* Make PlotWindow's annotate_clear method invalidate the plot
+* Adding int wrapper to avoid deprecation warning from numpy
+* Automatically create vector fields for magnetic_field
+* Allow users to completely specify the filename of a 1D profile
+* Force nose to produce meaningful traceback for cookbook recipes' tests
+* Fixed x-ray display_name and documentation
+* Try to guess and load particle file for FLASH dataset
+* Sped up top-level yt import
+* Set the field type correctly for fields added as particle fields
+* Added a position location method for octrees
+* Fixed a copy/paste error in uhstack function
+* Made trig functions give correct results when supplied data with dimensions of
+  angle but units that aren't radian
+* Print out some useful diagnostic information if check_for_openmp() fails
+* Give user-added derived fields a default field type
+* Added support for periodicity in annotate_particles.
+* Added a check for whether returned field has units in volume-weighted smoothed
+  fields
+* Casting array indices as ints in colormaps infrastructure
+* Fixed a bug where the standard particle fields weren't getting set up
+  correctly for the Orion frontends
+* Enabled LightRay to accept loaded datasets instead of just filenames
+* Allowed for adding or subtracting arrays filled with zeros without checking
+  units.
+* Fixed a bug in selection for semistructured meshes.
+* Removed 'io' from enzo particle types for active particle datasets
+* Added support for FLASH particle datasets.
+* Silenced a deprecation warning from IPython
+* Eliminated segfaults in KDTree construction
+* Fixed add_field handling when passed a tuple
+* Ensure field parameters are correct for fields that need ghost zones
+* Made it possible to use DerivedField instances to access data
+* Added ds.particle_type_counts
+* Bug fix and improvement for generating Google Cardboard VR in
+  StereoSphericalLens
+* Made DarkMatterARTDataset more robust in its _is_valid
+* Added Earth radius to units
+* Deposit hydrogen fields to grid in gizmo frontend
+* Switch to index values being int64
+* ValidateParameter ensures parameter values are used during field detection
+* Switched to using cythonize to manage dependencies in the setup script
+* ProfilePlot style changes and refactoring
+* Cancel terms with identical LaTeX representations in a LaTeX representation of
+  a unit
+* Only return early from comparison validation if base values are equal
+* Enabled particle fields for clump objects
+* Added validation checks for data types in callbacks
+* Enabled modification of image axis names in coordinate handlers
+* Only add OWLS/EAGLE ion fields if they are present
+* Ensured that PlotWindow plots continue to look the same under matplotlib 2.0
+* Fixed bug in quiver callbacks for off-axis slice plots
+* Only visit octree children if going to next level
+* Check that CIC always gets at least two cells
+* Fixed compatibility with matplotlib 1.4.3 and earlier
+* Fixed two EnzoSimulation bugs
+* Moved extraction code from YTSearchCmd to its own utility module
+* Changed amr_kdtree functions to be Node class methods
+* Sort block indices in order of ascending levels to match order of grid patches
+* MKS code unit system fixes
+* Disabled bounds checking on pixelize_element_mesh
+* Updated light_ray.py for domain width != 1
+* Implemented a DOAP file generator
+* Fixed bugs for 2D and 1D enzo IO
+* Converted mutable Dataset attributes to be properties that return copies
+* Allowing LightRay segments to extend further than one box length
+* Fixed a divide-by-zero error that occasionally happens in
+  triangle_plane_intersect
+* Make sure we have an index in subclassed derived quantities
+* Added an initial draft of an extensions document
+* Made it possible to pass field tuples to command-line plotting
+* Ensured the positions of coordinate vector lines are in code units
+* Added a minus sign to definition of sz_kinetic field
+* Added grid_levels and grid_indices fields to octrees
+* Added a morton_index derived field
+* Added Exception to AMRKDTree in the case of particle or oct-based data
+
+
+
 Version 3.2
 -----------
 
@@ -611,7 +921,7 @@
  * WebGL interface for isocontours and a pannable map widget added to Reason
  * Performance improvements for volume rendering
  * Adaptive HEALPix support
- * Column density calculations (see :ref:`radial-column-density`)
+ * Column density calculations
  * Massive speedup for 1D profiles
  * Lots more, bug fixes etc.
  * Substantial improvements to the documentation, including
@@ -733,9 +1043,9 @@
 -----------
 
 Version 1.6 is a point release, primarily notable for the new parallel halo
-finder (see :ref:`halo_finding`)
+finder (see :ref:`halo-analysis`)
 
- * (New) Parallel HOP ( http://arxiv.org/abs/1001.3411 , :ref:`halo_finding` )
+ * (New) Parallel HOP ( http://arxiv.org/abs/1001.3411 , :ref:`halo-analysis` )
  * (Beta) Software ray casting and volume rendering
    (see :ref:`volume_rendering`)
  * Rewritten, faster and better contouring engine for clump identification

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/836ef6193c3c/
Changeset:   836ef6193c3c
Branch:      yt
User:        Astrodude87
Date:        2016-08-23 15:17:22+00:00
Summary:     Removed message for when particle_age doesn't exist
Affected #:  1 file

diff -r 02281d9433440bd3f437ae948190829d35108f33 -r 836ef6193c3c2f014eb3cc9b45167b6a9f588f2e yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -184,7 +184,7 @@
                 function=function, units=unit_system["time"], take_log=False,
                   validators=[ValidateSpatial(0)])
     except :
-        print " No particle age available. "
+        None
 
     # Now some translation functions.
 


https://bitbucket.org/yt_analysis/yt/commits/e5aed79c186f/
Changeset:   e5aed79c186f
Branch:      yt
User:        Astrodude87
Date:        2016-08-23 15:39:25+00:00
Summary:     Added conditions for cosmological simulation for age fixes
Affected #:  2 files

diff -r 836ef6193c3c2f014eb3cc9b45167b6a9f588f2e -r e5aed79c186f1f0d98f8371c8d2b079ca7e5c06a yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -734,18 +734,21 @@
 
             return tau_out, t_out, delta_tau, ntable, age_tot
 
-        self.tau_frw, self.t_frw, self.dtau, self.n_frw, self.time_tot = \
-          friedman( self.omega_matter, self.omega_lambda, 1. - self.omega_matter - self.omega_lambda )
+        if self.cosmological_simulation == 0:
+            self.current_time = self.parameters['time'] * self.parameters['unit_t']
+        else :
+            self.tau_frw, self.t_frw, self.dtau, self.n_frw, self.time_tot = \
+              friedman( self.omega_matter, self.omega_lambda, 1. - self.omega_matter - self.omega_lambda )
 
-        age = self.parameters['time']
-        iage = 1 + int(10.*age/self.dtau)
-        if iage > self.n_frw/2:
-          iage = self.n_frw/2 + (iage - self.n_frw/2 )/10
+            age = self.parameters['time']
+            iage = 1 + int(10.*age/self.dtau)
+            if iage > self.n_frw/2:
+              iage = self.n_frw/2 + (iage - self.n_frw/2 )/10
 
-        self.time_simu = self.t_frw[iage  ]*(age-self.tau_frw[iage-1])/(self.tau_frw[iage]-self.tau_frw[iage-1])+ \
-                         self.t_frw[iage-1]*(age-self.tau_frw[iage  ])/(self.tau_frw[iage-1]-self.tau_frw[iage])
+            self.time_simu = self.t_frw[iage  ]*(age-self.tau_frw[iage-1])/(self.tau_frw[iage]-self.tau_frw[iage-1])+ \
+                             self.t_frw[iage-1]*(age-self.tau_frw[iage  ])/(self.tau_frw[iage-1]-self.tau_frw[iage])
  
-        self.current_time = (self.time_tot + self.time_simu)/(self.hubble_constant*1e7/3.08e24)/self.parameters['unit_t']
+            self.current_time = (self.time_tot + self.time_simu)/(self.hubble_constant*1e7/3.08e24)/self.parameters['unit_t']
 
 
     @classmethod

diff -r 836ef6193c3c2f014eb3cc9b45167b6a9f588f2e -r e5aed79c186f1f0d98f8371c8d2b079ca7e5c06a yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -101,7 +101,7 @@
             tr[field] = fpu.read_vector(f, dt)
             if field[1].startswith("particle_position"):
                 np.divide(tr[field], subset.domain.ds["boxlen"], tr[field])
-            if field[1] == "particle_age":
+            if subset.domain.ds.cosmological_simulation == 1 and field[1] == "particle_age":
               t_frw = subset.domain.ds.t_frw
               tau_frw = subset.domain.ds.tau_frw
               tsim = subset.domain.ds.time_simu


https://bitbucket.org/yt_analysis/yt/commits/4d4fb8fb8719/
Changeset:   4d4fb8fb8719
Branch:      yt
User:        Astrodude87
Date:        2016-08-23 15:45:38+00:00
Summary:     tidied up indenting
Affected #:  2 files

diff -r e5aed79c186f1f0d98f8371c8d2b079ca7e5c06a -r 4d4fb8fb8719c41a44265456ebba06bce3624ae9 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -658,22 +658,22 @@
             nstep = 0
 
             while aexp_tau >= aexp_min or aexp_t >= aexp_min:
-              nstep = nstep + 1
-              dtau = alpha * aexp_tau / dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)   
-              aexp_tau_pre = aexp_tau - dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)*dtau/2.0
-              aexp_tau = aexp_tau - dadtau(aexp_tau_pre,O_mat_0,O_vac_0,O_k_0)*dtau
-              tau = tau - dtau
+                nstep = nstep + 1
+                dtau = alpha * aexp_tau / dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)   
+                aexp_tau_pre = aexp_tau - dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)*dtau/2.0
+                aexp_tau = aexp_tau - dadtau(aexp_tau_pre,O_mat_0,O_vac_0,O_k_0)*dtau
+                tau = tau - dtau
 
-              dt = alpha * aexp_t / dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)
-              aexp_t_pre = aexp_t - dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)*dt/2.0
-              aexp_t = aexp_t - dadt(aexp_t_pre,O_mat_0,O_vac_0,O_k_0)*dt
-              t = t - dt
+                dt = alpha * aexp_t / dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)
+                aexp_t_pre = aexp_t - dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)*dt/2.0
+                aexp_t = aexp_t - dadt(aexp_t_pre,O_mat_0,O_vac_0,O_k_0)*dt
+                t = t - dt
 
             age_tot=-t
             ntable = 1000
             if nstep < ntable :
-              ntable = nstep
-              alpha = alpha / 2.
+                ntable = nstep
+                alpha = alpha / 2.
 
             tau_out = np.zeros([ntable+1])
             t_out = np.zeros([ntable+1])
@@ -693,40 +693,40 @@
             next_tau = tau + delta_tau/10.
 
             while n_out < ntable/2 : 
-              nstep = nstep + 1
-              dtau = alpha * aexp_tau / dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)
-              aexp_tau_pre = aexp_tau - dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)*dtau/2.0
-              aexp_tau = aexp_tau - dadtau(aexp_tau_pre,O_mat_0,O_vac_0,O_k_0)*dtau
-              tau = tau - dtau
+                nstep = nstep + 1
+                dtau = alpha * aexp_tau / dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)
+                aexp_tau_pre = aexp_tau - dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)*dtau/2.0
+                aexp_tau = aexp_tau - dadtau(aexp_tau_pre,O_mat_0,O_vac_0,O_k_0)*dtau
+                tau = tau - dtau
 
-              dt = alpha * aexp_t / dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)
-              aexp_t_pre = aexp_t - dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)*dt/2.0
-              aexp_t = aexp_t - dadt(aexp_t_pre,O_mat_0,O_vac_0,O_k_0)*dt
-              t = t - dt
+                dt = alpha * aexp_t / dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)
+                aexp_t_pre = aexp_t - dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)*dt/2.0
+                aexp_t = aexp_t - dadt(aexp_t_pre,O_mat_0,O_vac_0,O_k_0)*dt
+                t = t - dt
 
-              if tau < next_tau:
-                n_out = n_out + 1
-                t_out[n_out] = t
-                tau_out[n_out] = tau
-                next_tau = next_tau + delta_tau/10.
+                if tau < next_tau:
+                    n_out = n_out + 1
+                    t_out[n_out] = t
+                    tau_out[n_out] = tau
+                    next_tau = next_tau + delta_tau/10.
 
             while aexp_tau >= aexp_min or aexp_t >= aexp_min:
-              nstep = nstep + 1
-              dtau = alpha * aexp_tau / dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)
-              aexp_tau_pre = aexp_tau - dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)*dtau/2.0
-              aexp_tau = aexp_tau - dadtau(aexp_tau_pre,O_mat_0,O_vac_0,O_k_0)*dtau
-              tau = tau - dtau
+                nstep = nstep + 1
+                dtau = alpha * aexp_tau / dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)
+                aexp_tau_pre = aexp_tau - dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)*dtau/2.0
+                aexp_tau = aexp_tau - dadtau(aexp_tau_pre,O_mat_0,O_vac_0,O_k_0)*dtau
+                tau = tau - dtau
 
-              dt = alpha * aexp_t / dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)
-              aexp_t_pre = aexp_t - dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)*dt/2.0
-              aexp_t = aexp_t - dadt(aexp_t_pre,O_mat_0,O_vac_0,O_k_0)*dt
-              t = t - dt
+                dt = alpha * aexp_t / dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)
+                aexp_t_pre = aexp_t - dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)*dt/2.0
+                aexp_t = aexp_t - dadt(aexp_t_pre,O_mat_0,O_vac_0,O_k_0)*dt
+                t = t - dt
 
-              if tau < next_tau:
-                n_out = n_out + 1
-                t_out[n_out] = t
-                tau_out[n_out] = tau
-                next_tau = next_tau + delta_tau
+                if tau < next_tau:
+                    n_out = n_out + 1
+                    t_out[n_out] = t
+                    tau_out[n_out] = tau
+                    next_tau = next_tau + delta_tau
 
             n_out = ntable
             t_out[n_out] = t
@@ -738,12 +738,12 @@
             self.current_time = self.parameters['time'] * self.parameters['unit_t']
         else :
             self.tau_frw, self.t_frw, self.dtau, self.n_frw, self.time_tot = \
-              friedman( self.omega_matter, self.omega_lambda, 1. - self.omega_matter - self.omega_lambda )
+                friedman( self.omega_matter, self.omega_lambda, 1. - self.omega_matter - self.omega_lambda )
 
             age = self.parameters['time']
             iage = 1 + int(10.*age/self.dtau)
             if iage > self.n_frw/2:
-              iage = self.n_frw/2 + (iage - self.n_frw/2 )/10
+                iage = self.n_frw/2 + (iage - self.n_frw/2 )/10
 
             self.time_simu = self.t_frw[iage  ]*(age-self.tau_frw[iage-1])/(self.tau_frw[iage]-self.tau_frw[iage-1])+ \
                              self.t_frw[iage-1]*(age-self.tau_frw[iage  ])/(self.tau_frw[iage-1]-self.tau_frw[iage])

diff -r e5aed79c186f1f0d98f8371c8d2b079ca7e5c06a -r 4d4fb8fb8719c41a44265456ebba06bce3624ae9 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -102,18 +102,18 @@
             if field[1].startswith("particle_position"):
                 np.divide(tr[field], subset.domain.ds["boxlen"], tr[field])
             if subset.domain.ds.cosmological_simulation == 1 and field[1] == "particle_age":
-              t_frw = subset.domain.ds.t_frw
-              tau_frw = subset.domain.ds.tau_frw
-              tsim = subset.domain.ds.time_simu
-              h100 = subset.domain.ds.hubble_constant
-              nOver2 = subset.domain.ds.n_frw/2
-              for ipart, age in enumerate(tr[field]):
-                 if age < 0.:
-                   iage = 1 + int(10.*age/subset.domain.ds.dtau)
-                   if iage > nOver2:
-                     iage = nOver2 + (iage - nOver2)/10
-                   t = t_frw[iage  ]*(age-tau_frw[iage-1])/(tau_frw[iage]-tau_frw[iage-1])+ \
-                       t_frw[iage-1]*(age-tau_frw[iage  ])/(tau_frw[iage-1]-tau_frw[iage])
-                   newage = (tsim-t)/(h100*1e7/3.08e24)/subset.domain.ds['unit_t']
-                   tr[field][ipart] = np.max([0.,newage])
+                t_frw = subset.domain.ds.t_frw
+                tau_frw = subset.domain.ds.tau_frw
+                tsim = subset.domain.ds.time_simu
+                h100 = subset.domain.ds.hubble_constant
+                nOver2 = subset.domain.ds.n_frw/2
+                for ipart, age in enumerate(tr[field]):
+                    if age < 0.:
+                        iage = 1 + int(10.*age/subset.domain.ds.dtau)
+                        if iage > nOver2:
+                            iage = nOver2 + (iage - nOver2)/10
+                        t = t_frw[iage  ]*(age-tau_frw[iage-1])/(tau_frw[iage]-tau_frw[iage-1])+ \
+                            t_frw[iage-1]*(age-tau_frw[iage  ])/(tau_frw[iage-1]-tau_frw[iage])
+                        newage = (tsim-t)/(h100*1e7/3.08e24)/subset.domain.ds['unit_t']
+                        tr[field][ipart] = np.max([0.,newage])
         return tr


https://bitbucket.org/yt_analysis/yt/commits/b69c95e63743/
Changeset:   b69c95e63743
Branch:      yt
User:        Astrodude87
Date:        2016-08-23 16:10:50+00:00
Summary:     Tidied up while loops
Affected #:  2 files

diff -r 4d4fb8fb8719c41a44265456ebba06bce3624ae9 -r b69c95e6374399db3d986efa2576afff529448b5 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -184,7 +184,7 @@
                 function=function, units=unit_system["time"], take_log=False,
                   validators=[ValidateSpatial(0)])
     except :
-        None
+        pass
 
     # Now some translation functions.
 

diff -r 4d4fb8fb8719c41a44265456ebba06bce3624ae9 -r b69c95e6374399db3d986efa2576afff529448b5 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -648,6 +648,19 @@
         def dadt(aexp_t,O_mat_0,O_vac_0,O_k_0):
            return np.sqrt( (1./aexp_t)*(O_mat_0 + O_vac_0*aexp_t**3 + O_k_0*aexp_t) )
 
+        def step_cosmo(alpha,tau,aexp_tau,t,aexp_t,O_mat_0,O_vac_0,O_k_0):
+            dtau = alpha * aexp_tau / dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)
+            aexp_tau_pre = aexp_tau - dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)*dtau/2.0
+            aexp_tau = aexp_tau - dadtau(aexp_tau_pre,O_mat_0,O_vac_0,O_k_0)*dtau
+            tau = tau - dtau
+
+            dt = alpha * aexp_t / dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)
+            aexp_t_pre = aexp_t - dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)*dt/2.0
+            aexp_t = aexp_t - dadt(aexp_t_pre,O_mat_0,O_vac_0,O_k_0)*dt
+            t = t - dt
+
+            return tau,aexp_tau,t,aexp_t
+
         def friedman(O_mat_0, O_vac_0, O_k_0):
             alpha = 1.e-5
             aexp_min = 1.e-3
@@ -659,15 +672,7 @@
 
             while aexp_tau >= aexp_min or aexp_t >= aexp_min:
                 nstep = nstep + 1
-                dtau = alpha * aexp_tau / dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)   
-                aexp_tau_pre = aexp_tau - dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)*dtau/2.0
-                aexp_tau = aexp_tau - dadtau(aexp_tau_pre,O_mat_0,O_vac_0,O_k_0)*dtau
-                tau = tau - dtau
-
-                dt = alpha * aexp_t / dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)
-                aexp_t_pre = aexp_t - dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)*dt/2.0
-                aexp_t = aexp_t - dadt(aexp_t_pre,O_mat_0,O_vac_0,O_k_0)*dt
-                t = t - dt
+                tau,aexp_tau,t,aexp_t = step_cosmo(alpha,tau,aexp_tau,t,aexp_t,O_mat_0,O_vac_0,O_k_0)
 
             age_tot=-t
             ntable = 1000
@@ -684,7 +689,6 @@
             aexp_t = 1.
             tau = 0.
             t = 0.
-            nstep = 0
 
             n_out = 0
             t_out[n_out] = t
@@ -693,16 +697,7 @@
             next_tau = tau + delta_tau/10.
 
             while n_out < ntable/2 : 
-                nstep = nstep + 1
-                dtau = alpha * aexp_tau / dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)
-                aexp_tau_pre = aexp_tau - dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)*dtau/2.0
-                aexp_tau = aexp_tau - dadtau(aexp_tau_pre,O_mat_0,O_vac_0,O_k_0)*dtau
-                tau = tau - dtau
-
-                dt = alpha * aexp_t / dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)
-                aexp_t_pre = aexp_t - dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)*dt/2.0
-                aexp_t = aexp_t - dadt(aexp_t_pre,O_mat_0,O_vac_0,O_k_0)*dt
-                t = t - dt
+                tau,aexp_tau,t,aexp_t = step_cosmo(alpha,tau,aexp_tau,t,aexp_t,O_mat_0,O_vac_0,O_k_0)
 
                 if tau < next_tau:
                     n_out = n_out + 1
@@ -711,16 +706,7 @@
                     next_tau = next_tau + delta_tau/10.
 
             while aexp_tau >= aexp_min or aexp_t >= aexp_min:
-                nstep = nstep + 1
-                dtau = alpha * aexp_tau / dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)
-                aexp_tau_pre = aexp_tau - dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)*dtau/2.0
-                aexp_tau = aexp_tau - dadtau(aexp_tau_pre,O_mat_0,O_vac_0,O_k_0)*dtau
-                tau = tau - dtau
-
-                dt = alpha * aexp_t / dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)
-                aexp_t_pre = aexp_t - dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)*dt/2.0
-                aexp_t = aexp_t - dadt(aexp_t_pre,O_mat_0,O_vac_0,O_k_0)*dt
-                t = t - dt
+                tau,aexp_tau,t,aexp_t = step_cosmo(alpha,tau,aexp_tau,t,aexp_t,O_mat_0,O_vac_0,O_k_0)
 
                 if tau < next_tau:
                     n_out = n_out + 1


https://bitbucket.org/yt_analysis/yt/commits/72307a87dd9a/
Changeset:   72307a87dd9a
Branch:      yt
User:        Ben Thompson
Date:        2016-08-26 01:48:34+00:00
Summary:     cythonised the age loading routine, included it in the build tools.
Affected #:  2 files

diff -r b69c95e6374399db3d986efa2576afff529448b5 -r 72307a87dd9aa0a7f5684f19017cfb7b7779a99f setup.py
--- a/setup.py
+++ b/setup.py
@@ -152,6 +152,8 @@
     Extension("yt.utilities.lib.primitives",
               ["yt/utilities/lib/primitives.pyx"],
               libraries=std_libs),
+    Extension("yt.utilities.lib.time_arrays",
+              ["yt/utilities/lib/time_arrays.pyx"]),
     Extension("yt.utilities.lib.origami",
               ["yt/utilities/lib/origami.pyx",
                "yt/utilities/lib/origami_tags.c"],

diff -r b69c95e6374399db3d986efa2576afff529448b5 -r 72307a87dd9aa0a7f5684f19017cfb7b7779a99f yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -43,6 +43,9 @@
     RAMSESOctreeContainer
 from yt.arraytypes import blankRecordArray
 
+from yt.utilities.lib.time_arrays import \
+    friedman
+
 class RAMSESDomainFile(object):
     _last_mask = None
     _last_selector_id = None
@@ -642,83 +645,6 @@
         self.max_level = rheader['levelmax'] - self.min_level - 1
         f.close()
 
-        def dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0):
-           return np.sqrt( aexp_tau**3 * (O_mat_0 + O_vac_0*aexp_tau**3 + O_k_0*aexp_tau) )
-
-        def dadt(aexp_t,O_mat_0,O_vac_0,O_k_0):
-           return np.sqrt( (1./aexp_t)*(O_mat_0 + O_vac_0*aexp_t**3 + O_k_0*aexp_t) )
-
-        def step_cosmo(alpha,tau,aexp_tau,t,aexp_t,O_mat_0,O_vac_0,O_k_0):
-            dtau = alpha * aexp_tau / dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)
-            aexp_tau_pre = aexp_tau - dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)*dtau/2.0
-            aexp_tau = aexp_tau - dadtau(aexp_tau_pre,O_mat_0,O_vac_0,O_k_0)*dtau
-            tau = tau - dtau
-
-            dt = alpha * aexp_t / dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)
-            aexp_t_pre = aexp_t - dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)*dt/2.0
-            aexp_t = aexp_t - dadt(aexp_t_pre,O_mat_0,O_vac_0,O_k_0)*dt
-            t = t - dt
-
-            return tau,aexp_tau,t,aexp_t
-
-        def friedman(O_mat_0, O_vac_0, O_k_0):
-            alpha = 1.e-5
-            aexp_min = 1.e-3
-            aexp_tau = 1.
-            aexp_t = 1.
-            tau = 0.
-            t = 0.
-            nstep = 0
-
-            while aexp_tau >= aexp_min or aexp_t >= aexp_min:
-                nstep = nstep + 1
-                tau,aexp_tau,t,aexp_t = step_cosmo(alpha,tau,aexp_tau,t,aexp_t,O_mat_0,O_vac_0,O_k_0)
-
-            age_tot=-t
-            ntable = 1000
-            if nstep < ntable :
-                ntable = nstep
-                alpha = alpha / 2.
-
-            tau_out = np.zeros([ntable+1])
-            t_out = np.zeros([ntable+1])
-            # (sampling the first half of the table more finely than second half):
-            delta_tau = 20.*tau/ntable/11.
-
-            aexp_tau = 1.
-            aexp_t = 1.
-            tau = 0.
-            t = 0.
-
-            n_out = 0
-            t_out[n_out] = t
-            tau_out[n_out] = tau
-
-            next_tau = tau + delta_tau/10.
-
-            while n_out < ntable/2 : 
-                tau,aexp_tau,t,aexp_t = step_cosmo(alpha,tau,aexp_tau,t,aexp_t,O_mat_0,O_vac_0,O_k_0)
-
-                if tau < next_tau:
-                    n_out = n_out + 1
-                    t_out[n_out] = t
-                    tau_out[n_out] = tau
-                    next_tau = next_tau + delta_tau/10.
-
-            while aexp_tau >= aexp_min or aexp_t >= aexp_min:
-                tau,aexp_tau,t,aexp_t = step_cosmo(alpha,tau,aexp_tau,t,aexp_t,O_mat_0,O_vac_0,O_k_0)
-
-                if tau < next_tau:
-                    n_out = n_out + 1
-                    t_out[n_out] = t
-                    tau_out[n_out] = tau
-                    next_tau = next_tau + delta_tau
-
-            n_out = ntable
-            t_out[n_out] = t
-            tau_out[n_out] = tau
-
-            return tau_out, t_out, delta_tau, ntable, age_tot
 
         if self.cosmological_simulation == 0:
             self.current_time = self.parameters['time'] * self.parameters['unit_t']


https://bitbucket.org/yt_analysis/yt/commits/01731406f39f/
Changeset:   01731406f39f
Branch:      yt
User:        cosmosquark
Date:        2016-08-26 01:54:49+00:00
Summary:     adding files
Affected #:  2 files

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/d1e9e52f559e/
Changeset:   d1e9e52f559e
Branch:      yt
User:        ngoldbaum
Date:        2016-08-26 20:18:28+00:00
Summary:     Removing autogenerated C file
Affected #:  2 files

diff -r 01731406f39f12785ada952e937671b1c5bb9ffc -r d1e9e52f559e00b5b2854d96dc377beca6cc38c0 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -61,6 +61,7 @@
 yt/utilities/lib/quad_tree.c
 yt/utilities/lib/ray_integrators.c
 yt/utilities/lib/ragged_arrays.c
+yt/utilities/lib/time_arrays.c
 yt/utilities/lib/grid_traversal.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/dd63fcc6a78d/
Changeset:   dd63fcc6a78d
Branch:      yt
User:        ngoldbaum
Date:        2016-08-26 20:27:34+00:00
Summary:     remove try/except. use 'age' instead of 'particle_age', add 'age' to ramses 'particle_age' aliases
Affected #:  2 files

diff -r d1e9e52f559e00b5b2854d96dc377beca6cc38c0 -r dd63fcc6a78de53e42addc57f8e520b597c30901 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -175,16 +175,13 @@
                 function=function, units=unit_system["velocity"], take_log=False,
                 validators=[ValidateSpatial(0)])
 
-    try : 
-        for method, name in zip(("cic", "sum"), ("cic", "nn")):
-            function = _get_density_weighted_deposit_field(
-                "particle_age", "s", method)
-            registry.add_field(
-                ("deposit", ("%s_"+name+"_age") % (ptype)),
-                function=function, units=unit_system["time"], take_log=False,
-                  validators=[ValidateSpatial(0)])
-    except :
-        pass
+    for method, name in zip(("cic", "sum"), ("cic", "nn")):
+        function = _get_density_weighted_deposit_field(
+            "age", "s", method)
+        registry.add_field(
+            ("deposit", ("%s_"+name+"_age") % (ptype)),
+            function=function, units=unit_system["time"], take_log=False,
+            validators=[ValidateSpatial(0)])
 
     # Now some translation functions.
 

diff -r d1e9e52f559e00b5b2854d96dc377beca6cc38c0 -r dd63fcc6a78de53e42addc57f8e520b597c30901 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -80,7 +80,7 @@
         ("particle_mass", ("code_mass", [], None)),
         ("particle_identifier", ("", ["particle_index"], None)),
         ("particle_refinement_level", ("", [], None)),
-        ("particle_age", ("code_time", [], None)),
+        ("particle_age", ("code_time", ['age'], None)),
         ("particle_metallicity", ("", [], None)),
     )
 


https://bitbucket.org/yt_analysis/yt/commits/e4d2d36c7301/
Changeset:   e4d2d36c7301
Branch:      yt
User:        Astrodude87
Date:        2016-08-26 21:35:45+00:00
Summary:     Sped up age indexing and renamed new utilities file
Affected #:  3 files

diff -r dd63fcc6a78de53e42addc57f8e520b597c30901 -r e4d2d36c7301c479e3e31a99072c9ae5a0f3a6ed setup.py
--- a/setup.py
+++ b/setup.py
@@ -152,8 +152,8 @@
     Extension("yt.utilities.lib.primitives",
               ["yt/utilities/lib/primitives.pyx"],
               libraries=std_libs),
-    Extension("yt.utilities.lib.time_arrays",
-              ["yt/utilities/lib/time_arrays.pyx"]),
+    Extension("yt.utilities.lib.cosmology_time",
+              ["yt/utilities/lib/cosmology_time.pyx"]),
     Extension("yt.utilities.lib.origami",
               ["yt/utilities/lib/origami.pyx",
                "yt/utilities/lib/origami_tags.c"],

diff -r dd63fcc6a78de53e42addc57f8e520b597c30901 -r e4d2d36c7301c479e3e31a99072c9ae5a0f3a6ed yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -654,8 +654,7 @@
 
             age = self.parameters['time']
             iage = 1 + int(10.*age/self.dtau)
-            if iage > self.n_frw/2:
-                iage = self.n_frw/2 + (iage - self.n_frw/2 )/10
+            iage = np.min([iage,self.n_frw/2 + (iage - self.n_frw/2)/10])
 
             self.time_simu = self.t_frw[iage  ]*(age-self.tau_frw[iage-1])/(self.tau_frw[iage]-self.tau_frw[iage-1])+ \
                              self.t_frw[iage-1]*(age-self.tau_frw[iage  ])/(self.tau_frw[iage-1]-self.tau_frw[iage])

diff -r dd63fcc6a78de53e42addc57f8e520b597c30901 -r e4d2d36c7301c479e3e31a99072c9ae5a0f3a6ed yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -110,8 +110,8 @@
                 for ipart, age in enumerate(tr[field]):
                     if age < 0.:
                         iage = 1 + int(10.*age/subset.domain.ds.dtau)
-                        if iage > nOver2:
-                            iage = nOver2 + (iage - nOver2)/10
+                        iage = np.min([iage,nOver2 + (iage - nOver2)/10])
+
                         t = t_frw[iage  ]*(age-tau_frw[iage-1])/(tau_frw[iage]-tau_frw[iage-1])+ \
                             t_frw[iage-1]*(age-tau_frw[iage  ])/(tau_frw[iage-1]-tau_frw[iage])
                         newage = (tsim-t)/(h100*1e7/3.08e24)/subset.domain.ds['unit_t']


https://bitbucket.org/yt_analysis/yt/commits/8579e9b74ced/
Changeset:   8579e9b74ced
Branch:      yt
User:        Astrodude87
Date:        2016-08-26 21:41:34+00:00
Summary:     Forgot to change name of import
Affected #:  1 file

diff -r e4d2d36c7301c479e3e31a99072c9ae5a0f3a6ed -r 8579e9b74cedb50e9d2cace9b9e86667fdcb0df6 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -43,7 +43,7 @@
     RAMSESOctreeContainer
 from yt.arraytypes import blankRecordArray
 
-from yt.utilities.lib.time_arrays import \
+from yt.utilities.lib.cosmology_time import \
     friedman
 
 class RAMSESDomainFile(object):


https://bitbucket.org/yt_analysis/yt/commits/7e409d0bc97c/
Changeset:   7e409d0bc97c
Branch:      yt
User:        Astrodude87
Date:        2016-08-26 21:44:37+00:00
Summary:     Added and removed files.
Affected #:  3 files

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/930390a2faf5/
Changeset:   930390a2faf5
Branch:      yt
User:        Astrodude87
Date:        2016-08-26 21:47:56+00:00
Summary:     Removed c file
Affected #:  1 file

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/52220df56bfd/
Changeset:   52220df56bfd
Branch:      yt
User:        Astrodude87
Date:        2016-08-26 22:01:51+00:00
Summary:     Added ignore
Affected #:  1 file

diff -r 930390a2faf5e3d79669bf2e933a3a7306a447c0 -r 52220df56bfdce6c90d46c85784d0949dcd3d8d6 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -61,7 +61,7 @@
 yt/utilities/lib/quad_tree.c
 yt/utilities/lib/ray_integrators.c
 yt/utilities/lib/ragged_arrays.c
-yt/utilities/lib/time_arrays.c
+yt/utilities/lib/cosmology_time.c
 yt/utilities/lib/grid_traversal.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h


https://bitbucket.org/yt_analysis/yt/commits/25eb3dfab319/
Changeset:   25eb3dfab319
Branch:      yt
User:        ngoldbaum
Date:        2016-08-26 22:35:04+00:00
Summary:     vectorize age calculation
Affected #:  1 file

diff -r 52220df56bfdce6c90d46c85784d0949dcd3d8d6 -r 25eb3dfab319b4d996637699d2881e986a5830ed yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -19,6 +19,7 @@
 from yt.utilities.io_handler import \
     BaseIOHandler
 from yt.utilities.logger import ytLogger as mylog
+from yt.utilities.physical_ratios import cm_per_km, cm_per_mpc
 import yt.utilities.fortran_utils as fpu
 from yt.extern.six import PY3
 
@@ -101,19 +102,22 @@
             tr[field] = fpu.read_vector(f, dt)
             if field[1].startswith("particle_position"):
                 np.divide(tr[field], subset.domain.ds["boxlen"], tr[field])
-            if subset.domain.ds.cosmological_simulation == 1 and field[1] == "particle_age":
-                t_frw = subset.domain.ds.t_frw
-                tau_frw = subset.domain.ds.tau_frw
+            cosmo = subset.domain.ds.cosmological_simulation
+            if cosmo == 1 and field[1] == "particle_age":
+                tf = subset.domain.ds.t_frw
+                tauf = subset.domain.ds.tau_frw
                 tsim = subset.domain.ds.time_simu
                 h100 = subset.domain.ds.hubble_constant
                 nOver2 = subset.domain.ds.n_frw/2
-                for ipart, age in enumerate(tr[field]):
-                    if age < 0.:
-                        iage = 1 + int(10.*age/subset.domain.ds.dtau)
-                        iage = np.min([iage,nOver2 + (iage - nOver2)/10])
-
-                        t = t_frw[iage  ]*(age-tau_frw[iage-1])/(tau_frw[iage]-tau_frw[iage-1])+ \
-                            t_frw[iage-1]*(age-tau_frw[iage  ])/(tau_frw[iage-1]-tau_frw[iage])
-                        newage = (tsim-t)/(h100*1e7/3.08e24)/subset.domain.ds['unit_t']
-                        tr[field][ipart] = np.max([0.,newage])
+                ages = tr[field]
+                wh = ages < 0
+                iage = 1 + (10*ages[wh]/subset.domain.ds.dtau)
+                iage = np.minimum(iage, nOver2 + (iage - nOver2)/10.)
+                iage = iage.astype('int')
+                t = (tf[iage]*(ages[wh] - tauf[iage - 1]) /
+                     (tauf[iage] - tauf[iage - 1]))
+                t = t + (tf[iage-1]*(ages[wh]-tauf[iage]) /
+                         (tauf[iage-1]-tauf[iage]))
+                newages = (tsim - t)/(h100 * 100 * cm_per_km / cm_per_mpc)
+                tr[field][wh] = np.maximum(np.zeros_like(newages), newages)
         return tr


https://bitbucket.org/yt_analysis/yt/commits/a82e844da980/
Changeset:   a82e844da980
Branch:      yt
User:        Astrodude87
Date:        2016-08-26 22:42:13+00:00
Summary:     rescaled by unit_t so ages are correct
Affected #:  1 file

diff -r 25eb3dfab319b4d996637699d2881e986a5830ed -r a82e844da980b4bd39e61bf5fa06321390a1a891 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -118,6 +118,6 @@
                      (tauf[iage] - tauf[iage - 1]))
                 t = t + (tf[iage-1]*(ages[wh]-tauf[iage]) /
                          (tauf[iage-1]-tauf[iage]))
-                newages = (tsim - t)/(h100 * 100 * cm_per_km / cm_per_mpc)
+                newages = (tsim - t)/(h100 * 100 * cm_per_km / cm_per_mpc)/subset.domain.ds['unit_t']
                 tr[field][wh] = np.maximum(np.zeros_like(newages), newages)
         return tr


https://bitbucket.org/yt_analysis/yt/commits/c6b2faeab9ed/
Changeset:   c6b2faeab9ed
Branch:      yt
User:        Astrodude87
Date:        2016-08-26 23:31:49+00:00
Summary:     Moved age vectorisation to cython utility file.
Affected #:  2 files

diff -r a82e844da980b4bd39e61bf5fa06321390a1a891 -r c6b2faeab9ed743f065bcea735683f5a4bc0e10f yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -21,6 +21,8 @@
 from yt.utilities.logger import ytLogger as mylog
 from yt.utilities.physical_ratios import cm_per_km, cm_per_mpc
 import yt.utilities.fortran_utils as fpu
+from yt.utilities.lib.cosmology_time import \
+    get_ramses_ages
 from yt.extern.six import PY3
 
 if PY3:
@@ -105,19 +107,12 @@
             cosmo = subset.domain.ds.cosmological_simulation
             if cosmo == 1 and field[1] == "particle_age":
                 tf = subset.domain.ds.t_frw
+                dtau = subset.domain.ds.dtau
                 tauf = subset.domain.ds.tau_frw
                 tsim = subset.domain.ds.time_simu
                 h100 = subset.domain.ds.hubble_constant
                 nOver2 = subset.domain.ds.n_frw/2
+                t_scale = 1./(h100 * 100 * cm_per_km / cm_per_mpc)/subset.domain.ds['unit_t']
                 ages = tr[field]
-                wh = ages < 0
-                iage = 1 + (10*ages[wh]/subset.domain.ds.dtau)
-                iage = np.minimum(iage, nOver2 + (iage - nOver2)/10.)
-                iage = iage.astype('int')
-                t = (tf[iage]*(ages[wh] - tauf[iage - 1]) /
-                     (tauf[iage] - tauf[iage - 1]))
-                t = t + (tf[iage-1]*(ages[wh]-tauf[iage]) /
-                         (tauf[iage-1]-tauf[iage]))
-                newages = (tsim - t)/(h100 * 100 * cm_per_km / cm_per_mpc)/subset.domain.ds['unit_t']
-                tr[field][wh] = np.maximum(np.zeros_like(newages), newages)
+                tr[field] = get_ramses_ages(tf,tauf,dtau,tsim,t_scale,ages,nOver2,len(ages))            
         return tr

diff -r a82e844da980b4bd39e61bf5fa06321390a1a891 -r c6b2faeab9ed743f065bcea735683f5a4bc0e10f yt/utilities/lib/cosmology_time.pyx
--- a/yt/utilities/lib/cosmology_time.pyx
+++ b/yt/utilities/lib/cosmology_time.pyx
@@ -77,3 +77,24 @@
 
     return tau_out,t_out,delta_tau,ntable,age_tot
 
+cpdef get_ramses_ages(np.ndarray[double,mode='c'] tf, 
+                     np.ndarray[double,mode='c'] tauf,  
+                     double dtau, 
+                     double tsim, 
+                     double t_scale, 
+                     np.ndarray[double,mode='c'] ages, 
+                     int nOver2, 
+                     int ntot):
+
+    cdef np.ndarray[double,mode='c'] t
+    cdef np.ndarray[double,mode='c'] dage
+    cdef np.ndarray[int,mode='c'] iage
+
+    dage = 1 + (10*ages/dtau)
+    dage = np.minimum(dage, nOver2 + (dage - nOver2)/10.)
+    iage = np.array(dage,dtype=np.int32)
+
+    t = (tf[iage]*(ages - tauf[iage - 1]) / (tauf[iage] - tauf[iage - 1]))
+    t = t + (tf[iage-1]*(ages-tauf[iage]) / (tauf[iage-1]-tauf[iage]))
+    return  (tsim - t)*t_scale
+ 


https://bitbucket.org/yt_analysis/yt/commits/146a5e02f7fe/
Changeset:   146a5e02f7fe
Branch:      yt
User:        ngoldbaum
Date:        2016-09-07 18:17:01+00:00
Summary:     Merged in Astrodude87/yt (pull request #2346)

Fixing Ramses time values
Affected #:  7 files

diff -r c144003a5fd281a25c7d283899141cca10cb340b -r 146a5e02f7fe72bf4496b89871244c5f795a2ff6 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -63,6 +63,7 @@
 yt/utilities/lib/quad_tree.c
 yt/utilities/lib/ray_integrators.c
 yt/utilities/lib/ragged_arrays.c
+yt/utilities/lib/cosmology_time.c
 yt/utilities/lib/grid_traversal.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h

diff -r c144003a5fd281a25c7d283899141cca10cb340b -r 146a5e02f7fe72bf4496b89871244c5f795a2ff6 setup.py
--- a/setup.py
+++ b/setup.py
@@ -155,6 +155,8 @@
     Extension("yt.utilities.lib.primitives",
               ["yt/utilities/lib/primitives.pyx"],
               libraries=std_libs),
+    Extension("yt.utilities.lib.cosmology_time",
+              ["yt/utilities/lib/cosmology_time.pyx"]),
     Extension("yt.utilities.lib.origami",
               ["yt/utilities/lib/origami.pyx",
                "yt/utilities/lib/origami_tags.c"],

diff -r c144003a5fd281a25c7d283899141cca10cb340b -r 146a5e02f7fe72bf4496b89871244c5f795a2ff6 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -175,6 +175,14 @@
                 function=function, units=unit_system["velocity"], take_log=False,
                 validators=[ValidateSpatial(0)])
 
+    for method, name in zip(("cic", "sum"), ("cic", "nn")):
+        function = _get_density_weighted_deposit_field(
+            "age", "s", method)
+        registry.add_field(
+            ("deposit", ("%s_"+name+"_age") % (ptype)),
+            function=function, units=unit_system["time"], take_log=False,
+            validators=[ValidateSpatial(0)])
+
     # Now some translation functions.
 
     def particle_ones(field, data):

diff -r c144003a5fd281a25c7d283899141cca10cb340b -r 146a5e02f7fe72bf4496b89871244c5f795a2ff6 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -43,6 +43,9 @@
     RAMSESOctreeContainer
 from yt.arraytypes import blankRecordArray
 
+from yt.utilities.lib.cosmology_time import \
+    friedman
+
 class RAMSESDomainFile(object):
     _last_mask = None
     _last_selector_id = None
@@ -620,7 +623,6 @@
                 dom, mi, ma = f.readline().split()
                 self.hilbert_indices[int(dom)] = (float(mi), float(ma))
         self.parameters.update(rheader)
-        self.current_time = self.parameters['time'] * self.parameters['unit_t']
         self.domain_left_edge = np.zeros(3, dtype='float64')
         self.domain_dimensions = np.ones(3, dtype='int32') * \
                         2**(self.min_level+1)
@@ -643,6 +645,23 @@
         self.max_level = rheader['levelmax'] - self.min_level - 1
         f.close()
 
+
+        if self.cosmological_simulation == 0:
+            self.current_time = self.parameters['time'] * self.parameters['unit_t']
+        else :
+            self.tau_frw, self.t_frw, self.dtau, self.n_frw, self.time_tot = \
+                friedman( self.omega_matter, self.omega_lambda, 1. - self.omega_matter - self.omega_lambda )
+
+            age = self.parameters['time']
+            iage = 1 + int(10.*age/self.dtau)
+            iage = np.min([iage,self.n_frw/2 + (iage - self.n_frw/2)/10])
+
+            self.time_simu = self.t_frw[iage  ]*(age-self.tau_frw[iage-1])/(self.tau_frw[iage]-self.tau_frw[iage-1])+ \
+                             self.t_frw[iage-1]*(age-self.tau_frw[iage  ])/(self.tau_frw[iage-1]-self.tau_frw[iage])
+ 
+            self.current_time = (self.time_tot + self.time_simu)/(self.hubble_constant*1e7/3.08e24)/self.parameters['unit_t']
+
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
         if not os.path.basename(args[0]).startswith("info_"): return False

diff -r c144003a5fd281a25c7d283899141cca10cb340b -r 146a5e02f7fe72bf4496b89871244c5f795a2ff6 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -80,7 +80,7 @@
         ("particle_mass", ("code_mass", [], None)),
         ("particle_identifier", ("", ["particle_index"], None)),
         ("particle_refinement_level", ("", [], None)),
-        ("particle_age", ("code_time", [], None)),
+        ("particle_age", ("code_time", ['age'], None)),
         ("particle_metallicity", ("", [], None)),
     )
 

diff -r c144003a5fd281a25c7d283899141cca10cb340b -r 146a5e02f7fe72bf4496b89871244c5f795a2ff6 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -19,7 +19,10 @@
 from yt.utilities.io_handler import \
     BaseIOHandler
 from yt.utilities.logger import ytLogger as mylog
+from yt.utilities.physical_ratios import cm_per_km, cm_per_mpc
 import yt.utilities.fortran_utils as fpu
+from yt.utilities.lib.cosmology_time import \
+    get_ramses_ages
 from yt.extern.six import PY3
 
 if PY3:
@@ -101,4 +104,15 @@
             tr[field] = fpu.read_vector(f, dt)
             if field[1].startswith("particle_position"):
                 np.divide(tr[field], subset.domain.ds["boxlen"], tr[field])
+            cosmo = subset.domain.ds.cosmological_simulation
+            if cosmo == 1 and field[1] == "particle_age":
+                tf = subset.domain.ds.t_frw
+                dtau = subset.domain.ds.dtau
+                tauf = subset.domain.ds.tau_frw
+                tsim = subset.domain.ds.time_simu
+                h100 = subset.domain.ds.hubble_constant
+                nOver2 = subset.domain.ds.n_frw/2
+                t_scale = 1./(h100 * 100 * cm_per_km / cm_per_mpc)/subset.domain.ds['unit_t']
+                ages = tr[field]
+                tr[field] = get_ramses_ages(tf,tauf,dtau,tsim,t_scale,ages,nOver2,len(ages))            
         return tr

diff -r c144003a5fd281a25c7d283899141cca10cb340b -r 146a5e02f7fe72bf4496b89871244c5f795a2ff6 yt/utilities/lib/cosmology_time.pyx
--- /dev/null
+++ b/yt/utilities/lib/cosmology_time.pyx
@@ -0,0 +1,100 @@
+cimport numpy as np
+import numpy as np
+
+
+cdef double dadtau(double aexp_tau,double O_mat_0,double O_vac_0,double O_k_0):
+    return ( aexp_tau**3 * (O_mat_0 + O_vac_0*aexp_tau**3 + O_k_0*aexp_tau) )**0.5
+
+cdef double dadt(double aexp_t,double O_mat_0,double O_vac_0,double O_k_0):
+    return ( (1./aexp_t)*(O_mat_0 + O_vac_0*aexp_t**3 + O_k_0*aexp_t) )**0.5
+
+
+cdef step_cosmo(double alpha,double tau,double aexp_tau,double t,double aexp_t,double O_mat_0,double O_vac_0,double O_k_0):
+    cdef double dtau,aexp_tau_pre,dt,aexp_t_pre
+
+    dtau = alpha * aexp_tau / dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)
+    aexp_tau_pre = aexp_tau - dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)*dtau/2.0
+    aexp_tau = aexp_tau - dadtau(aexp_tau_pre,O_mat_0,O_vac_0,O_k_0)*dtau
+    tau = tau - dtau
+
+    dt = alpha * aexp_t / dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)
+    aexp_t_pre = aexp_t - dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)*dt/2.0
+    aexp_t = aexp_t - dadt(aexp_t_pre,O_mat_0,O_vac_0,O_k_0)*dt
+    t = t - dt
+
+    return tau,aexp_tau,t,aexp_t
+
+
+cpdef friedman(double O_mat_0,double O_vac_0,double O_k_0):
+    cdef double alpha=1.e-5,aexp_min=1.e-3,aexp_tau=1.,aexp_t=1.,tau=0.,t=0.
+    cdef int nstep=0,ntable=1000,n_out
+    cdef np.ndarray[double,mode='c'] t_out=np.zeros([ntable+1]),tau_out=np.zeros([ntable+1])
+    cdef double age_tot,delta_tau,next_tau
+
+    while aexp_tau >= aexp_min or aexp_t >= aexp_min:
+       nstep = nstep + 1
+       tau,aexp_tau,t,aexp_t = step_cosmo(alpha,tau,aexp_tau,t,aexp_t,O_mat_0,O_vac_0,O_k_0)
+
+    age_tot=-t
+    if nstep < ntable :
+        ntable = nstep
+        alpha = alpha / 2.
+
+    delta_tau = 20.*tau/ntable/11.
+
+    aexp_tau = 1.
+    aexp_t = 1.
+    tau = 0.
+    t = 0.
+
+    n_out = 0
+    t_out[n_out] = t
+    tau_out[n_out] = tau
+
+    next_tau = tau + delta_tau/10.
+
+    while n_out < ntable/2 :
+        tau,aexp_tau,t,aexp_t = step_cosmo(alpha,tau,aexp_tau,t,aexp_t,O_mat_0,O_vac_0,O_k_0)
+
+        if tau < next_tau:
+            n_out = n_out + 1
+            t_out[n_out] = t
+            tau_out[n_out] = tau
+            next_tau = next_tau + delta_tau/10.
+
+    while aexp_tau >= aexp_min or aexp_t >= aexp_min:
+        tau,aexp_tau,t,aexp_t = step_cosmo(alpha,tau,aexp_tau,t,aexp_t,O_mat_0,O_vac_0,O_k_0)
+
+        if tau < next_tau:
+            n_out = n_out + 1
+            t_out[n_out] = t
+            tau_out[n_out] = tau
+            next_tau = next_tau + delta_tau
+
+    n_out = ntable
+    t_out[n_out] = t
+    tau_out[n_out] = tau
+
+    return tau_out,t_out,delta_tau,ntable,age_tot
+
+cpdef get_ramses_ages(np.ndarray[double,mode='c'] tf, 
+                     np.ndarray[double,mode='c'] tauf,  
+                     double dtau, 
+                     double tsim, 
+                     double t_scale, 
+                     np.ndarray[double,mode='c'] ages, 
+                     int nOver2, 
+                     int ntot):
+
+    cdef np.ndarray[double,mode='c'] t
+    cdef np.ndarray[double,mode='c'] dage
+    cdef np.ndarray[int,mode='c'] iage
+
+    dage = 1 + (10*ages/dtau)
+    dage = np.minimum(dage, nOver2 + (dage - nOver2)/10.)
+    iage = np.array(dage,dtype=np.int32)
+
+    t = (tf[iage]*(ages - tauf[iage - 1]) / (tauf[iage] - tauf[iage - 1]))
+    t = t + (tf[iage-1]*(ages-tauf[iage]) / (tauf[iage-1]-tauf[iage]))
+    return  (tsim - t)*t_scale
+

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list